diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml new file mode 100644 index 0000000000..0535ccd7dc --- /dev/null +++ b/.github/workflows/CI.yml @@ -0,0 +1,33 @@ +name: CI + +on: [push, pull_request] + +jobs: + java-8: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - name: Test + run: | + cd h2 + echo $JAVA_OPTS + export JAVA_OPTS=-Xmx512m + ./build.sh jar testCI + java-11: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + - name: Test + run: | + cd h2 + echo $JAVA_OPTS + export JAVA_OPTS=-Xmx512m + ./build.sh jar testCI diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..4e63e5a68d --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +/bin/ +/.settings/ +/.classpath +/.project +/dist/ +/build/ +/nbproject/ +/data/ +/error.lock +/error.txt +/test.out.txt +.DS_Store +/.idea/ +*.iml +*.ipr +*.iws +.checkstyle +/temp/ +/h2web/ +.pmd +docs/html/testOutput.html diff --git a/.lift.toml b/.lift.toml new file mode 100644 index 0000000000..3c7beccf52 --- /dev/null +++ b/.lift.toml @@ -0,0 +1,8 @@ +# Config file for SonaType Lift analysis tool +# +# config reference here: https://help.sonatype.com/lift/configuration-reference +# + +# Tell sonatype where our pom file lives, so it can build it again +# +build = "maven -f h2/pom.xml compile" \ No newline at end of file diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000000..eed8e4b1a1 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,552 @@ +H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License +Version 2.0) or under the EPL 1.0 (Eclipse Public License). + +------------------------------------------------------------------------------- + +Mozilla Public License, version 2.0 + +1. Definitions + + 1.1. 
“Contributor” + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + + 1.2. “Contributor Version” + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + + 1.3. “Contribution” + means Covered Software of a particular Contributor. + + 1.4. “Covered Software” + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, + and Modifications of such Source Code Form, in each case + including portions thereof. + + 1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms + of a Secondary License. + + 1.6. “Executable Form” + means any form of the work other than Source Code Form. + + 1.7. “Larger Work” + means a work that combines Covered Software with other material, + in a separate file or files, that is not Covered Software. + + 1.8. “License” + means this document. + + 1.9. “Licensable” + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, + any and all of the rights conveyed by this License. + + 1.10. “Modifications” + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + + 1.11. 
“Patent Claims” of a Contributor + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + + 1.12. “Secondary License” + means either the GNU General Public License, Version 2.0, the + GNU Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those licenses. + + 1.13. “Source Code Form” + means the form of the work preferred for making modifications. + + 1.14. “You” (or “Your”) + means an individual or a legal entity exercising rights under this License. + For legal entities, “You” includes any entity that controls, + is controlled by, or is under common control with You. For purposes of + this definition, “control” means (a) the power, direct or indirect, + to cause the direction or management of such entity, whether by contract + or otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + +2. License Grants and Conditions + + 2.1. Grants + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, + or as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, + offer for sale, have made, import, and otherwise transfer either + its Contributions or its Contributor Version. + + 2.2. 
Effective Date + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor + first distributes such Contribution. + + 2.3. Limitations on Grant Scope + The licenses granted in this Section 2 are the only rights granted + under this License. No additional rights or licenses will be implied + from the distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted + by a Contributor: + + a. for any code that a Contributor has removed from + Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its + Contributor Version); or + + c. under Patent Claims infringed by Covered Software in the + absence of its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + + 2.4. Subsequent Licenses + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License + (if permitted under the terms of Section 3.3). + + 2.5. Representation + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights + to grant the rights to its Contributions conveyed by this License. + + 2.6. Fair Use + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, + or other equivalents. + + 2.7. Conditions + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the + licenses granted in Section 2.1. + +3. Responsibilities + + 3.1. 
Distribution of Source Form + All distribution of Covered Software in Source Code Form, including + any Modifications that You create or to which You contribute, must be + under the terms of this License. You must inform recipients that the + Source Code Form of the Covered Software is governed by the terms + of this License, and how they can obtain a copy of this License. + You may not attempt to alter or restrict the recipients’ rights + in the Source Code Form. + + 3.2. Distribution of Executable Form + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more than + the cost of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients’ rights in the Source Code Form under this License. + + 3.3. Distribution of a Larger Work + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of + Covered Software with a work governed by one or more Secondary Licenses, + and the Covered Software is not Incompatible With Secondary Licenses, + this License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the + Covered Software under the terms of either this License or such + Secondary License(s). + + 3.4. 
Notices + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, + or limitations of liability) contained within the Source Code Form of + the Covered Software, except that You may alter any license notices to + the extent required to remedy known factual inaccuracies. + + 3.5. Application of Additional Terms + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of + Covered Software. However, You may do so only on Your own behalf, + and not on behalf of any Contributor. You must make it absolutely clear + that any such warranty, support, indemnity, or liability obligation is + offered by You alone, and You hereby agree to indemnify every Contributor + for any liability incurred by such Contributor as a result of warranty, + support, indemnity or liability terms You offer. You may include + additional disclaimers of warranty and limitations of liability + specific to any jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + +If it is impossible for You to comply with any of the terms of this License +with respect to some or all of the Covered Software due to statute, +judicial order, or regulation then You must: (a) comply with the terms of +this License to the maximum extent possible; and (b) describe the limitations +and the code they affect. Such description must be placed in a text file +included with all distributions of the Covered Software under this License. +Except to the extent prohibited by statute or regulation, such description +must be sufficiently detailed for a recipient of ordinary skill +to be able to understand it. + +5. Termination + + 5.1. The rights granted under this License will terminate automatically + if You fail to comply with any of its terms. 
However, if You become + compliant, then the rights granted under this License from a particular + Contributor are reinstated (a) provisionally, unless and until such + Contributor explicitly and finally terminates Your grants, and (b) on an + ongoing basis, if such Contributor fails to notify You of the + non-compliance by some reasonable means prior to 60 days after You have + come back into compliance. Moreover, Your grants from a particular + Contributor are reinstated on an ongoing basis if such Contributor + notifies You of the non-compliance by some reasonable means, + this is the first time You have received notice of non-compliance with + this License from such Contributor, and You become compliant prior to + 30 days after Your receipt of the notice. + + 5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted + to You by any and all Contributors for the Covered Software under + Section 2.1 of this License shall terminate. + + 5.3. In the event of termination under Sections 5.1 or 5.2 above, all + end user license agreements (excluding distributors and resellers) which + have been validly granted by You or Your distributors under this License + prior to termination shall survive termination. + +6. Disclaimer of Warranty + +Covered Software is provided under this License on an “as is” basis, without +warranty of any kind, either expressed, implied, or statutory, including, +without limitation, warranties that the Covered Software is free of defects, +merchantable, fit for a particular purpose or non-infringing. The entire risk +as to the quality and performance of the Covered Software is with You. 
+Should any Covered Software prove defective in any respect, You +(not any Contributor) assume the cost of any necessary servicing, repair, +or correction. This disclaimer of warranty constitutes an essential part of +this License. No use of any Covered Software is authorized under this +License except under this disclaimer. + +7. Limitation of Liability + +Under no circumstances and under no legal theory, whether tort +(including negligence), contract, or otherwise, shall any Contributor, or +anyone who distributes Covered Software as permitted above, be liable to +You for any direct, indirect, special, incidental, or consequential damages +of any character including, without limitation, damages for lost profits, +loss of goodwill, work stoppage, computer failure or malfunction, or any and +all other commercial damages or losses, even if such party shall have been +informed of the possibility of such damages. This limitation of liability +shall not apply to liability for death or personal injury resulting from +such party’s negligence to the extent applicable law prohibits such +limitation. Some jurisdictions do not allow the exclusion or limitation of +incidental or consequential damages, so this exclusion and limitation may +not apply to You. + +8. Litigation + +Any litigation relating to this License may be brought only in the courts of +a jurisdiction where the defendant maintains its principal place of business +and such litigation shall be governed by laws of that jurisdiction, without +reference to its conflict-of-law provisions. Nothing in this Section shall +prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + +This License represents the complete agreement concerning the subject matter +hereof. If any provision of this License is held to be unenforceable, +such provision shall be reformed only to the extent necessary to make it +enforceable. 
Any law or regulation which provides that the language of a +contract shall be construed against the drafter shall not be used to construe +this License against a Contributor. + +10. Versions of the License + + 10.1. New Versions + Mozilla Foundation is the license steward. Except as provided in + Section 10.3, no one other than the license steward has the right to + modify or publish new versions of this License. Each version will be + given a distinguishing version number. + + 10.2. Effect of New Versions + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published + by the license steward. + + 10.3. Modified Versions + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + + 10.4. Distributing Source Code Form that is + Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this + License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the terms of the + Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed + with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to +look for such a notice. + +You may add additional accurate notices of copyright ownership. 
+ +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible With Secondary Licenses”, + as defined by the Mozilla Public License, v. 2.0. + +------------------------------------------------------------------------------- + +Eclipse Public License, Version 1.0 (EPL-1.0) + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC +LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM +CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial code and + documentation distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are +distributed by that particular Contributor. A Contribution 'originates' +from a Contributor if it was added to the Program by such Contributor itself +or anyone acting on such Contributor's behalf. Contributions do not include +additions to the Program which: (i) are separate modules of software +distributed in conjunction with the Program under their own license agreement, +and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents " mean patent claims licensable by a Contributor which are +necessarily infringed by the use or sale of its Contribution alone or +when combined with the Program. + +"Program" means the Contributions distributed in accordance with +this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, +including all Contributors. + +2. 
GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free copyright license to + reproduce, prepare derivative works of, publicly display, publicly + perform, distribute and sublicense the Contribution of such + Contributor, if any, and such derivative works, + in source code and object code form. + + b) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free patent license under + Licensed Patents to make, use, sell, offer to sell, import and + otherwise transfer the Contribution of such Contributor, if any, + in source code and object code form. This patent license shall apply + to the combination of the Contribution and the Program if, at the time + the Contribution is added by the Contributor, such addition of the + Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. + No hardware per se is licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby assumes + sole responsibility to secure any other intellectual property rights + needed, if any. For example, if a third party patent license is + required to allow Recipient to distribute the Program, it is + Recipient's responsibility to acquire that license + before distributing the Program. 
+ + d) Each Contributor represents that to its knowledge it has sufficient + copyright rights in its Contribution, if any, to grant the copyright + license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under +its own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties + and conditions, express and implied, including warranties or + conditions of title and non-infringement, and implied warranties or + conditions of merchantability and fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability + for damages, including direct, indirect, special, incidental and + consequential damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are + offered by that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such + Contributor, and informs licensees how to obtain it in a reasonable + manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained +within the Program. + +Each Contributor must identify itself as the originator of its Contribution, +if any, in a manner that reasonably allows subsequent Recipients to +identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with +respect to end users, business partners and the like. 
While this license is +intended to facilitate the commercial use of the Program, the Contributor who +includes the Program in a commercial product offering should do so in a manner +which does not create potential liability for other Contributors. Therefore, +if a Contributor includes the Program in a commercial product offering, +such Contributor ("Commercial Contributor") hereby agrees to defend and +indemnify every other Contributor ("Indemnified Contributor") against any +losses, damages and costs (collectively "Losses") arising from claims, +lawsuits and other legal actions brought by a third party against the +Indemnified Contributor to the extent caused by the acts or omissions of +such Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not apply +to any claims or Losses relating to any actual or alleged intellectual +property infringement. In order to qualify, an Indemnified Contributor must: +a) promptly notify the Commercial Contributor in writing of such claim, +and b) allow the Commercial Contributor to control, and cooperate with the +Commercial Contributor in, the defense and any related settlement +negotiations. The Indemnified Contributor may participate in any such +claim at its own expense. + +For example, a Contributor might include the Program in a commercial product +offering, Product X. That Contributor is then a Commercial Contributor. +If that Commercial Contributor then makes performance claims, or offers +warranties related to Product X, those performance claims and warranties +are such Commercial Contributor's responsibility alone. Under this section, +the Commercial Contributor would have to defend claims against the other +Contributors related to those performance claims and warranties, and if a +court requires any other Contributor to pay any damages as a result, +the Commercial Contributor must pay those damages. + +5. 
NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. +Each Recipient is solely responsible for determining the appropriateness of +using and distributing the Program and assumes all risks associated with its +exercise of rights under this Agreement , including but not limited to the +risks and costs of program errors, compliance with applicable laws, damage to +or loss of data, programs or equipment, and unavailability +or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY +CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION +LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of the +remainder of the terms of this Agreement, and without further action by +the parties hereto, such provision shall be reformed to the minimum extent +necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Program itself +(excluding combinations of the Program with other software or hardware) +infringes such Recipient's patent(s), then such Recipient's rights granted +under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to +comply with any of the material terms or conditions of this Agreement and +does not cure such failure in a reasonable period of time after becoming +aware of such noncompliance. If all Recipient's rights under this +Agreement terminate, Recipient agrees to cease use and distribution of the +Program as soon as reasonably practicable. However, Recipient's obligations +under this Agreement and any licenses granted by Recipient relating to the +Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and may +only be modified in the following manner. The Agreement Steward reserves +the right to publish new versions (including revisions) of this Agreement +from time to time. No one other than the Agreement Steward has the right to +modify this Agreement. The Eclipse Foundation is the initial +Agreement Steward. The Eclipse Foundation may assign the responsibility to +serve as the Agreement Steward to a suitable separate entity. Each new version +of the Agreement will be given a distinguishing version number. The Program +(including Contributions) may always be distributed subject to the version +of the Agreement under which it was received. In addition, after a new version +of the Agreement is published, Contributor may elect to distribute the Program +(including its Contributions) under the new version. 
Except as expressly +stated in Sections 2(a) and 2(b) above, Recipient receives no rights or +licenses to the intellectual property of any Contributor under this Agreement, +whether expressly, by implication, estoppel or otherwise. All rights in the +Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the +intellectual property laws of the United States of America. No party to +this Agreement will bring a legal action under this Agreement more than one +year after the cause of action arose. Each party waives its rights to a +jury trial in any resulting litigation. diff --git a/README.md b/README.md new file mode 100644 index 0000000000..70de378686 --- /dev/null +++ b/README.md @@ -0,0 +1,40 @@ +[![CI](h2/src/docsrc/images/h2-logo-2.png)](https://github.com/h2database/h2database/actions?query=workflow%3ACI) +# Welcome to H2, the Java SQL database. + +## The main features of H2 are: + +* Very fast, open source, JDBC API +* Embedded and server modes; disk-based or in-memory databases +* Transaction support, multi-version concurrency +* Browser based Console application +* Encrypted databases +* Fulltext search +* Pure Java with small footprint: around 2.5 MB jar file size +* ODBC driver + +More information: https://h2database.com + +## Downloads + +[Download latest version](https://h2database.com/html/download.html) or add to `pom.xml`: + +```XML + + com.h2database + h2 + 2.1.210 + +``` + +## Documentation + +* [Tutorial](https://h2database.com/html/tutorial.html) +* [SQL commands](https://h2database.com/html/commands.html) +* [Functions](https://h2database.com/html/functions.html), [aggregate functions](https://h2database.com/html/functions-aggregate.html), [window functions](https://h2database.com/html/functions-window.html) +* [Data types](https://h2database.com/html/datatypes.html) + +## Support + +* [Issue tracker](https://github.com/h2database/h2database/issues) for bug reports 
and feature requests +* [Mailing list / forum](https://groups.google.com/g/h2-database) for questions about H2 +* ['h2' tag on Stack Overflow](https://stackoverflow.com/questions/tagged/h2) for other questions (Hibernate with H2 etc.) diff --git a/h2/.gitattributes b/h2/.gitattributes new file mode 100644 index 0000000000..76f0286ead --- /dev/null +++ b/h2/.gitattributes @@ -0,0 +1,3 @@ +*.bat eol=crlf +*.sh eol=lf +*.java diff=java diff --git a/h2/.gitignore b/h2/.gitignore new file mode 100644 index 0000000000..b90461133b --- /dev/null +++ b/h2/.gitignore @@ -0,0 +1,17 @@ +.checkstyle +.classpath +.project +.settings +benchmark.html +bin +coverage +data +docs +ext +error.* +temp +test.out.txt +.idea/ +*.log +target/ +_tmp* diff --git a/h2/.mvn/wrapper/maven-wrapper.jar b/h2/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000..c6feb8bb6f Binary files /dev/null and b/h2/.mvn/wrapper/maven-wrapper.jar differ diff --git a/h2/.mvn/wrapper/maven-wrapper.properties b/h2/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..c9023edfe7 --- /dev/null +++ b/h2/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip \ No newline at end of file diff --git a/h2/MAVEN.md b/h2/MAVEN.md new file mode 100644 index 0000000000..427fa8a622 --- /dev/null +++ b/h2/MAVEN.md @@ -0,0 +1,63 @@ +# H2 + +Welcome to H2, the Java SQL database. 
The main features of H2 are: + +* Very fast, open source, JDBC API +* Embedded and server modes; in-memory databases +* Browser based Console application +* Small footprint: around 2.5 MB jar file size + +## Experimental Building & Testing with Maven + +### Preparation + +Use non-Maven build to create all necessary resources: + +```Batchfile +./build.cmd compile +``` + +or + +```sh +./build.sh compile +``` + +### Building + +To build only the database jar use + +```sh +mvn -Dmaven.test.skip=true package +``` + +If you don't have Maven installed use included [Maven Wrapper](https://github.com/takari/maven-wrapper) setup: + +```sh +./mvnw -Dmaven.test.skip=true package +``` + +or + +```Batchfile +./mvnw.cmd -Dmaven.test.skip=true package +``` + +Please note that jar generated with Maven is larger than official one and it does not include OSGi attributes. +Use build script with `jar` target instead if you need a compatible jar. + +### Testing + +To run the tests use + +```sh +mvn clean test +``` + +### Running + +You can run the server like this + +```sh +mvn exec:java -Dexec.mainClass=org.h2.tools.Server +``` diff --git a/h2/build.sh b/h2/build.sh index 1c181244e8..558a7945ab 100755 --- a/h2/build.sh +++ b/h2/build.sh @@ -1,13 +1,18 @@ #!/bin/sh if [ -z "$JAVA_HOME" ] ; then - if [ -d "/System/Library/Frameworks/JavaVM.framework/Home" ] ; then - export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home - else - echo "Error: JAVA_HOME is not defined." + if [[ "$OSTYPE" == "darwin"* ]]; then + if [ -d "/System/Library/Frameworks/JavaVM.framework/Home" ] ; then + export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home + else + export JAVA_HOME=`/usr/libexec/java_home` + fi fi fi +if [ -z "$JAVA_HOME" ] ; then + echo "Error: JAVA_HOME is not defined." +fi if [ "$1" = "clean" ] ; then rm -rf temp bin ; fi if [ ! -d "temp" ] ; then mkdir temp ; fi if [ ! 
-d "bin" ] ; then mkdir bin ; fi "$JAVA_HOME/bin/javac" -sourcepath src/tools -d bin src/tools/org/h2/build/*.java -"$JAVA_HOME/bin/java" -Xmx256m -cp "bin:$JAVA_HOME/lib/tools.jar:temp" org.h2.build.Build $@ +"$JAVA_HOME/bin/java" -Xmx512m -cp "bin:$JAVA_HOME/lib/tools.jar:temp" org.h2.build.Build $@ diff --git a/h2/mvnw b/h2/mvnw new file mode 100755 index 0000000000..a2c52ca653 --- /dev/null +++ b/h2/mvnw @@ -0,0 +1,235 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. 
to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # + # Look for the Apple JDKs first to preserve the existing behaviour, and then look + # for the new JDKs provided by Oracle. + # + if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then + # + # Apple JDKs + # + export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home + fi + + if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then + # + # Apple JDKs + # + export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home + fi + + if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then + # + # Oracle JDKs + # + export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home + fi + + if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then + # + # Apple JDKs + # + export JAVA_HOME=`/usr/libexec/java_home` + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` 
+ + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Migwn, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." 
+fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` +fi + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + local basedir=$(pwd) + local wdir=$(pwd) + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + wdir=$(cd "$wdir/.."; pwd) + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)} +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -Djava.net.useSystemProxies=true \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CMD_LINE_ARGS + diff --git a/h2/mvnw.cmd b/h2/mvnw.cmd new file mode 100644 index 0000000000..49520334c4 --- /dev/null +++ b/h2/mvnw.cmd @@ -0,0 +1,145 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. 
See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +set MAVEN_CMD_LINE_ARGS=%MAVEN_CONFIG% %* + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" + +set WRAPPER_JAR=""%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -Djava.net.useSystemProxies=true -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS% +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/h2/pom.xml b/h2/pom.xml new file mode 100644 index 0000000000..a0c0085569 --- /dev/null +++ b/h2/pom.xml @@ -0,0 +1,272 @@ + + 4.0.0 + + 
com.h2database + h2 + 2.1.210 + jar + H2 Database Engine + https://h2database.com + H2 Database Engine + + + + MPL 2.0 + https://www.mozilla.org/en-US/MPL/2.0/ + repo + + + EPL 1.0 + https://opensource.org/licenses/eclipse-1.0.php + repo + + + + + scm:git:https://github.com/h2database/h2database + https://github.com/h2database/h2database + + + + + thomas.tom.mueller + Thomas Mueller + thomas.tom.mueller at gmail dot com + + + + + 1.8 + 1.8 + 8.0.1 + 1.17.0 + 5.6.2 + 8.5.2 + 5.0.0 + 42.2.14 + 4.0.1 + 5.0.0 + 1.7.30 + UTF-8 + + + + + + + javax.servlet + javax.servlet-api + ${javax.servlet.version} + + + jakarta.servlet + jakarta.servlet-api + ${jakarta.servlet.version} + + + org.apache.lucene + lucene-core + ${lucene.version} + + + org.apache.lucene + lucene-analyzers-common + ${lucene.version} + + + org.apache.lucene + lucene-queryparser + ${lucene.version} + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.osgi + org.osgi.core + ${osgi.version} + + + org.osgi + org.osgi.enterprise + ${osgi.version} + + + org.locationtech.jts + jts-core + ${jts.version} + + + + + + + org.slf4j + slf4j-nop + ${slf4j.version} + test + + + org.postgresql + postgresql + ${pgjdbc.version} + test + + + org.junit.jupiter + junit-jupiter-engine + ${junit.version} + test + + + org.ow2.asm + asm + ${asm.version} + test + + + + + + + + jigsaw-jdk + + [1.9,) + + + + + default-tools.jar + + + ${java.home}/../lib/tools.jar + + + + + com.sun + tools + system + 1.8 + ${java.home}/../lib/tools.jar + + + + + default-tools.jar-mac + + + ${java.home}/../Classes/classes.jar + + + + + com.sun + tools + system + 1.8 + ${java.home}/../Classes/classes.jar + + + + + + + src/main + src/test + + + + src/main + + **/*.prop + **/*.png + **/*.jsp + **/*.ico + **/*.gif + **/*.css + **/*.js + org/h2/res/help.csv + org/h2/res/javadoc.properties + META-INF/** + + + + src/java9/precompiled + META-INF/versions/9 + + + src/java10/precompiled + META-INF/versions/10 + + + + + src/test + + 
org/h2/test/bench/test.properties + org/h2/test/script/testScrip.sql + org/h2/test/scripts/**/*.sql + org/h2/samples/newsfeed.sql + org/h2/samples/optimizations.sql + + + + + + org.apache.maven.plugins + maven-jar-plugin + 3.1.2 + + + + true + org.h2.tools.Console + + + com.h2database + true + org.h2.util.Profiler + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.0.0 + + + generate-test-sources + + add-test-source + + + + src/tools + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.2 + + + TestAllJunit.java + + + + + + + + diff --git a/h2/service/0_run_server_debug.bat b/h2/service/0_run_server_debug.bat index a5f52155e5..e7e7d2439f 100644 --- a/h2/service/0_run_server_debug.bat +++ b/h2/service/0_run_server_debug.bat @@ -1,68 +1,68 @@ -@echo off -setlocal -pushd "%~dp0" - -copy /y /b ..\bin\h2-*.jar ..\bin\h2.jar -fc /b ..\bin\h2-*.jar ..\bin\h2.jar -if not errorlevel 1 goto :start -echo Please ensure there is only one h2-*.jar file. -echo Process stopped -pause -goto :end - -:start -rem Copyright (c) 1999, 2006 Tanuki Software Inc. -rem -rem Java Service Wrapper general startup script -rem - -rem -rem Resolve the real path of the wrapper.exe -rem For non NT systems, the _REALPATH and _WRAPPER_CONF values -rem can be hard-coded below and the following test removed. -rem -if "%OS%"=="Windows_NT" goto nt -echo This script only works with NT-based versions of Windows. -goto :end - -:nt -rem -rem Find the application home. -rem -rem %~dp0 is location of current script under NT -set _REALPATH=%~dp0 - -rem Decide on the wrapper binary. 
-set _WRAPPER_BASE=wrapper -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe -if exist "%_WRAPPER_EXE%" goto conf -echo Unable to locate a Wrapper executable using any of the following names: -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -echo %_REALPATH%%_WRAPPER_BASE%.exe -pause -goto :end - -:conf -rem -rem Find the wrapper.conf -rem -set _WRAPPER_CONF="%~f1" -if not %_WRAPPER_CONF%=="" goto startup -set _WRAPPER_CONF="%_REALPATH%wrapper.conf" - -:startup -rem -rem Start the Wrapper -rem -"%_WRAPPER_EXE%" -c %_WRAPPER_CONF% -if not errorlevel 1 goto :end -pause - -:end -popd - +@echo off +setlocal +pushd "%~dp0" + +copy /y /b ..\bin\h2-*.jar ..\bin\h2.jar +fc /b ..\bin\h2-*.jar ..\bin\h2.jar +if not errorlevel 1 goto :start +echo Please ensure there is only one h2-*.jar file. +echo Process stopped +pause +goto :end + +:start +rem Copyright (c) 1999, 2006 Tanuki Software Inc. +rem +rem Java Service Wrapper general startup script +rem + +rem +rem Resolve the real path of the wrapper.exe +rem For non NT systems, the _REALPATH and _WRAPPER_CONF values +rem can be hard-coded below and the following test removed. +rem +if "%OS%"=="Windows_NT" goto nt +echo This script only works with NT-based versions of Windows. +goto :end + +:nt +rem +rem Find the application home. +rem +rem %~dp0 is location of current script under NT +set _REALPATH=%~dp0 + +rem Decide on the wrapper binary. 
+set _WRAPPER_BASE=wrapper +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe +if exist "%_WRAPPER_EXE%" goto conf +echo Unable to locate a Wrapper executable using any of the following names: +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +echo %_REALPATH%%_WRAPPER_BASE%.exe +pause +goto :end + +:conf +rem +rem Find the wrapper.conf +rem +set _WRAPPER_CONF="%~f1" +if not %_WRAPPER_CONF%=="" goto startup +set _WRAPPER_CONF="%_REALPATH%wrapper.conf" + +:startup +rem +rem Start the Wrapper +rem +"%_WRAPPER_EXE%" -c %_WRAPPER_CONF% +if not errorlevel 1 goto :end +pause + +:end +popd + diff --git a/h2/service/1_install_service.bat b/h2/service/1_install_service.bat index 9bc64e9e18..d48a5be9aa 100644 --- a/h2/service/1_install_service.bat +++ b/h2/service/1_install_service.bat @@ -1,61 +1,61 @@ -@echo off -setlocal -pushd "%~dp0" - -copy /y /b ..\bin\h2-*.jar ..\bin\h2.jar -fc /b ..\bin\h2-*.jar ..\bin\h2.jar -if not errorlevel 1 goto :start -echo Please ensure there is only one h2-*.jar file. -echo Process stopped -pause -goto :end - -:start -rem Copyright (c) 1999, 2006 Tanuki Software Inc. -rem -rem Java Service Wrapper general NT service install script -rem -if "%OS%"=="Windows_NT" goto nt -echo This script only works with NT-based versions of Windows. -goto :end - -:nt -rem -rem Find the application home. -rem -rem %~dp0 is location of current script under NT -set _REALPATH=%~dp0 - -rem Decide on the wrapper binary. 
-set _WRAPPER_BASE=wrapper -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe -if exist "%_WRAPPER_EXE%" goto conf -echo Unable to locate a Wrapper executable using any of the following names: -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -echo %_REALPATH%%_WRAPPER_BASE%.exe -pause -goto :end - -:conf -rem -rem Find the wrapper.conf -rem -set _WRAPPER_CONF="%~f1" -if not %_WRAPPER_CONF%=="" goto startup -set _WRAPPER_CONF="%_REALPATH%wrapper.conf" - -:startup -rem -rem Install the Wrapper as an NT service. -rem -"%_WRAPPER_EXE%" -i %_WRAPPER_CONF% -if not errorlevel 1 goto :end -pause - -:end +@echo off +setlocal +pushd "%~dp0" + +copy /y /b ..\bin\h2-*.jar ..\bin\h2.jar +fc /b ..\bin\h2-*.jar ..\bin\h2.jar +if not errorlevel 1 goto :start +echo Please ensure there is only one h2-*.jar file. +echo Process stopped +pause +goto :end + +:start +rem Copyright (c) 1999, 2006 Tanuki Software Inc. +rem +rem Java Service Wrapper general NT service install script +rem +if "%OS%"=="Windows_NT" goto nt +echo This script only works with NT-based versions of Windows. +goto :end + +:nt +rem +rem Find the application home. +rem +rem %~dp0 is location of current script under NT +set _REALPATH=%~dp0 + +rem Decide on the wrapper binary. 
+set _WRAPPER_BASE=wrapper +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe +if exist "%_WRAPPER_EXE%" goto conf +echo Unable to locate a Wrapper executable using any of the following names: +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +echo %_REALPATH%%_WRAPPER_BASE%.exe +pause +goto :end + +:conf +rem +rem Find the wrapper.conf +rem +set _WRAPPER_CONF="%~f1" +if not %_WRAPPER_CONF%=="" goto startup +set _WRAPPER_CONF="%_REALPATH%wrapper.conf" + +:startup +rem +rem Install the Wrapper as an NT service. +rem +"%_WRAPPER_EXE%" -i %_WRAPPER_CONF% +if not errorlevel 1 goto :end +pause + +:end popd \ No newline at end of file diff --git a/h2/service/2_start_service.bat b/h2/service/2_start_service.bat index 8f0586cd8e..b5231775ac 100644 --- a/h2/service/2_start_service.bat +++ b/h2/service/2_start_service.bat @@ -1,3 +1,3 @@ -sc start "H2DatabaseService" -if not errorlevel 1 goto :eof +sc start "H2DatabaseService" +if not errorlevel 1 goto :eof pause \ No newline at end of file diff --git a/h2/service/3_start_browser.bat b/h2/service/3_start_browser.bat index 270a2bb76c..9ad8170d56 100644 --- a/h2/service/3_start_browser.bat +++ b/h2/service/3_start_browser.bat @@ -1 +1 @@ -start http://localhost:8082 +start http://localhost:8082 diff --git a/h2/service/5_uninstall_service.bat b/h2/service/5_uninstall_service.bat index 2d74486b8e..c5a70a52e2 100644 --- a/h2/service/5_uninstall_service.bat +++ b/h2/service/5_uninstall_service.bat @@ -1,53 +1,53 @@ -@echo off -setlocal -pushd "%~dp0" - -rem Copyright (c) 1999, 2006 Tanuki Software Inc. -rem -rem Java Service Wrapper general NT service uninstall script -rem - -if "%OS%"=="Windows_NT" goto nt -echo This script only works with NT-based versions of Windows. 
-goto :end - -:nt -rem -rem Find the application home. -rem -rem %~dp0 is location of current script under NT -set _REALPATH=%~dp0 - -rem Decide on the wrapper binary. -set _WRAPPER_BASE=wrapper -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -if exist "%_WRAPPER_EXE%" goto conf -set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe -if exist "%_WRAPPER_EXE%" goto conf -echo Unable to locate a Wrapper executable using any of the following names: -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe -echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe -echo %_REALPATH%%_WRAPPER_BASE%.exe -pause -goto :end - -:conf -rem -rem Find the wrapper.conf -rem -set _WRAPPER_CONF="%~f1" -if not %_WRAPPER_CONF%=="" goto startup -set _WRAPPER_CONF="%_REALPATH%wrapper.conf" - -:startup -rem -rem Uninstall the Wrapper as an NT service. -rem -"%_WRAPPER_EXE%" -r %_WRAPPER_CONF% -if not errorlevel 1 goto :end -pause - -:end -popd +@echo off +setlocal +pushd "%~dp0" + +rem Copyright (c) 1999, 2006 Tanuki Software Inc. +rem +rem Java Service Wrapper general NT service uninstall script +rem + +if "%OS%"=="Windows_NT" goto nt +echo This script only works with NT-based versions of Windows. +goto :end + +:nt +rem +rem Find the application home. +rem +rem %~dp0 is location of current script under NT +set _REALPATH=%~dp0 + +rem Decide on the wrapper binary. 
+set _WRAPPER_BASE=wrapper +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +if exist "%_WRAPPER_EXE%" goto conf +set _WRAPPER_EXE=%_REALPATH%%_WRAPPER_BASE%.exe +if exist "%_WRAPPER_EXE%" goto conf +echo Unable to locate a Wrapper executable using any of the following names: +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-32.exe +echo %_REALPATH%%_WRAPPER_BASE%-windows-x86-64.exe +echo %_REALPATH%%_WRAPPER_BASE%.exe +pause +goto :end + +:conf +rem +rem Find the wrapper.conf +rem +set _WRAPPER_CONF="%~f1" +if not %_WRAPPER_CONF%=="" goto startup +set _WRAPPER_CONF="%_REALPATH%wrapper.conf" + +:startup +rem +rem Uninstall the Wrapper as an NT service. +rem +"%_WRAPPER_EXE%" -r %_WRAPPER_CONF% +if not errorlevel 1 goto :end +pause + +:end +popd diff --git a/h2/src/docsrc/help/help.csv b/h2/src/docsrc/help/help.csv deleted file mode 100644 index ccdfbc265c..0000000000 --- a/h2/src/docsrc/help/help.csv +++ /dev/null @@ -1,4051 +0,0 @@ -# Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, -# and the EPL 1.0 (http://h2database.com/html/license.html). -# Initial Developer: H2 Group -"SECTION","TOPIC","SYNTAX","TEXT","EXAMPLE" -"Commands (DML)","SELECT"," -SELECT [ TOP term ] [ DISTINCT | ALL ] selectExpression [,...] -FROM tableExpression [,...] [ WHERE expression ] -[ GROUP BY expression [,...] ] [ HAVING expression ] -[ { UNION [ ALL ] | MINUS | EXCEPT | INTERSECT } select ] [ ORDER BY order [,...] ] -[ LIMIT expression [ OFFSET expression ] [ SAMPLE_SIZE rowCountInt ] ] -[ FOR UPDATE ] -"," -Selects data from a table or multiple tables. -GROUP BY groups the the result by the given expression(s). -HAVING filter rows after grouping. -ORDER BY sorts the result by the given column(s) or expression(s). -UNION combines the result of this query with the results of another query. 
- -LIMIT limits the number of rows returned by the query (no limit if null or smaller than zero). -OFFSET specified how many rows to skip. -SAMPLE_SIZE limits the number of rows read for aggregate queries. - -Multiple set operators (UNION, INTERSECT, MINUS, EXPECT) are evaluated -from left to right. For compatibility with other databases and future versions -of H2 please use parentheses. - -If FOR UPDATE is specified, the tables are locked for writing. When using -MVCC, only the selected rows are locked as in an UPDATE statement. -In this case, aggregate, GROUP BY, DISTINCT queries or joins -are not allowed in this case. -"," -SELECT * FROM TEST; -SELECT * FROM TEST ORDER BY NAME; -SELECT ID, COUNT(*) FROM TEST GROUP BY ID; -SELECT NAME, COUNT(*) FROM TEST GROUP BY NAME HAVING COUNT(*) > 2; -SELECT 'ID' COL, MAX(ID) AS MAX FROM TEST UNION SELECT 'NAME', MAX(NAME) FROM TEST; -SELECT * FROM TEST LIMIT 1000; -SELECT * FROM (SELECT ID, COUNT(*) FROM TEST - GROUP BY ID UNION SELECT NULL, COUNT(*) FROM TEST) - ORDER BY 1 NULLS LAST; -" - -"Commands (DML)","INSERT"," -INSERT INTO tableName -{ [ ( columnName [,...] ) ] - { VALUES { ( { DEFAULT | expression } [,...] ) } [,...] | [ DIRECT ] [ SORTED ] select } } | - { SET { columnName = { DEFAULT | expression } } [,...] } -"," -Inserts a new row / new rows into a table. - -When using DIRECT, then the results from the query are directly applied in the target table without any intermediate step. - -When using SORTED, b-tree pages are split at the insertion point. This can improve performance and reduce disk usage. -"," -INSERT INTO TEST VALUES(1, 'Hello') -" - -"Commands (DML)","UPDATE"," -UPDATE tableName [ [ AS ] newTableAlias ] SET -{ { columnName = { DEFAULT | expression } } [,...] } | - { ( columnName [,...] ) = ( select ) } -[ WHERE expression ] [ ORDER BY order [,...] ] [ LIMIT expression ] -"," -Updates data in a table. -ORDER BY is supported for MySQL compatibility, but it is ignored. 
-"," -UPDATE TEST SET NAME='Hi' WHERE ID=1; -UPDATE PERSON P SET NAME=(SELECT A.NAME FROM ADDRESS A WHERE A.ID=P.ID); -" - -"Commands (DML)","DELETE"," -DELETE [ TOP term ] FROM tableName [ WHERE expression ] [ LIMIT term ] -"," -Deletes rows form a table. -If TOP or LIMIT is specified, at most the specified number of rows are deleted (no limit if null or smaller than zero). -"," -DELETE FROM TEST WHERE ID=2 -" - -"Commands (DML)","BACKUP"," -BACKUP TO fileNameString -"," -Backs up the database files to a .zip file. Objects are not locked, but -the backup is transactionally consistent because the transaction log is also copied. -Admin rights are required to execute this command. -"," -BACKUP TO 'backup.zip' -" - -"Commands (DML)","CALL"," -CALL expression -"," -Calculates a simple expression. This statement returns a result set with one row, -except if the called function returns a result set itself. -If the called function returns an array, then each element in this array is returned as a column. -"," -CALL 15*25 -" - -"Commands (DML)","EXPLAIN"," -EXPLAIN { [ PLAN FOR ] | ANALYZE } { select | insert | update | delete | merge } -"," -Shows the execution plan for a statement. -When using EXPLAIN ANALYZE, the statement is actually executed, and the query plan -will include the actual row scan count for each table. -"," -EXPLAIN SELECT * FROM TEST WHERE ID=1 -" - -"Commands (DML)","MERGE"," -MERGE INTO tableName [ ( columnName [,...] ) ] -[ KEY ( columnName [,...] ) ] -{ VALUES { ( { DEFAULT | expression } [,...] ) } [,...] | select } -"," -Updates existing rows, and insert rows that don't exist. If no key column is -specified, the primary key columns are used to find the row. If more than one -row per new row is affected, an exception is thrown. If the table contains an -auto-incremented key or identity column, and the row was updated, the generated -key is set to 0; otherwise it is set to the new key. 
-"," -MERGE INTO TEST KEY(ID) VALUES(2, 'World') -" - -"Commands (DML)","RUNSCRIPT"," -RUNSCRIPT FROM fileNameString scriptCompressionEncryption -[ CHARSET charsetString ] -"," -Runs a SQL script from a file. The script is a text file containing SQL -statements; each statement must end with ';'. This command can be used to -restore a database from a backup. The password must be in single quotes; it is -case sensitive and can contain spaces. - -Instead of a file name, an URL may be used. -To read a stream from the classpath, use the prefix 'classpath:'. -See the Pluggable File System section on the Advanced page. - -The compression algorithm must match the one used when creating the script. -Instead of a file, an URL may be used. - -Admin rights are required to execute this command. -"," -RUNSCRIPT FROM 'backup.sql' -RUNSCRIPT FROM 'classpath:/com/acme/test.sql' -" - -"Commands (DML)","SCRIPT"," -SCRIPT [ SIMPLE ] [ NODATA ] [ NOPASSWORDS ] [ NOSETTINGS ] -[ DROP ] [ BLOCKSIZE blockSizeInt ] -[ TO fileNameString scriptCompressionEncryption - [ CHARSET charsetString ] ] -[ TABLE tableName [, ...] ] -[ SCHEMA schemaName [, ...] ] -"," -Creates a SQL script from the database. - -SIMPLE does not use multi-row insert statements. -NODATA will not emit INSERT statements. -If the DROP option is specified, drop statements are created for tables, views, -and sequences. If the block size is set, CLOB and BLOB values larger than this -size are split into separate blocks. -BLOCKSIZE is used when writing out LOB data, and specifies the point at the -values transition from being inserted as inline values, to be inserted using -out-of-line commands. -NOSETTINGS turns off dumping the database settings (the SET XXX commands) - -If no 'TO fileName' clause is specified, the -script is returned as a result set. This command can be used to create a backup -of the database. For long term storage, it is more portable than copying the -database files. 
- -If a 'TO fileName' clause is specified, then the whole -script (including insert statements) is written to this file, and a result set -without the insert statements is returned. - -The password must be in single quotes; it is case sensitive and can contain spaces. - -This command locks objects while it is running. -Admin rights are required to execute this command. - -When using the TABLE or SCHEMA option, only the selected table(s) / schema(s) are included. -"," -SCRIPT NODATA -" - -"Commands (DML)","SHOW"," -SHOW { SCHEMAS | TABLES [ FROM schemaName ] | - COLUMNS FROM tableName [ FROM schemaName ] } -"," -Lists the schemas, tables, or the columns of a table. -"," -SHOW TABLES -" - -"Commands (DDL)","ALTER INDEX RENAME"," -ALTER INDEX indexName RENAME TO newIndexName -"," -Renames an index. -This command commits an open transaction in this connection. -"," -ALTER INDEX IDXNAME RENAME TO IDX_TEST_NAME -" - -"Commands (DDL)","ALTER SCHEMA RENAME"," -ALTER SCHEMA schema RENAME TO newSchemaName -"," -Renames a schema. -This command commits an open transaction in this connection. -"," -ALTER SCHEMA TEST RENAME TO PRODUCTION -" - -"Commands (DDL)","ALTER SEQUENCE"," -ALTER SEQUENCE sequenceName [ RESTART WITH long ] [ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] -"," -Changes the parameters of a sequence. -This command does not commit the current transaction; however the new value is used by other -transactions immediately, and rolling back this command has no effect. -"," -ALTER SEQUENCE SEQ_ID RESTART WITH 1000 -" - -"Commands (DDL)","ALTER TABLE ADD"," -ALTER TABLE tableName ADD [ COLUMN ] -{ [ IF NOT EXISTS ] columnDefinition [ { BEFORE | AFTER } columnName ] - | ( { columnDefinition } [,...] ) } -"," -Adds a new column to a table. -This command commits an open transaction in this connection. 
-"," -ALTER TABLE TEST ADD CREATEDATE TIMESTAMP -" - -"Commands (DDL)","ALTER TABLE ADD CONSTRAINT"," -ALTER TABLE tableName ADD constraint [ CHECK | NOCHECK ] -"," -Adds a constraint to a table. If NOCHECK is specified, existing rows are not -checked for consistency (the default is to check consistency for existing rows). -The required indexes are automatically created if they don't exist yet. -It is not possible to disable checking for unique constraints. -This command commits an open transaction in this connection. -"," -ALTER TABLE TEST ADD CONSTRAINT NAME_UNIQUE UNIQUE(NAME) -" - -"Commands (DDL)","ALTER TABLE ALTER COLUMN"," -ALTER TABLE tableName ALTER COLUMN columnName -{ { dataType [ DEFAULT expression ] [ [ NOT ] NULL ] [ AUTO_INCREMENT | IDENTITY ] } - | { RENAME TO name } - | { RESTART WITH long } - | { SELECTIVITY int } - | { SET DEFAULT expression } - | { SET NULL } - | { SET NOT NULL } } -"," -Changes the data type of a column, rename a column, -change the identity value, or change the selectivity. - -Changing the data type fails if the data can not be converted. - -RESTART changes the next value of an auto increment column. -The column must already be an auto increment column. -For RESTART, the same transactional rules as for ALTER SEQUENCE apply. - -SELECTIVITY sets the selectivity (1-100) for a column. -Setting the selectivity to 0 means the default value. -Selectivity is used by the cost based optimizer to calculate the estimated cost of an index. -Selectivity 100 means values are unique, 10 means every distinct value appears 10 times on average. - -SET DEFAULT changes the default value of a column. - -SET NULL sets a column to allow NULL. The row may not be part of a primary key. -Single column indexes on this column are dropped. - -SET NOT NULL sets a column to not allow NULL. Rows may not contains NULL in this column. - -This command commits an open transaction in this connection. 
-"," -ALTER TABLE TEST ALTER COLUMN NAME CLOB; -ALTER TABLE TEST ALTER COLUMN NAME RENAME TO TEXT; -ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 10000; -ALTER TABLE TEST ALTER COLUMN NAME SELECTIVITY 100; -ALTER TABLE TEST ALTER COLUMN NAME SET DEFAULT ''; -ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; -ALTER TABLE TEST ALTER COLUMN NAME SET NULL; -" - -"Commands (DDL)","ALTER TABLE DROP COLUMN"," -ALTER TABLE tableName DROP COLUMN [ IF EXISTS ] columnName -"," -Removes a column from a table. -This command commits an open transaction in this connection. -"," -ALTER TABLE TEST DROP COLUMN NAME -" - -"Commands (DDL)","ALTER TABLE DROP CONSTRAINT"," -ALTER TABLE tableName DROP { CONSTRAINT [ IF EXISTS ] constraintName | PRIMARY KEY } -"," -Removes a constraint or a primary key from a table. -This command commits an open transaction in this connection. -"," -ALTER TABLE TEST DROP CONSTRAINT UNIQUE_NAME -" - -"Commands (DDL)","ALTER TABLE SET"," -ALTER TABLE tableName SET REFERENTIAL_INTEGRITY - { FALSE | TRUE [ CHECK | NOCHECK ] } -"," -Disables or enables referential integrity checking for a table. This command can -be used inside a transaction. Enabling referential integrity does not check -existing data, except if CHECK is specified. Use SET REFERENTIAL_INTEGRITY to -disable it for all tables; the global flag and the flag for each table are -independent. - -This command commits an open transaction in this connection. -"," -ALTER TABLE TEST SET REFERENTIAL_INTEGRITY FALSE -" - -"Commands (DDL)","ALTER TABLE RENAME"," -ALTER TABLE tableName RENAME TO newName -"," -Renames a table. -This command commits an open transaction in this connection. -"," -ALTER TABLE TEST RENAME TO MY_DATA -" - -"Commands (DDL)","ALTER USER ADMIN"," -ALTER USER userName ADMIN { TRUE | FALSE } -"," -Switches the admin flag of a user on or off. - -Only unquoted or uppercase user names are allowed. -Admin rights are required to execute this command. 
-This command commits an open transaction in this connection. -"," -ALTER USER TOM ADMIN TRUE -" - -"Commands (DDL)","ALTER USER RENAME"," -ALTER USER userName RENAME TO newUserName -"," -Renames a user. -After renaming a user, the password becomes invalid and needs to be changed as well. - -Only unquoted or uppercase user names are allowed. -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -ALTER USER TOM RENAME TO THOMAS -" - -"Commands (DDL)","ALTER USER SET PASSWORD"," -ALTER USER userName SET { PASSWORD string | SALT bytes HASH bytes } -"," -Changes the password of a user. -Only unquoted or uppercase user names are allowed. -The password must be enclosed in single quotes. It is case sensitive -and can contain spaces. The salt and hash values are hex strings. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -ALTER USER SA SET PASSWORD 'rioyxlgt' -" - -"Commands (DDL)","ALTER VIEW"," -ALTER VIEW viewName RECOMPILE -"," -Recompiles a view after the underlying tables have been changed or created. -This command is used for views created using CREATE FORCE VIEW. -This command commits an open transaction in this connection. -"," -ALTER VIEW ADDRESS_VIEW RECOMPILE -" - -"Commands (DDL)","ANALYZE"," -ANALYZE [ SAMPLE_SIZE rowCountInt ] -"," -Updates the selectivity statistics of all tables. The selectivity is used by the -cost based optimizer to select the best index for a given query. If no sample -size is set, up to 10000 rows per table are read. The value 0 means all rows are -read. The selectivity can be set manually using ALTER TABLE ALTER COLUMN -SELECTIVITY. Manual values are overwritten by this statement. The selectivity is -available in the INFORMATION_SCHEMA.COLUMNS table. - -This command commits an open transaction in this connection. 
-"," -ANALYZE SAMPLE_SIZE 1000 -" - -"Commands (DDL)","COMMENT"," -COMMENT ON -{ { COLUMN [ schemaName. ] tableName.columnName } - | { { TABLE | VIEW | CONSTANT | CONSTRAINT | ALIAS | INDEX | ROLE - | SCHEMA | SEQUENCE | TRIGGER | USER | DOMAIN } [ schemaName. ] objectName } } -IS expression -"," -Sets the comment of a database object. Use NULL to remove the comment. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -COMMENT ON TABLE TEST IS 'Table used for testing' -" - -"Commands (DDL)","CREATE AGGREGATE"," -CREATE AGGREGATE [ IF NOT EXISTS ] newAggregateName FOR className -"," -Creates a new user-defined aggregate function. The method name must be the full -qualified class name. The class must implement the interface -""org.h2.api.AggregateFunction"". - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -CREATE AGGREGATE MEDIAN FOR ""com.acme.db.Median"" -" - -"Commands (DDL)","CREATE ALIAS"," -CREATE ALIAS [ IF NOT EXISTS ] newFunctionAliasName [ DETERMINISTIC ] -[ NOBUFFER ] { FOR classAndMethodName | AS sourceCodeString } -"," -Creates a new function alias. If this is a ResultSet returning function, -by default the return value is cached in a local temporary file. - -NOBUFFER - disables caching of ResultSet return value to temporary file. - -DETERMINISTIC - Deterministic functions must always return the same value for the same parameters. - -The method name must be the full qualified class and method name, -and may optionally include the parameter classes as in -""java.lang.Integer.parseInt(java.lang.String, int)"". The class and the method -must both be public, and the method must be static. The class must be available -in the classpath of the database engine (when using the server mode, -it must be in the classpath of the server). 
-
-When defining a function alias with source code, the Sun ""javac"" compiler
-is used if the file ""tools.jar"" is in the classpath. If not, ""javac"" is run as a separate process.
-Only the source code is stored in the database; the class is compiled each time
-the database is re-opened. Source code is usually passed
-as dollar quoted text to avoid escaping problems. If import statements are used,
-then the tag @CODE must be added before the method.
-
-If the method throws an SQLException, it is directly re-thrown to the calling application;
-all other exceptions are first converted to a SQLException.
-
-If the first parameter of the Java function is a ""java.sql.Connection"", then a
-connection to the database is provided. This connection must not be closed.
-If the class contains multiple methods with the given name but different
-parameter count, all methods are mapped.
-
-Admin rights are required to execute this command.
-This command commits an open transaction in this connection.
-
-If you have the Groovy jar in your classpath, it is also possible to write methods using Groovy.
-","
-CREATE ALIAS MY_SQRT FOR ""java.lang.Math.sqrt"";
-CREATE ALIAS GET_SYSTEM_PROPERTY FOR ""java.lang.System.getProperty"";
-CALL GET_SYSTEM_PROPERTY('java.class.path');
-CALL GET_SYSTEM_PROPERTY('com.acme.test', 'true');
-CREATE ALIAS REVERSE AS $$ String reverse(String s) { return new StringBuilder(s).reverse().toString(); } $$;
-CALL REVERSE('Test');
-CREATE ALIAS tr AS $$@groovy.transform.CompileStatic
- static String tr(String str, String sourceSet, String replacementSet){
- return str.tr(sourceSet, replacementSet);
- }
-$$
-"
-
-"Commands (DDL)","CREATE CONSTANT","
-CREATE CONSTANT [ IF NOT EXISTS ] newConstantName VALUE expression
-","
-Creates a new constant.
-This command commits an open transaction in this connection. 
-"," -CREATE CONSTANT ONE VALUE 1 -" - -"Commands (DDL)","CREATE DOMAIN"," -CREATE DOMAIN [ IF NOT EXISTS ] newDomainName AS dataType -[ DEFAULT expression ] [ [ NOT ] NULL ] [ SELECTIVITY selectivity ] -[ CHECK condition ] -"," -Creates a new data type (domain). The check condition must evaluate to true or -to NULL (to prevent NULL, use ""NOT NULL""). In the condition, the term VALUE refers -to the value being tested. - -Domains are usable within the whole database. They can not be created in a specific schema. - -This command commits an open transaction in this connection. -"," -CREATE DOMAIN EMAIL AS VARCHAR(255) CHECK (POSITION('@', VALUE) > 1) -" - -"Commands (DDL)","CREATE INDEX"," -CREATE -{ [ UNIQUE ] [ HASH | SPATIAL] INDEX [ [ IF NOT EXISTS ] newIndexName ] - | PRIMARY KEY [ HASH ] } -ON tableName ( indexColumn [,...] ) -"," -Creates a new index. -This command commits an open transaction in this connection. - -Hash indexes are meant for in-memory databases and memory tables (CREATE MEMORY TABLE). -For other tables, or if the index contains multiple columns, the HASH keyword is ignored. -Hash indexes can only test for equality, and do not support range queries (similar to a hash table). -Non-unique keys are supported. -Spatial indexes are supported only on Geometry columns. -"," -CREATE INDEX IDXNAME ON TEST(NAME) -" - -"Commands (DDL)","CREATE LINKED TABLE"," -CREATE [ FORCE ] [ [ GLOBAL | LOCAL ] TEMPORARY ] -LINKED TABLE [ IF NOT EXISTS ] -name ( driverString, urlString, userString, passwordString, -[ originalSchemaString, ] originalTableString ) [ EMIT UPDATES | READONLY ] -"," -Creates a table link to an external table. The driver name may be empty if the -driver is already loaded. If the schema name is not set, only one table with -that name may exist in the target database. - -FORCE - Create the LINKED TABLE even if the remote database/table does not exist. 
-
-EMIT UPDATES -
-Usually, for update statements, the old rows are deleted first and then the new
-rows are inserted. It is possible to emit update statements (except on
-rollback), however in this case multi-row unique key updates may not always
-work. Linked tables to the same database share one connection.
-
-READONLY -
-If set, the remote table may not be updated. This is enforced by H2.
-
-If the connection to the source database is lost, the connection is re-opened
-(this is a workaround for MySQL that disconnects after 8 hours of inactivity by default).
-
-If a query is used instead of the original table name, the table is read only.
-Queries must be enclosed in parentheses: ""(SELECT * FROM ORDERS)"".
-
-To use JNDI to get the connection, the driver class must be a
-javax.naming.Context (for example ""javax.naming.InitialContext""), and the URL must
-be the resource name (for example ""java:comp/env/jdbc/Test"").
-
-Admin rights are required to execute this command.
-This command commits an open transaction in this connection.
-","
-CREATE LINKED TABLE LINK('org.h2.Driver', 'jdbc:h2:test2', 'sa', 'sa', 'TEST');
-CREATE LINKED TABLE LINK('', 'jdbc:h2:test2', 'sa', 'sa',
- '(SELECT * FROM TEST WHERE ID>0)');
-CREATE LINKED TABLE LINK('javax.naming.InitialContext',
- 'java:comp/env/jdbc/Test', NULL, NULL, '(SELECT * FROM TEST WHERE ID>0)');
-"
-
-"Commands (DDL)","CREATE ROLE","
-CREATE ROLE [ IF NOT EXISTS ] newRoleName
-","
-Creates a new role.
-This command commits an open transaction in this connection.
-","
-CREATE ROLE READONLY
-"
-
-"Commands (DDL)","CREATE SCHEMA","
-CREATE SCHEMA [ IF NOT EXISTS ] name [ AUTHORIZATION ownerUserName ]
-","
-Creates a new schema. If no owner is specified, the current user is used. The
-user that executes the command must have admin rights, as well as the owner.
-Specifying the owner currently has no effect.
-
-This command commits an open transaction in this connection. 
-"," -CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA -" - -"Commands (DDL)","CREATE SEQUENCE"," -CREATE SEQUENCE [ IF NOT EXISTS ] newSequenceName [ START WITH long ] -[ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] -"," -Creates a new sequence. -The data type of a sequence is BIGINT. -Used values are never re-used, even when the transaction is rolled back. - -The cache is the number of pre-allocated numbers. -If the system crashes without closing the database, at most this many numbers are lost. -The default cache size is 32. -To disable caching, use the cache size 1 or lower. - -This command commits an open transaction in this connection. -"," -CREATE SEQUENCE SEQ_ID -" - -"Commands (DDL)","CREATE TABLE"," -CREATE [ CACHED | MEMORY ] [ TEMP | [ GLOBAL | LOCAL ] TEMPORARY ] -TABLE [ IF NOT EXISTS ] name -[ ( { columnDefinition | constraint } [,...] ) ] -[ ENGINE tableEngineName [ WITH tableEngineParamName [,...] ] ] -[ NOT PERSISTENT ] [ TRANSACTIONAL ] -[ AS select ]"," -Creates a new table. - -Cached tables (the default for regular tables) are persistent, -and the number of rows is not limited by the main memory. -Memory tables (the default for temporary tables) are persistent, -but the index data is kept in main memory, -that means memory tables should not get too large. - -Temporary tables are deleted when closing or opening a database. -Temporary tables can be global (accessible by all connections) -or local (only accessible by the current connection). -The default for temporary tables is global. -Indexes of temporary tables are kept fully in main memory, -unless the temporary table is created using CREATE CACHED TABLE. - -The ENGINE option is only required when custom table implementations are used. -The table engine class must implement the interface ""org.h2.api.TableEngine"". 
-Any table engine parameters are passed down in the tableEngineParams field of the CreateTableData object. - -Tables with the NOT PERSISTENT modifier are kept fully in memory, and all -rows are lost when the database is closed. - -The column definition is optional if a query is specified. -In that case the column list of the query is used. - -This command commits an open transaction, except when using -TRANSACTIONAL (only supported for temporary tables). -"," -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) -" - -"Commands (DDL)","CREATE TRIGGER"," -CREATE TRIGGER [ IF NOT EXISTS ] newTriggerName { BEFORE | AFTER | INSTEAD OF } -{ INSERT | UPDATE | DELETE | SELECT | ROLLBACK } [,...] ON tableName [ FOR EACH ROW ] -[ QUEUE int ] [ NOWAIT ] { CALL triggeredClassName | AS sourceCodeString } -"," -Creates a new trigger. -The trigger class must be public and implement ""org.h2.api.Trigger"". -Inner classes are not supported. -The class must be available in the classpath of the database engine -(when using the server mode, it must be in the classpath of the server). - -The sourceCodeString must define a single method with no parameters that returns ""org.h2.api.Trigger"". -See CREATE ALIAS for requirements regarding the compilation. - -BEFORE triggers are called after data conversion is made, default values are set, -null and length constraint checks have been made; -but before other constraints have been checked. -If there are multiple triggers, the order in which they are called is undefined. - -ROLLBACK can be specified in combination with INSERT, UPDATE, and DELETE. -Only row based AFTER trigger can be called on ROLLBACK. -Exceptions that occur within such triggers are ignored. -As the operations that occur within a trigger are part of the transaction, -ROLLBACK triggers are only required if an operation communicates outside of the database. - -INSTEAD OF triggers are implicitly row based and behave like BEFORE triggers. 
-Only the first such trigger is called. Such triggers on views are supported.
-They can be used to make views updatable.
-
-A BEFORE SELECT trigger is fired just before the database engine tries to read from the table.
-The trigger can be used to update a table on demand.
-The trigger is called with both 'old' and 'new' set to null.
-
-The MERGE statement will call both INSERT and UPDATE triggers.
-Not supported are SELECT triggers with the option FOR EACH ROW,
-and AFTER SELECT triggers.
-
-Committing or rolling back a transaction within a trigger is not allowed, except for SELECT triggers.
-
-By default a trigger is called once for each statement, without the old and new rows.
-FOR EACH ROW triggers are called once for each inserted, updated, or deleted row.
-
-QUEUE is implemented for syntax compatibility with HSQL and has no effect.
-
-The trigger needs to be created in the same schema as the table.
-The schema name does not need to be specified when creating the trigger.
-
-This command commits an open transaction in this connection.
-","
-CREATE TRIGGER TRIG_INS BEFORE INSERT ON TEST FOR EACH ROW CALL ""MyTrigger"";
-CREATE TRIGGER TRIG_SRC BEFORE INSERT ON TEST AS $$org.h2.api.Trigger create() { return new MyTrigger(""constructorParam""); } $$;
-"
-"Commands (DDL)","CREATE USER","
-CREATE USER [ IF NOT EXISTS ] newUserName
-{ PASSWORD string | SALT bytes HASH bytes } [ ADMIN ]
-","
-Creates a new user. For compatibility, only unquoted or uppercase user names are allowed.
-The password must be in single quotes. It is case sensitive and can contain spaces.
-The salt and hash values are hex strings.
-
-Admin rights are required to execute this command.
-This command commits an open transaction in this connection.
-","
-CREATE USER GUEST PASSWORD 'abc'
-"
-
-"Commands (DDL)","CREATE VIEW","
-CREATE [ OR REPLACE ] [ FORCE ] VIEW [ IF NOT EXISTS ] newViewName
-[ ( columnName [,...] ) ] AS select
-","
-Creates a new view. 
If the force option is used, then the view is created even -if the underlying table(s) don't exist. - -If the OR REPLACE clause is used an existing view will be replaced, and any -dependent views will not need to be recreated. If dependent views will become -invalid as a result of the change an error will be generated, but this error -can be ignored if the FORCE clause is also used. - -Views are not updatable except when using 'instead of' triggers. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -CREATE VIEW TEST_VIEW AS SELECT * FROM TEST WHERE ID < 100 -" - -"Commands (DDL)","DROP AGGREGATE"," -DROP AGGREGATE [ IF EXISTS ] aggregateName -"," -Drops an existing user-defined aggregate function. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -DROP AGGREGATE MEDIAN -" - -"Commands (DDL)","DROP ALIAS"," -DROP ALIAS [ IF EXISTS ] existingFunctionAliasName -"," -Drops an existing function alias. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -DROP ALIAS MY_SQRT -" - -"Commands (DDL)","DROP ALL OBJECTS"," -DROP ALL OBJECTS [ DELETE FILES ] -"," -Drops all existing views, tables, sequences, schemas, function aliases, roles, -user-defined aggregate functions, domains, and users (except the current user). -If DELETE FILES is specified, the database files will be removed when the last -user disconnects from the database. Warning: this command can not be rolled -back. - -Admin rights are required to execute this command. -"," -DROP ALL OBJECTS -" - -"Commands (DDL)","DROP CONSTANT"," -DROP CONSTANT [ IF EXISTS ] constantName -"," -Drops a constant. -This command commits an open transaction in this connection. -"," -DROP CONSTANT ONE -" - -"Commands (DDL)","DROP DOMAIN"," -DROP DOMAIN [ IF EXISTS ] domainName -"," -Drops a data type (domain). 
-This command commits an open transaction in this connection. -"," -DROP DOMAIN EMAIL -" - -"Commands (DDL)","DROP INDEX"," -DROP INDEX [ IF EXISTS ] indexName -"," -Drops an index. -This command commits an open transaction in this connection. -"," -DROP INDEX IF EXISTS IDXNAME -" - -"Commands (DDL)","DROP ROLE"," -DROP ROLE [ IF EXISTS ] roleName -"," -Drops a role. -This command commits an open transaction in this connection. -"," -DROP ROLE READONLY -" - -"Commands (DDL)","DROP SCHEMA"," -DROP SCHEMA [ IF EXISTS ] schemaName -"," -Drops a schema. -This command commits an open transaction in this connection. -"," -DROP SCHEMA TEST_SCHEMA -" - -"Commands (DDL)","DROP SEQUENCE"," -DROP SEQUENCE [ IF EXISTS ] sequenceName -"," -Drops a sequence. -This command commits an open transaction in this connection. -"," -DROP SEQUENCE SEQ_ID -" - -"Commands (DDL)","DROP TABLE"," -DROP TABLE [ IF EXISTS ] tableName [,...] [ RESTRICT | CASCADE ] -"," -Drops an existing table, or a list of tables. -The command will fail if dependent views exist and the RESTRICT clause is used (the default). -All dependent views are dropped as well if the CASCADE clause is used. -This command commits an open transaction in this connection. -"," -DROP TABLE TEST -" - -"Commands (DDL)","DROP TRIGGER"," -DROP TRIGGER [ IF EXISTS ] triggerName -"," -Drops an existing trigger. -This command commits an open transaction in this connection. -"," -DROP TRIGGER TRIG_INS -" - -"Commands (DDL)","DROP USER"," -DROP USER [ IF EXISTS ] userName -"," -Drops a user. The current user cannot be dropped. -For compatibility, only unquoted or uppercase user names are allowed. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -DROP USER TOM -" - -"Commands (DDL)","DROP VIEW"," -DROP VIEW [ IF EXISTS ] viewName [ RESTRICT | CASCADE ] -"," -Drops an existing view. -All dependent views are dropped as well if the CASCADE clause is used (the default). 
-
-The command will fail if dependent views exist and the RESTRICT clause is used.
-This command commits an open transaction in this connection.
-","
-DROP VIEW TEST_VIEW
-"
-
-"Commands (DDL)","TRUNCATE TABLE","
-TRUNCATE TABLE tableName
-","
-Removes all rows from a table.
-Unlike DELETE FROM without where clause, this command can not be rolled back.
-This command is faster than DELETE without where clause.
-Only regular data tables without foreign key constraints can be truncated
-(except if referential integrity is disabled for this database or for this table).
-Linked tables can't be truncated.
-
-This command commits an open transaction in this connection.
-","
-TRUNCATE TABLE TEST
-"
-
-"Commands (Other)","CHECKPOINT","
-CHECKPOINT
-","
-Flushes the data to disk.
-
-Admin rights are required to execute this command.
-","
-CHECKPOINT
-"
-
-"Commands (Other)","CHECKPOINT SYNC","
-CHECKPOINT SYNC
-","
-Flushes the data to disk and forces all system buffers to be written
-to the underlying device.
-
-Admin rights are required to execute this command.
-","
-CHECKPOINT SYNC
-"
-
-"Commands (Other)","COMMIT","
-COMMIT [ WORK ]
-","
-Commits a transaction.
-","
-COMMIT
-"
-
-"Commands (Other)","COMMIT TRANSACTION","
-COMMIT TRANSACTION transactionName
-","
-Sets the resolution of an in-doubt transaction to 'commit'.
-
-Admin rights are required to execute this command.
-This command is part of the 2-phase-commit protocol.
-","
-COMMIT TRANSACTION XID_TEST
-"
-
-"Commands (Other)","GRANT RIGHT","
-GRANT { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON
-tableName [,...] TO { PUBLIC | userName | roleName }
-","
-Grants rights for a table to a user or role.
-
-Admin rights are required to execute this command.
-This command commits an open transaction in this connection.
-","
-GRANT SELECT ON TEST TO READONLY
-"
-
-"Commands (Other)","GRANT ALTER ANY SCHEMA","
-GRANT ALTER ANY SCHEMA TO userName
-","
-Grants schema altering rights to a user. 
- -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -GRANT ALTER ANY SCHEMA TO Bob -" - -"Commands (Other)","GRANT ROLE"," -GRANT roleName TO { PUBLIC | userName | roleName } -"," -Grants a role to a user or role. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -GRANT READONLY TO PUBLIC -" - -"Commands (Other)","HELP"," -HELP [ anything [...] ] -"," -Displays the help pages of SQL commands or keywords. -"," -HELP SELECT -" - -"Commands (Other)","PREPARE COMMIT"," -PREPARE COMMIT newTransactionName -"," -Prepares committing a transaction. -This command is part of the 2-phase-commit protocol. -"," -PREPARE COMMIT XID_TEST -" - -"Commands (Other)","REVOKE RIGHT"," -REVOKE { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON -tableName [,...] FROM { PUBLIC | userName | roleName } -"," -Removes rights for a table from a user or role. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -REVOKE SELECT ON TEST FROM READONLY -" - -"Commands (Other)","REVOKE ROLE"," -REVOKE roleName FROM { PUBLIC | userName | roleName } -"," -Removes a role from a user or role. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -REVOKE READONLY FROM TOM -" - -"Commands (Other)","ROLLBACK"," -ROLLBACK [ TO SAVEPOINT savepointName ] -"," -Rolls back a transaction. If a savepoint name is used, the transaction is only -rolled back to the specified savepoint. -"," -ROLLBACK -" - -"Commands (Other)","ROLLBACK TRANSACTION"," -ROLLBACK TRANSACTION transactionName -"," -Sets the resolution of an in-doubt transaction to 'rollback'. - -Admin rights are required to execute this command. -This command is part of the 2-phase-commit protocol. 
-"," -ROLLBACK TRANSACTION XID_TEST -" - -"Commands (Other)","SAVEPOINT"," -SAVEPOINT savepointName -"," -Create a new savepoint. See also ROLLBACK. -Savepoints are only valid until the transaction is committed or rolled back. -"," -SAVEPOINT HALF_DONE -" - -"Commands (Other)","SET @"," -SET @variableName [ = ] expression -"," -Updates a user-defined variable. -Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. -This command does not commit a transaction, and rollback does not affect it. -"," -SET @TOTAL=0 -" - -"Commands (Other)","SET ALLOW_LITERALS"," -SET ALLOW_LITERALS { NONE | ALL | NUMBERS } -"," -This setting can help solve the SQL injection problem. By default, text and -number literals are allowed in SQL statements. However, this enables SQL -injection if the application dynamically builds SQL statements. SQL injection is -not possible if user data is set using parameters ('?'). - -NONE means literals of any kind are not allowed, only parameters and constants -are allowed. NUMBERS mean only numerical and boolean literals are allowed. ALL -means all literals are allowed (default). - -See also CREATE CONSTANT. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;ALLOW_LITERALS=NONE"" -"," -SET ALLOW_LITERALS NONE -" - -"Commands (Other)","SET AUTOCOMMIT"," -SET AUTOCOMMIT { TRUE | ON | FALSE | OFF } -"," -Switches auto commit on or off. -This setting can be appended to the database URL: ""jdbc:h2:test;AUTOCOMMIT=OFF"" - -however this will not work as expected when using a connection pool -(the connection pool manager will re-enable autocommit when returning -the connection to the pool, so autocommit will only be disabled the first -time the connection is used. 
-"," -SET AUTOCOMMIT OFF -" - -"Commands (Other)","SET CACHE_SIZE"," -SET CACHE_SIZE int -"," -Sets the size of the cache in KB (each KB being 1024 bytes) for the current database. -The default is 65536 per available GB of RAM, i.e. 64 MB per GB. -The value is rounded to the next higher power of two. -Depending on the virtual machine, the actual memory required may be higher. - -This setting is persistent and affects all connections as there is only one cache per database. -Using a very small value (specially 0) will reduce performance a lot. -This setting only affects the database engine (the server in a client/server environment; -in embedded mode, the database engine is in the same process as the application). -It has no effect for in-memory databases. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;CACHE_SIZE=8192"" -"," -SET CACHE_SIZE 8192 -" - -"Commands (Other)","SET CLUSTER"," -SET CLUSTER serverListString -"," -This command should not be used directly by an application, the statement is -executed automatically by the system. The behavior may change in future -releases. Sets the cluster server list. An empty string switches off the cluster -mode. Switching on the cluster mode requires admin rights, but any user can -switch it off (this is automatically done when the client detects the other -server is not responding). - -This command is effective immediately, but does not commit an open transaction. -"," -SET CLUSTER '' -" - -"Commands (Other)","SET BINARY_COLLATION"," -SET BINARY_COLLATION -{ UNSIGNED | SIGNED } ] } -"," -Sets the collation used for comparing BINARY columns, the default is SIGNED -for version 1.3 and older, and UNSIGNED for version 1.4 and newer. -This command can only be executed if there are no tables defined. 
- -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -This setting is persistent. -"," -SET BINARY_COLLATION SIGNED -" - -"Commands (Other)","SET COLLATION"," -SET [ DATABASE ] COLLATION -{ OFF | collationName [ STRENGTH { PRIMARY | SECONDARY | TERTIARY | IDENTICAL } ] } -"," -Sets the collation used for comparing strings. -This command can only be executed if there are no tables defined. -See ""java.text.Collator"" for details about the supported collations and the STRENGTH -(PRIMARY is usually case- and umlaut-insensitive; SECONDARY is case-insensitive but umlaut-sensitive; -TERTIARY is both case- and umlaut-sensitive; IDENTICAL is sensitive to all differences and only affects ordering). - -The ICU4J collator is used if it is in the classpath. -It is also used if the collation name starts with ICU4J_ -(in that case, the ICU4J must be in the classpath, otherwise an exception is thrown). -The default collator is used if the collation name starts with DEFAULT_ -(even if ICU4J is in the classpath). - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -This setting is persistent. -"," -SET COLLATION ENGLISH -" - -"Commands (Other)","SET COMPRESS_LOB"," -SET COMPRESS_LOB { NO | LZF | DEFLATE } -"," -This feature is only available for the PageStore storage engine. -For the MVStore engine (the default for H2 version 1.4.x), -append "";COMPRESS=TRUE"" to the database URL instead. - -Sets the compression algorithm for BLOB and CLOB data. Compression is usually -slower, but needs less disk space. LZF is faster but uses more space. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. 
-"," -SET COMPRESS_LOB LZF -" - -"Commands (Other)","SET DATABASE_EVENT_LISTENER"," -SET DATABASE_EVENT_LISTENER classNameString -"," -Sets the event listener class. An empty string ('') means no listener should be -used. This setting is not persistent. - -Admin rights are required to execute this command, except if it is set when -opening the database (in this case it is reset just after opening the database). -This setting can be appended to the database URL: ""jdbc:h2:test;DATABASE_EVENT_LISTENER='sample.MyListener'"" -"," -SET DATABASE_EVENT_LISTENER 'sample.MyListener' -" - -"Commands (Other)","SET DB_CLOSE_DELAY"," -SET DB_CLOSE_DELAY int -"," -Sets the delay for closing a database if all connections are closed. -The value -1 means the database is never closed until the close delay is set to some other value or SHUTDOWN is called. -The value 0 means no delay (default; the database is closed if the last connection to it is closed). -Values 1 and larger mean the number of seconds the database is left open after closing the last connection. - -If the application exits normally or System.exit is called, the database is closed immediately, even if a delay is set. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;DB_CLOSE_DELAY=-1"" -"," -SET DB_CLOSE_DELAY -1 -" - -"Commands (Other)","SET DEFAULT_LOCK_TIMEOUT"," -SET DEFAULT LOCK_TIMEOUT int -"," -Sets the default lock timeout (in milliseconds) in this database that is used -for the new sessions. The default value for this setting is 1000 (one second). - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. 
-"," -SET DEFAULT_LOCK_TIMEOUT 5000 -" - -"Commands (Other)","SET DEFAULT_TABLE_TYPE"," -SET DEFAULT_TABLE_TYPE { MEMORY | CACHED } -"," -Sets the default table storage type that is used when creating new tables. -Memory tables are kept fully in the main memory (including indexes), however -the data is still stored in the database file. The size of memory tables is -limited by the memory. The default is CACHED. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -It has no effect for in-memory databases. -"," -SET DEFAULT_TABLE_TYPE MEMORY -" - -"Commands (Other)","SET EXCLUSIVE"," -SET EXCLUSIVE { 0 | 1 | 2 } -"," -Switched the database to exclusive mode (1, 2) and back to normal mode (0). - -In exclusive mode, new connections are rejected, and operations by -other connections are paused until the exclusive mode is disabled. -When using the value 1, existing connections stay open. -When using the value 2, all existing connections are closed -(and current transactions are rolled back) except the connection -that executes SET EXCLUSIVE. -Only the connection that set the exclusive mode can disable it. -When the connection is closed, it is automatically disabled. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -"," -SET EXCLUSIVE 1 -" - -"Commands (Other)","SET IGNORECASE"," -SET IGNORECASE { TRUE | FALSE } -"," -If IGNORECASE is enabled, text columns in newly created tables will be -case-insensitive. Already existing tables are not affected. The effect of -case-insensitive columns is similar to using a collation with strength PRIMARY. -Case-insensitive columns are compared faster than when using a collation. -String literals and parameters are however still considered case sensitive even if this option is set. 
- -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;IGNORECASE=TRUE"" -"," -SET IGNORECASE TRUE -" - -"Commands (Other)","SET JAVA_OBJECT_SERIALIZER"," -SET JAVA_OBJECT_SERIALIZER -{ null | className } -"," -Sets the object used to serialize and deserialize java objects being stored in column of type OTHER. -The serializer class must be public and implement ""org.h2.api.JavaObjectSerializer"". -Inner classes are not supported. -The class must be available in the classpath of the database engine -(when using the server mode, it must be both in the classpath of the server and the client). -This command can only be executed if there are no tables defined. - -Admin rights are required to execute this command. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'"" -"," -SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' -" - - -"Commands (Other)","SET LOG"," -SET LOG int -"," -Sets the transaction log mode. The values 0, 1, and 2 are supported, the default is 2. -This setting affects all connections. - -LOG 0 means the transaction log is disabled completely. It is the fastest mode, -but also the most dangerous: if the process is killed while the database is open in this mode, -the data might be lost. It must only be used if this is not a problem, for example when -initially loading a database, or when running tests. - -LOG 1 means the transaction log is enabled, but FileDescriptor.sync is disabled. -This setting is about half as fast as with LOG 0. This setting is useful if no protection -against power failure is required, but the data must be protected against killing the process. 
- -LOG 2 (the default) means the transaction log is enabled, and FileDescriptor.sync is called -for each checkpoint. This setting is about half as fast as LOG 1. Depending on the -file system, this will also protect against power failure in the majority if cases. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is not persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;LOG=0"" -"," -SET LOG 1 -" - -"Commands (Other)","SET LOCK_MODE"," -SET LOCK_MODE int -"," -Sets the lock mode. The values 0, 1, 2, and 3 are supported. The default is 3 -(READ_COMMITTED). This setting affects all connections. - -The value 0 means no locking (should only be used for testing; also known as -READ_UNCOMMITTED). Please note that using SET LOCK_MODE 0 while at the same time -using multiple connections may result in inconsistent transactions. - -The value 1 means table level locking (also known as SERIALIZABLE). - -The value 2 means table level locking with garbage collection (if the -application does not close all connections). - -The value 3 means table level locking, but read locks are released immediately -(default; also known as READ_COMMITTED). - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;LOCK_MODE=3"" -"," -SET LOCK_MODE 1 -" - -"Commands (Other)","SET LOCK_TIMEOUT"," -SET LOCK_TIMEOUT int -"," -Sets the lock timeout (in milliseconds) for the current session. The default -value for this setting is 1000 (one second). - -This command does not commit a transaction, and rollback does not affect it. 
-This setting can be appended to the database URL: ""jdbc:h2:test;LOCK_TIMEOUT=10000"" -"," -SET LOCK_TIMEOUT 1000 -" - -"Commands (Other)","SET MAX_LENGTH_INPLACE_LOB"," -SET MAX_LENGTH_INPLACE_LOB int -"," -Sets the maximum size of an in-place LOB object. - -This is the maximum length of an LOB that is stored with the record itself, -and the default value is 128. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -"," -SET MAX_LENGTH_INPLACE_LOB 128 -" - -"Commands (Other)","SET MAX_LOG_SIZE"," -SET MAX_LOG_SIZE int -"," -Sets the maximum size of the transaction log, in megabytes. -If the log is larger, and if there is no open transaction, the transaction log is truncated. -If there is an open transaction, the transaction log will continue to grow however. -The default max size is 16 MB. -This setting has no effect for in-memory databases. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -"," -SET MAX_LOG_SIZE 2 -" - -"Commands (Other)","SET MAX_MEMORY_ROWS"," -SET MAX_MEMORY_ROWS int -"," -The maximum number of rows in a result set that are kept in-memory. If more rows -are read, then the rows are buffered to disk. -The default is 40000 per GB of available RAM. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -It has no effect for in-memory databases. -"," -SET MAX_MEMORY_ROWS 1000 -" - -"Commands (Other)","SET MAX_MEMORY_UNDO"," -SET MAX_MEMORY_UNDO int -"," -The maximum number of undo records per a session that are kept in-memory. -If a transaction is larger, the records are buffered to disk. -The default value is 50000. 
-Changes to tables without a primary key can not be buffered to disk. -This setting is not supported when using multi-version concurrency. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -It has no effect for in-memory databases. -"," -SET MAX_MEMORY_UNDO 1000 -" - -"Commands (Other)","SET MAX_OPERATION_MEMORY"," -SET MAX_OPERATION_MEMORY int -"," -Sets the maximum memory used for large operations (delete and insert), in bytes. -Operations that use more memory are buffered to disk, slowing down the -operation. The default max size is 100000. 0 means no limit. - -This setting is not persistent. -Admin rights are required to execute this command, as it affects all connections. -It has no effect for in-memory databases. -This setting can be appended to the database URL: ""jdbc:h2:test;MAX_OPERATION_MEMORY=10000"" -"," -SET MAX_OPERATION_MEMORY 0 -" - -"Commands (Other)","SET MODE"," -SET MODE { REGULAR | DB2 | DERBY | HSQLDB | MSSQLSERVER | MYSQL | ORACLE | POSTGRESQL } -"," -Changes to another database compatibility mode. For details, see Compatibility -Modes in the feature section. - -This setting is not persistent. -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;MODE=MYSQL"" -"," -SET MODE HSQLDB -" - -"Commands (Other)","SET MULTI_THREADED"," -SET MULTI_THREADED { 0 | 1 } -"," -Enabled (1) or disabled (0) multi-threading inside the database engine. By -default, this setting is disabled. Currently, enabling this is experimental -only. - -This is a global setting, which means it is not possible to open multiple databases with different modes at the same time in the same virtual machine. 
-This setting is not persistent, however the value is kept until the virtual machine exits or it is changed. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;MULTI_THREADED=1"" -"," -SET MULTI_THREADED 1 -" - -"Commands (Other)","SET OPTIMIZE_REUSE_RESULTS"," -SET OPTIMIZE_REUSE_RESULTS { 0 | 1 } -"," -Enabled (1) or disabled (0) the result reuse optimization. If enabled, -subqueries and views used as subqueries are only re-run if the data in one of -the tables was changed. This option is enabled by default. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;OPTIMIZE_REUSE_RESULTS=0"" -"," -SET OPTIMIZE_REUSE_RESULTS 0 -" - -"Commands (Other)","SET PASSWORD"," -SET PASSWORD string -"," -Changes the password of the current user. The password must be in single quotes. -It is case sensitive and can contain spaces. - -This command commits an open transaction in this connection. -"," -SET PASSWORD 'abcstzri!.5' -" - -"Commands (Other)","SET QUERY_STATISTICS"," -SET QUERY_STATISTICS { TRUE | FALSE } -"," -Disabled or enables query statistics gathering for the whole database. -The statistics are reflected in the INFORMATION_SCHEMA.QUERY_STATISTICS meta-table. - -This setting is not persistent. -This command commits an open transaction in this connection. -Admin rights are required to execute this command, as it affects all connections. -"," -SET QUERY_STATISTICS FALSE -" - -"Commands (Other)","SET QUERY_TIMEOUT"," -SET QUERY_TIMEOUT int -"," -Set the query timeout of the current session to the given value. The timeout is -in milliseconds. All kinds of statements will throw an exception if they take -longer than the given value. 
The default timeout is 0, meaning no timeout. - -This command does not commit a transaction, and rollback does not affect it. -"," -SET QUERY_TIMEOUT 10000 -" - -"Commands (Other)","SET REFERENTIAL_INTEGRITY"," -SET REFERENTIAL_INTEGRITY { TRUE | FALSE } -"," -Disabled or enables referential integrity checking for the whole database. -Enabling it does not check existing data. Use ALTER TABLE SET to disable it only -for one table. - -This setting is not persistent. -This command commits an open transaction in this connection. -Admin rights are required to execute this command, as it affects all connections. -"," -SET REFERENTIAL_INTEGRITY FALSE -" - -"Commands (Other)","SET RETENTION_TIME"," -SET RETENTION_TIME int -"," -This property is only used when using the MVStore storage engine. -How long to retain old, persisted data, in milliseconds. -The default is 45000 (45 seconds), 0 means overwrite data as early as possible. -It is assumed that a file system and hard disk will flush all write buffers within this time. -Using a lower value might be dangerous, unless the file system and hard disk flush the buffers earlier. -To manually flush the buffers, use CHECKPOINT SYNC, -however please note that according to various tests this does not always work as expected -depending on the operating system and hardware. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting is persistent. -This setting can be appended to the database URL: ""jdbc:h2:test;RETENTION_TIME=0"" -"," -SET RETENTION_TIME 0 -" - -"Commands (Other)","SET SALT HASH"," -SET SALT bytes HASH bytes -"," -Sets the password salt and hash for the current user. The password must be in -single quotes. It is case sensitive and can contain spaces. - -This command commits an open transaction in this connection. 
-"," -SET SALT '00' HASH '1122' -" - -"Commands (Other)","SET SCHEMA"," -SET SCHEMA schemaName -"," -Changes the default schema of the current connection. The default schema is used -in statements where no schema is set explicitly. The default schema for new -connections is PUBLIC. - -This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;SCHEMA=ABC"" -"," -SET SCHEMA INFORMATION_SCHEMA -" - -"Commands (Other)","SET SCHEMA_SEARCH_PATH"," -SET SCHEMA_SEARCH_PATH schemaName [,...] -"," -Changes the schema search path of the current connection. The default schema is -used in statements where no schema is set explicitly. The default schema for new -connections is PUBLIC. - -This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;SCHEMA_SEARCH_PATH=ABC,DEF"" -"," -SET SCHEMA_SEARCH_PATH INFORMATION_SCHEMA, PUBLIC -" - -"Commands (Other)","SET THROTTLE"," -SET THROTTLE int -"," -Sets the throttle for the current connection. The value is the number of -milliseconds delay after each 50 ms. The default value is 0 (throttling -disabled). - -This command does not commit a transaction, and rollback does not affect it. -This setting can be appended to the database URL: ""jdbc:h2:test;THROTTLE=50"" -"," -SET THROTTLE 200 -" - -"Commands (Other)","SET TRACE_LEVEL"," -SET { TRACE_LEVEL_FILE | TRACE_LEVEL_SYSTEM_OUT } int -"," -Sets the trace level for file the file or system out stream. Levels are: 0=off, -1=error, 2=info, 3=debug. The default level is 1 for file and 0 for system out. -To use SLF4J, append "";TRACE_LEVEL_FILE=4"" to the database URL when opening the database. - -This setting is not persistent. -Admin rights are required to execute this command, as it affects all connections. -This command does not commit a transaction, and rollback does not affect it. 
-This setting can be appended to the database URL: ""jdbc:h2:test;TRACE_LEVEL_SYSTEM_OUT=3"" -"," -SET TRACE_LEVEL_SYSTEM_OUT 3 -" - -"Commands (Other)","SET TRACE_MAX_FILE_SIZE"," -SET TRACE_MAX_FILE_SIZE int -"," -Sets the maximum trace file size. If the file exceeds the limit, the file is -renamed to .old and a new file is created. If another .old file exists, it is -deleted. The default max size is 16 MB. - -This setting is persistent. -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;TRACE_MAX_FILE_SIZE=3"" -"," -SET TRACE_MAX_FILE_SIZE 10 -" - -"Commands (Other)","SET UNDO_LOG"," -SET UNDO_LOG int -"," -Enables (1) or disables (0) the per session undo log. The undo log is enabled by -default. When disabled, transactions can not be rolled back. This setting should -only be used for bulk operations that don't need to be atomic. - -This command commits an open transaction in this connection. -"," -SET UNDO_LOG 0 -" - -"Commands (Other)","SET WRITE_DELAY"," -SET WRITE_DELAY int -"," -Set the maximum delay between a commit and flushing the log, in milliseconds. -This setting is persistent. The default is 500 ms. - -Admin rights are required to execute this command, as it affects all connections. -This command commits an open transaction in this connection. -This setting can be appended to the database URL: ""jdbc:h2:test;WRITE_DELAY=0"" -"," -SET WRITE_DELAY 2000 -" - -"Commands (Other)","SHUTDOWN"," -SHUTDOWN [ IMMEDIATELY | COMPACT | DEFRAG ] -"," -This statement closes all open connections to the database and closes the -database. This command is usually not required, as the database is -closed automatically when the last connection to it is closed. - -If no option is used, then the database is closed normally. -All connections are closed, open transactions are rolled back. 
- -SHUTDOWN COMPACT fully compacts the database (re-creating the database may further reduce the database size). -If the database is closed normally (using SHUTDOWN or by closing all connections), then the database is also compacted, -but only for at most the time defined by the database setting ""h2.maxCompactTime"" in milliseconds (see there). - -SHUTDOWN IMMEDIATELY closes the database files without any cleanup and without compacting. - -SHUTDOWN DEFRAG re-orders the pages when closing the database so that table scans are faster. - -Admin rights are required to execute this command. -"," -SHUTDOWN COMPACT -" - -"Other Grammar","Alias"," -name -"," -An alias is a name that is only valid in the context of the statement. -"," -A -" - -"Other Grammar","And Condition"," -condition [ { AND condition } [...] ] -"," -Value or condition. -"," -ID=1 AND NAME='Hi' -" - -"Other Grammar","Array"," -( [ expression, [ expression [,...] ] ] ) -"," -An array of values. An empty array is '()'. Trailing commas are ignored. -An array with one element must contain a comma to be parsed as an array. -"," -(1, 2) -(1, ) -() -" - -"Other Grammar","Boolean"," -TRUE | FALSE -"," -A boolean value. -"," -TRUE -" - -"Other Grammar","Bytes"," -X'hex' -"," -A binary value. The hex value is not case sensitive. -"," -X'01FF' -" - -"Other Grammar","Case"," -CASE expression { WHEN expression THEN expression } [...] -[ ELSE expression ] END -"," -Returns the first expression where the value is equal to the test expression. If -no else part is specified, return NULL. -"," -CASE CNT WHEN 0 THEN 'No' WHEN 1 THEN 'One' ELSE 'Some' END -" - -"Other Grammar","Case When"," -CASE { WHEN expression THEN expression} [...] -[ ELSE expression ] END -"," -Returns the first expression where the condition is true. If no else part is -specified, return NULL. -"," -CASE WHEN CNT<10 THEN 'Low' ELSE 'High' END -" - -"Other Grammar","Cipher"," -AES -"," -Only the algorithm AES (""AES-128"") is supported currently. 
-"," -AES -" - -"Other Grammar","Column Definition"," -columnName dataType -[ { DEFAULT expression | AS computedColumnExpression } ] [ [ NOT ] NULL ] -[ { AUTO_INCREMENT | IDENTITY } [ ( startInt [, incrementInt ] ) ] ] -[ SELECTIVITY selectivity ] [ COMMENT expression ] -[ PRIMARY KEY [ HASH ] | UNIQUE ] [ CHECK condition ] -"," -Default expressions are used if no explicit value was used when adding a row. -The computed column expression is evaluated and assigned whenever the row changes. - -Identity and auto-increment columns are columns with a sequence as the -default. The column declared as the identity columns is implicitly the -primary key column of this table (unlike auto-increment columns). - -The options PRIMARY KEY, UNIQUE, and CHECK are not supported for ALTER statements. - -Check constraints can reference columns of the table, -and they can reference objects that exist while the statement is executed. -Conditions are only checked when a row is added or modified -in the table where the constraint exists. -"," -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255) DEFAULT ''); -CREATE TABLE TEST(ID BIGINT IDENTITY); -CREATE TABLE TEST(QUANTITY INT, PRICE DECIMAL, AMOUNT DECIMAL AS QUANTITY*PRICE); -" - -"Other Grammar","Comments"," --- anythingUntilEndOfLine | // anythingUntilEndOfLine | /* anythingUntilEndComment */ -"," -Comments can be used anywhere in a command and are ignored by the database. Line -comments end with a newline. Block comments cannot be nested, but can be -multiple lines long. -"," -// This is a comment -" - -"Other Grammar","Compare"," -<> | <= | >= | = | < | > | != | && -"," -Comparison operator. The operator != is the same as <>. -The operator ""&&"" means overlapping; it can only be used with geometry types. -"," -<> -" - -"Other Grammar","Condition"," -operand [ conditionRightHandSide ] | NOT condition | EXISTS ( select ) -"," -Boolean value or condition. 
-"," -ID<>2 -" - -"Other Grammar","Condition Right Hand Side"," -compare { { { ALL | ANY | SOME } ( select ) } | operand } - | IS [ NOT ] NULL - | IS [ NOT ] [ DISTINCT FROM ] operand - | BETWEEN operand AND operand - | IN ( { select | expression [,...] } ) - | [ NOT ] LIKE operand [ ESCAPE string ] - | [ NOT ] REGEXP operand -"," -The right hand side of a condition. - -The conditions ""IS [ NOT ]"" and ""IS [ NOT ] DISTINCT FROM"" are null-safe, meaning -NULL is considered the same as NULL, and the condition never evaluates to NULL. - -When comparing with LIKE, the wildcards characters are ""_"" (any one character) -and ""%"" (any characters). The database uses an index when comparing with LIKE -except if the operand starts with a wildcard. To search for the characters ""%"" and -""_"", the characters need to be escaped. The default escape character is "" \ "" (backslash). -To select no escape character, use ""ESCAPE ''"" (empty string). -At most one escape character is allowed. -Each character that follows the escape character in the pattern needs to match exactly. -Patterns that end with an escape character are invalid and the expression returns NULL. - -When comparing with REGEXP, regular expression matching is used. -See Java ""Matcher.find"" for details. -"," -LIKE 'Jo%' -" - -"Other Grammar","Constraint"," -[ constraintNameDefinition ] -{ CHECK expression - | UNIQUE ( columnName [,...] ) - | referentialConstraint - | PRIMARY KEY [ HASH ] ( columnName [,...] ) } -"," -Defines a constraint. -The check condition must evaluate to TRUE, FALSE or NULL. -TRUE and NULL mean the operation is to be permitted, -and FALSE means the operation is to be rejected. -To prevent NULL in a column, use NOT NULL instead of a check constraint. -"," -PRIMARY KEY(ID, NAME) -" - -"Other Grammar","Constraint Name Definition"," -CONSTRAINT [ IF NOT EXISTS ] newConstraintName -"," -Defines a constraint name. 
-"," -CONSTRAINT CONST_ID -" - -"Other Grammar","Csv Options"," -charsetString [, fieldSepString [, fieldDelimString [, escString [, nullString]]]]] - | optionString -"," -Optional parameters for CSVREAD and CSVWRITE. -Instead of setting the options one by one, all options can be -combined into a space separated key-value pairs, as follows: -""STRINGDECODE('charset=UTF-8 escape=\"" fieldDelimiter=\"" fieldSeparator=, ' ||"" -""'lineComment=# lineSeparator=\n null= rowSeparator=')"". -The following options are supported: - -""caseSensitiveColumnNames"" (true or false; disabled by default), - -""charset"", - -""escape"", - -""fieldDelimiter"", - -""fieldSeparator"", - -""lineComment"" (disabled by default), - -""lineSeparator"" (the line separator used for writing; ignored for reading), - -""null"", Note that an empty value is always treated as null. -This feature for compatibility, it is only here to support reading existing CSV files -that contain explicit ""null"" delimiters. - -""preserveWhitespace"" (true or false; disabled by default), - -""writeColumnHeader"" (true or false; enabled by default). - -For a newline or other special character, use STRINGDECODE as in the example above. -A space needs to be escaped with a backslash (""'\ '""), and -a backslash needs to be escaped with another backslash (""'\\'""). -All other characters are not to be escaped, that means -newline and tab characters are written as such. -"," -CALL CSVWRITE('test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=|'); -" - -"Other Grammar","Data Type"," -intType | booleanType | tinyintType | smallintType | bigintType | identityType - | decimalType | doubleType | realType | dateType | timeType | timestampType - | binaryType | otherType | varcharType | varcharIgnorecaseType | charType - | blobType | clobType | uuidType | arrayType -"," -A data type definition. -"," -INT -" - -"Other Grammar","Date"," -DATE 'yyyy-MM-dd' -"," -A date literal. 
The limitations are the same as for the Java data type -""java.sql.Date"", but for compatibility with other databases the suggested minimum -and maximum years are 0001 and 9999. -"," -DATE '2004-12-31' -" - -"Other Grammar","Decimal"," -[ + | - ] { { number [ . number ] } | { . number } } [ E [ + | - ] expNumber [...] ] ] -"," -A decimal number with fixed precision and scale. -Internally, ""java.lang.BigDecimal"" is used. -To ensure the floating point representation is used, use CAST(X AS DOUBLE). -There are some special decimal values: to represent positive infinity, use ""POWER(0, -1)""; -for negative infinity, use ""(-POWER(0, -1))""; for -0.0, use ""(-CAST(0 AS DOUBLE))""; -for ""NaN"" (not a number), use ""SQRT(-1)"". -"," -SELECT -1600.05 -SELECT CAST(0 AS DOUBLE) -SELECT -1.4e-10 -" - -"Other Grammar","Digit"," -0-9 -"," -A digit. -"," -0 -" - -"Other Grammar","Dollar Quoted String"," -$$anythingExceptTwoDollarSigns$$ -"," -A string starts and ends with two dollar signs. Two dollar signs are not allowed -within the text. A whitespace is required before the first set of dollar signs. -No escaping is required within the text. -"," -$$John's car$$ -" - -"Other Grammar","Expression"," -andCondition [ { OR andCondition } [...] ] -"," -Value or condition. -"," -ID=1 OR NAME='Hi' -" - -"Other Grammar","Factor"," -term [ { { * | / | % } term } [...] ] -"," -A value or a numeric factor. -"," -ID * 10 -" - -"Other Grammar","Hex"," -{ { digit | a-f | A-F } { digit | a-f | A-F } } [...] -"," -The hexadecimal representation of a number or of bytes. Two characters are one -byte. -"," -cafe -" - -"Other Grammar","Hex Number"," -[ + | - ] 0x hex -"," -A number written in hexadecimal notation. -"," -0xff -" - -"Other Grammar","Index Column"," -columnName [ ASC | DESC ] [ NULLS { FIRST | LAST } ] -"," -Indexes this column in ascending or descending order. 
Usually it is not required -to specify the order; however doing so will speed up large queries that order -the column in the same way. -"," -NAME -" - -"Other Grammar","Int"," -[ + | - ] number -"," -The maximum integer number is 2147483647, the minimum is -2147483648. -"," -10 -" - -"Other Grammar","Long"," -[ + | - ] number -"," -Long numbers are between -9223372036854775808 and 9223372036854775807. -"," -100000 -" - -"Other Grammar","Name"," -{ { A-Z|_ } [ { A-Z|_|0-9 } [...] ] } | quotedName -"," -Names are not case sensitive. There is no maximum name length. -"," -TEST -" - -"Other Grammar","Null"," -NULL -"," -NULL is a value without data type and means 'unknown value'. -"," -NULL -" - -"Other Grammar","Number"," -digit [...] -"," -The maximum length of the number depends on the data type used. -"," -100 -" - -"Other Grammar","Numeric"," -decimal | int | long | hexNumber -"," -The data type of a numeric value is always the lowest possible for the given value. -If the number contains a dot this is decimal; otherwise it is int, long, or decimal (depending on the value). -"," -SELECT -1600.05 -SELECT CAST(0 AS DOUBLE) -SELECT -1.4e-10 -" - -"Other Grammar","Operand"," -summand [ { || summand } [...] ] -"," -A value or a concatenation of values. -In the default mode, the result is NULL if either parameter is NULL. -"," -'Hi' || ' Eva' -" - -"Other Grammar","Order"," -{ int | expression } [ ASC | DESC ] [ NULLS { FIRST | LAST } ] -"," -Sorts the result by the given column number, or by an expression. If the -expression is a single parameter, then the value is interpreted as a column -number. Negative column numbers reverse the sort order. -"," -NAME DESC NULLS LAST -" - -"Other Grammar","Quoted Name"," -""anythingExceptDoubleQuote"" -"," -Quoted names are case sensitive, and can contain spaces. There is no maximum -name length. Two double quotes can be used to create a single double quote -inside an identifier. 
-"," -""FirstName"" -" - -"Other Grammar","Referential Constraint"," -FOREIGN KEY ( columnName [,...] ) -REFERENCES [ refTableName ] [ ( refColumnName [,...] ) ] -[ ON DELETE referentialAction ] [ ON UPDATE referentialAction ] -"," -Defines a referential constraint. -If the table name is not specified, then the same table is referenced. -RESTRICT is the default action. -If the referenced columns are not specified, then the primary key columns are used. -The required indexes are automatically created if required. -Some tables may not be referenced, such as metadata tables. -"," -FOREIGN KEY(ID) REFERENCES TEST(ID) -" - -"Other Grammar","Referential Action"," -CASCADE | RESTRICT | NO ACTION | SET { DEFAULT | NULL } -"," -The action CASCADE will cause conflicting rows in the referencing (child) table to be deleted or updated. -RESTRICT is the default action. -As this database does not support deferred checking, RESTRICT and NO ACTION will both throw an exception if the constraint is violated. -The action SET DEFAULT will set the column in the referencing (child) table to the default value, while SET NULL will set it to NULL. -"," -FOREIGN KEY(ID) REFERENCES TEST(ID) ON UPDATE CASCADE -" - -"Other Grammar","Script Compression Encryption"," -[ COMPRESSION { DEFLATE | LZF | ZIP | GZIP } ] [ CIPHER cipher PASSWORD string ] -"," -The compression and encryption algorithm to use for script files. -When using encryption, only DEFLATE and LZF are supported. -LZF is faster but uses more space. -"," -COMPRESSION LZF -" - -"Other Grammar","Select Expression"," -* | expression [ [ AS ] columnAlias ] | tableAlias.* -"," -An expression in a SELECT statement. -"," -ID AS VALUE -" - -"Other Grammar","String"," -'anythingExceptSingleQuote' -"," -A string starts and ends with a single quote. Two single quotes can be used to -create a single quote inside a string. -"," -'John''s car' -" - -"Other Grammar","Summand"," -factor [ { { + | - } factor } [...] ] -"," -A value or a numeric sum. 
- -Please note the text concatenation operator is ""||"". -"," -ID + 20 -" - -"Other Grammar","Table Expression"," -{ [ schemaName. ] tableName | ( select ) | valuesExpression } [ [ AS ] newTableAlias ] -[ { { LEFT | RIGHT } [ OUTER ] | [ INNER ] | CROSS | NATURAL } - JOIN tableExpression [ ON expression ] ] -"," -Joins a table. The join expression is not supported for cross and natural joins. -A natural join is an inner join, where the condition is automatically on the -columns with the same name. -"," -TEST AS T LEFT JOIN TEST AS T1 ON T.ID = T1.ID -" - -"Other Grammar","Values Expression"," -VALUES { ( expression [,...] ) } [,...] -"," -A list of rows that can be used like a table. -The column list of the resulting table is C1, C2, and so on. -"," -SELECT * FROM (VALUES(1, 'Hello'), (2, 'World')) AS V; -" - -"Other Grammar","Term"," -value - | columnName - | ?[ int ] - | NEXT VALUE FOR sequenceName - | function - | { - | + } term - | ( expression ) - | select - | case - | caseWhen - | tableAlias.columnName -"," -A value. Parameters can be indexed, for example ""?1"" meaning the first parameter. -Each table has a pseudo-column named ""_ROWID_"" that contains the unique row identifier. -"," -'Hello' -" - -"Other Grammar","Time"," -TIME 'hh:mm:ss' -"," -A time literal. A value is between plus and minus 2 million hours -and has nanosecond resolution. -"," -TIME '23:59:59' -" - -"Other Grammar","Timestamp"," -TIMESTAMP 'yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]' -"," -A timestamp literal. The limitations are the same as for the Java data type -""java.sql.Timestamp"", but for compatibility with other databases the suggested -minimum and maximum years are 0001 and 9999. -"," -TIMESTAMP '2005-12-31 23:59:59' -" - -"Other Grammar","Value"," -string | dollarQuotedString | numeric | date | time | timestamp | boolean | bytes | array | null -"," -A literal value of any data type, or null. 
-"," -10 -" - -"Data Types","INT Type"," -INT | INTEGER | MEDIUMINT | INT4 | SIGNED -"," -Possible values: -2147483648 to 2147483647. - -Mapped to ""java.lang.Integer"". -"," -INT -" - -"Data Types","BOOLEAN Type"," -BOOLEAN | BIT | BOOL -"," -Possible values: TRUE and FALSE. - -Mapped to ""java.lang.Boolean"". -"," -BOOLEAN -" - -"Data Types","TINYINT Type"," -TINYINT -"," -Possible values are: -128 to 127. - -Mapped to ""java.lang.Byte"". -"," -TINYINT -" - -"Data Types","SMALLINT Type"," -SMALLINT | INT2 | YEAR -"," -Possible values: -32768 to 32767. - -Mapped to ""java.lang.Short"". -"," -SMALLINT -" - -"Data Types","BIGINT Type"," -BIGINT | INT8 -"," -Possible values: -9223372036854775808 to 9223372036854775807. - -Mapped to ""java.lang.Long"". -"," -BIGINT -" - -"Data Types","IDENTITY Type"," -IDENTITY -"," -Auto-Increment value. Possible values: -9223372036854775808 to -9223372036854775807. Used values are never re-used, even when the transaction is -rolled back. - -Mapped to ""java.lang.Long"". -"," -IDENTITY -" - -"Data Types","DECIMAL Type"," -{ DECIMAL | NUMBER | DEC | NUMERIC } ( precisionInt [ , scaleInt ] ) -"," -Data type with fixed precision and scale. This data type is recommended for -storing currency values. - -Mapped to ""java.math.BigDecimal"". -"," -DECIMAL(20, 2) -" - -"Data Types","DOUBLE Type"," -{ DOUBLE [ PRECISION ] | FLOAT | FLOAT8 } -"," -A floating point number. Should not be used to represent currency values, because -of rounding problems. - -Mapped to ""java.lang.Double"". -"," -DOUBLE -" - -"Data Types","REAL Type"," -{ REAL | FLOAT4 } -"," -A single precision floating point number. Should not be used to represent currency -values, because of rounding problems. - -Mapped to ""java.lang.Float"". -"," -REAL -" - -"Data Types","TIME Type"," -TIME -"," -The time data type. The format is hh:mm:ss. - -Mapped to ""java.sql.Time"". When converted to a ""java.sql.Date"", the date is set to ""1970-01-01"". 
-"," -TIME -" - -"Data Types","DATE Type"," -DATE -"," -The date data type. The format is yyyy-MM-dd. - -Mapped to ""java.sql.Date"", with the time set to ""00:00:00"" -(or to the next possible time if midnight doesn't exist for the given date and timezone due to a daylight saving change). -"," -DATE -" - -"Data Types","TIMESTAMP Type"," -{ TIMESTAMP | DATETIME | SMALLDATETIME } -"," -The timestamp data type. The format is yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]. - -Mapped to ""java.sql.Timestamp"" (""java.util.Date"" is also supported). -"," -TIMESTAMP -" - -"Data Types","BINARY Type"," -{ BINARY | VARBINARY | LONGVARBINARY | RAW | BYTEA } [ ( precisionInt ) ] -"," -Represents a byte array. For very long arrays, use BLOB. -The maximum size is 2 GB, but the whole object is kept in -memory when using this data type. The precision is a size constraint; -only the actual data is persisted. For large text data BLOB or CLOB -should be used. - -Mapped to byte[]. -"," -BINARY(1000) -" - -"Data Types","OTHER Type"," -OTHER -"," -This type allows storing serialized Java objects. Internally, a byte array is used. -Serialization and deserialization is done on the client side only. -Deserialization is only done when ""getObject"" is called. -Java operations cannot be executed inside the database engine for security reasons. -Use ""PreparedStatement.setObject"" to store values. - -Mapped to ""java.lang.Object"" (or any subclass). -"," -OTHER -" - -"Data Types","VARCHAR Type"," -{ VARCHAR | LONGVARCHAR | VARCHAR2 | NVARCHAR - | NVARCHAR2 | VARCHAR_CASESENSITIVE} [ ( precisionInt ) ] -"," -A Unicode String. -Use two single quotes ('') to create a quote. - -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. - -The whole text is loaded into memory when using this data type. -For large text data CLOB should be used; see there for details. - -Mapped to ""java.lang.String"". 
-"," -VARCHAR(255) -" - -"Data Types","VARCHAR_IGNORECASE Type"," -VARCHAR_IGNORECASE [ ( precisionInt ) ] -"," -Same as VARCHAR, but not case sensitive when comparing. -Stored in mixed case. - -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. - -The whole text is loaded into memory when using this data type. -For large text data CLOB should be used; see there for details. - -Mapped to ""java.lang.String"". -"," -VARCHAR_IGNORECASE -" - -"Data Types","CHAR Type"," -{ CHAR | CHARACTER | NCHAR } [ ( precisionInt ) ] -"," -A Unicode String. -This type is supported for compatibility with other databases and older applications. -The difference to VARCHAR is that trailing spaces are ignored and not persisted. - -The maximum precision is ""Integer.MAX_VALUE"". -The precision is a size constraint; only the actual data is persisted. - -The whole text is kept in memory when using this data type. -For large text data CLOB should be used; see there for details. - -Mapped to ""java.lang.String"". -"," -CHAR(10) -" - -"Data Types","BLOB Type"," -{ BLOB | TINYBLOB | MEDIUMBLOB | LONGBLOB | IMAGE | OID } [ ( precisionInt ) ] -"," -Like BINARY, but intended for very large values such as files or images. Unlike -when using BINARY, large objects are not kept fully in-memory. Use -""PreparedStatement.setBinaryStream"" to store values. See also CLOB and -Advanced / Large Objects. - -Mapped to ""java.sql.Blob"" (""java.io.InputStream"" is also supported). -"," -BLOB -" - -"Data Types","CLOB Type"," -{ CLOB | TINYTEXT | TEXT | MEDIUMTEXT | LONGTEXT | NTEXT | NCLOB } [ ( precisionInt ) ] -"," -CLOB is like VARCHAR, but intended for very large values. Unlike when using -VARCHAR, large CLOB objects are not kept fully in-memory; instead, they are streamed. -CLOB should be used for documents and texts with arbitrary size such as XML or -HTML documents, text files, or memo fields of unlimited size. 
Use -""PreparedStatement.setCharacterStream"" to store values. See also Advanced / Large Objects. - -VARCHAR should be used for text with relatively short average size (for example -shorter than 200 characters). Short CLOB values are stored inline, but there is -an overhead compared to VARCHAR. - -Mapped to ""java.sql.Clob"" (""java.io.Reader"" is also supported). -"," -CLOB -" - -"Data Types","UUID Type"," -UUID -"," -Universally unique identifier. This is a 128 bit value. -To store values, use ""PreparedStatement.setBytes"", -""setString"", or ""setObject(uuid)"" (where ""uuid"" is a ""java.util.UUID""). -""ResultSet.getObject"" will return a ""java.util.UUID"". - -Please note that using an index on randomly generated data will -result on poor performance once there are millions of rows in a table. -The reason is that the cache behavior is very bad with randomly distributed data. -This is a problem for any database system. - -For details, see the documentation of ""java.util.UUID"". -"," -UUID -" - -"Data Types","ARRAY Type"," -ARRAY -"," -An array of values. -Mapped to ""java.lang.Object[]"" (arrays of any non-primitive type are also supported). - - -Use a value list (1, 2) or ""PreparedStatement.setObject(.., new Object[] {..})"" to store values, -and ""ResultSet.getObject(..)"" or ""ResultSet.getArray(..)"" to retrieve the values. -"," -ARRAY -" - -"Data Types","GEOMETRY Type"," -GEOMETRY -"," -A spatial geometry type, based on the ""com.vividsolutions.jts"" library. -Normally represented in textual format using the WKT (well known text) format. - -Use a quoted string containing a WKT formatted string or ""PreparedStatement.setObject()"" to store values, -and ""ResultSet.getObject(..)"" or ""ResultSet.getString(..)"" to retrieve the values. -"," -GEOMETRY -" - -"Functions (Aggregate)","AVG"," -AVG ( [ DISTINCT ] { numeric } ) -"," -The average (mean) value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. 
-The returned value is of the same data type as the parameter. -"," -AVG(X) -" - -"Functions (Aggregate)","BIT_AND"," -BIT_AND(expression) -"," -The bitwise AND of all non-null values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -BIT_AND(ID) -" - -"Functions (Aggregate)","BIT_OR"," -BIT_OR(expression) -"," -The bitwise OR of all non-null values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -BIT_OR(ID) -" - -"Functions (Aggregate)","BOOL_AND"," -BOOL_AND(boolean) -"," -Returns true if all expressions are true. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -BOOL_AND(ID>10) -" - -"Functions (Aggregate)","BOOL_OR"," -BOOL_OR(boolean) -"," -Returns true if any expression is true. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -BOOL_OR(NAME LIKE 'W%') -" - -"Functions (Aggregate)","COUNT"," -COUNT( { * | { [ DISTINCT ] expression } } ) -"," -The count of all rows, or of the non-null values. -This method returns a long. -If no rows are selected, the result is 0. -Aggregates are only allowed in select statements. -"," -COUNT(*) -" - -"Functions (Aggregate)","GROUP_CONCAT"," -GROUP_CONCAT ( [ DISTINCT ] string -[ ORDER BY { expression [ ASC | DESC ] } [,...] ] -[ SEPARATOR expression ] ) -"," -Concatenates strings with a separator. -The default separator is a ',' (without space). -This method returns a string. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -GROUP_CONCAT(NAME ORDER BY ID SEPARATOR ', ') -" - -"Functions (Aggregate)","MAX"," -MAX(value) -"," -The highest value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The returned value is of the same data type as the parameter. 
-"," -MAX(NAME) -" - -"Functions (Aggregate)","MIN"," -MIN(value) -"," -The lowest value. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The returned value is of the same data type as the parameter. -"," -MIN(NAME) -" - -"Functions (Aggregate)","SUM"," -SUM( [ DISTINCT ] { numeric } ) -"," -The sum of all values. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -The data type of the returned value depends on the parameter data type like this: -""BOOLEAN, TINYINT, SMALLINT, INT -> BIGINT, BIGINT -> DECIMAL, REAL -> DOUBLE"" -"," -SUM(X) -" - -"Functions (Aggregate)","SELECTIVITY"," -SELECTIVITY(value) -"," -Estimates the selectivity (0-100) of a value. -The value is defined as (100 * distinctCount / rowCount). -The selectivity of 0 rows is 0 (unknown). -Up to 10000 values are kept in memory. -Aggregates are only allowed in select statements. -"," -SELECT SELECTIVITY(FIRSTNAME), SELECTIVITY(NAME) FROM TEST WHERE ROWNUM()<20000 -" - -"Functions (Aggregate)","STDDEV_POP"," -STDDEV_POP( [ DISTINCT ] numeric ) -"," -The population standard deviation. -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -STDDEV_POP(X) -" - -"Functions (Aggregate)","STDDEV_SAMP"," -STDDEV_SAMP( [ DISTINCT ] numeric ) -"," -The sample standard deviation. -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -STDDEV(X) -" - -"Functions (Aggregate)","VAR_POP"," -VAR_POP( [ DISTINCT ] numeric ) -"," -The population variance (square of the population standard deviation). -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. 
-"," -VAR_POP(X) -" - -"Functions (Aggregate)","VAR_SAMP"," -VAR_SAMP( [ DISTINCT ] numeric ) -"," -The sample variance (square of the sample standard deviation). -This method returns a double. -If no rows are selected, the result is NULL. -Aggregates are only allowed in select statements. -"," -VAR_SAMP(X) -" - -"Functions (Numeric)","ABS"," -ABS ( { numeric } ) -"," -See also Java ""Math.abs"". -Please note that ""Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE"" and ""Math.abs(Long.MIN_VALUE) == Long.MIN_VALUE"". -The returned value is of the same data type as the parameter. -"," -ABS(ID) -" - -"Functions (Numeric)","ACOS"," -ACOS(numeric) -"," -Calculate the arc cosine. -See also Java ""Math.acos"". -This method returns a double. -"," -ACOS(D) -" - -"Functions (Numeric)","ASIN"," -ASIN(numeric) -"," -Calculate the arc sine. -See also Java ""Math.asin"". -This method returns a double. -"," -ASIN(D) -" - -"Functions (Numeric)","ATAN"," -ATAN(numeric) -"," -Calculate the arc tangent. -See also Java ""Math.atan"". -This method returns a double. -"," -ATAN(D) -" - -"Functions (Numeric)","COS"," -COS(numeric) -"," -Calculate the trigonometric cosine. -See also Java ""Math.cos"". -This method returns a double. -"," -COS(ANGLE) -" - -"Functions (Numeric)","COSH"," -COSH(numeric) -"," -Calculate the hyperbolic cosine. -See also Java ""Math.cosh"". -This method returns a double. -"," -COSH(X) -" - -"Functions (Numeric)","COT"," -COT(numeric) -"," -Calculate the trigonometric cotangent (""1/TAN(ANGLE)""). -See also Java ""Math.*"" functions. -This method returns a double. -"," -COT(ANGLE) -" - -"Functions (Numeric)","SIN"," -SIN(numeric) -"," -Calculate the trigonometric sine. -See also Java ""Math.sin"". -This method returns a double. -"," -SIN(ANGLE) -" - -"Functions (Numeric)","SINH"," -SINH(numeric) -"," -Calculate the hyperbolic sine. -See also Java ""Math.sinh"". -This method returns a double. 
-"," -SINH(ANGLE) -" - -"Functions (Numeric)","TAN"," -TAN(numeric) -"," -Calculate the trigonometric tangent. -See also Java ""Math.tan"". -This method returns a double. -"," -TAN(ANGLE) -" - -"Functions (Numeric)","TANH"," -TANH(numeric) -"," -Calculate the hyperbolic tangent. -See also Java ""Math.tanh"". -This method returns a double. -"," -TANH(X) -" - -"Functions (Numeric)","ATAN2"," -ATAN2(numeric, numeric) -"," -Calculate the angle when converting the rectangular coordinates to polar coordinates. -See also Java ""Math.atan2"". -This method returns a double. -"," -ATAN2(X, Y) -" - -"Functions (Numeric)","BITAND"," -BITAND(long, long) -"," -The bitwise AND operation. -This method returns a long. -See also Java operator &. -"," -BITAND(A, B) -" - -"Functions (Numeric)","BITOR"," -BITOR(long, long) -"," -The bitwise OR operation. -This method returns a long. -See also Java operator |. -"," -BITOR(A, B) -" - -"Functions (Numeric)","BITXOR"," -BITXOR(long, long) -"," -The bitwise XOR operation. -This method returns a long. -See also Java operator ^. -"," -BITXOR(A, B) -" - -"Functions (Numeric)","MOD"," -MOD(long, long) -"," -The modulo operation. -This method returns a long. -See also Java operator %. -"," -MOD(A, B) -" - -"Functions (Numeric)","CEILING"," -{ CEILING | CEIL } (numeric) -"," -See also Java ""Math.ceil"". -This method returns a double. -"," -CEIL(A) -" - -"Functions (Numeric)","DEGREES"," -DEGREES(numeric) -"," -See also Java ""Math.toDegrees"". -This method returns a double. -"," -DEGREES(A) -" - -"Functions (Numeric)","EXP"," -EXP(numeric) -"," -See also Java ""Math.exp"". -This method returns a double. -"," -EXP(A) -" - -"Functions (Numeric)","FLOOR"," -FLOOR(numeric) -"," -See also Java ""Math.floor"". -This method returns a double. -"," -FLOOR(A) -" - -"Functions (Numeric)","LOG"," -{ LOG | LN } (numeric) -"," -See also Java ""Math.log"". -In the PostgreSQL mode, LOG(x) is base 10. -This method returns a double. 
-"," -LOG(A) -" - -"Functions (Numeric)","LOG10"," -LOG10(numeric) -"," -See also Java ""Math.log10"" (in Java 5). -This method returns a double. -"," -LOG10(A) -" - -"Functions (Numeric)","RADIANS"," -RADIANS(numeric) -"," -See also Java ""Math.toRadians"". -This method returns a double. -"," -RADIANS(A) -" - -"Functions (Numeric)","SQRT"," -SQRT(numeric) -"," -See also Java ""Math.sqrt"". -This method returns a double. -"," -SQRT(A) -" - -"Functions (Numeric)","PI"," -PI() -"," -See also Java ""Math.PI"". -This method returns a double. -"," -PI() -" - -"Functions (Numeric)","POWER"," -POWER(numeric, numeric) -"," -See also Java ""Math.pow"". -This method returns a double. -"," -POWER(A, B) -" - -"Functions (Numeric)","RAND"," -{ RAND | RANDOM } ( [ int ] ) -"," -Calling the function without parameter returns the next a pseudo random number. -Calling it with an parameter seeds the session's random number generator. -This method returns a double between 0 (including) and 1 (excluding). -"," -RAND() -" - -"Functions (Numeric)","RANDOM_UUID"," -RANDOM_UUID() -"," -Returns a new UUID with 122 pseudo random bits. - -Please note that using an index on randomly generated data will -result on poor performance once there are millions of rows in a table. -The reason is that the cache behavior is very bad with randomly distributed data. -This is a problem for any database system. -"," -RANDOM_UUID() -" - -"Functions (Numeric)","ROUND"," -ROUND(numeric [, digitsInt]) -"," -Rounds to a number of digits, or to the nearest long if the number of digits if not set. -This method returns a numeric (the same type as the input). -"," -ROUND(VALUE, 2) -" - -"Functions (Numeric)","ROUNDMAGIC"," -ROUNDMAGIC(numeric) -"," -This function rounds numbers in a good way, but it is slow. -It has a special handling for numbers around 0. -Only numbers smaller or equal +/-1000000000000 are supported. -The value is converted to a String internally, and then the last last 4 characters are checked. 
-'000x' becomes '0000' and '999x' becomes '999999', which is rounded automatically. -This method returns a double. -"," -ROUNDMAGIC(VALUE/3*3) -" - -"Functions (Numeric)","SECURE_RAND"," -SECURE_RAND(int) -"," -Generates a number of cryptographically secure random numbers. -This method returns bytes. -"," -CALL SECURE_RAND(16) -" - -"Functions (Numeric)","SIGN"," -SIGN ( { numeric } ) -"," -Returns -1 if the value is smaller 0, 0 if zero, and otherwise 1. -"," -SIGN(VALUE) -" - -"Functions (Numeric)","ENCRYPT"," -ENCRYPT(algorithmString, keyBytes, dataBytes) -"," -Encrypts data using a key. -The supported algorithm is AES. -The block size is 16 bytes. -This method returns bytes. -"," -CALL ENCRYPT('AES', '00', STRINGTOUTF8('Test')) -" - -"Functions (Numeric)","DECRYPT"," -DECRYPT(algorithmString, keyBytes, dataBytes) -"," -Decrypts data using a key. -The supported algorithm is AES. -The block size is 16 bytes. -This method returns bytes. -"," -CALL TRIM(CHAR(0) FROM UTF8TOSTRING( - DECRYPT('AES', '00', '3fabb4de8f1ee2e97d7793bab2db1116'))) -" - -"Functions (Numeric)","HASH"," -HASH(algorithmString, dataBytes, iterationInt) -"," -Calculate the hash value using an algorithm, and repeat this process for a number of iterations. -Currently, the only algorithm supported is SHA256. -This method returns bytes. -"," -CALL HASH('SHA256', STRINGTOUTF8('Password'), 1000) -" - -"Functions (Numeric)","TRUNCATE"," -{ TRUNC | TRUNCATE } ( { {numeric, digitsInt} | timestamp } ) -"," -Truncates to a number of digits (to the next value closer to 0). -This method returns a double. -When used with a timestamp, truncates a timestamp to a date (day) value. -"," -TRUNCATE(VALUE, 2) -" - -"Functions (Numeric)","COMPRESS"," -COMPRESS(dataBytes [, algorithmString]) -"," -Compresses the data using the specified compression algorithm. -Supported algorithms are: LZF (faster but lower compression; default), and DEFLATE (higher compression). -Compression does not always reduce size. 
Very small objects and objects with little redundancy may get larger. -This method returns bytes. -"," -COMPRESS(STRINGTOUTF8('Test')) -" - -"Functions (Numeric)","EXPAND"," -EXPAND(bytes) -"," -Expands data that was compressed using the COMPRESS function. -This method returns bytes. -"," -UTF8TOSTRING(EXPAND(COMPRESS(STRINGTOUTF8('Test')))) -" - -"Functions (Numeric)","ZERO"," -ZERO() -"," -Returns the value 0. This function can be used even if numeric literals are disabled. -"," -ZERO() -" - -"Functions (String)","ASCII"," -ASCII(string) -"," -Returns the ASCII value of the first character in the string. -This method returns an int. -"," -ASCII('Hi') -" -"Functions (String)","BIT_LENGTH"," -BIT_LENGTH(string) -"," -Returns the number of bits in a string. -This method returns a long. -For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. Each character needs 16 bits. -"," -BIT_LENGTH(NAME) -" - -"Functions (String)","LENGTH"," -{ LENGTH | CHAR_LENGTH | CHARACTER_LENGTH } ( string ) -"," -Returns the number of characters in a string. -This method returns a long. -For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. -"," -LENGTH(NAME) -" - -"Functions (String)","OCTET_LENGTH"," -OCTET_LENGTH(string) -"," -Returns the number of bytes in a string. -This method returns a long. -For BLOB, CLOB, BYTES and JAVA_OBJECT, the precision is used. -Each character needs 2 bytes. -"," -OCTET_LENGTH(NAME) -" - -"Functions (String)","CHAR"," -{ CHAR | CHR } ( int ) -"," -Returns the character that represents the ASCII value. -This method returns a string. -"," -CHAR(65) -" - -"Functions (String)","CONCAT"," -CONCAT(string, string [,...]) -"," -Combines strings. -Unlike with the operator ""||"", NULL parameters are ignored, -and do not cause the result to become NULL. -This method returns a string. -"," -CONCAT(NAME, '!') -" - -"Functions (String)","CONCAT_WS"," -CONCAT_WS(separatorString, string, string [,...]) -"," -Combines strings with separator. 
-Unlike with the operator ""||"", NULL parameters are ignored, -and do not cause the result to become NULL. -This method returns a string. -"," -CONCAT_WS(',', NAME, '!') -" - -"Functions (String)","DIFFERENCE"," -DIFFERENCE(string, string) -"," -Returns the difference between the sounds of two strings. -This method returns an int. -"," -DIFFERENCE(T1.NAME, T2.NAME) -" - -"Functions (String)","HEXTORAW"," -HEXTORAW(string) -"," -Converts a hex representation of a string to a string. -4 hex characters per string character are used. -"," -HEXTORAW(DATA) -" - -"Functions (String)","RAWTOHEX"," -RAWTOHEX(string) -"," -Converts a string to the hex representation. -4 hex characters per string character are used. -This method returns a string. -"," -RAWTOHEX(DATA) -" - -"Functions (String)","INSTR"," -INSTR(string, searchString [, startInt]) -"," -Returns the location of a search string in a string. -If a start position is used, the characters before it are ignored. -If position is negative, the rightmost location is returned. -0 is returned if the search string is not found. -Please note this function is case sensitive, even if the parameters are not. -"," -INSTR(EMAIL,'@') -" - -"Functions (String)","INSERT Function"," -INSERT(originalString, startInt, lengthInt, addString) -"," -Inserts an additional string into the original string at a specified start position. -The length specifies the number of characters that are removed at the start position in the original string. -This method returns a string. -"," -INSERT(NAME, 1, 1, ' ') -" - -"Functions (String)","LOWER"," -{ LOWER | LCASE } ( string ) -"," -Converts a string to lowercase. -"," -LOWER(NAME) -" - -"Functions (String)","UPPER"," -{ UPPER | UCASE } ( string ) -"," -Converts a string to uppercase. -"," -UPPER(NAME) -" - -"Functions (String)","LEFT"," -LEFT(string, int) -"," -Returns the leftmost number of characters. 
-"," -LEFT(NAME, 3) -" - -"Functions (String)","RIGHT"," -RIGHT(string, int) -"," -Returns the rightmost number of characters. -"," -RIGHT(NAME, 3) -" - -"Functions (String)","LOCATE"," -LOCATE(searchString, string [, startInt]) -"," -Returns the location of a search string in a string. -If a start position is used, the characters before it are ignored. -If position is negative, the rightmost location is returned. -0 is returned if the search string is not found. -"," -LOCATE('.', NAME) -" - -"Functions (String)","POSITION"," -POSITION(searchString, string) -"," -Returns the location of a search string in a string. See also LOCATE. -"," -POSITION('.', NAME) -" - -"Functions (String)","LPAD"," -LPAD(string, int[, paddingString]) -"," -Left pad the string to the specified length. -If the length is shorter than the string, it will be truncated at the end. -If the padding string is not set, spaces will be used. -"," -LPAD(AMOUNT, 10, '*') -" - -"Functions (String)","RPAD"," -RPAD(string, int[, paddingString]) -"," -Right pad the string to the specified length. -If the length is shorter than the string, it will be truncated. -If the padding string is not set, spaces will be used. -"," -RPAD(TEXT, 10, '-') -" - -"Functions (String)","LTRIM"," -LTRIM(string) -"," -Removes all leading spaces from a string. -"," -LTRIM(NAME) -" - -"Functions (String)","RTRIM"," -RTRIM(string) -"," -Removes all trailing spaces from a string. -"," -RTRIM(NAME) -" - -"Functions (String)","TRIM"," -TRIM ( [ { LEADING | TRAILING | BOTH } [ string ] FROM ] string ) -"," -Removes all leading spaces, trailing spaces, or spaces at both ends, from a string. -Other characters can be removed as well. -"," -TRIM(BOTH '_' FROM NAME) -" - -"Functions (String)","REGEXP_REPLACE"," -REGEXP_REPLACE(inputString, regexString, replacementString) -"," -Replaces each substring that matches a regular expression. -For details, see the Java ""String.replaceAll()"" method. 
-If any parameter is null, the result is null. -"," -REGEXP_REPLACE('Hello World', ' +', ' ') -" - -"Functions (String)","REPEAT"," -REPEAT(string, int) -"," -Returns a string repeated some number of times. -"," -REPEAT(NAME || ' ', 10) -" - -"Functions (String)","REPLACE"," -REPLACE(string, searchString [, replacementString]) -"," -Replaces all occurrences of a search string in a text with another string. -If no replacement is specified, the search string is removed from the original string. -If any parameter is null, the result is null. -"," -REPLACE(NAME, ' ') -" - -"Functions (String)","SOUNDEX"," -SOUNDEX(string) -"," -Returns a four character code representing the sound of a string. -See also http://www.archives.gov/genealogy/census/soundex.html . -This method returns a string. -"," -SOUNDEX(NAME) -" - -"Functions (String)","SPACE"," -SPACE(int) -"," -Returns a string consisting of a number of spaces. -"," -SPACE(80) -" - -"Functions (String)","STRINGDECODE"," -STRINGDECODE(string) -"," -Converts an encoded string using the Java string literal encoding format. -Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u. -This method returns a string. -"," -CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')) -" - -"Functions (String)","STRINGENCODE"," -STRINGENCODE(string) -"," -Encodes special characters in a string using the Java string literal encoding format. -Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u. -This method returns a string. -"," -CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')) -" - -"Functions (String)","STRINGTOUTF8"," -STRINGTOUTF8(string) -"," -Encodes a string to a byte array using the UTF8 encoding format. -This method returns bytes. -"," -CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) -" - -"Functions (String)","SUBSTRING"," -{ SUBSTRING | SUBSTR } ( string, startInt [, lengthInt ] ) -"," -Returns a substring of a string starting at a position. 
-If the start index is negative, then the start index is relative to the end of the string. -The length is optional. -Also supported is: ""SUBSTRING(string [FROM start] [FOR length])"". -"," -CALL SUBSTR('[Hello]', 2, 5); -CALL SUBSTR('Hello World', -5); -" - -"Functions (String)","UTF8TOSTRING"," -UTF8TOSTRING(bytes) -"," -Decodes a byte array in the UTF8 format to a string. -"," -CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) -" - -"Functions (String)","XMLATTR"," -XMLATTR(nameString, valueString) -"," -Creates an XML attribute element of the form ""name=value"". -The value is encoded as XML text. -This method returns a string. -"," -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com')) -" - -"Functions (String)","XMLNODE"," -XMLNODE(elementString [, attributesString [, contentString [, indentBoolean]]]) -"," -Create an XML node element. -An empty or null attribute string means no attributes are set. -An empty or null content string means the node is empty. -The content is indented by default if it contains a newline. -This method returns a string. -"," -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com'), 'H2') -" - -"Functions (String)","XMLCOMMENT"," -XMLCOMMENT(commentString) -"," -Creates an XML comment. -Two dashes (""--"") are converted to ""- -"". -This method returns a string. -"," -CALL XMLCOMMENT('Test') -" - -"Functions (String)","XMLCDATA"," -XMLCDATA(valueString) -"," -Creates an XML CDATA element. -If the value contains ""]]>"", an XML text element is created instead. -This method returns a string. -"," -CALL XMLCDATA('data') -" - -"Functions (String)","XMLSTARTDOC"," -XMLSTARTDOC() -"," -Returns the XML declaration. -The result is always """". -"," -CALL XMLSTARTDOC() -" - -"Functions (String)","XMLTEXT"," -XMLTEXT(valueString [, escapeNewlineBoolean]) -"," -Creates an XML text element. -If enabled, newline and linefeed is converted to an XML entity (&#). -This method returns a string. 
-","
-CALL XMLTEXT('test')
-"
-
-"Functions (String)","TO_CHAR","
-TO_CHAR(value [, formatString[, nlsParamString]])
-","
-Oracle-compatible TO_CHAR function that can format a timestamp, a number, or text.
-","
-CALL TO_CHAR(TIMESTAMP '2010-01-01 00:00:00', 'DD MON, YYYY')
-"
-
-"Functions (String)","TRANSLATE","
-TRANSLATE(value, searchString, replacementString)
-","
-Oracle-compatible TRANSLATE function that replaces a sequence of characters in a string with another set of characters.
-","
-CALL TRANSLATE('Hello world', 'eo', 'EO')
-"
-
-"Functions (Time and Date)","CURRENT_DATE","
-{ CURRENT_DATE [ () ] | CURDATE() | SYSDATE | TODAY }
-","
-Returns the current date.
-This method always returns the same value within a transaction.
-","
-CURRENT_DATE()
-"
-
-"Functions (Time and Date)","CURRENT_TIME","
-{ CURRENT_TIME [ () ] | CURTIME() }
-","
-Returns the current time.
-This method always returns the same value within a transaction.
-","
-CURRENT_TIME()
-"
-
-"Functions (Time and Date)","CURRENT_TIMESTAMP","
-{ CURRENT_TIMESTAMP [ ( [ int ] ) ] | NOW( [ int ] ) }
-","
-Returns the current timestamp.
-The precision parameter for nanoseconds precision is optional.
-This method always returns the same value within a transaction.
-","
-CURRENT_TIMESTAMP()
-"
-
-"Functions (Time and Date)","DATEADD","
-{ DATEADD| TIMESTAMPADD } (unitString, addIntLong, timestamp)
-","
-Adds units to a timestamp. The string indicates the unit.
-Use negative values to subtract units.
-addIntLong may be a long value when manipulating milliseconds,
-otherwise its range is restricted to int.
-The same units as in the EXTRACT function are supported.
-This method returns a timestamp.
-","
-DATEADD('MONTH', 1, DATE '2001-01-31')
-"
-
-"Functions (Time and Date)","DATEDIFF","
-{ DATEDIFF | TIMESTAMPDIFF } (unitString, aTimestamp, bTimestamp)
-","
-Returns the number of crossed unit boundaries between two timestamps.
-This method returns a long.
-The string indicates the unit.
-The same units as in the EXTRACT function are supported. -"," -DATEDIFF('YEAR', T1.CREATED, T2.CREATED) -" - -"Functions (Time and Date)","DAYNAME"," -DAYNAME(date) -"," -Returns the name of the day (in English). -"," -DAYNAME(CREATED) -" - -"Functions (Time and Date)","DAY_OF_MONTH"," -DAY_OF_MONTH(date) -"," -Returns the day of the month (1-31). -"," -DAY_OF_MONTH(CREATED) -" - -"Functions (Time and Date)","DAY_OF_WEEK"," -DAY_OF_WEEK(date) -"," -Returns the day of the week (1 means Sunday). -"," -DAY_OF_WEEK(CREATED) -" - -"Functions (Time and Date)","DAY_OF_YEAR"," -DAY_OF_YEAR(date) -"," -Returns the day of the year (1-366). -"," -DAY_OF_YEAR(CREATED) -" - -"Functions (Time and Date)","EXTRACT"," -EXTRACT ( { YEAR | YY | MONTH | MM | WEEK | DAY | DD | DAY_OF_YEAR - | DOY | HOUR | HH | MINUTE | MI | SECOND | SS | MILLISECOND | MS } - FROM timestamp ) -"," -Returns a specific value from a timestamps. -This method returns an int. -"," -EXTRACT(SECOND FROM CURRENT_TIMESTAMP) -" - -"Functions (Time and Date)","FORMATDATETIME"," -FORMATDATETIME ( timestamp, formatString -[ , localeString [ , timeZoneString ] ] ) -"," -Formats a date, time or timestamp as a string. -The most important format characters are: -y year, M month, d day, H hour, m minute, s second. -For details of the format, see ""java.text.SimpleDateFormat"". -This method returns a string. -"," -CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', - 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT') -" - -"Functions (Time and Date)","HOUR"," -HOUR(timestamp) -"," -Returns the hour (0-23) from a timestamp. -"," -HOUR(CREATED) -" - -"Functions (Time and Date)","MINUTE"," -MINUTE(timestamp) -"," -Returns the minute (0-59) from a timestamp. -"," -MINUTE(CREATED) -" - -"Functions (Time and Date)","MONTH"," -MONTH(timestamp) -"," -Returns the month (1-12) from a timestamp. -"," -MONTH(CREATED) -" - -"Functions (Time and Date)","MONTHNAME"," -MONTHNAME(date) -"," -Returns the name of the month (in English). 
-"," -MONTHNAME(CREATED) -" - -"Functions (Time and Date)","PARSEDATETIME"," -PARSEDATETIME(string, formatString -[, localeString [, timeZoneString]]) -"," -Parses a string and returns a timestamp. -The most important format characters are: -y year, M month, d day, H hour, m minute, s second. -For details of the format, see ""java.text.SimpleDateFormat"". -"," -CALL PARSEDATETIME('Sat, 3 Feb 2001 03:05:06 GMT', - 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT') -" - -"Functions (Time and Date)","QUARTER"," -QUARTER(timestamp) -"," -Returns the quarter (1-4) from a timestamp. -"," -QUARTER(CREATED) -" - -"Functions (Time and Date)","SECOND"," -SECOND(timestamp) -"," -Returns the second (0-59) from a timestamp. -"," -SECOND(CREATED) -" - -"Functions (Time and Date)","WEEK"," -WEEK(timestamp) -"," -Returns the week (1-53) from a timestamp. -This method uses the current system locale. -"," -WEEK(CREATED) -" - -"Functions (Time and Date)","YEAR"," -YEAR(timestamp) -"," -Returns the year from a timestamp. -"," -YEAR(CREATED) -" - -"Functions (System)","ARRAY_GET"," -ARRAY_GET(arrayExpression, indexExpression) -"," -Returns one element of an array. -This method returns a string. -"," -CALL ARRAY_GET(('Hello', 'World'), 2) -" - -"Functions (System)","ARRAY_LENGTH"," -ARRAY_LENGTH(arrayExpression) -"," -Returns the length of an array. -"," -CALL ARRAY_LENGTH(('Hello', 'World')) -" - -"Functions (System)","ARRAY_CONTAINS"," -ARRAY_CONTAINS(arrayExpression, value) -"," -Returns a boolean true if the array contains the value. -"," -CALL ARRAY_CONTAINS(('Hello', 'World'), 'Hello') -" - -"Functions (System)","AUTOCOMMIT"," -AUTOCOMMIT() -"," -Returns true if auto commit is switched on for this session. -"," -AUTOCOMMIT() -" - -"Functions (System)","CANCEL_SESSION"," -CANCEL_SESSION(sessionInt) -"," -Cancels the currently executing statement of another session. -The method only works if the multithreaded kernel is enabled (see SET MULTI_THREADED). 
-Returns true if the statement was canceled, false if the session is closed or no statement is currently executing. - -Admin rights are required to execute this command. -"," -CANCEL_SESSION(3) -" - -"Functions (System)","CASEWHEN Function"," -CASEWHEN(boolean, aValue, bValue) -"," -Returns 'a' if the boolean expression is true, otherwise 'b'. -Returns the same data type as the parameter. -"," -CASEWHEN(ID=1, 'A', 'B') -" - -"Functions (System)","CAST"," -CAST(value AS dataType) -"," -Converts a value to another data type. The following conversion rules are used: -When converting a number to a boolean, 0 is false and every other value is true. -When converting a boolean to a number, false is 0 and true is 1. -When converting a number to a number of another type, the value is checked for overflow. -When converting a number to binary, the number of bytes matches the precision. -When converting a string to binary, it is hex encoded (every byte two characters); -a hex string can be converted to a number by first converting it to binary. -If a direct conversion is not possible, the value is first converted to a string. -"," -CAST(NAME AS INT); -CAST(65535 AS BINARY); -CAST(CAST('FFFF' AS BINARY) AS INT); -" - -"Functions (System)","COALESCE"," -{ COALESCE | NVL } (aValue, bValue [,...]) -"," -Returns the first value that is not null. -"," -COALESCE(A, B, C) -" - -"Functions (System)","CONVERT"," -CONVERT(value, dataType) -"," -Converts a value to another data type. -"," -CONVERT(NAME, INT) -" - -"Functions (System)","CURRVAL"," -CURRVAL( [ schemaName, ] sequenceString ) -"," -Returns the current (last) value of the sequence, independent of the session. -If the sequence was just created, the method returns (start - interval). -If the schema name is not set, the current schema is used. -If the schema name is not set, the sequence name is converted to uppercase (for compatibility). -This method returns a long. 
-"," -CURRVAL('TEST_SEQ') -" - -"Functions (System)","CSVREAD"," -CSVREAD(fileNameString [, columnsString [, csvOptions ] ] ) -"," -Returns the result set of reading the CSV (comma separated values) file. -For each parameter, NULL means the default value should be used. - -If the column names are specified (a list of column names separated with the -fieldSeparator), those are used, otherwise (or if they are set to NULL) the first line of -the file is interpreted as the column names. -In that case, column names that contain no special characters (only letters, '_', -and digits; similar to the rule for Java identifiers) are considered case insensitive. -Other column names are case sensitive, that means you need to use quoted identifiers -(see below). - -The default charset is the default value for this system, and the default field separator -is a comma. Missing unquoted values as well as data that matches nullString is -parsed as NULL. All columns of type VARCHAR. - -The BOM (the byte-order-mark) character 0xfeff at the beginning of the file is ignored. - -This function can be used like a table: ""SELECT * FROM CSVREAD(...)"". - -Instead of a file, an URL may be used, for example -""jar:file:///c:/temp/example.zip!/org/example/nested.csv"". -To read a stream from the classpath, use the prefix ""classpath:"". -To read from HTTP, use the prefix ""http:"" (as in a browser). - -For performance reason, CSVREAD should not be used inside a join. -Instead, import the data first (possibly into a temporary table) and then use the table. - -Admin rights are required to execute this command. 
-","
-CALL CSVREAD('test.csv');
--- Read a file containing the columns ID, NAME with
-CALL CSVREAD('test2.csv', 'ID|NAME', 'charset=UTF-8 fieldSeparator=|');
-SELECT * FROM CSVREAD('data/test.csv', null, 'rowSeparator=;');
--- Read a tab-separated file
-SELECT * FROM CSVREAD('data/test.tsv', null, 'rowSeparator=' || CHAR(9));
-SELECT ""Last Name"" FROM CSVREAD('address.csv');
-SELECT ""Last Name"" FROM CSVREAD('classpath:/org/acme/data/address.csv');
-"
-
-"Functions (System)","CSVWRITE","
-CSVWRITE ( fileNameString, queryString [, csvOptions [, lineSepString] ] )
-","
-Writes a CSV (comma separated values). The file is overwritten if it exists.
-If only a file name is specified, it will be written to the current working directory.
-For each parameter, NULL means the default value should be used.
-The default charset is the default value for this system, and the default field separator is a comma.
-
-The values are converted to text using the default string representation;
-if another conversion is required you need to change the select statement accordingly.
-The parameter nullString is used when writing NULL (by default nothing is written
-when NULL appears). The default line separator is the default value for this
-system (system property ""line.separator"").
-
-The returned value is the number of rows written.
-Admin rights are required to execute this command.
-","
-CALL CSVWRITE('data/test.csv', 'SELECT * FROM TEST');
-CALL CSVWRITE('data/test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=|');
--- Write a tab-separated file
-CALL CSVWRITE('data/test.tsv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=' || CHAR(9));
-"
-
-"Functions (System)","DATABASE","
-DATABASE()
-","
-Returns the name of the database.
-","
-CALL DATABASE();
-"
-
-"Functions (System)","DATABASE_PATH","
-DATABASE_PATH()
-","
-Returns the directory of the database files and the database name, if it is file based.
-Returns NULL otherwise.
-"," -CALL DATABASE_PATH(); -" - -"Functions (System)","DECODE"," -DECODE(value, whenValue, thenValue [,...]) -"," -Returns the first matching value. NULL is considered to match NULL. -If no match was found, then NULL or the last parameter (if the parameter count is even) is returned. -This function is provided for Oracle compatibility (see there for details). -"," -CALL DECODE(RAND()>0.5, 0, 'Red', 1, 'Black'); -" - -"Functions (System)","DISK_SPACE_USED"," -DISK_SPACE_USED(tableNameString) -"," -Returns the approximate amount of space used by the table specified. -Does not currently take into account indexes or LOB's. -This function may be expensive since it has to load every page in the table. -"," -CALL DISK_SPACE_USED('my_table'); -" - -"Functions (System)","FILE_READ"," -FILE_READ(fileNameString [,encodingString]) -"," -Returns the contents of a file. If only one parameter is supplied, the data are -returned as a BLOB. If two parameters are used, the data is returned as a CLOB -(text). The second parameter is the character set to use, NULL meaning the -default character set for this system. - -File names and URLs are supported. -To read a stream from the classpath, use the prefix ""classpath:"". - -Admin rights are required to execute this command. -"," -SELECT LENGTH(FILE_READ('~/.h2.server.properties')) LEN; -SELECT FILE_READ('http://localhost:8182/stylesheet.css', NULL) CSS; -" - -"Functions (System)","GREATEST"," -GREATEST(aValue, bValue [,...]) -"," -Returns the largest value that is not NULL, or NULL if all values are NULL. -"," -CALL GREATEST(1, 2, 3); -" - -"Functions (System)","IDENTITY"," -IDENTITY() -"," -Returns the last inserted identity value for this session. -This value changes whenever a new sequence number was generated, -even within a trigger or Java function. See also SCOPE_IDENTITY. -This method returns a long. 
-"," -CALL IDENTITY(); -" - -"Functions (System)","IFNULL"," -IFNULL(aValue, bValue) -"," -Returns the value of 'a' if it is not null, otherwise 'b'. -"," -CALL IFNULL(NULL, ''); -" - -"Functions (System)","LEAST"," -LEAST(aValue, bValue [,...]) -"," -Returns the smallest value that is not NULL, or NULL if all values are NULL. -"," -CALL LEAST(1, 2, 3); -" - -"Functions (System)","LOCK_MODE"," -LOCK_MODE() -"," -Returns the current lock mode. See SET LOCK_MODE. -This method returns an int. -"," -CALL LOCK_MODE(); -" - -"Functions (System)","LOCK_TIMEOUT"," -LOCK_TIMEOUT() -"," -Returns the lock timeout of the current session (in milliseconds). -"," -LOCK_TIMEOUT() -" - -"Functions (System)","LINK_SCHEMA"," -LINK_SCHEMA(targetSchemaString, driverString, urlString, -userString, passwordString, sourceSchemaString) -"," -Creates table links for all tables in a schema. -If tables with the same name already exist, they are dropped first. -The target schema is created automatically if it does not yet exist. -The driver name may be empty if the driver is already loaded. -The list of tables linked is returned in the form of a result set. -Admin rights are required to execute this command. -"," -CALL LINK_SCHEMA('TEST2', '', 'jdbc:h2:test2', 'sa', 'sa', 'PUBLIC'); -" - -"Functions (System)","MEMORY_FREE"," -MEMORY_FREE() -"," -Returns the free memory in KB (where 1024 bytes is a KB). -This method returns an int. -The garbage is run before returning the value. -Admin rights are required to execute this command. -"," -MEMORY_FREE() -" - -"Functions (System)","MEMORY_USED"," -MEMORY_USED() -"," -Returns the used memory in KB (where 1024 bytes is a KB). -This method returns an int. -The garbage is run before returning the value. -Admin rights are required to execute this command. -"," -MEMORY_USED() -" - -"Functions (System)","NEXTVAL"," -NEXTVAL ( [ schemaName, ] sequenceString ) -"," -Returns the next value of the sequence. 
-Used values are never re-used, even when the transaction is rolled back.
-If the schema name is not set, the current schema is used, and the sequence name is converted to uppercase (for compatibility).
-This method returns a long.
-","
-NEXTVAL('TEST_SEQ')
-"
-
-"Functions (System)","NULLIF","
-NULLIF(aValue, bValue)
-","
-Returns NULL if 'a' is equal to 'b', otherwise 'a'.
-","
-NULLIF(A, B)
-"
-
-"Functions (System)","NVL2","
-NVL2(testValue, aValue, bValue)
-","
-If the test value is null, then 'b' is returned. Otherwise, 'a' is returned.
-The data type of the returned value is the data type of 'a' if this is a text type.
-","
-NVL2(X, 'not null', 'null')
-"
-
-"Functions (System)","READONLY","
-READONLY()
-","
-Returns true if the database is read-only.
-","
-READONLY()
-"
-
-"Functions (System)","ROWNUM","
-{ ROWNUM() } | { ROW_NUMBER() OVER() }
-","
-Returns the number of the current row.
-This method returns a long.
-It is supported for SELECT statements, as well as for DELETE and UPDATE.
-The first row has the row number 1, and is calculated before ordering and grouping the result set,
-but after evaluating index conditions (even when the index conditions are specified in an outer query).
-To get the row number after ordering and grouping, use a subquery.
-","
-SELECT ROWNUM(), * FROM TEST;
-SELECT ROWNUM(), * FROM (SELECT * FROM TEST ORDER BY NAME);
-SELECT ID FROM (SELECT T.*, ROWNUM AS R FROM TEST T) WHERE R BETWEEN 2 AND 3;
-"
-
-"Functions (System)","SCHEMA","
-SCHEMA()
-","
-Returns the name of the default schema for this session.
-","
-CALL SCHEMA()
-"
-
-"Functions (System)","SCOPE_IDENTITY","
-SCOPE_IDENTITY()
-","
-Returns the last inserted identity value for this session for the current scope
-(i.e. the current statement).
-Changes within triggers and Java functions are ignored. See also IDENTITY().
-This method returns a long.
-","
-CALL SCOPE_IDENTITY();
-"
-
-"Functions (System)","SESSION_ID","
-SESSION_ID()
-","
-Returns the unique session id number for the current database connection.
-This id stays the same while the connection is open.
-This method returns an int.
-The database engine may re-use a session id after the connection is closed.
-","
-CALL SESSION_ID()
-"
-
-"Functions (System)","SET","
-SET(@variableName, value)
-","
-Updates a variable with the given value.
-The new value is returned.
-When used in a query, the value is updated in the order the rows are read.
-When used in a subquery, not all rows might be read depending on the query plan.
-This can be used to implement running totals / cumulative sums.
-","
-SELECT X, SET(@I, IFNULL(@I, 0)+X) RUNNING_TOTAL FROM SYSTEM_RANGE(1, 10)
-"
-
-"Functions (System)","TABLE","
-{ TABLE | TABLE_DISTINCT } ( { name dataType = expression } [,...] )
-","
-Returns the result set. TABLE_DISTINCT removes duplicate rows.
-","
-SELECT * FROM TABLE(ID INT=(1, 2), NAME VARCHAR=('Hello', 'World'))
-"
-
-"Functions (System)","TRANSACTION_ID","
-TRANSACTION_ID()
-","
-Returns the current transaction id for this session.
-This method returns NULL if there is no uncommitted change, or if the database is not persisted.
-Otherwise a value of the following form is returned:
-""logFileId-position-sessionId"".
-This method returns a string.
-The value is unique across database restarts (values are not re-used).
-","
-CALL TRANSACTION_ID()
-"
-
-"Functions (System)","TRUNCATE_VALUE","
-TRUNCATE_VALUE(value, precisionInt, forceBoolean)
-","
-Truncate a value to the required precision.
-The precision of the returned value may be a bit larger than requested,
-because fixed precision values are not truncated (unlike the numeric TRUNCATE method).
-Unlike CAST, truncating a decimal value may lose precision if the force flag is set to true.
-The method returns a value with the same data type as the first parameter.
-"," -CALL TRUNCATE_VALUE(X, 10, TRUE); -" - -"Functions (System)","USER"," -{ USER | CURRENT_USER } () -"," -Returns the name of the current user of this session. -"," -CURRENT_USER() -" - -"Functions (System)","H2VERSION"," -H2VERSION() -"," -Returns the H2 version as a String. -"," -H2VERSION() -" - -"System Tables","Information Schema"," -INFORMATION_SCHEMA -"," -To get the list of system tables, execute the statement SELECT * FROM -INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' -"," - -" -"System Tables","Range Table"," -SYSTEM_RANGE(start, end) -"," -Contains all values from start to end (this is a dynamic table). -"," -SYSTEM_RANGE(0, 100) -" - diff --git a/h2/src/docsrc/help/information_schema.csv b/h2/src/docsrc/help/information_schema.csv new file mode 100644 index 0000000000..8008bb1e46 --- /dev/null +++ b/h2/src/docsrc/help/information_schema.csv @@ -0,0 +1,1022 @@ +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). +# Initial Developer: H2 Group + +"TABLE_NAME","COLUMN_NAME","DESCRIPTION" + +# Tables and views + +"CHECK_CONSTRAINTS",," +Contains CHECK clauses of check and domain constraints. +" + +"COLLATIONS",," +Contains available collations. +" + +"COLUMNS",," +Contains information about columns of tables. +" + +"COLUMN_PRIVILEGES",," +Contains information about privileges of columns. +H2 doesn't have per-column privileges, so this view actually contains privileges of their tables. +" + +"CONSTANTS",," +Contains information about constants. +" + +"CONSTRAINT_COLUMN_USAGE",," +Contains information about columns used in constraints. +" + +"DOMAINS",," +Contains information about domains. +" + +"DOMAIN_CONSTRAINTS",," +Contains basic information about domain constraints. +See also INFORMATION_SCHEMA.CHECK_CONSTRAINTS. +" + +"ELEMENT_TYPES",," +Contains information about types of array elements. 
+" + +"ENUM_VALUES",," +Contains information about enum values. +" + +"FIELDS",," +Contains information about fields of row values. +" + +"INDEXES",," +Contains information about indexes. +" + +"INDEX_COLUMNS",," +Contains information about columns used in indexes. +" + +"INFORMATION_SCHEMA_CATALOG_NAME",," +Contains a single row with the name of catalog (database name). +" + +"IN_DOUBT",," +Contains information about prepared transactions. +" + +"KEY_COLUMN_USAGE",," +Contains information about columns used by primary key, unique, or referential constraint. +" + +"LOCKS",," +Contains information about tables locked by sessions. +" + +"PARAMETERS",," +Contains information about parameters of routines. +" + +"QUERY_STATISTICS",," +Contains statistics of queries when query statistics gathering is enabled. +" + +"REFERENTIAL_CONSTRAINTS",," +Contains additional information about referential constraints. +" + +"RIGHTS",," +Contains information about granted rights and roles. +" + +"ROLES",," +Contains information about roles. +" + +"ROUTINES",," +Contains information about user-defined routines, including aggregate functions. +" + +"SCHEMATA",," +Contains information about schemas. +" + +"SEQUENCES",," +Contains information about sequences. +" + +"SESSIONS",," +Contains information about sessions. +Only users with ADMIN privileges can see all sessions, other users can see only own session. +" + +"SESSION_STATE",," +Contains the state of the current session. +" + +"SETTINGS",," +Contains values of various settings. +" + +"SYNONYMS",," +Contains information about table synonyms. +" + +"TABLES",," +Contains information about tables. +See also INFORMATION_SCHEMA.COLUMNS. +" + +"TABLE_CONSTRAINTS",," +Contains basic information about table constraints (check, primary key, unique, and referential). +" + +"TABLE_PRIVILEGES",," +Contains information about privileges of tables. 
+See INFORMATION_SCHEMA.CHECK_CONSTRAINTS, INFORMATION_SCHEMA.KEY_COLUMN_USAGE, +and INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS for additional information. +" + +"TRIGGERS",," +Contains information about triggers. +" + +"USERS",," +Contains information about users. +Only users with ADMIN privileges can see all users, other users can see only themselves. +" + +"VIEWS",," +Contains additional information about views. +See INFORMATION_SCHEMA.TABLES for basic information. +" + +# Common columns with data type information + +,"DATA_TYPE"," +The SQL data type name. +" + +,"CHARACTER_MAXIMUM_LENGTH"," +The maximum length in characters for character string data types. +For binary string data types contains the same value as CHARACTER_OCTET_LENGTH. +" + +,"CHARACTER_OCTET_LENGTH"," +The maximum length in bytes for binary string data types. +For character string data types contains the same value as CHARACTER_MAXIMUM_LENGTH. +" + +,"NUMERIC_PRECISION"," +The precision for numeric data types. +" + +,"NUMERIC_PRECISION_RADIX"," +The radix of precision (2 or 10) for numeric data types. +" + +,"NUMERIC_SCALE"," +The scale for numeric data types. +" + +,"DATETIME_PRECISION"," +The fractional seconds precision for datetime data types. +" + +,"INTERVAL_TYPE"," +The data type of interval qualifier for interval data types. +" + +,"INTERVAL_PRECISION"," +The leading field precision for interval data types. +" + +,"CHARACTER_SET_CATALOG"," +The catalog (database name) for character string data types. +" + +,"CHARACTER_SET_SCHEMA"," +The name of public schema for character string data types. +" + +,"CHARACTER_SET_NAME"," +The 'Unicode' for character string data types. +" + +,"COLLATION_CATALOG"," +The catalog (database name) for character string data types. +" + +,"COLLATION_SCHEMA"," +The name of public schema for character string data types. +" + +,"COLLATION_NAME"," +The name of collation for character string data types. 
+" + +,"MAXIMUM_CARDINALITY"," +The maximum cardinality for array data types. +" + +,"DTD_IDENTIFIER"," +The data type identifier to read additional information from INFORMATION_SCHEMA.ELEMENT_TYPES for array data types, +INFORMATION_SCHEMA.ENUM_VALUES for ENUM data type, and INFORMATION_SCHEMA.FIELDS for row value data types. +" + +,"DECLARED_DATA_TYPE"," +The declared SQL data type name for numeric data types. +" + +,"DECLARED_NUMERIC_PRECISION"," +The declared precision, if any, for numeric data types. +" + +,"DECLARED_NUMERIC_SCALE"," +The declared scale, if any, for numeric data types. +" + +,"GEOMETRY_TYPE"," +The geometry type constraint, if any, for geometry data types. +" + +,"GEOMETRY_SRID"," +The geometry SRID (Spatial Reference Identifier) constraint, if any, for geometry data types. +" + +# Other common fields + +,"CONSTRAINT_CATALOG"," +The catalog (database name). +" + +,"CONSTRAINT_SCHEMA"," +The schema of the constraint. +" + +,"CONSTRAINT_NAME"," +The name of the constraint. +" + +,"DOMAIN_CATALOG"," +The catalog (database name). +" + +,"DOMAIN_SCHEMA"," +The schema of domain. +" + +,"DOMAIN_NAME"," +The name of domain. +" + +,"INDEX_CATALOG"," +The catalog (database name). +" + +,"INDEX_SCHEMA"," +The schema of the index. +" + +,"INDEX_NAME"," +The name of the index. +" + +,"OBJECT_CATALOG"," +The catalog (database name). +" + +,"OBJECT_SCHEMA"," +The schema of the object. +" + +,"OBJECT_NAME"," +The name of the object. +" + +,"OBJECT_TYPE"," +The TYPE of the object ('CONSTANT', 'DOMAIN', 'TABLE', or 'ROUTINE'). +" + +,"SPECIFIC_CATALOG"," +The catalog (database name). +" + +,"SPECIFIC_SCHEMA"," +The schema of the overloaded version of routine. +" + +,"SPECIFIC_NAME"," +The name of the overloaded version of routine. +" + +,"TABLE_CATALOG"," +The catalog (database name). +" + +,"TABLE_SCHEMA"," +The schema of the table. +" + +,"TABLE_NAME"," +The name of the table. +" + +,"COLUMN_NAME"," +The name of the column. 
+" + +,"ORDINAL_POSITION"," +The ordinal position (1-based). +" + +,"GRANTOR"," +NULL. +" + +,"GRANTEE"," +The name of grantee. +" + +,"PRIVILEGE_TYPE"," +'SELECT', 'INSERT', 'UPDATE', or 'DELETE'. +" + +,"IS_GRANTABLE"," +Whether grantee may grant rights to this object to others ('YES' or 'NO'). +" + +,"REMARKS"," +Optional remarks. +" + +,"SESSION_ID"," +The identifier of the session. +" + +# Individual fields + +"CHECK_CONSTRAINTS","CHECK_CLAUSE"," +The SQL of CHECK clause. +" + +"COLLATIONS","PAD_ATTRIBUTE"," +'NO PAD'. +" + +"COLLATIONS","LANGUAGE_TAG"," +The language tag. +" + +"COLUMNS","COLUMN_DEFAULT"," +The SQL of DEFAULT expression, if any. +" + +"COLUMNS","IS_NULLABLE"," +Whether column may contain NULL value ('YES' or 'NO'). +" + +"COLUMNS","DOMAIN_CATALOG"," +The catalog for columns with domain. +" + +"COLUMNS","DOMAIN_SCHEMA"," +The schema of domain for columns with domain. +" + +"COLUMNS","DOMAIN_NAME"," +The name of domain for columns with domain. +" + +"COLUMNS","IS_IDENTITY"," +Whether column is an identity column ('YES' or 'NO'). +" + +"COLUMNS","IDENTITY_GENERATION"," +Identity generation ('ALWAYS' or 'BY DEFAULT') for identity columns. +" + +"COLUMNS","IDENTITY_START"," +The initial start value for identity columns. +" + +"COLUMNS","IDENTITY_INCREMENT"," +The increment value for identity columns. +" + +"COLUMNS","IDENTITY_MAXIMUM"," +The maximum value for identity columns. +" + +"COLUMNS","IDENTITY_MINIMUM"," +The minimum value for identity columns. +" + +"COLUMNS","IDENTITY_CYCLE"," +Whether identity values are cycled ('YES' or 'NO') for identity columns. +" + +"COLUMNS","IS_GENERATED"," +Whether column is an generated column ('ALWAYS' or 'NEVER') +" + +"COLUMNS","GENERATION_EXPRESSION"," +The SQL of GENERATED ALWAYS AS expression for generated columns. +" + +"COLUMNS","IDENTITY_BASE"," +The current base value for identity columns. +" + +"COLUMNS","IDENTITY_CACHE"," +The cache size for identity columns. 
+" + +"COLUMNS","COLUMN_ON_UPDATE"," +The SQL of ON UPDATE expression, if any. +" + +"COLUMNS","IS_VISIBLE"," +Whether column is visible (included into SELECT *). +" + +"COLUMNS","DEFAULT_ON_NULL"," +Whether value of DEFAULT expression is used when NULL value is inserted. +" + +"COLUMNS","SELECTIVITY"," +The selectivity of a column (0-100), used to choose the best index. +" + +"CONSTANTS","CONSTANT_CATALOG"," +The catalog (database name). +" + +"CONSTANTS","CONSTANT_SCHEMA"," +The schema of the constant. +" + +"CONSTANTS","CONSTANT_NAME"," +The name of the constant. +" + +"CONSTANTS","VALUE_DEFINITION"," +The SQL of value. +" + +"DOMAINS","DOMAIN_DEFAULT"," +The SQL of DEFAULT expression, if any. +" + +"DOMAINS","DOMAIN_ON_UPDATE"," +The SQL of ON UPDATE expression, if any. +" + +"DOMAINS","PARENT_DOMAIN_CATALOG"," +The catalog (database name) for domains with parent domain. +" + +"DOMAINS","PARENT_DOMAIN_SCHEMA"," +The schema of parent domain for domains with parent domain. +" + +"DOMAINS","PARENT_DOMAIN_NAME"," +The name of parent domain for domains with parent domain. +" + +"DOMAIN_CONSTRAINTS","IS_DEFERRABLE"," +'NO'. +" + +"DOMAIN_CONSTRAINTS","INITIALLY_DEFERRED"," +'NO'. +" + +"ELEMENT_TYPES","COLLECTION_TYPE_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"ENUM_VALUES","ENUM_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"ENUM_VALUES","VALUE_NAME"," +The name of enum value. +" + +"ENUM_VALUES","VALUE_ORDINAL"," +The ordinal of enum value. +" + +"FIELDS","ROW_IDENTIFIER"," +The DTD_IDENTIFIER value of the object. +" + +"FIELDS","FIELD_NAME"," +The name of the field of the row value. +" + +"INDEXES","INDEX_TYPE_NAME"," +The type of the index ('PRIMARY KEY', 'UNIQUE INDEX', 'SPATIAL INDEX', etc.) +" + +"INDEXES","IS_GENERATED"," +Whether index is generated by a constraint and belongs to it. +" + +"INDEXES","INDEX_CLASS"," +The Java class name of index implementation. +" + +"INDEX_COLUMNS","ORDERING_SPECIFICATION"," +'ASC' or 'DESC'. 
+" + +"INDEX_COLUMNS","NULL_ORDERING"," +'FIRST', 'LAST', or NULL. +" + +"INDEX_COLUMNS","IS_UNIQUE"," +Whether this column is a part of unique column list of a unique index (TRUE or FALSE). +" + +"INFORMATION_SCHEMA_CATALOG_NAME","CATALOG_NAME"," +The catalog (database name). +" + +"IN_DOUBT","TRANSACTION_NAME"," +The name of prepared transaction. +" + +"IN_DOUBT","TRANSACTION_STATE"," +The state of prepared transaction ('IN_DOUBT', 'COMMIT', or 'ROLLBACK'). +" + +"KEY_COLUMN_USAGE","POSITION_IN_UNIQUE_CONSTRAINT"," +The ordinal position in the referenced unique constraint (1-based). +" + +"LOCKS","LOCK_TYPE"," +'READ' or 'WRITE'. +" + +"PARAMETERS","PARAMETER_MODE"," +'IN'. +" + +"PARAMETERS","IS_RESULT"," +'NO'. +" + +"PARAMETERS","AS_LOCATOR"," +'YES' for LOBs, 'NO' for others. +" + +"PARAMETERS","PARAMETER_NAME"," +The name of the parameter. +" + +"PARAMETERS","PARAMETER_DEFAULT"," +NULL. +" + +"QUERY_STATISTICS","SQL_STATEMENT"," +The SQL statement. +" + +"QUERY_STATISTICS","EXECUTION_COUNT"," +The execution count. +" + +"QUERY_STATISTICS","MIN_EXECUTION_TIME"," +The minimum execution time in milliseconds. +" + +"QUERY_STATISTICS","MAX_EXECUTION_TIME"," +The maximum execution time in milliseconds. +" + +"QUERY_STATISTICS","CUMULATIVE_EXECUTION_TIME"," +The total execution time in milliseconds. +" + +"QUERY_STATISTICS","AVERAGE_EXECUTION_TIME"," +The average execution time in milliseconds. +" + +"QUERY_STATISTICS","STD_DEV_EXECUTION_TIME"," +The standard deviation of execution time in milliseconds. +" + +"QUERY_STATISTICS","MIN_ROW_COUNT"," +The minimum number of rows. +" + +"QUERY_STATISTICS","MAX_ROW_COUNT"," +The maximum number of rows. +" + +"QUERY_STATISTICS","CUMULATIVE_ROW_COUNT"," +The total number of rows. +" + +"QUERY_STATISTICS","AVERAGE_ROW_COUNT"," +The average number of rows. +" + +"QUERY_STATISTICS","STD_DEV_ROW_COUNT"," +The standard deviation of number of rows. 
+" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_CATALOG"," +The catalog (database name). +" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_SCHEMA"," +The schema of referenced unique constraint. +" + +"REFERENTIAL_CONSTRAINTS","UNIQUE_CONSTRAINT_NAME"," +The name of referenced unique constraint. +" + +"REFERENTIAL_CONSTRAINTS","MATCH_OPTION"," +'NONE'. +" + +"REFERENTIAL_CONSTRAINTS","UPDATE_RULE"," +The rule for UPDATE in referenced table ('RESTRICT', 'CASCADE', 'SET DEFAULT', or 'SET NULL'). +" + +"REFERENTIAL_CONSTRAINTS","DELETE_RULE"," +The rule for DELETE in referenced table ('RESTRICT', 'CASCADE', 'SET DEFAULT', or 'SET NULL'). +" + +"RIGHTS","GRANTEETYPE"," +'USER' if grantee is a user, 'ROLE' if grantee is a role. +" + +"RIGHTS","GRANTEDROLE"," +The name of the granted role for role grants. +" + +"RIGHTS","RIGHTS"," +The set of rights ('SELECT', 'DELETE', 'INSERT', 'UPDATE', or 'ALTER ANY SCHEMA' separated with ', ') for table grants. +" + +"ROLES","ROLE_NAME"," +The name of the role. +" + +"ROUTINES","ROUTINE_CATALOG"," +The catalog (database name). +" + +"ROUTINES","ROUTINE_SCHEMA"," +The schema of the routine. +" + +"ROUTINES","ROUTINE_NAME"," +The name of the routine. +" + +"ROUTINES","ROUTINE_TYPE"," +'PROCEDURE', 'FUNCTION', or 'AGGREGATE'. +" + +"ROUTINES","ROUTINE_BODY"," +'EXTERNAL'. +" + +"ROUTINES","ROUTINE_DEFINITION"," +Source code or NULL if not applicable or user doesn't have ADMIN privileges. +" + +"ROUTINES","EXTERNAL_NAME"," +The name of the class or method. +" + +"ROUTINES","EXTERNAL_LANGUAGE"," +'JAVA'. +" + +"ROUTINES","PARAMETER_STYLE"," +'GENERAL'. +" + +"ROUTINES","IS_DETERMINISTIC"," +Whether routine is deterministic ('YES' or 'NO'). +" + +"SCHEMATA","CATALOG_NAME"," +The catalog (database name). +" + +"SCHEMATA","SCHEMA_NAME"," +The schema name. +" + +"SCHEMATA","SCHEMA_OWNER"," +The name of schema owner. +" + +"SCHEMATA","DEFAULT_CHARACTER_SET_CATALOG"," +The catalog (database name). 
+" + +"SCHEMATA","DEFAULT_CHARACTER_SET_SCHEMA"," +The name of public schema. +" + +"SCHEMATA","DEFAULT_CHARACTER_SET_NAME"," +'Unicode'. +" + +"SCHEMATA","SQL_PATH"," +NULL. +" + +"SCHEMATA","DEFAULT_COLLATION_NAME"," +The name of database collation. +" + +"SEQUENCES","SEQUENCE_CATALOG"," +The catalog (database name). +" + +"SEQUENCES","SEQUENCE_SCHEMA"," +The schema of the sequence. +" + +"SEQUENCES","SEQUENCE_NAME"," +The name of the sequence. +" + +"SEQUENCES","START_VALUE"," +The initial start value. +" + +"SEQUENCES","MINIMUM_VALUE"," +The maximum value. +" + +"SEQUENCES","MAXIMUM_VALUE"," +The minimum value. +" + +"SEQUENCES","INCREMENT"," +The increment value. +" + +"SEQUENCES","CYCLE_OPTION"," +Whether values are cycled ('YES' or 'NO'). +" + +"SEQUENCES","BASE_VALUE"," +The current base value. +" + +"SEQUENCES","CACHE"," +The cache size. +" + +"SESSIONS","USER_NAME"," +The name of the user. +" + +"SESSIONS","SERVER"," +The name of the server used by remote connection. +" + +"SESSIONS","CLIENT_ADDR"," +The client address and port used by remote connection. +" + +"SESSIONS","CLIENT_INFO"," +Additional client information provided by remote connection. +" + +"SESSIONS","SESSION_START"," +When this session was started. +" + +"SESSIONS","ISOLATION_LEVEL"," +The isolation level of the session ('READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ', 'SNAPSHOT', +or 'SERIALIZABLE'). +" + +"SESSIONS","EXECUTING_STATEMENT"," +The currently executing statement, if any. +" + +"SESSIONS","EXECUTING_STATEMENT_START"," +When the current command was started, if any. +" + +"SESSIONS","CONTAINS_UNCOMMITTED"," +Whether the session contains any uncommitted changes. +" + +"SESSIONS","SESSION_STATE"," +The state of the session ('RUNNING', 'SLEEP', etc.) +" + +"SESSIONS","BLOCKER_ID"," +The identifier or blocking session, if any. +" + +"SESSIONS","SLEEP_SINCE"," +When the last command was finished if session is sleeping. +" + +"SESSION_STATE","STATE_KEY"," +The key. 
+" + +"SESSION_STATE","STATE_COMMAND"," +The SQL command that can be used to restore the state. +" + +"SETTINGS","SETTING_NAME"," +The name of the setting. +" + +"SETTINGS","SETTING_VALUE"," +The value of the setting. +" + +"SYNONYMS","SYNONYM_CATALOG"," +The catalog (database name). +" + +"SYNONYMS","SYNONYM_SCHEMA"," +The schema of the synonym. +" + +"SYNONYMS","SYNONYM_NAME"," +The name of the synonym. +" + +"SYNONYMS","SYNONYM_FOR"," +The name of the referenced table. +" + +"SYNONYMS","SYNONYM_FOR_SCHEMA"," +The name of the referenced schema. +" + +"SYNONYMS","TYPE_NAME"," +'SYNONYM'. +" + +"SYNONYMS","STATUS"," +'VALID'. +" + +"TABLES","TABLE_TYPE"," +'BASE TABLE', 'VIEW', 'GLOBAL TEMPORARY', or 'LOCAL TEMPORARY'. +" + +"TABLES","IS_INSERTABLE_INTO"," +Whether the table is insertable ('YES' or 'NO'). +" + +"TABLES","COMMIT_ACTION"," +'DELETE', 'DROP', or 'PRESERVE' for temporary tables. +" + +"TABLES","STORAGE_TYPE"," +'CACHED' for regular persisted tables, 'MEMORY' for in-memory tables or persisted tables with in-memory indexes, +'GLOBAL TEMPORARY' or 'LOCAL TEMPORARY' for temporary tables, 'EXTERNAL' for tables with external table engines, +or 'TABLE LINK' for linked tables. +" + +"TABLES","LAST_MODIFICATION"," +The sequence number of the last modification, if applicable. +" + +"TABLES","TABLE_CLASS"," +The Java class name of implementation. +" + +"TABLES","ROW_COUNT_ESTIMATE"," +The approximate number of rows if known or some default value if unknown. +For regular tables contains the total number of rows including the uncommitted rows. +" + +"TABLE_CONSTRAINTS","CONSTRAINT_TYPE"," +'CHECK', 'PRIMARY KEY', 'UNIQUE', or 'REFERENTIAL'. +" + +"TABLE_CONSTRAINTS","IS_DEFERRABLE"," +'NO'. +" + +"TABLE_CONSTRAINTS","INITIALLY_DEFERRED"," +'NO'. +" + +"TABLE_CONSTRAINTS","ENFORCED"," +'YES' for non-referential constants. 
+'YES' for referential constraints when checks for referential integrity are enabled for both the referenced and +referencing tables and 'NO' when they are disabled. +" + +"TABLE_PRIVILEGES","WITH_HIERARCHY"," +'NO'. +" + +"TRIGGERS","TRIGGER_CATALOG"," +The catalog (database name). +" + +"TRIGGERS","TRIGGER_SCHEMA"," +The schema of the trigger. +" + +"TRIGGERS","TRIGGER_NAME"," +The name of the trigger. +" + +"TRIGGERS","EVENT_MANIPULATION"," +'INSERT', 'UPDATE', 'DELETE', or 'SELECT'. +" + +"TRIGGERS","EVENT_OBJECT_CATALOG"," +The catalog (database name). +" + +"TRIGGERS","EVENT_OBJECT_SCHEMA"," +The schema of the table. +" + +"TRIGGERS","EVENT_OBJECT_TABLE"," +The name of the table. +" + +"TRIGGERS","ACTION_ORIENTATION"," +'ROW' or 'STATEMENT'. +" + +"TRIGGERS","ACTION_TIMING"," +'BEFORE', 'AFTER', or 'INSTEAD OF'. +" + +"TRIGGERS","IS_ROLLBACK"," +Whether this trigger is executed on rollback. +" + +"TRIGGERS","JAVA_CLASS"," +The Java class name. +" + +"TRIGGERS","QUEUE_SIZE"," +The size of the queue (is not actually used). +" + +"TRIGGERS","NO_WAIT"," +Whether trigger is defined with NO WAIT clause (is not actually used). +" + +"USERS","USER_NAME"," +The name of the user. +" + +"USERS","IS_ADMIN"," +Whether user has ADMIN privileges. +" + +"VIEWS","VIEW_DEFINITION"," +The query SQL, if applicable. +" + +"VIEWS","CHECK_OPTION"," +'NONE'. +" + +"VIEWS","IS_UPDATABLE"," +'NO'. +" + +"VIEWS","INSERTABLE_INTO"," +'NO'. +" + +"VIEWS","IS_TRIGGER_UPDATABLE"," +Whether the view has INSTEAD OF trigger for UPDATE ('YES' or 'NO'). +" + +"VIEWS","IS_TRIGGER_DELETABLE"," +Whether the view has INSTEAD OF trigger for DELETE ('YES' or 'NO'). +" + +"VIEWS","IS_TRIGGER_INSERTABLE_INTO"," +Whether the view has INSTEAD OF trigger for INSERT ('YES' or 'NO'). +" + +"VIEWS","STATUS"," +'VALID' or 'INVALID'. ￼
+" diff --git a/h2/src/docsrc/html/advanced.html b/h2/src/docsrc/html/advanced.html index fa3441cb93..68e865b1ff 100644 --- a/h2/src/docsrc/html/advanced.html +++ b/h2/src/docsrc/html/advanced.html @@ -1,7 +1,7 @@ @@ -41,14 +41,14 @@

Advanced

Two Phase Commit
Compatibility
+ + Keywords / Reserved Words
Standards Compliance
Run as Windows Service
ODBC Driver
- - Using H2 in Microsoft .NET
ACID
@@ -81,8 +81,6 @@

Advanced

Pluggable File System

Split File System
- - Database Upgrade
Java Objects Serialization
@@ -94,7 +92,10 @@

Result Sets

Statements that Return a Result Set

-The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. +The following statements return a result set: SELECT, TABLE, VALUES, +EXPLAIN, CALL, SCRIPT, SHOW, HELP. +EXECUTE may return either a result set or an update count. +Result of a WITH statement depends on inner command. All other statements return an update count.

@@ -104,8 +105,8 @@

Limiting the Number of Rows

Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. -This can be done using LIMIT in a query -(example: SELECT * FROM TEST LIMIT 100), +This can be done using FETCH in a query +(example: SELECT * FROM TEST FETCH FIRST 100 ROWS ONLY), or by using Statement.setMaxRows(max).

@@ -137,7 +138,7 @@

When to use CLOB/BLOB

By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using -MAX_LENGTH_INPLACE_LOB, +MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster @@ -145,18 +146,6 @@

When to use CLOB/BLOB

that don't involve this column.

-

Large Object Compression

-

-The following feature is only available for the PageStore storage engine. -For the MVStore engine (the default for H2 version 1.4.x), -append ;COMPRESS=TRUE to the database URL instead. -CLOB and BLOB values can be compressed by using -SET COMPRESS_LOB. -The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write -operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, -then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. -

-

Linked Tables

This database supports linked tables, which means tables that don't exist in the current database but @@ -184,7 +173,7 @@

Linked Tables

is shared. To disable this, set the system property h2.shareLinkedConnections=false.

-The statement CREATE LINKED TABLE +The statement CREATE LINKED TABLE supports an optional schema name parameter.

@@ -219,72 +208,72 @@

Transaction Isolation

Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. -See the Grammar for details. +See the Commands for details.

Transaction isolation is provided for all data manipulation language (DML) statements.

-Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. -In this case, table level locking is not used. -Instead, rows are locked for update, and read committed is used in all cases -(changing the isolation level has no effect). -

-

-This database supports the following transaction isolation levels: +H2 supports read uncommitted, read committed, repeatable read, snapshot, +and serializable (partially, see below) isolation levels:

    -
  • Read Committed
    +
  • Read uncommitted
    + Dirty reads, non-repeatable reads, and phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED +
  • +
  • Read committed
    This is the default level. - Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. - Higher concurrency is possible when using this level.
    - To enable, execute the SQL statement SET LOCK_MODE 3
    - or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 -
  • -Serializable
    - Both read locks and write locks are kept until the transaction commits. - To enable, execute the SQL statement SET LOCK_MODE 1
    - or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 -
  • Read Uncommitted
    - This level means that transaction isolation is disabled.
    - To enable, execute the SQL statement SET LOCK_MODE 0
    - or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 + Dirty reads aren't possible; non-repeatable reads and phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED +
  • +
  • Repeatable read
    + Dirty reads and non-repeatable reads aren't possible, phantom reads are possible. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ +
  • +
  • Snapshot
    + Dirty reads, non-repeatable reads, and phantom reads aren't possible. + This isolation level is very expensive in databases with many tables. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT +
  • +
  • Serializable
    + Dirty reads, non-repeatable reads, and phantom reads aren't possible. + Note that this isolation level in H2 currently doesn't ensure equivalence of concurrent and serializable execution + of transactions that perform write operations. + This isolation level is very expensive in databases with many tables. + To enable, execute the SQL statement + SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE
-

-When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. -

    -
  • Dirty Reads
    +
  • Dirty reads
    Means a connection can read uncommitted changes made by another connection.
    - Possible with: read uncommitted -
  • Non-Repeatable Reads
    + Possible with: read uncommitted. +
  • Non-repeatable reads
    A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result.
    - Possible with: read uncommitted, read committed -
  • Phantom Reads
    + Possible with: read uncommitted, read committed. +
  • Phantom reads
    A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row.
    - Possible with: read uncommitted, read committed + Possible with: read uncommitted, read committed, repeatable read.
-

Table Level Locking

+

Multi-Version Concurrency Control (MVCC)

-The database allows multiple concurrent connections to the same database. -To make sure all connections only see consistent data, table level locking is used by default. -This mechanism does not allow high concurrency, but is very fast. -Shared locks and exclusive locks are supported. -Before reading from a table, the database tries to add a shared lock to the table -(this is only possible if there is no exclusive lock on the object by another connection). -If the shared lock is added successfully, the table can be read. It is allowed that -other connections also have a shared lock on the same object. If a connection wants -to write to a table (update or delete a row), an exclusive lock is required. To get the -exclusive lock, other connection must not have any locks on the object. After the -connection commits, all locks are released. -This database keeps all locks in memory. -When a lock is released, and multiple connections are waiting for it, one of them is picked at random. +Insert and update operations only issue a shared lock on the table. +An exclusive lock is still used when adding or removing columns or when dropping the table. +Connections only 'see' committed data, and own changes. That means, if connection A updates +a row but doesn't commit this change yet, connection B will see the old value. +Only when the change is committed, the new value is visible by other connections +(read committed). If multiple connections concurrently try to lock or update the same row, the +database waits until it can apply the change, but at most until the lock timeout expires.

Lock Timeout

@@ -297,43 +286,6 @@

Lock Timeout

for each connection.

-

Multi-Version Concurrency Control (MVCC)

-

-The MVCC feature allows higher concurrency than using (table level or row level) locks. -When using MVCC in this database, delete, insert and update operations will only issue a -shared lock on the table. An exclusive lock is still used when adding or removing columns, -when dropping the table, and when using SELECT ... FOR UPDATE. -Connections only 'see' committed data, and own changes. That means, if connection A updates -a row but doesn't commit this change yet, connection B will see the old value. -Only when the change is committed, the new value is visible by other connections -(read committed). If multiple connections concurrently try to update the same row, the -database waits until it can apply the change, but at most until the lock timeout expires. -

-

-To use the MVCC feature, append ;MVCC=TRUE to the database URL: -

-
-jdbc:h2:~/test;MVCC=TRUE
-
-

-The setting must be specified in the first connection (the one that opens the database). -It is not possible to enable or disable this setting while the database is already open. -

-

-If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. -

-The MVCC mode is enabled by default in version 1.4.x, -with the default MVStore storage engine. -MVCC is disabled by default when using the PageStore storage engine -(which is the default in version 1.3.x). -The following applies when using the PageStore storage engine: -The MVCC feature is not fully tested yet. -The limitations of the MVCC mode are: -with the PageStore storage engine, it can not be used at the same time as -MULTI_THREADED=TRUE; -the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. -The setting MAX_MEMORY_UNDO has no effect. -

Clustering / High Availability

This database supports a simple clustering / high availability mechanism. The architecture is: @@ -409,7 +361,7 @@

Detect Which Cluster Instances are Running

To find out which cluster nodes are currently running, execute the following SQL statement:

-SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'
+SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'CLUSTER'
 

If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of @@ -436,12 +388,12 @@

Clustering Algorithm and Limitations

Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be -executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), +executed with care: UUID(), RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. -Using auto-increment and identity columns is currently not supported. +Identity columns aren't supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements).

@@ -492,21 +444,258 @@

Transaction Commit when Autocommit is On

Other database engines may commit the transaction in this case when the result set is closed.

-

Keywords / Reserved Words

+

Keywords / Reserved Words

There is a list of keywords that can't be used as identifiers (table names, column names and so on), -unless they are quoted (surrounded with double quotes). The list is currently: -

- -CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, -FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, -NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, -TRUE, UNION, UNIQUE, WHERE - -

-Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, -for example CURRENT_TIMESTAMP. +unless they are quoted (surrounded with double quotes). +The following tokens are keywords in H2: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
KeywordH2SQL Standard
2016201120082003199992
ALL+++++++
AND+++++++
ANY+++++++
ARRAY++++++
AS+++++++
ASYMMETRIC+++++NR
AUTHORIZATION+++++++
BETWEEN+++++NR+
BOTHCS++++++
CASE+++++++
CAST+++++++
CHECK+++++++
CONSTRAINT+++++++
CROSS+++++++
CURRENT_CATALOG++++
CURRENT_DATE+++++++
CURRENT_PATH++++++
CURRENT_ROLE++++++
CURRENT_SCHEMA++++
CURRENT_TIME+++++++
CURRENT_TIMESTAMP+++++++
CURRENT_USER+++++++
DAY+++++++
DEFAULT+++++++
DISTINCT+++++++
ELSE+++++++
END+++++++
EXCEPT+++++++
EXISTS+++++NR+
FALSE+++++++
FETCH+++++++
FILTERCS++++
FOR+++++++
FOREIGN+++++++
FROM+++++++
FULL+++++++
GROUP+++++++
GROUPSCS++
HAVING+++++++
HOUR+++++++
IF+
ILIKECS
IN+++++++
INNER+++++++
INTERSECT+++++++
INTERVAL+++++++
IS+++++++
JOIN+++++++
KEY+NRNRNRNR++
LEADINGCS++++++
LEFT+++++++
LIKE+++++++
LIMITMS+
LOCALTIME++++++
LOCALTIMESTAMP++++++
MINUSMS
MINUTE+++++++
MONTH+++++++
NATURAL+++++++
NOT+++++++
NULL+++++++
OFFSET++++
ON+++++++
OR+++++++
ORDER+++++++
OVERCS++++
PARTITIONCS++++
PRIMARY+++++++
QUALIFY+
RANGECS++++
REGEXPCS
RIGHT+++++++
ROW++++++
ROWNUM+
ROWSCS++++++
SECOND+++++++
SELECT+++++++
SESSION_USER++++++
SET+++++++
SOME+++++++
SYMMETRIC+++++NR
SYSTEM_USER+++++++
TABLE+++++++
TO+++++++
TOPMS
CS
TRAILINGCS++++++
TRUE+++++++
UESCAPE+++++
UNION+++++++
UNIQUE+++++++
UNKNOWN+++++++
USER+++++++
USING+++++++
VALUE+++++++
VALUES+++++++
WHEN+++++++
WHERE+++++++
WINDOW+++++
WITH+++++++
YEAR+++++++
_ROWID_+
+

+Mode-sensitive keywords (MS) are keywords only in some compatibility modes. +

+
  • LIMIT is a keyword only in Regular, Legacy, DB2, HSQLDB, MariaDB, MySQL, and PostgreSQL compatibility modes. +It is an identifier in Strict, Derby, MSSQLServer, and Oracle compatibility modes. 
  • MINUS is a keyword only in Regular, Legacy, DB2, HSQLDB, and Oracle compatibility modes. +It is an identifier in Strict, Derby, MSSQLServer, MariaDB, MySQL, and PostgreSQL compatibility modes. +
  • TOP is a context-sensitive keyword (can be either keyword or identifier) +only in Regular, Legacy, HSQLDB, and MSSQLServer compatibility modes. +It is an identifier unconditionally in Strict, Derby, DB2, MariaDB, MySQL, Oracle, and PostgreSQL compatibility modes. +
+

+Context-sensitive keywords (CS) can be used as identifiers in some places, +but cannot be used as identifiers in others. +Normal keywords (+) are always treated as keywords.

+

+Most keywords in H2 are also reserved (+) or non-reserved (NR) words in the SQL Standard. +Newer versions of H2 may have more keywords than older ones. +Reserved words from the SQL Standard are potential candidates for keywords in future versions. +

+ +

There is a compatibility setting +SET NON_KEYWORDS +that can be used as a temporary workaround for applications that use keywords as unquoted identifiers.

Standards Compliance

@@ -525,7 +714,7 @@

Run as Windows Service

Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from -Tanuki Software, Inc. +Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. @@ -534,7 +723,7 @@

Run as Windows Service

The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from - + Simon Krenger.

@@ -600,7 +789,7 @@

ODBC Driver

first run c:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also: -Re: ODBC Driver on Windows 64 bit +Re: ODBC Driver on Windows 64 bit

ODBC Installation

@@ -608,7 +797,7 @@

ODBC Installation

First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at -http://www.postgresql.org/ftp/odbc/versions/msi. +https://www.postgresql.org/ftp/odbc/versions/msi/.

Starting the Server

@@ -714,55 +903,6 @@

Using Microsoft Access

Tools - Options - Edit/Find - ODBC fields.

-

Using H2 in Microsoft .NET

-

-The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. -You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. -

- -

Using the ADO.NET API on .NET

-

-An implementation of the ADO.NET interface is available in the open source project -H2Sharp. -

- -

Using the JDBC API on .NET

-
  • Install the .NET Framework from Microsoft. - Mono has not yet been tested. -
  • Install IKVM.NET. -
  • Copy the h2*.jar file to ikvm/bin -
  • Run the H2 Console using: - ikvm -jar h2*.jar -
  • Convert the H2 Console to an .exe file using: - ikvmc -target:winexe h2*.jar. - You may ignore the warnings. -
  • Create a .dll file using (change the version accordingly): - ikvmc.exe -target:library -version:1.0.69.0 h2*.jar -
-

-If you want your C# application use H2, you need to add the h2.dll and the -IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here some sample code: -

-
-using System;
-using java.sql;
-
-class Test
-{
-    static public void Main()
-    {
-        org.h2.Driver.load();
-        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "sa");
-        Statement stat = conn.createStatement();
-        ResultSet rs = stat.executeQuery("SELECT 'Hello World'");
-        while (rs.next())
-        {
-            Console.WriteLine(rs.getString(1));
-        }
-    }
-}
-
-

ACID

In the database world, ACID stands for: @@ -790,7 +930,8 @@

Isolation

For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. -H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. +H2 supports the transaction isolation levels 'read uncommitted', 'read committed', 'repeatable read', +and 'serializable'.

Durability

@@ -851,9 +992,9 @@

Ways to (Not) Achieve Durability

FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see -Your Hard Drive Lies to You. +Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See -Bad fsync?. +Bad fsync?. So the situation is confusing, and tests prove there is a problem.

@@ -900,7 +1041,8 @@

Using the Recover Tool

For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a -RUNSCRIPT FROM SQL statement. The script includes at least one +RUNSCRIPT SQL statement. +The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. @@ -1063,7 +1205,6 @@

Passwords: Using Char Arrays instead of Strings

import java.util.*; public class Test { public static void main(String[] args) throws Exception { - Class.forName("org.h2.Driver"); String url = "jdbc:h2:~/test"; Properties prop = new Properties(); prop.setProperty("user", "sa"); @@ -1081,7 +1222,6 @@

Passwords: Using Char Arrays instead of Strings

}

-This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField.

@@ -1221,7 +1361,7 @@

Protection against Remote Access

If you enable remote access using -tcpAllowOthers or -pgAllowOthers, -please also consider using the options -baseDir, -ifExists, +please also consider using the options -baseDir, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. @@ -1230,9 +1370,10 @@

Protection against Remote Access

If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. -The options -baseDir, -ifExists don't protect -access to the tools section, prevent remote shutdown of the web server, -changes to the preferences, the saved connection settings, +If this option is specified, -webExternalNames should be also specified with +comma-separated list of external names or addresses of this server. +The options -baseDir don't protect +access to the saved connection settings, or access to other databases accessible from the system.

@@ -1378,7 +1519,7 @@

TLS Connections

To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. -See also +See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information.

@@ -1395,7 +1536,7 @@

Universally Unique Identifiers (UUID)

Standardized randomly generated UUIDs have 122 random bits. 4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function -RANDOM_UUID(). +RANDOM_UUID() or UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values:

@@ -1430,52 +1571,45 @@

Universally Unique Identifiers (UUID)

Spatial Features

-H2 supports the geometry data type and spatial indexes if -the JTS Topology Suite -is in the classpath. -To run the H2 Console tool with the JTS tool, you need to download the -JTS 1.13 jar file -and place it in the h2 bin directory. Then edit the h2.sh file as follows: -

-
-#!/bin/sh
-dir=$(dirname "$0")
-java -cp "$dir/h2.jar:jts-1.13.jar:$H2DRIVERS:$CLASSPATH" org.h2.tools.Console "$@"
-
-

+H2 supports the geometry data type and spatial indexes. Here is an example SQL script to create a table with a spatial column and index:

-CREATE TABLE GEO_TABLE(GID SERIAL, THE_GEOM GEOMETRY);
+CREATE TABLE GEO_TABLE(
+    GID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+    THE_GEOM GEOMETRY);
 INSERT INTO GEO_TABLE(THE_GEOM) VALUES
     ('POINT(500 505)'),
     ('LINESTRING(550 551, 525 512, 565 566)'),
     ('POLYGON ((550 521, 580 540, 570 564, 512 566, 550 521))');
-CREATE SPATIAL INDEX GEO_TABLE_SPATIAL_INDEX ON GEO_TABLE(THE_GEOM);
+CREATE SPATIAL INDEX GEO_TABLE_SPATIAL_INDEX
+    ON GEO_TABLE(THE_GEOM);
 

To query the table using geometry envelope intersection, -use the operation &&, as in PostGIS: +use the operation &&, as in PostGIS:

 SELECT * FROM GEO_TABLE
-WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
+    WHERE THE_GEOM &&
+    'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
 

You can verify that the spatial index is used using the "explain plan" feature:

 EXPLAIN SELECT * FROM GEO_TABLE
-WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
+    WHERE THE_GEOM &&
+    'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
 -- Result
 SELECT
-    GEO_TABLE.GID,
-    GEO_TABLE.THE_GEOM
-FROM PUBLIC.GEO_TABLE
-    /* PUBLIC.GEO_TABLE_SPATIAL_INDEX:
-    THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))' */
-WHERE INTERSECTS(THE_GEOM,
-    'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))')
+    "PUBLIC"."GEO_TABLE"."GID",
+    "PUBLIC"."GEO_TABLE"."THE_GEOM"
+FROM "PUBLIC"."GEO_TABLE"
+    /* PUBLIC.GEO_TABLE_SPATIAL_INDEX: THE_GEOM &&
+    GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))' */
+WHERE "THE_GEOM" &&
+    GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'
 

For persistent databases, the spatial index is stored on disk; @@ -1512,7 +1646,7 @@

Recursive Queries

WITH LINK(ID, NAME, LEVEL) AS ( SELECT ID, NAME, 0 FROM FOLDER WHERE PARENT IS NULL UNION ALL - SELECT FOLDER.ID, IFNULL(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1 + SELECT FOLDER.ID, COALESCE(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1 FROM LINK INNER JOIN FOLDER ON LINK.ID = FOLDER.PARENT ) SELECT NAME FROM LINK WHERE NAME IS NOT NULL ORDER BY ID; @@ -1561,7 +1695,7 @@

Settings Read from System Properties

For a complete list of settings, see -SysProperties. +SysProperties.

Setting the Server Bind Address

@@ -1577,21 +1711,31 @@

Pluggable File System

This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. -Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. +Internally, the interfaces are very similar to the Java 7 NIO2 API. The following file systems are included:

-
  • zip: read-only zip-file based file system. Format: zip:/zipFileName!/fileName. +
    • file: the default file system that uses FileChannel. +
    • zip: read-only zip-file based file system. Format: zip:~/zipFileName!/fileName.
    • split: file system that splits files in 1 GB files (stackable with other file systems). -
    • nio: file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems).
    • nioMapped: file system that uses memory mapped files (faster in some operating systems). - Please note that there currently is a file size limitation of 2 GB when using this file system when using a 32-bit JVM. - To work around this limitation, combine it with the split file system: split:nioMapped:test. + Please note that there currently is a file size limitation of 2 GB when using this file system. + To work around this limitation, combine it with the split file system: split:nioMapped:~/test. +
    • async: experimental file system that uses AsynchronousFileChannel instead of FileChannel (faster in some operating systems).
    • memFS: in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
    • memLZF: compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). +
    • nioMemFS: stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs. +
    • +
    • + nioMemLZF: stores compressed data outside of the VM's heap - + useful for large memory DBs without incurring GC costs. + Use "nioMemLZF:12:" to tweak the % of blocks that are stored uncompressed. + If you size this to your working set correctly, + compressed storage is roughly the same performance as uncompressed. + The default value is 1%.

    -As an example, to use the the nio file system, use the following database URL: -jdbc:h2:nio:~/test. +As an example, to use the async: file system +use the following database URL: jdbc:h2:async:~/test.

    To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, @@ -1619,43 +1763,10 @@

    Split File System

    However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). -The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. +The following file name means the logical file is split into 1 MiB blocks: split:20:~/test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test.

    -

    Database Upgrade

    -

    -In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. -To automatically convert databases to the new file store, it is necessary to include an additional jar file. -The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . -If this file is in the classpath, every connect to an older database will result in a conversion process. -

    -

    -The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be -renamed from -

      -
    • dbName.data.db to dbName.data.db.backup -
    • dbName.index.db to dbName.index.db.backup -
    -by default. Also, the temporary script will be written to the database directory instead of a temporary directory. -Both defaults can be customized via -
      -
    • org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) -
    • org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) -
    -prior opening a database connection. -

    -

    -Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. -The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: -(the JDBC driver class is org.h2.upgrade.v1_1.Driver). -If the database should automatically connect using the old version if a database with the old format exists -(without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE -to the database URL. -Please note the old driver did not process the system property "h2.baseDir" correctly, -so that using this setting is not supported when upgrading. -

    -

    Java Objects Serialization

    Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. @@ -1664,7 +1775,9 @@

    Java Objects Serialization

    To disable this feature set the system property h2.serializeJavaObject=false (default: true).

    -Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation: +Serialization and deserialization of java objects is customizable both at system level and at database level providing a +JavaObjectSerializer implementation: +

    • At system level set the system property h2.javaObjectSerializer with the @@ -1680,7 +1793,6 @@

      Java Objects Serialization

    -

    Limits and Limitations

    @@ -1697,28 +1809,29 @@

    Limits and Limitations

    An example database URL is: jdbc:h2:split:~/test.
  • The maximum number of rows per table is 2^64.
  • The maximum number of open transactions is 65535. +
  • The maximum number of columns in a table or expressions in a SELECT statement is 16384. +The actual possible number can be smaller if their definitions are too long. +
  • The maximum length of an identifier (table name, column name, and so on) is 256 characters. +
  • The maximum length of CHARACTER, CHARACTER VARYING and VARCHAR_IGNORECASE values and columns +is 1048576 characters. +
  • The maximum length of BINARY, BINARY VARYING, JAVA_OBJECT, GEOMETRY, and JSON values and columns +is 1048576 bytes. +
  • The maximum precision of NUMERIC and DECFLOAT values and columns is 100000. +
  • The maximum length of an ENUM value is 1048576 characters, the maximum number of ENUM values is 65536. +
  • The maximum cardinality of an ARRAY value or column is 65536. +
  • The maximum degree of a ROW value or column is 16384. +
  • The maximum index of parameter is 100000.
  • Main memory requirements: The larger the database, the more main memory is required. - With the current storage mechanism (the page store), - the minimum main memory required is around 1 MB for each 8 GB database file size.
  • Limit on the complexity of SQL statements. -Statements of the following form will result in a stack overflow exception: -
    -SELECT * FROM DUAL WHERE X = 1
    -OR X = 2 OR X = 2 OR X = 2 OR X = 2 OR X = 2
    --- repeat previous line 500 times --
    -
    +Very complex expressions may result in a stack overflow exception.
  • There is no limit for the following entities, except the memory and storage capacity: - maximum identifier length (table name, column name, and so on); - maximum number of tables, columns, indexes, triggers, and other database objects; - maximum statement length, number of parameters per statement, tables per statement, expressions - in order by, group by, having, and so on; + maximum number of tables, indexes, triggers, and other database objects; + maximum statement length, tables per statement; maximum rows per query; - maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; - maximum row length, index row length, select row length; - maximum length of a varchar column, decimal column, literal in a statement. + maximum indexes per table, lob columns per table, and so on; + maximum row length, index row length, select row length.
  • Querying from the metadata tables is slow if there are many tables (thousands). -
  • For limitations on data types, see the documentation of the respective Java data type - or the data type documentation of this database. +
  • For other limitations on data types, see the data type documentation of this database.
@@ -1730,60 +1843,53 @@ AES-128 A block encryption algorithm. See also: Wikipedia: - AES + href="https://en.wikipedia.org/wiki/Advanced_Encryption_Standard">Wikipedia: + Advanced Encryption Standard Birthday Paradox Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: - Birthday Paradox + href="https://en.wikipedia.org/wiki/Birthday_problem">Wikipedia: + Birthday problem Digest Protocol to protect a password (but not to protect data). - See also: RFC + See also: RFC 2617: HTTP Digest Access Authentication - - GCJ - Compiler for Java. GNU - Compiler for the Java and NativeJ - (commercial) - HTTPS A protocol to provide security to HTTP connections. See - also: RFC 2818: + also: RFC 2818: HTTP Over TLS Modes of Operation Wikipedia: - Block cipher modes of operation + href="https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation">Wikipedia: + Block cipher mode of operation Salt Random number to increase the security of passwords. See - also: Wikipedia: + also: Wikipedia: Key derivation function SHA-256 A cryptographic one-way hash function. See also: Wikipedia: SHA - hash functions + href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithms">Wikipedia: + Secure Hash Algorithms SQL Injection A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: - SQL Injection + href="https://en.wikipedia.org/wiki/SQL_injection">Wikipedia: + SQL injection Watermark Attack @@ -1795,7 +1901,7 @@ SSL/TLS Secure Sockets Layer / Transport Layer Security. See also: - Java Secure Socket + Java Secure Socket Extension (JSSE) diff --git a/h2/src/docsrc/html/architecture.html b/h2/src/docsrc/html/architecture.html index 9875867357..af4ccdca18 100644 --- a/h2/src/docsrc/html/architecture.html +++ b/h2/src/docsrc/html/architecture.html @@ -1,7 +1,7 @@ @@ -50,6 +50,7 @@

Introduction

Top-down Overview

Working from the top down, the layers look like this: +

  • JDBC driver.
  • Connection/session management.
  • SQL Parser. @@ -59,7 +60,6 @@

    Top-down Overview

  • B-tree engine and page-based storage allocation.
  • Filesystem abstraction.
-

JDBC Driver

@@ -69,6 +69,7 @@

JDBC Driver

Connection/session management

The primary classes of interest are: +

@@ -79,14 +80,13 @@

Connection/session management

PackageDescription
org.h2.engine.Databasethe root/global class
org.h2.engine.SessionRemote remote session
-

Parser

The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design.

-See Wikipedia Recursive-descent parser page. +See Wikipedia Recursive descent parser page.

@@ -95,14 +95,15 @@

Command execution and planning

Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. - +

+

The primary packages of interest are: +

PackageDescription
org.h2.command.ddlCommands that modify schema data structures
org.h2.command.dmlCommands that modify data
-

Table/Index/Constraints

@@ -110,18 +111,18 @@

Table/Index/Constraints

The primary packages of interest are: +

PackageDescription
org.h2.tableImplementations of different kinds of tables
org.h2.indexImplementations of different kinds of indices
-

Undo log, redo log, and transactions layer

We have a transaction log, which is shared among all sessions. See also -http://en.wikipedia.org/wiki/Transaction_log -http://h2database.com/html/grammar.html#set_log +https://en.wikipedia.org/wiki/Transaction_log +https://h2database.com/html/grammar.html#set_log

We also have an undo log, which is per session, to undo an operation (an update that fails for example) diff --git a/h2/src/docsrc/html/build.html b/h2/src/docsrc/html/build.html index 19294814ca..87a588d72b 100644 --- a/h2/src/docsrc/html/build.html +++ b/h2/src/docsrc/html/build.html @@ -1,7 +1,7 @@ @@ -18,15 +18,13 @@
-

Build

+

Build

Portability
Environment
Building the Software
- - Build Targets
Using Maven 2
@@ -34,7 +32,7 @@

Build

Translating
- Providing Patches
+ Submitting Source Code Changes
Reporting Problems or Requests
@@ -45,34 +43,31 @@

Build

Portability

This database is written in Java and therefore works on many platforms. -It can also be compiled to a native executable using GCJ.

Environment

-To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required. +To run this database, a Java Runtime Environment (JRE) version 8 or higher is required.

To create the database executables, the following software stack was used. To use this database, it is not required to install this software however.

Building the Software

-You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. +You need to install a JDK, for example the Oracle JDK version 8. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: @@ -90,49 +85,16 @@

Building the Software

build jar

-To run the build tool in shell mode, use the command line option - as in ./build.sh -. -

- -

Switching the Source Code

-

-The source code uses Java 1.6 features. -To switch the source code to the installed version of Java, run: -

-
-build switchSource
-
- -

Build Targets

-

-The build system can generate smaller jar files as well. The following targets are currently supported: -

-
  • jarClient - creates the file h2client.jar. This only contains the JDBC client. -
  • jarSmall - creates the file h2small.jar. - This only contains the embedded database. Debug information is disabled. -
  • jarJaqu - creates the file h2jaqu.jar. - This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. -
  • javadocImpl creates the Javadocs of the implementation. -
-

-To create the file h2client.jar, go to the directory h2 and execute the following command: +To run the build tool in shell mode, use the command line option -:

-build jarClient
+./build.sh -
 
-

Using Lucene 2 / 3

+

Using Apache Lucene

-Both Apache Lucene 2 and Lucene 3 are supported. -Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, -and Lucene version 3.x is used by default for H2 version 1.3.x. -To use a different version of Lucene when compiling, it needs to be specified as follows: +Apache Lucene 8.5.2 is used for testing.

-
-build -Dlucene=2 clean compile
-

Using Maven 2

Using a Central Repository

@@ -149,13 +111,13 @@

Using a Central Repository

New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically -synchronized with the main Maven repository; +synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there.

Maven Plugin to Start and Stop the TCP Server

A Maven plugin to start and stop the H2 TCP server is available from -Laird Nelson at GitHub. +Laird Nelson at GitHub. To start the H2 server, use:

@@ -190,11 +152,12 @@ 

Using Eclipse

To create an Eclipse project for H2, use the following steps:

-
  • Install Subversion and Eclipse. -
  • Get the H2 source code from the Subversion repository:
    - svn checkout http://h2database.googlecode.com/svn/trunk h2database-read-only -
  • Download all dependencies (Windows):
    - build.bat download +
    • Install Git and Eclipse. +
    • Get the H2 source code from Github:
      + git clone https://github.com/h2database/h2database +
    • Download all dependencies:
      + build.bat download(Windows)
      + ./build.sh download(otherwise)
    • In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source.
    • Select the h2 folder, click Next and Finish. @@ -216,11 +179,11 @@

      Translating

      The web site is currently translated using Google.

      -

      Providing Patches

      +

      Submitting Source Code Changes

      -If you like to provide patches, please consider the following guidelines to simplify merging them: +If you'd like to contribute bug fixes or new features, please consider the following guidelines to simplify merging them:

      -
      • Only use Java 6 features (do not use Java 7) (see Environment). +
        • Only use Java 8 features (do not use Java 9/10/etc) (see Environment).
        • Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. @@ -230,34 +193,34 @@

          Providing Patches

          The formatting options (eclipseCodeStyle) are also included.
        • Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. - For SQL level tests, see src/test/org/h2/test/test.in.txt or - testSimple.in.txt. + For SQL level tests, see SQL files in src/test/org/h2/test/scripts.
        • The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage.
        • Verify that you did not break other features: run the test cases by executing build test.
        • Provide end user documentation if required (src/docsrc/html/*). -
        • Document grammar changes in src/docsrc/help/help.csv +
        • Document grammar changes in src/main/org/h2/res/help.csv
        • Provide a change log entry (src/docsrc/html/changelog.html).
        • Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt.
        • Run src/installer/buildRelease to find and fix formatting errors.
        • Verify the formatting using build docs and build javadoc. -
        • Submit patches as .patch files (compressed if big). - To create a patch using Eclipse, use Team / Create Patch. +
        • Submit changes using GitHub's "pull requests". You'll require a free GitHub + account. If you are not familiar with pull requests, please read GitHub's + Using pull requests page.

        -For legal reasons, patches need to be public in the form of an email to the -group, or in the form -of an issue report or attachment. +For legal reasons, patches need to be public in the form of an + issue report or attachment or in the form of an email + to the group. Significant contributions need to include the following statement:

        "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 -(http://h2database.com/html/license.html)." +(https://h2database.com/html/license.html)."

        Reporting Problems or Requests

        @@ -268,36 +231,36 @@

        Reporting Problems or Requests

        • For bug reports, please provide a short, self contained, correct (compilable), example of the problem.
        • Feature requests are always welcome, even if the feature is already on the - roadmap. Your mail will help prioritize feature requests. + issue tracker + you can comment on it. If you urgently need a feature, consider providing a patch.
        • Before posting problems, check the FAQ and do a Google search.
        • When you get an unexpected exception, please try the - Error Analyzer tool. If this doesn't help, + Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s).
        • When sending source code, please use a public web clipboard such as - Pastebin, - Cl1p, or - Mystic Paste + Pastebin or + Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use: - HelloWorld.java. + HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. -
        • For large attachments, use a public temporary storage such as - Rapidshare. +
        • For large attachments, use a public storage such as + Google Drive.
        • Google Group versus issue tracking: Use the - Google Group + Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an - issue, + issue, but you don't need to (sending an email to the group is enough). Please note that only few people monitor the issue tracking system.
        • For out-of-memory problems, please analyze the problem yourself first, @@ -305,7 +268,7 @@

          Reporting Problems or Requests

          -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the - Eclipse Memory Analyzer (MAT). + Eclipse Memory Analyzer (MAT).
        • It may take a few days to get an answer. Please do not double post.
        @@ -313,15 +276,9 @@

        Automated Build

        This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line -./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. -The last results are available here: +./build.sh jar testCI. +The results are available on CI workflow page.

        -

        Generating Railroad Diagrams

        diff --git a/h2/src/docsrc/html/changelog.html b/h2/src/docsrc/html/changelog.html index e3712369e4..23c1e63e38 100644 --- a/h2/src/docsrc/html/changelog.html +++ b/h2/src/docsrc/html/changelog.html @@ -1,7 +1,7 @@ @@ -20,418 +20,1232 @@

        Change Log

        Next Version (unreleased)

        -
        • - -
        +
          +
        • Nothing yet... +
        • +
        -

        Version 1.4.187 Beta (2015-04-10)

        -
        • MVStore: concurrent changes to the same row could result in - the exception "The transaction log might be corrupt for key ...". - This could only be reproduced with 3 or more threads. -
        • Results with CLOB or BLOB data are no longer reused. -
        • References to BLOB and CLOB objects now have a timeout. - The configuration setting is LOB_TIMEOUT (default 5 minutes). - This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, - and the database is not closed for a longer time. -
        • MVStore: when committing a session that removed LOB values, - changes were flushed unnecessarily. -
        • Issue 610: possible integer overflow in WriteBuffer.grow(). -
        • Issue 609: the spatial index did not support NULL (ClassCastException). -
        • MVStore: in some cases, CLOB/BLOB data blocks were removed - incorrectly when opening a database. -
        • MVStore: updates that affected many rows were slow - in some cases if there was a secondary index. -
        • Using "runscript" with autocommit disabled could result - in a lock timeout on the internal table "SYS". -
        • Issue 603: there was a memory leak when using H2 in a web application. - Apache Tomcat logged an error message: "The web application ... - created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". -
        • When using the MVStore, - running a SQL script generate by the Recover tool from a PageStore file - failed with a strange error message (NullPointerException), - now a clear error message is shown. -
        • Issue 605: with version 1.4.186, opening a database could result in - an endless loop in LobStorageMap.init. -
        • Queries that use the same table alias multiple times now work. - Before, the select expression list was expanded incorrectly. - Example: "select * from a as x, b as x". -
        • The MySQL compatibility feature "insert ... on duplicate key update" - did not work with a non-default schema. -
        • Issue 599: the condition "in(x, y)" could not be used in the select list - when using "group by". -
        • The LIRS cache could grow larger than the allocated memory. -
        • A new file system implementation that re-opens the file if it was closed due - to the application calling Thread.interrupt(). File name prefix "retry:". - Please note it is strongly recommended to avoid calling Thread.interrupt; - this is a problem for various libraries, including Apache Lucene. -
        • MVStore: use RandomAccessFile file system if the file name starts with "file:". -
        • Allow DATEADD to take a long value for count when manipulating milliseconds. -
        • When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, - so that it was effectively 1024 times smaller than it should be. -
        • Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could - throw an exception. -
        • Fix bug in MVStore when creating lots of temporary tables, where we could run out of - transaction IDs. -
        • Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. -
        • Fix bug in "jdbc:h2:nioMemFS" isRoot() function. - Also, the page size was increased to 64 KB. -
        +

        Version 2.1.210 (2022-01-17)

        +
          +
        • PR #3381: Add IDENTITY() and SCOPE_IDENTITY() to LEGACY mode +
        • +
        • Issue #3376: Data cannot be read after insert of clob data > MAX_LENGTH_INPLACE_LOB with data change delta table +
        • +
        • PR #3377: Add -webExternalNames setting and fix WebServer.getConnection() +
        • +
        • PR #3367: Use faster checks of dimension systems of geometries +
        • +
        • PR #3369: Added v2 changes in migration docs +
        • +
        • Issue #3361: MemoryEstimator.estimateMemory() can return negative size +
        • +
        • PR #3362: Use BufferedReader instead of BufferedInputStream to avoid Illegal seek exception +
        • +
        • Issue #3353: Wrong rownum() scope for DML with change delta table +
        • +
        • PR #3352: make Javadoc happier +
        • +
        • Issue #3344: Changelog could link to github issue +
        • +
        • Issue #3340: JDBC index type seems wrong +
        • +
        • Issue #3336: FT_INIT error when mode=MySQL +
        • +
        • Issue #3334: Regression with CREATE ALIAS - Parameter "#2" is not set +
        • +
        • Issue #3321: Insert Primary Key after import CSV Data does not work +
        • +
        • PR #3323: Tokenize SQL before parsing and preserve tokens for recompilation +
        • +
        • PR #3320: Add Servlet 5-compatible servlet for H2 Console +
        • +
        • Issue #918: Parser fails recognising set operations in correlated subqueries +
        • +
        • Issue #2050: PostgreSQL with recursive fail with union in the final query +
        • +
        • PR #3316: Update copyright years +
        • +
        • PR #3315: Never put read locks into lockSharedSessions and other minor changes +
        • +
        • Issue #492: H2 does not correctly parse <parenthesized joined table> +
        • +
        • Issue #3311: Parser creates wrong join graph in some cases and uses wrong tables for column mapping +
        • +
        • FORCE_JOIN_ORDER setting is removed +
        • +
        • Issue #1983: Official build script is not compatible with Java 13 +
        • +
        • PR #3305: Add UNIQUE(VALUE) and remove some non-standard keywords +
        • +
        • PR #3299: Remove useless StringBuilder.toString() call +
        • +
        • PR #3298: Delete unused sqlTypes array +
        • +
        -

        Version 1.4.186 Beta (2015-03-02)

        -
        • The Servlet API 3.0.1 is now used, instead of 2.4. -
        • MVStore: old chunks no longer removed in append-only mode. -
        • MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases. -
        • MVStore: orphaned lob objects were not correctly removed in some cases, - making the database grow unnecessarily. -
        • MVStore: the maximum cache size was artificially limited to 2 GB - (due to an integer overflow). -
        • MVStore / TransactionStore: concurrent updates could result in a - "Too many open transactions" exception. -
        • StringUtils.toUpperEnglish now has a small cache. - This should speed up reading from a ResultSet when using the column name. -
        • MVStore: up to 65535 open transactions are now supported. - Previously, the limit was at most 65535 transactions between the oldest open and the - newest open transaction (which was quite a strange limit). -
        • The default limit for in-place LOB objects was changed from 128 to 256 bytes. - This is because each read creates a reference to a LOB, and maintaining the references - is a big overhead. With the higher limit, less references are needed. -
        • Tables without columns didn't work. - (The use case for such tables is testing.) -
        • The LIRS cache now resizes the table automatically in all cases - and no longer needs the averageMemory configuration. -
        • Creating a linked table from an MVStore database to a non-MVStore database - created a second (non-MVStore) database file. -
        • In version 1.4.184, a bug was introduced that broke queries - that have both joins and wildcards, for example: - select * from dual join(select x from dual) on 1=1 -
        • Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values. -
        • Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. -
        • Make the planner use indexes for sorting when doing a GROUP BY where - all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). -
        • PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang. -
        • Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. -
        +

        Version 2.0.206 (2022-01-04)

        +
          +
        • Issue #3322: Create linked table fails when the table contains a Geometry with a data type specified +
        • +
        • Issue #3297: Unexpected GROUP BY results with indexed IGNORECASE column +
        • +
        -

        Version 1.4.185 Beta (2015-01-16)

        -
        • In version 1.4.184, "group by" ignored the table name, - and could pick a select column by mistake. - Example: select 0 as x from system_range(1, 2) d group by d.x; -
        • New connection setting "REUSE_SPACE" (default: true). If disabled, - all changes are appended to the database file, and existing content is never overwritten. - This allows rolling back to a previous state of the database by truncating - the database file. -
        • Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException. -
        • Issue 594: Profiler.copyInThread does not work properly. -
        • Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). -
        • Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. -
        • Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, - the logic that cleared the flag could never be reached, resulting in performance degradation. - Reported by Alexander Nesterov. -
        • Issue 552: Implement BIT_AND and BIT_OR aggregate functions. -
        +

        Version 2.0.204 (2021-12-21)

        +
          +
        • Issue #3291: Add Legacy and Strict modes +
        • +
        • Issue #3287: SELECT statement works on 1.4.200 but fails on 2.0.202 with "Column XYZ must be in the GROUP BY list" +
        • +
        • PR #3284: Remove unused UNDO_LOG setting +
        • +
        • Issue #3251: Table with GEOMETRY column can't have a TriggerAdapter-based trigger any more +
        • +
        • PR #3281: DateTimeFormatter-based FORMATDATETIME and PARSEDATETIME and other changes +
        • +
        • Issue #3246: Spatial predicates with comparison are broken in MySQL compatibility mode +
        • +
        • Issue #3270: org.h2.jdbc.JdbcSQLFeatureNotSupportedException: Feature not supported: "Unsafe comparison or cast" +
        • +
        • Issue #3268 / PR #3275: Add TO_DATE and TO_TIMESTAMP to PostgreSQL compatibility mode +
        • +
        • PR #3274: Remove some dead code and unused params +
        • +
        • Issue #3266: Oracle compatibility NUMBER without precision and scale should have variable scale +
        • +
        • Issue #3263: Unable to store BigDecimal with negative scale in NUMERIC(19,6) column +
        • +
        • PR #3261: Small optimization for MIN and MAX +
        • +
        • Issue #3258 / PR #3259: Prevent incorrect optimization of COUNT(*) and other changes +
        • +
        • PR #3255: Throw proper exception when type of argument isn't known +
        • +
        • Issue #3249: Multi-column assignment with subquery throws exception when subquery doesn't return any rows +
        • +
        • PR #3248: Remove redundant uniqueness check, correct version in pom +
        • +
        • PR #3247: Avoid AIOBE exception in TestCrashAPI and in Transaction +
        • +
        • Issue #3241: ResultSetMetaData::getColumnTypeName should produce the correct ARRAY type +
        • +
        • Issue #3204: H2 Tools Web Console: Unicode 32 +
        • +
        • Issue #3227: Regression when referencing outer joined column from correlated subquery +
        • +
        • Issue #3237: Can no longer cast CHAR(n) to BOOLEAN with n > 1 +
        • +
        • Issue #3235: Regression in IN predicate with empty in list +
        • +
        • Issue #3236: NullPointerException in DatabaseMetaData::getIndexInfo when querying the info for views +
        • +
        • Issue #3233: General error when using NULL predicate on _ROWID_ column +
        • +
        • Issue #3223: TRUNC(v, p) with negative precisions no longer works +
        • +
        • Issue #3221: NullPointerException when creating domain +
        • +
        • Issue #3186: ResultSetMetaData.getSchemaName() returns empty string for aliased columns +
        • +
        -

        Version 1.4.184 Beta (2014-12-19)

        -
        • In version 1.3.183, indexes were not used if the table contains - columns with a default value generated by a sequence. - This includes tables with identity and auto-increment columns. - This bug was introduced by supporting "rownum" in views and derived tables. -
        • MVStore: imported BLOB and CLOB data sometimes disappeared. - This was caused by a bug in the ObjectDataType comparison. -
        • Reading from a StreamStore now throws an - IOException if the underlying data doesn't exist. -
        • MVStore: if there is an exception while saving, the store is now in all cases immediately closed. -
        • MVStore: the dump tool could go into an endless loop for some files. -
        • MVStore: recovery for a database with many CLOB or BLOB entries is now much faster. -
        • Group by with a quoted select column name alias didn't work. Example: - select 1 "a" from dual group by "a" -
        • Auto-server mode: the host name is now stored in the .lock.db file. -
        +

        Version 2.0.202 (2021-11-25)

        +
          +
        • Issue #3206: CVE Vulnerability CVE-2018-14335 +
        • +
        • Issue #3174: Add keyword AUTOCOMMIT on create linked table to control the commit mode +
        • +
        • Issue #3130: Precision of NUMERIC values isn't verified in the Oracle compatibility mode +
        • +
        • Issue #3122: Documentation: Syntax diagram for RENAME CONSTRAINT incorrect +
        • +
        • PR #3129: remove LOB compression +
        • +
        • PR #3127: Cleanups post PageStore removal +
        • +
        • PR #3126: Change nested classes to static nested classes where possible +
        • +
        • PR #3125: Strongly typed LobStorageMap +
        • +
        • PR #3124: Remove PageStore engine +
        • +
        • Issue #3118: SHUTDOWN COMPACT causes 2PC to corrupt database in a simulated crash +
        • +
        • Issue #3115: Infinite loop then OOM in org.h2.mvstore.tx.Transaction.waitFor() when deadlock occurs +
        • +
        • Issue #3113: Data lost when 2 threads read/write TransactionStore and close it normally even if MVStore autoCommit +disabled +
        • +
        • PR #3110: Fix possible int overflow and minor doc change +
        • +
        • Issue #3036: A database that contains BLOBs might grow without being able to be compacted +
        • +
        • Issue #3097: Possible MVStore compaction issue +
        • +
        • PR #3096: Add separate LOB data layer for values +
        • +
        • Issue #3093: ROWNUM filter doesn't work with more than one table +
        • +
        • PR #3087: Add "CONVERT TO CHARACTER SET" to compatibility modes +
        • +
        • Issue #3080: Complex Query returns different results depending on the number of arguments in the IN clause +
        • +
        • Issue #3066: Very high DB opening/closing times +
        • +
        • PR #3077: Add CREATE UNIQUE INDEX ... INCLUDE +
        • +
        • Issue #3061 / PR #3074: GROUP BY using column index for MySQL/MariaDB/PostgreSQL compatibility modes +
        • +
        • PR #3067: Restrict identity data types and result limitation clauses to compatibility modes +
        • +
        • PR #3065: Remove duplicate method IOUtils.getBufferedReader +
        • +
        • Issue #3055: Phantom table leftover after INSERT .. WITH +
        • +
        • PR #3062: Add ALTER DOMAIN RENAME CONSTRAINT command +
        • +
        • Issue #3059: ALTER TABLE DROP CONSTRAINT doesn't check owner of constraint +
        • +
        • Issue #3054: Add binary set aggregate functions +
        • +
        • Issue #3049: Java value getters of ValueNull should throw exceptions +
        • +
        • Issue #3046: SYSTEM_RANGE can't handle bind variable as step size and produces wrong error message +
        • +
        • Issue #3033: NPE during BLOB read after 2PC rollback +
        • +
        • PR #3034: Don't evaluate ValueTimestampTimeZone at begin and end of each command +
        • +
        • PR #3029: Optimize row storage in MVStore and other changes +
        • +
        • PR #3028: Remove back compatibility +
        • +
        • PR #3025: Switch from Travis CI to GitHub Workflows +
        • +
        • PR #3024: Add initial version of upgrade utility +
        • +
        • Issue #3017: ROUND() does not set correct precision and scale of result +
        • +
        • Issue #3003: CREATE TABLE ... AS SELECT ... FROM creates invalid column definition when aggregate functions are used +
        • +
        • Issue #3008: TestCrashAPI: Exception in Arrays.sort() called by LocalResult.done() +
        • +
        • Issue #3006 / PR #3007: Unlock meta during query execution in CREATE TABLE AS query +
        • +
        • PR #3001: PostgreSQL compatibility: UPDATE with FROM +
        • +
        • PR #2998: Fix off-by-one error with -webAdminPassword in Server +
        • +
        • PR #2995: Add FETCH_SIZE clause to CREATE LINKED TABLE +
        • +
        • Issue #2907 / PR #2994: Prevent "Chunk not found" on LOB operations +
        • +
        • PR #2993: Update copyright years +
        • +
        • Issue #2991: TestCrashAPI: NPE in ScriptCommand.dumpDomains() +
        • +
        • Issue #2950 / PR #2987: Issue commit() right before "non-transactional" DDL command starts +
        • +
        • PR #2980: Assorted minor changes +
        • +
        • PR #2966: H2 2.0.201: Linked Tables freeze the Database and freeze the Server Process +
        • +
        • Issue #2972: Memory leak due to negative Page memory in the MVStore +
        • +
        • PR #2971: create skeleton of migration to V2 document +
        • +
        • Issue #2967: MVStore: averageSize int overflow in the class ObjectDataType +
        • +
        • Issue #2963: Syntax error for large hexadecimal constants with DATABASE_TO_UPPER=false +
        • +
        • Issue #2961: Accept CREATE PRIMARY KEY only in metadata or in quirks mode +
        • +
        • Issue #2960: Reject invalid CREATE { UNIQUE | HASH } SPATIAL INDEX +
        • +
        • Issue #2958: TableLink is broken for Oracle database after pull request #2903 +
        • +
        • PR #2955: Prevent incorrect index sorting +
        • +
        • PR #2951: Add documentation for INFORMATION_SCHEMA +
        • +
        • PR #2943: some small prep for next release +
        • +
        • PR #2948: Add support of Infinity, -Infinity, and NaN to DECFLOAT data type +
        • +
        • Issue #2947: Encoding of Unicode and special characters in error messages +
        • +
        • Issue #2891: Fix import of unnamed referential constraints from SQL scripts generated by older versions of H2 +
        • +
        • Issue #2812: Unexpected result for query that compares an integer with a string +
        • +
        • Issue #2936: Add data type conversion code from datetime and UUID values to JSON +
        • +
        • Issue #2935: ENUM ARRAY isn't read properly from persisted data +
        • +
        • Issue #2923: Combination of fileName() with fileStore() should throw an exception +
        • +
        • Issue #2928: JSON_ARRAYAGG and all NULL values +
        • +
        • PR #2918: Removal of unnecessary lock +
        • +
        • Issue #2911: org.h2.mvstore.MVStoreException: Transaction was illegally transitioned from ROLLING_BACK to +ROLLED_BACK +
        • +
        • Issue #1022: JdbcDatabaseMetaData.getPseudoColumns() should be implemented +
        • +
        • Issue #2914: (T1.A = T2.B) OR (T1.A = T2.C) should be optimized to T1.A IN(T2.B, T2.C) to allow index conditions +
        • +
        • PR #2903: Assorted changes +
        • +
        • Issue #2901: PgServer returns less rows when fetchSize is set +
        • +
        • Issue #2894: NPE in DROP SCHEMA when unique constraint is removed before linked referential constraint +
        • +
        • Issue #2888: H2 should pass time zone of client to the server +
        • +
        • PR #2890: Fixed possible eternal wait(0) +
        • +
        • Issue #2846: GRANT SELECT, INSERT, UPDATE, DELETE incorrectly gives privileges to drop a table +
        • +
        • Issue #2882: NPE in UPDATE with SELECT UNION +
        • +
        • PR #2881: Store users and roles together and user-defined functions and aggregates together +
        • +
        • Issue #2878: Disallow spatial indexes in PageStore databases +
        • +
        • PR #2874: Use 64-bit row counts in results and other changes +
        • +
        • Issue #2866: New INFORMATION_SCHEMA should not use keywords as column names +
        • +
        • Issue #2867: PageStore + Lazy + INSERT ... SELECT cause infinite loop +
        • +
        • PR #2869: Normalize binary geometry literals and improve EWKB representation of POLYGON EMPTY +
        • +
        • Issue #2860: CHAR columns in PgCatalogTable have incorrect length +
        • +
        • Issue #2848: Add support for standard <listagg overflow clause> +
        • +
        • Issue #2858: Throw 22001 on attempt to use getString() or getBytes() on LOB object longer than 1,048,576 +chars/octets +
        • +
        • Issue #2854: Define limits for identifiers, number of columns, etc. +
        • +
        • PR #2853: Small optimization for Page compression / decompression +
        • +
        • Issue #2832: Define length limits for non-LOB data types +
        • +
        • Issue #2842: Querying view that uses LTRIM/RTRIM results in a syntax error +
        • +
        • Issue #2841: Call to STRINGDECODE results in StringIndexOutOfBoundsException +
        • +
        • Issue #2839: Querying a view that uses the POSITION() function results in an unexpected syntax error +
        • +
        • Issue #2838: INSERT() with NULL arguments for the original string and string to be added results in NPE +
        • +
        • Issue #2837: ROUND() function should reject invalid number of digits immediately +
        • +
        • Issue #2835: Calling math functions with a string argument results in a NullPointerException +
        • +
        • Issue #2833: MERGE INTO causes an unexpected syntax error +
        • +
        • Issue #2831: Restore YEAR data type for MySQL compatibility mode +
        • +
        • Issue #2822: Suspicious logic in Database.closeImpl() +
        • +
        • Issue #2829: Incorrect manifest entries in sources jar +
        • +
        • Issue #2828: Parser can't parse NOT in simple when operand +
        • +
        • Issue #2826: Table with a generated column cycle results in a NullPointerException +
        • +
        • Issue #2825: Query with % operator results in a ClassCastException +
        • +
        • Issue #2818: TableFilter.getValue() can read value of delegated column faster +
        • +
        • Issue #2816: Query on view that uses the BETWEEN operator results in an unexpected syntax error +
        • +
        • PR #2815: Remove BINARY_COLLATION and UUID_COLLATION settings +
        • +
        • Issue #2813: Query with CASE operator unexpectedly results in "Column must be in the GROUP BY list" error +
        • +
        • Issue #2811: Update build numbers and data format versions +
        • +
        • Issue #2674: OPTIMIZE_IN_SELECT shouldn't convert value to incompatible data types +
        • +
        • Issue #2803: Disallow comparison operations between incomparable data types +
        • +
        • Issue #2561: Separate normal functions and table value functions +
        • +
        • Issue #2804: NPE in ConditionNot.getNotIfPossible() +
        • +
        • Issue #2801: Instances of TableView objects leaking +
        • +
        • PR #2799: Additional bit functions BITNAND, BITNOR, BITXNOR, BITCOUNT, ULSHIFT, URSHIFT, ROTATELEFT, ROTATERIGHT, +BIT_NAND_AGG, BIT_NOR_AGG, and BIT_XNOR_AGG. +
        • +
        • PR #2798: Complete separation of Function class +
        • +
        • Issue #2795: Sporadic issues with trigger during concurrent insert in 1.4.199/1.4.200 +
        • +
        • PR #2796: Assorted refactorings +
        • +
        • Issue #2786: Failure in CREATE TABLE AS leaves inconsistent transaction if some rows were successfully inserted +
        • +
        • Issue #2790: Examples in documentation of CREATE ALIAS should use standard literals only +
        • +
        • Issue #2787: CONCAT and CONCAT_WS functions +
        • +
        • PR #2784: Oracle REGEXP_REPLACE support +
        • +
        • Issue #2780: Remove SCOPE_GENERATED_KEYS setting +
        • +
        • PR #2779: Fix incorrect FK restrictions and other changes +
        • +
        • PR #2778: Assorted changes +
        • +
        • Issue #2776: Referential constraint can create a unique constraint in the wrong schema +
        • +
        • Issue #2771: Add documented DEFAULT ON NULL flag for all types of columns +
        • +
        • Issue #2742 / PR #2768: Better separation of MVStore aimed at smaller h2-mvstore jar +
        • +
        • Issue #2764: Identity columns don't accept large numbers +
        • +
        • IDENTITY() function is removed, SCOPE_IDENTITY() is now available only in MSSQLServer compatibility mode. +
        • +
        • Issue #2757: Intermittent TestFileSystem failures +
        • +
        • Issue #2758: Issues with sequences +
        • +
        • PR #2756: Prevent DROP NOT NULL for identity columns +
        • +
        • Issue #2753: UPDATE statement changes value of GENERATED ALWAYS AS IDENTITY columns +
        • +
        • PR #2751: Add comment explaining seemingly dummy operation +
        • +
        • PR #2750: Use RFC 4122 compliant UUID comparison by default +
        • +
        • PR #2748: PgServer set type text to NULL value +
        • +
        • Issue #2746: Old TCP clients with current server +
        • +
        • PR #2745: PgServer can send bool in binary mode +
        • +
        • PR #2744: Remove jarSmall and jarClient targets +
        • +
        • PR #2743: Add IS_TRIGGER_UPDATABLE and other similar fields to INFORMATION_SCHEMA +
        • +
        • PR #2738: Fix VIEWS.VIEW_DEFINITION and support it for other databases in H2 Console +
        • +
        • PR #2737: Assorted changes +
        • +
        • PR #2734: Update dependencies and fix ResultSetMetaData.isSigned() +
        • +
        • PR #2733: Replace h2.sortNullsHigh with DEFAULT_NULL_ORDERING setting +
        • +
        • PR #2731: Fix spelling errors in German translation +
        • +
        • PR #2728: Add and use DATA_TYPE_SQL() function and remove INFORMATION_SCHEMA.PARAMETERS.REMARKS +
        • +
        • Issue #1015: ENUM and arithmetic operators +
        • +
        • Issue #2711: Store normalized names of data types in metadata +
        • +
        • PR #2722: Implement getRowCount() for some INFORMATION_SCHEMA tables +
        • +
        • PR #2721: Improve LOCKS, SESSIONS, and USERS and optimize COUNT(*) on other isolation levels in some cases +
        • +
        • Issue #2655: TestCrashAPI: AssertionError at MVPrimaryIndex.<init> +
        • +
        • Issue #2716: Fix URL of Maven repository +
        • +
        • Issue #2715: Mention `DB_CLOSE_DELAY=-1` flag in JDBC URL on the "Cheat Sheet" page +
        • +
        • PR #2714: fixed few code smells discovered by PVS-Studio +
        • +
        • Issue #2712: `NOT LIKE` to a sub-query doesn't work +
        • +
        • PR #2710: PgServer: set oid and attnum in RowDescription +
        • +
        • Issue #2254: Add standard DECFLOAT data type +
        • +
        • PR #2708: Add declared data type attributes to the INFORMATION_SCHEMA +
        • +
        • Issue #2706: Empty comments / remarks on objects +
        • +
        • PR #2705: Return standard-compliant DATA_TYPE for strings +
        • +
        • PR #2703: Fix case-insensitive comparison issues with national characters +
        • +
        • Issue #2701: Subquery with FETCH should not accept global conditions +
        • +
        • Issue #2699: Remove FUNCTIONS_IN_SCHEMA setting +
        • +
        • Issue #452: Add possibility to use user-defined aggregate functions with schema +
        • +
        • PR #2695: Refactor handling of parentheses in getSQL() methods +
        • +
        • PR #2693: disallow VARCHAR_IGNORECASE in PostgreSQL mode +
        • +
        • Issue #2407: Implement CHAR whitespace handling correctly +
        • +
        • PR #2685: Check existing data in ALTER DOMAIN ADD CONSTRAINT +
        • +
        • PR #2683: Fix data types in Transfer +
        • +
        • PR #2681: Report user functions in standard ROUTINES and PARAMETERS views +
        • +
        • PR #2680: Reimplement remaining DatabaseMetaData methods and fix precision of binary numeric types +
        • +
        • PR #2679: Reimplement getTables(), getTableTypes(), and getColumns() +
        • +
        • PR #2678: Reimplement getPrimaryKeys(), getBestRowIdentifier(), getIndexInfo() and others +
        • +
        • PR #2675: Reimplement getImportedKeys(), getExportedKeys(), and getCrossReferences() +
        • +
        • PR #2673: Reimplement some metadata methods +
        • +
        • PR #2672: Forward DatabaseMetaData calls to server +
        • +
        • Issue #2329: Content of INFORMATION_SCHEMA should be listed as VIEWS +
        • +
        • PR #2668: Sequence generator data type option and length parameter for JSON data type +
        • +
        • PR #2666: Add ALTER DOMAIN RENAME command +
        • +
        • PR #2663: Add ALTER DOMAIN { SET | DROP } { DEFAULT | ON UPDATE } +
        • +
        • PR #2661: Don't allow construction of incomplete ARRAY and ROW data types +
        • +
        • Issue #2659: NULLIF with row values +
        • +
        • PR #2658: Extract date-time and some other groups of functions into own classes +
        • +
        • PR #2656: add `_int2` and `_int4` for PgServer +
        • +
        • PR #2654: Move out JSON, cardinality, ABS, MOD, FLOOR, and CEIL functions from the Function class +
        • +
        • PR #2653: Use full TypeInfo for conversions between PG and H2 data types +
        • +
        • PR #2652: Add "SHOW ALL" +
        • +
        • PR #2651: add `pg_type.typelem` and `pg_type.typdelim` +
        • +
        • PR #2649: Extract some groups of functions from Function class +
        • +
        • PR #2646: Add some PostgreSQL compatibility features +
        • +
        • PR #2645: Add CURRENT_PATH, CURRENT_ROLE, SESSION_USER, and SYSTEM_USER +
        • +
        • Issue #2643: Send PG_TYPE_TEXTARRAY values to ODBC drivers properly +
        • +
        • PR #2642: Throw proper exceptions from array element reference and TRIM_ARRAY +
        • +
        • PR #2640: German translations +
        • +
        • Issue #2108: Add possible candidates in different case to table not found exception +
        • +
        • Issue #2633: Multi-column UPDATE assignment needs to be reimplemented +
        • +
        • PR #2635: Implement REGEXP_SUBSTR function +
        • +
        • PR #2632: Improve ROW data type +
        • +
        • PR #2630: fix: quoted VALUE in documentation +
        • +
        • Issue #2628: Cached SQL throws JdbcSQLSyntaxErrorException if executed with different parameter values than before +
        • +
        • Issue #2611: Add quantified distinct predicate +
        • +
        • Issue #2620: LOBs in triggers +
        • +
        • PR #2619: ARRAY_MAX_CARDINALITY and TRIM_ARRAY functions +
        • +
        • PR #2617: Add Feature F262: Extended CASE expression +
        • +
        • PR #2615: Add feature T461: Symmetric BETWEEN predicate +
        • +
        • PR #2614: Fix support of multi-dimensional arrays in Java functions +
        • +
        • Issue #2608: Improve concatenation operation for multiple operands +
        • +
        • PR #2605: Assorted minor changes +
        • +
        • Issue #2602: H2 doesn't allow to create trigger from Java source code if there are nested classes +
        • +
        • PR #2601: Add field SLEEP_SINCE to INFORMATION_SCHEMA.SESSIONS table +
        • +
        • Issue #1973: Standard MERGE statement doesn't work with views +
        • +
        • Issue #2552: MERGE statement should process each row only once +
        • +
        • Issue #2548: Wrong update count when MERGE statement visits matched rows more than once +
        • +
        • Issue #2394: H2 does not accept DCL after source merge table +
        • +
        • Issue #2196: Standard MERGE statement doesn't release the source view +
        • +
        • Issue #2567: ARRAY-returning Java functions don't return the proper data type +
        • +
        • Issue #2584: Regression in NULL handling in multiple AND or OR conditions +
        • +
        • PR #2577: PgServer: `array_to_string()` and `set join_collapse_limit` +
        • +
        • PR #2568: Add BIT_XOR_AGG aggregate function +
        • +
        • PR #2565: Assorted minor changes +
        • +
        • PR #2563: defrag is not contributing much, remove from test run +
        • +
        • PR #2562: new exception MVStoreException +
        • +
        • PR #2557: don't throw IllegalStateException in checkOpen +
        • +
        • PR #2554: Reenable mvstore TestCrashAPI +
        • +
        • Issue #2556: TestOutOfMemory: Table "STUFF" not found +
        • +
        • PR #2555: Move current datetime value functions into own class +
        • +
        • PR #2547: split up the ValueLob classes +
        • +
        • PR #2542: Pipelining mvstore chunk creation / save +
        • +
        • Issue #2550: NullPointerException with MERGE containing unknown column in AND condition of WHEN +
        • +
        • Issue #2546: Disallow empty CASE specifications and END CASE +
        • +
        • Issue #2530: Long query with many AND expressions causes StackOverflowError +
        • +
        • PR #2543: Improve case specification support and fix some issues with it +
        • +
        • Issue #2539: Replace non-standard functions with standard code directly in Parser +
        • +
        • Issue #2521: Disallow untyped arrays +
        • +
        • Issue #2532: Duplicate column names in derived table should be acceptable in the presence of a derived column list +that removes ambiguities +
        • +
        • PR #2527: Feature: allow @ meta commands from Console +
        • +
        • PR #2526: Reduce I/O during database presence check and restrict some compatibility settings to their modes +
        • +
        • PR #2525: Restore support of third-party drivers in the Shell tool +
        • +
        • Issue #1710: getHigherType() returns incorrect type for some arguments +
        • +
        • PR #2516: SHUTDOWN IMMEDIATELY should be a normal shut down +
        • +
        • PR #2515: Fix nested comments in ScriptReader +
        • +
        • Issue #2511: Restrict Oracle compatibility functions to Oracle compatibility mode +
        • +
        • PR #2508: Minor refactoring around Tx isolation level +
        • +
        • PR #2505: Assorted changes in DATEADD, DATEDIFF, DATE_TRUNC, and EXTRACT +
        • +
        • Issue #2502: Combination of DML with data change delta table skips subsequent update +
        • +
        • PR #2499: Performance fix for PageStore under concurrent load +
        • +
        • PR #2498: Add some PostgreSQL compatibility features mentioned in issue #2450 +
        • +
        • Issue #2496: Error when using empty JSON_OBJECT() or JSON_ARRAY() functions +
        • +
        • PR #2495: Fix JSON_OBJECT grammar in documentation +
        • +
        • Issue #2493 / PR #2494: Replace ColumnNamer with mode-specific generation of column names for views +
        • +
        • PR #2492: Assorted changes in parser, keywords, and ILIKE condition +
        • +
        • PR #2490: Replace pg_catalog.sql with PgCatalogTable and use DATABASE_TO_LOWER in PG Server +
        • +
        • Issue #2488 / PR #2489: Mark version functions as not deterministic +
        • +
        • Issue #2481: Convert TO to keyword +
        • +
        • PR #2476: Add some PostgreSQL compatibility features mentioned in issue #2450 +
        • +
        • PR #2479: Recognize absolute path on Windows without drive letter +
        • +
        • Issue #2475: Select order by clause is exported with non-portable SQL +
        • +
        • Issue #2472: Updating column to empty string in Oracle mode with prepared statement does not result in null +
        • +
        • PR #2468: MVStore scalability improvements +
        • +
        • PR #2466: Add partial support for MySQL COLLATE and CHARACTER statements +
        • +
        • Issue #2464: `client_encoding='utf-8'` (single quoted) from `node-postgres` not recognized +
        • +
        • Issue #2461: Support for binary_float and binary_double type aliases +
        • +
        • Issue #2460: Exception when accessing empty arrays +
        • +
        • Issue #2318: Remove incorrect rows from DatabaseMetaData.getTypeInfo() and INFORMATION_SCHEMA.TYPE_INFO +
        • +
        • Issue #2455: `bytea` column incorrectly read by `psycopg2` +
        • +
        • PR #2456: Add standard array value constructor by query +
        • +
        • PR #2451: Add some PostgreSQL compatibility features mentioned in issue #2450 +
        • +
        • Issue #2448: Change default data type name from DOUBLE to DOUBLE PRECISION +
        • +
        • PR #2452: Do not use unsafe and unnecessary FROM DUAL internally +
        • +
        • PR #2449: Add support for standard trigraphs +
        • +
        • Issue #2439: StringIndexOutOfBoundsException when using TO_CHAR +
        • +
        • Issue #2444: WHEN NOT MATCHED THEN INSERT should accept only one row +
        • +
        • Issue #2434: Next value expression should return the same value within a processed row +
        • +
        • PR #2437: Assorted changes in MVStore +
        • +
        • Issue #2430: Postgres `bytea` column should be read with and without `forceBinary` +
        • +
        • Issue #2267: BINARY and VARBINARY should be different +
        • +
        • Issue #2266: CHAR and BINARY should have length 1 by default +
        • +
        • PR #2426: Add MD5 and all SHA-1, SHA-2, and SHA-3 digests to the HASH() function +
        • +
        • Issue #2424: 0 should not be accepted as a length of data type +
        • +
        • Issue #2378: JAVA_OBJECT and TableLink +
        • +
        • Issue #2417: Casts between binary strings and non-string data types +
        • +
        • Issue #2416: OTHER and JAVA_OBJECT +
        • +
        • Issue #2379: SQL export can change data type of a constant +
        • +
        • Issue #2411: ArrayIndexOutOfBoundsException when HAVING and duplicate columns in SELECT +
        • +
        • Issue #2194: Add own enumeration of data types to API +
        • +
        • PR #2408: Descending MVMap and TransactionMap cursor +
        • +
        • Issue #2399: Cast to ARRAY with a nested ARRAY does not check the maximum cardinality of the nested ARRAY +
        • +
        • Issue #2402: Remove old ValueLob and DbUpgrade +
        • +
        • Issue #2400: Inconsistent data type conversion between strings and LOBs +
        • +
        • PR #2398: Add expandable flags for SQL generation methods +
        • +
        • PR #2395: Fix for two recent page format bugs +
        • +
        • PR #2386: Chunk occupancy mask +
        • +
        • PR #2385: Memory estimate +
        • +
        • PR #2381: Follow up REPEATABLE_READ-related changes +
        • +
        • PR #2380: use JIRA tracker URLs for JDK bugs +
        • +
        • PR #2376: Fix IN condition with row value expressions in its right side +
        • +
        • Issue #2367 / PR #2370: fix backward compatibility with 1.4.200 +
        • +
        • Issue #2371: REPEATABLE READ isolation level does not work in MVStore +
        • +
        • Issue #2363: Soft links in -baseDir and database path cause error 90028 +
        • +
        • Issue #2364: TestScript datatypes/timestamp-with-time-zone.sql fails if TZ=Europe/Berlin +
        • +
        • Issue #2359: Complete implementation of generated columns +
        • +
        • PR #2361: Fix unused result +
        • +
        • PR #2353: Push binary search operation from Page to DataType +
        • +
        • Issue #2348: Add USING clause to ALTER COLUMN CHANGE DATA TYPE +
        • +
        • Issue #2350: License Problem in POM +
        • +
        • Issue #2345: Add standard SET TIME ZONE command to set current time zone of the session +
        • +
        • PR #2341: Cleanup file backend sync +
        • +
        • Issue #2343: Domain-based domains: Domain not found after reconnection +
        • +
        • Issue #2338: Domains should not support NULL constraints +
        • +
        • Issue #2334: build target mavenInstallLocal broken since commit 7cbbd55e +
        • +
        • Issue #2335: TestDateTimeUtils fails if system timezone has DST in the future +
        • +
        • Issue #2330: Syntax error with parenthesized expression in GROUP BY clause +
        • +
        • Issue #2256: <interval value expression> with datetime subtraction +
        • +
        • Issue #2325: H2 does not parse nested bracketed comments correctly +
        • +
        • Issue #466: Confusion about INFORMATION_SCHEMA content related to UNIQUE constraints +
        • +
        • PR #2323: Assorted changes +
        • +
        • Issue #2320: Remove SAMPLE_SIZE clause from SELECT +
        • +
        • Issue #2301: Add compatibility setting to accept some keywords as identifiers +
        • +
        • PR #2317: Replace CHECK_COLUMN_USAGE with CONSTRAINT_COLUMN_USAGE and other changes +
        • +
        • Issue #2315: Sequence must remember its original START WITH value +
        • +
        • Issue #2313: DISTINCT does not work in ordered aggregate functions +
        • +
        • PR #2306: Add support for RESTART of sequence without initial value +
        • +
        • Issue #2304: NPE in multiple define commands in one statement after upgrade from H2 4.1.197 +
        • +
        • PR #2303: Assorted minor changes +
        • +
        • Issue #2286: Inline check constraints not in INFORMATION_SCHEMA +
        • +
        • PR #2300: Continue generification of MVStore codebase +
        • +
        • PR #2298: add some minimal security documentation +
        • +
        • PR #2292: synchronize fileBase subclasses use of position +
        • +
        • PR #2238: Some MVStore refactoring +
        • +
        • Issue #2288: ConcurrentModificationException during commit +
        • +
        • Issue #2293: Remove TestClearReferences and workarounds for old versions of Apache Tomcat +
        • +
        • Issue #2288: ConcurrentModificationException during commit +
        • +
        • PR #2284: Remove unrelated information from README and add some information about H2 +
        • +
        • PR #2282: add PostgreSQL compatible variable STATEMENT_TIMEOUT +
        • +
        • PR #2280: little comment +
        • +
        • Issue #2205: H2 1.4.200 split FS issue +
        • +
        • Issue #2272: UpdatableView and obtaining the Generated Keys +
        • +
        • PR #2276: Split up filesystem classes +
        • +
        • PR #2275: improve detection of JAVA_HOME on Mac OS +
        • +
        • Issue #2268: Numeric division needs better algorithm for scale selection +
        • +
        • Issue #2270: IGNORE_UNKNOWN_SETTINGS is ignored +
        • +
        • PR #2269: Fix existence check of non-persistent databases +
        • +
        • Issue #1910: BinaryOperation should evaluate precision and scale properly +
        • +
        • PR #2264: Clean up redundant parts of file system abstraction +
        • +
        • PR #2262: add setting AUTO_COMPACT_FILL_RATE +
        • +
        • Issue #2255 / PR #2259: Use NIO2 in main sources and build +
        • +
        • PR #2257: Catch java.lang.NoClassDefFoundError +
        • +
        • Issue #2241: Mark H2-specific and compatibility only clauses in documentation +
        • +
        • PR #2246: Update third-party drivers +
        • +
        • Issue #2239 / PR #2236: Add NETWORK_TIMEOUT setting for SO_TIMEOUT +
        • +
        • PR #2235: Don't use RandomAccessFile in FilePathNio +
        • +
        • Issue #2233: "Prepared.getObjectId() was called before" when granting on multiple tables +
        • +
        • PR #2230: Add factory methods for Row +
        • +
        • Issue #2226, PR #2227: Remove support of Apache Ignite +
        • +
        • PR #2224: Update some hyperlinks and use https in them where possible +
        • +
        • PR #2223: Fix data change delta tables in views +
        • +
        • Issue #1943: Deadlock in TestTriggersConstraints +
        • +
        • PR #2219: do not retry failed DDL commands +
        • +
        • PR #2214: Fix TRACE_LEVEL_FILE=4 for in-memory databases +
        • +
        • PR #2216: Add FileChannel.lock in the connection URL summary +
        • +
        • PR #2215: Add white-space: pre to tables with query results +
        • +
        • Issue #2213: NUMERIC scale can be larger than a precision +
        • +
        • PR #2212: Get rid of multi-version CurrentTimestamp and fix negative scale of NUMERIC +
        • +
        • PR #2210: Meta table extras +
        • +
        • PR #2209: Add standard expressions with interval qualifier +
        • +
        • PR #2195: Feature abort_session function +
        • +
        • PR #2201: Add padding to negative years and other changes +
        • +
        • PR #2197: Add some additional methods from JDBC 4.2 and return 4.2 as supported version +
        • +
        • PR #2193: Require Java 8 and remove Java 7 support +
        • +
        • Issue #2191: NPE with H2 v1.4.200 repeatable read select queries +
        • +
        • Issue #1390: Add standard-compliant ARRAY data type syntax +
        • +
        • PR #2186: Refactor Parser.parseColumnWithType() and fix some minor issues with CAST +
        • +
        • Issue #2181: SET EXCLUSIVE quirks +
        • +
        • PR #2173: Move snapshots from Transaction to TransactionMap +
        • +
        • Issue #2175: Regression: NPE in ResultSet#getTime(int) +
        • +
        • Issue #2171: Wrong PostgreSQL compatibility syntax for the creation of indexes +
        • +
        • PR #2169: Clean up some find methods of indexes and fix minor issues with them +
        • +
        -

        Version 1.4.183 Beta (2014-12-13)

        -
        • MVStore: the default auto-commit buffer size is now about twice as big. - This should reduce the database file size after inserting a lot of data. -
        • The built-in functions "power" and "radians" now always return a double. -
        • Using "row_number" or "rownum" in views or derived tables had unexpected results - if the outer query contained constraints for the given view. Example: - select b.nr, b.id from (select row_number() over() as nr, a.id as id - from (select id from test order by name) as a) as b where b.id = 1 -
        • MVStore: the Recover tool can now deal with more types of corruption in the file. -
        • MVStore: the TransactionStore now first needs to be initialized before it can be used. -
        • Views and derived tables with equality and range conditions on the same columns - did not work properly. example: select x from (select x from (select 1 as x) - where x > 0 and x < 2) where x = 1 -
        • The database URL setting PAGE_SIZE setting is now also used for the MVStore. -
        • MVStore: the default page split size for persistent stores is now 4096 - (it was 16 KB so far). This should reduce the database file size for most situations - (in some cases, less than half the size of the previous version). -
        • With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. -
        • MVStore: use a mark and sweep GC algorithm instead of reference counting, - to ensure used chunks are never overwritten, even if the reference counting - algorithm does not work properly. -
        • In the multi-threaded mode, updating the column selectivity ("analyze") - in the background sometimes did not work. -
        • In the multi-threaded mode, database metadata operations - did sometimes not work if the schema was changed at the same time - (for example, if tables were dropped). -
        • Some CLOB and BLOB values could no longer be read when - the original row was removed (even when using the MVCC mode). -
        • The MVStoreTool could throw an IllegalArgumentException. -
        • Improved performance for some - date / time / timestamp conversion operations. - Thanks to Sergey Evdokimov for reporting the problem. -
        • H2 Console: the built-in web server did not work properly - if an unknown file was requested. -
        • MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is - deployed to Maven separately. -
        • MVStore: support for concurrent reads and writes is now enabled by default. -
        • Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, - after it was found that this improves performance on Linux quite a lot. -
        • H2 Console and server mode: SSL is now disabled and TLS is used - to protect against the Poodle SSLv3 vulnerability. - The system property to disable secure anonymous connections is now - "h2.enableAnonymousTLS". - The default certificate is still self-signed, so you need to manually install - another one if you want to avoid man in the middle attacks. -
        • MVStore: the R-tree did not correctly measure the memory usage. -
        • MVStore: compacting a store with an R-tree did not always work. -
        • Issue 581: When running in LOCK_MODE=0, - JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) - should return false -
        • Fix bug which could generate deadlocks when multiple connections accessed the same table. -
        • Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command -
        • Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations -
        • Fix "USE schema" command for MySQL compatibility, patch by mfulton -
        • Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton -
        - -

        Version 1.4.182 Beta (2014-10-17)

        -
        • MVStore: improved error messages and logging; - improved behavior if there is an error when serializing objects. -
        • OSGi: the MVStore packages are now exported. -
        • With the MVStore option, when using multiple threads - that concurrently create indexes or tables, - it was relatively easy to get a lock timeout on the "SYS" table. -
        • When using the multi-threaded option, the exception - "Unexpected code path" could be thrown, specially if the option - "analyze_auto" was set to a low value. -
        • In the server mode, when reading from a CLOB or BLOB, if the connection - was closed, a NullPointerException could be thrown instead of an exception saying - the connection is closed. -
        • DatabaseMetaData.getProcedures and getProcedureColumns - could throw an exception if a user defined class is not available. -
        • Issue 584: the error message for a wrong sequence definition was wrong. -
        • CSV tool: the rowSeparator option is no longer supported, - as the same can be achieved with the lineSeparator. -
        • Descending indexes on MVStore tables did not work properly. -
        • Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index - when using the MVStore. -
        • Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. -
        • The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. -
        • Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in - other JDBC classes. -
        • Issue 572: MySQL compatibility for "order by" in update statements. -
        • The change in JDBC escape processing in version 1.4.181 affects both the parser - (which is running on the server) and the JDBC API (which is running on the client). - If you (or a tool you use) use the syntax "{t 'time'}", or "{ts 'timestamp'}", or "{d 'date'}", - then both the client and the server need to be upgraded to version 1.4.181 or later. -
        - -

        Version 1.4.181 Beta (2014-08-06)

        -
        • Improved MySQL compatibility by supporting "use schema". - Thanks a lot to Karl Pietrzak for the patch! -
        • Writing to the trace file is now faster, specially with the debug level. -
        • The database option "defrag_always=true" did not work with the MVStore. -
        • The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. - The same for {d 'value'} (for date) and {t 'value'} (for time). - Thanks to Lukas Eder for reporting the issue. - The following problem was detected after version 1.4.181 was released: - The change in JDBC escape processing affects both the parser (which is running on the server) - and the JDBC API (which is running on the client). - If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, - then both the client and the server need to be upgraded to version 1.4.181 or later. -
        • File system abstraction: support replacing existing files using move - (currently not for Windows). -
        • The statement "shutdown defrag" now compresses the database (with the MVStore). - This command can greatly reduce the file size, and is relatively fast, - but is not incremental. -
        • The MVStore now automatically compacts the store in the background if there is no read or write activity, - which should (after some time; sometimes about one minute) reduce the file size. - This is still work in progress, feedback is welcome! -
        • Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size - (PageStore only; the MVStore already used 4096). -
        • Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better - out of box experience for people with more powerful machines. -
        • Handle tabs like 4 spaces in web console, patch by Martin Grajcar. -
        • Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, - patch by BigMichi1. -
        - -

        Version 1.4.180 Beta (2014-07-13)

        -
        • MVStore: the store is now auto-compacted automatically up to some point, - to avoid very large file sizes. This area is still work in progress. -
        • Sequences of temporary tables (auto-increment or identity columns) - were persisted unnecessarily in the database file, and were not removed - when re-opening the database. -
        • MVStore: an IndexOutOfBoundsException could sometimes - occur in MVMap.openVersion when concurrently accessing the store. -
        • The LIRS cache now re-sizes the internal hash map if needed. -
        • Optionally persist session history in the H2 console. (patch from Martin Grajcar) -
        • Add client-info property to get the number of servers currently in the cluster - and which servers that are available. (patch from Nikolaj Fogh) -
        • Fix bug in changing encrypted DB password that kept the file handle - open when the wrong password was supplied. (test case from Jens Hohmuth). -
        • Issue 567: H2 hangs for a long time then (sometimes) recovers. - Introduce a queue when doing table locking to prevent session starvation. -
        - -

        Version 1.4.179 Beta (2014-06-23)

        -
        • The license was changed to MPL 2.0 (from 1.0) and EPL 1.0. -
        • Issue 565: MVStore: concurrently adding LOB objects - (with MULTI_THREADED option) resulted in a NullPointerException. -
        • MVStore: reduced dependencies to other H2 classes. -
        • There was a way to prevent a database from being re-opened, - by creating a column constraint that references a table with a higher id, - for example with "check" constraints that contain queries. - This is now detected, and creating the table is prohibited. - In future versions of H2, most likely creating references to other - tables will no longer be supported because of such problems. -
        • MVStore: descending indexes with "nulls first" did not work as expected - (null was ordered last). -
        • Large result sets now always create temporary tables instead of temporary files. -
        • When using the PageStore, opening a database failed in some cases with a NullPointerException - if temporary tables were used (explicitly, or implicitly when using large result sets). -
        • If a database file in the PageStore file format exists, this file and this mode - is now used, even if the database URL does not contain "MV_STORE=FALSE". - If a MVStore file exists, it is used. -
        • Databases created with version 1.3.175 and earlier - that contained foreign keys in combination with multi-column indexes - could not be opened in some cases. - This was due to a bugfix in version 1.3.176: - Referential integrity constraints sometimes used the wrong index. -
        • MVStore: the ObjectDataType comparison method was incorrect if one - key was Serializable and the other was of a common class. -
        • Recursive queries with many result rows (more than the setting "max_memory_rows") - did not work correctly. -
        • The license has changed to MPL 2.0 + EPL 1.0. -
        • MVStore: temporary tables from result sets could survive re-opening a database, - which could result in a ClassCastException. -
        • Issue 566: MVStore: unique indexes that were created later on did not work correctly - if there were over 5000 rows in the table. - Existing databases need to be re-created (at least the broken index need to be re-built). -
        • MVStore: creating secondary indexes on large tables - results in missing rows in the index. -
        • Metadata: the password of linked tables is now only visible for admin users. -
        • For Windows, database URLs of the form "jdbc:h2:/test" were considered - relative and did not work unless the system property "h2.implicitRelativePath" was used. -
        • Windows: using a base directory of "C:/" and similar did not work as expected. -
        • Follow JDBC specification on Procedures MetaData, use P0 as - return type of procedure. -
        • Issue 531: IDENTITY ignored for added column. -
        • FileSystem: improve exception throwing compatibility with JDK -
        • Spatial Index: adjust costs so we do not use the spatial index if the - query does not contain an intersects operator. -
        • Fix multi-threaded deadlock when using a View that includes a TableFunction. -
        • Fix bug in dividing very-small BigDecimal numbers. -
        - -

        Version 1.4.178 Beta (2014-05-02)

        -
        • Issue 559: Make dependency on org.osgi.service.jdbc optional. -
        • Improve error message when the user specifies an unsupported combination of database settings. -
        • MVStore: in the multi-threaded mode, NullPointerException and other exceptions could occur. -
        • MVStore: some database files could not be compacted due to a bug in - the bookkeeping of the fill rate. Also, database files were compacted quite slowly. - This has been improved; but more changes in this area are expected. -
        • MVStore: support for volatile maps (that don't store changes). -
        • MVStore mode: in-memory databases now also use the MVStore. -
        • In server mode, appending ";autocommit=false" to the database URL was working, - but the return value of Connection.getAutoCommit() was wrong. -
        • Issue 561: OSGi: the import package declaration of org.h2 excluded version 1.4. -
        • Issue 558: with the MVStore, a NullPointerException could occur when using LOBs - at session commit (LobStorageMap.removeLob). -
        • Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. - We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented. -
        • Issue 554: Web Console in an IFrame was not fully supported. -
        - -

        Version 1.4.177 Beta (2014-04-12)

        -
        • By default, the MV_STORE option is enabled, so it is using the new MVStore - storage. The MVCC setting is by default set to the same values as the MV_STORE setting, - so it is also enabled by default. For testing, both settings can be disabled by appending - ";MV_STORE=FALSE" and/or ";MVCC=FALSE" to the database URL. -
        • The file locking method 'serialized' is no longer supported. - This mode might return in a future version, - however this is not clear right now. - A new implementation and new tests would be needed. -
        • Enable the new storage format for dates (system property "h2.storeLocalTime"). - For the MVStore mode, this is always enabled, but with version 1.4 - this is even enabled in the PageStore mode. -
        • Implicit relative paths are disabled (system property "h2.implicitRelativePath"), - so that the database URL jdbc:h2:test now needs to be written as jdbc:h2:./test. -
        • "select ... fetch first 1 row only" is supported with the regular mode. - This was disabled so far because "fetch" and "offset" are now keywords. - See also Mode.supportOffsetFetch. -
        • Byte arrays are now sorted in unsigned mode - (x'99' is larger than x'09'). - (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation"). -
        • Csv.getInstance will be removed in future versions of 1.4. - Use the public constructor instead. -
        • Remove support for the limited old-style outer join syntax using "(+)". - Use "outer join" instead. - System property "h2.oldStyleOuterJoin". -
        • Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility. -
        • Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier. -
        - -

        Version 1.3.176 (2014-04-05)

        -
        • The file locking method 'serialized' is no longer documented, - as it will not be available in version 1.4. -
        • The static method Csv.getInstance() was removed. - Use the public constructor instead. -
        • The default user name for the Script, RunScript, Shell, - and CreateCluster tools are no longer "sa" but an empty string. -
        • The stack trace of the exception "The object is already closed" is no longer logged by default. -
        • If a value of a result set was itself a result set, the result - could only be read once. -
        • Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS). -
        • Granting an additional right to a role that already had a right for that table was not working. -
        • Spatial index: a few bugs have been fixed (using spatial constraints in views, - transferring geometry objects over TCP/IP, the returned geometry object is copied when needed). -
        • Issue 551: the datatype documentation was incorrect (found by Bernd Eckenfels). -
        • Issue 368: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. - Test case from Angus Macdonald. -
        • OSGi: the package javax.tools is now imported (as an optional). -
        • H2 Console: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space). -
        • H2 Console: auto-complete did not work with multi-line statements. -
        • CLOB and BLOB data was not immediately removed after a rollback. -
        • There is a new Aggregate API that supports the internal H2 data types - (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch! -
        • Referential integrity constraints sometimes used the wrong index, - such that updating a row in the referenced table incorrectly failed with - a constraint violation. -
        • The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot! -
        • Issue 545: Unnecessary duplicate code was removed. -
        • The profiler tool can now process files with full thread dumps. -
        • MVStore: the file format was changed slightly. -
        • MVStore mode: the CLOB and BLOB storage was re-implemented and is - now much faster than with the PageStore (which is still the default storage). -
        • MVStore mode: creating indexes is now much faster - (in many cases faster than with the default PageStore). -
        • Various bugs in the MVStore storage have been fixed, - including a bug in the R-tree implementation. - The database could get corrupt if there were transient IO exceptions while storing. -
        • The method org.h2.expression.Function.getCost could throw a NullPointerException. -
        • Storing LOBs in separate files (outside of the main database file) - is no longer supported for new databases. -
        • Lucene 2 is no longer supported. -
        • Fix bug in calculating default MIN and MAX values for SEQUENCE. -
        • Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE -
        • Add entry-point to org.h2.tools.Shell so it can be called from inside an application. - patch by Thomas Gillet. -
        • Fix bug that prevented the PgServer from being stopped and started multiple times. -
        • Support some more DDL syntax for MySQL, patch from Peter Jentsch. -
        • Issue 548: TO_CHAR does not format MM and DD correctly when the month or day of - the month is 1 digit, patch from "the.tucc" -
        • Fix bug in varargs support in ALIAS's, patch from Nicolas Fortin -
        +

        Version 1.4.200 (2019-10-14)

        +
          +
        • PR #2168: Add non-standard SNAPSHOT isolation level to MVStore databases +
        • +
        • Issue #2165: Problem with secondary index on SERIALIZABLE isolation level +
        • +
        • Issue #2161: Remove undocumented PageStore-only FILE_LOCK=SERIALIZED +
        • +
        • PR #2155: Reduce code duplication +
        • +
        • Issue #1894: Confusing error message when database creation is disallowed +
        • +
        • Issue #2123: Random failures in TestTransactionStore +
        • +
        • Issue #2153: Different behavior in SET LOCK_TIMEOUT after 1.4.197 +
        • +
        • Issue #2150: Remove MULTI_THREADED setting and use multi-threaded MVStore and single-threaded PageStore backends +
        • +
        • Issue #216: Support READ UNCOMMITTED isolation level in MVStore mode +
        • +
        • Issue #678: Support REPEATABLE READ isolation level in MVStore mode +
        • +
        • Issue #174: Support SERIALIZABLE isolation level in MVStore mode +
        • +
        • Issue #2144: MVStore: read uncommitted doesn't see committed rows +
        • +
        • Issue #2142: CURRVAL / CURRENT VALUE FOR should return the value for the current session +
        • +
        • Issue #2136: ConstraintCheck concurrency regression +
        • +
        • PR #2137: Don't use SYSTEM_RANGE for SELECT without a FROM +
        • +
        • PR #2134: Assorted fixes and other changes in DateTimeUtils +
        • +
        • PR #2133: Optimize COUNT([ALL] constant) and other changes +
        • +
        • PR #2132: Typo and another bug in MVStore.readStoreHeader() +
        • +
        • Issue #2130: Group-sorted query returns invalid results with duplicate grouped columns in select list +
        • +
        • Issue #2120: Add IF EXISTS clause to column name in ALTER TABLE ALTER COLUMN statement +
        • +
        • Issue #521: Add support for the TIME WITH TIME ZONE data type +
        • +
        • PR #2127: Fix race condition / performance issue during snapshotting +
        • +
        • Issue #2124: MVStore build is broken +
        • +
        • PR #2122: Add support for LMT in time zones and fix large years in datetime values +
        • +
        • Issue #2067: Incorrect chunk space allocation during chunks movement +
        • +
        • PR #2066: Not so happy path - "four alternatives" implementation +
        • +
        • PR #2121: Reduce code duplication for datetime API with custom Calendar instances +
        • +
        • PR #2119: SQL: statement read consistency +
        • +
        • Issue #2116: Empty IN() operator should result in error (MSSQL) +
        • +
        • Issue #2036: CAST from TIME to TIMESTAMP returns incorrect result +
        • +
        • PR #2114: Assorted changes +
        • +
        • PR #2113: Add feature F411: Time zone specification +
        • +
        • PR #2111: CURRENT_CATALOG, SET CATALOG and other changes +
        • +
        • Issue #2109: IW date formatting does not produce proper output +
        • +
        • PR #2104: Fix ordinary grouping set with parentheses and empty grouping set in GROUP BY +
        • +
        • Issue #2103: Add QUOTE_IDENT() function to enquote an identifier in SQL +
        • +
        • Issue #2075: Add EXECUTE IMMEDIATE implementation +
        • +
        • PR #2101: Fix infinite loop in Schema.removeChildrenAndResources() +
        • +
        • Issue #2096: Convert LEFT and RIGHT to keywords and disallow comma before closing parenthesis +
        • +
        • PR #2098: Fix typos +
        • +
        • Issue #1305 / PR #2097: Remove unused and outdated website translation infrastructure +
        • +
        • PR #2093: CURRENT VALUE FOR and other sequence-related changes +
        • +
        • PR #2092: Allow to simulate usage of multiple catalogs by one connection +
        • +
        • PR #2091: Oracle mode now uses DECIMAL with NEXTVAL +
        • +
        • Issue #2088: Division by zero caused by evaluation of global conditions before local conditions +
        • +
        • Issue #2086: TCP_QUICKACK on server socket +
        • +
        • Issue #2073: TableLink should not pass queries to DatabaseMetaData.getColumns() +
        • +
        • Issue #2074: MySQL and MSSQLServer Mode: TRUNCATE TABLE should always RESTART IDENTITY +
        • +
        • Issue #2063: MySQL mode: "drop foreign key if exists" support +
        • +
        • PR #2061: Use VirtualTable as a base class for RangeTable +
        • +
        • PR #2059: Parse IN predicate with multiple subqueries correctly +
        • +
        • PR #2057: Fix TestCrashAPI failure with Statement.enquoteIdentifier() +
        • +
        • PR #2056: Happy path: speed up database opening +
        • +
        • Issue #2051: The website shows outdated information about the storage engine +
        • +
        • PR #2049: bugfix - mvstore data lost issue when partial write occurs +
        • +
        • PR #2047: File maintenance +
        • +
        • PR #2046: Recovery mode +
        • +
        • Issue #2044: setTransactionIsolation always call commit() even if transaction is auto-commit +
        • +
        • Issue #2042: Add possibility to specify generated columns for query in web console +
        • +
        • Issue #2040: INFORMATION_SCHEMA.SETTINGS contains irrelevant settings +
        • +
        • PR #2038: MVMap: lock reduction on updates +
        • +
        • PR #2037: Fix SYS_GUID, RAWTOHEX, and HEXTORAW in Oracle mode +
        • +
        • Issue #2016: ExpressionColumn.mapColumns() performance complexity is quadratic +
        • +
        • Issue #2028: Sporadic inconsistent state after concurrent UPDATE in 1.4.199 +
        • +
        • PR #2033: Assorted changes +
        • +
        • Issue #2025: Incorrect query result when (OFFSET + FETCH) > Integer.MAX_VALUE +
        • +
        • PR #2023: traverseDown() code deduplication +
        • +
        • PR #2022: Mvmap minor cleanup +
        • +
        • Issue #2020: Wrong implementation of IN predicate with subquery +
        • +
        • PR #2003: Change dead chunks determination algorithm +
        • +
        • Issue #2013: DECIMAL is casted to double in ROUND function +
        • +
        • PR #2011: ZonedDateTime and (INTERVAL / INTERVAL) +
        • +
        • Issue #1997: TestRandomSQL failure with ClassCastException +
        • +
        • Issue #2007: PostgreSQL compatibility mode: support ON CONFLICT DO NOTHING +
        • +
        • Issue #1927: Do not allow commit() when auto-commit is enabled +
        • +
        • PR #1998: Reduce TxCounter memory footprint +
        • +
        • PR #1999: Make RootReference lock re-entrant +
        • +
        • PR #2001: Test improvements, OOME elimination +
        • +
        • Issue #1995: Obscure condition in MVPrimaryIndex.extractPKFromRow() +
        • +
        • Issue #1975: Add client ip address to information_schema +
        • +
        • PR #1982: Hindi language translation added +
        • +
        • Issue #1985: Add thread number to TCP server thread names +
        • +
        • Do not allow empty password for management DB +
        • +
        • Issue #1978: getGeneratedKeys() can use the same rules as FINAL TABLE +
        • +
        • PR #1977: Change JSON literals and add support for compound character literals +
        • +
        • PR #1974: Use proleptic Gregorian calendar for datetime values +
        • +
        • Issue #1847: Add support for data change delta tables +
        • +
        • PR #1971: Add maximum cardinality parameter to ARRAY data type +
        • +
        • PR #1970: Switch from log map rename to "committed" marker log record +
        • +
        • PR #1969: Add unique predicate +
        • +
        • Issue #1963: Expression.addFilterConditions() with outer joins +
        • +
        • PR #1966: Add standard CURRENT_SCHEMA function +
        • +
        • PR #1964: Add Feature T571: Truth value tests +
        • +
        • PR #1962: Fix data types of optimized conditions +
        • +
        • PR #1961: Failure to open DB after improper shutdown +
        • +
        • Issue #1957: NullPointerException with DISTINCT and ORDER BY CASE +
        • +
        • PR #1956: Fix row value handling in the null predicate +
        • +
        • PR #1955: Add standard UNKNOWN literal +
        • +
        • Issue #1952: Connection.setSchema doesn't work with query cache +
        • +
        • PR #1951: Assorted changes +
        • +
        • PR #1950: Fix NULL handling in ARRAY_AGG +
        • +
        • PR #1949: Extract aggregate and window functions into own pages in documentation +
        • +
        • PR #1948: Add standard LOG() function with two arguments +
        • +
        • Issue #1935: Improve file locking on shared filesystems like SMB +
        • +
        • PR #1946: Reimplement table value constructor on top of Query +
        • +
        • PR #1945: Fix IN (SELECT UNION with OFFSET/FETCH) +
        • +
        • Issue #1942: MySQL Mode: convertInsertNullToZero should be turned off by default? +
        • +
        • Issue #1940: MySQL Mode: Modify column from NOT NULL to NULL syntax +
        • +
        • PR #1941: Extract OFFSET / FETCH handling from Select and SelectUnion to Query +
        • +
        • Issue #1938: Regression with CREATE OR REPLACE VIEW. Causes "Duplicate column name" exception. +
        • +
        • PR #1937: Get rid of FunctionCursorResultSet +
        • +
        • Issue #1932: Incoherence between DbSettings.mvStore and getSettings() +
        • +
        • PR #1931: Fix wildcard expansion for multiple schemas +
        • +
        • PR #1930: Move PageStore table engine into own package +
        • +
        • PR #1929: Initial implementation of type predicate and other changes +
        • +
        • PR #1926: Assorted improvements for BINARY data type +
        • +
        • Issue #1925: Support SQL Server binary literal syntax +
        • +
        • Issue #1918: MySQL: CREATE TABLE with both CHARSET and COMMENT failed +
        • +
        • Issue #1913: MySQL: auto_increment changing SQL not supported +
        • +
        • Issue #1585: The translate function on DB2 mode could have parameters order changed +
        • +
        • PR #1914: Change storage and network format of JSON to byte[] +
        • +
        • Issue #1911: Foreign key constraint does not prevent table being dropped +
        • +
        • PR #1909: Add JSON_OBJECTAGG and JSON_ARRAYAGG aggregate functions +
        • +
        • PR #1908: Cast VARCHAR to JSON properly and require FORMAT JSON in literals +
        • +
        • PR #1906: Add JSON_OBJECT and JSON_ARRAY functions +
        • +
        • Issue #1887: Infinite recursion in ConditionAndOr.java +
        • +
        • Issue #1903: MSSQLServer Mode - Support Update TOP(X) +
        • +
        • Issue #1900: Support SQLServer stored procedure execution syntax +
        • +
        • PR #1898: Add IS JSON predicate +
        • +
        • Issue #1896: MSSQLServer compatibility mode - GETDATE() incorrectly omits time +
        • +
        • PR #1895: Add standard array concatenation operation +
        • +
        • Issue #1892: Window aggregate functions return incorrect result without window ordering and with ROWS unit +
        • +
        • Issue #1890: ArrayIndexOutOfBoundsException in MVSortedTempResult.getKey +
        • +
        • Issue #308: Mode MySQL and LAST_INSERT_ID with argument +
        • +
        • Issue #1883: Suspicious code in Session.getLocks() +
        • +
        • Issue #1878: OPTIMIZE_REUSE_RESULTS causes incorrect result after rollback since 1.4.198 +
        • +
        • PR #1880: Collation names like CHARSET_* recognition +
        • +
        • Issue #1844: MySQL Compatibility: create table error when primary key has comment +
        • +
        • PR #1873: Concurrency in database metadata +
        • +
        • Issue #1864: Failing to format NotSerializableException corrupting the database +
        • +
        • PR #1868: add more checking to TestFileLock +
        • +
        • Issue #1819: Trace.db file exceed file size limit (64MB) +
        • +
        • Issue #1861: Use COALESCE in named columns join for some data types +
        • +
        • PR #1860: Additional fix for deadlock on shutdown (exclusively in PageStore mode) +
        • +
        • Issue #1855: Wrong qualified asterisked projections in named column join +
        • +
        • Issue #1854: Wrong asterisked projection and result in named column right outer join +
        • +
        • Issue #1852: Named column joins doesn't work with the VALUES constructor and derived column lists +
        • +
        • Issue #1851: Wrong asterisked projection in named column joins +
        • +
        • PR #1850: Duplicate map identifiers +
        • +
        • PR #1849: Reimplement MVStore.findOldChunks() with PriorityQueue +
        • +
        • PR #1848: Reimplement MVStore.findChunksToMove() with PriorityQueue +
        • +
        • Issue #1843: Named columns join syntax is not supported +
        • +
        • Issue #1841: Deadlock during concurrent shutdown attempts with 1.4.199 +
        • +
        • Issue #1834: NUMERIC does not preserve its scale for some values +
        • +
        • PR #1838: Implement conversion from JSON to GEOMETRY +
        • +
        • PR #1837: Implement conversion from GEOMETRY to JSON +
        • +
        • PR #1836: Add LSHIFT and RSHIFT function +
        • +
        • PR #1833: Add BITNOT function +
        • +
        • PR #1832: JSON validation and normalization +
        • +
        • PR #1829: MVStore chunks occupancy rate calculation fixes +
        • +
        • PR #1828: Basis for implementation of SQL/JSON standard +
        • +
        • PR #1827: Add support for Lucene 8.0.0 +
        • +
        • Issue #1820: Performance problem on commit +
        • +
        • Issue #1822: Use https:// in h2database.com hyperlinks +
        • +
        • PR #1817: Assorted minor changes in documentation and other places +
        • +
        • PR #1812: An IllegalStateException that wraps EOFException is thrown when partial writes happens +
        • +
- diff --git a/h2/src/docsrc/html/cheatSheet.html b/h2/src/docsrc/html/cheatSheet.html index a2dc326d7c..7226e3b749 100644 --- a/h2/src/docsrc/html/cheatSheet.html +++ b/h2/src/docsrc/html/cheatSheet.html @@ -1,7 +1,7 @@ @@ -108,18 +108,18 @@

H2 Database Engine Cheat Sheet

Using H2

-
diff --git a/h2/src/docsrc/html/faq.html b/h2/src/docsrc/html/faq.html index 5bfdf8ea55..932ef197ac 100644 --- a/h2/src/docsrc/html/faq.html +++ b/h2/src/docsrc/html/faq.html @@ -1,7 +1,7 @@ @@ -17,7 +17,7 @@ @@ -507,7 +287,7 @@

Database URL Overview

@@ -568,7 +348,7 @@

Database URL Overview

@@ -639,19 +419,30 @@

In-Memory Databases

To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1. +This may create a memory leak; when you need to remove the database, use +the SHUTDOWN command.

Database Files Encryption

-The database files can be encrypted. Two encryption algorithm AES is supported. +The database files can be encrypted. +Three encryption algorithms are supported: +

+
    +
  • "AES" - also known as Rijndael, only AES-128 is implemented.
  • +
  • "XTEA" - the 32 round version.
  • +
  • "FOG" - pseudo-encryption only useful for hiding data from a text editor.
  • +
+

To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database.

Creating a New Database with File Encryption

-By default, a new database is automatically created if it does not exist yet. -To create an encrypted database, connect to it as it would already exist. +By default, a new database is automatically created if it does not exist yet +when the embedded URL is used. +To create an encrypted database, connect to it as if it already existed locally, using the embedded URL.

Connecting to an Encrypted Database

@@ -663,7 +454,6 @@

Connecting to an Encrypted Database

password-encrypted database:

-Class.forName("org.h2.Driver");
 String url = "jdbc:h2:~/test;CIPHER=AES";
 String user = "sa";
 String pwds = "filepwd userpwd";
@@ -722,7 +512,8 @@ 

Database File Locking

Opening a Database Only if it Already Exists

By default, when an application calls DriverManager.getConnection(url, ...) -and the database specified in the URL does not yet exist, a new (empty) database is created. +with an embedded URL and the database specified in the URL does not yet exist, +a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when @@ -814,7 +605,7 @@

Changing Other Settings when Opening a Connection

Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar -or the DbSettings javadoc. +or the DbSettings javadoc.

Custom File Access Mode

@@ -853,36 +644,24 @@

Multiple Connections to the Same Database: Client/Server

Multithreading Support

-This database is multithreading-safe. That means, if an application is multi-threaded, it does not need -to worry about synchronizing access to the database. Internally, most requests to the same database -are synchronized. That means an application can use multiple threads that access the same database -at the same time, however if one thread executes a long running query, the other threads -need to wait. +This database is multithreading-safe. +If an application is multi-threaded, it does not need to worry about synchronizing access to the database. +An application should normally use one connection per thread. +This database synchronizes access to the same connection, but other databases may not do this. +To get higher concurrency, you need to use multiple connections.

-An application should normally use one connection per thread. This database synchronizes -access to the same connection, but other databases may not do this. +An application can use multiple threads that access the same database at the same time. +Threads that use different connections can use the database concurrently.

Locking, Lock-Timeout, Deadlocks

-Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. -In this case, table level locking is not used. - -If multi-version concurrency is not used, -the database uses table level locks to give each connection a consistent state of the data. -There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). -All locks are released when the transaction commits or rolls back. -When using the default transaction isolation level 'read committed', read locks are already released after each statement. -

-If a connection wants to reads from a table, and there is no write lock on the table, -then a read lock is added to the table. If there is a write lock, then this connection waits -for the other connection to release the lock. If a connection cannot get a lock for a specified time, -then a lock timeout exception is thrown. -

Usually, SELECT statements will generate read locks. This includes subqueries. -Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, +Statements that modify data use write locks on the modified rows. +It is also possible to issue write locks without modifying data, using the statement SELECT ... FOR UPDATE. +Data definition statements may issue exclusive locks on tables. The statements COMMIT and ROLLBACK releases all open locks. The commands SAVEPOINT and @@ -903,18 +682,18 @@

Locking, Lock-Timeout, Deadlocks

SCRIPT;
- + - + - + @@ -928,13 +707,6 @@

Locking, Lock-Timeout, Deadlocks

SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent.

-

Avoiding Deadlocks

-

-To avoid deadlocks, ensure that all transactions lock the tables in the same order -(for example in alphabetical order), and avoid upgrading read locks to write locks. -Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. -

-

Database File Layout

The following files are created for persistent databases: @@ -942,14 +714,32 @@

Database File Layout

-

Frequently Asked Questions

+

Frequently Asked Questions

I Have a Problem or Feature Request
@@ -47,8 +47,6 @@

Frequently Asked Questions

Column Names are Incorrect?
Float is Double?
- - Is the GCJ Version Stable? Faster?
How to Translate this Project?
@@ -69,29 +67,20 @@

Are there Known Bugs? When is the Next Release?

will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new - timezone. This problem does not occur when using the system property "h2.storeLocalTime" - (however such database files are not compatible with older versions of H2). -
  • Apache Harmony: there seems to be a bug in Harmony that affects H2. - See HARMONY-6505. -
  • Tomcat and Glassfish 3 set most static fields (final or non-final) to null when - unloading a web application. This can cause a NullPointerException in H2 versions - 1.1.107 and older, and may still not work in newer versions. Please report it if you - run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the - system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, - however Tomcat may then run out of memory. A known workaround is to - put the h2*.jar file in a shared lib directory + timezone. +
  • Old versions of Tomcat and Glassfish 3 set most static fields (final or non-final) to null when + unloading a web application. This can cause a NullPointerException. + In Tomcat >= 6.0 this behavior can be disabled by setting the + system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false. + A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). + Tomcat 8.5 and newer versions don't clear fields and don't have such property.
  • Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. -
  • When using Install4j before 4.1.4 on Linux and enabling pack200, - the h2*.jar becomes corrupted by the install process, causing application failure. - A workaround is to add an empty file h2*.jar.nopack - next to the h2*.jar file. - This problem is solved in Install4j 4.1.4.
  • -For a complete list, see Open Issues. +For a complete list, see Open Issues.

    Is this Database Engine Open Source?

    @@ -102,13 +91,13 @@

    Is this Database Engine Open Source?

    Is Commercial Support Available?

    -Yes, commercial support is available, -see Commercial Support. +No, currently commercial support is not available.

    How to Create a New Database?

    -By default, a new database is automatically created if it does not yet exist. +By default, a new database is automatically created if it does not yet exist when +the embedded URL is used. See Creating New Databases.

    @@ -119,7 +108,6 @@

    How to Connect to a Database?

    To connect to a database using JDBC, use the following code:

    -Class.forName("org.h2.Driver");
     Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
     
    @@ -130,15 +118,17 @@

    Where are the Database Files Stored?

    For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. -If the base directory is not set (as in jdbc:h2:test), +If the base directory is not set (as in jdbc:h2:./test), the database files are stored in the directory where the application is started -(the current working directory). When using the H2 Console application from the start menu, +(the current working directory). +When using the H2 Console application from the start menu, this is <Installation Directory>/bin. -The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL -jdbc:h2:file:data/sample, the database is stored in the directory +The base directory can be set in the database URL. +A fixed or relative path can be used. When using the URL +jdbc:h2:file:./data/sample, the database is stored in the directory data (relative to the current working directory). -The directory is created automatically if it does not yet exist. It is also possible to use the -fully qualified directory name (and for Windows, drive name). +The directory is created automatically if it does not yet exist. +It is also possible to use the fully qualified directory name (and for Windows, drive name). Example: jdbc:h2:file:C:/data/test

    @@ -175,10 +165,9 @@

    Is it Reliable?

    is well tested (if possible with automated test cases). The areas that are not well tested are:

      -
    • Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7 +
    • Platforms other than Windows, Linux, Mac OS X, or runtime environments other than Oracle / OpenJDK 7, 8, 9.
    • The features AUTO_SERVER and AUTO_RECONNECT.
    • Cluster mode, 2-phase commit, savepoints. -
    • 24/7 operation.
    • Fulltext search.
    • Operations on LOBs over 2 GB.
    • The optimizer may not always select the best plan. @@ -191,7 +180,6 @@

      Is it Reliable?

    • The PostgreSQL server
    • Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). -
    • Multi-threading within the engine using SET MULTI_THREADED=1.
    • Compatibility modes for other databases (only some features are implemented).
    • The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. @@ -246,13 +234,12 @@

      Column Names are Incorrect?

      return X. What's wrong?

      -This is not a bug. According the the JDBC specification, the method +This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use -ResultSetMetaData.getColumnLabel(). +ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). -If you need compatibility with those databases, use the Compatibility Mode, -or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. +If you need compatibility with those databases, use the Compatibility Mode.

      This also applies to DatabaseMetaData calls that return a result set. @@ -266,20 +253,12 @@

      Float is Double?

      return a java.lang.Float. What's wrong?

      -This is not a bug. According the the JDBC specification, the JDBC data type FLOAT +This is not a bug. According to the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also - + Mapping SQL and Java Types - 8.3.10 FLOAT.

      - -

      Is the GCJ Version Stable? Faster?

      -

      -The GCJ version is not as stable as the Java version. -When running the regression test with the GCJ version, sometimes the application just stops -at what seems to be a random point without error message. -Currently, the GCJ version is also slower than when using the Sun VM. -However, the startup of the GCJ version is faster than when using a VM. -

      +

      Use REAL or FLOAT(24) data type for java.lang.Float values.

      How to Translate this Project?

      @@ -297,7 +276,7 @@

      How to Contribute to this Project?

      code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the -feature request list. +feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well.

      diff --git a/h2/src/docsrc/html/features.html b/h2/src/docsrc/html/features.html index e8c963431e..5d1f7c7f22 100644 --- a/h2/src/docsrc/html/features.html +++ b/h2/src/docsrc/html/features.html @@ -1,7 +1,7 @@ @@ -21,8 +21,6 @@

      Features

      Feature List
      - - Comparison to Other Database Engines
      H2 in Use
      @@ -69,8 +67,8 @@

      Features

      Read Only Databases

      Read Only Databases in Zip or Jar File
      - - Computed Columns / Function Based Index
      + + Generated Columns (Computed Columns) / Function Based Index
      Multi-Dimensional Indexes
      @@ -83,6 +81,8 @@

      Features

      Compacting a Database

      Cache Settings
      + + External Authentication (Experimental)

      Feature List

      Main Features

      @@ -100,8 +100,8 @@

      Main Features

      Additional Features

      • Disk based or in-memory databases and tables, read-only database support, temporary tables -
      • Transaction support (read committed), 2-phase-commit -
      • Multiple connections, table level locking +
      • Transaction support (read uncommitted, read committed, repeatable read, snapshot), 2-phase-commit +
      • Multiple connections, row-level locking
      • Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
      • Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set @@ -116,8 +116,10 @@

        SQL Support

      • Triggers and Java functions / stored procedures
      • Many built-in functions, including XML and lossless data compression
      • Wide range of data types including large objects (BLOB/CLOB) and arrays -
      • Sequence and autoincrement columns, computed columns (can be used for function based indexes) -
      • ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP +
      • Sequences and identity columns, generated columns (can be used for function based indexes) +
      • ORDER BY, GROUP BY, HAVING, UNION, OFFSET / FETCH (including PERCENT and WITH TIES), LIMIT, TOP, + DISTINCT / DISTINCT ON (...) +
      • Window functions
      • Collation support, including support for the ICU4J library
      • Support for users and roles
      • Compatibility modes for IBM DB2, Apache Derby, HSQLDB, @@ -140,7 +142,7 @@

        Security Features

        Other Features and Tools

          -
        • Small footprint (smaller than 1.5 MB), low memory requirements +
        • Small footprint (around 2.5 MB), low memory requirements
        • Multiple index types (b-tree, tree, hash)
        • Support for multi-dimensional indexes
        • CSV (comma separated values) file support @@ -157,241 +159,10 @@

          Other Features and Tools

        • Well tested (high code coverage, randomized stress tests)
        -

        Comparison to Other Database Engines

        -

        -This comparison is based on -H2 1.3, -Apache Derby version 10.8, -HSQLDB 2.2, -MySQL 5.5, -PostgreSQL 9.0. -

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        FeatureH2DerbyHSQLDBMySQLPostgreSQL
        Pure JavaYesYesYesNoNo
        Embedded Mode (Java)YesYesYesNoNo
        In-Memory ModeYesYesYesNoNo
        Explain PlanYesYes *12YesYesYes
        Built-in Clustering / ReplicationYesYesNoYesYes
        Encrypted DatabaseYesYes *10Yes *10NoNo
        Linked TablesYesNoPartially *1Partially *2No
        ODBC DriverYesNoNoYesYes
        Fulltext SearchYesYesNoYesYes
        Domains (User-Defined Types)YesNoYesYesYes
        Files per DatabaseFewManyFewManyMany
        Row Level LockingYes *9YesYes *9YesYes
        Multi Version ConcurrencyYesNoYesYesYes
        Multi-Threaded Statement ProcessingNo *11YesYesYesYes
        Role Based SecurityYesYes *3YesYesYes
        Updatable Result SetsYesYes *7YesYesYes
        SequencesYesYesYesNoYes
        Limit and OffsetYesYes *13YesYesYes
        Window FunctionsNo *15No *15NoNoYes
        Temporary TablesYesYes *4YesYesYes
        Information SchemaYesNo *8YesYesYes
        Computed ColumnsYesYesYesNoYes *6
        Case Insensitive ColumnsYesYes *14YesYesYes *6
        Custom Aggregate FunctionsYesNoYesYesYes
        CLOB/BLOB CompressionYesNoNoNoYes
        Footprint (jar/dll size)~1.5 MB *5~3 MB~1.5 MB~4 MB~6 MB
        -

        -*1 HSQLDB supports text tables.
        -*2 MySQL supports linked MySQL tables under the name 'federated tables'.
        -*3 Derby support for roles based security and password checking as an option.
        -*4 Derby only supports global temporary tables.
        -*5 The default H2 jar file contains debug information, jar files for other databases do not.
        -*6 PostgreSQL supports functional indexes.
        -*7 Derby only supports updatable result sets if the query is not sorted.
        -*8 Derby doesn't support standard compliant information schema tables.
        -*9 When using MVCC (multi version concurrency).
        -*10 Derby and HSQLDB - don't hide data patterns well.
        -*11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC.
        -*12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans.
        -*13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY.
        -*14 Using collations. -*15 Derby and H2 support ROW_NUMBER() OVER(). -

        - -

        DaffodilDb and One$Db

        -

        -It looks like the development of this database has stopped. The last release was February 2006. -

        - -

        McKoi

        -

        -It looks like the development of this database has stopped. The last release was August 2004. -

        -

        H2 in Use

        For a list of applications that work with or use H2, see: -Links. +Links.

        Connection Modes

        @@ -413,6 +184,15 @@

        Embedded Mode

        There is no limit on the number of database open concurrently, or on the number of open connections.

        +

        +In embedded mode I/O operations can be performed by the application's threads that execute a SQL command. +The application must not interrupt these threads; doing so can lead to database corruption, +because the JVM closes the I/O handle during thread interruption. +Consider other ways to control execution of your application. +When interrupts are possible, the async: +file system can be used as a workaround, but full safety is not guaranteed. +It's recommended to use the client-server model instead; the client side may interrupt its own threads. +

        The database is embedded in the application @@ -492,7 +272,7 @@

        Database URL Overview

    Server mode (remote connections)
    using TLS
    - jdbc:h2:ssl://<server>[:<port>]/<databaseName>
    + jdbc:h2:ssl://<server>[:<port>]/[<path>]<databaseName>
    jdbc:h2:ssl://localhost:8085/~/sample;
    File locking methods - jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO}
    + jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|FS|NO}
    jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET
    Compatibility mode jdbc:h2:<url>;MODE=<databaseType>
    - jdbc:h2:~/test;MODE=MYSQL + jdbc:h2:~/test;MODE=MYSQL;DATABASE_TO_LOWER=TRUE
    WriteWrite (row-level) SELECT * FROM TEST WHERE 1=0 FOR UPDATE;
    WriteWrite (row-level) INSERT INTO TEST VALUES(1, 'Hello');
    INSERT INTO TEST SELECT * FROM TEST;
    UPDATE TEST SET NAME='Hi';
    DELETE FROM TEST;
    WriteExclusive ALTER TABLE TEST ...;
    CREATE INDEX ... ON TEST ...;
    DROP INDEX ...;
    + + -
    File NameDescriptionNumber of Files
    - test.h2.db + test.mv.db Database file.
    Contains the transaction log, indexes, and data for all tables.
    - Format: <database>.h2.db + Format: <database>.mv.db
    1 per database
    + test.newFile + + Temporary file for database compaction.
    + Contains the new MVStore file.
    + Format: <database>.newFile +
    + 0 or 1 per database +
    + test.tempFile + + Temporary file for database compaction.
    + Contains the temporary MVStore file.
    + Format: <database>.tempFile +
    + 0 or 1 per database +
    test.lock.db @@ -965,19 +755,10 @@

    Database File Layout

    Trace file (if the trace option is enabled).
    Contains trace information.
    Format: <database>.trace.db
    - Renamed to <database>.trace.db.old is too big. + Renamed to <database>.trace.db.old if too big.
    0 or 1 per database
    - test.lobs.db/* - - Directory containing one file for each
    - BLOB or CLOB value larger than a certain size.
    - Format: <id>.t<tableId>.lob.db -
    - 1 per large object -
    test.123.temp.db @@ -1030,31 +811,101 @@

    Compatibility

    (example: jdbc:h2:~/test;IGNORECASE=TRUE).

    -

    Compatibility Modes

    +

    Compatibility Modes

    For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode:

    +

    REGULAR Compatibility mode

    +

    +This mode is used by default. +

    +
    • Empty IN predicate is allowed. +
    • TOP clause in SELECT is allowed. +
    • OFFSET/LIMIT clauses are allowed. +
    • MINUS can be used instead of EXCEPT. +
    • IDENTITY can be used as a data type. +
    • Legacy SERIAL and BIGSERIAL data types are supported. +
    • AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY. +
    + +

    STRICT Compatibility Mode

    +

    +To use the STRICT mode, use the database URL jdbc:h2:~/test;MODE=STRICT +or the SQL statement SET MODE STRICT. +In this mode some deprecated features are disabled. +

    +

    +If your application or library uses only H2, or it generates different SQL for different database systems, +it is recommended to use this compatibility mode in unit tests +to reduce the possibility of accidental misuse of such features. +This mode cannot be used as a SQL validator, however. +

    +

    +It is not recommended to enable this mode in production builds of libraries, +because this mode may become more restrictive in future releases of H2 that may break your library +if it is used together with a newer version of H2. +

    +
    • Empty IN predicate is disallowed. +
    • TOP and OFFSET/LIMIT clauses are disallowed, only OFFSET/FETCH can be used. +
    • MINUS cannot be used instead of EXCEPT. +
    • IDENTITY cannot be used as a data type and AUTO_INCREMENT clause cannot be specified. +Use GENERATED BY DEFAULT AS IDENTITY clause instead. +
    • SERIAL and BIGSERIAL data types are disallowed. +Use INTEGER GENERATED BY DEFAULT AS IDENTITY or BIGINT GENERATED BY DEFAULT AS IDENTITY instead. +
    + +

    LEGACY Compatibility Mode

    +

    +To use the LEGACY mode, use the database URL jdbc:h2:~/test;MODE=LEGACY +or the SQL statement SET MODE LEGACY. +In this mode some compatibility features for applications written for H2 1.X are enabled. +This mode doesn't provide full compatibility with H2 1.X. +

    +
    • Empty IN predicate is allowed. +
    • TOP clause in SELECT is allowed. +
    • OFFSET/LIMIT clauses are allowed. +
    • MINUS can be used instead of EXCEPT. +
    • IDENTITY can be used as a data type. +
    • MS SQL Server-style IDENTITY clause is supported. +
    • Legacy SERIAL and BIGSERIAL data types are supported. +
    • AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY. +
    • If a value for an identity column was specified in an INSERT command, +the base value of the sequence generator of this column is updated if the current value of the generator was smaller +(larger for generators with negative increment) than the inserted value. +
    • Identity columns have an implicit DEFAULT ON NULL clause. +It means a NULL value may be specified for this column in an INSERT command and it will be treated as DEFAULT. +
    • Oracle-style CURRVAL and NEXTVAL can be used on sequences. +
    • TOP clause can be used in DELETE and UPDATE. +
    • Non-standard Oracle-style WHERE clause can be used in standard MERGE command. +
    • An attempt to reference a non-unique set of columns from a referential constraint +will create a UNIQUE constraint on them automatically. +
    • Unsafe comparison operators between numeric and boolean values are allowed. +
    • IDENTITY() and SCOPE_IDENTITY() are supported, but both are implemented like SCOPE_IDENTITY() +
    +

    DB2 Compatibility Mode

    -To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 +To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE DB2.

    • For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -
    • Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] - as an alternative for LIMIT .. OFFSET. -
    • Concatenating NULL with another value - results in the other value.
    • Support the pseudo-table SYSIBM.SYSDUMMY1. +
    • Timestamps with dash between date and time are supported. +
    • Datetime value functions return the same value within a command. +
    • Second and third arguments of TRANSLATE() function are swapped. +
    • LIMIT / OFFSET clauses are supported. +
    • MINUS can be used instead of EXCEPT. +
    • Unsafe comparison operators between numeric and boolean values are allowed.

    Derby Compatibility Mode

    -To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby +To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE Derby.

    • For aliased columns, ResultSetMetaData.getColumnName() @@ -1062,24 +913,22 @@

      Derby Compatibility Mode

      null.
    • For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -
    • Concatenating NULL with another value - results in the other value.
    • Support the pseudo-table SYSIBM.SYSDUMMY1. +
    • Datetime value functions return the same value within a command.

    HSQLDB Compatibility Mode

    -To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB +To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB;DEFAULT_NULL_ORDERING=FIRST or the SQL statement SET MODE HSQLDB.

    -
    • For aliased columns, ResultSetMetaData.getColumnName() - returns the alias name and getTableName() returns - null. -
    • When converting the scale of decimal data, the number is only converted if the new scale is - smaller than the current scale. Usually, the scale is converted and 0s are added if required. -
    • For unique indexes, NULL is distinct. - That means only one row with NULL in one of the columns is allowed. -
    • Text can be concatenated using '+'. +
      • Text can be concatenated using '+'. +
      • NULL value works like DEFAULT value in assignments to identity columns. +
      • Datetime value functions return the same value within a command. +
      • TOP clause in SELECT is supported. +
      • LIMIT / OFFSET clauses are supported. +
      • MINUS can be used instead of EXCEPT. +
      • Unsafe comparison operators between numeric and boolean values are allowed.

      MS SQL Server Compatibility Mode

      @@ -1093,28 +942,92 @@

      MS SQL Server Compatibility Mode

    • Identifiers may be quoted using square brackets as in [Test].
    • For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -
    • Concatenating NULL with another value - results in the other value.
    • Text can be concatenated using '+'. +
    • Arguments of LOG() function are swapped. +
    • MONEY data type is treated like NUMERIC(19, 4) data type. SMALLMONEY data type is treated like NUMERIC(10, 4) + data type. +
    • IDENTITY can be used for automatic id generation on column level. +
    • Table hints are discarded. Example: SELECT * FROM table WITH (NOLOCK). +
    • Datetime value functions return the same value within a command. +
    • 0x literals are parsed as binary string literals. +
    • TRUNCATE TABLE restarts next values of generated columns. +
    • TOP clause in SELECT, UPDATE, and DELETE is supported. +
    • Unsafe comparison operators between numeric and boolean values are allowed.
    +

    MariaDB Compatibility Mode

    +

    +To use the MariaDB mode, use the database URL jdbc:h2:~/test;MODE=MariaDB;DATABASE_TO_LOWER=TRUE. +When case-insensitive identifiers are needed append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE to URL. +Do not change value of DATABASE_TO_LOWER after creation of database. +

    +
    • Creating indexes in the CREATE TABLE statement is allowed using + INDEX(..) or KEY(..). + Example: create table test(id int primary key, name varchar(255), key idx_name(name)); +
    • When converting a floating point number to an integer, the fractional + digits are not truncated, but the value is rounded. +
    • ON DUPLICATE KEY UPDATE is supported in INSERT statements, due to this feature VALUES has special non-standard + meaning in some contexts. +
    • INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY + UPDATE is not specified. +
    • REPLACE INTO is partially supported. +
    • Spaces are trimmed from the right side of CHAR values. +
    • REGEXP_REPLACE() uses \ for back-references. +
    • Datetime value functions return the same value within a command. +
    • 0x literals are parsed as binary string literals. +
    • Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed. +
    • Some MariaDB-specific ALTER TABLE commands are partially supported. +
    • TRUNCATE TABLE restarts next values of generated columns. +
    • NEXT VALUE FOR returns different values when invoked multiple times within the same row. +
    • If the value of an identity column was manually specified, its sequence is updated to generate values after the +inserted value. +
    • NULL value works like DEFAULT value in assignments to identity columns. +
    • LIMIT / OFFSET clauses are supported. +
    • AUTO_INCREMENT clause can be used. +
    • YEAR data type is treated like SMALLINT data type. +
    • GROUP BY clause can contain 1-based positions of expressions from the SELECT list. +
    • Unsafe comparison operators between numeric and boolean values are allowed. +
    +

    +Text comparison in MariaDB is case insensitive by default, while in H2 it is case sensitive (as in most other databases). +H2 does support case insensitive text comparison, but it needs to be set separately, +using SET IGNORECASE TRUE. +This affects comparison using =, LIKE, REGEXP. +

    +

    MySQL Compatibility Mode

    -To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL -or the SQL statement SET MODE MySQL. +To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL;DATABASE_TO_LOWER=TRUE. +When case-insensitive identifiers are needed append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE to URL. +Do not change value of DATABASE_TO_LOWER after creation of database.

    -
    • When inserting data, if a column is defined to be NOT NULL - and NULL is inserted, - then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. - Usually, this operation is not allowed and an exception is thrown. -
    • Creating indexes in the CREATE TABLE statement is allowed using +
      • Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name)); -
      • Meta data calls return identifiers in lower case.
      • When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -
      • Concatenating NULL with another value - results in the other value. +
      • ON DUPLICATE KEY UPDATE is supported in INSERT statements, due to this feature VALUES has special non-standard + meaning in some contexts. +
      • INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY + UPDATE is not specified. +
      • REPLACE INTO is partially supported. +
      • Spaces are trimmed from the right side of CHAR values. +
      • REGEXP_REPLACE() uses \ for back-references. +
      • Datetime value functions return the same value within a command. +
      • 0x literals are parsed as binary string literals. +
      • Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed. +
      • Some MySQL-specific ALTER TABLE commands are partially supported. +
      • TRUNCATE TABLE restarts next values of generated columns. +
      • If the value of an identity column was manually specified, its sequence is updated to generate values after the +inserted value. +
      • NULL value works like DEFAULT value in assignments to identity columns. +
      • Referential constraints don't require an existing primary key or unique constraint on referenced columns +and create a unique constraint automatically if such constraint doesn't exist. +
      • LIMIT / OFFSET clauses are supported. +
      • AUTO_INCREMENT clause can be used. +
      • YEAR data type is treated like SMALLINT data type. +
      • GROUP BY clause can contain 1-based positions of expressions from the SELECT list. +
      • Unsafe comparison operators between numeric and boolean values are allowed.

      Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). @@ -1125,7 +1038,7 @@

      MySQL Compatibility Mode

      Oracle Compatibility Mode

      -To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle +To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle;DEFAULT_NULL_ORDERING=HIGH or the SQL statement SET MODE Oracle.

      • For aliased columns, ResultSetMetaData.getColumnName() @@ -1134,24 +1047,50 @@

        Oracle Compatibility Mode

      • When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. -
      • Concatenating NULL with another value +
      • Empty strings are treated like NULL values, concatenating NULL with another value results in the other value. -
      • Empty strings are treated like NULL values. +
      • REGEXP_REPLACE() uses \ for back-references. +
      • RAWTOHEX() converts character strings to hexadecimal representation of their UTF-8 encoding. +
      • HEXTORAW() decodes a hexadecimal character string to a binary string. +
      • DATE data type is treated like TIMESTAMP(0) data type. +
      • Datetime value functions return the same value within a command. +
      • ALTER TABLE MODIFY COLUMN command is partially supported. +
      • SEQUENCE.NEXTVAL and SEQUENCE.CURRVAL are supported and return values with DECIMAL/NUMERIC data type. +
      • Merge when matched clause may have WHERE clause. +
      • MINUS can be used instead of EXCEPT.

      PostgreSQL Compatibility Mode

      -To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL -or the SQL statement SET MODE PostgreSQL. +To use the PostgreSQL mode, use the database URL +jdbc:h2:~/test;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE;DEFAULT_NULL_ORDERING=HIGH. +Do not change value of DATABASE_TO_LOWER after creation of database.

      • For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
      • When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -
      • The system columns CTID and - OID are supported. +
      • The system columns ctid and + oid are supported.
      • LOG(x) is base 10 in this mode. +
      • REGEXP_REPLACE(): +
          +
        • uses \ for back-references;
        • +
        • does not throw an exception when the flagsString parameter contains a 'g';
        • +
        • replaces only the first matched substring in the absence of the 'g' flag in the flagsString parameter.
        • +
        +
      • LIMIT / OFFSET clauses are supported. +
      • Legacy SERIAL and BIGSERIAL data types are supported. +
      • ON CONFLICT DO NOTHING is supported in INSERT statements. +
      • Spaces are trimmed from the right side of CHAR values, but CHAR values in result sets are right-padded with + spaces to the declared length. +
      • MONEY data type is treated like NUMERIC(19, 2) data type. +
      • Datetime value functions return the same value within a transaction. +
      • ARRAY_SLICE() out of bounds parameters are silently corrected. +
      • EXTRACT function with DOW field returns (0-6), Sunday is 0. +
      • UPDATE with FROM is supported. +
      • GROUP BY clause can contain 1-based positions of expressions from the SELECT list.

      Auto-Reconnect

      @@ -1194,7 +1133,7 @@

      Automatic Mixed Mode

      which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, -the client reads .lock.db file and sends the the random key that is stored there to the server). +the client reads .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically).

      @@ -1224,10 +1163,11 @@

      Automatic Mixed Mode

      Page Size

      -The page size for new databases is 2 KB (2048), unless the page size is set +The page size for new databases is 4 KiB (4096 bytes), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. +The page size of encrypted databases must be a multiple of 4096 (4096, 8192, …).

      Using the Trace Options

      @@ -1293,7 +1233,7 @@

      Java Code Generation

      12-20 20:58:09 jdbc[0]: /**/dbMeta3.getURL(); 12-20 20:58:09 jdbc[0]: -/**/dbMeta3.getTables(null, "", null, new String[]{"TABLE", "VIEW"}); +/**/dbMeta3.getTables(null, "", null, new String[]{"BASE TABLE", "VIEW"}); ...

      @@ -1319,7 +1259,7 @@

      Using Other Logging APIs

      facility as the application, for example Log4j. To do that, this database support SLF4J.

      -SLF4J is a simple facade for various logging APIs +SLF4J is a simple facade for various logging APIs and allows to plug in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. @@ -1382,7 +1322,7 @@

      Read Only Databases in Zip or Jar File

      If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. -See also the sample application ReadOnlyDatabaseInZip. +See also the sample application ReadOnlyDatabaseInZip.

      Opening a Corrupted Database

      @@ -1392,26 +1332,32 @@

      Opening a Corrupted Database

      The exceptions are logged, but opening the database will continue.

      -

      Computed Columns / Function Based Index

      +

      Generated Columns (Computed Columns) / Function Based Index

      -A computed column is a column whose value is calculated before storing. +Each column is either a base column or a generated column. +A generated column is a column whose value is calculated before storing and cannot be assigned directly. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time:

      -CREATE TABLE TEST(ID INT, NAME VARCHAR, LAST_MOD TIMESTAMP AS NOW());
      +CREATE TABLE TEST(
      +    ID INT,
      +    NAME VARCHAR,
      +    LAST_MOD TIMESTAMP WITH TIME ZONE
      +        GENERATED ALWAYS AS CURRENT_TIMESTAMP
      +);
       

      Function indexes are not directly supported by this database, but they can be emulated -by using computed columns. For example, if an index on the upper-case version of -a column is required, create a computed column with the upper-case version of the original column, +by using generated columns. For example, if an index on the upper-case version of +a column is required, create a generated column with the upper-case version of the original column, and create an index for this column:

       CREATE TABLE ADDRESS(
           ID INT PRIMARY KEY,
           NAME VARCHAR,
      -    UPPER_NAME VARCHAR AS UPPER(NAME)
      +    UPPER_NAME VARCHAR GENERATED ALWAYS AS UPPER(NAME)
       );
       CREATE INDEX IDX_U_NAME ON ADDRESS(UPPER_NAME);
       
      @@ -1436,7 +1382,7 @@

      Multi-Dimensional Indexes

      Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. -The scalar value is indexed using a B-Tree index (usually using a computed column). +The scalar value is indexed using a B-Tree index (usually using a generated column).

      The method can result in a drastic performance improvement over just using an index on the first column. Depending on the @@ -1486,18 +1432,20 @@

      Referencing a Compiled Method

      Declaring Functions as Source Code

      When defining a function alias with source code, the database tries to compile -the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) -if the tools.jar is in the classpath. If not, javac is run as a separate process. +the source code using the Java compiler (the class javax.tools.ToolProvider.getSystemJavaCompiler()) +if it is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. -Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. +Source code can be passed as dollar quoted text ($$source code$$) to avoid escaping problems. +If you use some third-party script processing tool, use standard single quotes instead and don't forget to repeat +each single quotation mark twice within the source code. Example:

      -CREATE ALIAS NEXT_PRIME AS $$
      +CREATE ALIAS NEXT_PRIME AS '
       String nextPrime(String value) {
           return new BigInteger(value).nextProbablePrime().toString();
       }
      -$$;
      +';
       

      By default, the three packages java.util, java.math, java.sql are imported. @@ -1507,13 +1455,13 @@

      Declaring Functions as Source Code

      and separated with the tag @CODE:

      -CREATE ALIAS IP_ADDRESS AS $$
      +CREATE ALIAS IP_ADDRESS AS '
       import java.net.*;
       @CODE
       String ipAddress(String host) throws Exception {
           return InetAddress.getByName(host).getHostAddress();
       }
      -$$;
      +';
       

      The following template is used to create a complete Java class: @@ -1647,6 +1595,7 @@

      Pluggable or User-Defined Tables

      In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this: +

       package acme;
       public static class MyTableEngine implements org.h2.api.TableEngine {
      @@ -1660,12 +1609,13 @@ 

      Pluggable or User-Defined Tables

      } }
      +

      and then create the table from SQL like this: +

       CREATE TABLE TEST(ID INT, NAME VARCHAR)
           ENGINE "acme.MyTableEngine";
       
      -

      It is also possible to pass in parameters to the table engine, like so:

      @@ -1675,6 +1625,15 @@

      Pluggable or User-Defined Tables

      In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object.

      +

      +It is also possible to specify default table engine params on schema creation: +

      +
      +CREATE SCHEMA TEST_SCHEMA WITH "param1", "param2";
      +
      +

      +Params from the schema are used when CREATE TABLE issued on this schema does not have its own engine params specified. +

      Triggers

      @@ -1732,7 +1691,7 @@

      Triggers

       import org.h2.tools.TriggerAdapter;
       ...
      -public class TriggerSample implements TriggerAdapter {
      +public class TriggerSample extends TriggerAdapter {
       
           public void fire(Connection conn, ResultSet oldRow, ResultSet newRow)
                   throws SQLException {
      @@ -1782,7 +1741,7 @@ 

      Cache Settings

      is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query -SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' +SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'info.CACHE_MAX_SIZE'

      An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. @@ -1801,6 +1760,79 @@

      Cache Settings

      call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed.

      +

      External authentication (Experimental)

      +

       +External authentication allows to optionally validate user credentials externally (JAAS, LDAP, custom classes). +It is also possible to temporarily assign roles to externally authenticated users. This feature is experimental and subject to change. +

      +

      Master user cannot be externally authenticated

      +

       +To enable external authentication on a database execute the statement SET AUTHENTICATOR TRUE. This setting is persisted on the database. +

      +

       +To connect to a database by using external credentials the client must append AUTHREALM=H2 to the database URL. H2 +is the identifier of the authentication realm (see later). +

      +

       External authentication requires sending the password to the server. For this reason it works only on local connections or remote connections over SSL

      +

       +By default external authentication is performed through the JAAS login interface (configuration name is h2). +To configure JAAS add the argument -Djava.security.auth.login.config=jaas.conf +Here is an example of +JAAS login configuration file content: +

      +
      +h2 {
      +    com.sun.security.auth.module.LdapLoginModule REQUIRED \
      +    userProvider="ldap://127.0.0.1:10389" authIdentity="uid={USERNAME},ou=people,dc=example,dc=com" \
      +    debug=true useSSL=false ;
      +};
      +
      +

       +It is possible to specify custom authentication settings by using the +JVM argument -Dh2auth.configurationFile={urlOfH2Auth.xml}. Here is an example of h2auth.xml file content: +

      +
      +<h2Auth allowUserRegistration="false" createMissingRoles="true">
      +
      +    <!-- realm: DUMMY authenticate users named DUMMY[0-9] with a static password -->
      +    <realm name="DUMMY"
      +    validatorClass="org.h2.security.auth.impl.FixedPasswordCredentialsValidator">
      +        <property name="userNamePattern" value="DUMMY[0-9]" />
      +        <property name="password" value="mock" />
      +    </realm>
      +
      +    <!-- realm LDAPEXAMPLE:perform credentials validation on LDAP -->
      +    <realm name="LDAPEXAMPLE"
      +    validatorClass="org.h2.security.auth.impl.LdapCredentialsValidator">
      +        <property name="bindDnPattern" value="uid=%u,ou=people,dc=example,dc=com" />
      +        <property name="host" value="127.0.0.1" />
      +        <property name="port" value="10389" />
      +        <property name="secure" value="false" />
      +    </realm>
      +
      +    <!-- realm JAAS: perform credentials validation by using JAAS api -->
      +    <realm name="JAAS"
      +    validatorClass="org.h2.security.auth.impl.JaasCredentialsValidator">
      +        <property name="appName" value="H2" />
      +    </realm>
      +
      +    <!--Assign to each user role @{REALM} -->
      +    <userToRolesMapper class="org.h2.security.auth.impl.AssignRealmNameRole"/>
      +
      +    <!--Assign to each user role REMOTEUSER -->
      +    <userToRolesMapper class="org.h2.security.auth.impl.StaticRolesMapper">
      +        <property name="roles" value="REMOTEUSER"/>
      +    </userToRolesMapper>
      +</h2Auth>
      +
      +

      +Custom credentials validators must implement the interface +org.h2.api.CredentialsValidator +

      +

      +Custom criteria for role assignments must implement the interface +org.h2.api.UserToRoleMapper +

    diff --git a/h2/src/docsrc/html/fragments.html b/h2/src/docsrc/html/fragments.html index f82a5e3bbb..b35432e0f1 100644 --- a/h2/src/docsrc/html/fragments.html +++ b/h2/src/docsrc/html/fragments.html @@ -1,6 +1,6 @@ @@ -27,7 +27,7 @@ @@ -71,31 +71,35 @@ Installation
    Tutorial
    Features
    +Security
    Performance
    Advanced

    Reference
    -SQL Grammar
    +Commands
    Functions
    +• Aggregate +• Window +

    Data Types
    +SQL Grammar
    +System Tables
    Javadoc
    -PDF (1 MB)
    +PDF (2 MB)

    Support
    FAQ
    Error Analyzer
    -Google Group (English)
    -Google Group (Japanese)
    -Google Group (Chinese)
    +Google Group

    Appendix
    -History & Roadmap
    +History
    License
    Build
    Links
    -JaQu
    MVStore
    Architecture
    +Migration to 2.0

    @@ -116,7 +120,7 @@ document.getElementById('translate').style.display=''; var script=document.createElement('script'); script.setAttribute("type","text/javascript"); - script.setAttribute("src", "http://translate.google.com/translate_a/element.js?cb=googleTranslateElementInit"); + script.setAttribute("src", "https://translate.google.com/translate_a/element.js?cb=googleTranslateElementInit"); document.getElementsByTagName("head")[0].appendChild(script); } function googleTranslateElementInit() { diff --git a/h2/src/docsrc/html/frame.html b/h2/src/docsrc/html/frame.html index 0395283c25..42c7d4932f 100644 --- a/h2/src/docsrc/html/frame.html +++ b/h2/src/docsrc/html/frame.html @@ -1,6 +1,6 @@ diff --git a/h2/src/docsrc/html/functions-aggregate.html b/h2/src/docsrc/html/functions-aggregate.html new file mode 100644 index 0000000000..dd40bca0d3 --- /dev/null +++ b/h2/src/docsrc/html/functions-aggregate.html @@ -0,0 +1,326 @@ + + + + + + +Aggregate Functions + + + + + +
    + + +

    Aggregate Functions

    +

    Index

    +

    General Aggregate Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Binary Set Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Ordered Aggregate Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Hypothetical Set Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Inverse Distribution Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    JSON Aggregate Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Details

    + +

    Click on the header of the function to switch between railroad diagram and BNF.

    + +

    Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red, +don't use it unless you need it for compatibility with other databases or old versions of H2.

    + +

    General Aggregate Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Binary Set Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Ordered Aggregate Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Hypothetical Set Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Inverse Distribution Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    JSON Aggregate Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + + + +
    + diff --git a/h2/src/docsrc/html/functions-window.html b/h2/src/docsrc/html/functions-window.html new file mode 100644 index 0000000000..f7ad4e5933 --- /dev/null +++ b/h2/src/docsrc/html/functions-window.html @@ -0,0 +1,277 @@ + + + + + + +Window Functions + + + + + +
    + + +

    Window Functions

    +

    Index

    +

    Row Number Function

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Rank Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Lead or Lag Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Nth Value Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Other Window Functions

    + + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + +

    Details

    + +

    Click on the header of the function to switch between railroad diagram and BNF.

    + +

    Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red, +don't use it unless you need it for compatibility with other databases or old versions of H2.

    + +

    Row Number Function

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Rank Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Lead or Lag Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Nth Value Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Other Window Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + + + +
    + diff --git a/h2/src/docsrc/html/functions.html b/h2/src/docsrc/html/functions.html index b31d1eee2d..d62066ff5d 100644 --- a/h2/src/docsrc/html/functions.html +++ b/h2/src/docsrc/html/functions.html @@ -1,7 +1,7 @@ @@ -18,11 +18,11 @@

    Functions

    -

    Index

    -

    Aggregate Functions

    +

    Index

    +

    Numeric Functions

    -

    Numeric Functions

    +

    String Functions

    -

    String Functions

    +

    Time and Date Functions

    -

    Time and Date Functions

    +

    System Functions

    -

    System Functions

    +

    JSON Functions

    +

    Table Functions

    + + + + + +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + + ${item.topic}
    +
    +
    + +

    Details

    -

    Click on the header to switch between railroad diagram and BNF.

    + +

    Click on the header of the function to switch between railroad diagram and BNF.

    + +

    Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red, +don't use it unless you need it for compatibility with other databases or old versions of H2.

    + +

    Numeric Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    String Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    Time and Date Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    System Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    + +

    JSON Functions

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    - +

    Table Functions

    +

    ${item.topic}

    diff --git a/h2/src/docsrc/html/grammar.html b/h2/src/docsrc/html/grammar.html
    index f409106ca4..e4f4b98297 100644
    --- a/h2/src/docsrc/html/grammar.html
    +++ b/h2/src/docsrc/html/grammar.html
    @@ -1,7 +1,7 @@
     
     
     
    @@ -18,11 +18,11 @@
     
     
     

    SQL Grammar

    -

    Index

    -

    Commands (Data Manipulation)

    +

    Index

    +

    Literals

    -

    Commands (Data Definition)

    +

    Datetime fields

    - -

    Commands (Other)

    - - - - - @@ -131,18 +103,15 @@

    Other Grammar

    - - ${item.topic}
    -
    -
    - - ${item.topic}
    -
    -
    - + ${item.topic}
    -

    System Tables

    -

    -Information Schema
    -Range Table
    -

    - -

    Details

    -

    Click on the header to switch between railroad diagram and BNF.

    + +

    Click on the header of the grammar element to switch between railroad diagram and BNF.

    +

    Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red, +don't use it unless you need it for compatibility with other databases or old versions of H2.

    - +

    Literals

    +

    ${item.topic}

    @@ -163,10 +132,16 @@ 

    ${item.topic ${item.example}

    - +

    Datetime fields

    +

    ${item.topic}

    +
    +${item.syntax}
    +
    +
    ${item.railroad} +

    ${item.text}

    Example:

    -

    ${item.example}

    -
    - -

    Information Schema

    -

    -The system tables in the schema INFORMATION_SCHEMA contain the meta data -of all tables in the database as well as the current settings. -

    - - - - - - - +

    +${item.example}

    -
    TableColumns
    ${item.topic}${item.syntax}
    -

    Range Table

    -

    -The range table is a dynamic system table that contains all values from a start to an end value. -The table contains one column called X. Both the start and end values are included in the result. -The table is used as follows: -

    -

    Example:

    +

    Other Grammar

    + +

    ${item.topic}

    + +
    +${item.syntax}
    +
    +
    +${item.railroad} +
    + + +

    ${item.text}

    +

    Example:

    +

    ${item.example}

    +
    diff --git a/h2/src/docsrc/html/history.html b/h2/src/docsrc/html/history.html index 4446f91a64..b5068a54c6 100644 --- a/h2/src/docsrc/html/history.html +++ b/h2/src/docsrc/html/history.html @@ -1,7 +1,7 @@ @@ -17,11 +17,9 @@
    -

    History and Roadmap

    +

    History

    Change Log
    - - Roadmap
    History of this Database Engine
    @@ -31,25 +29,15 @@

    History and Roadmap

    Change Log

    -The up-to-date change log is available at - -http://www.h2database.com/html/changelog.html - -

    - -

    Roadmap

    -

    -The current roadmap is available at - -http://www.h2database.com/html/roadmap.html - +The up-to-date change log is available +here

    History of this Database Engine

    The development of H2 was started in May 2004, but it was first published on December 14th 2005. -The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. +The original author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continued to work on the Hypersonic SQL codebase. @@ -90,20 +78,28 @@

    Why Java

    Supporters

    Many thanks for those who reported bugs, gave valuable feedback, -spread the word, and translated this project. Also many thanks to the donors. +spread the word, and translated this project. +

    +

    +Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). +Donators are:

    -
    diff --git a/h2/src/docsrc/html/installation.html b/h2/src/docsrc/html/installation.html index 0b365ae045..f787f957ed 100644 --- a/h2/src/docsrc/html/installation.html +++ b/h2/src/docsrc/html/installation.html @@ -1,7 +1,7 @@ @@ -36,7 +36,7 @@

    Requirements

    Database Engine

    • Windows XP or Vista, Mac OS X, or Linux -
    • Sun Java 6 or newer +
    • Oracle Java 8 or newer
    • Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB)
    @@ -47,11 +47,8 @@

    H2 Console

    Supported Platforms

    As this database is written in Java, it can run on many different platforms. -It is tested with Java 6 and 7. -Currently, the database is developed and tested on Windows 8 -and Mac OS X using Java 6, but it also works in many other operating systems -and using other Java runtime environments. -All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. +It is tested with Java 8 and 11. +All major operating systems (Windows, Mac OS X, Linux, ...) are supported.

    Installing the Software

    diff --git a/h2/src/docsrc/html/jaqu.html b/h2/src/docsrc/html/jaqu.html deleted file mode 100644 index 0f072afa1f..0000000000 --- a/h2/src/docsrc/html/jaqu.html +++ /dev/null @@ -1,340 +0,0 @@ - - - - - - -JaQu - - - - - -
    - - -

    JaQu

    - - What is JaQu
    - - Differences to Other Data Access Tools
    - - Current State
    - - Building the JaQu Library
    - - Requirements
    - - Example Code
    - - Configuration
    - - Natural Syntax
    - - Other Ideas
    - - Similar Projects
    - -

    What is JaQu

    -

    -Note: This project is currently in maintenance mode. -A friendly fork of JaQu is -available under the name iciql. -

    -

    -JaQu stands for Java Query and allows to access databases using pure Java. -JaQu provides a fluent interface (or internal DSL). -JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a -Microsoft .NET technology). The following JaQu code: -

    -
    -Product p = new Product();
    -List<Product> soldOutProducts =
    -    db.from(p).where(p.unitsInStock).is(0).select();
    -
    -

    -stands for the SQL statement: -

    -
    -SELECT * FROM PRODUCTS P
    -WHERE P.UNITS_IN_STOCK = 0
    -
    - -

    Differences to Other Data Access Tools

    -

    -Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, -auto-complete in the IDE is supported. Type checking is performed by the compiler. -JaQu fully protects against SQL injection. -

    -

    -JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. -With JaQu, you don't write SQL statements as strings. -JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, -but it also does not provide all the features of those. -Unlike iBatis and Hibernate, no XML or annotation based configuration is required; -instead the configuration (if required at all) is done in pure Java, within the application. -

    -

    -JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, -JaQu provides full control over when and what SQL statements are executed -(but without having to write SQL statements as strings). -

    - -

    Restrictions

    -

    -Primitive types (eg. boolean, int, long, double) are not supported. -Use java.lang.Boolean, Integer, Long, Double instead. -

    - -

    Why in Java?

    -

    -Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) -in the same application is complicated: you would need to split the application and database code, -and write adapter / wrapper code. -

    - -

    Current State

    -

    -Currently, JaQu is only tested with the H2 database. The API may change in future versions. -JaQu is not part of the h2 jar file, however the source code is included in H2, under: -

    -
    • src/test/org/h2/test/jaqu/* (samples and tests) -
    • src/tools/org/h2/jaqu/* (framework) -
    - -

    Building the JaQu Library

    -

    -To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar. -

    - -

    Requirements

    -

    -JaQu requires Java 6. Annotations are not need. -Currently, JaQu is only tested with the H2 database engine, however in theory it should -work with any database that supports the JDBC API. -

    - -

    Example Code

    -
    -package org.h2.test.jaqu;
    -import java.math.BigDecimal;
    -import java.util.List;
    -import org.h2.jaqu.Db;
    -import static org.h2.jaqu.Function.*;
    -
    -public class Test {
    -    Db db;
    -
    -    public static void main(String[] args) throws Exception {
    -        new SamplesTest().test();
    -    }
    -
    -    public void test() throws Exception {
    -        db = Db.open("jdbc:h2:mem:", "sa", "sa");
    -        db.insertAll(Product.getProductList());
    -        db.insertAll(Customer.getCustomerList());
    -        db.insertAll(Order.getOrderList());
    -        testLength();
    -        testCount();
    -        testGroup();
    -        testSelectManyCompoundFrom2();
    -        testWhereSimple4();
    -        testSelectSimple2();
    -        testAnonymousTypes3();
    -        testWhereSimple2();
    -        testWhereSimple3();
    -        db.close();
    -    }
    -
    -    private void testWhereSimple2() throws Exception {
    -        Product p = new Product();
    -        List<Product> soldOutProducts =
    -            db.from(p).
    -            where(p.unitsInStock).is(0).
    -            orderBy(p.productId).select();
    -    }
    -
    -    private void testWhereSimple3() throws Exception {
    -        Product p = new Product();
    -        List<Product> expensiveInStockProducts =
    -            db.from(p).
    -            where(p.unitsInStock).bigger(0).
    -            and(p.unitPrice).bigger(3.0).
    -            orderBy(p.productId).select();
    -    }
    -
    -    private void testWhereSimple4() throws Exception {
    -        Customer c = new Customer();
    -        List<Customer> waCustomers =
    -            db.from(c).
    -            where(c.region).is("WA").
    -            select();
    -    }
    -
    -    private void testSelectSimple2() throws Exception {
    -        Product p = new Product();
    -        List<String> productNames =
    -            db.from(p).
    -            orderBy(p.productId).select(p.productName);
    -    }
    -
    -    public static class ProductPrice {
    -        public String productName;
    -        public String category;
    -        public Double price;
    -    }
    -
    -    private void testAnonymousTypes3() throws Exception {
    -        final Product p = new Product();
    -        List<ProductPrice> productInfos =
    -            db.from(p).orderBy(p.productId).
    -            select(new ProductPrice() {{
    -                    productName = p.productName;
    -                    category = p.category;
    -                    price = p.unitPrice;
    -            }});
    -    }
    -
    -    public static class CustOrder {
    -        public String customerId;
    -        public Integer orderId;
    -        public BigDecimal total;
    -    }
    -
    -    private void testSelectManyCompoundFrom2() throws Exception {
    -        final Customer c = new Customer();
    -        final Order o = new Order();
    -        List<CustOrder> orders =
    -            db.from(c).
    -            innerJoin(o).on(c.customerId).is(o.customerId).
    -            where(o.total).smaller(new BigDecimal("500.00")).
    -            orderBy(1).
    -            select(new CustOrder() {{
    -                customerId = c.customerId;
    -                orderId = o.orderId;
    -                total = o.total;
    -            }});
    -    }
    -
    -    private void testLength() throws Exception {
    -        Product p = new Product();
    -        List<Integer> lengths =
    -            db.from(p).
    -            where(length(p.productName)).smaller(10).
    -            orderBy(1).
    -            selectDistinct(length(p.productName));
    -    }
    -
    -    private void testCount() throws Exception {
    -        long count = db.from(new Product()).selectCount();
    -    }
    -
    -    public static class ProductGroup {
    -        public String category;
    -        public Long productCount;
    -    }
    -
    -    private void testGroup() throws Exception {
    -        final Product p = new Product();
    -        List<ProductGroup> list =
    -            db.from(p).
    -            groupBy(p.category).
    -            orderBy(1).
    -            select(new ProductGroup() {{
    -                category = p.category;
    -                productCount = count();
    -            }});
    -    }
    -
    -}
    -
    - -

    Configuration

    -

    -JaQu does not require any configuration when using the default field to column mapping. -To define table indices, or if you want to map a class to a table with a different name, -or a field to a column with another name, create a function called define in the data class. -Example: -

    -
    -import static org.h2.jaqu.Define.*;
    -
    -public class Product implements Table {
    -
    -    public Integer productId;
    -    public String productName;
    -    public String category;
    -    public Double unitPrice;
    -    public Integer unitsInStock;
    -
    -    public void define() {
    -        tableName("Product");
    -        primaryKey(productId);
    -        index(productName, category);
    -    }
    -
    -}
    -
    -

    -The method define() contains the mapping definition. It is called once -when the class is used for the first time. Like annotations, the mapping is defined in the class itself. -Unlike when using annotations, the compiler can check the syntax even for multi-column -objects (multi-column indexes, multi-column primary keys and so on). -Because the definition is written in Java, the configuration can be set at runtime, -which is not possible using annotations. -Unlike XML mapping configuration, the configuration is integrated in the class itself. -

    - -

    Natural Syntax

    -

    The plan is to support more natural (pure Java) syntax in conditions. -To do that, the condition class is de-compiled to a SQL condition. -A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). -The planned syntax is: -

    -
    -long count = db.from(co).
    -    where(new Filter() { public boolean where() {
    -        return co.id == x
    -            && co.name.equals(name)
    -            && co.value == new BigDecimal("1")
    -            && co.amount == 1L
    -            && co.birthday.before(new java.util.Date())
    -            && co.created.before(java.sql.Timestamp.valueOf("2005-05-05 05:05:05"))
    -            && co.time.before(java.sql.Time.valueOf("23:23:23"));
    -        } }).selectCount();
    -
    - -

    Other Ideas

    -

    -This project has just been started, and nothing is fixed yet. -Some ideas are: -

    -
    • Support queries on collections (instead of using a database). -
    • Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). -
    • Internally use a JPA implementation (for example Hibernate) instead of SQL directly. -
    • Use PreparedStatements and cache them. -
    - -

    Similar Projects

    -

    -iciql (a friendly fork of JaQu)
    -Cement Framework
    -Dreamsource ORM
    -Empire-db
    -JEQUEL: Java Embedded QUEry Language
    -Joist
    -jOOQ
    -JoSQL
    -LIQUidFORM
    -Quaere (Alias implementation)
    -Quaere
    -Querydsl
    -Squill
    -

    - -
    - diff --git a/h2/src/docsrc/html/license.html b/h2/src/docsrc/html/license.html index 699609dd50..1f228df8f1 100644 --- a/h2/src/docsrc/html/license.html +++ b/h2/src/docsrc/html/license.html @@ -1,7 +1,7 @@ @@ -30,8 +30,8 @@

    License

    Summary and License FAQ

    -H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) -or under the EPL 1.0 (Eclipse Public License). +H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) +or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL.

      @@ -47,7 +47,7 @@

      Summary and License FAQ

      However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the -Wayback Machine and visit old web pages of http://www.bungisoft.com. +Wayback Machine and visit old web pages of http://www.bungisoft.com.

      About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. @@ -62,11 +62,11 @@

      Summary and License FAQ

       This software contains unmodified binary redistributions for
      -H2 database engine (http://www.h2database.com/),
      +H2 database engine (https://h2database.com/),
       which is dual licensed and available under the MPL 2.0
       (Mozilla Public License) or under the EPL 1.0 (Eclipse Public License).
       An original copy of the license agreement can be found at:
      -http://www.h2database.com/html/license.html
      +https://h2database.com/html/license.html
       

      Mozilla Public License Version 2.0

      @@ -158,7 +158,7 @@

      Exhibit A - Source Code Form License Notice

       This Source Code Form is subject to the terms of the Mozilla
       Public License, v. 2.0. If a copy of the MPL was not distributed
      -with this file, you can obtain one at http://mozilla.org/MPL/2.0
      +with this file, you can obtain one at https://mozilla.org/MPL/2.0
       

      If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.

      You may add additional accurate notices of copyright ownership.

      @@ -395,9 +395,9 @@

      7. GENERAL

      Export Control Classification Number (ECCN)

      -As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. +As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. -For details, see also the Apache Software Foundation Export Classifications page. +For details, see also the Apache Software Foundation Export Classifications page.

      diff --git a/h2/src/docsrc/html/links.html b/h2/src/docsrc/html/links.html index 91dc087b9e..98cf0cad6a 100644 --- a/h2/src/docsrc/html/links.html +++ b/h2/src/docsrc/html/links.html @@ -1,7 +1,7 @@ @@ -21,8 +21,6 @@

      Links

      If you want to add a link, please send it to the support email address or post it to the group.

      - - Commercial Support
      Quotes
      @@ -36,23 +34,9 @@

      Links

      Products and Projects
      -

      Commercial Support

      -

      -Commercial support for H2 is available -from Steve McLeod (steve dot mcleod at gmail dot com). -Please note he is not one of the main developers of H2. He describes himself as follows: -

      -
      • I'm a long time user of H2, routinely working with H2 databases several gigabytes in size. -
      • I'm the creator of popular commercial desktop software that uses H2. -
      • I'm a certified Java developer (SCJP). -
      • I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany. -
      • I'm based in Germany, and willing to travel within Europe. - I can work remotely with teams in the USA and other locations." -
      -

      Quotes

      - + Quote: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. @@ -61,34 +45,34 @@

      Quotes

      Books

      - + Seam In Action

      Extensions

      - + Grails H2 Database Plugin
      - + h2osgi: OSGi for the H2 Database
      - + H2Sharp: ADO.NET interface for the H2 database engine
      A spatial extension of the H2 database.

      Blog Articles, Videos

      - + Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
      Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
      - + Efficient sorting and iteration on large databases (2009-06-15)
      Porting Flexive to the H2 Database (2008-12-05)
      H2 Database with GlassFish (2008-11-24)
      - + H2 Database - Performance Tracing (2008-04-30)
      Open Source Databases Comparison (2007-09-11)
      @@ -102,13 +86,13 @@

      Blog Articles, Videos

      The Codist: Write Your Own Database, Again (2006-11-13)

      Project Pages

      - + Ohloh
      - + Freshmeat Project Page
      - + Wikipedia
      - + Java Source Net
      Linux Package Manager
      @@ -125,7 +109,7 @@

      Database Frontends / Tools

      SQL query tool.

      -

      +

      DbVisualizer
      Database tool.

      @@ -135,7 +119,7 @@

      Database Frontends / Tools

      Database utility written in Java.

      -

      +

      Flyway
      The agile database migration framework for Java.

      @@ -156,17 +140,17 @@

      Database Frontends / Tools

      HenPlus is a SQL shell written in Java.

      -

      +

      JDBC lint
      Helps write correct and efficient code when using the JDBC API.

      -

      +

      OpenOffice
      Base is OpenOffice.org's database application. It provides access to relational data sources.

      -

      +

      RazorSQL
      An SQL query tool, database browser, SQL editor, and database administration tool.

      @@ -176,7 +160,7 @@

      Database Frontends / Tools

      Universal Database Frontend.

      -

      +

      SQL Workbench/J
      Free DBMS-independent SQL tool.

      @@ -186,7 +170,7 @@

      Database Frontends / Tools

      Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.

      -

      +

      SQuirreL DB Copy Plugin
      Tool to copy data from one database to another.

      @@ -198,7 +182,7 @@

      Products and Projects

      Visual business process modeling and simulation software for business users.

      -

      +

      Adeptia BPM
      A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.

      @@ -208,7 +192,7 @@

      Products and Projects

      Process-centric, services-based application integration suite.

      -

      +

      Aejaks
      A server-side scripting environment to build AJAX enabled web applications.

      @@ -218,17 +202,17 @@

      Products and Projects

      A web framework that let's you write dynamic web applications with Zen-like simplicity.

      -

      +

      Apache Cayenne
      Open source persistence framework providing object-relational mapping (ORM) and remoting services.

      -

      +

      Apache Jackrabbit
      Open source implementation of the Java Content Repository API (JCR).

      -

      +

      Apache OpenJPA
      Open source implementation of the Java Persistence API (JPA).

      @@ -238,7 +222,7 @@

      Products and Projects

      Helps building web applications.

      -

      +

      BGBlitz
      The Swiss army knife of Backgammon.

      @@ -254,7 +238,7 @@

      Products and Projects

      JSR 168 compliant bookmarks management portlet application.

      -

      +

      Claros inTouch
      Ajax communication suite with mail, addresses, notes, IM, and rss reader.

      @@ -285,7 +269,7 @@

      Products and Projects

      Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets).

      -

      +

      District Health Information Software 2 (DHIS)
      The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. @@ -296,7 +280,7 @@

      Products and Projects

      Open source Java Object Relational Mapping tool.

      -

      +

      Eclipse CDO
      The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. @@ -307,7 +291,7 @@

      Products and Projects

      Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org).

      -

      +

      FIT4Data
      A testing framework for data management applications built on the Java implementation of FIT.

      @@ -322,7 +306,7 @@

      Products and Projects

      GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.

      -

      +

      GBIF Integrated Publishing Toolkit (IPT)
      The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, @@ -339,7 +323,7 @@

      Products and Projects

      Fun-to-play games with a simple interface.

      -

      +

      GridGain
      GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications @@ -356,12 +340,12 @@

      Products and Projects

      High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.

      -

      +

      Hibernate
      Relational persistence for idiomatic Java (O-R mapping tool).

      -

      +

      Hibicius
      Online Banking Client for the HBCI protocol.

      @@ -383,12 +367,12 @@

      Products and Projects

      Java Spatial. Jaspa potentially brings around 200 spatial functions.

      -

      +

      Java Simon
      Simple Monitoring API.

      -

      +

      JBoss jBPM
      A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.

      @@ -409,7 +393,7 @@

      Products and Projects

      Free, multi platform, open source GIS based on the GIS framework of uDig.

      -

      +

      Jena
      Java framework for building Semantic Web applications.

      @@ -419,8 +403,8 @@

      Products and Projects

      Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.

      -

      -jOOQ (Java Object Oriented Querying)
      +

      +jOOQ (JOOQ Object Oriented Querying)
      jOOQ is a fluent API for typesafe SQL query construction and execution

      @@ -429,7 +413,7 @@

      Products and Projects

      A Scala-based, secure, developer friendly web framework.

      -

      +

      LiquiBase
      A tool to manage database changes and refactorings.

      @@ -439,7 +423,7 @@

      Products and Projects

      Build automation and management tool.

      -

      +

      localdb
      A tool that locates the full file path of the folder containing the database files.

      @@ -465,7 +449,7 @@

      Products and Projects

      Java web app that provides dynamic web content and Java libraries access from JavaScript.

      -

      +

      MyTunesRss
      MyTunesRSS lets you listen to your music wherever you are.

      @@ -501,7 +485,7 @@

      Products and Projects

      understand the application structure.

      -

      +

      Ontology Works
      This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), @@ -526,7 +510,7 @@

      Products and Projects

      OpenGroove is a groupware program that allows users to synchronize data.

      -

      +

      OpenSocial Development Environment (OSDE)
      Development tool for OpenSocial application.

      @@ -538,10 +522,10 @@

      Products and Projects

      P5H2
      -A library for the Processing programming language and environment. +A library for the Processing programming language and environment.

      -

      +

      Phase-6
      A computer based learning software.

      @@ -561,7 +545,7 @@

      Products and Projects

      Open source database benchmark.

      -

      +

      Poormans
      Very basic CMS running as a SWT application and generating static html pages.

      @@ -572,7 +556,7 @@

      Products and Projects

      programmed in CFML into Java bytecode and executes it on a servlet engine.

      -

      +

      Razuna
      Open source Digital Asset Management System with integrated Web Content Management.

      @@ -592,7 +576,7 @@

      Products and Projects

      ETL (Extract-Transform-Load) and script execution tool.

      -

      +

      Sesar
      Dependency Injection Container with Aspect Oriented Programming.

      @@ -607,7 +591,7 @@

      Products and Projects

      A free, light-weight, java data access framework.

      -

      +

      ShapeLogic
      Toolkit for declarative programming, image processing and computer vision.

      @@ -632,7 +616,7 @@

      Products and Projects

      A web-enabled, database independent, data synchronization/replication software.

      -

      +

      SmartFoxServer
      Platform for developing multiuser applications and games with Macromedia Flash.

      @@ -647,7 +631,7 @@

      Products and Projects

      Simple object relational mapping.

      -

      +

      Springfuse
      Code generation For Spring, Spring MVC & Hibernate.

      @@ -674,10 +658,10 @@

      Products and Projects

      Event (stream) processing kernel.

      -

      +

      SUSE Manager, part of Linux Enterprise Server 11
      The SUSE Manager - + eases the burden of compliance with regulatory requirements and corporate policies.

      @@ -686,7 +670,15 @@

      Products and Projects

      Easy-to-use backup solution for your iTunes library.

      -

      +

      +TimeWriter
      +TimeWriter is a very flexible program for time administration / time tracking. +The older versions used dBase tables. +The new version 5 is completely rewritten, now using the H2 database. +TimeWriter is delivered in Dutch and English. +

      + +

      weblica
      Desktop CMS.

      @@ -696,7 +688,7 @@

      Products and Projects

      Collaborative and realtime interactive media platform for the web.

      -

      +

      Werkzeugkasten
      Minimum Java Toolset.

      @@ -707,7 +699,7 @@

      Products and Projects

      for building applications composed from server components - view providers.

      -

      +

      Volunteer database
      A database front end to register volunteers, partnership and donation for a Non Profit organization.

      diff --git a/h2/src/docsrc/html/main.html b/h2/src/docsrc/html/main.html index 4aa9d265f8..ea060a9132 100644 --- a/h2/src/docsrc/html/main.html +++ b/h2/src/docsrc/html/main.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/html/mainWeb.html b/h2/src/docsrc/html/mainWeb.html index 3c2c95e168..07f12b2267 100644 --- a/h2/src/docsrc/html/mainWeb.html +++ b/h2/src/docsrc/html/mainWeb.html @@ -1,7 +1,7 @@ @@ -13,8 +13,8 @@ H2 Database Engine - - + + @@ -29,7 +29,7 @@

      H2 Database Engine

    • Very fast, open source, JDBC API
    • Embedded and server modes; in-memory databases
    • Browser based Console application -
    • Small footprint: around 1.5 MB jar file size +
    • Small footprint: around 2.5 MB jar file size
    @@ -37,17 +37,17 @@

    H2 Database Engine

    Download

    - Version ${version} (${versionDate}), Beta + Version ${version} (${versionDate})
    - Download this database + Download this database - Windows Installer (5 MB) + Windows Installer (6.7 MB)
    - Download this database + Download this database - All Platforms (zip, 8 MB) + All Platforms (zip, 9.5 MB)
    All Downloads @@ -60,9 +60,8 @@

    Download

    Support

    - Stack Overflow (tag H2)

    - Google Group English, - Japanese

    + Stack Overflow (tag H2)

    + Google Group

    For non-technical issues, use:
    + +
    + + +

    Contents

    + + Introduction
    + + Upgrading
    + + File Format
    + + Data types
    + + Identity columns and sequences
    + + INFORMATION_SCHEMA
    + + General
    + +

    Introduction

    + +

    +Between version 1.4.200 and version 2.0.202 there have been considerable changes, such that a simple update is +not possible. +

    + +

    +It would have been nice to write some kind of migration tool, or auto-detect the file and upgrade. Unfortunately, this +is purely a volunteer-run project, so this is just the way it has to be. There exists a migration tool H2MigrationTool available +on GitHub, but it hasn't been tested by our team. Use at +your own risk. +

    + +

    Upgrading

    + +

    +The official way to upgrade is to export it into SQL script with the +SCRIPT command +USING YOUR CURRENT VERSION OF H2. +

    + +

    +Then create a fresh database USING THE NEW VERSION OF H2, then perform a +RUNSCRIPT to load your data. +You may need to specify FROM_1X flag, see documentation of this command for details. +

    + +

    MVStore file format

    + +

    +The MVStore file format we use (i.e. the default) is still mostly the same, but some subtle changes have been made +to the undo logs, +for the purposes of improving crash safety and also read/write performance. +

    + +

    Data types

    + +

    +The maximum length of CHARACTER +and CHARACTER VARYING data types +is now 1,048,576 characters. For larger values use +CHARACTER LARGE OBJECT. +

    + +

    +BINARY +and BINARY VARYING +are now different data types. BINARY means fixed-length data type and its default length is 1. +The maximum length of binary strings is 1,048,576 bytes. For larger values use +BINARY LARGE OBJECT +

    + +

    +NUMERIC / DECIMAL / DEC without parameters +now have scale 0. For a variable-scale data type see +DECFLOAT. +Negative scale isn't allowed for these data types any more. +The maximum precision is now 100,000. +

    + +

    +ENUM values now have 1-based ordinal numbers. +

    + +

    +Arrays are now typed. +Arrays with mixed types of elements aren't supported. +In some cases they can be replaced with a new ROW +data type. +

    + +

    +All non-standard data types, with exception for TINYINT, JAVA_OBJECT, ENUM, GEOMETRY, JSON, and UUID are deprecated. +

    + +

    Identity columns and sequences

    + +

    +Various legacy vendor-specific declarations and expressions are deprecated +and may not work at all depending on compatibility mode. +

    + +

    +Identity columns should be normally declared with GENERATED BY DEFAULT AS IDENTITY or GENERATED ALWAYS AS IDENTITY +clauses, options may also be specified. +GENERATED ALWAYS AS IDENTITY columns cannot be assigned to a user-provided value +unless OVERRIDING SYSTEM VALUE is specified. +

    + +

    +NULL cannot be specified as a value for IDENTITY column to force identity generation +(with exception for some compatibility modes). +Use DEFAULT or simply exclude this column from insert column list. +

    + +

    +IDENTITY() and SCOPE_IDENTITY() aren't available in Regular mode. If you need to get a generated value, +you need to use data change delta tables +or Statement.getGeneratedKeys(). +

    + +

    +Undocumented Oracle-style .NEXTVAL and .CURRVAL expressions are restricted to Oracle compatibility mode. +Other functions are deprecated for Regular mode. +Use sequence value expression instead. +

    + +

    INFORMATION_SCHEMA

    + +

    +INFORMATION_SCHEMA in H2 is now compliant with the SQL Standard and other database systems, +but it isn't compliant with previous versions of H2. +You may need to update your queries. +

    + +

    General

    + +

    +There are a lot more SQL keywords now. Many SQL statements feature far better support of SQL-Standard behaviour. +There is a NON_KEYWORDS setting that +can be used as a temporary workaround if your application uses them as unquoted identifiers. +

    + +

    +Numeric and boolean values aren't comparable. It means you need to use TRUE, FALSE, or UNKNOWN (NULL) +as boolean literals. 1 and 0 don't work any more (with exception for some compatibility modes). +

    + +

    +Some other non-standard SQL syntax has been restricted to related compatibility modes. +Since H2 2.0.204 there is a LEGACY compatibility mode that provides some limited compatibility with previous versions. +

    + +

    +Various deprecated grammar elements are marked in red in documentation. Please, avoid their usage. +

    + +

    +Migrating an old database to the new version works most of the times. However, there are a couple of important changes in the new version to keep in mind: +

    + +
      +
    • Oracle-style units were never supported officially without being in Oracle compatibility mode, although some worked before. For example, the length of the VARCHAR datatype can no longer be specified using CHAR or BYTE; use CHARACTERS or OCTETS instead. CHAR and BYTE can only be used in Oracle compatibility mode. +
    • IDENTITY syntax changed when type is specified: if the type for IDENTITY is specified, then the clause needs to be expanded as INTEGER GENERATED ALWAYS AS IDENTITY. Using just INTEGER IDENTITY no longer works. +
    • LOG connection setting removed: PageStore was removed from H2 so the "LOG=0" setting at the end of the URL (like +"jdbc:h2:file:/tmp/test;LOG=0") is no longer available. +
    + +
    diff --git a/h2/src/docsrc/html/mvstore.html b/h2/src/docsrc/html/mvstore.html index 2d85f48011..a5fd229d05 100644 --- a/h2/src/docsrc/html/mvstore.html +++ b/h2/src/docsrc/html/mvstore.html @@ -1,7 +1,7 @@ @@ -59,7 +59,7 @@

    MVStore

    Overview

      The MVStore is a persistent, log structured key-value store. -It is planned to be the next storage subsystem of H2, +It is used as the default storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL.

    • MVStore stands for "multi-version store". @@ -172,7 +172,7 @@

      Maps

      including access to the first and last key, iterate over some or all keys, and so on.

      Also supported, and very uncommon for maps, is fast index lookup: -the entries of the map can be be efficiently accessed like a random-access list +the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. @@ -234,7 +234,7 @@

      Transactions

      To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. -The tool supports PostgreSQL style "read committed" transaction isolation +The tool supports "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). @@ -295,8 +295,7 @@

      R-Tree and Pluggable Map Implementations

      The map implementation is pluggable. In addition to the default MVMap (multi-version map), -there is a map that supports concurrent write operations, -and a multi-version R-tree map implementation for spatial operations. +there is a multi-version R-tree map implementation for spatial operations.

      Concurrent Operations and Caching

      @@ -467,8 +466,6 @@

      Storage Engine for H2

      For older versions, append ;MV_STORE=TRUE to the database URL. -Even though it can be used with the default table level locking, -by default the MVCC mode is enabled when using the MVStore.

      File Format

      @@ -478,7 +475,7 @@

      File Format

      The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a -log structured storage. +log structured storage. There is one chunk for every version.

      @@ -496,7 +493,7 @@ 

      File Format

      } s.commit(); for (int i = 0; i < 100; i++) { - map.put(0, "Hi"); + map.put(i, "Hi"); } s.commit(); s.close(); @@ -512,7 +509,7 @@

      File Format

      Chunk 2:
      -- Page 4: (root) node with 2 entries pointing to page 3 and 5
      +- Page 4: (root) node with 2 entries pointing to page 5 and 3
      - Page 5: leaf with 140 entries (keys 0 - 139)

      @@ -536,18 +533,18 @@

      File Header

      The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are:

      -
      • H: The entry "H:2" stands for the the H2 database. +
        • H: The entry "H:2" stands for the H2 database.
        • block: The block number where one of the newest chunks starts (but not necessarily the newest).
        • blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, - to match the disk sector + to match the disk sector length of modern hard disks.
        • chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't.
        • created: The number of milliseconds since 1970 when the file was created.
        • format: The file format number. Currently 1.
        • version: The version number of the chunk. -
        • fletcher: The +
        • fletcher: The Fletcher-32 checksum of the header.

        @@ -604,11 +601,11 @@

        Chunk Format

        If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the -Btrfs file system works. +Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). -There is a +There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first.

        @@ -630,14 +627,14 @@

        Chunk Format

        Page Format

        -Each map is a B-tree, +Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), -and variable size int and long +and variable size int and long (1 to 5 / 10 bytes). The page format is:

        • length (int): Length of the page in bytes. @@ -681,7 +678,7 @@

          Page Format

          The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. -The pages form a counted B-tree. +The pages form a counted B-tree.

          Data compression: The data after the page type are optionally compressed using the LZF algorithm. diff --git a/h2/src/docsrc/html/navigation.js b/h2/src/docsrc/html/navigation.js index aeb90d3e14..1262d1bf5f 100644 --- a/h2/src/docsrc/html/navigation.js +++ b/h2/src/docsrc/html/navigation.js @@ -1,7 +1,7 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ function scroll() { diff --git a/h2/src/docsrc/html/performance.html b/h2/src/docsrc/html/performance.html index 81a37159be..54d1b4ba15 100644 --- a/h2/src/docsrc/html/performance.html +++ b/h2/src/docsrc/html/performance.html @@ -1,7 +1,7 @@ @@ -52,54 +52,54 @@

          Performance Comparison

          Embedded

          - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + +
          Test CaseUnitH2HSQLDBDerby
          Simple: Initms101919078280
          Simple: Query (random)ms13048731912
          Simple: Query (sequential)ms83518395415
          Simple: Update (sequential)ms961233321759
          Simple: Delete (sequential)ms950192232016
          Simple: Memory UsageMB21108
          BenchA: Initms91921337528
          BenchA: Transactionsms121922978541
          BenchA: Memory UsageMB12157
          BenchB: Initms90519938049
          BenchB: Transactionsms10915831165
          BenchB: Memory UsageMB17118
          BenchC: Initms249140038064
          BenchC: Transactionsms19798032840
          BenchC: Memory UsageMB19229
          Executed statements#193099519309951930995
          Total timems1367320686105569
          Statements per second#1412269334718291
          Simple: Initms102125106762
          Simple: Query (random)ms5136532035
          Simple: Query (sequential)ms134422107665
          Simple: Update (sequential)ms164230407034
          Simple: Delete (sequential)ms169723109981
          Simple: Memory UsageMB181513
          BenchA: Initms80128776576
          BenchA: Transactionsms136926294987
          BenchA: Memory UsageMB12159
          BenchB: Initms96625447161
          BenchB: Transactionsms3412316815
          BenchB: Memory UsageMB141010
          BenchC: Initms263031447420
          BenchC: Transactionsms173217422735
          BenchC: Memory UsageMB193411
          Executed statements#222203222220322222032
          Total timems140562597563171
          Statements per second#/s1580848554535174

          Client-Server

          - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + +
          Test CaseUnitH2 (Server)HSQLDBDerbyPostgreSQLMySQL
          Simple: Initms1633817198278603015629409
          Simple: Query (random)ms33992582619033153342
          Simple: Query (sequential)ms2184118699423473077432611
          Simple: Update (sequential)ms69137745285763269811350
          Simple: Delete (sequential)ms80519751422024448016555
          Simple: Memory UsageMB2211901
          BenchA: Initms1299614720247222637526060
          BenchA: Transactionsms1013410250184522145315877
          BenchA: Memory UsageMB1315901
          BenchB: Initms1526416889285463161029747
          BenchB: Transactionsms30173376184227711433
          BenchB: Memory UsageMB17121111
          BenchC: Initms1402010407176551952017532
          BenchC: Transactionsms50763160641160634530
          BenchC: Memory UsageMB19211111
          Executed statements#19309951930995193099519309951930995
          Total timems117049114777244803249215188446
          Statements per second#16497168237887774810246
          Test CaseUnitH2HSQLDBDerbyPostgreSQLMySQL
          Simple: Initms27989480554714232972109482
          Simple: Query (random)ms4821598414741408915140
          Simple: Query (sequential)ms33656491129599935676143536
          Simple: Update (sequential)ms987823565314182611350676
          Simple: Delete (sequential)ms1305628584439552098564647
          Simple: Memory UsageMB18151524
          BenchA: Initms20993425253833527794107723
          BenchA: Transactionsms1654929255289952311365036
          BenchA: Memory UsageMB12181114
          BenchB: Initms26785487723975632369115398
          BenchB: Transactionsms8981004619168181794
          BenchB: Memory UsageMB16111225
          BenchC: Initms1826626865393252454770531
          BenchC: Transactionsms656977839412891619150
          BenchC: Memory UsageMB17351327
          Executed statements#22220322222032222203222220322222032
          Total timems179460320546390994237392763113
          Statements per second#/s123816932568393602911

          Benchmark Results and Comments

          H2

          -Version 1.4.177 (2014-04-12) was used for the test. +Version 2.0.202 (2021-11-25) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. @@ -108,14 +108,14 @@

          H2

          HSQLDB

          -Version 2.3.2 was used for the test. +Version 2.5.1 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1).

          Derby

          -Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. +Version 10.14.2.0 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. @@ -132,33 +132,42 @@

          Derby

          PostgreSQL

          -Version 9.1.5 was used for the test. +Version 13.4 was used for the test. The following options where changed in postgresql.conf: -fsync = off, commit_delay = 1000. +fsync = off, commit_delay = 100000 (microseconds). PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.

          MySQL

          -Version 5.1.65-log was used for the test. +Version 8.0.27 was used for the test. MySQL was run with the InnoDB backend. -The setting innodb_flush_log_at_trx_commit -(found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow -(around 140 statements per second in this test) because it tries to flush the data to disk for each commit. + The setting innodb_flush_log_at_trx_commit and sync_binlog +(found in the my.ini / community-mysql-server.cnf file) was set to 0. Otherwise +(and by default), MySQL is slow (around 140 statements per second in this test) +because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. -You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. +You need to change those settings manually in the file my.ini / community-mysql-server.cnf, +and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.

          +

          SQLite

          +

          +SQLite 3.36.0.2 was tested, but the results are not published currently, +because it's about 50 times slower than H2 in embedded mode. +Any tips on how to configure SQLite for higher performance are welcome. +

          +

          Firebird

          -Firebird 1.5 (default installation) was tested, but the results are not published currently. -It is possible to run the performance test with the Firebird database, -and any information on how to configure Firebird for higher performance are welcome. +Firebird 3.0 (default installation) was tested, but failed on multi-threaded part of the test. +It is likely possible to run the performance test with the Firebird database, +and any information on how to configure Firebird for this are welcome.

          Why Oracle / MS SQL Server / DB2 are Not Listed

          @@ -166,7 +175,6 @@

          Why Oracle / MS SQL Server / DB2 are Not Listed

          The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. -SQLite was not tested because the JDBC driver doesn't support transactions.

          About this Benchmark

          @@ -210,8 +218,7 @@

          Comparing Embedded with Server Databases

          Test Platform

          -This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. -The JVM used is Sun JDK 1.6. +This test is run on Fedora v.34 with Oracle JVM 1.8 and SSD drive.

          Multiple Runs

          @@ -371,6 +378,22 @@

          Index Usage

          For other columns, indexes need to be created manually using the CREATE INDEX statement.

          +

          Index Hints

          +

          +If you have determined that H2 is not using the optimal index for your query, you can use index hints to force +H2 to use specific indexes. +

          +
          +SELECT * FROM TEST USE INDEX (index_name_1, index_name_2) WHERE X=1
          +
          +

          Only indexes in the list will be used when choosing an index to use on the given table. There +is no significance to order in this list. +

          +It is possible that no index in the list is chosen, in which case a full table scan will be used. +

          +

          An empty list of index names forces a full table scan to be performed.

          +

          Each index in the list must exist.

          +

          How Data is Stored Internally

          For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, @@ -385,7 +408,7 @@

          How Data is Stored Internally

          then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, -then a hidden auto-increment column of type BIGINT is added to the table, +then a hidden identity column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). @@ -474,8 +497,8 @@

          Prepared Statements and IN(...)

           PreparedStatement prep = conn.prepareStatement(
          -    "SELECT * FROM TABLE(X INT=?) T INNER JOIN TEST ON T.X=TEST.ID");
          -prep.setObject(1, new Object[] { "1", "2" });
          +    "SELECT * FROM TEST WHERE ID = ANY(?)");
          +prep.setObject(1, new Long[] { 1L, 2L });
           ResultSet rs = prep.executeQuery();
           
          @@ -496,7 +519,7 @@

          Data Types

          Each data type has different storage and performance characteristics:

          • The DECIMAL/NUMERIC type is slower - and requires more storage than the REAL and DOUBLE types. + and requires more storage than the REAL and DOUBLE PRECISION types.
          • Text types are slower to read, write, and compare than numeric types and generally require more storage.
          • See Large Objects for information on BINARY vs. BLOB @@ -582,7 +605,7 @@

            Database Profiling

            For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2. -As an example, execute the the following script using the H2 Console: +As an example, execute the following script using the H2 Console:

             SET TRACE_LEVEL_FILE 2;
            @@ -733,7 +756,8 @@ 

            How Data is Stored and How Indexes Work

    Access by row id is fast because the data is sorted by this key. -Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). +Please note the row id is not available until after the row was added +(that means, it can not be used in generated columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: @@ -856,19 +880,6 @@

    Using Multiple Indexes

    Fast Database Import

    -To speed up large imports, consider using the following options temporarily: -

    -
    • SET LOG 0 (disabling the transaction log) -
    • SET CACHE_SIZE (a large cache is faster) -
    • SET LOCK_MODE 0 (disable locking) -
    • SET UNDO_LOG 0 (disable the session undo log) -
    -

    -These options can be set in the database URL: -jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0. -Most of those options are not recommended for regular use, that means you need to reset them after use. -

    -

    If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... diff --git a/h2/src/docsrc/html/quickstart.html b/h2/src/docsrc/html/quickstart.html index 74d6bf38d7..5bb4fc0a41 100644 --- a/h2/src/docsrc/html/quickstart.html +++ b/h2/src/docsrc/html/quickstart.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/html/roadmap.html b/h2/src/docsrc/html/roadmap.html deleted file mode 100644 index 17ba4b1ac4..0000000000 --- a/h2/src/docsrc/html/roadmap.html +++ /dev/null @@ -1,591 +0,0 @@ - - - - - - - -Roadmap - - - - - -
    - - -

    Roadmap

    -

    -New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. -Of course, patches are always welcome, but are not always applied as is. -See also Providing Patches. -

    - -

    Version 1.5.x: Planned Changes

    -
    • Replace file password hash with file encryption key; validate encryption key when connecting. -
    • Remove "set binary collation" feature. -
    • Remove the encryption algorithm XTEA. -
    • Disallow referencing other tables in a table (via constraints for example). -
    • Remove PageStore features like compress_lob. -
    - -

    Version 1.4.x: Planned Changes

    -
    • Change license to MPL 2.0. -
    • Automatic migration from 1.3 databases to 1.4. -
    • Option to disable the file name suffix somehow (issue 447). -
    - -

    Priority 1

    -
    • Bugfixes. -
    • More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): - Online backup (using the 'backup' statement). -
    • Server side cursors. -
    - -

    Priority 2

    -
    • Support hints for the optimizer (which index to use, enforce the join order). -
    • Full outer joins. -
    • Access rights: remember the owner of an object. - Create, alter and drop privileges. - COMMENT: allow owner of object to change it. - Issue 208: Access rights for schemas. -
    • Test multi-threaded in-memory db access. -
    • MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes. -
    • Support GRANT SELECT, UPDATE ON [schemaName.] *. -
    • Migrate database tool (also from other database engines). For Oracle, maybe use - DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. -
    • Clustering: support mixed clustering mode (one embedded, others in server mode). -
    • Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). -
    • Window functions: RANK() and DENSE_RANK(), partition using OVER(). - select *, count(*) over() as fullCount from ... limit 4; -
    • PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. -
    • Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. -
    • Test very large databases and LOBs (up to 256 GB). -
    • Store all temp files in the temp directory. -
    • Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). - Also to allow opening client / server (remote) connections when using LOBs. -
    • Make DDL (Data Definition) operations transactional. -
    • Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). -
    • Groovy Stored Procedures: http://groovy.codehaus.org/GSQL -
    • Add a migration guide (list differences between databases). -
    • Optimization: automatic index creation suggestion using the trace file? -
    • Fulltext search Lucene: analyzer configuration, mergeFactor. -
    • Compression performance: don't allocate buffers, compress / expand in to out buffer. -
    • Rebuild index functionality to shrink index size and improve performance. -
    • Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). -
    • Test performance again with SQL Server, Oracle, DB2. -
    • Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. -
    • Write more tests and documentation for MVCC (Multi Version Concurrency Control). -
    • Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. -
    • Implement, test, document XAConnection and so on. -
    • Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). -
    • CHECK: find out what makes CHECK=TRUE slow, move to CHECK2. -
    • Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. -
    • Index usage for (ID, NAME)=(1, 'Hi'); document. -
    • Set a connection read only (Connection.setReadOnly) or using a connection parameter. -
    • Access rights: finer grained access control (grant access for specific functions). -
    • ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). -
    • Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). -
    • Web server classloader: override findResource / getResourceFrom. -
    • Cost for embedded temporary view is calculated wrong, if result is constant. -
    • Count index range query (count(*) where id between 10 and 20). -
    • Performance: update in-place. -
    • Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). -
    • Database file name suffix: a way to use no or a different suffix (for example using a slash). -
    • Eclipse plugin. -
    • Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. - See also MS SQL Server "Query Notification". -
    • Fulltext search (native): reader / tokenizer / filter. -
    • Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. -
    • iReport to support H2. -
    • Include SMTP (mail) client (alert on cluster failure, low disk space,...). -
    • Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. -
    • JSON parser and functions. -
    • Copy database: tool with config GUI and batch mode, extensible (example: compare). -
    • Document, implement tool for long running transactions using user-defined compensation statements. -
    • Support SET TABLE DUAL READONLY. -
    • GCJ: what is the state now? -
    • Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html -
    • Optimization: simpler log compression. -
    • Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif -
    • Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. -
    • Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). -
    • Custom class loader to reload functions on demand. -
    • Test http://mysql-je.sourceforge.net/ -
    • H2 Console: the webclient could support more features like phpMyAdmin. -
    • Support Oracle functions: TO_DATE, TO_NUMBER. -
    • Work on the Java to C converter. -
    • The HELP information schema can be directly exposed in the Console. -
    • Maybe use the 0x1234 notation for binary fields, see MS SQL Server. -
    • Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html -
    • SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm -
    • SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip -
    • Version column (number/sequence and timestamp based). -
    • Optimize getGeneratedKey: send last identity after each execute (server). -
    • Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). -
    • Max memory rows / max undo log size: use block count / row size not row count. -
    • Implement point-in-time recovery. -
    • Support PL/SQL (programming language / control flow statements). -
    • LIKE: improved version for larger texts (currently using naive search). -
    • Throw an exception when the application calls getInt on a Long (optional). -
    • Default date format for input and output (local date constants). -
    • Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). -
    • File system that writes to two file systems (replication, replicating file system). -
    • Standalone tool to get relevant system properties and add it to the trace output. -
    • Support 'call proc(1=value)' (PostgreSQL, Oracle). -
    • Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). -
    • Console: autocomplete Ctrl+Space inserts template. -
    • Option to encrypt .trace.db file. -
    • Auto-Update feature for database, .jar file. -
    • ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. -
    • Partial indexing (see PostgreSQL). -
    • Add GUI to build a custom version (embedded, fulltext,...) using build flags. -
    • http://rubyforge.org/projects/hypersonic/ -
    • Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). -
    • Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). -
    • Backup tool should work with other databases as well. -
    • Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. -
    • Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). -
    • Java static code analysis: http://pmd.sourceforge.net/ -
    • Java static code analysis: http://www.eclipse.org/tptp/ -
    • Compatibility for CREATE SCHEMA AUTHORIZATION. -
    • Implement Clob / Blob truncate and the remaining functionality. -
    • Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... -
    • File locking: writing a system property to detect concurrent access from the same VM (different classloaders). -
    • Pure SQL triggers (example: update parent table if the child table is changed). -
    • Add H2 to Gem (Ruby install system). -
    • Support linked JCR tables. -
    • Native fulltext search: min word length; store word positions. -
    • Add an option to the SCRIPT command to generate only portable / standard SQL. -
    • Updatable views: create 'instead of' triggers automatically if possible (simple cases first). -
    • Improve create index performance. -
    • Compact databases without having to close the database (vacuum). -
    • Implement more JDBC 4.0 features. -
    • Support TRANSFORM / PIVOT as in MS Access. -
    • SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). -
    • Support updatable views with join on primary keys (to extend a table). -
    • Public interface for functions (not public static). -
    • Support reading the transaction log. -
    • Feature matrix as in i-net software. -
    • Updatable result set on table without primary key or unique index. -
    • Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. -
    • Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') -
    • Support data type INTERVAL -
    • Support nested transactions (possibly using savepoints internally). -
    • Add a benchmark for bigger databases, and one for many users. -
    • Compression in the result set over TCP/IP. -
    • Support curtimestamp (like curtime, curdate). -
    • Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. -
    • Release locks (shared or exclusive) on demand -
    • Support OUTER UNION -
    • Support parameterized views (similar to CSVREAD, but using just SQL for the definition) -
    • A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object -
    • Support dynamic linked schema (automatically adding/updating/removing tables) -
    • Clustering: adding a node should be very fast and without interrupting clients (very short lock) -
    • Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific -
    • Run benchmarks with Android, Java 7, java -server -
    • Optimizations: faster hash function for strings. -
    • DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality -
    • Benchmark: add a graph to show how databases scale (performance/database size) -
    • Implement a SQLData interface to map your data over to a custom object -
    • In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) -
    • Support multiple directories (on different hard drives) for the same database -
    • Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response -
    • Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) -
    • Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML -
    • Support triggers with a string property or option: SpringTrigger, OSGITrigger -
    • MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id; -
    • Ability to resize the cache array when resizing the cache -
    • Time based cache writing (one second after writing the log) -
    • Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 -
    • Index usage for REGEXP LIKE. -
    • Compatibility: add a role DBA (like ADMIN). -
    • Better support multiple processors for in-memory databases. -
    • Support N'text' -
    • Support compatibility for jdbc:hsqldb:res: -
    • HSQLDB compatibility: automatically convert to the next 'higher' data type. - Example: cast(2000000000 as int) + cast(2000000000 as int); - (HSQLDB: long; PostgreSQL: integer out of range) -
    • Provide an Java SQL builder with standard and H2 syntax -
    • Trace: write OS, file system, JVM,... when opening the database -
    • Support indexes for views (probably requires materialized views) -
    • Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters -
    • Server: use one listener (detect if the request comes from a PG or TCP client) -
    • Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 -
    • Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html -
    • DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates. -
    • Support a special trigger on all tables to allow building a transaction log reader. -
    • File system with a background writer thread; test if this is faster -
    • Better document the source code (high level documentation). -
    • Support select * from dual a left join dual b on b.x=(select max(x) from dual) -
    • Optimization: don't lock when the database is read-only -
    • Issue 146: Support merge join. -
    • Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download -
    • Cluster: hot deploy (adding a node at runtime). -
    • Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. -
    • Oracle: support DECODE method (convert to CASE WHEN). -
    • Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping -
    • Improve documentation of access rights. -
    • Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). -
    • Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). -
    • Remember the user defined data type (domain) of a column. -
    • MVCC: support multi-threaded kernel with multi-version concurrency. -
    • Auto-server: add option to define the port range or list. -
    • Support Jackcess (MS Access databases) -
    • Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') -
    • Improve time to open large databases (see mail 'init time for distributed setup') -
    • Move Maven 2 repository from hsql.sf.net to h2database.sf.net -
    • Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) -
    • Optimize A=? OR B=? to UNION if the cost is lower. -
    • Javadoc: document design patterns used -
    • Support custom collators, for example for natural sort (for text that contains numbers). -
    • Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) -
    • Convert SQL-injection-2.txt to html document, include SQLInjection.java sample -
    • Support OUT parameters in user-defined procedures. -
    • Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp -
    • HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; - CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC -
    • Translation: use ${.} in help.csv -
    • Translated .pdf -
    • Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file -
    • Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. - This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. - Also support it when using INSERT ... SELECT. -
    • RECOVER=2 to backup the database, run recovery, open the database -
    • Recovery should work with encrypted databases -
    • Corruption: new error code, add help -
    • Space reuse: after init, scan all storages and free those that don't belong to a live database object -
    • Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) -
    • Support NOCACHE table option (Oracle). -
    • Support table partitioning. -
    • Add regular javadocs (using the default doclet, but another css) to the homepage. -
    • The database should be kept open for a longer time when using the server mode. -
    • Javadocs: for each tool, add a copy & paste sample in the class level. -
    • Javadocs: add @author tags. -
    • Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start(); -
    • MySQL compatibility: real SQL statement for DESCRIBE TEST -
    • Use a default delay of 1 second before closing a database. -
    • Write (log) to system table before adding to internal data structures. -
    • Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup). -
    • Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). -
    • MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem). -
    • Oracle compatibility: support NLS_DATE_FORMAT. -
    • Support for Thread.interrupt to cancel running statements. -
    • Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). -
    • H2 Console: support CLOB/BLOB download using a link. -
    • Support flashback queries as in Oracle. -
    • Import / Export of fixed width text files. -
    • HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data). -
    • Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn -
    • Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns). -
    • H2 Console: in-place autocomplete. -
    • Support large databases: split database files to multiple directories / disks (similar to tablespaces). -
    • H2 Console: support configuration option for fixed width (monospace) font. -
    • Native fulltext search: support analyzers (specially for Chinese, Japanese). -
    • Automatically compact databases from time to time (as a background process). -
    • Test Eclipse DTP. -
    • H2 Console: autocomplete: keep the previous setting -
    • executeBatch: option to stop at the first failed statement. -
    • Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 -
    • Support Oracle ROWID (unique identifier for each row). -
    • MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); -
    • Server mode: improve performance for batch updates. -
    • Applets: support read-only databases in a zip file (accessed as a resource). -
    • Long running queries / errors / trace system table. -
    • H2 Console should support JaQu directly. -
    • Better document FTL_SEARCH, FTL_SEARCH_DATA. -
    • Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. -
    • Index creation using deterministic functions. -
    • ANALYZE: for unique indexes that allow null, count the number of null. -
    • MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html -
    • AUTO_SERVER: support changing IP addresses (disable a network while the database is open). -
    • Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. -
    • Support TRUNCATE .. CASCADE like PostgreSQL. -
    • Fulltext search: lazy result generation using SimpleRowSource. -
    • Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). -
    • MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. -
    • MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 -
    • Docs: add a one line description for each functions and SQL statements at the top (in the link section). -
    • Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). -
    • Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. -
    • Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. -
    • MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) -
    • Support a data type "timestamp with timezone" using java.util.Calendar. -
    • Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 -
    • Add database creation date and time to the database. -
    • Support ASSERTION. -
    • MySQL compatibility: support comparing 1='a' -
    • Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html -
    • PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. -
    • RunScript should be able to read from system in (or quiet mode for Shell). -
    • Natural join: support select x from dual natural join dual. -
    • Support using system properties in database URLs (may be a security problem). -
    • Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b -
    • Use the Java service provider mechanism to register file systems and function libraries. -
    • MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). -
    • Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). -
    • Optimization for EXISTS: convert to inner join or IN(..) if possible. -
    • Functions: support hashcode(value); cryptographic and fast -
    • Serialized file lock: support long running queries. -
    • Network: use 127.0.0.1 if other addresses don't work. -
    • Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. -
    • Support reading JCR data: one table per node type; query table; cache option -
    • OSGi: create a sample application, test, document. -
    • help.csv: use complete examples for functions; run as test case. -
    • Functions to calculate the memory and disk space usage of a table, a row, or a value. -
    • Re-implement PooledConnection; use a lightweight connection object. -
    • Doclet: convert tests in javadocs to a java class. -
    • Doclet: format fields like methods, but support sorting by name and value. -
    • Doclet: shrink the html files. -
    • MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56 -
    • Allow to scan index backwards starting with a value (to better support ORDER BY DESC). -
    • Java Service Wrapper: try http://yajsw.sourceforge.net/ -
    • Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. -
    • MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN. -
    • Use a lazy and auto-close input stream (open resource when reading, close on eof). -
    • Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). -
    • Improve SQL documentation, see http://www.w3schools.com/sql/ -
    • MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. -
    • MS SQL Server compatibility: support DATEPART syntax. -
    • Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 -
    • Support INTERVAL data type (see Oracle and others). -
    • Combine Server and Console tool (only keep Server). -
    • Store the Lucene index in the database itself. -
    • Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29 -
    • Oracle compatibility: support DECODE(x, ...). -
    • MVCC: compare concurrent update behavior with PostgreSQL and Oracle. -
    • HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). -
    • HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) -
    • Support comma as the decimal separator in the CSV tool. -
    • Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz -
    • Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. -
    • CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. -
    • Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601 -
    • PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. -
    • Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html -
    • IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. -
    • Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). -
    • Oracle compatibility: support CREATE SYNONYM table FOR schema.table. -
    • FTP: document the server, including -ftpTask option to execute / kill remote processes -
    • FTP: problems with multithreading? -
    • FTP: implement SFTP / FTPS -
    • FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). -
    • More secure default configuration if remote access is enabled. -
    • Improve database file locking (maybe use native file locking). The current approach seems to be problematic - if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). -
    • Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. -
    • Issue 107: Prefer using the ORDER BY index if LIMIT is used. -
    • An index on (id, name) should be used for a query: select * from t where s=? order by i -
    • Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). - See PostgreSQL. -
    • Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). -
    • Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). -
    • Fast alter table add column. -
    • Improve concurrency for in-memory database operations. -
    • Issue 122: Support for connection aliases for remote tcp connections. -
    • Fast scrambling (strong encryption doesn't help if the password is included in the application). -
    • H2 Console: support -webPassword to require a password to access preferences or shutdown. -
    • Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. -
    • Issue 127: Support activation/deactivation of triggers -
    • Issue 130: Custom log event listeners -
    • Issue 131: IBM DB2 compatibility: sysibm.sysdummy1 -
    • Issue 132: Use Java enum trigger type. -
    • Issue 134: IBM DB2 compatibility: session global variables. -
    • Cluster: support load balance with values for each server / auto detect. -
    • FTL_SET_OPTION(keyString, valueString) with key stopWords at first. -
    • Pluggable access control mechanism. -
    • Fulltext search (Lucene): support streaming CLOB data. -
    • Document/example how to create and read an encrypted script file. -
    • Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). -
    • Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. -
    • Support a way to create or read compressed encrypted script files using an API. -
    • Scripting language support (Javascript). -
    • The network client should better detect if the server is not an H2 server and fail early. -
    • H2 Console: support CLOB/BLOB upload. -
    • Database file lock: detect hibernate / standby / very slow threads (compare system time). -
    • Automatic detection of redundant indexes. -
    • Maybe reject join without "on" (except natural join). -
    • Implement GiST (Generalized Search Tree for Secondary Storage). -
    • Function to read a number of bytes/characters from an BLOB or CLOB. -
    • Issue 156: Support SELECT ? UNION SELECT ?. -
    • Automatic mixed mode: support a port range list (to avoid firewall problems). -
    • Support the pseudo column rowid, oid, _rowid_. -
    • H2 Console / large result sets: stream early instead of keeping a whole result in-memory -
    • Support TRUNCATE for linked tables. -
    • UNION: evaluate INTERSECT before UNION (like most other database except Oracle). -
    • Delay creating the information schema, and share metadata columns. -
    • TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. -
    • Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). -
    • Support CREATE DATABASE LINK (a custom JDBC driver is already supported). -
    • Support large GROUP BY operations. Issue 216. -
    • Issue 163: Allow to create foreign keys on metadata types. -
    • Logback: write a native DBAppender. -
    • Cache size: don't use more cache than what is available. -
    • Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. -
    • Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. -
    • User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. -
    • Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. -
    • Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based. -
    • Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. -
    • Oracle compatibility: support INSERT ALL. -
    • Issue 178: Optimizer: index usage when both ascending and descending indexes are available. -
    • Issue 179: Related subqueries in HAVING clause. -
    • IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. -
    • Creating primary key: always create a constraint. -
    • Maybe use a different page layout: keep the data at the head of the page, and ignore the tail - (don't store / read it). This may increase write / read performance depending on the file system. -
    • Indexes of temporary tables are currently kept in-memory. Is this how it should be? -
    • The Shell tool should support the same built-in commands as the H2 Console. -
    • Maybe use PhantomReference instead of finalize. -
    • Database file name suffix: should only have one dot by default. Example: .h2db -
    • Issue 196: Function based indexes -
    • ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. -
    • Fix the disk space leak (killing the process at the exact right moment will increase - the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java -
    • ROWNUM: Oracle compatibility when used within a subquery. Issue 198. -
    • Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. -
    • ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. -
    • Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); -
    • Optimizer: index usage when both ascending and descending indexes are available. Issue 178. -
    • Issue 306: Support schema specific domains. -
    • Triggers: support user defined execution order. Oracle: - CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT - ON TEST FOR EACH ROW FOLLOWS TEST_1. - SQL specifies that multiple triggers should be fired in time-of-creation order. - PostgreSQL uses name order, which was judged to be more convenient. - Derby: triggers are fired in the order in which they were created. -
    • PostgreSQL compatibility: combine "users" and "roles". See: - http://www.postgresql.org/docs/8.1/interactive/user-manag.html -
    • Improve documentation of system properties: only list the property names, default values, and description. -
    • Support running totals / cumulative sum using SUM(..) OVER(..). -
    • Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) -
    • Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). -
    • Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. -
    • Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. -
    • Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. -
    • Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. -
    • Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. -
    • Log long running transactions (similar to long running statements). -
    • Parameter data type is data type of other operand. Issue 205. -
    • Some combinations of nested join with right outer join are not supported. -
    • DatabaseEventListener.openConnection(id) and closeConnection(id). -
    • Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, - or to prevent to login with the same username and password from different IPs. - Possibly using the DatabaseEventListener API, or a new API. -
    • Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. -
    • Compatibility with MySQL TIMESTAMPDIFF. Issue 209. -
    • Optimizer: use a histogram of the data, specially for non-normal distributions. -
    • Trigger: allow declaring as source code (like functions). -
    • User defined aggregate: allow declaring as source code (like functions). -
    • The error "table not found" is sometimes caused by using the wrong database. - Add "(this database is empty)" to the exception message if applicable. -
    • MySQL + PostgreSQL compatibility: support string literal escape with \n. -
    • PostgreSQL compatibility: support string literal escape with double \\. -
    • Document the TCP server "management_db". Maybe include the IP address of the client. -
    • Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main -
    • If a database object was not found in the current schema, but one with the same name existed in another schema, include that in the error message. -
    • Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) -
    • Issue 302: Support optimizing queries with both inner and outer joins, as in: - select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 - (the optimizer should swap a and b here). - See also TestNestedJoins, tag "swapInnerJoinTables". -
    • JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). -
    • Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; -
    • nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). -
    • Column as parameter of function table. Issue 228. -
    • Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, - disable autocommit for all connections. -
    • Compatibility with MS Access: support "&" to concatenate text. -
    • The BACKUP statement should not synchronize on the database, and therefore should not block other users. -
    • Document the database file format. -
    • Support reading LOBs. -
    • Require appending DANGEROUS=TRUE when using certain dangerous settings such as - LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... -
    • Support UDT (user defined types) similar to how Apache Derby supports it: - check constraint, allow to use it in Java functions as parameters (return values already seem to work). -
    • Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, - optional compatibility with current encrypted database files). -
    • Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. -
    • GROUP BY queries should use a temporary table if there are too many rows. -
    • BLOB: support random access when reading. -
    • CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). -
    • Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). -
    • Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). -
    • Compatibility with MySQL: support non-strict mode (sql_mode = "") any data - that is too large for the column will just be truncated or set to the default value. -
    • The full condition should be sent to the linked table, not just the indexed condition. - Example: TestLinkedTableFullCondition -
    • Compatibility with IBM DB2: CREATE PROCEDURE. -
    • Compatibility with IBM DB2: SQL cursors. -
    • Single-column primary key values are always stored explicitly. This is not required. -
    • Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). -
    • CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. -
    • Optimization for large lists for column IN(1, 2, 3, 4,...) - currently a list is used, could potentially use a hash set - (maybe only for a part of the values - the ones that can be evaluated). -
    • Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). -
    • PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] -
    • PostgreSQL compatibility: UPDATE with FROM. -
    • Issue 297: Oracle compatibility for "at time zone". -
    • IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). -
    • Support SQL/XML. -
    • Support concurrent opening of databases. -
    • Improved error message and diagnostics in case of network configuration problems. -
    • TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). -
    • Adding a primary key should make the columns 'not null' unless if there is a row with null - (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). -
    • ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). -
    • MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html -
    • The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ -
    • Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". -
    • MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. -
    • Issue 283: Improve performance of H2 on Android. -
    • Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). -
    • Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d -
    • PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). -
    • MS SQL Server compatibility: support @@ROWCOUNT. -
    • PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x). -
    • Issue 311: Serialized lock mode: executeQuery of write operations fails. -
    • PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). -
    • MySQL compatibility: support TIMESTAMPADD. -
    • Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -
    • Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -
    • Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). -
    • TRANSACTION_ID() for in-memory databases. -
    • TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). -
    • Support [INNER | OUTER] JOIN USING(column [,...]). -
    • Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) -
    • GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). -
    • Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped. -
    • Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. -
    • PHP support: H2 should support PDO, or test with PostgreSQL PDO. -
    • Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. -
    • Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. -
    • MySQL compatibility: index names only need to be unique for the given table. -
    • Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, - and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. -
    • Oracle compatibility: support MEDIAN aggregate function. -
    • Issue 348: Oracle compatibility: division should return a decimal result. -
    • Read rows on demand: instead of reading the whole row, only read up to that column that is requested. - Keep a pointer to the data area and the column id that is already read. -
    • Long running transactions: log session id when detected. -
    • Optimization: "select id from test" should use the index on id even without "order by". -
    • Issue 362: LIMIT support for UPDATE statements (MySQL compatibility). -
    • Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ... -
    • Use Java 6 SQLException subclasses. -
    • Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR -
    • Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. -
    - -

    Not Planned

    -
      -
    • HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. -
    • String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. -
    • In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. -
    - -
    - diff --git a/h2/src/docsrc/html/search.js b/h2/src/docsrc/html/search.js index 33644612c9..6d32a658d3 100644 --- a/h2/src/docsrc/html/search.js +++ b/h2/src/docsrc/html/search.js @@ -1,7 +1,7 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ var pages = new Array(); diff --git a/h2/src/docsrc/html/security.html b/h2/src/docsrc/html/security.html new file mode 100644 index 0000000000..fe8d29f841 --- /dev/null +++ b/h2/src/docsrc/html/security.html @@ -0,0 +1,73 @@ + + + + + + +Features + + + + + +
    + + +

    Securing your H2

    + + + Introduction
    + + Network exposed
    + + Alias / Stored Procedures
    + + Grants / Roles / Permissions
    + + Encrypted storage
    + +

    Introduction

    +

    +H2 is __not__ designed to be run in an adversarial environment. You should absolutely not expose your H2 server to untrusted connections. +

    +

    +Running H2 in embedded mode is the best choice - it is not externally exposed. +

    + +

    Network exposed

    +

    +When running an H2 server in TCP mode, first prize is to run with it only listening to connections on localhost (i.e. 127.0.0.1). +

    +

    +Second prize is running listening to restricted ports on a secured network. +

    +

    +If you expose H2 to the broader Internet, you can secure the connection with SSL, but this is a rather tricky thing to get right, between JVM bugs, certificates and choosing a decent cipher. +

    + +

    Alias / Stored procedures

    +

    +Anything created with CREATE ALIAS can do anything the JVM can do, which includes reading/writing from the filesystem on the machine the JVM is running on. +

    + +

    Grants / Roles / Permissions

    +

    +GRANT / REVOKE TODO +

    + +

    Encrypted storage

    +

    +Encrypting your on-disk database will provide a small measure of security to your stored data. +You should not assume that this is any kind of real security against a determined opponent however, +since there are many repeated data structures that will allow someone with resources and time to extract the secret key. +

    +

    +Also the secret key is visible to anything that can read the memory of the process. +

    + +
    + diff --git a/h2/src/docsrc/html/source.html b/h2/src/docsrc/html/source.html index d6b31efe5d..5b8f130680 100644 --- a/h2/src/docsrc/html/source.html +++ b/h2/src/docsrc/html/source.html @@ -1,33 +1,36 @@ - -Source Code Viewer + -

    -
    -    
    -        
    -        
    -    
    -
    - - - -
    - - - -
    + + diff --git a/h2/src/docsrc/html/sourceError.html b/h2/src/docsrc/html/sourceError.html index d181391f1b..84538c4bce 100644 --- a/h2/src/docsrc/html/sourceError.html +++ b/h2/src/docsrc/html/sourceError.html @@ -1,7 +1,7 @@ @@ -40,6 +40,9 @@ function getVersion(build) { if (build == 64) { return '1.0/version-1.0.' + build; + } else if (build > 200) { + var b = build + 1; + return Math.floor(b / 100) + '.' + Math.floor(b % 100 / 10) + '.' + build; } else if (build >= 177) { return '1.4.' + build; } else if (build >= 146 && build != 147) { @@ -67,7 +70,7 @@ code = code.replace('HY', '50'); code = code.replace('C', '1'); code = code.replace('T', '2'); - get('more').src = 'http://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code; + get('more').src = 'https://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code; } function go(file, line) { @@ -82,13 +85,12 @@ get('file').innerHTML = file; get('code').src = url; } else { + url = 'https://github.com/h2database/h2database/tree/' if (build && build > 0) { - var tag = 'tags/version-' + getVersion(build) + '/h2'; + url += 'version-' + getVersion(parseInt(build)) + '/h2'; } else { - var tag = 'trunk/h2'; + var tag = 'master/h2'; } - url = 'http://code.google.com/p/h2database/source/browse/'; - url += tag; url += '/src/main/'; url += file; url += '#'; @@ -115,7 +117,7 @@ hasData = true; idx = errorCode.indexOf("-"); build = parseInt(errorCode.substring(idx + 1)); - get('version').innerHTML = getVersion(build); + get('version').innerHTML = getVersion(parseInt(build)); errorCode = errorCode.substring(0, idx); while (errorCode.length > 1 && errorCode.charAt(0) == '0') { errorCode = errorCode.substring(1); diff --git a/h2/src/docsrc/html/stylesheet.css b/h2/src/docsrc/html/stylesheet.css index faec065a6b..a30f4d5adc 100644 --- a/h2/src/docsrc/html/stylesheet.css +++ b/h2/src/docsrc/html/stylesheet.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th { @@ -283,6 +283,23 @@ td.index { vertical-align: top; } +div.ruleCompat code { + border-color: coral; + background-color: mistyrose; +} + +div.ruleH2 code { + border-color: lightseagreen; +} + +span.ruleCompat { + color: darkred; +} + +span.ruleH2 { + color: green; +} + .c { padding: 1px 3px; margin: 0px 0px; diff --git a/h2/src/docsrc/html/stylesheetPdf.css b/h2/src/docsrc/html/stylesheetPdf.css index 0977e2c5e3..dacc282997 100644 --- a/h2/src/docsrc/html/stylesheetPdf.css +++ b/h2/src/docsrc/html/stylesheetPdf.css @@ -1,21 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th { - font: 9pt Tahoma, Arial, Helvetica, sans-serif; + font: 14pt Tahoma, Arial, Helvetica, sans-serif; font-weight: normal; } h1, h2, h3, h4, h5 { - font: 9pt Arial, Helvetica, sans-serif; + font: 14pt Arial, Helvetica, sans-serif; font-weight: bold; } td, input, select, textarea, body, code, pre { - font-size: 9pt; + font-size: 14pt; } pre { @@ -32,26 +32,27 @@ body { margin: 0px; } -h1 { +h1, p.title { background-color: #0000bb; padding: 2px 4px 2px 4px; color: #fff; - font-size: 15pt; + font-size: 24pt; + font-weight: bold; line-height: normal; } h2 { - font-size: 13pt; + font-size: 18pt; margin-top: 1.5em; } h3 { - font-size: 11pt; + font-size: 16pt; margin-top: 1.5em; } h4 { - font-size: 9pt; + font-size: 14pt; margin-top: 1.5em; } @@ -69,17 +70,16 @@ table { } th { - font-size: 9pt; - font-weight: normal; + font-size: 14pt; + font-weight: bold; text-align: left; - background-color: #ece9d8; border: 1px solid #aca899; padding: 2px; } td { background-color: #ffffff; - font-size: 9pt; + font-size: 14pt; text-align: left; vertical-align: top; border: 1px solid #aca899; @@ -152,3 +152,11 @@ td.index { border-collapse: collapse; vertical-align: top; } + +span.ruleCompat { + color: darkred; +} + +span.ruleH2 { + color: green; +} diff --git a/h2/src/docsrc/html/systemtables.html b/h2/src/docsrc/html/systemtables.html new file mode 100644 index 0000000000..fa19549629 --- /dev/null +++ b/h2/src/docsrc/html/systemtables.html @@ -0,0 +1,104 @@ + + + + + + +System Tables + + + + + +
    + + +

    System Tables

    + +

    Index

    + +

    +Information Schema +

    + + + + + + + +
    + + ${item.table}
    +
    +
    + + ${item.table}
    +
    +
    + + ${item.table}
    +
    +
    + + +

    +Range Table
    +

    + +

    Information Schema

    +

    +The system tables and views in the schema INFORMATION_SCHEMA contain the meta data +of all tables, views, domains, and other objects in the database as well as the current settings. +This documentation describes the default new version of INFORMATION_SCHEMA for H2 2.0. +Old TCP clients (1.4.200 and below) see the legacy version of INFORMATION_SCHEMA, +because they can't work with the new one. The legacy version is not documented. +

    + + +

    ${item.table}

    +

    ${item.description}

    + + +${item.columns} + +
    +
    + +

    Range Table

    +

    +The range table is a dynamic system table that contains all values from a start to an end value. +Non-zero step value may also be specified, default is 1. +Start value, end value, and optional step value are converted to BIGINT data type. +The table contains one column called X. +If start value is greater than end value and step is positive the result is empty. +If start value is less than end value and step is negative the result is empty too. +If start value is equal to end value the result contains only start value. +Start value, start value plus step, start value plus step multiplied by two and so on are included in result. +If step is positive the last value is less than or equal to the specified end value. +If step is negative the last value is greater than or equal to the specified end value. +The table is used as follows: -

    +

    Examples:

    +
    +SELECT X FROM SYSTEM_RANGE(1, 10);
    +-- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
    +SELECT X FROM SYSTEM_RANGE(1, 10, 2);
    +-- 1, 3, 5, 7, 9
    +SELECT X FROM SYSTEM_RANGE(1, 10, -1);
    +-- No rows
    +SELECT X FROM SYSTEM_RANGE(10, 2, -2);
    +-- 10, 8, 6, 4, 2
    +
    + +
    diff --git a/h2/src/docsrc/html/tutorial.html b/h2/src/docsrc/html/tutorial.html index 6f71a25b80..3dadf0f822 100644 --- a/h2/src/docsrc/html/tutorial.html +++ b/h2/src/docsrc/html/tutorial.html @@ -1,7 +1,7 @@ @@ -18,7 +18,7 @@
    +

    +If the console startup procedure is unable to locate the default system web browser, +an error message may be displayed. It is possible to explicitly tell H2 which +program/script to use when opening a system web browser by setting either the BROWSER +environment variable, or the h2.browser java property. +

    Firewall

    @@ -295,7 +299,7 @@

    Special H2 Console Syntax

    @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, - @procedure_columns, @schemas, @super_tables, @super_types, + @procedure_columns, @pseudo_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns
    @@ -311,10 +315,13 @@

    Special H2 Console Syntax

    - @generated insert into test() values(); + @generated insert into test() values();
    + @generated(1) insert into test() values();
    + @generated(ID, "TIMESTAMP") insert into test() values(); Show the result of Statement.getGeneratedKeys(). + Names or one-based indexes of required columns can be optionally specified. @@ -436,6 +443,7 @@

    Settings of the H2 Console

    • webAllowOthers: allow other computers to connect.
    • webPort: the port of the H2 Console
    • webSSL: use encrypted TLS (HTTPS) connections. +
    • webAdminPassword: password to access preferences and tools of H2 Console.

    In addition to those settings, the properties of the last recently used connection @@ -456,7 +464,6 @@

    Connecting to a Database using JDBC

    public class Test { public static void main(String[] a) throws Exception { - Class.forName("org.h2.Driver"); Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", ""); // add application code here @@ -465,8 +472,7 @@

    Connecting to a Database using JDBC

    }

    -This code first loads the driver (Class.forName(...)) -and then opens a connection (using DriverManager.getConnection()). +This code opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call @@ -476,14 +482,61 @@

    Connecting to a Database using JDBC

    Creating New Databases

    -By default, if the database specified in the URL does not yet exist, a new (empty) -database is created automatically. The user that created the database automatically becomes -the administrator of this database. +By default, if the database specified in the embedded URL does not yet exist, +a new (empty) database is created automatically. +The user that created the database automatically becomes the administrator of this database.

    -Auto-creating new database can be disabled, see +Auto-creation of databases can be disabled, see Opening a Database Only if it Already Exists.

    +

    +H2 Console does not allow creation of databases unless a browser window is opened by Console during its +startup or from its icon in the system tray and remote access is not enabled. +A context menu of the tray icon can also be used to create a new database. +

    +

    +You can also create a new local database from a command line with a Shell tool: +

    +
    +> java -cp h2-*.jar org.h2.tools.Shell
    +
    +Welcome to H2 Shell
    +Exit with Ctrl+C
    +[Enter]   jdbc:h2:mem:2
    +URL       jdbc:h2:./path/to/database
    +[Enter]   org.h2.Driver
    +Driver
    +[Enter]   sa
    +User      your_username
    +Password  (hidden)
    +Type the same password again to confirm database creation.
    +Password  (hidden)
    +Connected
    +
    +sql> quit
    +Connection closed
    +
    +

    +By default remote creation of databases from a TCP connection or a web interface is not allowed. +It's not recommended to enable remote creation of databases due to security reasons. +User who creates a new database becomes its administrator and therefore gets the same access to your JVM as H2 has +and the same access to your operating system as Java and your system account allows. +It's recommended to create all databases locally using an embedded URL, local H2 Console, or the Shell tool. +

    +

    +If you really need to allow remote database creation, you can pass -ifNotExists parameter to +TCP, PG, or Web servers (but not to the Console tool). +Its combination with -tcpAllowOthers, -pgAllowOthers, or -webAllowOthers +effectively creates a remote security hole in your system, if you use it, always guard your ports with a firewall +or some other solution and use such combination of settings only in trusted networks. +

    +

    +H2 Servlet also supports such option. +When you use it always protect the servlet with security constraints, +see Using the H2 Console Servlet for example; +don't forget to uncomment and adjust security configuration for your needs. +

    Using the Server

    @@ -545,13 +598,13 @@

    Stopping a TCP Server from Another Process

    To stop the server from the command line, run:

    -java org.h2.tools.Server -tcpShutdown tcp://localhost:9092
    +java org.h2.tools.Server -tcpShutdown tcp://localhost:9092 -tcpPassword password
     

    To stop the server from a user application, use the following code:

    -org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9094");
    +org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9092", "password", false, false);
     

    This function will only stop the TCP server. @@ -559,18 +612,14 @@

    Stopping a TCP Server from Another Process

    To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. -Shutting down a TCP server can be protected using the option -tcpPassword +Shutting down a TCP server is protected using the option -tcpPassword (the same password must be used to start and stop the TCP server).

    Using Hibernate

    This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, -or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. -A patch -for Hibernate has been submitted and is now applied. -You can rename it to H2Dialect.java and include this as a patch in your application, -or upgrade to a version of Hibernate where this is fixed. +or the native H2 Dialect.

    When using Hibernate, try to use the H2Dialect if possible. @@ -614,7 +663,7 @@

    To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. -See also H2Platform. +See also H2Platform.

    Using Apache ActiveMQ

    @@ -632,13 +681,9 @@

    Using Apache ActiveMQ

    Using H2 within NetBeans

    -The project H2 Database Engine Support For NetBeans -allows you to start and stop the H2 server from within the IDE. -

    -

    There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. -This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. +This is a problem for queries that modify state, such as SELECT NEXT VALUE FOR SEQ. In this case, two sequence values are allocated instead of just one.

    @@ -656,7 +701,7 @@

    Using H2 with jOOQ

    then run the jOOQ code generator on the command line using this command:

    -java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.3.158.jar;.
    +java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.4.199.jar;.
     org.jooq.util.GenerationTool /codegen.xml
     

    @@ -664,7 +709,7 @@

    Using H2 with jOOQ

     <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    -<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-2.3.0.xsd">
    +<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-3.11.0.xsd">
         <jdbc>
             <driver>org.h2.Driver</driver>
             <url>jdbc:h2:~/test</url>
    @@ -672,14 +717,11 @@ 

    Using H2 with jOOQ

    <password></password> </jdbc> <generator> - <name>org.jooq.util.DefaultGenerator</name> <database> - <name>org.jooq.util.h2.H2Database</name> <includes>.*</includes> <excludes></excludes> <inputSchema>PUBLIC</inputSchema> </database> - <generate></generate> <target> <packageName>org.jooq.h2.generated</packageName> <directory>./src</directory> @@ -691,16 +733,16 @@

    Using H2 with jOOQ

    Using the generated source, you can query the database as follows:

    -Factory create = new H2Factory(connection);
    +DSLContext dsl = DSL.using(connection);
     Result<UserRecord> result =
    -create.selectFrom(USER)
    +dsl.selectFrom(USER)
         .where(NAME.like("Johnny%"))
         .orderBy(ID)
         .fetch();
     

    -See more details on jOOQ Homepage -and in the jOOQ Tutorial +See more details on jOOQ Homepage +and in the jOOQ Tutorial

    Using Databases in Web Applications

    @@ -747,6 +789,15 @@

    Using a Servlet Listener to Start and Stop a Database

    </listener>

    +If your servlet container is already Servlet 5-compatible, use the following +snippet instead: +

    +
    +<listener>
    +    <listener-class>org.h2.server.web.JakartaDbStarter</listener-class>
    +</listener>
    +
    +

    For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, @@ -786,10 +837,10 @@

    Using a Servlet Listener to Start and Stop a Database

    If the TCP server is started within the DbStarter, it will also be stopped automatically.

    -

    Using the H2 Console Servlet

    +

    Using the H2 Console Servlet

    The H2 Console is a standalone application and includes its own web server, but it can be -used as a servlet as well. To do that, include the the h2*.jar file in your application, and +used as a servlet as well. To do that, include the h2*.jar file in your application, and add the following configuration to your web.xml:

    @@ -812,68 +863,34 @@ 

    Using the H2 Console Servlet

    <servlet-name>H2Console</servlet-name> <url-pattern>/console/*</url-pattern> </servlet-mapping> +<!-- +<security-role> + <role-name>admin</role-name> +</security-role> +<security-constraint> + <web-resource-collection> + <web-resource-name>H2 Console</web-resource-name> + <url-pattern>/console/*</url-pattern> + </web-resource-collection> + <auth-constraint> + <role-name>admin</role-name> + </auth-constraint> +</security-constraint> +-->

    For details, see also src/tools/WEB-INF/web.xml.

    -To create a web application with just the H2 Console, run the following command: -

    -
    -build warConsole
    -
    - -

    Android

    -

    -You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. -So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, -except for opening and closing a database, which is not yet optimized in H2 -(H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). -Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. -So far, only very few tests have been run, and everything seems to work as expected. -Fulltext search was not yet tested, however the native fulltext search should work. -

    -

    -Reasons to use H2 instead of SQLite are: -

    -
    • Full Unicode support including UPPER() and LOWER(). -
    • Streaming API for BLOB and CLOB data. -
    • Fulltext search. -
    • Multiple connections. -
    • User defined functions and triggers. -
    • Database file encryption. -
    • Reading and writing CSV files (this feature can be used outside the database as well). -
    • Referential integrity and check constraints. -
    • Better data type and SQL support. -
    • In-memory databases, read-only databases, linked tables. -
    • Better compatibility with other databases which simplifies porting applications. -
    • Possibly better performance (so far for read operations). -
    • Server mode (accessing a database on a different machine over TCP/IP). -
    -

    -Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). -Both the regular H2 jar file and the smaller h2small-*.jar can be used. -To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) -or build.bat jarSmall (Windows). +If your application is already Servlet 5-compatible, use the servlet class +org.h2.server.web.JakartaWebServlet instead.

    -The database files needs to be stored in a place that is accessible for the application. -Example: +To create a web application with just the H2 Console, run the following command:

    -String url = "jdbc:h2:/data/data/" +
    -    "com.example.hello" +
    -    "/data/hello" +
    -    ";FILE_LOCK=FS" +
    -    ";PAGE_SIZE=1024" +
    -    ";CACHE_SIZE=8192";
    -Class.forName("org.h2.Driver");
    -conn = DriverManager.getConnection(url);
    -...
    +build warConsole
     
    -

    -Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. -

    CSV (Comma Separated Values) Support

    @@ -1008,6 +1025,15 @@

    Restore from a Script

    need to be available on the server side.

    +

    +If the script was generated by H2 1.4.200 or an older version, add the VARIABLE_BINARY option to import it +into a more recent version. +

    + +
    +java org.h2.tools.RunScript -url jdbc:h2:~/test -user sa -script test.zip -options compression zip variable_binary
    +
    +

    Online Backup

    The BACKUP SQL statement and the Backup tool both create a zip file @@ -1135,7 +1161,7 @@

    Using OpenOffice Base

    This can be done by create it using the NetBeans OpenOffice plugin. -See also Extensions Development. +See also Extensions Development.

    Java Web Start / JNLP

    @@ -1157,9 +1183,9 @@

    Using a Connection Pool

    For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the -Mini Connection Pool Manager +Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, -for example the Apache Commons DBCP. +for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool than to get one using DriverManager.getConnection(). The built-in connection pool is used as follows:

    @@ -1242,11 +1268,10 @@

    Using the Native Fulltext Search

    org.h2.fulltext.FullText.searchData(conn, text, limit, offset);
    -

    Using the Lucene Fulltext Search

    +

    Using the Apache Lucene Fulltext Search

    -To use the Lucene full text search, you need the Lucene library in the classpath. -Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, -and Lucene version 3.x is used by default for H2 version 1.3.x. +To use the Apache Lucene full text search, you need the Lucene library in the classpath. +Apache Lucene 8.5.2 or binary compatible version is required. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. @@ -1320,13 +1345,6 @@

    Using the Lucene Fulltext Search

    SELECT * FROM FTL_SEARCH_DATA('LAST_NAME:John', 0, 0); CALL FTL_DROP_ALL(); -

    -The Lucene fulltext search implementation is not synchronized internally. -If you update the database and query the fulltext search concurrently -(directly using the Java API of H2 or Lucene itself), you need to ensure -operations are properly synchronized. If this is not the case, you may get -exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed. -

    User-Defined Variables

    @@ -1342,7 +1360,7 @@

    User-Defined Variables

     SET @TOTAL = NULL;
    -SELECT X, SET(@TOTAL, IFNULL(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
    +SELECT X, SET(@TOTAL, COALESCE(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
     

    Variables that are not set evaluate to NULL. @@ -1354,18 +1372,34 @@

    User-Defined Variables

    Date and Time

    -Date, time and timestamp values support ISO 8601 formatting, including time zone: +Date, time and timestamp values support standard literals:

    -CALL TIMESTAMP '2008-01-01 12:00:00+01:00';
    +VALUES (
    +    DATE '2008-01-01',
    +    TIME '12:00:00',
    +    TIME WITH TIME ZONE '12:00:00+01:00',
    +    TIMESTAMP '2008-01-01 12:00:00',
    +    TIMESTAMP WITH TIME ZONE '2008-01-01 12:00:00+01:00'
    +);
     

    -If the time zone is not set, the value is parsed using the current time zone setting of the system. -Date and time information is stored in H2 database files without time zone information. -If the database is opened using another system time zone, the date and time will be the same. -That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database -and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. -Please note that changing the time zone after the H2 driver is loaded is not supported. +ISO 8601-style datetime formats with T instead of space between date and time parts are also supported. +

    +

    +TIME and TIMESTAMP values are preserved without time zone information as local time. +That means if you store the value '2000-01-01 12:00:00' in one time zone, then change the time zone of the session +you will also get '2000-01-01 12:00:00', the value will not be adjusted to the new time zone, +therefore its absolute value in UTC may be different. +

    +

    +TIME WITH TIME ZONE and TIMESTAMP WITH TIME ZONE values preserve the specified time zone offset +and if you store the value '2008-01-01 12:00:00+01:00' it also remains the same +even if you change the time zone of the session, +and because it has a time zone offset its absolute value in UTC will be the same. +TIMESTAMP WITH TIME ZONE values may also be specified with a time zone name like '2008-01-01 12:00:00 Europe/Berlin'. +In that case this name will be converted into a time zone offset. +Names of time zones are not stored.

    Using Spring

    @@ -1386,53 +1420,13 @@

    Using the TCP Server

    The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server.

    -

    Error Code Incompatibility

    -

    -There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, -because of a change in the error code. This will cause the JdbcTemplate to not detect -a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of -DuplicateKeyException. -See also the issue SPR-8235. -The workaround is to add the following XML file to the root of the classpath: -

    -
    -<beans
    -    xmlns="http://www.springframework.org/schema/beans"
    -    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    -    xsi:schemaLocation=
    -        "http://www.springframework.org/schema/beans
    -        http://www.springframework.org/schema/beans/spring-beans.xsd"
    -    >
    -    <import resource="classpath:org/springframework/jdbc/support/sql-error-codes.xml"/>
    -    <bean id = "H2" class="org.springframework.jdbc.support.SQLErrorCodes">
    -        <property name="badSqlGrammarCodes">
    -            <value>
    -                42000,42001,42101,42102,42111,42112,42121,42122,42132
    -            </value>
    -        </property>
    -        <property name="duplicateKeyCodes">
    -            <value>23001,23505</value>
    -        </property>
    -        <property name="dataIntegrityViolationCodes">
    -            <value>22003,22012,22025,23000</value>
    -        </property>
    -        <property name="dataAccessResourceFailureCodes">
    -            <value>90046,90100,90117,90121,90126</value>
    -        </property>
    -        <property name="cannotAcquireLockCodes">
    -            <value>50200</value>
    -        </property>
    -    </bean>
    -</beans>
    -
    -

    OSGi

    The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver -and OSGI_JDBC_DRIVER_NAME=H2. +and OSGI_JDBC_DRIVER_NAME=H2 JDBC Driver. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is.

    diff --git a/h2/src/docsrc/images/favicon.ico b/h2/src/docsrc/images/favicon.ico index 6e0f78aeb1..fd5e73a416 100644 Binary files a/h2/src/docsrc/images/favicon.ico and b/h2/src/docsrc/images/favicon.ico differ diff --git a/h2/src/docsrc/images/h2-16.png b/h2/src/docsrc/images/h2-16.png index 2dee09e17f..747340ac7d 100644 Binary files a/h2/src/docsrc/images/h2-16.png and b/h2/src/docsrc/images/h2-16.png differ diff --git a/h2/src/docsrc/images/h2-24.png b/h2/src/docsrc/images/h2-24.png index 1d83623bd2..9f682d6861 100644 Binary files a/h2/src/docsrc/images/h2-24.png and b/h2/src/docsrc/images/h2-24.png differ diff --git a/h2/src/docsrc/images/h2-32.png b/h2/src/docsrc/images/h2-32.png index 7e6c3e8c9c..c7af904cf1 100644 Binary files a/h2/src/docsrc/images/h2-32.png and b/h2/src/docsrc/images/h2-32.png differ diff --git a/h2/src/docsrc/images/h2-64.png b/h2/src/docsrc/images/h2-64.png index 754cc59543..51a47e34cf 100644 Binary files a/h2/src/docsrc/images/h2-64.png and b/h2/src/docsrc/images/h2-64.png differ diff --git a/h2/src/docsrc/images/h2-logo-2.png b/h2/src/docsrc/images/h2-logo-2.png index d8025aa52d..218fe975bd 100644 Binary files a/h2/src/docsrc/images/h2-logo-2.png and b/h2/src/docsrc/images/h2-logo-2.png differ diff --git a/h2/src/docsrc/images/h2-logo.png b/h2/src/docsrc/images/h2-logo.png index 52ebd8e7f8..fb65afe0b5 100644 Binary files a/h2/src/docsrc/images/h2-logo.png and b/h2/src/docsrc/images/h2-logo.png differ diff --git a/h2/src/docsrc/images/h2-logo.svg b/h2/src/docsrc/images/h2-logo.svg index a73119867d..1beb7606f1 100644 --- a/h2/src/docsrc/images/h2-logo.svg +++ b/h2/src/docsrc/images/h2-logo.svg @@ -1,69 +1,23 @@ + + viewBox="0 0 210 297" + version="1.1" + id="svg8552" + inkscape:version="0.92.2 (5c3e80d, 2017-08-06)" + sodipodi:docname="h2-logo.svg"> - - - - - - - - - + id="defs8546" /> - - + inkscape:window-width="3840" + inkscape:window-height="2115" + inkscape:window-x="-13" + inkscape:window-y="-13" + inkscape:window-maximized="1" 
/> + id="metadata8549"> image/svg+xml + - - - - - - H2 - png - font + d="m 118.88848,142.31555 c 0.36476,-0.0389 0.72463,-0.0584 1.07965,-0.0584 1.78761,0 3.15733,0.40328 4.10924,1.20993 0.95192,0.80656 1.42793,1.95109 1.42793,3.43314 0,1.23601 -0.37064,2.48554 -1.1118,3.74954 -0.74123,1.26488 -1.99106,2.74719 -3.74961,4.44784 -1.17719,1.14758 -2.74314,2.51353 -4.69785,4.09776 -1.9547,1.58426 -3.90579,3.08101 -5.85327,4.49115 v 5.36266 h 25.48412 v -6.21254 h -14.58422 c 0.56686,-0.40687 1.51876,-1.12322 2.85581,-2.14722 1.33708,-1.02489 2.66691,-2.14813 3.98944,-3.36881 2.10727,-1.96138 3.6732,-3.86592 4.69783,-5.71091 1.02463,-1.84589 1.53689,-3.79375 1.53689,-5.84273 0,-3.12466 -1.0682,-5.54084 -3.2046,-7.24852 -2.13631,-1.70759 -5.23924,-2.56143 -9.30851,-2.56143 -0.89111,0 -1.7814,0.0438 -2.67105,0.13154 v -12.40108 h -8.53425 v 12.11228 H 97.752873 v -12.11228 h -8.533889 v 33.09188 h 8.533889 v -14.57898 h 12.601357 v 14.57896 h 0.0967 c 0.79988,-0.6198 1.60032,-1.25405 2.40138,-1.90362 1.95471,-1.58336 3.52073,-2.95019 4.69792,-4.09779 0.48476,-0.46914 0.9309,-0.92114 1.33832,-1.35691 z m -13.28694,25.83146 c -16.060472,0 -29.133317,-13.07285 -29.133317,-29.13352 0,-16.04854 13.060215,-29.07304 29.133317,-29.07304 16.06065,0 29.07314,13.01241 29.07314,29.07304 v 0.90222 h 1.8044 v -0.90222 c 0,-17.0572 -13.82032,-30.87744 -30.87754,-30.87744 -17.068227,0 -30.937713,13.83125 -30.937713,30.87744 0,17.0576 13.880313,30.93792 30.937713,30.93792 h 0.90219 v -1.8044 z" + id="path6394" + inkscape:connector-curvature="0" + style="fill:#09476b;fill-opacity:1;stroke-width:0.90219772" /> diff --git a/h2/src/docsrc/images/h2_v2_3_7.svg b/h2/src/docsrc/images/h2_v2_3_7.svg new file mode 100644 index 0000000000..c2dc03d239 --- /dev/null +++ b/h2/src/docsrc/images/h2_v2_3_7.svg @@ -0,0 +1,61 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/h2/src/docsrc/index.html b/h2/src/docsrc/index.html index 41b0314be3..2e09c2fef2 100644 --- 
a/h2/src/docsrc/index.html +++ b/h2/src/docsrc/index.html @@ -1,7 +1,7 @@ diff --git a/h2/src/docsrc/javadoc/animate.js b/h2/src/docsrc/javadoc/animate.js deleted file mode 100644 index b6ee70e9a4..0000000000 --- a/h2/src/docsrc/javadoc/animate.js +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group - */ - -function on(id) { - return switchTag(id, 'titleOff', 'detailOn'); -} - -function off(id) { - return switchTag(id, '', 'detail'); -} - -function allDetails() { - for (i = 0;; i++) { - x = document.getElementById('_' + i); - if (x == null) { - break; - } - switchTag(i, 'titleOff', 'detailOn'); - } - return false; -} - -function switchTag(id, title, detail) { - if (document.getElementById('__' + id) != null) { - document.getElementById('__' + id).className = title; - document.getElementById('_' + id).className = detail; - } - return false; -} - -function openLink() { - page = new String(self.document.location); - var pos = page.lastIndexOf("#") + 1; - if (pos == 0) { - return; - } - var ref = page.substr(pos); - link = decodeURIComponent(ref); - el = document.getElementById(link); - if (el.nodeName.toLowerCase() == 'h4') { - // constant - return true; - } - el = el.parentNode.parentNode; - window.scrollTo(0, el.offsetTop); - on(el.id.substr(2)); - return false; -} \ No newline at end of file diff --git a/h2/src/docsrc/javadoc/classes.html b/h2/src/docsrc/javadoc/classes.html deleted file mode 100644 index 56d26ae0b8..0000000000 --- a/h2/src/docsrc/javadoc/classes.html +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - - H2 Documentation - - - - - - -
    -
    - diff --git a/h2/src/docsrc/javadoc/index.html b/h2/src/docsrc/javadoc/index.html deleted file mode 100644 index 68cbb169df..0000000000 --- a/h2/src/docsrc/javadoc/index.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - H2 Documentation - - - - - - - -<body> - Sorry, Lynx is not supported -</body> - - - diff --git a/h2/src/docsrc/javadoc/overview.html b/h2/src/docsrc/javadoc/overview.html deleted file mode 100644 index c54079fbd4..0000000000 --- a/h2/src/docsrc/javadoc/overview.html +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - API Overview - - - - - -
    -
    - -

    API Overview

    - -

    JDBC API

    - -

    -Use the JDBC API to connect to a database and execute queries. -

    - -

    Tools API

    - -

    -The Tools API can be used to do maintenance operations, -such as deleting database files or changing the database file password, -that do not require a connection to the database. -

    - -
    - - diff --git a/h2/src/docsrc/javadoc/stylesheet.css b/h2/src/docsrc/javadoc/stylesheet.css deleted file mode 100644 index 5b4ff0cec6..0000000000 --- a/h2/src/docsrc/javadoc/stylesheet.css +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group - */ - -td, input, select, textarea, body, code, pre, td, th { - font: 13px/1.4 Arial, sans-serif; - font-weight: normal; -} - -pre { - background-color: #ece9d8; - border: 1px solid rgb(172, 168, 153); - padding: 4px; -} - -body { - margin: 0px; - max-width: 800px; -} - -h1 { - background-color: #0000bb; - padding: 2px 4px 2px 4px; - margin-top: 11px; - color: #fff; - font-size: 22px; - line-height: normal; -} - -h2 { - font-size: 19px; -} - -h3 { - font-size: 16px; -} - -h4 { - font-size: 13px; -} - -hr { - color: #CCC; - background-color: #CCC; - height: 1px; - border: 0px solid blue; -} - -.menu { - margin: 10px 10px 10px 10px; -} - -.block { - border: 0px; -} - -.titleOff { - display: none; -} - -.detail { - border: 0px; - display: none; -} - -.detailOn { - border: 0px; -} - -td.return { - white-space:nowrap; - width: 1%; -} - -td.method { - width: 99%; -} - -.deprecated { - text-decoration: line-through; -} - -.methodText { - color: #000000; - font-weight: normal; - margin: 0px 0px 0px 20px; -} - -.method { -} - -.fieldText { - margin: 6px 20px 6px 20px; -} - -.methodName { - font-weight: bold; -} - -.itemTitle { -} - -.item { - margin: 0px 0px 0px 20px; -} - -table { - background-color: #ffffff; - border-collapse: collapse; - border: 1px solid #aca899; -} - -th { - text-align: left; - background-color: #ece9d8; - border: 1px solid #aca899; - padding: 2px; -} - -td { - background-color: #ffffff; - text-align: left; - vertical-align:top; - border: 1px solid #aca899; - padding: 2px; -} - - -ul, ol { - list-style-position: outside; - padding-left: 20px; -} - -li { - 
margin-top: 8px; - line-height: 100%; -} - -a { - text-decoration: none; - color: #0000ff; -} - -a:hover { - text-decoration: underline; -} - -table.content { - width: 100%; - height: 100%; - border: 0px; -} - -tr.content { - border:0px; - border-left:1px solid #aca899; -} - -td.content { - border:0px; - border-left:1px solid #aca899; -} - -.contentDiv { - margin:10px; -} - - - diff --git a/h2/src/docsrc/text/_docs_en.utf8.txt b/h2/src/docsrc/text/_docs_en.utf8.txt deleted file mode 100644 index 641108d54d..0000000000 --- a/h2/src/docsrc/text/_docs_en.utf8.txt +++ /dev/null @@ -1,11985 +0,0 @@ -@advanced_1000_h1 -Advanced - -@advanced_1001_a - Result Sets - -@advanced_1002_a - Large Objects - -@advanced_1003_a - Linked Tables - -@advanced_1004_a - Spatial Features - -@advanced_1005_a - Recursive Queries - -@advanced_1006_a - Updatable Views - -@advanced_1007_a - Transaction Isolation - -@advanced_1008_a - Multi-Version Concurrency Control (MVCC) - -@advanced_1009_a - Clustering / High Availability - -@advanced_1010_a - Two Phase Commit - -@advanced_1011_a - Compatibility - -@advanced_1012_a - Standards Compliance - -@advanced_1013_a - Run as Windows Service - -@advanced_1014_a - ODBC Driver - -@advanced_1015_a - Using H2 in Microsoft .NET - -@advanced_1016_a - ACID - -@advanced_1017_a - Durability Problems - -@advanced_1018_a - Using the Recover Tool - -@advanced_1019_a - File Locking Protocols - -@advanced_1020_a - Using Passwords - -@advanced_1021_a - Password Hash - -@advanced_1022_a - Protection against SQL Injection - -@advanced_1023_a - Protection against Remote Access - -@advanced_1024_a - Restricting Class Loading and Usage - -@advanced_1025_a - Security Protocols - -@advanced_1026_a - TLS Connections - -@advanced_1027_a - Universally Unique Identifiers (UUID) - -@advanced_1028_a - Settings Read from System Properties - -@advanced_1029_a - Setting the Server Bind Address - -@advanced_1030_a - Pluggable File System - -@advanced_1031_a - Split File System - 
-@advanced_1032_a - Database Upgrade - -@advanced_1033_a - Java Objects Serialization - -@advanced_1034_a - Limits and Limitations - -@advanced_1035_a - Glossary and Links - -@advanced_1036_h2 -Result Sets - -@advanced_1037_h3 -Statements that Return a Result Set - -@advanced_1038_p - The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. - -@advanced_1039_h3 -Limiting the Number of Rows - -@advanced_1040_p - Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). - -@advanced_1041_h3 -Large Result Sets and External Sorting - -@advanced_1042_p - For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. - -@advanced_1043_h2 -Large Objects - -@advanced_1044_h3 -Storing and Reading Large Objects - -@advanced_1045_p - If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. 
- -@advanced_1046_h3 -When to use CLOB/BLOB - -@advanced_1047_p - By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. - -@advanced_1048_h3 -Large Object Compression - -@advanced_1049_p - The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. - -@advanced_1050_h2 -Linked Tables - -@advanced_1051_p - This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement: - -@advanced_1052_p - You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?. The same happens for insert and update statements. 
Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. - -@advanced_1053_p - To view the statements that are executed against the target table, set the trace level to 3. - -@advanced_1054_p - If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false. - -@advanced_1055_p - The statement CREATE LINKED TABLE supports an optional schema name parameter. - -@advanced_1056_p - The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). - -@advanced_1057_p - Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. - -@advanced_1058_h2 -Updatable Views - -@advanced_1059_p - By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows: - -@advanced_1060_p - Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. - -@advanced_1061_h2 -Transaction Isolation - -@advanced_1062_p - Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. - -@advanced_1063_p - Transaction isolation is provided for all data manipulation language (DML) statements. - -@advanced_1064_p - Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. 
Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). - -@advanced_1065_p - This database supports the following transaction isolation levels: - -@advanced_1066_b -Read Committed - -@advanced_1067_li - This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. - -@advanced_1068_li - To enable, execute the SQL statement SET LOCK_MODE 3 - -@advanced_1069_li - or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 - -@advanced_1070_b -Serializable - -@advanced_1071_li - Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 - -@advanced_1072_li - or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 - -@advanced_1073_b -Read Uncommitted - -@advanced_1074_li - This level means that transaction isolation is disabled. - -@advanced_1075_li - To enable, execute the SQL statement SET LOCK_MODE 0 - -@advanced_1076_li - or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 - -@advanced_1077_p - When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. - -@advanced_1078_b -Dirty Reads - -@advanced_1079_li - Means a connection can read uncommitted changes made by another connection. - -@advanced_1080_li - Possible with: read uncommitted - -@advanced_1081_b -Non-Repeatable Reads - -@advanced_1082_li - A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. 
- -@advanced_1083_li - Possible with: read uncommitted, read committed - -@advanced_1084_b -Phantom Reads - -@advanced_1085_li - A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. - -@advanced_1086_li - Possible with: read uncommitted, read committed - -@advanced_1087_h3 -Table Level Locking - -@advanced_1088_p - The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. - -@advanced_1089_h3 -Lock Timeout - -@advanced_1090_p - If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. 
- -@advanced_1091_h2 -Multi-Version Concurrency Control (MVCC) - -@advanced_1092_p - The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. - -@advanced_1093_p - To use the MVCC feature, append ;MVCC=TRUE to the database URL: - -@advanced_1094_p - The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. - -@advanced_1095_p - If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. - -@advanced_1096_div - The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability - -@advanced_1097_p - This database supports a simple clustering / high availability mechanism. 
The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. - -@advanced_1098_p - Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT=TRUE, they will recover from that. - -@advanced_1099_p - To initialize the cluster, use the following steps: - -@advanced_1100_li -Create a database - -@advanced_1101_li -Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. - -@advanced_1102_li -Start two servers (one for each copy of the database) - -@advanced_1103_li -You are now ready to connect to the databases with the client application(s) - -@advanced_1104_h3 -Using the CreateCluster Tool - -@advanced_1105_p - To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. - -@advanced_1106_li -Create two directories: server1, server2. Each directory will simulate a directory on a computer. - -@advanced_1107_li -Start a TCP server pointing to the first directory. You can do this using the command line: - -@advanced_1108_li -Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. 
You can do this using the command line: - -@advanced_1109_li -Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line: - -@advanced_1110_li -You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test - -@advanced_1111_li -If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. - -@advanced_1112_li -To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. - -@advanced_1113_h3 -Detect Which Cluster Instances are Running - -@advanced_1114_p - To find out which cluster nodes are currently running, execute the following SQL statement: - -@advanced_1115_p - If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example: 'server1:9191,server2:9191'. - -@advanced_1116_p - It is also possible to get the list of servers by using Connection.getClientInfo(). - -@advanced_1117_p - The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. - -@advanced_1118_p - Example: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note: The serverX property only returns IP addresses and ports and not hostnames. - -@advanced_1119_h3 -Clustering Algorithm and Limitations - -@advanced_1120_p - Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. 
There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). - -@advanced_1121_p - When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. - -@advanced_1122_p - The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. - -@advanced_1123_p - It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. - -@advanced_1124_h2 -Two Phase Commit - -@advanced_1125_p - The two phase commit protocol is supported. 
2-phase-commit works as follows: - -@advanced_1126_li -Autocommit needs to be switched off - -@advanced_1127_li -A transaction is started, for example by inserting a row - -@advanced_1128_li -The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName - -@advanced_1129_li -The transaction can now be committed or rolled back - -@advanced_1130_li -If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' - -@advanced_1131_li -When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT - -@advanced_1132_li -Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName - -@advanced_1133_li -The database needs to be closed and re-opened to apply the changes - -@advanced_1134_h2 -Compatibility - -@advanced_1135_p - This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. - -@advanced_1136_h3 -Transaction Commit when Autocommit is On - -@advanced_1137_p - At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. - -@advanced_1138_h3 -Keywords / Reserved Words - -@advanced_1139_p - There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). 
The list is currently: - -@advanced_1140_code - CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE - -@advanced_1141_p - Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. - -@advanced_1142_h2 -Standards Compliance - -@advanced_1143_p - This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. - -@advanced_1144_h3 -Supported Character Sets, Character Encoding, and Unicode - -@advanced_1145_p - H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. - -@advanced_1146_h2 -Run as Windows Service - -@advanced_1147_p - Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. - -@advanced_1148_p - The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. 
- -@advanced_1149_p - When running the database as a service, absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. - -@advanced_1150_h3 -Install the Service - -@advanced_1151_p - The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1152_h3 -Start the Service - -@advanced_1153_p - You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. - -@advanced_1154_h3 -Connect to the H2 Console - -@advanced_1155_p - After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. - -@advanced_1156_h3 -Stop the Service - -@advanced_1157_p - To stop the service, double click on 4_stop_service.bat. Please note that the batch file does not print an error message if the service is not installed or started. - -@advanced_1158_h3 -Uninstall the Service - -@advanced_1159_p - To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1160_h3 -Additional JDBC drivers - -@advanced_1161_p - To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. 
Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@advanced_1162_h2 -ODBC Driver - -@advanced_1163_p - This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. - -@advanced_1164_p - To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit - -@advanced_1165_h3 -ODBC Installation - -@advanced_1166_p - First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi. - -@advanced_1167_h3 -Starting the Server - -@advanced_1168_p - After installing the ODBC driver, start the H2 Server using the command line: - -@advanced_1169_p - The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory: - -@advanced_1170_p - The PG server can be started and stopped from within a Java application as follows: - -@advanced_1171_p - By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers when starting the server. - -@advanced_1172_p - To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. 
The following will map the ODBC database named TEST to the database URL jdbc:h2:~/data/test;cipher=aes: - -@advanced_1173_h3 -ODBC Configuration - -@advanced_1174_p - After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). - -@advanced_1175_th -Property - -@advanced_1176_th -Example - -@advanced_1177_th -Remarks - -@advanced_1178_td -Data Source - -@advanced_1179_td -H2 Test - -@advanced_1180_td -The name of the ODBC Data Source - -@advanced_1181_td -Database - -@advanced_1182_td -~/test;ifexists=true - -@advanced_1183_td - The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. - -@advanced_1184_td -Servername - -@advanced_1185_td -localhost - -@advanced_1186_td -The server name or IP address. - -@advanced_1187_td -By default, only remote connections are allowed - -@advanced_1188_td -Username - -@advanced_1189_td -sa - -@advanced_1190_td -The database user name. - -@advanced_1191_td -SSL - -@advanced_1192_td -false (disabled) - -@advanced_1193_td -At this time, SSL is not supported. - -@advanced_1194_td -Port - -@advanced_1195_td -5435 - -@advanced_1196_td -The port where the PG Server is listening. - -@advanced_1197_td -Password - -@advanced_1198_td -sa - -@advanced_1199_td -The database password. - -@advanced_1200_p - To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. - -@advanced_1201_p - Afterwards, you may use this data source. 
- -@advanced_1202_h3 -PG Protocol Support Limitations - -@advanced_1203_p - At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. - -@advanced_1204_p - PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. - -@advanced_1205_h3 -Security Considerations - -@advanced_1206_p - Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. - -@advanced_1207_p - The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. - -@advanced_1208_h3 -Using Microsoft Access - -@advanced_1209_p - When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields. - -@advanced_1210_h2 -Using H2 in Microsoft .NET - -@advanced_1211_p - The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. - -@advanced_1212_h3 -Using the ADO.NET API on .NET - -@advanced_1213_p - An implementation of the ADO.NET interface is available in the open source project H2Sharp. 
- -@advanced_1214_h3 -Using the JDBC API on .NET - -@advanced_1215_li -Install the .NET Framework from Microsoft. Mono has not yet been tested. - -@advanced_1216_li -Install IKVM.NET. - -@advanced_1217_li -Copy the h2*.jar file to ikvm/bin - -@advanced_1218_li -Run the H2 Console using: ikvm -jar h2*.jar - -@advanced_1219_li -Convert the H2 Console to an .exe file using: ikvmc -target:winexe h2*.jar. You may ignore the warnings. - -@advanced_1220_li -Create a .dll file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar - -@advanced_1221_p - If you want your C# application to use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here is some sample code: - -@advanced_1222_h2 -ACID - -@advanced_1223_p - In the database world, ACID stands for: - -@advanced_1224_li -Atomicity: transactions must be atomic, meaning either all tasks are performed or none. - -@advanced_1225_li -Consistency: all operations must comply with the defined constraints. - -@advanced_1226_li -Isolation: transactions must be isolated from each other. - -@advanced_1227_li -Durability: committed transactions will not be lost. - -@advanced_1228_h3 -Atomicity - -@advanced_1229_p - Transactions in this database are always atomic. - -@advanced_1230_h3 -Consistency - -@advanced_1231_p - By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. - -@advanced_1232_h3 -Isolation - -@advanced_1233_p - For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. - -@advanced_1234_h3 -Durability - -@advanced_1235_p - This database does not guarantee that all committed transactions survive a power failure. 
Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. - -@advanced_1236_h2 -Durability Problems - -@advanced_1237_p - Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. - -@advanced_1238_h3 -Ways to (Not) Achieve Durability - -@advanced_1239_p - Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd: - -@advanced_1240_code -rwd - -@advanced_1241_li -: every update to the file's content is written synchronously to the underlying storage device. - -@advanced_1242_code -rws - -@advanced_1243_li -: in addition to rwd, every update to the metadata is written synchronously. - -@advanced_1244_p - A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. 
If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. - -@advanced_1245_p - Calling fsync flushes the buffers. There are two ways to do that in Java: - -@advanced_1246_code -FileDescriptor.sync() - -@advanced_1247_li -. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. - -@advanced_1248_code -FileChannel.force() - -@advanced_1249_li -. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. - -@advanced_1250_p - By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. - -@advanced_1251_p - Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. 
- -@advanced_1252_p - In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. - -@advanced_1253_h3 -Running the Durability Test - -@advanced_1254_p - To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then creates the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. - -@advanced_1255_h2 -Using the Recover Tool - -@advanced_1256_p - The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line: - -@advanced_1257_p - For each database in the current directory, a text file will be created. 
This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. - -@advanced_1258_p - The Recover tool creates a SQL script from database file. It also processes the transaction log. - -@advanced_1259_p - To verify the database can recover at any time, append ;RECOVER_TEST=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. - -@advanced_1260_h2 -File Locking Protocols - -@advanced_1261_p - Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. - -@advanced_1262_p - In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. 
However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket method'. - -@advanced_1263_p - The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. - -@advanced_1264_h3 -File Locking Method 'File' - -@advanced_1265_p - The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is: - -@advanced_1266_li -If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers. - -@advanced_1267_li - If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (every second once by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. 
The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. - -@advanced_1268_li - If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, it will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. - -@advanced_1269_p - This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. However, using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. - -@advanced_1270_h3 -File Locking Method 'Socket' - -@advanced_1271_p - There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET to the database URL. The algorithm is: - -@advanced_1272_li -If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. 
The port and IP address of the process that opened the database is written into the lock file. - -@advanced_1273_li -If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. - -@advanced_1274_li -If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. - -@advanced_1275_p - This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. - -@advanced_1276_h3 -File Locking Method 'FS' - -@advanced_1277_p - This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow to lock the same file multiple times within the same virtual machine, and on some system native file locking is not supported or files are not unlocked after a power failure. - -@advanced_1278_p - To enable this feature, append ;FILE_LOCK=FS to the database URL. - -@advanced_1279_p - This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. - -@advanced_1280_h2 -Using Passwords - -@advanced_1281_h3 -Using Secure Passwords - -@advanced_1282_p - Remember that weak passwords can be broken regardless of the encryption and security protocols. 
Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example: - -@advanced_1283_code -i'sE2rtPiUKtT - -@advanced_1284_p - from the sentence it's easy to remember this password if you know the trick. - -@advanced_1285_h3 -Passwords: Using Char Arrays instead of Strings - -@advanced_1286_p - Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. - -@advanced_1287_p - It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. - -@advanced_1288_p - This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that: - -@advanced_1289_p - This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. - -@advanced_1290_h3 -Passing the User Name and/or Password in the URL - -@advanced_1291_p - Instead of passing the user name as a separate parameter as in Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager. 
getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123"); The settings in the URL override the settings passed as a separate parameter. - -@advanced_1292_h2 -Password Hash - -@advanced_1293_p - Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. - -@advanced_1294_p - To connect using the password hash instead of plain text password, append ;PASSWORD_HASH=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run: @password_hash file <filePassword>. - -@advanced_1295_h2 -Protection against SQL Injection - -@advanced_1296_h3 -What is SQL Injection - -@advanced_1297_p - This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. 
Some applications build SQL statements with embedded user input such as: - -@advanced_1298_p - If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='. In this case the statement becomes: - -@advanced_1299_p - Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. - -@advanced_1300_h3 -Disabling Literals - -@advanced_1301_p - SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement: - -@advanced_1302_p - This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement: - -@advanced_1303_p - Afterwards, SQL statements with text and number literals are not allowed any more. That means SQL statements of the form WHERE NAME='abc' or WHERE CustomerId=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator. - -@advanced_1304_h3 -Using Constants - -@advanced_1305_p - Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. 
To avoid name clashes with column names, constants can be defined in other schemas: - -@advanced_1306_p - Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. - -@advanced_1307_h3 -Using the ZERO() Function - -@advanced_1308_p - It is not required to create a constant for the number 0 as there is already a built-in function ZERO(): - -@advanced_1309_h2 -Protection against Remote Access - -@advanced_1310_p - By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. - -@advanced_1311_p - If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. - -@advanced_1312_p - If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. - -@advanced_1313_h2 -Restricting Class Loading and Usage - -@advanced_1314_p - By default there is no restriction on loading classes and executing Java code for admins. 
That means an admin may call system functions such as System.setProperty by executing: - -@advanced_1315_p - To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example: - -@advanced_1316_p - This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. - -@advanced_1317_h2 -Security Protocols - -@advanced_1318_p - The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. - -@advanced_1319_h3 -User Password Encryption - -@advanced_1320_p - When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information. - -@advanced_1321_p - When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. 
- -@advanced_1322_p - The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. - -@advanced_1323_h3 -File Encryption - -@advanced_1324_p - The database files can be encrypted using the AES-128 algorithm. - -@advanced_1325_p - When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. - -@advanced_1326_p - When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. - -@advanced_1327_p - The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks. 
- -@advanced_1328_p - Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. - -@advanced_1329_p - When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated and combined with the decrypted text using XOR. - -@advanced_1330_p - Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. - -@advanced_1331_p - Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. With write access, an attacker can for example replace pieces of files with pieces of older versions and manipulate data like this. - -@advanced_1332_p - File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). - -@advanced_1333_h3 -Wrong Password / User Name Delay - -@advanced_1334_p - To protect against remote brute force password attacks, the delay after each unsuccessful login gets twice as long. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. - -@advanced_1335_p - There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. - -@advanced_1336_h3 -HTTPS Connections - -@advanced_1337_p - The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. - -@advanced_1338_h2 -TLS Connections - -@advanced_1339_p - Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. - -@advanced_1340_p - To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. - -@advanced_1341_p - To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. - -@advanced_1342_h2 -Universally Unique Identifiers (UUID) - -@advanced_1343_p - This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 
4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values: - -@advanced_1344_p - Some values are: - -@advanced_1345_th -Number of UUIDs - -@advanced_1346_th -Probability of Duplicates - -@advanced_1347_td -2^36=68'719'476'736 - -@advanced_1348_td -0.000'000'000'000'000'4 - -@advanced_1349_td -2^41=2'199'023'255'552 - -@advanced_1350_td -0.000'000'000'000'4 - -@advanced_1351_td -2^46=70'368'744'177'664 - -@advanced_1352_td -0.000'000'000'4 - -@advanced_1353_p - To help non-mathematicians understand what those numbers mean, here is a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. - -@advanced_1354_h2 -Spatial Features - -@advanced_1355_p - H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows: - -@advanced_1356_p - Here is an example SQL script to create a table with a spatial column and index: - -@advanced_1357_p - To query the table using geometry envelope intersection, use the operation &&, as in PostGIS: - -@advanced_1358_p - You can verify that the spatial index is used using the "explain plan" feature: - -@advanced_1359_p - For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. - -@advanced_1360_h2 -Recursive Queries - -@advanced_1361_p - H2 has experimental support for recursive queries using so called "common table expressions" (CTE). 
Examples: - -@advanced_1362_p - Limitations: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is: - -@advanced_1363_h2 -Settings Read from System Properties - -@advanced_1364_p - Some settings of the database can be set on the command line using -DpropertyName=value. It is usually not required to change those settings manually. The settings are case sensitive. Example: - -@advanced_1365_p - The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. - -@advanced_1366_p - For a complete list of settings, see SysProperties. - -@advanced_1367_h2 -Setting the Server Bind Address - -@advanced_1368_p - Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. - -@advanced_1369_h2 -Pluggable File System - -@advanced_1370_p - This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. 
The following file systems are included: - -@advanced_1371_code -zip: - -@advanced_1372_li - read-only zip-file based file system. Format: zip:/zipFileName!/fileName. - -@advanced_1373_code -split: - -@advanced_1374_li - file system that splits files into 1 GB files (stackable with other file systems). - -@advanced_1375_code -nio: - -@advanced_1376_li - file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). - -@advanced_1377_code -nioMapped: - -@advanced_1378_li - file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system when using a 32-bit JVM. To work around this limitation, combine it with the split file system: split:nioMapped:test. - -@advanced_1379_code -memFS: - -@advanced_1380_li - in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). - -@advanced_1381_code -memLZF: - -@advanced_1382_li - compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). - -@advanced_1383_p - As an example, to use the nio file system, use the following database URL: jdbc:h2:nio:~/test. - -@advanced_1384_p - To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. - -@advanced_1385_p - For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath:, as in classpath:/org/h2/samples/newsfeed.sql. 
- -@advanced_1386_h2 -Split File System - -@advanced_1387_p - The file system prefix split: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows: - -@advanced_1388_code -<fileName> - -@advanced_1389_li - (first block, is always created) - -@advanced_1390_code -<fileName>.1.part - -@advanced_1391_li - (second block) - -@advanced_1392_p - More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test. - -@advanced_1393_h2 -Database Upgrade - -@advanced_1394_p - In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. - -@advanced_1395_p - The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from - -@advanced_1396_code -dbName.data.db - -@advanced_1397_li - to dbName.data.db.backup - -@advanced_1398_code -dbName.index.db - -@advanced_1399_li - to dbName.index.db.backup - -@advanced_1400_p - by default. 
Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via - -@advanced_1401_code -org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) - -@advanced_1402_code -org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) - -@advanced_1403_p - prior to opening a database connection. - -@advanced_1404_p - Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. - -@advanced_1405_h2 -Java Objects Serialization - -@advanced_1406_p - Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. - -@advanced_1407_p - To disable this feature set the system property h2.serializeJavaObject=false (default: true). - -@advanced_1408_p - Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation: - -@advanced_1409_li - At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer=com.acme.SerializerClassName. 
- -@advanced_1410_li - At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName' to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'. - -@advanced_1411_p - Please note that this SQL statement can only be executed before any tables are defined. - -@advanced_1412_h2 -Limits and Limitations - -@advanced_1413_p - This database has the following known limitations: - -@advanced_1414_li -Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. - -@advanced_1415_li -The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split:. In that case files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test. - -@advanced_1416_li -The maximum number of rows per table is 2^64. - -@advanced_1417_li -The maximum number of open transactions is 65535. - -@advanced_1418_li -Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. - -@advanced_1419_li -Limit on the complexity of SQL statements. 
Statements of the following form will result in a stack overflow exception: - -@advanced_1420_li -There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. - -@advanced_1421_li -Querying from the metadata tables is slow if there are many tables (thousands). - -@advanced_1422_li -For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. - -@advanced_1423_h2 -Glossary and Links - -@advanced_1424_th -Term - -@advanced_1425_th -Description - -@advanced_1426_td -AES-128 - -@advanced_1427_td -A block encryption algorithm. See also: Wikipedia: AES - -@advanced_1428_td -Birthday Paradox - -@advanced_1429_td -Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox - -@advanced_1430_td -Digest - -@advanced_1431_td -Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication - -@advanced_1432_td -GCJ - -@advanced_1433_td -Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) - -@advanced_1434_td -HTTPS - -@advanced_1435_td -A protocol to provide security to HTTP connections. 
See also: RFC 2818: HTTP Over TLS - -@advanced_1436_td -Modes of Operation - -@advanced_1437_a -Wikipedia: Block cipher modes of operation - -@advanced_1438_td -Salt - -@advanced_1439_td -Random number to increase the security of passwords. See also: Wikipedia: Key derivation function - -@advanced_1440_td -SHA-256 - -@advanced_1441_td -A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions - -@advanced_1442_td -SQL Injection - -@advanced_1443_td -A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: SQL Injection - -@advanced_1444_td -Watermark Attack - -@advanced_1445_td -Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop' - -@advanced_1446_td -SSL/TLS - -@advanced_1447_td -Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE) - -@architecture_1000_h1 -Architecture - -@architecture_1001_a - Introduction - -@architecture_1002_a - Top-down overview - -@architecture_1003_a - JDBC driver - -@architecture_1004_a - Connection/session management - -@architecture_1005_a - Command execution and planning - -@architecture_1006_a - Table/index/constraints - -@architecture_1007_a - Undo log, redo log, and transactions layer - -@architecture_1008_a - B-tree engine and page-based storage allocation - -@architecture_1009_a - Filesystem abstraction - -@architecture_1010_h2 -Introduction - -@architecture_1011_p - H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. - -@architecture_1012_p - As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. 
- -@architecture_1013_h2 -Top-down Overview - -@architecture_1014_p - Working from the top down, the layers look like this: - -@architecture_1015_li -JDBC driver. - -@architecture_1016_li -Connection/session management. - -@architecture_1017_li -SQL Parser. - -@architecture_1018_li -Command execution and planning. - -@architecture_1019_li -Table/Index/Constraints. - -@architecture_1020_li -Undo log, redo log, and transactions layer. - -@architecture_1021_li -B-tree engine and page-based storage allocation. - -@architecture_1022_li -Filesystem abstraction. - -@architecture_1023_h2 -JDBC Driver - -@architecture_1024_p - The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx - -@architecture_1025_h2 -Connection/session management - -@architecture_1026_p - The primary classes of interest are: - -@architecture_1027_th -Package - -@architecture_1028_th -Description - -@architecture_1029_td -org.h2.engine.Database - -@architecture_1030_td -the root/global class - -@architecture_1031_td -org.h2.engine.SessionInterface - -@architecture_1032_td -abstracts over the differences between embedded and remote sessions - -@architecture_1033_td -org.h2.engine.Session - -@architecture_1034_td -local/embedded session - -@architecture_1035_td -org.h2.engine.SessionRemote - -@architecture_1036_td -remote session - -@architecture_1037_h2 -Parser - -@architecture_1038_p - The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. - -@architecture_1039_p - See Wikipedia Recursive-descent parser page. - -@architecture_1040_h2 -Command execution and planning - -@architecture_1041_p - Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. 
The primary packages of interest are: - -@architecture_1042_th -Package - -@architecture_1043_th -Description - -@architecture_1044_td -org.h2.command.ddl - -@architecture_1045_td -Commands that modify schema data structures - -@architecture_1046_td -org.h2.command.dml - -@architecture_1047_td -Commands that modify data - -@architecture_1048_h2 -Table/Index/Constraints - -@architecture_1049_p - One thing to note here is that indexes are simply stored as special kinds of tables. - -@architecture_1050_p - The primary packages of interest are: - -@architecture_1051_th -Package - -@architecture_1052_th -Description - -@architecture_1053_td -org.h2.table - -@architecture_1054_td -Implementations of different kinds of tables - -@architecture_1055_td -org.h2.index - -@architecture_1056_td -Implementations of different kinds of indices - -@architecture_1057_h2 -Undo log, redo log, and transactions layer - -@architecture_1058_p - We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log http://h2database.com/html/grammar.html#set_log - -@architecture_1059_p - We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to roll back a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory). - -@architecture_1060_p - With the MVStore, this is no longer needed (just the transaction log). - -@architecture_1061_h2 -B-tree engine and page-based storage allocation. - -@architecture_1062_p - The primary package of interest is org.h2.store. - -@architecture_1063_p - This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. - -@architecture_1064_h2 -Filesystem abstraction. - -@architecture_1065_p - The primary class of interest is org.h2.store.FileStore. 
- -@architecture_1066_p - This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. - -@build_1000_h1 -Build - -@build_1001_a - Portability - -@build_1002_a - Environment - -@build_1003_a - Building the Software - -@build_1004_a - Build Targets - -@build_1005_a - Using Maven 2 - -@build_1006_a - Using Eclipse - -@build_1007_a - Translating - -@build_1008_a - Providing Patches - -@build_1009_a - Reporting Problems or Requests - -@build_1010_a - Automated Build - -@build_1011_a - Generating Railroad Diagrams - -@build_1012_h2 -Portability - -@build_1013_p - This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. - -@build_1014_h2 -Environment - -@build_1015_p - To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required. - -@build_1016_p - To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. - -@build_1017_li -Mac OS X and Windows - -@build_1018_a -Sun JDK Version 1.6 and 1.7 - -@build_1019_a -Eclipse - -@build_1020_li -Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage - -@build_1021_a -Emma Java Code Coverage - -@build_1022_a -Mozilla Firefox - -@build_1023_a -OpenOffice - -@build_1024_a -NSIS - -@build_1025_li - (Nullsoft Scriptable Install System) - -@build_1026_a -Maven - -@build_1027_h2 -Building the Software - -@build_1028_p - You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: - -@build_1029_p - For Linux and OS X, use ./build.sh instead of build. 
- -@build_1030_p - You will get a list of targets. If you want to build the jar file, execute (Windows): - -@build_1031_p - To run the build tool in shell mode, use the command line option - as in ./build.sh -. - -@build_1032_h3 -Switching the Source Code - -@build_1033_p - The source code uses Java 1.6 features. To switch the source code to the installed version of Java, run: - -@build_1034_h2 -Build Targets - -@build_1035_p - The build system can generate smaller jar files as well. The following targets are currently supported: - -@build_1036_code -jarClient - -@build_1037_li - creates the file h2client.jar. This only contains the JDBC client. - -@build_1038_code -jarSmall - -@build_1039_li - creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. - -@build_1040_code -jarJaqu - -@build_1041_li - creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. - -@build_1042_code -javadocImpl - -@build_1043_li - creates the Javadocs of the implementation. - -@build_1044_p - To create the file h2client.jar, go to the directory h2 and execute the following command: - -@build_1045_h3 -Using Lucene 2 / 3 - -@build_1046_p - Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows: - -@build_1047_h2 -Using Maven 2 - -@build_1048_h3 -Using a Central Repository - -@build_1049_p - You can include the database in your Maven 2 project as a dependency. Example: - -@build_1050_p - New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. 
- -@build_1051_h3 -Maven Plugin to Start and Stop the TCP Server - -@build_1052_p - A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. To start the H2 server, use: - -@build_1053_p - To stop the H2 server, use: - -@build_1054_h3 -Using Snapshot Version - -@build_1055_p - To build a h2-*-SNAPSHOT.jar file and upload it the to the local Maven 2 repository, execute the following command: - -@build_1056_p - Afterwards, you can include the database in your Maven 2 project as a dependency: - -@build_1057_h2 -Using Eclipse - -@build_1058_p - To create an Eclipse project for H2, use the following steps: - -@build_1059_li -Install Subversion and Eclipse. - -@build_1060_li -Get the H2 source code from the Subversion repository: - -@build_1061_code -svn checkout http://h2database.googlecode.com/svn/trunk h2database-read-only - -@build_1062_li -Download all dependencies (Windows): - -@build_1063_code -build.bat download - -@build_1064_li -In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source. - -@build_1065_li -Select the h2 folder, click Next and Finish. - -@build_1066_li -To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. - -@build_1067_h2 -Translating - -@build_1068_p - The translation of this software is split into the following parts: - -@build_1069_li -H2 Console: src/main/org/h2/server/web/res/_text_*.prop - -@build_1070_li -Error messages: src/main/org/h2/res/_messages_*.prop - -@build_1071_p - To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. 
- -@build_1072_h2 -Providing Patches - -@build_1073_p - If you would like to provide patches, please consider the following guidelines to simplify merging them: - -@build_1074_li -Only use Java 6 features (do not use Java 7) (see Environment). - -@build_1075_li -Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. - -@build_1076_li -A template of the Eclipse settings is in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. - -@build_1077_li -Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. - -@build_1078_li -The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above), or use the build target coverage. - -@build_1079_li -Verify that you did not break other features: run the test cases by executing build test. - -@build_1080_li -Provide end user documentation if required (src/docsrc/html/*). - -@build_1081_li -Document grammar changes in src/docsrc/help/help.csv - -@build_1082_li -Provide a change log entry (src/docsrc/html/changelog.html). - -@build_1083_li -Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. - -@build_1084_li -Run src/installer/buildRelease to find and fix formatting errors. - -@build_1085_li -Verify the formatting using build docs and build javadoc. - -@build_1086_li -Submit patches as .patch files (compressed if big). To create a patch using Eclipse, use Team / Create Patch. 
- -@build_1087_p - For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement: - -@build_1088_p - "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)." - -@build_1089_h2 -Reporting Problems or Requests - -@build_1090_p - Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request: - -@build_1091_li -For bug reports, please provide a short, self contained, correct (compilable), example of the problem. - -@build_1092_li -Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. - -@build_1093_li -Before posting problems, check the FAQ and do a Google search. - -@build_1094_li -When you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). - -@build_1095_li -When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. - -@build_1096_li -For large attachments, use a public temporary storage such as Rapidshare. 
- -@build_1097_li -Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only a few people monitor the issue tracking system. - -@build_1098_li -For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). - -@build_1099_li -It may take a few days to get an answer. Please do not double post. - -@build_1100_h2 -Automated Build - -@build_1101_p - This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. The last results are available here: - -@build_1102_a -Test Output - -@build_1103_a -Code Coverage Summary - -@build_1104_a -Code Coverage Details (download, 1.3 MB) - -@build_1105_a -Build Newsfeed - -@build_1106_a -Latest Jar File (download, 1 MB) - -@build_1107_h2 -Generating Railroad Diagrams - -@build_1108_p - The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows: - -@build_1109_li -The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. - -@build_1110_li -The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. - -@build_1111_li -The rail images (one straight, four junctions, two turns) are generated using a simple Java application. - -@build_1112_p - To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. 
- -@changelog_1000_h1 -Change Log - -@changelog_1001_h2 -Next Version (unreleased) - -@changelog_1002_li -- - -@changelog_1003_h2 -Version 1.4.187 Beta (2015-04-10) - -@changelog_1004_li -MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. - -@changelog_1005_li -Results with CLOB or BLOB data are no longer reused. - -@changelog_1006_li -References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. - -@changelog_1007_li -MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily. - -@changelog_1008_li -Issue 610: possible integer overflow in WriteBuffer.grow(). - -@changelog_1009_li -Issue 609: the spatial index did not support NULL (ClassCastException). - -@changelog_1010_li -MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. - -@changelog_1011_li -MVStore: updates that affected many rows were slow in some cases if there was a secondary index. - -@changelog_1012_li -Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". - -@changelog_1013_li -Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". - -@changelog_1014_li -When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. - -@changelog_1015_li -Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. 
- -@changelog_1016_li -Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x". - -@changelog_1017_li -The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. - -@changelog_1018_li -Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by". - -@changelog_1019_li -The LIRS cache could grow larger than the allocated memory. - -@changelog_1020_li -A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. - -@changelog_1021_li -MVStore: use RandomAccessFile file system if the file name starts with "file:". - -@changelog_1022_li -Allow DATEADD to take a long value for count when manipulating milliseconds. - -@changelog_1023_li -When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. - -@changelog_1024_li -Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception. - -@changelog_1025_li -Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. - -@changelog_1026_li -Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. - -@changelog_1027_li -Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. - -@changelog_1028_h2 -Version 1.4.186 Beta (2015-03-02) - -@changelog_1029_li -The Servlet API 3.0.1 is now used, instead of 2.4. - -@changelog_1030_li -MVStore: old chunks no longer removed in append-only mode. 
- -@changelog_1031_li -MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases. - -@changelog_1032_li -MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. - -@changelog_1033_li -MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). - -@changelog_1034_li -MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception. - -@changelog_1035_li -StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name. - -@changelog_1036_li -MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). - -@changelog_1037_li -The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. - -@changelog_1038_li -Tables without columns didn't work. (The use case for such tables is testing.) - -@changelog_1039_li -The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. - -@changelog_1040_li -Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. - -@changelog_1041_li -In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1 - -@changelog_1042_li -Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values. - -@changelog_1043_li -Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. 
- -@changelog_1044_li -Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). - -@changelog_1045_li -PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang. - -@changelog_1046_li -Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. - -@changelog_1047_h2 -Version 1.4.185 Beta (2015-01-16) - -@changelog_1048_li -In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x; - -@changelog_1049_li -New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. - -@changelog_1050_li -Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException. - -@changelog_1051_li -Issue 594: Profiler.copyInThread does not work properly. - -@changelog_1052_li -Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). - -@changelog_1053_li -Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. - -@changelog_1054_li -Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. - -@changelog_1055_li -Issue 552: Implement BIT_AND and BIT_OR aggregate functions. - -@changelog_1056_h2 -Version 1.4.184 Beta (2014-12-19) - -@changelog_1057_li -In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables. 
- -@changelog_1058_li -MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. - -@changelog_1059_li -Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. - -@changelog_1060_li -MVStore: if there is an exception while saving, the store is now in all cases immediately closed. - -@changelog_1061_li -MVStore: the dump tool could go into an endless loop for some files. - -@changelog_1062_li -MVStore: recovery for a database with many CLOB or BLOB entries is now much faster. - -@changelog_1063_li -Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a" - -@changelog_1064_li -Auto-server mode: the host name is now stored in the .lock.db file. - -@changelog_1065_h2 -Version 1.4.183 Beta (2014-12-13) - -@changelog_1066_li -MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. - -@changelog_1067_li -The built-in functions "power" and "radians" now always return a double. - -@changelog_1068_li -Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1 - -@changelog_1069_li -MVStore: the Recover tool can now deal with more types of corruption in the file. - -@changelog_1070_li -MVStore: the TransactionStore now first needs to be initialized before it can be used. - -@changelog_1071_li -Views and derived tables with equality and range conditions on the same columns did not work properly. example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1 - -@changelog_1072_li -The database URL setting PAGE_SIZE setting is now also used for the MVStore. 
- -@changelog_1073_li -MVStore: the default page split size for persistent stores is now 4096 (it was 16 KB so far). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). - -@changelog_1074_li -With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. - -@changelog_1075_li -MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly. - -@changelog_1076_li -In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. - -@changelog_1077_li -In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). - -@changelog_1078_li -Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). - -@changelog_1079_li -The MVStoreTool could throw an IllegalArgumentException. - -@changelog_1080_li -Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. - -@changelog_1081_li -H2 Console: the built-in web server did not work properly if an unknown file was requested. - -@changelog_1082_li -MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. - -@changelog_1083_li -MVStore: support for concurrent reads and writes is now enabled by default. - -@changelog_1084_li -Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. - -@changelog_1085_li -H2 Console and server mode: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". 
The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. - -@changelog_1086_li -MVStore: the R-tree did not correctly measure the memory usage. - -@changelog_1087_li -MVStore: compacting a store with an R-tree did not always work. - -@changelog_1088_li -Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false - -@changelog_1089_li -Fix bug which could generate deadlocks when multiple connections accessed the same table. - -@changelog_1090_li -Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command - -@changelog_1091_li -Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations - -@changelog_1092_li -Fix "USE schema" command for MySQL compatibility, patch by mfulton - -@changelog_1093_li -Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton - -@changelog_1094_h2 -Version 1.4.182 Beta (2014-10-17) - -@changelog_1095_li -MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects. - -@changelog_1096_li -OSGi: the MVStore packages are now exported. - -@changelog_1097_li -With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. - -@changelog_1098_li -When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. - -@changelog_1099_li -In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. - -@changelog_1100_li -DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. 
- -@changelog_1101_li -Issue 584: the error message for a wrong sequence definition was wrong. - -@changelog_1102_li -CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. - -@changelog_1103_li -Descending indexes on MVStore tables did not work properly. - -@changelog_1104_li -Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. - -@changelog_1105_li -Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. - -@changelog_1106_li -The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. - -@changelog_1107_li -Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. - -@changelog_1108_li -Issue 572: MySQL compatibility for "order by" in update statements. - -@changelog_1109_li -The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1110_h2 -Version 1.4.181 Beta (2014-08-06) - -@changelog_1111_li -Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch! - -@changelog_1112_li -Writing to the trace file is now faster, specially with the debug level. - -@changelog_1113_li -The database option "defrag_always=true" did not work with the MVStore. - -@changelog_1114_li -The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. 
The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1115_li -File system abstraction: support replacing existing files using move (currently not for Windows). - -@changelog_1116_li -The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. - -@changelog_1117_li -The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome! - -@changelog_1118_li -Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). - -@changelog_1119_li -Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. - -@changelog_1120_li -Handle tabs like 4 spaces in web console, patch by Martin Grajcar. - -@changelog_1121_li -Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. - -@changelog_1122_h2 -Version 1.4.180 Beta (2014-07-13) - -@changelog_1123_li -MVStore: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress. - -@changelog_1124_li -Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. 
- -@changelog_1125_li -MVStore: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. - -@changelog_1126_li -The LIRS cache now re-sizes the internal hash map if needed. - -@changelog_1127_li -Optionally persist session history in the H2 console. (patch from Martin Grajcar) - -@changelog_1128_li -Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) - -@changelog_1129_li -Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). - -@changelog_1130_li -Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation. - -@changelog_1131_h2 -Version 1.4.179 Beta (2014-06-23) - -@changelog_1132_li -The license was changed to MPL 2.0 (from 1.0) and EPL 1.0. - -@changelog_1133_li -Issue 565: MVStore: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException. - -@changelog_1134_li -MVStore: reduced dependencies to other H2 classes. - -@changelog_1135_li -There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contains queries. This is now detected, and creating the table is prohibited. In future versions of H2, most likely creating references to other tables will no longer be supported because of such problems. - -@changelog_1136_li -MVStore: descending indexes with "nulls first" did not work as expected (null was ordered last). - -@changelog_1137_li -Large result sets now always create temporary tables instead of temporary files. 
- -@changelog_1138_li -When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets). - -@changelog_1139_li -If a database file in the PageStore file format exists, this file and this mode is now used, even if the database URL does not contain "MV_STORE=FALSE". If a MVStore file exists, it is used. - -@changelog_1140_li -Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176: Referential integrity constraints sometimes used the wrong index. - -@changelog_1141_li -MVStore: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class. - -@changelog_1142_li -Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly. - -@changelog_1143_li -The license has changed to MPL 2.0 + EPL 1.0. - -@changelog_1144_li -MVStore: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException. - -@changelog_1145_li -Issue 566: MVStore: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken index needs to be re-built). - -@changelog_1146_li -MVStore: creating secondary indexes on large tables results in missing rows in the index. - -@changelog_1147_li -Metadata: the password of linked tables is now only visible for admin users. - -@changelog_1148_li -For Windows, database URLs of the form "jdbc:h2:/test" were considered relative and did not work unless the system property "h2.implicitRelativePath" was used. - -@changelog_1149_li -Windows: using a base directory of "C:/" and similar did not work as expected. 
- -@changelog_1150_li -Follow JDBC specification on Procedures MetaData, use P0 as return type of procedure. - -@changelog_1151_li -Issue 531: IDENTITY ignored for added column. - -@changelog_1152_li -FileSystem: improve exception throwing compatibility with JDK - -@changelog_1153_li -Spatial Index: adjust costs so we do not use the spatial index if the query does not contain an intersects operator. - -@changelog_1154_li -Fix multi-threaded deadlock when using a View that includes a TableFunction. - -@changelog_1155_li -Fix bug in dividing very-small BigDecimal numbers. - -@changelog_1156_h2 -Version 1.4.178 Beta (2014-05-02) - -@changelog_1157_li -Issue 559: Make dependency on org.osgi.service.jdbc optional. - -@changelog_1158_li -Improve error message when the user specifies an unsupported combination of database settings. - -@changelog_1159_li -MVStore: in the multi-threaded mode, NullPointerException and other exceptions could occur. - -@changelog_1160_li -MVStore: some database file could not be compacted due to a bug in the bookkeeping of the fill rate. Also, database file were compacted quite slowly. This has been improved; but more changes in this area are expected. - -@changelog_1161_li -MVStore: support for volatile maps (that don't store changes). - -@changelog_1162_li -MVStore mode: in-memory databases now also use the MVStore. - -@changelog_1163_li -In server mode, appending ";autocommit=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong. - -@changelog_1164_li -Issue 561: OSGi: the import package declaration of org.h2 excluded version 1.4. - -@changelog_1165_li -Issue 558: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob). - -@changelog_1166_li -Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented. 
- -@changelog_1167_li -Issue 554: Web Console in an IFrame was not fully supported. - -@changelog_1168_h2 -Version 1.4.177 Beta (2014-04-12) - -@changelog_1169_li -By default, the MV_STORE option is enabled, so it is using the new MVStore storage. The MVCC setting is by default set to the same values as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE=FALSE" and/or ";MVCC=FALSE" to the database URL. - -@changelog_1170_li -The file locking method 'serialized' is no longer supported. This mode might return in a future version, however this is not clear right now. A new implementation and new tests would be needed. - -@changelog_1171_li -Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode, this is always enabled, but with version 1.4 this is even enabled in the PageStore mode. - -@changelog_1172_li -Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc:h2:test now needs to be written as jdbc:h2:./test. - -@changelog_1173_li -"select ... fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch. - -@changelog_1174_li -Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation"). - -@changelog_1175_li -Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead. - -@changelog_1176_li -Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin". - -@changelog_1177_li -Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility. - -@changelog_1178_li -Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier. 
- -@changelog_1179_h2 -Version 1.3.176 (2014-04-05) - -@changelog_1180_li -The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4. - -@changelog_1181_li -The static method Csv.getInstance() was removed. Use the public constructor instead. - -@changelog_1182_li -The default user name for the Script, RunScript, Shell, and CreateCluster tools are no longer "sa" but an empty string. - -@changelog_1183_li -The stack trace of the exception "The object is already closed" is no longer logged by default. - -@changelog_1184_li -If a value of a result set was itself a result set, the result could only be read once. - -@changelog_1185_li -Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS). - -@changelog_1186_li -Granting a additional right to a role that already had a right for that table was not working. - -@changelog_1187_li -Spatial index: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed). - -@changelog_1188_li -Issue 551: the datatype documentation was incorrect (found by Bernd Eckenfels). - -@changelog_1189_li -Issue 368: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald. - -@changelog_1190_li -OSGi: the package javax.tools is now imported (as an optional). - -@changelog_1191_li -H2 Console: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space). - -@changelog_1192_li -H2 Console: auto-complete did not work with multi-line statements. - -@changelog_1193_li -CLOB and BLOB data was not immediately removed after a rollback. - -@changelog_1194_li -There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch! 
- -@changelog_1195_li -Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation. - -@changelog_1196_li -The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot! - -@changelog_1197_li -Issue 545: Unnecessary duplicate code was removed. - -@changelog_1198_li -The profiler tool can now process files with full thread dumps. - -@changelog_1199_li -MVStore: the file format was changed slightly. - -@changelog_1200_li -MVStore mode: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage). - -@changelog_1201_li -MVStore mode: creating indexes is now much faster (in many cases faster than with the default PageStore). - -@changelog_1202_li -Various bugs in the MVStore storage and have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing. - -@changelog_1203_li -The method org.h2.expression.Function.getCost could throw a NullPointException. - -@changelog_1204_li -Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases. - -@changelog_1205_li -Lucene 2 is no longer supported. - -@changelog_1206_li -Fix bug in calculating default MIN and MAX values for SEQUENCE. - -@changelog_1207_li -Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE - -@changelog_1208_li -Add entry-point to org.h2.tools.Shell so it can be called from inside an application. patch by Thomas Gillet. - -@changelog_1209_li -Fix bug that prevented the PgServer from being stopped and started multiple times. - -@changelog_1210_li -Support some more DDL syntax for MySQL, patch from Peter Jentsch. 
- -@changelog_1211_li -Issue 548: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc" - -@changelog_1212_li -Fix bug in varargs support in ALIAS's, patch from Nicolas Fortin - -@cheatSheet_1000_h1 -H2 Database Engine Cheat Sheet - -@cheatSheet_1001_h2 -Using H2 - -@cheatSheet_1002_a -H2 - -@cheatSheet_1003_li - is open source, free to use and distribute. - -@cheatSheet_1004_a -Download - -@cheatSheet_1005_li -: jar, installer (Windows), zip. - -@cheatSheet_1006_li -To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. - -@cheatSheet_1007_a -A new database is automatically created - -@cheatSheet_1008_a -by default - -@cheatSheet_1009_li -. - -@cheatSheet_1010_a -Closing the last connection closes the database - -@cheatSheet_1011_li -. - -@cheatSheet_1012_h2 -Documentation - -@cheatSheet_1013_p - Reference: SQL grammar, functions, data types, tools, API - -@cheatSheet_1014_a -Features - -@cheatSheet_1015_p -: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions - -@cheatSheet_1016_a -Database URLs - -@cheatSheet_1017_a -Embedded - -@cheatSheet_1018_code -jdbc:h2:~/test - -@cheatSheet_1019_p - 'test' in the user home directory - -@cheatSheet_1020_code -jdbc:h2:/data/test - -@cheatSheet_1021_p - 'test' in the directory /data - -@cheatSheet_1022_code -jdbc:h2:test - -@cheatSheet_1023_p - in the current(!) 
working directory - -@cheatSheet_1024_a -In-Memory - -@cheatSheet_1025_code -jdbc:h2:mem:test - -@cheatSheet_1026_p - multiple connections in one process - -@cheatSheet_1027_code -jdbc:h2:mem: - -@cheatSheet_1028_p - unnamed private; one connection - -@cheatSheet_1029_a -Server Mode - -@cheatSheet_1030_code -jdbc:h2:tcp://localhost/~/test - -@cheatSheet_1031_p - user home dir - -@cheatSheet_1032_code -jdbc:h2:tcp://localhost//data/test - -@cheatSheet_1033_p - absolute dir - -@cheatSheet_1034_a -Server start - -@cheatSheet_1035_p -:java -cp *.jar org.h2.tools.Server - -@cheatSheet_1036_a -Settings - -@cheatSheet_1037_code -jdbc:h2:..;MODE=MySQL - -@cheatSheet_1038_a -compatibility (or HSQLDB,...) - -@cheatSheet_1039_code -jdbc:h2:..;TRACE_LEVEL_FILE=3 - -@cheatSheet_1040_a -log to *.trace.db - -@cheatSheet_1041_a -Using the JDBC API - -@cheatSheet_1042_a -Connection Pool - -@cheatSheet_1043_a -Maven 2 - -@cheatSheet_1044_a -Hibernate - -@cheatSheet_1045_p - hibernate.cfg.xml (or use the HSQLDialect): - -@cheatSheet_1046_a -TopLink and Glassfish - -@cheatSheet_1047_p - Datasource class: org.h2.jdbcx.JdbcDataSource - -@cheatSheet_1048_code -oracle.toplink.essentials.platform. 
- -@cheatSheet_1049_code -database.H2Platform - -@download_1000_h1 -Downloads - -@download_1001_h3 -Version 1.4.187 (2015-04-10), Beta - -@download_1002_a -Windows Installer - -@download_1003_a -Platform-Independent Zip - -@download_1004_h3 -Version 1.3.176 (2014-04-05), Last Stable - -@download_1005_a -Windows Installer - -@download_1006_a -Platform-Independent Zip - -@download_1007_h3 -Download Mirror and Older Versions - -@download_1008_a -Platform-Independent Zip - -@download_1009_h3 -Jar File - -@download_1010_a -Maven.org - -@download_1011_a -Sourceforge.net - -@download_1012_a -Latest Automated Build (not released) - -@download_1013_h3 -Maven (Binary, Javadoc, and Source) - -@download_1014_a -Binary - -@download_1015_a -Javadoc - -@download_1016_a -Sources - -@download_1017_h3 -Database Upgrade Helper File - -@download_1018_a -Upgrade database from 1.1 to the current version - -@download_1019_h3 -Subversion Source Repository - -@download_1020_a -Google Code - -@download_1021_p - For details about changes, see the Change Log. - -@download_1022_h3 -News and Project Information - -@download_1023_a -Atom Feed - -@download_1024_a -RSS Feed - -@download_1025_a -DOAP File - -@download_1026_p - (what is this) - -@faq_1000_h1 -Frequently Asked Questions - -@faq_1001_a - I Have a Problem or Feature Request - -@faq_1002_a - Are there Known Bugs? When is the Next Release? - -@faq_1003_a - Is this Database Engine Open Source? - -@faq_1004_a - Is Commercial Support Available? - -@faq_1005_a - How to Create a New Database? - -@faq_1006_a - How to Connect to a Database? - -@faq_1007_a - Where are the Database Files Stored? - -@faq_1008_a - What is the Size Limit (Maximum Size) of a Database? - -@faq_1009_a - Is it Reliable? - -@faq_1010_a - Why is Opening my Database Slow? - -@faq_1011_a - My Query is Slow - -@faq_1012_a - H2 is Very Slow - -@faq_1013_a - Column Names are Incorrect? - -@faq_1014_a - Float is Double? - -@faq_1015_a - Is the GCJ Version Stable? Faster? 
- -@faq_1016_a - How to Translate this Project? - -@faq_1017_a - How to Contribute to this Project? - -@faq_1018_h3 -I Have a Problem or Feature Request - -@faq_1019_p - Please read the support checklist. - -@faq_1020_h3 -Are there Known Bugs? When is the Next Release? - -@faq_1021_p - Usually, bugs get fixes as they are found. There is a release every few weeks. Here is the list of known and confirmed issues: - -@faq_1022_li -When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2). - -@faq_1023_li -Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. - -@faq_1024_li -Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). - -@faq_1025_li -Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. 
- -@faq_1026_li -When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. - -@faq_1027_p - For a complete list, see Open Issues. - -@faq_1028_h3 -Is this Database Engine Open Source? - -@faq_1029_p - Yes. It is free to use and distribute, and the source code is included. See also under license. - -@faq_1030_h3 -Is Commercial Support Available? - -@faq_1031_p - Yes, commercial support is available, see Commercial Support. - -@faq_1032_h3 -How to Create a New Database? - -@faq_1033_p - By default, a new database is automatically created if it does not yet exist. See Creating New Databases. - -@faq_1034_h3 -How to Connect to a Database? - -@faq_1035_p - The database driver is org.h2.Driver, and the database URL starts with jdbc:h2:. To connect to a database using JDBC, use the following code: - -@faq_1036_h3 -Where are the Database Files Stored? - -@faq_1037_p - When using database URLs like jdbc:h2:~/test, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. If the base directory is not set (as in jdbc:h2:test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). 
Example: jdbc:h2:file:C:/data/test - -@faq_1038_h3 -What is the Size Limit (Maximum Size) of a Database? - -@faq_1039_p - See Limits and Limitations. - -@faq_1040_h3 -Is it Reliable? - -@faq_1041_p - That is not easy to say. It is still a quite new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous, they are only supported for situations where performance is more important than reliability. Those dangerous features are: - -@faq_1042_li -Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1. - -@faq_1043_li -Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. - -@faq_1044_li -Disabling database file protection using (setting FILE_LOCK to NO in the database URL). - -@faq_1045_li -Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. - -@faq_1046_p - In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a databases. - -@faq_1047_p - This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are: - -@faq_1048_li -Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7 - -@faq_1049_li -The features AUTO_SERVER and AUTO_RECONNECT. - -@faq_1050_li -Cluster mode, 2-phase commit, savepoints. - -@faq_1051_li -24/7 operation. - -@faq_1052_li -Fulltext search. - -@faq_1053_li -Operations on LOBs over 2 GB. 
- -@faq_1054_li -The optimizer may not always select the best plan. - -@faq_1055_li -Using the ICU4J collator. - -@faq_1056_p - Areas considered experimental are: - -@faq_1057_li -The PostgreSQL server - -@faq_1058_li -Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). - -@faq_1059_li -Multi-threading within the engine using SET MULTI_THREADED=1. - -@faq_1060_li -Compatibility modes for other databases (only some features are implemented). - -@faq_1061_li -The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. - -@faq_1062_p - Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. - -@faq_1063_h3 -Why is Opening my Database Slow? - -@faq_1064_p - To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. - -@faq_1065_p - Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open. - -@faq_1066_h3 -My Query is Slow - -@faq_1067_p - Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist: - -@faq_1068_li -Run ANALYZE (see documentation for details). - -@faq_1069_li -Run the query with EXPLAIN and check if indexes are used (see documentation for details). - -@faq_1070_li -If required, create additional indexes and try again using ANALYZE and EXPLAIN. - -@faq_1071_li -If it doesn't help please report the problem. 
- -@faq_1072_h3 -H2 is Very Slow - -@faq_1073_p - By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. - -@faq_1074_h3 -Column Names are Incorrect? - -@faq_1075_p - For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? - -@faq_1076_p - This is not a bug. According the the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other database don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. - -@faq_1077_p - This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. - -@faq_1078_h3 -Float is Double? - -@faq_1079_p - For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? - -@faq_1080_p - This is not a bug. According the the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT. - -@faq_1081_h3 -Is the GCJ Version Stable? Faster? - -@faq_1082_p - The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM. 
- -@faq_1083_h3 -How to Translate this Project? - -@faq_1084_p - For more information, see Build/Translating. - -@faq_1085_h3 -How to Contribute to this Project? - -@faq_1086_p - There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. - -@features_1000_h1 -Features - -@features_1001_a - Feature List - -@features_1002_a - Comparison to Other Database Engines - -@features_1003_a - H2 in Use - -@features_1004_a - Connection Modes - -@features_1005_a - Database URL Overview - -@features_1006_a - Connecting to an Embedded (Local) Database - -@features_1007_a - In-Memory Databases - -@features_1008_a - Database Files Encryption - -@features_1009_a - Database File Locking - -@features_1010_a - Opening a Database Only if it Already Exists - -@features_1011_a - Closing a Database - -@features_1012_a - Ignore Unknown Settings - -@features_1013_a - Changing Other Settings when Opening a Connection - -@features_1014_a - Custom File Access Mode - -@features_1015_a - Multiple Connections - -@features_1016_a - Database File Layout - -@features_1017_a - Logging and Recovery - -@features_1018_a - Compatibility - -@features_1019_a - Auto-Reconnect - -@features_1020_a - Automatic Mixed Mode - -@features_1021_a - Page Size - -@features_1022_a - Using the Trace Options - -@features_1023_a - Using Other Logging APIs - -@features_1024_a - Read Only Databases - -@features_1025_a - Read Only Databases in Zip or Jar File - 
-@features_1026_a - Computed Columns / Function Based Index - -@features_1027_a - Multi-Dimensional Indexes - -@features_1028_a - User-Defined Functions and Stored Procedures - -@features_1029_a - Pluggable or User-Defined Tables - -@features_1030_a - Triggers - -@features_1031_a - Compacting a Database - -@features_1032_a - Cache Settings - -@features_1033_h2 -Feature List - -@features_1034_h3 -Main Features - -@features_1035_li -Very fast database engine - -@features_1036_li -Open source - -@features_1037_li -Written in Java - -@features_1038_li -Supports standard SQL, JDBC API - -@features_1039_li -Embedded and Server mode, Clustering support - -@features_1040_li -Strong security features - -@features_1041_li -The PostgreSQL ODBC driver can be used - -@features_1042_li -Multi version concurrency - -@features_1043_h3 -Additional Features - -@features_1044_li -Disk based or in-memory databases and tables, read-only database support, temporary tables - -@features_1045_li -Transaction support (read committed), 2-phase-commit - -@features_1046_li -Multiple connections, table level locking - -@features_1047_li -Cost based optimizer, using a genetic algorithm for complex queries, zero-administration - -@features_1048_li -Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set - -@features_1049_li -Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL - -@features_1050_h3 -SQL Support - -@features_1051_li -Support for multiple schemas, information schema - -@features_1052_li -Referential integrity / foreign key constraints with cascade, check constraints - -@features_1053_li -Inner and outer joins, subqueries, read only views and inline views - -@features_1054_li -Triggers and Java functions / stored procedures - -@features_1055_li -Many built-in functions, including XML and lossless data compression - -@features_1056_li -Wide range of data types including large objects 
(BLOB/CLOB) and arrays - -@features_1057_li -Sequence and autoincrement columns, computed columns (can be used for function based indexes) - -@features_1058_code -ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP - -@features_1059_li -Collation support, including support for the ICU4J library - -@features_1060_li -Support for users and roles - -@features_1061_li -Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. - -@features_1062_h3 -Security Features - -@features_1063_li -Includes a solution for the SQL injection problem - -@features_1064_li -User password authentication uses SHA-256 and salt - -@features_1065_li -For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) - -@features_1066_li -All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm - -@features_1067_li -The remote JDBC driver supports TCP/IP connections over TLS - -@features_1068_li -The built-in web server supports connections over TLS - -@features_1069_li -Passwords can be sent to the database using char arrays instead of Strings - -@features_1070_h3 -Other Features and Tools - -@features_1071_li -Small footprint (smaller than 1.5 MB), low memory requirements - -@features_1072_li -Multiple index types (b-tree, tree, hash) - -@features_1073_li -Support for multi-dimensional indexes - -@features_1074_li -CSV (comma separated values) file support - -@features_1075_li -Support for linked tables, and a built-in virtual 'range' table - -@features_1076_li -Supports the EXPLAIN PLAN statement; sophisticated trace options - -@features_1077_li -Database closing can be delayed or disabled to improve the performance - -@features_1078_li -Web-based Console application 
(translated to many languages) with autocomplete - -@features_1079_li -The database can generate SQL script files - -@features_1080_li -Contains a recovery tool that can dump the contents of the database - -@features_1081_li -Support for variables (for example to calculate running totals) - -@features_1082_li -Automatic re-compilation of prepared statements - -@features_1083_li -Uses a small number of database files - -@features_1084_li -Uses a checksum for each record and log entry for data integrity - -@features_1085_li -Well tested (high code coverage, randomized stress tests) - -@features_1086_h2 -Comparison to Other Database Engines - -@features_1087_p - This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. - -@features_1088_th -Feature - -@features_1089_th -H2 - -@features_1090_th -Derby - -@features_1091_th -HSQLDB - -@features_1092_th -MySQL - -@features_1093_th -PostgreSQL - -@features_1094_td -Pure Java - -@features_1095_td -Yes - -@features_1096_td -Yes - -@features_1097_td -Yes - -@features_1098_td -No - -@features_1099_td -No - -@features_1100_td -Embedded Mode (Java) - -@features_1101_td -Yes - -@features_1102_td -Yes - -@features_1103_td -Yes - -@features_1104_td -No - -@features_1105_td -No - -@features_1106_td -In-Memory Mode - -@features_1107_td -Yes - -@features_1108_td -Yes - -@features_1109_td -Yes - -@features_1110_td -No - -@features_1111_td -No - -@features_1112_td -Explain Plan - -@features_1113_td -Yes - -@features_1114_td -Yes *12 - -@features_1115_td -Yes - -@features_1116_td -Yes - -@features_1117_td -Yes - -@features_1118_td -Built-in Clustering / Replication - -@features_1119_td -Yes - -@features_1120_td -Yes - -@features_1121_td -No - -@features_1122_td -Yes - -@features_1123_td -Yes - -@features_1124_td -Encrypted Database - -@features_1125_td -Yes - -@features_1126_td -Yes *10 - -@features_1127_td -Yes *10 - -@features_1128_td -No - -@features_1129_td -No - -@features_1130_td -Linked 
Tables - -@features_1131_td -Yes - -@features_1132_td -No - -@features_1133_td -Partially *1 - -@features_1134_td -Partially *2 - -@features_1135_td -No - -@features_1136_td -ODBC Driver - -@features_1137_td -Yes - -@features_1138_td -No - -@features_1139_td -No - -@features_1140_td -Yes - -@features_1141_td -Yes - -@features_1142_td -Fulltext Search - -@features_1143_td -Yes - -@features_1144_td -Yes - -@features_1145_td -No - -@features_1146_td -Yes - -@features_1147_td -Yes - -@features_1148_td -Domains (User-Defined Types) - -@features_1149_td -Yes - -@features_1150_td -No - -@features_1151_td -Yes - -@features_1152_td -Yes - -@features_1153_td -Yes - -@features_1154_td -Files per Database - -@features_1155_td -Few - -@features_1156_td -Many - -@features_1157_td -Few - -@features_1158_td -Many - -@features_1159_td -Many - -@features_1160_td -Row Level Locking - -@features_1161_td -Yes *9 - -@features_1162_td -Yes - -@features_1163_td -Yes *9 - -@features_1164_td -Yes - -@features_1165_td -Yes - -@features_1166_td -Multi Version Concurrency - -@features_1167_td -Yes - -@features_1168_td -No - -@features_1169_td -Yes - -@features_1170_td -Yes - -@features_1171_td -Yes - -@features_1172_td -Multi-Threaded Statement Processing - -@features_1173_td -No *11 - -@features_1174_td -Yes - -@features_1175_td -Yes - -@features_1176_td -Yes - -@features_1177_td -Yes - -@features_1178_td -Role Based Security - -@features_1179_td -Yes - -@features_1180_td -Yes *3 - -@features_1181_td -Yes - -@features_1182_td -Yes - -@features_1183_td -Yes - -@features_1184_td -Updatable Result Sets - -@features_1185_td -Yes - -@features_1186_td -Yes *7 - -@features_1187_td -Yes - -@features_1188_td -Yes - -@features_1189_td -Yes - -@features_1190_td -Sequences - -@features_1191_td -Yes - -@features_1192_td -Yes - -@features_1193_td -Yes - -@features_1194_td -No - -@features_1195_td -Yes - -@features_1196_td -Limit and Offset - -@features_1197_td -Yes - -@features_1198_td -Yes *13 - 
 *3 Derby supports role-based security and password checking as an option.
- -@features_1256_p - *7 Derby only supports updatable result sets if the query is not sorted. - -@features_1257_p - *8 Derby doesn't support standard compliant information schema tables. - -@features_1258_p - *9 When using MVCC (multi version concurrency). - -@features_1259_p - *10 Derby and HSQLDB don't hide data patterns well. - -@features_1260_p - *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC. - -@features_1261_p - *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. - -@features_1262_p - *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. - -@features_1263_p - *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER(). - -@features_1264_h3 -DaffodilDb and One$Db - -@features_1265_p - It looks like the development of this database has stopped. The last release was February 2006. - -@features_1266_h3 -McKoi - -@features_1267_p - It looks like the development of this database has stopped. The last release was August 2004. - -@features_1268_h2 -H2 in Use - -@features_1269_p - For a list of applications that work with or use H2, see: Links. - -@features_1270_h2 -Connection Modes - -@features_1271_p - The following connection modes are supported: - -@features_1272_li -Embedded mode (local connections using JDBC) - -@features_1273_li -Server mode (remote connections using JDBC or ODBC over TCP/IP) - -@features_1274_li -Mixed mode (local and remote connections at the same time) - -@features_1275_h3 -Embedded Mode - -@features_1276_p - In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. 
There is no limit on the number of databases open concurrently, or on the number of open connections.
- -@features_1285_th -Topic - -@features_1286_th -URL Format and Examples - -@features_1287_a -Embedded (local) connection - -@features_1288_td - jdbc:h2:[file:][<path>]<databaseName> - -@features_1289_td - jdbc:h2:~/test - -@features_1290_td - jdbc:h2:file:/data/sample - -@features_1291_td - jdbc:h2:file:C:/data/sample (Windows only) - -@features_1292_a -In-memory (private) - -@features_1293_td -jdbc:h2:mem: - -@features_1294_a -In-memory (named) - -@features_1295_td - jdbc:h2:mem:<databaseName> - -@features_1296_td - jdbc:h2:mem:test_mem - -@features_1297_a -Server mode (remote connections) - -@features_1298_a - using TCP/IP - -@features_1299_td - jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName> - -@features_1300_td - jdbc:h2:tcp://localhost/~/test - -@features_1301_td - jdbc:h2:tcp://dbserv:8084/~/sample - -@features_1302_td - jdbc:h2:tcp://localhost/mem:test - -@features_1303_a -Server mode (remote connections) - -@features_1304_a - using TLS - -@features_1305_td - jdbc:h2:ssl://<server>[:<port>]/<databaseName> - -@features_1306_td - jdbc:h2:ssl://localhost:8085/~/sample; - -@features_1307_a -Using encrypted files - -@features_1308_td - jdbc:h2:<url>;CIPHER=AES - -@features_1309_td - jdbc:h2:ssl://localhost/~/test;CIPHER=AES - -@features_1310_td - jdbc:h2:file:~/secure;CIPHER=AES - -@features_1311_a -File locking methods - -@features_1312_td - jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO} - -@features_1313_td - jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET - -@features_1314_a -Only open if it already exists - -@features_1315_td - jdbc:h2:<url>;IFEXISTS=TRUE - -@features_1316_td - jdbc:h2:file:~/sample;IFEXISTS=TRUE - -@features_1317_a -Don't close the database when the VM exits - -@features_1318_td - jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE - -@features_1319_a -Execute SQL on connection - -@features_1320_td - jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql' - -@features_1321_td - jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM 
'~/populate.sql' - -@features_1322_a -User name and/or password - -@features_1323_td - jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>] - -@features_1324_td - jdbc:h2:file:~/sample;USER=sa;PASSWORD=123 - -@features_1325_a -Debug trace settings - -@features_1326_td - jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3> - -@features_1327_td - jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3 - -@features_1328_a -Ignore unknown settings - -@features_1329_td - jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE - -@features_1330_a -Custom file access mode - -@features_1331_td - jdbc:h2:<url>;ACCESS_MODE_DATA=rws - -@features_1332_a -Database in a zip file - -@features_1333_td - jdbc:h2:zip:<zipFileName>!/<databaseName> - -@features_1334_td - jdbc:h2:zip:~/db.zip!/test - -@features_1335_a -Compatibility mode - -@features_1336_td - jdbc:h2:<url>;MODE=<databaseType> - -@features_1337_td - jdbc:h2:~/test;MODE=MYSQL - -@features_1338_a -Auto-reconnect - -@features_1339_td - jdbc:h2:<url>;AUTO_RECONNECT=TRUE - -@features_1340_td - jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE - -@features_1341_a -Automatic mixed mode - -@features_1342_td - jdbc:h2:<url>;AUTO_SERVER=TRUE - -@features_1343_td - jdbc:h2:~/test;AUTO_SERVER=TRUE - -@features_1344_a -Page size - -@features_1345_td - jdbc:h2:<url>;PAGE_SIZE=512 - -@features_1346_a -Changing other settings - -@features_1347_td - jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...] - -@features_1348_td - jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3 - -@features_1349_h2 -Connecting to an Embedded (Local) Database - -@features_1350_p - The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>. The prefix file: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. 
The database files can be encrypted. The encryption algorithm AES is supported.
To create an encrypted database, connect to it as if it would already exist.
The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. - -@features_1370_li -The third method is FS. This will use native file locking using FileChannel.lock. - -@features_1371_li -It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. - -@features_1372_p - To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method: - -@features_1373_p - For more information about the algorithms, see Advanced / File Locking Protocols. - -@features_1374_h2 -Opening a Database Only if it Already Exists - -@features_1375_p - By default, when an application calls DriverManager.getConnection(url, ...) and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this: - -@features_1376_h2 -Closing a Database - -@features_1377_h3 -Delayed Database Closing - -@features_1378_p - Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. 
The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed: - -@features_1379_p - The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10. - -@features_1380_h3 -Don't Close a Database when the VM Exits - -@features_1381_p - By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is: - -@features_1382_h2 -Execute SQL on Connection - -@features_1383_p - Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. - -@features_1384_p - Please note the double backslash is only required in a Java or properties file. 
It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE to the database URL.
The number of open databases is only limited by the memory available.
If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table.
Renamed to <database>.trace.db.old if it is too big.
- -@features_1454_td - Format: <database>.<id>.temp.db - -@features_1455_td - 1 per object - -@features_1456_h3 -Moving and Renaming Database Files - -@features_1457_p - Database name and location are not stored inside the database files. - -@features_1458_p - While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). - -@features_1459_p - As there is no platform specific data in the files, they can be moved to other operating systems without problems. - -@features_1460_h3 -Backup - -@features_1461_p - When the database is closed, it is possible to backup the database files. - -@features_1462_p - To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. - -@features_1463_h2 -Logging and Recovery - -@features_1464_p - Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. - -@features_1465_h2 -Compatibility - -@features_1466_p - All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however: - -@features_1467_p - In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE). 
- -@features_1468_h3 -Compatibility Modes - -@features_1469_p - For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode: - -@features_1470_h3 -DB2 Compatibility Mode - -@features_1471_p - To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 or the SQL statement SET MODE DB2. - -@features_1472_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1473_li -Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. - -@features_1474_li -Concatenating NULL with another value results in the other value. - -@features_1475_li -Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1476_h3 -Derby Compatibility Mode - -@features_1477_p - To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby or the SQL statement SET MODE Derby. - -@features_1478_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1479_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1480_li -Concatenating NULL with another value results in the other value. - -@features_1481_li -Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1482_h3 -HSQLDB Compatibility Mode - -@features_1483_p - To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB or the SQL statement SET MODE HSQLDB. - -@features_1484_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1485_li -When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. 
Usually, the scale is converted and 0s are added if required. - -@features_1486_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1487_li -Text can be concatenated using '+'. - -@features_1488_h3 -MS SQL Server Compatibility Mode - -@features_1489_p - To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer or the SQL statement SET MODE MSSQLServer. - -@features_1490_li -For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1491_li -Identifiers may be quoted using square brackets as in [Test]. - -@features_1492_li -For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1493_li -Concatenating NULL with another value results in the other value. - -@features_1494_li -Text can be concatenated using '+'. - -@features_1495_h3 -MySQL Compatibility Mode - -@features_1496_p - To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL or the SQL statement SET MODE MySQL. - -@features_1497_li -When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. - -@features_1498_li -Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name)); - -@features_1499_li -Meta data calls return identifiers in lower case. - -@features_1500_li -When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. - -@features_1501_li -Concatenating NULL with another value results in the other value. 
When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
After an automatic re-connect, variables and local temporary table definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contain all client side state that is re-created. - -@features_1518_p - If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. - -@features_1519_h2 -Automatic Mixed Mode - -@features_1520_p - Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL: - -@features_1521_p - Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. - -@features_1522_p - The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). 
- -@features_1523_p - All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp:// or ssl://) are not supported. This mode is not supported for in-memory databases. - -@features_1524_p - Here is an example of how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). - -@features_1525_p - When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090. - -@features_1526_h2 -Page Size - -@features_1527_p - The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. - -@features_1528_h2 -Using the Trace Options - -@features_1529_p - To find problems in an application, it is sometimes good to see what database operations were executed. This database offers the following trace features: - -@features_1530_li -Trace to System.out and/or to a file - -@features_1531_li -Support for trace levels OFF, ERROR, INFO, DEBUG - -@features_1532_li -The maximum size of the trace file can be set - -@features_1533_li -It is possible to generate Java source code from the trace file - -@features_1534_li -Trace can be enabled at runtime by manually creating a file - -@features_1535_h3 -Trace Options - -@features_1536_p - The simplest way to enable the trace option is setting it in the database URL. 
There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is: - -@features_1537_p - The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example: - -@features_1538_h3 -Setting the Maximum Size of the Trace File - -@features_1539_p - When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example: - -@features_1540_h3 -Java Code Generation - -@features_1541_p - When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this: - -@features_1542_p - To filter the Java source code, use the ConvertTraceFile tool as follows: - -@features_1543_p - The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. The password is not listed in the trace file and therefore not included in the source code. - -@features_1544_h2 -Using Other Logging APIs - -@features_1545_p - By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. 
To do that, this database supports SLF4J. - -@features_1546_a -SLF4J - -@features_1547_p - is a simple facade for various logging APIs and allows plugging in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. - -@features_1548_p - To enable SLF4J, set the file trace level to 4 in the database URL: - -@features_1549_p - Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. - -@features_1550_h2 -Read Only Databases - -@features_1551_p - If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether the database is read-only: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). - -@features_1552_p - Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. - -@features_1553_h2 -Read Only Databases in Zip or Jar File - -@features_1554_p - To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. 
If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. - -@features_1555_p - When the zip file is created, you can open the database in the zip file using the following database URL: - -@features_1556_p - Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database. - -@features_1557_p - If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip. - -@features_1558_h3 -Opening a Corrupted Database - -@features_1559_p - If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue. - -@features_1560_h2 -Computed Columns / Function Based Index - -@features_1561_p - A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time: - -@features_1562_p - Function indexes are not directly supported by this database, but they can be emulated by using computed columns. 
For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column: - -@features_1563_p - When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table: - -@features_1564_h2 -Multi-Dimensional Indexes - -@features_1565_p - A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. - -@features_1566_p - Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column). - -@features_1567_p - The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. - -@features_1568_h2 -User-Defined Functions and Stored Procedures - -@features_1569_p - In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. 
A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. - -@features_1570_h3 -Referencing a Compiled Method - -@features_1571_p - When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class: - -@features_1572_p - The Java function must be registered in the database by calling CREATE ALIAS ... FOR: - -@features_1573_p - For a complete sample application, see src/test/org/h2/samples/Function.java. - -@features_1574_h3 -Declaring Functions as Source Code - -@features_1575_p - When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example: - -@features_1576_p - By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE: - -@features_1577_p - The following template is used to create a complete Java class: - -@features_1578_h3 -Method Overloading - -@features_1579_p - Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. 
Method overloading is not supported when declaring functions as source code. - -@features_1580_h3 -Function Data Type Mapping - -@features_1581_p - Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. - -@features_1582_p - SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object is mapped to OTHER (a serialized object). Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. - -@features_1583_h3 -Functions That Require a Connection - -@features_1584_p - If the first parameter of a Java function is a java.sql.Connection, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. - -@features_1585_h3 -Functions Throwing an Exception - -@features_1586_p - If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. - -@features_1587_h3 -Functions Returning a Result Set - -@features_1588_p - Functions may return a result set. 
Such a function can be called with the CALL statement: - -@features_1589_h3 -Using SimpleResultSet - -@features_1590_p - A function can create a result set using the SimpleResultSet tool: - -@features_1591_h3 -Using a Function as a Table - -@features_1592_p - A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection. Otherwise, the URL of the connection is jdbc:default:connection. - -@features_1593_h2 -Pluggable or User-Defined Tables - -@features_1594_p - For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. - -@features_1595_p - In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this: - -@features_1596_p - and then create the table from SQL like this: - -@features_1597_p - It is also possible to pass in parameters to the table engine, like so: - -@features_1598_p - In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. - -@features_1599_h2 -Triggers - -@features_1600_p - This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. 
The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). - -@features_1601_p - The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database: - -@features_1602_p - The trigger can be used to veto a change by throwing a SQLException. - -@features_1603_p - As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This allows using the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented: - -@features_1604_h2 -Compacting a Database - -@features_1605_p - Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However, re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this: - -@features_1606_p - See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. - -@features_1607_h2 -Cache Settings - -@features_1608_p - The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. 
However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' - -@features_1609_p - An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1610_p - Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1611_p - To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. 
- -@fragments_1000_div -    - -@fragments_1001_label -Search: - -@fragments_1002_label -Highlight keyword(s) - -@fragments_1003_a -Home - -@fragments_1004_a -Download - -@fragments_1005_a -Cheat Sheet - -@fragments_1006_b -Documentation - -@fragments_1007_a -Quickstart - -@fragments_1008_a -Installation - -@fragments_1009_a -Tutorial - -@fragments_1010_a -Features - -@fragments_1011_a -Performance - -@fragments_1012_a -Advanced - -@fragments_1013_b -Reference - -@fragments_1014_a -SQL Grammar - -@fragments_1015_a -Functions - -@fragments_1016_a -Data Types - -@fragments_1017_a -Javadoc - -@fragments_1018_a -PDF (1 MB) - -@fragments_1019_b -Support - -@fragments_1020_a -FAQ - -@fragments_1021_a -Error Analyzer - -@fragments_1022_a -Google Group (English) - -@fragments_1023_a -Google Group (Japanese) - -@fragments_1024_a -Google Group (Chinese) - -@fragments_1025_b -Appendix - -@fragments_1026_a -History & Roadmap - -@fragments_1027_a -License - -@fragments_1028_a -Build - -@fragments_1029_a -Links - -@fragments_1030_a -JaQu - -@fragments_1031_a -MVStore - -@fragments_1032_a -Architecture - -@fragments_1033_td -  - -@frame_1000_h1 -H2 Database Engine - -@frame_1001_p - Welcome to H2, the free SQL database. The main feature of H2 are: - -@frame_1002_li -It is free to use for everybody, source code is included - -@frame_1003_li -Written in Java, but also available as native executable - -@frame_1004_li -JDBC and (partial) ODBC API - -@frame_1005_li -Embedded and client/server modes - -@frame_1006_li -Clustering is supported - -@frame_1007_li -A web client is included - -@frame_1008_h2 -No Javascript - -@frame_1009_p - If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. 
- -@frame_1010_p - Please enable Javascript, or go ahead without it: H2 Database Engine - -@history_1000_h1 -History and Roadmap - -@history_1001_a - Change Log - -@history_1002_a - Roadmap - -@history_1003_a - History of this Database Engine - -@history_1004_a - Why Java - -@history_1005_a - Supporters - -@history_1006_h2 -Change Log - -@history_1007_p - The up-to-date change log is available at http://www.h2database.com/html/changelog.html - -@history_1008_h2 -Roadmap - -@history_1009_p - The current roadmap is available at http://www.h2database.com/html/roadmap.html - -@history_1010_h2 -History of this Database Engine - -@history_1011_p - The development of H2 was started in May 2004, but it was first published on December 14th 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. - -@history_1012_h2 -Why Java - -@history_1013_p - The main reasons to use a Java database are: - -@history_1014_li -Very simple to integrate in Java applications - -@history_1015_li -Support for many different platforms - -@history_1016_li -More secure than native applications (no buffer overflows) - -@history_1017_li -User defined functions (or triggers) run very fast - -@history_1018_li -Unicode support - -@history_1019_p - Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. - -@history_1020_p - Developing Java code is faster than developing C or C++ code. 
When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. - -@history_1021_p - Java is future proof: a lot of companies support Java. Java is now open source. - -@history_1022_p - To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. - -@history_1023_h2 -Supporters - -@history_1024_p - Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). - -@history_1025_a -xso; xBase Software Ontwikkeling, Netherlands - -@history_1026_a -Cognitect, USA - -@history_1027_a -Code 42 Software, Inc., Minneapolis - -@history_1028_li -Martin Wildam, Austria - -@history_1029_a -Code Lutin, France - -@history_1030_a -NetSuxxess GmbH, Germany - -@history_1031_a -Poker Copilot, Steve McLeod, Germany - -@history_1032_a -SkyCash, Poland - -@history_1033_a -Lumber-mill, Inc., Japan - -@history_1034_a -StockMarketEye, USA - -@history_1035_a -Eckenfelder GmbH & Co.KG, Germany - -@history_1036_li -Anthony Goubard, Netherlands - -@history_1037_li -Richard Hickey, USA - -@history_1038_li -Alessio Jacopo D'Adamo, Italy - -@history_1039_li -Ashwin Jayaprakash, USA - -@history_1040_li -Donald Bleyl, USA - -@history_1041_li -Frank Berger, Germany - -@history_1042_li -Florent Ramiere, France - -@history_1043_li -Jun Iyama, Japan - -@history_1044_li -Antonio Casqueiro, Portugal - -@history_1045_li -Oliver Computing LLC, USA - -@history_1046_li -Harpal Grover Consulting Inc., USA - 
-@history_1047_li -Elisabetta Berlini, Italy - -@history_1048_li -William Gilbert, USA - -@history_1049_li -Antonio Dieguez Rojas, Chile - -@history_1050_a -Ontology Works, USA - -@history_1051_li -Pete Haidinyak, USA - -@history_1052_li -William Osmond, USA - -@history_1053_li -Joachim Ansorg, Germany - -@history_1054_li -Oliver Soerensen, Germany - -@history_1055_li -Christos Vasilakis, Greece - -@history_1056_li -Fyodor Kupolov, Denmark - -@history_1057_li -Jakob Jenkov, Denmark - -@history_1058_li -Stéphane Chartrand, Switzerland - -@history_1059_li -Glenn Kidd, USA - -@history_1060_li -Gustav Trede, Sweden - -@history_1061_li -Joonas Pulakka, Finland - -@history_1062_li -Bjorn Darri Sigurdsson, Iceland - -@history_1063_li -Iyama Jun, Japan - -@history_1064_li -Gray Watson, USA - -@history_1065_li -Erik Dick, Germany - -@history_1066_li -Pengxiang Shao, China - -@history_1067_li -Bilingual Marketing Group, USA - -@history_1068_li -Philippe Marschall, Switzerland - -@history_1069_li -Knut Staring, Norway - -@history_1070_li -Theis Borg, Denmark - -@history_1071_li -Mark De Mendonca Duske, USA - -@history_1072_li -Joel A. 
Garringer, USA - -@history_1073_li -Olivier Chafik, France - -@history_1074_li -Rene Schwietzke, Germany - -@history_1075_li -Jalpesh Patadia, USA - -@history_1076_li -Takanori Kawashima, Japan - -@history_1077_li -Terrence JC Huang, China - -@history_1078_a -JiaDong Huang, Australia - -@history_1079_li -Laurent van Roy, Belgium - -@history_1080_li -Qian Chen, China - -@history_1081_li -Clinton Hyde, USA - -@history_1082_li -Kritchai Phromros, Thailand - -@history_1083_li -Alan Thompson, USA - -@history_1084_li -Ladislav Jech, Czech Republic - -@history_1085_li -Dimitrijs Fedotovs, Latvia - -@history_1086_li -Richard Manley-Reeve, United Kingdom - -@installation_1000_h1 -Installation - -@installation_1001_a - Requirements - -@installation_1002_a - Supported Platforms - -@installation_1003_a - Installing the Software - -@installation_1004_a - Directory Structure - -@installation_1005_h2 -Requirements - -@installation_1006_p - To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. - -@installation_1007_h3 -Database Engine - -@installation_1008_li -Windows XP or Vista, Mac OS X, or Linux - -@installation_1009_li -Sun Java 6 or newer - -@installation_1010_li -Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB) - -@installation_1011_h3 -H2 Console - -@installation_1012_li -Mozilla Firefox - -@installation_1013_h2 -Supported Platforms - -@installation_1014_p - As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. 
- -@installation_1015_h2 -Installing the Software - -@installation_1016_p - To install the software, run the installer or unzip it to a directory of your choice. - -@installation_1017_h2 -Directory Structure - -@installation_1018_p - After installing, you should get the following directory structure: - -@installation_1019_th -Directory - -@installation_1020_th -Contents - -@installation_1021_td -bin - -@installation_1022_td -JAR and batch files - -@installation_1023_td -docs - -@installation_1024_td -Documentation - -@installation_1025_td -docs/html - -@installation_1026_td -HTML pages - -@installation_1027_td -docs/javadoc - -@installation_1028_td -Javadoc files - -@installation_1029_td -ext - -@installation_1030_td -External dependencies (downloaded when building) - -@installation_1031_td -service - -@installation_1032_td -Tools to run the database as a Windows Service - -@installation_1033_td -src - -@installation_1034_td -Source files - -@installation_1035_td -src/docsrc - -@installation_1036_td -Documentation sources - -@installation_1037_td -src/installer - -@installation_1038_td -Installer, shell, and release build script - -@installation_1039_td -src/main - -@installation_1040_td -Database engine source code - -@installation_1041_td -src/test - -@installation_1042_td -Test source code - -@installation_1043_td -src/tools - -@installation_1044_td -Tools and database adapters source code - -@jaqu_1000_h1 -JaQu - -@jaqu_1001_a - What is JaQu - -@jaqu_1002_a - Differences to Other Data Access Tools - -@jaqu_1003_a - Current State - -@jaqu_1004_a - Building the JaQu Library - -@jaqu_1005_a - Requirements - -@jaqu_1006_a - Example Code - -@jaqu_1007_a - Configuration - -@jaqu_1008_a - Natural Syntax - -@jaqu_1009_a - Other Ideas - -@jaqu_1010_a - Similar Projects - -@jaqu_1011_h2 -What is JaQu - -@jaqu_1012_p - Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. 
- -@jaqu_1013_p - JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code: - -@jaqu_1014_p - stands for the SQL statement: - -@jaqu_1015_h2 -Differences to Other Data Access Tools - -@jaqu_1016_p - Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. - -@jaqu_1017_p - JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. - -@jaqu_1018_p - JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). - -@jaqu_1019_h3 -Restrictions - -@jaqu_1020_p - Primitive types (eg. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. - -@jaqu_1021_h3 -Why in Java? - -@jaqu_1022_p - Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code. - -@jaqu_1023_h2 -Current State - -@jaqu_1024_p - Currently, JaQu is only tested with the H2 database. The API may change in future versions. 
JaQu is not part of the h2 jar file, however the source code is included in H2, under: - -@jaqu_1025_code -src/test/org/h2/test/jaqu/* - -@jaqu_1026_li - (samples and tests) - -@jaqu_1027_code -src/tools/org/h2/jaqu/* - -@jaqu_1028_li - (framework) - -@jaqu_1029_h2 -Building the JaQu Library - -@jaqu_1030_p - To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar. - -@jaqu_1031_h2 -Requirements - -@jaqu_1032_p - JaQu requires Java 6. Annotations are not need. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. - -@jaqu_1033_h2 -Example Code - -@jaqu_1034_h2 -Configuration - -@jaqu_1035_p - JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example: - -@jaqu_1036_p - The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself. - -@jaqu_1037_h2 -Natural Syntax - -@jaqu_1038_p -The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is: - -@jaqu_1039_h2 -Other Ideas - -@jaqu_1040_p - This project has just been started, and nothing is fixed yet. 
Some ideas are: - -@jaqu_1041_li -Support queries on collections (instead of using a database). - -@jaqu_1042_li -Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). - -@jaqu_1043_li -Internally use a JPA implementation (for example Hibernate) instead of SQL directly. - -@jaqu_1044_li -Use PreparedStatements and cache them. - -@jaqu_1045_h2 -Similar Projects - -@jaqu_1046_a -iciql (a friendly fork of JaQu) - -@jaqu_1047_a -Cement Framework - -@jaqu_1048_a -Dreamsource ORM - -@jaqu_1049_a -Empire-db - -@jaqu_1050_a -JEQUEL: Java Embedded QUEry Language - -@jaqu_1051_a -Joist - -@jaqu_1052_a -jOOQ - -@jaqu_1053_a -JoSQL - -@jaqu_1054_a -LIQUidFORM - -@jaqu_1055_a -Quaere (Alias implementation) - -@jaqu_1056_a -Quaere - -@jaqu_1057_a -Querydsl - -@jaqu_1058_a -Squill - -@license_1000_h1 -License - -@license_1001_a - Summary and License FAQ - -@license_1002_a - Mozilla Public License Version 2.0 - -@license_1003_a - Eclipse Public License - Version 1.0 - -@license_1004_a - Export Control Classification Number (ECCN) - -@license_1005_h2 -Summary and License FAQ - -@license_1006_p - H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. - -@license_1007_li -You can use H2 for free. - -@license_1008_li -You can integrate it into your applications (including in commercial applications) and distribute it. - -@license_1009_li -Files containing only your code are not covered by this license (it is 'commercial friendly'). - -@license_1010_li -Modifications to the H2 source code must be published. - -@license_1011_li -You don't need to provide the source code of H2 if you did not modify anything. - -@license_1012_li -If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. 
- -@license_1013_p - However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com. - -@license_1014_p - About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. - -@license_1015_p - If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. - -@license_1016_h2 -Mozilla Public License Version 2.0 - -@license_1017_h3 -1. Definitions - -@license_1018_p -1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. - -@license_1019_p -1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. - -@license_1020_p -1.3. "Contribution" means Covered Software of a particular Contributor. - -@license_1021_p -1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. - -@license_1022_p -1.5. "Incompatible With Secondary Licenses" means - -@license_1023_p -a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - -@license_1024_p -b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. - -@license_1025_p -1.6. "Executable Form" means any form of the work other than Source Code Form. - -@license_1026_p -1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. - -@license_1027_p -1.8. "License" means this document. - -@license_1028_p -1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. - -@license_1029_p -1.10. "Modifications" means any of the following: - -@license_1030_p -a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or - -@license_1031_p -b. any new file in Source Code Form that contains any Covered Software. - -@license_1032_p -1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. - -@license_1033_p -1.12. 
"Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. - -@license_1034_p -1.13. "Source Code Form" means the form of the work preferred for making modifications. - -@license_1035_p -1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -@license_1036_h3 -2. License Grants and Conditions - -@license_1037_h4 -2.1. Grants - -@license_1038_p -Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - -@license_1039_p -under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and - -@license_1040_p -under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. - -@license_1041_h4 -2.2. Effective Date - -@license_1042_p -The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. - -@license_1043_h4 -2.3. Limitations on Grant Scope - -@license_1044_p -The licenses granted in this Section 2 are the only rights granted under this License. 
No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: - -@license_1045_p -for any code that a Contributor has removed from Covered Software; or - -@license_1046_p -for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - -@license_1047_p -under Patent Claims infringed by Covered Software in the absence of its Contributions. - -@license_1048_p -This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). - -@license_1049_h4 -2.4. Subsequent Licenses - -@license_1050_p -No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). - -@license_1051_h4 -2.5. Representation - -@license_1052_p -Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. - -@license_1053_h4 -2.6. Fair Use - -@license_1054_p -This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. - -@license_1055_h4 -2.7. Conditions - -@license_1056_p -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. - -@license_1057_h3 -3. Responsibilities - -@license_1058_h4 -3.1. 
Distribution of Source Form - -@license_1059_p -All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. - -@license_1060_h4 -3.2. Distribution of Executable Form - -@license_1061_p -If You distribute Covered Software in Executable Form then: - -@license_1062_p -such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - -@license_1063_p -You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. - -@license_1064_h4 -3.3. Distribution of a Larger Work - -@license_1065_p -You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). - -@license_1066_h4 -3.4. 
Notices - -@license_1067_p -You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. - -@license_1068_h4 -3.5. Application of Additional Terms - -@license_1069_p -You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. - -@license_1070_h3 -4. Inability to Comply Due to Statute or Regulation - -@license_1071_p -If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - -@license_1072_h3 -5. Termination - -@license_1073_p -5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. 
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. - -@license_1074_p -5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. - -@license_1075_p -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. - -@license_1076_h3 -6. Disclaimer of Warranty - -@license_1077_p -Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. 
Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. - -@license_1078_h3 -7. Limitation of Liability - -@license_1079_p -Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. - -@license_1080_h3 -8. Litigation - -@license_1081_p -Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. - -@license_1082_h3 -9. Miscellaneous - -@license_1083_p -This License represents the complete agreement concerning the subject matter hereof. 
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. - -@license_1084_h3 -10. Versions of the License - -@license_1085_h4 -10.1. New Versions - -@license_1086_p -Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. - -@license_1087_h4 -10.2. Effect of New Versions - -@license_1088_p -You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. - -@license_1089_h4 -10.3. Modified Versions - -@license_1090_p -If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). - -@license_1091_h4 -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - -@license_1092_p -If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. - -@license_1093_h3 -Exhibit A - Source Code Form License Notice - -@license_1094_p -If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. 
- -@license_1095_p -You may add additional accurate notices of copyright ownership. - -@license_1096_h3 -Exhibit B - "Incompatible With Secondary Licenses" Notice - -@license_1097_h2 -Eclipse Public License - Version 1.0 - -@license_1098_p - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -@license_1099_h3 -1. DEFINITIONS - -@license_1100_p - "Contribution" means: - -@license_1101_p - a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - -@license_1102_p - b) in the case of each subsequent Contributor: - -@license_1103_p - i) changes to the Program, and - -@license_1104_p - ii) additions to the Program; - -@license_1105_p - where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -@license_1106_p - "Contributor" means any person or entity that distributes the Program. - -@license_1107_p - "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -@license_1108_p - "Program" means the Contributions distributed in accordance with this Agreement. - -@license_1109_p - "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -@license_1110_h3 -2. 
GRANT OF RIGHTS - -@license_1111_p - a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. - -@license_1112_p - b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - -@license_1113_p - c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. 
- -@license_1114_p - d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -@license_1115_h3 -3. REQUIREMENTS - -@license_1116_p - A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -@license_1117_p - a) it complies with the terms and conditions of this Agreement; and - -@license_1118_p - b) its license agreement: - -@license_1119_p - i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - -@license_1120_p - ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - -@license_1121_p - iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - -@license_1122_p - iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -@license_1123_p - When the Program is made available in source code form: - -@license_1124_p - a) it must be made available under this Agreement; and - -@license_1125_p - b) a copy of this Agreement must be included with each copy of the Program. - -@license_1126_p - Contributors may not remove or alter any copyright notices contained within the Program. - -@license_1127_p - Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -@license_1128_h3 -4. 
COMMERCIAL DISTRIBUTION - -@license_1129_p - Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -@license_1130_p - For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. 
Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -@license_1131_h3 -5. NO WARRANTY - -@license_1132_p - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -@license_1133_h3 -6. DISCLAIMER OF LIABILITY - -@license_1134_p - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -@license_1135_h3 -7. 
GENERAL - -@license_1136_p - If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -@license_1137_p - If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -@license_1138_p - All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -@license_1139_p - Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. 
Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -@license_1140_p - This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. - -@license_1141_h2 -Export Control Classification Number (ECCN) - -@license_1142_p - As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. - -@links_1000_h1 -Links - -@links_1001_p - If you want to add a link, please send it to the support email address or post it to the group. - -@links_1002_a - Commercial Support - -@links_1003_a - Quotes - -@links_1004_a - Books - -@links_1005_a - Extensions - -@links_1006_a - Blog Articles, Videos - -@links_1007_a - Database Frontends / Tools - -@links_1008_a - Products and Projects - -@links_1009_h2 -Commercial Support - -@links_1010_a -Commercial support for H2 is available - -@links_1011_p - from Steve McLeod (steve dot mcleod at gmail dot com). 
Please note he is not one of the main developers of H2. He describes himself as follows: - -@links_1012_li -I'm a long time user of H2, routinely working with H2 databases several gigabytes in size. - -@links_1013_li -I'm the creator of popular commercial desktop software that uses H2. - -@links_1014_li -I'm a certified Java developer (SCJP). - -@links_1015_li -I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany. - -@links_1016_li -I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations." - -@links_1017_h2 -Quotes - -@links_1018_a - Quote - -@links_1019_p -: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " - -@links_1020_h2 -Books - -@links_1021_a - Seam In Action - -@links_1022_h2 -Extensions - -@links_1023_a - Grails H2 Database Plugin - -@links_1024_a - h2osgi: OSGi for the H2 Database - -@links_1025_a - H2Sharp: ADO.NET interface for the H2 database engine - -@links_1026_a - A spatial extension of the H2 database. 
- -@links_1027_h2 -Blog Articles, Videos - -@links_1028_a - Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 - -@links_1029_a - Analyzing CSVs with H2 in under 10 minutes (2009-12-07) - -@links_1030_a - Efficient sorting and iteration on large databases (2009-06-15) - -@links_1031_a - Porting Flexive to the H2 Database (2008-12-05) - -@links_1032_a - H2 Database with GlassFish (2008-11-24) - -@links_1033_a - H2 Database - Performance Tracing (2008-04-30) - -@links_1034_a - Open Source Databases Comparison (2007-09-11) - -@links_1035_a - The Codist: The Open Source Frameworks I Use (2007-07-23) - -@links_1036_a - The Codist: SQL Injections: How Not To Get Stuck (2007-05-08) - -@links_1037_a - David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06) - -@links_1038_a - The Codist: Write Your Own Database, Again (2006-11-13) - -@links_1039_h2 -Project Pages - -@links_1040_a - Ohloh - -@links_1041_a - Freshmeat Project Page - -@links_1042_a - Wikipedia - -@links_1043_a - Java Source Net - -@links_1044_a - Linux Package Manager - -@links_1045_h2 -Database Frontends / Tools - -@links_1046_a - Dataflyer - -@links_1047_p - A tool to browse databases and export data. - -@links_1048_a - DB Solo - -@links_1049_p - SQL query tool. - -@links_1050_a - DbVisualizer - -@links_1051_p - Database tool. - -@links_1052_a - Execute Query - -@links_1053_p - Database utility written in Java. - -@links_1054_a - Flyway - -@links_1055_p - The agile database migration framework for Java. - -@links_1056_a - [fleXive] - -@links_1057_p - JavaEE 5 open source framework for the development of complex and evolving (web-)applications. - -@links_1058_a - JDBC Console - -@links_1059_p - This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. - -@links_1060_a - HenPlus - -@links_1061_p - HenPlus is a SQL shell written in Java. 
- -@links_1062_a - JDBC lint - -@links_1063_p - Helps write correct and efficient code when using the JDBC API. - -@links_1064_a - OpenOffice - -@links_1065_p - Base is OpenOffice.org's database application. It provides access to relational data sources. - -@links_1066_a - RazorSQL - -@links_1067_p - An SQL query tool, database browser, SQL editor, and database administration tool. - -@links_1068_a - SQL Developer - -@links_1069_p - Universal Database Frontend. - -@links_1070_a - SQL Workbench/J - -@links_1071_p - Free DBMS-independent SQL tool. - -@links_1072_a - SQuirreL SQL Client - -@links_1073_p - Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. - -@links_1074_a - SQuirreL DB Copy Plugin - -@links_1075_p - Tool to copy data from one database to another. - -@links_1076_h2 -Products and Projects - -@links_1077_a - AccuProcess - -@links_1078_p - Visual business process modeling and simulation software for business users. - -@links_1079_a - Adeptia BPM - -@links_1080_p - A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. - -@links_1081_a - Adeptia Integration - -@links_1082_p - Process-centric, services-based application integration suite. - -@links_1083_a - Aejaks - -@links_1084_p - A server-side scripting environment to build AJAX enabled web applications. - -@links_1085_a - Axiom Stack - -@links_1086_p - A web framework that let's you write dynamic web applications with Zen-like simplicity. - -@links_1087_a - Apache Cayenne - -@links_1088_p - Open source persistence framework providing object-relational mapping (ORM) and remoting services. - -@links_1089_a - Apache Jackrabbit - -@links_1090_p - Open source implementation of the Java Content Repository API (JCR). - -@links_1091_a - Apache OpenJPA - -@links_1092_p - Open source implementation of the Java Persistence API (JPA). - -@links_1093_a - AppFuse - -@links_1094_p - Helps building web applications. 
- -@links_1095_a - BGBlitz - -@links_1096_p - The Swiss army knife of Backgammon. - -@links_1097_a - Bonita - -@links_1098_p - Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. - -@links_1099_a - Bookmarks Portlet - -@links_1100_p - JSR 168 compliant bookmarks management portlet application. - -@links_1101_a - Claros inTouch - -@links_1102_p - Ajax communication suite with mail, addresses, notes, IM, and rss reader. - -@links_1103_a - CrashPlan PRO Server - -@links_1104_p - Easy and cross platform backup solution for business and service providers. - -@links_1105_a - DataNucleus - -@links_1106_p - Java persistent objects. - -@links_1107_a - DbUnit - -@links_1108_p - A JUnit extension (also usable with Ant) targeted for database-driven projects. - -@links_1109_a - DiffKit - -@links_1110_p - DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. - -@links_1111_a - Dinamica Framework - -@links_1112_p - Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). - -@links_1113_a - District Health Information Software 2 (DHIS) - -@links_1114_p - The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. - -@links_1115_a - Ebean ORM Persistence Layer - -@links_1116_p - Open source Java Object Relational Mapping tool. - -@links_1117_a - Eclipse CDO - -@links_1118_p - The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. - -@links_1119_a - Fabric3 - -@links_1120_p - Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org). 
- -@links_1121_a - FIT4Data - -@links_1122_p - A testing framework for data management applications built on the Java implementation of FIT. - -@links_1123_a - Flux - -@links_1124_p - Java job scheduler, file transfer, workflow, and BPM. - -@links_1125_a - GeoServer - -@links_1126_p - GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. - -@links_1127_a - GBIF Integrated Publishing Toolkit (IPT) - -@links_1128_p - The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata. - -@links_1129_a - GNU Gluco Control - -@links_1130_p - Helps you to manage your diabetes. - -@links_1131_a - Golden T Studios - -@links_1132_p - Fun-to-play games with a simple interface. - -@links_1133_a - GridGain - -@links_1134_p - GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. - -@links_1135_a - Group Session - -@links_1136_p - Open source web groupware. - -@links_1137_a - HA-JDBC - -@links_1138_p - High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. - -@links_1139_a - Hibernate - -@links_1140_p - Relational persistence for idiomatic Java (O-R mapping tool). - -@links_1141_a - Hibicius - -@links_1142_p - Online Banking Client for the HBCI protocol. - -@links_1143_a - ImageMapper - -@links_1144_p - ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. - -@links_1145_a - JAMWiki - -@links_1146_p - Java-based Wiki engine. 
- -@links_1147_a - Jaspa - -@links_1148_p - Java Spatial. Jaspa potentially brings around 200 spatial functions. - -@links_1149_a - Java Simon - -@links_1150_p - Simple Monitoring API. - -@links_1151_a - JBoss jBPM - -@links_1152_p - A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. - -@links_1153_a - JBoss Jopr - -@links_1154_p - An enterprise management solution for JBoss middleware projects and other application technologies. - -@links_1155_a - JGeocoder - -@links_1156_p - Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. - -@links_1157_a - JGrass - -@links_1158_p - Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig. - -@links_1159_a - Jena - -@links_1160_p - Java framework for building Semantic Web applications. - -@links_1161_a - JMatter - -@links_1162_p - Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. - -@links_1163_a - jOOQ (Java Object Oriented Querying) - -@links_1164_p - jOOQ is a fluent API for typesafe SQL query construction and execution - -@links_1165_a - Liftweb - -@links_1166_p - A Scala-based, secure, developer friendly web framework. - -@links_1167_a - LiquiBase - -@links_1168_p - A tool to manage database changes and refactorings. - -@links_1169_a - Luntbuild - -@links_1170_p - Build automation and management tool. - -@links_1171_a - localdb - -@links_1172_p - A tool that locates the full file path of the folder containing the database files. - -@links_1173_a - Magnolia - -@links_1174_p - Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. - -@links_1175_a - MiniConnectionPoolManager - -@links_1176_p - A lightweight standalone JDBC connection pool manager. - -@links_1177_a - Mr. 
Persister - -@links_1178_p - Simple, small and fast object relational mapping. - -@links_1179_a - Myna Application Server - -@links_1180_p - Java web app that provides dynamic web content and Java libraries access from JavaScript. - -@links_1181_a - MyTunesRss - -@links_1182_p - MyTunesRSS lets you listen to your music wherever you are. - -@links_1183_a - NCGC CurveFit - -@links_1184_p - From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. - -@links_1185_a - Nuxeo - -@links_1186_p - Standards-based, open source platform for building ECM applications. - -@links_1187_a - nWire - -@links_1188_p - Eclipse plug-in which expedites Java development. It's main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. 
- -@links_1189_a - Ontology Works - -@links_1190_p - This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. - -@links_1191_a - Ontoprise OntoBroker - -@links_1192_p - SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic. - -@links_1193_a - Open Anzo - -@links_1194_p - Semantic Application Server. - -@links_1195_a - OpenGroove - -@links_1196_p - OpenGroove is a groupware program that allows users to synchronize data. - -@links_1197_a - OpenSocial Development Environment (OSDE) - -@links_1198_p - Development tool for OpenSocial application. - -@links_1199_a - Orion - -@links_1200_p - J2EE Application Server. - -@links_1201_a - P5H2 - -@links_1202_p - A library for the Processing programming language and environment. - -@links_1203_a - Phase-6 - -@links_1204_p - A computer based learning software. - -@links_1205_a - Pickle - -@links_1206_p - Pickle is a Java library containing classes for persistence, concurrency, and logging. - -@links_1207_a - Piman - -@links_1208_p - Water treatment projects data management. - -@links_1209_a - PolePosition - -@links_1210_p - Open source database benchmark. - -@links_1211_a - Poormans - -@links_1212_p - Very basic CMS running as a SWT application and generating static html pages. - -@links_1213_a - Railo - -@links_1214_p - Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. - -@links_1215_a - Razuna - -@links_1216_p - Open source Digital Asset Management System with integrated Web Content Management. - -@links_1217_a - RIFE - -@links_1218_p - A full-stack web application framework with tools and APIs to implement most common web features. 
- -@links_1219_a - Sava - -@links_1220_p - Open-source web-based content management system. - -@links_1221_a - Scriptella - -@links_1222_p - ETL (Extract-Transform-Load) and script execution tool. - -@links_1223_a - Sesar - -@links_1224_p - Dependency Injection Container with Aspect Oriented Programming. - -@links_1225_a - SemmleCode - -@links_1226_p - Eclipse plugin to help you improve software quality. - -@links_1227_a - SeQuaLite - -@links_1228_p - A free, light-weight, java data access framework. - -@links_1229_a - ShapeLogic - -@links_1230_p - Toolkit for declarative programming, image processing and computer vision. - -@links_1231_a - Shellbook - -@links_1232_p - Desktop publishing application. - -@links_1233_a - Signsoft intelliBO - -@links_1234_p - Persistence middleware supporting the JDO specification. - -@links_1235_a - SimpleORM - -@links_1236_p - Simple Java Object Relational Mapping. - -@links_1237_a - SymmetricDS - -@links_1238_p - A web-enabled, database independent, data synchronization/replication software. - -@links_1239_a - SmartFoxServer - -@links_1240_p - Platform for developing multiuser applications and games with Macromedia Flash. - -@links_1241_a - Social Bookmarks Friend Finder - -@links_1242_p - A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). - -@links_1243_a - sormula - -@links_1244_p - Simple object relational mapping. - -@links_1245_a - Springfuse - -@links_1246_p - Code generation For Spring, Spring MVC & Hibernate. - -@links_1247_a - SQLOrm - -@links_1248_p - Java Object Relation Mapping. - -@links_1249_a - StelsCSV and StelsXML - -@links_1250_p - StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. 
- -@links_1251_a - StorYBook - -@links_1252_p - A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. - -@links_1253_a - StreamCruncher - -@links_1254_p - Event (stream) processing kernel. - -@links_1255_a - SUSE Manager, part of Linux Enterprise Server 11 - -@links_1256_p - The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. - -@links_1257_a - Tune Backup - -@links_1258_p - Easy-to-use backup solution for your iTunes library. - -@links_1259_a - weblica - -@links_1260_p - Desktop CMS. - -@links_1261_a - Web of Web - -@links_1262_p - Collaborative and realtime interactive media platform for the web. - -@links_1263_a - Werkzeugkasten - -@links_1264_p - Minimum Java Toolset. - -@links_1265_a - VPDA - -@links_1266_p - View providers driven applications is a Java based application framework for building applications composed from server components - view providers. - -@links_1267_a - Volunteer database - -@links_1268_p - A database front end to register volunteers, partnership and donation for a Non Profit organization. - -@mainWeb_1000_h1 -H2 Database Engine - -@mainWeb_1001_p - Welcome to H2, the Java SQL database. 
The main features of H2 are: - -@mainWeb_1002_li -Very fast, open source, JDBC API - -@mainWeb_1003_li -Embedded and server modes; in-memory databases - -@mainWeb_1004_li -Browser based Console application - -@mainWeb_1005_li -Small footprint: around 1.5 MB jar file size - -@mainWeb_1006_h2 -Download - -@mainWeb_1007_td - Version 1.4.187 (2015-04-10), Beta - -@mainWeb_1008_a -Windows Installer (5 MB) - -@mainWeb_1009_a -All Platforms (zip, 8 MB) - -@mainWeb_1010_a -All Downloads - -@mainWeb_1011_td -    - -@mainWeb_1012_h2 -Support - -@mainWeb_1013_a -Stack Overflow (tag H2) - -@mainWeb_1014_a -Google Group English - -@mainWeb_1015_p -, Japanese - -@mainWeb_1016_p - For non-technical issues, use: - -@mainWeb_1017_h2 -Features - -@mainWeb_1018_th -H2 - -@mainWeb_1019_a -Derby - -@mainWeb_1020_a -HSQLDB - -@mainWeb_1021_a -MySQL - -@mainWeb_1022_a -PostgreSQL - -@mainWeb_1023_td -Pure Java - -@mainWeb_1024_td -Yes - -@mainWeb_1025_td -Yes - -@mainWeb_1026_td -Yes - -@mainWeb_1027_td -No - -@mainWeb_1028_td -No - -@mainWeb_1029_td -Memory Mode - -@mainWeb_1030_td -Yes - -@mainWeb_1031_td -Yes - -@mainWeb_1032_td -Yes - -@mainWeb_1033_td -No - -@mainWeb_1034_td -No - -@mainWeb_1035_td -Encrypted Database - -@mainWeb_1036_td -Yes - -@mainWeb_1037_td -Yes - -@mainWeb_1038_td -Yes - -@mainWeb_1039_td -No - -@mainWeb_1040_td -No - -@mainWeb_1041_td -ODBC Driver - -@mainWeb_1042_td -Yes - -@mainWeb_1043_td -No - -@mainWeb_1044_td -No - -@mainWeb_1045_td -Yes - -@mainWeb_1046_td -Yes - -@mainWeb_1047_td -Fulltext Search - -@mainWeb_1048_td -Yes - -@mainWeb_1049_td -No - -@mainWeb_1050_td -No - -@mainWeb_1051_td -Yes - -@mainWeb_1052_td -Yes - -@mainWeb_1053_td -Multi Version Concurrency - -@mainWeb_1054_td -Yes - -@mainWeb_1055_td -No - -@mainWeb_1056_td -Yes - -@mainWeb_1057_td -Yes - -@mainWeb_1058_td -Yes - -@mainWeb_1059_td -Footprint (jar/dll size) - -@mainWeb_1060_td -~1 MB - -@mainWeb_1061_td -~2 MB - -@mainWeb_1062_td -~1 MB - -@mainWeb_1063_td -~4 MB - 
-@mainWeb_1064_td -~6 MB - -@mainWeb_1065_p - See also the detailed comparison. - -@mainWeb_1066_h2 -News - -@mainWeb_1067_b -Newsfeeds: - -@mainWeb_1068_a -Full text (Atom) - -@mainWeb_1069_p - or Header only (RSS). - -@mainWeb_1070_b -Email Newsletter: - -@mainWeb_1071_p - Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. - -@mainWeb_1072_td -  - -@mainWeb_1073_h2 -Contribute - -@mainWeb_1074_p - You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter: - -@main_1000_h1 -H2 Database Engine - -@main_1001_p - Welcome to H2, the free Java SQL database engine. - -@main_1002_a -Quickstart - -@main_1003_p - Get a fast overview. - -@main_1004_a -Tutorial - -@main_1005_p - Go through the samples. - -@main_1006_a -Features - -@main_1007_p - See what this database can do and how to use these features. 
- -@mvstore_1000_h1 -MVStore - -@mvstore_1001_a - Overview - -@mvstore_1002_a - Example Code - -@mvstore_1003_a - Store Builder - -@mvstore_1004_a - R-Tree - -@mvstore_1005_a - Features - -@mvstore_1006_a -- Maps - -@mvstore_1007_a -- Versions - -@mvstore_1008_a -- Transactions - -@mvstore_1009_a -- In-Memory Performance and Usage - -@mvstore_1010_a -- Pluggable Data Types - -@mvstore_1011_a -- BLOB Support - -@mvstore_1012_a -- R-Tree and Pluggable Map Implementations - -@mvstore_1013_a -- Concurrent Operations and Caching - -@mvstore_1014_a -- Log Structured Storage - -@mvstore_1015_a -- Off-Heap and Pluggable Storage - -@mvstore_1016_a -- File System Abstraction, File Locking and Online Backup - -@mvstore_1017_a -- Encrypted Files - -@mvstore_1018_a -- Tools - -@mvstore_1019_a -- Exception Handling - -@mvstore_1020_a -- Storage Engine for H2 - -@mvstore_1021_a - File Format - -@mvstore_1022_a - Similar Projects and Differences to Other Storage Engines - -@mvstore_1023_a - Current State - -@mvstore_1024_a - Requirements - -@mvstore_1025_h2 -Overview - -@mvstore_1026_p - The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. - -@mvstore_1027_li -MVStore stands for "multi-version store". - -@mvstore_1028_li -Each store contains a number of maps that can be accessed using the java.util.Map interface. - -@mvstore_1029_li -Both file-based persistence and in-memory operation are supported. - -@mvstore_1030_li -It is intended to be fast, simple to use, and small. - -@mvstore_1031_li -Concurrent read and write operations are supported. - -@mvstore_1032_li -Transactions are supported (including concurrent transactions and 2-phase commit). - -@mvstore_1033_li -The tool is very modular. 
It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. - -@mvstore_1034_h2 -Example Code - -@mvstore_1035_p - The following sample code shows how to use the tool: - -@mvstore_1036_h2 -Store Builder - -@mvstore_1037_p - The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage: - -@mvstore_1038_p - The list of available options is: - -@mvstore_1039_li -autoCommitBufferSize: the size of the write buffer. - -@mvstore_1040_li -autoCommitDisabled: to disable auto-commit. - -@mvstore_1041_li -backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background. - -@mvstore_1042_li -cacheSize: the cache size in MB. - -@mvstore_1043_li -compress: compress the data when storing using a fast algorithm (LZF). - -@mvstore_1044_li -compressHigh: compress the data when storing using a slower algorithm (Deflate). - -@mvstore_1045_li -encryptionKey: the key for file encryption. - -@mvstore_1046_li -fileName: the name of the file, for file based stores. - -@mvstore_1047_li -fileStore: the storage implementation to use. - -@mvstore_1048_li -pageSplitSize: the point where pages are split. - -@mvstore_1049_li -readOnly: open the file in read-only mode. - -@mvstore_1050_h2 -R-Tree - -@mvstore_1051_p - The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows: - -@mvstore_1052_p - The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. - -@mvstore_1053_h2 -Features - -@mvstore_1054_h3 -Maps - -@mvstore_1055_p - Each store contains a set of named maps. 
A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. - -@mvstore_1056_p - Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. - -@mvstore_1057_p - In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). - -@mvstore_1058_h3 -Versions - -@mvstore_1059_p - A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. - -@mvstore_1060_p - The following sample code show how to create a store, open a map, add some data, and access the current and an old version: - -@mvstore_1061_h3 -Transactions - -@mvstore_1062_p - To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
- -@mvstore_1063_p - Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. - -@mvstore_1064_h3 -In-Memory Performance and Usage - -@mvstore_1065_p - Performance of in-memory operations is about 50% slower than java.util.TreeMap. - -@mvstore_1066_p - The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. - -@mvstore_1067_p - If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. - -@mvstore_1068_p - As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). - -@mvstore_1069_h3 -Pluggable Data Types - -@mvstore_1070_p - Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
- -@mvstore_1071_p - Parameterized data types are supported (for example one could build a string data type that limits the length). - -@mvstore_1072_p - The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. - -@mvstore_1073_h3 -BLOB Support - -@mvstore_1074_p - There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. - -@mvstore_1075_h3 -R-Tree and Pluggable Map Implementations - -@mvstore_1076_p - The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations. - -@mvstore_1077_h3 -Concurrent Operations and Caching - -@mvstore_1078_p - Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. - -@mvstore_1079_p - Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. 
- -@mvstore_1080_p - For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed. - -@mvstore_1081_h3 -Log Structured Storage - -@mvstore_1082_p - Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). - -@mvstore_1083_p - When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). - -@mvstore_1084_p - There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. 
There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). - -@mvstore_1085_p - Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. - -@mvstore_1086_p - Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). - -@mvstore_1087_h3 -Off-Heap and Pluggable Storage - -@mvstore_1088_p - Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. - -@mvstore_1089_p - An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call: - -@mvstore_1090_h3 -File System Abstraction, File Locking and Online Backup - -@mvstore_1091_p - The file system is pluggable. 
The same file system abstraction is used as H2 uses. The file can be encrypted using a encrypting file system wrapper. Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. - -@mvstore_1092_p - Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. - -@mvstore_1093_p - The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. - -@mvstore_1094_h3 -Encrypted Files - -@mvstore_1095_p - File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows: - -@mvstore_1096_p - The following algorithms and settings are used: - -@mvstore_1097_li -The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. - -@mvstore_1098_li -The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. - -@mvstore_1099_li -The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. - -@mvstore_1100_li -To speed up opening an encrypted stores on Android, the number of PBKDF2 iterations is 10. 
The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. - -@mvstore_1101_li -The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only little more than one AES-128 round per block is needed. - -@mvstore_1102_h3 -Tools - -@mvstore_1103_p - There is a tool, the MVStoreTool, to dump the contents of a file. - -@mvstore_1104_h3 -Exception Handling - -@mvstore_1105_p - This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur: - -@mvstore_1106_code -IllegalStateException - -@mvstore_1107_li - if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. - -@mvstore_1108_code -IllegalArgumentException - -@mvstore_1109_li - if a method was called with an illegal argument. - -@mvstore_1110_code -UnsupportedOperationException - -@mvstore_1111_li - if a method was called that is not supported, for example trying to modify a read-only map. - -@mvstore_1112_code -ConcurrentModificationException - -@mvstore_1113_li - if a map is modified concurrently. - -@mvstore_1114_h3 -Storage Engine for H2 - -@mvstore_1115_p - For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. - -@mvstore_1116_h2 -File Format - -@mvstore_1117_p - The data is stored in one file. 
The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. - -@mvstore_1118_p - Each chunk contains a number of B-tree pages. As an example, the following code: - -@mvstore_1119_p - will result in the following two chunks (excluding metadata): - -@mvstore_1120_b -Chunk 1: - -@mvstore_1121_p - - Page 1: (root) node with 2 entries pointing to page 2 and 3 - -@mvstore_1122_p - - Page 2: leaf with 140 entries (keys 0 - 139) - -@mvstore_1123_p - - Page 3: leaf with 260 entries (keys 140 - 399) - -@mvstore_1124_b -Chunk 2: - -@mvstore_1125_p - - Page 4: (root) node with 2 entries pointing to page 3 and 5 - -@mvstore_1126_p - - Page 5: leaf with 140 entries (keys 0 - 139) - -@mvstore_1127_p - That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. - -@mvstore_1128_h3 -File Header - -@mvstore_1129_p - There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data: - -@mvstore_1130_p - The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are: - -@mvstore_1131_li -H: The entry "H:2" stands for the the H2 database. - -@mvstore_1132_li -block: The block number where one of the newest chunks starts (but not necessarily the newest). 
- -@mvstore_1133_li -blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. - -@mvstore_1134_li -chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. - -@mvstore_1135_li -created: The number of milliseconds since 1970 when the file was created. - -@mvstore_1136_li -format: The file format number. Currently 1. - -@mvstore_1137_li -version: The version number of the chunk. - -@mvstore_1138_li -fletcher: The Fletcher-32 checksum of the header. - -@mvstore_1139_p - When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. - -@mvstore_1140_h3 -Chunk Format - -@mvstore_1141_p - There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. - -@mvstore_1142_p - The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data: - -@mvstore_1143_p - The fields of the chunk header and footer are: - -@mvstore_1144_li -chunk: The chunk id. - -@mvstore_1145_li -block: The first block of the chunk (multiply by the block size to get the position in the file). 
- -@mvstore_1146_li -len: The size of the chunk in number of blocks. - -@mvstore_1147_li -map: The id of the newest map; incremented when a new map is created. - -@mvstore_1148_li -max: The sum of all maximum page sizes (see page format). - -@mvstore_1149_li -next: The predicted start block of the next chunk. - -@mvstore_1150_li -pages: The number of pages in the chunk. - -@mvstore_1151_li -root: The position of the metadata root page (see page format). - -@mvstore_1152_li -time: The time the chunk was written, in milliseconds after the file was created. - -@mvstore_1153_li -version: The version this chunk represents. - -@mvstore_1154_li -fletcher: The checksum of the footer. - -@mvstore_1155_p - Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. - -@mvstore_1156_p - How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. 
From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. If it turned out to be a newer valid chunk, this is repeated, until the newest chunk was found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written, and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops. - -@mvstore_1157_h3 -Page Format - -@mvstore_1158_p - Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is: - -@mvstore_1159_li -length (int): Length of the page in bytes. - -@mvstore_1160_li -checksum (short): Checksum (chunk id xor offset within the chunk xor page length). - -@mvstore_1161_li -mapId (variable size int): The id of the map this page belongs to. - -@mvstore_1162_li -len (variable size int): The number of keys in the page. - -@mvstore_1163_li -type (byte): The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm). - -@mvstore_1164_li -children (array of long; internal nodes only): The position of the children. - -@mvstore_1165_li -childCounts (array of variable size long; internal nodes only): The total number of entries for the given child page. 
- -@mvstore_1166_li -keys (byte array): All keys, stored depending on the data type. - -@mvstore_1167_li -values (byte array; leaf pages only): All values, stored depending on the data type. - -@mvstore_1168_p - Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. - -@mvstore_1169_p - Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. - -@mvstore_1170_p - The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. 
- -@mvstore_1171_p - Data compression: The data after the page type are optionally compressed using the LZF algorithm. - -@mvstore_1172_h3 -Metadata Map - -@mvstore_1173_p - In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries: - -@mvstore_1174_li -chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. - -@mvstore_1175_li -map.1: The metadata of map 1. The entries are: name, createVersion, and type. - -@mvstore_1176_li -name.data: The map id of the map named "data". The value is "1". - -@mvstore_1177_li -root.1: The root position of map 1. - -@mvstore_1178_li -setting.storeVersion: The store version (a user defined value). - -@mvstore_1179_h2 -Similar Projects and Differences to Other Storage Engines - -@mvstore_1180_p - Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. - -@mvstore_1181_p - The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. - -@mvstore_1182_p - Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses is a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. 
- -@mvstore_1183_p - The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses is a log structured storage. The MVStore does not have a record size limit. - -@mvstore_1184_h2 -Current State - -@mvstore_1185_p - The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). - -@mvstore_1186_h2 -Requirements - -@mvstore_1187_p - The MVStore is included in the latest H2 jar file. - -@mvstore_1188_p - There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. - -@mvstore_1189_p - To build just the MVStore (without the database engine), run: - -@mvstore_1190_p - This will create the file bin/h2mvstore-1.4.187.jar (about 200 KB). - -@performance_1000_h1 -Performance - -@performance_1001_a - Performance Comparison - -@performance_1002_a - PolePosition Benchmark - -@performance_1003_a - Database Performance Tuning - -@performance_1004_a - Using the Built-In Profiler - -@performance_1005_a - Application Profiling - -@performance_1006_a - Database Profiling - -@performance_1007_a - Statement Execution Plans - -@performance_1008_a - How Data is Stored and How Indexes Work - -@performance_1009_a - Fast Database Import - -@performance_1010_h2 -Performance Comparison - -@performance_1011_p - In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
- -@performance_1012_h3 -Embedded - -@performance_1013_th -Test Case - -@performance_1014_th -Unit - -@performance_1015_th -H2 - -@performance_1016_th -HSQLDB - -@performance_1017_th -Derby - -@performance_1018_td -Simple: Init - -@performance_1019_td -ms - -@performance_1020_td -1019 - -@performance_1021_td -1907 - -@performance_1022_td -8280 - -@performance_1023_td -Simple: Query (random) - -@performance_1024_td -ms - -@performance_1025_td -1304 - -@performance_1026_td -873 - -@performance_1027_td -1912 - -@performance_1028_td -Simple: Query (sequential) - -@performance_1029_td -ms - -@performance_1030_td -835 - -@performance_1031_td -1839 - -@performance_1032_td -5415 - -@performance_1033_td -Simple: Update (sequential) - -@performance_1034_td -ms - -@performance_1035_td -961 - -@performance_1036_td -2333 - -@performance_1037_td -21759 - -@performance_1038_td -Simple: Delete (sequential) - -@performance_1039_td -ms - -@performance_1040_td -950 - -@performance_1041_td -1922 - -@performance_1042_td -32016 - -@performance_1043_td -Simple: Memory Usage - -@performance_1044_td -MB - -@performance_1045_td -21 - -@performance_1046_td -10 - -@performance_1047_td -8 - -@performance_1048_td -BenchA: Init - -@performance_1049_td -ms - -@performance_1050_td -919 - -@performance_1051_td -2133 - -@performance_1052_td -7528 - -@performance_1053_td -BenchA: Transactions - -@performance_1054_td -ms - -@performance_1055_td -1219 - -@performance_1056_td -2297 - -@performance_1057_td -8541 - -@performance_1058_td -BenchA: Memory Usage - -@performance_1059_td -MB - -@performance_1060_td -12 - -@performance_1061_td -15 - -@performance_1062_td -7 - -@performance_1063_td -BenchB: Init - -@performance_1064_td -ms - -@performance_1065_td -905 - -@performance_1066_td -1993 - -@performance_1067_td -8049 - -@performance_1068_td -BenchB: Transactions - -@performance_1069_td -ms - -@performance_1070_td -1091 - -@performance_1071_td -583 - -@performance_1072_td -1165 - -@performance_1073_td 
-BenchB: Memory Usage - -@performance_1074_td -MB - -@performance_1075_td -17 - -@performance_1076_td -11 - -@performance_1077_td -8 - -@performance_1078_td -BenchC: Init - -@performance_1079_td -ms - -@performance_1080_td -2491 - -@performance_1081_td -4003 - -@performance_1082_td -8064 - -@performance_1083_td -BenchC: Transactions - -@performance_1084_td -ms - -@performance_1085_td -1979 - -@performance_1086_td -803 - -@performance_1087_td -2840 - -@performance_1088_td -BenchC: Memory Usage - -@performance_1089_td -MB - -@performance_1090_td -19 - -@performance_1091_td -22 - -@performance_1092_td -9 - -@performance_1093_td -Executed statements - -@performance_1094_td -# - -@performance_1095_td -1930995 - -@performance_1096_td -1930995 - -@performance_1097_td -1930995 - -@performance_1098_td -Total time - -@performance_1099_td -ms - -@performance_1100_td -13673 - -@performance_1101_td -20686 - -@performance_1102_td -105569 - -@performance_1103_td -Statements per second - -@performance_1104_td -# - -@performance_1105_td -141226 - -@performance_1106_td -93347 - -@performance_1107_td -18291 - -@performance_1108_h3 -Client-Server - -@performance_1109_th -Test Case - -@performance_1110_th -Unit - -@performance_1111_th -H2 (Server) - -@performance_1112_th -HSQLDB - -@performance_1113_th -Derby - -@performance_1114_th -PostgreSQL - -@performance_1115_th -MySQL - -@performance_1116_td -Simple: Init - -@performance_1117_td -ms - -@performance_1118_td -16338 - -@performance_1119_td -17198 - -@performance_1120_td -27860 - -@performance_1121_td -30156 - -@performance_1122_td -29409 - -@performance_1123_td -Simple: Query (random) - -@performance_1124_td -ms - -@performance_1125_td -3399 - -@performance_1126_td -2582 - -@performance_1127_td -6190 - -@performance_1128_td -3315 - -@performance_1129_td -3342 - -@performance_1130_td -Simple: Query (sequential) - -@performance_1131_td -ms - -@performance_1132_td -21841 - -@performance_1133_td -18699 - -@performance_1134_td -42347 - 
-@performance_1135_td -30774 - -@performance_1136_td -32611 - -@performance_1137_td -Simple: Update (sequential) - -@performance_1138_td -ms - -@performance_1139_td -6913 - -@performance_1140_td -7745 - -@performance_1141_td -28576 - -@performance_1142_td -32698 - -@performance_1143_td -11350 - -@performance_1144_td -Simple: Delete (sequential) - -@performance_1145_td -ms - -@performance_1146_td -8051 - -@performance_1147_td -9751 - -@performance_1148_td -42202 - -@performance_1149_td -44480 - -@performance_1150_td -16555 - -@performance_1151_td -Simple: Memory Usage - -@performance_1152_td -MB - -@performance_1153_td -22 - -@performance_1154_td -11 - -@performance_1155_td -9 - -@performance_1156_td -0 - -@performance_1157_td -1 - -@performance_1158_td -BenchA: Init - -@performance_1159_td -ms - -@performance_1160_td -12996 - -@performance_1161_td -14720 - -@performance_1162_td -24722 - -@performance_1163_td -26375 - -@performance_1164_td -26060 - -@performance_1165_td -BenchA: Transactions - -@performance_1166_td -ms - -@performance_1167_td -10134 - -@performance_1168_td -10250 - -@performance_1169_td -18452 - -@performance_1170_td -21453 - -@performance_1171_td -15877 - -@performance_1172_td -BenchA: Memory Usage - -@performance_1173_td -MB - -@performance_1174_td -13 - -@performance_1175_td -15 - -@performance_1176_td -9 - -@performance_1177_td -0 - -@performance_1178_td -1 - -@performance_1179_td -BenchB: Init - -@performance_1180_td -ms - -@performance_1181_td -15264 - -@performance_1182_td -16889 - -@performance_1183_td -28546 - -@performance_1184_td -31610 - -@performance_1185_td -29747 - -@performance_1186_td -BenchB: Transactions - -@performance_1187_td -ms - -@performance_1188_td -3017 - -@performance_1189_td -3376 - -@performance_1190_td -1842 - -@performance_1191_td -2771 - -@performance_1192_td -1433 - -@performance_1193_td -BenchB: Memory Usage - -@performance_1194_td -MB - -@performance_1195_td -17 - -@performance_1196_td -12 - -@performance_1197_td 
-11 - -@performance_1198_td -1 - -@performance_1199_td -1 - -@performance_1200_td -BenchC: Init - -@performance_1201_td -ms - -@performance_1202_td -14020 - -@performance_1203_td -10407 - -@performance_1204_td -17655 - -@performance_1205_td -19520 - -@performance_1206_td -17532 - -@performance_1207_td -BenchC: Transactions - -@performance_1208_td -ms - -@performance_1209_td -5076 - -@performance_1210_td -3160 - -@performance_1211_td -6411 - -@performance_1212_td -6063 - -@performance_1213_td -4530 - -@performance_1214_td -BenchC: Memory Usage - -@performance_1215_td -MB - -@performance_1216_td -19 - -@performance_1217_td -21 - -@performance_1218_td -11 - -@performance_1219_td -1 - -@performance_1220_td -1 - -@performance_1221_td -Executed statements - -@performance_1222_td -# - -@performance_1223_td -1930995 - -@performance_1224_td -1930995 - -@performance_1225_td -1930995 - -@performance_1226_td -1930995 - -@performance_1227_td -1930995 - -@performance_1228_td -Total time - -@performance_1229_td -ms - -@performance_1230_td -117049 - -@performance_1231_td -114777 - -@performance_1232_td -244803 - -@performance_1233_td -249215 - -@performance_1234_td -188446 - -@performance_1235_td -Statements per second - -@performance_1236_td -# - -@performance_1237_td -16497 - -@performance_1238_td -16823 - -@performance_1239_td -7887 - -@performance_1240_td -7748 - -@performance_1241_td -10246 - -@performance_1242_h3 -Benchmark Results and Comments - -@performance_1243_h4 -H2 - -@performance_1244_p - Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is: there is no limit on the result set size. - -@performance_1245_h4 -HSQLDB - -@performance_1246_p - Version 2.3.2 was used for the test. 
Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1). - -@performance_1247_h4 -Derby - -@performance_1248_p - Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. - -@performance_1249_h4 -PostgreSQL - -@performance_1250_p - Version 9.1.5 was used for the test. The following options where changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1251_h4 -MySQL - -@performance_1252_p - Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. 
For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1253_h4 -Firebird - -@performance_1254_p - Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance are welcome. - -@performance_1255_h4 -Why Oracle / MS SQL Server / DB2 are Not Listed - -@performance_1256_p - The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. - -@performance_1257_h3 -About this Benchmark - -@performance_1258_h4 -How to Run - -@performance_1259_p - This test was as follows: - -@performance_1260_h4 -Separate Process per Database - -@performance_1261_p - For each database, a new process is started, to ensure the previous test does not impact the current test. - -@performance_1262_h4 -Number of Connections - -@performance_1263_p - This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. - -@performance_1264_h4 -Real-World Tests - -@performance_1265_p - Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). 
BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. - -@performance_1266_h4 -Comparing Embedded with Server Databases - -@performance_1267_p - This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. - -@performance_1268_h4 -Test Platform - -@performance_1269_p - This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. - -@performance_1270_h4 -Multiple Runs - -@performance_1271_p - When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. - -@performance_1272_h4 -Memory Usage - -@performance_1273_p - It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. - -@performance_1274_h4 -Delayed Operations - -@performance_1275_p - Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). 
- -@performance_1276_h4 -Transaction Commit / Durability - -@performance_1277_p - Durability means a transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many databases suggest 'batching' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. - -@performance_1278_h4 -Using Prepared Statements - -@performance_1279_p - Wherever possible, the test cases use prepared statements. - -@performance_1280_h4 -Currently Not Tested: Startup Time - -@performance_1281_p - The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. - -@performance_1282_h2 -PolePosition Benchmark - -@performance_1283_p - The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). 
- -@performance_1284_th -Test Case - -@performance_1285_th -Unit - -@performance_1286_th -H2 - -@performance_1287_th -HSQLDB - -@performance_1288_th -MySQL - -@performance_1289_td -Melbourne write - -@performance_1290_td -ms - -@performance_1291_td -369 - -@performance_1292_td -249 - -@performance_1293_td -2022 - -@performance_1294_td -Melbourne read - -@performance_1295_td -ms - -@performance_1296_td -47 - -@performance_1297_td -49 - -@performance_1298_td -93 - -@performance_1299_td -Melbourne read_hot - -@performance_1300_td -ms - -@performance_1301_td -24 - -@performance_1302_td -43 - -@performance_1303_td -95 - -@performance_1304_td -Melbourne delete - -@performance_1305_td -ms - -@performance_1306_td -147 - -@performance_1307_td -133 - -@performance_1308_td -176 - -@performance_1309_td -Sepang write - -@performance_1310_td -ms - -@performance_1311_td -965 - -@performance_1312_td -1201 - -@performance_1313_td -3213 - -@performance_1314_td -Sepang read - -@performance_1315_td -ms - -@performance_1316_td -765 - -@performance_1317_td -948 - -@performance_1318_td -3455 - -@performance_1319_td -Sepang read_hot - -@performance_1320_td -ms - -@performance_1321_td -789 - -@performance_1322_td -859 - -@performance_1323_td -3563 - -@performance_1324_td -Sepang delete - -@performance_1325_td -ms - -@performance_1326_td -1384 - -@performance_1327_td -1596 - -@performance_1328_td -6214 - -@performance_1329_td -Bahrain write - -@performance_1330_td -ms - -@performance_1331_td -1186 - -@performance_1332_td -1387 - -@performance_1333_td -6904 - -@performance_1334_td -Bahrain query_indexed_string - -@performance_1335_td -ms - -@performance_1336_td -336 - -@performance_1337_td -170 - -@performance_1338_td -693 - -@performance_1339_td -Bahrain query_string - -@performance_1340_td -ms - -@performance_1341_td -18064 - -@performance_1342_td -39703 - -@performance_1343_td -41243 - -@performance_1344_td -Bahrain query_indexed_int - -@performance_1345_td -ms - -@performance_1346_td 
-104 - -@performance_1347_td -134 - -@performance_1348_td -678 - -@performance_1349_td -Bahrain update - -@performance_1350_td -ms - -@performance_1351_td -191 - -@performance_1352_td -87 - -@performance_1353_td -159 - -@performance_1354_td -Bahrain delete - -@performance_1355_td -ms - -@performance_1356_td -1215 - -@performance_1357_td -729 - -@performance_1358_td -6812 - -@performance_1359_td -Imola retrieve - -@performance_1360_td -ms - -@performance_1361_td -198 - -@performance_1362_td -194 - -@performance_1363_td -4036 - -@performance_1364_td -Barcelona write - -@performance_1365_td -ms - -@performance_1366_td -413 - -@performance_1367_td -832 - -@performance_1368_td -3191 - -@performance_1369_td -Barcelona read - -@performance_1370_td -ms - -@performance_1371_td -119 - -@performance_1372_td -160 - -@performance_1373_td -1177 - -@performance_1374_td -Barcelona query - -@performance_1375_td -ms - -@performance_1376_td -20 - -@performance_1377_td -5169 - -@performance_1378_td -101 - -@performance_1379_td -Barcelona delete - -@performance_1380_td -ms - -@performance_1381_td -388 - -@performance_1382_td -319 - -@performance_1383_td -3287 - -@performance_1384_td -Total - -@performance_1385_td -ms - -@performance_1386_td -26724 - -@performance_1387_td -53962 - -@performance_1388_td -87112 - -@performance_1389_p - There are a few problems with the PolePosition test: - -@performance_1390_li - HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true in the file Jdbc.properties. - -@performance_1391_li -HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). 
To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1 - -@performance_1392_li -The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. - -@performance_1393_h2 -Database Performance Tuning - -@performance_1394_h3 -Keep Connections Open or Use a Connection Pool - -@performance_1395_p - If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. - -@performance_1396_p - If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. - -@performance_1397_h3 -Use a Modern JVM - -@performance_1398_p - Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. - -@performance_1399_h3 -Virus Scanners - -@performance_1400_p - Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. 
- -@performance_1401_h3 -Using the Trace Options - -@performance_1402_p - If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options. - -@performance_1403_h3 -Index Usage - -@performance_1404_p - This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. - -@performance_1405_h3 -How Data is Stored Internally - -@performance_1406_p - For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". - -@performance_1407_p - H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long. 
If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). - -@performance_1408_p - For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. - -@performance_1409_h3 -Optimizer - -@performance_1410_p - This database uses a cost based optimizer. For simple queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. - -@performance_1411_h3 -Expression Optimization - -@performance_1412_p - After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. 
Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. - -@performance_1413_h3 -COUNT(*) Optimization - -@performance_1414_p - If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. - -@performance_1415_h3 -Updating Optimizer Statistics / Column Selectivity - -@performance_1416_p - When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID, two indexes can be used, in this case the index on NAME for T1 and the index on ID for T2. - -@performance_1417_p - If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names. - -@performance_1418_p - The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. 
- -@performance_1419_h3 -In-Memory (Hash) Indexes - -@performance_1420_p - Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. - -@performance_1421_p -In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows itself will also be kept in-memory. Please note this may cause memory problems for large tables. - -@performance_1422_p - In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only supports direct lookup (WHERE ID = ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...). - -@performance_1423_h3 -Use Prepared Statements - -@performance_1424_p - If possible, use prepared statements with parameters. - -@performance_1425_h3 -Prepared Statements and IN(...) - -@performance_1426_p - Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example: - -@performance_1427_h3 -Optimization Examples - -@performance_1428_p - See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. - -@performance_1429_h3 -Cache Size and Type - -@performance_1430_p - By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. - -@performance_1431_h3 -Data Types - -@performance_1432_p - Each data type has different storage and performance characteristics: - -@performance_1433_li -The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. - -@performance_1434_li -Text types are slower to read, write, and compare than numeric types and generally require more storage. 
- -@performance_1435_li -See Large Objects for information on BINARY vs. BLOB and VARCHAR vs. CLOB performance. - -@performance_1436_li -Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. - -@performance_1437_code -SMALLINT/TINYINT/BOOLEAN - -@performance_1438_li - are not significantly smaller or faster to work with than INTEGER in most modes. - -@performance_1439_h3 -Sorted Insert Optimization - -@performance_1440_p - To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED before the SELECT statement: - -@performance_1441_h2 -Using the Built-In Profiler - -@performance_1442_p - A very simple Java profiler is built-in. To use it, use the following template: - -@performance_1443_h2 -Application Profiling - -@performance_1444_h3 -Analyze First - -@performance_1445_p - Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). But this does not work for complex applications with many modules, and for memory problems. - -@performance_1446_p - A simple way to profile an application is to use the built-in profiling tool of java. Example: - -@performance_1447_p - Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). - -@performance_1448_p - A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. 
Example: - -@performance_1449_p - The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. - -@performance_1450_h2 -Database Profiling - -@performance_1451_p - The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2. As an example, execute the following script using the H2 Console: - -@performance_1452_p - After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file. - -@performance_1453_p - The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary): - -@performance_1454_h2 -Statement Execution Plans - -@performance_1455_p - The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN: SELECT, UPDATE, DELETE, MERGE, INSERT. The following query shows that the database uses the primary key index to search for rows: - -@performance_1456_p - For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE (using the primary key). 
For each row, it will additionally check that the value of the column AMOUNT is larger than zero, and for those rows the database will search in the table CUSTOMER (using the primary key). The query plan contains some redundancy so it is a valid statement. - -@performance_1457_h3 -Displaying the Scan Count - -@performance_1458_code -EXPLAIN ANALYZE - -@performance_1459_p - additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan means this query doesn't use an index. - -@performance_1460_p - The cache prevents the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Except for large CLOB and BLOB, which are not stored in the table. - -@performance_1461_h3 -Special Optimizations - -@performance_1462_p - For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY is used. - -@performance_1463_p - For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST, the query plan includes the line /* direct lookup */ if the data can be read from an index. - -@performance_1464_p - For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is a non-unique or multi-column index on this column, and if this column has a low selectivity. - -@performance_1465_p - For queries of the form SELECT * FROM TEST ORDER BY ID, the query plan includes the line /* index sorted */ to indicate there is no separate sorting required. 
- -@performance_1466_p - For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID, the query plan includes the line /* group sorted */ to indicate there is no separate sorting required. - -@performance_1467_h2 -How Data is Stored and How Indexes Work - -@performance_1468_p - Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT or BIGINT, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id: using the _ROWID_ pseudo-column: - -@performance_1469_p - The data is stored in the database as follows: - -@performance_1470_th -_ROWID_ - -@performance_1471_th -FIRST_NAME - -@performance_1472_th -NAME - -@performance_1473_th -CITY - -@performance_1474_th -PHONE - -@performance_1475_td -1 - -@performance_1476_td -John - -@performance_1477_td -Miller - -@performance_1478_td -Berne - -@performance_1479_td -123 456 789 - -@performance_1480_td -2 - -@performance_1481_td -Philip - -@performance_1482_td -Jones - -@performance_1483_td -Berne - -@performance_1484_td -123 012 345 - -@performance_1485_p - Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. 
To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: - -@performance_1486_h3 -Indexes - -@performance_1487_p - An index internally is basically just a table that contains the indexed column(s), plus the row id: - -@performance_1488_p - In the index, the data is sorted by the indexed columns. So this index contains the following data: - -@performance_1489_th -CITY - -@performance_1490_th -NAME - -@performance_1491_th -FIRST_NAME - -@performance_1492_th -_ROWID_ - -@performance_1493_td -Berne - -@performance_1494_td -Jones - -@performance_1495_td -Philip - -@performance_1496_td -2 - -@performance_1497_td -Berne - -@performance_1498_td -Miller - -@performance_1499_td -John - -@performance_1500_td -1 - -@performance_1501_p - When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used: - -@performance_1502_p - If your application often queries the table for a phone number, then it makes sense to create an additional index on it: - -@performance_1503_p - This index contains the phone number, and the row id: - -@performance_1504_th -PHONE - -@performance_1505_th -_ROWID_ - -@performance_1506_td -123 012 345 - -@performance_1507_td -2 - -@performance_1508_td -123 456 789 - -@performance_1509_td -1 - -@performance_1510_h3 -Using Multiple Indexes - -@performance_1511_p - Within a query, only one index per logical table is used. 
Using the condition PHONE = '123 567 789' OR CITY = 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION. In this case, each individual query uses a different index: - -@performance_1512_h2 -Fast Database Import - -@performance_1513_p - To speed up large imports, consider using the following options temporarily: - -@performance_1514_code -SET LOG 0 - -@performance_1515_li - (disabling the transaction log) - -@performance_1516_code -SET CACHE_SIZE - -@performance_1517_li - (a large cache is faster) - -@performance_1518_code -SET LOCK_MODE 0 - -@performance_1519_li - (disable locking) - -@performance_1520_code -SET UNDO_LOG 0 - -@performance_1521_li - (disable the session undo log) - -@performance_1522_p - These options can be set in the database URL: jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0. Most of those options are not recommended for regular use, that means you need to reset them after use. - -@performance_1523_p - If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... - -@quickstart_1000_h1 -Quickstart - -@quickstart_1001_a - Embedding H2 in an Application - -@quickstart_1002_a - The H2 Console Application - -@quickstart_1003_h2 -Embedding H2 in an Application - -@quickstart_1004_p - This database can be used in embedded mode, or in server mode. 
To use it in embedded mode, you need to: - -@quickstart_1005_li -Add the h2*.jar to the classpath (H2 does not have any dependencies) - -@quickstart_1006_li -Use the JDBC driver class: org.h2.Driver - -@quickstart_1007_li -The database URL jdbc:h2:~/test opens the database test in your user home directory - -@quickstart_1008_li -A new database is automatically created - -@quickstart_1009_h2 -The H2 Console Application - -@quickstart_1010_p - The Console lets you access a SQL database using a browser interface. - -@quickstart_1011_p - If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial. - -@quickstart_1012_h3 -Step-by-Step - -@quickstart_1013_h4 -Installation - -@quickstart_1014_p - Install the software using the Windows Installer (if you did not yet do that). - -@quickstart_1015_h4 -Start the Console - -@quickstart_1016_p - Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]: - -@quickstart_1017_p - A new console window appears: - -@quickstart_1018_p - Also, a new browser page should open with the URL http://localhost:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time. - -@quickstart_1019_h4 -Login - -@quickstart_1020_p - Select [Generic H2] and click [Connect]: - -@quickstart_1021_p - You are now logged in. - -@quickstart_1022_h4 -Sample - -@quickstart_1023_p - Click on the [Sample SQL Script]: - -@quickstart_1024_p - The SQL commands appear in the command area. - -@quickstart_1025_h4 -Execute - -@quickstart_1026_p - Click [Run] - -@quickstart_1027_p - On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script. 
- -@quickstart_1028_h4 -Disconnect - -@quickstart_1029_p - Click on [Disconnect]: - -@quickstart_1030_p - to close the connection. - -@quickstart_1031_h4 -End - -@quickstart_1032_p - Close the console window. For more information, see the Tutorial. - -@roadmap_1000_h1 -Roadmap - -@roadmap_1001_p - New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches. - -@roadmap_1002_h2 -Version 1.5.x: Planned Changes - -@roadmap_1003_li -Replace file password hash with file encryption key; validate encryption key when connecting. - -@roadmap_1004_li -Remove "set binary collation" feature. - -@roadmap_1005_li -Remove the encryption algorithm XTEA. - -@roadmap_1006_li -Disallow referencing other tables in a table (via constraints for example). - -@roadmap_1007_li -Remove PageStore features like compress_lob. - -@roadmap_1008_h2 -Version 1.4.x: Planned Changes - -@roadmap_1009_li -Change license to MPL 2.0. - -@roadmap_1010_li -Automatic migration from 1.3 databases to 1.4. - -@roadmap_1011_li -Option to disable the file name suffix somehow (issue 447). - -@roadmap_1012_h2 -Priority 1 - -@roadmap_1013_li -Bugfixes. - -@roadmap_1014_li -More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): Online backup (using the 'backup' statement). - -@roadmap_1015_li -Server side cursors. - -@roadmap_1016_h2 -Priority 2 - -@roadmap_1017_li -Support hints for the optimizer (which index to use, enforce the join order). - -@roadmap_1018_li -Full outer joins. - -@roadmap_1019_li -Access rights: remember the owner of an object. Create, alter and drop privileges. COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas. - -@roadmap_1020_li -Test multi-threaded in-memory db access. 
- -@roadmap_1021_li -MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes. - -@roadmap_1022_li -Support GRANT SELECT, UPDATE ON [schemaName.] *. - -@roadmap_1023_li -Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. - -@roadmap_1024_li -Clustering: support mixed clustering mode (one embedded, others in server mode). - -@roadmap_1025_li -Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). - -@roadmap_1026_li -Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; - -@roadmap_1027_li -PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. - -@roadmap_1028_li -Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. - -@roadmap_1029_li -Test very large databases and LOBs (up to 256 GB). - -@roadmap_1030_li -Store all temp files in the temp directory. - -@roadmap_1031_li -Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. - -@roadmap_1032_li -Make DDL (Data Definition) operations transactional. - -@roadmap_1033_li -Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). - -@roadmap_1034_li -Groovy Stored Procedures: http://groovy.codehaus.org/GSQL - -@roadmap_1035_li -Add a migration guide (list differences between databases). - -@roadmap_1036_li -Optimization: automatic index creation suggestion using the trace file? - -@roadmap_1037_li -Fulltext search Lucene: analyzer configuration, mergeFactor. - -@roadmap_1038_li -Compression performance: don't allocate buffers, compress / expand in to out buffer. - -@roadmap_1039_li -Rebuild index functionality to shrink index size and improve performance. 
- -@roadmap_1040_li -Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). - -@roadmap_1041_li -Test performance again with SQL Server, Oracle, DB2. - -@roadmap_1042_li -Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. - -@roadmap_1043_li -Write more tests and documentation for MVCC (Multi Version Concurrency Control). - -@roadmap_1044_li -Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. - -@roadmap_1045_li -Implement, test, document XAConnection and so on. - -@roadmap_1046_li -Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). - -@roadmap_1047_li -CHECK: find out what makes CHECK=TRUE slow, move to CHECK2. - -@roadmap_1048_li -Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. - -@roadmap_1049_li -Index usage for (ID, NAME)=(1, 'Hi'); document. - -@roadmap_1050_li -Set a connection read only (Connection.setReadOnly) or using a connection parameter. - -@roadmap_1051_li -Access rights: finer grained access control (grant access for specific functions). - -@roadmap_1052_li -ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). - -@roadmap_1053_li -Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). - -@roadmap_1054_li -Web server classloader: override findResource / getResourceFrom. - -@roadmap_1055_li -Cost for embedded temporary view is calculated wrong, if result is constant. - -@roadmap_1056_li -Count index range query (count(*) where id between 10 and 20). - -@roadmap_1057_li -Performance: update in-place. - -@roadmap_1058_li -Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). 
- -@roadmap_1059_li -Database file name suffix: a way to use no or a different suffix (for example using a slash). - -@roadmap_1060_li -Eclipse plugin. - -@roadmap_1061_li -Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". - -@roadmap_1062_li -Fulltext search (native): reader / tokenizer / filter. - -@roadmap_1063_li -Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. - -@roadmap_1064_li -iReport to support H2. - -@roadmap_1065_li -Include SMTP (mail) client (alert on cluster failure, low disk space,...). - -@roadmap_1066_li -Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. - -@roadmap_1067_li -JSON parser and functions. - -@roadmap_1068_li -Copy database: tool with config GUI and batch mode, extensible (example: compare). - -@roadmap_1069_li -Document, implement tool for long running transactions using user-defined compensation statements. - -@roadmap_1070_li -Support SET TABLE DUAL READONLY. - -@roadmap_1071_li -GCJ: what is the state now? - -@roadmap_1072_li -Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html - -@roadmap_1073_li -Optimization: simpler log compression. - -@roadmap_1074_li -Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif - -@roadmap_1075_li -Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. - -@roadmap_1076_li -Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). 
- -@roadmap_1077_li -Custom class loader to reload functions on demand. - -@roadmap_1078_li -Test http://mysql-je.sourceforge.net/ - -@roadmap_1079_li -H2 Console: the webclient could support more features like phpMyAdmin. - -@roadmap_1080_li -Support Oracle functions: TO_DATE, TO_NUMBER. - -@roadmap_1081_li -Work on the Java to C converter. - -@roadmap_1082_li -The HELP information schema can be directly exposed in the Console. - -@roadmap_1083_li -Maybe use the 0x1234 notation for binary fields, see MS SQL Server. - -@roadmap_1084_li -Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html - -@roadmap_1085_li -SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm - -@roadmap_1086_li -SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip - -@roadmap_1087_li -Version column (number/sequence and timestamp based). - -@roadmap_1088_li -Optimize getGeneratedKey: send last identity after each execute (server). - -@roadmap_1089_li -Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). - -@roadmap_1090_li -Max memory rows / max undo log size: use block count / row size not row count. - -@roadmap_1091_li -Implement point-in-time recovery. - -@roadmap_1092_li -Support PL/SQL (programming language / control flow statements). - -@roadmap_1093_li -LIKE: improved version for larger texts (currently using naive search). - -@roadmap_1094_li -Throw an exception when the application calls getInt on a Long (optional). - -@roadmap_1095_li -Default date format for input and output (local date constants). - -@roadmap_1096_li -Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). - -@roadmap_1097_li -File system that writes to two file systems (replication, replicating file system). - -@roadmap_1098_li -Standalone tool to get relevant system properties and add it to the trace output. 
- -@roadmap_1099_li -Support 'call proc(1=value)' (PostgreSQL, Oracle). - -@roadmap_1100_li -Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). - -@roadmap_1101_li -Console: autocomplete Ctrl+Space inserts template. - -@roadmap_1102_li -Option to encrypt .trace.db file. - -@roadmap_1103_li -Auto-Update feature for database, .jar file. - -@roadmap_1104_li -ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. - -@roadmap_1105_li -Partial indexing (see PostgreSQL). - -@roadmap_1106_li -Add GUI to build a custom version (embedded, fulltext,...) using build flags. - -@roadmap_1107_li -http://rubyforge.org/projects/hypersonic/ - -@roadmap_1108_li -Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). - -@roadmap_1109_li -Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). - -@roadmap_1110_li -Backup tool should work with other databases as well. - -@roadmap_1111_li -Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. - -@roadmap_1112_li -Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). - -@roadmap_1113_li -Java static code analysis: http://pmd.sourceforge.net/ - -@roadmap_1114_li -Java static code analysis: http://www.eclipse.org/tptp/ - -@roadmap_1115_li -Compatibility for CREATE SCHEMA AUTHORIZATION. - -@roadmap_1116_li -Implement Clob / Blob truncate and the remaining functionality. - -@roadmap_1117_li -Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... - -@roadmap_1118_li -File locking: writing a system property to detect concurrent access from the same VM (different classloaders). - -@roadmap_1119_li -Pure SQL triggers (example: update parent table if the child table is changed). - -@roadmap_1120_li -Add H2 to Gem (Ruby install system). - -@roadmap_1121_li -Support linked JCR tables. 
- -@roadmap_1122_li -Native fulltext search: min word length; store word positions. - -@roadmap_1123_li -Add an option to the SCRIPT command to generate only portable / standard SQL. - -@roadmap_1124_li -Updatable views: create 'instead of' triggers automatically if possible (simple cases first). - -@roadmap_1125_li -Improve create index performance. - -@roadmap_1126_li -Compact databases without having to close the database (vacuum). - -@roadmap_1127_li -Implement more JDBC 4.0 features. - -@roadmap_1128_li -Support TRANSFORM / PIVOT as in MS Access. - -@roadmap_1129_li -SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). - -@roadmap_1130_li -Support updatable views with join on primary keys (to extend a table). - -@roadmap_1131_li -Public interface for functions (not public static). - -@roadmap_1132_li -Support reading the transaction log. - -@roadmap_1133_li -Feature matrix as in i-net software. - -@roadmap_1134_li -Updatable result set on table without primary key or unique index. - -@roadmap_1135_li -Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. - -@roadmap_1136_li -Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') - -@roadmap_1137_li -Support data type INTERVAL - -@roadmap_1138_li -Support nested transactions (possibly using savepoints internally). - -@roadmap_1139_li -Add a benchmark for bigger databases, and one for many users. - -@roadmap_1140_li -Compression in the result set over TCP/IP. - -@roadmap_1141_li -Support curtimestamp (like curtime, curdate). - -@roadmap_1142_li -Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. 
- -@roadmap_1143_li -Release locks (shared or exclusive) on demand - -@roadmap_1144_li -Support OUTER UNION - -@roadmap_1145_li -Support parameterized views (similar to CSVREAD, but using just SQL for the definition) - -@roadmap_1146_li -A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object - -@roadmap_1147_li -Support dynamic linked schema (automatically adding/updating/removing tables) - -@roadmap_1148_li -Clustering: adding a node should be very fast and without interrupting clients (very short lock) - -@roadmap_1149_li -Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific - -@roadmap_1150_li -Run benchmarks with Android, Java 7, java -server - -@roadmap_1151_li -Optimizations: faster hash function for strings. - -@roadmap_1152_li -DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality - -@roadmap_1153_li -Benchmark: add a graph to show how databases scale (performance/database size) - -@roadmap_1154_li -Implement a SQLData interface to map your data over to a custom object - -@roadmap_1155_li -In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) - -@roadmap_1156_li -Support multiple directories (on different hard drives) for the same database - -@roadmap_1157_li -Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response - -@roadmap_1158_li -Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) - -@roadmap_1159_li -Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML - -@roadmap_1160_li -Support triggers with a string property or option: SpringTrigger, OSGITrigger - -@roadmap_1161_li -MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id; - -@roadmap_1162_li -Ability to resize the cache array when resizing the cache - -@roadmap_1163_li 
-Time based cache writing (one second after writing the log) - -@roadmap_1164_li -Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 - -@roadmap_1165_li -Index usage for REGEXP LIKE. - -@roadmap_1166_li -Compatibility: add a role DBA (like ADMIN). - -@roadmap_1167_li -Better support multiple processors for in-memory databases. - -@roadmap_1168_li -Support N'text' - -@roadmap_1169_li -Support compatibility for jdbc:hsqldb:res: - -@roadmap_1170_li -HSQLDB compatibility: automatically convert to the next 'higher' data type. Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range) - -@roadmap_1171_li -Provide a Java SQL builder with standard and H2 syntax - -@roadmap_1172_li -Trace: write OS, file system, JVM,... when opening the database - -@roadmap_1173_li -Support indexes for views (probably requires materialized views) - -@roadmap_1174_li -Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters - -@roadmap_1175_li -Server: use one listener (detect if the request comes from a PG or TCP client) - -@roadmap_1176_li -Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 - -@roadmap_1177_li -Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html - -@roadmap_1178_li -DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates. - -@roadmap_1179_li -Support a special trigger on all tables to allow building a transaction log reader. - -@roadmap_1180_li -File system with a background writer thread; test if this is faster - -@roadmap_1181_li -Better document the source code (high level documentation). - -@roadmap_1182_li -Support select * from dual a left join dual b on b.x=(select max(x) from dual) - -@roadmap_1183_li -Optimization: don't lock when the database is read-only - -@roadmap_1184_li -Issue 146: Support merge join. 
- -@roadmap_1185_li -Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download - -@roadmap_1186_li -Cluster: hot deploy (adding a node at runtime). - -@roadmap_1187_li -Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. - -@roadmap_1188_li -Oracle: support DECODE method (convert to CASE WHEN). - -@roadmap_1189_li -Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping - -@roadmap_1190_li -Improve documentation of access rights. - -@roadmap_1191_li -Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). - -@roadmap_1192_li -Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). - -@roadmap_1193_li -Remember the user defined data type (domain) of a column. - -@roadmap_1194_li -MVCC: support multi-threaded kernel with multi-version concurrency. - -@roadmap_1195_li -Auto-server: add option to define the port range or list. - -@roadmap_1196_li -Support Jackcess (MS Access databases) - -@roadmap_1197_li -Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') - -@roadmap_1198_li -Improve time to open large databases (see mail 'init time for distributed setup') - -@roadmap_1199_li -Move Maven 2 repository from hsql.sf.net to h2database.sf.net - -@roadmap_1200_li -Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) - -@roadmap_1201_li -Optimize A=? OR B=? to UNION if the cost is lower. - -@roadmap_1202_li -Javadoc: document design patterns used - -@roadmap_1203_li -Support custom collators, for example for natural sort (for text that contains numbers). 
- -@roadmap_1204_li -Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) - -@roadmap_1205_li -Convert SQL-injection-2.txt to html document, include SQLInjection.java sample - -@roadmap_1206_li -Support OUT parameters in user-defined procedures. - -@roadmap_1207_li -Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp - -@roadmap_1208_li -HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC - -@roadmap_1209_li -Translation: use ?? in help.csv - -@roadmap_1210_li -Translated .pdf - -@roadmap_1211_li -Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file - -@roadmap_1212_li -Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. - -@roadmap_1213_li -RECOVER=2 to backup the database, run recovery, open the database - -@roadmap_1214_li -Recovery should work with encrypted databases - -@roadmap_1215_li -Corruption: new error code, add help - -@roadmap_1216_li -Space reuse: after init, scan all storages and free those that don't belong to a live database object - -@roadmap_1217_li -Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) - -@roadmap_1218_li -Support NOCACHE table option (Oracle). - -@roadmap_1219_li -Support table partitioning. - -@roadmap_1220_li -Add regular javadocs (using the default doclet, but another css) to the homepage. - -@roadmap_1221_li -The database should be kept open for a longer time when using the server mode. - -@roadmap_1222_li -Javadocs: for each tool, add a copy & paste sample in the class level. - -@roadmap_1223_li -Javadocs: add @author tags. 
- -@roadmap_1224_li -Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start(); - -@roadmap_1225_li -MySQL compatibility: real SQL statement for DESCRIBE TEST - -@roadmap_1226_li -Use a default delay of 1 second before closing a database. - -@roadmap_1227_li -Write (log) to system table before adding to internal data structures. - -@roadmap_1228_li -Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup). - -@roadmap_1229_li -Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case). - -@roadmap_1230_li -MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem). - -@roadmap_1231_li -Oracle compatibility: support NLS_DATE_FORMAT. - -@roadmap_1232_li -Support for Thread.interrupt to cancel running statements. - -@roadmap_1233_li -Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process). - -@roadmap_1234_li -H2 Console: support CLOB/BLOB download using a link. - -@roadmap_1235_li -Support flashback queries as in Oracle. - -@roadmap_1236_li -Import / Export of fixed width text files. - -@roadmap_1237_li -HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data). - -@roadmap_1238_li -Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn - -@roadmap_1239_li -Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns). - -@roadmap_1240_li -H2 Console: in-place autocomplete. - -@roadmap_1241_li -Support large databases: split database files to multiple directories / disks (similar to tablespaces). - -@roadmap_1242_li -H2 Console: support configuration option for fixed width (monospace) font. 
- -@roadmap_1243_li -Native fulltext search: support analyzers (specially for Chinese, Japanese). - -@roadmap_1244_li -Automatically compact databases from time to time (as a background process). - -@roadmap_1245_li -Test Eclipse DTP. - -@roadmap_1246_li -H2 Console: autocomplete: keep the previous setting - -@roadmap_1247_li -executeBatch: option to stop at the first failed statement. - -@roadmap_1248_li -Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 - -@roadmap_1249_li -Support Oracle ROWID (unique identifier for each row). - -@roadmap_1250_li -MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); - -@roadmap_1251_li -Server mode: improve performance for batch updates. - -@roadmap_1252_li -Applets: support read-only databases in a zip file (accessed as a resource). - -@roadmap_1253_li -Long running queries / errors / trace system table. - -@roadmap_1254_li -H2 Console should support JaQu directly. - -@roadmap_1255_li -Better document FTL_SEARCH, FTL_SEARCH_DATA. - -@roadmap_1256_li -Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. - -@roadmap_1257_li -Index creation using deterministic functions. - -@roadmap_1258_li -ANALYZE: for unique indexes that allow null, count the number of null. - -@roadmap_1259_li -MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html - -@roadmap_1260_li -AUTO_SERVER: support changing IP addresses (disable a network while the database is open). - -@roadmap_1261_li -Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. - -@roadmap_1262_li -Support TRUNCATE .. CASCADE like PostgreSQL. - -@roadmap_1263_li -Fulltext search: lazy result generation using SimpleRowSource. - -@roadmap_1264_li -Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). 
- -@roadmap_1265_li -MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. - -@roadmap_1266_li -MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 - -@roadmap_1267_li -Docs: add a one line description for each function and SQL statement at the top (in the link section). - -@roadmap_1268_li -Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). - -@roadmap_1269_li -Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. - -@roadmap_1270_li -Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. - -@roadmap_1271_li -MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) - -@roadmap_1272_li -Support a data type "timestamp with timezone" using java.util.Calendar. - -@roadmap_1273_li -Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 - -@roadmap_1274_li -Add database creation date and time to the database. - -@roadmap_1275_li -Support ASSERTION. - -@roadmap_1276_li -MySQL compatibility: support comparing 1='a' - -@roadmap_1277_li -Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html - -@roadmap_1278_li -PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. - -@roadmap_1279_li -RunScript should be able to read from system in (or quiet mode for Shell). - -@roadmap_1280_li -Natural join: support select x from dual natural join dual. - -@roadmap_1281_li -Support using system properties in database URLs (may be a security problem). - -@roadmap_1282_li -Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b - -@roadmap_1283_li -Use the Java service provider mechanism to register file systems and function libraries. 
- -@roadmap_1284_li -MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). - -@roadmap_1285_li -Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). - -@roadmap_1286_li -Optimization for EXISTS: convert to inner join or IN(..) if possible. - -@roadmap_1287_li -Functions: support hashcode(value); cryptographic and fast - -@roadmap_1288_li -Serialized file lock: support long running queries. - -@roadmap_1289_li -Network: use 127.0.0.1 if other addresses don't work. - -@roadmap_1290_li -Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. - -@roadmap_1291_li -Support reading JCR data: one table per node type; query table; cache option - -@roadmap_1292_li -OSGi: create a sample application, test, document. - -@roadmap_1293_li -help.csv: use complete examples for functions; run as test case. - -@roadmap_1294_li -Functions to calculate the memory and disk space usage of a table, a row, or a value. - -@roadmap_1295_li -Re-implement PooledConnection; use a lightweight connection object. - -@roadmap_1296_li -Doclet: convert tests in javadocs to a java class. - -@roadmap_1297_li -Doclet: format fields like methods, but support sorting by name and value. - -@roadmap_1298_li -Doclet: shrink the html files. - -@roadmap_1299_li -MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56 - -@roadmap_1300_li -Allow to scan index backwards starting with a value (to better support ORDER BY DESC). - -@roadmap_1301_li -Java Service Wrapper: try http://yajsw.sourceforge.net/ - -@roadmap_1302_li -Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. - -@roadmap_1303_li -MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN. 
- -@roadmap_1304_li -Use a lazy and auto-close input stream (open resource when reading, close on eof). - -@roadmap_1305_li -Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). - -@roadmap_1306_li -Improve SQL documentation, see http://www.w3schools.com/sql/ - -@roadmap_1307_li -MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. - -@roadmap_1308_li -MS SQL Server compatibility: support DATEPART syntax. - -@roadmap_1309_li -Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 - -@roadmap_1310_li -Support INTERVAL data type (see Oracle and others). - -@roadmap_1311_li -Combine Server and Console tool (only keep Server). - -@roadmap_1312_li -Store the Lucene index in the database itself. - -@roadmap_1313_li -Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29 - -@roadmap_1314_li -Oracle compatibility: support DECODE(x, ...). - -@roadmap_1315_li -MVCC: compare concurrent update behavior with PostgreSQL and Oracle. - -@roadmap_1316_li -HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). - -@roadmap_1317_li -HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) - -@roadmap_1318_li -Support comma as the decimal separator in the CSV tool. - -@roadmap_1319_li -Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz - -@roadmap_1320_li -Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. - -@roadmap_1321_li -CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. - -@roadmap_1322_li -Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601 - -@roadmap_1323_li -PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. 
- -@roadmap_1324_li -Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html - -@roadmap_1325_li -IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. - -@roadmap_1326_li -Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). - -@roadmap_1327_li -Oracle compatibility: support CREATE SYNONYM table FOR schema.table. - -@roadmap_1328_li -FTP: document the server, including -ftpTask option to execute / kill remote processes - -@roadmap_1329_li -FTP: problems with multithreading? - -@roadmap_1330_li -FTP: implement SFTP / FTPS - -@roadmap_1331_li -FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). - -@roadmap_1332_li -More secure default configuration if remote access is enabled. - -@roadmap_1333_li -Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). - -@roadmap_1334_li -Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. - -@roadmap_1335_li -Issue 107: Prefer using the ORDER BY index if LIMIT is used. - -@roadmap_1336_li -An index on (id, name) should be used for a query: select * from t where s=? order by i - -@roadmap_1337_li -Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. - -@roadmap_1338_li -Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). - -@roadmap_1339_li -Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). - -@roadmap_1340_li -Fast alter table add column. - -@roadmap_1341_li -Improve concurrency for in-memory database operations. - -@roadmap_1342_li -Issue 122: Support for connection aliases for remote tcp connections. 
- -@roadmap_1343_li -Fast scrambling (strong encryption doesn't help if the password is included in the application). - -@roadmap_1344_li -H2 Console: support -webPassword to require a password to access preferences or shutdown. - -@roadmap_1345_li -Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. - -@roadmap_1346_li -Issue 127: Support activation/deactivation of triggers - -@roadmap_1347_li -Issue 130: Custom log event listeners - -@roadmap_1348_li -Issue 131: IBM DB2 compatibility: sysibm.sysdummy1 - -@roadmap_1349_li -Issue 132: Use Java enum trigger type. - -@roadmap_1350_li -Issue 134: IBM DB2 compatibility: session global variables. - -@roadmap_1351_li -Cluster: support load balance with values for each server / auto detect. - -@roadmap_1352_li -FTL_SET_OPTION(keyString, valueString) with key stopWords at first. - -@roadmap_1353_li -Pluggable access control mechanism. - -@roadmap_1354_li -Fulltext search (Lucene): support streaming CLOB data. - -@roadmap_1355_li -Document/example how to create and read an encrypted script file. - -@roadmap_1356_li -Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). - -@roadmap_1357_li -Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. - -@roadmap_1358_li -Support a way to create or read compressed encrypted script files using an API. - -@roadmap_1359_li -Scripting language support (Javascript). - -@roadmap_1360_li -The network client should better detect if the server is not an H2 server and fail early. - -@roadmap_1361_li -H2 Console: support CLOB/BLOB upload. - -@roadmap_1362_li -Database file lock: detect hibernate / standby / very slow threads (compare system time). - -@roadmap_1363_li -Automatic detection of redundant indexes. 
- -@roadmap_1364_li -Maybe reject join without "on" (except natural join). - -@roadmap_1365_li -Implement GiST (Generalized Search Tree for Secondary Storage). - -@roadmap_1366_li -Function to read a number of bytes/characters from an BLOB or CLOB. - -@roadmap_1367_li -Issue 156: Support SELECT ? UNION SELECT ?. - -@roadmap_1368_li -Automatic mixed mode: support a port range list (to avoid firewall problems). - -@roadmap_1369_li -Support the pseudo column rowid, oid, _rowid_. - -@roadmap_1370_li -H2 Console / large result sets: stream early instead of keeping a whole result in-memory - -@roadmap_1371_li -Support TRUNCATE for linked tables. - -@roadmap_1372_li -UNION: evaluate INTERSECT before UNION (like most other database except Oracle). - -@roadmap_1373_li -Delay creating the information schema, and share metadata columns. - -@roadmap_1374_li -TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. - -@roadmap_1375_li -Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). - -@roadmap_1376_li -Support CREATE DATABASE LINK (a custom JDBC driver is already supported). - -@roadmap_1377_li -Support large GROUP BY operations. Issue 216. - -@roadmap_1378_li -Issue 163: Allow to create foreign keys on metadata types. - -@roadmap_1379_li -Logback: write a native DBAppender. - -@roadmap_1380_li -Cache size: don't use more cache than what is available. - -@roadmap_1381_li -Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. - -@roadmap_1382_li -Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. - -@roadmap_1383_li -User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. - -@roadmap_1384_li -Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. - -@roadmap_1385_li -Optimizer: WHERE X=? 
AND Y IN(?), it always uses the index on Y. Should be cost based. - -@roadmap_1386_li -Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. - -@roadmap_1387_li -Oracle compatibility: support INSERT ALL. - -@roadmap_1388_li -Issue 178: Optimizer: index usage when both ascending and descending indexes are available. - -@roadmap_1389_li -Issue 179: Related subqueries in HAVING clause. - -@roadmap_1390_li -IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. - -@roadmap_1391_li -Creating primary key: always create a constraint. - -@roadmap_1392_li -Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. - -@roadmap_1393_li -Indexes of temporary tables are currently kept in-memory. Is this how it should be? - -@roadmap_1394_li -The Shell tool should support the same built-in commands as the H2 Console. - -@roadmap_1395_li -Maybe use PhantomReference instead of finalize. - -@roadmap_1396_li -Database file name suffix: should only have one dot by default. Example: .h2db - -@roadmap_1397_li -Issue 196: Function based indexes - -@roadmap_1398_li -ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. - -@roadmap_1399_li -Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java - -@roadmap_1400_li -ROWNUM: Oracle compatibility when used within a subquery. Issue 198. - -@roadmap_1401_li -Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. - -@roadmap_1402_li -ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. 
- -@roadmap_1403_li -Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); - -@roadmap_1404_li -Optimizer: index usage when both ascending and descending indexes are available. Issue 178. - -@roadmap_1405_li -Issue 306: Support schema specific domains. - -@roadmap_1406_li -Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby: triggers are fired in the order in which they were created. - -@roadmap_1407_li -PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html - -@roadmap_1408_li -Improve documentation of system properties: only list the property names, default values, and description. - -@roadmap_1409_li -Support running totals / cumulative sum using SUM(..) OVER(..). - -@roadmap_1410_li -Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) - -@roadmap_1411_li -Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). - -@roadmap_1412_li -Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. - -@roadmap_1413_li -Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. - -@roadmap_1414_li -Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. - -@roadmap_1415_li -Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. - -@roadmap_1416_li -Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. - -@roadmap_1417_li -Log long running transactions (similar to long running statements). 
- -@roadmap_1418_li -Parameter data type is data type of other operand. Issue 205. - -@roadmap_1419_li -Some combinations of nested join with right outer join are not supported. - -@roadmap_1420_li -DatabaseEventListener.openConnection(id) and closeConnection(id). - -@roadmap_1421_li -Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. - -@roadmap_1422_li -Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. - -@roadmap_1423_li -Compatibility with MySQL TIMESTAMPDIFF. Issue 209. - -@roadmap_1424_li -Optimizer: use a histogram of the data, specially for non-normal distributions. - -@roadmap_1425_li -Trigger: allow declaring as source code (like functions). - -@roadmap_1426_li -User defined aggregate: allow declaring as source code (like functions). - -@roadmap_1427_li -The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. - -@roadmap_1428_li -MySQL + PostgreSQL compatibility: support string literal escape with \n. - -@roadmap_1429_li -PostgreSQL compatibility: support string literal escape with double \\. - -@roadmap_1430_li -Document the TCP server "management_db". Maybe include the IP address of the client. - -@roadmap_1431_li -Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main - -@roadmap_1432_li -If a database object was not found in the current schema, but one with the same name existed in another schema, include that in the error message. - -@roadmap_1433_li -Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) 
- -@roadmap_1434_li -Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". - -@roadmap_1435_li -JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). - -@roadmap_1436_li -Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; - -@roadmap_1437_li -nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). - -@roadmap_1438_li -Column as parameter of function table. Issue 228. - -@roadmap_1439_li -Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections. - -@roadmap_1440_li -Compatibility with MS Access: support "&" to concatenate text. - -@roadmap_1441_li -The BACKUP statement should not synchronize on the database, and therefore should not block other users. - -@roadmap_1442_li -Document the database file format. - -@roadmap_1443_li -Support reading LOBs. - -@roadmap_1444_li -Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... - -@roadmap_1445_li -Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work). - -@roadmap_1446_li -Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). - -@roadmap_1447_li -Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. - -@roadmap_1448_li -GROUP BY queries should use a temporary table if there are too many rows. 
- -@roadmap_1449_li -BLOB: support random access when reading. - -@roadmap_1450_li -CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). - -@roadmap_1451_li -Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). - -@roadmap_1452_li -Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). - -@roadmap_1453_li -Compatibility with MySQL: support non-strict mode (sql_mode = "") any data that is too large for the column will just be truncated or set to the default value. - -@roadmap_1454_li -The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition - -@roadmap_1455_li -Compatibility with IBM DB2: CREATE PROCEDURE. - -@roadmap_1456_li -Compatibility with IBM DB2: SQL cursors. - -@roadmap_1457_li -Single-column primary key values are always stored explicitly. This is not required. - -@roadmap_1458_li -Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). - -@roadmap_1459_li -CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. - -@roadmap_1460_li -Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). - -@roadmap_1461_li -Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). - -@roadmap_1462_li -PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] - -@roadmap_1463_li -PostgreSQL compatibility: UPDATE with FROM. - -@roadmap_1464_li -Issue 297: Oracle compatibility for "at time zone". - -@roadmap_1465_li -IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). - -@roadmap_1466_li -Support SQL/XML. 
- -@roadmap_1467_li -Support concurrent opening of databases. - -@roadmap_1468_li -Improved error message and diagnostics in case of network configuration problems. - -@roadmap_1469_li -TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). - -@roadmap_1470_li -Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). - -@roadmap_1471_li -ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). - -@roadmap_1472_li -MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html - -@roadmap_1473_li -The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ - -@roadmap_1474_li -Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". - -@roadmap_1475_li -MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. - -@roadmap_1476_li -Issue 283: Improve performance of H2 on Android. - -@roadmap_1477_li -Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). - -@roadmap_1478_li -Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d - -@roadmap_1479_li -PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). - -@roadmap_1480_li -MS SQL Server compatibility: support @@ROWCOUNT. - -@roadmap_1481_li -PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x). - -@roadmap_1482_li -Issue 311: Serialized lock mode: executeQuery of write operations fails. - -@roadmap_1483_li -PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). - -@roadmap_1484_li -MySQL compatibility: support TIMESTAMPADD. - -@roadmap_1485_li -Support SELECT ... 
FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1486_li -Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1487_li -Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). - -@roadmap_1488_li -TRANSACTION_ID() for in-memory databases. - -@roadmap_1489_li -TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). - -@roadmap_1490_li -Support [INNER | OUTER] JOIN USING(column [,...]). - -@roadmap_1491_li -Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) - -@roadmap_1492_li -GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). - -@roadmap_1493_li -Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped. - -@roadmap_1494_li -Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. - -@roadmap_1495_li -PHP support: H2 should support PDO, or test with PostgreSQL PDO. - -@roadmap_1496_li -Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. - -@roadmap_1497_li -Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. - -@roadmap_1498_li -MySQL compatibility: index names only need to be unique for the given table. - -@roadmap_1499_li -Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. - -@roadmap_1500_li -Oracle compatibility: support MEDIAN aggregate function. - -@roadmap_1501_li -Issue 348: Oracle compatibility: division should return a decimal result. - -@roadmap_1502_li -Read rows on demand: instead of reading the whole row, only read up to that column that is requested. 
Keep an pointer to the data area and the column id that is already read. - -@roadmap_1503_li -Long running transactions: log session id when detected. - -@roadmap_1504_li -Optimization: "select id from test" should use the index on id even without "order by". - -@roadmap_1505_li -Issue 362: LIMIT support for UPDATE statements (MySQL compatibility). - -@roadmap_1506_li -Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ... - -@roadmap_1507_li -Use Java 6 SQLException subclasses. - -@roadmap_1508_li -Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR - -@roadmap_1509_li -Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. - -@roadmap_1510_li -Support index-only when doing selects (i.e. without needing to load the actual table data) - -@roadmap_1511_h2 -Not Planned - -@roadmap_1512_li -HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. - -@roadmap_1513_li -String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. - -@roadmap_1514_li -In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. 
- -@sourceError_1000_h1 -Error Analyzer - -@sourceError_1001_a -Home - -@sourceError_1002_a -Input - -@sourceError_1003_h2 -  Details  Source Code - -@sourceError_1004_p -Paste the error message and stack trace below and click on 'Details' or 'Source Code': - -@sourceError_1005_b -Error Code: - -@sourceError_1006_b -Product Version: - -@sourceError_1007_b -Message: - -@sourceError_1008_b -More Information: - -@sourceError_1009_b -Stack Trace: - -@sourceError_1010_b -Source File: - -@sourceError_1011_p - Inline - -@tutorial_1000_h1 -Tutorial - -@tutorial_1001_a - Starting and Using the H2 Console - -@tutorial_1002_a - Special H2 Console Syntax - -@tutorial_1003_a - Settings of the H2 Console - -@tutorial_1004_a - Connecting to a Database using JDBC - -@tutorial_1005_a - Creating New Databases - -@tutorial_1006_a - Using the Server - -@tutorial_1007_a - Using Hibernate - -@tutorial_1008_a - Using TopLink and Glassfish - -@tutorial_1009_a - Using EclipseLink - -@tutorial_1010_a - Using Apache ActiveMQ - -@tutorial_1011_a - Using H2 within NetBeans - -@tutorial_1012_a - Using H2 with jOOQ - -@tutorial_1013_a - Using Databases in Web Applications - -@tutorial_1014_a - Android - -@tutorial_1015_a - CSV (Comma Separated Values) Support - -@tutorial_1016_a - Upgrade, Backup, and Restore - -@tutorial_1017_a - Command Line Tools - -@tutorial_1018_a - The Shell Tool - -@tutorial_1019_a - Using OpenOffice Base - -@tutorial_1020_a - Java Web Start / JNLP - -@tutorial_1021_a - Using a Connection Pool - -@tutorial_1022_a - Fulltext Search - -@tutorial_1023_a - User-Defined Variables - -@tutorial_1024_a - Date and Time - -@tutorial_1025_a - Using Spring - -@tutorial_1026_a - OSGi - -@tutorial_1027_a - Java Management Extension (JMX) - -@tutorial_1028_h2 -Starting and Using the H2 Console - -@tutorial_1029_p - The H2 Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API. 
- -@tutorial_1030_p - This is a client/server application, so both a server and a client (a browser) are required to run it. - -@tutorial_1031_p - Depending on your platform and environment, there are multiple ways to start the H2 Console: - -@tutorial_1032_th -OS - -@tutorial_1033_th -Start - -@tutorial_1034_td -Windows - -@tutorial_1035_td - Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] - -@tutorial_1036_td - An icon will be added to the system tray: - -@tutorial_1037_td - If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082. - -@tutorial_1038_td -Windows - -@tutorial_1039_td - Open a file browser, navigate to h2/bin, and double click on h2.bat. - -@tutorial_1040_td - A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082). - -@tutorial_1041_td -Any - -@tutorial_1042_td - Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java. - -@tutorial_1043_td -Any - -@tutorial_1044_td - Open a console window, navigate to the directory h2/bin, and type: - -@tutorial_1045_h3 -Firewall - -@tutorial_1046_p - If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need allow remote connections in the firewall. - -@tutorial_1047_p - It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. 
A workaround is to connect using 'localhost'. - -@tutorial_1048_p - A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'. - -@tutorial_1049_h3 -Testing Java - -@tutorial_1050_p - To find out which version of Java is installed, open a command prompt and type: - -@tutorial_1051_p - If you get an error message, you may need to add the Java binary directory to the path environment variable. - -@tutorial_1052_h3 -Error Message 'Port may be in use' - -@tutorial_1053_p - You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections. - -@tutorial_1054_h3 -Using another Port - -@tutorial_1055_p - If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. - -@tutorial_1056_p - If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. - -@tutorial_1057_h3 -Connecting to the Server using a Browser - -@tutorial_1058_p - If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. 
If you enabled TLS on the server side, the URL needs to start with https://. - -@tutorial_1059_h3 -Multiple Concurrent Sessions - -@tutorial_1060_p - Multiple concurrent browser sessions are supported. As the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. - -@tutorial_1061_h3 -Login - -@tutorial_1062_p - At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. - -@tutorial_1063_p - You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). - -@tutorial_1064_h3 -Error Messages - -@tutorial_1065_p - Error messages are shown in red. You can show/hide the stack trace of the exception by clicking on the message. - -@tutorial_1066_h3 -Adding Database Drivers - -@tutorial_1067_p - To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar. - -@tutorial_1068_p - Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@tutorial_1069_h3 -Using the H2 Console - -@tutorial_1070_p - The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. 
- -@tutorial_1071_h3 -Inserting Table Names or Column Names - -@tutorial_1072_p - To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. - -@tutorial_1073_h3 -Disconnecting and Stopping the Application - -@tutorial_1074_p - To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. - -@tutorial_1075_p - To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. - -@tutorial_1076_h2 -Special H2 Console Syntax - -@tutorial_1077_p - The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. - -@tutorial_1078_th -Command(s) - -@tutorial_1079_th -Description - -@tutorial_1080_td - @autocommit_true; - -@tutorial_1081_td - @autocommit_false; - -@tutorial_1082_td - Enable or disable autocommit. - -@tutorial_1083_td - @cancel; - -@tutorial_1084_td - Cancel the currently running statement. - -@tutorial_1085_td - @columns null null TEST; - -@tutorial_1086_td - @index_info null null TEST; - -@tutorial_1087_td - @tables; - -@tutorial_1088_td - @tables null null TEST; - -@tutorial_1089_td - Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. 
Missing parameters at the end of the line are set to null. The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns - -@tutorial_1090_td - @edit select * from test; - -@tutorial_1091_td - Use an updatable result set. - -@tutorial_1092_td - @generated insert into test() values(); - -@tutorial_1093_td - Show the result of Statement.getGeneratedKeys(). - -@tutorial_1094_td - @history; - -@tutorial_1095_td - List the command history. - -@tutorial_1096_td - @info; - -@tutorial_1097_td - Display the result of various Connection and DatabaseMetaData methods. - -@tutorial_1098_td - @list select * from test; - -@tutorial_1099_td - Show the result set in list format (each column on its own line, with row numbers). - -@tutorial_1100_td - @loop 1000 select ?, ?/*rnd*/; - -@tutorial_1101_td - @loop 1000 @statement select ?; - -@tutorial_1102_td - Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. - -@tutorial_1103_td - @maxrows 20; - -@tutorial_1104_td - Set the maximum number of rows to display. - -@tutorial_1105_td - @memory; - -@tutorial_1106_td - Show the used and free memory. This will call System.gc(). - -@tutorial_1107_td - @meta select 1; - -@tutorial_1108_td - List the ResultSetMetaData after running the query. - -@tutorial_1109_td - @parameter_meta select ?; - -@tutorial_1110_td - Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. 
- -@tutorial_1111_td - @prof_start; - -@tutorial_1112_td - call hash('SHA256', '', 1000000); - -@tutorial_1113_td - @prof_stop; - -@tutorial_1114_td - Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). - -@tutorial_1115_td - @prof_start; - -@tutorial_1116_td - @sleep 10; - -@tutorial_1117_td - @prof_stop; - -@tutorial_1118_td - Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). - -@tutorial_1119_td - @transaction_isolation; - -@tutorial_1120_td - @transaction_isolation 2; - -@tutorial_1121_td - Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. - -@tutorial_1122_h2 -Settings of the H2 Console - -@tutorial_1123_p - The settings of the H2 Console are stored in a configuration file called .h2.server.properties in your user home directory. For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are: - -@tutorial_1124_code -webAllowOthers - -@tutorial_1125_li -: allow other computers to connect. - -@tutorial_1126_code -webPort - -@tutorial_1127_li -: the port of the H2 Console - -@tutorial_1128_code -webSSL - -@tutorial_1129_li -: use encrypted TLS (HTTPS) connections. - -@tutorial_1130_p - In addition to those settings, the properties of the last recently used connection are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa - -@tutorial_1131_h2 -Connecting to a Database using JDBC - -@tutorial_1132_p - To connect to a database, a Java application first needs to load the database driver, and then get a connection. 
A simple way to do that is using the following code: - -@tutorial_1133_p - This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. - -@tutorial_1134_h2 -Creating New Databases - -@tutorial_1135_p - By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. - -@tutorial_1136_p - Auto-creating new databases can be disabled, see Opening a Database Only if it Already Exists. - -@tutorial_1137_h2 -Using the Server - -@tutorial_1138_p - H2 currently supports three servers: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. - -@tutorial_1139_h3 -Starting the Server Tool from Command Line - -@tutorial_1140_p - To start the Server tool from the command line with the default settings, run: - -@tutorial_1141_p - This will start the tool with the default options. To get the list of options and default values, run: - -@tutorial_1142_p - There are options available to use other ports, and start or not start parts. 
- -@tutorial_1143_h3 -Connecting to the TCP Server - -@tutorial_1144_p - To remotely connect to a database using the TCP server, use the following driver and database URL: - -@tutorial_1145_li -JDBC driver class: org.h2.Driver - -@tutorial_1146_li -Database URL: jdbc:h2:tcp://localhost/~/test - -@tutorial_1147_p - For details about the database URL, see also in Features. Please note that you can't connection with a web browser to this URL. You can only connect using a H2 client (over JDBC). - -@tutorial_1148_h3 -Starting the TCP Server within an Application - -@tutorial_1149_p - Servers can also be started and stopped from within an application. Sample code: - -@tutorial_1150_h3 -Stopping a TCP Server from Another Process - -@tutorial_1151_p - The TCP server can be stopped from another process. To stop the server from the command line, run: - -@tutorial_1152_p - To stop the server from a user application, use the following code: - -@tutorial_1153_p - This function will only stop the TCP server. If other server were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). - -@tutorial_1154_h2 -Using Hibernate - -@tutorial_1155_p - This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. - -@tutorial_1156_p - When using Hibernate, try to use the H2Dialect if possible. 
When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. - -@tutorial_1157_h2 -Using TopLink and Glassfish - -@tutorial_1158_p - To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. - -@tutorial_1159_p - The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml: - -@tutorial_1160_p - In old versions of Glassfish, the property name is toplink.platform.class.name. - -@tutorial_1161_p - To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. - -@tutorial_1162_h2 -Using EclipseLink - -@tutorial_1163_p - To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. - -@tutorial_1164_h2 -Using Apache ActiveMQ - -@tutorial_1165_p - When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. 
The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. - -@tutorial_1166_h2 -Using H2 within NetBeans - -@tutorial_1167_p - The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. - -@tutorial_1168_p - There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one. - -@tutorial_1169_h2 -Using H2 with jOOQ - -@tutorial_1170_p - jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema: - -@tutorial_1171_p - then run the jOOQ code generator on the command line using this command: - -@tutorial_1172_p - ...where codegen.xml is on the classpath and contains this information - -@tutorial_1173_p - Using the generated source, you can query the database as follows: - -@tutorial_1174_p - See more details on jOOQ Homepage and in the jOOQ Tutorial - -@tutorial_1175_h2 -Using Databases in Web Applications - -@tutorial_1176_p - There are multiple ways to access a database from within web applications. 
Here are some examples if you use Tomcat or JBoss. - -@tutorial_1177_h3 -Embedded Mode - -@tutorial_1178_p - The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed). - -@tutorial_1179_h3 -Server Mode - -@tutorial_1180_p - The server mode is similar, but it allows you to run the server in another process. - -@tutorial_1181_h3 -Using a Servlet Listener to Start and Stop a Database - -@tutorial_1182_p - Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section): - -@tutorial_1183_p - For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. 
If you want to use this connection within your servlet, you can access as follows: - -@tutorial_1184_code -DbStarter - -@tutorial_1185_p - can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags: - -@tutorial_1186_p - When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. - -@tutorial_1187_h3 -Using the H2 Console Servlet - -@tutorial_1188_p - The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the the h2*.jar file in your application, and add the following configuration to your web.xml: - -@tutorial_1189_p - For details, see also src/tools/WEB-INF/web.xml. - -@tutorial_1190_p - To create a web application with just the H2 Console, run the following command: - -@tutorial_1191_h2 -Android - -@tutorial_1192_p - You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. - -@tutorial_1193_p - Reasons to use H2 instead of SQLite are: - -@tutorial_1194_li -Full Unicode support including UPPER() and LOWER(). - -@tutorial_1195_li -Streaming API for BLOB and CLOB data. - -@tutorial_1196_li -Fulltext search. 
- -@tutorial_1197_li -Multiple connections. - -@tutorial_1198_li -User defined functions and triggers. - -@tutorial_1199_li -Database file encryption. - -@tutorial_1200_li -Reading and writing CSV files (this feature can be used outside the database as well). - -@tutorial_1201_li -Referential integrity and check constraints. - -@tutorial_1202_li -Better data type and SQL support. - -@tutorial_1203_li -In-memory databases, read-only databases, linked tables. - -@tutorial_1204_li -Better compatibility with other databases which simplifies porting applications. - -@tutorial_1205_li -Possibly better performance (so far for read operations). - -@tutorial_1206_li -Server mode (accessing a database on a different machine over TCP/IP). - -@tutorial_1207_p - Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). - -@tutorial_1208_p - The database files needs to be stored in a place that is accessible for the application. Example: - -@tutorial_1209_p - Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. - -@tutorial_1210_h2 -CSV (Comma Separated Values) Support - -@tutorial_1211_p - The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. - -@tutorial_1212_h3 -Reading a CSV File from Within a Database - -@tutorial_1213_p - A CSV file can be read using the function CSVREAD. Example: - -@tutorial_1214_p - Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. 
- -@tutorial_1215_h3 -Importing Data from a CSV File - -@tutorial_1216_p - A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. - -@tutorial_1217_h3 -Writing a CSV File from Within a Database - -@tutorial_1218_p - The built-in function CSVWRITE can be used to create a CSV file from a query. Example: - -@tutorial_1219_h3 -Writing a CSV File from a Java Application - -@tutorial_1220_p - The Csv tool can be used in a Java application even when not using a database at all. Example: - -@tutorial_1221_h3 -Reading a CSV File from a Java Application - -@tutorial_1222_p - It is possible to read a CSV file without opening a database. Example: - -@tutorial_1223_h2 -Upgrade, Backup, and Restore - -@tutorial_1224_h3 -Database Upgrade - -@tutorial_1225_p - The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. - -@tutorial_1226_h3 -Backup using the Script Tool - -@tutorial_1227_p - The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows: - -@tutorial_1228_p - It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. 
- -@tutorial_1229_h3 -Restore from a Script - -@tutorial_1230_p - To restore a database from a SQL script file, you can use the RunScript tool: - -@tutorial_1231_p - For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the references script files need to be available on the server side. - -@tutorial_1232_h3 -Online Backup - -@tutorial_1233_p - The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. - -@tutorial_1234_p - The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. - -@tutorial_1235_p - The Backup tool (org.h2.tools.Backup) can not be used to create a online backup; the database must not be in use while running this program. - -@tutorial_1236_p - Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order. - -@tutorial_1237_h2 -Command Line Tools - -@tutorial_1238_p - This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example: - -@tutorial_1239_p - The command line tools are: - -@tutorial_1240_code -Backup - -@tutorial_1241_li - creates a backup of a database. - -@tutorial_1242_code -ChangeFileEncryption - -@tutorial_1243_li - allows changing the file encryption password or algorithm of a database. 
- -@tutorial_1244_code -Console - -@tutorial_1245_li - starts the browser based H2 Console. - -@tutorial_1246_code -ConvertTraceFile - -@tutorial_1247_li - converts a .trace.db file to a Java application and SQL script. - -@tutorial_1248_code -CreateCluster - -@tutorial_1249_li - creates a cluster from a standalone database. - -@tutorial_1250_code -DeleteDbFiles - -@tutorial_1251_li - deletes all files belonging to a database. - -@tutorial_1252_code -Recover - -@tutorial_1253_li - helps recovering a corrupted database. - -@tutorial_1254_code -Restore - -@tutorial_1255_li - restores a backup of a database. - -@tutorial_1256_code -RunScript - -@tutorial_1257_li - runs a SQL script against a database. - -@tutorial_1258_code -Script - -@tutorial_1259_li - allows converting a database to a SQL script for backup or migration. - -@tutorial_1260_code -Server - -@tutorial_1261_li - is used in the server mode to start a H2 server. - -@tutorial_1262_code -Shell - -@tutorial_1263_li - is a command line database tool. - -@tutorial_1264_p - The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. - -@tutorial_1265_h2 -The Shell Tool - -@tutorial_1266_p - The Shell tool is a simple interactive command line tool. To start it, type: - -@tutorial_1267_p - You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements: - -@tutorial_1268_p - By default, results are printed as a table. For results with many column, consider using the list mode: - -@tutorial_1269_h2 -Using OpenOffice Base - -@tutorial_1270_p - OpenOffice.org Base supports database access over the JDBC API. 
To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are: - -@tutorial_1271_li -Start OpenOffice Writer, go to [Tools], [Options] - -@tutorial_1272_li -Make sure you have selected a Java runtime environment in OpenOffice.org / Java - -@tutorial_1273_li -Click [Class Path...], [Add Archive...] - -@tutorial_1274_li -Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1275_li -Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) - -@tutorial_1276_li -Start OpenOffice Base - -@tutorial_1277_li -Connect to an existing database; select [JDBC]; [Next] - -@tutorial_1278_li -Example datasource URL: jdbc:h2:~/test - -@tutorial_1279_li -JDBC driver class: org.h2.Driver - -@tutorial_1280_p - Now you can access the database stored in the current users home directory. - -@tutorial_1281_p - To use H2 in NeoOffice (OpenOffice without X11): - -@tutorial_1282_li -In NeoOffice, go to [NeoOffice], [Preferences] - -@tutorial_1283_li -Look for the page under [NeoOffice], [Java] - -@tutorial_1284_li -Click [Class Path], [Add Archive...] - -@tutorial_1285_li -Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1286_li -Click [OK] (as much as needed), restart NeoOffice. - -@tutorial_1287_p - Now, when creating a new database using the "Database Wizard" : - -@tutorial_1288_li -Click [File], [New], [Database]. - -@tutorial_1289_li -Select [Connect to existing database] and the select [JDBC]. Click next. - -@tutorial_1290_li -Example datasource URL: jdbc:h2:~/test - -@tutorial_1291_li -JDBC driver class: org.h2.Driver - -@tutorial_1292_p - Another solution to use H2 in NeoOffice is: - -@tutorial_1293_li -Package the h2 jar within an extension package - -@tutorial_1294_li -Install it as a Java extension in NeoOffice - -@tutorial_1295_p - This can be done by create it using the NetBeans OpenOffice plugin. 
See also Extensions Development. - -@tutorial_1296_h2 -Java Web Start / JNLP - -@tutorial_1297_p - When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags: - -@tutorial_1298_h2 -Using a Connection Pool - -@tutorial_1299_p - For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as faster to get a connection from the built-in connection pool than to get one using DriverManager.getConnection().The build-in connection pool is used as follows: - -@tutorial_1300_h2 -Fulltext Search - -@tutorial_1301_p - H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. - -@tutorial_1302_h3 -Using the Native Fulltext Search - -@tutorial_1303_p - To initialize, call: - -@tutorial_1304_p - You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using: - -@tutorial_1305_p - PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. 
To search the index, use the following query: - -@tutorial_1306_p - This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1307_p - To drop an index on a table: - -@tutorial_1308_p - To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1309_p - You can also call the index from within a Java application: - -@tutorial_1310_h3 -Using the Lucene Fulltext Search - -@tutorial_1311_p - To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call: - -@tutorial_1312_p - You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using: - -@tutorial_1313_p - PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query: - -@tutorial_1314_p - This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1315_p - To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database): - -@tutorial_1316_p - To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. 
The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1317_p - You can also call the index from within a Java application: - -@tutorial_1318_p - The Lucene fulltext search supports searching in specific column only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example: - -@tutorial_1319_p - The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed. - -@tutorial_1320_h2 -User-Defined Variables - -@tutorial_1321_p - This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. A value is usually assigned using the SET command: - -@tutorial_1322_p - The value can also be changed using the SET() method. This is useful in queries: - -@tutorial_1323_p - Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. 
- -@tutorial_1324_h2 -Date and Time - -@tutorial_1325_p - Date, time and timestamp values support ISO 8601 formatting, including time zone: - -@tutorial_1326_p - If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. - -@tutorial_1327_h2 -Using Spring - -@tutorial_1328_h3 -Using the TCP Server - -@tutorial_1329_p - Use the following configuration to start and stop the H2 TCP server using the Spring Framework: - -@tutorial_1330_p - The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. - -@tutorial_1331_h3 -Error Code Incompatibility - -@tutorial_1332_p - There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath: - -@tutorial_1333_h2 -OSGi - -@tutorial_1334_p - The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver and OSGI_JDBC_DRIVER_NAME=H2. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. 
- -@tutorial_1335_p - The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. - -@tutorial_1336_h2 -Java Management Extension (JMX) - -@tutorial_1337_p - Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). - -@tutorial_1338_p - The following attributes and operations are supported: - -@tutorial_1339_code -CacheSize - -@tutorial_1340_li -: the cache size currently in use in KB. - -@tutorial_1341_code -CacheSizeMax - -@tutorial_1342_li - (read/write): the maximum cache size in KB. - -@tutorial_1343_code -Exclusive - -@tutorial_1344_li -: whether this database is open in exclusive mode or not. - -@tutorial_1345_code -FileReadCount - -@tutorial_1346_li -: the number of file read operations since the database was opened. - -@tutorial_1347_code -FileSize - -@tutorial_1348_li -: the file size in KB. - -@tutorial_1349_code -FileWriteCount - -@tutorial_1350_li -: the number of file write operations since the database was opened. - -@tutorial_1351_code -FileWriteCountTotal - -@tutorial_1352_li -: the number of file write operations since the database was created. - -@tutorial_1353_code -LogMode - -@tutorial_1354_li - (read/write): the current transaction log mode. See SET LOG for details. 
- -@tutorial_1355_code -Mode - -@tutorial_1356_li -: the compatibility mode (REGULAR if no compatibility mode is used). - -@tutorial_1357_code -MultiThreaded - -@tutorial_1358_li -: true if multi-threaded is enabled. - -@tutorial_1359_code -Mvcc - -@tutorial_1360_li -: true if MVCC is enabled. - -@tutorial_1361_code -ReadOnly - -@tutorial_1362_li -: true if the database is read-only. - -@tutorial_1363_code -TraceLevel - -@tutorial_1364_li - (read/write): the file trace level. - -@tutorial_1365_code -Version - -@tutorial_1366_li -: the database version in use. - -@tutorial_1367_code -listSettings - -@tutorial_1368_li -: list the database settings. - -@tutorial_1369_code -listSessions - -@tutorial_1370_li -: list the open sessions, including currently executing statement (if any) and locked tables (if any). - -@tutorial_1371_p - To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. - diff --git a/h2/src/docsrc/text/_docs_ja.utf8.txt b/h2/src/docsrc/text/_docs_ja.utf8.txt deleted file mode 100644 index 0de2d4df6a..0000000000 --- a/h2/src/docsrc/text/_docs_ja.utf8.txt +++ /dev/null @@ -1,11985 +0,0 @@ -@advanced_1000_h1 -#Advanced - -@advanced_1001_a -# Result Sets - -@advanced_1002_a -# Large Objects - -@advanced_1003_a -# Linked Tables - -@advanced_1004_a -# Spatial Features - -@advanced_1005_a -# Recursive Queries - -@advanced_1006_a -# Updatable Views - -@advanced_1007_a -# Transaction Isolation - -@advanced_1008_a -# Multi-Version Concurrency Control (MVCC) - -@advanced_1009_a -# Clustering / High Availability - -@advanced_1010_a -# Two Phase Commit - -@advanced_1011_a -# Compatibility - -@advanced_1012_a -# Standards Compliance - -@advanced_1013_a -# Run as Windows Service - -@advanced_1014_a -# ODBC Driver - -@advanced_1015_a -# Using H2 in Microsoft .NET - -@advanced_1016_a -# ACID - -@advanced_1017_a -# Durability Problems - -@advanced_1018_a -# Using the Recover Tool 
- -@advanced_1019_a -# File Locking Protocols - -@advanced_1020_a -# Using Passwords - -@advanced_1021_a -# Password Hash - -@advanced_1022_a -# Protection against SQL Injection - -@advanced_1023_a -# Protection against Remote Access - -@advanced_1024_a -# Restricting Class Loading and Usage - -@advanced_1025_a -# Security Protocols - -@advanced_1026_a -# TLS Connections - -@advanced_1027_a -# Universally Unique Identifiers (UUID) - -@advanced_1028_a -# Settings Read from System Properties - -@advanced_1029_a -# Setting the Server Bind Address - -@advanced_1030_a -# Pluggable File System - -@advanced_1031_a -# Split File System - -@advanced_1032_a -# Database Upgrade - -@advanced_1033_a -# Java Objects Serialization - -@advanced_1034_a -# Limits and Limitations - -@advanced_1035_a -# Glossary and Links - -@advanced_1036_h2 -Result Sets - -@advanced_1037_h3 -#Statements that Return a Result Set - -@advanced_1038_p -# The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. - -@advanced_1039_h3 -行数�?�制�? - -@advanced_1040_p -# Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). - -@advanced_1041_h3 -大�??�?�Result Set �?�外部ソート - -@advanced_1042_p -# For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. 
- -@advanced_1043_h2 -大�??�?�オブジェクト - -@advanced_1044_h3 -大�??�?�オブジェクト�?�ソート�?�読�?�込�?� - -@advanced_1045_p -# If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. - -@advanced_1046_h3 -#When to use CLOB/BLOB - -@advanced_1047_p -# By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. - -@advanced_1048_h3 -#Large Object Compression - -@advanced_1049_p -# The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. 
- -@advanced_1050_h2 -リンクテーブル - -@advanced_1051_p -# This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement: - -@advanced_1052_p -# You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. - -@advanced_1053_p -# To view the statements that are executed against the target table, set the trace level to 3. - -@advanced_1054_p -# If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false. - -@advanced_1055_p -# The statement CREATE LINKED TABLE supports an optional schema name parameter. - -@advanced_1056_p -# The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). - -@advanced_1057_p -# Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. - -@advanced_1058_h2 -#Updatable Views - -@advanced_1059_p -# By default, views are not updatable. 
To make a view updatable, use an "instead of" trigger as follows: - -@advanced_1060_p -# Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. - -@advanced_1061_h2 -トランザクション分離 - -@advanced_1062_p -# Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. - -@advanced_1063_p -# Transaction isolation is provided for all data manipulation language (DML) statements. - -@advanced_1064_p -# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). - -@advanced_1065_p -# This database supports the following transaction isolation levels: - -@advanced_1066_b -Read Committed (コミット済�?�読�?��?�り) - -@advanced_1067_li -# This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. - -@advanced_1068_li -# To enable, execute the SQL statement SET LOCK_MODE 3 - -@advanced_1069_li -# or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3 - -@advanced_1070_b -Serializable (直列化) - -@advanced_1071_li -# Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 - -@advanced_1072_li -# or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1 - -@advanced_1073_b -Read Uncommitted (�?�コミット読�?��?�り) - -@advanced_1074_li -# This level means that transaction isolation is disabled. 
- -@advanced_1075_li -# To enable, execute the SQL statement SET LOCK_MODE 0 - -@advanced_1076_li -# or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0 - -@advanced_1077_p -# When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. - -@advanced_1078_b -Dirty Reads (ダーティリード) - -@advanced_1079_li -# Means a connection can read uncommitted changes made by another connection. - -@advanced_1080_li -# Possible with: read uncommitted - -@advanced_1081_b -Non-Repeatable Reads (�??復�?�?�能読�?��?�り) - -@advanced_1082_li -# A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. - -@advanced_1083_li -# Possible with: read uncommitted, read committed - -@advanced_1084_b -Phantom Reads (ファントムリード) - -@advanced_1085_li -# A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. - -@advanced_1086_li -# Possible with: read uncommitted, read committed - -@advanced_1087_h3 -テーブルレベルロック - -@advanced_1088_p -# The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. 
To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. - -@advanced_1089_h3 -ロックタイムアウト - -@advanced_1090_p -# If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. - -@advanced_1091_h2 -#Multi-Version Concurrency Control (MVCC) - -@advanced_1092_p -# The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. - -@advanced_1093_p -# To use the MVCC feature, append ;MVCC=TRUE to the database URL: - -@advanced_1094_p -# The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. 
- -@advanced_1095_p -# If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. - -@advanced_1096_div -# The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability - -@advanced_1097_p -# This database supports a simple clustering / high availability mechanism. The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. - -@advanced_1098_p -# Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT=TRUE, they will recover from that. - -@advanced_1099_p -# To initialize the cluster, use the following steps: - -@advanced_1100_li -#Create a database - -@advanced_1101_li -#Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. 
- -@advanced_1102_li -#Start two servers (one for each copy of the database) - -@advanced_1103_li -#You are now ready to connect to the databases with the client application(s) - -@advanced_1104_h3 -CreateClusterツールを使用�?�る - -@advanced_1105_p -# To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. - -@advanced_1106_li -#Create two directories: server1, server2. Each directory will simulate a directory on a computer. - -@advanced_1107_li -#Start a TCP server pointing to the first directory. You can do this using the command line: - -@advanced_1108_li -#Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line: - -@advanced_1109_li -#Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line: - -@advanced_1110_li -#You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test - -@advanced_1111_li -#If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. - -@advanced_1112_li -#To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. - -@advanced_1113_h3 -#Detect Which Cluster Instances are Running - -@advanced_1114_p -# To find out which cluster nodes are currently running, execute the following SQL statement: - -@advanced_1115_p -# If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example: 'server1:9191,server2:9191'. 
- -@advanced_1116_p -# It is also possible to get the list of servers by using Connection.getClientInfo(). - -@advanced_1117_p -# The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. - -@advanced_1118_p -# Example: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note: The serverX property only returns IP addresses and ports and not hostnames. - -@advanced_1119_h3 -クラスタリングアルゴリズム�?�制�? - -@advanced_1120_p -# Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). - -@advanced_1121_p -# When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. - -@advanced_1122_p -# The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. 
- -@advanced_1123_p -# It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. - -@advanced_1124_h2 -2フェーズコミット - -@advanced_1125_p -# The two phase commit protocol is supported. 2-phase-commit works as follows: - -@advanced_1126_li -#Autocommit needs to be switched off - -@advanced_1127_li -#A transaction is started, for example by inserting a row - -@advanced_1128_li -#The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName - -@advanced_1129_li -#The transaction can now be committed or rolled back - -@advanced_1130_li -#If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' - -@advanced_1131_li -#When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT - -@advanced_1132_li -#Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName - -@advanced_1133_li -#The database needs to be closed and re-opened to apply the changes - -@advanced_1134_h2 -互�?�性 - -@advanced_1135_p -# This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. - -@advanced_1136_h3 -オートコミット�?�ON�?�時�?�トランザクションコミット - -@advanced_1137_p -# At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. 
- -@advanced_1138_h3 -キーワード / 予約語 - -@advanced_1139_p -# There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently: - -@advanced_1140_code -# CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE - -@advanced_1141_p -# Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. - -@advanced_1142_h2 -#Standards Compliance - -@advanced_1143_p -# This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. - -@advanced_1144_h3 -#Supported Character Sets, Character Encoding, and Unicode - -@advanced_1145_p -# H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. - -@advanced_1146_h2 -Windowsサービス�?��?��?�実行�?�る - -@advanced_1147_p -# Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. 
- -@advanced_1148_p -# The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. - -@advanced_1149_p -# When running the database as a service, absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. - -@advanced_1150_h3 -サービスをインストール�?�る - -@advanced_1151_p -# The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. - -@advanced_1152_h3 -サービスを起動�?�る - -@advanced_1153_p -# You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. - -@advanced_1154_h3 -H2コンソール�?�接続�?�る - -@advanced_1155_p -# After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. - -@advanced_1156_h3 -サービスを終了�?�る - -@advanced_1157_p -# To stop the service, double click on 4_stop_service.bat. Please note that the batch file does not print an error message if the service is not installed or started. - -@advanced_1158_h3 -サービス�?�アンインストール - -@advanced_1159_p -# To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. 
- -@advanced_1160_h3 -#Additional JDBC drivers - -@advanced_1161_p -# To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@advanced_1162_h2 -ODBCドライ�? - -@advanced_1163_p -# This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. - -@advanced_1164_p -# To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit - -@advanced_1165_h3 -ODBCインストール - -@advanced_1166_p -# First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi. - -@advanced_1167_h3 -サー�?ー�?�起動 - -@advanced_1168_p -# After installing the ODBC driver, start the H2 Server using the command line: - -@advanced_1169_p -# The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory: - -@advanced_1170_p -# The PG server can be started and stopped from within a Java application as follows: - -@advanced_1171_p -# By default, only connections from localhost are allowed. 
To allow remote connections, use -pgAllowOthers when starting the server. - -@advanced_1172_p -# To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST to the database URL jdbc:h2:~/data/test;cipher=aes: - -@advanced_1173_h3 -ODBC設定 - -@advanced_1174_p -# After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). - -@advanced_1175_th -プロパティ - -@advanced_1176_th -例 - -@advanced_1177_th -コメント - -@advanced_1178_td -Data Source - -@advanced_1179_td -H2 Test - -@advanced_1180_td -ODBCデータソース�?��??称 - -@advanced_1181_td -Database - -@advanced_1182_td -#~/test;ifexists=true - -@advanced_1183_td -# The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. - -@advanced_1184_td -#Servername - -@advanced_1185_td -localhost - -@advanced_1186_td -サー�?ー�??�?�?��?��?�IPアドレス - -@advanced_1187_td -デフォルト�?��?��?リモート接続�?��?�許�?��?�れ�?��?��?��?�。 - -@advanced_1188_td -#Username - -@advanced_1189_td -sa - -@advanced_1190_td -データベース�?�ユーザー�?? 
- -@advanced_1191_td -#SSL - -@advanced_1192_td -#false (disabled) - -@advanced_1193_td -�?�時点�?��?SSL�?�サ�?ート�?�れ�?��?��?��?�ん。 - -@advanced_1194_td -Port - -@advanced_1195_td -5435 - -@advanced_1196_td -PGサー�?ー�?�傾�?��?��?��?�る�?ート - -@advanced_1197_td -Password - -@advanced_1198_td -sa - -@advanced_1199_td -データベースパスワード - -@advanced_1200_p -# To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. - -@advanced_1201_p -# Afterwards, you may use this data source. - -@advanced_1202_h3 -PGプロトコルサ�?ート�?�制�? - -@advanced_1203_p -# At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. - -@advanced_1204_p -# PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. - -@advanced_1205_h3 -セキュリティ考慮 - -@advanced_1206_p -# Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. - -@advanced_1207_p -# The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. - -@advanced_1208_h3 -#Using Microsoft Access - -@advanced_1209_p -# When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields. 
- -@advanced_1210_h2 -#Using H2 in Microsoft .NET - -@advanced_1211_p -# The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. - -@advanced_1212_h3 -#Using the ADO.NET API on .NET - -@advanced_1213_p -# An implementation of the ADO.NET interface is available in the open source project H2Sharp. - -@advanced_1214_h3 -#Using the JDBC API on .NET - -@advanced_1215_li -#Install the .NET Framework from Microsoft. Mono has not yet been tested. - -@advanced_1216_li -#Install IKVM.NET. - -@advanced_1217_li -#Copy the h2*.jar file to ikvm/bin - -@advanced_1218_li -#Run the H2 Console using: ikvm -jar h2*.jar - -@advanced_1219_li -#Convert the H2 Console to an .exe file using: ikvmc -target:winexe h2*.jar. You may ignore the warnings. - -@advanced_1220_li -#Create a .dll file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar - -@advanced_1221_p -# If you want your C# application use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C# solution. Here some sample code: - -@advanced_1222_h2 -ACID - -@advanced_1223_p -# In the database world, ACID stands for: - -@advanced_1224_li -#Atomicity: transactions must be atomic, meaning either all tasks are performed or none. - -@advanced_1225_li -#Consistency: all operations must comply with the defined constraints. - -@advanced_1226_li -#Isolation: transactions must be isolated from each other. - -@advanced_1227_li -#Durability: committed transaction will not be lost. - -@advanced_1228_h3 -Atomicity (原�?性) - -@advanced_1229_p -# Transactions in this database are always atomic. - -@advanced_1230_h3 -Consistency (一貫性) - -@advanced_1231_p -# By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. 
- -@advanced_1232_h3 -Isolation (独立性 / 分離性) - -@advanced_1233_p -# For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. - -@advanced_1234_h3 -Durability (永続性) - -@advanced_1235_p -# This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. - -@advanced_1236_h2 -永続性�?題 - -@advanced_1237_p -# Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. - -@advanced_1238_h3 -永続性を実�?��?�る (�?��?��?�) 方法 - -@advanced_1239_p -# Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd: - -@advanced_1240_code -#rwd - -@advanced_1241_li -#: every update to the file's content is written synchronously to the underlying storage device. - -@advanced_1242_code -#rws - -@advanced_1243_li -#: in addition to rwd, every update to the metadata is written synchronously. 
- -@advanced_1244_p -# A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. - -@advanced_1245_p -# Calling fsync flushes the buffers. There are two ways to do that in Java: - -@advanced_1246_code -#FileDescriptor.sync() - -@advanced_1247_li -#. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. - -@advanced_1248_code -#FileChannel.force() - -@advanced_1249_li -#. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. - -@advanced_1250_p -# By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. 
- -@advanced_1251_p -# Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. - -@advanced_1252_p -# In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. - -@advanced_1253_h3 -永続性テストを実行する - -@advanced_1254_p -# To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then creates the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. 
- -@advanced_1255_h2 -リカ�?ーツールを使用�?�る - -@advanced_1256_p -# The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line: - -@advanced_1257_p -# For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. - -@advanced_1258_p -# The Recover tool creates a SQL script from database file. It also processes the transaction log. - -@advanced_1259_p -# To verify the database can recover at any time, append ;RECOVER_TEST=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. - -@advanced_1260_h2 -ファイルロックプロトコル - -@advanced_1261_p -# Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. 
If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. - -@advanced_1262_p -# In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket method'. - -@advanced_1263_p -# The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. - -@advanced_1264_h3 -ファイルロックメソッド "File" - -@advanced_1265_p -# The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is: - -@advanced_1266_li -#If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers. 
- -@advanced_1267_li -# If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (every second once by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. - -@advanced_1268_li -# If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, he will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. - -@advanced_1269_p -# This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. However using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. 
- -@advanced_1270_h3 -ファイルロックメソッド "Socket" - -@advanced_1271_p -# There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET to the database URL. The algorithm is: - -@advanced_1272_li -#If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. The port and IP address of the process that opened the database is written into the lock file. - -@advanced_1273_li -#If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. - -@advanced_1274_li -#If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. - -@advanced_1275_p -# This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. - -@advanced_1276_h3 -#File Locking Method 'FS' - -@advanced_1277_p -# This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow to lock the same file multiple times within the same virtual machine, and on some system native file locking is not supported or files are not unlocked after a power failure. - -@advanced_1278_p -# To enable this feature, append ;FILE_LOCK=FS to the database URL. 
- -@advanced_1279_p -# This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. - -@advanced_1280_h2 -パスワードを使用�?�る - -@advanced_1281_h3 -安全�?�パスワードを使用�?�る - -@advanced_1282_p -# Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example: - -@advanced_1283_code -#i'sE2rtPiUKtT - -@advanced_1284_p -# from the sentence it's easy to remember this password if you know the trick. - -@advanced_1285_h3 -パスワード: String�?�代�?り�?�Char Arraysを使用�?�る - -@advanced_1286_p -# Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. - -@advanced_1287_p -# It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. - -@advanced_1288_p -# This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that: - -@advanced_1289_p -# This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. 
- -@advanced_1290_h3 -ユーザー�?? �?� (�?��?��?�) パスワードをURL�?��?証�?�る - -@advanced_1291_p -# Instead of passing the user name as a separate parameter as in Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager. getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123"); The settings in the URL override the settings passed as a separate parameter. - -@advanced_1292_h2 -#Password Hash - -@advanced_1293_p -# Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. - -@advanced_1294_p -# To connect using the password hash instead of plain text password, append ;PASSWORD_HASH=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run: @password_hash file <filePassword>. 
- -@advanced_1295_h2 -SQLインジェクション�?�対�?�る防御 - -@advanced_1296_h3 -SQLインジェクション�?��?� - -@advanced_1297_p -# This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. Some applications build SQL statements with embedded user input such as: - -@advanced_1298_p -# If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='. In this case the statement becomes: - -@advanced_1299_p -# Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. - -@advanced_1300_h3 -リテラルを無効�?��?�る - -@advanced_1301_p -# SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement: - -@advanced_1302_p -# This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement: - -@advanced_1303_p -# Afterwards, SQL statements with text and number literals are not allowed any more. That means, SQL statement of the form WHERE NAME='abc' or WHERE CustomerId=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator. 
- -@advanced_1304_h3 -定数を使用�?�る - -@advanced_1305_p -# Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. To avoid name clashes with column names, constants can be defined in other schemas: - -@advanced_1306_p -# Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. - -@advanced_1307_h3 -ZERO() 関数を使用�?�る - -@advanced_1308_p -# It is not required to create a constant for the number 0 as there is already a built-in function ZERO(): - -@advanced_1309_h2 -#Protection against Remote Access - -@advanced_1310_p -# By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. - -@advanced_1311_p -# If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. - -@advanced_1312_p -# If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. 
- -@advanced_1313_h2 -#Restricting Class Loading and Usage - -@advanced_1314_p -# By default there is no restriction on loading classes and executing Java code for admins. That means an admin may call system functions such as System.setProperty by executing: - -@advanced_1315_p -# To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example: - -@advanced_1316_p -# This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. - -@advanced_1317_h2 -セキュリティプロトコル - -@advanced_1318_p -# The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. - -@advanced_1319_h3 -ユーザーパスワード�?�暗�?�化 - -@advanced_1320_p -# When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information. - -@advanced_1321_p -# When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. 
Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. - -@advanced_1322_p -# The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. - -@advanced_1323_h3 -ファイル暗�?�化 - -@advanced_1324_p -# The database files can be encrypted using the AES-128 algorithm. - -@advanced_1325_p -# When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. - -@advanced_1326_p -# When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. - -@advanced_1327_p -# The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. 
The reason for using a secret IV is to protect against watermark attacks. - -@advanced_1328_p -# Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. - -@advanced_1329_p -# When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated and combined with the decrypted text using XOR. - -@advanced_1330_p -# Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. - -@advanced_1331_p -# Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this. - -@advanced_1332_p -# File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). - -@advanced_1333_h3 -#Wrong Password / User Name Delay - -@advanced_1334_p -# To protect against remote brute force password attacks, the delay after each unsuccessful login doubles. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. - -@advanced_1335_p -# There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. - -@advanced_1336_h3 -HTTPS 接続 - -@advanced_1337_p -# The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. - -@advanced_1338_h2 -#TLS Connections - -@advanced_1339_p -# Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. - -@advanced_1340_p -# To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. - -@advanced_1341_p -# To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. - -@advanced_1342_h2 -汎用一�?識別�? (UUID) - -@advanced_1343_p -# This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 
4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values: - -@advanced_1344_p -# Some values are: - -@advanced_1345_th -#Number of UUIs - -@advanced_1346_th -#Probability of Duplicates - -@advanced_1347_td -#2^36=68'719'476'736 - -@advanced_1348_td -#0.000'000'000'000'000'4 - -@advanced_1349_td -#2^41=2'199'023'255'552 - -@advanced_1350_td -#0.000'000'000'000'4 - -@advanced_1351_td -#2^46=70'368'744'177'664 - -@advanced_1352_td -#0.000'000'000'4 - -@advanced_1353_p -# To help non-mathematicians understand what those numbers mean, here a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. - -@advanced_1354_h2 -#Spatial Features - -@advanced_1355_p -# H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows: - -@advanced_1356_p -# Here is an example SQL script to create a table with a spatial column and index: - -@advanced_1357_p -# To query the table using geometry envelope intersection, use the operation &&, as in PostGIS: - -@advanced_1358_p -# You can verify that the spatial index is used using the "explain plan" feature: - -@advanced_1359_p -# For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. - -@advanced_1360_h2 -#Recursive Queries - -@advanced_1361_p -# H2 has experimental support for recursive queries using so called "common table expressions" (CTE). 
Examples: - -@advanced_1362_p -# Limitations: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is: - -@advanced_1363_h2 -システムプロパティ�?�ら読�?�込�?�れ�?�設定 - -@advanced_1364_p -# Some settings of the database can be set on the command line using -DpropertyName=value. It is usually not required to change those settings manually. The settings are case sensitive. Example: - -@advanced_1365_p -# The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. - -@advanced_1366_p -# For a complete list of settings, see SysProperties. - -@advanced_1367_h2 -#Setting the Server Bind Address - -@advanced_1368_p -# Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. - -@advanced_1369_h2 -#Pluggable File System - -@advanced_1370_p -# This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. 
The following file systems are included: - -@advanced_1371_code -#zip: - -@advanced_1372_li -# read-only zip-file based file system. Format: zip:/zipFileName!/fileName. - -@advanced_1373_code -#split: - -@advanced_1374_li -# file system that splits files into 1 GB files (stackable with other file systems). - -@advanced_1375_code -#nio: - -@advanced_1376_li -# file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). - -@advanced_1377_code -#nioMapped: - -@advanced_1378_li -# file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system when using a 32-bit JVM. To work around this limitation, combine it with the split file system: split:nioMapped:test. - -@advanced_1379_code -#memFS: - -@advanced_1380_li -# in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). - -@advanced_1381_code -#memLZF: - -@advanced_1382_li -# compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). - -@advanced_1383_p -# As an example, to use the nio file system, use the following database URL: jdbc:h2:nio:~/test. - -@advanced_1384_p -# To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. - -@advanced_1385_p -# For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath:, as in classpath:/org/h2/samples/newsfeed.sql. 
- -@advanced_1386_h2 -#Split File System - -@advanced_1387_p -# The file system prefix split: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows: - -@advanced_1388_code -#<fileName> - -@advanced_1389_li -# (first block, is always created) - -@advanced_1390_code -#<fileName>.1.part - -@advanced_1391_li -# (second block) - -@advanced_1392_p -# More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is: split:<x>:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test. - -@advanced_1393_h2 -データベース�?�アップグレー - -@advanced_1394_p -# In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. - -@advanced_1395_p -# The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from - -@advanced_1396_code -#dbName.data.db - -@advanced_1397_li -# to dbName.data.db.backup - -@advanced_1398_code -#dbName.index.db - -@advanced_1399_li -# to dbName.index.db.backup - -@advanced_1400_p -# by default. 
Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via - -@advanced_1401_code -#org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) - -@advanced_1402_code -#org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) - -@advanced_1403_p -# prior opening a database connection. - -@advanced_1404_p -# Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. - -@advanced_1405_h2 -#Java Objects Serialization - -@advanced_1406_p -# Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. - -@advanced_1407_p -# To disable this feature set the system property h2.serializeJavaObject=false (default: true). - -@advanced_1408_p -# Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation: - -@advanced_1409_li -# At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer=com.acme.SerializerClassName. 
- -@advanced_1410_li -# At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName' to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'. - -@advanced_1411_p -# Please note that this SQL statement can only be executed before any tables are defined. - -@advanced_1412_h2 -#Limits and Limitations - -@advanced_1413_p -# This database has the following known limitations: - -@advanced_1414_li -#Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. - -@advanced_1415_li -#The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split:. In that case files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test. - -@advanced_1416_li -#The maximum number of rows per table is 2^64. - -@advanced_1417_li -#The maximum number of open transactions is 65535. - -@advanced_1418_li -#Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. - -@advanced_1419_li -#Limit on the complexity of SQL statements. 
Statements of the following form will result in a stack overflow exception: - -@advanced_1420_li -#There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. - -@advanced_1421_li -#Querying from the metadata tables is slow if there are many tables (thousands). - -@advanced_1422_li -#For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. - -@advanced_1423_h2 -用語集�?�リンク - -@advanced_1424_th -用語 - -@advanced_1425_th -説明 - -@advanced_1426_td -AES-128 - -@advanced_1427_td -#A block encryption algorithm. See also: Wikipedia: AES - -@advanced_1428_td -Birthday Paradox - -@advanced_1429_td -#Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox - -@advanced_1430_td -Digest - -@advanced_1431_td -#Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication - -@advanced_1432_td -GCJ - -@advanced_1433_td -#Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) - -@advanced_1434_td -HTTPS - -@advanced_1435_td -#A protocol to provide security to HTTP connections. 
See also: RFC 2818: HTTP Over TLS - -@advanced_1436_td -Modes of Operation - -@advanced_1437_a -#Wikipedia: Block cipher modes of operation - -@advanced_1438_td -Salt - -@advanced_1439_td -#Random number to increase the security of passwords. See also: Wikipedia: Key derivation function - -@advanced_1440_td -SHA-256 - -@advanced_1441_td -#A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions - -@advanced_1442_td -SQLインジェクション - -@advanced_1443_td -#A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: SQL Injection - -@advanced_1444_td -Watermark Attack (�?�?��?�攻撃) - -@advanced_1445_td -#Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop' - -@advanced_1446_td -SSL/TLS - -@advanced_1447_td -#Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE) - -@architecture_1000_h1 -#Architecture - -@architecture_1001_a -# Introduction - -@architecture_1002_a -# Top-down overview - -@architecture_1003_a -# JDBC driver - -@architecture_1004_a -# Connection/session management - -@architecture_1005_a -# Command execution and planning - -@architecture_1006_a -# Table/index/constraints - -@architecture_1007_a -# Undo log, redo log, and transactions layer - -@architecture_1008_a -# B-tree engine and page-based storage allocation - -@architecture_1009_a -# Filesystem abstraction - -@architecture_1010_h2 -#Introduction - -@architecture_1011_p -# H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. - -@architecture_1012_p -# As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. 
- -@architecture_1013_h2 -#Top-down Overview - -@architecture_1014_p -# Working from the top down, the layers look like this: - -@architecture_1015_li -#JDBC driver. - -@architecture_1016_li -#Connection/session management. - -@architecture_1017_li -#SQL Parser. - -@architecture_1018_li -#Command execution and planning. - -@architecture_1019_li -#Table/Index/Constraints. - -@architecture_1020_li -#Undo log, redo log, and transactions layer. - -@architecture_1021_li -#B-tree engine and page-based storage allocation. - -@architecture_1022_li -#Filesystem abstraction. - -@architecture_1023_h2 -#JDBC Driver - -@architecture_1024_p -# The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx - -@architecture_1025_h2 -#Connection/session management - -@architecture_1026_p -# The primary classes of interest are: - -@architecture_1027_th -#Package - -@architecture_1028_th -説明 - -@architecture_1029_td -#org.h2.engine.Database - -@architecture_1030_td -#the root/global class - -@architecture_1031_td -#org.h2.engine.SessionInterface - -@architecture_1032_td -#abstracts over the differences between embedded and remote sessions - -@architecture_1033_td -#org.h2.engine.Session - -@architecture_1034_td -#local/embedded session - -@architecture_1035_td -#org.h2.engine.SessionRemote - -@architecture_1036_td -#remote session - -@architecture_1037_h2 -#Parser - -@architecture_1038_p -# The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. - -@architecture_1039_p -# See Wikipedia Recursive-descent parser page. - -@architecture_1040_h2 -#Command execution and planning - -@architecture_1041_p -# Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. 
The primary packages of interest are: - -@architecture_1042_th -#Package - -@architecture_1043_th -説明 - -@architecture_1044_td -#org.h2.command.ddl - -@architecture_1045_td -#Commands that modify schema data structures - -@architecture_1046_td -#org.h2.command.dml - -@architecture_1047_td -#Commands that modify data - -@architecture_1048_h2 -#Table/Index/Constraints - -@architecture_1049_p -# One thing to note here is that indexes are simply stored as special kinds of tables. - -@architecture_1050_p -# The primary packages of interest are: - -@architecture_1051_th -#Package - -@architecture_1052_th -説明 - -@architecture_1053_td -#org.h2.table - -@architecture_1054_td -#Implementations of different kinds of tables - -@architecture_1055_td -#org.h2.index - -@architecture_1056_td -#Implementations of different kinds of indices - -@architecture_1057_h2 -#Undo log, redo log, and transactions layer - -@architecture_1058_p -# We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log http://h2database.com/html/grammar.html#set_log - -@architecture_1059_p -# We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to rollback a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses it's own "list of operations" (usually in-memory). - -@architecture_1060_p -# With the MVStore, this is no longer needed (just the transaction log). - -@architecture_1061_h2 -#B-tree engine and page-based storage allocation. - -@architecture_1062_p -# The primary package of interest is org.h2.store. - -@architecture_1063_p -# This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. - -@architecture_1064_h2 -#Filesystem abstraction. - -@architecture_1065_p -# The primary class of interest is org.h2.store.FileStore. 
- -@architecture_1066_p -# This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. - -@build_1000_h1 -ビルド - -@build_1001_a -# Portability - -@build_1002_a -# Environment - -@build_1003_a -# Building the Software - -@build_1004_a -# Build Targets - -@build_1005_a -# Using Maven 2 - -@build_1006_a -# Using Eclipse - -@build_1007_a -# Translating - -@build_1008_a -# Providing Patches - -@build_1009_a -# Reporting Problems or Requests - -@build_1010_a -# Automated Build - -@build_1011_a -# Generating Railroad Diagrams - -@build_1012_h2 -�?ータビリティ - -@build_1013_p -# This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. - -@build_1014_h2 -環境 - -@build_1015_p -# To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required. - -@build_1016_p -# To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. - -@build_1017_li -#Mac OS X and Windows - -@build_1018_a -#Sun JDK Version 1.6 and 1.7 - -@build_1019_a -#Eclipse - -@build_1020_li -#Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage - -@build_1021_a -#Emma Java Code Coverage - -@build_1022_a -#Mozilla Firefox - -@build_1023_a -#OpenOffice - -@build_1024_a -#NSIS - -@build_1025_li -# (Nullsoft Scriptable Install System) - -@build_1026_a -#Maven - -@build_1027_h2 -ソフトウェア�?�ビルド - -@build_1028_p -# You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command: - -@build_1029_p -# For Linux and OS X, use ./build.sh instead of build. 
- -@build_1030_p -# You will get a list of targets. If you want to build the jar file, execute (Windows): - -@build_1031_p -# To run the build tool in shell mode, use the command line option - as in ./build.sh -. - -@build_1032_h3 -#Switching the Source Code - -@build_1033_p -# The source code uses Java 1.6 features. To switch the source code to the installed version of Java, run: - -@build_1034_h2 -#Build Targets - -@build_1035_p -# The build system can generate smaller jar files as well. The following targets are currently supported: - -@build_1036_code -#jarClient - -@build_1037_li -# creates the file h2client.jar. This only contains the JDBC client. - -@build_1038_code -#jarSmall - -@build_1039_li -# creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. - -@build_1040_code -#jarJaqu - -@build_1041_li -# creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. - -@build_1042_code -#javadocImpl - -@build_1043_li -# creates the Javadocs of the implementation. - -@build_1044_p -# To create the file h2client.jar, go to the directory h2 and execute the following command: - -@build_1045_h3 -#Using Lucene 2 / 3 - -@build_1046_p -# Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows: - -@build_1047_h2 -Maven 2 �?�利用 - -@build_1048_h3 -Centralリ�?ジトリ�?�利用 - -@build_1049_p -# You can include the database in your Maven 2 project as a dependency. Example: - -@build_1050_p -# New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. 
- -@build_1051_h3 -#Maven Plugin to Start and Stop the TCP Server - -@build_1052_p -# A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. To start the H2 server, use: - -@build_1053_p -# To stop the H2 server, use: - -@build_1054_h3 -スナップショット�?ージョン�?�利用 - -@build_1055_p -# To build a h2-*-SNAPSHOT.jar file and upload it the to the local Maven 2 repository, execute the following command: - -@build_1056_p -# Afterwards, you can include the database in your Maven 2 project as a dependency: - -@build_1057_h2 -#Using Eclipse - -@build_1058_p -# To create an Eclipse project for H2, use the following steps: - -@build_1059_li -#Install Subversion and Eclipse. - -@build_1060_li -#Get the H2 source code from the Subversion repository: - -@build_1061_code -#svn checkout http://h2database.googlecode.com/svn/trunk h2database-read-only - -@build_1062_li -#Download all dependencies (Windows): - -@build_1063_code -#build.bat download - -@build_1064_li -#In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source. - -@build_1065_li -#Select the h2 folder, click Next and Finish. - -@build_1066_li -#To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. - -@build_1067_h2 -#Translating - -@build_1068_p -# The translation of this software is split into the following parts: - -@build_1069_li -#H2 Console: src/main/org/h2/server/web/res/_text_*.prop - -@build_1070_li -#Error messages: src/main/org/h2/res/_messages_*.prop - -@build_1071_p -# To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. 
- -@build_1072_h2 -#Providing Patches - -@build_1073_p -# If you like to provide patches, please consider the following guidelines to simplify merging them: - -@build_1074_li -#Only use Java 6 features (do not use Java 7) (see Environment). - -@build_1075_li -#Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml. - -@build_1076_li -#A template of the Eclipse settings are in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. - -@build_1077_li -#Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. - -@build_1078_li -#The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage. - -@build_1079_li -#Verify that you did not break other features: run the test cases by executing build test. - -@build_1080_li -#Provide end user documentation if required (src/docsrc/html/*). - -@build_1081_li -#Document grammar changes in src/docsrc/help/help.csv - -@build_1082_li -#Provide a change log entry (src/docsrc/html/changelog.html). - -@build_1083_li -#Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. - -@build_1084_li -#Run src/installer/buildRelease to find and fix formatting errors. - -@build_1085_li -#Verify the formatting using build docs and build javadoc. - -@build_1086_li -#Submit patches as .patch files (compressed if big). To create a patch using Eclipse, use Team / Create Patch. 
- -@build_1087_p -# For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement: - -@build_1088_p -# "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)." - -@build_1089_h2 -#Reporting Problems or Requests - -@build_1090_p -# Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request: - -@build_1091_li -#For bug reports, please provide a short, self contained, correct (compilable), example of the problem. - -@build_1092_li -#Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. - -@build_1093_li -#Before posting problems, check the FAQ and do a Google search. - -@build_1094_li -#When got an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). - -@build_1095_li -#When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use: HelloWorld.java. Method that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. - -@build_1096_li -#For large attachments, use a public temporary storage such as Rapidshare. 
- -@build_1097_li -#Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only few people monitor the issue tracking system. - -@build_1098_li -#For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). - -@build_1099_li -#It may take a few days to get an answers. Please do not double post. - -@build_1100_h2 -#Automated Build - -@build_1101_p -# This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. The last results are available here: - -@build_1102_a -#Test Output - -@build_1103_a -#Code Coverage Summary - -@build_1104_a -#Code Coverage Details (download, 1.3 MB) - -@build_1105_a -#Build Newsfeed - -@build_1106_a -#Latest Jar File (download, 1 MB) - -@build_1107_h2 -#Generating Railroad Diagrams - -@build_1108_p -# The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows: - -@build_1109_li -#The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. - -@build_1110_li -#The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. - -@build_1111_li -#The rail images (one straight, four junctions, two turns) are generated using a simple Java application. - -@build_1112_p -# To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. 
- -@changelog_1000_h1 -変更履歴 - -@changelog_1001_h2 -#Next Version (unreleased) - -@changelog_1002_li -#- - -@changelog_1003_h2 -#Version 1.4.187 Beta (2015-04-10) - -@changelog_1004_li -#MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. - -@changelog_1005_li -#Results with CLOB or BLOB data are no longer reused. - -@changelog_1006_li -#References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. - -@changelog_1007_li -#MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily. - -@changelog_1008_li -#Issue 610: possible integer overflow in WriteBuffer.grow(). - -@changelog_1009_li -#Issue 609: the spatial index did not support NULL (ClassCastException). - -@changelog_1010_li -#MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. - -@changelog_1011_li -#MVStore: updates that affected many rows were were slow in some cases if there was a secondary index. - -@changelog_1012_li -#Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". - -@changelog_1013_li -#Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". - -@changelog_1014_li -#When using the MVStore, running a SQL script generate by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. - -@changelog_1015_li -#Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. 
- -@changelog_1016_li -#Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x". - -@changelog_1017_li -#The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. - -@changelog_1018_li -#Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by". - -@changelog_1019_li -#The LIRS cache could grow larger than the allocated memory. - -@changelog_1020_li -#A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. - -@changelog_1021_li -#MVStore: use RandomAccessFile file system if the file name starts with "file:". - -@changelog_1022_li -#Allow DATEADD to take a long value for count when manipulating milliseconds. - -@changelog_1023_li -#When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. - -@changelog_1024_li -#Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception. - -@changelog_1025_li -#Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. - -@changelog_1026_li -#Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. - -@changelog_1027_li -#Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. - -@changelog_1028_h2 -#Version 1.4.186 Beta (2015-03-02) - -@changelog_1029_li -#The Servlet API 3.0.1 is now used, instead of 2.4. - -@changelog_1030_li -#MVStore: old chunks no longer removed in append-only mode. 
- -@changelog_1031_li -#MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases. - -@changelog_1032_li -#MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. - -@changelog_1033_li -#MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). - -@changelog_1034_li -#MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception. - -@changelog_1035_li -#StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name. - -@changelog_1036_li -#MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). - -@changelog_1037_li -#The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. - -@changelog_1038_li -#Tables without columns didn't work. (The use case for such tables is testing.) - -@changelog_1039_li -#The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. - -@changelog_1040_li -#Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. - -@changelog_1041_li -#In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1 - -@changelog_1042_li -#Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values. - -@changelog_1043_li -#Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. 
- -@changelog_1044_li -#Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). - -@changelog_1045_li -#PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang. - -@changelog_1046_li -#Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. - -@changelog_1047_h2 -#Version 1.4.185 Beta (2015-01-16) - -@changelog_1048_li -#In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x; - -@changelog_1049_li -#New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. - -@changelog_1050_li -#Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException. - -@changelog_1051_li -#Issue 594: Profiler.copyInThread does not work properly. - -@changelog_1052_li -#Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). - -@changelog_1053_li -#Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. - -@changelog_1054_li -#Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. - -@changelog_1055_li -#Issue 552: Implement BIT_AND and BIT_OR aggregate functions. - -@changelog_1056_h2 -#Version 1.4.184 Beta (2014-12-19) - -@changelog_1057_li -#In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. 
This bug was introduced by supporting "rownum" in views and derived tables. - -@changelog_1058_li -#MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. - -@changelog_1059_li -#Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. - -@changelog_1060_li -#MVStore: if there is an exception while saving, the store is now in all cases immediately closed. - -@changelog_1061_li -#MVStore: the dump tool could go into an endless loop for some files. - -@changelog_1062_li -#MVStore: recovery for a database with many CLOB or BLOB entries is now much faster. - -@changelog_1063_li -#Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a" - -@changelog_1064_li -#Auto-server mode: the host name is now stored in the .lock.db file. - -@changelog_1065_h2 -#Version 1.4.183 Beta (2014-12-13) - -@changelog_1066_li -#MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. - -@changelog_1067_li -#The built-in functions "power" and "radians" now always return a double. - -@changelog_1068_li -#Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1 - -@changelog_1069_li -#MVStore: the Recover tool can now deal with more types of corruption in the file. - -@changelog_1070_li -#MVStore: the TransactionStore now first needs to be initialized before it can be used. - -@changelog_1071_li -#Views and derived tables with equality and range conditions on the same columns did not work properly. 
example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1 - -@changelog_1072_li -#The database URL setting PAGE_SIZE setting is now also used for the MVStore. - -@changelog_1073_li -#MVStore: the default page split size for persistent stores is now 4096 (it was 16 KB so far). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). - -@changelog_1074_li -#With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. - -@changelog_1075_li -#MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwrite, even if the reference counting algorithm does not work properly. - -@changelog_1076_li -#In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. - -@changelog_1077_li -#In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). - -@changelog_1078_li -#Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). - -@changelog_1079_li -#The MVStoreTool could throw an IllegalArgumentException. - -@changelog_1080_li -#Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. - -@changelog_1081_li -#H2 Console: the built-in web server did not work properly if an unknown file was requested. - -@changelog_1082_li -#MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. - -@changelog_1083_li -#MVStore: support for concurrent reads and writes is now enabled by default. - -@changelog_1084_li -#Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. 
- -@changelog_1085_li -#H2 Console and server mode: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. - -@changelog_1086_li -#MVStore: the R-tree did not correctly measure the memory usage. - -@changelog_1087_li -#MVStore: compacting a store with an R-tree did not always work. - -@changelog_1088_li -#Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false - -@changelog_1089_li -#Fix bug which could generate deadlocks when multiple connections accessed the same table. - -@changelog_1090_li -#Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command - -@changelog_1091_li -#Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations - -@changelog_1092_li -#Fix "USE schema" command for MySQL compatibility, patch by mfulton - -@changelog_1093_li -#Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton - -@changelog_1094_h2 -#Version 1.4.182 Beta (2014-10-17) - -@changelog_1095_li -#MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects. - -@changelog_1096_li -#OSGi: the MVStore packages are now exported. - -@changelog_1097_li -#With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. - -@changelog_1098_li -#When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. 
- -@changelog_1099_li -#In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. - -@changelog_1100_li -#DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. - -@changelog_1101_li -#Issue 584: the error message for a wrong sequence definition was wrong. - -@changelog_1102_li -#CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. - -@changelog_1103_li -#Descending indexes on MVStore tables did not work properly. - -@changelog_1104_li -#Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. - -@changelog_1105_li -#Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. - -@changelog_1106_li -#The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. - -@changelog_1107_li -#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. - -@changelog_1108_li -#Issue 572: MySQL compatibility for "order by" in update statements. - -@changelog_1109_li -#The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1110_h2 -#Version 1.4.181 Beta (2014-08-06) - -@changelog_1111_li -#Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch! - -@changelog_1112_li -#Writing to the trace file is now faster, specially with the debug level. - -@changelog_1113_li -#The database option "defrag_always=true" did not work with the MVStore. 
- -@changelog_1114_li -#The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. - -@changelog_1115_li -#File system abstraction: support replacing existing files using move (currently not for Windows). - -@changelog_1116_li -#The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. - -@changelog_1117_li -#The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome! - -@changelog_1118_li -#Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). - -@changelog_1119_li -#Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. - -@changelog_1120_li -#Handle tabs like 4 spaces in web console, patch by Martin Grajcar. - -@changelog_1121_li -#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. - -@changelog_1122_h2 -#Version 1.4.180 Beta (2014-07-13) - -@changelog_1123_li -#MVStore: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. 
This area is still work in progress. - -@changelog_1124_li -#Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. - -@changelog_1125_li -#MVStore: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. - -@changelog_1126_li -#The LIRS cache now re-sizes the internal hash map if needed. - -@changelog_1127_li -#Optionally persist session history in the H2 console. (patch from Martin Grajcar) - -@changelog_1128_li -#Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) - -@changelog_1129_li -#Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). - -@changelog_1130_li -#Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation. - -@changelog_1131_h2 -#Version 1.4.179 Beta (2014-06-23) - -@changelog_1132_li -#The license was changed to MPL 2.0 (from 1.0) and EPL 1.0. - -@changelog_1133_li -#Issue 565: MVStore: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException. - -@changelog_1134_li -#MVStore: reduced dependencies to other H2 classes. - -@changelog_1135_li -#There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contains queries. This is now detected, and creating the table is prohibited. In future versions of H2, most likely creating references to other tables will no longer be supported because of such problems. - -@changelog_1136_li -#MVStore: descending indexes with "nulls first" did not work as expected (null was ordered last). 
- -@changelog_1137_li -#Large result sets now always create temporary tables instead of temporary files. - -@changelog_1138_li -#When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets). - -@changelog_1139_li -#If a database file in the PageStore file format exists, this file and this mode is now used, even if the database URL does not contain "MV_STORE=FALSE". If a MVStore file exists, it is used. - -@changelog_1140_li -#Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176: Referential integrity constraints sometimes used the wrong index. - -@changelog_1141_li -#MVStore: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class. - -@changelog_1142_li -#Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly. - -@changelog_1143_li -#The license has changed to MPL 2.0 + EPL 1.0. - -@changelog_1144_li -#MVStore: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException. - -@changelog_1145_li -#Issue 566: MVStore: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken index need to be re-built). - -@changelog_1146_li -#MVStore: creating secondary indexes on large tables results in missing rows in the index. - -@changelog_1147_li -#Metadata: the password of linked tables is now only visible for admin users. - -@changelog_1148_li -#For Windows, database URLs of the form "jdbc:h2:/test" where considered relative and did not work unless the system property "h2.implicitRelativePath" was used. 
- -@changelog_1149_li -#Windows: using a base directory of "C:/" and similar did not work as expected. - -@changelog_1150_li -#Follow JDBC specification on Procedures MetaData, use P0 as return type of procedure. - -@changelog_1151_li -#Issue 531: IDENTITY ignored for added column. - -@changelog_1152_li -#FileSystem: improve exception throwing compatibility with JDK - -@changelog_1153_li -#Spatial Index: adjust costs so we do not use the spatial index if the query does not contain an intersects operator. - -@changelog_1154_li -#Fix multi-threaded deadlock when using a View that includes a TableFunction. - -@changelog_1155_li -#Fix bug in dividing very-small BigDecimal numbers. - -@changelog_1156_h2 -#Version 1.4.178 Beta (2014-05-02) - -@changelog_1157_li -#Issue 559: Make dependency on org.osgi.service.jdbc optional. - -@changelog_1158_li -#Improve error message when the user specifies an unsupported combination of database settings. - -@changelog_1159_li -#MVStore: in the multi-threaded mode, NullPointerException and other exceptions could occur. - -@changelog_1160_li -#MVStore: some database file could not be compacted due to a bug in the bookkeeping of the fill rate. Also, database file were compacted quite slowly. This has been improved; but more changes in this area are expected. - -@changelog_1161_li -#MVStore: support for volatile maps (that don't store changes). - -@changelog_1162_li -#MVStore mode: in-memory databases now also use the MVStore. - -@changelog_1163_li -#In server mode, appending ";autocommit=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong. - -@changelog_1164_li -#Issue 561: OSGi: the import package declaration of org.h2 excluded version 1.4. - -@changelog_1165_li -#Issue 558: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob). - -@changelog_1166_li -#Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. 
We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented. - -@changelog_1167_li -#Issue 554: Web Console in an IFrame was not fully supported. - -@changelog_1168_h2 -#Version 1.4.177 Beta (2014-04-12) - -@changelog_1169_li -#By default, the MV_STORE option is enabled, so it is using the new MVStore storage. The MVCC setting is by default set to the same values as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE=FALSE" and/or ";MVCC=FALSE" to the database URL. - -@changelog_1170_li -#The file locking method 'serialized' is no longer supported. This mode might return in a future version, however this is not clear right now. A new implementation and new tests would be needed. - -@changelog_1171_li -#Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode, this is always enabled, but with version 1.4 this is even enabled in the PageStore mode. - -@changelog_1172_li -#Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc:h2:test now needs to be written as jdbc:h2:./test. - -@changelog_1173_li -#"select ... fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch. - -@changelog_1174_li -#Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation"). - -@changelog_1175_li -#Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead. - -@changelog_1176_li -#Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin". - -@changelog_1177_li -#Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility. 
- -@changelog_1178_li -#Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier. - -@changelog_1179_h2 -#Version 1.3.176 (2014-04-05) - -@changelog_1180_li -#The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4. - -@changelog_1181_li -#The static method Csv.getInstance() was removed. Use the public constructor instead. - -@changelog_1182_li -#The default user name for the Script, RunScript, Shell, and CreateCluster tools are no longer "sa" but an empty string. - -@changelog_1183_li -#The stack trace of the exception "The object is already closed" is no longer logged by default. - -@changelog_1184_li -#If a value of a result set was itself a result set, the result could only be read once. - -@changelog_1185_li -#Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS). - -@changelog_1186_li -#Granting a additional right to a role that already had a right for that table was not working. - -@changelog_1187_li -#Spatial index: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed). - -@changelog_1188_li -#Issue 551: the datatype documentation was incorrect (found by Bernd Eckenfels). - -@changelog_1189_li -#Issue 368: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald. - -@changelog_1190_li -#OSGi: the package javax.tools is now imported (as an optional). - -@changelog_1191_li -#H2 Console: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space). - -@changelog_1192_li -#H2 Console: auto-complete did not work with multi-line statements. - -@changelog_1193_li -#CLOB and BLOB data was not immediately removed after a rollback. - -@changelog_1194_li -#There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch! 
- -@changelog_1195_li -#Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation. - -@changelog_1196_li -#The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot! - -@changelog_1197_li -#Issue 545: Unnecessary duplicate code was removed. - -@changelog_1198_li -#The profiler tool can now process files with full thread dumps. - -@changelog_1199_li -#MVStore: the file format was changed slightly. - -@changelog_1200_li -#MVStore mode: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage). - -@changelog_1201_li -#MVStore mode: creating indexes is now much faster (in many cases faster than with the default PageStore). - -@changelog_1202_li -#Various bugs in the MVStore storage and have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing. - -@changelog_1203_li -#The method org.h2.expression.Function.getCost could throw a NullPointException. - -@changelog_1204_li -#Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases. - -@changelog_1205_li -#Lucene 2 is no longer supported. - -@changelog_1206_li -#Fix bug in calculating default MIN and MAX values for SEQUENCE. - -@changelog_1207_li -#Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE - -@changelog_1208_li -#Add entry-point to org.h2.tools.Shell so it can be called from inside an application. patch by Thomas Gillet. - -@changelog_1209_li -#Fix bug that prevented the PgServer from being stopped and started multiple times. - -@changelog_1210_li -#Support some more DDL syntax for MySQL, patch from Peter Jentsch. 
- -@changelog_1211_li -#Issue 548: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc" - -@changelog_1212_li -#Fix bug in varargs support in ALIAS's, patch from Nicolas Fortin - -@cheatSheet_1000_h1 -#H2 Database Engine Cheat Sheet - -@cheatSheet_1001_h2 -#Using H2 - -@cheatSheet_1002_a -H2 - -@cheatSheet_1003_li -# is open source, free to use and distribute. - -@cheatSheet_1004_a -ダウンロード - -@cheatSheet_1005_li -#: jar, installer (Windows), zip. - -@cheatSheet_1006_li -#To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. - -@cheatSheet_1007_a -#A new database is automatically created - -@cheatSheet_1008_a -#by default - -@cheatSheet_1009_li -#. - -@cheatSheet_1010_a -#Closing the last connection closes the database - -@cheatSheet_1011_li -#. - -@cheatSheet_1012_h2 -ドキュメント - -@cheatSheet_1013_p -# Reference: SQL grammar, functions, data types, tools, API - -@cheatSheet_1014_a -特徴 - -@cheatSheet_1015_p -#: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions - -@cheatSheet_1016_a -#Database URLs - -@cheatSheet_1017_a -#Embedded - -@cheatSheet_1018_code -jdbc:h2:~/test - -@cheatSheet_1019_p -# 'test' in the user home directory - -@cheatSheet_1020_code -#jdbc:h2:/data/test - -@cheatSheet_1021_p -# 'test' in the directory /data - -@cheatSheet_1022_code -#jdbc:h2:test - -@cheatSheet_1023_p -# in the current(!) 
working directory - -@cheatSheet_1024_a -#In-Memory - -@cheatSheet_1025_code -#jdbc:h2:mem:test - -@cheatSheet_1026_p -# multiple connections in one process - -@cheatSheet_1027_code -jdbc:h2:mem: - -@cheatSheet_1028_p -# unnamed private; one connection - -@cheatSheet_1029_a -サーバーモード - -@cheatSheet_1030_code -#jdbc:h2:tcp://localhost/~/test - -@cheatSheet_1031_p -# user home dir - -@cheatSheet_1032_code -#jdbc:h2:tcp://localhost//data/test - -@cheatSheet_1033_p -# absolute dir - -@cheatSheet_1034_a -#Server start - -@cheatSheet_1035_p -#:java -cp *.jar org.h2.tools.Server - -@cheatSheet_1036_a -#Settings - -@cheatSheet_1037_code -#jdbc:h2:..;MODE=MySQL - -@cheatSheet_1038_a -#compatibility (or HSQLDB,...) - -@cheatSheet_1039_code -#jdbc:h2:..;TRACE_LEVEL_FILE=3 - -@cheatSheet_1040_a -#log to *.trace.db - -@cheatSheet_1041_a -#Using the JDBC API - -@cheatSheet_1042_a -#Connection Pool - -@cheatSheet_1043_a -#Maven 2 - -@cheatSheet_1044_a -#Hibernate - -@cheatSheet_1045_p -# hibernate.cfg.xml (or use the HSQLDialect): - -@cheatSheet_1046_a -#TopLink and Glassfish - -@cheatSheet_1047_p -# Datasource class: org.h2.jdbcx.JdbcDataSource - -@cheatSheet_1048_code -#oracle.toplink.essentials.platform. 
- -@cheatSheet_1049_code -#database.H2Platform - -@download_1000_h1 -ダウンロード - -@download_1001_h3 -#Version 1.4.187 (2015-04-10), Beta - -@download_1002_a -Windows Installer - -@download_1003_a -Platform-Independent Zip - -@download_1004_h3 -#Version 1.3.176 (2014-04-05), Last Stable - -@download_1005_a -Windows Installer - -@download_1006_a -Platform-Independent Zip - -@download_1007_h3 -#Download Mirror and Older Versions - -@download_1008_a -Platform-Independent Zip - -@download_1009_h3 -#Jar File - -@download_1010_a -#Maven.org - -@download_1011_a -#Sourceforge.net - -@download_1012_a -#Latest Automated Build (not released) - -@download_1013_h3 -#Maven (Binary, Javadoc, and Source) - -@download_1014_a -#Binary - -@download_1015_a -#Javadoc - -@download_1016_a -#Sources - -@download_1017_h3 -#Database Upgrade Helper File - -@download_1018_a -#Upgrade database from 1.1 to the current version - -@download_1019_h3 -サブバージョンのソースリポジトリ - -@download_1020_a -Google Code - -@download_1021_p -# For details about changes, see the Change Log. - -@download_1022_h3 -#News and Project Information - -@download_1023_a -#Atom Feed - -@download_1024_a -#RSS Feed - -@download_1025_a -#DOAP File - -@download_1026_p -# (what is this) - -@faq_1000_h1 -F A Q - -@faq_1001_a -# I Have a Problem or Feature Request - -@faq_1002_a -# Are there Known Bugs? When is the Next Release? - -@faq_1003_a -# Is this Database Engine Open Source? - -@faq_1004_a -# Is Commercial Support Available? - -@faq_1005_a -# How to Create a New Database? - -@faq_1006_a -# How to Connect to a Database? - -@faq_1007_a -# Where are the Database Files Stored? - -@faq_1008_a -# What is the Size Limit (Maximum Size) of a Database? - -@faq_1009_a -# Is it Reliable? - -@faq_1010_a -# Why is Opening my Database Slow? - -@faq_1011_a -# My Query is Slow - -@faq_1012_a -# H2 is Very Slow - -@faq_1013_a -# Column Names are Incorrect? - -@faq_1014_a -# Float is Double? - -@faq_1015_a -# Is the GCJ Version Stable? Faster? 
- -@faq_1016_a -# How to Translate this Project? - -@faq_1017_a -# How to Contribute to this Project? - -@faq_1018_h3 -#I Have a Problem or Feature Request - -@faq_1019_p -# Please read the support checklist. - -@faq_1020_h3 -#Are there Known Bugs? When is the Next Release? - -@faq_1021_p -# Usually, bugs get fixes as they are found. There is a release every few weeks. Here is the list of known and confirmed issues: - -@faq_1022_li -#When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2). - -@faq_1023_li -#Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. - -@faq_1024_li -#Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). - -@faq_1025_li -#Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. 
- -@faq_1026_li -#When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. - -@faq_1027_p -# For a complete list, see Open Issues. - -@faq_1028_h3 -�?��?�データベースエンジン�?�オープンソース�?��?��?�? - -@faq_1029_p -# Yes. It is free to use and distribute, and the source code is included. See also under license. - -@faq_1030_h3 -#Is Commercial Support Available? - -@faq_1031_p -# Yes, commercial support is available, see Commercial Support. - -@faq_1032_h3 -新�?データベース�?�構築方法�?�? - -@faq_1033_p -# By default, a new database is automatically created if it does not yet exist. See Creating New Databases. - -@faq_1034_h3 -データベース�?��?�接続方法�?�? - -@faq_1035_p -# The database driver is org.h2.Driver, and the database URL starts with jdbc:h2:. To connect to a database using JDBC, use the following code: - -@faq_1036_h3 -データベース�?�ファイル�?��?��?��?��?存�?�れ�?��?��?�? - -@faq_1037_p -# When using database URLs like jdbc:h2:~/test, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. If the base directory is not set (as in jdbc:h2:test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). 
Example: jdbc:h2:file:C:/data/test - -@faq_1038_h3 -#What is the Size Limit (Maximum Size) of a Database? - -@faq_1039_p -# See Limits and Limitations. - -@faq_1040_h3 -�?�れ�?�信頼�?��??るデータベース�?��?��?�? - -@faq_1041_p -# That is not easy to say. It is still a quite new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous, they are only supported for situations where performance is more important than reliability. Those dangerous features are: - -@faq_1042_li -#Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1. - -@faq_1043_li -#Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. - -@faq_1044_li -#Disabling database file protection using (setting FILE_LOCK to NO in the database URL). - -@faq_1045_li -#Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. - -@faq_1046_p -# In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a databases. - -@faq_1047_p -# This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are: - -@faq_1048_li -#Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7 - -@faq_1049_li -#The features AUTO_SERVER and AUTO_RECONNECT. - -@faq_1050_li -#Cluster mode, 2-phase commit, savepoints. - -@faq_1051_li -#24/7 operation. - -@faq_1052_li -#Fulltext search. - -@faq_1053_li -#Operations on LOBs over 2 GB. 
- -@faq_1054_li -#The optimizer may not always select the best plan. - -@faq_1055_li -#Using the ICU4J collator. - -@faq_1056_p -# Areas considered experimental are: - -@faq_1057_li -#The PostgreSQL server - -@faq_1058_li -#Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). - -@faq_1059_li -#Multi-threading within the engine using SET MULTI_THREADED=1. - -@faq_1060_li -#Compatibility modes for other databases (only some features are implemented). - -@faq_1061_li -#The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. - -@faq_1062_p -# Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. - -@faq_1063_h3 -#Why is Opening my Database Slow? - -@faq_1064_p -# To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. - -@faq_1065_p -# Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open. - -@faq_1066_h3 -#My Query is Slow - -@faq_1067_p -# Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist: - -@faq_1068_li -#Run ANALYZE (see documentation for details). - -@faq_1069_li -#Run the query with EXPLAIN and check if indexes are used (see documentation for details). - -@faq_1070_li -#If required, create additional indexes and try again using ANALYZE and EXPLAIN. - -@faq_1071_li -#If it doesn't help please report the problem. 
- -@faq_1072_h3 -#H2 is Very Slow - -@faq_1073_p -# By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. - -@faq_1074_h3 -#Column Names are Incorrect? - -@faq_1075_p -# For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? - -@faq_1076_p -# This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE to the database URL. - -@faq_1077_p -# This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. - -@faq_1078_h3 -#Float is Double? - -@faq_1079_p -# For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? - -@faq_1080_p -# This is not a bug. According to the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT. - -@faq_1081_h3 -#Is the GCJ Version Stable? Faster? - -@faq_1082_p -# The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. 
However, the startup of the GCJ version is faster than when using a VM. - -@faq_1083_h3 -�?��?�プロジェクト�?�翻訳方法�?�? - -@faq_1084_p -# For more information, see Build/Translating. - -@faq_1085_h3 -#How to Contribute to this Project? - -@faq_1086_p -# There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. - -@features_1000_h1 -特徴 - -@features_1001_a -# Feature List - -@features_1002_a -# Comparison to Other Database Engines - -@features_1003_a -# H2 in Use - -@features_1004_a -# Connection Modes - -@features_1005_a -# Database URL Overview - -@features_1006_a -# Connecting to an Embedded (Local) Database - -@features_1007_a -# In-Memory Databases - -@features_1008_a -# Database Files Encryption - -@features_1009_a -# Database File Locking - -@features_1010_a -# Opening a Database Only if it Already Exists - -@features_1011_a -# Closing a Database - -@features_1012_a -# Ignore Unknown Settings - -@features_1013_a -# Changing Other Settings when Opening a Connection - -@features_1014_a -# Custom File Access Mode - -@features_1015_a -# Multiple Connections - -@features_1016_a -# Database File Layout - -@features_1017_a -# Logging and Recovery - -@features_1018_a -# Compatibility - -@features_1019_a -# Auto-Reconnect - -@features_1020_a -# Automatic Mixed Mode - -@features_1021_a -# Page Size - -@features_1022_a -# Using the Trace Options - -@features_1023_a -# Using Other Logging APIs - -@features_1024_a 
-# Read Only Databases - -@features_1025_a -# Read Only Databases in Zip or Jar File - -@features_1026_a -# Computed Columns / Function Based Index - -@features_1027_a -# Multi-Dimensional Indexes - -@features_1028_a -# User-Defined Functions and Stored Procedures - -@features_1029_a -# Pluggable or User-Defined Tables - -@features_1030_a -# Triggers - -@features_1031_a -# Compacting a Database - -@features_1032_a -# Cache Settings - -@features_1033_h2 -特徴一覧 - -@features_1034_h3 -主�?�特徴 - -@features_1035_li -#Very fast database engine - -@features_1036_li -#Open source - -@features_1037_li -#Written in Java - -@features_1038_li -#Supports standard SQL, JDBC API - -@features_1039_li -#Embedded and Server mode, Clustering support - -@features_1040_li -#Strong security features - -@features_1041_li -#The PostgreSQL ODBC driver can be used - -@features_1042_li -#Multi version concurrency - -@features_1043_h3 -追加�?�れ�?�特徴 - -@features_1044_li -#Disk based or in-memory databases and tables, read-only database support, temporary tables - -@features_1045_li -#Transaction support (read committed), 2-phase-commit - -@features_1046_li -#Multiple connections, table level locking - -@features_1047_li -#Cost based optimizer, using a genetic algorithm for complex queries, zero-administration - -@features_1048_li -#Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set - -@features_1049_li -#Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL - -@features_1050_h3 -SQLサ�?ート - -@features_1051_li -#Support for multiple schemas, information schema - -@features_1052_li -#Referential integrity / foreign key constraints with cascade, check constraints - -@features_1053_li -#Inner and outer joins, subqueries, read only views and inline views - -@features_1054_li -#Triggers and Java functions / stored procedures - -@features_1055_li -#Many built-in functions, including XML and lossless data 
compression - -@features_1056_li -#Wide range of data types including large objects (BLOB/CLOB) and arrays - -@features_1057_li -#Sequence and autoincrement columns, computed columns (can be used for function based indexes) - -@features_1058_code -ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP - -@features_1059_li -#Collation support, including support for the ICU4J library - -@features_1060_li -#Support for users and roles - -@features_1061_li -#Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. - -@features_1062_h3 -セキュリティ�?�特徴 - -@features_1063_li -#Includes a solution for the SQL injection problem - -@features_1064_li -#User password authentication uses SHA-256 and salt - -@features_1065_li -#For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) - -@features_1066_li -#All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm - -@features_1067_li -#The remote JDBC driver supports TCP/IP connections over TLS - -@features_1068_li -#The built-in web server supports connections over TLS - -@features_1069_li -#Passwords can be sent to the database using char arrays instead of Strings - -@features_1070_h3 -他�?�特徴�?�ツール - -@features_1071_li -#Small footprint (smaller than 1.5 MB), low memory requirements - -@features_1072_li -#Multiple index types (b-tree, tree, hash) - -@features_1073_li -#Support for multi-dimensional indexes - -@features_1074_li -#CSV (comma separated values) file support - -@features_1075_li -#Support for linked tables, and a built-in virtual 'range' table - -@features_1076_li -#Supports the EXPLAIN PLAN statement; sophisticated trace options - -@features_1077_li -#Database closing can be delayed or disabled to 
improve the performance - -@features_1078_li -#Web-based Console application (translated to many languages) with autocomplete - -@features_1079_li -#The database can generate SQL script files - -@features_1080_li -#Contains a recovery tool that can dump the contents of the database - -@features_1081_li -#Support for variables (for example to calculate running totals) - -@features_1082_li -#Automatic re-compilation of prepared statements - -@features_1083_li -#Uses a small number of database files - -@features_1084_li -#Uses a checksum for each record and log entry for data integrity - -@features_1085_li -#Well tested (high code coverage, randomized stress tests) - -@features_1086_h2 -他�?�データベースエンジン�?�比較�?�る - -@features_1087_p -# This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. - -@features_1088_th -#Feature - -@features_1089_th -H2 - -@features_1090_th -Derby - -@features_1091_th -HSQLDB - -@features_1092_th -MySQL - -@features_1093_th -PostgreSQL - -@features_1094_td -Pure Java - -@features_1095_td -対応 - -@features_1096_td -対応 - -@features_1097_td -対応 - -@features_1098_td -�?�対応 - -@features_1099_td -�?�対応 - -@features_1100_td -エンベッドモード (Java) - -@features_1101_td -対応 - -@features_1102_td -対応 - -@features_1103_td -対応 - -@features_1104_td -�?�対応 - -@features_1105_td -�?�対応 - -@features_1106_td -#In-Memory Mode - -@features_1107_td -対応 - -@features_1108_td -対応 - -@features_1109_td -対応 - -@features_1110_td -�?�対応 - -@features_1111_td -�?�対応 - -@features_1112_td -#Explain Plan - -@features_1113_td -対応 - -@features_1114_td -#Yes *12 - -@features_1115_td -対応 - -@features_1116_td -対応 - -@features_1117_td -対応 - -@features_1118_td -#Built-in Clustering / Replication - -@features_1119_td -対応 - -@features_1120_td -対応 - -@features_1121_td -�?�対応 - -@features_1122_td -対応 - -@features_1123_td -対応 - -@features_1124_td -暗�?�化データベース - -@features_1125_td -対応 - -@features_1126_td -#Yes *10 - -@features_1127_td -#Yes *10 - 
-@features_1128_td -�?�対応 - -@features_1129_td -�?�対応 - -@features_1130_td -リンクテーブル - -@features_1131_td -対応 - -@features_1132_td -�?�対応 - -@features_1133_td -#Partially *1 - -@features_1134_td -#Partially *2 - -@features_1135_td -�?�対応 - -@features_1136_td -ODBCドライ�? - -@features_1137_td -対応 - -@features_1138_td -�?�対応 - -@features_1139_td -�?�対応 - -@features_1140_td -対応 - -@features_1141_td -対応 - -@features_1142_td -フルテキストサー�? - -@features_1143_td -対応 - -@features_1144_td -対応 - -@features_1145_td -�?�対応 - -@features_1146_td -対応 - -@features_1147_td -対応 - -@features_1148_td -#Domains (User-Defined Types) - -@features_1149_td -対応 - -@features_1150_td -�?�対応 - -@features_1151_td -対応 - -@features_1152_td -対応 - -@features_1153_td -対応 - -@features_1154_td -データベース�?��?��?�ファイル - -@features_1155_td -少 - -@features_1156_td -多 - -@features_1157_td -少 - -@features_1158_td -多 - -@features_1159_td -多 - -@features_1160_td -#Row Level Locking - -@features_1161_td -#Yes *9 - -@features_1162_td -対応 - -@features_1163_td -#Yes *9 - -@features_1164_td -対応 - -@features_1165_td -対応 - -@features_1166_td -#Multi Version Concurrency - -@features_1167_td -対応 - -@features_1168_td -�?�対応 - -@features_1169_td -対応 - -@features_1170_td -対応 - -@features_1171_td -対応 - -@features_1172_td -#Multi-Threaded Statement Processing - -@features_1173_td -#No *11 - -@features_1174_td -対応 - -@features_1175_td -対応 - -@features_1176_td -対応 - -@features_1177_td -対応 - -@features_1178_td -#Role Based Security - -@features_1179_td -対応 - -@features_1180_td -#Yes *3 - -@features_1181_td -対応 - -@features_1182_td -対応 - -@features_1183_td -対応 - -@features_1184_td -#Updatable Result Sets - -@features_1185_td -対応 - -@features_1186_td -#Yes *7 - -@features_1187_td -対応 - -@features_1188_td -対応 - -@features_1189_td -対応 - -@features_1190_td -#Sequences - -@features_1191_td -対応 - -@features_1192_td -対応 - -@features_1193_td -対応 - -@features_1194_td -�?�対応 - -@features_1195_td -対応 - -@features_1196_td -#Limit and Offset - 
-@features_1197_td -対応 - -@features_1198_td -#Yes *13 - -@features_1199_td -対応 - -@features_1200_td -対応 - -@features_1201_td -対応 - -@features_1202_td -#Window Functions - -@features_1203_td -#No *15 - -@features_1204_td -#No *15 - -@features_1205_td -�?�対応 - -@features_1206_td -�?�対応 - -@features_1207_td -対応 - -@features_1208_td -#Temporary Tables - -@features_1209_td -対応 - -@features_1210_td -#Yes *4 - -@features_1211_td -対応 - -@features_1212_td -対応 - -@features_1213_td -対応 - -@features_1214_td -#Information Schema - -@features_1215_td -対応 - -@features_1216_td -#No *8 - -@features_1217_td -対応 - -@features_1218_td -対応 - -@features_1219_td -対応 - -@features_1220_td -#Computed Columns - -@features_1221_td -対応 - -@features_1222_td -対応 - -@features_1223_td -対応 - -@features_1224_td -�?�対応 - -@features_1225_td -#Yes *6 - -@features_1226_td -#Case Insensitive Columns - -@features_1227_td -対応 - -@features_1228_td -#Yes *14 - -@features_1229_td -対応 - -@features_1230_td -対応 - -@features_1231_td -#Yes *6 - -@features_1232_td -#Custom Aggregate Functions - -@features_1233_td -対応 - -@features_1234_td -�?�対応 - -@features_1235_td -対応 - -@features_1236_td -対応 - -@features_1237_td -対応 - -@features_1238_td -#CLOB/BLOB Compression - -@features_1239_td -対応 - -@features_1240_td -�?�対応 - -@features_1241_td -�?�対応 - -@features_1242_td -�?�対応 - -@features_1243_td -対応 - -@features_1244_td -フットプリント (jar/dll size) - -@features_1245_td -#~1.5 MB *5 - -@features_1246_td -#~3 MB - -@features_1247_td -#~1.5 MB - -@features_1248_td -#~4 MB - -@features_1249_td -#~6 MB - -@features_1250_p -# *1 HSQLDB supports text tables. - -@features_1251_p -# *2 MySQL supports linked MySQL tables under the name 'federated tables'. - -@features_1252_p -# *3 Derby support for roles based security and password checking as an option. - -@features_1253_p -# *4 Derby only supports global temporary tables. 
- -@features_1254_p -# *5 The default H2 jar file contains debug information, jar files for other databases do not. - -@features_1255_p -# *6 PostgreSQL supports functional indexes. - -@features_1256_p -# *7 Derby only supports updatable result sets if the query is not sorted. - -@features_1257_p -# *8 Derby doesn't support standard compliant information schema tables. - -@features_1258_p -# *9 When using MVCC (multi version concurrency). - -@features_1259_p -# *10 Derby and HSQLDB don't hide data patterns well. - -@features_1260_p -# *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC. - -@features_1261_p -# *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. - -@features_1262_p -# *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. - -@features_1263_p -# *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER(). - -@features_1264_h3 -DaffodilDb�?�One$Db - -@features_1265_p -# It looks like the development of this database has stopped. The last release was February 2006. - -@features_1266_h3 -McKoi - -@features_1267_p -# It looks like the development of this database has stopped. The last release was August 2004. - -@features_1268_h2 -#H2 in Use - -@features_1269_p -# For a list of applications that work with or use H2, see: Links. - -@features_1270_h2 -接続モード - -@features_1271_p -# The following connection modes are supported: - -@features_1272_li -#Embedded mode (local connections using JDBC) - -@features_1273_li -#Server mode (remote connections using JDBC or ODBC over TCP/IP) - -@features_1274_li -#Mixed mode (local and remote connections at the same time) - -@features_1275_h3 -エンベッドモード - -@features_1276_p -# In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. 
The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently, or on the number of open connections. - -@features_1277_h3 -サー�?ーモード - -@features_1278_p -# When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode. - -@features_1279_p -# The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently per server, or on the number of open connections. - -@features_1280_h3 -#Mixed Mode - -@features_1281_p -# The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower. - -@features_1282_p -# The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's an local or remote connection) can do so using the exact same database URL. - -@features_1283_h2 -データベースURL概�? 
- -@features_1284_p -# This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive. - -@features_1285_th -トピック - -@features_1286_th -URLフォーマット�?�例 - -@features_1287_a -エンベッド (ローカル) 接続 - -@features_1288_td -# jdbc:h2:[file:][<path>]<databaseName> - -@features_1289_td -# jdbc:h2:~/test - -@features_1290_td -# jdbc:h2:file:/data/sample - -@features_1291_td -# jdbc:h2:file:C:/data/sample (Windows only) - -@features_1292_a -#In-memory (private) - -@features_1293_td -jdbc:h2:mem: - -@features_1294_a -#In-memory (named) - -@features_1295_td -# jdbc:h2:mem:<databaseName> - -@features_1296_td -# jdbc:h2:mem:test_mem - -@features_1297_a -#Server mode (remote connections) - -@features_1298_a -# using TCP/IP - -@features_1299_td -# jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName> - -@features_1300_td -# jdbc:h2:tcp://localhost/~/test - -@features_1301_td -# jdbc:h2:tcp://dbserv:8084/~/sample - -@features_1302_td -# jdbc:h2:tcp://localhost/mem:test - -@features_1303_a -#Server mode (remote connections) - -@features_1304_a -# using TLS - -@features_1305_td -# jdbc:h2:ssl://<server>[:<port>]/<databaseName> - -@features_1306_td -# jdbc:h2:ssl://localhost:8085/~/sample; - -@features_1307_a -#Using encrypted files - -@features_1308_td -# jdbc:h2:<url>;CIPHER=AES - -@features_1309_td -# jdbc:h2:ssl://localhost/~/test;CIPHER=AES - -@features_1310_td -# jdbc:h2:file:~/secure;CIPHER=AES - -@features_1311_a -#File locking methods - -@features_1312_td -# jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO} - -@features_1313_td -# jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET - -@features_1314_a -#Only open if it already exists - -@features_1315_td -# jdbc:h2:<url>;IFEXISTS=TRUE - -@features_1316_td -# jdbc:h2:file:~/sample;IFEXISTS=TRUE - -@features_1317_a -#Don't close the database when the VM exits - -@features_1318_td -# jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE - -@features_1319_a 
-#Execute SQL on connection - -@features_1320_td -# jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql' - -@features_1321_td -# jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql' - -@features_1322_a -#User name and/or password - -@features_1323_td -# jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>] - -@features_1324_td -# jdbc:h2:file:~/sample;USER=sa;PASSWORD=123 - -@features_1325_a -#Debug trace settings - -@features_1326_td -# jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3> - -@features_1327_td -# jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3 - -@features_1328_a -#Ignore unknown settings - -@features_1329_td -# jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE - -@features_1330_a -#Custom file access mode - -@features_1331_td -# jdbc:h2:<url>;ACCESS_MODE_DATA=rws - -@features_1332_a -#Database in a zip file - -@features_1333_td -# jdbc:h2:zip:<zipFileName>!/<databaseName> - -@features_1334_td -# jdbc:h2:zip:~/db.zip!/test - -@features_1335_a -#Compatibility mode - -@features_1336_td -# jdbc:h2:<url>;MODE=<databaseType> - -@features_1337_td -# jdbc:h2:~/test;MODE=MYSQL - -@features_1338_a -#Auto-reconnect - -@features_1339_td -# jdbc:h2:<url>;AUTO_RECONNECT=TRUE - -@features_1340_td -# jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE - -@features_1341_a -#Automatic mixed mode - -@features_1342_td -# jdbc:h2:<url>;AUTO_SERVER=TRUE - -@features_1343_td -# jdbc:h2:~/test;AUTO_SERVER=TRUE - -@features_1344_a -#Page size - -@features_1345_td -# jdbc:h2:<url>;PAGE_SIZE=512 - -@features_1346_a -#Changing other settings - -@features_1347_td -# jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...] - -@features_1348_td -# jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3 - -@features_1349_h2 -エンベッド (ローカル) データベース�?�接続 - -@features_1350_p -# The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>. The prefix file: is optional. 
If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in: jdbc:h2:~/test. - -@features_1351_h2 -#In-Memory Databases - -@features_1352_p -# For certain use cases (for example: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted. - -@features_1353_p -# In some cases, only one connection to a in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc:h2:mem: Opening two connections within the same virtual machine means opening two different (private) databases. - -@features_1354_p -# Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example: jdbc:h2:mem:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment. - -@features_1355_p -# To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as: jdbc:h2:tcp://localhost/mem:db1. - -@features_1356_p -# By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. 
To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1. - -@features_1357_h2 -#Database Files Encryption - -@features_1358_p -# The database files can be encrypted. Two encryption algorithm AES is supported. To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database. - -@features_1359_h3 -#Creating a New Database with File Encryption - -@features_1360_p -# By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as it would already exist. - -@features_1361_h3 -#Connecting to an Encrypted Database - -@features_1362_p -# The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database: - -@features_1363_h3 -#Encrypting or Decrypting a Database - -@features_1364_p -# To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES: - -@features_1365_h2 -データベースファイルロック - -@features_1366_p -# Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If database is closed, or if the process that opened the database terminates, this lock file is deleted. 
- -@features_1367_p -# The following file locking methods are implemented: - -@features_1368_li -#The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second. - -@features_1369_li -#The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. - -@features_1370_li -#The third method is FS. This will use native file locking using FileChannel.lock. - -@features_1371_li -#It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. - -@features_1372_p -# To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method: - -@features_1373_p -# For more information about the algorithms, see Advanced / File Locking Protocols. - -@features_1374_h2 -�?��?��?�存在�?�る場�?��?��?��?データベースを開�?? - -@features_1375_p -# By default, when an application calls DriverManager.getConnection(url, ...) and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. 
The complete URL may look like this: - -@features_1376_h2 -#Closing a Database - -@features_1377_h3 -データベース�?��?�延終了 - -@features_1378_p -# Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed: - -@features_1379_p -# The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10. - -@features_1380_h3 -#Don't Close a Database when the VM Exits - -@features_1381_p -# By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). 
The database URL to disable database closing on exit is: - -@features_1382_h2 -#Execute SQL on Connection - -@features_1383_p -# Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. - -@features_1384_p -# Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required: - -@features_1385_p -# Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead. - -@features_1386_h2 -未知�?�設定を無視 - -@features_1387_p -# Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignored such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE to the database URL. - -@features_1388_h2 -接続�?�開始�?�れ�?�時�?�他�?�設定を変更�?�る - -@features_1389_p -# In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc. 
- -@features_1390_h2 -カスタムファイル アクセスモード - -@features_1391_p -# Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA=r. Also supported are rws and rwd. This setting must be specified in the database URL: - -@features_1392_p -# For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk. - -@features_1393_h2 -複数�?�接続 - -@features_1394_h3 -�?�時�?�複数�?�データベースを開�?? - -@features_1395_p -# An application can open multiple databases at the same time, including multiple connections to the same database. The number of open database is only limited by the memory available. - -@features_1396_h3 ->�?��?�データベース�?��?�複数�?�接続: クライアント/サー�?ー - -@features_1397_p -# If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security). - -@features_1398_h3 -マル�?スレッドサ�?ート - -@features_1399_p -# This database is multithreading-safe. That means, if an application is multi-threaded, it does not need to worry about synchronizing access to the database. Internally, most requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait. - -@features_1400_p -# An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this. 
- -@features_1401_h3 -ロック�?ロックタイムアウト�?デッドロック - -@features_1402_p -# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement. - -@features_1403_p -# If a connection wants to reads from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown. - -@features_1404_p -# Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK releases all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. 
The following statements generate locks: - -@features_1405_th -ロック�?�種類 - -@features_1406_th -SQLステートメント - -@features_1407_td -Read - -@features_1408_td -#SELECT * FROM TEST; - -@features_1409_td -# CALL SELECT MAX(ID) FROM TEST; - -@features_1410_td -# SCRIPT; - -@features_1411_td -Write - -@features_1412_td -#SELECT * FROM TEST WHERE 1=0 FOR UPDATE; - -@features_1413_td -Write - -@features_1414_td -#INSERT INTO TEST VALUES(1, 'Hello'); - -@features_1415_td -# INSERT INTO TEST SELECT * FROM TEST; - -@features_1416_td -# UPDATE TEST SET NAME='Hi'; - -@features_1417_td -# DELETE FROM TEST; - -@features_1418_td -Write - -@features_1419_td -#ALTER TABLE TEST ...; - -@features_1420_td -# CREATE INDEX ... ON TEST ...; - -@features_1421_td -# DROP INDEX ...; - -@features_1422_p -# The number of seconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent. - -@features_1423_h3 -#Avoiding Deadlocks - -@features_1424_p -# To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. - -@features_1425_h2 -データベースファイルレイアウト - -@features_1426_p -# The following files are created for persistent databases: - -@features_1427_th -ファイル�?? - -@features_1428_th -説明 - -@features_1429_th -ファイル数 - -@features_1430_td -# test.h2.db - -@features_1431_td -# Database file. - -@features_1432_td -# Contains the transaction log, indexes, and data for all tables. - -@features_1433_td -# Format: <database>.h2.db - -@features_1434_td -# 1 per database - -@features_1435_td -# test.lock.db - -@features_1436_td -# Database lock file. 
- -@features_1437_td -# Automatically (re-)created while the database is in use. - -@features_1438_td -# Format: <database>.lock.db - -@features_1439_td -# 1 per database (only if in use) - -@features_1440_td -# test.trace.db - -@features_1441_td -# Trace file (if the trace option is enabled). - -@features_1442_td -# Contains trace information. - -@features_1443_td -# Format: <database>.trace.db - -@features_1444_td -# Renamed to <database>.trace.db.old is too big. - -@features_1445_td -# 0 or 1 per database - -@features_1446_td -# test.lobs.db/* - -@features_1447_td -# Directory containing one file for each - -@features_1448_td -# BLOB or CLOB value larger than a certain size. - -@features_1449_td -# Format: <id>.t<tableId>.lob.db - -@features_1450_td -# 1 per large object - -@features_1451_td -# test.123.temp.db - -@features_1452_td -# Temporary file. - -@features_1453_td -# Contains a temporary blob or a large result set. - -@features_1454_td -# Format: <database>.<id>.temp.db - -@features_1455_td -# 1 per object - -@features_1456_h3 -データベースファイル�?�移動�?�改�?? - -@features_1457_p -# Database name and location are not stored inside the database files. - -@features_1458_p -# While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). - -@features_1459_p -# As there is no platform specific data in the files, they can be moved to other operating systems without problems. - -@features_1460_h3 -�?ックアップ - -@features_1461_p -# When the database is closed, it is possible to backup the database files. - -@features_1462_p -# To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. 
- -@features_1463_h2 -ログ�?�リカ�?リー - -@features_1464_p -# Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. - -@features_1465_h2 -互�?�性 - -@features_1466_p -# All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however: - -@features_1467_p -# In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE). - -@features_1468_h3 -互�?�モード - -@features_1469_p -# For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode: - -@features_1470_h3 -#DB2 Compatibility Mode - -@features_1471_p -# To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 or the SQL statement SET MODE DB2. - -@features_1472_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1473_li -#Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. - -@features_1474_li -#Concatenating NULL with another value results in the other value. - -@features_1475_li -#Support the pseudo-table SYSIBM.SYSDUMMY1. 
- -@features_1476_h3 -#Derby Compatibility Mode - -@features_1477_p -# To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby or the SQL statement SET MODE Derby. - -@features_1478_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1479_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1480_li -#Concatenating NULL with another value results in the other value. - -@features_1481_li -#Support the pseudo-table SYSIBM.SYSDUMMY1. - -@features_1482_h3 -#HSQLDB Compatibility Mode - -@features_1483_p -# To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB or the SQL statement SET MODE HSQLDB. - -@features_1484_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1485_li -#When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required. - -@features_1486_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1487_li -#Text can be concatenated using '+'. - -@features_1488_h3 -#MS SQL Server Compatibility Mode - -@features_1489_p -# To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer or the SQL statement SET MODE MSSQLServer. - -@features_1490_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1491_li -#Identifiers may be quoted using square brackets as in [Test]. - -@features_1492_li -#For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. - -@features_1493_li -#Concatenating NULL with another value results in the other value. 
- -@features_1494_li -#Text can be concatenated using '+'. - -@features_1495_h3 -#MySQL Compatibility Mode - -@features_1496_p -# To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL or the SQL statement SET MODE MySQL. - -@features_1497_li -#When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. - -@features_1498_li -#Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name)); - -@features_1499_li -#Meta data calls return identifiers in lower case. - -@features_1500_li -#When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. - -@features_1501_li -#Concatenating NULL with another value results in the other value. - -@features_1502_p -# Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using =, LIKE, REGEXP. - -@features_1503_h3 -#Oracle Compatibility Mode - -@features_1504_p -# To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle or the SQL statement SET MODE Oracle. - -@features_1505_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1506_li -#When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. - -@features_1507_li -#Concatenating NULL with another value results in the other value. - -@features_1508_li -#Empty strings are treated like NULL values. 
- -@features_1509_h3 -#PostgreSQL Compatibility Mode - -@features_1510_p -# To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL or the SQL statement SET MODE PostgreSQL. - -@features_1511_li -#For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. - -@features_1512_li -#When converting a floating point number to an integer, the fractional digits are not be truncated, but the value is rounded. - -@features_1513_li -#The system columns CTID and OID are supported. - -@features_1514_li -#LOG(x) is base 10 in this mode. - -@features_1515_h2 -#Auto-Reconnect - -@features_1516_p -# The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT=TRUE to the database URL. - -@features_1517_p -# Re-connecting will open a new session. After an automatic re-connect, variables and local temporary tables definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created. - -@features_1518_p -# If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. - -@features_1519_h2 -#Automatic Mixed Mode - -@features_1520_p -# Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL: - -@features_1521_p -# Use the same URL for all connections to this database. 
Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. - -@features_1522_p -# The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). - -@features_1523_p -# All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp:// or ssl://) are not supported. This mode is not supported for in-memory databases. - -@features_1524_p -# Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). - -@features_1525_p -# When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090. 
- -@features_1526_h2 -#Page Size - -@features_1527_p -# The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. - -@features_1528_h2 -トレースオプションを使用�?�る - -@features_1529_p -# To find problems in an application, it is sometimes good to see what database operations where executed. This database offers the following trace features: - -@features_1530_li -#Trace to System.out and/or to a file - -@features_1531_li -#Support for trace levels OFF, ERROR, INFO, DEBUG - -@features_1532_li -#The maximum size of the trace file can be set - -@features_1533_li -#It is possible to generate Java source code from the trace file - -@features_1534_li -#Trace can be enabled at runtime by manually creating a file - -@features_1535_h3 -トレースオプション - -@features_1536_p -# The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is: - -@features_1537_p -# The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example: - -@features_1538_h3 -トレースファイル�?�最大サイズを設定 - -@features_1539_p -# When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example: - -@features_1540_h3 -Javaコード生�? 
- -@features_1541_p -# When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this: - -@features_1542_p -# To filter the Java source code, use the ConvertTraceFile tool as follows: - -@features_1543_p -# The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. The password is not listed in the trace file and therefore not included in the source code. - -@features_1544_h2 -#Using Other Logging APIs - -@features_1545_p -# By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J. - -@features_1546_a -#SLF4J - -@features_1547_p -# is a simple facade for various logging APIs and allows to plug in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. - -@features_1548_p -# To enable SLF4J, set the file trace level to 4 in the database URL: - -@features_1549_p -# Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. 
- -@features_1550_h2 -読�?��?�り専用データベース - -@features_1551_p -# If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether database is read-only: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). - -@features_1552_p -# Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. - -@features_1553_h2 -#Read Only Databases in Zip or Jar File - -@features_1554_p -# To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. - -@features_1555_p -# When the zip file is created, you can open the database in the zip file using the following database URL: - -@features_1556_p -# Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. 
The same indexes are used as when using a regular database. - -@features_1557_p -# If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip. - -@features_1558_h3 -破�??�?��?�データベースを開�?? - -@features_1559_p -# If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue. - -@features_1560_h2 -computed column / ベースインデックス�?�機能 - -@features_1561_p -# A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time: - -@features_1562_p -# Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column: - -@features_1563_p -# When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table: - -@features_1564_h2 -多次元インデックス - -@features_1565_p -# A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. 
- -@features_1566_p -# Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column). - -@features_1567_p -# The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. - -@features_1568_h2 -ユーザー定義�?�関数�?�ストアドプロシージャ - -@features_1569_p -# In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. - -@features_1570_h3 -#Referencing a Compiled Method - -@features_1571_p -# When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class: - -@features_1572_p -# The Java function must be registered in the database by calling CREATE ALIAS ... FOR: - -@features_1573_p -# For a complete sample application, see src/test/org/h2/samples/Function.java. 
- -@features_1574_h3 -#Declaring Functions as Source Code - -@features_1575_p -# When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example: - -@features_1576_p -# By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE: - -@features_1577_p -# The following template is used to create a complete Java class: - -@features_1578_h3 -#Method Overloading - -@features_1579_p -# Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code. - -@features_1580_h3 -データタイプマッピング関数 - -@features_1581_p -# Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. - -@features_1582_p -# SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object is mapped to OTHER (a serialized object). 
Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. - -@features_1583_h3 -#Functions That Require a Connection - -@features_1584_p -# If the first parameter of a Java function is a java.sql.Connection, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. - -@features_1585_h3 -#Functions Throwing an Exception - -@features_1586_p -# If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLException are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. - -@features_1587_h3 -#Functions Returning a Result Set - -@features_1588_p -# Functions may return a result set. Such a function can be called with the CALL statement: - -@features_1589_h3 -SimpleResultSetを使用�?�る - -@features_1590_p -# A function can create a result set using the SimpleResultSet tool: - -@features_1591_h3 -関数をテーブル�?��?��?�使用�?�る - -@features_1592_p -# A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection. Otherwise, the URL of the connection is jdbc:default:connection. 
- -@features_1593_h2 -#Pluggable or User-Defined Tables - -@features_1594_p -# For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. - -@features_1595_p -# In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this: - -@features_1596_p -# and then create the table from SQL like this: - -@features_1597_p -# It is also possible to pass in parameters to the table engine, like so: - -@features_1598_p -# In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. - -@features_1599_h2 -トリガー - -@features_1600_p -# This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). - -@features_1601_p -# The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database: - -@features_1602_p -# The trigger can be used to veto a change by throwing a SQLException. - -@features_1603_p -# As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This will allows to use the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented: - -@features_1604_h2 -データベースをコンパクト�?��?�る - -@features_1605_p -# Empty space in the database file re-used automatically. 
When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this: - -@features_1606_p -# See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. - -@features_1607_h2 -キャッシュ�?�設定 - -@features_1608_p -# The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE' - -@features_1609_p -# An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. 
- -@features_1610_p -# Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. - -@features_1611_p -# To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. - -@fragments_1000_div -#    - -@fragments_1001_label -#Search: - -@fragments_1002_label -#Highlight keyword(s) - -@fragments_1003_a -ホーム - -@fragments_1004_a -ダウンロード - -@fragments_1005_a -#Cheat Sheet - -@fragments_1006_b -ドキュメント - -@fragments_1007_a -クイックスタート - -@fragments_1008_a -インストール - -@fragments_1009_a -�?ュートリアル - -@fragments_1010_a -特徴 - -@fragments_1011_a -パフォーマンス - -@fragments_1012_a -#Advanced - -@fragments_1013_b -#Reference - -@fragments_1014_a -#SQL Grammar - -@fragments_1015_a -#Functions - -@fragments_1016_a -データ型 - -@fragments_1017_a -#Javadoc - -@fragments_1018_a -#PDF (1 MB) - -@fragments_1019_b -サ�?ート - -@fragments_1020_a -#FAQ - -@fragments_1021_a -#Error Analyzer - -@fragments_1022_a -#Google Group (English) - -@fragments_1023_a -#Google Group (Japanese) - -@fragments_1024_a -#Google Group (Chinese) - -@fragments_1025_b -#Appendix - -@fragments_1026_a -#History & Roadmap - -@fragments_1027_a -ライセンス - -@fragments_1028_a -ビルド - -@fragments_1029_a -#Links - -@fragments_1030_a -#JaQu - -@fragments_1031_a -#MVStore - -@fragments_1032_a -#Architecture - -@fragments_1033_td -  - -@frame_1000_h1 -H2 データベース エンジン - -@frame_1001_p -# Welcome to H2, the free SQL database. 
The main feature of H2 are: - -@frame_1002_li -#It is free to use for everybody, source code is included - -@frame_1003_li -#Written in Java, but also available as native executable - -@frame_1004_li -#JDBC and (partial) ODBC API - -@frame_1005_li -#Embedded and client/server modes - -@frame_1006_li -#Clustering is supported - -@frame_1007_li -#A web client is included - -@frame_1008_h2 -#No Javascript - -@frame_1009_p -# If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. - -@frame_1010_p -# Please enable Javascript, or go ahead without it: H2 Database Engine - -@history_1000_h1 -歴�?��?�ロードマップ - -@history_1001_a -# Change Log - -@history_1002_a -# Roadmap - -@history_1003_a -# History of this Database Engine - -@history_1004_a -# Why Java - -@history_1005_a -# Supporters - -@history_1006_h2 -変更履歴 - -@history_1007_p -# The up-to-date change log is available at http://www.h2database.com/html/changelog.html - -@history_1008_h2 -ロードマップ - -@history_1009_p -# The current roadmap is available at http://www.h2database.com/html/roadmap.html - -@history_1010_h2 -�?��?�データベースエンジン�?�歴�?� - -@history_1011_p -# The development of H2 was started in May 2004, but it was first published on December 14th 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continued to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. 
- -@history_1012_h2 -�?��?�Java�?��?��?� - -@history_1013_p -# The main reasons to use a Java database are: - -@history_1014_li -#Very simple to integrate in Java applications - -@history_1015_li -#Support for many different platforms - -@history_1016_li -#More secure than native applications (no buffer overflows) - -@history_1017_li -#User defined functions (or triggers) run very fast - -@history_1018_li -#Unicode support - -@history_1019_p -# Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. - -@history_1020_p -# Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. - -@history_1021_p -# Java is future proof: a lot of companies support Java. Java is now open source. - -@history_1022_p -# To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. - -@history_1023_h2 -支�?�者 - -@history_1024_p -# Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). 
- -@history_1025_a -#xso; xBase Software Ontwikkeling, Netherlands - -@history_1026_a -#Cognitect, USA - -@history_1027_a -#Code 42 Software, Inc., Minneapolis - -@history_1028_li -#Martin Wildam, Austria - -@history_1029_a -#Code Lutin, France - -@history_1030_a -#NetSuxxess GmbH, Germany - -@history_1031_a -#Poker Copilot, Steve McLeod, Germany - -@history_1032_a -#SkyCash, Poland - -@history_1033_a -#Lumber-mill, Inc., Japan - -@history_1034_a -#StockMarketEye, USA - -@history_1035_a -#Eckenfelder GmbH & Co.KG, Germany - -@history_1036_li -#Anthony Goubard, Netherlands - -@history_1037_li -#Richard Hickey, USA - -@history_1038_li -#Alessio Jacopo D'Adamo, Italy - -@history_1039_li -#Ashwin Jayaprakash, USA - -@history_1040_li -#Donald Bleyl, USA - -@history_1041_li -#Frank Berger, Germany - -@history_1042_li -#Florent Ramiere, France - -@history_1043_li -#Jun Iyama, Japan - -@history_1044_li -#Antonio Casqueiro, Portugal - -@history_1045_li -#Oliver Computing LLC, USA - -@history_1046_li -#Harpal Grover Consulting Inc., USA - -@history_1047_li -#Elisabetta Berlini, Italy - -@history_1048_li -#William Gilbert, USA - -@history_1049_li -#Antonio Dieguez Rojas, Chile - -@history_1050_a -#Ontology Works, USA - -@history_1051_li -#Pete Haidinyak, USA - -@history_1052_li -#William Osmond, USA - -@history_1053_li -#Joachim Ansorg, Germany - -@history_1054_li -#Oliver Soerensen, Germany - -@history_1055_li -#Christos Vasilakis, Greece - -@history_1056_li -#Fyodor Kupolov, Denmark - -@history_1057_li -#Jakob Jenkov, Denmark - -@history_1058_li -#Stéphane Chartrand, Switzerland - -@history_1059_li -#Glenn Kidd, USA - -@history_1060_li -#Gustav Trede, Sweden - -@history_1061_li -#Joonas Pulakka, Finland - -@history_1062_li -#Bjorn Darri Sigurdsson, Iceland - -@history_1063_li -#Iyama Jun, Japan - -@history_1064_li -#Gray Watson, USA - -@history_1065_li -#Erik Dick, Germany - -@history_1066_li -#Pengxiang Shao, China - -@history_1067_li -#Bilingual Marketing Group, USA - 
-@history_1068_li -#Philippe Marschall, Switzerland - -@history_1069_li -#Knut Staring, Norway - -@history_1070_li -#Theis Borg, Denmark - -@history_1071_li -#Mark De Mendonca Duske, USA - -@history_1072_li -#Joel A. Garringer, USA - -@history_1073_li -#Olivier Chafik, France - -@history_1074_li -#Rene Schwietzke, Germany - -@history_1075_li -#Jalpesh Patadia, USA - -@history_1076_li -#Takanori Kawashima, Japan - -@history_1077_li -#Terrence JC Huang, China - -@history_1078_a -#JiaDong Huang, Australia - -@history_1079_li -#Laurent van Roy, Belgium - -@history_1080_li -#Qian Chen, China - -@history_1081_li -#Clinton Hyde, USA - -@history_1082_li -#Kritchai Phromros, Thailand - -@history_1083_li -#Alan Thompson, USA - -@history_1084_li -#Ladislav Jech, Czech Republic - -@history_1085_li -#Dimitrijs Fedotovs, Latvia - -@history_1086_li -#Richard Manley-Reeve, United Kingdom - -@installation_1000_h1 -インストール - -@installation_1001_a -# Requirements - -@installation_1002_a -# Supported Platforms - -@installation_1003_a -# Installing the Software - -@installation_1004_a -# Directory Structure - -@installation_1005_h2 -必�?�?�件 - -@installation_1006_p -# To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. - -@installation_1007_h3 -#Database Engine - -@installation_1008_li -#Windows XP or Vista, Mac OS X, or Linux - -@installation_1009_li -#Sun Java 6 or newer - -@installation_1010_li -#Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB) - -@installation_1011_h3 -#H2 Console - -@installation_1012_li -#Mozilla Firefox - -@installation_1013_h2 -サ�?ート�?�れ�?��?�るプラットフォーム - -@installation_1014_p -# As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. 
Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. - -@installation_1015_h2 -ソフトウェア�?�インストール - -@installation_1016_p -# To install the software, run the installer or unzip it to a directory of your choice. - -@installation_1017_h2 -ディレクトリ構�? - -@installation_1018_p -# After installing, you should get the following directory structure: - -@installation_1019_th -ディレクトリ - -@installation_1020_th -コンテンツ - -@installation_1021_td -bin - -@installation_1022_td -JAR�?�batchファイル - -@installation_1023_td -docs - -@installation_1024_td -ドキュメント - -@installation_1025_td -docs/html - -@installation_1026_td -HTMLページ - -@installation_1027_td -docs/javadoc - -@installation_1028_td -Javadocファイル - -@installation_1029_td -#ext - -@installation_1030_td -#External dependencies (downloaded when building) - -@installation_1031_td -service - -@installation_1032_td -Windows Service�?��?��?�データベースを実行�?�るツール - -@installation_1033_td -src - -@installation_1034_td -Sourceファイル - -@installation_1035_td -#src/docsrc - -@installation_1036_td -#Documentation sources - -@installation_1037_td -#src/installer - -@installation_1038_td -#Installer, shell, and release build script - -@installation_1039_td -#src/main - -@installation_1040_td -#Database engine source code - -@installation_1041_td -#src/test - -@installation_1042_td -#Test source code - -@installation_1043_td -#src/tools - -@installation_1044_td -#Tools and database adapters source code - -@jaqu_1000_h1 -#JaQu - -@jaqu_1001_a -# What is JaQu - -@jaqu_1002_a -# Differences to Other Data Access Tools - -@jaqu_1003_a -# Current State - -@jaqu_1004_a -# Building the JaQu Library - -@jaqu_1005_a -# Requirements - -@jaqu_1006_a -# Example Code - -@jaqu_1007_a -# Configuration - -@jaqu_1008_a -# Natural Syntax - 
-@jaqu_1009_a -# Other Ideas - -@jaqu_1010_a -# Similar Projects - -@jaqu_1011_h2 -#What is JaQu - -@jaqu_1012_p -# Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. - -@jaqu_1013_p -# JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code: - -@jaqu_1014_p -# stands for the SQL statement: - -@jaqu_1015_h2 -#Differences to Other Data Access Tools - -@jaqu_1016_p -# Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. - -@jaqu_1017_p -# JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. - -@jaqu_1018_p -# JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). - -@jaqu_1019_h3 -#Restrictions - -@jaqu_1020_p -# Primitive types (eg. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. - -@jaqu_1021_h3 -#Why in Java? - -@jaqu_1022_p -# Most applications are written in Java. 
Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code. - -@jaqu_1023_h2 -#Current State - -@jaqu_1024_p -# Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under: - -@jaqu_1025_code -#src/test/org/h2/test/jaqu/* - -@jaqu_1026_li -# (samples and tests) - -@jaqu_1027_code -#src/tools/org/h2/jaqu/* - -@jaqu_1028_li -# (framework) - -@jaqu_1029_h2 -#Building the JaQu Library - -@jaqu_1030_p -# To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar. - -@jaqu_1031_h2 -必�?�?�件 - -@jaqu_1032_p -# JaQu requires Java 6. Annotations are not need. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. - -@jaqu_1033_h2 -#Example Code - -@jaqu_1034_h2 -#Configuration - -@jaqu_1035_p -# JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example: - -@jaqu_1036_p -# The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself. 
- -@jaqu_1037_h2 -#Natural Syntax - -@jaqu_1038_p -#The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is: - -@jaqu_1039_h2 -#Other Ideas - -@jaqu_1040_p -# This project has just been started, and nothing is fixed yet. Some ideas are: - -@jaqu_1041_li -#Support queries on collections (instead of using a database). - -@jaqu_1042_li -#Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). - -@jaqu_1043_li -#Internally use a JPA implementation (for example Hibernate) instead of SQL directly. - -@jaqu_1044_li -#Use PreparedStatements and cache them. - -@jaqu_1045_h2 -#Similar Projects - -@jaqu_1046_a -#iciql (a friendly fork of JaQu) - -@jaqu_1047_a -#Cement Framework - -@jaqu_1048_a -#Dreamsource ORM - -@jaqu_1049_a -#Empire-db - -@jaqu_1050_a -#JEQUEL: Java Embedded QUEry Language - -@jaqu_1051_a -#Joist - -@jaqu_1052_a -#jOOQ - -@jaqu_1053_a -#JoSQL - -@jaqu_1054_a -#LIQUidFORM - -@jaqu_1055_a -#Quaere (Alias implementation) - -@jaqu_1056_a -#Quaere - -@jaqu_1057_a -#Querydsl - -@jaqu_1058_a -#Squill - -@license_1000_h1 -ライセンス - -@license_1001_a -# Summary and License FAQ - -@license_1002_a -# Mozilla Public License Version 2.0 - -@license_1003_a -# Eclipse Public License - Version 1.0 - -@license_1004_a -# Export Control Classification Number (ECCN) - -@license_1005_h2 -#Summary and License FAQ - -@license_1006_p -# H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. - -@license_1007_li -#You can use H2 for free. - -@license_1008_li -#You can integrate it into your applications (including in commercial applications) and distribute it. 
- -@license_1009_li -#Files containing only your code are not covered by this license (it is 'commercial friendly'). - -@license_1010_li -#Modifications to the H2 source code must be published. - -@license_1011_li -#You don't need to provide the source code of H2 if you did not modify anything. - -@license_1012_li -#If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. - -@license_1013_p -# However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com. - -@license_1014_p -# About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. - -@license_1015_p -# If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. - -@license_1016_h2 -#Mozilla Public License Version 2.0 - -@license_1017_h3 -#1. Definitions - -@license_1018_p -#1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. - -@license_1019_p -#1.2. 
"Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. - -@license_1020_p -#1.3. "Contribution" means Covered Software of a particular Contributor. - -@license_1021_p -#1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. - -@license_1022_p -#1.5. "Incompatible With Secondary Licenses" means - -@license_1023_p -#a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - -@license_1024_p -#b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. - -@license_1025_p -#1.6. "Executable Form" means any form of the work other than Source Code Form. - -@license_1026_p -#1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. - -@license_1027_p -#1.8. "License" means this document. - -@license_1028_p -#1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. - -@license_1029_p -#1.10. "Modifications" means any of the following: - -@license_1030_p -#a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or - -@license_1031_p -#b. any new file in Source Code Form that contains any Covered Software. - -@license_1032_p -#1.11. 
"Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. - -@license_1033_p -#1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. - -@license_1034_p -#1.13. "Source Code Form" means the form of the work preferred for making modifications. - -@license_1035_p -#1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -@license_1036_h3 -#2. License Grants and Conditions - -@license_1037_h4 -#2.1. Grants - -@license_1038_p -#Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - -@license_1039_p -#under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and - -@license_1040_p -#under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. - -@license_1041_h4 -#2.2. 
Effective Date - -@license_1042_p -#The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. - -@license_1043_h4 -#2.3. Limitations on Grant Scope - -@license_1044_p -#The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: - -@license_1045_p -#for any code that a Contributor has removed from Covered Software; or - -@license_1046_p -#for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - -@license_1047_p -#under Patent Claims infringed by Covered Software in the absence of its Contributions. - -@license_1048_p -#This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). - -@license_1049_h4 -#2.4. Subsequent Licenses - -@license_1050_p -#No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). - -@license_1051_h4 -#2.5. Representation - -@license_1052_p -#Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. - -@license_1053_h4 -#2.6. 
Fair Use - -@license_1054_p -#This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. - -@license_1055_h4 -#2.7. Conditions - -@license_1056_p -#Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. - -@license_1057_h3 -#3. Responsibilities - -@license_1058_h4 -#3.1. Distribution of Source Form - -@license_1059_p -#All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. - -@license_1060_h4 -#3.2. Distribution of Executable Form - -@license_1061_p -#If You distribute Covered Software in Executable Form then: - -@license_1062_p -#such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - -@license_1063_p -#You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. - -@license_1064_h4 -#3.3. Distribution of a Larger Work - -@license_1065_p -#You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). - -@license_1066_h4 -#3.4. Notices - -@license_1067_p -#You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. - -@license_1068_h4 -#3.5. Application of Additional Terms - -@license_1069_p -#You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. - -@license_1070_h3 -#4. 
Inability to Comply Due to Statute or Regulation - -@license_1071_p -#If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - -@license_1072_h3 -#5. Termination - -@license_1073_p -#5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. - -@license_1074_p -#5.2. 
If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. - -@license_1075_p -#5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. - -@license_1076_h3 -#6. Disclaimer of Warranty - -@license_1077_p -#Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. - -@license_1078_h3 -#7. 
Limitation of Liability - -@license_1079_p -#Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. - -@license_1080_h3 -#8. Litigation - -@license_1081_p -#Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. - -@license_1082_h3 -#9. Miscellaneous - -@license_1083_p -#This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. - -@license_1084_h3 -#10. Versions of the License - -@license_1085_h4 -#10.1. New Versions - -@license_1086_p -#Mozilla Foundation is the license steward. 
Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. - -@license_1087_h4 -#10.2. Effect of New Versions - -@license_1088_p -#You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. - -@license_1089_h4 -#10.3. Modified Versions - -@license_1090_p -#If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). - -@license_1091_h4 -#10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - -@license_1092_p -#If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. - -@license_1093_h3 -#Exhibit A - Source Code Form License Notice - -@license_1094_p -#If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. - -@license_1095_p -#You may add additional accurate notices of copyright ownership. - -@license_1096_h3 -#Exhibit B - "Incompatible With Secondary Licenses" Notice - -@license_1097_h2 -#Eclipse Public License - Version 1.0 - -@license_1098_p -# THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. 
- -@license_1099_h3 -#1. DEFINITIONS - -@license_1100_p -# "Contribution" means: - -@license_1101_p -# a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - -@license_1102_p -# b) in the case of each subsequent Contributor: - -@license_1103_p -# i) changes to the Program, and - -@license_1104_p -# ii) additions to the Program; - -@license_1105_p -# where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -@license_1106_p -# "Contributor" means any person or entity that distributes the Program. - -@license_1107_p -# "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -@license_1108_p -# "Program" means the Contributions distributed in accordance with this Agreement. - -@license_1109_p -# "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -@license_1110_h3 -#2. GRANT OF RIGHTS - -@license_1111_p -# a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. 
- -@license_1112_p -# b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - -@license_1113_p -# c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. - -@license_1114_p -# d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -@license_1115_h3 -#3. 
REQUIREMENTS - -@license_1116_p -# A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -@license_1117_p -# a) it complies with the terms and conditions of this Agreement; and - -@license_1118_p -# b) its license agreement: - -@license_1119_p -# i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - -@license_1120_p -# ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - -@license_1121_p -# iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - -@license_1122_p -# iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -@license_1123_p -# When the Program is made available in source code form: - -@license_1124_p -# a) it must be made available under this Agreement; and - -@license_1125_p -# b) a copy of this Agreement must be included with each copy of the Program. - -@license_1126_p -# Contributors may not remove or alter any copyright notices contained within the Program. - -@license_1127_p -# Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -@license_1128_h3 -#4. COMMERCIAL DISTRIBUTION - -@license_1129_p -# Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. 
While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -@license_1130_p -# For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -@license_1131_h3 -#5. 
NO WARRANTY - -@license_1132_p -# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -@license_1133_h3 -#6. DISCLAIMER OF LIABILITY - -@license_1134_p -# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -@license_1135_h3 -#7. GENERAL - -@license_1136_p -# If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
- -@license_1137_p -# If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -@license_1138_p -# All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -@license_1139_p -# Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -@license_1140_p -# This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. - -@license_1141_h2 -#Export Control Classification Number (ECCN) - -@license_1142_p -# As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. - -@links_1000_h1 -#Links - -@links_1001_p -# If you want to add a link, please send it to the support email address or post it to the group. - -@links_1002_a -# Commercial Support - -@links_1003_a -# Quotes - -@links_1004_a -# Books - -@links_1005_a -# Extensions - -@links_1006_a -# Blog Articles, Videos - -@links_1007_a -# Database Frontends / Tools - -@links_1008_a -# Products and Projects - -@links_1009_h2 -#Commercial Support - -@links_1010_a -#Commercial support for H2 is available - -@links_1011_p -# from Steve McLeod (steve dot mcleod at gmail dot com). Please note he is not one of the main developers of H2. He describes himself as follows: - -@links_1012_li -#I'm a long time user of H2, routinely working with H2 databases several gigabytes in size. - -@links_1013_li -#I'm the creator of popular commercial desktop software that uses H2. - -@links_1014_li -#I'm a certified Java developer (SCJP). 
- -@links_1015_li -#I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany. - -@links_1016_li -#I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations." - -@links_1017_h2 -#Quotes - -@links_1018_a -# Quote - -@links_1019_p -#: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " - -@links_1020_h2 -#Books - -@links_1021_a -# Seam In Action - -@links_1022_h2 -#Extensions - -@links_1023_a -# Grails H2 Database Plugin - -@links_1024_a -# h2osgi: OSGi for the H2 Database - -@links_1025_a -# H2Sharp: ADO.NET interface for the H2 database engine - -@links_1026_a -# A spatial extension of the H2 database. - -@links_1027_h2 -#Blog Articles, Videos - -@links_1028_a -# Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 - -@links_1029_a -# Analyzing CSVs with H2 in under 10 minutes (2009-12-07) - -@links_1030_a -# Efficient sorting and iteration on large databases (2009-06-15) - -@links_1031_a -# Porting Flexive to the H2 Database (2008-12-05) - -@links_1032_a -# H2 Database with GlassFish (2008-11-24) - -@links_1033_a -# H2 Database - Performance Tracing (2008-04-30) - -@links_1034_a -# Open Source Databases Comparison (2007-09-11) - -@links_1035_a -# The Codist: The Open Source Frameworks I Use (2007-07-23) - -@links_1036_a -# The Codist: SQL Injections: How Not To Get Stuck (2007-05-08) - -@links_1037_a -# David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06) - -@links_1038_a -# The Codist: Write Your Own Database, Again (2006-11-13) - -@links_1039_h2 -#Project Pages - -@links_1040_a -# Ohloh - -@links_1041_a -# Freshmeat Project Page - -@links_1042_a -# Wikipedia - -@links_1043_a -# Java Source Net - -@links_1044_a -# 
Linux Package Manager - -@links_1045_h2 -#Database Frontends / Tools - -@links_1046_a -# Dataflyer - -@links_1047_p -# A tool to browse databases and export data. - -@links_1048_a -# DB Solo - -@links_1049_p -# SQL query tool. - -@links_1050_a -# DbVisualizer - -@links_1051_p -# Database tool. - -@links_1052_a -# Execute Query - -@links_1053_p -# Database utility written in Java. - -@links_1054_a -# Flyway - -@links_1055_p -# The agile database migration framework for Java. - -@links_1056_a -# [fleXive] - -@links_1057_p -# JavaEE 5 open source framework for the development of complex and evolving (web-)applications. - -@links_1058_a -# JDBC Console - -@links_1059_p -# This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. - -@links_1060_a -# HenPlus - -@links_1061_p -# HenPlus is a SQL shell written in Java. - -@links_1062_a -# JDBC lint - -@links_1063_p -# Helps write correct and efficient code when using the JDBC API. - -@links_1064_a -# OpenOffice - -@links_1065_p -# Base is OpenOffice.org's database application. It provides access to relational data sources. - -@links_1066_a -# RazorSQL - -@links_1067_p -# An SQL query tool, database browser, SQL editor, and database administration tool. - -@links_1068_a -# SQL Developer - -@links_1069_p -# Universal Database Frontend. - -@links_1070_a -# SQL Workbench/J - -@links_1071_p -# Free DBMS-independent SQL tool. - -@links_1072_a -# SQuirreL SQL Client - -@links_1073_p -# Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. - -@links_1074_a -# SQuirreL DB Copy Plugin - -@links_1075_p -# Tool to copy data from one database to another. - -@links_1076_h2 -#Products and Projects - -@links_1077_a -# AccuProcess - -@links_1078_p -# Visual business process modeling and simulation software for business users. 
- -@links_1079_a -# Adeptia BPM - -@links_1080_p -# A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. - -@links_1081_a -# Adeptia Integration - -@links_1082_p -# Process-centric, services-based application integration suite. - -@links_1083_a -# Aejaks - -@links_1084_p -# A server-side scripting environment to build AJAX enabled web applications. - -@links_1085_a -# Axiom Stack - -@links_1086_p -# A web framework that let's you write dynamic web applications with Zen-like simplicity. - -@links_1087_a -# Apache Cayenne - -@links_1088_p -# Open source persistence framework providing object-relational mapping (ORM) and remoting services. - -@links_1089_a -# Apache Jackrabbit - -@links_1090_p -# Open source implementation of the Java Content Repository API (JCR). - -@links_1091_a -# Apache OpenJPA - -@links_1092_p -# Open source implementation of the Java Persistence API (JPA). - -@links_1093_a -# AppFuse - -@links_1094_p -# Helps building web applications. - -@links_1095_a -# BGBlitz - -@links_1096_p -# The Swiss army knife of Backgammon. - -@links_1097_a -# Bonita - -@links_1098_p -# Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. - -@links_1099_a -# Bookmarks Portlet - -@links_1100_p -# JSR 168 compliant bookmarks management portlet application. - -@links_1101_a -# Claros inTouch - -@links_1102_p -# Ajax communication suite with mail, addresses, notes, IM, and rss reader. - -@links_1103_a -# CrashPlan PRO Server - -@links_1104_p -# Easy and cross platform backup solution for business and service providers. - -@links_1105_a -# DataNucleus - -@links_1106_p -# Java persistent objects. - -@links_1107_a -# DbUnit - -@links_1108_p -# A JUnit extension (also usable with Ant) targeted for database-driven projects. 
- -@links_1109_a -# DiffKit - -@links_1110_p -# DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. - -@links_1111_a -# Dinamica Framework - -@links_1112_p -# Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). - -@links_1113_a -# District Health Information Software 2 (DHIS) - -@links_1114_p -# The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. - -@links_1115_a -# Ebean ORM Persistence Layer - -@links_1116_p -# Open source Java Object Relational Mapping tool. - -@links_1117_a -# Eclipse CDO - -@links_1118_p -# The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. - -@links_1119_a -# Fabric3 - -@links_1120_p -# Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org). - -@links_1121_a -# FIT4Data - -@links_1122_p -# A testing framework for data management applications built on the Java implementation of FIT. - -@links_1123_a -# Flux - -@links_1124_p -# Java job scheduler, file transfer, workflow, and BPM. - -@links_1125_a -# GeoServer - -@links_1126_p -# GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. - -@links_1127_a -# GBIF Integrated Publishing Toolkit (IPT) - -@links_1128_p -# The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata. 
- -@links_1129_a -# GNU Gluco Control - -@links_1130_p -# Helps you to manage your diabetes. - -@links_1131_a -# Golden T Studios - -@links_1132_p -# Fun-to-play games with a simple interface. - -@links_1133_a -# GridGain - -@links_1134_p -# GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. - -@links_1135_a -# Group Session - -@links_1136_p -# Open source web groupware. - -@links_1137_a -# HA-JDBC - -@links_1138_p -# High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. - -@links_1139_a -# Hibernate - -@links_1140_p -# Relational persistence for idiomatic Java (O-R mapping tool). - -@links_1141_a -# Hibicius - -@links_1142_p -# Online Banking Client for the HBCI protocol. - -@links_1143_a -# ImageMapper - -@links_1144_p -# ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. - -@links_1145_a -# JAMWiki - -@links_1146_p -# Java-based Wiki engine. - -@links_1147_a -# Jaspa - -@links_1148_p -# Java Spatial. Jaspa potentially brings around 200 spatial functions. - -@links_1149_a -# Java Simon - -@links_1150_p -# Simple Monitoring API. - -@links_1151_a -# JBoss jBPM - -@links_1152_p -# A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. - -@links_1153_a -# JBoss Jopr - -@links_1154_p -# An enterprise management solution for JBoss middleware projects and other application technologies. - -@links_1155_a -# JGeocoder - -@links_1156_p -# Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. - -@links_1157_a -# JGrass - -@links_1158_p -# Java Geographic Resources Analysis Support System. 
Free, multi platform, open source GIS based on the GIS framework of uDig. - -@links_1159_a -# Jena - -@links_1160_p -# Java framework for building Semantic Web applications. - -@links_1161_a -# JMatter - -@links_1162_p -# Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. - -@links_1163_a -# jOOQ (Java Object Oriented Querying) - -@links_1164_p -# jOOQ is a fluent API for typesafe SQL query construction and execution - -@links_1165_a -# Liftweb - -@links_1166_p -# A Scala-based, secure, developer friendly web framework. - -@links_1167_a -# LiquiBase - -@links_1168_p -# A tool to manage database changes and refactorings. - -@links_1169_a -# Luntbuild - -@links_1170_p -# Build automation and management tool. - -@links_1171_a -# localdb - -@links_1172_p -# A tool that locates the full file path of the folder containing the database files. - -@links_1173_a -# Magnolia - -@links_1174_p -# Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. - -@links_1175_a -# MiniConnectionPoolManager - -@links_1176_p -# A lightweight standalone JDBC connection pool manager. - -@links_1177_a -# Mr. Persister - -@links_1178_p -# Simple, small and fast object relational mapping. - -@links_1179_a -# Myna Application Server - -@links_1180_p -# Java web app that provides dynamic web content and Java libraries access from JavaScript. - -@links_1181_a -# MyTunesRss - -@links_1182_p -# MyTunesRSS lets you listen to your music wherever you are. - -@links_1183_a -# NCGC CurveFit - -@links_1184_p -# From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. 
It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. - -@links_1185_a -# Nuxeo - -@links_1186_p -# Standards-based, open source platform for building ECM applications. - -@links_1187_a -# nWire - -@links_1188_p -# Eclipse plug-in which expedites Java development. It's main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. - -@links_1189_a -# Ontology Works - -@links_1190_p -# This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. - -@links_1191_a -# Ontoprise OntoBroker - -@links_1192_p -# SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic. - -@links_1193_a -# Open Anzo - -@links_1194_p -# Semantic Application Server. - -@links_1195_a -# OpenGroove - -@links_1196_p -# OpenGroove is a groupware program that allows users to synchronize data. - -@links_1197_a -# OpenSocial Development Environment (OSDE) - -@links_1198_p -# Development tool for OpenSocial application. - -@links_1199_a -# Orion - -@links_1200_p -# J2EE Application Server. 
- -@links_1201_a -# P5H2 - -@links_1202_p -# A library for the Processing programming language and environment. - -@links_1203_a -# Phase-6 - -@links_1204_p -# A computer based learning software. - -@links_1205_a -# Pickle - -@links_1206_p -# Pickle is a Java library containing classes for persistence, concurrency, and logging. - -@links_1207_a -# Piman - -@links_1208_p -# Water treatment projects data management. - -@links_1209_a -# PolePosition - -@links_1210_p -# Open source database benchmark. - -@links_1211_a -# Poormans - -@links_1212_p -# Very basic CMS running as a SWT application and generating static html pages. - -@links_1213_a -# Railo - -@links_1214_p -# Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. - -@links_1215_a -# Razuna - -@links_1216_p -# Open source Digital Asset Management System with integrated Web Content Management. - -@links_1217_a -# RIFE - -@links_1218_p -# A full-stack web application framework with tools and APIs to implement most common web features. - -@links_1219_a -# Sava - -@links_1220_p -# Open-source web-based content management system. - -@links_1221_a -# Scriptella - -@links_1222_p -# ETL (Extract-Transform-Load) and script execution tool. - -@links_1223_a -# Sesar - -@links_1224_p -# Dependency Injection Container with Aspect Oriented Programming. - -@links_1225_a -# SemmleCode - -@links_1226_p -# Eclipse plugin to help you improve software quality. - -@links_1227_a -# SeQuaLite - -@links_1228_p -# A free, light-weight, java data access framework. - -@links_1229_a -# ShapeLogic - -@links_1230_p -# Toolkit for declarative programming, image processing and computer vision. - -@links_1231_a -# Shellbook - -@links_1232_p -# Desktop publishing application. - -@links_1233_a -# Signsoft intelliBO - -@links_1234_p -# Persistence middleware supporting the JDO specification. 
- -@links_1235_a -# SimpleORM - -@links_1236_p -# Simple Java Object Relational Mapping. - -@links_1237_a -# SymmetricDS - -@links_1238_p -# A web-enabled, database independent, data synchronization/replication software. - -@links_1239_a -# SmartFoxServer - -@links_1240_p -# Platform for developing multiuser applications and games with Macromedia Flash. - -@links_1241_a -# Social Bookmarks Friend Finder - -@links_1242_p -# A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). - -@links_1243_a -# sormula - -@links_1244_p -# Simple object relational mapping. - -@links_1245_a -# Springfuse - -@links_1246_p -# Code generation For Spring, Spring MVC & Hibernate. - -@links_1247_a -# SQLOrm - -@links_1248_p -# Java Object Relation Mapping. - -@links_1249_a -# StelsCSV and StelsXML - -@links_1250_p -# StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. - -@links_1251_a -# StorYBook - -@links_1252_p -# A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. - -@links_1253_a -# StreamCruncher - -@links_1254_p -# Event (stream) processing kernel. - -@links_1255_a -# SUSE Manager, part of Linux Enterprise Server 11 - -@links_1256_p -# The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. - -@links_1257_a -# Tune Backup - -@links_1258_p -# Easy-to-use backup solution for your iTunes library. - -@links_1259_a -# weblica - -@links_1260_p -# Desktop CMS. - -@links_1261_a -# Web of Web - -@links_1262_p -# Collaborative and realtime interactive media platform for the web. - -@links_1263_a -# Werkzeugkasten - -@links_1264_p -# Minimum Java Toolset. 
- -@links_1265_a -# VPDA - -@links_1266_p -# View providers driven applications is a Java based application framework for building applications composed from server components - view providers. - -@links_1267_a -# Volunteer database - -@links_1268_p -# A database front end to register volunteers, partnership and donation for a Non Profit organization. - -@mainWeb_1000_h1 -H2 データベース エンジン - -@mainWeb_1001_p -# Welcome to H2, the Java SQL database. The main features of H2 are: - -@mainWeb_1002_li -#Very fast, open source, JDBC API - -@mainWeb_1003_li -#Embedded and server modes; in-memory databases - -@mainWeb_1004_li -#Browser based Console application - -@mainWeb_1005_li -#Small footprint: around 1.5 MB jar file size - -@mainWeb_1006_h2 -ダウンロード - -@mainWeb_1007_td -# Version 1.4.187 (2015-04-10), Beta - -@mainWeb_1008_a -#Windows Installer (5 MB) - -@mainWeb_1009_a -#All Platforms (zip, 8 MB) - -@mainWeb_1010_a -#All Downloads - -@mainWeb_1011_td -    - -@mainWeb_1012_h2 -サポート - -@mainWeb_1013_a -#Stack Overflow (tag H2) - -@mainWeb_1014_a -#Google Group English - -@mainWeb_1015_p -#, Japanese - -@mainWeb_1016_p -# For non-technical issues, use: - -@mainWeb_1017_h2 -特徴 - -@mainWeb_1018_th -H2 - -@mainWeb_1019_a -Derby - -@mainWeb_1020_a -HSQLDB - -@mainWeb_1021_a -MySQL - -@mainWeb_1022_a -PostgreSQL - -@mainWeb_1023_td -Pure Java - -@mainWeb_1024_td -対応 - -@mainWeb_1025_td -対応 - -@mainWeb_1026_td -対応 - -@mainWeb_1027_td -非対応 - -@mainWeb_1028_td -非対応 - -@mainWeb_1029_td -#Memory Mode - -@mainWeb_1030_td -対応 - -@mainWeb_1031_td -対応 - -@mainWeb_1032_td -対応 - -@mainWeb_1033_td -非対応 - -@mainWeb_1034_td -非対応 - -@mainWeb_1035_td -暗号化データベース - -@mainWeb_1036_td -対応 - -@mainWeb_1037_td -対応 - -@mainWeb_1038_td -対応 - -@mainWeb_1039_td -非対応 - -@mainWeb_1040_td -非対応 - -@mainWeb_1041_td -ODBCドライバ - -@mainWeb_1042_td -対応 - -@mainWeb_1043_td -非対応 - -@mainWeb_1044_td -非対応 - -@mainWeb_1045_td -対応 - -@mainWeb_1046_td -対応 - -@mainWeb_1047_td -フルテキストサーチ 
- -@mainWeb_1048_td -対応 - -@mainWeb_1049_td -�?�対応 - -@mainWeb_1050_td -�?�対応 - -@mainWeb_1051_td -対応 - -@mainWeb_1052_td -対応 - -@mainWeb_1053_td -#Multi Version Concurrency - -@mainWeb_1054_td -対応 - -@mainWeb_1055_td -�?�対応 - -@mainWeb_1056_td -対応 - -@mainWeb_1057_td -対応 - -@mainWeb_1058_td -対応 - -@mainWeb_1059_td -フットプリント (jar/dll size) - -@mainWeb_1060_td -#~1 MB - -@mainWeb_1061_td -#~2 MB - -@mainWeb_1062_td -#~1 MB - -@mainWeb_1063_td -#~4 MB - -@mainWeb_1064_td -#~6 MB - -@mainWeb_1065_p -# See also the detailed comparison. - -@mainWeb_1066_h2 -ニュース - -@mainWeb_1067_b -ニュースフィード: - -@mainWeb_1068_a -#Full text (Atom) - -@mainWeb_1069_p -# or Header only (RSS). - -@mainWeb_1070_b -Email ニュースレター: - -@mainWeb_1071_p -# Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. - -@mainWeb_1072_td -  - -@mainWeb_1073_h2 -寄稿�?�る - -@mainWeb_1074_p -# You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter: - -@main_1000_h1 -H2 データベース エンジン - -@main_1001_p -# Welcome to H2, the free Java SQL database engine. - -@main_1002_a -クイックスタート - -@main_1003_p -# Get a fast overview. - -@main_1004_a -�?ュートリアル - -@main_1005_p -# Go through the samples. - -@main_1006_a -特徴 - -@main_1007_p -# See what this database can do and how to use these features. 
- -@mvstore_1000_h1 -#MVStore - -@mvstore_1001_a -# Overview - -@mvstore_1002_a -# Example Code - -@mvstore_1003_a -# Store Builder - -@mvstore_1004_a -# R-Tree - -@mvstore_1005_a -# Features - -@mvstore_1006_a -#- Maps - -@mvstore_1007_a -#- Versions - -@mvstore_1008_a -#- Transactions - -@mvstore_1009_a -#- In-Memory Performance and Usage - -@mvstore_1010_a -#- Pluggable Data Types - -@mvstore_1011_a -#- BLOB Support - -@mvstore_1012_a -#- R-Tree and Pluggable Map Implementations - -@mvstore_1013_a -#- Concurrent Operations and Caching - -@mvstore_1014_a -#- Log Structured Storage - -@mvstore_1015_a -#- Off-Heap and Pluggable Storage - -@mvstore_1016_a -#- File System Abstraction, File Locking and Online Backup - -@mvstore_1017_a -#- Encrypted Files - -@mvstore_1018_a -#- Tools - -@mvstore_1019_a -#- Exception Handling - -@mvstore_1020_a -#- Storage Engine for H2 - -@mvstore_1021_a -# File Format - -@mvstore_1022_a -# Similar Projects and Differences to Other Storage Engines - -@mvstore_1023_a -# Current State - -@mvstore_1024_a -# Requirements - -@mvstore_1025_h2 -#Overview - -@mvstore_1026_p -# The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. - -@mvstore_1027_li -#MVStore stands for "multi-version store". - -@mvstore_1028_li -#Each store contains a number of maps that can be accessed using the java.util.Map interface. - -@mvstore_1029_li -#Both file-based persistence and in-memory operation are supported. - -@mvstore_1030_li -#It is intended to be fast, simple to use, and small. - -@mvstore_1031_li -#Concurrent read and write operations are supported. - -@mvstore_1032_li -#Transactions are supported (including concurrent transactions and 2-phase commit). - -@mvstore_1033_li -#The tool is very modular. 
It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. - -@mvstore_1034_h2 -#Example Code - -@mvstore_1035_p -# The following sample code shows how to use the tool: - -@mvstore_1036_h2 -#Store Builder - -@mvstore_1037_p -# The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage: - -@mvstore_1038_p -# The list of available options is: - -@mvstore_1039_li -#autoCommitBufferSize: the size of the write buffer. - -@mvstore_1040_li -#autoCommitDisabled: to disable auto-commit. - -@mvstore_1041_li -#backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background. - -@mvstore_1042_li -#cacheSize: the cache size in MB. - -@mvstore_1043_li -#compress: compress the data when storing using a fast algorithm (LZF). - -@mvstore_1044_li -#compressHigh: compress the data when storing using a slower algorithm (Deflate). - -@mvstore_1045_li -#encryptionKey: the key for file encryption. - -@mvstore_1046_li -#fileName: the name of the file, for file based stores. - -@mvstore_1047_li -#fileStore: the storage implementation to use. - -@mvstore_1048_li -#pageSplitSize: the point where pages are split. - -@mvstore_1049_li -#readOnly: open the file in read-only mode. - -@mvstore_1050_h2 -#R-Tree - -@mvstore_1051_p -# The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows: - -@mvstore_1052_p -# The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. - -@mvstore_1053_h2 -特徴 - -@mvstore_1054_h3 -#Maps - -@mvstore_1055_p -# Each store contains a set of named maps. 
A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. - -@mvstore_1056_p -# Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. - -@mvstore_1057_p -# In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). - -@mvstore_1058_h3 -#Versions - -@mvstore_1059_p -# A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. - -@mvstore_1060_p -# The following sample code shows how to create a store, open a map, add some data, and access the current and an old version: - -@mvstore_1061_h3 -#Transactions - -@mvstore_1062_p -# To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
- -@mvstore_1063_p -# Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. - -@mvstore_1064_h3 -#In-Memory Performance and Usage - -@mvstore_1065_p -# Performance of in-memory operations is about 50% slower than java.util.TreeMap. - -@mvstore_1066_p -# The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. - -@mvstore_1067_p -# If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. - -@mvstore_1068_p -# As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). - -@mvstore_1069_h3 -#Pluggable Data Types - -@mvstore_1070_p -# Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
- -@mvstore_1071_p -# Parameterized data types are supported (for example one could build a string data type that limits the length). - -@mvstore_1072_p -# The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. - -@mvstore_1073_h3 -#BLOB Support - -@mvstore_1074_p -# There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. - -@mvstore_1075_h3 -#R-Tree and Pluggable Map Implementations - -@mvstore_1076_p -# The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations. - -@mvstore_1077_h3 -#Concurrent Operations and Caching - -@mvstore_1078_p -# Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. - -@mvstore_1079_p -# Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. 
- -@mvstore_1080_p -# For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed. - -@mvstore_1081_h3 -#Log Structured Storage - -@mvstore_1082_p -# Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). - -@mvstore_1083_p -# When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). - -@mvstore_1084_p -# There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. 
There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). - -@mvstore_1085_p -# Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. - -@mvstore_1086_p -# Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). - -@mvstore_1087_h3 -#Off-Heap and Pluggable Storage - -@mvstore_1088_p -# Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. - -@mvstore_1089_p -# An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call: - -@mvstore_1090_h3 -#File System Abstraction, File Locking and Online Backup - -@mvstore_1091_p -# The file system is pluggable. 
The same file system abstraction is used as H2 uses. The file can be encrypted using an encrypting file system wrapper. Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. - -@mvstore_1092_p -# Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. - -@mvstore_1093_p -# The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. - -@mvstore_1094_h3 -#Encrypted Files - -@mvstore_1095_p -# File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows: - -@mvstore_1096_p -# The following algorithms and settings are used: - -@mvstore_1097_li -#The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. - -@mvstore_1098_li -#The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. - -@mvstore_1099_li -#The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. - -@mvstore_1100_li -#To speed up opening encrypted stores on Android, the number of PBKDF2 iterations is 10. 
The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. - -@mvstore_1101_li -#The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only little more than one AES-128 round per block is needed. - -@mvstore_1102_h3 -#Tools - -@mvstore_1103_p -# There is a tool, the MVStoreTool, to dump the contents of a file. - -@mvstore_1104_h3 -#Exception Handling - -@mvstore_1105_p -# This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur: - -@mvstore_1106_code -#IllegalStateException - -@mvstore_1107_li -# if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. - -@mvstore_1108_code -#IllegalArgumentException - -@mvstore_1109_li -# if a method was called with an illegal argument. - -@mvstore_1110_code -#UnsupportedOperationException - -@mvstore_1111_li -# if a method was called that is not supported, for example trying to modify a read-only map. - -@mvstore_1112_code -#ConcurrentModificationException - -@mvstore_1113_li -# if a map is modified concurrently. - -@mvstore_1114_h3 -#Storage Engine for H2 - -@mvstore_1115_p -# For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. - -@mvstore_1116_h2 -#File Format - -@mvstore_1117_p -# The data is stored in one file. 
The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. - -@mvstore_1118_p -# Each chunk contains a number of B-tree pages. As an example, the following code: - -@mvstore_1119_p -# will result in the following two chunks (excluding metadata): - -@mvstore_1120_b -#Chunk 1: - -@mvstore_1121_p -# - Page 1: (root) node with 2 entries pointing to page 2 and 3 - -@mvstore_1122_p -# - Page 2: leaf with 140 entries (keys 0 - 139) - -@mvstore_1123_p -# - Page 3: leaf with 260 entries (keys 140 - 399) - -@mvstore_1124_b -#Chunk 2: - -@mvstore_1125_p -# - Page 4: (root) node with 2 entries pointing to page 3 and 5 - -@mvstore_1126_p -# - Page 5: leaf with 140 entries (keys 0 - 139) - -@mvstore_1127_p -# That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. - -@mvstore_1128_h3 -#File Header - -@mvstore_1129_p -# There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data: - -@mvstore_1130_p -# The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are: - -@mvstore_1131_li -#H: The entry "H:2" stands for the H2 database. - -@mvstore_1132_li -#block: The block number where one of the newest chunks starts (but not necessarily the newest). 
- -@mvstore_1133_li -#blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. - -@mvstore_1134_li -#chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. - -@mvstore_1135_li -#created: The number of milliseconds since 1970 when the file was created. - -@mvstore_1136_li -#format: The file format number. Currently 1. - -@mvstore_1137_li -#version: The version number of the chunk. - -@mvstore_1138_li -#fletcher: The Fletcher-32 checksum of the header. - -@mvstore_1139_p -# When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. - -@mvstore_1140_h3 -#Chunk Format - -@mvstore_1141_p -# There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. - -@mvstore_1142_p -# The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data: - -@mvstore_1143_p -# The fields of the chunk header and footer are: - -@mvstore_1144_li -#chunk: The chunk id. - -@mvstore_1145_li -#block: The first block of the chunk (multiply by the block size to get the position in the file). 
- -@mvstore_1146_li -#len: The size of the chunk in number of blocks. - -@mvstore_1147_li -#map: The id of the newest map; incremented when a new map is created. - -@mvstore_1148_li -#max: The sum of all maximum page sizes (see page format). - -@mvstore_1149_li -#next: The predicted start block of the next chunk. - -@mvstore_1150_li -#pages: The number of pages in the chunk. - -@mvstore_1151_li -#root: The position of the metadata root page (see page format). - -@mvstore_1152_li -#time: The time the chunk was written, in milliseconds after the file was created. - -@mvstore_1153_li -#version: The version this chunk represents. - -@mvstore_1154_li -#fletcher: The checksum of the footer. - -@mvstore_1155_p -# Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. - -@mvstore_1156_p -# How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. 
From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. If it turned out to be a newer valid chunk, this is repeated, until the newest chunk was found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written, and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops. - -@mvstore_1157_h3 -#Page Format - -@mvstore_1158_p -# Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is: - -@mvstore_1159_li -#length (int): Length of the page in bytes. - -@mvstore_1160_li -#checksum (short): Checksum (chunk id xor offset within the chunk xor page length). - -@mvstore_1161_li -#mapId (variable size int): The id of the map this page belongs to. - -@mvstore_1162_li -#len (variable size int): The number of keys in the page. - -@mvstore_1163_li -#type (byte): The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm). - -@mvstore_1164_li -#children (array of long; internal nodes only): The position of the children. 
- -@mvstore_1165_li -#childCounts (array of variable size long; internal nodes only): The total number of entries for the given child page. - -@mvstore_1166_li -#keys (byte array): All keys, stored depending on the data type. - -@mvstore_1167_li -#values (byte array; leaf pages only): All values, stored depending on the data type. - -@mvstore_1168_p -# Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. - -@mvstore_1169_p -# Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. 
- -@mvstore_1170_p -# The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. - -@mvstore_1171_p -# Data compression: The data after the page type are optionally compressed using the LZF algorithm. - -@mvstore_1172_h3 -#Metadata Map - -@mvstore_1173_p -# In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries: - -@mvstore_1174_li -#chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. - -@mvstore_1175_li -#map.1: The metadata of map 1. The entries are: name, createVersion, and type. - -@mvstore_1176_li -#name.data: The map id of the map named "data". The value is "1". - -@mvstore_1177_li -#root.1: The root position of map 1. - -@mvstore_1178_li -#setting.storeVersion: The store version (a user defined value). - -@mvstore_1179_h2 -#Similar Projects and Differences to Other Storage Engines - -@mvstore_1180_p -# Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. - -@mvstore_1181_p -# The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. - -@mvstore_1182_p -# Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses is a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. 
In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. - -@mvstore_1183_p -# The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. The MVStore does not have a record size limit. - -@mvstore_1184_h2 -#Current State - -@mvstore_1185_p -# The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). - -@mvstore_1186_h2 -必要条件 - -@mvstore_1187_p -# The MVStore is included in the latest H2 jar file. - -@mvstore_1188_p -# There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. - -@mvstore_1189_p -# To build just the MVStore (without the database engine), run: - -@mvstore_1190_p -# This will create the file bin/h2mvstore-1.4.187.jar (about 200 KB). - -@performance_1000_h1 -パフォーマンス - -@performance_1001_a -# Performance Comparison - -@performance_1002_a -# PolePosition Benchmark - -@performance_1003_a -# Database Performance Tuning - -@performance_1004_a -# Using the Built-In Profiler - -@performance_1005_a -# Application Profiling - -@performance_1006_a -# Database Profiling - -@performance_1007_a -# Statement Execution Plans - -@performance_1008_a -# How Data is Stored and How Indexes Work - -@performance_1009_a -# Fast Database Import - -@performance_1010_h2 -#Performance Comparison - -@performance_1011_p -# In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
- -@performance_1012_h3 -#Embedded - -@performance_1013_th -#Test Case - -@performance_1014_th -#Unit - -@performance_1015_th -H2 - -@performance_1016_th -HSQLDB - -@performance_1017_th -Derby - -@performance_1018_td -#Simple: Init - -@performance_1019_td -#ms - -@performance_1020_td -#1019 - -@performance_1021_td -#1907 - -@performance_1022_td -#8280 - -@performance_1023_td -#Simple: Query (random) - -@performance_1024_td -#ms - -@performance_1025_td -#1304 - -@performance_1026_td -#873 - -@performance_1027_td -#1912 - -@performance_1028_td -#Simple: Query (sequential) - -@performance_1029_td -#ms - -@performance_1030_td -#835 - -@performance_1031_td -#1839 - -@performance_1032_td -#5415 - -@performance_1033_td -#Simple: Update (sequential) - -@performance_1034_td -#ms - -@performance_1035_td -#961 - -@performance_1036_td -#2333 - -@performance_1037_td -#21759 - -@performance_1038_td -#Simple: Delete (sequential) - -@performance_1039_td -#ms - -@performance_1040_td -#950 - -@performance_1041_td -#1922 - -@performance_1042_td -#32016 - -@performance_1043_td -#Simple: Memory Usage - -@performance_1044_td -#MB - -@performance_1045_td -#21 - -@performance_1046_td -#10 - -@performance_1047_td -#8 - -@performance_1048_td -#BenchA: Init - -@performance_1049_td -#ms - -@performance_1050_td -#919 - -@performance_1051_td -#2133 - -@performance_1052_td -#7528 - -@performance_1053_td -#BenchA: Transactions - -@performance_1054_td -#ms - -@performance_1055_td -#1219 - -@performance_1056_td -#2297 - -@performance_1057_td -#8541 - -@performance_1058_td -#BenchA: Memory Usage - -@performance_1059_td -#MB - -@performance_1060_td -#12 - -@performance_1061_td -#15 - -@performance_1062_td -#7 - -@performance_1063_td -#BenchB: Init - -@performance_1064_td -#ms - -@performance_1065_td -#905 - -@performance_1066_td -#1993 - -@performance_1067_td -#8049 - -@performance_1068_td -#BenchB: Transactions - -@performance_1069_td -#ms - -@performance_1070_td -#1091 - -@performance_1071_td -#583 
- -@performance_1072_td -#1165 - -@performance_1073_td -#BenchB: Memory Usage - -@performance_1074_td -#MB - -@performance_1075_td -#17 - -@performance_1076_td -#11 - -@performance_1077_td -#8 - -@performance_1078_td -#BenchC: Init - -@performance_1079_td -#ms - -@performance_1080_td -#2491 - -@performance_1081_td -#4003 - -@performance_1082_td -#8064 - -@performance_1083_td -#BenchC: Transactions - -@performance_1084_td -#ms - -@performance_1085_td -#1979 - -@performance_1086_td -#803 - -@performance_1087_td -#2840 - -@performance_1088_td -#BenchC: Memory Usage - -@performance_1089_td -#MB - -@performance_1090_td -#19 - -@performance_1091_td -#22 - -@performance_1092_td -#9 - -@performance_1093_td -#Executed statements - -@performance_1094_td -## - -@performance_1095_td -#1930995 - -@performance_1096_td -#1930995 - -@performance_1097_td -#1930995 - -@performance_1098_td -#Total time - -@performance_1099_td -#ms - -@performance_1100_td -#13673 - -@performance_1101_td -#20686 - -@performance_1102_td -#105569 - -@performance_1103_td -#Statements per second - -@performance_1104_td -## - -@performance_1105_td -#141226 - -@performance_1106_td -#93347 - -@performance_1107_td -#18291 - -@performance_1108_h3 -#Client-Server - -@performance_1109_th -#Test Case - -@performance_1110_th -#Unit - -@performance_1111_th -#H2 (Server) - -@performance_1112_th -HSQLDB - -@performance_1113_th -Derby - -@performance_1114_th -PostgreSQL - -@performance_1115_th -MySQL - -@performance_1116_td -#Simple: Init - -@performance_1117_td -#ms - -@performance_1118_td -#16338 - -@performance_1119_td -#17198 - -@performance_1120_td -#27860 - -@performance_1121_td -#30156 - -@performance_1122_td -#29409 - -@performance_1123_td -#Simple: Query (random) - -@performance_1124_td -#ms - -@performance_1125_td -#3399 - -@performance_1126_td -#2582 - -@performance_1127_td -#6190 - -@performance_1128_td -#3315 - -@performance_1129_td -#3342 - -@performance_1130_td -#Simple: Query (sequential) - 
-@performance_1131_td -#ms - -@performance_1132_td -#21841 - -@performance_1133_td -#18699 - -@performance_1134_td -#42347 - -@performance_1135_td -#30774 - -@performance_1136_td -#32611 - -@performance_1137_td -#Simple: Update (sequential) - -@performance_1138_td -#ms - -@performance_1139_td -#6913 - -@performance_1140_td -#7745 - -@performance_1141_td -#28576 - -@performance_1142_td -#32698 - -@performance_1143_td -#11350 - -@performance_1144_td -#Simple: Delete (sequential) - -@performance_1145_td -#ms - -@performance_1146_td -#8051 - -@performance_1147_td -#9751 - -@performance_1148_td -#42202 - -@performance_1149_td -#44480 - -@performance_1150_td -#16555 - -@performance_1151_td -#Simple: Memory Usage - -@performance_1152_td -#MB - -@performance_1153_td -#22 - -@performance_1154_td -#11 - -@performance_1155_td -#9 - -@performance_1156_td -#0 - -@performance_1157_td -#1 - -@performance_1158_td -#BenchA: Init - -@performance_1159_td -#ms - -@performance_1160_td -#12996 - -@performance_1161_td -#14720 - -@performance_1162_td -#24722 - -@performance_1163_td -#26375 - -@performance_1164_td -#26060 - -@performance_1165_td -#BenchA: Transactions - -@performance_1166_td -#ms - -@performance_1167_td -#10134 - -@performance_1168_td -#10250 - -@performance_1169_td -#18452 - -@performance_1170_td -#21453 - -@performance_1171_td -#15877 - -@performance_1172_td -#BenchA: Memory Usage - -@performance_1173_td -#MB - -@performance_1174_td -#13 - -@performance_1175_td -#15 - -@performance_1176_td -#9 - -@performance_1177_td -#0 - -@performance_1178_td -#1 - -@performance_1179_td -#BenchB: Init - -@performance_1180_td -#ms - -@performance_1181_td -#15264 - -@performance_1182_td -#16889 - -@performance_1183_td -#28546 - -@performance_1184_td -#31610 - -@performance_1185_td -#29747 - -@performance_1186_td -#BenchB: Transactions - -@performance_1187_td -#ms - -@performance_1188_td -#3017 - -@performance_1189_td -#3376 - -@performance_1190_td -#1842 - -@performance_1191_td -#2771 - 
-@performance_1192_td -#1433 - -@performance_1193_td -#BenchB: Memory Usage - -@performance_1194_td -#MB - -@performance_1195_td -#17 - -@performance_1196_td -#12 - -@performance_1197_td -#11 - -@performance_1198_td -#1 - -@performance_1199_td -#1 - -@performance_1200_td -#BenchC: Init - -@performance_1201_td -#ms - -@performance_1202_td -#14020 - -@performance_1203_td -#10407 - -@performance_1204_td -#17655 - -@performance_1205_td -#19520 - -@performance_1206_td -#17532 - -@performance_1207_td -#BenchC: Transactions - -@performance_1208_td -#ms - -@performance_1209_td -#5076 - -@performance_1210_td -#3160 - -@performance_1211_td -#6411 - -@performance_1212_td -#6063 - -@performance_1213_td -#4530 - -@performance_1214_td -#BenchC: Memory Usage - -@performance_1215_td -#MB - -@performance_1216_td -#19 - -@performance_1217_td -#21 - -@performance_1218_td -#11 - -@performance_1219_td -#1 - -@performance_1220_td -#1 - -@performance_1221_td -#Executed statements - -@performance_1222_td -## - -@performance_1223_td -#1930995 - -@performance_1224_td -#1930995 - -@performance_1225_td -#1930995 - -@performance_1226_td -#1930995 - -@performance_1227_td -#1930995 - -@performance_1228_td -#Total time - -@performance_1229_td -#ms - -@performance_1230_td -#117049 - -@performance_1231_td -#114777 - -@performance_1232_td -#244803 - -@performance_1233_td -#249215 - -@performance_1234_td -#188446 - -@performance_1235_td -#Statements per second - -@performance_1236_td -## - -@performance_1237_td -#16497 - -@performance_1238_td -#16823 - -@performance_1239_td -#7887 - -@performance_1240_td -#7748 - -@performance_1241_td -#10246 - -@performance_1242_h3 -#Benchmark Results and Comments - -@performance_1243_h4 -H2 - -@performance_1244_p -# Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. 
One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is: there is no limit on the result set size. - -@performance_1245_h4 -HSQLDB - -@performance_1246_p -# Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1). - -@performance_1247_h4 -Derby - -@performance_1248_p -# Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. - -@performance_1249_h4 -PostgreSQL - -@performance_1250_p -# Version 9.1.5 was used for the test. The following options where changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1251_h4 -MySQL - -@performance_1252_p -# Version 5.1.65-log was used for the test. 
MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. - -@performance_1253_h4 -#Firebird - -@performance_1254_p -# Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance is welcome. - -@performance_1255_h4 -#Why Oracle / MS SQL Server / DB2 are Not Listed - -@performance_1256_p -# The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. - -@performance_1257_h3 -#About this Benchmark - -@performance_1258_h4 -#How to Run - -@performance_1259_p -# This test was run as follows: - -@performance_1260_h4 -#Separate Process per Database - -@performance_1261_p -# For each database, a new process is started, to ensure the previous test does not impact the current test. - -@performance_1262_h4 -#Number of Connections - -@performance_1263_p -# This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. 
- -@performance_1264_h4 -#Real-World Tests - -@performance_1265_p -# Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. - -@performance_1266_h4 -#Comparing Embedded with Server Databases - -@performance_1267_p -# This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. - -@performance_1268_h4 -#Test Platform - -@performance_1269_p -# This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. - -@performance_1270_h4 -#Multiple Runs - -@performance_1271_p -# When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. - -@performance_1272_h4 -#Memory Usage - -@performance_1273_p -# It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. 
- -@performance_1274_h4 -#Delayed Operations - -@performance_1275_p -# Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). - -@performance_1276_h4 -#Transaction Commit / Durability - -@performance_1277_p -# Durability means transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many database suggest to 'batch' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. - -@performance_1278_h4 -#Using Prepared Statements - -@performance_1279_p -# Wherever possible, the test cases use prepared statements. - -@performance_1280_h4 -#Currently Not Tested: Startup Time - -@performance_1281_p -# The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. - -@performance_1282_h2 -#PolePosition Benchmark - -@performance_1283_p -# The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. 
This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). - -@performance_1284_th -#Test Case - -@performance_1285_th -#Unit - -@performance_1286_th -H2 - -@performance_1287_th -HSQLDB - -@performance_1288_th -MySQL - -@performance_1289_td -#Melbourne write - -@performance_1290_td -#ms - -@performance_1291_td -#369 - -@performance_1292_td -#249 - -@performance_1293_td -#2022 - -@performance_1294_td -#Melbourne read - -@performance_1295_td -#ms - -@performance_1296_td -#47 - -@performance_1297_td -#49 - -@performance_1298_td -#93 - -@performance_1299_td -#Melbourne read_hot - -@performance_1300_td -#ms - -@performance_1301_td -#24 - -@performance_1302_td -#43 - -@performance_1303_td -#95 - -@performance_1304_td -#Melbourne delete - -@performance_1305_td -#ms - -@performance_1306_td -#147 - -@performance_1307_td -#133 - -@performance_1308_td -#176 - -@performance_1309_td -#Sepang write - -@performance_1310_td -#ms - -@performance_1311_td -#965 - -@performance_1312_td -#1201 - -@performance_1313_td -#3213 - -@performance_1314_td -#Sepang read - -@performance_1315_td -#ms - -@performance_1316_td -#765 - -@performance_1317_td -#948 - -@performance_1318_td -#3455 - -@performance_1319_td -#Sepang read_hot - -@performance_1320_td -#ms - -@performance_1321_td -#789 - -@performance_1322_td -#859 - -@performance_1323_td -#3563 - -@performance_1324_td -#Sepang delete - -@performance_1325_td -#ms - -@performance_1326_td -#1384 - -@performance_1327_td -#1596 - -@performance_1328_td -#6214 - -@performance_1329_td -#Bahrain write - -@performance_1330_td -#ms - -@performance_1331_td -#1186 - -@performance_1332_td -#1387 - -@performance_1333_td -#6904 - -@performance_1334_td -#Bahrain query_indexed_string - -@performance_1335_td -#ms - -@performance_1336_td -#336 - -@performance_1337_td -#170 - -@performance_1338_td -#693 - -@performance_1339_td -#Bahrain query_string - 
-@performance_1340_td -#ms - -@performance_1341_td -#18064 - -@performance_1342_td -#39703 - -@performance_1343_td -#41243 - -@performance_1344_td -#Bahrain query_indexed_int - -@performance_1345_td -#ms - -@performance_1346_td -#104 - -@performance_1347_td -#134 - -@performance_1348_td -#678 - -@performance_1349_td -#Bahrain update - -@performance_1350_td -#ms - -@performance_1351_td -#191 - -@performance_1352_td -#87 - -@performance_1353_td -#159 - -@performance_1354_td -#Bahrain delete - -@performance_1355_td -#ms - -@performance_1356_td -#1215 - -@performance_1357_td -#729 - -@performance_1358_td -#6812 - -@performance_1359_td -#Imola retrieve - -@performance_1360_td -#ms - -@performance_1361_td -#198 - -@performance_1362_td -#194 - -@performance_1363_td -#4036 - -@performance_1364_td -#Barcelona write - -@performance_1365_td -#ms - -@performance_1366_td -#413 - -@performance_1367_td -#832 - -@performance_1368_td -#3191 - -@performance_1369_td -#Barcelona read - -@performance_1370_td -#ms - -@performance_1371_td -#119 - -@performance_1372_td -#160 - -@performance_1373_td -#1177 - -@performance_1374_td -#Barcelona query - -@performance_1375_td -#ms - -@performance_1376_td -#20 - -@performance_1377_td -#5169 - -@performance_1378_td -#101 - -@performance_1379_td -#Barcelona delete - -@performance_1380_td -#ms - -@performance_1381_td -#388 - -@performance_1382_td -#319 - -@performance_1383_td -#3287 - -@performance_1384_td -#Total - -@performance_1385_td -#ms - -@performance_1386_td -#26724 - -@performance_1387_td -#53962 - -@performance_1388_td -#87112 - -@performance_1389_p -# There are a few problems with the PolePosition test: - -@performance_1390_li -# HSQLDB uses in-memory tables by default while H2 uses persistent tables. 
The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true in the file Jdbc.properties. - -@performance_1391_li -#HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1 - -@performance_1392_li -#The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. - -@performance_1393_h2 -#Database Performance Tuning - -@performance_1394_h3 -#Keep Connections Open or Use a Connection Pool - -@performance_1395_p -# If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. - -@performance_1396_p -# If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. - -@performance_1397_h3 -#Use a Modern JVM - -@performance_1398_p -# Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. - -@performance_1399_h3 -#Virus Scanners - -@performance_1400_p -# Some virus scanners scan files every time they are accessed. 
It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. - -@performance_1401_h3 -トレースオプションを使用�?�る - -@performance_1402_p -# If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options. - -@performance_1403_h3 -#Index Usage - -@performance_1404_p -# This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. - -@performance_1405_h3 -#How Data is Stored Internally - -@performance_1406_p -# For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". 
- -@performance_1407_p -# H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). - -@performance_1408_p -# For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. - -@performance_1409_h3 -#Optimizer - -@performance_1410_p -# This database uses a cost based optimizer. For simple queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. 
- -@performance_1411_h3 -#Expression Optimization - -@performance_1412_p -# After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. - -@performance_1413_h3 -#COUNT(*) Optimization - -@performance_1414_p -# If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. - -@performance_1415_h3 -#Updating Optimizer Statistics / Column Selectivity - -@performance_1416_p -# When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID, two indexes can be used, in this case the index on NAME for T1 and the index on ID for T2. - -@performance_1417_p -# If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. 
For the query above, the index on NAME should be used if the table contains more distinct names than first names. - -@performance_1418_p -# The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. - -@performance_1419_h3 -#In-Memory (Hash) Indexes - -@performance_1420_p -# Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. - -@performance_1421_p -#In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows itself will also be kept in-memory. Please note this may cause memory problems for large tables. - -@performance_1422_p -# In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only supports direct lookup (WHERE ID = ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...). - -@performance_1423_h3 -#Use Prepared Statements - -@performance_1424_p -# If possible, use prepared statements with parameters. - -@performance_1425_h3 -#Prepared Statements and IN(...) - -@performance_1426_p -# Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example: - -@performance_1427_h3 -#Optimization Examples - -@performance_1428_p -# See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. - -@performance_1429_h3 -#Cache Size and Type - -@performance_1430_p -# By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. 
- -@performance_1431_h3 -データ型 - -@performance_1432_p -# Each data type has different storage and performance characteristics: - -@performance_1433_li -#The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. - -@performance_1434_li -#Text types are slower to read, write, and compare than numeric types and generally require more storage. - -@performance_1435_li -#See Large Objects for information on BINARY vs. BLOB and VARCHAR vs. CLOB performance. - -@performance_1436_li -#Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. - -@performance_1437_code -#SMALLINT/TINYINT/BOOLEAN - -@performance_1438_li -# are not significantly smaller or faster to work with than INTEGER in most modes. - -@performance_1439_h3 -#Sorted Insert Optimization - -@performance_1440_p -# To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED before the SELECT statement: - -@performance_1441_h2 -#Using the Built-In Profiler - -@performance_1442_p -# A very simple Java profiler is built-in. To use it, use the following template: - -@performance_1443_h2 -#Application Profiling - -@performance_1444_h3 -#Analyze First - -@performance_1445_p -# Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). But this does not work for complex applications with many modules, and for memory problems. - -@performance_1446_p -# A simple way to profile an application is to use the built-in profiling tool of java. 
Example: - -@performance_1447_p -# Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). - -@performance_1448_p -# A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example: - -@performance_1449_p -# The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. - -@performance_1450_h2 -#Database Profiling - -@performance_1451_p -# The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2. As an example, execute the following script using the H2 Console: - -@performance_1452_p -# After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file. - -@performance_1453_p -# The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary): - -@performance_1454_h2 -#Statement Execution Plans - -@performance_1455_p -# The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. 
@performance_1460_p
# The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. An exception is large CLOB and BLOB values, which are not stored in the table.
@performance_1464_p
# For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is a non-unique or multi-column index on this column, and if this column has a low selectivity.
If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT: - -@performance_1486_h3 -#Indexes - -@performance_1487_p -# An index internally is basically just a table that contains the indexed column(s), plus the row id: - -@performance_1488_p -# In the index, the data is sorted by the indexed columns. So this index contains the following data: - -@performance_1489_th -#CITY - -@performance_1490_th -#NAME - -@performance_1491_th -#FIRST_NAME - -@performance_1492_th -#_ROWID_ - -@performance_1493_td -#Berne - -@performance_1494_td -#Jones - -@performance_1495_td -#Philip - -@performance_1496_td -#2 - -@performance_1497_td -#Berne - -@performance_1498_td -#Miller - -@performance_1499_td -#John - -@performance_1500_td -#1 - -@performance_1501_p -# When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. 
@performance_1511_p
# Within a query, only one index per logical table is used. Using the condition PHONE = '123 567 789' OR CITY = 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION. In this case, each individual query uses a different index:
@quickstart_1015_h4
コンソールを起動する
@quickstart_1025_h4
実行する
COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas. - -@roadmap_1020_li -#Test multi-threaded in-memory db access. - -@roadmap_1021_li -#MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes. - -@roadmap_1022_li -#Support GRANT SELECT, UPDATE ON [schemaName.] *. - -@roadmap_1023_li -#Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. - -@roadmap_1024_li -#Clustering: support mixed clustering mode (one embedded, others in server mode). - -@roadmap_1025_li -#Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3). - -@roadmap_1026_li -#Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; - -@roadmap_1027_li -#PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables. - -@roadmap_1028_li -#Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. - -@roadmap_1029_li -#Test very large databases and LOBs (up to 256 GB). - -@roadmap_1030_li -#Store all temp files in the temp directory. - -@roadmap_1031_li -#Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. - -@roadmap_1032_li -#Make DDL (Data Definition) operations transactional. - -@roadmap_1033_li -#Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). - -@roadmap_1034_li -#Groovy Stored Procedures: http://groovy.codehaus.org/GSQL - -@roadmap_1035_li -#Add a migration guide (list differences between databases). - -@roadmap_1036_li -#Optimization: automatic index creation suggestion using the trace file? - -@roadmap_1037_li -#Fulltext search Lucene: analyzer configuration, mergeFactor. 
- -@roadmap_1038_li -#Compression performance: don't allocate buffers, compress / expand in to out buffer. - -@roadmap_1039_li -#Rebuild index functionality to shrink index size and improve performance. - -@roadmap_1040_li -#Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). - -@roadmap_1041_li -#Test performance again with SQL Server, Oracle, DB2. - -@roadmap_1042_li -#Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. - -@roadmap_1043_li -#Write more tests and documentation for MVCC (Multi Version Concurrency Control). - -@roadmap_1044_li -#Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. - -@roadmap_1045_li -#Implement, test, document XAConnection and so on. - -@roadmap_1046_li -#Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). - -@roadmap_1047_li -#CHECK: find out what makes CHECK=TRUE slow, move to CHECK2. - -@roadmap_1048_li -#Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. - -@roadmap_1049_li -#Index usage for (ID, NAME)=(1, 'Hi'); document. - -@roadmap_1050_li -#Set a connection read only (Connection.setReadOnly) or using a connection parameter. - -@roadmap_1051_li -#Access rights: finer grained access control (grant access for specific functions). - -@roadmap_1052_li -#ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). - -@roadmap_1053_li -#Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP). - -@roadmap_1054_li -#Web server classloader: override findResource / getResourceFrom. - -@roadmap_1055_li -#Cost for embedded temporary view is calculated wrong, if result is constant. - -@roadmap_1056_li -#Count index range query (count(*) where id between 10 and 20). - -@roadmap_1057_li -#Performance: update in-place. 
- -@roadmap_1058_li -#Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log). - -@roadmap_1059_li -#Database file name suffix: a way to use no or a different suffix (for example using a slash). - -@roadmap_1060_li -#Eclipse plugin. - -@roadmap_1061_li -#Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". - -@roadmap_1062_li -#Fulltext search (native): reader / tokenizer / filter. - -@roadmap_1063_li -#Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files. - -@roadmap_1064_li -#iReport to support H2. - -@roadmap_1065_li -#Include SMTP (mail) client (alert on cluster failure, low disk space,...). - -@roadmap_1066_li -#Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. - -@roadmap_1067_li -#JSON parser and functions. - -@roadmap_1068_li -#Copy database: tool with config GUI and batch mode, extensible (example: compare). - -@roadmap_1069_li -#Document, implement tool for long running transactions using user-defined compensation statements. - -@roadmap_1070_li -#Support SET TABLE DUAL READONLY. - -@roadmap_1071_li -#GCJ: what is the state now? - -@roadmap_1072_li -#Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html - -@roadmap_1073_li -#Optimization: simpler log compression. - -@roadmap_1074_li -#Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif - -@roadmap_1075_li -#Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN. 
- -@roadmap_1076_li -#Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). - -@roadmap_1077_li -#Custom class loader to reload functions on demand. - -@roadmap_1078_li -#Test http://mysql-je.sourceforge.net/ - -@roadmap_1079_li -#H2 Console: the webclient could support more features like phpMyAdmin. - -@roadmap_1080_li -#Support Oracle functions: TO_DATE, TO_NUMBER. - -@roadmap_1081_li -#Work on the Java to C converter. - -@roadmap_1082_li -#The HELP information schema can be directly exposed in the Console. - -@roadmap_1083_li -#Maybe use the 0x1234 notation for binary fields, see MS SQL Server. - -@roadmap_1084_li -#Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html - -@roadmap_1085_li -#SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm - -@roadmap_1086_li -#SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip - -@roadmap_1087_li -#Version column (number/sequence and timestamp based). - -@roadmap_1088_li -#Optimize getGeneratedKey: send last identity after each execute (server). - -@roadmap_1089_li -#Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID). - -@roadmap_1090_li -#Max memory rows / max undo log size: use block count / row size not row count. - -@roadmap_1091_li -#Implement point-in-time recovery. - -@roadmap_1092_li -#Support PL/SQL (programming language / control flow statements). - -@roadmap_1093_li -#LIKE: improved version for larger texts (currently using naive search). - -@roadmap_1094_li -#Throw an exception when the application calls getInt on a Long (optional). - -@roadmap_1095_li -#Default date format for input and output (local date constants). - -@roadmap_1096_li -#Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery). 
- -@roadmap_1097_li -#File system that writes to two file systems (replication, replicating file system). - -@roadmap_1098_li -#Standalone tool to get relevant system properties and add it to the trace output. - -@roadmap_1099_li -#Support 'call proc(1=value)' (PostgreSQL, Oracle). - -@roadmap_1100_li -#Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). - -@roadmap_1101_li -#Console: autocomplete Ctrl+Space inserts template. - -@roadmap_1102_li -#Option to encrypt .trace.db file. - -@roadmap_1103_li -#Auto-Update feature for database, .jar file. - -@roadmap_1104_li -#ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp. - -@roadmap_1105_li -#Partial indexing (see PostgreSQL). - -@roadmap_1106_li -#Add GUI to build a custom version (embedded, fulltext,...) using build flags. - -@roadmap_1107_li -#http://rubyforge.org/projects/hypersonic/ - -@roadmap_1108_li -#Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). - -@roadmap_1109_li -#Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). - -@roadmap_1110_li -#Backup tool should work with other databases as well. - -@roadmap_1111_li -#Console: -ifExists doesn't work for the console. Add a flag to disable other dbs. - -@roadmap_1112_li -#Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). - -@roadmap_1113_li -#Java static code analysis: http://pmd.sourceforge.net/ - -@roadmap_1114_li -#Java static code analysis: http://www.eclipse.org/tptp/ - -@roadmap_1115_li -#Compatibility for CREATE SCHEMA AUTHORIZATION. - -@roadmap_1116_li -#Implement Clob / Blob truncate and the remaining functionality. - -@roadmap_1117_li -#Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... 
- -@roadmap_1118_li -#File locking: writing a system property to detect concurrent access from the same VM (different classloaders). - -@roadmap_1119_li -#Pure SQL triggers (example: update parent table if the child table is changed). - -@roadmap_1120_li -#Add H2 to Gem (Ruby install system). - -@roadmap_1121_li -#Support linked JCR tables. - -@roadmap_1122_li -#Native fulltext search: min word length; store word positions. - -@roadmap_1123_li -#Add an option to the SCRIPT command to generate only portable / standard SQL. - -@roadmap_1124_li -#Updatable views: create 'instead of' triggers automatically if possible (simple cases first). - -@roadmap_1125_li -#Improve create index performance. - -@roadmap_1126_li -#Compact databases without having to close the database (vacuum). - -@roadmap_1127_li -#Implement more JDBC 4.0 features. - -@roadmap_1128_li -#Support TRANSFORM / PIVOT as in MS Access. - -@roadmap_1129_li -#SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). - -@roadmap_1130_li -#Support updatable views with join on primary keys (to extend a table). - -@roadmap_1131_li -#Public interface for functions (not public static). - -@roadmap_1132_li -#Support reading the transaction log. - -@roadmap_1133_li -#Feature matrix as in i-net software. - -@roadmap_1134_li -#Updatable result set on table without primary key or unique index. - -@roadmap_1135_li -#Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. - -@roadmap_1136_li -#Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') - -@roadmap_1137_li -#Support data type INTERVAL - -@roadmap_1138_li -#Support nested transactions (possibly using savepoints internally). - -@roadmap_1139_li -#Add a benchmark for bigger databases, and one for many users. - -@roadmap_1140_li -#Compression in the result set over TCP/IP. - -@roadmap_1141_li -#Support curtimestamp (like curtime, curdate). 
- -@roadmap_1142_li -#Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. - -@roadmap_1143_li -#Release locks (shared or exclusive) on demand - -@roadmap_1144_li -#Support OUTER UNION - -@roadmap_1145_li -#Support parameterized views (similar to CSVREAD, but using just SQL for the definition) - -@roadmap_1146_li -#A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object - -@roadmap_1147_li -#Support dynamic linked schema (automatically adding/updating/removing tables) - -@roadmap_1148_li -#Clustering: adding a node should be very fast and without interrupting clients (very short lock) - -@roadmap_1149_li -#Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific - -@roadmap_1150_li -#Run benchmarks with Android, Java 7, java -server - -@roadmap_1151_li -#Optimizations: faster hash function for strings. - -@roadmap_1152_li -#DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality - -@roadmap_1153_li -#Benchmark: add a graph to show how databases scale (performance/database size) - -@roadmap_1154_li -#Implement a SQLData interface to map your data over to a custom object - -@roadmap_1155_li -#In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true) - -@roadmap_1156_li -#Support multiple directories (on different hard drives) for the same database - -@roadmap_1157_li -#Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response - -@roadmap_1158_li -#Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) - -@roadmap_1159_li -#Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML - -@roadmap_1160_li -#Support triggers with a string property or option: SpringTrigger, OSGITrigger - -@roadmap_1161_li -#MySQL compatibility: update test1 t1, test2 t2 set 
t1.id = t2.id where t1.id = t2.id; - -@roadmap_1162_li -#Ability to resize the cache array when resizing the cache - -@roadmap_1163_li -#Time based cache writing (one second after writing the log) - -@roadmap_1164_li -#Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185 - -@roadmap_1165_li -#Index usage for REGEXP LIKE. - -@roadmap_1166_li -#Compatibility: add a role DBA (like ADMIN). - -@roadmap_1167_li -#Better support multiple processors for in-memory databases. - -@roadmap_1168_li -#Support N'text' - -@roadmap_1169_li -#Support compatibility for jdbc:hsqldb:res: - -@roadmap_1170_li -#HSQLDB compatibility: automatically convert to the next 'higher' data type. Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range) - -@roadmap_1171_li -#Provide an Java SQL builder with standard and H2 syntax - -@roadmap_1172_li -#Trace: write OS, file system, JVM,... when opening the database - -@roadmap_1173_li -#Support indexes for views (probably requires materialized views) - -@roadmap_1174_li -#Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters - -@roadmap_1175_li -#Server: use one listener (detect if the request comes from an PG or TCP client) - -@roadmap_1176_li -#Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 - -@roadmap_1177_li -#Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html - -@roadmap_1178_li -#DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates. - -@roadmap_1179_li -#Support a special trigger on all tables to allow building a transaction log reader. - -@roadmap_1180_li -#File system with a background writer thread; test if this is faster - -@roadmap_1181_li -#Better document the source code (high level documentation). 
- -@roadmap_1182_li -#Support select * from dual a left join dual b on b.x=(select max(x) from dual) - -@roadmap_1183_li -#Optimization: don't lock when the database is read-only - -@roadmap_1184_li -#Issue 146: Support merge join. - -@roadmap_1185_li -#Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download - -@roadmap_1186_li -#Cluster: hot deploy (adding a node at runtime). - -@roadmap_1187_li -#Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts. - -@roadmap_1188_li -#Oracle: support DECODE method (convert to CASE WHEN). - -@roadmap_1189_li -#Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping - -@roadmap_1190_li -#Improve documentation of access rights. - -@roadmap_1191_li -#Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). - -@roadmap_1192_li -#Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). - -@roadmap_1193_li -#Remember the user defined data type (domain) of a column. - -@roadmap_1194_li -#MVCC: support multi-threaded kernel with multi-version concurrency. - -@roadmap_1195_li -#Auto-server: add option to define the port range or list. - -@roadmap_1196_li -#Support Jackcess (MS Access databases) - -@roadmap_1197_li -#Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World') - -@roadmap_1198_li -#Improve time to open large databases (see mail 'init time for distributed setup') - -@roadmap_1199_li -#Move Maven 2 repository from hsql.sf.net to h2database.sf.net - -@roadmap_1200_li -#Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...) - -@roadmap_1201_li -#Optimize A=? OR B=? to UNION if the cost is lower. 
- -@roadmap_1202_li -#Javadoc: document design patterns used - -@roadmap_1203_li -#Support custom collators, for example for natural sort (for text that contains numbers). - -@roadmap_1204_li -#Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) - -@roadmap_1205_li -#Convert SQL-injection-2.txt to html document, include SQLInjection.java sample - -@roadmap_1206_li -#Support OUT parameters in user-defined procedures. - -@roadmap_1207_li -#Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp - -@roadmap_1208_li -#HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC - -@roadmap_1209_li -#Translation: use ?? in help.csv - -@roadmap_1210_li -#Translated .pdf - -@roadmap_1211_li -#Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file - -@roadmap_1212_li -#Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. - -@roadmap_1213_li -#RECOVER=2 to backup the database, run recovery, open the database - -@roadmap_1214_li -#Recovery should work with encrypted databases - -@roadmap_1215_li -#Corruption: new error code, add help - -@roadmap_1216_li -#Space reuse: after init, scan all storages and free those that don't belong to a live database object - -@roadmap_1217_li -#Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) - -@roadmap_1218_li -#Support NOCACHE table option (Oracle). - -@roadmap_1219_li -#Support table partitioning. - -@roadmap_1220_li -#Add regular javadocs (using the default doclet, but another css) to the homepage. 
@roadmap_1236_li
#Import / Export of fixed width text files.

@roadmap_1237_li
#HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data).
- -@roadmap_1241_li -#Support large databases: split database files to multiple directories / disks (similar to tablespaces). - -@roadmap_1242_li -#H2 Console: support configuration option for fixed width (monospace) font. - -@roadmap_1243_li -#Native fulltext search: support analyzers (specially for Chinese, Japanese). - -@roadmap_1244_li -#Automatically compact databases from time to time (as a background process). - -@roadmap_1245_li -#Test Eclipse DTP. - -@roadmap_1246_li -#H2 Console: autocomplete: keep the previous setting - -@roadmap_1247_li -#executeBatch: option to stop at the first failed statement. - -@roadmap_1248_li -#Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 - -@roadmap_1249_li -#Support Oracle ROWID (unique identifier for each row). - -@roadmap_1250_li -#MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c); - -@roadmap_1251_li -#Server mode: improve performance for batch updates. - -@roadmap_1252_li -#Applets: support read-only databases in a zip file (accessed as a resource). - -@roadmap_1253_li -#Long running queries / errors / trace system table. - -@roadmap_1254_li -#H2 Console should support JaQu directly. - -@roadmap_1255_li -#Better document FTL_SEARCH, FTL_SEARCH_DATA. - -@roadmap_1256_li -#Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL. - -@roadmap_1257_li -#Index creation using deterministic functions. - -@roadmap_1258_li -#ANALYZE: for unique indexes that allow null, count the number of null. - -@roadmap_1259_li -#MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html - -@roadmap_1260_li -#AUTO_SERVER: support changing IP addresses (disable a network while the database is open). - -@roadmap_1261_li -#Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. - -@roadmap_1262_li -#Support TRUNCATE .. 
CASCADE like PostgreSQL. - -@roadmap_1263_li -#Fulltext search: lazy result generation using SimpleRowSource. - -@roadmap_1264_li -#Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello'). - -@roadmap_1265_li -#MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. - -@roadmap_1266_li -#MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2 - -@roadmap_1267_li -#Docs: add a one line description for each functions and SQL statements at the top (in the link section). - -@roadmap_1268_li -#Javadoc search: weight for titles should be higher ('random' should list Functions as the best match). - -@roadmap_1269_li -#Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. - -@roadmap_1270_li -#Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete. - -@roadmap_1271_li -#MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL) - -@roadmap_1272_li -#Support a data type "timestamp with timezone" using java.util.Calendar. - -@roadmap_1273_li -#Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62 - -@roadmap_1274_li -#Add database creation date and time to the database. - -@roadmap_1275_li -#Support ASSERTION. - -@roadmap_1276_li -#MySQL compatibility: support comparing 1='a' - -@roadmap_1277_li -#Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html - -@roadmap_1278_li -#PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. - -@roadmap_1279_li -#RunScript should be able to read from system in (or quite mode for Shell). - -@roadmap_1280_li -#Natural join: support select x from dual natural join dual. - -@roadmap_1281_li -#Support using system properties in database URLs (may be a security problem). 
- -@roadmap_1282_li -#Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b - -@roadmap_1283_li -#Use the Java service provider mechanism to register file systems and function libraries. - -@roadmap_1284_li -#MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL). - -@roadmap_1285_li -#Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)). - -@roadmap_1286_li -#Optimization for EXISTS: convert to inner join or IN(..) if possible. - -@roadmap_1287_li -#Functions: support hashcode(value); cryptographic and fast - -@roadmap_1288_li -#Serialized file lock: support long running queries. - -@roadmap_1289_li -#Network: use 127.0.0.1 if other addresses don't work. - -@roadmap_1290_li -#Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. - -@roadmap_1291_li -#Support reading JCR data: one table per node type; query table; cache option - -@roadmap_1292_li -#OSGi: create a sample application, test, document. - -@roadmap_1293_li -#help.csv: use complete examples for functions; run as test case. - -@roadmap_1294_li -#Functions to calculate the memory and disk space usage of a table, a row, or a value. - -@roadmap_1295_li -#Re-implement PooledConnection; use a lightweight connection object. - -@roadmap_1296_li -#Doclet: convert tests in javadocs to a java class. - -@roadmap_1297_li -#Doclet: format fields like methods, but support sorting by name and value. - -@roadmap_1298_li -#Doclet: shrink the html files. - -@roadmap_1299_li -#MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56 - -@roadmap_1300_li -#Allow to scan index backwards starting with a value (to better support ORDER BY DESC). 
- -@roadmap_1301_li -#Java Service Wrapper: try http://yajsw.sourceforge.net/ - -@roadmap_1302_li -#Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. - -@roadmap_1303_li -#MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN. - -@roadmap_1304_li -#Use a lazy and auto-close input stream (open resource when reading, close on eof). - -@roadmap_1305_li -#Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true). - -@roadmap_1306_li -#Improve SQL documentation, see http://www.w3schools.com/sql/ - -@roadmap_1307_li -#MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. - -@roadmap_1308_li -#MS SQL Server compatibility: support DATEPART syntax. - -@roadmap_1309_li -#Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83 - -@roadmap_1310_li -#Support INTERVAL data type (see Oracle and others). - -@roadmap_1311_li -#Combine Server and Console tool (only keep Server). - -@roadmap_1312_li -#Store the Lucene index in the database itself. - -@roadmap_1313_li -#Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29 - -@roadmap_1314_li -#Oracle compatibility: support DECODE(x, ...). - -@roadmap_1315_li -#MVCC: compare concurrent update behavior with PostgreSQL and Oracle. - -@roadmap_1316_li -#HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface). - -@roadmap_1317_li -#HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0) - -@roadmap_1318_li -#Support comma as the decimal separator in the CSV tool. - -@roadmap_1319_li -#Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz - -@roadmap_1320_li -#Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. 
- -@roadmap_1321_li -#CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. - -@roadmap_1322_li -#Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601 - -@roadmap_1323_li -#PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG. - -@roadmap_1324_li -#Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html - -@roadmap_1325_li -#IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence. - -@roadmap_1326_li -#Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). - -@roadmap_1327_li -#Oracle compatibility: support CREATE SYNONYM table FOR schema.table. - -@roadmap_1328_li -#FTP: document the server, including -ftpTask option to execute / kill remote processes - -@roadmap_1329_li -#FTP: problems with multithreading? - -@roadmap_1330_li -#FTP: implement SFTP / FTPS - -@roadmap_1331_li -#FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). - -@roadmap_1332_li -#More secure default configuration if remote access is enabled. - -@roadmap_1333_li -#Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). - -@roadmap_1334_li -#Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. - -@roadmap_1335_li -#Issue 107: Prefer using the ORDER BY index if LIMIT is used. - -@roadmap_1336_li -#An index on (id, name) should be used for a query: select * from t where s=? order by i - -@roadmap_1337_li -#Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. - -@roadmap_1338_li -#Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). 
- -@roadmap_1339_li -#Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2). - -@roadmap_1340_li -#Fast alter table add column. - -@roadmap_1341_li -#Improve concurrency for in-memory database operations. - -@roadmap_1342_li -#Issue 122: Support for connection aliases for remote tcp connections. - -@roadmap_1343_li -#Fast scrambling (strong encryption doesn't help if the password is included in the application). - -@roadmap_1344_li -#H2 Console: support -webPassword to require a password to access preferences or shutdown. - -@roadmap_1345_li -#Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. - -@roadmap_1346_li -#Issue 127: Support activation/deactivation of triggers - -@roadmap_1347_li -#Issue 130: Custom log event listeners - -@roadmap_1348_li -#Issue 131: IBM DB2 compatibility: sysibm.sysdummy1 - -@roadmap_1349_li -#Issue 132: Use Java enum trigger type. - -@roadmap_1350_li -#Issue 134: IBM DB2 compatibility: session global variables. - -@roadmap_1351_li -#Cluster: support load balance with values for each server / auto detect. - -@roadmap_1352_li -#FTL_SET_OPTION(keyString, valueString) with key stopWords at first. - -@roadmap_1353_li -#Pluggable access control mechanism. - -@roadmap_1354_li -#Fulltext search (Lucene): support streaming CLOB data. - -@roadmap_1355_li -#Document/example how to create and read an encrypted script file. - -@roadmap_1356_li -#Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). - -@roadmap_1357_li -#Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. - -@roadmap_1358_li -#Support a way to create or read compressed encrypted script files using an API. - -@roadmap_1359_li -#Scripting language support (Javascript). 
- -@roadmap_1360_li -#The network client should better detect if the server is not an H2 server and fail early. - -@roadmap_1361_li -#H2 Console: support CLOB/BLOB upload. - -@roadmap_1362_li -#Database file lock: detect hibernate / standby / very slow threads (compare system time). - -@roadmap_1363_li -#Automatic detection of redundant indexes. - -@roadmap_1364_li -#Maybe reject join without "on" (except natural join). - -@roadmap_1365_li -#Implement GiST (Generalized Search Tree for Secondary Storage). - -@roadmap_1366_li -#Function to read a number of bytes/characters from an BLOB or CLOB. - -@roadmap_1367_li -#Issue 156: Support SELECT ? UNION SELECT ?. - -@roadmap_1368_li -#Automatic mixed mode: support a port range list (to avoid firewall problems). - -@roadmap_1369_li -#Support the pseudo column rowid, oid, _rowid_. - -@roadmap_1370_li -#H2 Console / large result sets: stream early instead of keeping a whole result in-memory - -@roadmap_1371_li -#Support TRUNCATE for linked tables. - -@roadmap_1372_li -#UNION: evaluate INTERSECT before UNION (like most other database except Oracle). - -@roadmap_1373_li -#Delay creating the information schema, and share metadata columns. - -@roadmap_1374_li -#TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks. - -@roadmap_1375_li -#Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user). - -@roadmap_1376_li -#Support CREATE DATABASE LINK (a custom JDBC driver is already supported). - -@roadmap_1377_li -#Support large GROUP BY operations. Issue 216. - -@roadmap_1378_li -#Issue 163: Allow to create foreign keys on metadata types. - -@roadmap_1379_li -#Logback: write a native DBAppender. - -@roadmap_1380_li -#Cache size: don't use more cache than what is available. - -@roadmap_1381_li -#Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. 
- -@roadmap_1382_li -#Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. - -@roadmap_1383_li -#User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. - -@roadmap_1384_li -#Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. - -@roadmap_1385_li -#Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based. - -@roadmap_1386_li -#Common Table Expression (CTE) / recursive queries: support parameters. Issue 314. - -@roadmap_1387_li -#Oracle compatibility: support INSERT ALL. - -@roadmap_1388_li -#Issue 178: Optimizer: index usage when both ascending and descending indexes are available. - -@roadmap_1389_li -#Issue 179: Related subqueries in HAVING clause. - -@roadmap_1390_li -#IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. - -@roadmap_1391_li -#Creating primary key: always create a constraint. - -@roadmap_1392_li -#Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. - -@roadmap_1393_li -#Indexes of temporary tables are currently kept in-memory. Is this how it should be? - -@roadmap_1394_li -#The Shell tool should support the same built-in commands as the H2 Console. - -@roadmap_1395_li -#Maybe use PhantomReference instead of finalize. - -@roadmap_1396_li -#Database file name suffix: should only have one dot by default. Example: .h2db - -@roadmap_1397_li -#Issue 196: Function based indexes - -@roadmap_1398_li -#ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. - -@roadmap_1399_li -#Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). 
See TestDiskSpaceLeak.java - -@roadmap_1400_li -#ROWNUM: Oracle compatibility when used within a subquery. Issue 198. - -@roadmap_1401_li -#Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. - -@roadmap_1402_li -#ODBC: encrypted databases are not supported because the ;CIPHER= can not be set. - -@roadmap_1403_li -#Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); - -@roadmap_1404_li -#Optimizer: index usage when both ascending and descending indexes are available. Issue 178. - -@roadmap_1405_li -#Issue 306: Support schema specific domains. - -@roadmap_1406_li -#Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby: triggers are fired in the order in which they were created. - -@roadmap_1407_li -#PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html - -@roadmap_1408_li -#Improve documentation of system properties: only list the property names, default values, and description. - -@roadmap_1409_li -#Support running totals / cumulative sum using SUM(..) OVER(..). - -@roadmap_1410_li -#Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) - -@roadmap_1411_li -#Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). - -@roadmap_1412_li -#Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219. - -@roadmap_1413_li -#Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217. - -@roadmap_1414_li -#Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218. 
- -@roadmap_1415_li -#Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220. - -@roadmap_1416_li -#Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222. - -@roadmap_1417_li -#Log long running transactions (similar to long running statements). - -@roadmap_1418_li -#Parameter data type is data type of other operand. Issue 205. - -@roadmap_1419_li -#Some combinations of nested join with right outer join are not supported. - -@roadmap_1420_li -#DatabaseEventListener.openConnection(id) and closeConnection(id). - -@roadmap_1421_li -#Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. - -@roadmap_1422_li -#Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. - -@roadmap_1423_li -#Compatibility with MySQL TIMESTAMPDIFF. Issue 209. - -@roadmap_1424_li -#Optimizer: use a histogram of the data, specially for non-normal distributions. - -@roadmap_1425_li -#Trigger: allow declaring as source code (like functions). - -@roadmap_1426_li -#User defined aggregate: allow declaring as source code (like functions). - -@roadmap_1427_li -#The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. - -@roadmap_1428_li -#MySQL + PostgreSQL compatibility: support string literal escape with \n. - -@roadmap_1429_li -#PostgreSQL compatibility: support string literal escape with double \\. - -@roadmap_1430_li -#Document the TCP server "management_db". Maybe include the IP address of the client. 
- -@roadmap_1431_li -#Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main - -@roadmap_1432_li -#If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message. - -@roadmap_1433_li -#Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?) - -@roadmap_1434_li -#Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". - -@roadmap_1435_li -#JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). - -@roadmap_1436_li -#Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; - -@roadmap_1437_li -#nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). - -@roadmap_1438_li -#Column as parameter of function table. Issue 228. - -@roadmap_1439_li -#Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections. - -@roadmap_1440_li -#Compatibility with MS Access: support "&" to concatenate text. - -@roadmap_1441_li -#The BACKUP statement should not synchronize on the database, and therefore should not block other users. - -@roadmap_1442_li -#Document the database file format. - -@roadmap_1443_li -#Support reading LOBs. - -@roadmap_1444_li -#Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,... 
- -@roadmap_1445_li -#Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work). - -@roadmap_1446_li -#Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). - -@roadmap_1447_li -#Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes. - -@roadmap_1448_li -#GROUP BY queries should use a temporary table if there are too many rows. - -@roadmap_1449_li -#BLOB: support random access when reading. - -@roadmap_1450_li -#CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). - -@roadmap_1451_li -#Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). - -@roadmap_1452_li -#Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). - -@roadmap_1453_li -#Compatibility with MySQL: support non-strict mode (sql_mode = "") any data that is too large for the column will just be truncated or set to the default value. - -@roadmap_1454_li -#The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition - -@roadmap_1455_li -#Compatibility with IBM DB2: CREATE PROCEDURE. - -@roadmap_1456_li -#Compatibility with IBM DB2: SQL cursors. - -@roadmap_1457_li -#Single-column primary key values are always stored explicitly. This is not required. - -@roadmap_1458_li -#Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). - -@roadmap_1459_li -#CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. - -@roadmap_1460_li -#Optimization for large lists for column IN(1, 2, 3, 4,...) 
- currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). - -@roadmap_1461_li -#Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]). - -@roadmap_1462_li -#PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']] - -@roadmap_1463_li -#PostgreSQL compatibility: UPDATE with FROM. - -@roadmap_1464_li -#Issue 297: Oracle compatibility for "at time zone". - -@roadmap_1465_li -#IBM DB2 compatibility: IDENTITY_VAL_LOCAL(). - -@roadmap_1466_li -#Support SQL/XML. - -@roadmap_1467_li -#Support concurrent opening of databases. - -@roadmap_1468_li -#Improved error message and diagnostics in case of network configuration problems. - -@roadmap_1469_li -#TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). - -@roadmap_1470_li -#Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). - -@roadmap_1471_li -#ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported). - -@roadmap_1472_li -#MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html - -@roadmap_1473_li -#The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/ - -@roadmap_1474_li -#Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". - -@roadmap_1475_li -#MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id. - -@roadmap_1476_li -#Issue 283: Improve performance of H2 on Android. - -@roadmap_1477_li -#Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). 
- -@roadmap_1478_li -#Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d - -@roadmap_1479_li -#PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). - -@roadmap_1480_li -#MS SQL Server compatibility: support @@ROWCOUNT. - -@roadmap_1481_li -#PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x). - -@roadmap_1482_li -#Issue 311: Serialized lock mode: executeQuery of write operations fails. - -@roadmap_1483_li -#PostgreSQL compatibility: support PgAdmin III (specially the function current_setting). - -@roadmap_1484_li -#MySQL compatibility: support TIMESTAMPADD. - -@roadmap_1485_li -#Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1486_li -#Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). - -@roadmap_1487_li -#Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). - -@roadmap_1488_li -#TRANSACTION_ID() for in-memory databases. - -@roadmap_1489_li -#TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). - -@roadmap_1490_li -#Support [INNER | OUTER] JOIN USING(column [,...]). - -@roadmap_1491_li -#Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) - -@roadmap_1492_li -#GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). - -@roadmap_1493_li -#Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped. - -@roadmap_1494_li -#Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1. - -@roadmap_1495_li -#PHP support: H2 should support PDO, or test with PostgreSQL PDO. - -@roadmap_1496_li -#Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query. 
- -@roadmap_1497_li -#Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step. - -@roadmap_1498_li -#MySQL compatibility: index names only need to be unique for the given table. - -@roadmap_1499_li -#Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. - -@roadmap_1500_li -#Oracle compatibility: support MEDIAN aggregate function. - -@roadmap_1501_li -#Issue 348: Oracle compatibility: division should return a decimal result. - -@roadmap_1502_li -#Read rows on demand: instead of reading the whole row, only read up to that column that is requested. Keep an pointer to the data area and the column id that is already read. - -@roadmap_1503_li -#Long running transactions: log session id when detected. - -@roadmap_1504_li -#Optimization: "select id from test" should use the index on id even without "order by". - -@roadmap_1505_li -#Issue 362: LIMIT support for UPDATE statements (MySQL compatibility). - -@roadmap_1506_li -#Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ... - -@roadmap_1507_li -#Use Java 6 SQLException subclasses. - -@roadmap_1508_li -#Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR - -@roadmap_1509_li -#Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. - -@roadmap_1510_li -#Support index-only when doing selects (i.e. without needing to load the actual table data) - -@roadmap_1511_h2 -#Not Planned - -@roadmap_1512_li -#HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. - -@roadmap_1513_li -#String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively. - -@roadmap_1514_li -#In prepared statements, identifier names (table names and so on) can not be parameterized. 
Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. - -@sourceError_1000_h1 -#Error Analyzer - -@sourceError_1001_a -ホーム - -@sourceError_1002_a -#Input - -@sourceError_1003_h2 -#  Details  Source Code - -@sourceError_1004_p -#Paste the error message and stack trace below and click on 'Details' or 'Source Code': - -@sourceError_1005_b -#Error Code: - -@sourceError_1006_b -#Product Version: - -@sourceError_1007_b -#Message: - -@sourceError_1008_b -#More Information: - -@sourceError_1009_b -#Stack Trace: - -@sourceError_1010_b -#Source File: - -@sourceError_1011_p -# Inline - -@tutorial_1000_h1 -チュートリアル - -@tutorial_1001_a -# Starting and Using the H2 Console - -@tutorial_1002_a -# Special H2 Console Syntax - -@tutorial_1003_a -# Settings of the H2 Console - -@tutorial_1004_a -# Connecting to a Database using JDBC - -@tutorial_1005_a -# Creating New Databases - -@tutorial_1006_a -# Using the Server - -@tutorial_1007_a -# Using Hibernate - -@tutorial_1008_a -# Using TopLink and Glassfish - -@tutorial_1009_a -# Using EclipseLink - -@tutorial_1010_a -# Using Apache ActiveMQ - -@tutorial_1011_a -# Using H2 within NetBeans - -@tutorial_1012_a -# Using H2 with jOOQ - -@tutorial_1013_a -# Using Databases in Web Applications - -@tutorial_1014_a -# Android - -@tutorial_1015_a -# CSV (Comma Separated Values) Support - -@tutorial_1016_a -# Upgrade, Backup, and Restore - -@tutorial_1017_a -# Command Line Tools - -@tutorial_1018_a -# The Shell Tool - -@tutorial_1019_a -# Using OpenOffice Base - -@tutorial_1020_a -# Java Web Start / JNLP - -@tutorial_1021_a -# Using a Connection Pool - -@tutorial_1022_a -# Fulltext Search - -@tutorial_1023_a -# User-Defined Variables - -@tutorial_1024_a -# Date and Time - -@tutorial_1025_a -# Using Spring - -@tutorial_1026_a -# OSGi - -@tutorial_1027_a -# Java Management Extension (JMX) - -@tutorial_1028_h2 -起動とH2コンソールの使用 - -@tutorial_1029_p -# The H2 
Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API. - -@tutorial_1030_p -# This is a client/server application, so both a server and a client (a browser) are required to run it. - -@tutorial_1031_p -# Depending on your platform and environment, there are multiple ways to start the H2 Console: - -@tutorial_1032_th -OS - -@tutorial_1033_th -起動 - -@tutorial_1034_td -Windows - -@tutorial_1035_td -# Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] - -@tutorial_1036_td -# An icon will be added to the system tray: - -@tutorial_1037_td -# If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082. - -@tutorial_1038_td -Windows - -@tutorial_1039_td -# Open a file browser, navigate to h2/bin, and double click on h2.bat. - -@tutorial_1040_td -# A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082). - -@tutorial_1041_td -Any - -@tutorial_1042_td -# Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java. - -@tutorial_1043_td -Any - -@tutorial_1044_td -# Open a console window, navigate to the directory h2/bin, and type: - -@tutorial_1045_h3 -ファイアウォール - -@tutorial_1046_p -# If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need allow remote connections in the firewall. 
- -@tutorial_1047_p -# It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'. - -@tutorial_1048_p -# A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'. - -@tutorial_1049_h3 -Javaをテスト�?�る - -@tutorial_1050_p -# To find out which version of Java is installed, open a command prompt and type: - -@tutorial_1051_p -# If you get an error message, you may need to add the Java binary directory to the path environment variable. - -@tutorial_1052_h3 -#Error Message 'Port may be in use' - -@tutorial_1053_p -# You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections. - -@tutorial_1054_h3 -他�?��?ートを使用�?�る - -@tutorial_1055_p -# If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. - -@tutorial_1056_p -# If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. - -@tutorial_1057_h3 -ブラウザを使用�?��?�サー�?ー�?�接続 - -@tutorial_1058_p -# If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. 
If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. If you enabled TLS on the server side, the URL needs to start with https://. - -@tutorial_1059_h3 -複数�?��?�時セッション - -@tutorial_1060_p -# Multiple concurrent browser sessions are supported. As that the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. - -@tutorial_1061_h3 -ログイン - -@tutorial_1062_p -# At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. - -@tutorial_1063_p -# You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). - -@tutorial_1064_h3 -エラーメッセージ - -@tutorial_1065_p -# Error messages in are shown in red. You can show/hide the stack trace of the exception by clicking on the message. - -@tutorial_1066_h3 -データベースドライ�?�?�追加 - -@tutorial_1067_p -# To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar. - -@tutorial_1068_p -# Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted. - -@tutorial_1069_h3 -#Using the H2 Console - -@tutorial_1070_p -# The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. 
- -@tutorial_1071_h3 -テーブル�??�?�?��?��?�カラム�??をインサート�?�る - -@tutorial_1072_p -# To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. - -@tutorial_1073_h3 -切断�?�アプリケーション�?�終了 - -@tutorial_1074_p -# To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. - -@tutorial_1075_p -# To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. - -@tutorial_1076_h2 -#Special H2 Console Syntax - -@tutorial_1077_p -# The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. - -@tutorial_1078_th -#Command(s) - -@tutorial_1079_th -説明 - -@tutorial_1080_td -# @autocommit_true; - -@tutorial_1081_td -# @autocommit_false; - -@tutorial_1082_td -# Enable or disable autocommit. - -@tutorial_1083_td -# @cancel; - -@tutorial_1084_td -# Cancel the currently running statement. - -@tutorial_1085_td -# @columns null null TEST; - -@tutorial_1086_td -# @index_info null null TEST; - -@tutorial_1087_td -# @tables; - -@tutorial_1088_td -# @tables null null TEST; - -@tutorial_1089_td -# Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. 
The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns - -@tutorial_1090_td -# @edit select * from test; - -@tutorial_1091_td -# Use an updatable result set. - -@tutorial_1092_td -# @generated insert into test() values(); - -@tutorial_1093_td -# Show the result of Statement.getGeneratedKeys(). - -@tutorial_1094_td -# @history; - -@tutorial_1095_td -# List the command history. - -@tutorial_1096_td -# @info; - -@tutorial_1097_td -# Display the result of various Connection and DatabaseMetaData methods. - -@tutorial_1098_td -# @list select * from test; - -@tutorial_1099_td -# Show the result set in list format (each column on its own line, with row numbers). - -@tutorial_1100_td -# @loop 1000 select ?, ?/*rnd*/; - -@tutorial_1101_td -# @loop 1000 @statement select ?; - -@tutorial_1102_td -# Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. - -@tutorial_1103_td -# @maxrows 20; - -@tutorial_1104_td -# Set the maximum number of rows to display. - -@tutorial_1105_td -# @memory; - -@tutorial_1106_td -# Show the used and free memory. This will call System.gc(). - -@tutorial_1107_td -# @meta select 1; - -@tutorial_1108_td -# List the ResultSetMetaData after running the query. - -@tutorial_1109_td -# @parameter_meta select ?; - -@tutorial_1110_td -# Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. 
- -@tutorial_1111_td -# @prof_start; - -@tutorial_1112_td -# call hash('SHA256', '', 1000000); - -@tutorial_1113_td -# @prof_stop; - -@tutorial_1114_td -# Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). - -@tutorial_1115_td -# @prof_start; - -@tutorial_1116_td -# @sleep 10; - -@tutorial_1117_td -# @prof_stop; - -@tutorial_1118_td -# Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). - -@tutorial_1119_td -# @transaction_isolation; - -@tutorial_1120_td -# @transaction_isolation 2; - -@tutorial_1121_td -# Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. - -@tutorial_1122_h2 -#Settings of the H2 Console - -@tutorial_1123_p -# The settings of the H2 Console are stored in a configuration file called .h2.server.properties in you user home directory. For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are: - -@tutorial_1124_code -#webAllowOthers - -@tutorial_1125_li -#: allow other computers to connect. - -@tutorial_1126_code -#webPort - -@tutorial_1127_li -#: the port of the H2 Console - -@tutorial_1128_code -#webSSL - -@tutorial_1129_li -#: use encrypted TLS (HTTPS) connections. - -@tutorial_1130_p -# In addition to those settings, the properties of the last recently used connection are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa - -@tutorial_1131_h2 -JDBCを使用�?��?�データベース�?�接続 - -@tutorial_1132_p -# To connect to a database, a Java application first needs to load the database driver, and then get a connection. 
A simple way to do that is using the following code: - -@tutorial_1133_p -# This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. - -@tutorial_1134_h2 -新�?��?�データベースを作�?�?�る - -@tutorial_1135_p -# By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. - -@tutorial_1136_p -# Auto-creating new database can be disabled, see Opening a Database Only if it Already Exists. - -@tutorial_1137_h2 -サー�?ーを使用�?�る - -@tutorial_1138_p -# H2 currently supports three server: a web server (for the H2 Console), a TCP server (for client/server connections) and an PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. - -@tutorial_1139_h3 -#Starting the Server Tool from Command Line - -@tutorial_1140_p -# To start the Server tool from the command line with the default settings, run: - -@tutorial_1141_p -# This will start the tool with the default options. To get the list of options and default values, run: - -@tutorial_1142_p -# There are options available to use other ports, and start or not start parts. 
- -@tutorial_1143_h3 -TCPサー�?ー�?�接続�?�る - -@tutorial_1144_p -# To remotely connect to a database using the TCP server, use the following driver and database URL: - -@tutorial_1145_li -#JDBC driver class: org.h2.Driver - -@tutorial_1146_li -#Database URL: jdbc:h2:tcp://localhost/~/test - -@tutorial_1147_p -# For details about the database URL, see also in Features. Please note that you can't connection with a web browser to this URL. You can only connect using a H2 client (over JDBC). - -@tutorial_1148_h3 -#Starting the TCP Server within an Application - -@tutorial_1149_p -# Servers can also be started and stopped from within an application. Sample code: - -@tutorial_1150_h3 -他�?��?�程�?�らTCPサー�?ーを終了�?�る - -@tutorial_1151_p -# The TCP server can be stopped from another process. To stop the server from the command line, run: - -@tutorial_1152_p -# To stop the server from a user application, use the following code: - -@tutorial_1153_p -# This function will only stop the TCP server. If other server were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). - -@tutorial_1154_h2 -Hibernateを使用�?�る - -@tutorial_1155_p -# This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. - -@tutorial_1156_p -# When using Hibernate, try to use the H2Dialect if possible. 
When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. - -@tutorial_1157_h2 -#Using TopLink and Glassfish - -@tutorial_1158_p -# To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. - -@tutorial_1159_p -# The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml: - -@tutorial_1160_p -# In old versions of Glassfish, the property name is toplink.platform.class.name. - -@tutorial_1161_p -# To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. - -@tutorial_1162_h2 -#Using EclipseLink - -@tutorial_1163_p -# To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. - -@tutorial_1164_h2 -#Using Apache ActiveMQ - -@tutorial_1165_p -# When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. 
The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. - -@tutorial_1166_h2 -#Using H2 within NetBeans - -@tutorial_1167_p -# The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. - -@tutorial_1168_p -# There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one. - -@tutorial_1169_h2 -#Using H2 with jOOQ - -@tutorial_1170_p -# jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema: - -@tutorial_1171_p -# then run the jOOQ code generator on the command line using this command: - -@tutorial_1172_p -# ...where codegen.xml is on the classpath and contains this information - -@tutorial_1173_p -# Using the generated source, you can query the database as follows: - -@tutorial_1174_p -# See more details on jOOQ Homepage and in the jOOQ Tutorial - -@tutorial_1175_h2 -Webアプリケーション�?� データベースを使用�?�る - -@tutorial_1176_p -# There are multiple ways to access a database from within web applications. 
Here are some examples if you use Tomcat or JBoss. - -@tutorial_1177_h3 -エンベッドモード - -@tutorial_1178_p -# The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed). - -@tutorial_1179_h3 -サー�?ーモード - -@tutorial_1180_p -# The server mode is similar, but it allows you to run the server in another process. - -@tutorial_1181_h3 -データベース�?�起動�?�終了�?�Servletリスナーを使用�?�る - -@tutorial_1182_p -# Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section): - -@tutorial_1183_p -# For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. 
If you want to use this connection within your servlet, you can access as follows: - -@tutorial_1184_code -#DbStarter - -@tutorial_1185_p -# can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags: - -@tutorial_1186_p -# When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. - -@tutorial_1187_h3 -#Using the H2 Console Servlet - -@tutorial_1188_p -# The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the the h2*.jar file in your application, and add the following configuration to your web.xml: - -@tutorial_1189_p -# For details, see also src/tools/WEB-INF/web.xml. - -@tutorial_1190_p -# To create a web application with just the H2 Console, run the following command: - -@tutorial_1191_h2 -#Android - -@tutorial_1192_p -# You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. - -@tutorial_1193_p -# Reasons to use H2 instead of SQLite are: - -@tutorial_1194_li -#Full Unicode support including UPPER() and LOWER(). - -@tutorial_1195_li -#Streaming API for BLOB and CLOB data. - -@tutorial_1196_li -#Fulltext search. 
- -@tutorial_1197_li -#Multiple connections. - -@tutorial_1198_li -#User defined functions and triggers. - -@tutorial_1199_li -#Database file encryption. - -@tutorial_1200_li -#Reading and writing CSV files (this feature can be used outside the database as well). - -@tutorial_1201_li -#Referential integrity and check constraints. - -@tutorial_1202_li -#Better data type and SQL support. - -@tutorial_1203_li -#In-memory databases, read-only databases, linked tables. - -@tutorial_1204_li -#Better compatibility with other databases which simplifies porting applications. - -@tutorial_1205_li -#Possibly better performance (so far for read operations). - -@tutorial_1206_li -#Server mode (accessing a database on a different machine over TCP/IP). - -@tutorial_1207_p -# Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). - -@tutorial_1208_p -# The database files needs to be stored in a place that is accessible for the application. Example: - -@tutorial_1209_p -# Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. - -@tutorial_1210_h2 -CSV (Comma Separated Values) サ�?ート - -@tutorial_1211_p -# The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. - -@tutorial_1212_h3 -データベース内�?�らCSVファイルを読�?�込む - -@tutorial_1213_p -# A CSV file can be read using the function CSVREAD. Example: - -@tutorial_1214_p -# Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. 
- -@tutorial_1215_h3 -#Importing Data from a CSV File - -@tutorial_1216_p -# A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. - -@tutorial_1217_h3 -データベース内�?�らCSVファイル�?�書�??込む - -@tutorial_1218_p -# The built-in function CSVWRITE can be used to create a CSV file from a query. Example: - -@tutorial_1219_h3 -Javaアプリケーション�?�らCSVファイル�?�書�??込む - -@tutorial_1220_p -# The Csv tool can be used in a Java application even when not using a database at all. Example: - -@tutorial_1221_h3 -Javaアプリケーション�?�らCSVファイルを読�?�込む - -@tutorial_1222_p -# It is possible to read a CSV file without opening a database. Example: - -@tutorial_1223_h2 -アップグレード�? �?ックアップ�?修復 - -@tutorial_1224_h3 -データベース�?�アップグレー - -@tutorial_1225_p -# The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. - -@tutorial_1226_h3 -�?ックアップ - -@tutorial_1227_p -# The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows: - -@tutorial_1228_p -# It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. 
- -@tutorial_1229_h3 -修復 - -@tutorial_1230_p -# To restore a database from a SQL script file, you can use the RunScript tool: - -@tutorial_1231_p -# For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the references script files need to be available on the server side. - -@tutorial_1232_h3 -オンライン�?ックアップ - -@tutorial_1233_p -# The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. - -@tutorial_1234_p -# The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. - -@tutorial_1235_p -# The Backup tool (org.h2.tools.Backup) can not be used to create a online backup; the database must not be in use while running this program. - -@tutorial_1236_p -# Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order. - -@tutorial_1237_h2 -#Command Line Tools - -@tutorial_1238_p -# This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example: - -@tutorial_1239_p -# The command line tools are: - -@tutorial_1240_code -�?ックアップ - -@tutorial_1241_li -# creates a backup of a database. - -@tutorial_1242_code -#ChangeFileEncryption - -@tutorial_1243_li -# allows changing the file encryption password or algorithm of a database. 
- -@tutorial_1244_code -#Console - -@tutorial_1245_li -# starts the browser based H2 Console. - -@tutorial_1246_code -#ConvertTraceFile - -@tutorial_1247_li -# converts a .trace.db file to a Java application and SQL script. - -@tutorial_1248_code -#CreateCluster - -@tutorial_1249_li -# creates a cluster from a standalone database. - -@tutorial_1250_code -#DeleteDbFiles - -@tutorial_1251_li -# deletes all files belonging to a database. - -@tutorial_1252_code -#Recover - -@tutorial_1253_li -# helps recovering a corrupted database. - -@tutorial_1254_code -#Restore - -@tutorial_1255_li -# restores a backup of a database. - -@tutorial_1256_code -#RunScript - -@tutorial_1257_li -# runs a SQL script against a database. - -@tutorial_1258_code -#Script - -@tutorial_1259_li -# allows converting a database to a SQL script for backup or migration. - -@tutorial_1260_code -Server - -@tutorial_1261_li -# is used in the server mode to start a H2 server. - -@tutorial_1262_code -#Shell - -@tutorial_1263_li -# is a command line database tool. - -@tutorial_1264_p -# The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. - -@tutorial_1265_h2 -#The Shell Tool - -@tutorial_1266_p -# The Shell tool is a simple interactive command line tool. To start it, type: - -@tutorial_1267_p -# You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements: - -@tutorial_1268_p -# By default, results are printed as a table. For results with many column, consider using the list mode: - -@tutorial_1269_h2 -OpenOffice Baseを使用�?�る - -@tutorial_1270_p -# OpenOffice.org Base supports database access over the JDBC API. 
To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are: - -@tutorial_1271_li -#Start OpenOffice Writer, go to [Tools], [Options] - -@tutorial_1272_li -#Make sure you have selected a Java runtime environment in OpenOffice.org / Java - -@tutorial_1273_li -#Click [Class Path...], [Add Archive...] - -@tutorial_1274_li -#Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1275_li -#Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) - -@tutorial_1276_li -#Start OpenOffice Base - -@tutorial_1277_li -#Connect to an existing database; select [JDBC]; [Next] - -@tutorial_1278_li -#Example datasource URL: jdbc:h2:~/test - -@tutorial_1279_li -#JDBC driver class: org.h2.Driver - -@tutorial_1280_p -# Now you can access the database stored in the current users home directory. - -@tutorial_1281_p -# To use H2 in NeoOffice (OpenOffice without X11): - -@tutorial_1282_li -#In NeoOffice, go to [NeoOffice], [Preferences] - -@tutorial_1283_li -#Look for the page under [NeoOffice], [Java] - -@tutorial_1284_li -#Click [Class Path], [Add Archive...] - -@tutorial_1285_li -#Select your h2 jar file (location is up to you, could be wherever you choose) - -@tutorial_1286_li -#Click [OK] (as much as needed), restart NeoOffice. - -@tutorial_1287_p -# Now, when creating a new database using the "Database Wizard" : - -@tutorial_1288_li -#Click [File], [New], [Database]. - -@tutorial_1289_li -#Select [Connect to existing database] and the select [JDBC]. Click next. 
- -@tutorial_1290_li -#Example datasource URL: jdbc:h2:~/test - -@tutorial_1291_li -#JDBC driver class: org.h2.Driver - -@tutorial_1292_p -# Another solution to use H2 in NeoOffice is: - -@tutorial_1293_li -#Package the h2 jar within an extension package - -@tutorial_1294_li -#Install it as a Java extension in NeoOffice - -@tutorial_1295_p -# This can be done by create it using the NetBeans OpenOffice plugin. See also Extensions Development. - -@tutorial_1296_h2 -Java Web Start / JNLP - -@tutorial_1297_p -# When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags: - -@tutorial_1298_h2 -#Using a Connection Pool - -@tutorial_1299_p -# For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as faster to get a connection from the built-in connection pool than to get one using DriverManager.getConnection().The build-in connection pool is used as follows: - -@tutorial_1300_h2 -フルテキストサー�? - -@tutorial_1301_p -# H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. - -@tutorial_1302_h3 -#Using the Native Fulltext Search - -@tutorial_1303_p -# To initialize, call: - -@tutorial_1304_p -# You need to initialize it in each database where you want to use it. 
Afterwards, you can create a fulltext index for a table using: - -@tutorial_1305_p -# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query: - -@tutorial_1306_p -# This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1307_p -# To drop an index on a table: - -@tutorial_1308_p -# To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1309_p -# You can also call the index from within a Java application: - -@tutorial_1310_h3 -Luceneフルテキストサーチを使用する - -@tutorial_1311_p -# To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call: - -@tutorial_1312_p -# You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using: - -@tutorial_1313_p -# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. 
To search the index, use the following query: - -@tutorial_1314_p -# This will produce a result set that contains the query needed to retrieve the data: - -@tutorial_1315_p -# To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database): - -@tutorial_1316_p -# To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0]; - -@tutorial_1317_p -# You can also call the index from within a Java application: - -@tutorial_1318_p -# The Lucene fulltext search supports searching in a specific column only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example: - -@tutorial_1319_p -# The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed. - -@tutorial_1320_h2 -#User-Defined Variables - -@tutorial_1321_p -# This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and are session scoped, which means they are only visible from within the session in which they are defined. A value is usually assigned using the SET command: - -@tutorial_1322_p -# The value can also be changed using the SET() method. This is useful in queries: - -@tutorial_1323_p -# Variables that are not set evaluate to NULL. 
The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. - -@tutorial_1324_h2 -#Date and Time - -@tutorial_1325_p -# Date, time and timestamp values support ISO 8601 formatting, including time zone: - -@tutorial_1326_p -# If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. - -@tutorial_1327_h2 -#Using Spring - -@tutorial_1328_h3 -#Using the TCP Server - -@tutorial_1329_p -# Use the following configuration to start and stop the H2 TCP server using the Spring Framework: - -@tutorial_1330_p -# The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. - -@tutorial_1331_h3 -#Error Code Incompatibility - -@tutorial_1332_p -# There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath: - -@tutorial_1333_h2 -#OSGi - -@tutorial_1334_p -# The standard H2 jar can be dropped in as a bundle in an OSGi container. 
H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver and OSGI_JDBC_DRIVER_NAME=H2. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. - -@tutorial_1335_p -# The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. - -@tutorial_1336_h2 -#Java Management Extension (JMX) - -@tutorial_1337_p -# Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). - -@tutorial_1338_p -# The following attributes and operations are supported: - -@tutorial_1339_code -#CacheSize - -@tutorial_1340_li -#: the cache size currently in use in KB. - -@tutorial_1341_code -#CacheSizeMax - -@tutorial_1342_li -# (read/write): the maximum cache size in KB. - -@tutorial_1343_code -#Exclusive - -@tutorial_1344_li -#: whether this database is open in exclusive mode or not. - -@tutorial_1345_code -#FileReadCount - -@tutorial_1346_li -#: the number of file read operations since the database was opened. - -@tutorial_1347_code -#FileSize - -@tutorial_1348_li -#: the file size in KB. 
- -@tutorial_1349_code -#FileWriteCount - -@tutorial_1350_li -#: the number of file write operations since the database was opened. - -@tutorial_1351_code -#FileWriteCountTotal - -@tutorial_1352_li -#: the number of file write operations since the database was created. - -@tutorial_1353_code -#LogMode - -@tutorial_1354_li -# (read/write): the current transaction log mode. See SET LOG for details. - -@tutorial_1355_code -#Mode - -@tutorial_1356_li -#: the compatibility mode (REGULAR if no compatibility mode is used). - -@tutorial_1357_code -#MultiThreaded - -@tutorial_1358_li -#: true if multi-threaded is enabled. - -@tutorial_1359_code -#Mvcc - -@tutorial_1360_li -#: true if MVCC is enabled. - -@tutorial_1361_code -#ReadOnly - -@tutorial_1362_li -#: true if the database is read-only. - -@tutorial_1363_code -#TraceLevel - -@tutorial_1364_li -# (read/write): the file trace level. - -@tutorial_1365_code -#Version - -@tutorial_1366_li -#: the database version in use. - -@tutorial_1367_code -#listSettings - -@tutorial_1368_li -#: list the database settings. - -@tutorial_1369_code -#listSessions - -@tutorial_1370_li -#: list the open sessions, including currently executing statement (if any) and locked tables (if any). - -@tutorial_1371_p -# To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. 
- diff --git a/h2/src/docsrc/textbase/_docs_en.properties b/h2/src/docsrc/textbase/_docs_en.properties deleted file mode 100644 index 77324149c2..0000000000 --- a/h2/src/docsrc/textbase/_docs_en.properties +++ /dev/null @@ -1,3995 +0,0 @@ -advanced_1000_h1=Advanced -advanced_1001_a=\ Result Sets -advanced_1002_a=\ Large Objects -advanced_1003_a=\ Linked Tables -advanced_1004_a=\ Spatial Features -advanced_1005_a=\ Recursive Queries -advanced_1006_a=\ Updatable Views -advanced_1007_a=\ Transaction Isolation -advanced_1008_a=\ Multi-Version Concurrency Control (MVCC) -advanced_1009_a=\ Clustering / High Availability -advanced_1010_a=\ Two Phase Commit -advanced_1011_a=\ Compatibility -advanced_1012_a=\ Standards Compliance -advanced_1013_a=\ Run as Windows Service -advanced_1014_a=\ ODBC Driver -advanced_1015_a=\ Using H2 in Microsoft .NET -advanced_1016_a=\ ACID -advanced_1017_a=\ Durability Problems -advanced_1018_a=\ Using the Recover Tool -advanced_1019_a=\ File Locking Protocols -advanced_1020_a=\ Using Passwords -advanced_1021_a=\ Password Hash -advanced_1022_a=\ Protection against SQL Injection -advanced_1023_a=\ Protection against Remote Access -advanced_1024_a=\ Restricting Class Loading and Usage -advanced_1025_a=\ Security Protocols -advanced_1026_a=\ TLS Connections -advanced_1027_a=\ Universally Unique Identifiers (UUID) -advanced_1028_a=\ Settings Read from System Properties -advanced_1029_a=\ Setting the Server Bind Address -advanced_1030_a=\ Pluggable File System -advanced_1031_a=\ Split File System -advanced_1032_a=\ Database Upgrade -advanced_1033_a=\ Java Objects Serialization -advanced_1034_a=\ Limits and Limitations -advanced_1035_a=\ Glossary and Links -advanced_1036_h2=Result Sets -advanced_1037_h3=Statements that Return a Result Set -advanced_1038_p=\ The following statements return a result set\: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count. 
-advanced_1039_h3=Limiting the Number of Rows -advanced_1040_p=\ Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example\: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max). -advanced_1041_h3=Large Result Sets and External Sorting -advanced_1042_p=\ For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together. -advanced_1043_h2=Large Objects -advanced_1044_h3=Storing and Reading Large Objects -advanced_1045_p=\ If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side. -advanced_1046_h3=When to use CLOB/BLOB -advanced_1047_p=\ By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. 
The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column. -advanced_1048_h3=Large Object Compression -advanced_1049_p=\ The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS\=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster. -advanced_1050_h2=Linked Tables -advanced_1051_p=\ This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement\: -advanced_1052_p=\ You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID\=1, then the following query is run against the PostgreSQL database\: SELECT * FROM TEST WHERE ID\=?. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible. -advanced_1053_p=\ To view the statements that are executed against the target table, set the trace level to 3. -advanced_1054_p=\ If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections\=false. 
-advanced_1055_p=\ The statement CREATE LINKED TABLE supports an optional schema name parameter. -advanced_1056_p=\ The following are not supported because they may result in a deadlock\: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead). -advanced_1057_p=\ Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type. -advanced_1058_h2=Updatable Views -advanced_1059_p=\ By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows\: -advanced_1060_p=\ Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView. -advanced_1061_h2=Transaction Isolation -advanced_1062_p=\ Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details. -advanced_1063_p=\ Transaction isolation is provided for all data manipulation language (DML) statements. -advanced_1064_p=\ Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect). -advanced_1065_p=\ This database supports the following transaction isolation levels\: -advanced_1066_b=Read Committed -advanced_1067_li=\ This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level. 
-advanced_1068_li=\ To enable, execute the SQL statement SET LOCK_MODE 3 -advanced_1069_li=\ or append ;LOCK_MODE\=3 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=3 -advanced_1070_b=Serializable -advanced_1071_li=\ Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1 -advanced_1072_li=\ or append ;LOCK_MODE\=1 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=1 -advanced_1073_b=Read Uncommitted -advanced_1074_li=\ This level means that transaction isolation is disabled. -advanced_1075_li=\ To enable, execute the SQL statement SET LOCK_MODE 0 -advanced_1076_li=\ or append ;LOCK_MODE\=0 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=0 -advanced_1077_p=\ When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited. -advanced_1078_b=Dirty Reads -advanced_1079_li=\ Means a connection can read uncommitted changes made by another connection. -advanced_1080_li=\ Possible with\: read uncommitted -advanced_1081_b=Non-Repeatable Reads -advanced_1082_li=\ A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result. -advanced_1083_li=\ Possible with\: read uncommitted, read committed -advanced_1084_b=Phantom Reads -advanced_1085_li=\ A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row. -advanced_1086_li=\ Possible with\: read uncommitted, read committed -advanced_1087_h3=Table Level Locking -advanced_1088_p=\ The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. 
Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random. -advanced_1089_h3=Lock Timeout -advanced_1090_p=\ If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection. -advanced_1091_h2=Multi-Version Concurrency Control (MVCC) -advanced_1092_p=\ The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). 
If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires. -advanced_1093_p=\ To use the MVCC feature, append ;MVCC\=TRUE to the database URL\: -advanced_1094_p=\ The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open. -advanced_1095_p=\ If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect. -advanced_1096_div=\ The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine\: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are\: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED\=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability -advanced_1097_p=\ This database supports a simple clustering / high availability mechanism. The architecture is\: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up. -advanced_1098_p=\ Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. 
Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT\=TRUE, they will recover from that. -advanced_1099_p=\ To initialize the cluster, use the following steps\: -advanced_1100_li=Create a database -advanced_1101_li=Use the CreateCluster tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data. -advanced_1102_li=Start two servers (one for each copy of the database) -advanced_1103_li=You are now ready to connect to the databases with the client application(s) -advanced_1104_h3=Using the CreateCluster Tool -advanced_1105_p=\ To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers. -advanced_1106_li=Create two directories\: server1, server2. Each directory will simulate a directory on a computer. -advanced_1107_li=Start a TCP server pointing to the first directory. You can do this using the command line\: -advanced_1108_li=Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line\: -advanced_1109_li=Use the CreateCluster tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line\: -advanced_1110_li=You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc\:h2\:tcp\://localhost\:9101,localhost\:9102/~/test -advanced_1111_li=If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible. -advanced_1112_li=To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster tool. 
-advanced_1113_h3=Detect Which Cluster Instances are Running -advanced_1114_p=\ To find out which cluster nodes are currently running, execute the following SQL statement\: -advanced_1115_p=\ If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example\: 'server1\:9191,server2\:9191'. -advanced_1116_p=\ It is also possible to get the list of servers by using Connection.getClientInfo(). -advanced_1117_p=\ The property list returned from getClientInfo() contains a numServers property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo() also has properties server0..serverX, where serverX is the number of servers minus 1. -advanced_1118_p=\ Example\: To get the 2nd server in the connection list one uses getClientInfo('server1'). Note\: The serverX property only returns IP addresses and ports and not hostnames. -advanced_1119_h3=Clustering Algorithm and Limitations -advanced_1120_p=\ Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care\: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements). 
-advanced_1121_p=\ When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side. -advanced_1122_p=\ The SQL statement SET AUTOCOMMIT FALSE is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false) needs to be called. -advanced_1123_p=\ It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row. -advanced_1124_h2=Two Phase Commit -advanced_1125_p=\ The two phase commit protocol is supported. 2-phase-commit works as follows\: -advanced_1126_li=Autocommit needs to be switched off -advanced_1127_li=A transaction is started, for example by inserting a row -advanced_1128_li=The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName -advanced_1129_li=The transaction can now be committed or rolled back -advanced_1130_li=If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt' -advanced_1131_li=When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT -advanced_1132_li=Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName or ROLLBACK TRANSACTION transactionName -advanced_1133_li=The database needs to be closed and re-opened to apply the changes -advanced_1134_h2=Compatibility -advanced_1135_p=\ This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible. 
-advanced_1136_h3=Transaction Commit when Autocommit is On -advanced_1137_p=\ At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed. -advanced_1138_h3=Keywords / Reserved Words -advanced_1139_p=\ There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently\: -advanced_1140_code=\ CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE -advanced_1141_p=\ Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP. -advanced_1142_h2=Standards Compliance -advanced_1143_p=\ This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date\: SQL-92, SQL\:1999, and SQL\:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases. -advanced_1144_h3=Supported Character Sets, Character Encoding, and Unicode -advanced_1145_p=\ H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use. 
-advanced_1146_h2=Run as Windows Service -advanced_1147_p=\ Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service. -advanced_1148_p=\ The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger. -advanced_1149_p=\ When running the database as a service, absolute path should be used. Using ~ in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place. -advanced_1150_h3=Install the Service -advanced_1151_p=\ The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. -advanced_1152_h3=Start the Service -advanced_1153_p=\ You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat. Please note that the batch file does not print an error message if the service is not installed. -advanced_1154_h3=Connect to the H2 Console -advanced_1155_p=\ After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file. -advanced_1156_h3=Stop the Service -advanced_1157_p=\ To stop the service, double click on 4_stop_service.bat. 
Please note that the batch file does not print an error message if the service is not installed or started. -advanced_1158_h3=Uninstall the Service -advanced_1159_p=\ To uninstall the service, double click on 5_uninstall_service.bat. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear. -advanced_1160_h3=Additional JDBC drivers -advanced_1161_p=\ To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted. -advanced_1162_h2=ODBC Driver -advanced_1163_p=\ This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications. -advanced_1164_p=\ To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c\:/windows/syswow64/odbcad32.exe. At this point you set up your DSN just like you would on any other system. See also\: Re\: ODBC Driver on Windows 64 bit -advanced_1165_h3=ODBC Installation -advanced_1166_p=\ First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http\://www.postgresql.org/ftp/odbc/versions/msi. -advanced_1167_h3=Starting the Server -advanced_1168_p=\ After installing the ODBC driver, start the H2 Server using the command line\: -advanced_1169_p=\ The PG Server (PG for PostgreSQL protocol) is started as well. 
By default, databases are stored in the current working directory where the server is started. Use -baseDir to save databases in another directory, for example the user home directory\: -advanced_1170_p=\ The PG server can be started and stopped from within a Java application as follows\: -advanced_1171_p=\ By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers when starting the server. -advanced_1172_p=\ To map an ODBC database name to a different JDBC database name, use the option -key when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST to the database URL jdbc\:h2\:~/data/test;cipher\=aes\: -advanced_1173_h3=ODBC Configuration -advanced_1174_p=\ After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini file (which may be different from the GUI). -advanced_1175_th=Property -advanced_1176_th=Example -advanced_1177_th=Remarks -advanced_1178_td=Data Source -advanced_1179_td=H2 Test -advanced_1180_td=The name of the ODBC Data Source -advanced_1181_td=Database -advanced_1182_td=~/test;ifexists\=true -advanced_1183_td=\ The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters. -advanced_1184_td=Servername -advanced_1185_td=localhost -advanced_1186_td=The server name or IP address. -advanced_1187_td=By default, only remote connections are allowed -advanced_1188_td=Username -advanced_1189_td=sa -advanced_1190_td=The database user name. 
-advanced_1191_td=SSL -advanced_1192_td=false (disabled) -advanced_1193_td=At this time, SSL is not supported. -advanced_1194_td=Port -advanced_1195_td=5435 -advanced_1196_td=The port where the PG Server is listening. -advanced_1197_td=Password -advanced_1198_td=sa -advanced_1199_td=The database password. -advanced_1200_p=\ To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare. -advanced_1201_p=\ Afterwards, you may use this data source. -advanced_1202_h3=PG Protocol Support Limitations -advanced_1203_p=\ At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC. -advanced_1204_p=\ PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver. -advanced_1205_h3=Security Considerations -advanced_1206_p=\ Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important. -advanced_1207_p=\ The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator. -advanced_1208_h3=Using Microsoft Access -advanced_1209_p=\ When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option\: Tools - Options - Edit/Find - ODBC fields. 
-advanced_1210_h2=Using H2 in Microsoft .NET -advanced_1211_p=\ The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface. -advanced_1212_h3=Using the ADO.NET API on .NET -advanced_1213_p=\ An implementation of the ADO.NET interface is available in the open source project H2Sharp. -advanced_1214_h3=Using the JDBC API on .NET -advanced_1215_li=Install the .NET Framework from Microsoft. Mono has not yet been tested. -advanced_1216_li=Install IKVM.NET. -advanced_1217_li=Copy the h2*.jar file to ikvm/bin -advanced_1218_li=Run the H2 Console using\: ikvm -jar h2*.jar -advanced_1219_li=Convert the H2 Console to an .exe file using\: ikvmc -target\:winexe h2*.jar. You may ignore the warnings. -advanced_1220_li=Create a .dll file using (change the version accordingly)\: ikvmc.exe -target\:library -version\:1.0.69.0 h2*.jar -advanced_1221_p=\ If you want your C\# application to use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C\# solution. Here is some sample code\: -advanced_1222_h2=ACID -advanced_1223_p=\ In the database world, ACID stands for\: -advanced_1224_li=Atomicity\: transactions must be atomic, meaning either all tasks are performed or none. -advanced_1225_li=Consistency\: all operations must comply with the defined constraints. -advanced_1226_li=Isolation\: transactions must be isolated from each other. -advanced_1227_li=Durability\: committed transactions will not be lost. -advanced_1228_h3=Atomicity -advanced_1229_p=\ Transactions in this database are always atomic. -advanced_1230_h3=Consistency -advanced_1231_p=\ By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled. -advanced_1232_h3=Isolation -advanced_1233_p=\ For H2, as with most other database systems, the default isolation level is 'read committed'. 
This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'. -advanced_1234_h3=Durability -advanced_1235_p=\ This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode. -advanced_1236_h2=Durability Problems -advanced_1237_p=\ Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test. -advanced_1238_h3=Ways to (Not) Achieve Durability -advanced_1239_p=\ Making sure that committed transactions are not lost is more complicated than it seems first. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd\: -advanced_1240_code=rwd -advanced_1241_li=\: every update to the file's content is written synchronously to the underlying storage device. -advanced_1242_code=rws -advanced_1243_li=\: in addition to rwd, every update to the metadata is written synchronously. -advanced_1244_p=\ A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. 
Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that. -advanced_1245_p=\ Calling fsync flushes the buffers. There are two ways to do that in Java\: -advanced_1246_code=FileDescriptor.sync() -advanced_1247_li=. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium. -advanced_1248_code=FileChannel.force() -advanced_1249_li=. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it. -advanced_1250_p=\ By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync()\: see Your Hard Drive Lies to You. In Mac OS X, fsync does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem. -advanced_1251_p=\ Trying to flush hard drive buffers is hard, and if you do the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. 
Tests show that this can not be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions. -advanced_1252_p=\ In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it. -advanced_1253_h3=Running the Durability Test -advanced_1254_p=\ To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then creates the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application. -advanced_1255_h2=Using the Recover Tool -advanced_1256_p=\ The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). 
To run the tool, type on the command line\: -advanced_1257_p=\ For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script. -advanced_1258_p=\ The Recover tool creates a SQL script from database file. It also processes the transaction log. -advanced_1259_p=\ To verify the database can recover at any time, append ;RECOVER_TEST\=64 to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting. -advanced_1260_h2=File Locking Protocols -advanced_1261_p=\ Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each others data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted. -advanced_1262_p=\ In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. 
That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file method' and 'socket method'. -advanced_1263_p=\ The file locking protocols (except the file locking method 'FS') have the following limitation\: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep. -advanced_1264_h3=File Locking Method 'File' -advanced_1265_p=\ The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is\: -advanced_1266_li=If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers. -advanced_1267_li=\ If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (once every second by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. 
The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread does use very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it. -advanced_1268_li=\ If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, he will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked. -advanced_1269_p=\ This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. However using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop. -advanced_1270_h3=File Locking Method 'Socket' -advanced_1271_p=\ There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK\=SOCKET to the database URL. The algorithm is\: -advanced_1272_li=If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. 
The port and IP address of the process that opened the database is written into the lock file. -advanced_1273_li=If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method. -advanced_1274_li=If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again. -advanced_1275_p=\ This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is, if the file is stored on a network share, two processes (running on different computers) could still open the same database files, if they do not have a direct TCP/IP connection. -advanced_1276_h3=File Locking Method 'FS' -advanced_1277_p=\ This is the default mode for version 1.4 and newer. This database file locking mechanism uses native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow to lock the same file multiple times within the same virtual machine, and on some system native file locking is not supported or files are not unlocked after a power failure. -advanced_1278_p=\ To enable this feature, append ;FILE_LOCK\=FS to the database URL. -advanced_1279_p=\ This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected. -advanced_1280_h2=Using Passwords -advanced_1281_h3=Using Secure Passwords -advanced_1282_p=\ Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. 
Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is\: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example\: -advanced_1283_code=i'sE2rtPiUKtT -advanced_1284_p=\ from the sentence it's easy to remember this password if you know the trick. -advanced_1285_h3=Passwords\: Using Char Arrays instead of Strings -advanced_1286_p=\ Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system. -advanced_1287_p=\ It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file. -advanced_1288_p=\ This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that\: -advanced_1289_p=\ This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField. -advanced_1290_h3=Passing the User Name and/or Password in the URL -advanced_1291_p=\ Instead of passing the user name as a separate parameter as in Connection conn \= DriverManager. getConnection("jdbc\:h2\:~/test", "sa", "123"); the user name (and/or password) can be supplied in the URL itself\: Connection conn \= DriverManager. 
getConnection("jdbc\:h2\:~/test;USER\=sa;PASSWORD\=123"); The settings in the URL override the settings passed as a separate parameter. -advanced_1292_h2=Password Hash -advanced_1293_p=\ Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash. -advanced_1294_p=\ To connect using the password hash instead of plain text password, append ;PASSWORD_HASH\=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool\: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run\: @password_hash file <filePassword>. -advanced_1295_h2=Protection against SQL Injection -advanced_1296_h3=What is SQL Injection -advanced_1297_p=\ This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. 
Some applications build SQL statements with embedded user input such as\: -advanced_1298_p=\ If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password\: ' OR ''\='. In this case the statement becomes\: -advanced_1299_p=\ Which is always true no matter what the password stored in the database is. For more information about SQL Injection, see Glossary and Links. -advanced_1300_h3=Disabling Literals -advanced_1301_p=\ SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement\: -advanced_1302_p=\ This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement\: -advanced_1303_p=\ Afterwards, SQL statements with text and number literals are not allowed any more. That means, SQL statement of the form WHERE NAME\='abc' or WHERE CustomerId\=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed\: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator. -advanced_1304_h3=Using Constants -advanced_1305_p=\ Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. 
To avoid name clashes with column names, constants can be defined in other schemas\: -advanced_1306_p=\ Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change. -advanced_1307_h3=Using the ZERO() Function -advanced_1308_p=\ It is not required to create a constant for the number 0 as there is already a built-in function ZERO()\: -advanced_1309_h2=Protection against Remote Access -advanced_1310_p=\ By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers. -advanced_1311_p=\ If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir, -ifExists, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords. -advanced_1312_p=\ If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists don't protect access to the tools section, prevent remote shutdown of the web server, changes to the preferences, the saved connection settings, or access to other databases accessible from the system. -advanced_1313_h2=Restricting Class Loading and Usage -advanced_1314_p=\ By default there is no restriction on loading classes and executing Java code for admins. 
That means an admin may call system functions such as System.setProperty by executing\: -advanced_1315_p=\ To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses in the form of a comma separated list of classes or patterns (items ending with *). By default all classes are allowed. Example\: -advanced_1316_p=\ This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console. -advanced_1317_h2=Security Protocols -advanced_1318_p=\ The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives. -advanced_1319_h3=User Password Encryption -advanced_1320_p=\ When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication\: Basic and Digest Access Authentication' for more information. -advanced_1321_p=\ When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords. 
-advanced_1322_p=\ The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is\: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all. -advanced_1323_h3=File Encryption -advanced_1324_p=\ The database files can be encrypted using the AES-128 algorithm. -advanced_1325_p=\ When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server. -advanced_1326_p=\ When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords. -advanced_1327_p=\ The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks. 
-advanced_1328_p=\ Before saving a block of data (each block is 8 bytes long), the following operations are executed\: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm. -advanced_1329_p=\ When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated combined with the decrypted text using XOR. -advanced_1330_p=\ Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block. -advanced_1331_p=\ Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this. -advanced_1332_p=\ File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode). -advanced_1333_h3=Wrong Password / User Name Delay -advanced_1334_p=\ To protect against remote brute force password attacks, the delay after each unsuccessful login gets double as long. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. 
Normally there is no delay for a user that knows the correct password, with one exception\: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized. This is also required to protect against parallel attacks. -advanced_1335_p=\ There is only one exception message for both wrong user and for wrong password, to make it harder to get the list of user names. It is not possible from the stack trace to see if the user name was wrong or the password. -advanced_1336_h3=HTTPS Connections -advanced_1337_p=\ The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well. -advanced_1338_h2=TLS Connections -advanced_1339_p=\ Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled. -advanced_1340_p=\ To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information. -advanced_1341_p=\ To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false. -advanced_1342_h2=Universally Unique Identifiers (UUID) -advanced_1343_p=\ This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). 
This database supports generating such UUIDs using the built-in function RANDOM_UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values\: -advanced_1344_p=\ Some values are\: -advanced_1345_th=Number of UUIDs -advanced_1346_th=Probability of Duplicates -advanced_1347_td=2^36\=68'719'476'736 -advanced_1348_td=0.000'000'000'000'000'4 -advanced_1349_td=2^41\=2'199'023'255'552 -advanced_1350_td=0.000'000'000'000'4 -advanced_1351_td=2^46\=70'368'744'177'664 -advanced_1352_td=0.000'000'000'4 -advanced_1353_p=\ To help non-mathematicians understand what those numbers mean, here is a comparison\: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, that means the probability is about 0.000'000'000'06. -advanced_1354_h2=Spatial Features -advanced_1355_p=\ H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with the JTS tool, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows\: -advanced_1356_p=\ Here is an example SQL script to create a table with a spatial column and index\: -advanced_1357_p=\ To query the table using geometry envelope intersection, use the operation &&, as in PostGIS\: -advanced_1358_p=\ You can verify that the spatial index is used using the "explain plan" feature\: -advanced_1359_p=\ For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory. -advanced_1360_h2=Recursive Queries -advanced_1361_p=\ H2 has experimental support for recursive queries using so called "common table expressions" (CTE). Examples\: -advanced_1362_p=\ Limitations\: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. 
Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is\: -advanced_1363_h2=Settings Read from System Properties -advanced_1364_p=\ Some settings of the database can be set on the command line using -DpropertyName\=value. It is usually not required to change those settings manually. The settings are case sensitive. Example\: -advanced_1365_p=\ The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS. -advanced_1366_p=\ For a complete list of settings, see SysProperties. -advanced_1367_h2=Setting the Server Bind Address -advanced_1368_p=\ Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported. -advanced_1369_h2=Pluggable File System -advanced_1370_p=\ This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. The following file systems are included\: -advanced_1371_code=zip\: -advanced_1372_li=\ read-only zip-file based file system. Format\: zip\:/zipFileName\!/fileName. -advanced_1373_code=split\: -advanced_1374_li=\ file system that splits files in 1 GB files (stackable with other file systems). 
-advanced_1375_code=nio\: -advanced_1376_li=\ file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems). -advanced_1377_code=nioMapped\: -advanced_1378_li=\ file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system when using a 32-bit JVM. To work around this limitation, combine it with the split file system\: split\:nioMapped\:test. -advanced_1379_code=memFS\: -advanced_1380_li=\ in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself). -advanced_1381_code=memLZF\: -advanced_1382_li=\ compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself). -advanced_1383_p=\ As an example, to use the nio file system, use the following database URL\: jdbc\:h2\:nio\:~/test. -advanced_1384_p=\ To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it. -advanced_1385_p=\ For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example\: jar\:file\:///c\:/temp/example.zip\!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath\:, as in classpath\:/org/h2/samples/newsfeed.sql. -advanced_1386_h2=Split File System -advanced_1387_p=\ The file system prefix split\: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. 
If the logical file is larger than the maximum file size, then the file is split as follows\: -advanced_1388_code=<fileName> -advanced_1389_li=\ (first block, is always created) -advanced_1390_code=<fileName>.1.part -advanced_1391_li=\ (second block) -advanced_1392_p=\ More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However this can be changed if required, by specifying the block size in the file name. The file name format is\: split\:<x>\:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x \= 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks\: split\:20\:test.h2.db. An example database URL for this case is jdbc\:h2\:split\:20\:~/test. -advanced_1393_h2=Database Upgrade -advanced_1394_p=\ In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http\://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connect to an older database will result in a conversion process. -advanced_1395_p=\ The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from -advanced_1396_code=dbName.data.db -advanced_1397_li=\ to dbName.data.db.backup -advanced_1398_code=dbName.index.db -advanced_1399_li=\ to dbName.index.db.backup -advanced_1400_p=\ by default. Also, the temporary script will be written to the database directory instead of a temporary directory. 
Both defaults can be customized via -advanced_1401_code=org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean) -advanced_1402_code=org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean) -advanced_1403_p=\ prior opening a database connection. -advanced_1404_p=\ Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc\:h2v1_1\: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE\=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so that using this setting is not supported when upgrading. -advanced_1405_h2=Java Objects Serialization -advanced_1406_p=\ Java objects serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics. -advanced_1407_p=\ To disable this feature set the system property h2.serializeJavaObject\=false (default\: true). -advanced_1408_p=\ Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation\: -advanced_1409_li=\ At system level set the system property h2.javaObjectSerializer with the Fully Qualified Name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in column of type OTHER. Example h2.javaObjectSerializer\=com.acme.SerializerClassName. -advanced_1410_li=\ At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' or append ;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName' to the database URL\: jdbc\:h2\:~/test;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName'. 
-advanced_1411_p=\ Please note that this SQL statement can only be executed before any tables are defined. -advanced_1412_h2=Limits and Limitations -advanced_1413_p=\ This database has the following known limitations\: -advanced_1414_li=Database file size limit\: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit is including CLOB and BLOB data. -advanced_1415_li=The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is the limitation of the file system. The database does provide a workaround for this problem, it is to use the file name prefix split\:. In that case files are split into files of 1 GB by default. An example database URL is\: jdbc\:h2\:split\:~/test. -advanced_1416_li=The maximum number of rows per table is 2^64. -advanced_1417_li=The maximum number of open transactions is 65535. -advanced_1418_li=Main memory requirements\: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size. -advanced_1419_li=Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception\: -advanced_1420_li=There is no limit for the following entities, except the memory and storage capacity\: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement. 
-advanced_1421_li=Querying from the metadata tables is slow if there are many tables (thousands). -advanced_1422_li=For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database. -advanced_1423_h2=Glossary and Links -advanced_1424_th=Term -advanced_1425_th=Description -advanced_1426_td=AES-128 -advanced_1427_td=A block encryption algorithm. See also\: Wikipedia\: AES -advanced_1428_td=Birthday Paradox -advanced_1429_td=Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also\: Wikipedia\: Birthday Paradox -advanced_1430_td=Digest -advanced_1431_td=Protocol to protect a password (but not to protect data). See also\: RFC 2617\: HTTP Digest Access Authentication -advanced_1432_td=GCJ -advanced_1433_td=Compiler for Java. GNU Compiler for the Java and NativeJ (commercial) -advanced_1434_td=HTTPS -advanced_1435_td=A protocol to provide security to HTTP connections. See also\: RFC 2818\: HTTP Over TLS -advanced_1436_td=Modes of Operation -advanced_1437_a=Wikipedia\: Block cipher modes of operation -advanced_1438_td=Salt -advanced_1439_td=Random number to increase the security of passwords. See also\: Wikipedia\: Key derivation function -advanced_1440_td=SHA-256 -advanced_1441_td=A cryptographic one-way hash function. See also\: Wikipedia\: SHA hash functions -advanced_1442_td=SQL Injection -advanced_1443_td=A security vulnerability where an application embeds SQL statements or expressions in user input. See also\: Wikipedia\: SQL Injection -advanced_1444_td=Watermark Attack -advanced_1445_td=Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop' -advanced_1446_td=SSL/TLS -advanced_1447_td=Secure Sockets Layer / Transport Layer Security. 
See also\: Java Secure Socket Extension (JSSE) -architecture_1000_h1=Architecture -architecture_1001_a=\ Introduction -architecture_1002_a=\ Top-down overview -architecture_1003_a=\ JDBC driver -architecture_1004_a=\ Connection/session management -architecture_1005_a=\ Command execution and planning -architecture_1006_a=\ Table/index/constraints -architecture_1007_a=\ Undo log, redo log, and transactions layer -architecture_1008_a=\ B-tree engine and page-based storage allocation -architecture_1009_a=\ Filesystem abstraction -architecture_1010_h2=Introduction -architecture_1011_p=\ H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store. -architecture_1012_p=\ As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine. -architecture_1013_h2=Top-down Overview -architecture_1014_p=\ Working from the top down, the layers look like this\: -architecture_1015_li=JDBC driver. -architecture_1016_li=Connection/session management. -architecture_1017_li=SQL Parser. -architecture_1018_li=Command execution and planning. -architecture_1019_li=Table/Index/Constraints. -architecture_1020_li=Undo log, redo log, and transactions layer. -architecture_1021_li=B-tree engine and page-based storage allocation. -architecture_1022_li=Filesystem abstraction. 
-architecture_1023_h2=JDBC Driver -architecture_1024_p=\ The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx -architecture_1025_h2=Connection/session management -architecture_1026_p=\ The primary classes of interest are\: -architecture_1027_th=Package -architecture_1028_th=Description -architecture_1029_td=org.h2.engine.Database -architecture_1030_td=the root/global class -architecture_1031_td=org.h2.engine.SessionInterface -architecture_1032_td=abstracts over the differences between embedded and remote sessions -architecture_1033_td=org.h2.engine.Session -architecture_1034_td=local/embedded session -architecture_1035_td=org.h2.engine.SessionRemote -architecture_1036_td=remote session -architecture_1037_h2=Parser -architecture_1038_p=\ The parser lives in org.h2.command.Parser. It uses a straightforward recursive-descent design. -architecture_1039_p=\ See Wikipedia Recursive-descent parser page. -architecture_1040_h2=Command execution and planning -architecture_1041_p=\ Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are\: -architecture_1042_th=Package -architecture_1043_th=Description -architecture_1044_td=org.h2.command.ddl -architecture_1045_td=Commands that modify schema data structures -architecture_1046_td=org.h2.command.dml -architecture_1047_td=Commands that modify data -architecture_1048_h2=Table/Index/Constraints -architecture_1049_p=\ One thing to note here is that indexes are simply stored as special kinds of tables. 
-architecture_1050_p=\ The primary packages of interest are\: -architecture_1051_th=Package -architecture_1052_th=Description -architecture_1053_td=org.h2.table -architecture_1054_td=Implementations of different kinds of tables -architecture_1055_td=org.h2.index -architecture_1056_td=Implementations of different kinds of indices -architecture_1057_h2=Undo log, redo log, and transactions layer -architecture_1058_p=\ We have a transaction log, which is shared among all sessions. See also http\://en.wikipedia.org/wiki/Transaction_log http\://h2database.com/html/grammar.html\#set_log -architecture_1059_p=\ We also have an undo log, which is per session, to undo an operation (an update that fails for example) and to rollback a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory). -architecture_1060_p=\ With the MVStore, this is no longer needed (just the transaction log). -architecture_1061_h2=B-tree engine and page-based storage allocation. -architecture_1062_p=\ The primary package of interest is org.h2.store. -architecture_1063_p=\ This implements a storage mechanism which allocates pages of storage (typically 2k in size) and also implements a b-tree over those pages to allow fast retrieval and update. -architecture_1064_h2=Filesystem abstraction. -architecture_1065_p=\ The primary class of interest is org.h2.store.FileStore. -architecture_1066_p=\ This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same. 
-build_1000_h1=Build -build_1001_a=\ Portability -build_1002_a=\ Environment -build_1003_a=\ Building the Software -build_1004_a=\ Build Targets -build_1005_a=\ Using Maven 2 -build_1006_a=\ Using Eclipse -build_1007_a=\ Translating -build_1008_a=\ Providing Patches -build_1009_a=\ Reporting Problems or Requests -build_1010_a=\ Automated Build -build_1011_a=\ Generating Railroad Diagrams -build_1012_h2=Portability -build_1013_p=\ This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ. -build_1014_h2=Environment -build_1015_p=\ To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required. -build_1016_p=\ To create the database executables, the following software stack was used. To use this database, it is not required to install this software however. -build_1017_li=Mac OS X and Windows -build_1018_a=Sun JDK Version 1.6 and 1.7 -build_1019_a=Eclipse -build_1020_li=Eclipse Plugins\: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage -build_1021_a=Emma Java Code Coverage -build_1022_a=Mozilla Firefox -build_1023_a=OpenOffice -build_1024_a=NSIS -build_1025_li=\ (Nullsoft Scriptable Install System) -build_1026_a=Maven -build_1027_h2=Building the Software -build_1028_p=\ You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command\: -build_1029_p=\ For Linux and OS X, use ./build.sh instead of build. -build_1030_p=\ You will get a list of targets. If you want to build the jar file, execute (Windows)\: -build_1031_p=\ To run the build tool in shell mode, use the command line option - as in ./build.sh -. -build_1032_h3=Switching the Source Code -build_1033_p=\ The source code uses Java 1.6 features. 
To switch the source code to the installed version of Java, run\: -build_1034_h2=Build Targets -build_1035_p=\ The build system can generate smaller jar files as well. The following targets are currently supported\: -build_1036_code=jarClient -build_1037_li=\ creates the file h2client.jar. This only contains the JDBC client. -build_1038_code=jarSmall -build_1039_li=\ creates the file h2small.jar. This only contains the embedded database. Debug information is disabled. -build_1040_code=jarJaqu -build_1041_li=\ creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu. -build_1042_code=javadocImpl -build_1043_li=\ creates the Javadocs of the implementation. -build_1044_p=\ To create the file h2client.jar, go to the directory h2 and execute the following command\: -build_1045_h3=Using Lucene 2 / 3 -build_1046_p=\ Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows\: -build_1047_h2=Using Maven 2 -build_1048_h3=Using a Central Repository -build_1049_p=\ You can include the database in your Maven 2 project as a dependency. Example\: -build_1050_p=\ New versions of this database are first uploaded to http\://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there. -build_1051_h3=Maven Plugin to Start and Stop the TCP Server -build_1052_p=\ A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. 
To start the H2 server, use\: -build_1053_p=\ To stop the H2 server, use\: -build_1054_h3=Using Snapshot Version -build_1055_p=\ To build a h2-*-SNAPSHOT.jar file and upload it the to the local Maven 2 repository, execute the following command\: -build_1056_p=\ Afterwards, you can include the database in your Maven 2 project as a dependency\: -build_1057_h2=Using Eclipse -build_1058_p=\ To create an Eclipse project for H2, use the following steps\: -build_1059_li=Install Subversion and Eclipse. -build_1060_li=Get the H2 source code from the Subversion repository\: -build_1061_code=svn checkout http\://h2database.googlecode.com/svn/trunk h2database-read-only -build_1062_li=Download all dependencies (Windows)\: -build_1063_code=build.bat download -build_1064_li=In Eclipse, create a new Java project from existing source code\: File, New, Project, Java Project, Create project from existing source. -build_1065_li=Select the h2 folder, click Next and Finish. -build_1066_li=To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path. -build_1067_h2=Translating -build_1068_p=\ The translation of this software is split into the following parts\: -build_1069_li=H2 Console\: src/main/org/h2/server/web/res/_text_*.prop -build_1070_li=Error messages\: src/main/org/h2/res/_messages_*.prop -build_1071_p=\ To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google. -build_1072_h2=Providing Patches -build_1073_p=\ If you like to provide patches, please consider the following guidelines to simplify merging them\: -build_1074_li=Only use Java 6 features (do not use Java 7) (see Environment). -build_1075_li=Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). 
The checkstyle configuration is in src/installer/checkstyle.xml. -build_1076_li=A template of the Eclipse settings are in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included. -build_1077_li=Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt. -build_1078_li=The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above). or use the build target coverage. -build_1079_li=Verify that you did not break other features\: run the test cases by executing build test. -build_1080_li=Provide end user documentation if required (src/docsrc/html/*). -build_1081_li=Document grammar changes in src/docsrc/help/help.csv -build_1082_li=Provide a change log entry (src/docsrc/html/changelog.html). -build_1083_li=Verify the spelling using build spellcheck. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt. -build_1084_li=Run src/installer/buildRelease to find and fix formatting errors. -build_1085_li=Verify the formatting using build docs and build javadoc. -build_1086_li=Submit patches as .patch files (compressed if big). To create a patch using Eclipse, use Team / Create Patch. -build_1087_p=\ For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement\: -build_1088_p=\ "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http\://h2database.com/html/license.html)." 
-build_1089_h2=Reporting Problems or Requests -build_1090_p=\ Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request\: -build_1091_li=For bug reports, please provide a short, self contained, correct (compilable), example of the problem. -build_1092_li=Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch. -build_1093_li=Before posting problems, check the FAQ and do a Google search. -build_1094_li=When you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s). -build_1095_li=When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but so that the problem can still be reproduced. As a template, use\: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method. -build_1096_li=For large attachments, use a public temporary storage such as Rapidshare. -build_1097_li=Google Group versus issue tracking\: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only few people monitor the issue tracking system. 
-build_1098_li=For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX\:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT). -build_1099_li=It may take a few days to get an answer. Please do not double post. -build_1100_h2=Automated Build -build_1101_p=\ This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword\=... uploadBuild. The last results are available here\: -build_1102_a=Test Output -build_1103_a=Code Coverage Summary -build_1104_a=Code Coverage Details (download, 1.3 MB) -build_1105_a=Build Newsfeed -build_1106_a=Latest Jar File (download, 1 MB) -build_1107_h2=Generating Railroad Diagrams -build_1108_p=\ The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows\: -build_1109_li=The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv. -build_1110_li=The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams. -build_1111_li=The rail images (one straight, four junctions, two turns) are generated using a simple Java application. -build_1112_p=\ To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification. -changelog_1000_h1=Change Log -changelog_1001_h2=Next Version (unreleased) -changelog_1002_li=- -changelog_1003_h2=Version 1.4.187 Beta (2015-04-10) -changelog_1004_li=MVStore\: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads. -changelog_1005_li=Results with CLOB or BLOB data are no longer reused. 
-changelog_1006_li=References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time. -changelog_1007_li=MVStore\: when committing a session that removed LOB values, changes were flushed unnecessarily. -changelog_1008_li=Issue 610\: possible integer overflow in WriteBuffer.grow(). -changelog_1009_li=Issue 609\: the spatial index did not support NULL (ClassCastException). -changelog_1010_li=MVStore\: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database. -changelog_1011_li=MVStore\: updates that affected many rows were slow in some cases if there was a secondary index. -changelog_1012_li=Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS". -changelog_1013_li=Issue 603\: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message\: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]". -changelog_1014_li=When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException), now a clear error message is shown. -changelog_1015_li=Issue 605\: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init. -changelog_1016_li=Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example\: "select * from a as x, b as x". -changelog_1017_li=The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema. -changelog_1018_li=Issue 599\: the condition "in(x, y)" could not be used in the select list when using "group by". 
-changelog_1019_li=The LIRS cache could grow larger than the allocated memory. -changelog_1020_li=A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry\:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene. -changelog_1021_li=MVStore\: use RandomAccessFile file system if the file name starts with "file\:". -changelog_1022_li=Allow DATEADD to take a long value for count when manipulating milliseconds. -changelog_1023_li=When using MV_STORE\=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be. -changelog_1024_li=Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD\=TRUE could throw an exception. -changelog_1025_li=Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs. -changelog_1026_li=Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles. -changelog_1027_li=Fix bug in "jdbc\:h2\:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB. -changelog_1028_h2=Version 1.4.186 Beta (2015-03-02) -changelog_1029_li=The Servlet API 3.0.1 is now used, instead of 2.4. -changelog_1030_li=MVStore\: old chunks no longer removed in append-only mode. -changelog_1031_li=MVStore\: the cache for page references could grow far too big, resulting in out of memory in some cases. -changelog_1032_li=MVStore\: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily. -changelog_1033_li=MVStore\: the maximum cache size was artificially limited to 2 GB (due to an integer overflow). -changelog_1034_li=MVStore / TransactionStore\: concurrent updates could result in a "Too many open transactions" exception. -changelog_1035_li=StringUtils.toUpperEnglish now has a small cache. 
This should speed up reading from a ResultSet when using the column name. -changelog_1036_li=MVStore\: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit). -changelog_1037_li=The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, less references are needed. -changelog_1038_li=Tables without columns didn't work. (The use case for such tables is testing.) -changelog_1039_li=The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration. -changelog_1040_li=Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file. -changelog_1041_li=In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example\: select * from dual join(select x from dual) on 1\=1 -changelog_1042_li=Issue 598\: parser fails on timestamp "24\:00\:00.1234" - prevent the creation of out-of-range time values. -changelog_1043_li=Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz. -changelog_1044_li=Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred). -changelog_1045_li=PostgreSQL compatibility\: generate_series (as an alias for system_range). Patch by litailang. -changelog_1046_li=Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel. -changelog_1047_h2=Version 1.4.185 Beta (2015-01-16) -changelog_1048_li=In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. 
Example\: select 0 as x from system_range(1, 2) d group by d.x; -changelog_1049_li=New connection setting "REUSE_SPACE" (default\: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows to rollback to a previous state of the database by truncating the database file. -changelog_1050_li=Issue 587\: MVStore\: concurrent compaction and store operations could result in an IllegalStateException. -changelog_1051_li=Issue 594\: Profiler.copyInThread does not work properly. -changelog_1052_li=Script tool\: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage). -changelog_1053_li=Script tool\: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen. -changelog_1054_li=Fix bug in PageStore\#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov. -changelog_1055_li=Issue 552\: Implement BIT_AND and BIT_OR aggregate functions. -changelog_1056_h2=Version 1.4.184 Beta (2014-12-19) -changelog_1057_li=In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables. -changelog_1058_li=MVStore\: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison. -changelog_1059_li=Reading from a StreamStore now throws an IOException if the underlying data doesn't exist. -changelog_1060_li=MVStore\: if there is an exception while saving, the store is now in all cases immediately closed. -changelog_1061_li=MVStore\: the dump tool could go into an endless loop for some files. -changelog_1062_li=MVStore\: recovery for a database with many CLOB or BLOB entries is now much faster. 
-changelog_1063_li=Group by with a quoted select column name alias didn't work. Example\: select 1 "a" from dual group by "a" -changelog_1064_li=Auto-server mode\: the host name is now stored in the .lock.db file. -changelog_1065_h2=Version 1.4.183 Beta (2014-12-13) -changelog_1066_li=MVStore\: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data. -changelog_1067_li=The built-in functions "power" and "radians" now always return a double. -changelog_1068_li=Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example\: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id \= 1 -changelog_1069_li=MVStore\: the Recover tool can now deal with more types of corruption in the file. -changelog_1070_li=MVStore\: the TransactionStore now first needs to be initialized before it can be used. -changelog_1071_li=Views and derived tables with equality and range conditions on the same columns did not work properly. Example\: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x \= 1 -changelog_1072_li=The database URL setting PAGE_SIZE is now also used for the MVStore. -changelog_1073_li=MVStore\: the default page split size for persistent stores is now 4096 (it was 16 KB so far). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version). -changelog_1074_li=With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work. -changelog_1075_li=MVStore\: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly. 
-changelog_1076_li=In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work. -changelog_1077_li=In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped). -changelog_1078_li=Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode). -changelog_1079_li=The MVStoreTool could throw an IllegalArgumentException. -changelog_1080_li=Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem. -changelog_1081_li=H2 Console\: the built-in web server did not work properly if an unknown file was requested. -changelog_1082_li=MVStore\: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately. -changelog_1083_li=MVStore\: support for concurrent reads and writes is now enabled by default. -changelog_1084_li=Server mode\: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot. -changelog_1085_li=H2 Console and server mode\: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks. -changelog_1086_li=MVStore\: the R-tree did not correctly measure the memory usage. -changelog_1087_li=MVStore\: compacting a store with an R-tree did not always work. -changelog_1088_li=Issue 581\: When running in LOCK_MODE\=0, JdbcDatabaseMetaData\#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false -changelog_1089_li=Fix bug which could generate deadlocks when multiple connections accessed the same table. 
-changelog_1090_li=Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command -changelog_1091_li=Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations -changelog_1092_li=Fix "USE schema" command for MySQL compatibility, patch by mfulton -changelog_1093_li=Parse and ignore the ROW_FORMAT\=DYNAMIC MySQL syntax, patch by mfulton -changelog_1094_h2=Version 1.4.182 Beta (2014-10-17) -changelog_1095_li=MVStore\: improved error messages and logging; improved behavior if there is an error when serializing objects. -changelog_1096_li=OSGi\: the MVStore packages are now exported. -changelog_1097_li=With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table. -changelog_1098_li=When using the multi-threaded option, the exception "Unexpected code path" could be thrown, specially if the option "analyze_auto" was set to a low value. -changelog_1099_li=In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed. -changelog_1100_li=DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available. -changelog_1101_li=Issue 584\: the error message for a wrong sequence definition was wrong. -changelog_1102_li=CSV tool\: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator. -changelog_1103_li=Descending indexes on MVStore tables did not work properly. -changelog_1104_li=Issue 579\: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore. -changelog_1105_li=Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x. -changelog_1106_li=The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns. 
-changelog_1107_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes. -changelog_1108_li=Issue 572\: MySQL compatibility for "order by" in update statements. -changelog_1109_li=The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later. -changelog_1110_h2=Version 1.4.181 Beta (2014-08-06) -changelog_1111_li=Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch\! -changelog_1112_li=Writing to the trace file is now faster, specially with the debug level. -changelog_1113_li=The database option "defrag_always\=true" did not work with the MVStore. -changelog_1114_li=The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released\: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later. -changelog_1115_li=File system abstraction\: support replacing existing files using move (currently not for Windows). -changelog_1116_li=The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental. 
-changelog_1117_li=The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome\! -changelog_1118_li=Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096). -changelog_1119_li=Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines. -changelog_1120_li=Handle tabs like 4 spaces in web console, patch by Martin Grajcar. -changelog_1121_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1. -changelog_1122_h2=Version 1.4.180 Beta (2014-07-13) -changelog_1123_li=MVStore\: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress. -changelog_1124_li=Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database. -changelog_1125_li=MVStore\: an IndexOutOfBoundsException could sometimes occur MVMap.openVersion when concurrently accessing the store. -changelog_1126_li=The LIRS cache now re-sizes the internal hash map if needed. -changelog_1127_li=Optionally persist session history in the H2 console. (patch from Martin Grajcar) -changelog_1128_li=Add client-info property to get the number of servers currently in the cluster and which servers that are available. (patch from Nikolaj Fogh) -changelog_1129_li=Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth). -changelog_1130_li=Issue 567\: H2 hangs for a long time then (sometimes) recovers. 
Introduce a queue when doing table locking to prevent session starvation. -changelog_1131_h2=Version 1.4.179 Beta (2014-06-23) -changelog_1132_li=The license was changed to MPL 2.0 (from 1.0) and EPL 1.0. -changelog_1133_li=Issue 565\: MVStore\: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException. -changelog_1134_li=MVStore\: reduced dependencies to other H2 classes. -changelog_1135_li=There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contains queries. This is now detected, and creating the table is prohibited. In future versions of H2, most likely creating references to other tables will no longer be supported because of such problems. -changelog_1136_li=MVStore\: descending indexes with "nulls first" did not work as expected (null was ordered last). -changelog_1137_li=Large result sets now always create temporary tables instead of temporary files. -changelog_1138_li=When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets). -changelog_1139_li=If a database file in the PageStore file format exists, this file and this mode is now used, even if the database URL does not contain "MV_STORE\=FALSE". If a MVStore file exists, it is used. -changelog_1140_li=Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176\: Referential integrity constraints sometimes used the wrong index. -changelog_1141_li=MVStore\: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class. -changelog_1142_li=Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly. 
-changelog_1143_li=The license has changed to MPL 2.0 + EPL 1.0. -changelog_1144_li=MVStore\: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException. -changelog_1145_li=Issue 566\: MVStore\: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken index needs to be re-built). -changelog_1146_li=MVStore\: creating secondary indexes on large tables results in missing rows in the index. -changelog_1147_li=Metadata\: the password of linked tables is now only visible for admin users. -changelog_1148_li=For Windows, database URLs of the form "jdbc\:h2\:/test" were considered relative and did not work unless the system property "h2.implicitRelativePath" was used. -changelog_1149_li=Windows\: using a base directory of "C\:/" and similar did not work as expected. -changelog_1150_li=Follow JDBC specification on Procedures MetaData, use P0 as return type of procedure. -changelog_1151_li=Issue 531\: IDENTITY ignored for added column. -changelog_1152_li=FileSystem\: improve exception throwing compatibility with JDK -changelog_1153_li=Spatial Index\: adjust costs so we do not use the spatial index if the query does not contain an intersects operator. -changelog_1154_li=Fix multi-threaded deadlock when using a View that includes a TableFunction. -changelog_1155_li=Fix bug in dividing very-small BigDecimal numbers. -changelog_1156_h2=Version 1.4.178 Beta (2014-05-02) -changelog_1157_li=Issue 559\: Make dependency on org.osgi.service.jdbc optional. -changelog_1158_li=Improve error message when the user specifies an unsupported combination of database settings. -changelog_1159_li=MVStore\: in the multi-threaded mode, NullPointerException and other exceptions could occur. -changelog_1160_li=MVStore\: some database files could not be compacted due to a bug in the bookkeeping of the fill rate. 
Also, database file were compacted quite slowly. This has been improved; but more changes in this area are expected. -changelog_1161_li=MVStore\: support for volatile maps (that don't store changes). -changelog_1162_li=MVStore mode\: in-memory databases now also use the MVStore. -changelog_1163_li=In server mode, appending ";autocommit\=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong. -changelog_1164_li=Issue 561\: OSGi\: the import package declaration of org.h2 excluded version 1.4. -changelog_1165_li=Issue 558\: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob). -changelog_1166_li=Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented. -changelog_1167_li=Issue 554\: Web Console in an IFrame was not fully supported. -changelog_1168_h2=Version 1.4.177 Beta (2014-04-12) -changelog_1169_li=By default, the MV_STORE option is enabled, so it is using the new MVStore storage. The MVCC setting is by default set to the same values as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE\=FALSE" and/or ";MVCC\=FALSE" to the database URL. -changelog_1170_li=The file locking method 'serialized' is no longer supported. This mode might return in a future version, however this is not clear right now. A new implementation and new tests would be needed. -changelog_1171_li=Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode, this is always enabled, but with version 1.4 this is even enabled in the PageStore mode. -changelog_1172_li=Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc\:h2\:test now needs to be written as jdbc\:h2\:./test. -changelog_1173_li="select ... 
fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch. -changelog_1174_li=Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation"). -changelog_1175_li=Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead. -changelog_1176_li=Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin". -changelog_1177_li=Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility. -changelog_1178_li=Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier. -changelog_1179_h2=Version 1.3.176 (2014-04-05) -changelog_1180_li=The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4. -changelog_1181_li=The static method Csv.getInstance() was removed. Use the public constructor instead. -changelog_1182_li=The default user name for the Script, RunScript, Shell, and CreateCluster tools are no longer "sa" but an empty string. -changelog_1183_li=The stack trace of the exception "The object is already closed" is no longer logged by default. -changelog_1184_li=If a value of a result set was itself a result set, the result could only be read once. -changelog_1185_li=Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS). -changelog_1186_li=Granting a additional right to a role that already had a right for that table was not working. -changelog_1187_li=Spatial index\: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed). -changelog_1188_li=Issue 551\: the datatype documentation was incorrect (found by Bernd Eckenfels). 
-changelog_1189_li=Issue 368\: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald. -changelog_1190_li=OSGi\: the package javax.tools is now imported (as an optional). -changelog_1191_li=H2 Console\: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space). -changelog_1192_li=H2 Console\: auto-complete did not work with multi-line statements. -changelog_1193_li=CLOB and BLOB data was not immediately removed after a rollback. -changelog_1194_li=There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch\! -changelog_1195_li=Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation. -changelog_1196_li=The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot\! -changelog_1197_li=Issue 545\: Unnecessary duplicate code was removed. -changelog_1198_li=The profiler tool can now process files with full thread dumps. -changelog_1199_li=MVStore\: the file format was changed slightly. -changelog_1200_li=MVStore mode\: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage). -changelog_1201_li=MVStore mode\: creating indexes is now much faster (in many cases faster than with the default PageStore). -changelog_1202_li=Various bugs in the MVStore storage have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing. -changelog_1203_li=The method org.h2.expression.Function.getCost could throw a NullPointerException. -changelog_1204_li=Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases. -changelog_1205_li=Lucene 2 is no longer supported. 
-changelog_1206_li=Fix bug in calculating default MIN and MAX values for SEQUENCE. -changelog_1207_li=Fix bug in performing IN queries with multiple values when IGNORECASE\=TRUE -changelog_1208_li=Add entry-point to org.h2.tools.Shell so it can be called from inside an application. patch by Thomas Gillet. -changelog_1209_li=Fix bug that prevented the PgServer from being stopped and started multiple times. -changelog_1210_li=Support some more DDL syntax for MySQL, patch from Peter Jentsch. -changelog_1211_li=Issue 548\: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc" -changelog_1212_li=Fix bug in varargs support in ALIAS's, patch from Nicolas Fortin -cheatSheet_1000_h1=H2 Database Engine Cheat Sheet -cheatSheet_1001_h2=Using H2 -cheatSheet_1002_a=H2 -cheatSheet_1003_li=\ is open source, free to use and distribute. -cheatSheet_1004_a=Download -cheatSheet_1005_li=\: jar, installer (Windows), zip. -cheatSheet_1006_li=To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh. -cheatSheet_1007_a=A new database is automatically created -cheatSheet_1008_a=by default -cheatSheet_1009_li=. -cheatSheet_1010_a=Closing the last connection closes the database -cheatSheet_1011_li=. -cheatSheet_1012_h2=Documentation -cheatSheet_1013_p=\ Reference\: SQL grammar, functions, data types, tools, API -cheatSheet_1014_a=Features -cheatSheet_1015_p=\: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions -cheatSheet_1016_a=Database URLs -cheatSheet_1017_a=Embedded -cheatSheet_1018_code=jdbc\:h2\:~/test -cheatSheet_1019_p=\ 'test' in the user home directory -cheatSheet_1020_code=jdbc\:h2\:/data/test -cheatSheet_1021_p=\ 'test' in the directory /data -cheatSheet_1022_code=jdbc\:h2\:test -cheatSheet_1023_p=\ in the current(\!) 
working directory -cheatSheet_1024_a=In-Memory -cheatSheet_1025_code=jdbc\:h2\:mem\:test -cheatSheet_1026_p=\ multiple connections in one process -cheatSheet_1027_code=jdbc\:h2\:mem\: -cheatSheet_1028_p=\ unnamed private; one connection -cheatSheet_1029_a=Server Mode -cheatSheet_1030_code=jdbc\:h2\:tcp\://localhost/~/test -cheatSheet_1031_p=\ user home dir -cheatSheet_1032_code=jdbc\:h2\:tcp\://localhost//data/test -cheatSheet_1033_p=\ absolute dir -cheatSheet_1034_a=Server start -cheatSheet_1035_p=\:java -cp *.jar org.h2.tools.Server -cheatSheet_1036_a=Settings -cheatSheet_1037_code=jdbc\:h2\:..;MODE\=MySQL -cheatSheet_1038_a=compatibility (or HSQLDB,...) -cheatSheet_1039_code=jdbc\:h2\:..;TRACE_LEVEL_FILE\=3 -cheatSheet_1040_a=log to *.trace.db -cheatSheet_1041_a=Using the JDBC API -cheatSheet_1042_a=Connection Pool -cheatSheet_1043_a=Maven 2 -cheatSheet_1044_a=Hibernate -cheatSheet_1045_p=\ hibernate.cfg.xml (or use the HSQLDialect)\: -cheatSheet_1046_a=TopLink and Glassfish -cheatSheet_1047_p=\ Datasource class\: org.h2.jdbcx.JdbcDataSource -cheatSheet_1048_code=oracle.toplink.essentials.platform. 
-cheatSheet_1049_code=database.H2Platform -download_1000_h1=Downloads -download_1001_h3=Version 1.4.187 (2015-04-10), Beta -download_1002_a=Windows Installer -download_1003_a=Platform-Independent Zip -download_1004_h3=Version 1.3.176 (2014-04-05), Last Stable -download_1005_a=Windows Installer -download_1006_a=Platform-Independent Zip -download_1007_h3=Download Mirror and Older Versions -download_1008_a=Platform-Independent Zip -download_1009_h3=Jar File -download_1010_a=Maven.org -download_1011_a=Sourceforge.net -download_1012_a=Latest Automated Build (not released) -download_1013_h3=Maven (Binary, Javadoc, and Source) -download_1014_a=Binary -download_1015_a=Javadoc -download_1016_a=Sources -download_1017_h3=Database Upgrade Helper File -download_1018_a=Upgrade database from 1.1 to the current version -download_1019_h3=Subversion Source Repository -download_1020_a=Google Code -download_1021_p=\ For details about changes, see the Change Log. -download_1022_h3=News and Project Information -download_1023_a=Atom Feed -download_1024_a=RSS Feed -download_1025_a=DOAP File -download_1026_p=\ (what is this) -faq_1000_h1=Frequently Asked Questions -faq_1001_a=\ I Have a Problem or Feature Request -faq_1002_a=\ Are there Known Bugs? When is the Next Release? -faq_1003_a=\ Is this Database Engine Open Source? -faq_1004_a=\ Is Commercial Support Available? -faq_1005_a=\ How to Create a New Database? -faq_1006_a=\ How to Connect to a Database? -faq_1007_a=\ Where are the Database Files Stored? -faq_1008_a=\ What is the Size Limit (Maximum Size) of a Database? -faq_1009_a=\ Is it Reliable? -faq_1010_a=\ Why is Opening my Database Slow? -faq_1011_a=\ My Query is Slow -faq_1012_a=\ H2 is Very Slow -faq_1013_a=\ Column Names are Incorrect? -faq_1014_a=\ Float is Double? -faq_1015_a=\ Is the GCJ Version Stable? Faster? -faq_1016_a=\ How to Translate this Project? -faq_1017_a=\ How to Contribute to this Project? 
-faq_1018_h3=I Have a Problem or Feature Request -faq_1019_p=\ Please read the support checklist. -faq_1020_h3=Are there Known Bugs? When is the Next Release? -faq_1021_p=\ Usually, bugs get fixes as they are found. There is a release every few weeks. Here is the list of known and confirmed issues\: -faq_1022_li=When opening a database file in a timezone that has different daylight saving rules\: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2). -faq_1023_li=Apache Harmony\: there seems to be a bug in Harmony that affects H2. See HARMONY-6505. -faq_1024_li=Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >\= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES\=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib). -faq_1025_li=Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3. -faq_1026_li=When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. 
A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4. -faq_1027_p=\ For a complete list, see Open Issues. -faq_1028_h3=Is this Database Engine Open Source? -faq_1029_p=\ Yes. It is free to use and distribute, and the source code is included. See also under license. -faq_1030_h3=Is Commercial Support Available? -faq_1031_p=\ Yes, commercial support is available, see Commercial Support. -faq_1032_h3=How to Create a New Database? -faq_1033_p=\ By default, a new database is automatically created if it does not yet exist. See Creating New Databases. -faq_1034_h3=How to Connect to a Database? -faq_1035_p=\ The database driver is org.h2.Driver, and the database URL starts with jdbc\:h2\:. To connect to a database using JDBC, use the following code\: -faq_1036_h3=Where are the Database Files Stored? -faq_1037_p=\ When using database URLs like jdbc\:h2\:~/test, the database is stored in the user directory. For Windows, this is usually C\:\\Documents and Settings\\<userName> or C\:\\Users\\<userName>. If the base directory is not set (as in jdbc\:h2\:test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc\:h2\:file\:data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example\: jdbc\:h2\:file\:C\:/data/test -faq_1038_h3=What is the Size Limit (Maximum Size) of a Database? -faq_1039_p=\ See Limits and Limitations. -faq_1040_h3=Is it Reliable? -faq_1041_p=\ That is not easy to say. It is still a quite new product. 
A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous; they are only supported for situations where performance is more important than reliability. Those dangerous features are\: -faq_1042_li=Disabling the transaction log or FileDescriptor.sync() using LOG\=0 or LOG\=1. -faq_1043_li=Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections. -faq_1044_li=Disabling database file protection (setting FILE_LOCK to NO in the database URL). -faq_1045_li=Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE. -faq_1046_p=\ In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a database. -faq_1047_p=\ This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are\: -faq_1048_li=Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7 -faq_1049_li=The features AUTO_SERVER and AUTO_RECONNECT. -faq_1050_li=Cluster mode, 2-phase commit, savepoints. -faq_1051_li=24/7 operation. -faq_1052_li=Fulltext search. -faq_1053_li=Operations on LOBs over 2 GB. -faq_1054_li=The optimizer may not always select the best plan. -faq_1055_li=Using the ICU4J collator. 
-faq_1056_p=\ Areas considered experimental are\: -faq_1057_li=The PostgreSQL server -faq_1058_li=Clustering (there are cases were transaction isolation can be broken due to timing issues, for example one session overtaking another session). -faq_1059_li=Multi-threading within the engine using SET MULTI_THREADED\=1. -faq_1060_li=Compatibility modes for other databases (only some features are implemented). -faq_1061_li=The soft reference cache (CACHE_TYPE\=SOFT_LRU). It might not improve performance, and out of memory issues have been reported. -faq_1062_p=\ Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations. -faq_1063_h3=Why is Opening my Database Slow? -faq_1064_p=\ To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group. -faq_1065_p=\ Other possible reasons are\: the database is very big (many GB), or contains linked tables that are slow to open. -faq_1066_h3=My Query is Slow -faq_1067_p=\ Slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple reasons. Follow this checklist\: -faq_1068_li=Run ANALYZE (see documentation for details). -faq_1069_li=Run the query with EXPLAIN and check if indexes are used (see documentation for details). -faq_1070_li=If required, create additional indexes and try again using ANALYZE and EXPLAIN. -faq_1071_li=If it doesn't help please report the problem. -faq_1072_h3=H2 is Very Slow -faq_1073_p=\ By default, H2 closes the database when the last connection is closed. 
If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning. -faq_1074_h3=Column Names are Incorrect? -faq_1075_p=\ For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, I expect it to return X. What's wrong? -faq_1076_p=\ This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME\=TRUE to the database URL. -faq_1077_p=\ This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names. -faq_1078_h3=Float is Double? -faq_1079_p=\ For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, I expect it to return a java.lang.Float. What's wrong? -faq_1080_p=\ This is not a bug. According to the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT. -faq_1081_h3=Is the GCJ Version Stable? Faster? -faq_1082_p=\ The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without an error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM. -faq_1083_h3=How to Translate this Project? -faq_1084_p=\ For more information, see Build/Translating. -faq_1085_h3=How to Contribute to this Project? 
-faq_1086_p=\ There are various way to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest to start with very small features that are easy to implement. Keep in mind to provide test cases as well. -features_1000_h1=Features -features_1001_a=\ Feature List -features_1002_a=\ Comparison to Other Database Engines -features_1003_a=\ H2 in Use -features_1004_a=\ Connection Modes -features_1005_a=\ Database URL Overview -features_1006_a=\ Connecting to an Embedded (Local) Database -features_1007_a=\ In-Memory Databases -features_1008_a=\ Database Files Encryption -features_1009_a=\ Database File Locking -features_1010_a=\ Opening a Database Only if it Already Exists -features_1011_a=\ Closing a Database -features_1012_a=\ Ignore Unknown Settings -features_1013_a=\ Changing Other Settings when Opening a Connection -features_1014_a=\ Custom File Access Mode -features_1015_a=\ Multiple Connections -features_1016_a=\ Database File Layout -features_1017_a=\ Logging and Recovery -features_1018_a=\ Compatibility -features_1019_a=\ Auto-Reconnect -features_1020_a=\ Automatic Mixed Mode -features_1021_a=\ Page Size -features_1022_a=\ Using the Trace Options -features_1023_a=\ Using Other Logging APIs -features_1024_a=\ Read Only Databases -features_1025_a=\ Read Only Databases in Zip or Jar File -features_1026_a=\ Computed Columns / Function Based Index -features_1027_a=\ Multi-Dimensional Indexes -features_1028_a=\ User-Defined Functions and Stored Procedures -features_1029_a=\ Pluggable or User-Defined Tables -features_1030_a=\ Triggers 
-features_1031_a=\ Compacting a Database -features_1032_a=\ Cache Settings -features_1033_h2=Feature List -features_1034_h3=Main Features -features_1035_li=Very fast database engine -features_1036_li=Open source -features_1037_li=Written in Java -features_1038_li=Supports standard SQL, JDBC API -features_1039_li=Embedded and Server mode, Clustering support -features_1040_li=Strong security features -features_1041_li=The PostgreSQL ODBC driver can be used -features_1042_li=Multi version concurrency -features_1043_h3=Additional Features -features_1044_li=Disk based or in-memory databases and tables, read-only database support, temporary tables -features_1045_li=Transaction support (read committed), 2-phase-commit -features_1046_li=Multiple connections, table level locking -features_1047_li=Cost based optimizer, using a genetic algorithm for complex queries, zero-administration -features_1048_li=Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set -features_1049_li=Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL -features_1050_h3=SQL Support -features_1051_li=Support for multiple schemas, information schema -features_1052_li=Referential integrity / foreign key constraints with cascade, check constraints -features_1053_li=Inner and outer joins, subqueries, read only views and inline views -features_1054_li=Triggers and Java functions / stored procedures -features_1055_li=Many built-in functions, including XML and lossless data compression -features_1056_li=Wide range of data types including large objects (BLOB/CLOB) and arrays -features_1057_li=Sequence and autoincrement columns, computed columns (can be used for function based indexes) -features_1058_code=ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP -features_1059_li=Collation support, including support for the ICU4J library -features_1060_li=Support for users and roles -features_1061_li=Compatibility modes for IBM 
DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL. -features_1062_h3=Security Features -features_1063_li=Includes a solution for the SQL injection problem -features_1064_li=User password authentication uses SHA-256 and salt -features_1065_li=For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL) -features_1066_li=All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm -features_1067_li=The remote JDBC driver supports TCP/IP connections over TLS -features_1068_li=The built-in web server supports connections over TLS -features_1069_li=Passwords can be sent to the database using char arrays instead of Strings -features_1070_h3=Other Features and Tools -features_1071_li=Small footprint (smaller than 1.5 MB), low memory requirements -features_1072_li=Multiple index types (b-tree, tree, hash) -features_1073_li=Support for multi-dimensional indexes -features_1074_li=CSV (comma separated values) file support -features_1075_li=Support for linked tables, and a built-in virtual 'range' table -features_1076_li=Supports the EXPLAIN PLAN statement; sophisticated trace options -features_1077_li=Database closing can be delayed or disabled to improve the performance -features_1078_li=Web-based Console application (translated to many languages) with autocomplete -features_1079_li=The database can generate SQL script files -features_1080_li=Contains a recovery tool that can dump the contents of the database -features_1081_li=Support for variables (for example to calculate running totals) -features_1082_li=Automatic re-compilation of prepared statements -features_1083_li=Uses a small number of database files -features_1084_li=Uses a checksum for each record and log entry 
for data integrity -features_1085_li=Well tested (high code coverage, randomized stress tests) -features_1086_h2=Comparison to Other Database Engines -features_1087_p=\ This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0. -features_1088_th=Feature -features_1089_th=H2 -features_1090_th=Derby -features_1091_th=HSQLDB -features_1092_th=MySQL -features_1093_th=PostgreSQL -features_1094_td=Pure Java -features_1095_td=Yes -features_1096_td=Yes -features_1097_td=Yes -features_1098_td=No -features_1099_td=No -features_1100_td=Embedded Mode (Java) -features_1101_td=Yes -features_1102_td=Yes -features_1103_td=Yes -features_1104_td=No -features_1105_td=No -features_1106_td=In-Memory Mode -features_1107_td=Yes -features_1108_td=Yes -features_1109_td=Yes -features_1110_td=No -features_1111_td=No -features_1112_td=Explain Plan -features_1113_td=Yes -features_1114_td=Yes *12 -features_1115_td=Yes -features_1116_td=Yes -features_1117_td=Yes -features_1118_td=Built-in Clustering / Replication -features_1119_td=Yes -features_1120_td=Yes -features_1121_td=No -features_1122_td=Yes -features_1123_td=Yes -features_1124_td=Encrypted Database -features_1125_td=Yes -features_1126_td=Yes *10 -features_1127_td=Yes *10 -features_1128_td=No -features_1129_td=No -features_1130_td=Linked Tables -features_1131_td=Yes -features_1132_td=No -features_1133_td=Partially *1 -features_1134_td=Partially *2 -features_1135_td=No -features_1136_td=ODBC Driver -features_1137_td=Yes -features_1138_td=No -features_1139_td=No -features_1140_td=Yes -features_1141_td=Yes -features_1142_td=Fulltext Search -features_1143_td=Yes -features_1144_td=Yes -features_1145_td=No -features_1146_td=Yes -features_1147_td=Yes -features_1148_td=Domains (User-Defined Types) -features_1149_td=Yes -features_1150_td=No -features_1151_td=Yes -features_1152_td=Yes -features_1153_td=Yes -features_1154_td=Files per Database -features_1155_td=Few -features_1156_td=Many 
-features_1157_td=Few -features_1158_td=Many -features_1159_td=Many -features_1160_td=Row Level Locking -features_1161_td=Yes *9 -features_1162_td=Yes -features_1163_td=Yes *9 -features_1164_td=Yes -features_1165_td=Yes -features_1166_td=Multi Version Concurrency -features_1167_td=Yes -features_1168_td=No -features_1169_td=Yes -features_1170_td=Yes -features_1171_td=Yes -features_1172_td=Multi-Threaded Statement Processing -features_1173_td=No *11 -features_1174_td=Yes -features_1175_td=Yes -features_1176_td=Yes -features_1177_td=Yes -features_1178_td=Role Based Security -features_1179_td=Yes -features_1180_td=Yes *3 -features_1181_td=Yes -features_1182_td=Yes -features_1183_td=Yes -features_1184_td=Updatable Result Sets -features_1185_td=Yes -features_1186_td=Yes *7 -features_1187_td=Yes -features_1188_td=Yes -features_1189_td=Yes -features_1190_td=Sequences -features_1191_td=Yes -features_1192_td=Yes -features_1193_td=Yes -features_1194_td=No -features_1195_td=Yes -features_1196_td=Limit and Offset -features_1197_td=Yes -features_1198_td=Yes *13 -features_1199_td=Yes -features_1200_td=Yes -features_1201_td=Yes -features_1202_td=Window Functions -features_1203_td=No *15 -features_1204_td=No *15 -features_1205_td=No -features_1206_td=No -features_1207_td=Yes -features_1208_td=Temporary Tables -features_1209_td=Yes -features_1210_td=Yes *4 -features_1211_td=Yes -features_1212_td=Yes -features_1213_td=Yes -features_1214_td=Information Schema -features_1215_td=Yes -features_1216_td=No *8 -features_1217_td=Yes -features_1218_td=Yes -features_1219_td=Yes -features_1220_td=Computed Columns -features_1221_td=Yes -features_1222_td=Yes -features_1223_td=Yes -features_1224_td=No -features_1225_td=Yes *6 -features_1226_td=Case Insensitive Columns -features_1227_td=Yes -features_1228_td=Yes *14 -features_1229_td=Yes -features_1230_td=Yes -features_1231_td=Yes *6 -features_1232_td=Custom Aggregate Functions -features_1233_td=Yes -features_1234_td=No -features_1235_td=Yes 
-features_1236_td=Yes -features_1237_td=Yes -features_1238_td=CLOB/BLOB Compression -features_1239_td=Yes -features_1240_td=No -features_1241_td=No -features_1242_td=No -features_1243_td=Yes -features_1244_td=Footprint (jar/dll size) -features_1245_td=~1.5 MB *5 -features_1246_td=~3 MB -features_1247_td=~1.5 MB -features_1248_td=~4 MB -features_1249_td=~6 MB -features_1250_p=\ *1 HSQLDB supports text tables. -features_1251_p=\ *2 MySQL supports linked MySQL tables under the name 'federated tables'. -features_1252_p=\ *3 Derby support for roles based security and password checking as an option. -features_1253_p=\ *4 Derby only supports global temporary tables. -features_1254_p=\ *5 The default H2 jar file contains debug information, jar files for other databases do not. -features_1255_p=\ *6 PostgreSQL supports functional indexes. -features_1256_p=\ *7 Derby only supports updatable result sets if the query is not sorted. -features_1257_p=\ *8 Derby doesn't support standard compliant information schema tables. -features_1258_p=\ *9 When using MVCC (multi version concurrency). -features_1259_p=\ *10 Derby and HSQLDB don't hide data patterns well. -features_1260_p=\ *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC. -features_1261_p=\ *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans. -features_1262_p=\ *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY. -features_1263_p=\ *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER(). -features_1264_h3=DaffodilDb and One$Db -features_1265_p=\ It looks like the development of this database has stopped. The last release was February 2006. -features_1266_h3=McKoi -features_1267_p=\ It looks like the development of this database has stopped. The last release was August 2004. 
-features_1268_h2=H2 in Use -features_1269_p=\ For a list of applications that work with or use H2, see\: Links. -features_1270_h2=Connection Modes -features_1271_p=\ The following connection modes are supported\: -features_1272_li=Embedded mode (local connections using JDBC) -features_1273_li=Server mode (remote connections using JDBC or ODBC over TCP/IP) -features_1274_li=Mixed mode (local and remote connections at the same time) -features_1275_h3=Embedded Mode -features_1276_p=\ In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently, or on the number of open connections. -features_1277_h3=Server Mode -features_1278_p=\ When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode. -features_1279_p=\ The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of database open concurrently per server, or on the number of open connections. -features_1280_h3=Mixed Mode -features_1281_p=\ The mixed mode is a combination of the embedded and the server mode. 
The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower. -features_1282_p=\ The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's an local or remote connection) can do so using the exact same database URL. -features_1283_h2=Database URL Overview -features_1284_p=\ This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive. -features_1285_th=Topic -features_1286_th=URL Format and Examples -features_1287_a=Embedded (local) connection -features_1288_td=\ jdbc\:h2\:[file\:][<path>]<databaseName> -features_1289_td=\ jdbc\:h2\:~/test -features_1290_td=\ jdbc\:h2\:file\:/data/sample -features_1291_td=\ jdbc\:h2\:file\:C\:/data/sample (Windows only) -features_1292_a=In-memory (private) -features_1293_td=jdbc\:h2\:mem\: -features_1294_a=In-memory (named) -features_1295_td=\ jdbc\:h2\:mem\:<databaseName> -features_1296_td=\ jdbc\:h2\:mem\:test_mem -features_1297_a=Server mode (remote connections) -features_1298_a=\ using TCP/IP -features_1299_td=\ jdbc\:h2\:tcp\://<server>[\:<port>]/[<path>]<databaseName> -features_1300_td=\ jdbc\:h2\:tcp\://localhost/~/test -features_1301_td=\ jdbc\:h2\:tcp\://dbserv\:8084/~/sample -features_1302_td=\ jdbc\:h2\:tcp\://localhost/mem\:test -features_1303_a=Server mode (remote connections) -features_1304_a=\ using TLS -features_1305_td=\ jdbc\:h2\:ssl\://<server>[\:<port>]/<databaseName> -features_1306_td=\ jdbc\:h2\:ssl\://localhost\:8085/~/sample; 
-features_1307_a=Using encrypted files -features_1308_td=\ jdbc\:h2\:<url>;CIPHER\=AES -features_1309_td=\ jdbc\:h2\:ssl\://localhost/~/test;CIPHER\=AES -features_1310_td=\ jdbc\:h2\:file\:~/secure;CIPHER\=AES -features_1311_a=File locking methods -features_1312_td=\ jdbc\:h2\:<url>;FILE_LOCK\={FILE|SOCKET|NO} -features_1313_td=\ jdbc\:h2\:file\:~/private;CIPHER\=AES;FILE_LOCK\=SOCKET -features_1314_a=Only open if it already exists -features_1315_td=\ jdbc\:h2\:<url>;IFEXISTS\=TRUE -features_1316_td=\ jdbc\:h2\:file\:~/sample;IFEXISTS\=TRUE -features_1317_a=Don't close the database when the VM exits -features_1318_td=\ jdbc\:h2\:<url>;DB_CLOSE_ON_EXIT\=FALSE -features_1319_a=Execute SQL on connection -features_1320_td=\ jdbc\:h2\:<url>;INIT\=RUNSCRIPT FROM '~/create.sql' -features_1321_td=\ jdbc\:h2\:file\:~/sample;INIT\=RUNSCRIPT FROM '~/create.sql'\\;RUNSCRIPT FROM '~/populate.sql' -features_1322_a=User name and/or password -features_1323_td=\ jdbc\:h2\:<url>[;USER\=<username>][;PASSWORD\=<value>] -features_1324_td=\ jdbc\:h2\:file\:~/sample;USER\=sa;PASSWORD\=123 -features_1325_a=Debug trace settings -features_1326_td=\ jdbc\:h2\:<url>;TRACE_LEVEL_FILE\=<level 0..3> -features_1327_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_FILE\=3 -features_1328_a=Ignore unknown settings -features_1329_td=\ jdbc\:h2\:<url>;IGNORE_UNKNOWN_SETTINGS\=TRUE -features_1330_a=Custom file access mode -features_1331_td=\ jdbc\:h2\:<url>;ACCESS_MODE_DATA\=rws -features_1332_a=Database in a zip file -features_1333_td=\ jdbc\:h2\:zip\:<zipFileName>\!/<databaseName> -features_1334_td=\ jdbc\:h2\:zip\:~/db.zip\!/test -features_1335_a=Compatibility mode -features_1336_td=\ jdbc\:h2\:<url>;MODE\=<databaseType> -features_1337_td=\ jdbc\:h2\:~/test;MODE\=MYSQL -features_1338_a=Auto-reconnect -features_1339_td=\ jdbc\:h2\:<url>;AUTO_RECONNECT\=TRUE -features_1340_td=\ jdbc\:h2\:tcp\://localhost/~/test;AUTO_RECONNECT\=TRUE -features_1341_a=Automatic mixed mode -features_1342_td=\ 
jdbc\:h2\:<url>;AUTO_SERVER\=TRUE -features_1343_td=\ jdbc\:h2\:~/test;AUTO_SERVER\=TRUE -features_1344_a=Page size -features_1345_td=\ jdbc\:h2\:<url>;PAGE_SIZE\=512 -features_1346_a=Changing other settings -features_1347_td=\ jdbc\:h2\:<url>;<setting>\=<value>[;<setting>\=<value>...] -features_1348_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_SYSTEM_OUT\=3 -features_1349_h2=Connecting to an Embedded (Local) Database -features_1350_p=\ The database URL for connecting to a local database is jdbc\:h2\:[file\:][<path>]<databaseName>. The prefix file\: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depend on the operating system, however it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in\: jdbc\:h2\:~/test. -features_1351_h2=In-Memory Databases -features_1352_p=\ For certain use cases (for example\: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted. -features_1353_p=\ In some cases, only one connection to a in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc\:h2\:mem\: Opening two connections within the same virtual machine means opening two different (private) databases. -features_1354_p=\ Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example\: jdbc\:h2\:mem\:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment. 
-features_1355_p=\ To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as\: jdbc\:h2\:tcp\://localhost/mem\:db1. -features_1356_p=\ By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY\=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc\:h2\:mem\:test;DB_CLOSE_DELAY\=-1. -features_1357_h2=Database Files Encryption -features_1358_p=\ The database files can be encrypted. The encryption algorithm AES is supported. To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database. -features_1359_h3=Creating a New Database with File Encryption -features_1360_p=\ By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as if it already existed. -features_1361_h3=Connecting to an Encrypted Database -features_1362_p=\ The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database\: -features_1363_h3=Encrypting or Decrypting a Database -features_1364_p=\ To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. 
The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES\: -features_1365_h2=Database File Locking -features_1366_p=\ Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If database is closed, or if the process that opened the database terminates, this lock file is deleted. -features_1367_p=\ The following file locking methods are implemented\: -features_1368_li=The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second. -features_1369_li=The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer. -features_1370_li=The third method is FS. This will use native file locking using FileChannel.lock. -features_1371_li=It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption. -features_1372_p=\ To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method\: -features_1373_p=\ For more information about the algorithms, see Advanced / File Locking Protocols. -features_1374_h2=Opening a Database Only if it Already Exists -features_1375_p=\ By default, when an application calls DriverManager.getConnection(url, ...) 
and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow to open existing databases. To do this, add ;IFEXISTS\=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this\: -features_1376_h2=Closing a Database -features_1377_h3=Delayed Database Closing -features_1378_p=\ Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed\: -features_1379_p=\ The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL\: jdbc\:h2\:~/test;DB_CLOSE_DELAY\=10. -features_1380_h3=Don't Close a Database when the VM Exits -features_1381_p=\ By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. 
The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is\: -features_1382_h2=Execute SQL on Connection -features_1383_p=\ Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below. -features_1384_p=\ Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required\: -features_1385_p=\ Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead. -features_1386_h2=Ignore Unknown Settings -features_1387_p=\ Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS\=TRUE to the database URL. -features_1388_h2=Changing Other Settings when Opening a Connection -features_1389_p=\ In addition to the settings already described, other database settings can be passed in the database URL. 
Adding ;setting\=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc. -features_1390_h2=Custom File Access Mode -features_1391_p=\ Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA\=r. Also supported are rws and rwd. This setting must be specified in the database URL\: -features_1392_p=\ For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk. -features_1393_h2=Multiple Connections -features_1394_h3=Opening Multiple Databases at the Same Time -features_1395_p=\ An application can open multiple databases at the same time, including multiple connections to the same database. The number of open databases is only limited by the memory available. -features_1396_h3=Multiple Connections to the Same Database\: Client/Server -features_1397_p=\ If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security). -features_1398_h3=Multithreading Support -features_1399_p=\ This database is multithreading-safe. That means, if an application is multi-threaded, it does not need to worry about synchronizing access to the database. Internally, most requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait. 
-features_1400_p=\ An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this. -features_1401_h3=Locking, Lock-Timeout, Deadlocks -features_1402_p=\ Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks\: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement. -features_1403_p=\ If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown. -features_1404_p=\ Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK release all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. 
The following statements generate locks\: -features_1405_th=Type of Lock -features_1406_th=SQL Statement -features_1407_td=Read -features_1408_td=SELECT * FROM TEST; -features_1409_td=\ CALL SELECT MAX(ID) FROM TEST; -features_1410_td=\ SCRIPT; -features_1411_td=Write -features_1412_td=SELECT * FROM TEST WHERE 1\=0 FOR UPDATE; -features_1413_td=Write -features_1414_td=INSERT INTO TEST VALUES(1, 'Hello'); -features_1415_td=\ INSERT INTO TEST SELECT * FROM TEST; -features_1416_td=\ UPDATE TEST SET NAME\='Hi'; -features_1417_td=\ DELETE FROM TEST; -features_1418_td=Write -features_1419_td=ALTER TABLE TEST ...; -features_1420_td=\ CREATE INDEX ... ON TEST ...; -features_1421_td=\ DROP INDEX ...; -features_1422_p=\ The number of seconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent. -features_1423_h3=Avoiding Deadlocks -features_1424_p=\ To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE. -features_1425_h2=Database File Layout -features_1426_p=\ The following files are created for persistent databases\: -features_1427_th=File Name -features_1428_th=Description -features_1429_th=Number of Files -features_1430_td=\ test.h2.db -features_1431_td=\ Database file. -features_1432_td=\ Contains the transaction log, indexes, and data for all tables. -features_1433_td=\ Format\: <database>.h2.db -features_1434_td=\ 1 per database -features_1435_td=\ test.lock.db -features_1436_td=\ Database lock file. -features_1437_td=\ Automatically (re-)created while the database is in use. 
-features_1438_td=\ Format\: <database>.lock.db -features_1439_td=\ 1 per database (only if in use) -features_1440_td=\ test.trace.db -features_1441_td=\ Trace file (if the trace option is enabled). -features_1442_td=\ Contains trace information. -features_1443_td=\ Format\: <database>.trace.db -features_1444_td=\ Renamed to <database>.trace.db.old if it is too big. -features_1445_td=\ 0 or 1 per database -features_1446_td=\ test.lobs.db/* -features_1447_td=\ Directory containing one file for each -features_1448_td=\ BLOB or CLOB value larger than a certain size. -features_1449_td=\ Format\: <id>.t<tableId>.lob.db -features_1450_td=\ 1 per large object -features_1451_td=\ test.123.temp.db -features_1452_td=\ Temporary file. -features_1453_td=\ Contains a temporary blob or a large result set. -features_1454_td=\ Format\: <database>.<id>.temp.db -features_1455_td=\ 1 per object -features_1456_h3=Moving and Renaming Database Files -features_1457_p=\ Database name and location are not stored inside the database files. -features_1458_p=\ While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged). -features_1459_p=\ As there is no platform specific data in the files, they can be moved to other operating systems without problems. -features_1460_h3=Backup -features_1461_p=\ When the database is closed, it is possible to backup the database files. -features_1462_p=\ To backup data while the database is running, the SQL commands SCRIPT and BACKUP can be used. -features_1463_h2=Logging and Recovery -features_1464_p=\ Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. 
If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically. -features_1465_h2=Compatibility -features_1466_p=\ All database engines behave a little bit different. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible to other databases. There are still a few differences however\: -features_1467_p=\ In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE\=TRUE to the database URL (example\: jdbc\:h2\:~/test;IGNORECASE\=TRUE). -features_1468_h3=Compatibility Modes -features_1469_p=\ For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode\: -features_1470_h3=DB2 Compatibility Mode -features_1471_p=\ To use the IBM DB2 mode, use the database URL jdbc\:h2\:~/test;MODE\=DB2 or the SQL statement SET MODE DB2. -features_1472_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1473_li=Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET. -features_1474_li=Concatenating NULL with another value results in the other value. -features_1475_li=Support the pseudo-table SYSIBM.SYSDUMMY1. -features_1476_h3=Derby Compatibility Mode -features_1477_p=\ To use the Apache Derby mode, use the database URL jdbc\:h2\:~/test;MODE\=Derby or the SQL statement SET MODE Derby. -features_1478_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1479_li=For unique indexes, NULL is distinct. 
That means only one row with NULL in one of the columns is allowed. -features_1480_li=Concatenating NULL with another value results in the other value. -features_1481_li=Support the pseudo-table SYSIBM.SYSDUMMY1. -features_1482_h3=HSQLDB Compatibility Mode -features_1483_p=\ To use the HSQLDB mode, use the database URL jdbc\:h2\:~/test;MODE\=HSQLDB or the SQL statement SET MODE HSQLDB. -features_1484_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1485_li=When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required. -features_1486_li=For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -features_1487_li=Text can be concatenated using '+'. -features_1488_h3=MS SQL Server Compatibility Mode -features_1489_p=\ To use the MS SQL Server mode, use the database URL jdbc\:h2\:~/test;MODE\=MSSQLServer or the SQL statement SET MODE MSSQLServer. -features_1490_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1491_li=Identifiers may be quoted using square brackets as in [Test]. -features_1492_li=For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed. -features_1493_li=Concatenating NULL with another value results in the other value. -features_1494_li=Text can be concatenated using '+'. -features_1495_h3=MySQL Compatibility Mode -features_1496_p=\ To use the MySQL mode, use the database URL jdbc\:h2\:~/test;MODE\=MySQL or the SQL statement SET MODE MySQL. -features_1497_li=When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown. 
-features_1498_li=Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example\: create table test(id int primary key, name varchar(255), key idx_name(name)); -features_1499_li=Meta data calls return identifiers in lower case. -features_1500_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -features_1501_li=Concatenating NULL with another value results in the other value. -features_1502_p=\ Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using \=, LIKE, REGEXP. -features_1503_h3=Oracle Compatibility Mode -features_1504_p=\ To use the Oracle mode, use the database URL jdbc\:h2\:~/test;MODE\=Oracle or the SQL statement SET MODE Oracle. -features_1505_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1506_li=When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise. -features_1507_li=Concatenating NULL with another value results in the other value. -features_1508_li=Empty strings are treated like NULL values. -features_1509_h3=PostgreSQL Compatibility Mode -features_1510_p=\ To use the PostgreSQL mode, use the database URL jdbc\:h2\:~/test;MODE\=PostgreSQL or the SQL statement SET MODE PostgreSQL. -features_1511_li=For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null. -features_1512_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded. -features_1513_li=The system columns CTID and OID are supported. -features_1514_li=LOG(x) is base 10 in this mode. 
-features_1515_h2=Auto-Reconnect -features_1516_p=\ The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT\=TRUE to the database URL. -features_1517_p=\ Re-connecting will open a new session. After an automatic re-connect, variables and local temporary tables definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created. -features_1518_p=\ If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends. -features_1519_h2=Automatic Mixed Mode -features_1520_p=\ Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER\=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL\: -features_1521_p=\ Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported. -features_1522_p=\ The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. 
This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically). -features_1523_p=\ All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc\:h2\:tcp\:// or ssl\://) are not supported. This mode is not supported for in-memory databases. -features_1524_p=\ Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process). -features_1525_p=\ When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT\=9090. -features_1526_h2=Page Size -features_1527_p=\ The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE\= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created. -features_1528_h2=Using the Trace Options -features_1529_p=\ To find problems in an application, it is sometimes good to see what database operations were executed. 
This database offers the following trace features\: -features_1530_li=Trace to System.out and/or to a file -features_1531_li=Support for trace levels OFF, ERROR, INFO, DEBUG -features_1532_li=The maximum size of the trace file can be set -features_1533_li=It is possible to generate Java source code from the trace file -features_1534_li=Trace can be enabled at runtime by manually creating a file -features_1535_h3=Trace Options -features_1536_p=\ The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is\: -features_1537_p=\ The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example\: -features_1538_h3=Setting the Maximum Size of the Trace File -features_1539_p=\ When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example\: -features_1540_h3=Java Code Generation -features_1541_p=\ When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this\: -features_1542_p=\ To filter the Java source code, use the ConvertTraceFile tool as follows\: -features_1543_p=\ The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. 
The password is not listed in the trace file and therefore not included in the source code. -features_1544_h2=Using Other Logging APIs -features_1545_p=\ By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J. -features_1546_a=SLF4J -features_1547_p=\ is a simple facade for various logging APIs and allows plugging in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log. -features_1548_p=\ To enable SLF4J, set the file trace level to 4 in the database URL\: -features_1549_p=\ Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages. -features_1550_h2=Read Only Databases -features_1551_p=\ If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether a database is read-only\: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY(). 
-features_1552_p=\ Using the Custom Access Mode r the database can also be opened in read-only mode, even if the database file is not read only. -features_1553_h2=Read Only Databases in Zip or Jar File -features_1554_p=\ To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used. -features_1555_p=\ When the zip file is created, you can open the database in the zip file using the following database URL\: -features_1556_p=\ Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database. -features_1557_p=\ If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip. -features_1558_h3=Opening a Corrupted Database -features_1559_p=\ If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. 
The exceptions are logged, but opening the database will continue. -features_1560_h2=Computed Columns / Function Based Index -features_1561_p=\ A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time\: -features_1562_p=\ Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column\: -features_1563_p=\ When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table\: -features_1564_h2=Multi-Dimensional Indexes -features_1565_p=\ A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve. -features_1566_p=\ Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column). -features_1567_p=\ The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. 
The method used is not database dependent, and the tool can easily be ported to other databases. For an example how to use the tool, please have a look at the sample code provided in TestMultiDimension.java. -features_1568_h2=User-Defined Functions and Stored Procedures -features_1569_p=\ In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema. -features_1570_h3=Referencing a Compiled Method -features_1571_p=\ When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class\: -features_1572_p=\ The Java function must be registered in the database by calling CREATE ALIAS ... FOR\: -features_1573_p=\ For a complete sample application, see src/test/org/h2/samples/Function.java. -features_1574_h3=Declaring Functions as Source Code -features_1575_p=\ When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example\: -features_1576_p=\ By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. 
Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE\: -features_1577_p=\ The following template is used to create a complete Java class\: -features_1578_h3=Method Overloading -features_1579_p=\ Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code. -features_1580_h3=Function Data Type Mapping -features_1581_p=\ Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead. -features_1582_p=\ SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases\: java.lang.Object is mapped to OTHER (a serialized object). Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]\: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion. -features_1583_h3=Functions That Require a Connection -features_1584_p=\ If the first parameter of a Java function is a java.sql.Connection, then the connection to database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified. 
-features_1585_h3=Functions Throwing an Exception -features_1586_p=\ If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException. -features_1587_h3=Functions Returning a Result Set -features_1588_p=\ Functions may return a result set. Such a function can be called with the CALL statement\: -features_1589_h3=Using SimpleResultSet -features_1590_p=\ A function can create a result set using the SimpleResultSet tool\: -features_1591_h3=Using a Function as a Table -features_1592_p=\ A function that returns a result set can be used like a table. However, in this case the function is called at least twice\: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc\:columnlist\:connection. Otherwise, the URL of the connection is jdbc\:default\:connection. -features_1593_h2=Pluggable or User-Defined Tables -features_1594_p=\ For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines. -features_1595_p=\ In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface e.g. something like this\: -features_1596_p=\ and then create the table from SQL like this\: -features_1597_p=\ It is also possible to pass in parameters to the table engine, like so\: -features_1598_p=\ In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object. 
-features_1599_h2=Triggers -features_1600_p=\ This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server). -features_1601_p=\ The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database\: -features_1602_p=\ The trigger can be used to veto a change by throwing a SQLException. -features_1603_p=\ As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This allows the use of the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented\: -features_1604_h2=Compacting a Database -features_1605_p=\ Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However, re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this\: -features_1606_p=\ See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script. -features_1607_h2=Cache Settings -features_1608_p=\ The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. 
This setting can be set in the database connection URL (jdbc\:h2\:~/test;CACHE_SIZE\=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME \= 'info.CACHE_MAX_SIZE' -features_1609_p=\ An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE\=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. -features_1610_p=\ Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example\: jdbc\:h2\:~/test;CACHE_TYPE\=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first. -features_1611_p=\ To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed. 
-fragments_1000_div=\   &\#x25b2; -fragments_1001_label=Search\: -fragments_1002_label=Highlight keyword(s) -fragments_1003_a=Home -fragments_1004_a=Download -fragments_1005_a=Cheat Sheet -fragments_1006_b=Documentation -fragments_1007_a=Quickstart -fragments_1008_a=Installation -fragments_1009_a=Tutorial -fragments_1010_a=Features -fragments_1011_a=Performance -fragments_1012_a=Advanced -fragments_1013_b=Reference -fragments_1014_a=SQL Grammar -fragments_1015_a=Functions -fragments_1016_a=Data Types -fragments_1017_a=Javadoc -fragments_1018_a=PDF (1 MB) -fragments_1019_b=Support -fragments_1020_a=FAQ -fragments_1021_a=Error Analyzer -fragments_1022_a=Google Group (English) -fragments_1023_a=Google Group (Japanese) -fragments_1024_a=Google Group (Chinese) -fragments_1025_b=Appendix -fragments_1026_a=History & Roadmap -fragments_1027_a=License -fragments_1028_a=Build -fragments_1029_a=Links -fragments_1030_a=JaQu -fragments_1031_a=MVStore -fragments_1032_a=Architecture -fragments_1033_td=  -frame_1000_h1=H2 Database Engine -frame_1001_p=\ Welcome to H2, the free SQL database. The main features of H2 are\: -frame_1002_li=It is free to use for everybody, source code is included -frame_1003_li=Written in Java, but also available as native executable -frame_1004_li=JDBC and (partial) ODBC API -frame_1005_li=Embedded and client/server modes -frame_1006_li=Clustering is supported -frame_1007_li=A web client is included -frame_1008_h2=No Javascript -frame_1009_p=\ If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript. 
-frame_1010_p=\ Please enable Javascript, or go ahead without it\: H2 Database Engine -history_1000_h1=History and Roadmap -history_1001_a=\ Change Log -history_1002_a=\ Roadmap -history_1003_a=\ History of this Database Engine -history_1004_a=\ Why Java -history_1005_a=\ Supporters -history_1006_h2=Change Log -history_1007_p=\ The up-to-date change log is available at http\://www.h2database.com/html/changelog.html -history_1008_h2=Roadmap -history_1009_p=\ The current roadmap is available at http\://www.h2database.com/html/roadmap.html -history_1010_h2=History of this Database Engine -history_1011_p=\ The development of H2 was started in May 2004, but it was first published on December 14th 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue to work on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch. -history_1012_h2=Why Java -history_1013_p=\ The main reasons to use a Java database are\: -history_1014_li=Very simple to integrate in Java applications -history_1015_li=Support for many different platforms -history_1016_li=More secure than native applications (no buffer overflows) -history_1017_li=User defined functions (or triggers) run very fast -history_1018_li=Unicode support -history_1019_p=\ Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management. -history_1020_p=\ Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. 
Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing. -history_1021_p=\ Java is future proof\: a lot of companies support Java. Java is now open source. -history_1022_p=\ To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features. -history_1023_h2=Supporters -history_1024_p=\ Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page). -history_1025_a=xso; xBase Software Ontwikkeling, Netherlands -history_1026_a=Cognitect, USA -history_1027_a=Code 42 Software, Inc., Minneapolis -history_1028_li=Martin Wildam, Austria -history_1029_a=Code Lutin, France -history_1030_a=NetSuxxess GmbH, Germany -history_1031_a=Poker Copilot, Steve McLeod, Germany -history_1032_a=SkyCash, Poland -history_1033_a=Lumber-mill, Inc., Japan -history_1034_a=StockMarketEye, USA -history_1035_a=Eckenfelder GmbH & Co.KG, Germany -history_1036_li=Anthony Goubard, Netherlands -history_1037_li=Richard Hickey, USA -history_1038_li=Alessio Jacopo D'Adamo, Italy -history_1039_li=Ashwin Jayaprakash, USA -history_1040_li=Donald Bleyl, USA -history_1041_li=Frank Berger, Germany -history_1042_li=Florent Ramiere, France -history_1043_li=Jun Iyama, Japan -history_1044_li=Antonio Casqueiro, Portugal -history_1045_li=Oliver Computing LLC, USA -history_1046_li=Harpal Grover Consulting Inc., USA -history_1047_li=Elisabetta Berlini, Italy -history_1048_li=William Gilbert, USA -history_1049_li=Antonio Dieguez Rojas, Chile -history_1050_a=Ontology Works, USA -history_1051_li=Pete Haidinyak, USA -history_1052_li=William Osmond, USA 
-history_1053_li=Joachim Ansorg, Germany -history_1054_li=Oliver Soerensen, Germany -history_1055_li=Christos Vasilakis, Greece -history_1056_li=Fyodor Kupolov, Denmark -history_1057_li=Jakob Jenkov, Denmark -history_1058_li=Stéphane Chartrand, Switzerland -history_1059_li=Glenn Kidd, USA -history_1060_li=Gustav Trede, Sweden -history_1061_li=Joonas Pulakka, Finland -history_1062_li=Bjorn Darri Sigurdsson, Iceland -history_1063_li=Iyama Jun, Japan -history_1064_li=Gray Watson, USA -history_1065_li=Erik Dick, Germany -history_1066_li=Pengxiang Shao, China -history_1067_li=Bilingual Marketing Group, USA -history_1068_li=Philippe Marschall, Switzerland -history_1069_li=Knut Staring, Norway -history_1070_li=Theis Borg, Denmark -history_1071_li=Mark De Mendonca Duske, USA -history_1072_li=Joel A. Garringer, USA -history_1073_li=Olivier Chafik, France -history_1074_li=Rene Schwietzke, Germany -history_1075_li=Jalpesh Patadia, USA -history_1076_li=Takanori Kawashima, Japan -history_1077_li=Terrence JC Huang, China -history_1078_a=JiaDong Huang, Australia -history_1079_li=Laurent van Roy, Belgium -history_1080_li=Qian Chen, China -history_1081_li=Clinton Hyde, USA -history_1082_li=Kritchai Phromros, Thailand -history_1083_li=Alan Thompson, USA -history_1084_li=Ladislav Jech, Czech Republic -history_1085_li=Dimitrijs Fedotovs, Latvia -history_1086_li=Richard Manley-Reeve, United Kingdom -installation_1000_h1=Installation -installation_1001_a=\ Requirements -installation_1002_a=\ Supported Platforms -installation_1003_a=\ Installing the Software -installation_1004_a=\ Directory Structure -installation_1005_h2=Requirements -installation_1006_p=\ To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much. 
-installation_1007_h3=Database Engine -installation_1008_li=Windows XP or Vista, Mac OS X, or Linux -installation_1009_li=Sun Java 6 or newer -installation_1010_li=Recommended Windows file system\: NTFS (FAT32 only supports files up to 4 GB) -installation_1011_h3=H2 Console -installation_1012_li=Mozilla Firefox -installation_1013_h2=Supported Platforms -installation_1014_p=\ As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported. -installation_1015_h2=Installing the Software -installation_1016_p=\ To install the software, run the installer or unzip it to a directory of your choice. -installation_1017_h2=Directory Structure -installation_1018_p=\ After installing, you should get the following directory structure\: -installation_1019_th=Directory -installation_1020_th=Contents -installation_1021_td=bin -installation_1022_td=JAR and batch files -installation_1023_td=docs -installation_1024_td=Documentation -installation_1025_td=docs/html -installation_1026_td=HTML pages -installation_1027_td=docs/javadoc -installation_1028_td=Javadoc files -installation_1029_td=ext -installation_1030_td=External dependencies (downloaded when building) -installation_1031_td=service -installation_1032_td=Tools to run the database as a Windows Service -installation_1033_td=src -installation_1034_td=Source files -installation_1035_td=src/docsrc -installation_1036_td=Documentation sources -installation_1037_td=src/installer -installation_1038_td=Installer, shell, and release build script -installation_1039_td=src/main -installation_1040_td=Database engine source code -installation_1041_td=src/test -installation_1042_td=Test source code 
-installation_1043_td=src/tools -installation_1044_td=Tools and database adapters source code -jaqu_1000_h1=JaQu -jaqu_1001_a=\ What is JaQu -jaqu_1002_a=\ Differences to Other Data Access Tools -jaqu_1003_a=\ Current State -jaqu_1004_a=\ Building the JaQu Library -jaqu_1005_a=\ Requirements -jaqu_1006_a=\ Example Code -jaqu_1007_a=\ Configuration -jaqu_1008_a=\ Natural Syntax -jaqu_1009_a=\ Other Ideas -jaqu_1010_a=\ Similar Projects -jaqu_1011_h2=What is JaQu -jaqu_1012_p=\ Note\: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql. -jaqu_1013_p=\ JaQu stands for Java Query and allows to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code\: -jaqu_1014_p=\ stands for the SQL statement\: -jaqu_1015_h2=Differences to Other Data Access Tools -jaqu_1016_p=\ Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection. -jaqu_1017_p=\ JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application. -jaqu_1018_p=\ JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings). -jaqu_1019_h3=Restrictions -jaqu_1020_p=\ Primitive types (eg. 
boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead. -jaqu_1021_h3=Why in Java? -jaqu_1022_p=\ Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated\: you would need to split the application and database code, and write adapter / wrapper code. -jaqu_1023_h2=Current State -jaqu_1024_p=\ Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under\: -jaqu_1025_code=src/test/org/h2/test/jaqu/* -jaqu_1026_li=\ (samples and tests) -jaqu_1027_code=src/tools/org/h2/jaqu/* -jaqu_1028_li=\ (framework) -jaqu_1029_h2=Building the JaQu Library -jaqu_1030_p=\ To create the JaQu jar file, run\: build jarJaqu. This will create the file bin/h2jaqu.jar. -jaqu_1031_h2=Requirements -jaqu_1032_p=\ JaQu requires Java 6. Annotations are not needed. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API. -jaqu_1033_h2=Example Code -jaqu_1034_h2=Configuration -jaqu_1035_p=\ JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example\: -jaqu_1036_p=\ The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. 
Unlike XML mapping configuration, the configuration is integrated in the class itself. -jaqu_1037_h2=Natural Syntax -jaqu_1038_p=The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is\: -jaqu_1039_h2=Other Ideas -jaqu_1040_p=\ This project has just been started, and nothing is fixed yet. Some ideas are\: -jaqu_1041_li=Support queries on collections (instead of using a database). -jaqu_1042_li=Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA). -jaqu_1043_li=Internally use a JPA implementation (for example Hibernate) instead of SQL directly. -jaqu_1044_li=Use PreparedStatements and cache them. -jaqu_1045_h2=Similar Projects -jaqu_1046_a=iciql (a friendly fork of JaQu) -jaqu_1047_a=Cement Framework -jaqu_1048_a=Dreamsource ORM -jaqu_1049_a=Empire-db -jaqu_1050_a=JEQUEL\: Java Embedded QUEry Language -jaqu_1051_a=Joist -jaqu_1052_a=jOOQ -jaqu_1053_a=JoSQL -jaqu_1054_a=LIQUidFORM -jaqu_1055_a=Quaere (Alias implementation) -jaqu_1056_a=Quaere -jaqu_1057_a=Querydsl -jaqu_1058_a=Squill -license_1000_h1=License -license_1001_a=\ Summary and License FAQ -license_1002_a=\ Mozilla Public License Version 2.0 -license_1003_a=\ Eclipse Public License - Version 1.0 -license_1004_a=\ Export Control Classification Number (ECCN) -license_1005_h2=Summary and License FAQ -license_1006_p=\ H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL. -license_1007_li=You can use H2 for free. -license_1008_li=You can integrate it into your applications (including in commercial applications) and distribute it. -license_1009_li=Files containing only your code are not covered by this license (it is 'commercial friendly'). 
-license_1010_li=Modifications to the H2 source code must be published. -license_1011_li=You don't need to provide the source code of H2 if you did not modify anything. -license_1012_li=If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below. -license_1013_p=\ However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB\: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http\://www.bungisoft.com. -license_1014_p=\ About porting the source code to another language (for example C\# or C++)\: converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code. -license_1015_p=\ If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license. -license_1016_h2=Mozilla Public License Version 2.0 -license_1017_h3=1. Definitions -license_1018_p=1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -license_1019_p=1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. -license_1020_p=1.3. 
"Contribution" means Covered Software of a particular Contributor. -license_1021_p=1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -license_1022_p=1.5. "Incompatible With Secondary Licenses" means -license_1023_p=a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or -license_1024_p=b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. -license_1025_p=1.6. "Executable Form" means any form of the work other than Source Code Form. -license_1026_p=1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. -license_1027_p=1.8. "License" means this document. -license_1028_p=1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. -license_1029_p=1.10. "Modifications" means any of the following\: -license_1030_p=a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or -license_1031_p=b. any new file in Source Code Form that contains any Covered Software. -license_1032_p=1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. -license_1033_p=1.12. 
"Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -license_1034_p=1.13. "Source Code Form" means the form of the work preferred for making modifications. -license_1035_p=1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. -license_1036_h3=2. License Grants and Conditions -license_1037_h4=2.1. Grants -license_1038_p=Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license\: -license_1039_p=under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and -license_1040_p=under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. -license_1041_h4=2.2. Effective Date -license_1042_p=The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. -license_1043_h4=2.3. Limitations on Grant Scope -license_1044_p=The licenses granted in this Section 2 are the only rights granted under this License. 
No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor\: -license_1045_p=for any code that a Contributor has removed from Covered Software; or -license_1046_p=for infringements caused by\: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or -license_1047_p=under Patent Claims infringed by Covered Software in the absence of its Contributions. -license_1048_p=This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). -license_1049_h4=2.4. Subsequent Licenses -license_1050_p=No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). -license_1051_h4=2.5. Representation -license_1052_p=Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. -license_1053_h4=2.6. Fair Use -license_1054_p=This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. -license_1055_h4=2.7. Conditions -license_1056_p=Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. -license_1057_h3=3. Responsibilities -license_1058_h4=3.1. Distribution of Source Form -license_1059_p=All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. 
You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. -license_1060_h4=3.2. Distribution of Executable Form -license_1061_p=If You distribute Covered Software in Executable Form then\: -license_1062_p=such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and -license_1063_p=You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. -license_1064_h4=3.3. Distribution of a Larger Work -license_1065_p=You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). -license_1066_h4=3.4. 
Notices -license_1067_p=You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. -license_1068_h4=3.5. Application of Additional Terms -license_1069_p=You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. -license_1070_h3=4. Inability to Comply Due to Statute or Regulation -license_1071_p=If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must\: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. -license_1072_h3=5. Termination -license_1073_p=5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. 
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. -license_1074_p=5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. -license_1075_p=5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. -license_1076_h3=6. Disclaimer of Warranty -license_1077_p=Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. 
Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. -license_1078_h3=7. Limitation of Liability -license_1079_p=Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. -license_1080_h3=8. Litigation -license_1081_p=Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. -license_1082_h3=9. Miscellaneous -license_1083_p=This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. 
Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. -license_1084_h3=10. Versions of the License -license_1085_h4=10.1. New Versions -license_1086_p=Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. -license_1087_h4=10.2. Effect of New Versions -license_1088_p=You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. -license_1089_h4=10.3. Modified Versions -license_1090_p=If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). -license_1091_h4=10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses -license_1092_p=If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. -license_1093_h3=Exhibit A - Source Code Form License Notice -license_1094_p=If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. -license_1095_p=You may add additional accurate notices of copyright ownership. 
-license_1096_h3=Exhibit B - "Incompatible With Secondary Licenses" Notice -license_1097_h2=Eclipse Public License - Version 1.0 -license_1098_p=\ THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. -license_1099_h3=1. DEFINITIONS -license_1100_p=\ "Contribution" means\: -license_1101_p=\ a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and -license_1102_p=\ b) in the case of each subsequent Contributor\: -license_1103_p=\ i) changes to the Program, and -license_1104_p=\ ii) additions to the Program; -license_1105_p=\ where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which\: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. -license_1106_p=\ "Contributor" means any person or entity that distributes the Program. -license_1107_p=\ "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. -license_1108_p=\ "Program" means the Contributions distributed in accordance with this Agreement. -license_1109_p=\ "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. -license_1110_h3=2. 
GRANT OF RIGHTS -license_1111_p=\ a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. -license_1112_p=\ b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. -license_1113_p=\ c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. 
-license_1114_p=\ d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. -license_1115_h3=3. REQUIREMENTS -license_1116_p=\ A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that\: -license_1117_p=\ a) it complies with the terms and conditions of this Agreement; and -license_1118_p=\ b) its license agreement\: -license_1119_p=\ i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; -license_1120_p=\ ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; -license_1121_p=\ iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and -license_1122_p=\ iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. -license_1123_p=\ When the Program is made available in source code form\: -license_1124_p=\ a) it must be made available under this Agreement; and -license_1125_p=\ b) a copy of this Agreement must be included with each copy of the Program. -license_1126_p=\ Contributors may not remove or alter any copyright notices contained within the Program. -license_1127_p=\ Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. -license_1128_h3=4. 
COMMERCIAL DISTRIBUTION -license_1129_p=\ Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must\: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. -license_1130_p=\ For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. 
Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. -license_1131_h3=5. NO WARRANTY -license_1132_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. -license_1133_h3=6. DISCLAIMER OF LIABILITY -license_1134_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. -license_1135_h3=7. 
GENERAL -license_1136_p=\ If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. -license_1137_p=\ If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. -license_1138_p=\ All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. -license_1139_p=\ Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. 
The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. -license_1140_p=\ This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. -license_1141_h2=Export Control Classification Number (ECCN) -license_1142_p=\ As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page. -links_1000_h1=Links -links_1001_p=\ If you want to add a link, please send it to the support email address or post it to the group. -links_1002_a=\ Commercial Support -links_1003_a=\ Quotes -links_1004_a=\ Books -links_1005_a=\ Extensions -links_1006_a=\ Blog Articles, Videos -links_1007_a=\ Database Frontends / Tools -links_1008_a=\ Products and Projects -links_1009_h2=Commercial Support -links_1010_a=Commercial support for H2 is available -links_1011_p=\ from Steve McLeod (steve dot mcleod at gmail dot com). Please note he is not one of the main developers of H2. 
He describes himself as follows\: -links_1012_li=I'm a long time user of H2, routinely working with H2 databases several gigabytes in size. -links_1013_li=I'm the creator of popular commercial desktop software that uses H2. -links_1014_li=I'm a certified Java developer (SCJP). -links_1015_li=I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany. -links_1016_li=I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations." -links_1017_h2=Quotes -links_1018_a=\ Quote -links_1019_p=\: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... " -links_1020_h2=Books -links_1021_a=\ Seam In Action -links_1022_h2=Extensions -links_1023_a=\ Grails H2 Database Plugin -links_1024_a=\ h2osgi\: OSGi for the H2 Database -links_1025_a=\ H2Sharp\: ADO.NET interface for the H2 database engine -links_1026_a=\ A spatial extension of the H2 database. 
-links_1027_h2=Blog Articles, Videos -links_1028_a=\ Youtube\: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2 -links_1029_a=\ Analyzing CSVs with H2 in under 10 minutes (2009-12-07) -links_1030_a=\ Efficient sorting and iteration on large databases (2009-06-15) -links_1031_a=\ Porting Flexive to the H2 Database (2008-12-05) -links_1032_a=\ H2 Database with GlassFish (2008-11-24) -links_1033_a=\ H2 Database - Performance Tracing (2008-04-30) -links_1034_a=\ Open Source Databases Comparison (2007-09-11) -links_1035_a=\ The Codist\: The Open Source Frameworks I Use (2007-07-23) -links_1036_a=\ The Codist\: SQL Injections\: How Not To Get Stuck (2007-05-08) -links_1037_a=\ David Coldrick's Weblog\: New Version of H2 Database Released (2007-01-06) -links_1038_a=\ The Codist\: Write Your Own Database, Again (2006-11-13) -links_1039_h2=Project Pages -links_1040_a=\ Ohloh -links_1041_a=\ Freshmeat Project Page -links_1042_a=\ Wikipedia -links_1043_a=\ Java Source Net -links_1044_a=\ Linux Package Manager -links_1045_h2=Database Frontends / Tools -links_1046_a=\ Dataflyer -links_1047_p=\ A tool to browse databases and export data. -links_1048_a=\ DB Solo -links_1049_p=\ SQL query tool. -links_1050_a=\ DbVisualizer -links_1051_p=\ Database tool. -links_1052_a=\ Execute Query -links_1053_p=\ Database utility written in Java. -links_1054_a=\ Flyway -links_1055_p=\ The agile database migration framework for Java. -links_1056_a=\ [fleXive] -links_1057_p=\ JavaEE 5 open source framework for the development of complex and evolving (web-)applications. -links_1058_a=\ JDBC Console -links_1059_p=\ This small webapp gives an ability to execute SQL against datasources bound in container's JNDI. Based on H2 Console. -links_1060_a=\ HenPlus -links_1061_p=\ HenPlus is a SQL shell written in Java. -links_1062_a=\ JDBC lint -links_1063_p=\ Helps write correct and efficient code when using the JDBC API. 
-links_1064_a=\ OpenOffice -links_1065_p=\ Base is OpenOffice.org's database application. It provides access to relational data sources. -links_1066_a=\ RazorSQL -links_1067_p=\ An SQL query tool, database browser, SQL editor, and database administration tool. -links_1068_a=\ SQL Developer -links_1069_p=\ Universal Database Frontend. -links_1070_a=\ SQL Workbench/J -links_1071_p=\ Free DBMS-independent SQL tool. -links_1072_a=\ SQuirreL SQL Client -links_1073_p=\ Graphical tool to view the structure of a database, browse the data, issue SQL commands etc. -links_1074_a=\ SQuirreL DB Copy Plugin -links_1075_p=\ Tool to copy data from one database to another. -links_1076_h2=Products and Projects -links_1077_a=\ AccuProcess -links_1078_p=\ Visual business process modeling and simulation software for business users. -links_1079_a=\ Adeptia BPM -links_1080_p=\ A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows. -links_1081_a=\ Adeptia Integration -links_1082_p=\ Process-centric, services-based application integration suite. -links_1083_a=\ Aejaks -links_1084_p=\ A server-side scripting environment to build AJAX enabled web applications. -links_1085_a=\ Axiom Stack -links_1086_p=\ A web framework that let's you write dynamic web applications with Zen-like simplicity. -links_1087_a=\ Apache Cayenne -links_1088_p=\ Open source persistence framework providing object-relational mapping (ORM) and remoting services. -links_1089_a=\ Apache Jackrabbit -links_1090_p=\ Open source implementation of the Java Content Repository API (JCR). -links_1091_a=\ Apache OpenJPA -links_1092_p=\ Open source implementation of the Java Persistence API (JPA). -links_1093_a=\ AppFuse -links_1094_p=\ Helps building web applications. -links_1095_a=\ BGBlitz -links_1096_p=\ The Swiss army knife of Backgammon. 
-links_1097_a=\ Bonita -links_1098_p=\ Open source workflow solution for handing long-running, user-oriented processes providing out of the box workflow and business process management features. -links_1099_a=\ Bookmarks Portlet -links_1100_p=\ JSR 168 compliant bookmarks management portlet application. -links_1101_a=\ Claros inTouch -links_1102_p=\ Ajax communication suite with mail, addresses, notes, IM, and rss reader. -links_1103_a=\ CrashPlan PRO Server -links_1104_p=\ Easy and cross platform backup solution for business and service providers. -links_1105_a=\ DataNucleus -links_1106_p=\ Java persistent objects. -links_1107_a=\ DbUnit -links_1108_p=\ A JUnit extension (also usable with Ant) targeted for database-driven projects. -links_1109_a=\ DiffKit -links_1110_p=\ DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text. -links_1111_a=\ Dinamica Framework -links_1112_p=\ Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets). -links_1113_a=\ District Health Information Software 2 (DHIS) -links_1114_p=\ The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities. -links_1115_a=\ Ebean ORM Persistence Layer -links_1116_p=\ Open source Java Object Relational Mapping tool. -links_1117_a=\ Eclipse CDO -links_1118_p=\ The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution. -links_1119_a=\ Fabric3 -links_1120_p=\ Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http\://www.osoa.org). -links_1121_a=\ FIT4Data -links_1122_p=\ A testing framework for data management applications built on the Java implementation of FIT. 
-links_1123_a=\ Flux -links_1124_p=\ Java job scheduler, file transfer, workflow, and BPM. -links_1125_a=\ GeoServer -links_1126_p=\ GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing. -links_1127_a=\ GBIF Integrated Publishing Toolkit (IPT) -links_1128_p=\ The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data\: taxon primary occurrence data, taxon checklists and general resource metadata. -links_1129_a=\ GNU Gluco Control -links_1130_p=\ Helps you to manage your diabetes. -links_1131_a=\ Golden T Studios -links_1132_p=\ Fun-to-play games with a simple interface. -links_1133_a=\ GridGain -links_1134_p=\ GridGain is easy to use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure. -links_1135_a=\ Group Session -links_1136_p=\ Open source web groupware. -links_1137_a=\ HA-JDBC -links_1138_p=\ High-Availability JDBC\: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver. -links_1139_a=\ Hibernate -links_1140_p=\ Relational persistence for idiomatic Java (O-R mapping tool). -links_1141_a=\ Hibicius -links_1142_p=\ Online Banking Client for the HBCI protocol. -links_1143_a=\ ImageMapper -links_1144_p=\ ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface. -links_1145_a=\ JAMWiki -links_1146_p=\ Java-based Wiki engine. -links_1147_a=\ Jaspa -links_1148_p=\ Java Spatial. Jaspa potentially brings around 200 spatial functions. -links_1149_a=\ Java Simon -links_1150_p=\ Simple Monitoring API. 
-links_1151_a=\ JBoss jBPM -links_1152_p=\ A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration. -links_1153_a=\ JBoss Jopr -links_1154_p=\ An enterprise management solution for JBoss middleware projects and other application technologies. -links_1155_a=\ JGeocoder -links_1156_p=\ Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location. -links_1157_a=\ JGrass -links_1158_p=\ Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig. -links_1159_a=\ Jena -links_1160_p=\ Java framework for building Semantic Web applications. -links_1161_a=\ JMatter -links_1162_p=\ Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern. -links_1163_a=\ jOOQ (Java Object Oriented Querying) -links_1164_p=\ jOOQ is a fluent API for typesafe SQL query construction and execution -links_1165_a=\ Liftweb -links_1166_p=\ A Scala-based, secure, developer friendly web framework. -links_1167_a=\ LiquiBase -links_1168_p=\ A tool to manage database changes and refactorings. -links_1169_a=\ Luntbuild -links_1170_p=\ Build automation and management tool. -links_1171_a=\ localdb -links_1172_p=\ A tool that locates the full file path of the folder containing the database files. -links_1173_a=\ Magnolia -links_1174_p=\ Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays. -links_1175_a=\ MiniConnectionPoolManager -links_1176_p=\ A lightweight standalone JDBC connection pool manager. -links_1177_a=\ Mr. Persister -links_1178_p=\ Simple, small and fast object relational mapping. -links_1179_a=\ Myna Application Server -links_1180_p=\ Java web app that provides dynamic web content and Java libraries access from JavaScript. 
-links_1181_a=\ MyTunesRss -links_1182_p=\ MyTunesRSS lets you listen to your music wherever you are. -links_1183_a=\ NCGC CurveFit -links_1184_p=\ From\: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user; require a license to Oracle; lack advanced query/retrieval; and the ability to handle chemical structures. -links_1185_a=\ Nuxeo -links_1186_p=\ Standards-based, open source platform for building ECM applications. -links_1187_a=\ nWire -links_1188_p=\ Eclipse plug-in which expedites Java development. It's main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, thus, understand the application structure. -links_1189_a=\ Ontology Works -links_1190_p=\ This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise. -links_1191_a=\ Ontoprise OntoBroker -links_1192_p=\ SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations\: OWL, RDF, RDFS, SPARQL, and F-Logic. 
-links_1193_a=\ Open Anzo -links_1194_p=\ Semantic Application Server. -links_1195_a=\ OpenGroove -links_1196_p=\ OpenGroove is a groupware program that allows users to synchronize data. -links_1197_a=\ OpenSocial Development Environment (OSDE) -links_1198_p=\ Development tool for OpenSocial application. -links_1199_a=\ Orion -links_1200_p=\ J2EE Application Server. -links_1201_a=\ P5H2 -links_1202_p=\ A library for the Processing programming language and environment. -links_1203_a=\ Phase-6 -links_1204_p=\ A computer based learning software. -links_1205_a=\ Pickle -links_1206_p=\ Pickle is a Java library containing classes for persistence, concurrency, and logging. -links_1207_a=\ Piman -links_1208_p=\ Water treatment projects data management. -links_1209_a=\ PolePosition -links_1210_p=\ Open source database benchmark. -links_1211_a=\ Poormans -links_1212_p=\ Very basic CMS running as a SWT application and generating static html pages. -links_1213_a=\ Railo -links_1214_p=\ Railo is an alternative engine for the Cold Fusion Markup Language, that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine. -links_1215_a=\ Razuna -links_1216_p=\ Open source Digital Asset Management System with integrated Web Content Management. -links_1217_a=\ RIFE -links_1218_p=\ A full-stack web application framework with tools and APIs to implement most common web features. -links_1219_a=\ Sava -links_1220_p=\ Open-source web-based content management system. -links_1221_a=\ Scriptella -links_1222_p=\ ETL (Extract-Transform-Load) and script execution tool. -links_1223_a=\ Sesar -links_1224_p=\ Dependency Injection Container with Aspect Oriented Programming. -links_1225_a=\ SemmleCode -links_1226_p=\ Eclipse plugin to help you improve software quality. -links_1227_a=\ SeQuaLite -links_1228_p=\ A free, light-weight, java data access framework. 
-links_1229_a=\ ShapeLogic -links_1230_p=\ Toolkit for declarative programming, image processing and computer vision. -links_1231_a=\ Shellbook -links_1232_p=\ Desktop publishing application. -links_1233_a=\ Signsoft intelliBO -links_1234_p=\ Persistence middleware supporting the JDO specification. -links_1235_a=\ SimpleORM -links_1236_p=\ Simple Java Object Relational Mapping. -links_1237_a=\ SymmetricDS -links_1238_p=\ A web-enabled, database independent, data synchronization/replication software. -links_1239_a=\ SmartFoxServer -links_1240_p=\ Platform for developing multiuser applications and games with Macromedia Flash. -links_1241_a=\ Social Bookmarks Friend Finder -links_1242_p=\ A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com). -links_1243_a=\ sormula -links_1244_p=\ Simple object relational mapping. -links_1245_a=\ Springfuse -links_1246_p=\ Code generation For Spring, Spring MVC & Hibernate. -links_1247_a=\ SQLOrm -links_1248_p=\ Java Object Relation Mapping. -links_1249_a=\ StelsCSV and StelsXML -links_1250_p=\ StelsCSV is a CSV JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on text files. StelsXML is a XML JDBC type 4 driver that allows to perform SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine. -links_1251_a=\ StorYBook -links_1252_p=\ A summary-based tool for novelist and script writers. It helps to keep the overview over the various traces a story has. -links_1253_a=\ StreamCruncher -links_1254_p=\ Event (stream) processing kernel. -links_1255_a=\ SUSE Manager, part of Linux Enterprise Server 11 -links_1256_p=\ The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies. -links_1257_a=\ Tune Backup -links_1258_p=\ Easy-to-use backup solution for your iTunes library. -links_1259_a=\ weblica -links_1260_p=\ Desktop CMS. 
-links_1261_a=\ Web of Web -links_1262_p=\ Collaborative and realtime interactive media platform for the web. -links_1263_a=\ Werkzeugkasten -links_1264_p=\ Minimum Java Toolset. -links_1265_a=\ VPDA -links_1266_p=\ View providers driven applications is a Java based application framework for building applications composed from server components - view providers. -links_1267_a=\ Volunteer database -links_1268_p=\ A database front end to register volunteers, partnership and donation for a Non Profit organization. -mainWeb_1000_h1=H2 Database Engine -mainWeb_1001_p=\ Welcome to H2, the Java SQL database. The main features of H2 are\: -mainWeb_1002_li=Very fast, open source, JDBC API -mainWeb_1003_li=Embedded and server modes; in-memory databases -mainWeb_1004_li=Browser based Console application -mainWeb_1005_li=Small footprint\: around 1.5 MB jar file size -mainWeb_1006_h2=Download -mainWeb_1007_td=\ Version 1.4.187 (2015-04-10), Beta -mainWeb_1008_a=Windows Installer (5 MB) -mainWeb_1009_a=All Platforms (zip, 8 MB) -mainWeb_1010_a=All Downloads -mainWeb_1011_td=    -mainWeb_1012_h2=Support -mainWeb_1013_a=Stack Overflow (tag H2) -mainWeb_1014_a=Google Group English -mainWeb_1015_p=, Japanese -mainWeb_1016_p=\ For non-technical issues, use\: -mainWeb_1017_h2=Features -mainWeb_1018_th=H2 -mainWeb_1019_a=Derby -mainWeb_1020_a=HSQLDB -mainWeb_1021_a=MySQL -mainWeb_1022_a=PostgreSQL -mainWeb_1023_td=Pure Java -mainWeb_1024_td=Yes -mainWeb_1025_td=Yes -mainWeb_1026_td=Yes -mainWeb_1027_td=No -mainWeb_1028_td=No -mainWeb_1029_td=Memory Mode -mainWeb_1030_td=Yes -mainWeb_1031_td=Yes -mainWeb_1032_td=Yes -mainWeb_1033_td=No -mainWeb_1034_td=No -mainWeb_1035_td=Encrypted Database -mainWeb_1036_td=Yes -mainWeb_1037_td=Yes -mainWeb_1038_td=Yes -mainWeb_1039_td=No -mainWeb_1040_td=No -mainWeb_1041_td=ODBC Driver -mainWeb_1042_td=Yes -mainWeb_1043_td=No -mainWeb_1044_td=No -mainWeb_1045_td=Yes -mainWeb_1046_td=Yes -mainWeb_1047_td=Fulltext Search -mainWeb_1048_td=Yes 
-mainWeb_1049_td=No -mainWeb_1050_td=No -mainWeb_1051_td=Yes -mainWeb_1052_td=Yes -mainWeb_1053_td=Multi Version Concurrency -mainWeb_1054_td=Yes -mainWeb_1055_td=No -mainWeb_1056_td=Yes -mainWeb_1057_td=Yes -mainWeb_1058_td=Yes -mainWeb_1059_td=Footprint (jar/dll size) -mainWeb_1060_td=~1 MB -mainWeb_1061_td=~2 MB -mainWeb_1062_td=~1 MB -mainWeb_1063_td=~4 MB -mainWeb_1064_td=~6 MB -mainWeb_1065_p=\ See also the detailed comparison. -mainWeb_1066_h2=News -mainWeb_1067_b=Newsfeeds\: -mainWeb_1068_a=Full text (Atom) -mainWeb_1069_p=\ or Header only (RSS). -mainWeb_1070_b=Email Newsletter\: -mainWeb_1071_p=\ Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context. -mainWeb_1072_td=  -mainWeb_1073_h2=Contribute -mainWeb_1074_p=\ You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter\: -main_1000_h1=H2 Database Engine -main_1001_p=\ Welcome to H2, the free Java SQL database engine. -main_1002_a=Quickstart -main_1003_p=\ Get a fast overview. -main_1004_a=Tutorial -main_1005_p=\ Go through the samples. -main_1006_a=Features -main_1007_p=\ See what this database can do and how to use these features. 
-mvstore_1000_h1=MVStore -mvstore_1001_a=\ Overview -mvstore_1002_a=\ Example Code -mvstore_1003_a=\ Store Builder -mvstore_1004_a=\ R-Tree -mvstore_1005_a=\ Features -mvstore_1006_a=- Maps -mvstore_1007_a=- Versions -mvstore_1008_a=- Transactions -mvstore_1009_a=- In-Memory Performance and Usage -mvstore_1010_a=- Pluggable Data Types -mvstore_1011_a=- BLOB Support -mvstore_1012_a=- R-Tree and Pluggable Map Implementations -mvstore_1013_a=- Concurrent Operations and Caching -mvstore_1014_a=- Log Structured Storage -mvstore_1015_a=- Off-Heap and Pluggable Storage -mvstore_1016_a=- File System Abstraction, File Locking and Online Backup -mvstore_1017_a=- Encrypted Files -mvstore_1018_a=- Tools -mvstore_1019_a=- Exception Handling -mvstore_1020_a=- Storage Engine for H2 -mvstore_1021_a=\ File Format -mvstore_1022_a=\ Similar Projects and Differences to Other Storage Engines -mvstore_1023_a=\ Current State -mvstore_1024_a=\ Requirements -mvstore_1025_h2=Overview -mvstore_1026_p=\ The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL. -mvstore_1027_li=MVStore stands for "multi-version store". -mvstore_1028_li=Each store contains a number of maps that can be accessed using the java.util.Map interface. -mvstore_1029_li=Both file-based persistence and in-memory operation are supported. -mvstore_1030_li=It is intended to be fast, simple to use, and small. -mvstore_1031_li=Concurrent read and write operations are supported. -mvstore_1032_li=Transactions are supported (including concurrent transactions and 2-phase commit). -mvstore_1033_li=The tool is very modular. It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files. 
-mvstore_1034_h2=Example Code -mvstore_1035_p=\ The following sample code shows how to use the tool\: -mvstore_1036_h2=Store Builder -mvstore_1037_p=\ The MVStore.Builder provides a fluid interface to build a store if configuration options are needed. Example usage\: -mvstore_1038_p=\ The list of available options is\: -mvstore_1039_li=autoCommitBufferSize\: the size of the write buffer. -mvstore_1040_li=autoCommitDisabled\: to disable auto-commit. -mvstore_1041_li=backgroundExceptionHandler\: a handler for exceptions that could occur while writing in the background. -mvstore_1042_li=cacheSize\: the cache size in MB. -mvstore_1043_li=compress\: compress the data when storing using a fast algorithm (LZF). -mvstore_1044_li=compressHigh\: compress the data when storing using a slower algorithm (Deflate). -mvstore_1045_li=encryptionKey\: the key for file encryption. -mvstore_1046_li=fileName\: the name of the file, for file based stores. -mvstore_1047_li=fileStore\: the storage implementation to use. -mvstore_1048_li=pageSplitSize\: the point where pages are split. -mvstore_1049_li=readOnly\: open the file in read-only mode. -mvstore_1050_h2=R-Tree -mvstore_1051_p=\ The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows\: -mvstore_1052_p=\ The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32. -mvstore_1053_h2=Features -mvstore_1054_h3=Maps -mvstore_1055_p=\ Each store contains a set of named maps. A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterate over some or all keys, and so on. 
-mvstore_1056_p=\ Also supported, and very uncommon for maps, is fast index lookup\: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree. -mvstore_1057_p=\ In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key). -mvstore_1058_h3=Versions -mvstore_1059_p=\ A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast\: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported. -mvstore_1060_p=\ The following sample code shows how to create a store, open a map, add some data, and access the current and an old version\: -mvstore_1061_h3=Transactions -mvstore_1062_p=\ To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions). 
-mvstore_1063_p=\ Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log. -mvstore_1064_h3=In-Memory Performance and Usage -mvstore_1065_p=\ Performance of in-memory operations is about 50% slower than java.util.TreeMap. -mvstore_1066_p=\ The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory. -mvstore_1067_p=\ If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted. -mvstore_1068_p=\ As in all map implementations, keys need to be immutable, that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled). -mvstore_1069_h3=Pluggable Data Types -mvstore_1070_p=\ Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported\: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average. 
-mvstore_1071_p=\ Parameterized data types are supported (for example one could build a string data type that limits the length). -mvstore_1072_p=\ The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages. -mvstore_1073_h3=BLOB Support -mvstore_1074_p=\ There is a mechanism that stores large binary objects by splitting them into smaller blocks. This allows to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface. -mvstore_1075_h3=R-Tree and Pluggable Map Implementations -mvstore_1076_p=\ The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations. -mvstore_1077_h3=Concurrent Operations and Caching -mvstore_1078_p=\ Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot. -mvstore_1079_p=\ Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations. -mvstore_1080_p=\ For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). 
The plan is to add such a mechanism later when needed. -mvstore_1081_h3=Log Structured Storage -mvstore_1082_p=\ Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, until a block size of 2 MB, and then does not further increase.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only little data was changed. Changes can also be written explicitly by calling commit(). -mvstore_1083_p=\ When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index\: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks). -mvstore_1084_p=\ There are usually two write operations per chunk\: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default). 
-mvstore_1085_p=\ Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data. -mvstore_1086_p=\ Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs less disk operations per change, as data is only written once instead of twice or 3 times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates). -mvstore_1087_h3=Off-Heap and Pluggable Storage -mvstore_1088_p=\ Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file. -mvstore_1089_p=\ An off-heap storage implementation is available. This storage keeps the data in the off-heap memory, meaning outside of the regular garbage collected heap. This allows to use very large in-memory stores without having to increase the JVM heap, which would increase Java garbage collection pauses a lot. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call\: -mvstore_1090_h3=File System Abstraction, File Locking and Online Backup -mvstore_1091_p=\ The file system is pluggable. The same file system abstraction is used as H2 uses. The file can be encrypted using an encrypting file system wrapper. 
Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API. -mvstore_1092_p=\ Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used. -mvstore_1093_p=\ The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up. -mvstore_1094_h3=Encrypted Files -mvstore_1095_p=\ File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows\: -mvstore_1096_p=\ The following algorithms and settings are used\: -mvstore_1097_li=The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory. -mvstore_1098_li=The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm. -mvstore_1099_li=The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator. -mvstore_1100_li=To speed up opening an encrypted store on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower is opening a file. -mvstore_1101_li=The file itself is encrypted using the standardized disk encryption mode XTS-AES. 
Only little more than one AES-128 round per block is needed. -mvstore_1102_h3=Tools -mvstore_1103_p=\ There is a tool, the MVStoreTool, to dump the contents of a file. -mvstore_1104_h3=Exception Handling -mvstore_1105_p=\ This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur\: -mvstore_1106_code=IllegalStateException -mvstore_1107_li=\ if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases. -mvstore_1108_code=IllegalArgumentException -mvstore_1109_li=\ if a method was called with an illegal argument. -mvstore_1110_code=UnsupportedOperationException -mvstore_1111_li=\ if a method was called that is not supported, for example trying to modify a read-only map. -mvstore_1112_code=ConcurrentModificationException -mvstore_1113_li=\ if a map is modified concurrently. -mvstore_1114_h3=Storage Engine for H2 -mvstore_1115_p=\ For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE\=TRUE to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore. -mvstore_1116_h2=File Format -mvstore_1117_p=\ The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version. 
-mvstore_1118_p=\ Each chunk contains a number of B-tree pages. As an example, the following code\: -mvstore_1119_p=\ will result in the following two chunks (excluding metadata)\: -mvstore_1120_b=Chunk 1\: -mvstore_1121_p=\ - Page 1\: (root) node with 2 entries pointing to page 2 and 3 -mvstore_1122_p=\ - Page 2\: leaf with 140 entries (keys 0 - 139) -mvstore_1123_p=\ - Page 3\: leaf with 260 entries (keys 140 - 399) -mvstore_1124_b=Chunk 2\: -mvstore_1125_p=\ - Page 4\: (root) node with 2 entries pointing to page 3 and 5 -mvstore_1126_p=\ - Page 5\: leaf with 140 entries (keys 0 - 139) -mvstore_1127_p=\ That means each chunk contains the changes of one version\: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks. -mvstore_1128_h3=File Header -mvstore_1129_p=\ There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data\: -mvstore_1130_p=\ The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are\: -mvstore_1131_li=H\: The entry "H\:2" stands for the H2 database. -mvstore_1132_li=block\: The block number where one of the newest chunks starts (but not necessarily the newest). -mvstore_1133_li=blockSize\: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks. -mvstore_1134_li=chunk\: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't. -mvstore_1135_li=created\: The number of milliseconds since 1970 when the file was created. -mvstore_1136_li=format\: The file format number. 
Currently 1. -mvstore_1137_li=version\: The version number of the chunk. -mvstore_1138_li=fletcher\: The Fletcher-32 checksum of the header. -mvstore_1139_p=\ When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (details about this see below), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file. -mvstore_1140_h3=Chunk Format -mvstore_1141_p=\ There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk. -mvstore_1142_p=\ The footer allows to verify that the chunk is completely written (a chunk is written as one write operation), and allows to find the start position of the very last chunk in the file. The chunk header and footer contain the following data\: -mvstore_1143_p=\ The fields of the chunk header and footer are\: -mvstore_1144_li=chunk\: The chunk id. -mvstore_1145_li=block\: The first block of the chunk (multiply by the block size to get the position in the file). -mvstore_1146_li=len\: The size of the chunk in number of blocks. -mvstore_1147_li=map\: The id of the newest map; incremented when a new map is created. -mvstore_1148_li=max\: The sum of all maximum page sizes (see page format). -mvstore_1149_li=next\: The predicted start block of the next chunk. -mvstore_1150_li=pages\: The number of pages in the chunk. -mvstore_1151_li=root\: The position of the metadata root page (see page format). 
-mvstore_1152_li=time\: The time the chunk was written, in milliseconds after the file was created. -mvstore_1153_li=version\: The version this chunk represents. -mvstore_1154_li=fletcher\: The checksum of the footer. -mvstore_1155_p=\ Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first. -mvstore_1156_p=\ How the newest chunk is located when opening a store\: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), those chunk's header and footer are read as well. If it turned out to be a newer valid chunk, this is repeated, until the newest chunk was found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. 
When the next chunk is written, and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops. -mvstore_1157_h3=Page Format -mvstore_1158_p=\ Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is\: -mvstore_1159_li=length (int)\: Length of the page in bytes. -mvstore_1160_li=checksum (short)\: Checksum (chunk id xor offset within the chunk xor page length). -mvstore_1161_li=mapId (variable size int)\: The id of the map this page belongs to. -mvstore_1162_li=len (variable size int)\: The number of keys in the page. -mvstore_1163_li=type (byte)\: The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm). -mvstore_1164_li=children (array of long; internal nodes only)\: The position of the children. -mvstore_1165_li=childCounts (array of variable size long; internal nodes only)\: The total number of entries for the given child page. -mvstore_1166_li=keys (byte array)\: All keys, stored depending on the data type. -mvstore_1167_li=values (byte array; leaf pages only)\: All values, stored depending on the data type. -mvstore_1168_p=\ Even though this is not required by the file format, pages are stored in the following order\: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. 
This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk. -mvstore_1169_p=\ Pointers to pages are stored as a long, using a special format\: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2\: 64, 3\: 96, 4\: 128, 5\: 192, and so on until 31 which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows to estimate the amount of free space within a block, in addition to the number of free pages. -mvstore_1170_p=\ The total number of entries in child pages are kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree. -mvstore_1171_p=\ Data compression\: The data after the page type are optionally compressed using the LZF algorithm. -mvstore_1172_h3=Metadata Map -mvstore_1173_p=\ In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. 
This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries\: -mvstore_1174_li=chunk.1\: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length. -mvstore_1175_li=map.1\: The metadata of map 1. The entries are\: name, createVersion, and type. -mvstore_1176_li=name.data\: The map id of the map named "data". The value is "1". -mvstore_1177_li=root.1\: The root position of map 1. -mvstore_1178_li=setting.storeVersion\: The store version (a user defined value). -mvstore_1179_h2=Similar Projects and Differences to Other Storage Engines -mvstore_1180_p=\ Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java and Android application. -mvstore_1181_p=\ The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal. -mvstore_1182_p=\ Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses is a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android. -mvstore_1183_p=\ The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses is a log structured storage. The MVStore does not have a record size limit. -mvstore_1184_h2=Current State -mvstore_1185_p=\ The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay). -mvstore_1186_h2=Requirements -mvstore_1187_p=\ The MVStore is included in the latest H2 jar file. 
-mvstore_1188_p=\ There are no special requirements to use it. The MVStore should run on any JVM as well as on Android. -mvstore_1189_p=\ To build just the MVStore (without the database engine), run\: -mvstore_1190_p=\ This will create the file bin/h2mvstore-1.4.187.jar (about 200 KB). -performance_1000_h1=Performance -performance_1001_a=\ Performance Comparison -performance_1002_a=\ PolePosition Benchmark -performance_1003_a=\ Database Performance Tuning -performance_1004_a=\ Using the Built-In Profiler -performance_1005_a=\ Application Profiling -performance_1006_a=\ Database Profiling -performance_1007_a=\ Statement Execution Plans -performance_1008_a=\ How Data is Stored and How Indexes Work -performance_1009_a=\ Fast Database Import -performance_1010_h2=Performance Comparison -performance_1011_p=\ In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced. 
-performance_1012_h3=Embedded -performance_1013_th=Test Case -performance_1014_th=Unit -performance_1015_th=H2 -performance_1016_th=HSQLDB -performance_1017_th=Derby -performance_1018_td=Simple\: Init -performance_1019_td=ms -performance_1020_td=1019 -performance_1021_td=1907 -performance_1022_td=8280 -performance_1023_td=Simple\: Query (random) -performance_1024_td=ms -performance_1025_td=1304 -performance_1026_td=873 -performance_1027_td=1912 -performance_1028_td=Simple\: Query (sequential) -performance_1029_td=ms -performance_1030_td=835 -performance_1031_td=1839 -performance_1032_td=5415 -performance_1033_td=Simple\: Update (sequential) -performance_1034_td=ms -performance_1035_td=961 -performance_1036_td=2333 -performance_1037_td=21759 -performance_1038_td=Simple\: Delete (sequential) -performance_1039_td=ms -performance_1040_td=950 -performance_1041_td=1922 -performance_1042_td=32016 -performance_1043_td=Simple\: Memory Usage -performance_1044_td=MB -performance_1045_td=21 -performance_1046_td=10 -performance_1047_td=8 -performance_1048_td=BenchA\: Init -performance_1049_td=ms -performance_1050_td=919 -performance_1051_td=2133 -performance_1052_td=7528 -performance_1053_td=BenchA\: Transactions -performance_1054_td=ms -performance_1055_td=1219 -performance_1056_td=2297 -performance_1057_td=8541 -performance_1058_td=BenchA\: Memory Usage -performance_1059_td=MB -performance_1060_td=12 -performance_1061_td=15 -performance_1062_td=7 -performance_1063_td=BenchB\: Init -performance_1064_td=ms -performance_1065_td=905 -performance_1066_td=1993 -performance_1067_td=8049 -performance_1068_td=BenchB\: Transactions -performance_1069_td=ms -performance_1070_td=1091 -performance_1071_td=583 -performance_1072_td=1165 -performance_1073_td=BenchB\: Memory Usage -performance_1074_td=MB -performance_1075_td=17 -performance_1076_td=11 -performance_1077_td=8 -performance_1078_td=BenchC\: Init -performance_1079_td=ms -performance_1080_td=2491 -performance_1081_td=4003 
-performance_1082_td=8064 -performance_1083_td=BenchC\: Transactions -performance_1084_td=ms -performance_1085_td=1979 -performance_1086_td=803 -performance_1087_td=2840 -performance_1088_td=BenchC\: Memory Usage -performance_1089_td=MB -performance_1090_td=19 -performance_1091_td=22 -performance_1092_td=9 -performance_1093_td=Executed statements -performance_1094_td=\# -performance_1095_td=1930995 -performance_1096_td=1930995 -performance_1097_td=1930995 -performance_1098_td=Total time -performance_1099_td=ms -performance_1100_td=13673 -performance_1101_td=20686 -performance_1102_td=105569 -performance_1103_td=Statements per second -performance_1104_td=\# -performance_1105_td=141226 -performance_1106_td=93347 -performance_1107_td=18291 -performance_1108_h3=Client-Server -performance_1109_th=Test Case -performance_1110_th=Unit -performance_1111_th=H2 (Server) -performance_1112_th=HSQLDB -performance_1113_th=Derby -performance_1114_th=PostgreSQL -performance_1115_th=MySQL -performance_1116_td=Simple\: Init -performance_1117_td=ms -performance_1118_td=16338 -performance_1119_td=17198 -performance_1120_td=27860 -performance_1121_td=30156 -performance_1122_td=29409 -performance_1123_td=Simple\: Query (random) -performance_1124_td=ms -performance_1125_td=3399 -performance_1126_td=2582 -performance_1127_td=6190 -performance_1128_td=3315 -performance_1129_td=3342 -performance_1130_td=Simple\: Query (sequential) -performance_1131_td=ms -performance_1132_td=21841 -performance_1133_td=18699 -performance_1134_td=42347 -performance_1135_td=30774 -performance_1136_td=32611 -performance_1137_td=Simple\: Update (sequential) -performance_1138_td=ms -performance_1139_td=6913 -performance_1140_td=7745 -performance_1141_td=28576 -performance_1142_td=32698 -performance_1143_td=11350 -performance_1144_td=Simple\: Delete (sequential) -performance_1145_td=ms -performance_1146_td=8051 -performance_1147_td=9751 -performance_1148_td=42202 -performance_1149_td=44480 
-performance_1150_td=16555 -performance_1151_td=Simple\: Memory Usage -performance_1152_td=MB -performance_1153_td=22 -performance_1154_td=11 -performance_1155_td=9 -performance_1156_td=0 -performance_1157_td=1 -performance_1158_td=BenchA\: Init -performance_1159_td=ms -performance_1160_td=12996 -performance_1161_td=14720 -performance_1162_td=24722 -performance_1163_td=26375 -performance_1164_td=26060 -performance_1165_td=BenchA\: Transactions -performance_1166_td=ms -performance_1167_td=10134 -performance_1168_td=10250 -performance_1169_td=18452 -performance_1170_td=21453 -performance_1171_td=15877 -performance_1172_td=BenchA\: Memory Usage -performance_1173_td=MB -performance_1174_td=13 -performance_1175_td=15 -performance_1176_td=9 -performance_1177_td=0 -performance_1178_td=1 -performance_1179_td=BenchB\: Init -performance_1180_td=ms -performance_1181_td=15264 -performance_1182_td=16889 -performance_1183_td=28546 -performance_1184_td=31610 -performance_1185_td=29747 -performance_1186_td=BenchB\: Transactions -performance_1187_td=ms -performance_1188_td=3017 -performance_1189_td=3376 -performance_1190_td=1842 -performance_1191_td=2771 -performance_1192_td=1433 -performance_1193_td=BenchB\: Memory Usage -performance_1194_td=MB -performance_1195_td=17 -performance_1196_td=12 -performance_1197_td=11 -performance_1198_td=1 -performance_1199_td=1 -performance_1200_td=BenchC\: Init -performance_1201_td=ms -performance_1202_td=14020 -performance_1203_td=10407 -performance_1204_td=17655 -performance_1205_td=19520 -performance_1206_td=17532 -performance_1207_td=BenchC\: Transactions -performance_1208_td=ms -performance_1209_td=5076 -performance_1210_td=3160 -performance_1211_td=6411 -performance_1212_td=6063 -performance_1213_td=4530 -performance_1214_td=BenchC\: Memory Usage -performance_1215_td=MB -performance_1216_td=19 -performance_1217_td=21 -performance_1218_td=11 -performance_1219_td=1 -performance_1220_td=1 -performance_1221_td=Executed statements 
-performance_1222_td=\# -performance_1223_td=1930995 -performance_1224_td=1930995 -performance_1225_td=1930995 -performance_1226_td=1930995 -performance_1227_td=1930995 -performance_1228_td=Total time -performance_1229_td=ms -performance_1230_td=117049 -performance_1231_td=114777 -performance_1232_td=244803 -performance_1233_td=249215 -performance_1234_td=188446 -performance_1235_td=Statements per second -performance_1236_td=\# -performance_1237_td=16497 -performance_1238_td=16823 -performance_1239_td=7887 -performance_1240_td=7748 -performance_1241_td=10246 -performance_1242_h3=Benchmark Results and Comments -performance_1243_h4=H2 -performance_1244_p=\ Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is\: there is no limit on the result set size. -performance_1245_h4=HSQLDB -performance_1246_p=\ Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type\=cached), and the write delay is 1 second (SET WRITE_DELAY 1). -performance_1247_h4=Derby -performance_1248_p=\ Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified\: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. 
Derby supports a testing mode (system property derby.system.durability\=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode. -performance_1249_h4=PostgreSQL -performance_1250_p=\ Version 9.1.5 was used for the test. The following options where changed in postgresql.conf\: fsync \= off, commit_delay \= 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. -performance_1251_h4=MySQL -performance_1252_p=\ Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured. -performance_1253_h4=Firebird -performance_1254_p=\ Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance are welcome. 
-performance_1255_h4=Why Oracle / MS SQL Server / DB2 are Not Listed -performance_1256_p=\ The license of these databases does not allow to publish benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions. -performance_1257_h3=About this Benchmark -performance_1258_h4=How to Run -performance_1259_p=\ This test was as follows\: -performance_1260_h4=Separate Process per Database -performance_1261_p=\ For each database, a new process is started, to ensure the previous test does not impact the current test. -performance_1262_h4=Number of Connections -performance_1263_p=\ This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection. -performance_1264_h4=Real-World Tests -performance_1265_p=\ Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases\: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also\: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded. -performance_1266_h4=Comparing Embedded with Server Databases -performance_1267_p=\ This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested. -performance_1268_h4=Test Platform -performance_1269_p=\ This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6. 
-performance_1270_h4=Multiple Runs -performance_1271_p=\ When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured. -performance_1272_h4=Memory Usage -performance_1273_p=\ It is not enough to measure the time taken, the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases. -performance_1274_h4=Delayed Operations -performance_1275_p=\ Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially). -performance_1276_h4=Transaction Commit / Durability -performance_1277_p=\ Durability means transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about the effect. Many database suggest to 'batch' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). 
This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used. -performance_1278_h4=Using Prepared Statements -performance_1279_p=\ Wherever possible, the test cases use prepared statements. -performance_1280_h4=Currently Not Tested\: Startup Time -performance_1281_p=\ The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed. -performance_1282_h2=PolePosition Benchmark -performance_1283_p=\ The PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test was not run for a longer time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4). 
-performance_1284_th=Test Case -performance_1285_th=Unit -performance_1286_th=H2 -performance_1287_th=HSQLDB -performance_1288_th=MySQL -performance_1289_td=Melbourne write -performance_1290_td=ms -performance_1291_td=369 -performance_1292_td=249 -performance_1293_td=2022 -performance_1294_td=Melbourne read -performance_1295_td=ms -performance_1296_td=47 -performance_1297_td=49 -performance_1298_td=93 -performance_1299_td=Melbourne read_hot -performance_1300_td=ms -performance_1301_td=24 -performance_1302_td=43 -performance_1303_td=95 -performance_1304_td=Melbourne delete -performance_1305_td=ms -performance_1306_td=147 -performance_1307_td=133 -performance_1308_td=176 -performance_1309_td=Sepang write -performance_1310_td=ms -performance_1311_td=965 -performance_1312_td=1201 -performance_1313_td=3213 -performance_1314_td=Sepang read -performance_1315_td=ms -performance_1316_td=765 -performance_1317_td=948 -performance_1318_td=3455 -performance_1319_td=Sepang read_hot -performance_1320_td=ms -performance_1321_td=789 -performance_1322_td=859 -performance_1323_td=3563 -performance_1324_td=Sepang delete -performance_1325_td=ms -performance_1326_td=1384 -performance_1327_td=1596 -performance_1328_td=6214 -performance_1329_td=Bahrain write -performance_1330_td=ms -performance_1331_td=1186 -performance_1332_td=1387 -performance_1333_td=6904 -performance_1334_td=Bahrain query_indexed_string -performance_1335_td=ms -performance_1336_td=336 -performance_1337_td=170 -performance_1338_td=693 -performance_1339_td=Bahrain query_string -performance_1340_td=ms -performance_1341_td=18064 -performance_1342_td=39703 -performance_1343_td=41243 -performance_1344_td=Bahrain query_indexed_int -performance_1345_td=ms -performance_1346_td=104 -performance_1347_td=134 -performance_1348_td=678 -performance_1349_td=Bahrain update -performance_1350_td=ms -performance_1351_td=191 -performance_1352_td=87 -performance_1353_td=159 -performance_1354_td=Bahrain delete -performance_1355_td=ms 
-performance_1356_td=1215 -performance_1357_td=729 -performance_1358_td=6812 -performance_1359_td=Imola retrieve -performance_1360_td=ms -performance_1361_td=198 -performance_1362_td=194 -performance_1363_td=4036 -performance_1364_td=Barcelona write -performance_1365_td=ms -performance_1366_td=413 -performance_1367_td=832 -performance_1368_td=3191 -performance_1369_td=Barcelona read -performance_1370_td=ms -performance_1371_td=119 -performance_1372_td=160 -performance_1373_td=1177 -performance_1374_td=Barcelona query -performance_1375_td=ms -performance_1376_td=20 -performance_1377_td=5169 -performance_1378_td=101 -performance_1379_td=Barcelona delete -performance_1380_td=ms -performance_1381_td=388 -performance_1382_td=319 -performance_1383_td=3287 -performance_1384_td=Total -performance_1385_td=ms -performance_1386_td=26724 -performance_1387_td=53962 -performance_1388_td=87112 -performance_1389_p=\ There are a few problems with the PolePosition test\: -performance_1390_li=\ HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar with a newer version (for example hsqldb-1.8.0.7.jar), and then use the setting hsqldb.connecturl\=jdbc\:hsqldb\:file\:data/hsqldb/dbbench2;hsqldb.default_table_type\=cached;sql.enforce_size\=true in the file Jdbc.properties. -performance_1391_li=HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc\:h2\:file\:data/h2/dbbench;DB_CLOSE_DELAY\=-1 -performance_1392_li=The amount of cache memory is quite important, specially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account. 
-performance_1393_h2=Database Performance Tuning -performance_1394_h3=Keep Connections Open or Use a Connection Pool -performance_1395_p=\ If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is specially slow if the database is closed. By default the database is closed if the last connection is closed. -performance_1396_p=\ If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database. -performance_1397_h3=Use a Modern JVM -performance_1398_p=\ Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server command-line option improves performance at the cost of a slight increase in start-up time. -performance_1399_h3=Virus Scanners -performance_1400_p=\ Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs, that means even if somebody would store a virus in a database file, this would be harmless (when the virus does not run, it cannot spread). Some virus scanners allow to exclude files by suffix. Ensure files ending with .db are not scanned. -performance_1401_h3=Using the Trace Options -performance_1402_p=\ If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. 
For more information, see Using the Trace Options. -performance_1403_h3=Index Usage -performance_1404_p=\ This database uses indexes to improve the performance of SELECT, UPDATE, DELETE. If a column is used in the WHERE clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX statement. -performance_1405_h3=How Data is Stored Internally -performance_1406_p=\ For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table". -performance_1407_p=\ H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT is added to the table, which is used as the key for the data b-tree. 
All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB columns, which are stored externally). -performance_1408_p=\ For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple column, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree. -performance_1409_h3=Optimizer -performance_1410_p=\ This database uses a cost based optimizer. For simple and queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated. -performance_1411_h3=Expression Optimization -performance_1412_p=\ After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE clause is always false, then the table is not accessed at all. -performance_1413_h3=COUNT(*) Optimization -performance_1414_p=\ If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table. 
-performance_1415_h3=Updating Optimizer Statistics / Column Selectivity -performance_1416_p=\ When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example\: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME\='A' AND T2.ID\=T1.ID, two index can be used, in this case the index on NAME for T1 and the index on ID for T2. -performance_1417_p=\ If a table has multiple indexes, sometimes more than one index could be used. Example\: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME\='A' AND FIRSTNAME\='B', the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names. -performance_1418_p=\ The SQL statement ANALYZE can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer. -performance_1419_h3=In-Memory (Hash) Indexes -performance_1420_p=\ Using in-memory indexes, specially in-memory hash indexes, can speed up queries and data manipulation. -performance_1421_p=In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows itself will also be kept in-memory. Please note this may cause memory problems for large tables. 
-performance_1422_p=\ In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only supports direct lookup (WHERE ID \= ?) but not range scan (WHERE ID < ?). To use hash indexes, use HASH as in\: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...). -performance_1423_h3=Use Prepared Statements -performance_1424_p=\ If possible, use prepared statements with parameters. -performance_1425_h3=Prepared Statements and IN(...) -performance_1426_p=\ Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example\: -performance_1427_h3=Optimization Examples -performance_1428_p=\ See src/test/org/h2/samples/optimizations.sql for a few examples of queries that benefit from special optimizations built into the database. -performance_1429_h3=Cache Size and Type -performance_1430_p=\ By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings. -performance_1431_h3=Data Types -performance_1432_p=\ Each data type has different storage and performance characteristics\: -performance_1433_li=The DECIMAL/NUMERIC type is slower and requires more storage than the REAL and DOUBLE types. -performance_1434_li=Text types are slower to read, write, and compare than numeric types and generally require more storage. -performance_1435_li=See Large Objects for information on BINARY vs. BLOB and VARCHAR vs. CLOB performance. -performance_1436_li=Parsing and formatting takes longer for the TIME, DATE, and TIMESTAMP types than the numeric types. -performance_1437_code=SMALLINT/TINYINT/BOOLEAN -performance_1438_li=\ are not significantly smaller or faster to work with than INTEGER in most modes. 
-performance_1439_h3=Sorted Insert Optimization -performance_1440_p=\ To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED before the SELECT statement\: -performance_1441_h2=Using the Built-In Profiler -performance_1442_p=\ A very simple Java profiler is built-in. To use it, use the following template\: -performance_1443_h2=Application Profiling -performance_1444_h3=Analyze First -performance_1445_p=\ Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis(). But this does not work for complex applications with many modules, and for memory problems. -performance_1446_p=\ A simple way to profile an application is to use the built-in profiling tool of java. Example\: -performance_1447_p=\ Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l to get the process id, and then run jstack <pid> or kill -QUIT <pid> (Linux) or press Ctrl+C (Windows). -performance_1448_p=\ A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example\: -performance_1449_p=\ The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds. 
-performance_1450_h2=Database Profiling
-performance_1451_p=\ The ConvertTraceFile tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE\=2). The easiest way to set the trace level is to append the setting to the database URL, for example\: jdbc\:h2\:~/test;TRACE_LEVEL_FILE\=2 or jdbc\:h2\:tcp\://localhost/~/test;TRACE_LEVEL_FILE\=2. As an example, execute the following script using the H2 Console\:
-performance_1452_p=\ After running the test case, convert the .trace.db file using the ConvertTraceFile tool. The trace file is located in the same directory as the database file.
-performance_1453_p=\ The generated file test.sql will contain the SQL statements as well as the following profiling data (results vary)\:
-performance_1454_h2=Statement Execution Plans
-performance_1455_p=\ The SQL statement EXPLAIN displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN\: SELECT, UPDATE, DELETE, MERGE, INSERT. The following query shows that the database uses the primary key index to search for rows\:
-performance_1456_p=\ For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE (using the primary key). For each row, it will additionally check that the value of the column AMOUNT is larger than zero, and for those rows the database will search in the table CUSTOMER (using the primary key). The query plan contains some redundancy so it is a valid statement.
-performance_1457_h3=Displaying the Scan Count
-performance_1458_code=EXPLAIN ANALYZE
-performance_1459_p=\ additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN which only prepares it. 
The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan means this query doesn't use an index.
-performance_1460_p=\ The cache prevents the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Except for large CLOB and BLOB, which are not stored in the table.
-performance_1461_h3=Special Optimizations
-performance_1462_p=\ For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY is used.
-performance_1463_p=\ For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST, the query plan includes the line /* direct lookup */ if the data can be read from an index.
-performance_1464_p=\ For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is a non-unique or multi-column index on this column, and if this column has a low selectivity.
-performance_1465_p=\ For queries of the form SELECT * FROM TEST ORDER BY ID, the query plan includes the line /* index sorted */ to indicate there is no separate sorting required.
-performance_1466_p=\ For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID, the query plan includes the line /* group sorted */ to indicate there is no separate sorting required.
-performance_1467_h2=How Data is Stored and How Indexes Work
-performance_1468_p=\ Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT or BIGINT, then the value of this column is the row id, otherwise the database generates the row id automatically. 
There is a (non-standard) way to access the row id\: using the _ROWID_ pseudo-column\: -performance_1469_p=\ The data is stored in the database as follows\: -performance_1470_th=_ROWID_ -performance_1471_th=FIRST_NAME -performance_1472_th=NAME -performance_1473_th=CITY -performance_1474_th=PHONE -performance_1475_td=1 -performance_1476_td=John -performance_1477_td=Miller -performance_1478_td=Berne -performance_1479_td=123 456 789 -performance_1480_td=2 -performance_1481_td=Philip -performance_1482_td=Jones -performance_1483_td=Berne -performance_1484_td=123 012 345 -performance_1485_p=\ Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT\: -performance_1486_h3=Indexes -performance_1487_p=\ An index internally is basically just a table that contains the indexed column(s), plus the row id\: -performance_1488_p=\ In the index, the data is sorted by the indexed columns. So this index contains the following data\: -performance_1489_th=CITY -performance_1490_th=NAME -performance_1491_th=FIRST_NAME -performance_1492_th=_ROWID_ -performance_1493_td=Berne -performance_1494_td=Jones -performance_1495_td=Philip -performance_1496_td=2 -performance_1497_td=Berne -performance_1498_td=Miller -performance_1499_td=John -performance_1500_td=1 -performance_1501_p=\ When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). 
An index on city, name, and first name (multi-column index) allows to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used\:
-performance_1502_p=\ If your application often queries the table for a phone number, then it makes sense to create an additional index on it\:
-performance_1503_p=\ This index contains the phone number, and the row id\:
-performance_1504_th=PHONE
-performance_1505_th=_ROWID_
-performance_1506_td=123 012 345
-performance_1507_td=2
-performance_1508_td=123 456 789
-performance_1509_td=1
-performance_1510_h3=Using Multiple Indexes
-performance_1511_p=\ Within a query, only one index per logical table is used. Using the condition PHONE \= '123 567 789' OR CITY \= 'Berne' would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION. In this case, each individual query uses a different index\:
-performance_1512_h2=Fast Database Import
-performance_1513_p=\ To speed up large imports, consider using the following options temporarily\:
-performance_1514_code=SET LOG 0
-performance_1515_li=\ (disabling the transaction log)
-performance_1516_code=SET CACHE_SIZE
-performance_1517_li=\ (a large cache is faster)
-performance_1518_code=SET LOCK_MODE 0
-performance_1519_li=\ (disable locking)
-performance_1520_code=SET UNDO_LOG 0
-performance_1521_li=\ (disable the session undo log)
-performance_1522_p=\ These options can be set in the database URL\: jdbc\:h2\:~/test;LOG\=0;CACHE_SIZE\=65536;LOCK_MODE\=0;UNDO_LOG\=0. Most of those options are not recommended for regular use, that means you need to reset them after use. 
-performance_1523_p=\ If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ... is faster than CREATE TABLE(...); INSERT INTO ... SELECT .... -quickstart_1000_h1=Quickstart -quickstart_1001_a=\ Embedding H2 in an Application -quickstart_1002_a=\ The H2 Console Application -quickstart_1003_h2=Embedding H2 in an Application -quickstart_1004_p=\ This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to\: -quickstart_1005_li=Add the h2*.jar to the classpath (H2 does not have any dependencies) -quickstart_1006_li=Use the JDBC driver class\: org.h2.Driver -quickstart_1007_li=The database URL jdbc\:h2\:~/test opens the database test in your user home directory -quickstart_1008_li=A new database is automatically created -quickstart_1009_h2=The H2 Console Application -quickstart_1010_p=\ The Console lets you access a SQL database using a browser interface. -quickstart_1011_p=\ If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial. -quickstart_1012_h3=Step-by-Step -quickstart_1013_h4=Installation -quickstart_1014_p=\ Install the software using the Windows Installer (if you did not yet do that). -quickstart_1015_h4=Start the Console -quickstart_1016_p=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]\: -quickstart_1017_p=\ A new console window appears\: -quickstart_1018_p=\ Also, a new browser page should open with the URL http\://localhost\:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time. -quickstart_1019_h4=Login -quickstart_1020_p=\ Select [Generic H2] and click [Connect]\: -quickstart_1021_p=\ You are now logged in. 
-quickstart_1022_h4=Sample -quickstart_1023_p=\ Click on the [Sample SQL Script]\: -quickstart_1024_p=\ The SQL commands appear in the command area. -quickstart_1025_h4=Execute -quickstart_1026_p=\ Click [Run] -quickstart_1027_p=\ On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script. -quickstart_1028_h4=Disconnect -quickstart_1029_p=\ Click on [Disconnect]\: -quickstart_1030_p=\ to close the connection. -quickstart_1031_h4=End -quickstart_1032_p=\ Close the console window. For more information, see the Tutorial. -roadmap_1000_h1=Roadmap -roadmap_1001_p=\ New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches. -roadmap_1002_h2=Version 1.5.x\: Planned Changes -roadmap_1003_li=Replace file password hash with file encryption key; validate encryption key when connecting. -roadmap_1004_li=Remove "set binary collation" feature. -roadmap_1005_li=Remove the encryption algorithm XTEA. -roadmap_1006_li=Disallow referencing other tables in a table (via constraints for example). -roadmap_1007_li=Remove PageStore features like compress_lob. -roadmap_1008_h2=Version 1.4.x\: Planned Changes -roadmap_1009_li=Change license to MPL 2.0. -roadmap_1010_li=Automatic migration from 1.3 databases to 1.4. -roadmap_1011_li=Option to disable the file name suffix somehow (issue 447). -roadmap_1012_h2=Priority 1 -roadmap_1013_li=Bugfixes. -roadmap_1014_li=More tests with MULTI_THREADED\=1 (and MULTI_THREADED with MVCC)\: Online backup (using the 'backup' statement). -roadmap_1015_li=Server side cursors. -roadmap_1016_h2=Priority 2 -roadmap_1017_li=Support hints for the optimizer (which index to use, enforce the join order). -roadmap_1018_li=Full outer joins. -roadmap_1019_li=Access rights\: remember the owner of an object. 
Create, alter and drop privileges. COMMENT\: allow owner of object to change it. Issue 208\: Access rights for schemas. -roadmap_1020_li=Test multi-threaded in-memory db access. -roadmap_1021_li=MySQL, MS SQL Server compatibility\: support case sensitive (mixed case) identifiers without quotes. -roadmap_1022_li=Support GRANT SELECT, UPDATE ON [schemaName.] *. -roadmap_1023_li=Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL. -roadmap_1024_li=Clustering\: support mixed clustering mode (one embedded, others in server mode). -roadmap_1025_li=Clustering\: reads should be randomly distributed (optional) or to a designated database on RAM (parameter\: READ_FROM\=3). -roadmap_1026_li=Window functions\: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4; -roadmap_1027_li=PostgreSQL catalog\: use BEFORE SELECT triggers instead of views over metadata tables. -roadmap_1028_li=Compatibility\: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211. -roadmap_1029_li=Test very large databases and LOBs (up to 256 GB). -roadmap_1030_li=Store all temp files in the temp directory. -roadmap_1031_li=Don't use temp files, specially not deleteOnExit (bug 4513817\: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs. -roadmap_1032_li=Make DDL (Data Definition) operations transactional. -roadmap_1033_li=Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED). -roadmap_1034_li=Groovy Stored Procedures\: http\://groovy.codehaus.org/GSQL -roadmap_1035_li=Add a migration guide (list differences between databases). -roadmap_1036_li=Optimization\: automatic index creation suggestion using the trace file? -roadmap_1037_li=Fulltext search Lucene\: analyzer configuration, mergeFactor. 
-roadmap_1038_li=Compression performance\: don't allocate buffers, compress / expand in to out buffer. -roadmap_1039_li=Rebuild index functionality to shrink index size and improve performance. -roadmap_1040_li=Console\: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA). -roadmap_1041_li=Test performance again with SQL Server, Oracle, DB2. -roadmap_1042_li=Test with Spatial DB in a box / JTS\: http\://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification. -roadmap_1043_li=Write more tests and documentation for MVCC (Multi Version Concurrency Control). -roadmap_1044_li=Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after. -roadmap_1045_li=Implement, test, document XAConnection and so on. -roadmap_1046_li=Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption). -roadmap_1047_li=CHECK\: find out what makes CHECK\=TRUE slow, move to CHECK2. -roadmap_1048_li=Drop with invalidate views (so that source code is not lost). Check what other databases do exactly. -roadmap_1049_li=Index usage for (ID, NAME)\=(1, 'Hi'); document. -roadmap_1050_li=Set a connection read only (Connection.setReadOnly) or using a connection parameter. -roadmap_1051_li=Access rights\: finer grained access control (grant access for specific functions). -roadmap_1052_li=ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]). -roadmap_1053_li=Version check\: docs / web console (using Javascript), and maybe in the library (using TCP/IP). -roadmap_1054_li=Web server classloader\: override findResource / getResourceFrom. -roadmap_1055_li=Cost for embedded temporary view is calculated wrong, if result is constant. -roadmap_1056_li=Count index range query (count(*) where id between 10 and 20). -roadmap_1057_li=Performance\: update in-place. 
-roadmap_1058_li=Clustering\: when a database is back alive, automatically synchronize with the master (requires readable transaction log). -roadmap_1059_li=Database file name suffix\: a way to use no or a different suffix (for example using a slash). -roadmap_1060_li=Eclipse plugin. -roadmap_1061_li=Asynchronous queries to support publish/subscribe\: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification". -roadmap_1062_li=Fulltext search (native)\: reader / tokenizer / filter. -roadmap_1063_li=Linked schema using CSV files\: one schema for a directory of files; support indexes for CSV files. -roadmap_1064_li=iReport to support H2. -roadmap_1065_li=Include SMTP (mail) client (alert on cluster failure, low disk space,...). -roadmap_1066_li=Option for SCRIPT to only process one or a set of schemas or tables, and append to a file. -roadmap_1067_li=JSON parser and functions. -roadmap_1068_li=Copy database\: tool with config GUI and batch mode, extensible (example\: compare). -roadmap_1069_li=Document, implement tool for long running transactions using user-defined compensation statements. -roadmap_1070_li=Support SET TABLE DUAL READONLY. -roadmap_1071_li=GCJ\: what is the state now? -roadmap_1072_li=Events for\: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http\://docs.openlinksw.com/virtuoso/fn_dbev_startup.html -roadmap_1073_li=Optimization\: simpler log compression. -roadmap_1074_li=Support standard INFORMATION_SCHEMA tables, as defined in http\://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE\: http\://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http\://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif -roadmap_1075_li=Compatibility\: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby\: division by zero. HSQLDB\: 0.0e1 / 0.0e1 is NaN. 
-roadmap_1076_li=Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R). -roadmap_1077_li=Custom class loader to reload functions on demand. -roadmap_1078_li=Test http\://mysql-je.sourceforge.net/ -roadmap_1079_li=H2 Console\: the webclient could support more features like phpMyAdmin. -roadmap_1080_li=Support Oracle functions\: TO_DATE, TO_NUMBER. -roadmap_1081_li=Work on the Java to C converter. -roadmap_1082_li=The HELP information schema can be directly exposed in the Console. -roadmap_1083_li=Maybe use the 0x1234 notation for binary fields, see MS SQL Server. -roadmap_1084_li=Support Oracle CONNECT BY in some way\: http\://www.adp-gmbh.ch/ora/sql/connect_by.html http\://philip.greenspun.com/sql/trees.html -roadmap_1085_li=SQL Server 2005, Oracle\: support COUNT(*) OVER(). See http\://www.orafusion.com/art_anlytc.htm -roadmap_1086_li=SQL 2003\: http\://www.wiscorp.com/sql_2003_standard.zip -roadmap_1087_li=Version column (number/sequence and timestamp based). -roadmap_1088_li=Optimize getGeneratedKey\: send last identity after each execute (server). -roadmap_1089_li=Test and document UPDATE TEST SET (ID, NAME) \= (SELECT ID*10, NAME || '\!' FROM TEST T WHERE T.ID\=TEST.ID). -roadmap_1090_li=Max memory rows / max undo log size\: use block count / row size not row count. -roadmap_1091_li=Implement point-in-time recovery. -roadmap_1092_li=Support PL/SQL (programming language / control flow statements). -roadmap_1093_li=LIKE\: improved version for larger texts (currently using naive search). -roadmap_1094_li=Throw an exception when the application calls getInt on a Long (optional). -roadmap_1095_li=Default date format for input and output (local date constants). -roadmap_1096_li=Document ROWNUM usage for reports\: SELECT ROWNUM, * FROM (subquery). -roadmap_1097_li=File system that writes to two file systems (replication, replicating file system). 
-roadmap_1098_li=Standalone tool to get relevant system properties and add it to the trace output. -roadmap_1099_li=Support 'call proc(1\=value)' (PostgreSQL, Oracle). -roadmap_1100_li=Console\: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?). -roadmap_1101_li=Console\: autocomplete Ctrl+Space inserts template. -roadmap_1102_li=Option to encrypt .trace.db file. -roadmap_1103_li=Auto-Update feature for database, .jar file. -roadmap_1104_li=ResultSet SimpleResultSet.readFromURL(String url)\: id varchar, state varchar, released timestamp. -roadmap_1105_li=Partial indexing (see PostgreSQL). -roadmap_1106_li=Add GUI to build a custom version (embedded, fulltext,...) using build flags. -roadmap_1107_li=http\://rubyforge.org/projects/hypersonic/ -roadmap_1108_li=Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app). -roadmap_1109_li=Table order\: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility). -roadmap_1110_li=Backup tool should work with other databases as well. -roadmap_1111_li=Console\: -ifExists doesn't work for the console. Add a flag to disable other dbs. -roadmap_1112_li=Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess). -roadmap_1113_li=Java static code analysis\: http\://pmd.sourceforge.net/ -roadmap_1114_li=Java static code analysis\: http\://www.eclipse.org/tptp/ -roadmap_1115_li=Compatibility for CREATE SCHEMA AUTHORIZATION. -roadmap_1116_li=Implement Clob / Blob truncate and the remaining functionality. -roadmap_1117_li=Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ... -roadmap_1118_li=File locking\: writing a system property to detect concurrent access from the same VM (different classloaders). -roadmap_1119_li=Pure SQL triggers (example\: update parent table if the child table is changed). -roadmap_1120_li=Add H2 to Gem (Ruby install system). 
-roadmap_1121_li=Support linked JCR tables. -roadmap_1122_li=Native fulltext search\: min word length; store word positions. -roadmap_1123_li=Add an option to the SCRIPT command to generate only portable / standard SQL. -roadmap_1124_li=Updatable views\: create 'instead of' triggers automatically if possible (simple cases first). -roadmap_1125_li=Improve create index performance. -roadmap_1126_li=Compact databases without having to close the database (vacuum). -roadmap_1127_li=Implement more JDBC 4.0 features. -roadmap_1128_li=Support TRANSFORM / PIVOT as in MS Access. -roadmap_1129_li=SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...). -roadmap_1130_li=Support updatable views with join on primary keys (to extend a table). -roadmap_1131_li=Public interface for functions (not public static). -roadmap_1132_li=Support reading the transaction log. -roadmap_1133_li=Feature matrix as in i-net software. -roadmap_1134_li=Updatable result set on table without primary key or unique index. -roadmap_1135_li=Compatibility with Derby and PostgreSQL\: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221. -roadmap_1136_li=Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString') -roadmap_1137_li=Support data type INTERVAL -roadmap_1138_li=Support nested transactions (possibly using savepoints internally). -roadmap_1139_li=Add a benchmark for bigger databases, and one for many users. -roadmap_1140_li=Compression in the result set over TCP/IP. -roadmap_1141_li=Support curtimestamp (like curtime, curdate). -roadmap_1142_li=Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options. 
-roadmap_1143_li=Release locks (shared or exclusive) on demand -roadmap_1144_li=Support OUTER UNION -roadmap_1145_li=Support parameterized views (similar to CSVREAD, but using just SQL for the definition) -roadmap_1146_li=A way (JDBC driver) to map an URL (jdbc\:h2map\:c1) to a connection object -roadmap_1147_li=Support dynamic linked schema (automatically adding/updating/removing tables) -roadmap_1148_li=Clustering\: adding a node should be very fast and without interrupting clients (very short lock) -roadmap_1149_li=Compatibility\: \# is the start of a single line comment (MySQL) but date quote (Access). Mode specific -roadmap_1150_li=Run benchmarks with Android, Java 7, java -server -roadmap_1151_li=Optimizations\: faster hash function for strings. -roadmap_1152_li=DatabaseEventListener\: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality -roadmap_1153_li=Benchmark\: add a graph to show how databases scale (performance/database size) -roadmap_1154_li=Implement a SQLData interface to map your data over to a custom object -roadmap_1155_li=In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers \= true) -roadmap_1156_li=Support multiple directories (on different hard drives) for the same database -roadmap_1157_li=Server protocol\: use challenge response authentication, but client sends hash(user+password) encrypted with response -roadmap_1158_li=Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server) -roadmap_1159_li=Support native XML data type - see http\://en.wikipedia.org/wiki/SQL/XML -roadmap_1160_li=Support triggers with a string property or option\: SpringTrigger, OSGITrigger -roadmap_1161_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.id \= t2.id where t1.id \= t2.id; -roadmap_1162_li=Ability to resize the cache array when resizing the cache -roadmap_1163_li=Time based cache writing (one second after writing the log) 
-roadmap_1164_li=Check state of H2 driver for DDLUtils\: http\://issues.apache.org/jira/browse/DDLUTILS-185 -roadmap_1165_li=Index usage for REGEXP LIKE. -roadmap_1166_li=Compatibility\: add a role DBA (like ADMIN). -roadmap_1167_li=Better support multiple processors for in-memory databases. -roadmap_1168_li=Support N'text' -roadmap_1169_li=Support compatibility for jdbc\:hsqldb\:res\: -roadmap_1170_li=HSQLDB compatibility\: automatically convert to the next 'higher' data type. Example\: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB\: long; PostgreSQL\: integer out of range) -roadmap_1171_li=Provide an Java SQL builder with standard and H2 syntax -roadmap_1172_li=Trace\: write OS, file system, JVM,... when opening the database -roadmap_1173_li=Support indexes for views (probably requires materialized views) -roadmap_1174_li=Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters -roadmap_1175_li=Server\: use one listener (detect if the request comes from an PG or TCP client) -roadmap_1176_li=Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200 -roadmap_1177_li=Sequence\: PostgreSQL compatibility (rename, create) http\://www.postgresql.org/docs/8.2/static/sql-altersequence.html -roadmap_1178_li=DISTINCT\: support large result sets by sorting on all columns (additionally) and then removing duplicates. -roadmap_1179_li=Support a special trigger on all tables to allow building a transaction log reader. -roadmap_1180_li=File system with a background writer thread; test if this is faster -roadmap_1181_li=Better document the source code (high level documentation). -roadmap_1182_li=Support select * from dual a left join dual b on b.x\=(select max(x) from dual) -roadmap_1183_li=Optimization\: don't lock when the database is read-only -roadmap_1184_li=Issue 146\: Support merge join. 
-roadmap_1185_li=Integrate spatial functions from http\://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download -roadmap_1186_li=Cluster\: hot deploy (adding a node at runtime). -roadmap_1187_li=Support DatabaseMetaData.insertsAreDetected\: updatable result sets should detect inserts. -roadmap_1188_li=Oracle\: support DECODE method (convert to CASE WHEN). -roadmap_1189_li=Native search\: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping -roadmap_1190_li=Improve documentation of access rights. -roadmap_1191_li=Support opening a database that is in the classpath, maybe using a new file system. Workaround\: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation(). -roadmap_1192_li=Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others). -roadmap_1193_li=Remember the user defined data type (domain) of a column. -roadmap_1194_li=MVCC\: support multi-threaded kernel with multi-version concurrency. -roadmap_1195_li=Auto-server\: add option to define the port range or list. -roadmap_1196_li=Support Jackcess (MS Access databases) -roadmap_1197_li=Built-in methods to write large objects (BLOB and CLOB)\: FILE_WRITE('test.txt', 'Hello World') -roadmap_1198_li=Improve time to open large databases (see mail 'init time for distributed setup') -roadmap_1199_li=Move Maven 2 repository from hsql.sf.net to h2database.sf.net -roadmap_1200_li=Java 1.5 tool\: JdbcUtils.closeSilently(s1, s2,...) -roadmap_1201_li=Optimize A\=? OR B\=? to UNION if the cost is lower. -roadmap_1202_li=Javadoc\: document design patterns used -roadmap_1203_li=Support custom collators, for example for natural sort (for text that contains numbers). -roadmap_1204_li=Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt) -roadmap_1205_li=Convert SQL-injection-2.txt to html document, include SQLInjection.java sample -roadmap_1206_li=Support OUT parameters in user-defined procedures. 
-roadmap_1207_li=Web site design\: http\://www.igniterealtime.org/projects/openfire/index.jsp -roadmap_1208_li=HSQLDB compatibility\: Openfire server uses\: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC -roadmap_1209_li=Translation\: use ?? in help.csv -roadmap_1210_li=Translated .pdf -roadmap_1211_li=Recovery tool\: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file -roadmap_1212_li=Issue 357\: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT. -roadmap_1213_li=RECOVER\=2 to backup the database, run recovery, open the database -roadmap_1214_li=Recovery should work with encrypted databases -roadmap_1215_li=Corruption\: new error code, add help -roadmap_1216_li=Space reuse\: after init, scan all storages and free those that don't belong to a live database object -roadmap_1217_li=Access rights\: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects) -roadmap_1218_li=Support NOCACHE table option (Oracle). -roadmap_1219_li=Support table partitioning. -roadmap_1220_li=Add regular javadocs (using the default doclet, but another css) to the homepage. -roadmap_1221_li=The database should be kept open for a longer time when using the server mode. -roadmap_1222_li=Javadocs\: for each tool, add a copy & paste sample in the class level. -roadmap_1223_li=Javadocs\: add @author tags. -roadmap_1224_li=Fluent API for tools\: Server.createTcpServer().setPort(9081).setPassword(password).start(); -roadmap_1225_li=MySQL compatibility\: real SQL statement for DESCRIBE TEST -roadmap_1226_li=Use a default delay of 1 second before closing a database. -roadmap_1227_li=Write (log) to system table before adding to internal data structures. 
-roadmap_1228_li=Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup).
-roadmap_1229_li=Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case).
-roadmap_1230_li=MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem).
-roadmap_1231_li=Oracle compatibility\: support NLS_DATE_FORMAT.
-roadmap_1232_li=Support for Thread.interrupt to cancel running statements.
-roadmap_1233_li=Cluster\: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process).
-roadmap_1234_li=H2 Console\: support CLOB/BLOB download using a link.
-roadmap_1235_li=Support flashback queries as in Oracle.
-roadmap_1236_li=Import / Export of fixed width text files.
-roadmap_1237_li=HSQLDB compatibility\: automatic data type for SUM if the value is too big (by default use the same type as the data).
-roadmap_1238_li=Improve the optimizer to select the right index for special cases\: where id between 2 and 4 and booleanColumn
-roadmap_1239_li=Linked tables\: make hidden columns available (Oracle\: rowid and ora_rowscn columns).
-roadmap_1240_li=H2 Console\: in-place autocomplete.
-roadmap_1241_li=Support large databases\: split database files to multiple directories / disks (similar to tablespaces).
-roadmap_1242_li=H2 Console\: support configuration option for fixed width (monospace) font.
-roadmap_1243_li=Native fulltext search\: support analyzers (specially for Chinese, Japanese).
-roadmap_1244_li=Automatically compact databases from time to time (as a background process).
-roadmap_1245_li=Test Eclipse DTP.
-roadmap_1246_li=H2 Console\: autocomplete\: keep the previous setting
-roadmap_1247_li=executeBatch\: option to stop at the first failed statement. 
-roadmap_1248_li=Implement OLAP features as described here\: http\://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5 -roadmap_1249_li=Support Oracle ROWID (unique identifier for each row). -roadmap_1250_li=MySQL compatibility\: alter table add index i(c), add constraint c foreign key(c) references t(c); -roadmap_1251_li=Server mode\: improve performance for batch updates. -roadmap_1252_li=Applets\: support read-only databases in a zip file (accessed as a resource). -roadmap_1253_li=Long running queries / errors / trace system table. -roadmap_1254_li=H2 Console should support JaQu directly. -roadmap_1255_li=Better document FTL_SEARCH, FTL_SEARCH_DATA. -roadmap_1256_li=Sequences\: CURRVAL should be session specific. Compatibility with PostgreSQL. -roadmap_1257_li=Index creation using deterministic functions. -roadmap_1258_li=ANALYZE\: for unique indexes that allow null, count the number of null. -roadmap_1259_li=MySQL compatibility\: multi-table delete\: DELETE .. FROM .. [,...] USING - See http\://dev.mysql.com/doc/refman/5.0/en/delete.html -roadmap_1260_li=AUTO_SERVER\: support changing IP addresses (disable a network while the database is open). -roadmap_1261_li=Avoid using java.util.Calendar internally because it's slow, complicated, and buggy. -roadmap_1262_li=Support TRUNCATE .. CASCADE like PostgreSQL. -roadmap_1263_li=Fulltext search\: lazy result generation using SimpleRowSource. -roadmap_1264_li=Fulltext search\: support alternative syntax\: WHERE FTL_CONTAINS(name, 'hello'). -roadmap_1265_li=MySQL compatibility\: support REPLACE, see http\://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73. -roadmap_1266_li=MySQL compatibility\: support INSERT INTO table SET column1 \= value1, column2 \= value2 -roadmap_1267_li=Docs\: add a one line description for each functions and SQL statements at the top (in the link section). -roadmap_1268_li=Javadoc search\: weight for titles should be higher ('random' should list Functions as the best match). 
-roadmap_1269_li=Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes. -roadmap_1270_li=Issue 50\: Oracle compatibility\: support calling 0-parameters functions without parenthesis. Make constants obsolete. -roadmap_1271_li=MySQL, HSQLDB compatibility\: support where 'a'\=1 (not supported by Derby, PostgreSQL) -roadmap_1272_li=Support a data type "timestamp with timezone" using java.util.Calendar. -roadmap_1273_li=Finer granularity for SLF4J trace - See http\://code.google.com/p/h2database/issues/detail?id\=62 -roadmap_1274_li=Add database creation date and time to the database. -roadmap_1275_li=Support ASSERTION. -roadmap_1276_li=MySQL compatibility\: support comparing 1\='a' -roadmap_1277_li=Support PostgreSQL lock modes\: http\://www.postgresql.org/docs/8.3/static/explicit-locking.html -roadmap_1278_li=PostgreSQL compatibility\: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver. -roadmap_1279_li=RunScript should be able to read from system in (or quite mode for Shell). -roadmap_1280_li=Natural join\: support select x from dual natural join dual. -roadmap_1281_li=Support using system properties in database URLs (may be a security problem). -roadmap_1282_li=Natural join\: somehow support this\: select a.x, b.x, x from dual a natural join dual b -roadmap_1283_li=Use the Java service provider mechanism to register file systems and function libraries. -roadmap_1284_li=MySQL compatibility\: for auto_increment columns, convert 0 to next value (as when inserting NULL). -roadmap_1285_li=Optimization for multi-column IN\: use an index if possible. Example\: (A, B) IN((1, 2), (2, 3)). -roadmap_1286_li=Optimization for EXISTS\: convert to inner join or IN(..) if possible. -roadmap_1287_li=Functions\: support hashcode(value); cryptographic and fast -roadmap_1288_li=Serialized file lock\: support long running queries. -roadmap_1289_li=Network\: use 127.0.0.1 if other addresses don't work. 
-roadmap_1290_li=Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication. -roadmap_1291_li=Support reading JCR data\: one table per node type; query table; cache option -roadmap_1292_li=OSGi\: create a sample application, test, document. -roadmap_1293_li=help.csv\: use complete examples for functions; run as test case. -roadmap_1294_li=Functions to calculate the memory and disk space usage of a table, a row, or a value. -roadmap_1295_li=Re-implement PooledConnection; use a lightweight connection object. -roadmap_1296_li=Doclet\: convert tests in javadocs to a java class. -roadmap_1297_li=Doclet\: format fields like methods, but support sorting by name and value. -roadmap_1298_li=Doclet\: shrink the html files. -roadmap_1299_li=MySQL compatibility\: support SET NAMES 'latin1' - See also http\://code.google.com/p/h2database/issues/detail?id\=56 -roadmap_1300_li=Allow to scan index backwards starting with a value (to better support ORDER BY DESC). -roadmap_1301_li=Java Service Wrapper\: try http\://yajsw.sourceforge.net/ -roadmap_1302_li=Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE. -roadmap_1303_li=MySQL compatibility\: support ALTER TABLE .. MODIFY COLUMN. -roadmap_1304_li=Use a lazy and auto-close input stream (open resource when reading, close on eof). -roadmap_1305_li=Connection pool\: 'reset session' command (delete temp tables, rollback, auto-commit true). -roadmap_1306_li=Improve SQL documentation, see http\://www.w3schools.com/sql/ -roadmap_1307_li=MySQL compatibility\: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL. -roadmap_1308_li=MS SQL Server compatibility\: support DATEPART syntax. 
-roadmap_1309_li=Sybase/DB2/Oracle compatibility\: support out parameters in stored procedures - See http\://code.google.com/p/h2database/issues/detail?id\=83 -roadmap_1310_li=Support INTERVAL data type (see Oracle and others). -roadmap_1311_li=Combine Server and Console tool (only keep Server). -roadmap_1312_li=Store the Lucene index in the database itself. -roadmap_1313_li=Support standard MERGE statement\: http\://en.wikipedia.org/wiki/Merge_%28SQL%29 -roadmap_1314_li=Oracle compatibility\: support DECODE(x, ...). -roadmap_1315_li=MVCC\: compare concurrent update behavior with PostgreSQL and Oracle. -roadmap_1316_li=HSQLDB compatibility\: CREATE FUNCTION (maybe using a Function interface). -roadmap_1317_li=HSQLDB compatibility\: support CALL "java.lang.Math.sqrt"(2.0) -roadmap_1318_li=Support comma as the decimal separator in the CSV tool. -roadmap_1319_li=Compatibility\: Java functions with SQLJ Part1 http\://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz -roadmap_1320_li=Compatibility\: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation. -roadmap_1321_li=CACHE_SIZE\: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache. -roadmap_1322_li=Support date/time/timestamp as documented in http\://en.wikipedia.org/wiki/ISO_8601 -roadmap_1323_li=PostgreSQL compatibility\: when in PG mode, treat BYTEA data like PG. -roadmap_1324_li=Support \=ANY(array) as in PostgreSQL. See also http\://www.postgresql.org/docs/8.0/interactive/arrays.html -roadmap_1325_li=IBM DB2 compatibility\: support PREVIOUS VALUE FOR sequence. -roadmap_1326_li=Compatibility\: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer). -roadmap_1327_li=Oracle compatibility\: support CREATE SYNONYM table FOR schema.table. 
-roadmap_1328_li=FTP\: document the server, including -ftpTask option to execute / kill remote processes -roadmap_1329_li=FTP\: problems with multithreading? -roadmap_1330_li=FTP\: implement SFTP / FTPS -roadmap_1331_li=FTP\: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file). -roadmap_1332_li=More secure default configuration if remote access is enabled. -roadmap_1333_li=Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future'). -roadmap_1334_li=Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE. -roadmap_1335_li=Issue 107\: Prefer using the ORDER BY index if LIMIT is used. -roadmap_1336_li=An index on (id, name) should be used for a query\: select * from t where s\=? order by i -roadmap_1337_li=Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL. -roadmap_1338_li=Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true). -roadmap_1339_li=Maybe disallow \= within database names (jdbc\:h2\:mem\:MODE\=DB2 means database name MODE\=DB2). -roadmap_1340_li=Fast alter table add column. -roadmap_1341_li=Improve concurrency for in-memory database operations. -roadmap_1342_li=Issue 122\: Support for connection aliases for remote tcp connections. -roadmap_1343_li=Fast scrambling (strong encryption doesn't help if the password is included in the application). -roadmap_1344_li=H2 Console\: support -webPassword to require a password to access preferences or shutdown. -roadmap_1345_li=Issue 126\: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number. 
-roadmap_1346_li=Issue 127\: Support activation/deactivation of triggers -roadmap_1347_li=Issue 130\: Custom log event listeners -roadmap_1348_li=Issue 131\: IBM DB2 compatibility\: sysibm.sysdummy1 -roadmap_1349_li=Issue 132\: Use Java enum trigger type. -roadmap_1350_li=Issue 134\: IBM DB2 compatibility\: session global variables. -roadmap_1351_li=Cluster\: support load balance with values for each server / auto detect. -roadmap_1352_li=FTL_SET_OPTION(keyString, valueString) with key stopWords at first. -roadmap_1353_li=Pluggable access control mechanism. -roadmap_1354_li=Fulltext search (Lucene)\: support streaming CLOB data. -roadmap_1355_li=Document/example how to create and read an encrypted script file. -roadmap_1356_li=Check state of http\://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins). -roadmap_1357_li=Fulltext search (Lucene)\: only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible. -roadmap_1358_li=Support a way to create or read compressed encrypted script files using an API. -roadmap_1359_li=Scripting language support (Javascript). -roadmap_1360_li=The network client should better detect if the server is not an H2 server and fail early. -roadmap_1361_li=H2 Console\: support CLOB/BLOB upload. -roadmap_1362_li=Database file lock\: detect hibernate / standby / very slow threads (compare system time). -roadmap_1363_li=Automatic detection of redundant indexes. -roadmap_1364_li=Maybe reject join without "on" (except natural join). -roadmap_1365_li=Implement GiST (Generalized Search Tree for Secondary Storage). -roadmap_1366_li=Function to read a number of bytes/characters from an BLOB or CLOB. -roadmap_1367_li=Issue 156\: Support SELECT ? UNION SELECT ?. -roadmap_1368_li=Automatic mixed mode\: support a port range list (to avoid firewall problems). -roadmap_1369_li=Support the pseudo column rowid, oid, _rowid_. 
-roadmap_1370_li=H2 Console / large result sets\: stream early instead of keeping a whole result in-memory -roadmap_1371_li=Support TRUNCATE for linked tables. -roadmap_1372_li=UNION\: evaluate INTERSECT before UNION (like most other database except Oracle). -roadmap_1373_li=Delay creating the information schema, and share metadata columns. -roadmap_1374_li=TCP Server\: use a nonce (number used once) to protect unencrypted channels against replay attacks. -roadmap_1375_li=Simplify running scripts and recovery\: CREATE FORCE USER (overwrites an existing user). -roadmap_1376_li=Support CREATE DATABASE LINK (a custom JDBC driver is already supported). -roadmap_1377_li=Support large GROUP BY operations. Issue 216. -roadmap_1378_li=Issue 163\: Allow to create foreign keys on metadata types. -roadmap_1379_li=Logback\: write a native DBAppender. -roadmap_1380_li=Cache size\: don't use more cache than what is available. -roadmap_1381_li=Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread. -roadmap_1382_li=Tree index\: Instead of an AVL tree, use a general balanced trees or a scapegoat tree. -roadmap_1383_li=User defined functions\: allow to store the bytecode (of just the class, or the jar file of the extension) in the database. -roadmap_1384_li=Compatibility\: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL. -roadmap_1385_li=Optimizer\: WHERE X\=? AND Y IN(?), it always uses the index on Y. Should be cost based. -roadmap_1386_li=Common Table Expression (CTE) / recursive queries\: support parameters. Issue 314. -roadmap_1387_li=Oracle compatibility\: support INSERT ALL. -roadmap_1388_li=Issue 178\: Optimizer\: index usage when both ascending and descending indexes are available. -roadmap_1389_li=Issue 179\: Related subqueries in HAVING clause. -roadmap_1390_li=IBM DB2 compatibility\: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero. 
-roadmap_1391_li=Creating primary key\: always create a constraint. -roadmap_1392_li=Maybe use a different page layout\: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system. -roadmap_1393_li=Indexes of temporary tables are currently kept in-memory. Is this how it should be? -roadmap_1394_li=The Shell tool should support the same built-in commands as the H2 Console. -roadmap_1395_li=Maybe use PhantomReference instead of finalize. -roadmap_1396_li=Database file name suffix\: should only have one dot by default. Example\: .h2db -roadmap_1397_li=Issue 196\: Function based indexes -roadmap_1398_li=ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName. -roadmap_1399_li=Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java -roadmap_1400_li=ROWNUM\: Oracle compatibility when used within a subquery. Issue 198. -roadmap_1401_li=Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way. -roadmap_1402_li=ODBC\: encrypted databases are not supported because the ;CIPHER\= can not be set. -roadmap_1403_li=Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1); -roadmap_1404_li=Optimizer\: index usage when both ascending and descending indexes are available. Issue 178. -roadmap_1405_li=Issue 306\: Support schema specific domains. -roadmap_1406_li=Triggers\: support user defined execution order. Oracle\: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby\: triggers are fired in the order in which they were created. -roadmap_1407_li=PostgreSQL compatibility\: combine "users" and "roles". 
See\: http\://www.postgresql.org/docs/8.1/interactive/user-manag.html -roadmap_1408_li=Improve documentation of system properties\: only list the property names, default values, and description. -roadmap_1409_li=Support running totals / cumulative sum using SUM(..) OVER(..). -roadmap_1410_li=Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize) -roadmap_1411_li=Triggers\: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others). -roadmap_1412_li=Common Table Expression (CTE) / recursive queries\: support INSERT INTO ... SELECT ... Issue 219. -roadmap_1413_li=Common Table Expression (CTE) / recursive queries\: support non-recursive queries. Issue 217. -roadmap_1414_li=Common Table Expression (CTE) / recursive queries\: avoid endless loop. Issue 218. -roadmap_1415_li=Common Table Expression (CTE) / recursive queries\: support multiple named queries. Issue 220. -roadmap_1416_li=Common Table Expression (CTE) / recursive queries\: identifier scope may be incorrect. Issue 222. -roadmap_1417_li=Log long running transactions (similar to long running statements). -roadmap_1418_li=Parameter data type is data type of other operand. Issue 205. -roadmap_1419_li=Some combinations of nested join with right outer join are not supported. -roadmap_1420_li=DatabaseEventListener.openConnection(id) and closeConnection(id). -roadmap_1421_li=Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent to login with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API. -roadmap_1422_li=Compatibility for data type CHAR (Derby, HSQLDB). Issue 212. -roadmap_1423_li=Compatibility with MySQL TIMESTAMPDIFF. Issue 209. -roadmap_1424_li=Optimizer\: use a histogram of the data, specially for non-normal distributions. 
-roadmap_1425_li=Trigger\: allow declaring as source code (like functions). -roadmap_1426_li=User defined aggregate\: allow declaring as source code (like functions). -roadmap_1427_li=The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable. -roadmap_1428_li=MySQL + PostgreSQL compatibility\: support string literal escape with \\n. -roadmap_1429_li=PostgreSQL compatibility\: support string literal escape with double \\\\. -roadmap_1430_li=Document the TCP server "management_db". Maybe include the IP address of the client. -roadmap_1431_li=Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main -roadmap_1432_li=If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message. -roadmap_1433_li=Optimization to use an index for OR when using multiple keys\: where (key1 \= ? and key2 \= ?) OR (key1 \= ? and key2 \= ?) -roadmap_1434_li=Issue 302\: Support optimizing queries with both inner and outer joins, as in\: select * from test a inner join test b on a.id\=b.id inner join o on o.id\=a.id where b.x\=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables". -roadmap_1435_li=JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool). -roadmap_1436_li=Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...; -roadmap_1437_li=nioMapped file system\: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example). -roadmap_1438_li=Column as parameter of function table. Issue 228. -roadmap_1439_li=Connection pool\: detect ;AUTOCOMMIT\=FALSE in the database URL, and if set, disable autocommit for all connections. 
-roadmap_1440_li=Compatibility with MS Access\: support "&" to concatenate text. -roadmap_1441_li=The BACKUP statement should not synchronize on the database, and therefore should not block other users. -roadmap_1442_li=Document the database file format. -roadmap_1443_li=Support reading LOBs. -roadmap_1444_li=Require appending DANGEROUS\=TRUE when using certain dangerous settings such as LOG\=0, LOG\=1, LOCK_MODE\=0, disabling FILE_LOCK,... -roadmap_1445_li=Support UDT (user defined types) similar to how Apache Derby supports it\: check constraint, allow to use it in Java functions as parameters (return values already seem to work). -roadmap_1446_li=Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files). -roadmap_1447_li=Issue 229\: SELECT with simple OR tests uses tableScan when it could use indexes. -roadmap_1448_li=GROUP BY queries should use a temporary table if there are too many rows. -roadmap_1449_li=BLOB\: support random access when reading. -roadmap_1450_li=CLOB\: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form). -roadmap_1451_li=Compatibility\: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...). -roadmap_1452_li=Compatibility with MySQL\: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...). -roadmap_1453_li=Compatibility with MySQL\: support non-strict mode (sql_mode \= "") any data that is too large for the column will just be truncated or set to the default value. -roadmap_1454_li=The full condition should be sent to the linked table, not just the indexed condition. Example\: TestLinkedTableFullCondition -roadmap_1455_li=Compatibility with IBM DB2\: CREATE PROCEDURE. -roadmap_1456_li=Compatibility with IBM DB2\: SQL cursors. -roadmap_1457_li=Single-column primary key values are always stored explicitly. This is not required. 
-roadmap_1458_li=Compatibility with MySQL\: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8). -roadmap_1459_li=CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true. -roadmap_1460_li=Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated). -roadmap_1461_li=Compatibility for ARRAY data type (Oracle\: VARRAY(n) of VARCHAR(m); HSQLDB\: VARCHAR(n) ARRAY; Postgres\: VARCHAR(n)[]). -roadmap_1462_li=PostgreSQL compatible array literal syntax\: ARRAY[['a', 'b'], ['c', 'd']] -roadmap_1463_li=PostgreSQL compatibility\: UPDATE with FROM. -roadmap_1464_li=Issue 297\: Oracle compatibility for "at time zone". -roadmap_1465_li=IBM DB2 compatibility\: IDENTITY_VAL_LOCAL(). -roadmap_1466_li=Support SQL/XML. -roadmap_1467_li=Support concurrent opening of databases. -roadmap_1468_li=Improved error message and diagnostics in case of network configuration problems. -roadmap_1469_li=TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases). -roadmap_1470_li=Adding a primary key should make the columns 'not null' unless if there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby). -roadmap_1471_li=ARRAY data type\: support Integer[] and so on in Java functions (currently only Object[] is supported). -roadmap_1472_li=MySQL compatibility\: LOCK TABLES a READ, b READ - see also http\://dev.mysql.com/doc/refman/5.0/en/lock-tables.html -roadmap_1473_li=The HTML to PDF converter should use http\://code.google.com/p/wkhtmltopdf/ -roadmap_1474_li=Issue 303\: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)". -roadmap_1475_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.name\=t2.name where t1.id\=t2.id. -roadmap_1476_li=Issue 283\: Improve performance of H2 on Android. 
-roadmap_1477_li=Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s). -roadmap_1478_li=Column compression option - see http\://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d -roadmap_1479_li=PostgreSQL compatibility\: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID). -roadmap_1480_li=MS SQL Server compatibility\: support @@ROWCOUNT. -roadmap_1481_li=PostgreSQL compatibility\: LOG(x) is LOG10(x) and not LN(x). -roadmap_1482_li=Issue 311\: Serialized lock mode\: executeQuery of write operations fails. -roadmap_1483_li=PostgreSQL compatibility\: support PgAdmin III (specially the function current_setting). -roadmap_1484_li=MySQL compatibility\: support TIMESTAMPADD. -roadmap_1485_li=Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -roadmap_1486_li=Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby). -roadmap_1487_li=Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase). -roadmap_1488_li=TRANSACTION_ID() for in-memory databases. -roadmap_1489_li=TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL). -roadmap_1490_li=Support [INNER | OUTER] JOIN USING(column [,...]). -roadmap_1491_li=Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle) -roadmap_1492_li=GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby). -roadmap_1493_li=Sybase / MS SQL Server compatibility\: CONVERT(..) parameters are swapped. -roadmap_1494_li=Index conditions\: WHERE AGE>1 should not scan through all rows with AGE\=1. -roadmap_1495_li=PHP support\: H2 should support PDO, or test with PostgreSQL PDO. 
-roadmap_1496_li=Outer joins\: if no column of the outer join table is referenced, the outer join table could be removed from the query. -roadmap_1497_li=Cluster\: allow using auto-increment and identity columns by ensuring executed in lock-step. -roadmap_1498_li=MySQL compatibility\: index names only need to be unique for the given table. -roadmap_1499_li=Issue 352\: constraints\: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases. -roadmap_1500_li=Oracle compatibility\: support MEDIAN aggregate function. -roadmap_1501_li=Issue 348\: Oracle compatibility\: division should return a decimal result. -roadmap_1502_li=Read rows on demand\: instead of reading the whole row, only read up to that column that is requested. Keep an pointer to the data area and the column id that is already read. -roadmap_1503_li=Long running transactions\: log session id when detected. -roadmap_1504_li=Optimization\: "select id from test" should use the index on id even without "order by". -roadmap_1505_li=Issue 362\: LIMIT support for UPDATE statements (MySQL compatibility). -roadmap_1506_li=Sybase SQL Anywhere compatibility\: SELECT TOP ... START AT ... -roadmap_1507_li=Use Java 6 SQLException subclasses. -roadmap_1508_li=Issue 390\: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR -roadmap_1509_li=Use Java 6 exceptions\: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,.. -roadmap_1510_li=Support index-only when doing selects (i.e. without needing to load the actual table data) -roadmap_1511_h2=Not Planned -roadmap_1512_li=HSQLDB (did) support this\: select id i from test where i<0 (other databases don't). Supporting it may break compatibility. -roadmap_1513_li=String.intern (so that Strings can be compared with \=\=) will not be used because some VMs have problems when used extensively. 
-roadmap_1514_li=In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements. -sourceError_1000_h1=Error Analyzer -sourceError_1001_a=Home -sourceError_1002_a=Input -sourceError_1003_h2=  Details  Source Code -sourceError_1004_p=Paste the error message and stack trace below and click on 'Details' or 'Source Code'\: -sourceError_1005_b=Error Code\: -sourceError_1006_b=Product Version\: -sourceError_1007_b=Message\: -sourceError_1008_b=More Information\: -sourceError_1009_b=Stack Trace\: -sourceError_1010_b=Source File\: -sourceError_1011_p=\ Inline -tutorial_1000_h1=Tutorial -tutorial_1001_a=\ Starting and Using the H2 Console -tutorial_1002_a=\ Special H2 Console Syntax -tutorial_1003_a=\ Settings of the H2 Console -tutorial_1004_a=\ Connecting to a Database using JDBC -tutorial_1005_a=\ Creating New Databases -tutorial_1006_a=\ Using the Server -tutorial_1007_a=\ Using Hibernate -tutorial_1008_a=\ Using TopLink and Glassfish -tutorial_1009_a=\ Using EclipseLink -tutorial_1010_a=\ Using Apache ActiveMQ -tutorial_1011_a=\ Using H2 within NetBeans -tutorial_1012_a=\ Using H2 with jOOQ -tutorial_1013_a=\ Using Databases in Web Applications -tutorial_1014_a=\ Android -tutorial_1015_a=\ CSV (Comma Separated Values) Support -tutorial_1016_a=\ Upgrade, Backup, and Restore -tutorial_1017_a=\ Command Line Tools -tutorial_1018_a=\ The Shell Tool -tutorial_1019_a=\ Using OpenOffice Base -tutorial_1020_a=\ Java Web Start / JNLP -tutorial_1021_a=\ Using a Connection Pool -tutorial_1022_a=\ Fulltext Search -tutorial_1023_a=\ User-Defined Variables -tutorial_1024_a=\ Date and Time -tutorial_1025_a=\ Using Spring -tutorial_1026_a=\ OSGi -tutorial_1027_a=\ Java Management Extension (JMX) -tutorial_1028_h2=Starting and Using the H2 Console -tutorial_1029_p=\ The H2 Console application lets you access a database 
using a browser. This can be a H2 database, or another database that supports the JDBC API. -tutorial_1030_p=\ This is a client/server application, so both a server and a client (a browser) are required to run it. -tutorial_1031_p=\ Depending on your platform and environment, there are multiple ways to start the H2 Console\: -tutorial_1032_th=OS -tutorial_1033_th=Start -tutorial_1034_td=Windows -tutorial_1035_td=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)] -tutorial_1036_td=\ An icon will be added to the system tray\: -tutorial_1037_td=\ If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http\://localhost\:8082. -tutorial_1038_td=Windows -tutorial_1039_td=\ Open a file browser, navigate to h2/bin, and double click on h2.bat. -tutorial_1040_td=\ A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL\: http\://localhost\:8082). -tutorial_1041_td=Any -tutorial_1042_td=\ Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java. -tutorial_1043_td=Any -tutorial_1044_td=\ Open a console window, navigate to the directory h2/bin, and type\: -tutorial_1045_h3=Firewall -tutorial_1046_p=\ If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer, you need allow remote connections in the firewall. -tutorial_1047_p=\ It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. 
A workaround is to connect using 'localhost'. -tutorial_1048_p=\ A small firewall is already built into the server\: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'. -tutorial_1049_h3=Testing Java -tutorial_1050_p=\ To find out which version of Java is installed, open a command prompt and type\: -tutorial_1051_p=\ If you get an error message, you may need to add the Java binary directory to the path environment variable. -tutorial_1052_h3=Error Message 'Port may be in use' -tutorial_1053_p=\ You can only start one instance of the H2 Console, otherwise you will get the following error message\: "The Web server could not be started. Possible cause\: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections. -tutorial_1054_h3=Using another Port -tutorial_1055_p=\ If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort. -tutorial_1056_p=\ If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used. -tutorial_1057_h3=Connecting to the Server using a Browser -tutorial_1058_p=\ If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http\://localhost\:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example\: http\://192.168.0.2\:8082. If you enabled TLS on the server side, the URL needs to start with https\://. 
-tutorial_1059_h3=Multiple Concurrent Sessions -tutorial_1060_p=\ Multiple concurrent browser sessions are supported. As that the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application. -tutorial_1061_h3=Login -tutorial_1062_p=\ At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect]. -tutorial_1063_p=\ You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console). -tutorial_1064_h3=Error Messages -tutorial_1065_p=\ Error messages in are shown in red. You can show/hide the stack trace of the exception by clicking on the message. -tutorial_1066_h3=Adding Database Drivers -tutorial_1067_p=\ To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows)\: to add the HSQLDB JDBC driver C\:\\Programs\\hsqldb\\lib\\hsqldb.jar, set the environment variable H2DRIVERS to C\:\\Programs\\hsqldb\\lib\\hsqldb.jar. -tutorial_1068_p=\ Multiple drivers can be set; entries need to be separated by ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted. -tutorial_1069_h3=Using the H2 Console -tutorial_1070_p=\ The H2 Console application has three main panels\: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command. -tutorial_1071_h3=Inserting Table Names or Column Names -tutorial_1072_p=\ To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. 
While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded. -tutorial_1073_h3=Disconnecting and Stopping the Application -tutorial_1074_p=\ To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions. -tutorial_1075_p=\ To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window. -tutorial_1076_h2=Special H2 Console Syntax -tutorial_1077_p=\ The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command. -tutorial_1078_th=Command(s) -tutorial_1079_th=Description -tutorial_1080_td=\ @autocommit_true; -tutorial_1081_td=\ @autocommit_false; -tutorial_1082_td=\ Enable or disable autocommit. -tutorial_1083_td=\ @cancel; -tutorial_1084_td=\ Cancel the currently running statement. -tutorial_1085_td=\ @columns null null TEST; -tutorial_1086_td=\ @index_info null null TEST; -tutorial_1087_td=\ @tables; -tutorial_1088_td=\ @tables null null TEST; -tutorial_1089_td=\ Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. 
The complete list of metadata commands is\: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns -tutorial_1090_td=\ @edit select * from test; -tutorial_1091_td=\ Use an updatable result set. -tutorial_1092_td=\ @generated insert into test() values(); -tutorial_1093_td=\ Show the result of Statement.getGeneratedKeys(). -tutorial_1094_td=\ @history; -tutorial_1095_td=\ List the command history. -tutorial_1096_td=\ @info; -tutorial_1097_td=\ Display the result of various Connection and DatabaseMetaData methods. -tutorial_1098_td=\ @list select * from test; -tutorial_1099_td=\ Show the result set in list format (each column on its own line, with row numbers). -tutorial_1100_td=\ @loop 1000 select ?, ?/*rnd*/; -tutorial_1101_td=\ @loop 1000 @statement select ?; -tutorial_1102_td=\ Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed. -tutorial_1103_td=\ @maxrows 20; -tutorial_1104_td=\ Set the maximum number of rows to display. -tutorial_1105_td=\ @memory; -tutorial_1106_td=\ Show the used and free memory. This will call System.gc(). -tutorial_1107_td=\ @meta select 1; -tutorial_1108_td=\ List the ResultSetMetaData after running the query. -tutorial_1109_td=\ @parameter_meta select ?; -tutorial_1110_td=\ Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed. 
-tutorial_1111_td=\ @prof_start; -tutorial_1112_td=\ call hash('SHA256', '', 1000000); -tutorial_1113_td=\ @prof_stop; -tutorial_1114_td=\ Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3). -tutorial_1115_td=\ @prof_start; -tutorial_1116_td=\ @sleep 10; -tutorial_1117_td=\ @prof_stop; -tutorial_1118_td=\ Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process). -tutorial_1119_td=\ @transaction_isolation; -tutorial_1120_td=\ @transaction_isolation 2; -tutorial_1121_td=\ Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level. -tutorial_1122_h2=Settings of the H2 Console -tutorial_1123_p=\ The settings of the H2 Console are stored in a configuration file called .h2.server.properties in you user home directory. For Windows installations, the user home directory is usually C\:\\Documents and Settings\\[username] or C\:\\Users\\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are\: -tutorial_1124_code=webAllowOthers -tutorial_1125_li=\: allow other computers to connect. -tutorial_1126_code=webPort -tutorial_1127_li=\: the port of the H2 Console -tutorial_1128_code=webSSL -tutorial_1129_li=\: use encrypted TLS (HTTPS) connections. -tutorial_1130_p=\ In addition to those settings, the properties of the last recently used connection are listed in the form <number>\=<name>|<driver>|<url>|<user> using the escape character \\. Example\: 1\=Generic H2 (Embedded)|org.h2.Driver|jdbc\\\:h2\\\:~/test|sa -tutorial_1131_h2=Connecting to a Database using JDBC -tutorial_1132_p=\ To connect to a database, a Java application first needs to load the database driver, and then get a connection. 
A simple way to do that is using the following code\: -tutorial_1133_p=\ This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc\:h2\: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are. -tutorial_1134_h2=Creating New Databases -tutorial_1135_p=\ By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database. -tutorial_1136_p=\ Auto-creating new database can be disabled, see Opening a Database Only if it Already Exists. -tutorial_1137_h2=Using the Server -tutorial_1138_p=\ H2 currently supports three server\: a web server (for the H2 Console), a TCP server (for client/server connections) and an PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects. -tutorial_1139_h3=Starting the Server Tool from Command Line -tutorial_1140_p=\ To start the Server tool from the command line with the default settings, run\: -tutorial_1141_p=\ This will start the tool with the default options. To get the list of options and default values, run\: -tutorial_1142_p=\ There are options available to use other ports, and start or not start parts. 
-tutorial_1143_h3=Connecting to the TCP Server -tutorial_1144_p=\ To remotely connect to a database using the TCP server, use the following driver and database URL\: -tutorial_1145_li=JDBC driver class\: org.h2.Driver -tutorial_1146_li=Database URL\: jdbc\:h2\:tcp\://localhost/~/test -tutorial_1147_p=\ For details about the database URL, see also in Features. Please note that you can't connection with a web browser to this URL. You can only connect using a H2 client (over JDBC). -tutorial_1148_h3=Starting the TCP Server within an Application -tutorial_1149_p=\ Servers can also be started and stopped from within an application. Sample code\: -tutorial_1150_h3=Stopping a TCP Server from Another Process -tutorial_1151_p=\ The TCP server can be stopped from another process. To stop the server from the command line, run\: -tutorial_1152_p=\ To stop the server from a user application, use the following code\: -tutorial_1153_p=\ This function will only stop the TCP server. If other server were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server). -tutorial_1154_h2=Using Hibernate -tutorial_1155_p=\ This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed. -tutorial_1156_p=\ When using Hibernate, try to use the H2Dialect if possible. 
When using the H2Dialect, compatibility modes such as MODE\=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases. -tutorial_1157_h2=Using TopLink and Glassfish -tutorial_1158_p=\ To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml\: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource. -tutorial_1159_p=\ The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml\: -tutorial_1160_p=\ In old versions of Glassfish, the property name is toplink.platform.class.name. -tutorial_1161_p=\ To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib. -tutorial_1162_h2=Using EclipseLink -tutorial_1163_p=\ To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many case. See also H2Platform. -tutorial_1164_h2=Using Apache ActiveMQ -tutorial_1165_p=\ When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. 
The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, change the ApacheMQ configuration element <jdbcPersistenceAdapter> element, property databaseLocker\="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false. -tutorial_1166_h2=Using H2 within NetBeans -tutorial_1167_p=\ The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE. -tutorial_1168_p=\ There is a known issue when using the Netbeans SQL Execution Window\: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one. -tutorial_1169_h2=Using H2 with jOOQ -tutorial_1170_p=\ jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema\: -tutorial_1171_p=\ then run the jOOQ code generator on the command line using this command\: -tutorial_1172_p=\ ...where codegen.xml is on the classpath and contains this information -tutorial_1173_p=\ Using the generated source, you can query the database as follows\: -tutorial_1174_p=\ See more details on jOOQ Homepage and in the jOOQ Tutorial -tutorial_1175_h2=Using Databases in Web Applications -tutorial_1176_p=\ There are multiple ways to access a database from within web applications. 
Here are some examples if you use Tomcat or JBoss. -tutorial_1177_h3=Embedded Mode -tutorial_1178_p=\ The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) are just using one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, an idea is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed). -tutorial_1179_h3=Server Mode -tutorial_1180_p=\ The server mode is similar, but it allows you to run the server in another process. -tutorial_1181_h3=Using a Servlet Listener to Start and Stop a Database -tutorial_1182_p=\ Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section)\: -tutorial_1183_p=\ For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc\:h2\:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access as follows\: -tutorial_1184_code=DbStarter -tutorial_1185_p=\ can also start the TCP server, however this is disabled by default. 
To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags\: -tutorial_1186_p=\ When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically. -tutorial_1187_h3=Using the H2 Console Servlet -tutorial_1188_p=\ The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the the h2*.jar file in your application, and add the following configuration to your web.xml\: -tutorial_1189_p=\ For details, see also src/tools/WEB-INF/web.xml. -tutorial_1190_p=\ To create a web application with just the H2 Console, run the following command\: -tutorial_1191_h2=Android -tutorial_1192_p=\ You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. So far, only very few tests have been run, and everything seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work. -tutorial_1193_p=\ Reasons to use H2 instead of SQLite are\: -tutorial_1194_li=Full Unicode support including UPPER() and LOWER(). -tutorial_1195_li=Streaming API for BLOB and CLOB data. -tutorial_1196_li=Fulltext search. -tutorial_1197_li=Multiple connections. -tutorial_1198_li=User defined functions and triggers. -tutorial_1199_li=Database file encryption. -tutorial_1200_li=Reading and writing CSV files (this feature can be used outside the database as well). 
-tutorial_1201_li=Referential integrity and check constraints. -tutorial_1202_li=Better data type and SQL support. -tutorial_1203_li=In-memory databases, read-only databases, linked tables. -tutorial_1204_li=Better compatibility with other databases which simplifies porting applications. -tutorial_1205_li=Possibly better performance (so far for read operations). -tutorial_1206_li=Server mode (accessing a database on a different machine over TCP/IP). -tutorial_1207_p=\ Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows). -tutorial_1208_p=\ The database files needs to be stored in a place that is accessible for the application. Example\: -tutorial_1209_p=\ Limitations\: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android. -tutorial_1210_h2=CSV (Comma Separated Values) Support -tutorial_1211_p=\ The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool. -tutorial_1212_h3=Reading a CSV File from Within a Database -tutorial_1213_p=\ A CSV file can be read using the function CSVREAD. Example\: -tutorial_1214_p=\ Please note for performance reason, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table. -tutorial_1215_h3=Importing Data from a CSV File -tutorial_1216_p=\ A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT. 
-tutorial_1217_h3=Writing a CSV File from Within a Database -tutorial_1218_p=\ The built-in function CSVWRITE can be used to create a CSV file from a query. Example\: -tutorial_1219_h3=Writing a CSV File from a Java Application -tutorial_1220_p=\ The Csv tool can be used in a Java application even when not using a database at all. Example\: -tutorial_1221_h3=Reading a CSV File from a Java Application -tutorial_1222_p=\ It is possible to read a CSV file without opening a database. Example\: -tutorial_1223_h2=Upgrade, Backup, and Restore -tutorial_1224_h3=Database Upgrade -tutorial_1225_p=\ The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine. -tutorial_1226_h3=Backup using the Script Tool -tutorial_1227_p=\ The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is ran as follows\: -tutorial_1228_p=\ It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built in FTP server could be used to retrieve the file from the server. -tutorial_1229_h3=Restore from a Script -tutorial_1230_p=\ To restore a database from a SQL script file, you can use the RunScript tool\: -tutorial_1231_p=\ For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. 
SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the references script files need to be available on the server side. -tutorial_1232_h3=Online Backup -tutorial_1233_p=\ The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable. -tutorial_1234_p=\ The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply. -tutorial_1235_p=\ The Backup tool (org.h2.tools.Backup) can not be used to create a online backup; the database must not be in use while running this program. -tutorial_1236_p=\ Creating a backup by copying the database files while the database is running is not supported, except if the file systems support creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order. -tutorial_1237_h2=Command Line Tools -tutorial_1238_p=\ This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example\: -tutorial_1239_p=\ The command line tools are\: -tutorial_1240_code=Backup -tutorial_1241_li=\ creates a backup of a database. -tutorial_1242_code=ChangeFileEncryption -tutorial_1243_li=\ allows changing the file encryption password or algorithm of a database. -tutorial_1244_code=Console -tutorial_1245_li=\ starts the browser based H2 Console. -tutorial_1246_code=ConvertTraceFile -tutorial_1247_li=\ converts a .trace.db file to a Java application and SQL script. -tutorial_1248_code=CreateCluster -tutorial_1249_li=\ creates a cluster from a standalone database. -tutorial_1250_code=DeleteDbFiles -tutorial_1251_li=\ deletes all files belonging to a database. -tutorial_1252_code=Recover -tutorial_1253_li=\ helps recovering a corrupted database. -tutorial_1254_code=Restore -tutorial_1255_li=\ restores a backup of a database. 
-tutorial_1256_code=RunScript -tutorial_1257_li=\ runs a SQL script against a database. -tutorial_1258_code=Script -tutorial_1259_li=\ allows converting a database to a SQL script for backup or migration. -tutorial_1260_code=Server -tutorial_1261_li=\ is used in the server mode to start a H2 server. -tutorial_1262_code=Shell -tutorial_1263_li=\ is a command line database tool. -tutorial_1264_p=\ The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation. -tutorial_1265_h2=The Shell Tool -tutorial_1266_p=\ The Shell tool is a simple interactive command line tool. To start it, type\: -tutorial_1267_p=\ You will be asked for a database URL, JDBC driver, user name, and password. The connection setting can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows to enter multi-line statements\: -tutorial_1268_p=\ By default, results are printed as a table. For results with many column, consider using the list mode\: -tutorial_1269_h2=Using OpenOffice Base -tutorial_1270_p=\ OpenOffice.org Base supports database access over the JDBC API. To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are\: -tutorial_1271_li=Start OpenOffice Writer, go to [Tools], [Options] -tutorial_1272_li=Make sure you have selected a Java runtime environment in OpenOffice.org / Java -tutorial_1273_li=Click [Class Path...], [Add Archive...] 
-tutorial_1274_li=Select your h2 jar file (location is up to you, could be wherever you choose) -tutorial_1275_li=Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter) -tutorial_1276_li=Start OpenOffice Base -tutorial_1277_li=Connect to an existing database; select [JDBC]; [Next] -tutorial_1278_li=Example datasource URL\: jdbc\:h2\:~/test -tutorial_1279_li=JDBC driver class\: org.h2.Driver -tutorial_1280_p=\ Now you can access the database stored in the current users home directory. -tutorial_1281_p=\ To use H2 in NeoOffice (OpenOffice without X11)\: -tutorial_1282_li=In NeoOffice, go to [NeoOffice], [Preferences] -tutorial_1283_li=Look for the page under [NeoOffice], [Java] -tutorial_1284_li=Click [Class Path], [Add Archive...] -tutorial_1285_li=Select your h2 jar file (location is up to you, could be wherever you choose) -tutorial_1286_li=Click [OK] (as much as needed), restart NeoOffice. -tutorial_1287_p=\ Now, when creating a new database using the "Database Wizard" \: -tutorial_1288_li=Click [File], [New], [Database]. -tutorial_1289_li=Select [Connect to existing database] and the select [JDBC]. Click next. -tutorial_1290_li=Example datasource URL\: jdbc\:h2\:~/test -tutorial_1291_li=JDBC driver class\: org.h2.Driver -tutorial_1292_p=\ Another solution to use H2 in NeoOffice is\: -tutorial_1293_li=Package the h2 jar within an extension package -tutorial_1294_li=Install it as a Java extension in NeoOffice -tutorial_1295_p=\ This can be done by create it using the NetBeans OpenOffice plugin. See also Extensions Development. -tutorial_1296_h2=Java Web Start / JNLP -tutorial_1297_p=\ When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur\: java.security.AccessControlException\: access denied (java.io.FilePermission ... read). 
Example permission tags\: -tutorial_1298_h2=Using a Connection Pool -tutorial_1299_p=\ For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as faster to get a connection from the built-in connection pool than to get one using DriverManager.getConnection().The build-in connection pool is used as follows\: -tutorial_1300_h2=Fulltext Search -tutorial_1301_p=\ H2 includes two fulltext search implementations. One is using Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database. -tutorial_1302_h3=Using the Native Fulltext Search -tutorial_1303_p=\ To initialize, call\: -tutorial_1304_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using\: -tutorial_1305_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query\: -tutorial_1306_p=\ This will produce a result set that contains the query needed to retrieve the data\: -tutorial_1307_p=\ To drop an index on a table\: -tutorial_1308_p=\ To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). 
To join a table, use a join as in\: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0]; -tutorial_1309_p=\ You can also call the index from within a Java application\: -tutorial_1310_h3=Using the Lucene Fulltext Search -tutorial_1311_p=\ To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call\: -tutorial_1312_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using\: -tutorial_1313_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in realtime. To search the index, use the following query\: -tutorial_1314_p=\ This will produce a result set that contains the query needed to retrieve the data\: -tutorial_1315_p=\ To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database)\: -tutorial_1316_p=\ To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in\: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0]; -tutorial_1317_p=\ You can also call the index from within a Java application\: -tutorial_1318_p=\ The Lucene fulltext search supports searching in specific column only. Column names must be uppercase (except if the original columns are double quoted). 
For column names starting with an underscore (_), another underscore needs to be added. Example\: -tutorial_1319_p=\ The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException\: this IndexReader is closed. -tutorial_1320_h2=User-Defined Variables -tutorial_1321_p=\ This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. A value is usually assigned using the SET command\: -tutorial_1322_p=\ The value can also be changed using the SET() method. This is useful in queries\: -tutorial_1323_p=\ Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable. -tutorial_1324_h2=Date and Time -tutorial_1325_p=\ Date, time and timestamp values support ISO 8601 formatting, including time zone\: -tutorial_1326_p=\ If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. 
That means if you store the value '2000-01-01 12\:00\:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12\:00\:00'. Please note that changing the time zone after the H2 driver is loaded is not supported. -tutorial_1327_h2=Using Spring -tutorial_1328_h3=Using the TCP Server -tutorial_1329_p=\ Use the following configuration to start and stop the H2 TCP server using the Spring Framework\: -tutorial_1330_p=\ The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server. -tutorial_1331_h3=Error Code Incompatibility -tutorial_1332_p=\ There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath\: -tutorial_1333_h2=OSGi -tutorial_1334_p=\ The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties\: OSGI_JDBC_DRIVER_CLASS\=org.h2.Driver and OSGI_JDBC_DRIVER_NAME\=H2. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is. -tutorial_1335_p=\ The following standard configuration properties are supported\: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL. -tutorial_1336_h2=Java Management Extension (JMX) -tutorial_1337_p=\ Management over JMX is supported, but not enabled by default. 
To enable JMX, append ;JMX\=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character). -tutorial_1338_p=\ The following attributes and operations are supported\: -tutorial_1339_code=CacheSize -tutorial_1340_li=\: the cache size currently in use in KB. -tutorial_1341_code=CacheSizeMax -tutorial_1342_li=\ (read/write)\: the maximum cache size in KB. -tutorial_1343_code=Exclusive -tutorial_1344_li=\: whether this database is open in exclusive mode or not. -tutorial_1345_code=FileReadCount -tutorial_1346_li=\: the number of file read operations since the database was opened. -tutorial_1347_code=FileSize -tutorial_1348_li=\: the file size in KB. -tutorial_1349_code=FileWriteCount -tutorial_1350_li=\: the number of file write operations since the database was opened. -tutorial_1351_code=FileWriteCountTotal -tutorial_1352_li=\: the number of file write operations since the database was created. -tutorial_1353_code=LogMode -tutorial_1354_li=\ (read/write)\: the current transaction log mode. See SET LOG for details. -tutorial_1355_code=Mode -tutorial_1356_li=\: the compatibility mode (REGULAR if no compatibility mode is used). -tutorial_1357_code=MultiThreaded -tutorial_1358_li=\: true if multi-threaded is enabled. -tutorial_1359_code=Mvcc -tutorial_1360_li=\: true if MVCC is enabled. -tutorial_1361_code=ReadOnly -tutorial_1362_li=\: true if the database is read-only. -tutorial_1363_code=TraceLevel -tutorial_1364_li=\ (read/write)\: the file trace level. -tutorial_1365_code=Version -tutorial_1366_li=\: the database version in use. 
-tutorial_1367_code=listSettings -tutorial_1368_li=\: list the database settings. -tutorial_1369_code=listSessions -tutorial_1370_li=\: list the open sessions, including currently executing statement (if any) and locked tables (if any). -tutorial_1371_p=\ To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM. diff --git a/h2/src/docsrc/textbase/_messages_en.prop b/h2/src/docsrc/textbase/_messages_en.prop deleted file mode 100644 index 21ee868385..0000000000 --- a/h2/src/docsrc/textbase/_messages_en.prop +++ /dev/null @@ -1,171 +0,0 @@ -.translator=Thomas Mueller -02000=No data is available -07001=Invalid parameter count for {0}, expected count: {1} -08000=Error opening database: {0} -21S02=Column count does not match -22001=Value too long for column {0}: {1} -22003=Numeric value out of range: {0} -22007=Cannot parse {0} constant {1} -22012=Division by zero: {0} -22018=Data conversion error converting {0} -22025=Error in LIKE ESCAPE: {0} -23502=NULL not allowed for column {0} -23503=Referential integrity constraint violation: {0} -23505=Unique index or primary key violation: {0} -23506=Referential integrity constraint violation: {0} -23507=No default value is set for column {0} -23513=Check constraint violation: {0} -23514=Check constraint invalid: {0} -28000=Wrong user name or password -40001=Deadlock detected. The current transaction was rolled back. Details: {0} -42000=Syntax error in SQL statement {0} -42001=Syntax error in SQL statement {0}; expected {1} -42S01=Table {0} already exists -42S02=Table {0} not found -42S11=Index {0} already exists -42S12=Index {0} not found -42S21=Duplicate column name {0} -42S22=Column {0} not found -42S32=Setting {0} not found -57014=Statement was canceled or the session timed out -90000=Function {0} must return a result set -90001=Method is not allowed for a query. 
Use execute or executeQuery instead of executeUpdate -90002=Method is only allowed for a query. Use execute or executeUpdate instead of executeQuery -90003=Hexadecimal string with odd number of characters: {0} -90004=Hexadecimal string contains non-hex character: {0} -90006=Sequence {0} has run out of numbers -90007=The object is already closed -90008=Invalid value {0} for parameter {1} -90009=Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) -90010=Invalid TO_CHAR format {0} -90011=A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. -90012=Parameter {0} is not set -90013=Database {0} not found -90014=Error parsing {0} -90015=SUM or AVG on wrong data type for {0} -90016=Column {0} must be in the GROUP BY list -90017=Attempt to define a second primary key -90018=The connection was not closed by the application and is garbage collected -90019=Cannot drop the current user -90020=Database may be already in use: {0}. Possible solutions: close all other connection(s); use the server mode -90021=This combination of database settings is not supported: {0} -90022=Function {0} not found -90023=Column {0} must not be nullable -90024=Error while renaming file {0} to {1} -90025=Cannot delete file {0} -90026=Serialization failed, cause: {0} -90027=Deserialization failed, cause: {0} -90028=IO Exception: {0} -90029=Currently not on an updatable row -90030=File corrupted while reading record: {0}. 
Possible solution: use the recovery tool -90031=IO Exception: {0}; {1} -90032=User {0} not found -90033=User {0} already exists -90034=Log file error: {0}, cause: {1} -90035=Sequence {0} already exists -90036=Sequence {0} not found -90037=View {0} not found -90038=View {0} already exists -90039=This CLOB or BLOB reference timed out: {0} -90040=Admin rights are required for this operation -90041=Trigger {0} already exists -90042=Trigger {0} not found -90043=Error creating or initializing trigger {0} object, class {1}, cause: {2}; see root cause for details -90044=Error executing trigger {0}, class {1}, cause : {2}; see root cause for details -90045=Constraint {0} already exists -90046=URL format error; must be {0} but is {1} -90047=Version mismatch, driver version is {0} but server version is {1} -90048=Unsupported database file version or invalid file header in file {0} -90049=Encryption error in file {0} -90050=Wrong password format, must be: file password user password -90052=Subquery is not a single column query -90053=Scalar subquery contains more than one row -90054=Invalid use of aggregate function {0} -90055=Unsupported cipher {0} -90057=Constraint {0} not found -90058=Commit or rollback is not allowed within a trigger -90059=Ambiguous column name {0} -90060=Unsupported file lock method {0} -90061=Exception opening port {0} (port may be in use), cause: {1} -90062=Error while creating file {0} -90063=Savepoint is invalid: {0} -90064=Savepoint is unnamed -90065=Savepoint is named -90066=Duplicate property {0} -90067=Connection is broken: {0} -90068=Order by expression {0} must be in the result list in this case -90069=Role {0} already exists -90070=Role {0} not found -90071=User or role {0} not found -90072=Roles and rights cannot be mixed -90073=Matching Java methods must have different parameter counts: {0} and {1} -90074=Role {0} already granted -90075=Column is part of the index {0} -90076=Function alias {0} already exists -90077=Function alias {0} not 
found -90078=Schema {0} already exists -90079=Schema {0} not found -90080=Schema name must match -90081=Column {0} contains null values -90082=Sequence {0} belongs to a table -90083=Column may be referenced by {0} -90084=Cannot drop last column {0} -90085=Index {0} belongs to constraint {1} -90086=Class {0} not found -90087=Method {0} not found -90088=Unknown mode {0} -90089=Collation cannot be changed because there is a data table: {0} -90090=Schema {0} cannot be dropped -90091=Role {0} cannot be dropped -90093=Clustering error - database currently runs in standalone mode -90094=Clustering error - database currently runs in cluster mode, server list: {0} -90095=String format error: {0} -90096=Not enough rights for object {0} -90097=The database is read only -90098=The database has been closed -90099=Error setting database event listener {0}, cause: {1} -90101=Wrong XID format: {0} -90102=Unsupported compression options: {0} -90103=Unsupported compression algorithm: {0} -90104=Compression error -90105=Exception calling user-defined function: {0} -90106=Cannot truncate {0} -90107=Cannot drop {0} because {1} depends on it -90108=Out of memory. 
-90109=View {0} is invalid: {1} -90111=Error accessing linked table with SQL statement {0}, cause: {1} -90112=Row not found when trying to delete from index {0} -90113=Unsupported connection setting {0} -90114=Constant {0} already exists -90115=Constant {0} not found -90116=Literals of this kind are not allowed -90117=Remote connections to this server are not allowed, see -tcpAllowOthers -90118=Cannot drop table {0} -90119=User data type {0} already exists -90120=User data type {0} not found -90121=Database is already closed (to disable automatic closing at VM shutdown, add ";DB_CLOSE_ON_EXIT=FALSE" to the db URL) -90122=Operation not supported for table {0} when there are views on the table: {1} -90123=Cannot mix indexed and non-indexed parameters -90124=File not found: {0} -90125=Invalid class, expected {0} but got {1} -90126=Database is not persistent -90127=The result set is not updatable. The query must select all columns from a unique key. Only one table may be selected. -90128=The result set is not scrollable and can not be reset. You may need to use conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ..). -90129=Transaction {0} not found -90130=This method is not allowed for a prepared statement; use a regular statement instead. -90131=Concurrent update in table {0}: another transaction has updated or deleted the same row -90132=Aggregate {0} not found -90133=Cannot change the setting {0} when the database is already open -90134=Access to the class {0} is denied -90135=The database is open in exclusive mode; can not open additional connections -90136=Unsupported outer join condition: {0} -90137=Can only assign to a variable, not to: {0} -90138=Invalid database name: {0} -90139=The public static Java method was not found: {0} -90140=The result set is readonly. You may need to use conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
-90141=Serializer cannot be changed because there is a data table: {0} -90142=Step size must not be zero -HY000=General error: {0} -HY004=Unknown data type: {0} -HYC00=Feature not supported: {0} -HYT00=Timeout trying to lock table {0} diff --git a/h2/src/docsrc/textbase/_text_en.prop b/h2/src/docsrc/textbase/_text_en.prop deleted file mode 100644 index 9eab3466be..0000000000 --- a/h2/src/docsrc/textbase/_text_en.prop +++ /dev/null @@ -1,160 +0,0 @@ -.translator=Thomas Mueller -a.help=Help -a.language=English -a.lynxNotSupported=Sorry, Lynx not supported yet -a.password=Password -a.remoteConnectionsDisabled=Sorry, remote connections ('webAllowOthers') are disabled on this server. -a.title=H2 Console -a.tools=Tools -a.user=User Name -admin.executing=Executing -admin.ip=IP -admin.lastAccess=Last Access -admin.lastQuery=Last Query -admin.no=no -admin.notConnected=not connected -admin.url=URL -admin.yes=yes -adminAllow=Allowed clients -adminConnection=Connection security -adminHttp=Use unencrypted HTTP connections -adminHttps=Use encrypted SSL (HTTPS) connections -adminLocal=Only allow local connections -adminLogin=Administration Login -adminLoginCancel=Cancel -adminLoginOk=OK -adminLogout=Logout -adminOthers=Allow connections from other computers -adminPort=Port number -adminPortWeb=Web server port number -adminRestart=Changes take effect after restarting the server. -adminSave=Save -adminSessions=Active Sessions -adminShutdown=Shutdown -adminTitle=H2 Console Preferences -adminTranslateHelp=Translate or improve the translation of the H2 Console. -adminTranslateStart=Translate -helpAction=Action -helpAddAnotherRow=Add another row -helpAddDrivers=Adding Database Drivers -helpAddDriversText=Additional database drivers can be registered by adding the Jar file location of the driver to the the environment variables H2DRIVERS or CLASSPATH. 
Example (Windows): to add the database driver library C:/Programs/hsqldb/lib/hsqldb.jar, set the environment variable H2DRIVERS to C:/Programs/hsqldb/lib/hsqldb.jar. -helpAddRow=Add a new row -helpCommandHistory=Shows the Command History -helpCreateTable=Create a new table -helpDeleteRow=Remove a row -helpDisconnect=Disconnects from the database -helpDisplayThis=Displays this Help Page -helpDropTable=Delete the table if it exists -helpExecuteCurrent=Executes the current SQL statement -helpExecuteSelected=Executes the SQL statement defined by the text selection -helpIcon=Icon -helpImportantCommands=Important Commands -helpOperations=Operations -helpQuery=Query the table -helpSampleSQL=Sample SQL Script -helpStatements=SQL statements -helpUpdate=Change data in a row -helpWithColumnsIdName=with ID and NAME columns -key.alt=Alt -key.ctrl=Ctrl -key.enter=Enter -key.shift=Shift -key.space=Space -login.connect=Connect -login.driverClass=Driver Class -login.driverNotFound=Database driver not found
    See in the Help for how to add drivers -login.goAdmin=Preferences -login.jdbcUrl=JDBC URL -login.language=Language -login.login=Login -login.remove=Remove -login.save=Save -login.savedSetting=Saved Settings -login.settingName=Setting Name -login.testConnection=Test Connection -login.testSuccessful=Test successful -login.welcome=H2 Console -result.1row=1 row -result.autoCommitOff=Auto commit is now OFF -result.autoCommitOn=Auto commit is now ON -result.bytes=bytes -result.characters=characters -result.maxrowsSet=Max rowcount is set -result.noRows=no rows -result.noRunningStatement=There is currently no running statement -result.rows=rows -result.statementWasCanceled=The statement was canceled -result.updateCount=Update count -resultEdit.action=Action -resultEdit.add=Add -resultEdit.cancel=Cancel -resultEdit.delete=Delete -resultEdit.edit=Edit -resultEdit.editResult=Edit -resultEdit.save=Save -toolbar.all=All -toolbar.autoCommit=Auto commit -toolbar.autoComplete=Auto complete -toolbar.autoComplete.full=Full -toolbar.autoComplete.normal=Normal -toolbar.autoComplete.off=Off -toolbar.cancelStatement=Cancel the current statement -toolbar.clear=Clear -toolbar.commit=Commit -toolbar.disconnect=Disconnect -toolbar.history=Command history -toolbar.maxRows=Max rows -toolbar.refresh=Refresh -toolbar.rollback=Rollback -toolbar.run=Run -toolbar.runSelected=Run Selected -toolbar.sqlStatement=SQL statement -tools.backup=Backup -tools.backup.help=Creates a backup of a database. -tools.changeFileEncryption=ChangeFileEncryption -tools.changeFileEncryption.help=Allows changing the database file encryption password and algorithm. -tools.cipher=Cipher (AES or XTEA) -tools.commandLine=Command line -tools.convertTraceFile=ConvertTraceFile -tools.convertTraceFile.help=Converts a .trace.db file to a Java application and SQL script. -tools.createCluster=CreateCluster -tools.createCluster.help=Creates a cluster from a standalone database. 
-tools.databaseName=Database name -tools.decryptionPassword=Decryption password -tools.deleteDbFiles=DeleteDbFiles -tools.deleteDbFiles.help=Deletes all files belonging to a database. -tools.directory=Directory -tools.encryptionPassword=Encryption password -tools.javaDirectoryClassName=Java directory and class name -tools.recover=Recover -tools.recover.help=Helps recovering a corrupted database. -tools.restore=Restore -tools.restore.help=Restores a database backup. -tools.result=Result -tools.run=Run -tools.runScript=RunScript -tools.runScript.help=Runs a SQL script. -tools.script=Script -tools.script.help=Allows to convert a database to a SQL script for backup or migration. -tools.scriptFileName=Script file name -tools.serverList=Server list -tools.sourceDatabaseName=Source database name -tools.sourceDatabaseURL=Source database URL -tools.sourceDirectory=Source directory -tools.sourceFileName=Source file name -tools.sourceScriptFileName=Source script file name -tools.targetDatabaseName=Target database name -tools.targetDatabaseURL=Target database URL -tools.targetDirectory=Target directory -tools.targetFileName=Target file name -tools.targetScriptFileName=Target script file name -tools.traceFileName=Trace file name -tree.admin=Admin -tree.current=Current value -tree.hashed=Hashed -tree.increment=Increment -tree.indexes=Indexes -tree.nonUnique=Non unique -tree.sequences=Sequences -tree.unique=Unique -tree.users=Users diff --git a/h2/src/installer/buildRelease.bat b/h2/src/installer/buildRelease.bat index 144888313d..5a82084ff2 100644 --- a/h2/src/installer/buildRelease.bat +++ b/h2/src/installer/buildRelease.bat @@ -11,9 +11,8 @@ mkdir ..\h2web rmdir /s /q bin 2>nul rmdir /s /q temp 2>nul -call java16 >nul 2>nul call build -quiet compile -call build -quiet spellcheck javadocImpl jarClient +call build -quiet spellcheck javadocImpl call build -quiet clean compile installer mavenDeployCentral rem call build -quiet compile benchmark diff --git 
a/h2/src/installer/buildRelease.sh b/h2/src/installer/buildRelease.sh old mode 100644 new mode 100755 index 042a55d174..8782e23845 --- a/h2/src/installer/buildRelease.sh +++ b/h2/src/installer/buildRelease.sh @@ -8,7 +8,7 @@ rm -rf bin rm -rf temp ./build.sh -quiet compile -./build.sh -quiet spellcheck javadocImpl jarClient +./build.sh -quiet spellcheck javadocImpl ./build.sh -quiet clean compile installer mavenDeployCentral # ./build.sh -quiet compile benchmark diff --git a/h2/src/installer/checkstyle.xml b/h2/src/installer/checkstyle.xml index 1a1681070d..a9a3e4b465 100644 --- a/h2/src/installer/checkstyle.xml +++ b/h2/src/installer/checkstyle.xml @@ -39,7 +39,7 @@ - + @@ -55,11 +55,6 @@ - - - - - diff --git a/h2/src/installer/favicon.ico b/h2/src/installer/favicon.ico index 6e0f78aeb1..fd5e73a416 100644 Binary files a/h2/src/installer/favicon.ico and b/h2/src/installer/favicon.ico differ diff --git a/h2/src/installer/h2.bat b/h2/src/installer/h2.bat index 98cae20eaf..0a7c7212d7 100644 --- a/h2/src/installer/h2.bat +++ b/h2/src/installer/h2.bat @@ -1,2 +1,2 @@ -@java -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %* +@java -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %* @if errorlevel 1 pause \ No newline at end of file diff --git a/h2/src/installer/h2.nsi b/h2/src/installer/h2.nsi index d1fa6c380e..ffaf509fd9 100644 --- a/h2/src/installer/h2.nsi +++ b/h2/src/installer/h2.nsi @@ -1,3 +1,4 @@ + Unicode True !include "MUI.nsh" SetCompressor /SOLID lzma diff --git a/h2/src/installer/h2.sh b/h2/src/installer/h2.sh old mode 100644 new mode 100755 diff --git a/h2/src/installer/h2w.bat b/h2/src/installer/h2w.bat index cb55e87dc2..c7d8d26a5c 100644 --- a/h2/src/installer/h2w.bat +++ b/h2/src/installer/h2w.bat @@ -1,2 +1,2 @@ -@start javaw -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %* +@start javaw -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %* @if errorlevel 1 pause \ No newline at end of file diff --git 
a/h2/src/installer/mvstore/MANIFEST.MF b/h2/src/installer/mvstore/MANIFEST.MF index 415624c9ab..a470ceb294 100644 --- a/h2/src/installer/mvstore/MANIFEST.MF +++ b/h2/src/installer/mvstore/MANIFEST.MF @@ -1,18 +1,23 @@ Manifest-Version: 1.0 Implementation-Title: H2 MVStore -Implementation-URL: http://www.h2database.com +Implementation-URL: https://h2database.com Implementation-Version: ${version} Build-Jdk: ${buildJdk} Created-By: ${createdBy} +Automatic-Module-Name: com.h2database.mvstore Bundle-Description: The MVStore is a persistent, log structured key-value store. -Bundle-DocURL: http://h2database.com/html/mvstore.html +Bundle-DocURL: https://h2database.com/html/mvstore.html Bundle-ManifestVersion: 2 Bundle-Name: H2 MVStore -Bundle-SymbolicName: org.h2.mvstore +Bundle-SymbolicName: com.h2database.mvstore Bundle-Vendor: H2 Group Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html +Bundle-License: https://h2database.com/html/license.html Bundle-Category: utility +Multi-Release: true +Import-Package: javax.crypto, + javax.crypto.spec Export-Package: org.h2.mvstore;version="${version}", + org.h2.mvstore.tx;version="${version}", org.h2.mvstore.type;version="${version}", org.h2.mvstore.rtree;version="${version}" diff --git a/h2/src/installer/openoffice.txt b/h2/src/installer/openoffice.txt index 238831aa91..dcd6d32cee 100644 --- a/h2/src/installer/openoffice.txt +++ b/h2/src/installer/openoffice.txt @@ -29,6 +29,10 @@ sub H2Pdf HeadingStyle.BreakType = 3 ' Insert Page Break Before HeadingStyle.ParaKeepTogether = false + For i = 1 to 4 + ParagraphStyles.getByName("Heading " + i).OutlineLevel = i + Next + images = document.GraphicObjects For i = 0 to images.getCount() - 1 image = images.getByIndex(i) @@ -88,11 +92,15 @@ sub H2Pdf dim linkStart(0) As New com.sun.star.beans.PropertyValue dim linkEnd(0) As New com.sun.star.beans.PropertyValue - For i = 1 To 4 + for i = 1 To 4 oLevel = toc.LevelFormat.getByIndex(i) - x = DimArray(5) - x 
= Array(linkStart, oLevel(0), oLevel(1), oLevel(2), oLevel(3), linkEnd) - old = oLevel(0) + bound = UBound(oLevel) + x = DimArray(bound + 2) + x(0) = linkStart + for j = 0 to bound + x(j + 1) = oLevel(j) + next + x(bound + 2) = linkEnd linkStart(0).Name = "TokenType" linkStart(0).Value = "TokenHyperlinkStart" linkStart(0).Handle = -1 diff --git a/h2/src/installer/pom-mvstore-template.xml b/h2/src/installer/pom-mvstore-template.xml index 491445284a..2a2b2cede1 100644 --- a/h2/src/installer/pom-mvstore-template.xml +++ b/h2/src/installer/pom-mvstore-template.xml @@ -5,18 +5,23 @@ @version@ jar H2 MVStore - http://www.h2database.com/html/mvstore.html + https://h2database.com/html/mvstore.html H2 MVStore - MPL 2.0, and EPL 1.0 - http://h2database.com/html/license.html + MPL 2.0 + https://www.mozilla.org/en-US/MPL/2.0/ + repo + + + EPL 1.0 + https://opensource.org/licenses/eclipse-1.0.php repo - scm:svn:http://h2database.googlecode.com/svn/trunk - http://h2database.googlecode.com/svn/trunk + scm:git:https://github.com/h2database/h2database + https://github.com/h2database/h2database diff --git a/h2/src/installer/pom-template.xml b/h2/src/installer/pom-template.xml index 1a79eda213..132a1a8f91 100644 --- a/h2/src/installer/pom-template.xml +++ b/h2/src/installer/pom-template.xml @@ -5,18 +5,23 @@ @version@ jar H2 Database Engine - http://www.h2database.com + https://h2database.com H2 Database Engine - MPL 2.0, and EPL 1.0 - http://h2database.com/html/license.html + MPL 2.0 + https://www.mozilla.org/en-US/MPL/2.0/ + repo + + + EPL 1.0 + https://opensource.org/licenses/eclipse-1.0.php repo - scm:svn:http://h2database.googlecode.com/svn/trunk - http://h2database.googlecode.com/svn/trunk + scm:git:https://github.com/h2database/h2database + https://github.com/h2database/h2database diff --git a/h2/src/installer/release.txt b/h2/src/installer/release.txt index 518eb63d7a..54bc01212d 100644 --- a/h2/src/installer/release.txt +++ b/h2/src/installer/release.txt @@ -1,33 +1,138 @@ 
-Check dictionary.txt -svn up -./build.sh spellcheck -./build.sh javadocImpl -./build.sh docs -./build.sh jarMVStore (should be about 200 KB) -Update Constants.java - change version and build number -Update changelog.html - add new version, remove oldest -Update newsfeed.sql - add new version, remove oldest -Minor version change: change sourceError.html and source.html -If a beta, change download.html: Version ${version} (${versionDate}), Beta -If a beta, change mainWeb.html: Version ${version} (${versionDate}), Beta -Benchmark: use latest versions of other dbs, change version(s) in performance.html -Run ./buildRelease.sh / buildRelease.bat - -Scan for viruses -Test installer, H2 Console (test new languages) -Check docs, versions and links in main, downloads, build numbers -Check the PDF file size - -Upload to SourceForge -Upload to ftp://h2database.com -Upload to ftp://h2database.com/m2-repo -svn commit -svn copy: /svn/trunk /svn/tags/version-1.1.x; Version 1.1.x (yyyy-mm-dd) -Newsletter: prepare (always to BCC) -Newsletter: send to h2-database-jp@googlegroups.com; h2-database@googlegroups.com; h2database-news@googlegroups.com; ... -Add to http://twitter.com -- tweet: add @geospatialnews for the new geometry type and disk spatial index -Close bugs: http://code.google.com/p/h2database/issues/list -Update statistics +# Checklist for a release +## Formatting, Spellchecking, Javadocs + git pull + +Do this until there are no errors. +Fix typos, add new words to dictionary.txt: + + ./build.sh clean compile spellcheck + +Add documentation for all public methods. Make methods private if possible: + + ./build.sh clean compile javadocImpl + +Ensure lines are not overly long: + + ./build.sh clean compile docs + +## MVStore Jar File Size Verification + +To ensure the MVStore jar file is not too large +(does not reference the database code by accident). 
+The file size should be about 300 KB: + + ./build.sh jarMVStore + +## Changing Version Numbers + +Update org.h2.engine.Constants.java: + change the version and build number: + set BUILD_DATE to today + increment BUILD_ID, the value must be even (for example, 202) + set VERSION_MAJOR / VERSION_MINOR to the new version number + if the last TCP_PROTOCOL_VERSION_## + doesn't have a release date set it to current BUILD_DATE + check and update if necessary links to the latest releases in previous + series of releases and their checksums in download.html + +Update README.md. + set version to the new version + +Update changelog.html: + * create a new "Next Version (unreleased)" with an empty list + * add a new version + * remove change log entries of the oldest version (keeping about 500 lines) + +Update newsfeed.sql: + * add new version, for example: + * (150, '1.4.200', '2019-10-14'), + * remove oldest entry in that list + +Update download-archive.html: + * add new version under Distribution section + +## Skipped + +* Minor version change: change sourceError.html and source.html +* If a beta, change download.html: Version ${version} (${versionDate}), Beta +* If a beta, change mainWeb.html: Version ${version} (${versionDate}), Beta + +The following can be skipped currently; benchmarks should probably be removed: +* To update benchmark data: use latest versions of other dbs, change version(s) in performance.html + +## Build the Release + +In Build.java, comment "-Xdoclint:...", but don't commit that change. + +Run the following commands: +Non-Windows: + + cd src/installer + ./buildRelease.sh + +Windows: + + cd src/installer + buildRelease.bat + +Scan for viruses. + +Test installer, H2 Console (test new languages). + +Check docs, versions and links in main, downloads, build numbers. + +Check the PDF file size. 
+ +Upload ( = httpdocs and httpsdocs) to ftp://h2database.com//javadoc +Upload ( = httpdocs and httpsdocs) to ftp://h2database.com// +Upload ( = httpdocs and httpsdocs) to ftp://h2database.com//m2-repo + +Github: create a release. + +Newsletter: send (always to BCC!), the following: + + h2-database@googlegroups.com; h2database-news@googlegroups.com; ... + +Create tweet at http://twitter.com + +## Sign files and publish files on Maven Central + +In Build.java, comment "-Xdoclint:none", but don't commit that change. + + ./build.sh clean compile jar mavenDeployCentral + cd /data/h2database/m2-repo/com/h2database + # remove sha and md5 files: + find . -name "*.sha1" -delete + find . -name "*.md5" -delete + cd h2/1 + # for each file separately (-javadoc.jar, -sources.jar, .jar, .pom): + gpg -u "Thomas Mueller Graf " -ab h2-<...> + jar -cvf bundle.jar h2-* + cd ../../h2-mvstore/1 + # for each file separately (-javadoc.jar, -sources.jar, .jar, .pom): + gpg -u "Thomas Mueller Graf " -ab h2-mvstore<...> + jar -cvf bundle.jar h2-* + # http://central.sonatype.org/pages/ossrh-guide.html + # http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html + # https://oss.sonatype.org/#welcome - Log In "t..." + # sometimes this doesn't work reliably and you will have to retry + # - Staging Upload + # - Upload Mode: Artifact Bundle, Select Bundle to Upload... - /data/h2database/.../h2/.../bundle.jar + # - Upload Bundle + # - Staging Repositories - Refresh - select comh2database-<...> - Release - Confirm + # - Staging Upload + # - Upload Mode: Artifact Bundle, Select Bundle to Upload... - /data/h2database/.../h2-mvstore/.../bundle.jar + # - Upload Bundle + # - Staging Repositories - Refresh - select comh2database-<...> - Release - Confirm + +Update statistics. + +Change version in pom.xml, commit, add version-*.*.*** tag. + +Update org.h2.engine.Constants.java: + increment BUILD_ID again, the value must be odd (for example, 203) +Update h2/pom.xml. 
+ set ...-SNAPSHOT to the next version (with this odd third number) +Commit. diff --git a/h2/src/installer/source-manifest.mf b/h2/src/installer/source-manifest.mf index 63022f8fe7..bb3c215b5a 100644 --- a/h2/src/installer/source-manifest.mf +++ b/h2/src/installer/source-manifest.mf @@ -1,7 +1,7 @@ Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: H2 Database Engine Sources -Bundle-SymbolicName: org.h2.source +Bundle-SymbolicName: com.h2database.source Bundle-Vendor: H2 Group Bundle-Version: ${version} -Eclipse-SourceBundle: org.h2;version="${version}" \ No newline at end of file +Eclipse-SourceBundle: com.h2database;version="${version}" diff --git a/h2/src/installer/source-mvstore-manifest.mf b/h2/src/installer/source-mvstore-manifest.mf new file mode 100644 index 0000000000..48c80436f9 --- /dev/null +++ b/h2/src/installer/source-mvstore-manifest.mf @@ -0,0 +1,7 @@ +Manifest-Version: 1.0 +Bundle-ManifestVersion: 2 +Bundle-Name: H2 MVStore Sources +Bundle-SymbolicName: com.h2database.mvstore.source +Bundle-Vendor: H2 Group +Bundle-Version: ${version} +Eclipse-SourceBundle: com.h2database.mvstore;version="${version}" diff --git a/h2/src/java10/precompiled/org/h2/util/Utils10.class b/h2/src/java10/precompiled/org/h2/util/Utils10.class new file mode 100644 index 0000000000..1ae38e89d7 Binary files /dev/null and b/h2/src/java10/precompiled/org/h2/util/Utils10.class differ diff --git a/h2/src/java10/src/org/h2/util/Utils10.java b/h2/src/java10/src/org/h2/util/Utils10.java new file mode 100644 index 0000000000..2ba397e893 --- /dev/null +++ b/h2/src/java10/src/org/h2/util/Utils10.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.net.Socket; +import java.nio.charset.Charset; + +import jdk.net.ExtendedSocketOptions; + +/** + * Utilities with specialized implementations for Java 10 and later versions. + * + * This class contains implementations for Java 10 and later versions. + */ +public final class Utils10 { + + /** + * Converts the buffer's contents into a string by decoding the bytes using + * the specified {@link java.nio.charset.Charset charset}. + * + * @param baos + * the buffer to decode + * @param charset + * the charset to use + * @return the decoded string + */ + public static String byteArrayOutputStreamToString(ByteArrayOutputStream baos, Charset charset) { + return baos.toString(charset); + } + + /** + * Returns the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @return the current value of TCP_QUICKACK option + * @throws IOException + * on I/O exception + * @throws UnsupportedOperationException + * if TCP_QUICKACK is not supported + */ + public static boolean getTcpQuickack(Socket socket) throws IOException { + return socket.getOption(ExtendedSocketOptions.TCP_QUICKACK); + } + + /** + * Sets the value of TCP_QUICKACK option. + * + * @param socket + * the socket + * @param value + * the value to set + * @return whether operation was successful + */ + public static boolean setTcpQuickack(Socket socket, boolean value) { + try { + socket.setOption(ExtendedSocketOptions.TCP_QUICKACK, value); + return true; + } catch (Throwable t) { + return false; + } + } + + private Utils10() { + } + +} diff --git a/h2/src/java10/src/org/h2/util/package.html b/h2/src/java10/src/org/h2/util/package.html new file mode 100644 index 0000000000..5860dd0957 --- /dev/null +++ b/h2/src/java10/src/org/h2/util/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Internal utility classes reimplemented for Java 10 and later versions. + +

    \ No newline at end of file diff --git a/h2/src/java9/precompiled/org/h2/util/Bits.class b/h2/src/java9/precompiled/org/h2/util/Bits.class new file mode 100644 index 0000000000..c5dabdfb86 Binary files /dev/null and b/h2/src/java9/precompiled/org/h2/util/Bits.class differ diff --git a/h2/src/java9/src/org/h2/util/Bits.java b/h2/src/java9/src/org/h2/util/Bits.java new file mode 100644 index 0000000000..fc323a8abf --- /dev/null +++ b/h2/src/java9/src/org/h2/util/Bits.java @@ -0,0 +1,320 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.util; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; +import java.util.Arrays; +import java.util.UUID; + +/** + * Manipulations with bytes and arrays. Specialized implementation for Java 9 + * and later versions. + */ +public final class Bits { + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a int[] array on big-endian system. + */ + private static final VarHandle INT_VH_BE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.BIG_ENDIAN); + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a int[] array on little-endian system. + */ + private static final VarHandle INT_VH_LE = MethodHandles.byteArrayViewVarHandle(int[].class, + ByteOrder.LITTLE_ENDIAN); + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a long[] array on big-endian system. + */ + private static final VarHandle LONG_VH_BE = MethodHandles.byteArrayViewVarHandle(long[].class, + ByteOrder.BIG_ENDIAN); + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a long[] array on little-endian system. 
+ */ + private static final VarHandle LONG_VH_LE = MethodHandles.byteArrayViewVarHandle(long[].class, + ByteOrder.LITTLE_ENDIAN); + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a double[] array on big-endian system. + */ + private static final VarHandle DOUBLE_VH_BE = MethodHandles.byteArrayViewVarHandle(double[].class, + ByteOrder.BIG_ENDIAN); + + /** + * VarHandle giving access to elements of a byte[] array viewed as if it + * were a double[] array on little-endian system. + */ + private static final VarHandle DOUBLE_VH_LE = MethodHandles.byteArrayViewVarHandle(double[].class, + ByteOrder.LITTLE_ENDIAN); + + /** + * Compare the contents of two char arrays. If the content or length of the + * first array is smaller than the second array, -1 is returned. If the content + * or length of the second array is smaller than the first array, 1 is returned. + * If the contents and lengths are the same, 0 is returned. + * + * @param data1 + * the first char array (must not be null) + * @param data2 + * the second char array (must not be null) + * @return the result of the comparison (-1, 1 or 0) + */ + public static int compareNotNull(char[] data1, char[] data2) { + return Integer.signum(Arrays.compare(data1, data2)); + } + + /** + * Compare the contents of two byte arrays. If the content or length of the + * first array is smaller than the second array, -1 is returned. If the content + * or length of the second array is smaller than the first array, 1 is returned. + * If the contents and lengths are the same, 0 is returned. + * + *

    + * This method interprets bytes as signed. + *

    + * + * @param data1 + * the first byte array (must not be null) + * @param data2 + * the second byte array (must not be null) + * @return the result of the comparison (-1, 1 or 0) + */ + public static int compareNotNullSigned(byte[] data1, byte[] data2) { + return Integer.signum(Arrays.compare(data1, data2)); + } + + /** + * Compare the contents of two byte arrays. If the content or length of the + * first array is smaller than the second array, -1 is returned. If the content + * or length of the second array is smaller than the first array, 1 is returned. + * If the contents and lengths are the same, 0 is returned. + * + *

    + * This method interprets bytes as unsigned. + *

    + * + * @param data1 + * the first byte array (must not be null) + * @param data2 + * the second byte array (must not be null) + * @return the result of the comparison (-1, 1 or 0) + */ + public static int compareNotNullUnsigned(byte[] data1, byte[] data2) { + return Integer.signum(Arrays.compareUnsigned(data1, data2)); + } + + /** + * Reads a int value from the byte array at the given position in big-endian + * order. + * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static int readInt(byte[] buff, int pos) { + return (int) INT_VH_BE.get(buff, pos); + } + + /** + * Reads a int value from the byte array at the given position in + * little-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static int readIntLE(byte[] buff, int pos) { + return (int) INT_VH_LE.get(buff, pos); + } + + /** + * Reads a long value from the byte array at the given position in + * big-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static long readLong(byte[] buff, int pos) { + return (long) LONG_VH_BE.get(buff, pos); + } + + /** + * Reads a long value from the byte array at the given position in + * little-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static long readLongLE(byte[] buff, int pos) { + return (long) LONG_VH_LE.get(buff, pos); + } + + /** + * Reads a double value from the byte array at the given position in + * big-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static double readDouble(byte[] buff, int pos) { + return (double) DOUBLE_VH_BE.get(buff, pos); + } + + /** + * Reads a double value from the byte array at the given position in + * little-endian order. 
+ * + * @param buff + * the byte array + * @param pos + * the position + * @return the value + */ + public static double readDoubleLE(byte[] buff, int pos) { + return (double) DOUBLE_VH_LE.get(buff, pos); + } + + /** + * Converts UUID value to byte array in big-endian order. + * + * @param msb + * most significant part of UUID + * @param lsb + * least significant part of UUID + * @return byte array representation + */ + public static byte[] uuidToBytes(long msb, long lsb) { + byte[] buff = new byte[16]; + LONG_VH_BE.set(buff, 0, msb); + LONG_VH_BE.set(buff, 8, lsb); + return buff; + } + + /** + * Converts UUID value to byte array in big-endian order. + * + * @param uuid + * UUID value + * @return byte array representation + */ + public static byte[] uuidToBytes(UUID uuid) { + return uuidToBytes(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); + } + + /** + * Writes a int value to the byte array at the given position in big-endian + * order. + * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeInt(byte[] buff, int pos, int x) { + INT_VH_BE.set(buff, pos, x); + } + + /** + * Writes a int value to the byte array at the given position in + * little-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeIntLE(byte[] buff, int pos, int x) { + INT_VH_LE.set(buff, pos, x); + } + + /** + * Writes a long value to the byte array at the given position in big-endian + * order. + * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeLong(byte[] buff, int pos, long x) { + LONG_VH_BE.set(buff, pos, x); + } + + /** + * Writes a long value to the byte array at the given position in + * little-endian order. 
+ * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeLongLE(byte[] buff, int pos, long x) { + LONG_VH_LE.set(buff, pos, x); + } + + /** + * Writes a double value to the byte array at the given position in + * big-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeDouble(byte[] buff, int pos, double x) { + DOUBLE_VH_BE.set(buff, pos, x); + } + + /** + * Writes a double value to the byte array at the given position in + * little-endian order. + * + * @param buff + * the byte array + * @param pos + * the position + * @param x + * the value to write + */ + public static void writeDoubleLE(byte[] buff, int pos, double x) { + DOUBLE_VH_LE.set(buff, pos, x); + } + + private Bits() { + } +} diff --git a/h2/src/java9/src/org/h2/util/package.html b/h2/src/java9/src/org/h2/util/package.html new file mode 100644 index 0000000000..9ef3d9ca4e --- /dev/null +++ b/h2/src/java9/src/org/h2/util/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Internal utility classes reimplemented for Java 9 and later versions. + +

    \ No newline at end of file diff --git a/h2/src/main/META-INF/MANIFEST.MF b/h2/src/main/META-INF/MANIFEST.MF index f954daf194..c4a0ae3b15 100644 --- a/h2/src/main/META-INF/MANIFEST.MF +++ b/h2/src/main/META-INF/MANIFEST.MF @@ -1,48 +1,60 @@ Manifest-Version: 1.0 -Implementation-Title: ${title} -Implementation-URL: http://www.h2database.com +Implementation-Title: H2 Database Engine +Implementation-URL: https://h2database.com Implementation-Version: ${version} Build-Jdk: ${buildJdk} Created-By: ${createdBy} -${mainClassTag} +Main-Class: org.h2.tools.Console +Automatic-Module-Name: com.h2database Bundle-Activator: org.h2.util.DbDriverActivator Bundle-ManifestVersion: 2 Bundle-Name: H2 Database Engine -Bundle-SymbolicName: org.h2 +Bundle-SymbolicName: com.h2database Bundle-Vendor: H2 Group Bundle-Version: ${version} -Bundle-License: http://www.h2database.com/html/license.html +Bundle-License: https://h2database.com/html/license.html Bundle-Category: jdbc -Import-Package: javax.management, +Multi-Release: true +Import-Package: javax.crypto, + javax.crypto.spec, + javax.management, javax.naming;resolution:=optional, + javax.naming.directory;resolution:=optional, javax.naming.spi;resolution:=optional, javax.net, javax.net.ssl, + javax.script;resolution:=optional, + javax.security.auth.callback;resolution:=optional, + javax.security.auth.login;resolution:=optional, javax.servlet;resolution:=optional, javax.servlet.http;resolution:=optional, + jakarta.servlet;resolution:=optional, + jakarta.servlet.http;resolution:=optional, javax.sql, javax.tools;resolution:=optional, javax.transaction.xa;resolution:=optional, - org.apache.lucene.analysis;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.analysis.standard;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.document;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.index;version="[3.0.0,3.1.0)";resolution:=optional, - 
org.apache.lucene.queryParser;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.search;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.store;version="[3.0.0,3.1.0)";resolution:=optional, - org.apache.lucene.util;version="[3.0.0,3.1.0)";resolution:=optional, - com.vividsolutions.jts.geom;version="1.13";resolution:=optional, - com.vividsolutions.jts.io;version="1.13";resolution:=optional, - org.h2;version="[${version},1.5.0)", - org.h2.api;version="[${version},1.5.0)", - org.h2.fulltext;version="[${version},1.5.0)", - org.h2.jdbcx;version="[${version},1.5.0)", - org.h2.tools;version="[${version},1.5.0)", - org.h2.util;version="[${version},1.5.0)", - org.h2.value;version="[${version},1.5.0)", + javax.xml.parsers;resolution:=optional, + javax.xml.stream;resolution:=optional, + javax.xml.transform;resolution:=optional, + javax.xml.transform.dom;resolution:=optional, + javax.xml.transform.sax;resolution:=optional, + javax.xml.transform.stax;resolution:=optional, + javax.xml.transform.stream;resolution:=optional, + org.w3c.dom;resolution:=optional, + org.xml.sax;resolution:=optional, + org.xml.sax.helpers;resolution:=optional, + org.apache.lucene.analysis;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.analysis.standard;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.document;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.index;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.queryparser;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.search;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.store;version="[8.5.2,9.0.0)";resolution:=optional, + org.apache.lucene.util;version="[8.5.2,9.0.0)";resolution:=optional, + org.locationtech.jts.geom;version="1.17.0";resolution:=optional, org.osgi.framework;version="1.5", org.osgi.service.jdbc;version="1.0";resolution:=optional, - org.slf4j;version="[1.6.0,1.7.0)";resolution:=optional + 
org.slf4j;version="[1.7.0,1.8.0)";resolution:=optional Export-Package: org.h2;version="${version}", org.h2.api;version="${version}", org.h2.constant;version="${version}", @@ -55,6 +67,9 @@ Export-Package: org.h2;version="${version}", org.h2.bnf;version="${version}", org.h2.bnf.context;version="${version}", org.h2.mvstore;version="${version}", + org.h2.mvstore.tx;version="${version}", org.h2.mvstore.type;version="${version}", - org.h2.mvstore.rtree;version="${version}" + org.h2.mvstore.rtree;version="${version}", + org.h2.store.fs;version="${version}" +Provide-Capability: osgi.service;objectClass:List=org.osgi.service.jdbc.DataSourceFactory Premain-Class: org.h2.util.Profiler diff --git a/h2/src/main/org/h2/Driver.java b/h2/src/main/org/h2/Driver.java index ba33bbac03..a0660fc5fd 100644 --- a/h2/src/main/org/h2/Driver.java +++ b/h2/src/main/org/h2/Driver.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2; @@ -10,14 +10,11 @@ import java.sql.DriverPropertyInfo; import java.sql.SQLException; import java.util.Properties; +import java.util.logging.Logger; +import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; -import org.h2.upgrade.DbUpgrade; - -/*## Java 1.7 ## -import java.util.logging.Logger; -//*/ /** * The database driver. An application should not use this class directly. 
The @@ -31,14 +28,14 @@ * "jdbc:h2:˜/test", "sa", "sa"); * */ -public class Driver implements java.sql.Driver { +public class Driver implements java.sql.Driver, JdbcDriverBackwardsCompat { private static final Driver INSTANCE = new Driver(); private static final String DEFAULT_URL = "jdbc:default:connection"; private static final ThreadLocal DEFAULT_CONNECTION = - new ThreadLocal(); + new ThreadLocal<>(); - private static volatile boolean registered; + private static boolean registered; static { load(); @@ -52,26 +49,18 @@ public class Driver implements java.sql.Driver { * @param url the database URL * @param info the connection properties * @return the new connection or null if the URL is not supported + * @throws SQLException on connection exception or if URL is {@code null} */ @Override public Connection connect(String url, Properties info) throws SQLException { - try { - if (info == null) { - info = new Properties(); - } - if (!acceptsURL(url)) { - return null; - } - if (url.equals(DEFAULT_URL)) { - return DEFAULT_CONNECTION.get(); - } - Connection c = DbUpgrade.connectOrUpgrade(url, info); - if (c != null) { - return c; - } - return new JdbcConnection(url, info); - } catch (Exception e) { - throw DbException.toSQLException(e); + if (url == null) { + throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null); + } else if (url.startsWith(Constants.START_URL)) { + return new JdbcConnection(url, info, null, null, false); + } else if (url.equals(DEFAULT_URL)) { + return DEFAULT_CONNECTION.get(); + } else { + return null; } } @@ -81,17 +70,19 @@ public Connection connect(String url, Properties info) throws SQLException { * * @param url the database URL * @return if the driver understands the URL + * @throws SQLException if URL is {@code null} */ @Override - public boolean acceptsURL(String url) { - if (url != null) { - if (url.startsWith(Constants.START_URL)) { - return true; - } else if (url.equals(DEFAULT_URL)) { - return 
DEFAULT_CONNECTION.get() != null; - } + public boolean acceptsURL(String url) throws SQLException { + if (url == null) { + throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null); + } else if (url.startsWith(Constants.START_URL)) { + return true; + } else if (url.equals(DEFAULT_URL)) { + return DEFAULT_CONNECTION.get() != null; + } else { + return false; } - return false; } /** @@ -143,14 +134,14 @@ public boolean jdbcCompliant() { /** * [Not supported] */ -/*## Java 1.7 ## + @Override public Logger getParentLogger() { return null; } -//*/ /** * INTERNAL + * @return instance of the driver registered with the DriverManager */ public static synchronized Driver load() { try { @@ -182,6 +173,7 @@ public static synchronized void unload() { * INTERNAL * Sets, on a per-thread basis, the default-connection for * user-defined functions. + * @param c to set default to */ public static void setDefaultConnection(Connection c) { if (c == null) { @@ -193,6 +185,7 @@ public static void setDefaultConnection(Connection c) { /** * INTERNAL + * @param thread to set context class loader for */ public static void setThreadContextClassLoader(Thread thread) { // Apache Tomcat: use the classloader of the driver to avoid the diff --git a/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java new file mode 100644 index 0000000000..4d033fd00c --- /dev/null +++ b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java @@ -0,0 +1,16 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. 
+ */ +public interface JdbcDriverBackwardsCompat { + + // compatibility interface + +} diff --git a/h2/src/main/org/h2/api/Aggregate.java b/h2/src/main/org/h2/api/Aggregate.java index 8e6d30e6a8..6169d0cec4 100644 --- a/h2/src/main/org/h2/api/Aggregate.java +++ b/h2/src/main/org/h2/api/Aggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -19,8 +19,11 @@ public interface Aggregate { * A new object is created for each invocation. * * @param conn a connection to the database + * @throws SQLException on SQL exception */ - void init(Connection conn) throws SQLException; + default void init(Connection conn) throws SQLException { + // Do nothing by default + } /** * This method must return the H2 data type, {@link org.h2.value.Value}, @@ -40,13 +43,17 @@ public interface Aggregate { * those are passed as array. * * @param value the value(s) for this row + * @throws SQLException on failure */ void add(Object value) throws SQLException; /** - * This method returns the computed aggregate value. + * This method returns the computed aggregate value. This method must + * preserve previously added values and must be able to reevaluate result if + * more values were added since its previous invocation. * * @return the aggregated value + * @throws SQLException on failure */ Object getResult() throws SQLException; diff --git a/h2/src/main/org/h2/api/AggregateFunction.java b/h2/src/main/org/h2/api/AggregateFunction.java index 7a547d4b80..916853edcd 100644 --- a/h2/src/main/org/h2/api/AggregateFunction.java +++ b/h2/src/main/org/h2/api/AggregateFunction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -24,8 +24,11 @@ public interface AggregateFunction { * A new object is created for each invocation. * * @param conn a connection to the database + * @throws SQLException on SQL exception */ - void init(Connection conn) throws SQLException; + default void init(Connection conn) throws SQLException { + // Do nothing by default + } /** * This method must return the SQL type of the method, given the SQL type of @@ -34,6 +37,7 @@ public interface AggregateFunction { * * @param inputTypes the SQL type of the parameters, {@link java.sql.Types} * @return the SQL type of the result + * @throws SQLException on failure */ int getType(int[] inputTypes) throws SQLException; @@ -43,13 +47,17 @@ public interface AggregateFunction { * those are passed as array. * * @param value the value(s) for this row + * @throws SQLException on failure */ void add(Object value) throws SQLException; /** - * This method returns the computed aggregate value. + * This method returns the computed aggregate value. This method must + * preserve previously added values and must be able to reevaluate result if + * more values were added since its previous invocation. * * @return the aggregated value + * @throws SQLException on failure */ Object getResult() throws SQLException; diff --git a/h2/src/main/org/h2/api/CredentialsValidator.java b/h2/src/main/org/h2/api/CredentialsValidator.java new file mode 100644 index 0000000000..79dae86059 --- /dev/null +++ b/h2/src/main/org/h2/api/CredentialsValidator.java @@ -0,0 +1,32 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.api; + +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.Configurable; + +/** + * A class that implement this interface can be used to validate credentials + * provided by client. + *

    + * This feature is experimental and subject to change + *

    + */ +public interface CredentialsValidator extends Configurable { + + /** + * Validate user credential. + * + * @param authenticationInfo + * = authentication info + * @return true if credentials are valid, otherwise false + * @throws Exception + * any exception occurred (invalid credentials or internal + * issue) prevent user login + */ + boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception; + +} diff --git a/h2/src/main/org/h2/api/DatabaseEventListener.java b/h2/src/main/org/h2/api/DatabaseEventListener.java index 0fa85d0685..67f3c8eb9e 100644 --- a/h2/src/main/org/h2/api/DatabaseEventListener.java +++ b/h2/src/main/org/h2/api/DatabaseEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -12,7 +12,7 @@ * A class that implements this interface can get notified about exceptions * and other events. A database event listener can be registered when * connecting to a database. Example database URL: - * jdbc:h2:test;DATABASE_EVENT_LISTENER='com.acme.DbListener' + * jdbc:h2:./test;DATABASE_EVENT_LISTENER='com.acme.DbListener' */ public interface DatabaseEventListener extends EventListener { @@ -66,13 +66,15 @@ public interface DatabaseEventListener extends EventListener { * * @param url - the database URL */ - void init(String url); + default void init(String url) { + } /** - * This method is called after the database has been opened. It is save to + * This method is called after the database has been opened. It is safe to * connect to the database and execute statements at this point. */ - void opened(); + default void opened() { + } /** * This method is called if an exception occurred. 
@@ -80,7 +82,8 @@ public interface DatabaseEventListener extends EventListener { * @param e the exception * @param sql the SQL statement */ - void exceptionThrown(SQLException e, String sql); + default void exceptionThrown(SQLException e, String sql) { + } /** * This method is called for long running events, such as recovering, @@ -93,15 +96,17 @@ public interface DatabaseEventListener extends EventListener { * @param state the state * @param name the object name * @param x the current position - * @param max the highest possible value (might be 0) + * @param max the highest possible value or 0 if unknown */ - void setProgress(int state, String name, int x, int max); + default void setProgress(int state, String name, long x, long max) { + } /** - * This method is called before the database is closed normally. It is save + * This method is called before the database is closed normally. It is safe * to connect to the database and execute statements at this point, however * the connection must be closed before the method returns. */ - void closingDatabase(); + default void closingDatabase() { + } } diff --git a/h2/src/main/org/h2/api/ErrorCode.java b/h2/src/main/org/h2/api/ErrorCode.java index 71cec6a1da..bb74ebef80 100644 --- a/h2/src/main/org/h2/api/ErrorCode.java +++ b/h2/src/main/org/h2/api/ErrorCode.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -106,6 +106,12 @@ public class ErrorCode { */ public static final int NUMERIC_VALUE_OUT_OF_RANGE_1 = 22003; + /** + * The error with code 22004 is thrown when a value is out of + * range when converting to another column's data type. 
+ */ + public static final int NUMERIC_VALUE_OUT_OF_RANGE_2 = 22004; + /** * The error with code 22007 is thrown when * a text can not be converted to a date, time, or timestamp constant. @@ -127,6 +133,15 @@ public class ErrorCode { */ public static final int DIVISION_BY_ZERO_1 = 22012; + /** + * The error with code 22013 is thrown when preceding or + * following size in a window function is null or negative. Example: + *
    +     * FIRST_VALUE(N) OVER(ORDER BY N ROWS -1 PRECEDING)
    +     * 
    + */ + public static final int INVALID_PRECEDING_OR_FOLLOWING_1 = 22013; + /** * The error with code 22018 is thrown when * trying to convert a value to a data type where the conversion is @@ -159,6 +174,55 @@ public class ErrorCode { */ public static final int LIKE_ESCAPE_ERROR_1 = 22025; + /** + * The error with code 22030 is thrown when + * an attempt is made to INSERT or UPDATE an ENUM-typed cell, + * but the value is not one of the values enumerated by the + * type. + * + * Example: + *
    +     * CREATE TABLE TEST(CASE ENUM('sensitive','insensitive'));
    +     * INSERT INTO TEST VALUES('snake');
    +     * 
    + */ + public static final int ENUM_VALUE_NOT_PERMITTED = 22030; + + /** + * The error with code 22032 is thrown when an + * attempt is made to add or modify an ENUM-typed column so + * that one or more of its enumerators would be empty. + * + * Example: + *
    +     * CREATE TABLE TEST(CASE ENUM(' '));
    +     * 
    + */ + public static final int ENUM_EMPTY = 22032; + + /** + * The error with code 22033 is thrown when an + * attempt is made to add or modify an ENUM-typed column so + * that it would have duplicate values. + * + * Example: + *
    +     * CREATE TABLE TEST(CASE ENUM('sensitive', 'sensitive'));
    +     * 
    + */ + public static final int ENUM_DUPLICATE = 22033; + + /** + * The error with code 22034 is thrown when an + * attempt is made to read non-existing element of an array. + * + * Example: + *
    +     * VALUES ARRAY[1, 2][3]
    +     * 
    + */ + public static final int ARRAY_ELEMENT_ERROR_2 = 22034; + // 23: constraint violation /** @@ -228,7 +292,7 @@ public class ErrorCode { * The error with code 23513 is thrown when * a check constraint is violated. Example: *
    -     * CREATE TABLE TEST(ID INT CHECK ID>0);
    +     * CREATE TABLE TEST(ID INT CHECK (ID>0));
          * INSERT INTO TEST VALUES(0);
          * 
    */ @@ -236,7 +300,7 @@ public class ErrorCode { /** * The error with code 23514 is thrown when - * evaluation of a check constraint resulted in a error. + * evaluation of a check constraint resulted in an error. */ public static final int CHECK_CONSTRAINT_INVALID = 23514; @@ -264,7 +328,7 @@ public class ErrorCode { * sessions are also possible. To solve deadlock problems, an application * should lock tables always in the same order, such as always lock table A * before locking table B. For details, see Wikipedia Deadlock. + * href="https://en.wikipedia.org/wiki/Deadlock">Wikipedia Deadlock. */ public static final int DEADLOCK_1 = 40001; @@ -314,6 +378,30 @@ public class ErrorCode { */ public static final int TABLE_OR_VIEW_NOT_FOUND_1 = 42102; + /** + * The error with code 42103 is thrown when + * trying to query, modify or drop a table or view that does not exists + * in this schema and database but similar names were found. A common cause + * is that the names are written in different case. + * Example: + *
    +     * SELECT * FROM ABC;
    +     * 
    + */ + public static final int TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 = 42103; + + /** + * The error with code 42104 is thrown when + * trying to query, modify or drop a table or view that does not exists + * in this schema and database but it is empty anyway. A common cause is + * that the wrong database was opened. + * Example: + *
    +     * SELECT * FROM ABC;
    +     * 
    + */ + public static final int TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 = 42104; + /** * The error with code 42111 is thrown when * trying to create an index if an index with the same name already exists. @@ -358,6 +446,52 @@ public class ErrorCode { */ public static final int COLUMN_NOT_FOUND_1 = 42122; + /** + * The error with code 42131 is thrown when + * identical expressions should be used, but different + * expressions were found. + * Example: + *
    +     * SELECT MODE(A ORDER BY B) FROM TEST;
    +     * 
    + */ + public static final int IDENTICAL_EXPRESSIONS_SHOULD_BE_USED = 42131; + + /** + * The error with code 42602 is thrown when + * invalid name of identifier is used. + * Example: + *
    +     * statement.enquoteIdentifier("\"", true);
    +     * 
    + */ + public static final int INVALID_NAME_1 = 42602; + + /** + * The error with code 42622 is thrown when + * name of identifier is too long. + * Example: + *
    +     * char[] c = new char[1000];
    +     * Arrays.fill(c, 'A');
    +     * statement.executeQuery("SELECT 1 " + new String(c));
    +     * 
    + */ + public static final int NAME_TOO_LONG_2 = 42622; + + // 54: program limit exceeded + + /** + * The error with code 54011 is thrown when + * too many columns were specified in a table, select statement, + * or row value. + * Example: + *
    +     * CREATE TABLE TEST(C1 INTEGER, C2 INTEGER, ..., C20000 INTEGER);
    +     * 
    + */ + public static final int TOO_MANY_COLUMNS_1 = 54011; + // 0A: feature not supported // HZ: remote database access @@ -407,6 +541,18 @@ public class ErrorCode { */ public static final int LOCK_TIMEOUT_1 = 50200; + /** + * The error with code 57014 is thrown when + * a statement was canceled using Statement.cancel() or + * when the query timeout has been reached. + * Examples: + *
    +     * stat.setQueryTimeout(1);
    +     * stat.cancel();
    +     * 
    + */ + public static final int STATEMENT_WAS_CANCELED = 57014; + /** * The error with code 90000 is thrown when * a function that does not return a result set was used in the FROM clause. @@ -463,10 +609,9 @@ public class ErrorCode { /** * The error with code 90005 is thrown when - * trying to create a trigger and using the combination of SELECT - * and FOR EACH ROW, which we do not support. + * trying to create a trigger with invalid combination of flags. */ - public static final int TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED = 90005; + public static final int INVALID_TRIGGER_FLAGS_1 = 90005; /** * The error with code 90006 is thrown when @@ -496,7 +641,7 @@ public class ErrorCode { * trying to create a sequence with an invalid combination * of attributes (min value, max value, start value, etc). */ - public static final int SEQUENCE_ATTRIBUTES_INVALID = 90009; + public static final int SEQUENCE_ATTRIBUTES_INVALID_7 = 90009; /** * The error with code 90010 is thrown when @@ -535,13 +680,11 @@ public class ErrorCode { public static final int PARAMETER_NOT_SET_1 = 90012; /** - * The error with code 90013 is thrown when - * trying to open a database that does not exist using the flag - * IFEXISTS=TRUE, or when trying to access a database object with a catalog - * name that does not match the database name. Example: + * The error with code 90013 is thrown when when trying to access + * a database object with a catalog name that does not match the database + * name. *
    -     * CREATE TABLE TEST(ID INT);
    -     * SELECT XYZ.PUBLIC.TEST.ID FROM TEST;
    +     * SELECT * FROM database_that_does_not_exist.table_name
          * 
    */ public static final int DATABASE_NOT_FOUND_1 = 90013; @@ -661,13 +804,22 @@ public class ErrorCode { public static final int FUNCTION_NOT_FOUND_1 = 90022; /** - * The error with code 90023 is thrown when - * trying to set a primary key on a nullable column. - * Example: + * The error with code 90023 is thrown when trying to set a + * primary key on a nullable column or when trying to drop NOT NULL + * constraint on primary key or identity column. + * Examples: *
          * CREATE TABLE TEST(ID INT, NAME VARCHAR);
          * ALTER TABLE TEST ADD CONSTRAINT PK PRIMARY KEY(ID);
          * 
    + *
    +     * CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
    +     * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
    +     * 
    + *
    +     * CREATE TABLE TEST(ID INT GENERATED ALWAYS AS IDENTITY, NAME VARCHAR);
    +     * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
    +     * 
    */ public static final int COLUMN_MUST_NOT_BE_NULLABLE_1 = 90023; @@ -926,32 +1078,15 @@ public class ErrorCode { */ public static final int WRONG_PASSWORD_FORMAT = 90050; - /** - * The error with code 57014 is thrown when - * a statement was canceled using Statement.cancel() or - * when the query timeout has been reached. - * Examples: - *
    -     * stat.setQueryTimeout(1);
    -     * stat.cancel();
    -     * 
    - */ - public static final int STATEMENT_WAS_CANCELED = 57014; + // 90051 was removed /** - * The error with code 90052 is thrown when - * a subquery that is used as a value contains more than one column. - * Example of wrong usage: - *
    -     * CREATE TABLE TEST(ID INT);
    -     * INSERT INTO TEST VALUES(1), (2);
    -     * SELECT * FROM TEST WHERE ID IN (SELECT 1, 2 FROM DUAL);
    -     * 
    - * Correct: + * The error with code 90052 is thrown when a single-column + * subquery is expected but a subquery with other number of columns was + * specified. + * Example: *
    -     * CREATE TABLE TEST(ID INT);
    -     * INSERT INTO TEST VALUES(1), (2);
    -     * SELECT * FROM TEST WHERE ID IN (1, 2);
    +     * VALUES ARRAY(SELECT A, B FROM TEST)
          * 
    */ public static final int SUBQUERY_IS_NOT_SINGLE_COLUMN = 90052; @@ -991,6 +1126,12 @@ public class ErrorCode { */ public static final int UNSUPPORTED_CIPHER = 90055; + /** + * The error with code 90056 is thrown when trying to format a + * timestamp using TO_DATE and TO_TIMESTAMP with an invalid format. + */ + public static final int INVALID_TO_DATE_FORMAT = 90056; + /** * The error with code 90057 is thrown when * trying to drop a constraint that does not exist. @@ -1316,11 +1457,14 @@ public class ErrorCode { /** * The error with code 90085 is thrown when * trying to manually drop an index that was generated by the system - * because of a unique or referential constraint. To find out what - * constraint causes the problem, run: + * because of a unique or referential constraint. To find + * the owner of the index without attempt to drop it run *
    -     * SELECT * FROM INFORMATION_SCHEMA.CONSTRAINTS
    -     * WHERE UNIQUE_INDEX_NAME = '<index name>';
    +     * SELECT CONSTRAINT_SCHEMA, CONSTRAINT_NAME
    +     * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
    +     * WHERE INDEX_SCHEMA = '<index schema>'
    +     * AND INDEX_NAME = '<index name>'
    +     * FETCH FIRST ROW ONLY
          * 
    * Example of wrong usage: *
    @@ -1348,7 +1492,7 @@ public class ErrorCode {
     
         /**
          * The error with code 90087 is thrown when
    -     * the specified method was not found in the class.
    +     * a method with matching number of arguments was not found in the class.
          * Example:
          * 
          * CREATE ALIAS TO_BINARY FOR "java.lang.Long.toBinaryString(long)";
    @@ -1568,6 +1712,17 @@ public class ErrorCode {
          */
         public static final int VIEW_IS_INVALID_2 = 90109;
     
    +    /**
    +     * The error with code 90110 is thrown when
    +     * trying to compare values of incomparable data types.
    +     * Example:
    +     * 
    +     * CREATE TABLE test (id INT NOT NULL, name VARCHAR);
    +     * select * from test where id = (1, 2);
    +     * 
    + */ + public static final int TYPES_ARE_NOT_COMPARABLE_2 = 90110; + /** * The error with code 90111 is thrown when * an exception occurred while accessing a linked table. @@ -1653,7 +1808,7 @@ public class ErrorCode { * DROP TABLE INFORMATION_SCHEMA.SETTINGS; *
    */ - public static final int CANNOT_DROP_TABLE_1 = 90118; + public static final int CANNOT_DROP_TABLE_1 = 90118; /** * The error with code 90119 is thrown when @@ -1662,11 +1817,17 @@ public class ErrorCode { * Example: *
          * CREATE DOMAIN INTEGER AS VARCHAR;
    -     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
    -     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
    +     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
    +     * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
          * 
    */ - public static final int USER_DATA_TYPE_ALREADY_EXISTS_1 = 90119; + public static final int DOMAIN_ALREADY_EXISTS_1 = 90119; + + /** + * Deprecated since 1.4.198. Use {@link #DOMAIN_ALREADY_EXISTS_1} instead. + */ + @Deprecated + public static final int USER_DATA_TYPE_ALREADY_EXISTS_1 = DOMAIN_ALREADY_EXISTS_1; /** * The error with code 90120 is thrown when @@ -1676,7 +1837,13 @@ public class ErrorCode { * DROP DOMAIN UNKNOWN; *
    */ - public static final int USER_DATA_TYPE_NOT_FOUND_1 = 90120; + public static final int DOMAIN_NOT_FOUND_1 = 90120; + + /** + * Deprecated since 1.4.198. Use {@link #DOMAIN_NOT_FOUND_1} instead. + */ + @Deprecated + public static final int USER_DATA_TYPE_NOT_FOUND_1 = DOMAIN_NOT_FOUND_1; /** * The error with code 90121 is thrown when @@ -1685,6 +1852,12 @@ public class ErrorCode { */ public static final int DATABASE_CALLED_AT_SHUTDOWN = 90121; + /** + * The error with code 90122 is thrown when + * WITH TIES clause is used without ORDER BY clause. + */ + public static final int WITH_TIES_WITHOUT_ORDER_BY = 90122; + /** * The error with code 90123 is thrown when * trying mix regular parameters and indexed parameters in the same @@ -1790,7 +1963,7 @@ public class ErrorCode { * connections at the same time, or trying to insert two rows with the same * key from two connections. Example: *
    -     * jdbc:h2:~/test;MVCC=TRUE
    +     * jdbc:h2:~/test
          * Session 1:
          * CREATE TABLE TEST(ID INT);
          * INSERT INTO TEST VALUES(1);
    @@ -1816,8 +1989,7 @@ public class ErrorCode {
         /**
          * The error with code 90133 is thrown when
          * trying to change a specific database property while the database is
    -     * already open. The MVCC property needs to be set in the first connection
    -     * (in the connection opening the database) and can not be changed later on.
    +     * already open.
          */
         public static final int CANNOT_CHANGE_SETTING_WHEN_OPEN_1 = 90133;
     
    @@ -1841,19 +2013,19 @@ public class ErrorCode {
     
         /**
          * The error with code 90136 is thrown when
    -     * executing a query that used an unsupported outer join condition.
    +     * trying to reference a window that does not exist.
          * Example:
          * 
    -     * SELECT * FROM DUAL A LEFT JOIN DUAL B ON B.X=(SELECT MAX(X) FROM DUAL);
    +     * SELECT LEAD(X) OVER W FROM TEST;
          * 
    */ - public static final int UNSUPPORTED_OUTER_JOIN_CONDITION_1 = 90136; + public static final int WINDOW_NOT_FOUND_1 = 90136; /** * The error with code 90137 is thrown when * trying to assign a value to something that is not a variable. *
    -     * SELECT AMOUNT, SET(@V, IFNULL(@V, 0)+AMOUNT) FROM TEST;
    +     * SELECT AMOUNT, SET(@V, COALESCE(@V, 0)+AMOUNT) FROM TEST;
          * 
    */ public static final int CAN_ONLY_ASSIGN_TO_VARIABLE_1 = 90137; @@ -1898,7 +2070,6 @@ public class ErrorCode { */ public static final int RESULT_SET_READONLY = 90140; - /** * The error with code 90141 is thrown when * trying to change the java object serializer while there was already data @@ -1913,8 +2084,163 @@ public class ErrorCode { */ public static final int STEP_SIZE_MUST_NOT_BE_ZERO = 90142; + /** + * The error with code 90143 is thrown when + * trying to fetch a row from the primary index and the row is not there. + */ + public static final int ROW_NOT_FOUND_IN_PRIMARY_INDEX = 90143; + + /** + * The error with code 90144 is thrown when + * user trying to login into a database with AUTHREALM set and + * the target database doesn't have an authenticator defined + *

    Authenticator experimental feature can be enabled by + *

    + *
    +     * SET AUTHENTICATOR TRUE
    +     * 
    + */ + public static final int AUTHENTICATOR_NOT_AVAILABLE = 90144; - // next are 90051, 90056, 90110, 90122, 90143 + /** + * The error with code 90145 is thrown when trying to execute a + * SELECT statement with non-window aggregates, DISTINCT, GROUP BY, or + * HAVING clauses together with FOR UPDATE clause. + * + *
    +     * SELECT DISTINCT NAME FOR UPDATE;
    +     * SELECT MAX(VALUE) FOR UPDATE;
    +     * 
    + */ + public static final int FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT = 90145; + + /** + * The error with code 90146 is thrown when trying to open a + * database that does not exist using the flag IFEXISTS=TRUE + *
    +     * jdbc:h2:./database_that_does_not_exist
    +     * 
    + */ + public static final int DATABASE_NOT_FOUND_WITH_IF_EXISTS_1 = 90146; + + /** + * The error with code 90147 is thrown when trying to execute a + * statement which closes the transaction (such as commit and rollback) and + * autocommit mode is on. + * + * @see org.h2.engine.SysProperties#FORCE_AUTOCOMMIT_OFF_ON_COMMIT + */ + public static final int METHOD_DISABLED_ON_AUTOCOMMIT_TRUE = 90147; + + /** + * The error with code 90148 is thrown when trying to access + * the current value of a sequence before execution of NEXT VALUE FOR + * sequenceName in the current session. Example: + * + *
    +     * SELECT CURRENT VALUE FOR SEQUENCE XYZ;
    +     * 
    + */ + public static final int CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 = 90148; + + /** + * The error with code 90149 is thrown when trying to open a + * database that does not exist remotely without enabling remote database + * creation first. + *
    +     * jdbc:h2:./database_that_does_not_exist
    +     * 
    + */ + public static final int REMOTE_DATABASE_NOT_FOUND_1 = 90149; + + /** + * The error with code 90150 is thrown when + * trying to use an invalid precision. + * Example: + *
    +     * CREATE TABLE TABLE1 ( FAIL INTERVAL YEAR(20) );
    +     * 
    + */ + public static final int INVALID_VALUE_PRECISION = 90150; + + /** + * The error with code 90151 is thrown when + * trying to use an invalid scale or fractional seconds precision. + * Example: + *
    +     * CREATE TABLE TABLE1 ( FAIL TIME(10) );
    +     * 
    + */ + public static final int INVALID_VALUE_SCALE = 90151; + + /** + * The error with code 90152 is thrown when trying to manually + * drop a unique or primary key constraint that is referenced by a foreign + * key constraint without a CASCADE clause. + * + *
    +     * CREATE TABLE PARENT(ID INT CONSTRAINT P1 PRIMARY KEY);
    +     * CREATE TABLE CHILD(ID INT CONSTRAINT P2 PRIMARY KEY, CHILD INT CONSTRAINT C REFERENCES PARENT);
    +     * ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT;
    +     * 
    + */ + public static final int CONSTRAINT_IS_USED_BY_CONSTRAINT_2 = 90152; + + /** + * The error with code 90153 is thrown when trying to reference + * a column of another data type when data types aren't comparable or don't + * have a session-independent compare order between each other. + * + *
    +     * CREATE TABLE PARENT(T TIMESTAMP UNIQUE);
    +     * CREATE TABLE CHILD(T TIMESTAMP WITH TIME ZONE REFERENCES PARENT(T));
    +     * 
    + */ + public static final int UNCOMPARABLE_REFERENCED_COLUMN_2 = 90153; + + /** + * The error with code 90154 is thrown when trying to assign a + * value to a generated column. + * + *
    +     * CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1));
    +     * INSERT INTO TEST(A, B) VALUES (1, 1);
    +     * 
    + */ + public static final int GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 = 90154; + + /** + * The error with code 90155 is thrown when trying to create a + * referential constraint that can update a referenced generated column. + * + *
    +     * CREATE TABLE PARENT(ID INT PRIMARY KEY, K INT GENERATED ALWAYS AS (ID) UNIQUE);
    +     * CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT);
    +     * ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET NULL;
    +     * 
    + */ + public static final int GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 = 90155; + + /** + * The error with code 90156 is thrown when trying to create a + * view or a table from a select and some expression doesn't have a column + * name or alias when it is required by a compatibility mode. + * + *
    +     * SET MODE DB2;
    +     * CREATE TABLE T1(A INT, B INT);
    +     * CREATE TABLE T2 AS (SELECT A + B FROM T1) WITH DATA;
    +     * 
    + */ + public static final int COLUMN_ALIAS_IS_NOT_SPECIFIED_1 = 90156; + + /** + * The error with code 90157 is thrown when the integer + * index that is used in the GROUP BY is not in the SELECT list + */ + public static final int GROUP_BY_NOT_IN_THE_RESULT = 90157; + + // next is 90158 private ErrorCode() { // utility class @@ -1922,6 +2248,8 @@ private ErrorCode() { /** * INTERNAL + * @param errorCode to check + * @return true if provided code is common, false otherwise */ public static boolean isCommon(int errorCode) { // this list is sorted alphabetically @@ -1940,6 +2268,8 @@ public static boolean isCommon(int errorCode) { case SYNTAX_ERROR_2: case TABLE_OR_VIEW_ALREADY_EXISTS_1: case TABLE_OR_VIEW_NOT_FOUND_1: + case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2: + case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1: case VALUE_TOO_LONG_2: return true; } @@ -1948,6 +2278,8 @@ public static boolean isCommon(int errorCode) { /** * INTERNAL + * @param errorCode to get state for + * @return error state */ public static String getState(int errorCode) { // To convert SQLState to error code, replace @@ -1967,13 +2299,19 @@ public static String getState(int errorCode) { // 21: cardinality violation case COLUMN_COUNT_DOES_NOT_MATCH: return "21S02"; + // 22: data exception + case ARRAY_ELEMENT_ERROR_2: return "2202E"; + // 42: syntax error or access rule violation case TABLE_OR_VIEW_ALREADY_EXISTS_1: return "42S01"; case TABLE_OR_VIEW_NOT_FOUND_1: return "42S02"; + case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2: return "42S03"; + case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1: return "42S04"; case INDEX_ALREADY_EXISTS_1: return "42S11"; case INDEX_NOT_FOUND_1: return "42S12"; case DUPLICATE_COLUMN_NAME_1: return "42S21"; case COLUMN_NOT_FOUND_1: return "42S22"; + case IDENTICAL_EXPRESSIONS_SHOULD_BE_USED: return "42S31"; // 0A: feature not supported @@ -1986,7 +2324,7 @@ public static String getState(int errorCode) { case FEATURE_NOT_SUPPORTED_1: return "HYC00"; case 
LOCK_TIMEOUT_1: return "HYT00"; default: - return "" + errorCode; + return Integer.toString(errorCode); } } diff --git a/h2/src/main/org/h2/api/H2Type.java b/h2/src/main/org/h2/api/H2Type.java new file mode 100644 index 0000000000..ecc61311e8 --- /dev/null +++ b/h2/src/main/org/h2/api/H2Type.java @@ -0,0 +1,321 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.api; + +import java.sql.SQLType; + +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Data types of H2. + */ +public final class H2Type implements SQLType { + + // Character strings + + /** + * The CHARACTER data type. + */ + public static final H2Type CHAR = new H2Type(TypeInfo.getTypeInfo(Value.CHAR), "CHARACTER"); + + /** + * The CHARACTER VARYING data type. + */ + public static final H2Type VARCHAR = new H2Type(TypeInfo.TYPE_VARCHAR, "CHARACTER VARYING"); + + /** + * The CHARACTER LARGE OBJECT data type. + */ + public static final H2Type CLOB = new H2Type(TypeInfo.TYPE_CLOB, "CHARACTER LARGE OBJECT"); + + /** + * The VARCHAR_IGNORECASE data type. + */ + public static final H2Type VARCHAR_IGNORECASE = new H2Type(TypeInfo.TYPE_VARCHAR_IGNORECASE, "VARCHAR_IGNORECASE"); + + // Binary strings + + /** + * The BINARY data type. + */ + public static final H2Type BINARY = new H2Type(TypeInfo.getTypeInfo(Value.BINARY), "BINARY"); + + /** + * The BINARY VARYING data type. + */ + public static final H2Type VARBINARY = new H2Type(TypeInfo.TYPE_VARBINARY, "BINARY VARYING"); + + /** + * The BINARY LARGE OBJECT data type. + */ + public static final H2Type BLOB = new H2Type(TypeInfo.TYPE_BLOB, "BINARY LARGE OBJECT"); + + // Boolean + + /** + * The BOOLEAN data type + */ + public static final H2Type BOOLEAN = new H2Type(TypeInfo.TYPE_BOOLEAN, "BOOLEAN"); + + // Exact numeric data types + + /** + * The TINYINT data type. 
+ */ + public static final H2Type TINYINT = new H2Type(TypeInfo.TYPE_TINYINT, "TINYINT"); + + /** + * The SMALLINT data type. + */ + public static final H2Type SMALLINT = new H2Type(TypeInfo.TYPE_SMALLINT, "SMALLINT"); + + /** + * The INTEGER data type. + */ + public static final H2Type INTEGER = new H2Type(TypeInfo.TYPE_INTEGER, "INTEGER"); + + /** + * The BIGINT data type. + */ + public static final H2Type BIGINT = new H2Type(TypeInfo.TYPE_BIGINT, "BIGINT"); + + /** + * The NUMERIC data type. + */ + public static final H2Type NUMERIC = new H2Type(TypeInfo.TYPE_NUMERIC_FLOATING_POINT, "NUMERIC"); + + // Approximate numeric data types + + /** + * The REAL data type. + */ + public static final H2Type REAL = new H2Type(TypeInfo.TYPE_REAL, "REAL"); + + /** + * The DOUBLE PRECISION data type. + */ + public static final H2Type DOUBLE_PRECISION = new H2Type(TypeInfo.TYPE_DOUBLE, "DOUBLE PRECISION"); + + // Decimal floating-point type + + /** + * The DECFLOAT data type. + */ + public static final H2Type DECFLOAT = new H2Type(TypeInfo.TYPE_DECFLOAT, "DECFLOAT"); + + // Date-time data types + + /** + * The DATE data type. + */ + public static final H2Type DATE = new H2Type(TypeInfo.TYPE_DATE, "DATE"); + + /** + * The TIME data type. + */ + public static final H2Type TIME = new H2Type(TypeInfo.TYPE_TIME, "TIME"); + + /** + * The TIME WITH TIME ZONE data type. + */ + public static final H2Type TIME_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIME_TZ, "TIME WITH TIME ZONE"); + + /** + * The TIMESTAMP data type. + */ + public static final H2Type TIMESTAMP = new H2Type(TypeInfo.TYPE_TIMESTAMP, "TIMESTAMP"); + + /** + * The TIMESTAMP WITH TIME ZONE data type. + */ + public static final H2Type TIMESTAMP_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIMESTAMP_TZ, + "TIMESTAMP WITH TIME ZONE"); + + // Intervals + + /** + * The INTERVAL YEAR data type. 
+ */ + public static final H2Type INTERVAL_YEAR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_YEAR), "INTERVAL_YEAR"); + + /** + * The INTERVAL MONTH data type. + */ + public static final H2Type INTERVAL_MONTH = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MONTH), + "INTERVAL_MONTH"); + + /** + * The INTERVAL DAY data type. + */ + public static final H2Type INTERVAL_DAY = new H2Type(TypeInfo.TYPE_INTERVAL_DAY, "INTERVAL_DAY"); + + /** + * The INTERVAL HOUR data type. + */ + public static final H2Type INTERVAL_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_HOUR), "INTERVAL_HOUR"); + + /** + * The INTERVAL MINUTE data type. + */ + public static final H2Type INTERVAL_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE), + "INTERVAL_MINUTE"); + + /** + * The INTERVAL SECOND data type. + */ + public static final H2Type INTERVAL_SECOND = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_SECOND), + "INTERVAL_SECOND"); + + /** + * The INTERVAL YEAR TO MONTH data type. + */ + public static final H2Type INTERVAL_YEAR_TO_MONTH = new H2Type(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH, + "INTERVAL_YEAR_TO_MONTH"); + + /** + * The INTERVAL DAY TO HOUR data type. + */ + public static final H2Type INTERVAL_DAY_TO_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_HOUR), + "INTERVAL_DAY_TO_HOUR"); + + /** + * The INTERVAL DAY TO MINUTE data type. + */ + public static final H2Type INTERVAL_DAY_TO_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_MINUTE), + "INTERVAL_DAY_TO_MINUTE"); + + /** + * The INTERVAL DAY TO SECOND data type. + */ + public static final H2Type INTERVAL_DAY_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND, + "INTERVAL_DAY_TO_SECOND"); + + /** + * The INTERVAL HOUR TO MINUTE data type. + */ + public static final H2Type INTERVAL_HOUR_TO_MINUTE = new H2Type( // + TypeInfo.getTypeInfo(Value.INTERVAL_HOUR_TO_MINUTE), "INTERVAL_HOUR_TO_MINUTE"); + + /** + * The INTERVAL HOUR TO SECOND data type. 
+ */ + public static final H2Type INTERVAL_HOUR_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND, + "INTERVAL_HOUR_TO_SECOND"); + + /** + * The INTERVAL MINUTE TO SECOND data type. + */ + public static final H2Type INTERVAL_MINUTE_TO_SECOND = new H2Type( + TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE_TO_SECOND), "INTERVAL_MINUTE_TO_SECOND"); + + // Other JDBC + + /** + * The JAVA_OBJECT data type. + */ + public static final H2Type JAVA_OBJECT = new H2Type(TypeInfo.TYPE_JAVA_OBJECT, "JAVA_OBJECT"); + + // Other non-standard + + /** + * The ENUM data type. + */ + public static final H2Type ENUM = new H2Type(TypeInfo.TYPE_ENUM_UNDEFINED, "ENUM"); + + /** + * The GEOMETRY data type. + */ + public static final H2Type GEOMETRY = new H2Type(TypeInfo.TYPE_GEOMETRY, "GEOMETRY"); + + /** + * The JSON data type. + */ + public static final H2Type JSON = new H2Type(TypeInfo.TYPE_JSON, "JSON"); + + /** + * The UUID data type. + */ + public static final H2Type UUID = new H2Type(TypeInfo.TYPE_UUID, "UUID"); + + // Collections + + // Use arrayOf() for ARRAY + + // Use row() for ROW + + /** + * Returns ARRAY data type with the specified component type. + * + * @param componentType + * the type of elements + * @return ARRAY data type + */ + public static H2Type array(H2Type componentType) { + return new H2Type(TypeInfo.getTypeInfo(Value.ARRAY, -1L, -1, componentType.typeInfo), + "array(" + componentType.field + ')'); + } + + /** + * Returns ROW data type with specified types of fields and default names. + * + * @param fieldTypes + * the type of fields + * @return ROW data type + */ + public static H2Type row(H2Type... 
fieldTypes) { + int degree = fieldTypes.length; + TypeInfo[] row = new TypeInfo[degree]; + StringBuilder builder = new StringBuilder("row("); + for (int i = 0; i < degree; i++) { + H2Type t = fieldTypes[i]; + row[i] = t.typeInfo; + if (i > 0) { + builder.append(", "); + } + builder.append(t.field); + } + return new H2Type(TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(row)), + builder.append(')').toString()); + } + + private TypeInfo typeInfo; + + private String field; + + private H2Type(TypeInfo typeInfo, String field) { + this.typeInfo = typeInfo; + this.field = "H2Type." + field; + } + + @Override + public String getName() { + return typeInfo.toString(); + } + + @Override + public String getVendor() { + return "com.h2database"; + } + + /** + * Returns the vendor specific type number for the data type. The returned + * value is actual only for the current version of H2. + * + * @return the vendor specific data type + */ + @Override + public Integer getVendorTypeNumber() { + return typeInfo.getValueType(); + } + + @Override + public String toString() { + return field; + } + +} diff --git a/h2/src/main/org/h2/api/Interval.java b/h2/src/main/org/h2/api/Interval.java new file mode 100644 index 0000000000..42024b9466 --- /dev/null +++ b/h2/src/main/org/h2/api/Interval.java @@ -0,0 +1,635 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.api; + +import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; + +import org.h2.message.DbException; +import org.h2.util.IntervalUtils; + +/** + * INTERVAL representation for result sets. + */ +public final class Interval { + + private final IntervalQualifier qualifier; + + /** + * {@code false} for zero or positive intervals, {@code true} for negative + * intervals. 
+ */ + private final boolean negative; + + /** + * Non-negative long with value of leading field. For INTERVAL SECOND + * contains only integer part of seconds. + */ + private final long leading; + + /** + * Non-negative long with combined value of all remaining field, or 0 for + * single-field intervals, with exception for INTERVAL SECOND that uses this + * field to store fractional part of seconds measured in nanoseconds. + */ + private final long remaining; + + /** + * Creates a new INTERVAL YEAR. + * + * @param years + * years, |years|<1018 + * @return INTERVAL YEAR + */ + public static Interval ofYears(long years) { + return new Interval(IntervalQualifier.YEAR, years < 0, Math.abs(years), 0); + } + + /** + * Creates a new INTERVAL MONTH. + * + * @param months + * months, |months|<1018 + * @return INTERVAL MONTH + */ + public static Interval ofMonths(long months) { + return new Interval(IntervalQualifier.MONTH, months < 0, Math.abs(months), 0); + } + + /** + * Creates a new INTERVAL DAY. + * + * @param days + * days, |days|<1018 + * @return INTERVAL DAY + */ + public static Interval ofDays(long days) { + return new Interval(IntervalQualifier.DAY, days < 0, Math.abs(days), 0); + } + + /** + * Creates a new INTERVAL HOUR. + * + * @param hours + * hours, |hours|<1018 + * @return INTERVAL HOUR + */ + public static Interval ofHours(long hours) { + return new Interval(IntervalQualifier.HOUR, hours < 0, Math.abs(hours), 0); + } + + /** + * Creates a new INTERVAL MINUTE. + * + * @param minutes + * minutes, |minutes|<1018 + * @return interval + */ + public static Interval ofMinutes(long minutes) { + return new Interval(IntervalQualifier.MINUTE, minutes < 0, Math.abs(minutes), 0); + } + + /** + * Creates a new INTERVAL SECOND. 
+ * + * @param seconds + * seconds, |seconds|<1018 + * @return INTERVAL SECOND + */ + public static Interval ofSeconds(long seconds) { + return new Interval(IntervalQualifier.SECOND, seconds < 0, Math.abs(seconds), 0); + } + + /** + * Creates a new INTERVAL SECOND. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param seconds + * seconds, |seconds|<1018 + * @param nanos + * nanoseconds, |nanos|<1,000,000,000 + * @return INTERVAL SECOND + */ + public static Interval ofSeconds(long seconds, int nanos) { + // Interval is negative if any field is negative + boolean negative = (seconds | nanos) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (seconds > 0 || nanos > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + seconds = -seconds; + nanos = -nanos; + // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by + // constructor + } + return new Interval(IntervalQualifier.SECOND, negative, seconds, nanos); + } + + /** + * Creates a new INTERVAL SECOND. + * + * @param nanos + * nanoseconds (including seconds) + * @return INTERVAL SECOND + */ + public static Interval ofNanos(long nanos) { + boolean negative = nanos < 0; + if (negative) { + nanos = -nanos; + if (nanos < 0) { + // Long.MIN_VALUE = -9_223_372_036_854_775_808L + return new Interval(IntervalQualifier.SECOND, true, 9_223_372_036L, 854_775_808); + } + } + return new Interval(IntervalQualifier.SECOND, negative, nanos / NANOS_PER_SECOND, nanos % NANOS_PER_SECOND); + } + + /** + * Creates a new INTERVAL YEAR TO MONTH. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param years + * years, |years|<1018 + * @param months + * months, |months|<12 + * @return INTERVAL YEAR TO MONTH + */ + public static Interval ofYearsMonths(long years, int months) { + // Interval is negative if any field is negative + boolean negative = (years | months) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (years > 0 || months > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + years = -years; + months = -months; + // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by + // constructor + } + return new Interval(IntervalQualifier.YEAR_TO_MONTH, negative, years, months); + } + + /** + * Creates a new INTERVAL DAY TO HOUR. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param days + * days, |days|<1018 + * @param hours + * hours, |hours|<24 + * @return INTERVAL DAY TO HOUR + */ + public static Interval ofDaysHours(long days, int hours) { + // Interval is negative if any field is negative + boolean negative = (days | hours) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (days > 0 || hours > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + days = -days; + hours = -hours; + // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by + // constructor + } + return new Interval(IntervalQualifier.DAY_TO_HOUR, negative, days, hours); + } + + /** + * Creates a new INTERVAL DAY TO MINUTE. + * + *

    + * <p>
    + * Non-zero arguments should have the same sign.
    + * </p>

    + * + * @param days + * days, |days|<1018 + * @param hours + * hours, |hours|<24 + * @param minutes + * minutes, |minutes|<60 + * @return INTERVAL DAY TO MINUTE + */ + public static Interval ofDaysHoursMinutes(long days, int hours, int minutes) { + // Interval is negative if any field is negative + boolean negative = (days | hours | minutes) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (days > 0 || hours > 0 || minutes > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + days = -days; + hours = -hours; + minutes = -minutes; + if ((hours | minutes) < 0) { + // Integer.MIN_VALUE + throw new IllegalArgumentException(); + } + // days = Long.MIN_VALUE will be rejected by constructor + } + // Check only minutes. + // Overflow in days or hours will be detected by constructor + if (minutes >= 60) { + throw new IllegalArgumentException(); + } + return new Interval(IntervalQualifier.DAY_TO_MINUTE, negative, days, hours * 60L + minutes); + } + + /** + * Creates a new INTERVAL DAY TO SECOND. + * + *

    + * <p>
    + * Non-zero arguments should have the same sign.
    + * </p>

    + * + * @param days + * days, |days|<1018 + * @param hours + * hours, |hours|<24 + * @param minutes + * minutes, |minutes|<60 + * @param seconds + * seconds, |seconds|<60 + * @return INTERVAL DAY TO SECOND + */ + public static Interval ofDaysHoursMinutesSeconds(long days, int hours, int minutes, int seconds) { + return ofDaysHoursMinutesNanos(days, hours, minutes, seconds * NANOS_PER_SECOND); + } + + /** + * Creates a new INTERVAL DAY TO SECOND. + * + *

    + * <p>
    + * Non-zero arguments should have the same sign.
    + * </p>

    + * + * @param days + * days, |days|<1018 + * @param hours + * hours, |hours|<24 + * @param minutes + * minutes, |minutes|<60 + * @param nanos + * nanoseconds, |nanos|<60,000,000,000 + * @return INTERVAL DAY TO SECOND + */ + public static Interval ofDaysHoursMinutesNanos(long days, int hours, int minutes, long nanos) { + // Interval is negative if any field is negative + boolean negative = (days | hours | minutes | nanos) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (days > 0 || hours > 0 || minutes > 0 || nanos > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + days = -days; + hours = -hours; + minutes = -minutes; + nanos = -nanos; + if ((hours | minutes | nanos) < 0) { + // Integer.MIN_VALUE, Long.MIN_VALUE + throw new IllegalArgumentException(); + } + // days = Long.MIN_VALUE will be rejected by constructor + } + // Check only minutes and nanoseconds. + // Overflow in days or hours will be detected by constructor + if (minutes >= 60 || nanos >= NANOS_PER_MINUTE) { + throw new IllegalArgumentException(); + } + return new Interval(IntervalQualifier.DAY_TO_SECOND, negative, days, + (hours * 60L + minutes) * NANOS_PER_MINUTE + nanos); + } + + /** + * Creates a new INTERVAL HOUR TO MINUTE. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param hours + * hours, |hours|<1018 + * @param minutes + * minutes, |minutes|<60 + * @return INTERVAL HOUR TO MINUTE + */ + public static Interval ofHoursMinutes(long hours, int minutes) { + // Interval is negative if any field is negative + boolean negative = (hours | minutes) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (hours > 0 || minutes > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + hours = -hours; + minutes = -minutes; + // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by + // constructor + } + return new Interval(IntervalQualifier.HOUR_TO_MINUTE, negative, hours, minutes); + } + + /** + * Creates a new INTERVAL HOUR TO SECOND. + * + *

    + * <p>
    + * Non-zero arguments should have the same sign.
    + * </p>

    + * + * @param hours + * hours, |hours|<1018 + * @param minutes + * minutes, |minutes|<60 + * @param seconds + * seconds, |seconds|<60 + * @return INTERVAL HOUR TO SECOND + */ + public static Interval ofHoursMinutesSeconds(long hours, int minutes, int seconds) { + return ofHoursMinutesNanos(hours, minutes, seconds * NANOS_PER_SECOND); + } + + /** + * Creates a new INTERVAL HOUR TO SECOND. + * + *

    + * <p>
    + * Non-zero arguments should have the same sign.
    + * </p>

    + * + * @param hours + * hours, |hours|<1018 + * @param minutes + * minutes, |minutes|<60 + * @param nanos + * nanoseconds, |seconds|<60,000,000,000 + * @return INTERVAL HOUR TO SECOND + */ + public static Interval ofHoursMinutesNanos(long hours, int minutes, long nanos) { + // Interval is negative if any field is negative + boolean negative = (hours | minutes | nanos) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (hours > 0 || minutes > 0 || nanos > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + hours = -hours; + minutes = -minutes; + nanos = -nanos; + if ((minutes | nanos) < 0) { + // Integer.MIN_VALUE, Long.MIN_VALUE + throw new IllegalArgumentException(); + } + // hours = Long.MIN_VALUE will be rejected by constructor + } + // Check only nanoseconds. + // Overflow in hours or minutes will be detected by constructor + if (nanos >= NANOS_PER_MINUTE) { + throw new IllegalArgumentException(); + } + return new Interval(IntervalQualifier.HOUR_TO_SECOND, negative, hours, minutes * NANOS_PER_MINUTE + nanos); + } + + /** + * Creates a new INTERVAL MINUTE TO SECOND. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param minutes + * minutes, |minutes|<1018 + * @param seconds + * seconds, |seconds|<60 + * @return INTERVAL MINUTE TO SECOND + */ + public static Interval ofMinutesSeconds(long minutes, int seconds) { + return ofMinutesNanos(minutes, seconds * NANOS_PER_SECOND); + } + + /** + * Creates a new INTERVAL MINUTE TO SECOND. + * + *

    + * <p>
    + * If both arguments are not equal to zero they should have the same sign.
    + * </p>

    + * + * @param minutes + * minutes, |minutes|<1018 + * @param nanos + * nanoseconds, |nanos|<60,000,000,000 + * @return INTERVAL MINUTE TO SECOND + */ + public static Interval ofMinutesNanos(long minutes, long nanos) { + // Interval is negative if any field is negative + boolean negative = (minutes | nanos) < 0; + if (negative) { + // Ensure that all fields are negative or zero + if (minutes > 0 || nanos > 0) { + throw new IllegalArgumentException(); + } + // Make them positive + minutes = -minutes; + nanos = -nanos; + // Long.MIN_VALUE will be rejected by constructor + } + return new Interval(IntervalQualifier.MINUTE_TO_SECOND, negative, minutes, nanos); + } + + /** + * Creates a new interval. Do not use this constructor, use static methods + * instead. + * + * @param qualifier + * qualifier + * @param negative + * whether interval is negative + * @param leading + * value of leading field + * @param remaining + * combined value of all remaining fields + */ + public Interval(IntervalQualifier qualifier, boolean negative, long leading, long remaining) { + this.qualifier = qualifier; + try { + this.negative = IntervalUtils.validateInterval(qualifier, negative, leading, remaining); + } catch (DbException e) { + throw new IllegalArgumentException(); + } + this.leading = leading; + this.remaining = remaining; + } + + /** + * Returns qualifier of this interval. + * + * @return qualifier + */ + public IntervalQualifier getQualifier() { + return qualifier; + } + + /** + * Returns where the interval is negative. + * + * @return where the interval is negative + */ + public boolean isNegative() { + return negative; + } + + /** + * Returns value of leading field of this interval. For {@code SECOND} + * intervals returns integer part of seconds. + * + * @return value of leading field + */ + public long getLeading() { + return leading; + } + + /** + * Returns combined value of remaining fields of this interval. For + * {@code SECOND} intervals returns nanoseconds. 
+ * + * @return combined value of remaining fields + */ + public long getRemaining() { + return remaining; + } + + /** + * Returns years value, if any. + * + * @return years, or 0 + */ + public long getYears() { + return IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining); + } + + /** + * Returns months value, if any. + * + * @return months, or 0 + */ + public long getMonths() { + return IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining); + } + + /** + * Returns days value, if any. + * + * @return days, or 0 + */ + public long getDays() { + return IntervalUtils.daysFromInterval(qualifier, negative, leading, remaining); + } + + /** + * Returns hours value, if any. + * + * @return hours, or 0 + */ + public long getHours() { + return IntervalUtils.hoursFromInterval(qualifier, negative, leading, remaining); + } + + /** + * Returns minutes value, if any. + * + * @return minutes, or 0 + */ + public long getMinutes() { + return IntervalUtils.minutesFromInterval(qualifier, negative, leading, remaining); + } + + /** + * Returns value of integer part of seconds, if any. + * + * @return seconds, or 0 + */ + public long getSeconds() { + if (qualifier == IntervalQualifier.SECOND) { + return negative ? -leading : leading; + } + return getSecondsAndNanos() / NANOS_PER_SECOND; + } + + /** + * Returns value of fractional part of seconds (in nanoseconds), if any. + * + * @return nanoseconds, or 0 + */ + public long getNanosOfSecond() { + if (qualifier == IntervalQualifier.SECOND) { + return negative ? -remaining : remaining; + } + return getSecondsAndNanos() % NANOS_PER_SECOND; + } + + /** + * Returns seconds value measured in nanoseconds, if any. + * + *

    + * <p>
    + * This method returns a long value that cannot fit all possible values of
    + * INTERVAL SECOND. For very large intervals of this type use
    + * {@link #getSeconds()} and {@link #getNanosOfSecond()} instead. This
    + * method can be safely used for intervals of other day-time types.
    + * </p>

    + * + * @return nanoseconds (including seconds), or 0 + */ + public long getSecondsAndNanos() { + return IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + qualifier.hashCode(); + result = prime * result + (negative ? 1231 : 1237); + result = prime * result + (int) (leading ^ leading >>> 32); + result = prime * result + (int) (remaining ^ remaining >>> 32); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Interval)) { + return false; + } + Interval other = (Interval) obj; + return qualifier == other.qualifier && negative == other.negative && leading == other.leading + && remaining == other.remaining; + } + + @Override + public String toString() { + return IntervalUtils.appendInterval(new StringBuilder(), getQualifier(), negative, leading, remaining) + .toString(); + } + +} diff --git a/h2/src/main/org/h2/api/IntervalQualifier.java b/h2/src/main/org/h2/api/IntervalQualifier.java new file mode 100644 index 0000000000..1772d1790e --- /dev/null +++ b/h2/src/main/org/h2/api/IntervalQualifier.java @@ -0,0 +1,352 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.api; + +/** + * Interval qualifier. 
+ */ +public enum IntervalQualifier { + + /** + * {@code YEAR} + */ + YEAR, + + /** + * {@code MONTH} + */ + MONTH, + + /** + * {@code DAY} + */ + DAY, + + /** + * {@code HOUR} + */ + HOUR, + + /** + * {@code MINUTE} + */ + MINUTE, + + /** + * {@code SECOND} + */ + SECOND, + + /** + * {@code YEAR TO MONTH} + */ + YEAR_TO_MONTH, + + /** + * {@code DAY TO HOUR} + */ + DAY_TO_HOUR, + + /** + * {@code DAY TO MINUTE} + */ + DAY_TO_MINUTE, + + /** + * {@code DAY TO SECOND} + */ + DAY_TO_SECOND, + + /** + * {@code HOUR TO MINUTE} + */ + HOUR_TO_MINUTE, + + /** + * {@code HOUR TO SECOND} + */ + HOUR_TO_SECOND, + + /** + * {@code MINUTE TO SECOND} + */ + MINUTE_TO_SECOND; + + private final String string; + + /** + * Returns the interval qualifier with the specified ordinal value. + * + * @param ordinal + * Java ordinal value (0-based) + * @return interval qualifier with the specified ordinal value + */ + public static IntervalQualifier valueOf(int ordinal) { + switch (ordinal) { + case 0: + return YEAR; + case 1: + return MONTH; + case 2: + return DAY; + case 3: + return HOUR; + case 4: + return MINUTE; + case 5: + return SECOND; + case 6: + return YEAR_TO_MONTH; + case 7: + return DAY_TO_HOUR; + case 8: + return DAY_TO_MINUTE; + case 9: + return DAY_TO_SECOND; + case 10: + return HOUR_TO_MINUTE; + case 11: + return HOUR_TO_SECOND; + case 12: + return MINUTE_TO_SECOND; + default: + throw new IllegalArgumentException(); + } + } + + private IntervalQualifier() { + string = name().replace('_', ' ').intern(); + } + + /** + * Returns whether interval with this qualifier is a year-month interval. + * + * @return whether interval with this qualifier is a year-month interval + */ + public boolean isYearMonth() { + return this == YEAR || this == MONTH || this == YEAR_TO_MONTH; + } + + /** + * Returns whether interval with this qualifier is a day-time interval. 
+ * + * @return whether interval with this qualifier is a day-time interval + */ + public boolean isDayTime() { + return !isYearMonth(); + } + + /** + * Returns whether interval with this qualifier has years. + * + * @return whether interval with this qualifier has years + */ + public boolean hasYears() { + return this == YEAR || this == YEAR_TO_MONTH; + } + + /** + * Returns whether interval with this qualifier has months. + * + * @return whether interval with this qualifier has months + */ + public boolean hasMonths() { + return this == MONTH || this == YEAR_TO_MONTH; + } + + /** + * Returns whether interval with this qualifier has days. + * + * @return whether interval with this qualifier has days + */ + public boolean hasDays() { + switch (this) { + case DAY: + case DAY_TO_HOUR: + case DAY_TO_MINUTE: + case DAY_TO_SECOND: + return true; + default: + return false; + } + } + + /** + * Returns whether interval with this qualifier has hours. + * + * @return whether interval with this qualifier has hours + */ + public boolean hasHours() { + switch (this) { + case HOUR: + case DAY_TO_HOUR: + case DAY_TO_MINUTE: + case DAY_TO_SECOND: + case HOUR_TO_MINUTE: + case HOUR_TO_SECOND: + return true; + default: + return false; + } + } + + /** + * Returns whether interval with this qualifier has minutes. + * + * @return whether interval with this qualifier has minutes + */ + public boolean hasMinutes() { + switch (this) { + case MINUTE: + case DAY_TO_MINUTE: + case DAY_TO_SECOND: + case HOUR_TO_MINUTE: + case HOUR_TO_SECOND: + case MINUTE_TO_SECOND: + return true; + default: + return false; + } + } + + /** + * Returns whether interval with this qualifier has seconds. 
+ * + * @return whether interval with this qualifier has seconds + */ + public boolean hasSeconds() { + switch (this) { + case SECOND: + case DAY_TO_SECOND: + case HOUR_TO_SECOND: + case MINUTE_TO_SECOND: + return true; + default: + return false; + } + } + + /** + * Returns whether interval with this qualifier has multiple fields. + * + * @return whether interval with this qualifier has multiple fields + */ + public boolean hasMultipleFields() { + return ordinal() > 5; + } + + @Override + public String toString() { + return string; + } + + /** + * Returns full type name. + * + * @param precision precision, or {@code -1} + * @param scale fractional seconds precision, or {@code -1} + * @return full type name + */ + public String getTypeName(int precision, int scale) { + return getTypeName(new StringBuilder(), precision, scale, false).toString(); + } + + /** + * Appends full type name to the specified string builder. + * + * @param builder string builder + * @param precision precision, or {@code -1} + * @param scale fractional seconds precision, or {@code -1} + * @param qualifierOnly if {@code true}, don't add the INTERVAL prefix + * @return the specified string builder + */ + public StringBuilder getTypeName(StringBuilder builder, int precision, int scale, boolean qualifierOnly) { + if (!qualifierOnly) { + builder.append("INTERVAL "); + } + switch (this) { + case YEAR: + case MONTH: + case DAY: + case HOUR: + case MINUTE: + builder.append(string); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + break; + case SECOND: + builder.append(string); + if (precision > 0 || scale >= 0) { + builder.append('(').append(precision > 0 ? 
precision : 2); + if (scale >= 0) { + builder.append(", ").append(scale); + } + builder.append(')'); + } + break; + case YEAR_TO_MONTH: + builder.append("YEAR"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO MONTH"); + break; + case DAY_TO_HOUR: + builder.append("DAY"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO HOUR"); + break; + case DAY_TO_MINUTE: + builder.append("DAY"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO MINUTE"); + break; + case DAY_TO_SECOND: + builder.append("DAY"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO SECOND"); + if (scale >= 0) { + builder.append('(').append(scale).append(')'); + } + break; + case HOUR_TO_MINUTE: + builder.append("HOUR"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO MINUTE"); + break; + case HOUR_TO_SECOND: + builder.append("HOUR"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO SECOND"); + if (scale >= 0) { + builder.append('(').append(scale).append(')'); + } + break; + case MINUTE_TO_SECOND: + builder.append("MINUTE"); + if (precision > 0) { + builder.append('(').append(precision).append(')'); + } + builder.append(" TO SECOND"); + if (scale >= 0) { + builder.append('(').append(scale).append(')'); + } + } + return builder; + } + +} diff --git a/h2/src/main/org/h2/api/JavaObjectSerializer.java b/h2/src/main/org/h2/api/JavaObjectSerializer.java index 748a174de7..9daa53065d 100644 --- a/h2/src/main/org/h2/api/JavaObjectSerializer.java +++ b/h2/src/main/org/h2/api/JavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; @@ -18,6 +18,7 @@ public interface JavaObjectSerializer { * * @param obj the object to serialize * @return the byte array of the serialized object + * @throws Exception on failure */ byte[] serialize(Object obj) throws Exception; @@ -26,6 +27,7 @@ public interface JavaObjectSerializer { * * @param bytes the byte array of the serialized object * @return the object + * @throws Exception on failure */ Object deserialize(byte[] bytes) throws Exception; diff --git a/h2/src/main/org/h2/api/TableEngine.java b/h2/src/main/org/h2/api/TableEngine.java index b123a35e94..497b291949 100644 --- a/h2/src/main/org/h2/api/TableEngine.java +++ b/h2/src/main/org/h2/api/TableEngine.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.api; -import org.h2.table.Table; import org.h2.command.ddl.CreateTableData; +import org.h2.table.Table; /** * A class that implements this interface can create custom table diff --git a/h2/src/main/org/h2/api/Trigger.java b/h2/src/main/org/h2/api/Trigger.java index 3ce8f09249..37a1cb74c2 100644 --- a/h2/src/main/org/h2/api/Trigger.java +++ b/h2/src/main/org/h2/api/Trigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.api; @@ -49,9 +49,12 @@ public interface Trigger { * operation is performed * @param type the operation type: INSERT, UPDATE, DELETE, SELECT, or a * combination (this parameter is a bit field) + * @throws SQLException on SQL exception */ - void init(Connection conn, String schemaName, String triggerName, - String tableName, boolean before, int type) throws SQLException; + default void init(Connection conn, String schemaName, String triggerName, + String tableName, boolean before, int type) throws SQLException { + // Does nothing by default + } /** * This method is called for each triggered action. The method is called @@ -82,12 +85,20 @@ void fire(Connection conn, Object[] oldRow, Object[] newRow) * This method is called when the database is closed. * If the method throws an exception, it will be logged, but * closing the database will continue. + * + * @throws SQLException on SQL exception */ - void close() throws SQLException; + default void close() throws SQLException { + // Does nothing by default + } /** * This method is called when the trigger is dropped. + * + * @throws SQLException on SQL exception */ - void remove() throws SQLException; + default void remove() throws SQLException { + // Does nothing by default + } } diff --git a/h2/src/main/org/h2/api/UserToRolesMapper.java b/h2/src/main/org/h2/api/UserToRolesMapper.java new file mode 100644 index 0000000000..55d59468e2 --- /dev/null +++ b/h2/src/main/org/h2/api/UserToRolesMapper.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.api; + +import java.util.Collection; + +import org.h2.security.auth.AuthenticationException; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.Configurable; + +/** + * A class that implement this interface can be used during authentication to + * map external users to database roles. + *

    + * <p>
    + * This feature is experimental and subject to change
    + * </p>

    + */ +public interface UserToRolesMapper extends Configurable { + + /** + * Map user identified by authentication info to a set of granted roles. + * + * @param authenticationInfo + * authentication information + * @return list of roles to be assigned to the user temporary + * @throws AuthenticationException + * on authentication exception + */ + Collection mapUserToRoles(AuthenticationInfo authenticationInfo) throws AuthenticationException; +} diff --git a/h2/src/main/org/h2/api/package.html b/h2/src/main/org/h2/api/package.html index ed9c0e7f20..3dd9f31c6c 100644 --- a/h2/src/main/org/h2/api/package.html +++ b/h2/src/main/org/h2/api/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/bnf/Bnf.java b/h2/src/main/org/h2/bnf/Bnf.java index 9afa92fcb4..3faccea4e4 100644 --- a/h2/src/main/org/h2/bnf/Bnf.java +++ b/h2/src/main/org/h2/bnf/Bnf.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -14,10 +14,9 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.StringTokenizer; - import org.h2.bnf.context.DbContextRule; +import org.h2.command.dml.Help; import org.h2.tools.Csv; -import org.h2.util.New; import org.h2.util.StringUtils; import org.h2.util.Utils; @@ -31,7 +30,7 @@ public class Bnf { * The rule map. The key is lowercase, and all spaces * are replaces with underscore. 
*/ - private final HashMap ruleMap = New.hashMap(); + private final HashMap ruleMap = new HashMap<>(); private String syntax; private String currentToken; private String[] tokens; @@ -46,6 +45,8 @@ public class Bnf { * * @param csv if not specified, the help.csv is used * @return a new instance + * @throws SQLException on failure + * @throws IOException on failure */ public static Bnf getInstance(Reader csv) throws SQLException, IOException { Bnf bnf = new Bnf(); @@ -57,6 +58,17 @@ public static Bnf getInstance(Reader csv) throws SQLException, IOException { return bnf; } + /** + * Add an alias for a rule. + * + * @param name for example "procedure" + * @param replacement for example "@func@" + */ + public void addAlias(String name, String replacement) { + RuleHead head = ruleMap.get(replacement); + ruleMap.put(name, head); + } + private void addFixedRule(String name, int fixedType) { Rule rule = new RuleFixed(fixedType); addRule(name, "Fixed", rule); @@ -65,16 +77,15 @@ private void addFixedRule(String name, int fixedType) { private RuleHead addRule(String topic, String section, Rule rule) { RuleHead head = new RuleHead(section, topic, rule); String key = StringUtils.toLowerEnglish(topic.trim().replace(' ', '_')); - if (ruleMap.get(key) != null) { + if (ruleMap.putIfAbsent(key, head) != null) { throw new AssertionError("already exists: " + topic); } - ruleMap.put(key, head); return head; } private void parse(Reader reader) throws SQLException, IOException { Rule functions = null; - statements = New.arrayList(); + statements = new ArrayList<>(); Csv csv = new Csv(); csv.setLineCommentCharacter('#'); ResultSet rs = csv.read(reader, null); @@ -84,7 +95,7 @@ private void parse(Reader reader) throws SQLException, IOException { continue; } String topic = rs.getString("TOPIC"); - syntax = rs.getString("SYNTAX").trim(); + syntax = Help.stripAnnotationsFromSyntax(rs.getString("SYNTAX")); currentTopic = section; tokens = tokenize(); index = 0; @@ -108,9 +119,10 @@ private 
void parse(Reader reader) throws SQLException, IOException { addFixedRule("@hms@", RuleFixed.HMS); addFixedRule("@nanos@", RuleFixed.NANOS); addFixedRule("anything_except_single_quote", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE); + addFixedRule("single_character", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE); addFixedRule("anything_except_double_quote", RuleFixed.ANY_EXCEPT_DOUBLE_QUOTE); addFixedRule("anything_until_end_of_line", RuleFixed.ANY_UNTIL_EOL); - addFixedRule("anything_until_end_comment", RuleFixed.ANY_UNTIL_END); + addFixedRule("anything_until_comment_start_or_end", RuleFixed.ANY_UNTIL_END); addFixedRule("anything_except_two_dollar_signs", RuleFixed.ANY_EXCEPT_2_DOLLAR); addFixedRule("anything", RuleFixed.ANY_WORD); addFixedRule("@hex_start@", RuleFixed.HEX_START); @@ -120,6 +132,7 @@ private void parse(Reader reader) throws SQLException, IOException { addFixedRule("@digit@", RuleFixed.DIGIT); addFixedRule("@open_bracket@", RuleFixed.OPEN_BRACKET); addFixedRule("@close_bracket@", RuleFixed.CLOSE_BRACKET); + addFixedRule("json_text", RuleFixed.JSON_TEXT); } /** @@ -200,6 +213,28 @@ private Rule parseList() { return r; } + private RuleExtension parseExtension(boolean compatibility) { + read(); + Rule r; + if (firstChar == '[') { + read(); + r = parseOr(); + r = new RuleOptional(r); + if (firstChar != ']') { + throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax); + } + } else if (firstChar == '{') { + read(); + r = parseOr(); + if (firstChar != '}') { + throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax); + } + } else { + r = parseOr(); + } + return new RuleExtension(r, compatibility); + } + private Rule parseToken() { Rule r; if ((firstChar >= 'A' && firstChar <= 'Z') @@ -208,24 +243,30 @@ private Rule parseToken() { r = new RuleElement(currentToken, currentTopic); } else if (firstChar == '[') { read(); - Rule r2 = parseOr(); - r = new RuleOptional(r2); + r = parseOr(); + r = new RuleOptional(r); if (firstChar != 
']') { - throw new AssertionError("expected ], got " + currentToken - + " syntax:" + syntax); + throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax); } } else if (firstChar == '{') { read(); r = parseOr(); if (firstChar != '}') { - throw new AssertionError("expected }, got " + currentToken - + " syntax:" + syntax); + throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax); + } + } else if (firstChar == '@') { + if ("@commaDots@".equals(currentToken)) { + r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false); + r = new RuleRepeat(r, true); + } else if ("@dots@".equals(currentToken)) { + r = new RuleRepeat(lastRepeat, false); + } else if ("@c@".equals(currentToken)) { + r = parseExtension(true); + } else if ("@h2@".equals(currentToken)) { + r = parseExtension(false); + } else { + r = new RuleElement(currentToken, currentTopic); } - } else if ("@commaDots@".equals(currentToken)) { - r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false); - r = new RuleRepeat(r, true); - } else if ("@dots@".equals(currentToken)) { - r = new RuleRepeat(lastRepeat, false); } else { r = new RuleElement(currentToken, currentTopic); } @@ -244,10 +285,25 @@ private void read() { } } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < index; i++) { + builder.append(tokens[i]).append(' '); + } + builder.append("[*]"); + for (int i = index; i < tokens.length; i++) { + builder.append(' ').append(tokens[i]); + } + return builder.toString(); + } + private String[] tokenize() { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); syntax = StringUtils.replaceAll(syntax, "yyyy-MM-dd", "@ymd@"); syntax = StringUtils.replaceAll(syntax, "hh:mm:ss", "@hms@"); + syntax = StringUtils.replaceAll(syntax, "hh:mm", "@hms@"); + syntax = StringUtils.replaceAll(syntax, "mm:ss", "@hms@"); syntax = StringUtils.replaceAll(syntax, "nnnnnnnnn", 
"@nanos@"); syntax = StringUtils.replaceAll(syntax, "function", "@func@"); syntax = StringUtils.replaceAll(syntax, "0x", "@hexStart@"); @@ -272,7 +328,7 @@ private String[] tokenize() { } list.add(s); } - return list.toArray(new String[list.size()]); + return list.toArray(new String[0]); } /** @@ -351,7 +407,7 @@ public ArrayList getStatements() { * @return the tokenizer */ public static StringTokenizer getTokenizer(String s) { - return new StringTokenizer(s, " [](){}|.,\r\n<>:-+*/=<\">!'$", true); + return new StringTokenizer(s, " [](){}|.,\r\n<>:-+*/=\"!'$", true); } } diff --git a/h2/src/main/org/h2/bnf/BnfVisitor.java b/h2/src/main/org/h2/bnf/BnfVisitor.java index c379f30ed9..1a8ec01d6f 100644 --- a/h2/src/main/org/h2/bnf/BnfVisitor.java +++ b/h2/src/main/org/h2/bnf/BnfVisitor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -51,4 +51,19 @@ public interface BnfVisitor { */ void visitRuleOptional(Rule rule); + /** + * Visit an OR list of optional rules. + * + * @param list the optional rules + */ + void visitRuleOptional(ArrayList list); + + /** + * Visit a rule with non-standard extension. + * + * @param rule the rule + * @param compatibility whether this rule exists for compatibility only + */ + void visitRuleExtension(Rule rule, boolean compatibility); + } diff --git a/h2/src/main/org/h2/bnf/Rule.java b/h2/src/main/org/h2/bnf/Rule.java index bb8858f3c7..0070e4e28b 100644 --- a/h2/src/main/org/h2/bnf/Rule.java +++ b/h2/src/main/org/h2/bnf/Rule.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; diff --git a/h2/src/main/org/h2/bnf/RuleElement.java b/h2/src/main/org/h2/bnf/RuleElement.java index fec0fa0ef6..aca908583b 100644 --- a/h2/src/main/org/h2/bnf/RuleElement.java +++ b/h2/src/main/org/h2/bnf/RuleElement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -77,4 +77,9 @@ public boolean autoComplete(Sentence sentence) { return link.autoComplete(sentence); } + @Override + public String toString() { + return name; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleExtension.java b/h2/src/main/org/h2/bnf/RuleExtension.java new file mode 100644 index 0000000000..217a946da7 --- /dev/null +++ b/h2/src/main/org/h2/bnf/RuleExtension.java @@ -0,0 +1,49 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.bnf; + +import java.util.HashMap; + +/** + * Represents a non-standard syntax. 
+ */ +public class RuleExtension implements Rule { + + private final Rule rule; + private final boolean compatibility; + + private boolean mapSet; + + public RuleExtension(Rule rule, boolean compatibility) { + this.rule = rule; + this.compatibility = compatibility; + } + + @Override + public void accept(BnfVisitor visitor) { + visitor.visitRuleExtension(rule, compatibility); + } + + @Override + public void setLinks(HashMap ruleMap) { + if (!mapSet) { + rule.setLinks(ruleMap); + mapSet = true; + } + } + @Override + public boolean autoComplete(Sentence sentence) { + sentence.stopIfRequired(); + rule.autoComplete(sentence); + return true; + } + + @Override + public String toString() { + return (compatibility ? "@c@ " : "@h2@ ") + rule.toString(); + } + +} diff --git a/h2/src/main/org/h2/bnf/RuleFixed.java b/h2/src/main/org/h2/bnf/RuleFixed.java index a8057dad71..8557e0ae52 100644 --- a/h2/src/main/org/h2/bnf/RuleFixed.java +++ b/h2/src/main/org/h2/bnf/RuleFixed.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; @@ -22,6 +22,7 @@ public class RuleFixed implements Rule { public static final int HEX_START = 10, CONCAT = 11; public static final int AZ_UNDERSCORE = 12, AF = 13, DIGIT = 14; public static final int OPEN_BRACKET = 15, CLOSE_BRACKET = 16; + public static final int JSON_TEXT = 17; private final int type; @@ -44,7 +45,8 @@ public boolean autoComplete(Sentence sentence) { sentence.stopIfRequired(); String query = sentence.getQuery(); String s = query; - switch(type) { + boolean removeTrailingSpaces = false; + switch (type) { case YMD: while (s.length() > 0 && "0123456789-".indexOf(s.charAt(0)) >= 0) { s = s.substring(1); @@ -52,6 +54,8 @@ public boolean autoComplete(Sentence sentence) { if (s.length() == 0) { sentence.add("2006-01-01", "1", Sentence.KEYWORD); } + // needed for timestamps + removeTrailingSpaces = true; break; case HMS: while (s.length() > 0 && "0123456789:".indexOf(s.charAt(0)) >= 0) { @@ -68,6 +72,7 @@ public boolean autoComplete(Sentence sentence) { if (s.length() == 0) { sentence.add("nanoseconds", "0", Sentence.KEYWORD); } + removeTrailingSpaces = true; break; case ANY_EXCEPT_SINGLE_QUOTE: while (true) { @@ -111,6 +116,7 @@ public boolean autoComplete(Sentence sentence) { } break; case ANY_WORD: + case JSON_TEXT: while (s.length() > 0 && !Bnf.startWithSpace(s)) { s = s.substring(1); } @@ -135,6 +141,7 @@ public boolean autoComplete(Sentence sentence) { } else if (s.length() == 0) { sentence.add("||", "||", Sentence.KEYWORD); } + removeTrailingSpaces = true; break; case AZ_UNDERSCORE: if (s.length() > 0 && @@ -170,6 +177,7 @@ public boolean autoComplete(Sentence sentence) { } else if (s.charAt(0) == '[') { s = s.substring(1); } + removeTrailingSpaces = true; break; case CLOSE_BRACKET: if (s.length() == 0) { @@ -177,6 +185,7 @@ public boolean autoComplete(Sentence sentence) { } else if (s.charAt(0) == ']') { s = s.substring(1); } + removeTrailingSpaces = true; break; // no autocomplete support for 
comments // (comments are not reachable in the bnf tree) @@ -186,8 +195,14 @@ public boolean autoComplete(Sentence sentence) { throw new AssertionError("type="+type); } if (!s.equals(query)) { - while (Bnf.startWithSpace(s)) { - s = s.substring(1); + // can not always remove spaces here, because a repeat + // rule for a-z would remove multiple words + // but we have to remove spaces after '||' + // and after ']' + if (removeTrailingSpaces) { + while (Bnf.startWithSpace(s)) { + s = s.substring(1); + } } sentence.setQuery(s); return true; @@ -195,4 +210,9 @@ public boolean autoComplete(Sentence sentence) { return false; } + @Override + public String toString() { + return "#" + type; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleHead.java b/h2/src/main/org/h2/bnf/RuleHead.java index f280c3dcca..95891bd1a0 100644 --- a/h2/src/main/org/h2/bnf/RuleHead.java +++ b/h2/src/main/org/h2/bnf/RuleHead.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; diff --git a/h2/src/main/org/h2/bnf/RuleList.java b/h2/src/main/org/h2/bnf/RuleList.java index 7518161365..30e8f67893 100644 --- a/h2/src/main/org/h2/bnf/RuleList.java +++ b/h2/src/main/org/h2/bnf/RuleList.java @@ -1,25 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; import java.util.ArrayList; import java.util.HashMap; -import org.h2.util.New; + +import org.h2.util.Utils; /** * Represents a sequence of BNF rules, or a list of alternative rules. 
*/ public class RuleList implements Rule { - private final boolean or; - private final ArrayList list; + final boolean or; + final ArrayList list; private boolean mapSet; public RuleList(Rule first, Rule next, boolean or) { - list = New.arrayList(); + list = Utils.newSmallArrayList(); if (first instanceof RuleList && ((RuleList) first).or == or) { list.addAll(((RuleList) first).list); } else { @@ -70,4 +71,20 @@ public boolean autoComplete(Sentence sentence) { return true; } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + for (int i = 0, l = list.size(); i < l; i++) { + if (i > 0) { + if (or) { + builder.append(" | "); + } else { + builder.append(' '); + } + } + builder.append(list.get(i).toString()); + } + return builder.toString(); + } + } diff --git a/h2/src/main/org/h2/bnf/RuleOptional.java b/h2/src/main/org/h2/bnf/RuleOptional.java index a5f2f7aa74..52cfee7f42 100644 --- a/h2/src/main/org/h2/bnf/RuleOptional.java +++ b/h2/src/main/org/h2/bnf/RuleOptional.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; @@ -20,6 +20,13 @@ public RuleOptional(Rule rule) { @Override public void accept(BnfVisitor visitor) { + if (rule instanceof RuleList) { + RuleList ruleList = (RuleList) rule; + if (ruleList.or) { + visitor.visitRuleOptional(ruleList.list); + return; + } + } visitor.visitRuleOptional(rule); } @@ -37,4 +44,9 @@ public boolean autoComplete(Sentence sentence) { return true; } + @Override + public String toString() { + return '[' + rule.toString() + ']'; + } + } diff --git a/h2/src/main/org/h2/bnf/RuleRepeat.java b/h2/src/main/org/h2/bnf/RuleRepeat.java index 5c27438c40..347d03a8e7 100644 --- a/h2/src/main/org/h2/bnf/RuleRepeat.java +++ b/h2/src/main/org/h2/bnf/RuleRepeat.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf; @@ -36,7 +36,17 @@ public boolean autoComplete(Sentence sentence) { while (rule.autoComplete(sentence)) { // nothing to do } + String s = sentence.getQuery(); + while (Bnf.startWithSpace(s)) { + s = s.substring(1); + } + sentence.setQuery(s); return true; } + @Override + public String toString() { + return comma ? ", ..." : " ..."; + } + } diff --git a/h2/src/main/org/h2/bnf/Sentence.java b/h2/src/main/org/h2/bnf/Sentence.java index 768d6fa2a5..a0993b0892 100644 --- a/h2/src/main/org/h2/bnf/Sentence.java +++ b/h2/src/main/org/h2/bnf/Sentence.java @@ -1,16 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf; import java.util.HashMap; import java.util.HashSet; +import java.util.Objects; import org.h2.bnf.context.DbSchema; import org.h2.bnf.context.DbTableOrView; -import org.h2.util.New; import org.h2.util.StringUtils; /** @@ -36,12 +36,12 @@ public class Sentence { */ public static final int FUNCTION = 2; - private static final long MAX_PROCESSING_TIME = 100; + private static final int MAX_PROCESSING_TIME = 100; /** * The map of next tokens in the form type#tokenName token. */ - private final HashMap next = New.hashMap(); + private final HashMap next = new HashMap<>(); /** * The complete query string. @@ -53,7 +53,7 @@ public class Sentence { */ private String queryUpper; - private long stopAt; + private long stopAtNs; private DbSchema lastMatchedSchema; private DbTableOrView lastMatchedTable; private DbTableOrView lastTable; @@ -64,7 +64,7 @@ public class Sentence { * Start the timer to make sure processing doesn't take too long. */ public void start() { - stopAt = System.currentTimeMillis() + MAX_PROCESSING_TIME; + stopAtNs = System.nanoTime() + MAX_PROCESSING_TIME * 1_000_000L; } /** @@ -73,7 +73,7 @@ public void start() { * If processing is stopped, this methods throws an IllegalStateException */ public void stopIfRequired() { - if (System.currentTimeMillis() > stopAt) { + if (System.nanoTime() - stopAtNs > 0L) { throw new IllegalStateException(); } } @@ -97,7 +97,7 @@ public void add(String n, String string, int type) { */ public void addAlias(String alias, DbTableOrView table) { if (aliases == null) { - aliases = New.hashMap(); + aliases = new HashMap<>(); } aliases.put(alias, table); } @@ -110,7 +110,7 @@ public void addAlias(String alias, DbTableOrView table) { public void addTable(DbTableOrView table) { lastTable = table; if (tables == null) { - tables = New.hashSet(); + tables = new HashSet<>(); } tables.add(table); } @@ -185,7 +185,7 @@ public DbTableOrView getLastMatchedTable() { * @param query the query 
string */ public void setQuery(String query) { - if (!StringUtils.equals(this.query, query)) { + if (!Objects.equals(this.query, query)) { this.query = query; this.queryUpper = StringUtils.toUpperEnglish(query); } diff --git a/h2/src/main/org/h2/bnf/context/DbColumn.java b/h2/src/main/org/h2/bnf/context/DbColumn.java index 10e3bfbe16..db187c3e0a 100644 --- a/h2/src/main/org/h2/bnf/context/DbColumn.java +++ b/h2/src/main/org/h2/bnf/context/DbColumn.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -21,37 +21,36 @@ public class DbColumn { private final String dataType; - private int position; + private final int position; private DbColumn(DbContents contents, ResultSet rs, boolean procedureColumn) throws SQLException { name = rs.getString("COLUMN_NAME"); quotedName = contents.quoteIdentifier(name); + position = rs.getInt("ORDINAL_POSITION"); + if (contents.isH2() && !procedureColumn) { + dataType = rs.getString("COLUMN_TYPE"); + return; + } String type = rs.getString("TYPE_NAME"); // a procedures column size is identified by PRECISION, for table this // is COLUMN_SIZE - String precisionColumnName; + String precisionColumnName, scaleColumnName; if (procedureColumn) { precisionColumnName = "PRECISION"; + scaleColumnName = "SCALE"; } else { precisionColumnName = "COLUMN_SIZE"; + scaleColumnName = "DECIMAL_DIGITS"; } int precision = rs.getInt(precisionColumnName); - position = rs.getInt("ORDINAL_POSITION"); - boolean isSQLite = contents.isSQLite(); - if (precision > 0 && !isSQLite) { - type += "(" + precision; - String scaleColumnName; - if (procedureColumn) { - scaleColumnName = "SCALE"; + if (precision > 0 && !contents.isSQLite()) { + int scale = 
rs.getInt(scaleColumnName); + if (scale > 0) { + type = type + '(' + precision + ", " + scale + ')'; } else { - scaleColumnName = "DECIMAL_DIGITS"; - } - int prec = rs.getInt(scaleColumnName); - if (prec > 0) { - type += ", " + prec; + type = type + '(' + precision + ')'; } - type += ")"; } if (rs.getInt("NULLABLE") == DatabaseMetaData.columnNoNulls) { type += " NOT NULL"; @@ -65,6 +64,7 @@ private DbColumn(DbContents contents, ResultSet rs, boolean procedureColumn) * @param contents the database contents * @param rs the result set * @return the column + * @throws SQLException on failure */ public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs) throws SQLException { @@ -77,6 +77,7 @@ public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs) * @param contents the database contents * @param rs the result set * @return the column + * @throws SQLException on failure */ public static DbColumn getColumn(DbContents contents, ResultSet rs) throws SQLException { diff --git a/h2/src/main/org/h2/bnf/context/DbContents.java b/h2/src/main/org/h2/bnf/context/DbContents.java index 1d061fbf28..1cedefb0da 100644 --- a/h2/src/main/org/h2/bnf/context/DbContents.java +++ b/h2/src/main/org/h2/bnf/context/DbContents.java @@ -1,20 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf.context; import java.sql.Connection; import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; -import org.h2.command.Parser; -import org.h2.util.New; +import org.h2.engine.Session; +import org.h2.jdbc.JdbcConnection; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * Keeps meta data information about a database. @@ -29,110 +30,121 @@ public class DbContents { private boolean isPostgreSQL; private boolean isDerby; private boolean isSQLite; - private boolean isH2ModeMySQL; private boolean isMySQL; private boolean isFirebird; private boolean isMSSQLServer; + private boolean isDB2; + + private boolean databaseToUpper, databaseToLower; + + private boolean mayHaveStandardViews = true; /** - * @return The default schema. + * @return the default schema. */ public DbSchema getDefaultSchema() { return defaultSchema; } /** - * @return True if this is an Apache Derby database. + * @return true if this is an Apache Derby database. */ public boolean isDerby() { return isDerby; } /** - * @return True if this is a Firebird database. + * @return true if this is a Firebird database. */ public boolean isFirebird() { return isFirebird; } /** - * @return True if this is a H2 database. + * @return true if this is a H2 database. */ public boolean isH2() { return isH2; } /** - * @return True if this is a H2 database in MySQL mode. - */ - public boolean isH2ModeMySQL() { - return isH2ModeMySQL; - } - - /** - * @return True if this is a MS SQL Server database. + * @return true if this is a MS SQL Server database. */ public boolean isMSSQLServer() { return isMSSQLServer; } /** - * @return True if this is a MySQL database. + * @return true if this is a MySQL database. */ public boolean isMySQL() { return isMySQL; } /** - * @return True if this is an Oracle database. 
+ * @return true if this is an Oracle database. */ public boolean isOracle() { return isOracle; } /** - * @return True if this is a PostgreSQL database. + * @return true if this is a PostgreSQL database. */ public boolean isPostgreSQL() { return isPostgreSQL; } /** - * @return True if this is an SQLite database. + * @return true if this is an SQLite database. */ public boolean isSQLite() { return isSQLite; } /** - * @return The list of schemas. + * @return true if this is an IBM DB2 database. + */ + public boolean isDB2() { + return isDB2; + } + + /** + * @return the list of schemas. */ public DbSchema[] getSchemas() { return schemas; } + /** + * Returns whether standard INFORMATION_SCHEMA.VIEWS may be supported. + * + * @return whether standard INFORMATION_SCHEMA.VIEWS may be supported + */ + public boolean mayHaveStandardViews() { + return mayHaveStandardViews; + } + + /** + * @param mayHaveStandardViews + * whether standard INFORMATION_SCHEMA.VIEWS is detected as + * supported + */ + public void setMayHaveStandardViews(boolean mayHaveStandardViews) { + this.mayHaveStandardViews = mayHaveStandardViews; + } + /** * Read the contents of this database from the database meta data. 
* * @param url the database URL * @param conn the connection + * @throws SQLException on failure */ public synchronized void readContents(String url, Connection conn) throws SQLException { isH2 = url.startsWith("jdbc:h2:"); - if (isH2) { - PreparedStatement prep = conn.prepareStatement( - "SELECT UPPER(VALUE) FROM INFORMATION_SCHEMA.SETTINGS " + - "WHERE NAME=?"); - prep.setString(1, "MODE"); - ResultSet rs = prep.executeQuery(); - rs.next(); - if ("MYSQL".equals(rs.getString(1))) { - isH2ModeMySQL = true; - } - rs.close(); - prep.close(); - } + isDB2 = url.startsWith("jdbc:db2:"); isSQLite = url.startsWith("jdbc:sqlite:"); isOracle = url.startsWith("jdbc:oracle:"); // the Vertica engine is based on PostgreSQL @@ -142,6 +154,17 @@ public synchronized void readContents(String url, Connection conn) isDerby = url.startsWith("jdbc:derby:"); isFirebird = url.startsWith("jdbc:firebirdsql:"); isMSSQLServer = url.startsWith("jdbc:sqlserver:"); + if (isH2) { + Session.StaticSettings settings = ((JdbcConnection) conn).getStaticSettings(); + databaseToUpper = settings.databaseToUpper; + databaseToLower = settings.databaseToLower; + }else if (isMySQL || isPostgreSQL) { + databaseToUpper = false; + databaseToLower = true; + } else { + databaseToUpper = true; + databaseToLower = false; + } DatabaseMetaData meta = conn.getMetaData(); String defaultSchemaName = getDefaultSchemaName(meta); String[] schemaNames = getSchemaNames(meta); @@ -158,7 +181,7 @@ public synchronized void readContents(String url, Connection conn) String[] tableTypes = { "TABLE", "SYSTEM TABLE", "VIEW", "SYSTEM VIEW", "TABLE LINK", "SYNONYM", "EXTERNAL" }; schema.readTables(meta, tableTypes); - if (!isPostgreSQL) { + if (!isPostgreSQL && !isDB2) { schema.readProcedures(meta); } } @@ -187,7 +210,7 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException { return new String[] { null }; } ResultSet rs = meta.getSchemas(); - ArrayList schemaList = New.arrayList(); + ArrayList schemaList = 
Utils.newSmallArrayList(); while (rs.next()) { String schema = rs.getString("TABLE_SCHEM"); String[] ignoreNames = null; @@ -202,6 +225,14 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException { "db_backupoperator", "db_datareader", "db_datawriter", "db_ddladmin", "db_denydatareader", "db_denydatawriter", "db_owner", "db_securityadmin" }; + } else if (isDB2) { + ignoreNames = new String[] { "NULLID", "SYSFUN", + "SYSIBMINTERNAL", "SYSIBMTS", "SYSPROC", "SYSPUBLIC", + // not empty, but not sure what they contain + "SYSCAT", "SYSIBM", "SYSIBMADM", + "SYSSTAT", "SYSTOOLS", + }; + } if (ignoreNames != null) { for (String ignore : ignoreNames) { @@ -217,15 +248,15 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException { schemaList.add(schema); } rs.close(); - String[] list = new String[schemaList.size()]; - schemaList.toArray(list); - return list; + return schemaList.toArray(new String[0]); } private String getDefaultSchemaName(DatabaseMetaData meta) { String defaultSchemaName = ""; try { - if (isOracle) { + if (isH2) { + return meta.storesLowerCaseIdentifiers() ? "public" : "PUBLIC"; + } else if (isOracle) { return meta.getUserName(); } else if (isPostgreSQL) { return "public"; @@ -236,22 +267,14 @@ private String getDefaultSchemaName(DatabaseMetaData meta) { } else if (isFirebird) { return null; } - ResultSet rs = meta.getSchemas(); - int index = rs.findColumn("IS_DEFAULT"); - while (rs.next()) { - if (rs.getBoolean(index)) { - defaultSchemaName = rs.getString("TABLE_SCHEM"); - } - } } catch (SQLException e) { - // IS_DEFAULT not found + // Ignore } return defaultSchemaName; } /** * Add double quotes around an identifier if required. - * For the H2 database, all identifiers are quoted. 
* * @param identifier the identifier * @return the quoted identifier @@ -260,10 +283,10 @@ public String quoteIdentifier(String identifier) { if (identifier == null) { return null; } - if (isH2 && !isH2ModeMySQL) { - return Parser.quoteIdentifier(identifier); + if (ParserUtil.isSimpleIdentifier(identifier, databaseToUpper, databaseToLower)) { + return identifier; } - return StringUtils.toUpperEnglish(identifier); + return StringUtils.quoteIdentifier(identifier); } } diff --git a/h2/src/main/org/h2/bnf/context/DbContextRule.java b/h2/src/main/org/h2/bnf/context/DbContextRule.java index a48c0495c0..1d295cdb42 100644 --- a/h2/src/main/org/h2/bnf/context/DbContextRule.java +++ b/h2/src/main/org/h2/bnf/context/DbContextRule.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.bnf.context; @@ -15,8 +15,8 @@ import org.h2.bnf.RuleHead; import org.h2.bnf.RuleList; import org.h2.bnf.Sentence; -import org.h2.command.Parser; import org.h2.message.DbException; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; /** @@ -154,7 +154,7 @@ public boolean autoComplete(Sentence sentence) { break; } String alias = up.substring(0, i); - if (Parser.isKeyword(alias, true)) { + if (ParserUtil.isKeyword(alias, false)) { break; } s = s.substring(alias.length()); @@ -172,9 +172,7 @@ public boolean autoComplete(Sentence sentence) { name = column.getQuotedName(); compare = query; } - if (compare.startsWith(name) && - (columnType == null || - column.getDataType().contains(columnType))) { + if (compare.startsWith(name) && testColumnType(column)) { String b = s.substring(name.length()); if (best == null || b.length() < best.length()) { best = b; @@ -199,8 +197,7 @@ public boolean autoComplete(Sentence sentence) { for (DbColumn column : table.getColumns()) { String name = StringUtils.toUpperEnglish(column .getName()); - if (columnType == null - || column.getDataType().contains(columnType)) { + if (testColumnType(column)) { if (up.startsWith(name)) { String b = s.substring(name.length()); if (best == null || b.length() < best.length()) { @@ -226,7 +223,7 @@ public boolean autoComplete(Sentence sentence) { autoCompleteProcedure(sentence); break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } if (!s.equals(query)) { while (Bnf.startWithSpace(s)) { @@ -237,6 +234,21 @@ public boolean autoComplete(Sentence sentence) { } return false; } + + private boolean testColumnType(DbColumn column) { + if (columnType == null) { + return true; + } + String type = column.getDataType(); + if (columnType.contains("CHAR") || columnType.contains("CLOB")) { + return type.contains("CHAR") || type.contains("CLOB"); + } + if (columnType.contains("BINARY") || 
columnType.contains("BLOB")) { + return type.contains("BINARY") || type.contains("BLOB"); + } + return type.contains(columnType); + } + private void autoCompleteProcedure(Sentence sentence) { DbSchema schema = sentence.getLastMatchedSchema(); if (schema == null) { @@ -244,9 +256,9 @@ private void autoCompleteProcedure(Sentence sentence) { } String incompleteSentence = sentence.getQueryUpper(); String incompleteFunctionName = incompleteSentence; - if (incompleteSentence.contains("(")) { - incompleteFunctionName = incompleteSentence.substring(0, - incompleteSentence.indexOf('(')).trim(); + int bracketIndex = incompleteSentence.indexOf('('); + if (bracketIndex != -1) { + incompleteFunctionName = StringUtils.trimSubstring(incompleteSentence, 0, bracketIndex); } // Common elements @@ -301,7 +313,7 @@ private static String autoCompleteTableAlias(Sentence sentence, return s; } String alias = up.substring(0, i); - if ("SET".equals(alias) || Parser.isKeyword(alias, true)) { + if ("SET".equals(alias) || ParserUtil.isKeyword(alias, false)) { return s; } if (newAlias) { diff --git a/h2/src/main/org/h2/bnf/context/DbProcedure.java b/h2/src/main/org/h2/bnf/context/DbProcedure.java index 4a6e30c6db..0e9a71c2b7 100644 --- a/h2/src/main/org/h2/bnf/context/DbProcedure.java +++ b/h2/src/main/org/h2/bnf/context/DbProcedure.java @@ -1,17 +1,17 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; -import org.h2.util.New; - import java.sql.DatabaseMetaData; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import org.h2.util.Utils; + /** * Contains meta data information about a procedure. * This class is used by the H2 Console. 
@@ -21,7 +21,7 @@ public class DbProcedure { private final DbSchema schema; private final String name; private final String quotedName; - private boolean returnsResult; + private final boolean returnsResult; private DbColumn[] parameters; public DbProcedure(DbSchema schema, ResultSet rs) throws SQLException { @@ -71,10 +71,11 @@ public boolean isReturnsResult() { * Read the column for this table from the database meta data. * * @param meta the database meta data + * @throws SQLException on failure */ void readParameters(DatabaseMetaData meta) throws SQLException { ResultSet rs = meta.getProcedureColumns(null, schema.name, name, null); - ArrayList list = New.arrayList(); + ArrayList list = Utils.newSmallArrayList(); while (rs.next()) { DbColumn column = DbColumn.getProcedureColumn(schema.getContents(), rs); if (column.getPosition() > 0) { diff --git a/h2/src/main/org/h2/bnf/context/DbSchema.java b/h2/src/main/org/h2/bnf/context/DbSchema.java index d3655da5bd..f37e06fbe1 100644 --- a/h2/src/main/org/h2/bnf/context/DbSchema.java +++ b/h2/src/main/org/h2/bnf/context/DbSchema.java @@ -1,18 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; +import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; import java.util.ArrayList; import org.h2.engine.SysProperties; -import org.h2.util.New; import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * Contains meta data information about a database schema. 
@@ -20,6 +23,13 @@ */ public class DbSchema { + private static final String COLUMNS_QUERY_H2_197 = "SELECT COLUMN_NAME, ORDINAL_POSITION, COLUMN_TYPE " + + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2"; + + private static final String COLUMNS_QUERY_H2_202 = "SELECT COLUMN_NAME, ORDINAL_POSITION, " + + "DATA_TYPE_SQL(?1, ?2, 'TABLE', ORDINAL_POSITION) COLUMN_TYPE " + + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2"; + /** * The schema name. */ @@ -58,12 +68,12 @@ public class DbSchema { DbSchema(DbContents contents, String name, boolean isDefault) { this.contents = contents; this.name = name; - this.quotedName = contents.quoteIdentifier(name); + this.quotedName = contents.quoteIdentifier(name); this.isDefault = isDefault; if (name == null) { // firebird isSystem = true; - } else if ("INFORMATION_SCHEMA".equals(name)) { + } else if ("INFORMATION_SCHEMA".equalsIgnoreCase(name)) { isSystem = true; } else if (!contents.isH2() && StringUtils.toUpperEnglish(name).startsWith("INFO")) { @@ -104,11 +114,12 @@ public DbProcedure[] getProcedures() { * * @param meta the database meta data * @param tableTypes the table types to read + * @throws SQLException on failure */ public void readTables(DatabaseMetaData meta, String[] tableTypes) throws SQLException { ResultSet rs = meta.getTables(null, name, null, tableTypes); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (rs.next()) { DbTableOrView table = new DbTableOrView(this, rs); if (contents.isOracle() && table.getName().indexOf('$') > 0) { @@ -117,37 +128,46 @@ public void readTables(DatabaseMetaData meta, String[] tableTypes) list.add(table); } rs.close(); - tables = new DbTableOrView[list.size()]; - list.toArray(tables); + tables = list.toArray(new DbTableOrView[0]); if (tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_COLUMNS) { - for (DbTableOrView tab : tables) { - try { - tab.readColumns(meta); - } catch (SQLException e) { 
- // MySQL: - // View '...' references invalid table(s) or column(s) - // or function(s) or definer/invoker of view - // lack rights to use them HY000/1356 - // ignore + try (PreparedStatement ps = contents.isH2() ? prepareColumnsQueryH2(meta.getConnection()) : null) { + for (DbTableOrView tab : tables) { + try { + tab.readColumns(meta, ps); + } catch (SQLException e) { + // MySQL: + // View '...' references invalid table(s) or column(s) + // or function(s) or definer/invoker of view + // lack rights to use them HY000/1356 + // ignore + } } } } } + private static PreparedStatement prepareColumnsQueryH2(Connection connection) throws SQLException { + try { + return connection.prepareStatement(COLUMNS_QUERY_H2_202); + } catch (SQLSyntaxErrorException ex) { + return connection.prepareStatement(COLUMNS_QUERY_H2_197); + } + } + /** - * Read all procedures in the dataBase. + * Read all procedures in the database. + * * @param meta the database meta data * @throws SQLException Error while fetching procedures */ public void readProcedures(DatabaseMetaData meta) throws SQLException { ResultSet rs = meta.getProcedures(null, name, null); - ArrayList list = New.arrayList(); + ArrayList list = Utils.newSmallArrayList(); while (rs.next()) { list.add(new DbProcedure(this, rs)); } rs.close(); - procedures = new DbProcedure[list.size()]; - list.toArray(procedures); + procedures = list.toArray(new DbProcedure[0]); if (procedures.length < SysProperties.CONSOLE_MAX_PROCEDURES_LIST_COLUMNS) { for (DbProcedure procedure : procedures) { procedure.readParameters(meta); diff --git a/h2/src/main/org/h2/bnf/context/DbTableOrView.java b/h2/src/main/org/h2/bnf/context/DbTableOrView.java index cb09454968..e97ffe4385 100644 --- a/h2/src/main/org/h2/bnf/context/DbTableOrView.java +++ b/h2/src/main/org/h2/bnf/context/DbTableOrView.java @@ -1,15 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.bnf.context; import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; -import org.h2.util.New; /** * Contains meta data information about a table or a view. @@ -89,17 +89,26 @@ public String getQuotedName() { * Read the column for this table from the database meta data. * * @param meta the database meta data + * @param ps prepared statement with custom query for H2 database, null for + * others + * @throws SQLException on failure */ - public void readColumns(DatabaseMetaData meta) throws SQLException { - ResultSet rs = meta.getColumns(null, schema.name, name, null); - ArrayList list = New.arrayList(); + public void readColumns(DatabaseMetaData meta, PreparedStatement ps) throws SQLException { + ResultSet rs; + if (schema.getContents().isH2()) { + ps.setString(1, schema.name); + ps.setString(2, name); + rs = ps.executeQuery(); + } else { + rs = meta.getColumns(null, schema.name, name, null); + } + ArrayList list = new ArrayList<>(); while (rs.next()) { DbColumn column = DbColumn.getColumn(schema.getContents(), rs); list.add(column); } rs.close(); - columns = new DbColumn[list.size()]; - list.toArray(columns); + columns = list.toArray(new DbColumn[0]); } } diff --git a/h2/src/main/org/h2/bnf/context/package.html b/h2/src/main/org/h2/bnf/context/package.html index aeec0f4c4d..0a6386fb30 100644 --- a/h2/src/main/org/h2/bnf/context/package.html +++ b/h2/src/main/org/h2/bnf/context/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/bnf/package.html b/h2/src/main/org/h2/bnf/package.html index 575434d3ed..36296736e3 100644 --- a/h2/src/main/org/h2/bnf/package.html +++ b/h2/src/main/org/h2/bnf/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/Command.java 
b/h2/src/main/org/h2/command/Command.java index 089a7110fd..f26fb686b8 100644 --- a/h2/src/main/org/h2/command/Command.java +++ b/h2/src/main/org/h2/command/Command.java @@ -1,22 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; import java.sql.SQLException; import java.util.ArrayList; - +import java.util.Set; import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.Mode.CharPadding; import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ParameterInterface; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.ResultInterface; -import org.h2.util.MathUtils; +import org.h2.result.ResultWithGeneratedKeys; +import org.h2.result.ResultWithPaddedStrings; +import org.h2.util.Utils; /** * Represents a SQL statement. This object is only used on the server side. @@ -26,12 +31,12 @@ public abstract class Command implements CommandInterface { /** * The session. */ - protected final Session session; + protected final SessionLocal session; /** * The last start time. */ - protected long startTime; + protected long startTimeNanos; /** * The trace module. 
@@ -47,8 +52,8 @@ public abstract class Command implements CommandInterface { private boolean canReuse; - Command(Parser parser, String sql) { - this.session = parser.getSession(); + Command(SessionLocal session, String sql) { + this.session = session; this.sql = sql; trace = session.getDatabase().getTrace(Trace.COMMAND); } @@ -95,12 +100,16 @@ public abstract class Command implements CommandInterface { * Execute an updating statement (for example insert, delete, or update), if * this is possible. * - * @return the update count + * @param generatedKeysRequest + * {@code false} if generated keys are not needed, {@code true} if + * generated keys should be configured automatically, {@code int[]} + * to specify column indices to return generated keys from, or + * {@code String[]} to specify column names to return generated keys + * from + * @return the update count and generated keys, if any * @throws DbException if the command is not an updating statement */ - public int update() { - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY); - } + public abstract ResultWithGeneratedKeys update(Object generatedKeysRequest); /** * Execute a query statement, if this is possible. @@ -109,9 +118,7 @@ public int update() { * @return the local result set * @throws DbException if the command is not a query */ - public ResultInterface query(int maxrows) { - throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY); - } + public abstract ResultInterface query(long maxrows); @Override public final ResultInterface getMetaData() { @@ -122,8 +129,8 @@ public final ResultInterface getMetaData() { * Start the stopwatch. 
*/ void start() { - if (trace.isInfoEnabled()) { - startTime = System.currentTimeMillis(); + if (trace.isInfoEnabled() || session.getDatabase().getQueryStatistics()) { + startTimeNanos = Utils.currentNanoTime(); } } @@ -143,59 +150,54 @@ protected void checkCanceled() { } } - private void stop() { - session.endStatement(); - session.setCurrentCommand(null); - if (!isTransactional()) { - session.commit(true); - } else if (session.getAutoCommit()) { + @Override + public void stop() { + commitIfNonTransactional(); + if (isTransactional() && session.getAutoCommit()) { session.commit(false); - } else if (session.getDatabase().isMultiThreaded()) { - Database db = session.getDatabase(); - if (db != null) { - if (db.getLockMode() == Constants.LOCK_MODE_READ_COMMITTED) { - session.unlockReadLocks(); - } - } } - if (trace.isInfoEnabled() && startTime > 0) { - long time = System.currentTimeMillis() - startTime; - if (time > Constants.SLOW_QUERY_LIMIT_MS) { - trace.info("slow query: {0} ms", time); + if (trace.isInfoEnabled() && startTimeNanos != 0L) { + long timeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000L; + if (timeMillis > Constants.SLOW_QUERY_LIMIT_MS) { + trace.info("slow query: {0} ms", timeMillis); } } } /** * Execute a query and return the result. - * This method prepares everything and calls {@link #query(int)} finally. + * This method prepares everything and calls {@link #query(long)} finally. * * @param maxrows the maximum number of rows to return * @param scrollable if the result set must be scrollable (ignored) * @return the result set */ @Override - public ResultInterface executeQuery(int maxrows, boolean scrollable) { - startTime = 0; - long start = 0; + public ResultInterface executeQuery(long maxrows, boolean scrollable) { + startTimeNanos = 0L; + long start = 0L; Database database = session.getDatabase(); - Object sync = database.isMultiThreaded() ? 
(Object) session : (Object) database; session.waitIfExclusiveModeEnabled(); boolean callStop = true; - boolean writing = !isReadOnly(); - if (writing) { - while (!database.beforeWriting()) { - // wait - } - } - synchronized (sync) { - session.setCurrentCommand(this); + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (session) { + session.startStatementWithinTransaction(this); + Session oldSession = session.setThreadLocalSession(); try { while (true) { database.checkPowerOff(); try { - return query(maxrows); + ResultInterface result = query(maxrows); + callStop = !result.isLazy(); + if (database.getMode().charPadding == CharPadding.IN_RESULT_SETS) { + return ResultWithPaddedStrings.get(result); + } + return result; } catch (DbException e) { + // cannot retry DDL + if (isCurrentCommandADefineCommand()) { + throw e; + } start = filterConcurrentUpdate(e, start); } catch (OutOfMemoryError e) { callStop = false; @@ -221,38 +223,38 @@ public ResultInterface executeQuery(int maxrows, boolean scrollable) { database.checkPowerOff(); throw e; } finally { + session.resetThreadLocalSession(oldSession); + session.endStatement(); if (callStop) { stop(); } - if (writing) { - database.afterWriting(); - } } } } @Override - public int executeUpdate() { + public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { long start = 0; Database database = session.getDatabase(); - Object sync = database.isMultiThreaded() ? 
(Object) session : (Object) database; session.waitIfExclusiveModeEnabled(); boolean callStop = true; - boolean writing = !isReadOnly(); - if (writing) { - while (!database.beforeWriting()) { - // wait - } - } - synchronized (sync) { - Session.Savepoint rollback = session.setSavepoint(); - session.setCurrentCommand(this); + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (session) { + commitIfNonTransactional(); + SessionLocal.Savepoint rollback = session.setSavepoint(); + session.startStatementWithinTransaction(this); + DbException ex = null; + Session oldSession = session.setThreadLocalSession(); try { while (true) { database.checkPowerOff(); try { - return update(); + return update(generatedKeysRequest); } catch (DbException e) { + // cannot retry DDL + if (isCurrentCommandADefineCommand()) { + throw e; + } start = filterConcurrentUpdate(e, start); } catch (OutOfMemoryError e) { callStop = false; @@ -271,53 +273,57 @@ public int executeUpdate() { database.shutdownImmediately(); throw e; } - database.checkPowerOff(); - if (s.getErrorCode() == ErrorCode.DEADLOCK_1) { - session.rollback(); - } else { - session.rollbackTo(rollback, false); + try { + database.checkPowerOff(); + if (s.getErrorCode() == ErrorCode.DEADLOCK_1) { + session.rollback(); + } else { + session.rollbackTo(rollback); + } + } catch (Throwable nested) { + e.addSuppressed(nested); } + ex = e; throw e; } finally { + session.resetThreadLocalSession(oldSession); try { + session.endStatement(); if (callStop) { stop(); } - } finally { - if (writing) { - database.afterWriting(); + } catch (Throwable nested) { + if (ex == null) { + throw nested; + } else { + ex.addSuppressed(nested); } } } } } + private void commitIfNonTransactional() { + if (!isTransactional()) { + boolean autoCommit = session.getAutoCommit(); + session.commit(true); + if (!autoCommit && session.getAutoCommit()) { + session.begin(); + } + } + } + private long filterConcurrentUpdate(DbException e, long start) 
{ - if (e.getErrorCode() != ErrorCode.CONCURRENT_UPDATE_1) { + int errorCode = e.getErrorCode(); + if (errorCode != ErrorCode.CONCURRENT_UPDATE_1 && errorCode != ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX + && errorCode != ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { throw e; } - long now = System.nanoTime() / 1000000; - if (start != 0 && now - start > session.getLockTimeout()) { - throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, e.getCause(), ""); - } - Database database = session.getDatabase(); - int sleep = 1 + MathUtils.randomInt(10); - while (true) { - try { - if (database.isMultiThreaded()) { - Thread.sleep(sleep); - } else { - database.wait(sleep); - } - } catch (InterruptedException e1) { - // ignore - } - long slept = System.nanoTime() / 1000000 - now; - if (slept >= sleep) { - break; - } + long now = Utils.currentNanoTime(); + if (start != 0L && now - start > session.getLockTimeout() * 1_000_000L) { + throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, e); } - return start == 0 ? now : start; + return start == 0L ? now : start; } @Override @@ -327,7 +333,7 @@ public void close() { @Override public void cancel() { - this.cancel = true; + cancel = true; } @Override @@ -355,10 +361,21 @@ public boolean canReuse() { public void reuse() { canReuse = false; ArrayList parameters = getParameters(); - for (int i = 0, size = parameters.size(); i < size; i++) { - ParameterInterface param = parameters.get(i); + for (ParameterInterface param : parameters) { param.setValue(null, true); } } + public void setCanReuse(boolean canReuse) { + this.canReuse = canReuse; + } + + public abstract Set getDependencies(); + + /** + * Is the command we just tried to execute a DefineCommand (i.e. DDL). 
+ * + * @return true if yes + */ + protected abstract boolean isCurrentCommandADefineCommand(); } diff --git a/h2/src/main/org/h2/command/CommandContainer.java b/h2/src/main/org/h2/command/CommandContainer.java index 0c121aae67..30fcf5bc53 100644 --- a/h2/src/main/org/h2/command/CommandContainer.java +++ b/h2/src/main/org/h2/command/CommandContainer.java @@ -1,30 +1,117 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import org.h2.api.DatabaseEventListener; +import org.h2.api.ErrorCode; +import org.h2.command.ddl.DefineCommand; +import org.h2.command.dml.DataChangeStatement; +import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.DbSettings; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.LocalResult; import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.result.ResultWithGeneratedKeys; +import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; +import org.h2.table.TableView; +import org.h2.util.StringUtils; +import org.h2.util.Utils; import org.h2.value.Value; -import org.h2.value.ValueNull; /** * Represents a single SQL statements. * It wraps a prepared statement. */ -class CommandContainer extends Command { +public class CommandContainer extends Command { + + /** + * Collector of generated keys. 
+ */ + private static final class GeneratedKeysCollector implements ResultTarget { + + private final int[] indexes; + private final LocalResult result; + + GeneratedKeysCollector(int[] indexes, LocalResult result) { + this.indexes = indexes; + this.result = result; + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + @Override + public long getRowCount() { + // Not required + return 0L; + } + + @Override + public void addRow(Value... values) { + int length = indexes.length; + Value[] row = new Value[length]; + for (int i = 0; i < length; i++) { + row[i] = values[indexes[i]]; + } + result.addRow(row); + } + + } private Prepared prepared; private boolean readOnlyKnown; private boolean readOnly; - CommandContainer(Parser parser, String sql, Prepared prepared) { - super(parser, sql); + /** + * Clears CTE views for a specified statement. + * + * @param session the session + * @param prepared prepared statement + */ + static void clearCTE(SessionLocal session, Prepared prepared) { + List cteCleanups = prepared.getCteCleanups(); + if (cteCleanups != null) { + clearCTE(session, cteCleanups); + } + } + + /** + * Clears CTE views. 
+ * + * @param session the session + * @param views list of view + */ + static void clearCTE(SessionLocal session, List views) { + for (TableView view : views) { + // check if view was previously deleted as their name is set to + // null + if (view.getName() != null) { + session.removeLocalTempTable(view); + } + } + } + + public CommandContainer(SessionLocal session, String sql, Prepared prepared) { + super(session, sql); prepared.setCommand(this); this.prepared = prepared; } @@ -49,13 +136,14 @@ private void recompileIfRequired() { // TODO test with 'always recompile' prepared.setModificationMetaId(0); String sql = prepared.getSQL(); + ArrayList tokens = prepared.getSQLTokens(); ArrayList oldParams = prepared.getParameters(); Parser parser = new Parser(session); - prepared = parser.parse(sql); + prepared = parser.parse(sql, tokens); long mod = prepared.getModificationMetaId(); prepared.setModificationMetaId(0); ArrayList newParams = prepared.getParameters(); - for (int i = 0, size = newParams.size(); i < size; i++) { + for (int i = 0, size = Math.min(newParams.size(), oldParams.size()); i < size; i++) { Parameter old = oldParams.get(i); if (old.isValueSet()) { Value v = old.getValue(session); @@ -69,30 +157,121 @@ private void recompileIfRequired() { } @Override - public int update() { + public ResultWithGeneratedKeys update(Object generatedKeysRequest) { recompileIfRequired(); setProgress(DatabaseEventListener.STATE_STATEMENT_START); start(); - session.setLastScopeIdentity(ValueNull.INSTANCE); prepared.checkParameters(); - int updateCount = prepared.update(); - prepared.trace(startTime, updateCount); + ResultWithGeneratedKeys result; + if (generatedKeysRequest != null && !Boolean.FALSE.equals(generatedKeysRequest)) { + if (prepared instanceof DataChangeStatement && prepared.getType() != CommandInterface.DELETE) { + result = executeUpdateWithGeneratedKeys((DataChangeStatement) prepared, + generatedKeysRequest); + } else { + result = new 
ResultWithGeneratedKeys.WithKeys(prepared.update(), new LocalResult()); + } + } else { + result = ResultWithGeneratedKeys.of(prepared.update()); + } + prepared.trace(startTimeNanos, result.getUpdateCount()); setProgress(DatabaseEventListener.STATE_STATEMENT_END); - return updateCount; + return result; + } + + private ResultWithGeneratedKeys executeUpdateWithGeneratedKeys(DataChangeStatement statement, + Object generatedKeysRequest) { + Database db = session.getDatabase(); + Table table = statement.getTable(); + ArrayList expressionColumns; + if (Boolean.TRUE.equals(generatedKeysRequest)) { + expressionColumns = Utils.newSmallArrayList(); + Column[] columns = table.getColumns(); + Index primaryKey = table.findPrimaryKey(); + for (Column column : columns) { + Expression e; + if (column.isIdentity() + || ((e = column.getEffectiveDefaultExpression()) != null && !e.isConstant()) + || (primaryKey != null && primaryKey.getColumnIndex(column) >= 0)) { + expressionColumns.add(new ExpressionColumn(db, column)); + } + } + } else if (generatedKeysRequest instanceof int[]) { + int[] indexes = (int[]) generatedKeysRequest; + Column[] columns = table.getColumns(); + int cnt = columns.length; + expressionColumns = new ArrayList<>(indexes.length); + for (int idx : indexes) { + if (idx < 1 || idx > cnt) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "Index: " + idx); + } + expressionColumns.add(new ExpressionColumn(db, columns[idx - 1])); + } + } else if (generatedKeysRequest instanceof String[]) { + String[] names = (String[]) generatedKeysRequest; + expressionColumns = new ArrayList<>(names.length); + for (String name : names) { + Column column = table.findColumn(name); + if (column == null) { + DbSettings settings = db.getSettings(); + if (settings.databaseToUpper) { + column = table.findColumn(StringUtils.toUpperEnglish(name)); + } else if (settings.databaseToLower) { + column = table.findColumn(StringUtils.toLowerEnglish(name)); + } + search: if (column == null) { + 
for (Column c : table.getColumns()) { + if (c.getName().equalsIgnoreCase(name)) { + column = c; + break search; + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, name); + } + } + expressionColumns.add(new ExpressionColumn(db, column)); + } + } else { + throw DbException.getInternalError(); + } + int columnCount = expressionColumns.size(); + if (columnCount == 0) { + return new ResultWithGeneratedKeys.WithKeys(statement.update(), new LocalResult()); + } + int[] indexes = new int[columnCount]; + ExpressionColumn[] expressions = expressionColumns.toArray(new ExpressionColumn[0]); + for (int i = 0; i < columnCount; i++) { + indexes[i] = expressions[i].getColumn().getColumnId(); + } + LocalResult result = new LocalResult(session, expressions, columnCount, columnCount); + return new ResultWithGeneratedKeys.WithKeys( + statement.update(new GeneratedKeysCollector(indexes, result), ResultOption.FINAL), result); } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { recompileIfRequired(); setProgress(DatabaseEventListener.STATE_STATEMENT_START); start(); prepared.checkParameters(); ResultInterface result = prepared.query(maxrows); - prepared.trace(startTime, result.getRowCount()); + prepared.trace(startTimeNanos, result.isLazy() ? 0 : result.getRowCount()); setProgress(DatabaseEventListener.STATE_STATEMENT_END); return result; } + @Override + public void stop() { + super.stop(); + // Clean up after the command was run in the session. + // Must restart query (and dependency construction) to reuse. + clearCTE(session, prepared); + } + + @Override + public boolean canReuse() { + return super.canReuse() && prepared.getCteCleanups() == null; + } + @Override public boolean isReadOnly() { if (!readOnlyKnown) { @@ -117,4 +296,22 @@ public int getCommandType() { return prepared.getType(); } + /** + * Clean up any associated CTE. 
+ */ + void clearCTE() { + clearCTE(session, prepared); + } + + @Override + public Set getDependencies() { + HashSet dependencies = new HashSet<>(); + prepared.collectDependencies(dependencies); + return dependencies; + } + + @Override + protected boolean isCurrentCommandADefineCommand() { + return prepared instanceof DefineCommand; + } } diff --git a/h2/src/main/org/h2/command/CommandInterface.java b/h2/src/main/org/h2/command/CommandInterface.java index de5720c231..fbe1223ad7 100644 --- a/h2/src/main/org/h2/command/CommandInterface.java +++ b/h2/src/main/org/h2/command/CommandInterface.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; @@ -8,11 +8,12 @@ import java.util.ArrayList; import org.h2.expression.ParameterInterface; import org.h2.result.ResultInterface; +import org.h2.result.ResultWithGeneratedKeys; /** * Represents a SQL statement. */ -public interface CommandInterface { +public interface CommandInterface extends AutoCloseable { /** * The type for unknown statement. @@ -27,47 +28,48 @@ public interface CommandInterface { int ALTER_INDEX_RENAME = 1; /** - * The type of a ALTER SCHEMA RENAME statement. + * The type of an ALTER SCHEMA RENAME statement. */ int ALTER_SCHEMA_RENAME = 2; /** - * The type of a ALTER TABLE ADD CHECK statement. + * The type of an ALTER TABLE ADD CHECK statement. */ int ALTER_TABLE_ADD_CONSTRAINT_CHECK = 3; /** - * The type of a ALTER TABLE ADD UNIQUE statement. + * The type of an ALTER TABLE ADD UNIQUE statement. */ int ALTER_TABLE_ADD_CONSTRAINT_UNIQUE = 4; /** - * The type of a ALTER TABLE ADD FOREIGN KEY statement. + * The type of an ALTER TABLE ADD FOREIGN KEY statement. 
*/ int ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL = 5; /** - * The type of a ALTER TABLE ADD PRIMARY KEY statement. + * The type of an ALTER TABLE ADD PRIMARY KEY statement. */ int ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY = 6; /** - * The type of a ALTER TABLE ADD statement. + * The type of an ALTER TABLE ADD statement. */ int ALTER_TABLE_ADD_COLUMN = 7; /** - * The type of a ALTER TABLE ALTER COLUMN SET NOT NULL statement. + * The type of an ALTER TABLE ALTER COLUMN SET NOT NULL statement. */ int ALTER_TABLE_ALTER_COLUMN_NOT_NULL = 8; /** - * The type of a ALTER TABLE ALTER COLUMN SET NULL statement. + * The type of an ALTER TABLE ALTER COLUMN DROP NOT NULL statement. */ - int ALTER_TABLE_ALTER_COLUMN_NULL = 9; + int ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL = 9; /** - * The type of a ALTER TABLE ALTER COLUMN SET DEFAULT statement. + * The type of an ALTER TABLE ALTER COLUMN SET DEFAULT and ALTER TABLE ALTER + * COLUMN DROP DEFAULT statements. */ int ALTER_TABLE_ALTER_COLUMN_DEFAULT = 10; @@ -78,52 +80,52 @@ public interface CommandInterface { int ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE = 11; /** - * The type of a ALTER TABLE DROP COLUMN statement. + * The type of an ALTER TABLE DROP COLUMN statement. */ int ALTER_TABLE_DROP_COLUMN = 12; /** - * The type of a ALTER TABLE ALTER COLUMN SELECTIVITY statement. + * The type of an ALTER TABLE ALTER COLUMN SELECTIVITY statement. */ int ALTER_TABLE_ALTER_COLUMN_SELECTIVITY = 13; /** - * The type of a ALTER TABLE DROP CONSTRAINT statement. + * The type of an ALTER TABLE DROP CONSTRAINT statement. */ int ALTER_TABLE_DROP_CONSTRAINT = 14; /** - * The type of a ALTER TABLE RENAME statement. + * The type of an ALTER TABLE RENAME statement. */ int ALTER_TABLE_RENAME = 15; /** - * The type of a ALTER TABLE ALTER COLUMN RENAME statement. + * The type of an ALTER TABLE ALTER COLUMN RENAME statement. */ int ALTER_TABLE_ALTER_COLUMN_RENAME = 16; /** - * The type of a ALTER USER ADMIN statement. + * The type of an ALTER USER ADMIN statement. 
*/ int ALTER_USER_ADMIN = 17; /** - * The type of a ALTER USER RENAME statement. + * The type of an ALTER USER RENAME statement. */ int ALTER_USER_RENAME = 18; /** - * The type of a ALTER USER SET PASSWORD statement. + * The type of an ALTER USER SET PASSWORD statement. */ int ALTER_USER_SET_PASSWORD = 19; /** - * The type of a ALTER VIEW statement. + * The type of an ALTER VIEW statement. */ int ALTER_VIEW = 20; /** - * The type of a ANALYZE statement. + * The type of an ANALYZE statement. */ int ANALYZE = 21; @@ -290,12 +292,12 @@ public interface CommandInterface { // dml operations /** - * The type of a ALTER SEQUENCE statement. + * The type of an ALTER SEQUENCE statement. */ int ALTER_SEQUENCE = 54; /** - * The type of a ALTER TABLE SET REFERENTIAL_INTEGRITY statement. + * The type of an ALTER TABLE SET REFERENTIAL_INTEGRITY statement. */ int ALTER_TABLE_SET_REFERENTIAL_INTEGRITY = 55; @@ -315,17 +317,17 @@ public interface CommandInterface { int DELETE = 58; /** - * The type of a EXECUTE statement. + * The type of an EXECUTE statement. */ int EXECUTE = 59; /** - * The type of a EXPLAIN statement. + * The type of an EXPLAIN statement. */ int EXPLAIN = 60; /** - * The type of a INSERT statement. + * The type of an INSERT statement. */ int INSERT = 61; @@ -365,7 +367,7 @@ public interface CommandInterface { int SET = 67; /** - * The type of a UPDATE statement. + * The type of an UPDATE statement. */ int UPDATE = 68; @@ -451,6 +453,94 @@ public interface CommandInterface { */ int SHUTDOWN_DEFRAG = 84; + /** + * The type of an ALTER TABLE RENAME CONSTRAINT statement. + */ + int ALTER_TABLE_RENAME_CONSTRAINT = 85; + + /** + * The type of an EXPLAIN ANALYZE statement. + */ + int EXPLAIN_ANALYZE = 86; + + /** + * The type of an ALTER TABLE ALTER COLUMN SET INVISIBLE statement. + */ + int ALTER_TABLE_ALTER_COLUMN_VISIBILITY = 87; + + /** + * The type of a CREATE SYNONYM statement. + */ + int CREATE_SYNONYM = 88; + + /** + * The type of a DROP SYNONYM statement. 
+ */ + int DROP_SYNONYM = 89; + + /** + * The type of an ALTER TABLE ALTER COLUMN SET ON UPDATE statement. + */ + int ALTER_TABLE_ALTER_COLUMN_ON_UPDATE = 90; + + /** + * The type of an EXECUTE IMMEDIATELY statement. + */ + int EXECUTE_IMMEDIATELY = 91; + + /** + * The type of ALTER DOMAIN ADD CONSTRAINT statement. + */ + int ALTER_DOMAIN_ADD_CONSTRAINT = 92; + + /** + * The type of ALTER DOMAIN DROP CONSTRAINT statement. + */ + int ALTER_DOMAIN_DROP_CONSTRAINT = 93; + + /** + * The type of an ALTER DOMAIN SET DEFAULT and ALTER DOMAIN DROP DEFAULT + * statements. + */ + int ALTER_DOMAIN_DEFAULT = 94; + + /** + * The type of an ALTER DOMAIN SET ON UPDATE and ALTER DOMAIN DROP ON UPDATE + * statements. + */ + int ALTER_DOMAIN_ON_UPDATE = 95; + + /** + * The type of an ALTER DOMAIN RENAME statement. + */ + int ALTER_DOMAIN_RENAME = 96; + + /** + * The type of a HELP statement. + */ + int HELP = 97; + + /** + * The type of an ALTER TABLE ALTER COLUMN DROP EXPRESSION statement. + */ + int ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION = 98; + + /** + * The type of an ALTER TABLE ALTER COLUMN DROP IDENTITY statement. + */ + int ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY = 99; + + /** + * The type of ALTER TABLE ALTER COLUMN SET DEFAULT ON NULL and ALTER TABLE + * ALTER COLUMN DROP DEFAULT ON NULL statements. + */ + int ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL = 100; + + /** + * The type of an ALTER DOMAIN RENAME CONSTRAINT statement. + */ + int ALTER_DOMAIN_RENAME_CONSTRAINT = 101; + /** * Get command type. 
* @@ -479,18 +569,31 @@ public interface CommandInterface { * @param scrollable if the result set must be scrollable * @return the result */ - ResultInterface executeQuery(int maxRows, boolean scrollable); + ResultInterface executeQuery(long maxRows, boolean scrollable); /** * Execute the statement * - * @return the update count + * @param generatedKeysRequest + * {@code null} or {@code false} if generated keys are not + * needed, {@code true} if generated keys should be configured + * automatically, {@code int[]} to specify column indices to + * return generated keys from, or {@code String[]} to specify + * column names to return generated keys from + * + * @return the update count and generated keys, if any */ - int executeUpdate(); + ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest); + + /** + * Stop the command execution, release all locks and resources + */ + void stop(); /** * Close the statement. */ + @Override void close(); /** @@ -504,4 +607,5 @@ public interface CommandInterface { * @return the empty result */ ResultInterface getMetaData(); + } diff --git a/h2/src/main/org/h2/command/CommandList.java b/h2/src/main/org/h2/command/CommandList.java index 3255c0f734..f3d17e1162 100644 --- a/h2/src/main/org/h2/command/CommandList.java +++ b/h2/src/main/org/h2/command/CommandList.java @@ -1,56 +1,90 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; +import java.util.HashSet; +import java.util.Set; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; import org.h2.result.ResultInterface; +import org.h2.result.ResultWithGeneratedKeys; /** * Represents a list of SQL statements. */ class CommandList extends Command { - private final Command command; - private final String remaining; + private CommandContainer command; + private final ArrayList commands; + private final ArrayList parameters; + private String remaining; + private Command remainingCommand; - CommandList(Parser parser, String sql, Command c, String remaining) { - super(parser, sql); - this.command = c; + CommandList(SessionLocal session, String sql, CommandContainer command, ArrayList commands, + ArrayList parameters, String remaining) { + super(session, sql); + this.command = command; + this.commands = commands; + this.parameters = parameters; this.remaining = remaining; } @Override public ArrayList getParameters() { - return command.getParameters(); + return parameters; } private void executeRemaining() { - Command remainingCommand = session.prepareLocal(remaining); - if (remainingCommand.isQuery()) { - remainingCommand.query(0); - } else { - remainingCommand.update(); + for (Prepared prepared : commands) { + prepared.prepare(); + if (prepared.isQuery()) { + prepared.query(0); + } else { + prepared.update(); + } + } + if (remaining != null) { + remainingCommand = session.prepareLocal(remaining); + remaining = null; + if (remainingCommand.isQuery()) { + remainingCommand.query(0); + } else { + remainingCommand.update(null); + } } } @Override - public int update() { - int updateCount = command.executeUpdate(); + public ResultWithGeneratedKeys update(Object generatedKeysRequest) { + ResultWithGeneratedKeys result = command.executeUpdate(null); executeRemaining(); - return 
updateCount; + return result; } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { ResultInterface result = command.query(maxrows); executeRemaining(); return result; } + @Override + public void stop() { + command.stop(); + for (Prepared prepared : commands) { + CommandContainer.clearCTE(session, prepared); + } + if (remainingCommand != null) { + remainingCommand.stop(); + } + } + @Override public boolean isQuery() { return command.isQuery(); @@ -76,4 +110,17 @@ public int getCommandType() { return command.getCommandType(); } + @Override + public Set getDependencies() { + HashSet dependencies = new HashSet<>(); + for (Prepared prepared : commands) { + prepared.collectDependencies(dependencies); + } + return dependencies; + } + + @Override + protected boolean isCurrentCommandADefineCommand() { + return command.isCurrentCommandADefineCommand(); + } } diff --git a/h2/src/main/org/h2/command/CommandRemote.java b/h2/src/main/org/h2/command/CommandRemote.java index 04ba6feceb..7807ef4b7a 100644 --- a/h2/src/main/org/h2/command/CommandRemote.java +++ b/h2/src/main/org/h2/command/CommandRemote.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command; import java.io.IOException; import java.util.ArrayList; +import org.h2.engine.GeneratedKeysMode; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.expression.ParameterInterface; @@ -15,9 +16,12 @@ import org.h2.message.Trace; import org.h2.result.ResultInterface; import org.h2.result.ResultRemote; -import org.h2.util.New; +import org.h2.result.ResultWithGeneratedKeys; +import org.h2.util.Utils; import org.h2.value.Transfer; import org.h2.value.Value; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; /** * Represents the client-side part of a SQL statement. @@ -33,6 +37,7 @@ public class CommandRemote implements CommandInterface { private SessionRemote session; private int id; private boolean isQuery; + private int cmdType = UNKNOWN; private boolean readonly; private final int created; @@ -41,7 +46,7 @@ public CommandRemote(SessionRemote session, this.transferList = transferList; trace = session.getTrace(); this.sql = sql; - parameters = New.arrayList(); + parameters = Utils.newSmallArrayList(); prepare(session, true); // set session late because prepare might fail - in this case we don't // need to close the object @@ -50,16 +55,21 @@ public CommandRemote(SessionRemote session, created = session.getLastReconnect(); } + @Override + public void stop() { + // Ignore + } + private void prepare(SessionRemote s, boolean createParams) { id = s.getNextId(); for (int i = 0, count = 0; i < transferList.size(); i++) { try { Transfer transfer = transferList.get(i); + if (createParams) { - s.traceOperation("SESSION_PREPARE_READ_PARAMS", id); - transfer. - writeInt(SessionRemote.SESSION_PREPARE_READ_PARAMS). 
- writeInt(id).writeString(sql); + s.traceOperation("SESSION_PREPARE_READ_PARAMS2", id); + transfer.writeInt(SessionRemote.SESSION_PREPARE_READ_PARAMS2) + .writeInt(id).writeString(sql); } else { s.traceOperation("SESSION_PREPARE", id); transfer.writeInt(SessionRemote.SESSION_PREPARE). @@ -68,6 +78,9 @@ private void prepare(SessionRemote s, boolean createParams) { s.done(transfer); isQuery = transfer.readBoolean(); readonly = transfer.readBoolean(); + + cmdType = createParams ? transfer.readInt() : UNKNOWN; + int paramCount = transfer.readInt(); if (createParams) { parameters.clear(); @@ -135,7 +148,7 @@ public ResultInterface getMetaData() { } @Override - public ResultInterface executeQuery(int maxRows, boolean scrollable) { + public ResultInterface executeQuery(long maxRows, boolean scrollable) { checkParameters(); synchronized (session) { int objectId = session.getNextId(); @@ -145,8 +158,8 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) { Transfer transfer = transferList.get(i); try { session.traceOperation("COMMAND_EXECUTE_QUERY", id); - transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY). - writeInt(id).writeInt(objectId).writeInt(maxRows); + transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY).writeInt(id).writeInt(objectId); + transfer.writeRowCount(maxRows); int fetch; if (session.isClustered() || scrollable) { fetch = Integer.MAX_VALUE; @@ -176,10 +189,14 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) { } @Override - public int executeUpdate() { + public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) { checkParameters(); + int generatedKeysMode = GeneratedKeysMode.valueOf(generatedKeysRequest); + boolean readGeneratedKeys = generatedKeysMode != GeneratedKeysMode.NONE; + int objectId = readGeneratedKeys ? 
session.getNextId() : 0; synchronized (session) { - int updateCount = 0; + long updateCount = 0L; + ResultRemote generatedKeys = null; boolean autoCommit = false; for (int i = 0, count = 0; i < transferList.size(); i++) { prepareIfRequired(); @@ -188,9 +205,36 @@ public int executeUpdate() { session.traceOperation("COMMAND_EXECUTE_UPDATE", id); transfer.writeInt(SessionRemote.COMMAND_EXECUTE_UPDATE).writeInt(id); sendParameters(transfer); + transfer.writeInt(generatedKeysMode); + switch (generatedKeysMode) { + case GeneratedKeysMode.COLUMN_NUMBERS: { + int[] keys = (int[]) generatedKeysRequest; + transfer.writeInt(keys.length); + for (int key : keys) { + transfer.writeInt(key); + } + break; + } + case GeneratedKeysMode.COLUMN_NAMES: { + String[] keys = (String[]) generatedKeysRequest; + transfer.writeInt(keys.length); + for (String key : keys) { + transfer.writeString(key); + } + break; + } + } session.done(transfer); - updateCount = transfer.readInt(); + updateCount = transfer.readRowCount(); autoCommit = transfer.readBoolean(); + if (readGeneratedKeys) { + int columnCount = transfer.readInt(); + if (generatedKeys != null) { + generatedKeys.close(); + generatedKeys = null; + } + generatedKeys = new ResultRemote(session, transfer, objectId, columnCount, Integer.MAX_VALUE); + } } catch (IOException e) { session.removeServer(e, i--, ++count); } @@ -198,13 +242,18 @@ public int executeUpdate() { session.setAutoCommitFromServer(autoCommit); session.autoCommitIfCluster(); session.readSessionState(); - return updateCount; + if (generatedKeys != null) { + return new ResultWithGeneratedKeys.WithKeys(updateCount, generatedKeys); + } + return ResultWithGeneratedKeys.of(updateCount); } } private void checkParameters() { - for (ParameterInterface p : parameters) { - p.checkSet(); + if (cmdType != EXPLAIN) { + for (ParameterInterface p : parameters) { + p.checkSet(); + } } } @@ -212,7 +261,13 @@ private void sendParameters(Transfer transfer) throws IOException { int len = 
parameters.size(); transfer.writeInt(len); for (ParameterInterface p : parameters) { - transfer.writeValue(p.getParamValue()); + Value pVal = p.getParamValue(); + + if (pVal == null && cmdType == EXPLAIN) { + pVal = ValueNull.INSTANCE; + } + + transfer.writeValue(pVal); } } @@ -235,8 +290,8 @@ public void close() { try { for (ParameterInterface p : parameters) { Value v = p.getParamValue(); - if (v != null) { - v.close(); + if (v instanceof ValueLob) { + ((ValueLob) v).remove(); } } } catch (DbException e) { @@ -260,7 +315,7 @@ public String toString() { @Override public int getCommandType() { - return UNKNOWN; + return cmdType; } } diff --git a/h2/src/main/org/h2/command/Parser.java b/h2/src/main/org/h2/command/Parser.java index 0f0fa19df6..6aa8a51d37 100644 --- a/h2/src/main/org/h2/command/Parser.java +++ b/h2/src/main/org/h2/command/Parser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group * * Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 @@ -8,146 +8,401 @@ */ package org.h2.command; -import java.math.BigDecimal; -import java.math.BigInteger; +import static org.h2.command.Token.ASTERISK; +import static org.h2.command.Token.AT; +import static org.h2.command.Token.BIGGER; +import static org.h2.command.Token.BIGGER_EQUAL; +import static org.h2.command.Token.CLOSE_BRACE; +import static org.h2.command.Token.CLOSE_BRACKET; +import static org.h2.command.Token.CLOSE_PAREN; +import static org.h2.command.Token.COLON; +import static org.h2.command.Token.COLON_COLON; +import static org.h2.command.Token.COLON_EQ; +import static org.h2.command.Token.COMMA; +import static org.h2.command.Token.CONCATENATION; +import static org.h2.command.Token.DOT; +import static org.h2.command.Token.END_OF_INPUT; +import static org.h2.command.Token.EQUAL; +import static org.h2.command.Token.LITERAL; +import static org.h2.command.Token.MINUS_SIGN; +import static org.h2.command.Token.NOT_EQUAL; +import static org.h2.command.Token.NOT_TILDE; +import static org.h2.command.Token.OPEN_BRACE; +import static org.h2.command.Token.OPEN_BRACKET; +import static org.h2.command.Token.OPEN_PAREN; +import static org.h2.command.Token.PARAMETER; +import static org.h2.command.Token.PERCENT; +import static org.h2.command.Token.PLUS_SIGN; +import static org.h2.command.Token.SEMICOLON; +import static org.h2.command.Token.SLASH; +import static org.h2.command.Token.SMALLER; +import static org.h2.command.Token.SMALLER_EQUAL; +import static org.h2.command.Token.SPATIAL_INTERSECTS; +import static org.h2.command.Token.TILDE; +import static org.h2.command.Token.TOKENS; +import static org.h2.util.ParserUtil.ALL; +import static org.h2.util.ParserUtil.AND; +import static org.h2.util.ParserUtil.ANY; +import static org.h2.util.ParserUtil.ARRAY; +import static org.h2.util.ParserUtil.AS; +import static org.h2.util.ParserUtil.ASYMMETRIC; +import static 
org.h2.util.ParserUtil.AUTHORIZATION; +import static org.h2.util.ParserUtil.BETWEEN; +import static org.h2.util.ParserUtil.CASE; +import static org.h2.util.ParserUtil.CAST; +import static org.h2.util.ParserUtil.CHECK; +import static org.h2.util.ParserUtil.CONSTRAINT; +import static org.h2.util.ParserUtil.CROSS; +import static org.h2.util.ParserUtil.CURRENT_CATALOG; +import static org.h2.util.ParserUtil.CURRENT_DATE; +import static org.h2.util.ParserUtil.CURRENT_PATH; +import static org.h2.util.ParserUtil.CURRENT_ROLE; +import static org.h2.util.ParserUtil.CURRENT_SCHEMA; +import static org.h2.util.ParserUtil.CURRENT_TIME; +import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP; +import static org.h2.util.ParserUtil.CURRENT_USER; +import static org.h2.util.ParserUtil.DAY; +import static org.h2.util.ParserUtil.DEFAULT; +import static org.h2.util.ParserUtil.DISTINCT; +import static org.h2.util.ParserUtil.ELSE; +import static org.h2.util.ParserUtil.END; +import static org.h2.util.ParserUtil.EXCEPT; +import static org.h2.util.ParserUtil.EXISTS; +import static org.h2.util.ParserUtil.FALSE; +import static org.h2.util.ParserUtil.FETCH; +import static org.h2.util.ParserUtil.FIRST_KEYWORD; +import static org.h2.util.ParserUtil.FOR; +import static org.h2.util.ParserUtil.FOREIGN; +import static org.h2.util.ParserUtil.FROM; +import static org.h2.util.ParserUtil.FULL; +import static org.h2.util.ParserUtil.GROUP; +import static org.h2.util.ParserUtil.HAVING; +import static org.h2.util.ParserUtil.HOUR; +import static org.h2.util.ParserUtil.IDENTIFIER; +import static org.h2.util.ParserUtil.IF; +import static org.h2.util.ParserUtil.IN; +import static org.h2.util.ParserUtil.INNER; +import static org.h2.util.ParserUtil.INTERSECT; +import static org.h2.util.ParserUtil.INTERVAL; +import static org.h2.util.ParserUtil.IS; +import static org.h2.util.ParserUtil.JOIN; +import static org.h2.util.ParserUtil.KEY; +import static org.h2.util.ParserUtil.LAST_KEYWORD; +import static 
org.h2.util.ParserUtil.LEFT; +import static org.h2.util.ParserUtil.LIKE; +import static org.h2.util.ParserUtil.LIMIT; +import static org.h2.util.ParserUtil.LOCALTIME; +import static org.h2.util.ParserUtil.LOCALTIMESTAMP; +import static org.h2.util.ParserUtil.MINUS; +import static org.h2.util.ParserUtil.MINUTE; +import static org.h2.util.ParserUtil.MONTH; +import static org.h2.util.ParserUtil.NATURAL; +import static org.h2.util.ParserUtil.NOT; +import static org.h2.util.ParserUtil.NULL; +import static org.h2.util.ParserUtil.OFFSET; +import static org.h2.util.ParserUtil.ON; +import static org.h2.util.ParserUtil.OR; +import static org.h2.util.ParserUtil.ORDER; +import static org.h2.util.ParserUtil.PRIMARY; +import static org.h2.util.ParserUtil.QUALIFY; +import static org.h2.util.ParserUtil.RIGHT; +import static org.h2.util.ParserUtil.ROW; +import static org.h2.util.ParserUtil.ROWNUM; +import static org.h2.util.ParserUtil.SECOND; +import static org.h2.util.ParserUtil.SELECT; +import static org.h2.util.ParserUtil.SESSION_USER; +import static org.h2.util.ParserUtil.SET; +import static org.h2.util.ParserUtil.SOME; +import static org.h2.util.ParserUtil.SYMMETRIC; +import static org.h2.util.ParserUtil.SYSTEM_USER; +import static org.h2.util.ParserUtil.TABLE; +import static org.h2.util.ParserUtil.TO; +import static org.h2.util.ParserUtil.TRUE; +import static org.h2.util.ParserUtil.UNION; +import static org.h2.util.ParserUtil.UNIQUE; +import static org.h2.util.ParserUtil.UNKNOWN; +import static org.h2.util.ParserUtil.USER; +import static org.h2.util.ParserUtil.USING; +import static org.h2.util.ParserUtil.VALUE; +import static org.h2.util.ParserUtil.VALUES; +import static org.h2.util.ParserUtil.WHEN; +import static org.h2.util.ParserUtil.WHERE; +import static org.h2.util.ParserUtil.WINDOW; +import static org.h2.util.ParserUtil.WITH; +import static org.h2.util.ParserUtil.YEAR; +import static org.h2.util.ParserUtil._ROWID_; + import java.nio.charset.Charset; import 
java.text.Collator; import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collection; +import java.util.Collections; import java.util.HashSet; - +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.TreeSet; import org.h2.api.ErrorCode; +import org.h2.api.IntervalQualifier; import org.h2.api.Trigger; +import org.h2.command.ddl.AlterDomainAddConstraint; +import org.h2.command.ddl.AlterDomainDropConstraint; +import org.h2.command.ddl.AlterDomainExpressions; +import org.h2.command.ddl.AlterDomainRename; +import org.h2.command.ddl.AlterDomainRenameConstraint; import org.h2.command.ddl.AlterIndexRename; import org.h2.command.ddl.AlterSchemaRename; +import org.h2.command.ddl.AlterSequence; import org.h2.command.ddl.AlterTableAddConstraint; import org.h2.command.ddl.AlterTableAlterColumn; import org.h2.command.ddl.AlterTableDropConstraint; import org.h2.command.ddl.AlterTableRename; import org.h2.command.ddl.AlterTableRenameColumn; +import org.h2.command.ddl.AlterTableRenameConstraint; import org.h2.command.ddl.AlterUser; import org.h2.command.ddl.AlterView; import org.h2.command.ddl.Analyze; +import org.h2.command.ddl.CommandWithColumns; import org.h2.command.ddl.CreateAggregate; import org.h2.command.ddl.CreateConstant; +import org.h2.command.ddl.CreateDomain; import org.h2.command.ddl.CreateFunctionAlias; import org.h2.command.ddl.CreateIndex; import org.h2.command.ddl.CreateLinkedTable; import org.h2.command.ddl.CreateRole; import org.h2.command.ddl.CreateSchema; import org.h2.command.ddl.CreateSequence; +import org.h2.command.ddl.CreateSynonym; import org.h2.command.ddl.CreateTable; -import org.h2.command.ddl.CreateTableData; import org.h2.command.ddl.CreateTrigger; import org.h2.command.ddl.CreateUser; -import org.h2.command.ddl.CreateUserDataType; import org.h2.command.ddl.CreateView; import org.h2.command.ddl.DeallocateProcedure; import 
org.h2.command.ddl.DefineCommand; import org.h2.command.ddl.DropAggregate; import org.h2.command.ddl.DropConstant; import org.h2.command.ddl.DropDatabase; +import org.h2.command.ddl.DropDomain; import org.h2.command.ddl.DropFunctionAlias; import org.h2.command.ddl.DropIndex; import org.h2.command.ddl.DropRole; import org.h2.command.ddl.DropSchema; import org.h2.command.ddl.DropSequence; +import org.h2.command.ddl.DropSynonym; import org.h2.command.ddl.DropTable; import org.h2.command.ddl.DropTrigger; import org.h2.command.ddl.DropUser; -import org.h2.command.ddl.DropUserDataType; import org.h2.command.ddl.DropView; import org.h2.command.ddl.GrantRevoke; import org.h2.command.ddl.PrepareProcedure; +import org.h2.command.ddl.SequenceOptions; import org.h2.command.ddl.SetComment; import org.h2.command.ddl.TruncateTable; -import org.h2.command.dml.AlterSequence; import org.h2.command.dml.AlterTableSet; import org.h2.command.dml.BackupCommand; import org.h2.command.dml.Call; +import org.h2.command.dml.CommandWithValues; +import org.h2.command.dml.DataChangeStatement; import org.h2.command.dml.Delete; +import org.h2.command.dml.ExecuteImmediate; import org.h2.command.dml.ExecuteProcedure; import org.h2.command.dml.Explain; +import org.h2.command.dml.Help; import org.h2.command.dml.Insert; import org.h2.command.dml.Merge; +import org.h2.command.dml.MergeUsing; import org.h2.command.dml.NoOperation; -import org.h2.command.dml.Query; -import org.h2.command.dml.Replace; import org.h2.command.dml.RunScriptCommand; import org.h2.command.dml.ScriptCommand; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectOrderBy; -import org.h2.command.dml.SelectUnion; import org.h2.command.dml.Set; +import org.h2.command.dml.SetClauseList; +import org.h2.command.dml.SetSessionCharacteristics; import org.h2.command.dml.SetTypes; import org.h2.command.dml.TransactionCommand; import org.h2.command.dml.Update; -import org.h2.constraint.ConstraintReferential; +import 
org.h2.command.query.Query; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.command.query.SelectUnion; +import org.h2.command.query.TableValueConstructor; +import org.h2.constraint.ConstraintActionType; +import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.FunctionAlias; +import org.h2.engine.DbSettings; +import org.h2.engine.IsolationLevel; +import org.h2.engine.Mode; +import org.h2.engine.Mode.ModeEnum; import org.h2.engine.Procedure; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; +import org.h2.engine.SessionLocal; import org.h2.engine.User; -import org.h2.engine.UserAggregate; -import org.h2.engine.UserDataType; -import org.h2.expression.Aggregate; import org.h2.expression.Alias; -import org.h2.expression.CompareLike; -import org.h2.expression.Comparison; -import org.h2.expression.ConditionAndOr; -import org.h2.expression.ConditionExists; -import org.h2.expression.ConditionIn; -import org.h2.expression.ConditionInSelect; -import org.h2.expression.ConditionNot; +import org.h2.expression.ArrayConstructorByQuery; +import org.h2.expression.ArrayElementReference; +import org.h2.expression.BinaryOperation; +import org.h2.expression.BinaryOperation.OpType; +import org.h2.expression.ConcatenationOperation; +import org.h2.expression.DomainValueExpression; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionList; -import org.h2.expression.Function; -import org.h2.expression.FunctionCall; -import org.h2.expression.JavaAggregate; -import org.h2.expression.JavaFunction; -import org.h2.expression.Operation; +import org.h2.expression.ExpressionWithFlags; +import org.h2.expression.ExpressionWithVariableParameters; +import org.h2.expression.FieldReference; +import org.h2.expression.Format; +import 
org.h2.expression.Format.FormatEnum; import org.h2.expression.Parameter; import org.h2.expression.Rownum; +import org.h2.expression.SearchedCase; import org.h2.expression.SequenceValue; +import org.h2.expression.SimpleCase; import org.h2.expression.Subquery; -import org.h2.expression.TableFunction; +import org.h2.expression.TimeZoneOperation; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.UnaryOperation; import org.h2.expression.ValueExpression; import org.h2.expression.Variable; import org.h2.expression.Wildcard; +import org.h2.expression.aggregate.AbstractAggregate; +import org.h2.expression.aggregate.Aggregate; +import org.h2.expression.aggregate.AggregateType; +import org.h2.expression.aggregate.JavaAggregate; +import org.h2.expression.aggregate.ListaggArguments; +import org.h2.expression.analysis.DataAnalysisOperation; +import org.h2.expression.analysis.Window; +import org.h2.expression.analysis.WindowFrame; +import org.h2.expression.analysis.WindowFrameBound; +import org.h2.expression.analysis.WindowFrameBoundType; +import org.h2.expression.analysis.WindowFrameExclusion; +import org.h2.expression.analysis.WindowFrameUnits; +import org.h2.expression.analysis.WindowFunction; +import org.h2.expression.analysis.WindowFunctionType; +import org.h2.expression.condition.BetweenPredicate; +import org.h2.expression.condition.BooleanTest; +import org.h2.expression.condition.CompareLike; +import org.h2.expression.condition.CompareLike.LikeType; +import org.h2.expression.condition.Comparison; +import org.h2.expression.condition.ConditionAndOr; +import org.h2.expression.condition.ConditionAndOrN; +import org.h2.expression.condition.ConditionIn; +import org.h2.expression.condition.ConditionInParameter; +import org.h2.expression.condition.ConditionInQuery; +import org.h2.expression.condition.ConditionLocalAndGlobal; +import org.h2.expression.condition.ConditionNot; +import org.h2.expression.condition.ExistsPredicate; +import 
org.h2.expression.condition.IsJsonPredicate; +import org.h2.expression.condition.NullPredicate; +import org.h2.expression.condition.TypePredicate; +import org.h2.expression.condition.UniquePredicate; +import org.h2.expression.function.ArrayFunction; +import org.h2.expression.function.BitFunction; +import org.h2.expression.function.BuiltinFunctions; +import org.h2.expression.function.CSVWriteFunction; +import org.h2.expression.function.CardinalityExpression; +import org.h2.expression.function.CastSpecification; +import org.h2.expression.function.CoalesceFunction; +import org.h2.expression.function.CompatibilitySequenceValueFunction; +import org.h2.expression.function.CompressFunction; +import org.h2.expression.function.ConcatFunction; +import org.h2.expression.function.CryptFunction; +import org.h2.expression.function.CurrentDateTimeValueFunction; +import org.h2.expression.function.CurrentGeneralValueSpecification; +import org.h2.expression.function.DBObjectFunction; +import org.h2.expression.function.DataTypeSQLFunction; +import org.h2.expression.function.DateTimeFormatFunction; +import org.h2.expression.function.DateTimeFunction; +import org.h2.expression.function.DayMonthNameFunction; +import org.h2.expression.function.FileFunction; +import org.h2.expression.function.HashFunction; +import org.h2.expression.function.JavaFunction; +import org.h2.expression.function.JsonConstructorFunction; +import org.h2.expression.function.LengthFunction; +import org.h2.expression.function.MathFunction; +import org.h2.expression.function.MathFunction1; +import org.h2.expression.function.MathFunction2; +import org.h2.expression.function.NullIfFunction; +import org.h2.expression.function.RandFunction; +import org.h2.expression.function.RegexpFunction; +import org.h2.expression.function.SessionControlFunction; +import org.h2.expression.function.SetFunction; +import org.h2.expression.function.SignalFunction; +import org.h2.expression.function.SoundexFunction; +import 
org.h2.expression.function.StringFunction; +import org.h2.expression.function.StringFunction1; +import org.h2.expression.function.StringFunction2; +import org.h2.expression.function.SubstringFunction; +import org.h2.expression.function.SysInfoFunction; +import org.h2.expression.function.TableInfoFunction; +import org.h2.expression.function.ToCharFunction; +import org.h2.expression.function.TrimFunction; +import org.h2.expression.function.TruncateValueFunction; +import org.h2.expression.function.XMLFunction; +import org.h2.expression.function.table.ArrayTableFunction; +import org.h2.expression.function.table.CSVReadFunction; +import org.h2.expression.function.table.JavaTableFunction; +import org.h2.expression.function.table.LinkSchemaFunction; +import org.h2.expression.function.table.TableFunction; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mode.FunctionsPostgreSQL; +import org.h2.mode.ModeFunction; +import org.h2.mode.OnDuplicateKeyValues; +import org.h2.mode.Regclass; import org.h2.result.SortOrder; +import org.h2.schema.Domain; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; import org.h2.schema.Sequence; +import org.h2.schema.UserAggregate; +import org.h2.schema.UserDefinedFunction; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.DualTable; import org.h2.table.FunctionTable; import org.h2.table.IndexColumn; +import org.h2.table.IndexHints; import org.h2.table.RangeTable; import org.h2.table.Table; import org.h2.table.TableFilter; import org.h2.table.TableView; -import org.h2.table.TableFilter.TableFilterVisitor; -import org.h2.util.MathUtils; -import org.h2.util.New; -import org.h2.util.StatementBuilder; +import org.h2.util.HasSQL; +import org.h2.util.IntervalUtils; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.util.geometry.EWKTUtils; +import 
org.h2.util.json.JSONItemType; +import org.h2.util.json.JsonConstructorUtils; import org.h2.value.CompareMode; import org.h2.value.DataType; +import org.h2.value.ExtTypeInfoEnum; +import org.h2.value.ExtTypeInfoGeometry; +import org.h2.value.ExtTypeInfoNumeric; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueBytes; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; +import org.h2.value.ValueDouble; +import org.h2.value.ValueGeometry; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; +import org.h2.value.ValueJson; import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueRow; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueUuid; +import org.h2.value.ValueVarchar; /** * The parser is used to convert a SQL statement string to an command object. 
@@ -158,64 +413,118 @@ */ public class Parser { - // used during the tokenizer phase - private static final int CHAR_END = 1, CHAR_VALUE = 2, CHAR_QUOTED = 3; - private static final int CHAR_NAME = 4, CHAR_SPECIAL_1 = 5, - CHAR_SPECIAL_2 = 6; - private static final int CHAR_STRING = 7, CHAR_DOT = 8, - CHAR_DOLLAR_QUOTED_STRING = 9; - - // this are token types - private static final int KEYWORD = 1, IDENTIFIER = 2, PARAMETER = 3, - END = 4, VALUE = 5; - private static final int EQUAL = 6, BIGGER_EQUAL = 7, BIGGER = 8; - private static final int SMALLER = 9, SMALLER_EQUAL = 10, NOT_EQUAL = 11, - AT = 12; - private static final int MINUS = 13, PLUS = 14, STRING_CONCAT = 15; - private static final int OPEN = 16, CLOSE = 17, NULL = 18, TRUE = 19, - FALSE = 20; - private static final int CURRENT_TIMESTAMP = 21, CURRENT_DATE = 22, - CURRENT_TIME = 23, ROWNUM = 24; - private static final int SPATIAL_INTERSECTS = 25; + private static final String WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS = + "WITH statement supports only SELECT, TABLE, VALUES, " + + "CREATE TABLE, INSERT, UPDATE, MERGE or DELETE statements"; private final Database database; - private final Session session; + private final SessionLocal session; + + /** + * @see org.h2.engine.DbSettings#databaseToLower + */ + private final boolean identifiersToLower; /** * @see org.h2.engine.DbSettings#databaseToUpper */ private final boolean identifiersToUpper; - /** indicates character-type for each char in sqlCommand */ - private int[] characterTypes; + /** + * @see org.h2.engine.SessionLocal#isVariableBinary() + */ + private final boolean variableBinary; + + private final BitSet nonKeywords; + + ArrayList tokens; + int tokenIndex; + Token token; private int currentTokenType; private String currentToken; - private boolean currentTokenQuoted; - private Value currentValue; - private String originalSQL; - /** copy of originalSQL, with comments blanked out */ private String sqlCommand; - /** cached array if chars from 
sqlCommand */ - private char[] sqlCommandChars; - /** index into sqlCommand of previous token */ - private int lastParseIndex; - /** index into sqlCommand of current token */ - private int parseIndex; private CreateView createView; private Prepared currentPrepared; private Select currentSelect; + private List cteCleanups; private ArrayList parameters; + private ArrayList suppliedParameters; private String schemaName; private ArrayList expectedList; private boolean rightsChecked; private boolean recompileAlways; - private ArrayList indexedParameterList; + private boolean literalsChecked; + private int orderInFrom; + private boolean parseDomainConstraint; + + /** + * Parses the specified collection of non-keywords. + * + * @param nonKeywords array of non-keywords in upper case + * @return bit set of non-keywords, or {@code null} + */ + public static BitSet parseNonKeywords(String[] nonKeywords) { + if (nonKeywords.length == 0) { + return null; + } + BitSet set = new BitSet(); + for (String nonKeyword : nonKeywords) { + int index = Arrays.binarySearch(TOKENS, FIRST_KEYWORD, LAST_KEYWORD + 1, nonKeyword); + if (index >= 0) { + set.set(index); + } + } + return set.isEmpty() ? null : set; + } + + /** + * Formats a comma-separated list of keywords. + * + * @param nonKeywords bit set of non-keywords, or {@code null} + * @return comma-separated list of non-keywords + */ + public static String formatNonKeywords(BitSet nonKeywords) { + if (nonKeywords == null || nonKeywords.isEmpty()) { + return ""; + } + StringBuilder builder = new StringBuilder(); + for (int i = -1; (i = nonKeywords.nextSetBit(i + 1)) >= 0;) { + if (i >= FIRST_KEYWORD && i <= LAST_KEYWORD) { + if (builder.length() > 0) { + builder.append(','); + } + builder.append(TOKENS[i]); + } + } + return builder.toString(); + } - public Parser(Session session) { + /** + * Creates a new instance of parser. 
+ * + * @param session the session + */ + public Parser(SessionLocal session) { this.database = session.getDatabase(); - this.identifiersToUpper = database.getSettings().databaseToUpper; + DbSettings settings = database.getSettings(); + this.identifiersToLower = settings.databaseToLower; + this.identifiersToUpper = settings.databaseToUpper; + this.variableBinary = session.isVariableBinary(); + this.nonKeywords = session.getNonKeywords(); this.session = session; } + /** + * Creates a new instance of parser for special use cases. + */ + public Parser() { + database = null; + identifiersToLower = false; + identifiersToUpper = false; + variableBinary = false; + nonKeywords = null; + session = null; + } + /** * Parse the statement and prepare it for execution. * @@ -223,9 +532,9 @@ public Parser(Session session) { * @return the prepared object */ public Prepared prepare(String sql) { - Prepared p = parse(sql); + Prepared p = parse(sql, null); p.prepare(); - if (currentTokenType != END) { + if (currentTokenType != END_OF_INPUT) { throw getSyntaxError(); } return p; @@ -239,38 +548,96 @@ public Prepared prepare(String sql) { */ public Command prepareCommand(String sql) { try { - Prepared p = parse(sql); - boolean hasMore = isToken(";"); - if (!hasMore && currentTokenType != END) { + Prepared p = parse(sql, null); + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { + addExpected(SEMICOLON); throw getSyntaxError(); } - p.prepare(); - Command c = new CommandContainer(this, sql, p); - if (hasMore) { - String remaining = originalSQL.substring(parseIndex); - if (remaining.trim().length() != 0) { - CommandList list = new CommandList(this, sql, c, remaining); - // list.addCommand(c); - // do { - // c = parseCommand(); - // list.addCommand(c); - // } while (currentToken.equals(";")); - c = list; - } + try { + p.prepare(); + } catch (Throwable t) { + CommandContainer.clearCTE(session, p); + throw t; + } + int sqlIndex = token.start(); + if (sqlIndex < 
sql.length()) { + sql = sql.substring(0, sqlIndex); + } + CommandContainer c = new CommandContainer(session, sql, p); + while (currentTokenType == SEMICOLON) { + read(); + } + if (currentTokenType != END_OF_INPUT) { + int offset = token.start(); + return prepareCommandList(c, p, sql, sqlCommand.substring(offset), getRemainingTokens(offset)); } return c; } catch (DbException e) { - throw e.addSQL(originalSQL); + throw e.addSQL(sqlCommand); + } + } + + private CommandList prepareCommandList(CommandContainer command, Prepared p, String sql, String remainingSql, + ArrayList remainingTokens) { + try { + ArrayList list = Utils.newSmallArrayList(); + for (;;) { + if (p instanceof DefineCommand) { + // Next commands may depend on results of this command. + return new CommandList(session, sql, command, list, parameters, remainingSql); + } + suppliedParameters = parameters; + try { + p = parse(remainingSql, remainingTokens); + } catch (DbException ex) { + // This command may depend on results of previous commands. 
+ if (ex.getErrorCode() == ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS) { + throw ex; + } + return new CommandList(session, sql, command, list, parameters, remainingSql); + } + list.add(p); + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { + addExpected(SEMICOLON); + throw getSyntaxError(); + } + while (currentTokenType == SEMICOLON) { + read(); + } + if (currentTokenType == END_OF_INPUT) { + break; + } + int offset = token.start(); + remainingSql = sqlCommand.substring(offset); + remainingTokens = getRemainingTokens(offset); + } + return new CommandList(session, sql, command, list, parameters, null); + } catch (Throwable t) { + command.clearCTE(); + throw t; } } + private ArrayList getRemainingTokens(int offset) { + List subList = tokens.subList(tokenIndex, tokens.size()); + ArrayList remainingTokens = new ArrayList<>(subList); + subList.clear(); + tokens.add(new Token.EndOfInputToken(offset)); + for (Token token : remainingTokens) { + token.subtractFromStart(offset); + } + return remainingTokens; + } + /** * Parse the statement, but don't prepare it for execution. * * @param sql the SQL statement to parse + * @param tokens tokens, or null * @return the prepared object */ - Prepared parse(String sql) { + Prepared parse(String sql, ArrayList tokens) { + initialize(sql, tokens, false); Prepared p; try { // first, try the fast variant @@ -278,6 +645,7 @@ Prepared parse(String sql) { } catch (DbException e) { if (e.getErrorCode() == ErrorCode.SYNTAX_ERROR_1) { // now, get the detailed exception + resetTokenIndex(); p = parse(sql, true); } else { throw e.addSQL(sql); @@ -289,44 +657,74 @@ Prepared parse(String sql) { } private Prepared parse(String sql, boolean withExpectedList) { - initialize(sql); if (withExpectedList) { - expectedList = New.arrayList(); + expectedList = new ArrayList<>(); } else { expectedList = null; } - parameters = New.arrayList(); + parameters = suppliedParameters != null ? 
suppliedParameters : Utils.newSmallArrayList(); currentSelect = null; currentPrepared = null; createView = null; + cteCleanups = null; recompileAlways = false; - indexedParameterList = null; read(); - return parsePrepared(); + Prepared p; + try { + p = parsePrepared(); + p.setCteCleanups(cteCleanups); + } catch (Throwable t) { + if (cteCleanups != null) { + CommandContainer.clearCTE(session, cteCleanups); + } + throw t; + } + return p; } private Prepared parsePrepared() { - int start = lastParseIndex; + int start = tokenIndex; Prepared c = null; - String token = currentToken; - if (token.length() == 0) { + switch (currentTokenType) { + case END_OF_INPUT: + case SEMICOLON: c = new NoOperation(session); - } else { - char first = token.charAt(0); - switch (first) { - case '?': - // read the ? as a parameter - readTerm(); - // this is an 'out' parameter - set a dummy value - parameters.get(0).setValue(ValueNull.INSTANCE); - read("="); - read("CALL"); - c = parseCall(); - break; - case '(': - c = parseSelect(); + setSQL(c, start); + return c; + case PARAMETER: + // read the ? as a parameter + // this is an 'out' parameter - set a dummy value + readParameter().setValue(ValueNull.INSTANCE); + read(EQUAL); + start = tokenIndex; + read("CALL"); + c = parseCall(); + break; + case OPEN_PAREN: + case SELECT: + case TABLE: + case VALUES: + c = parseQuery(); + break; + case WITH: + read(); + c = parseWithStatementOrQuery(start); + break; + case SET: + read(); + c = parseSet(); + break; + case IDENTIFIER: + if (token.isQuoted()) { break; - case 'a': + } + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + * + * Unquoted identifier is never empty. 
+ */ + switch (currentToken.charAt(0) & 0xffdf) { case 'A': if (readIf("ALTER")) { c = parseAlter(); @@ -334,7 +732,6 @@ private Prepared parsePrepared() { c = parseAnalyze(); } break; - case 'b': case 'B': if (readIf("BACKUP")) { c = parseBackup(); @@ -342,7 +739,6 @@ private Prepared parsePrepared() { c = parseBegin(); } break; - case 'c': case 'C': if (readIf("COMMIT")) { c = parseCommit(); @@ -356,64 +752,61 @@ private Prepared parsePrepared() { c = parseComment(); } break; - case 'd': case 'D': if (readIf("DELETE")) { - c = parseDelete(); + c = parseDelete(start); } else if (readIf("DROP")) { c = parseDrop(); } else if (readIf("DECLARE")) { // support for DECLARE GLOBAL TEMPORARY TABLE... c = parseCreate(); - } else if (readIf("DEALLOCATE")) { + } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer && readIf("DEALLOCATE")) { + /* + * PostgreSQL-style DEALLOCATE is disabled in MSSQLServer + * mode because PostgreSQL-style EXECUTE is redefined in + * this mode. + */ c = parseDeallocate(); } break; - case 'e': case 'E': if (readIf("EXPLAIN")) { c = parseExplain(); - } else if (readIf("EXECUTE")) { - c = parseExecute(); - } - break; - case 'f': - case 'F': - if (isToken("FROM")) { - c = parseSelect(); + } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer) { + if (readIf("EXECUTE")) { + c = parseExecutePostgre(); + } + } else { + if (readIf("EXEC") || readIf("EXECUTE")) { + c = parseExecuteSQLServer(); + } } break; - case 'g': case 'G': if (readIf("GRANT")) { c = parseGrantRevoke(CommandInterface.GRANT); } break; - case 'h': case 'H': if (readIf("HELP")) { c = parseHelp(); } break; - case 'i': case 'I': if (readIf("INSERT")) { - c = parseInsert(); + c = parseInsert(start); } break; - case 'm': case 'M': if (readIf("MERGE")) { - c = parseMerge(); + c = parseMerge(start); } break; - case 'p': case 'P': if (readIf("PREPARE")) { c = parsePrepare(); } break; - case 'r': case 'R': if (readIf("ROLLBACK")) { c = parseRollback(); @@ -423,17 +816,12 @@ 
private Prepared parsePrepared() { c = parseRunScript(); } else if (readIf("RELEASE")) { c = parseReleaseSavepoint(); - } else if (readIf("REPLACE")) { - c = parseReplace(); + } else if (database.getMode().replaceInto && readIf("REPLACE")) { + c = parseReplace(start); } break; - case 's': case 'S': - if (isToken("SELECT")) { - c = parseSelect(); - } else if (readIf("SET")) { - c = parseSet(); - } else if (readIf("SAVEPOINT")) { + if (readIf("SAVEPOINT")) { c = parseSavepoint(); } else if (readIf("SCRIPT")) { c = parseScript(); @@ -443,100 +831,80 @@ private Prepared parsePrepared() { c = parseShow(); } break; - case 't': case 'T': if (readIf("TRUNCATE")) { c = parseTruncate(); } break; - case 'u': case 'U': if (readIf("UPDATE")) { - c = parseUpdate(); + c = parseUpdate(start); } else if (readIf("USE")) { c = parseUse(); } break; - case 'v': - case 'V': - if (readIf("VALUES")) { - c = parseValues(); - } - break; - case 'w': - case 'W': - if (readIf("WITH")) { - c = parseWith(); - } - break; - case ';': - c = new NoOperation(session); - break; - default: - throw getSyntaxError(); } - if (indexedParameterList != null) { - for (int i = 0, size = indexedParameterList.size(); - i < size; i++) { - if (indexedParameterList.get(i) == null) { - indexedParameterList.set(i, new Parameter(i)); - } + } + if (c == null) { + throw getSyntaxError(); + } + if (parameters != null) { + for (int i = 0, size = parameters.size(); i < size; i++) { + if (parameters.get(i) == null) { + parameters.set(i, new Parameter(i)); } - parameters = indexedParameterList; } - if (readIf("{")) { - do { - int index = (int) readLong() - 1; - if (index < 0 || index >= parameters.size()) { - throw getSyntaxError(); - } - Parameter p = parameters.get(index); - if (p == null) { - throw getSyntaxError(); - } - read(":"); - Expression expr = readExpression(); - expr = expr.optimize(session); - p.setValue(expr.getValue(session)); - } while (readIf(",")); - read("}"); - for (Parameter p : parameters) { - 
p.checkSet(); + } + boolean withParamValues = readIf(OPEN_BRACE); + if (withParamValues) { + do { + int index = (int) readLong() - 1; + if (index < 0 || index >= parameters.size()) { + throw getSyntaxError(); + } + Parameter p = parameters.get(index); + if (p == null) { + throw getSyntaxError(); } - parameters.clear(); + read(COLON); + Expression expr = readExpression(); + expr = expr.optimize(session); + p.setValue(expr.getValue(session)); + } while (readIf(COMMA)); + read(CLOSE_BRACE); + for (Parameter p : parameters) { + p.checkSet(); } + parameters.clear(); } - if (c == null) { - throw getSyntaxError(); + if (withParamValues || c.getSQL() == null) { + setSQL(c, start); } - setSQL(c, null, start); return c; } private DbException getSyntaxError() { - if (expectedList == null || expectedList.size() == 0) { - return DbException.getSyntaxError(sqlCommand, parseIndex); + if (expectedList == null || expectedList.isEmpty()) { + return DbException.getSyntaxError(sqlCommand, token.start()); } - StatementBuilder buff = new StatementBuilder(); - for (String e : expectedList) { - buff.appendExceptFirst(", "); - buff.append(e); - } - return DbException.getSyntaxError(sqlCommand, parseIndex, - buff.toString()); + return DbException.getSyntaxError(sqlCommand, token.start(), String.join(", ", expectedList)); } private Prepared parseBackup() { BackupCommand command = new BackupCommand(session); - read("TO"); + read(TO); command.setFileName(readExpression()); return command; } private Prepared parseAnalyze() { Analyze command = new Analyze(session); + if (readIf(TABLE)) { + Table table = readTableOrView(); + command.setTable(table); + } if (readIf("SAMPLE_SIZE")) { - command.setTop(readPositiveInt()); + command.setTop(readNonNegativeInt()); } return command; } @@ -553,13 +921,11 @@ private TransactionCommand parseBegin() { private TransactionCommand parseCommit() { TransactionCommand command; if (readIf("TRANSACTION")) { - command = new TransactionCommand(session, - 
CommandInterface.COMMIT_TRANSACTION); - command.setTransactionName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.COMMIT_TRANSACTION); + command.setTransactionName(readIdentifier()); return command; } - command = new TransactionCommand(session, - CommandInterface.COMMIT); + command = new TransactionCommand(session, CommandInterface.COMMIT); readIf("WORK"); return command; } @@ -581,44 +947,51 @@ private TransactionCommand parseShutdown() { private TransactionCommand parseRollback() { TransactionCommand command; if (readIf("TRANSACTION")) { - command = new TransactionCommand(session, - CommandInterface.ROLLBACK_TRANSACTION); - command.setTransactionName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.ROLLBACK_TRANSACTION); + command.setTransactionName(readIdentifier()); return command; } - if (readIf("TO")) { + readIf("WORK"); + if (readIf(TO)) { read("SAVEPOINT"); - command = new TransactionCommand(session, - CommandInterface.ROLLBACK_TO_SAVEPOINT); - command.setSavepointName(readUniqueIdentifier()); + command = new TransactionCommand(session, CommandInterface.ROLLBACK_TO_SAVEPOINT); + command.setSavepointName(readIdentifier()); } else { - readIf("WORK"); - command = new TransactionCommand(session, - CommandInterface.ROLLBACK); + command = new TransactionCommand(session, CommandInterface.ROLLBACK); } return command; } private Prepared parsePrepare() { if (readIf("COMMIT")) { - TransactionCommand command = new TransactionCommand(session, - CommandInterface.PREPARE_COMMIT); - command.setTransactionName(readUniqueIdentifier()); + TransactionCommand command = new TransactionCommand(session, CommandInterface.PREPARE_COMMIT); + command.setTransactionName(readIdentifier()); return command; } - String procedureName = readAliasIdentifier(); - if (readIf("(")) { - ArrayList list = New.arrayList(); + return parsePrepareProcedure(); + } + + private Prepared parsePrepareProcedure() { + if 
(database.getMode().getEnum() == ModeEnum.MSSQLServer) { + throw getSyntaxError(); + /* + * PostgreSQL-style PREPARE is disabled in MSSQLServer mode + * because PostgreSQL-style EXECUTE is redefined in this + * mode. + */ + } + String procedureName = readIdentifier(); + if (readIf(OPEN_PAREN)) { + ArrayList list = Utils.newSmallArrayList(); for (int i = 0;; i++) { Column column = parseColumnForTable("C" + i, true); list.add(column); - if (readIf(")")) { + if (!readIfMore()) { break; } - read(","); } } - read("AS"); + read(AS); Prepared prep = parsePrepared(); PrepareProcedure command = new PrepareProcedure(session); command.setProcedureName(procedureName); @@ -627,20 +1000,19 @@ private Prepared parsePrepare() { } private TransactionCommand parseSavepoint() { - TransactionCommand command = new TransactionCommand(session, - CommandInterface.SAVEPOINT); - command.setSavepointName(readUniqueIdentifier()); + TransactionCommand command = new TransactionCommand(session, CommandInterface.SAVEPOINT); + command.setSavepointName(readIdentifier()); return command; } private Prepared parseReleaseSavepoint() { Prepared command = new NoOperation(session); readIf("SAVEPOINT"); - readUniqueIdentifier(); + readIdentifier(); return command; } - private Schema getSchema(String schemaName) { + private Schema findSchema(String schemaName) { if (schemaName == null) { return null; } @@ -649,303 +1021,351 @@ private Schema getSchema(String schemaName) { if (equalsToken("SESSION", schemaName)) { // for local temporary tables schema = database.getSchema(session.getCurrentSchemaName()); - } else if (database.getMode().sysDummy1 && - "SYSIBM".equals(schemaName)) { - // IBM DB2 and Apache Derby compatibility: SYSIBM.SYSDUMMY1 - } else { - throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); } } return schema; } + private Schema getSchema(String schemaName) { + if (schemaName == null) { + return null; + } + Schema schema = findSchema(schemaName); + if (schema == null) { + throw 
DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); + } + return schema; + } + private Schema getSchema() { return getSchema(schemaName); } + /* + * Gets the current schema for scenarios that need a guaranteed, non-null schema object. + * + * This routine is solely here + * because of the function readIdentifierWithSchema(String defaultSchemaName) - which + * is often called with a null parameter (defaultSchemaName) - then 6 lines into the function + * that routine nullifies the state field schemaName - which I believe is a bug. + * + * There are about 7 places where "readIdentifierWithSchema(null)" is called in this file. + * + * In other words when is it legal to not have an active schema defined by schemaName ? + * I don't think it's ever a valid case. I don't understand when that would be allowed. + * I spent a long time trying to figure this out. + * As another proof of this point, the command "SET SCHEMA=NULL" is not a valid command. + * + * I did try to fix this in readIdentifierWithSchema(String defaultSchemaName) + * - but every fix I tried cascaded so many unit test errors - so + * I gave up. I think this needs a bigger effort to fix his, as part of bigger, dedicated story. 
+ * + */ + private Schema getSchemaWithDefault() { + if (schemaName == null) { + schemaName = session.getCurrentSchemaName(); + } + return getSchema(schemaName); + } private Column readTableColumn(TableFilter filter) { - String tableAlias = null; - String columnName = readColumnIdentifier(); - if (readIf(".")) { + String columnName = readIdentifier(); + if (readIf(DOT)) { + columnName = readTableColumn(filter, columnName); + } + return filter.getTable().getColumn(columnName); + } + + private String readTableColumn(TableFilter filter, String tableAlias) { + String columnName = readIdentifier(); + if (readIf(DOT)) { + String schema = tableAlias; tableAlias = columnName; - columnName = readColumnIdentifier(); - if (readIf(".")) { - String schema = tableAlias; + columnName = readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(schema); + schema = tableAlias; tableAlias = columnName; - columnName = readColumnIdentifier(); - if (readIf(".")) { - String catalogName = schema; - schema = tableAlias; - tableAlias = columnName; - columnName = readColumnIdentifier(); - if (!equalsToken(catalogName, database.getShortName())) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, - catalogName); - } - } - if (!equalsToken(schema, filter.getTable().getSchema() - .getName())) { - throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema); - } + columnName = readIdentifier(); } - if (!equalsToken(tableAlias, filter.getTableAlias())) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, - tableAlias); + if (!equalsToken(schema, filter.getTable().getSchema().getName())) { + throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema); } } - if (database.getSettings().rowId) { - if (Column.ROWID.equals(columnName)) { - return filter.getRowIdColumn(); - } + if (!equalsToken(tableAlias, filter.getTableAlias())) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias); } - return filter.getTable().getColumn(columnName); + return columnName; } - private 
Update parseUpdate() { + private Update parseUpdate(int start) { Update command = new Update(session); currentPrepared = command; - int start = lastParseIndex; + Expression fetch = null; + if (database.getMode().topInDML && readIf("TOP")) { + read(OPEN_PAREN); + fetch = readTerm().optimize(session); + read(CLOSE_PAREN); + } TableFilter filter = readSimpleTableFilter(); command.setTableFilter(filter); - read("SET"); - if (readIf("(")) { - ArrayList columns = New.arrayList(); - do { - Column column = readTableColumn(filter); - columns.add(column); - } while (readIf(",")); - read(")"); - read("="); - Expression expression = readExpression(); - if (columns.size() == 1) { - // the expression is parsed as a simple value - command.setAssignment(columns.get(0), expression); - } else { - for (int i = 0, size = columns.size(); i < size; i++) { - Column column = columns.get(i); - Function f = Function.getFunction(database, "ARRAY_GET"); - f.setParameter(0, expression); - f.setParameter(1, ValueExpression.get(ValueInt.get(i + 1))); - f.doneWithParameters(); - command.setAssignment(column, f); - } - } - } else { - do { - Column column = readTableColumn(filter); - read("="); - Expression expression; - if (readIf("DEFAULT")) { - expression = ValueExpression.getDefault(); - } else { - expression = readExpression(); - } - command.setAssignment(column, expression); - } while (readIf(",")); + command.setSetClauseList(readUpdateSetClause(filter)); + if (database.getMode().allowUsingFromClauseInUpdateStatement && readIf(FROM)) { + TableFilter fromTable = readTablePrimary(); + command.setFromTableFilter(fromTable); } - if (readIf("WHERE")) { - Expression condition = readExpression(); - command.setCondition(condition); + if (readIf(WHERE)) { + command.setCondition(readExpression()); } - if (readIf("ORDER")) { + if (fetch == null) { // for MySQL compatibility // (this syntax is supported, but ignored) - read("BY"); - parseSimpleOrderList(); - } - if (readIf("LIMIT")) { - Expression limit 
= readTerm().optimize(session); - command.setLimit(limit); + readIfOrderBy(); + fetch = readFetchOrLimit(); } - setSQL(command, "UPDATE", start); + command.setFetch(fetch); + setSQL(command, start); return command; } - private TableFilter readSimpleTableFilter() { - Table table = readTableOrView(); - String alias = null; - if (readIf("AS")) { - alias = readAliasIdentifier(); - } else if (currentTokenType == IDENTIFIER) { - if (!equalsToken("SET", currentToken)) { - // SET is not a keyword (PostgreSQL supports it as a table name) - alias = readAliasIdentifier(); + private SetClauseList readUpdateSetClause(TableFilter filter) { + read(SET); + SetClauseList list = new SetClauseList(filter.getTable()); + do { + if (readIf(OPEN_PAREN)) { + ArrayList columns = Utils.newSmallArrayList(); + do { + columns.add(readTableColumn(filter)); + } while (readIfMore()); + read(EQUAL); + list.addMultiple(columns, readExpression()); + } else { + Column column = readTableColumn(filter); + read(EQUAL); + list.addSingle(column, readExpressionOrDefault()); } - } - return new TableFilter(session, table, alias, rightsChecked, - currentSelect); + } while (readIf(COMMA)); + return list; + } + + private TableFilter readSimpleTableFilter() { + return new TableFilter(session, readTableOrView(), readFromAlias(null), rightsChecked, currentSelect, 0, null); } - private Delete parseDelete() { + private Delete parseDelete(int start) { Delete command = new Delete(session); - Expression limit = null; - if (readIf("TOP")) { - limit = readTerm().optimize(session); + Expression fetch = null; + if (database.getMode().topInDML && readIf("TOP")) { + fetch = readTerm().optimize(session); } currentPrepared = command; - int start = lastParseIndex; - readIf("FROM"); - TableFilter filter = readSimpleTableFilter(); - command.setTableFilter(filter); - if (readIf("WHERE")) { - Expression condition = readExpression(); - command.setCondition(condition); + if (!readIf(FROM) && database.getMode().getEnum() == 
ModeEnum.MySQL) { + readIdentifierWithSchema(); + read(FROM); } - if (readIf("LIMIT") && limit == null) { - limit = readTerm().optimize(session); + command.setTableFilter(readSimpleTableFilter()); + if (readIf(WHERE)) { + command.setCondition(readExpression()); } - command.setLimit(limit); - setSQL(command, "DELETE", start); + if (fetch == null) { + fetch = readFetchOrLimit(); + } + command.setFetch(fetch); + setSQL(command, start); return command; } - private IndexColumn[] parseIndexColumnList() { - ArrayList columns = New.arrayList(); - do { - IndexColumn column = new IndexColumn(); - column.columnName = readColumnIdentifier(); - columns.add(column); - if (readIf("ASC")) { - // ignore - } else if (readIf("DESC")) { - column.sortType = SortOrder.DESCENDING; + private Expression readFetchOrLimit() { + Expression fetch = null; + if (readIf(FETCH)) { + if (!readIf("FIRST")) { + read("NEXT"); } - if (readIf("NULLS")) { - if (readIf("FIRST")) { - column.sortType |= SortOrder.NULLS_FIRST; - } else { - read("LAST"); - column.sortType |= SortOrder.NULLS_LAST; + if (readIf(ROW) || readIf("ROWS")) { + fetch = ValueExpression.get(ValueInteger.get(1)); + } else { + fetch = readExpression().optimize(session); + if (!readIf(ROW)) { + read("ROWS"); } } - } while (readIf(",")); - read(")"); - return columns.toArray(new IndexColumn[columns.size()]); + read("ONLY"); + } else if (database.getMode().limit && readIf(LIMIT)) { + fetch = readTerm().optimize(session); + } + return fetch; + } + + private IndexColumn[] parseIndexColumnList() { + ArrayList columns = Utils.newSmallArrayList(); + do { + columns.add(new IndexColumn(readIdentifier(), parseSortType())); + } while (readIfMore()); + return columns.toArray(new IndexColumn[0]); + } + + private int parseSortType() { + int sortType = !readIf("ASC") && readIf("DESC") ? 
SortOrder.DESCENDING : SortOrder.ASCENDING; + if (readIf("NULLS")) { + if (readIf("FIRST")) { + sortType |= SortOrder.NULLS_FIRST; + } else { + read("LAST"); + sortType |= SortOrder.NULLS_LAST; + } + } + return sortType; } private String[] parseColumnList() { - ArrayList columns = New.arrayList(); + ArrayList columns = Utils.newSmallArrayList(); do { - String columnName = readColumnIdentifier(); - columns.add(columnName); + columns.add(readIdentifier()); } while (readIfMore()); - return columns.toArray(new String[columns.size()]); + return columns.toArray(new String[0]); } private Column[] parseColumnList(Table table) { - ArrayList columns = New.arrayList(); - HashSet set = New.hashSet(); - if (!readIf(")")) { + ArrayList columns = Utils.newSmallArrayList(); + HashSet set = new HashSet<>(); + if (!readIf(CLOSE_PAREN)) { do { Column column = parseColumn(table); if (!set.add(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, - column.getSQL()); + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getTraceSQL()); } columns.add(column); } while (readIfMore()); } - return columns.toArray(new Column[columns.size()]); + return columns.toArray(new Column[0]); } private Column parseColumn(Table table) { - String id = readColumnIdentifier(); - if (database.getSettings().rowId && Column.ROWID.equals(id)) { + if (currentTokenType == _ROWID_) { + read(); return table.getRowIdColumn(); } - return table.getColumn(id); + return table.getColumn(readIdentifier()); } + /** + * Read comma or closing brace. 
+ * + * @return {@code true} if comma is read, {@code false} if brace is read + */ private boolean readIfMore() { - if (readIf(",")) { - return !readIf(")"); + if (readIf(COMMA)) { + return true; } - read(")"); + read(CLOSE_PAREN); return false; } private Prepared parseHelp() { - StringBuilder buff = new StringBuilder( - "SELECT * FROM INFORMATION_SCHEMA.HELP"); - int i = 0; - ArrayList paramValues = New.arrayList(); - while (currentTokenType != END) { - String s = currentToken; + HashSet conditions = new HashSet<>(); + while (currentTokenType != END_OF_INPUT) { + conditions.add(StringUtils.toUpperEnglish(currentToken)); read(); - if (i == 0) { - buff.append(" WHERE "); - } else { - buff.append(" AND "); - } - i++; - buff.append("UPPER(TOPIC) LIKE ?"); - paramValues.add(ValueString.get("%" + s + "%")); } - return prepare(session, buff.toString(), paramValues); + return new Help(session, conditions.toArray(new String[0])); } private Prepared parseShow() { - ArrayList paramValues = New.arrayList(); + ArrayList paramValues = Utils.newSmallArrayList(); StringBuilder buff = new StringBuilder("SELECT "); if (readIf("CLIENT_ENCODING")) { // for PostgreSQL compatibility - buff.append("'UNICODE' AS CLIENT_ENCODING FROM DUAL"); + buff.append("'UNICODE' CLIENT_ENCODING"); } else if (readIf("DEFAULT_TRANSACTION_ISOLATION")) { // for PostgreSQL compatibility - buff.append("'read committed' AS DEFAULT_TRANSACTION_ISOLATION " + - "FROM DUAL"); + buff.append("'read committed' DEFAULT_TRANSACTION_ISOLATION"); } else if (readIf("TRANSACTION")) { // for PostgreSQL compatibility read("ISOLATION"); read("LEVEL"); - buff.append("'read committed' AS TRANSACTION_ISOLATION " + - "FROM DUAL"); + buff.append("LOWER(ISOLATION_LEVEL) TRANSACTION_ISOLATION FROM INFORMATION_SCHEMA.SESSIONS " + + "WHERE SESSION_ID = SESSION_ID()"); } else if (readIf("DATESTYLE")) { // for PostgreSQL compatibility - buff.append("'ISO' AS DATESTYLE FROM DUAL"); + buff.append("'ISO' DATESTYLE"); + } else if 
(readIf("SEARCH_PATH")) { + // for PostgreSQL compatibility + String[] searchPath = session.getSchemaSearchPath(); + StringBuilder searchPathBuff = new StringBuilder(); + if (searchPath != null) { + for (int i = 0; i < searchPath.length; i ++) { + if (i > 0) { + searchPathBuff.append(", "); + } + ParserUtil.quoteIdentifier(searchPathBuff, searchPath[i], HasSQL.QUOTE_ONLY_WHEN_REQUIRED); + } + } + StringUtils.quoteStringSQL(buff, searchPathBuff.toString()); + buff.append(" SEARCH_PATH"); } else if (readIf("SERVER_VERSION")) { // for PostgreSQL compatibility - buff.append("'8.1.4' AS SERVER_VERSION FROM DUAL"); + buff.append("'" + Constants.PG_VERSION + "' SERVER_VERSION"); } else if (readIf("SERVER_ENCODING")) { // for PostgreSQL compatibility - buff.append("'UTF8' AS SERVER_ENCODING FROM DUAL"); + buff.append("'UTF8' SERVER_ENCODING"); + } else if (readIf("SSL")) { + // for PostgreSQL compatibility + buff.append("'off' SSL"); } else if (readIf("TABLES")) { // for MySQL compatibility - String schema = Constants.SCHEMA_MAIN; - if (readIf("FROM")) { - schema = readUniqueIdentifier(); + String schema = database.getMainSchema().getName(); + if (readIf(FROM)) { + schema = readIdentifier(); } buff.append("TABLE_NAME, TABLE_SCHEMA FROM " + "INFORMATION_SCHEMA.TABLES " + "WHERE TABLE_SCHEMA=? 
ORDER BY TABLE_NAME"); - paramValues.add(ValueString.get(schema)); + paramValues.add(ValueVarchar.get(schema)); } else if (readIf("COLUMNS")) { // for MySQL compatibility - read("FROM"); + read(FROM); String tableName = readIdentifierWithSchema(); String schemaName = getSchema().getName(); - paramValues.add(ValueString.get(tableName)); - if (readIf("FROM")) { - schemaName = readUniqueIdentifier(); - } - buff.append("C.COLUMN_NAME FIELD, " - + "C.TYPE_NAME || '(' || C.NUMERIC_PRECISION || ')' TYPE, " + paramValues.add(ValueVarchar.get(tableName)); + if (readIf(FROM)) { + schemaName = readIdentifier(); + } + buff.append("C.COLUMN_NAME FIELD, "); + boolean oldInformationSchema = session.isOldInformationSchema(); + buff.append(oldInformationSchema + ? "C.COLUMN_TYPE" + : "DATA_TYPE_SQL(?2, ?1, 'TABLE', C.DTD_IDENTIFIER)"); + buff.append(" TYPE, " + "C.IS_NULLABLE \"NULL\", " + "CASE (SELECT MAX(I.INDEX_TYPE_NAME) FROM " - + "INFORMATION_SCHEMA.INDEXES I " - + "WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA " - + "AND I.TABLE_NAME=C.TABLE_NAME " - + "AND I.COLUMN_NAME=C.COLUMN_NAME)" + + "INFORMATION_SCHEMA.INDEXES I "); + if (!oldInformationSchema) { + buff.append("JOIN INFORMATION_SCHEMA.INDEX_COLUMNS IC "); + } + buff.append("WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA " + + "AND I.TABLE_NAME=C.TABLE_NAME "); + if (oldInformationSchema) { + buff.append("AND I.COLUMN_NAME=C.COLUMN_NAME"); + } else { + buff.append("AND IC.TABLE_SCHEMA=C.TABLE_SCHEMA " + + "AND IC.TABLE_NAME=C.TABLE_NAME " + + "AND IC.INDEX_SCHEMA=I.INDEX_SCHEMA " + + "AND IC.INDEX_NAME=I.INDEX_NAME " + + "AND IC.COLUMN_NAME=C.COLUMN_NAME"); + } + buff.append(')' + "WHEN 'PRIMARY KEY' THEN 'PRI' " - + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END KEY, " - + "IFNULL(COLUMN_DEFAULT, 'NULL') DEFAULT " + + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END `KEY`, " + + "COALESCE(COLUMN_DEFAULT, 'NULL') `DEFAULT` " + "FROM INFORMATION_SCHEMA.COLUMNS C " - + "WHERE C.TABLE_NAME=? AND C.TABLE_SCHEMA=? 
" + + "WHERE C.TABLE_NAME=?1 AND C.TABLE_SCHEMA=?2 " + "ORDER BY C.ORDINAL_POSITION"); - paramValues.add(ValueString.get(schemaName)); + paramValues.add(ValueVarchar.get(schemaName)); } else if (readIf("DATABASES") || readIf("SCHEMAS")) { // for MySQL compatibility buff.append("SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA"); + } else if (database.getMode().getEnum() == ModeEnum.PostgreSQL && readIf("ALL")) { + // for PostgreSQL compatibility + buff.append("NAME, SETTING FROM PG_CATALOG.PG_SETTINGS"); } boolean b = session.getAllowLiterals(); try { @@ -958,7 +1378,7 @@ private Prepared parseShow() { } } - private static Prepared prepare(Session s, String sql, + private static Prepared prepare(SessionLocal s, String sql, ArrayList paramValues) { Prepared prep = s.prepare(sql); ArrayList params = prep.getParameters(); @@ -971,299 +1391,726 @@ private static Prepared prepare(Session s, String sql, return prep; } - private boolean isSelect() { - int start = lastParseIndex; - while (readIf("(")) { - // need to read ahead, it could be a nested union: - // ((select 1) union (select 1)) + private boolean isDerivedTable() { + int offset = tokenIndex; + int level = 0; + while (tokens.get(offset).tokenType() == OPEN_PAREN) { + level++; + offset++; + } + boolean query = isDirectQuery(offset); + s: if (query && level > 0) { + offset = scanToCloseParen(offset + 1); + if (offset < 0) { + query = false; + break s; + } + for (;;) { + switch (tokens.get(offset).tokenType()) { + case SEMICOLON: + case END_OF_INPUT: + query = false; + break s; + case OPEN_PAREN: + offset = scanToCloseParen(offset + 1); + if (offset < 0) { + query = false; + break s; + } + break; + case CLOSE_PAREN: + if (--level == 0) { + break s; + } + offset++; + break; + case JOIN: + query = false; + break s; + default: + offset++; + } + } } - boolean select = isToken("SELECT") || isToken("FROM"); - parseIndex = start; - read(); - return select; + return query; } - private Merge parseMerge() { - Merge command = new 
Merge(session); - currentPrepared = command; + private boolean isQuery() { + int offset = tokenIndex; + int level = 0; + while (tokens.get(offset).tokenType() == OPEN_PAREN) { + level++; + offset++; + } + boolean query = isDirectQuery(offset); + s: if (query && level > 0) { + offset++; + do { + offset = scanToCloseParen(offset); + if (offset < 0) { + query = false; + break s; + } + switch (tokens.get(offset).tokenType()) { + default: + query = false; + break s; + case END_OF_INPUT: + case SEMICOLON: + case CLOSE_PAREN: + case ORDER: + case OFFSET: + case FETCH: + case LIMIT: + case UNION: + case EXCEPT: + case MINUS: + case INTERSECT: + } + } while (--level > 0); + } + return query; + } + + private int scanToCloseParen(int offset) { + for (int level = 0;;) { + switch (tokens.get(offset).tokenType()) { + case SEMICOLON: + case END_OF_INPUT: + return -1; + case OPEN_PAREN: + level++; + break; + case CLOSE_PAREN: + if (--level < 0) { + return offset + 1; + } + } + offset++; + } + } + + private boolean isQueryQuick() { + int offset = tokenIndex; + while (tokens.get(offset).tokenType() == OPEN_PAREN) { + offset++; + } + return isDirectQuery(offset); + } + + private boolean isDirectQuery(int offset) { + boolean query; + switch (tokens.get(offset).tokenType()) { + case SELECT: + case VALUES: + case WITH: + query = true; + break; + case TABLE: + query = tokens.get(offset + 1).tokenType() != OPEN_PAREN; + break; + default: + query = false; + } + return query; + } + + private Prepared parseMerge(int start) { read("INTO"); - Table table = readTableOrView(); - command.setTable(table); - if (readIf("(")) { - if (isSelect()) { - command.setQuery(parseSelect()); - read(")"); + TableFilter targetTableFilter = readSimpleTableFilter(); + if (readIf(USING)) { + return parseMergeUsing(targetTableFilter, start); + } + return parseMergeInto(targetTableFilter, start); + } + + private Prepared parseMergeInto(TableFilter targetTableFilter, int start) { + Merge command = new Merge(session, 
false); + currentPrepared = command; + command.setTable(targetTableFilter.getTable()); + Table table = command.getTable(); + if (readIf(OPEN_PAREN)) { + if (isQueryQuick()) { + command.setQuery(parseQuery()); + read(CLOSE_PAREN); return command; } - Column[] columns = parseColumnList(table); - command.setColumns(columns); + command.setColumns(parseColumnList(table)); } - if (readIf("KEY")) { - read("("); - Column[] keys = parseColumnList(table); - command.setKeys(keys); + if (readIf(KEY)) { + read(OPEN_PAREN); + command.setKeys(parseColumnList(table)); } - if (readIf("VALUES")) { - do { - ArrayList values = New.arrayList(); - read("("); - if (!readIf(")")) { - do { - if (readIf("DEFAULT")) { - values.add(null); - } else { - values.add(readExpression()); - } - } while (readIfMore()); - } - command.addRow(values.toArray(new Expression[values.size()])); - } while (readIf(",")); + if (readIf(VALUES)) { + parseValuesForCommand(command); } else { - command.setQuery(parseSelect()); + command.setQuery(parseQuery()); } + setSQL(command, start); return command; } - private Insert parseInsert() { - Insert command = new Insert(session); + private MergeUsing parseMergeUsing(TableFilter targetTableFilter, int start) { + MergeUsing command = new MergeUsing(session, targetTableFilter); currentPrepared = command; - read("INTO"); - Table table = readTableOrView(); - command.setTable(table); - Column[] columns = null; - if (readIf("(")) { - if (isSelect()) { - command.setQuery(parseSelect()); - read(")"); - return command; + command.setSourceTableFilter(readTableReference()); + read(ON); + Expression condition = readExpression(); + command.setOnCondition(condition); + + read(WHEN); + do { + boolean matched = readIf("MATCHED"); + if (matched) { + parseWhenMatched(command); + } else { + parseWhenNotMatched(command); + } + } while (readIf(WHEN)); + + setSQL(command, start); + return command; + } + + private void parseWhenMatched(MergeUsing command) { + Expression and = readIf(AND) ? 
readExpression() : null; + read("THEN"); + MergeUsing.When when; + if (readIf("UPDATE")) { + MergeUsing.WhenMatchedThenUpdate update = command.new WhenMatchedThenUpdate(); + update.setSetClauseList(readUpdateSetClause(command.getTargetTableFilter())); + when = update; + } else { + read("DELETE"); + when = command.new WhenMatchedThenDelete(); + } + if (and == null && database.getMode().mergeWhere && readIf(WHERE)) { + and = readExpression(); + } + when.setAndCondition(and); + command.addWhen(when); + } + + private void parseWhenNotMatched(MergeUsing command) { + read(NOT); + read("MATCHED"); + Expression and = readIf(AND) ? readExpression() : null; + read("THEN"); + read("INSERT"); + Column[] columns = readIf(OPEN_PAREN) ? parseColumnList(command.getTargetTableFilter().getTable()) : null; + Boolean overridingSystem = readIfOverriding(); + read(VALUES); + read(OPEN_PAREN); + ArrayList values = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + values.add(readExpressionOrDefault()); + } while (readIfMore()); + } + MergeUsing.WhenNotMatched when = command.new WhenNotMatched(columns, overridingSystem, + values.toArray(new Expression[0])); + when.setAndCondition(and); + command.addWhen(when); + } + + private Insert parseInsert(int start) { + Insert command = new Insert(session); + currentPrepared = command; + Mode mode = database.getMode(); + if (mode.onDuplicateKeyUpdate && readIf("IGNORE")) { + command.setIgnore(true); + } + read("INTO"); + Table table = readTableOrView(); + command.setTable(table); + Column[] columns = null; + if (readIf(OPEN_PAREN)) { + if (isQueryQuick()) { + command.setQuery(parseQuery()); + read(CLOSE_PAREN); + return command; } columns = parseColumnList(table); command.setColumns(columns); } + Boolean overridingSystem = readIfOverriding(); + command.setOverridingSystem(overridingSystem); + boolean requireQuery = false; if (readIf("DIRECT")) { + requireQuery = true; command.setInsertFromSelect(true); } if (readIf("SORTED")) { - 
command.setSortedInsertMode(true); - } - if (readIf("DEFAULT")) { - read("VALUES"); - Expression[] expr = {}; - command.addRow(expr); - } else if (readIf("VALUES")) { - read("("); - do { - ArrayList values = New.arrayList(); - if (!readIf(")")) { - do { - if (readIf("DEFAULT")) { - values.add(null); - } else { - values.add(readExpression()); - } - } while (readIfMore()); + requireQuery = true; + } + readValues: { + if (!requireQuery) { + if (overridingSystem == null && readIf(DEFAULT)) { + read(VALUES); + command.addRow(new Expression[0]); + break readValues; } - command.addRow(values.toArray(new Expression[values.size()])); - // the following condition will allow (..),; and (..); - } while (readIf(",") && readIf("(")); - } else if (readIf("SET")) { - if (columns != null) { - throw getSyntaxError(); - } - ArrayList columnList = New.arrayList(); - ArrayList values = New.arrayList(); - do { - columnList.add(parseColumn(table)); - read("="); - Expression expression; - if (readIf("DEFAULT")) { - expression = ValueExpression.getDefault(); - } else { - expression = readExpression(); + if (readIf(VALUES)) { + parseValuesForCommand(command); + break readValues; } - values.add(expression); - } while (readIf(",")); - command.setColumns(columnList.toArray(new Column[columnList.size()])); - command.addRow(values.toArray(new Expression[values.size()])); - } else { - command.setQuery(parseSelect()); + if (readIf(SET)) { + parseInsertSet(command, table, columns); + break readValues; + } + } + command.setQuery(parseQuery()); + } + if (mode.onDuplicateKeyUpdate || mode.insertOnConflict || mode.isolationLevelInSelectOrInsertStatement) { + parseInsertCompatibility(command, table, mode); + } + setSQL(command, start); + return command; + } + + private Boolean readIfOverriding() { + Boolean overridingSystem = null; + if (readIf("OVERRIDING")) { + if (readIf(USER)) { + overridingSystem = Boolean.FALSE; + } else { + read("SYSTEM"); + overridingSystem = Boolean.TRUE; + } + read(VALUE); + } 
+ return overridingSystem; + } + + private void parseInsertSet(Insert command, Table table, Column[] columns) { + if (columns != null) { + throw getSyntaxError(); } - if (database.getMode().onDuplicateKeyUpdate) { - if (readIf("ON")) { + ArrayList columnList = Utils.newSmallArrayList(); + ArrayList values = Utils.newSmallArrayList(); + do { + columnList.add(parseColumn(table)); + read(EQUAL); + values.add(readExpressionOrDefault()); + } while (readIf(COMMA)); + command.setColumns(columnList.toArray(new Column[0])); + command.addRow(values.toArray(new Expression[0])); + } + + private void parseInsertCompatibility(Insert command, Table table, Mode mode) { + if (mode.onDuplicateKeyUpdate) { + if (readIf(ON)) { read("DUPLICATE"); - read("KEY"); + read(KEY); read("UPDATE"); do { - Column column = parseColumn(table); - read("="); - Expression expression; - if (readIf("DEFAULT")) { - expression = ValueExpression.getDefault(); - } else { - expression = readExpression(); + String columnName = readIdentifier(); + if (readIf(DOT)) { + String schemaOrTableName = columnName; + String tableOrColumnName = readIdentifier(); + if (readIf(DOT)) { + if (!table.getSchema().getName().equals(schemaOrTableName)) { + throw DbException.get(ErrorCode.SCHEMA_NAME_MUST_MATCH); + } + columnName = readIdentifier(); + } else { + columnName = tableOrColumnName; + tableOrColumnName = schemaOrTableName; + } + if (!table.getName().equals(tableOrColumnName)) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableOrColumnName); + } } - command.addAssignmentForDuplicate(column, expression); - } while (readIf(",")); + Column column = table.getColumn(columnName); + read(EQUAL); + command.addAssignmentForDuplicate(column, readExpressionOrDefault()); + } while (readIf(COMMA)); } } - if (database.getMode().isolationLevelInSelectOrInsertStatement) { + if (mode.insertOnConflict) { + if (readIf(ON)) { + read("CONFLICT"); + read("DO"); + read("NOTHING"); + command.setIgnore(true); + } + } + if 
(mode.isolationLevelInSelectOrInsertStatement) { parseIsolationClause(); } - return command; } /** * MySQL compatibility. REPLACE is similar to MERGE. */ - private Replace parseReplace() { - Replace command = new Replace(session); + private Merge parseReplace(int start) { + Merge command = new Merge(session, true); currentPrepared = command; read("INTO"); Table table = readTableOrView(); command.setTable(table); - if (readIf("(")) { - if (isSelect()) { - command.setQuery(parseSelect()); - read(")"); + if (readIf(OPEN_PAREN)) { + if (isQueryQuick()) { + command.setQuery(parseQuery()); + read(CLOSE_PAREN); return command; } - Column[] columns = parseColumnList(table); - command.setColumns(columns); + command.setColumns(parseColumnList(table)); } - if (readIf("VALUES")) { - do { - ArrayList values = New.arrayList(); - read("("); - if (!readIf(")")) { - do { - if (readIf("DEFAULT")) { - values.add(null); - } else { - values.add(readExpression()); - } - } while (readIfMore()); - } - command.addRow(values.toArray(new Expression[values.size()])); - } while (readIf(",")); + if (readIf(VALUES)) { + parseValuesForCommand(command); } else { - command.setQuery(parseSelect()); + command.setQuery(parseQuery()); } + setSQL(command, start); return command; } - private TableFilter readTableFilter(boolean fromOuter) { + private void parseValuesForCommand(CommandWithValues command) { + ArrayList values = Utils.newSmallArrayList(); + do { + values.clear(); + boolean multiColumn; + if (readIf(ROW)) { + read(OPEN_PAREN); + multiColumn = true; + } else { + multiColumn = readIf(OPEN_PAREN); + } + if (multiColumn) { + if (!readIf(CLOSE_PAREN)) { + do { + values.add(readExpressionOrDefault()); + } while (readIfMore()); + } + } else { + values.add(readExpressionOrDefault()); + } + command.addRow(values.toArray(new Expression[0])); + } while (readIf(COMMA)); + } + + private TableFilter readTablePrimary() { Table table; String alias = null; - if (readIf("(")) { - if (isSelect()) { - Query 
query = parseSelectUnion(); - read(")"); - query.setParameterList(New.arrayList(parameters)); - query.init(); - Session s; - if (createView != null) { - s = database.getSystemSession(); - } else { - s = session; - } - alias = session.getNextSystemIdentifier(sqlCommand); - table = TableView.createTempView(s, session.getUser(), alias, - query, currentSelect); + label: if (readIf(OPEN_PAREN)) { + if (isDerivedTable()) { + // Derived table + return readDerivedTableWithCorrelation(); } else { - TableFilter top; - if (database.getSettings().nestedJoins) { - top = readTableFilter(false); - top = readJoin(top, currentSelect, false, false); - top = getNested(top); - } else { - top = readTableFilter(fromOuter); - top = readJoin(top, currentSelect, false, fromOuter); - } - read(")"); - alias = readFromAlias(null); - if (alias != null) { - top.setAlias(alias); + // Parenthesized joined table + TableFilter tableFilter = readTableReference(); + read(CLOSE_PAREN); + return readCorrelation(tableFilter); + } + } else if (readIf(VALUES)) { + TableValueConstructor query = parseValues(); + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); + } else if (readIf(TABLE)) { + // Table function derived table + read(OPEN_PAREN); + ArrayTableFunction function = readTableFunction(ArrayTableFunction.TABLE); + table = new FunctionTable(database.getMainSchema(), session, function); + } else { + boolean quoted = token.isQuoted(); + String tableName = readIdentifier(); + int backupIndex = tokenIndex; + schemaName = null; + if (readIf(DOT)) { + tableName = readIdentifierWithSchema2(tableName); + } else if (!quoted && readIf(TABLE)) { + table = readDataChangeDeltaTable(upperName(tableName), backupIndex); + break label; + } + Schema schema; + if (schemaName == null) { + schema = null; + } else { + schema = findSchema(schemaName); + if (schema == null) { + if (isDualTable(tableName)) { + table = new 
DualTable(database); + break label; + } + throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); } - return top; } - } else if (readIf("VALUES")) { - table = parseValuesTable().getTable(); - } else { - String tableName = readIdentifierWithSchema(null); - Schema schema = getSchema(); - boolean foundLeftBracket = readIf("("); - if (foundLeftBracket && readIf("INDEX")) { + boolean foundLeftParen = readIf(OPEN_PAREN); + if (foundLeftParen && readIf("INDEX")) { // Sybase compatibility with // "select * from test (index table1_index)" readIdentifierWithSchema(null); - read(")"); - foundLeftBracket = false; + read(CLOSE_PAREN); + foundLeftParen = false; } - if (foundLeftBracket) { - Schema mainSchema = database.getSchema(Constants.SCHEMA_MAIN); + if (foundLeftParen) { + Schema mainSchema = database.getMainSchema(); if (equalsToken(tableName, RangeTable.NAME) || equalsToken(tableName, RangeTable.ALIAS)) { Expression min = readExpression(); - read(","); + read(COMMA); Expression max = readExpression(); - if (readIf(",")) { + if (readIf(COMMA)) { Expression step = readExpression(); - read(")"); - table = new RangeTable(mainSchema, min, max, step, - false); + read(CLOSE_PAREN); + table = new RangeTable(mainSchema, min, max, step); } else { - read(")"); - table = new RangeTable(mainSchema, min, max, false); + read(CLOSE_PAREN); + table = new RangeTable(mainSchema, min, max); } } else { - Expression expr = readFunction(schema, tableName); - if (!(expr instanceof FunctionCall)) { - throw getSyntaxError(); - } - FunctionCall call = (FunctionCall) expr; - if (!call.isDeterministic()) { - recompileAlways = true; - } - table = new FunctionTable(mainSchema, session, expr, call); + table = new FunctionTable(mainSchema, session, readTableFunction(tableName, schema)); } - } else if (equalsToken("DUAL", tableName)) { - table = getDualTable(false); - } else if (database.getMode().sysDummy1 && - equalsToken("SYSDUMMY1", tableName)) { - table = getDualTable(false); } else { table 
= readTableOrView(tableName); } } - alias = readFromAlias(alias); - return new TableFilter(session, table, alias, rightsChecked, - currentSelect); + ArrayList derivedColumnNames = null; + IndexHints indexHints = null; + if (readIfUseIndex()) { + indexHints = parseIndexHints(table); + } else { + alias = readFromAlias(alias); + if (alias != null) { + derivedColumnNames = readDerivedColumnNames(); + if (readIfUseIndex()) { + indexHints = parseIndexHints(table); + } + } + } + return buildTableFilter(table, alias, derivedColumnNames, indexHints); } - private String readFromAlias(String alias) { - if (readIf("AS")) { - alias = readAliasIdentifier(); - } else if (currentTokenType == IDENTIFIER) { - // left and right are not keywords (because they are functions as - // well) - if (!isToken("LEFT") && !isToken("RIGHT") && !isToken("FULL")) { - alias = readAliasIdentifier(); + private TableFilter readCorrelation(TableFilter tableFilter) { + String alias = readFromAlias(null); + if (alias != null) { + tableFilter.setAlias(alias); + ArrayList derivedColumnNames = readDerivedColumnNames(); + if (derivedColumnNames != null) { + tableFilter.setDerivedColumns(derivedColumnNames); + } + } + return tableFilter; + } + + private TableFilter readDerivedTableWithCorrelation() { + Query query = parseQueryExpression(); + read(CLOSE_PAREN); + Table table; + String alias; + ArrayList derivedColumnNames = null; + IndexHints indexHints = null; + if (readIfUseIndex()) { + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); + indexHints = parseIndexHints(table); + } else { + alias = readFromAlias(null); + if (alias != null) { + derivedColumnNames = readDerivedColumnNames(); + Column[] columnTemplates = null; + if (derivedColumnNames != null) { + query.init(); + columnTemplates = TableView.createQueryColumnTemplateList( + derivedColumnNames.toArray(new String[0]), query, new String[1]) + .toArray(new 
Column[0]); + } + table = query.toTable(alias, columnTemplates, parameters, createView != null, currentSelect); + if (readIfUseIndex()) { + indexHints = parseIndexHints(table); + } + } else { + alias = session.getNextSystemIdentifier(sqlCommand); + table = query.toTable(alias, null, parameters, createView != null, currentSelect); + } + } + return buildTableFilter(table, alias, derivedColumnNames, indexHints); + } + + private TableFilter buildTableFilter(Table table, String alias, ArrayList derivedColumnNames, + IndexHints indexHints) { + if (database.getMode().discardWithTableHints) { + discardWithTableHints(); + } + // inherit alias for CTE as views from table name + if (alias == null && table.isView() && table.isTableExpression()) { + alias = table.getName(); + } + TableFilter filter = new TableFilter(session, table, alias, rightsChecked, + currentSelect, orderInFrom++, indexHints); + if (derivedColumnNames != null) { + filter.setDerivedColumns(derivedColumnNames); + } + return filter; + } + + private Table readDataChangeDeltaTable(String resultOptionName, int backupIndex) { + read(OPEN_PAREN); + int start = tokenIndex; + DataChangeStatement statement; + ResultOption resultOption = ResultOption.FINAL; + switch (resultOptionName) { + case "OLD": + resultOption = ResultOption.OLD; + if (readIf("UPDATE")) { + statement = parseUpdate(start); + } else if (readIf("DELETE")) { + statement = parseDelete(start); + } else if (readIf("MERGE")) { + statement = (DataChangeStatement) parseMerge(start); + } else if (database.getMode().replaceInto && readIf("REPLACE")) { + statement = parseReplace(start); + } else { + throw getSyntaxError(); + } + break; + case "NEW": + resultOption = ResultOption.NEW; + //$FALL-THROUGH$ + case "FINAL": + if (readIf("INSERT")) { + statement = parseInsert(start); + } else if (readIf("UPDATE")) { + statement = parseUpdate(start); + } else if (readIf("MERGE")) { + statement = (DataChangeStatement) parseMerge(start); + } else if 
(database.getMode().replaceInto && readIf("REPLACE")) { + statement = parseReplace(start); + } else { + throw getSyntaxError(); + } + break; + default: + setTokenIndex(backupIndex); + addExpected("OLD TABLE"); + addExpected("NEW TABLE"); + addExpected("FINAL TABLE"); + throw getSyntaxError(); + } + read(CLOSE_PAREN); + if (currentSelect != null) { + // Lobs aren't copied, so use it for more safety + currentSelect.setNeverLazy(true); + } + return new DataChangeDeltaTable(getSchemaWithDefault(), session, statement, resultOption); + } + + private TableFunction readTableFunction(String name, Schema schema) { + if (schema == null) { + switch (upperName(name)) { + case "UNNEST": + return readUnnestFunction(); + case "TABLE_DISTINCT": + return readTableFunction(ArrayTableFunction.TABLE_DISTINCT); + case "CSVREAD": + recompileAlways = true; + return readParameters(new CSVReadFunction()); + case "LINK_SCHEMA": + recompileAlways = true; + return readParameters(new LinkSchemaFunction()); } } + FunctionAlias functionAlias = getFunctionAliasWithinPath(name, schema); + if (!functionAlias.isDeterministic()) { + recompileAlways = true; + } + ArrayList argList = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + argList.add(readExpression()); + } while (readIfMore()); + } + return new JavaTableFunction(functionAlias, argList.toArray(new Expression[0])); + } + + private boolean readIfUseIndex() { + int start = tokenIndex; + if (!readIf("USE")) { + return false; + } + if (!readIf("INDEX")) { + setTokenIndex(start); + return false; + } + return true; + } + + private IndexHints parseIndexHints(Table table) { + read(OPEN_PAREN); + LinkedHashSet indexNames = new LinkedHashSet<>(); + if (!readIf(CLOSE_PAREN)) { + do { + String indexName = readIdentifierWithSchema(); + Index index = table.getIndex(indexName); + indexNames.add(index.getName()); + } while (readIfMore()); + } + return IndexHints.createUseIndexHints(indexNames); + } + + private String readFromAlias(String alias) 
{ + if (readIf(AS) || isIdentifier()) { + alias = readIdentifier(); + } return alias; } + private ArrayList readDerivedColumnNames() { + if (readIf(OPEN_PAREN)) { + ArrayList derivedColumnNames = new ArrayList<>(); + do { + derivedColumnNames.add(readIdentifier()); + } while (readIfMore()); + return derivedColumnNames; + } + return null; + } + + private void discardWithTableHints() { + if (readIf(WITH)) { + read(OPEN_PAREN); + do { + discardTableHint(); + } while (readIfMore()); + } + } + + private void discardTableHint() { + if (readIf("INDEX")) { + if (readIf(OPEN_PAREN)) { + do { + readExpression(); + } while (readIfMore()); + } else { + read(EQUAL); + readExpression(); + } + } else { + readExpression(); + } + } + private Prepared parseTruncate() { - read("TABLE"); + read(TABLE); Table table = readTableOrView(); + boolean restart = database.getMode().truncateTableRestartIdentity; + if (readIf("CONTINUE")) { + read("IDENTITY"); + restart = false; + } else if (readIf("RESTART")) { + read("IDENTITY"); + restart = true; + } TruncateTable command = new TruncateTable(session); command.setTable(table); + command.setRestart(restart); return command; } private boolean readIfExists(boolean ifExists) { - if (readIf("IF")) { - read("EXISTS"); + if (readIf(IF)) { + read(EXISTS); ifExists = true; } return ifExists; @@ -1271,16 +2118,16 @@ private boolean readIfExists(boolean ifExists) { private Prepared parseComment() { int type = 0; - read("ON"); + read(ON); boolean column = false; - if (readIf("TABLE") || readIf("VIEW")) { + if (readIf(TABLE) || readIf("VIEW")) { type = DbObject.TABLE_OR_VIEW; } else if (readIf("COLUMN")) { column = true; type = DbObject.TABLE_OR_VIEW; } else if (readIf("CONSTANT")) { type = DbObject.CONSTANT; - } else if (readIf("CONSTRAINT")) { + } else if (readIf(CONSTRAINT)) { type = DbObject.CONSTRAINT; } else if (readIf("ALIAS")) { type = DbObject.FUNCTION_ALIAS; @@ -1294,10 +2141,10 @@ private Prepared parseComment() { type = DbObject.SEQUENCE; } 
else if (readIf("TRIGGER")) { type = DbObject.TRIGGER; - } else if (readIf("USER")) { + } else if (readIf(USER)) { type = DbObject.USER; } else if (readIf("DOMAIN")) { - type = DbObject.USER_DATATYPE; + type = DbObject.DOMAIN; } else { throw getSyntaxError(); } @@ -1305,63 +2152,58 @@ private Prepared parseComment() { String objectName; if (column) { // can't use readIdentifierWithSchema() because - // it would not read schema.table.column correctly - // if the db name is equal to the schema name - ArrayList list = New.arrayList(); - do { - list.add(readUniqueIdentifier()); - } while (readIf(".")); - schemaName = session.getCurrentSchemaName(); - if (list.size() == 4) { - if (!equalsToken(database.getShortName(), list.get(0))) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "database name"); - } - list.remove(0); - } - if (list.size() == 3) { - schemaName = list.get(0); - list.remove(0); - } - if (list.size() != 2) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "table.column"); - } - objectName = list.get(0); + // it would not read [catalog.]schema.table.column correctly + objectName = readIdentifier(); + String tmpSchemaName = null; + read(DOT); + boolean allowEmpty = database.getMode().allowEmptySchemaValuesAsDefaultSchema; + String columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier(); + if (readIf(DOT)) { + tmpSchemaName = objectName; + objectName = columnName; + columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(tmpSchemaName); + tmpSchemaName = objectName; + objectName = columnName; + columnName = readIdentifier(); + } + } + if (columnName == null || objectName == null) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "table.column"); + } + schemaName = tmpSchemaName != null ? 
tmpSchemaName : session.getCurrentSchemaName(); command.setColumn(true); - command.setColumnName(list.get(1)); + command.setColumnName(columnName); } else { objectName = readIdentifierWithSchema(); } command.setSchemaName(schemaName); command.setObjectName(objectName); command.setObjectType(type); - read("IS"); + read(IS); command.setCommentExpression(readExpression()); return command; } private Prepared parseDrop() { - if (readIf("TABLE")) { + if (readIf(TABLE)) { boolean ifExists = readIfExists(false); - String tableName = readIdentifierWithSchema(); - DropTable command = new DropTable(session, getSchema()); - command.setTableName(tableName); - while (readIf(",")) { - tableName = readIdentifierWithSchema(); - DropTable next = new DropTable(session, getSchema()); - next.setTableName(tableName); - command.addNextDropTable(next); - } + DropTable command = new DropTable(session); + do { + String tableName = readIdentifierWithSchema(); + command.addTable(getSchema(), tableName); + } while (readIf(COMMA)); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); if (readIf("CASCADE")) { - command.setDropAction(ConstraintReferential.CASCADE); + command.setDropAction(ConstraintActionType.CASCADE); readIf("CONSTRAINTS"); } else if (readIf("RESTRICT")) { - command.setDropAction(ConstraintReferential.RESTRICT); + command.setDropAction(ConstraintActionType.RESTRICT); } else if (readIf("IGNORE")) { - command.setDropAction(ConstraintReferential.SET_DEFAULT); + // TODO SET_DEFAULT works in the same way as CASCADE + command.setDropAction(ConstraintActionType.SET_DEFAULT); } return command; } else if (readIf("INDEX")) { @@ -1371,11 +2213,15 @@ private Prepared parseDrop() { command.setIndexName(indexName); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); + //Support for MySQL: DROP INDEX index_name ON tbl_name + if (readIf(ON)) { + readIdentifierWithSchema(); + } return command; - } else if (readIf("USER")) { + } else if (readIf(USER)) { boolean ifExists 
= readIfExists(false); DropUser command = new DropUser(session); - command.setUserName(readUniqueIdentifier()); + command.setUserName(readIdentifier()); ifExists = readIfExists(ifExists); readIf("CASCADE"); command.setIfExists(ifExists); @@ -1411,7 +2257,7 @@ private Prepared parseDrop() { command.setViewName(viewName); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); - Integer dropAction = parseCascadeOrRestrict(); + ConstraintActionType dropAction = parseCascadeOrRestrict(); if (dropAction != null) { command.setDropAction(dropAction); } @@ -1419,7 +2265,7 @@ private Prepared parseDrop() { } else if (readIf("ROLE")) { boolean ifExists = readIfExists(false); DropRole command = new DropRole(session); - command.setRoleName(readUniqueIdentifier()); + command.setRoleName(readIdentifier()); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); return command; @@ -1435,11 +2281,15 @@ private Prepared parseDrop() { } else if (readIf("SCHEMA")) { boolean ifExists = readIfExists(false); DropSchema command = new DropSchema(session); - command.setSchemaName(readUniqueIdentifier()); + command.setSchemaName(readIdentifier()); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); + ConstraintActionType dropAction = parseCascadeOrRestrict(); + if (dropAction != null) { + command.setDropAction(dropAction); + } return command; - } else if (readIf("ALL")) { + } else if (readIf(ALL)) { read("OBJECTS"); DropDatabase command = new DropDatabase(session); command.setDropAllObjects(true); @@ -1448,200 +2298,240 @@ private Prepared parseDrop() { command.setDeleteFiles(true); } return command; - } else if (readIf("DOMAIN")) { - return parseDropUserDataType(); - } else if (readIf("TYPE")) { - return parseDropUserDataType(); - } else if (readIf("DATATYPE")) { - return parseDropUserDataType(); + } else if (readIf("DOMAIN") || readIf("TYPE") || readIf("DATATYPE")) { + return parseDropDomain(); } else if (readIf("AGGREGATE")) { return 
parseDropAggregate(); + } else if (readIf("SYNONYM")) { + boolean ifExists = readIfExists(false); + String synonymName = readIdentifierWithSchema(); + DropSynonym command = new DropSynonym(session, getSchema()); + command.setSynonymName(synonymName); + ifExists = readIfExists(ifExists); + command.setIfExists(ifExists); + return command; } throw getSyntaxError(); } - private DropUserDataType parseDropUserDataType() { + private DropDomain parseDropDomain() { boolean ifExists = readIfExists(false); - DropUserDataType command = new DropUserDataType(session); - command.setTypeName(readUniqueIdentifier()); + String domainName = readIdentifierWithSchema(); + DropDomain command = new DropDomain(session, getSchema()); + command.setDomainName(domainName); ifExists = readIfExists(ifExists); - command.setIfExists(ifExists); + command.setIfDomainExists(ifExists); + ConstraintActionType dropAction = parseCascadeOrRestrict(); + if (dropAction != null) { + command.setDropAction(dropAction); + } return command; } private DropAggregate parseDropAggregate() { boolean ifExists = readIfExists(false); - DropAggregate command = new DropAggregate(session); - command.setName(readUniqueIdentifier()); + String name = readIdentifierWithSchema(); + DropAggregate command = new DropAggregate(session, getSchema()); + command.setName(name); ifExists = readIfExists(ifExists); command.setIfExists(ifExists); return command; } - private TableFilter readJoin(TableFilter top, Select command, - boolean nested, boolean fromOuter) { - boolean joined = false; - TableFilter last = top; - boolean nestedJoins = database.getSettings().nestedJoins; - while (true) { - if (readIf("RIGHT")) { + private TableFilter readTableReference() { + for (TableFilter top, last = top = readTablePrimary(), join;; last = join) { + switch (currentTokenType) { + case RIGHT: { + read(); readIf("OUTER"); - read("JOIN"); - joined = true; + read(JOIN); // the right hand side is the 'inner' table usually - TableFilter newTop = 
readTableFilter(fromOuter); - newTop = readJoin(newTop, command, nested, true); - Expression on = null; - if (readIf("ON")) { - on = readExpression(); - } - if (nestedJoins) { - top = getNested(top); - newTop.addJoin(top, true, false, on); - } else { - newTop.addJoin(top, true, false, on); - } - top = newTop; - last = newTop; - } else if (readIf("LEFT")) { + join = readTableReference(); + Expression on = readJoinSpecification(top, join, true); + addJoin(join, top, true, on); + top = join; + break; + } + case LEFT: { + read(); readIf("OUTER"); - read("JOIN"); - joined = true; - TableFilter join = readTableFilter(true); - if (nestedJoins) { - join = readJoin(join, command, true, true); - } else { - top = readJoin(top, command, false, true); - } - Expression on = null; - if (readIf("ON")) { - on = readExpression(); - } - top.addJoin(join, true, false, on); - last = join; - } else if (readIf("FULL")) { + read(JOIN); + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); + addJoin(top, join, true, on); + break; + } + case FULL: + read(); throw getSyntaxError(); - } else if (readIf("INNER")) { - read("JOIN"); - joined = true; - TableFilter join = readTableFilter(fromOuter); - top = readJoin(top, command, false, false); - Expression on = null; - if (readIf("ON")) { - on = readExpression(); - } - if (nestedJoins) { - top.addJoin(join, false, false, on); - } else { - top.addJoin(join, fromOuter, false, on); - } - last = join; - } else if (readIf("JOIN")) { - joined = true; - TableFilter join = readTableFilter(fromOuter); - top = readJoin(top, command, false, false); - Expression on = null; - if (readIf("ON")) { - on = readExpression(); - } - if (nestedJoins) { - top.addJoin(join, false, false, on); - } else { - top.addJoin(join, fromOuter, false, on); - } - last = join; - } else if (readIf("CROSS")) { - read("JOIN"); - joined = true; - TableFilter join = readTableFilter(fromOuter); - if (nestedJoins) { - top.addJoin(join, false, false, 
null); - } else { - top.addJoin(join, fromOuter, false, null); - } - last = join; - } else if (readIf("NATURAL")) { - read("JOIN"); - joined = true; - TableFilter join = readTableFilter(fromOuter); - Column[] tableCols = last.getTable().getColumns(); - Column[] joinCols = join.getTable().getColumns(); - String tableSchema = last.getTable().getSchema().getName(); - String joinSchema = join.getTable().getSchema().getName(); + case INNER: { + read(); + read(JOIN); + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); + addJoin(top, join, false, on); + break; + } + case JOIN: { + read(); + join = readTableReference(); + Expression on = readJoinSpecification(top, join, false); + addJoin(top, join, false, on); + break; + } + case CROSS: { + read(); + read(JOIN); + join = readTablePrimary(); + addJoin(top, join, false, null); + break; + } + case NATURAL: { + read(); + read(JOIN); + join = readTablePrimary(); Expression on = null; - for (Column tc : tableCols) { - String tableColumnName = tc.getName(); - for (Column c : joinCols) { - String joinColumnName = c.getName(); - if (equalsToken(tableColumnName, joinColumnName)) { - join.addNaturalJoinColumn(c); - Expression tableExpr = new ExpressionColumn( - database, tableSchema, - last.getTableAlias(), tableColumnName); - Expression joinExpr = new ExpressionColumn( - database, joinSchema, join.getTableAlias(), - joinColumnName); - Expression equal = new Comparison(session, - Comparison.EQUAL, tableExpr, joinExpr); - if (on == null) { - on = equal; - } else { - on = new ConditionAndOr(ConditionAndOr.AND, on, - equal); - } - } + for (Column column1 : last.getTable().getColumns()) { + Column column2 = join.getColumn(last.getColumnName(column1), true); + if (column2 != null) { + on = addJoinColumn(on, last, join, column1, column2, false); } } - if (nestedJoins) { - top.addJoin(join, false, nested, on); - } else { - top.addJoin(join, fromOuter, false, on); - } - last = join; - } else { + 
addJoin(top, join, false, on); break; } + default: + if (expectedList != null) { + // FULL is intentionally excluded + addMultipleExpected(RIGHT, LEFT, INNER, JOIN, CROSS, NATURAL); + } + return top; + } } - if (nested && joined) { - top = getNested(top); - } - return top; } - private TableFilter getNested(TableFilter n) { - String joinTable = Constants.PREFIX_JOIN + parseIndex; - TableFilter top = new TableFilter(session, getDualTable(true), - joinTable, rightsChecked, currentSelect); - top.addJoin(n, false, true, null); - return top; + private Expression readJoinSpecification(TableFilter filter1, TableFilter filter2, boolean rightJoin) { + Expression on = null; + if (readIf(ON)) { + on = readExpression(); + } else if (readIf(USING)) { + read(OPEN_PAREN); + do { + String columnName = readIdentifier(); + on = addJoinColumn(on, filter1, filter2, filter1.getColumn(columnName, false), + filter2.getColumn(columnName, false), rightJoin); + } while (readIfMore()); + } + return on; } - private Prepared parseExecute() { - ExecuteProcedure command = new ExecuteProcedure(session); - String procedureName = readAliasIdentifier(); - Procedure p = session.getProcedure(procedureName); - if (p == null) { - throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1, - procedureName); + private Expression addJoinColumn(Expression on, TableFilter filter1, TableFilter filter2, Column column1, + Column column2, boolean rightJoin) { + if (rightJoin) { + filter1.addCommonJoinColumns(column1, column2, filter2); + filter2.addCommonJoinColumnToExclude(column2); + } else { + filter1.addCommonJoinColumns(column1, column1, filter1); + filter2.addCommonJoinColumnToExclude(column2); + } + Expression tableExpr = new ExpressionColumn(database, filter1.getSchemaName(), filter1.getTableAlias(), + filter1.getColumnName(column1)); + Expression joinExpr = new ExpressionColumn(database, filter2.getSchemaName(), filter2.getTableAlias(), + filter2.getColumnName(column2)); + Expression equal = new 
Comparison(Comparison.EQUAL, tableExpr, joinExpr, false); + if (on == null) { + on = equal; + } else { + on = new ConditionAndOr(ConditionAndOr.AND, on, equal); } - command.setProcedure(p); - if (readIf("(")) { + return on; + } + + /** + * Add one join to another. This method creates nested join between them if + * required. + * + * @param top parent join + * @param join child join + * @param outer if child join is an outer join + * @param on the join condition + * @see TableFilter#addJoin(TableFilter, boolean, Expression) + */ + private void addJoin(TableFilter top, TableFilter join, boolean outer, Expression on) { + if (join.getJoin() != null) { + String joinTable = Constants.PREFIX_JOIN + token.start(); + TableFilter n = new TableFilter(session, new DualTable(database), + joinTable, rightsChecked, currentSelect, join.getOrderInFrom(), + null); + n.setNestedJoin(join); + join = n; + } + top.addJoin(join, outer, on); + } + + private Prepared parseExecutePostgre() { + if (readIf("IMMEDIATE")) { + return new ExecuteImmediate(session, readExpression()); + } + ExecuteProcedure command = new ExecuteProcedure(session); + String procedureName = readIdentifier(); + Procedure p = session.getProcedure(procedureName); + if (p == null) { + throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1, + procedureName); + } + command.setProcedure(p); + if (readIf(OPEN_PAREN)) { for (int i = 0;; i++) { command.setExpression(i, readExpression()); - if (readIf(")")) { + if (!readIfMore()) { break; } - read(","); } } return command; } + private Prepared parseExecuteSQLServer() { + Call command = new Call(session); + currentPrepared = command; + String schemaName = null; + String name = readIdentifier(); + if (readIf(DOT)) { + schemaName = name; + name = readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(schemaName); + schemaName = name; + name = readIdentifier(); + } + } + FunctionAlias functionAlias = getFunctionAliasWithinPath(name, + schemaName != null ? 
database.getSchema(schemaName) : null); + Expression[] args; + ArrayList argList = Utils.newSmallArrayList(); + if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) { + do { + argList.add(readExpression()); + } while (readIf(COMMA)); + } + args = argList.toArray(new Expression[0]); + command.setExpression(new JavaFunction(functionAlias, args)); + return command; + } + + private FunctionAlias getFunctionAliasWithinPath(String name, Schema schema) { + UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, name); + if (userDefinedFunction instanceof FunctionAlias) { + return (FunctionAlias) userDefinedFunction; + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + private DeallocateProcedure parseDeallocate() { readIf("PLAN"); - String procedureName = readAliasIdentifier(); DeallocateProcedure command = new DeallocateProcedure(session); - command.setProcedureName(procedureName); + command.setProcedureName(readIdentifier()); return command; } @@ -1651,32 +2541,42 @@ private Explain parseExplain() { command.setExecuteCommand(true); } else { if (readIf("PLAN")) { - readIf("FOR"); + readIf(FOR); } } - if (isToken("SELECT") || isToken("FROM") || isToken("(")) { - command.setCommand(parseSelect()); - } else if (readIf("DELETE")) { - command.setCommand(parseDelete()); - } else if (readIf("UPDATE")) { - command.setCommand(parseUpdate()); - } else if (readIf("INSERT")) { - command.setCommand(parseInsert()); - } else if (readIf("MERGE")) { - command.setCommand(parseMerge()); - } else if (readIf("WITH")) { - command.setCommand(parseWith()); - } else { - throw getSyntaxError(); + switch (currentTokenType) { + case SELECT: + case TABLE: + case VALUES: + case WITH: + case OPEN_PAREN: + Query query = parseQuery(); + query.setNeverLazy(true); + command.setCommand(query); + break; + default: + int start = tokenIndex; + if (readIf("DELETE")) { + command.setCommand(parseDelete(start)); + } else if (readIf("UPDATE")) { + 
command.setCommand(parseUpdate(start)); + } else if (readIf("INSERT")) { + command.setCommand(parseInsert(start)); + } else if (readIf("MERGE")) { + command.setCommand(parseMerge(start)); + } else { + throw getSyntaxError(); + } } return command; } - private Query parseSelect() { + private Query parseQuery() { int paramIndex = parameters.size(); - Query command = parseSelectUnion(); - ArrayList params = New.arrayList(); - for (int i = paramIndex, size = parameters.size(); i < size; i++) { + Query command = parseQueryExpression(); + int size = parameters.size(); + ArrayList params = new ArrayList<>(size); + for (int i = paramIndex; i < size; i++) { params.add(parameters.get(i)); } command.setParameterList(params); @@ -1684,63 +2584,89 @@ private Query parseSelect() { return command; } - private Query parseSelectUnion() { - int start = lastParseIndex; - Query command = parseSelectSub(); - return parseSelectUnionExtension(command, start, false); + private Prepared parseWithStatementOrQuery(int start) { + int paramIndex = parameters.size(); + Prepared command = parseWith(); + int size = parameters.size(); + ArrayList params = new ArrayList<>(size); + for (int i = paramIndex; i < size; i++) { + params.add(parameters.get(i)); + } + command.setParameterList(params); + if (command instanceof Query) { + Query query = (Query) command; + query.init(); + } + setSQL(command, start); + return command; + } + + private Query parseQueryExpression() { + Query query; + if (readIf(WITH)) { + try { + query = (Query) parseWith(); + } catch (ClassCastException e) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "WITH statement supports only query in this context"); + } + // recursive can not be lazy + query.setNeverLazy(true); + } else { + query = parseQueryExpressionBodyAndEndOfQuery(); + } + return query; } - private Query parseSelectUnionExtension(Query command, int start, - boolean unionOnly) { - while (true) { - if (readIf("UNION")) { - SelectUnion union = new 
SelectUnion(session, command); - if (readIf("ALL")) { - union.setUnionType(SelectUnion.UNION_ALL); + private Query parseQueryExpressionBodyAndEndOfQuery() { + int start = tokenIndex; + Query command = parseQueryExpressionBody(); + parseEndOfQuery(command); + setSQL(command, start); + return command; + } + + private Query parseQueryExpressionBody() { + Query command = parseQueryTerm(); + for (;;) { + SelectUnion.UnionType type; + if (readIf(UNION)) { + if (readIf(ALL)) { + type = SelectUnion.UnionType.UNION_ALL; } else { - readIf("DISTINCT"); - union.setUnionType(SelectUnion.UNION); - } - union.setRight(parseSelectSub()); - command = union; - } else if (readIf("MINUS") || readIf("EXCEPT")) { - SelectUnion union = new SelectUnion(session, command); - union.setUnionType(SelectUnion.EXCEPT); - union.setRight(parseSelectSub()); - command = union; - } else if (readIf("INTERSECT")) { - SelectUnion union = new SelectUnion(session, command); - union.setUnionType(SelectUnion.INTERSECT); - union.setRight(parseSelectSub()); - command = union; + readIf(DISTINCT); + type = SelectUnion.UnionType.UNION; + } + } else if (readIf(EXCEPT) || readIf(MINUS)) { + type = SelectUnion.UnionType.EXCEPT; } else { break; } + command = new SelectUnion(session, type, command, parseQueryTerm()); } - if (!unionOnly) { - parseEndOfQuery(command); + return command; + } + + private Query parseQueryTerm() { + Query command = parseQueryPrimary(); + while (readIf(INTERSECT)) { + command = new SelectUnion(session, SelectUnion.UnionType.INTERSECT, command, parseQueryPrimary()); } - setSQL(command, null, start); return command; } private void parseEndOfQuery(Query command) { - if (readIf("ORDER")) { + if (readIf(ORDER)) { read("BY"); Select oldSelect = currentSelect; if (command instanceof Select) { currentSelect = (Select) command; } - ArrayList orderList = New.arrayList(); + ArrayList orderList = Utils.newSmallArrayList(); do { - boolean canBeNumber = true; - if (readIf("=")) { - canBeNumber = false; - } 
- SelectOrderBy order = new SelectOrderBy(); + boolean canBeNumber = currentTokenType == LITERAL; + QueryOrderBy order = new QueryOrderBy(); Expression expr = readExpression(); - if (canBeNumber && expr instanceof ValueExpression && - expr.getType() == Value.INT) { + if (canBeNumber && expr instanceof ValueExpression && expr.getType().getValueType() == Value.INTEGER) { order.columnIndexExpr = expr; } else if (expr instanceof Parameter) { recompileAlways = true; @@ -1748,87 +2674,74 @@ private void parseEndOfQuery(Query command) { } else { order.expression = expr; } - if (readIf("DESC")) { - order.descending = true; - } else { - readIf("ASC"); - } - if (readIf("NULLS")) { - if (readIf("FIRST")) { - order.nullsFirst = true; - } else { - read("LAST"); - order.nullsLast = true; - } - } + order.sortType = parseSortType(); orderList.add(order); - } while (readIf(",")); + } while (readIf(COMMA)); command.setOrder(orderList); currentSelect = oldSelect; } - if (database.getMode().supportOffsetFetch) { + if (command.getFetch() == null) { // make sure aggregate functions will not work here Select temp = currentSelect; currentSelect = null; - - // http://sqlpro.developpez.com/SQL2008/ - if (readIf("OFFSET")) { + boolean hasOffsetOrFetch = false; + // Standard SQL OFFSET / FETCH + if (readIf(OFFSET)) { + hasOffsetOrFetch = true; command.setOffset(readExpression().optimize(session)); - if (!readIf("ROW")) { - read("ROWS"); + if (!readIf(ROW)) { + readIf("ROWS"); } } - if (readIf("FETCH")) { + if (readIf(FETCH)) { + hasOffsetOrFetch = true; if (!readIf("FIRST")) { read("NEXT"); } - if (readIf("ROW")) { - command.setLimit(ValueExpression.get(ValueInt.get(1))); + if (readIf(ROW) || readIf("ROWS")) { + command.setFetch(ValueExpression.get(ValueInteger.get(1))); } else { - Expression limit = readExpression().optimize(session); - command.setLimit(limit); - if (!readIf("ROW")) { + command.setFetch(readExpression().optimize(session)); + if (readIf("PERCENT")) { + 
command.setFetchPercent(true); + } + if (!readIf(ROW)) { read("ROWS"); } } - read("ONLY"); + if (readIf(WITH)) { + read("TIES"); + command.setWithTies(true); + } else { + read("ONLY"); + } } - - currentSelect = temp; - } - if (readIf("LIMIT")) { - Select temp = currentSelect; - // make sure aggregate functions will not work here - currentSelect = null; - Expression limit = readExpression().optimize(session); - command.setLimit(limit); - if (readIf("OFFSET")) { - Expression offset = readExpression().optimize(session); - command.setOffset(offset); - } else if (readIf(",")) { - // MySQL: [offset, ] rowcount - Expression offset = limit; - limit = readExpression().optimize(session); - command.setOffset(offset); - command.setLimit(limit); - } - if (readIf("SAMPLE_SIZE")) { - Expression sampleSize = readExpression().optimize(session); - command.setSampleSize(sampleSize); + // MySQL-style LIMIT / OFFSET + if (!hasOffsetOrFetch && database.getMode().limit && readIf(LIMIT)) { + Expression limit = readExpression().optimize(session); + if (readIf(OFFSET)) { + command.setOffset(readExpression().optimize(session)); + } else if (readIf(COMMA)) { + // MySQL: [offset, ] rowcount + Expression offset = limit; + limit = readExpression().optimize(session); + command.setOffset(offset); + } + command.setFetch(limit); } currentSelect = temp; } - if (readIf("FOR")) { + if (readIf(FOR)) { if (readIf("UPDATE")) { if (readIf("OF")) { do { readIdentifierWithSchema(); - } while (readIf(",")); + } while (readIf(COMMA)); } else if (readIf("NOWAIT")) { // TODO parser: select for update nowait: should not wait } command.setForUpdate(true); - } else if (readIf("READ") || readIf("FETCH")) { + } else if (readIf("READ") || readIf(FETCH)) { read("ONLY"); } } @@ -1841,11 +2754,11 @@ private void parseEndOfQuery(Query command) { * DB2 isolation clause */ private void parseIsolationClause() { - if (readIf("WITH")) { + if (readIf(WITH)) { if (readIf("RR") || readIf("RS")) { // concurrent-access-resolution 
clause if (readIf("USE")) { - read("AND"); + read(AND); read("KEEP"); if (readIf("SHARE") || readIf("UPDATE") || readIf("EXCLUSIVE")) { @@ -1859,1230 +2772,2973 @@ private void parseIsolationClause() { } } - private Query parseSelectSub() { - if (readIf("(")) { - Query command = parseSelectUnion(); - read(")"); + private Query parseQueryPrimary() { + if (readIf(OPEN_PAREN)) { + Query command = parseQueryExpressionBodyAndEndOfQuery(); + read(CLOSE_PAREN); return command; } - Select select = parseSelectSimple(); - return select; + int start = tokenIndex; + if (readIf(SELECT)) { + return parseSelect(start); + } else if (readIf(TABLE)) { + return parseExplicitTable(start); + } + read(VALUES); + return parseValues(); } - private void parseSelectSimpleFromPart(Select command) { + private void parseSelectFromPart(Select command) { do { - TableFilter filter = readTableFilter(false); - parseJoinTableFilter(filter, command); - } while (readIf(",")); - } - - private void parseJoinTableFilter(TableFilter top, final Select command) { - top = readJoin(top, command, false, top.isJoinOuter()); - command.addTableFilter(top, true); - boolean isOuter = false; - while (true) { - TableFilter n = top.getNestedJoin(); - if (n != null) { - n.visit(new TableFilterVisitor() { - @Override - public void accept(TableFilter f) { - command.addTableFilter(f, false); + TableFilter top = readTableReference(); + command.addTableFilter(top, true); + boolean isOuter = false; + for (;;) { + TableFilter n = top.getNestedJoin(); + if (n != null) { + n.visit(f -> command.addTableFilter(f, false)); + } + TableFilter join = top.getJoin(); + if (join == null) { + break; + } + isOuter = isOuter | join.isJoinOuter(); + if (isOuter) { + command.addTableFilter(join, false); + } else { + // make flat so the optimizer can work better + Expression on = join.getJoinCondition(); + if (on != null) { + command.addCondition(on); } - }); - } - TableFilter join = top.getJoin(); - if (join == null) { - break; - } - isOuter 
= isOuter | join.isJoinOuter(); - if (isOuter) { - command.addTableFilter(join, false); - } else { - // make flat so the optimizer can work better - Expression on = join.getJoinCondition(); - if (on != null) { - command.addCondition(on); + join.removeJoinCondition(); + top.removeJoin(); + command.addTableFilter(join, true); } - join.removeJoinCondition(); - top.removeJoin(); - command.addTableFilter(join, true); + top = join; } - top = join; - } + } while (readIf(COMMA)); } - private void parseSelectSimpleSelectPart(Select command) { - Select temp = currentSelect; - // make sure aggregate functions will not work in TOP and LIMIT - currentSelect = null; - if (readIf("TOP")) { + private void parseSelectExpressions(Select command) { + if (database.getMode().topInSelect && readIf("TOP")) { + Select temp = currentSelect; + // make sure aggregate functions will not work in TOP and LIMIT + currentSelect = null; // can't read more complex expressions here because // SELECT TOP 1 +? A FROM TEST could mean // SELECT TOP (1+?) A FROM TEST or // SELECT TOP 1 (+?) 
AS A FROM TEST - Expression limit = readTerm().optimize(session); - command.setLimit(limit); - } else if (readIf("LIMIT")) { - Expression offset = readTerm().optimize(session); - command.setOffset(offset); - Expression limit = readTerm().optimize(session); - command.setLimit(limit); - } - currentSelect = temp; - if (readIf("DISTINCT")) { - command.setDistinct(true); + command.setFetch(readTerm().optimize(session)); + if (readIf("PERCENT")) { + command.setFetchPercent(true); + } + if (readIf(WITH)) { + read("TIES"); + command.setWithTies(true); + } + currentSelect = temp; + } + if (readIf(DISTINCT)) { + if (readIf(ON)) { + read(OPEN_PAREN); + ArrayList distinctExpressions = Utils.newSmallArrayList(); + do { + distinctExpressions.add(readExpression()); + } while (readIfMore()); + command.setDistinct(distinctExpressions.toArray(new Expression[0])); + } else { + command.setDistinct(); + } } else { - readIf("ALL"); + readIf(ALL); } - ArrayList expressions = New.arrayList(); + ArrayList expressions = Utils.newSmallArrayList(); do { - if (readIf("*")) { - expressions.add(new Wildcard(null, null)); + if (readIf(ASTERISK)) { + expressions.add(parseWildcard(null, null)); } else { - Expression expr = readExpression(); - if (readIf("AS") || currentTokenType == IDENTIFIER) { - String alias = readAliasIdentifier(); - boolean aliasColumnName = database.getSettings().aliasColumnName; - aliasColumnName |= database.getMode().aliasColumnName; - expr = new Alias(expr, alias, aliasColumnName); + switch (currentTokenType) { + case FROM: + case WHERE: + case GROUP: + case HAVING: + case WINDOW: + case QUALIFY: + case ORDER: + case OFFSET: + case FETCH: + case CLOSE_PAREN: + case SEMICOLON: + case END_OF_INPUT: + break; + default: + Expression expr = readExpression(); + if (readIf(AS) || isIdentifier()) { + expr = new Alias(expr, readIdentifier(), database.getMode().aliasColumnName); + } + expressions.add(expr); } - expressions.add(expr); } - } while (readIf(",")); + } while 
(readIf(COMMA)); command.setExpressions(expressions); } - private Select parseSelectSimple() { - boolean fromFirst; - if (readIf("SELECT")) { - fromFirst = false; - } else if (readIf("FROM")) { - fromFirst = true; - } else { - throw getSyntaxError(); - } - Select command = new Select(session); - int start = lastParseIndex; + private Select parseSelect(int start) { + Select command = new Select(session, currentSelect); Select oldSelect = currentSelect; + Prepared oldPrepared = currentPrepared; currentSelect = command; currentPrepared = command; - if (fromFirst) { - parseSelectSimpleFromPart(command); - read("SELECT"); - parseSelectSimpleSelectPart(command); + parseSelectExpressions(command); + if (!readIf(FROM)) { + // select without FROM + TableFilter filter = new TableFilter(session, new DualTable(database), null, rightsChecked, + currentSelect, 0, null); + command.addTableFilter(filter, true); } else { - parseSelectSimpleSelectPart(command); - if (!readIf("FROM")) { - // select without FROM: convert to SELECT ... 
FROM - // SYSTEM_RANGE(1,1) - Table dual = getDualTable(false); - TableFilter filter = new TableFilter(session, dual, null, - rightsChecked, currentSelect); - command.addTableFilter(filter, true); - } else { - parseSelectSimpleFromPart(command); - } + parseSelectFromPart(command); } - if (readIf("WHERE")) { - Expression condition = readExpression(); - command.addCondition(condition); + if (readIf(WHERE)) { + command.addCondition(readExpressionWithGlobalConditions()); } // the group by is read for the outer select (or not a select) // so that columns that are not grouped can be used currentSelect = oldSelect; - if (readIf("GROUP")) { + if (readIf(GROUP)) { read("BY"); command.setGroupQuery(); - ArrayList list = New.arrayList(); + ArrayList list = Utils.newSmallArrayList(); do { - Expression expr = readExpression(); - list.add(expr); - } while (readIf(",")); - command.setGroupBy(list); + if (isToken(OPEN_PAREN) && isOrdinaryGroupingSet()) { + if (!readIf(CLOSE_PAREN)) { + do { + list.add(readExpression()); + } while (readIfMore()); + } + } else { + Expression expr = readExpression(); + if (database.getMode().groupByColumnIndex && expr instanceof ValueExpression && + expr.getType().getValueType() == Value.INTEGER) { + ArrayList expressions = command.getExpressions(); + for (Expression e : expressions) { + if (e instanceof Wildcard) { + throw getSyntaxError(); + } + } + int idx = expr.getValue(session).getInt(); + if (idx < 1 || idx > expressions.size()) { + throw DbException.get(ErrorCode.GROUP_BY_NOT_IN_THE_RESULT, Integer.toString(idx), + Integer.toString(expressions.size())); + } + list.add(expressions.get(idx-1)); + } else { + list.add(expr); + } + } + } while (readIf(COMMA)); + if (!list.isEmpty()) { + command.setGroupBy(list); + } } currentSelect = command; - if (readIf("HAVING")) { + if (readIf(HAVING)) { command.setGroupQuery(); - Expression condition = readExpression(); - command.setHaving(condition); + command.setHaving(readExpressionWithGlobalConditions()); 
+ } + if (readIf(WINDOW)) { + do { + int sqlIndex = token.start(); + String name = readIdentifier(); + read(AS); + Window w = readWindowSpecification(); + if (!currentSelect.addWindow(name, w)) { + throw DbException.getSyntaxError(sqlCommand, sqlIndex, "unique identifier"); + } + } while (readIf(COMMA)); + } + if (readIf(QUALIFY)) { + command.setWindowQuery(); + command.setQualify(readExpressionWithGlobalConditions()); } command.setParameterList(parameters); currentSelect = oldSelect; - setSQL(command, "SELECT", start); + currentPrepared = oldPrepared; + setSQL(command, start); return command; } - private Table getDualTable(boolean noColumns) { - Schema main = database.findSchema(Constants.SCHEMA_MAIN); - Expression one = ValueExpression.get(ValueLong.get(1)); - return new RangeTable(main, one, one, noColumns); + /** + * Checks whether current opening parenthesis can be a start of ordinary + * grouping set. This method reads this parenthesis if it is. + * + * @return whether current opening parenthesis can be a start of ordinary + * grouping set + */ + private boolean isOrdinaryGroupingSet() { + int offset = scanToCloseParen(tokenIndex + 1); + if (offset < 0) { + // Try to parse as expression to get better syntax error + return false; + } + switch (tokens.get(offset).tokenType()) { + // End of query + case CLOSE_PAREN: + case SEMICOLON: + case END_OF_INPUT: + // Next grouping element + case COMMA: + // Next select clause + case HAVING: + case WINDOW: + case QUALIFY: + // Next query expression body clause + case UNION: + case EXCEPT: + case MINUS: + case INTERSECT: + // Next query expression clause + case ORDER: + case OFFSET: + case FETCH: + case LIMIT: + case FOR: + setTokenIndex(tokenIndex + 1); + return true; + default: + return false; + } + } + + private Query parseExplicitTable(int start) { + Table table = readTableOrView(); + Select command = new Select(session, currentSelect); + TableFilter filter = new TableFilter(session, table, null, rightsChecked, + 
command, orderInFrom++, null); + command.addTableFilter(filter, true); + command.setExplicitTable(); + setSQL(command, start); + return command; } - private void setSQL(Prepared command, String start, int startIndex) { - String sql = originalSQL.substring(startIndex, lastParseIndex).trim(); - if (start != null) { - sql = start + " " + sql; + private void setSQL(Prepared command, int start) { + String s = sqlCommand; + int beginIndex = tokens.get(start).start(); + int endIndex = token.start(); + while (beginIndex < endIndex && s.charAt(beginIndex) <= ' ') { + beginIndex++; + } + while (beginIndex < endIndex && s.charAt(endIndex - 1) <= ' ') { + endIndex--; } - command.setSQL(sql); + s = s.substring(beginIndex, endIndex); + ArrayList commandTokens; + if (start == 0 && currentTokenType == END_OF_INPUT) { + commandTokens = tokens; + if (beginIndex != 0) { + for (int i = 0, l = commandTokens.size() - 1; i < l; i++) { + commandTokens.get(i).subtractFromStart(beginIndex); + } + } + token.setStart(s.length()); + sqlCommand = s; + } else { + List subList = tokens.subList(start, tokenIndex); + commandTokens = new ArrayList<>(subList.size() + 1); + for (int i = start; i < tokenIndex; i++) { + Token t = tokens.get(i).clone(); + t.subtractFromStart(beginIndex); + commandTokens.add(t); + } + commandTokens.add(new Token.EndOfInputToken(s.length())); + } + command.setSQL(s, commandTokens); } - private Expression readExpression() { - Expression r = readAnd(); - while (readIf("OR")) { - r = new ConditionAndOr(ConditionAndOr.OR, r, readAnd()); + private Expression readExpressionOrDefault() { + if (readIf(DEFAULT)) { + return ValueExpression.DEFAULT; } - return r; + return readExpression(); } - private Expression readAnd() { + private Expression readExpressionWithGlobalConditions() { Expression r = readCondition(); - while (readIf("AND")) { - r = new ConditionAndOr(ConditionAndOr.AND, r, readCondition()); + if (readIf(AND)) { + r = readAnd(new ConditionAndOr(ConditionAndOr.AND, r, 
readCondition())); + } else if (readIf("_LOCAL_AND_GLOBAL_")) { + r = readAnd(new ConditionLocalAndGlobal(r, readCondition())); } - return r; + return readExpressionPart2(r); + } + + private Expression readExpression() { + return readExpressionPart2(readAnd(readCondition())); + } + + private Expression readExpressionPart2(Expression r1) { + if (!readIf(OR)) { + return r1; + } + Expression r2 = readAnd(readCondition()); + if (!readIf(OR)) { + return new ConditionAndOr(ConditionAndOr.OR, r1, r2); + } + // Above logic to avoid allocating an ArrayList for the common case. + // We combine into ConditionAndOrN here rather than letting the optimisation + // pass do it, to avoid StackOverflowError during stuff like mapColumns. + final ArrayList expressions = new ArrayList<>(); + expressions.add(r1); + expressions.add(r2); + do { + expressions.add(readAnd(readCondition())); + } + while (readIf(OR)); + return new ConditionAndOrN(ConditionAndOr.OR, expressions); + } + + private Expression readAnd(Expression r) { + if (!readIf(AND)) { + return r; + } + Expression expr2 = readCondition(); + if (!readIf(AND)) { + return new ConditionAndOr(ConditionAndOr.AND, r, expr2); + } + // Above logic to avoid allocating an ArrayList for the common case. + // We combine into ConditionAndOrN here rather than letting the optimisation + // pass do it, to avoid StackOverflowError during stuff like mapColumns. 
+ final ArrayList expressions = new ArrayList<>(); + expressions.add(r); + expressions.add(expr2); + do { + expressions.add(readCondition()); + } + while (readIf(AND)); + return new ConditionAndOrN(ConditionAndOr.AND, expressions); } private Expression readCondition() { - if (readIf("NOT")) { + switch (currentTokenType) { + case NOT: + read(); return new ConditionNot(readCondition()); - } - if (readIf("EXISTS")) { - read("("); - Query query = parseSelect(); + case EXISTS: { + read(); + read(OPEN_PAREN); + Query query = parseQuery(); // can not reduce expression because it might be a union except // query with distinct - read(")"); - return new ConditionExists(query); - } - if (readIf("INTERSECTS")) { - read("("); - Expression r1 = readConcat(); - read(","); - Expression r2 = readConcat(); - read(")"); - return new Comparison(session, Comparison.SPATIAL_INTERSECTS, r1, - r2); - } - Expression r = readConcat(); - while (true) { + read(CLOSE_PAREN); + return new ExistsPredicate(query); + } + case UNIQUE: { + read(); + read(OPEN_PAREN); + Query query = parseQuery(); + read(CLOSE_PAREN); + return new UniquePredicate(query); + } + default: + int index = tokenIndex; + if (readIf("INTERSECTS")) { + if (readIf(OPEN_PAREN)) { + Expression r1 = readConcat(); + read(COMMA); + Expression r2 = readConcat(); + read(CLOSE_PAREN); + return new Comparison(Comparison.SPATIAL_INTERSECTS, r1, r2, false); + } else { + setTokenIndex(index); + } + } + if (expectedList != null) { + addMultipleExpected(NOT, EXISTS, UNIQUE); + addExpected("INTERSECTS"); + } + } + Expression l, c = readConcat(); + do { + l = c; // special case: NOT NULL is not part of an expression (as in CREATE // TABLE TEST(ID INT DEFAULT 0 NOT NULL)) - int backup = parseIndex; - boolean not = false; - if (readIf("NOT")) { - not = true; - if (isToken("NULL")) { - // this really only works for NOT NULL! 
- parseIndex = backup; - currentToken = "NOT"; - break; - } + int backup = tokenIndex; + boolean not = readIf(NOT); + if (not && isToken(NULL)) { + // this really only works for NOT NULL! + setTokenIndex(backup); + break; } - if (readIf("LIKE")) { - Expression b = readConcat(); - Expression esc = null; - if (readIf("ESCAPE")) { - esc = readConcat(); + c = readConditionRightHandSide(l, not, false); + } while (c != null); + return l; + } + + private Expression readConditionRightHandSide(Expression r, boolean not, boolean whenOperand) { + if (!not && readIf(IS)) { + r = readConditionIs(r, whenOperand); + } else { + switch (currentTokenType) { + case BETWEEN: { + read(); + boolean symmetric = readIf(SYMMETRIC); + if (!symmetric) { + readIf(ASYMMETRIC); } - recompileAlways = true; - r = new CompareLike(database, r, b, esc, false); - } else if (readIf("REGEXP")) { - Expression b = readConcat(); - r = new CompareLike(database, r, b, null, true); - } else if (readIf("IS")) { - if (readIf("NOT")) { - if (readIf("NULL")) { - r = new Comparison(session, Comparison.IS_NOT_NULL, r, - null); - } else if (readIf("DISTINCT")) { - read("FROM"); - r = new Comparison(session, Comparison.EQUAL_NULL_SAFE, - r, readConcat()); - } else { - r = new Comparison(session, - Comparison.NOT_EQUAL_NULL_SAFE, r, readConcat()); + Expression a = readConcat(); + read(AND); + r = new BetweenPredicate(r, not, whenOperand, symmetric, a, readConcat()); + break; + } + case IN: + read(); + r = readInPredicate(r, not, whenOperand); + break; + case LIKE: { + read(); + r = readLikePredicate(r, LikeType.LIKE, not, whenOperand); + break; + } + default: + if (readIf("ILIKE")) { + r = readLikePredicate(r, LikeType.ILIKE, not, whenOperand); + } else if (readIf("REGEXP")) { + Expression b = readConcat(); + recompileAlways = true; + r = new CompareLike(database, r, not, whenOperand, b, null, LikeType.REGEXP); + } else if (not) { + if (whenOperand) { + return null; } - } else if (readIf("NULL")) { - r = new 
Comparison(session, Comparison.IS_NULL, r, null); - } else if (readIf("DISTINCT")) { - read("FROM"); - r = new Comparison(session, Comparison.NOT_EQUAL_NULL_SAFE, - r, readConcat()); - } else { - r = new Comparison(session, Comparison.EQUAL_NULL_SAFE, r, - readConcat()); - } - } else if (readIf("IN")) { - read("("); - if (readIf(")")) { - r = ValueExpression.get(ValueBoolean.get(false)); - } else { - if (isSelect()) { - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, false, - Comparison.EQUAL); - } else { - ArrayList v = New.arrayList(); - Expression last; - do { - last = readExpression(); - v.add(last); - } while (readIf(",")); - if (v.size() == 1 && (last instanceof Subquery)) { - Subquery s = (Subquery) last; - Query q = s.getQuery(); - r = new ConditionInSelect(database, r, q, false, - Comparison.EQUAL); - } else { - r = new ConditionIn(database, r, v); - } + if (expectedList != null) { + addMultipleExpected(BETWEEN, IN, LIKE); } - read(")"); - } - } else if (readIf("BETWEEN")) { - Expression low = readConcat(); - read("AND"); - Expression high = readConcat(); - Expression condLow = new Comparison(session, - Comparison.SMALLER_EQUAL, low, r); - Expression condHigh = new Comparison(session, - Comparison.BIGGER_EQUAL, high, r); - r = new ConditionAndOr(ConditionAndOr.AND, condLow, condHigh); - } else { - int compareType = getCompareType(currentTokenType); - if (compareType < 0) { - break; - } - read(); - if (readIf("ALL")) { - read("("); - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, true, - compareType); - read(")"); - } else if (readIf("ANY") || readIf("SOME")) { - read("("); - Query query = parseSelect(); - r = new ConditionInSelect(database, r, query, false, - compareType); - read(")"); + throw getSyntaxError(); } else { - Expression right = readConcat(); - if (SysProperties.OLD_STYLE_OUTER_JOIN && - readIf("(") && readIf("+") && readIf(")")) { - // support for a subset of old-fashioned Oracle outer 
- // join with (+) - if (r instanceof ExpressionColumn && - right instanceof ExpressionColumn) { - ExpressionColumn leftCol = (ExpressionColumn) r; - ExpressionColumn rightCol = (ExpressionColumn) right; - ArrayList filters = currentSelect - .getTopFilters(); - for (TableFilter f : filters) { - while (f != null) { - leftCol.mapColumns(f, 0); - rightCol.mapColumns(f, 0); - f = f.getJoin(); - } - } - TableFilter leftFilter = leftCol.getTableFilter(); - TableFilter rightFilter = rightCol.getTableFilter(); - r = new Comparison(session, compareType, r, right); - if (leftFilter != null && rightFilter != null) { - int idx = filters.indexOf(rightFilter); - if (idx >= 0) { - filters.remove(idx); - leftFilter.addJoin(rightFilter, true, - false, r); - } else { - rightFilter.mapAndAddFilter(r); - } - r = ValueExpression.get(ValueBoolean.get(true)); - } - } - } else { - r = new Comparison(session, compareType, r, right); + int compareType = getCompareType(currentTokenType); + if (compareType < 0) { + return null; } + read(); + r = readComparison(r, compareType, whenOperand); } } - if (not) { - r = new ConditionNot(r); - } } return r; } - private Expression readConcat() { - Expression r = readSum(); - while (true) { - if (readIf("||")) { - r = new Operation(Operation.CONCAT, r, readSum()); - } else if (readIf("~")) { - if (readIf("*")) { - Function function = Function.getFunction(database, "CAST"); - function.setDataType(new Column("X", - Value.STRING_IGNORECASE)); - function.setParameter(0, r); - r = function; - } - r = new CompareLike(database, r, readSum(), null, true); - } else if (readIf("!~")) { - if (readIf("*")) { - Function function = Function.getFunction(database, "CAST"); - function.setDataType(new Column("X", - Value.STRING_IGNORECASE)); - function.setParameter(0, r); - r = function; - } - r = new ConditionNot(new CompareLike(database, r, readSum(), - null, true)); + private Expression readConditionIs(Expression left, boolean whenOperand) { + boolean isNot = 
readIf(NOT); + switch (currentTokenType) { + case NULL: + read(); + left = new NullPredicate(left, isNot, whenOperand); + break; + case DISTINCT: + read(); + read(FROM); + left = readComparison(left, isNot ? Comparison.EQUAL_NULL_SAFE : Comparison.NOT_EQUAL_NULL_SAFE, + whenOperand); + break; + case TRUE: + read(); + left = new BooleanTest(left, isNot, whenOperand, true); + break; + case FALSE: + read(); + left = new BooleanTest(left, isNot, whenOperand, false); + break; + case UNKNOWN: + read(); + left = new BooleanTest(left, isNot, whenOperand, null); + break; + default: + if (readIf("OF")) { + left = readTypePredicate(left, isNot, whenOperand); + } else if (readIf("JSON")) { + left = readJsonPredicate(left, isNot, whenOperand); } else { - return r; + if (expectedList != null) { + addMultipleExpected(NULL, DISTINCT, TRUE, FALSE, UNKNOWN); + } + /* + * Databases that were created in 1.4.199 and older + * versions can contain invalid generated IS [ NOT ] + * expressions. + */ + if (whenOperand || !session.isQuirksMode()) { + throw getSyntaxError(); + } + left = new Comparison(isNot ? 
Comparison.NOT_EQUAL_NULL_SAFE : Comparison.EQUAL_NULL_SAFE, left, + readConcat(), false); } } + return left; } - private Expression readSum() { - Expression r = readFactor(); - while (true) { - if (readIf("+")) { - r = new Operation(Operation.PLUS, r, readFactor()); - } else if (readIf("-")) { - r = new Operation(Operation.MINUS, r, readFactor()); - } else { - return r; - } - } + private TypePredicate readTypePredicate(Expression left, boolean not, boolean whenOperand) { + read(OPEN_PAREN); + ArrayList typeList = Utils.newSmallArrayList(); + do { + typeList.add(parseDataType()); + } while (readIfMore()); + return new TypePredicate(left, not, whenOperand, typeList.toArray(new TypeInfo[0])); } - private Expression readFactor() { - Expression r = readTerm(); - while (true) { - if (readIf("*")) { - r = new Operation(Operation.MULTIPLY, r, readTerm()); - } else if (readIf("/")) { - r = new Operation(Operation.DIVIDE, r, readTerm()); - } else if (readIf("%")) { - r = new Operation(Operation.MODULUS, r, readTerm()); - } else { - return r; + private Expression readInPredicate(Expression left, boolean not, boolean whenOperand) { + read(OPEN_PAREN); + if (!whenOperand && database.getMode().allowEmptyInPredicate && readIf(CLOSE_PAREN)) { + return ValueExpression.getBoolean(not); + } + ArrayList v; + if (isQuery()) { + Query query = parseQuery(); + if (!readIfMore()) { + return new ConditionInQuery(left, not, whenOperand, query, false, Comparison.EQUAL); } + v = Utils.newSmallArrayList(); + v.add(new Subquery(query)); + } else { + v = Utils.newSmallArrayList(); } + do { + v.add(readExpression()); + } while (readIfMore()); + return new ConditionIn(left, not, whenOperand, v); + } + + private IsJsonPredicate readJsonPredicate(Expression left, boolean not, boolean whenOperand) { + JSONItemType itemType; + if (readIf(VALUE)) { + itemType = JSONItemType.VALUE; + } else if (readIf(ARRAY)) { + itemType = JSONItemType.ARRAY; + } else if (readIf("OBJECT")) { + itemType = 
JSONItemType.OBJECT; + } else if (readIf("SCALAR")) { + itemType = JSONItemType.SCALAR; + } else { + itemType = JSONItemType.VALUE; + } + boolean unique = false; + if (readIf(WITH)) { + read(UNIQUE); + readIf("KEYS"); + unique = true; + } else if (readIf("WITHOUT")) { + read(UNIQUE); + readIf("KEYS"); + } + return new IsJsonPredicate(left, not, whenOperand, unique, itemType); + } + + private Expression readLikePredicate(Expression left, LikeType likeType, boolean not, boolean whenOperand) { + Expression right = readConcat(); + Expression esc = readIf("ESCAPE") ? readConcat() : null; + recompileAlways = true; + return new CompareLike(database, left, not, whenOperand, right, esc, likeType); + } + + private Expression readComparison(Expression left, int compareType, boolean whenOperand) { + int start = tokenIndex; + if (readIf(ALL)) { + read(OPEN_PAREN); + if (isQuery()) { + Query query = parseQuery(); + left = new ConditionInQuery(left, false, whenOperand, query, true, compareType); + read(CLOSE_PAREN); + } else { + setTokenIndex(start); + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + } else if (readIf(ANY) || readIf(SOME)) { + read(OPEN_PAREN); + if (currentTokenType == PARAMETER && compareType == Comparison.EQUAL) { + Parameter p = readParameter(); + left = new ConditionInParameter(left, false, whenOperand, p); + read(CLOSE_PAREN); + } else if (isQuery()) { + Query query = parseQuery(); + left = new ConditionInQuery(left, false, whenOperand, query, false, compareType); + read(CLOSE_PAREN); + } else { + setTokenIndex(start); + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + } else { + left = new Comparison(compareType, left, readConcat(), whenOperand); + } + return left; } - private Expression readAggregate(int aggregateType, String aggregateName) { + private Expression readConcat() { + Expression op1 = readSum(); + for (;;) { + switch (currentTokenType) { + case CONCATENATION: { + read(); + Expression op2 = 
readSum(); + if (readIf(CONCATENATION)) { + ConcatenationOperation c = new ConcatenationOperation(); + c.addParameter(op1); + c.addParameter(op2); + do { + c.addParameter(readSum()); + } while (readIf(CONCATENATION)); + c.doneWithParameters(); + op1 = c; + } else { + op1 = new ConcatenationOperation(op1, op2); + } + break; + } + case TILDE: // PostgreSQL compatibility + op1 = readTildeCondition(op1, false); + break; + case NOT_TILDE: // PostgreSQL compatibility + op1 = readTildeCondition(op1, true); + break; + default: + // Don't add compatibility operators + addExpected(CONCATENATION); + return op1; + } + } + } + + private Expression readSum() { + Expression r = readFactor(); + while (true) { + if (readIf(PLUS_SIGN)) { + r = new BinaryOperation(OpType.PLUS, r, readFactor()); + } else if (readIf(MINUS_SIGN)) { + r = new BinaryOperation(OpType.MINUS, r, readFactor()); + } else { + return r; + } + } + } + + private Expression readFactor() { + Expression r = readTerm(); + while (true) { + if (readIf(ASTERISK)) { + r = new BinaryOperation(OpType.MULTIPLY, r, readTerm()); + } else if (readIf(SLASH)) { + r = new BinaryOperation(OpType.DIVIDE, r, readTerm()); + } else if (readIf(PERCENT)) { + r = new MathFunction(r, readTerm(), MathFunction.MOD); + } else { + return r; + } + } + } + + private Expression readTildeCondition(Expression r, boolean not) { + read(); + if (readIf(ASTERISK)) { + r = new CastSpecification(r, TypeInfo.TYPE_VARCHAR_IGNORECASE); + } + return new CompareLike(database, r, not, false, readSum(), null, LikeType.REGEXP); + } + + private Expression readAggregate(AggregateType aggregateType, String aggregateName) { if (currentSelect == null) { + expectedList = null; throw getSyntaxError(); } - currentSelect.setGroupQuery(); - Expression r; - if (aggregateType == Aggregate.COUNT) { - if (readIf("*")) { - r = new Aggregate(Aggregate.COUNT_ALL, null, currentSelect, - false); + Aggregate r; + switch (aggregateType) { + case COUNT: + if (readIf(ASTERISK)) { + r 
= new Aggregate(AggregateType.COUNT_ALL, new Expression[0], currentSelect, false); } else { - boolean distinct = readIf("DISTINCT"); + boolean distinct = readDistinctAgg(); Expression on = readExpression(); if (on instanceof Wildcard && !distinct) { // PostgreSQL compatibility: count(t.*) - r = new Aggregate(Aggregate.COUNT_ALL, null, currentSelect, - false); + r = new Aggregate(AggregateType.COUNT_ALL, new Expression[0], currentSelect, false); } else { - r = new Aggregate(Aggregate.COUNT, on, currentSelect, - distinct); + r = new Aggregate(AggregateType.COUNT, new Expression[] { on }, currentSelect, distinct); } } - } else if (aggregateType == Aggregate.GROUP_CONCAT) { - Aggregate agg = null; - if (equalsToken("GROUP_CONCAT", aggregateName)) { - boolean distinct = readIf("DISTINCT"); - agg = new Aggregate(Aggregate.GROUP_CONCAT, - readExpression(), currentSelect, distinct); - if (readIf("ORDER")) { - read("BY"); - agg.setGroupConcatOrder(parseSimpleOrderList()); - } - + break; + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_COUNT: + case REGR_R2: + case REGR_AVGX: + case REGR_AVGY: + case REGR_SXX: + case REGR_SYY: + case REGR_SXY: + r = new Aggregate(aggregateType, new Expression[] { readExpression(), readNextArgument() }, + currentSelect, false); + break; + case HISTOGRAM: + r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, false); + break; + case LISTAGG: { + boolean distinct = readDistinctAgg(); + Expression arg = readExpression(); + ListaggArguments extraArguments = new ListaggArguments(); + ArrayList orderByList; + if ("STRING_AGG".equals(aggregateName)) { + // PostgreSQL compatibility: string_agg(expression, delimiter) + read(COMMA); + extraArguments.setSeparator(readString()); + orderByList = readIfOrderBy(); + } else if ("GROUP_CONCAT".equals(aggregateName)) { + orderByList = readIfOrderBy(); if (readIf("SEPARATOR")) { - 
agg.setGroupConcatSeparator(readExpression()); + extraArguments.setSeparator(readString()); } - } else if (equalsToken("STRING_AGG", aggregateName)) { - // PostgreSQL compatibility: string_agg(expression, delimiter) - agg = new Aggregate(Aggregate.GROUP_CONCAT, - readExpression(), currentSelect, false); - read(","); - agg.setGroupConcatSeparator(readExpression()); + } else { + if (readIf(COMMA)) { + extraArguments.setSeparator(readString()); + } + if (readIf(ON)) { + read("OVERFLOW"); + if (readIf("TRUNCATE")) { + extraArguments.setOnOverflowTruncate(true); + if (currentTokenType == LITERAL) { + extraArguments.setFilter(readString()); + } + if (!readIf(WITH)) { + read("WITHOUT"); + extraArguments.setWithoutCount(true); + } + read("COUNT"); + } else { + read("ERROR"); + } + } + orderByList = null; } - r = agg; - } else { - boolean distinct = readIf("DISTINCT"); - r = new Aggregate(aggregateType, readExpression(), currentSelect, + Expression[] args = new Expression[] { arg }; + int index = tokenIndex; + read(CLOSE_PAREN); + if (orderByList == null && isToken("WITHIN")) { + r = readWithinGroup(aggregateType, args, distinct, extraArguments, false, false); + } else { + setTokenIndex(index); + r = new Aggregate(AggregateType.LISTAGG, args, currentSelect, distinct); + r.setExtraArguments(extraArguments); + if (orderByList != null) { + r.setOrderByList(orderByList); + } + } + break; + } + case ARRAY_AGG: { + boolean distinct = readDistinctAgg(); + r = new Aggregate(AggregateType.ARRAY_AGG, new Expression[] { readExpression() }, currentSelect, distinct); + r.setOrderByList(readIfOrderBy()); + break; + } + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + case CUME_DIST: { + if (isToken(CLOSE_PAREN)) { + return readWindowFunction(aggregateName); + } + ArrayList expressions = Utils.newSmallArrayList(); + do { + expressions.add(readExpression()); + } while (readIfMore()); + r = readWithinGroup(aggregateType, expressions.toArray(new Expression[0]), false, null, true, false); 
+ break; + } + case PERCENTILE_CONT: + case PERCENTILE_DISC: { + Expression num = readExpression(); + read(CLOSE_PAREN); + r = readWithinGroup(aggregateType, new Expression[] { num }, false, null, false, true); + break; + } + case MODE: { + if (readIf(CLOSE_PAREN)) { + r = readWithinGroup(AggregateType.MODE, new Expression[0], false, null, false, true); + } else { + Expression expr = readExpression(); + r = new Aggregate(AggregateType.MODE, new Expression[0], currentSelect, false); + if (readIf(ORDER)) { + read("BY"); + Expression expr2 = readExpression(); + String sql = expr.getSQL(HasSQL.DEFAULT_SQL_FLAGS), sql2 = expr2.getSQL(HasSQL.DEFAULT_SQL_FLAGS); + if (!sql.equals(sql2)) { + throw DbException.getSyntaxError(ErrorCode.IDENTICAL_EXPRESSIONS_SHOULD_BE_USED, sqlCommand, + token.start(), sql, sql2); + } + readAggregateOrder(r, expr, true); + } else { + readAggregateOrder(r, expr, false); + } + } + break; + } + case JSON_OBJECTAGG: { + boolean withKey = readIf(KEY); + Expression key = readExpression(); + if (withKey) { + read(VALUE); + } else if (!readIf(VALUE)) { + read(COLON); + } + Expression value = readExpression(); + r = new Aggregate(AggregateType.JSON_OBJECTAGG, new Expression[] { key, value }, currentSelect, false); + readJsonObjectFunctionFlags(r, false); + break; + } + case JSON_ARRAYAGG: { + boolean distinct = readDistinctAgg(); + r = new Aggregate(AggregateType.JSON_ARRAYAGG, new Expression[] { readExpression() }, currentSelect, distinct); + r.setOrderByList(readIfOrderBy()); + r.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL); + readJsonObjectFunctionFlags(r, true); + break; + } + default: + boolean distinct = readDistinctAgg(); + r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, distinct); + break; + } + read(CLOSE_PAREN); + readFilterAndOver(r); + return r; + } + + private Aggregate readWithinGroup(AggregateType aggregateType, Expression[] args, boolean distinct, + Object extraArguments, boolean 
forHypotheticalSet, boolean simple) { + read("WITHIN"); + read(GROUP); + read(OPEN_PAREN); + read(ORDER); + read("BY"); + Aggregate r = new Aggregate(aggregateType, args, currentSelect, distinct); + r.setExtraArguments(extraArguments); + if (forHypotheticalSet) { + int count = args.length; + ArrayList orderList = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + if (i > 0) { + read(COMMA); + } + orderList.add(parseSortSpecification()); + } + r.setOrderByList(orderList); + } else if (simple) { + readAggregateOrder(r, readExpression(), true); + } else { + r.setOrderByList(parseSortSpecificationList()); } - read(")"); return r; } - private ArrayList parseSimpleOrderList() { - ArrayList orderList = New.arrayList(); + private void readAggregateOrder(Aggregate r, Expression expr, boolean parseSortType) { + ArrayList orderList = new ArrayList<>(1); + QueryOrderBy order = new QueryOrderBy(); + order.expression = expr; + if (parseSortType) { + order.sortType = parseSortType(); + } + orderList.add(order); + r.setOrderByList(orderList); + } + + private ArrayList readIfOrderBy() { + if (readIf(ORDER)) { + read("BY"); + return parseSortSpecificationList(); + } + return null; + } + + private ArrayList parseSortSpecificationList() { + ArrayList orderList = Utils.newSmallArrayList(); do { - SelectOrderBy order = new SelectOrderBy(); - Expression expr = readExpression(); - order.expression = expr; - if (readIf("DESC")) { - order.descending = true; - } else { - readIf("ASC"); - } - orderList.add(order); - } while (readIf(",")); + orderList.add(parseSortSpecification()); + } while (readIf(COMMA)); return orderList; } - private JavaFunction readJavaFunction(Schema schema, String functionName) { - FunctionAlias functionAlias = null; - if (schema != null) { - functionAlias = schema.findFunction(functionName); + private QueryOrderBy parseSortSpecification() { + QueryOrderBy order = new QueryOrderBy(); + order.expression = readExpression(); + order.sortType = parseSortType(); 
+ return order; + } + + private Expression readUserDefinedFunctionIf(Schema schema, String functionName) { + UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, functionName); + if (userDefinedFunction == null) { + return null; + } else if (userDefinedFunction instanceof FunctionAlias) { + FunctionAlias functionAlias = (FunctionAlias) userDefinedFunction; + ArrayList argList = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + do { + argList.add(readExpression()); + } while (readIfMore()); + } + return new JavaFunction(functionAlias, argList.toArray(new Expression[0])); } else { - functionAlias = findFunctionAlias(session.getCurrentSchemaName(), - functionName); + UserAggregate aggregate = (UserAggregate) userDefinedFunction; + boolean distinct = readDistinctAgg(); + ArrayList params = Utils.newSmallArrayList(); + do { + params.add(readExpression()); + } while (readIfMore()); + Expression[] list = params.toArray(new Expression[0]); + JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect, distinct); + readFilterAndOver(agg); + return agg; } - if (functionAlias == null) { - throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, functionName); + } + + private boolean readDistinctAgg() { + if (readIf(DISTINCT)) { + return true; } - Expression[] args; - ArrayList argList = New.arrayList(); - int numArgs = 0; - while (!readIf(")")) { - if (numArgs++ > 0) { - read(","); + readIf(ALL); + return false; + } + + private void readFilterAndOver(AbstractAggregate aggregate) { + if (readIf("FILTER")) { + read(OPEN_PAREN); + read(WHERE); + Expression filterCondition = readExpression(); + read(CLOSE_PAREN); + aggregate.setFilterCondition(filterCondition); + } + readOver(aggregate); + } + + private void readOver(DataAnalysisOperation operation) { + if (readIf("OVER")) { + operation.setOverCondition(readWindowNameOrSpecification()); + currentSelect.setWindowQuery(); + } else if (operation.isAggregate()) { + 
currentSelect.setGroupQuery(); + } else { + throw getSyntaxError(); + } + } + + private Window readWindowNameOrSpecification() { + return isToken(OPEN_PAREN) ? readWindowSpecification() : new Window(readIdentifier(), null, null, null); + } + + private Window readWindowSpecification() { + read(OPEN_PAREN); + String parent = null; + if (currentTokenType == IDENTIFIER) { + String current = currentToken; + if (token.isQuoted() || ( // + !equalsToken(current, "PARTITION") // + && !equalsToken(current, "ROWS") // + && !equalsToken(current, "RANGE") // + && !equalsToken(current, "GROUPS"))) { + parent = current; + read(); } - argList.add(readExpression()); } - args = new Expression[numArgs]; - argList.toArray(args); - JavaFunction func = new JavaFunction(functionAlias, args); - return func; + ArrayList partitionBy = null; + if (readIf("PARTITION")) { + read("BY"); + partitionBy = Utils.newSmallArrayList(); + do { + Expression expr = readExpression(); + partitionBy.add(expr); + } while (readIf(COMMA)); + } + ArrayList orderBy = readIfOrderBy(); + WindowFrame frame = readWindowFrame(); + read(CLOSE_PAREN); + return new Window(parent, partitionBy, orderBy, frame); + } + + private WindowFrame readWindowFrame() { + WindowFrameUnits units; + if (readIf("ROWS")) { + units = WindowFrameUnits.ROWS; + } else if (readIf("RANGE")) { + units = WindowFrameUnits.RANGE; + } else if (readIf("GROUPS")) { + units = WindowFrameUnits.GROUPS; + } else { + return null; + } + WindowFrameBound starting, following; + if (readIf(BETWEEN)) { + starting = readWindowFrameRange(); + read(AND); + following = readWindowFrameRange(); + } else { + starting = readWindowFrameStarting(); + following = null; + } + int sqlIndex = token.start(); + WindowFrameExclusion exclusion = WindowFrameExclusion.EXCLUDE_NO_OTHERS; + if (readIf("EXCLUDE")) { + if (readIf("CURRENT")) { + read(ROW); + exclusion = WindowFrameExclusion.EXCLUDE_CURRENT_ROW; + } else if (readIf(GROUP)) { + exclusion = 
WindowFrameExclusion.EXCLUDE_GROUP; + } else if (readIf("TIES")) { + exclusion = WindowFrameExclusion.EXCLUDE_TIES; + } else { + read("NO"); + read("OTHERS"); + } + } + WindowFrame frame = new WindowFrame(units, starting, following, exclusion); + if (!frame.isValid()) { + throw DbException.getSyntaxError(sqlCommand, sqlIndex); + } + return frame; } - private JavaAggregate readJavaAggregate(UserAggregate aggregate) { - ArrayList params = New.arrayList(); - do { - params.add(readExpression()); - } while (readIf(",")); - read(")"); - Expression[] list = new Expression[params.size()]; - params.toArray(list); - JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect); - currentSelect.setGroupQuery(); - return agg; + private WindowFrameBound readWindowFrameStarting() { + if (readIf("UNBOUNDED")) { + read("PRECEDING"); + return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_PRECEDING, null); + } + if (readIf("CURRENT")) { + read(ROW); + return new WindowFrameBound(WindowFrameBoundType.CURRENT_ROW, null); + } + Expression value = readExpression(); + read("PRECEDING"); + return new WindowFrameBound(WindowFrameBoundType.PRECEDING, value); } - private int getAggregateType(String name) { - if (!identifiersToUpper) { - // if not yet converted to uppercase, do it now - name = StringUtils.toUpperEnglish(name); + private WindowFrameBound readWindowFrameRange() { + if (readIf("UNBOUNDED")) { + if (readIf("PRECEDING")) { + return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_PRECEDING, null); + } + read("FOLLOWING"); + return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_FOLLOWING, null); + } + if (readIf("CURRENT")) { + read(ROW); + return new WindowFrameBound(WindowFrameBoundType.CURRENT_ROW, null); } - return Aggregate.getAggregateType(name); + Expression value = readExpression(); + if (readIf("PRECEDING")) { + return new WindowFrameBound(WindowFrameBoundType.PRECEDING, value); + } + read("FOLLOWING"); + return new 
WindowFrameBound(WindowFrameBoundType.FOLLOWING, value); } private Expression readFunction(Schema schema, String name) { + String upperName = upperName(name); if (schema != null) { - return readJavaFunction(schema, name); - } - int agg = getAggregateType(name); - if (agg >= 0) { - return readAggregate(agg, name); - } - Function function = Function.getFunction(database, name); - if (function == null) { - UserAggregate aggregate = database.findAggregate(name); - if (aggregate != null) { - return readJavaAggregate(aggregate); - } - return readJavaFunction(null, name); - } - switch (function.getFunctionType()) { - case Function.CAST: { - function.setParameter(0, readExpression()); - read("AS"); - Column type = parseColumnWithType(null); - function.setDataType(type); - read(")"); - break; + return readFunctionWithSchema(schema, name, upperName); + } + boolean allowOverride = database.isAllowBuiltinAliasOverride(); + if (allowOverride) { + Expression e = readUserDefinedFunctionIf(null, name); + if (e != null) { + return e; + } + } + AggregateType agg = Aggregate.getAggregateType(upperName); + if (agg != null) { + return readAggregate(agg, upperName); + } + Expression e = readBuiltinFunctionIf(upperName); + if (e != null) { + return e; + } + e = readWindowFunction(upperName); + if (e != null) { + return e; + } + e = readCompatibilityFunction(upperName); + if (e != null) { + return e; + } + if (!allowOverride) { + e = readUserDefinedFunctionIf(null, name); + if (e != null) { + return e; + } + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + + private Expression readFunctionWithSchema(Schema schema, String name, String upperName) { + if (database.getMode().getEnum() == ModeEnum.PostgreSQL + && schema.getName().equals(database.sysIdentifier("PG_CATALOG"))) { + FunctionsPostgreSQL function = FunctionsPostgreSQL.getFunction(upperName); + if (function != null) { + return readParameters(function); + } + } + Expression function = 
readUserDefinedFunctionIf(schema, name); + if (function != null) { + return function; + } + throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name); + } + + private Expression readCompatibilityFunction(String name) { + switch (name) { + // || + case "ARRAY_APPEND": + case "ARRAY_CAT": + return new ConcatenationOperation(readExpression(), readLastArgument()); + // [] + case "ARRAY_GET": + return new ArrayElementReference(readExpression(), readLastArgument()); + // CARDINALITY + case "ARRAY_LENGTH": + return new CardinalityExpression(readSingleArgument(), false); + // Simple case + case "DECODE": { + Expression caseOperand = readExpression(); + boolean canOptimize = caseOperand.isConstant() && !caseOperand.getValue(session).containsNull(); + Expression a = readNextArgument(), b = readNextArgument(); + SimpleCase.SimpleWhen when = decodeToWhen(caseOperand, canOptimize, a, b), current = when; + Expression elseResult = null; + while (readIf(COMMA)) { + a = readExpression(); + if (readIf(COMMA)) { + b = readExpression(); + SimpleCase.SimpleWhen next = decodeToWhen(caseOperand, canOptimize, a, b); + current.setWhen(next); + current = next; + } else { + elseResult = a; + break; + } + } + read(CLOSE_PAREN); + return new SimpleCase(caseOperand, when, elseResult); } - case Function.CONVERT: { + // Searched case + case "CASEWHEN": + return readCompatibilityCase(readExpression()); + case "NVL2": + return readCompatibilityCase(new NullPredicate(readExpression(), true, false)); + // Cast specification + case "CONVERT": { + Expression arg; + Column column; if (database.getMode().swapConvertFunctionParameters) { - Column type = parseColumnWithType(null); - function.setDataType(type); - read(","); - function.setParameter(0, readExpression()); - read(")"); + column = parseColumnWithType(null); + arg = readNextArgument(); + } else { + arg = readExpression(); + read(COMMA); + column = parseColumnWithType(null); + } + read(CLOSE_PAREN); + return new CastSpecification(arg, column); + 
} + // COALESCE + case "IFNULL": + return new CoalesceFunction(CoalesceFunction.COALESCE, readExpression(), readLastArgument()); + case "NVL": + return readCoalesceFunction(CoalesceFunction.COALESCE); + // CURRENT_CATALOG + case "DATABASE": + read(CLOSE_PAREN); + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG); + // CURRENT_DATE + case "CURDATE": + case "SYSDATE": + case "TODAY": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, true, name); + // CURRENT_SCHEMA + case "SCHEMA": + read(CLOSE_PAREN); + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA); + // CURRENT_TIMESTAMP + case "SYSTIMESTAMP": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, true, name); + // EXTRACT + case "DAY": + case "DAY_OF_MONTH": + case "DAYOFMONTH": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY, readSingleArgument(), null); + case "DAY_OF_WEEK": + case "DAYOFWEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_WEEK, readSingleArgument(), + null); + case "DAY_OF_YEAR": + case "DAYOFYEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_YEAR, readSingleArgument(), + null); + case "HOUR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.HOUR, readSingleArgument(), null); + case "ISO_DAY_OF_WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_DAY_OF_WEEK, + readSingleArgument(), null); + case "ISO_WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK, readSingleArgument(), + null); + case "ISO_YEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK_YEAR, readSingleArgument(), + null); + case "MINUTE": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MINUTE, readSingleArgument(), null); + 
case "MONTH": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MONTH, readSingleArgument(), null); + case "QUARTER": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.QUARTER, readSingleArgument(), // + null); + case "SECOND": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.SECOND, readSingleArgument(), null); + case "WEEK": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.WEEK, readSingleArgument(), null); + case "YEAR": + return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.YEAR, readSingleArgument(), null); + // LOCALTIME + case "CURTIME": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, true, "CURTIME"); + case "SYSTIME": + read(CLOSE_PAREN); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, "SYSTIME"); + // LOCALTIMESTAMP + case "NOW": + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, true, "NOW"); + // LOCATE + case "INSTR": { + Expression arg1 = readExpression(); + return new StringFunction(readNextArgument(), arg1, readIfArgument(), StringFunction.LOCATE); + } + case "POSITION": { + // can't read expression because IN would be read too early + Expression arg1 = readConcat(); + if (!readIf(COMMA)) { + read(IN); + } + return new StringFunction(arg1, readSingleArgument(), null, StringFunction.LOCATE); + } + // LOWER + case "LCASE": + return new StringFunction1(readSingleArgument(), StringFunction1.LOWER); + // SUBSTRING + case "SUBSTR": + return readSubstringFunction(); + // TRIM + case "LTRIM": + return new TrimFunction(readSingleArgument(), null, TrimFunction.LEADING); + case "RTRIM": + return new TrimFunction(readSingleArgument(), null, TrimFunction.TRAILING); + // UPPER + case "UCASE": + return new StringFunction1(readSingleArgument(), StringFunction1.UPPER); + // Sequence value + case "CURRVAL": + return 
readCompatibilitySequenceValueFunction(true); + case "NEXTVAL": + return readCompatibilitySequenceValueFunction(false); + default: + return null; + } + } + + private T readParameters(T expression) { + if (!readIf(CLOSE_PAREN)) { + do { + expression.addParameter(readExpression()); + } while (readIfMore()); + } + expression.doneWithParameters(); + return expression; + } + + private SimpleCase.SimpleWhen decodeToWhen(Expression caseOperand, boolean canOptimize, Expression whenOperand, + Expression result) { + if (!canOptimize && (!whenOperand.isConstant() || whenOperand.getValue(session).containsNull())) { + whenOperand = new Comparison(Comparison.EQUAL_NULL_SAFE, caseOperand, whenOperand, true); + } + return new SimpleCase.SimpleWhen(whenOperand, result); + } + + private Expression readCompatibilityCase(Expression when) { + return new SearchedCase(new Expression[] { when, readNextArgument(), readLastArgument() }); + } + + private Expression readCompatibilitySequenceValueFunction(boolean current) { + Expression arg1 = readExpression(), arg2 = readIf(COMMA) ? 
readExpression() : null; + read(CLOSE_PAREN); + return new CompatibilitySequenceValueFunction(arg1, arg2, current); + } + + private Expression readBuiltinFunctionIf(String upperName) { + switch (upperName) { + case "ABS": + return new MathFunction(readSingleArgument(), null, MathFunction.ABS); + case "MOD": + return new MathFunction(readExpression(), readLastArgument(), MathFunction.MOD); + case "SIN": + return new MathFunction1(readSingleArgument(), MathFunction1.SIN); + case "COS": + return new MathFunction1(readSingleArgument(), MathFunction1.COS); + case "TAN": + return new MathFunction1(readSingleArgument(), MathFunction1.TAN); + case "COT": + return new MathFunction1(readSingleArgument(), MathFunction1.COT); + case "SINH": + return new MathFunction1(readSingleArgument(), MathFunction1.SINH); + case "COSH": + return new MathFunction1(readSingleArgument(), MathFunction1.COSH); + case "TANH": + return new MathFunction1(readSingleArgument(), MathFunction1.TANH); + case "ASIN": + return new MathFunction1(readSingleArgument(), MathFunction1.ASIN); + case "ACOS": + return new MathFunction1(readSingleArgument(), MathFunction1.ACOS); + case "ATAN": + return new MathFunction1(readSingleArgument(), MathFunction1.ATAN); + case "ATAN2": + return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.ATAN2); + case "LOG": { + Expression arg1 = readExpression(); + if (readIf(COMMA)) { + return new MathFunction2(arg1, readSingleArgument(), MathFunction2.LOG); } else { - function.setParameter(0, readExpression()); - read(","); - Column type = parseColumnWithType(null); - function.setDataType(type); - read(")"); + read(CLOSE_PAREN); + return new MathFunction1(arg1, + database.getMode().logIsLogBase10 ? 
MathFunction1.LOG10 : MathFunction1.LN); + } + } + case "LOG10": + return new MathFunction1(readSingleArgument(), MathFunction1.LOG10); + case "LN": + return new MathFunction1(readSingleArgument(), MathFunction1.LN); + case "EXP": + return new MathFunction1(readSingleArgument(), MathFunction1.EXP); + case "POWER": + return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.POWER); + case "SQRT": + return new MathFunction1(readSingleArgument(), MathFunction1.SQRT); + case "FLOOR": + return new MathFunction(readSingleArgument(), null, MathFunction.FLOOR); + case "CEIL": + case "CEILING": + return new MathFunction(readSingleArgument(), null, MathFunction.CEIL); + case "ROUND": + return new MathFunction(readExpression(), readIfArgument(), MathFunction.ROUND); + case "ROUNDMAGIC": + return new MathFunction(readSingleArgument(), null, MathFunction.ROUNDMAGIC); + case "SIGN": + return new MathFunction(readSingleArgument(), null, MathFunction.SIGN); + case "TRUNC": + case "TRUNCATE": + return new MathFunction(readExpression(), readIfArgument(), MathFunction.TRUNC); + case "DEGREES": + return new MathFunction1(readSingleArgument(), MathFunction1.DEGREES); + case "RADIANS": + return new MathFunction1(readSingleArgument(), MathFunction1.RADIANS); + case "BITAND": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITAND); + case "BITOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITOR); + case "BITXOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXOR); + case "BITNOT": + return new BitFunction(readSingleArgument(), null, BitFunction.BITNOT); + case "BITNAND": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNAND); + case "BITNOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNOR); + case "BITXNOR": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXNOR); + case "BITGET": + return 
new BitFunction(readExpression(), readLastArgument(), BitFunction.BITGET); + case "BITCOUNT": + return new BitFunction(readSingleArgument(), null, BitFunction.BITCOUNT); + case "LSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.LSHIFT); + case "RSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.RSHIFT); + case "ULSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ULSHIFT); + case "URSHIFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.URSHIFT); + case "ROTATELEFT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATELEFT); + case "ROTATERIGHT": + return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATERIGHT); + case "EXTRACT": { + int field = readDateTimeField(); + read(FROM); + return new DateTimeFunction(DateTimeFunction.EXTRACT, field, readSingleArgument(), null); + } + case "DATE_TRUNC": + return new DateTimeFunction(DateTimeFunction.DATE_TRUNC, readDateTimeField(), readLastArgument(), null); + case "DATEADD": + case "TIMESTAMPADD": + return new DateTimeFunction(DateTimeFunction.DATEADD, readDateTimeField(), readNextArgument(), + readLastArgument()); + case "DATEDIFF": + case "TIMESTAMPDIFF": + return new DateTimeFunction(DateTimeFunction.DATEDIFF, readDateTimeField(), readNextArgument(), + readLastArgument()); + case "FORMATDATETIME": + return readDateTimeFormatFunction(DateTimeFormatFunction.FORMATDATETIME); + case "PARSEDATETIME": + return readDateTimeFormatFunction(DateTimeFormatFunction.PARSEDATETIME); + case "DAYNAME": + return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.DAYNAME); + case "MONTHNAME": + return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.MONTHNAME); + case "CARDINALITY": + return new CardinalityExpression(readSingleArgument(), false); + case "ARRAY_MAX_CARDINALITY": + return new 
CardinalityExpression(readSingleArgument(), true); + case "LOCATE": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LOCATE); + case "INSERT": + return new StringFunction(readExpression(), readNextArgument(), readNextArgument(), readLastArgument(), + StringFunction.INSERT); + case "REPLACE": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.REPLACE); + case "LPAD": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LPAD); + case "RPAD": + return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.RPAD); + case "TRANSLATE": + return new StringFunction(readExpression(), readNextArgument(), readLastArgument(), + StringFunction.TRANSLATE); + case "UPPER": + return new StringFunction1(readSingleArgument(), StringFunction1.UPPER); + case "LOWER": + return new StringFunction1(readSingleArgument(), StringFunction1.LOWER); + case "ASCII": + return new StringFunction1(readSingleArgument(), StringFunction1.ASCII); + case "CHAR": + case "CHR": + return new StringFunction1(readSingleArgument(), StringFunction1.CHAR); + case "STRINGENCODE": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGENCODE); + case "STRINGDECODE": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGDECODE); + case "STRINGTOUTF8": + return new StringFunction1(readSingleArgument(), StringFunction1.STRINGTOUTF8); + case "UTF8TOSTRING": + return new StringFunction1(readSingleArgument(), StringFunction1.UTF8TOSTRING); + case "HEXTORAW": + return new StringFunction1(readSingleArgument(), StringFunction1.HEXTORAW); + case "RAWTOHEX": + return new StringFunction1(readSingleArgument(), StringFunction1.RAWTOHEX); + case "SPACE": + return new StringFunction1(readSingleArgument(), StringFunction1.SPACE); + case "QUOTE_IDENT": + return new StringFunction1(readSingleArgument(), 
StringFunction1.QUOTE_IDENT); + case "SUBSTRING": + return readSubstringFunction(); + case "TO_CHAR": { + Expression arg1 = readExpression(), arg2, arg3; + if (readIf(COMMA)) { + arg2 = readExpression(); + arg3 = readIf(COMMA) ? readExpression() : null; + } else { + arg3 = arg2 = null; + } + read(CLOSE_PAREN); + return new ToCharFunction(arg1, arg2, arg3); + } + case "REPEAT": + return new StringFunction2(readExpression(), readLastArgument(), StringFunction2.REPEAT); + case "CHAR_LENGTH": + case "CHARACTER_LENGTH": + case "LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.CHAR_LENGTH); + case "OCTET_LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.OCTET_LENGTH); + case "BIT_LENGTH": + return new LengthFunction(readIfSingleArgument(), LengthFunction.BIT_LENGTH); + case "TRIM": + return readTrimFunction(); + case "REGEXP_LIKE": + return readParameters(new RegexpFunction(RegexpFunction.REGEXP_LIKE)); + case "REGEXP_REPLACE": + return readParameters(new RegexpFunction(RegexpFunction.REGEXP_REPLACE)); + case "REGEXP_SUBSTR": + return readParameters(new RegexpFunction(RegexpFunction.REGEXP_SUBSTR)); + case "XMLATTR": + return readParameters(new XMLFunction(XMLFunction.XMLATTR)); + case "XMLCDATA": + return readParameters(new XMLFunction(XMLFunction.XMLCDATA)); + case "XMLCOMMENT": + return readParameters(new XMLFunction(XMLFunction.XMLCOMMENT)); + case "XMLNODE": + return readParameters(new XMLFunction(XMLFunction.XMLNODE)); + case "XMLSTARTDOC": + return readParameters(new XMLFunction(XMLFunction.XMLSTARTDOC)); + case "XMLTEXT": + return readParameters(new XMLFunction(XMLFunction.XMLTEXT)); + case "TRIM_ARRAY": + return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.TRIM_ARRAY); + case "ARRAY_CONTAINS": + return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.ARRAY_CONTAINS); + case "ARRAY_SLICE": + return new ArrayFunction(readExpression(), readNextArgument(), 
readLastArgument(), + ArrayFunction.ARRAY_SLICE); + case "COMPRESS": + return new CompressFunction(readExpression(), readIfArgument(), CompressFunction.COMPRESS); + case "EXPAND": + return new CompressFunction(readSingleArgument(), null, CompressFunction.EXPAND); + case "SOUNDEX": + return new SoundexFunction(readSingleArgument(), null, SoundexFunction.SOUNDEX); + case "DIFFERENCE": + return new SoundexFunction(readExpression(), readLastArgument(), SoundexFunction.DIFFERENCE); + case "JSON_OBJECT": { + JsonConstructorFunction function = new JsonConstructorFunction(false); + if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, false)) { + do { + boolean withKey = readIf(KEY); + function.addParameter(readExpression()); + if (withKey) { + read(VALUE); + } else if (!readIf(VALUE)) { + read(COLON); + } + function.addParameter(readExpression()); + } while (readIf(COMMA)); + readJsonObjectFunctionFlags(function, false); } + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; + } + case "JSON_ARRAY": { + JsonConstructorFunction function = new JsonConstructorFunction(true); + function.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL); + if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, true)) { + do { + function.addParameter(readExpression()); + } while (readIf(COMMA)); + readJsonObjectFunctionFlags(function, true); + } + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; + } + case "ENCRYPT": + return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.ENCRYPT); + case "DECRYPT": + return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.DECRYPT); + case "COALESCE": + return readCoalesceFunction(CoalesceFunction.COALESCE); + case "GREATEST": + return readCoalesceFunction(CoalesceFunction.GREATEST); + case "LEAST": + return readCoalesceFunction(CoalesceFunction.LEAST); + case "NULLIF": + return new 
NullIfFunction(readExpression(), readLastArgument()); + case "CONCAT": + return readConcatFunction(ConcatFunction.CONCAT); + case "CONCAT_WS": + return readConcatFunction(ConcatFunction.CONCAT_WS); + case "HASH": + return new HashFunction(readExpression(), readNextArgument(), readIfArgument(), HashFunction.HASH); + case "ORA_HASH": { + Expression arg1 = readExpression(); + if (readIfMore()) { + return new HashFunction(arg1, readExpression(), readIfArgument(), HashFunction.ORA_HASH); + } + return new HashFunction(arg1, HashFunction.ORA_HASH); + } + case "RAND": + case "RANDOM": + return new RandFunction(readIfSingleArgument(), RandFunction.RAND); + case "SECURE_RAND": + return new RandFunction(readSingleArgument(), RandFunction.SECURE_RAND); + case "RANDOM_UUID": + case "UUID": + read(CLOSE_PAREN); + return new RandFunction(null, RandFunction.RANDOM_UUID); + case "ABORT_SESSION": + return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.ABORT_SESSION); + case "CANCEL_SESSION": + return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.CANCEL_SESSION); + case "AUTOCOMMIT": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.AUTOCOMMIT); + case "DATABASE_PATH": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.DATABASE_PATH); + case "H2VERSION": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.H2VERSION); + case "LOCK_MODE": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.LOCK_MODE); + case "LOCK_TIMEOUT": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.LOCK_TIMEOUT); + case "MEMORY_FREE": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.MEMORY_FREE); + case "MEMORY_USED": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.MEMORY_USED); + case "READONLY": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.READONLY); + case "SESSION_ID": + read(CLOSE_PAREN); + return new 
SysInfoFunction(SysInfoFunction.SESSION_ID); + case "TRANSACTION_ID": + read(CLOSE_PAREN); + return new SysInfoFunction(SysInfoFunction.TRANSACTION_ID); + case "DISK_SPACE_USED": + return new TableInfoFunction(readIfSingleArgument(), null, TableInfoFunction.DISK_SPACE_USED); + case "ESTIMATED_ENVELOPE": + return new TableInfoFunction(readExpression(), readLastArgument(), TableInfoFunction.ESTIMATED_ENVELOPE); + case "FILE_READ": + return new FileFunction(readExpression(), readIfArgument(), FileFunction.FILE_READ); + case "FILE_WRITE": + return new FileFunction(readExpression(), readLastArgument(), FileFunction.FILE_WRITE); + case "DATA_TYPE_SQL": + return new DataTypeSQLFunction(readExpression(), readNextArgument(), readNextArgument(), + readLastArgument()); + case "DB_OBJECT_ID": + return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(), + DBObjectFunction.DB_OBJECT_ID); + case "DB_OBJECT_SQL": + return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(), + DBObjectFunction.DB_OBJECT_SQL); + case "CSVWRITE": + return readParameters(new CSVWriteFunction()); + case "SIGNAL": + return new SignalFunction(readExpression(), readLastArgument()); + case "TRUNCATE_VALUE": + return new TruncateValueFunction(readExpression(), readNextArgument(), readLastArgument()); + case "ZERO": + read(CLOSE_PAREN); + return ValueExpression.get(ValueInteger.get(0)); + case "PI": + read(CLOSE_PAREN); + return ValueExpression.get(ValueDouble.get(Math.PI)); + } + ModeFunction function = ModeFunction.getFunction(database, upperName); + return function != null ? 
readParameters(function) : null; + } + + private Expression readDateTimeFormatFunction(int function) { + DateTimeFormatFunction f = new DateTimeFormatFunction(function); + f.addParameter(readExpression()); + read(COMMA); + f.addParameter(readExpression()); + if (readIf(COMMA)) { + f.addParameter(readExpression()); + if (readIf(COMMA)) { + f.addParameter(readExpression()); + } + } + read(CLOSE_PAREN); + f.doneWithParameters(); + return f; + } + + private Expression readTrimFunction() { + int flags; + boolean needFrom = false; + if (readIf("LEADING")) { + flags = TrimFunction.LEADING; + needFrom = true; + } else if (readIf("TRAILING")) { + flags = TrimFunction.TRAILING; + needFrom = true; + } else { + needFrom = readIf("BOTH"); + flags = TrimFunction.LEADING | TrimFunction.TRAILING; + } + Expression from, space = null; + if (needFrom) { + if (!readIf(FROM)) { + space = readExpression(); + read(FROM); + } + from = readExpression(); + } else { + if (readIf(FROM)) { + from = readExpression(); + } else { + from = readExpression(); + if (readIf(FROM)) { + space = from; + from = readExpression(); + } else if (readIf(COMMA)) { + space = readExpression(); + } + } + } + read(CLOSE_PAREN); + return new TrimFunction(from, space, flags); + } + + private ArrayTableFunction readUnnestFunction() { + ArrayTableFunction f = new ArrayTableFunction(ArrayTableFunction.UNNEST); + ArrayList columns = Utils.newSmallArrayList(); + if (!readIf(CLOSE_PAREN)) { + int i = 0; + do { + Expression expr = readExpression(); + TypeInfo columnType = TypeInfo.TYPE_NULL; + if (expr.isConstant()) { + expr = expr.optimize(session); + TypeInfo exprType = expr.getType(); + if (exprType.getValueType() == Value.ARRAY) { + columnType = (TypeInfo) exprType.getExtTypeInfo(); + } + } + f.addParameter(expr); + columns.add(new Column("C" + ++i, columnType)); + } while (readIfMore()); + } + if (readIf(WITH)) { + read("ORDINALITY"); + columns.add(new Column("NORD", TypeInfo.TYPE_INTEGER)); + } + 
f.setColumns(columns); + f.doneWithParameters(); + return f; + } + + private ArrayTableFunction readTableFunction(int functionType) { + ArrayTableFunction f = new ArrayTableFunction(functionType); + ArrayList columns = Utils.newSmallArrayList(); + do { + columns.add(parseColumnWithType(readIdentifier())); + read(EQUAL); + f.addParameter(readExpression()); + } while (readIfMore()); + f.setColumns(columns); + f.doneWithParameters(); + return f; + } + + private Expression readSingleArgument() { + Expression arg = readExpression(); + read(CLOSE_PAREN); + return arg; + } + + private Expression readNextArgument() { + read(COMMA); + return readExpression(); + } + + private Expression readLastArgument() { + read(COMMA); + Expression arg = readExpression(); + read(CLOSE_PAREN); + return arg; + } + + private Expression readIfSingleArgument() { + Expression arg; + if (readIf(CLOSE_PAREN)) { + arg = null; + } else { + arg = readExpression(); + read(CLOSE_PAREN); + } + return arg; + } + + private Expression readIfArgument() { + Expression arg = readIf(COMMA) ? 
readExpression() : null; + read(CLOSE_PAREN); + return arg; + } + + private Expression readCoalesceFunction(int function) { + CoalesceFunction f = new CoalesceFunction(function); + f.addParameter(readExpression()); + while (readIfMore()) { + f.addParameter(readExpression()); + } + f.doneWithParameters(); + return f; + } + + private Expression readConcatFunction(int function) { + ConcatFunction f = new ConcatFunction(function); + f.addParameter(readExpression()); + f.addParameter(readNextArgument()); + if (function == ConcatFunction.CONCAT_WS) { + f.addParameter(readNextArgument()); + } + while (readIfMore()) { + f.addParameter(readExpression()); + } + f.doneWithParameters(); + return f; + } + + private Expression readSubstringFunction() { + // Standard variants are: + // SUBSTRING(X FROM 1) + // SUBSTRING(X FROM 1 FOR 1) + // Different non-standard variants include: + // SUBSTRING(X,1) + // SUBSTRING(X,1,1) + // SUBSTRING(X FOR 1) -- Postgres + SubstringFunction function = new SubstringFunction(); + function.addParameter(readExpression()); + if (readIf(FROM)) { + function.addParameter(readExpression()); + if (readIf(FOR)) { + function.addParameter(readExpression()); + } + } else if (readIf(FOR)) { + function.addParameter(ValueExpression.get(ValueInteger.get(1))); + function.addParameter(readExpression()); + } else { + read(COMMA); + function.addParameter(readExpression()); + if (readIf(COMMA)) { + function.addParameter(readExpression()); + } + } + read(CLOSE_PAREN); + function.doneWithParameters(); + return function; + } + + private int readDateTimeField() { + int field = -1; + switch (currentTokenType) { + case IDENTIFIER: + if (!token.isQuoted()) { + field = DateTimeFunction.getField(currentToken); + } + break; + case LITERAL: + if (token.value(session).getValueType() == Value.VARCHAR) { + field = DateTimeFunction.getField(token.value(session).getString()); + } + break; + case YEAR: + field = DateTimeFunction.YEAR; break; + case MONTH: + field = 
DateTimeFunction.MONTH; + break; + case DAY: + field = DateTimeFunction.DAY; + break; + case HOUR: + field = DateTimeFunction.HOUR; + break; + case MINUTE: + field = DateTimeFunction.MINUTE; + break; + case SECOND: + field = DateTimeFunction.SECOND; + } + if (field < 0) { + addExpected("date-time field"); + throw getSyntaxError(); + } + read(); + return field; + } + + private WindowFunction readWindowFunction(String name) { + WindowFunctionType type = WindowFunctionType.get(name); + if (type == null) { + return null; + } + if (currentSelect == null) { + throw getSyntaxError(); + } + int numArgs = WindowFunction.getMinArgumentCount(type); + Expression[] args = null; + if (numArgs > 0) { + // There is no functions with numArgs == 0 && numArgsMax > 0 + int numArgsMax = WindowFunction.getMaxArgumentCount(type); + args = new Expression[numArgsMax]; + if (numArgs == numArgsMax) { + for (int i = 0; i < numArgs; i++) { + if (i > 0) { + read(COMMA); + } + args[i] = readExpression(); + } + } else { + int i = 0; + while (i < numArgsMax) { + if (i > 0 && !readIf(COMMA)) { + break; + } + args[i] = readExpression(); + i++; + } + if (i < numArgs) { + throw getSyntaxError(); + } + if (i != numArgsMax) { + args = Arrays.copyOf(args, i); + } + } + } + read(CLOSE_PAREN); + WindowFunction function = new WindowFunction(type, currentSelect, args); + switch (type) { + case NTH_VALUE: + readFromFirstOrLast(function); + //$FALL-THROUGH$ + case LEAD: + case LAG: + case FIRST_VALUE: + case LAST_VALUE: + readRespectOrIgnoreNulls(function); + //$FALL-THROUGH$ + default: + // Avoid warning + } + readOver(function); + return function; + } + + private void readFromFirstOrLast(WindowFunction function) { + if (readIf(FROM) && !readIf("FIRST")) { + read("LAST"); + function.setFromLast(true); + } + } + + private void readRespectOrIgnoreNulls(WindowFunction function) { + if (readIf("RESPECT")) { + read("NULLS"); + } else if (readIf("IGNORE")) { + read("NULLS"); + function.setIgnoreNulls(true); + } + } 
+ + private boolean readJsonObjectFunctionFlags(ExpressionWithFlags function, boolean forArray) { + int start = tokenIndex; + boolean result = false; + int flags = function.getFlags(); + if (readIf(NULL)) { + if (readIf(ON)) { + read(NULL); + flags &= ~JsonConstructorUtils.JSON_ABSENT_ON_NULL; + result = true; + } else { + setTokenIndex(start); + return false; + } + } else if (readIf("ABSENT")) { + if (readIf(ON)) { + read(NULL); + flags |= JsonConstructorUtils.JSON_ABSENT_ON_NULL; + result = true; + } else { + setTokenIndex(start); + return false; + } + } + if (!forArray) { + if (readIf(WITH)) { + read(UNIQUE); + read("KEYS"); + flags |= JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS; + result = true; + } else if (readIf("WITHOUT")) { + if (readIf(UNIQUE)) { + read("KEYS"); + flags &= ~JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS; + result = true; + } else if (result) { + throw getSyntaxError(); + } else { + setTokenIndex(start); + return false; + } + } + } + if (result) { + function.setFlags(flags); + } + return result; + } + + private Expression readKeywordCompatibilityFunctionOrColumn() { + boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType); + String name = currentToken; + read(); + if (readIf(OPEN_PAREN)) { + return readCompatibilityFunction(upperName(name)); + } else if (nonKeyword) { + return readIf(DOT) ? 
readTermObjectDot(name) : new ExpressionColumn(database, null, null, name); + } + throw getSyntaxError(); + } + + private Expression readCurrentDateTimeValueFunction(int function, boolean hasParen, String name) { + int scale = -1; + if (hasParen) { + if (function != CurrentDateTimeValueFunction.CURRENT_DATE && currentTokenType != CLOSE_PAREN) { + scale = readInt(); + if (scale < 0 || scale > ValueTime.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* compile-time constant */ "" + ValueTime.MAXIMUM_SCALE); + } + } + read(CLOSE_PAREN); + } + if (database.isAllowBuiltinAliasOverride()) { + FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()) + .findFunction(name != null ? name : CurrentDateTimeValueFunction.getName(function)); + if (functionAlias != null) { + return new JavaFunction(functionAlias, + scale >= 0 ? new Expression[] { ValueExpression.get(ValueInteger.get(scale)) } + : new Expression[0]); + } + } + return new CurrentDateTimeValueFunction(function, scale); + } + + private Expression readIfWildcardRowidOrSequencePseudoColumn(String schema, String objectName) { + if (readIf(ASTERISK)) { + return parseWildcard(schema, objectName); + } + if (readIf(_ROWID_)) { + return new ExpressionColumn(database, schema, objectName); + } + if (database.getMode().nextvalAndCurrvalPseudoColumns) { + return readIfSequencePseudoColumn(schema, objectName); + } + return null; + } + + private Wildcard parseWildcard(String schema, String objectName) { + Wildcard wildcard = new Wildcard(schema, objectName); + if (readIf(EXCEPT)) { + read(OPEN_PAREN); + ArrayList exceptColumns = Utils.newSmallArrayList(); + do { + String s = null, t = null; + String name = readIdentifier(); + if (readIf(DOT)) { + t = name; + name = readIdentifier(); + if (readIf(DOT)) { + s = t; + t = name; + name = readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(s); + s = t; + t = name; + name = readIdentifier(); + } + } + 
} + exceptColumns.add(new ExpressionColumn(database, s, t, name)); + } while (readIfMore()); + wildcard.setExceptColumns(exceptColumns); + } + return wildcard; + } + + private SequenceValue readIfSequencePseudoColumn(String schema, String objectName) { + if (schema == null) { + schema = session.getCurrentSchemaName(); + } + if (isToken("NEXTVAL")) { + Sequence sequence = findSequence(schema, objectName); + if (sequence != null) { + read(); + return new SequenceValue(sequence, getCurrentPrepared()); + } + } else if (isToken("CURRVAL")) { + Sequence sequence = findSequence(schema, objectName); + if (sequence != null) { + read(); + return new SequenceValue(sequence); + } + } + return null; + } + + private Expression readTermObjectDot(String objectName) { + Expression expr = readIfWildcardRowidOrSequencePseudoColumn(null, objectName); + if (expr != null) { + return expr; + } + String name = readIdentifier(); + if (readIf(OPEN_PAREN)) { + return readFunction(database.getSchema(objectName), name); + } else if (readIf(DOT)) { + String schema = objectName; + objectName = name; + expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName); + if (expr != null) { + return expr; + } + name = readIdentifier(); + if (readIf(OPEN_PAREN)) { + checkDatabaseName(schema); + return readFunction(database.getSchema(objectName), name); + } else if (readIf(DOT)) { + checkDatabaseName(schema); + schema = objectName; + objectName = name; + expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName); + if (expr != null) { + return expr; + } + name = readIdentifier(); + } + return new ExpressionColumn(database, schema, objectName, name); } - case Function.EXTRACT: { - function.setParameter(0, - ValueExpression.get(ValueString.get(currentToken))); + return new ExpressionColumn(database, null, objectName, name); + } + + private void checkDatabaseName(String databaseName) { + if (!database.getIgnoreCatalogs() && !equalsToken(database.getShortName(), databaseName)) { + throw 
DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, databaseName); + } + } + + private Parameter readParameter() { + int index = ((Token.ParameterToken) token).index(); + read(); + Parameter p; + if (parameters == null) { + parameters = Utils.newSmallArrayList(); + } + if (index > Constants.MAX_PARAMETER_INDEX) { + throw DbException.getInvalidValueException("parameter index", index); + } + index--; + if (parameters.size() <= index) { + parameters.ensureCapacity(index + 1); + while (parameters.size() < index) { + parameters.add(null); + } + p = new Parameter(index); + parameters.add(p); + } else if ((p = parameters.get(index)) == null) { + p = new Parameter(index); + parameters.set(index, p); + } + return p; + } + + private Expression readTerm() { + Expression r; + switch (currentTokenType) { + case AT: read(); - read("FROM"); - function.setParameter(1, readExpression()); - read(")"); + r = new Variable(session, readIdentifier()); + if (readIf(COLON_EQ)) { + r = new SetFunction(r, readExpression()); + } break; - } - case Function.DATE_ADD: - case Function.DATE_DIFF: { - if (Function.isDatePart(currentToken)) { - function.setParameter(0, - ValueExpression.get(ValueString.get(currentToken))); + case PARAMETER: + r = readParameter(); + break; + case TABLE: + case SELECT: + case WITH: + r = new Subquery(parseQuery()); + break; + case MINUS_SIGN: + read(); + if (currentTokenType == LITERAL) { + r = ValueExpression.get(token.value(session).negate()); + int rType = r.getType().getValueType(); + if (rType == Value.BIGINT && + r.getValue(session).getLong() == Integer.MIN_VALUE) { + // convert Integer.MIN_VALUE to type 'int' + // (Integer.MAX_VALUE+1 is of type 'long') + r = ValueExpression.get(ValueInteger.get(Integer.MIN_VALUE)); + } else if (rType == Value.NUMERIC && + r.getValue(session).getBigDecimal().compareTo(Value.MIN_LONG_DECIMAL) == 0) { + // convert Long.MIN_VALUE to type 'long' + // (Long.MAX_VALUE+1 is of type 'decimal') + r = ValueExpression.get(ValueBigint.MIN); + 
} read(); } else { - function.setParameter(0, readExpression()); + r = new UnaryOperation(readTerm()); } - read(","); - function.setParameter(1, readExpression()); - read(","); - function.setParameter(2, readExpression()); - read(")"); break; - } - case Function.SUBSTRING: { - // Different variants include: - // SUBSTRING(X,1) - // SUBSTRING(X,1,1) - // SUBSTRING(X FROM 1 FOR 1) -- Postgres - // SUBSTRING(X FROM 1) -- Postgres - // SUBSTRING(X FOR 1) -- Postgres - function.setParameter(0, readExpression()); - if (readIf("FROM")) { - function.setParameter(1, readExpression()); - if (readIf("FOR")) { - function.setParameter(2, readExpression()); - } - } else if (readIf("FOR")) { - function.setParameter(1, ValueExpression.get(ValueInt.get(0))); - function.setParameter(2, readExpression()); + case PLUS_SIGN: + read(); + r = readTerm(); + break; + case OPEN_PAREN: + read(); + if (readIf(CLOSE_PAREN)) { + r = ValueExpression.get(ValueRow.EMPTY); + } else if (isQuery()) { + r = new Subquery(parseQuery()); + read(CLOSE_PAREN); } else { - read(","); - function.setParameter(1, readExpression()); - if (readIf(",")) { - function.setParameter(2, readExpression()); + r = readExpression(); + if (readIfMore()) { + ArrayList list = Utils.newSmallArrayList(); + list.add(r); + do { + list.add(readExpression()); + } while (readIfMore()); + r = new ExpressionList(list.toArray(new Expression[0]), false); + } else if (r instanceof BinaryOperation) { + BinaryOperation binaryOperation = (BinaryOperation) r; + if (binaryOperation.getOperationType() == OpType.MINUS) { + TypeInfo ti = readIntervalQualifier(); + if (ti != null) { + binaryOperation.setForcedType(ti); + } + } + } + } + if (readIf(DOT)) { + r = new FieldReference(r, readIdentifier()); + } + break; + case ARRAY: + read(); + if (readIf(OPEN_BRACKET)) { + if (readIf(CLOSE_BRACKET)) { + r = ValueExpression.get(ValueArray.EMPTY); + } else { + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readExpression()); + } while 
(readIf(COMMA)); + read(CLOSE_BRACKET); + r = new ExpressionList(list.toArray(new Expression[0]), true); } + } else { + read(OPEN_PAREN); + Query q = parseQuery(); + read(CLOSE_PAREN); + r = new ArrayConstructorByQuery(q); + } + break; + case INTERVAL: + read(); + r = readInterval(); + break; + case ROW: { + read(); + read(OPEN_PAREN); + if (readIf(CLOSE_PAREN)) { + r = ValueExpression.get(ValueRow.EMPTY); + } else { + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readExpression()); + } while (readIfMore()); + r = new ExpressionList(list.toArray(new Expression[0]), false); + } + break; + } + case TRUE: + read(); + r = ValueExpression.TRUE; + break; + case FALSE: + read(); + r = ValueExpression.FALSE; + break; + case UNKNOWN: + read(); + r = TypedValueExpression.UNKNOWN; + break; + case ROWNUM: + read(); + if (readIf(OPEN_PAREN)) { + read(CLOSE_PAREN); + } + if (currentSelect == null && currentPrepared == null) { + throw getSyntaxError(); + } + r = new Rownum(getCurrentPrepared()); + break; + case NULL: + read(); + r = ValueExpression.NULL; + break; + case _ROWID_: + read(); + r = new ExpressionColumn(database, null, null); + break; + case LITERAL: + r = ValueExpression.get(token.value(session)); + read(); + break; + case VALUES: + if (database.getMode().onDuplicateKeyUpdate) { + if (currentPrepared instanceof Insert) { + r = readOnDuplicateKeyValues(((Insert) currentPrepared).getTable(), null); + break; + } else if (currentPrepared instanceof Update) { + Update update = (Update) currentPrepared; + r = readOnDuplicateKeyValues(update.getTable(), update); + break; + } + } + r = new Subquery(parseQuery()); + break; + case CASE: + read(); + r = readCase(); + break; + case CAST: { + read(); + read(OPEN_PAREN); + Expression arg = readExpression(); + read(AS); + Column column = parseColumnWithType(null); + read(CLOSE_PAREN); + r = new CastSpecification(arg, column); + break; + } + case CURRENT_CATALOG: + return 
readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG); + case CURRENT_DATE: + read(); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, readIf(OPEN_PAREN), null); + break; + case CURRENT_PATH: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_PATH); + case CURRENT_ROLE: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_ROLE); + case CURRENT_SCHEMA: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA); + case CURRENT_TIME: + read(); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIME, readIf(OPEN_PAREN), null); + break; + case CURRENT_TIMESTAMP: + read(); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, readIf(OPEN_PAREN), + null); + break; + case CURRENT_USER: + case USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_USER); + case SESSION_USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SESSION_USER); + case SYSTEM_USER: + return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SYSTEM_USER); + case ANY: + case SOME: + read(); + read(OPEN_PAREN); + return readAggregate(AggregateType.ANY, "ANY"); + case DAY: + case HOUR: + case MINUTE: + case MONTH: + case SECOND: + case YEAR: + r = readKeywordCompatibilityFunctionOrColumn(); + break; + case LEFT: + r = readColumnIfNotFunction(); + if (r == null) { + r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.LEFT); + } + break; + case LOCALTIME: + read(); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, readIf(OPEN_PAREN), null); + break; + case LOCALTIMESTAMP: + read(); + r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN), // + null); + break; + case RIGHT: + r = 
readColumnIfNotFunction(); + if (r == null) { + r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.RIGHT); } - read(")"); break; - } - case Function.POSITION: { - // can't read expression because IN would be read too early - function.setParameter(0, readConcat()); - if (!readIf(",")) { - read("IN"); + case SET: + r = readColumnIfNotFunction(); + if (r == null) { + r = readSetFunction(); } - function.setParameter(1, readExpression()); - read(")"); break; - } - case Function.TRIM: { - Expression space = null; - if (readIf("LEADING")) { - function = Function.getFunction(database, "LTRIM"); - if (!readIf("FROM")) { - space = readExpression(); - read("FROM"); - } - } else if (readIf("TRAILING")) { - function = Function.getFunction(database, "RTRIM"); - if (!readIf("FROM")) { - space = readExpression(); - read("FROM"); - } - } else if (readIf("BOTH")) { - if (!readIf("FROM")) { - space = readExpression(); - read("FROM"); - } + case VALUE: + if (parseDomainConstraint) { + read(); + r = new DomainValueExpression(); + break; } - Expression p0 = readExpression(); - if (readIf(",")) { - space = readExpression(); - } else if (readIf("FROM")) { - space = p0; - p0 = readExpression(); + //$FALL-THROUGH$ + default: + if (!isIdentifier()) { + throw getSyntaxError(); } - function.setParameter(0, p0); - if (space != null) { - function.setParameter(1, space); + //$FALL-THROUGH$ + case IDENTIFIER: + String name = currentToken; + boolean quoted = token.isQuoted(); + read(); + if (readIf(OPEN_PAREN)) { + r = readFunction(null, name); + } else if (readIf(DOT)) { + r = readTermObjectDot(name); + } else if (quoted) { + r = new ExpressionColumn(database, null, null, name); + } else { + r = readTermWithIdentifier(name, quoted); } - read(")"); break; } - case Function.TABLE: - case Function.TABLE_DISTINCT: { - int i = 0; - ArrayList columns = New.arrayList(); - do { - String columnName = readAliasIdentifier(); - Column column = parseColumnWithType(columnName); - 
columns.add(column); - read("="); - function.setParameter(i, readExpression()); - i++; - } while (readIf(",")); - read(")"); - TableFunction tf = (TableFunction) function; - tf.setColumns(columns); - break; + if (readIf(OPEN_BRACKET)) { + r = new ArrayElementReference(r, readExpression()); + read(CLOSE_BRACKET); } - case Function.ROW_NUMBER: - read(")"); - read("OVER"); - read("("); - read(")"); - return new Rownum(currentSelect == null ? currentPrepared - : currentSelect); - default: - if (!readIf(")")) { - int i = 0; - do { - function.setParameter(i++, readExpression()); - } while (readIf(",")); - read(")"); + colonColon: if (readIf(COLON_COLON)) { + if (database.getMode().getEnum() == ModeEnum.PostgreSQL) { + // PostgreSQL compatibility + if (isToken("PG_CATALOG")) { + read("PG_CATALOG"); + read(DOT); + } + if (readIf("REGCLASS")) { + r = new Regclass(r); + break colonColon; + } + } + r = new CastSpecification(r, parseColumnWithType(null)); + } + for (;;) { + TypeInfo ti = readIntervalQualifier(); + if (ti != null) { + r = new CastSpecification(r, ti); + } + int index = tokenIndex; + if (readIf("AT")) { + if (readIf("TIME")) { + read("ZONE"); + r = new TimeZoneOperation(r, readExpression()); + continue; + } else if (readIf("LOCAL")) { + r = new TimeZoneOperation(r, null); + continue; + } else { + setTokenIndex(index); + } + } else if (readIf("FORMAT")) { + if (readIf("JSON")) { + r = new Format(r, FormatEnum.JSON); + continue; + } else { + setTokenIndex(index); + } } + break; } - function.doneWithParameters(); - return function; + return r; } - private Function readFunctionWithoutParameters(String name) { - if (readIf("(")) { - read(")"); + private Expression readCurrentGeneralValueSpecification(int specification) { + read(); + if (readIf(OPEN_PAREN)) { + read(CLOSE_PAREN); } - Function function = Function.getFunction(database, name); - function.doneWithParameters(); - return function; + return new CurrentGeneralValueSpecification(specification); } - private 
Expression readWildcardOrSequenceValue(String schema, - String objectName) { - if (readIf("*")) { - return new Wildcard(schema, objectName); - } - if (schema == null) { - schema = session.getCurrentSchemaName(); + private Expression readColumnIfNotFunction() { + boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType); + String name = currentToken; + read(); + if (readIf(OPEN_PAREN)) { + return null; + } else if (nonKeyword) { + return readIf(DOT) ? readTermObjectDot(name) : new ExpressionColumn(database, null, null, name); } - if (readIf("NEXTVAL")) { - Sequence sequence = findSequence(schema, objectName); - if (sequence != null) { - return new SequenceValue(sequence); - } - } else if (readIf("CURRVAL")) { - Sequence sequence = findSequence(schema, objectName); - if (sequence != null) { - Function function = Function.getFunction(database, "CURRVAL"); - function.setParameter(0, ValueExpression.get(ValueString - .get(sequence.getSchema().getName()))); - function.setParameter(1, ValueExpression.get(ValueString - .get(sequence.getName()))); - function.doneWithParameters(); - return function; + throw getSyntaxError(); + } + + private Expression readSetFunction() { + SetFunction function = new SetFunction(readExpression(), readLastArgument()); + if (database.isAllowBuiltinAliasOverride()) { + FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()).findFunction( + function.getName()); + if (functionAlias != null) { + return new JavaFunction(functionAlias, + new Expression[] { function.getSubexpression(0), function.getSubexpression(1) }); } } - return null; + return function; } - private Expression readTermObjectDot(String objectName) { - Expression expr = readWildcardOrSequenceValue(null, objectName); - if (expr != null) { - return expr; - } - String name = readColumnIdentifier(); - Schema s = database.findSchema(objectName); - if ((!SysProperties.OLD_STYLE_OUTER_JOIN || s != null) && readIf("(")) { - // only if the token 
before the dot is a valid schema name, - // otherwise the old style Oracle outer join doesn't work: - // t.x = t2.x(+) - // this additional check is not required - // if the old style outer joins are not supported - return readFunction(s, name); - } else if (readIf(".")) { - String schema = objectName; - objectName = name; - expr = readWildcardOrSequenceValue(schema, objectName); - if (expr != null) { - return expr; - } - name = readColumnIdentifier(); - if (readIf("(")) { - String databaseName = schema; - if (!equalsToken(database.getShortName(), databaseName)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, - databaseName); - } - schema = objectName; - return readFunction(database.getSchema(schema), name); - } else if (readIf(".")) { - String databaseName = schema; - if (!equalsToken(database.getShortName(), databaseName)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, - databaseName); + private Expression readOnDuplicateKeyValues(Table table, Update update) { + read(); + read(OPEN_PAREN); + Column c = readTableColumn(new TableFilter(session, table, null, rightsChecked, null, 0, null)); + read(CLOSE_PAREN); + return new OnDuplicateKeyValues(c, update); + } + + private Expression readTermWithIdentifier(String name, boolean quoted) { + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + * + * Unquoted identifier is never empty. 
+ */ + switch (name.charAt(0) & 0xffdf) { + case 'C': + if (equalsToken("CURRENT", name)) { + int index = tokenIndex; + if (readIf(VALUE) && readIf(FOR)) { + return new SequenceValue(readSequence()); } - schema = objectName; - objectName = name; - expr = readWildcardOrSequenceValue(schema, objectName); - if (expr != null) { - return expr; + setTokenIndex(index); + if (database.getMode().getEnum() == ModeEnum.DB2) { + return parseDB2SpecialRegisters(name); } - name = readColumnIdentifier(); - return new ExpressionColumn(database, schema, objectName, name); } - return new ExpressionColumn(database, schema, objectName, name); - } - return new ExpressionColumn(database, null, objectName, name); - } - - private Expression readTerm() { - Expression r; - switch (currentTokenType) { - case AT: - read(); - r = new Variable(session, readAliasIdentifier()); - if (readIf(":=")) { - Expression value = readExpression(); - Function function = Function.getFunction(database, "SET"); - function.setParameter(0, r); - function.setParameter(1, value); - r = function; + break; + case 'D': + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR && + (equalsToken("DATE", name) || equalsToken("D", name))) { + String date = token.value(session).getString(); + read(); + return ValueExpression.get(ValueDate.parse(date)); } break; - case PARAMETER: - // there must be no space between ? 
and the number - boolean indexed = Character.isDigit(sqlCommandChars[parseIndex]); - read(); - Parameter p; - if (indexed && currentTokenType == VALUE && - currentValue.getType() == Value.INT) { - if (indexedParameterList == null) { - if (parameters == null) { - // this can occur when parsing expressions only (for - // example check constraints) - throw getSyntaxError(); - } else if (parameters.size() > 0) { - throw DbException - .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); - } - indexedParameterList = New.arrayList(); - } - int index = currentValue.getInt() - 1; - if (index < 0 || index >= Constants.MAX_PARAMETER_INDEX) { - throw DbException.getInvalidValueException( - "parameter index", index); - } - if (indexedParameterList.size() <= index) { - indexedParameterList.ensureCapacity(index + 1); - while (indexedParameterList.size() <= index) { - indexedParameterList.add(null); - } - } - p = indexedParameterList.get(index); - if (p == null) { - p = new Parameter(index); - indexedParameterList.set(index, p); - } + case 'E': + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR // + && equalsToken("E", name)) { + String text = token.value(session).getString(); + // the PostgreSQL ODBC driver uses + // LIKE E'PROJECT\\_DATA' instead of LIKE + // 'PROJECT\_DATA' + // N: SQL-92 "National Language" strings + text = StringUtils.replaceAll(text, "\\\\", "\\"); read(); - } else { - if (indexedParameterList != null) { - throw DbException - .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + return ValueExpression.get(ValueVarchar.get(text)); + } + break; + case 'G': + if (currentTokenType == LITERAL) { + int t = token.value(session).getValueType(); + if (t == Value.VARCHAR && equalsToken("GEOMETRY", name)) { + ValueExpression v = ValueExpression.get(ValueGeometry.get(token.value(session).getString())); + read(); + return v; + } else if (t == Value.VARBINARY && equalsToken("GEOMETRY", name)) { + ValueExpression v = 
ValueExpression + .get(ValueGeometry.getFromEWKB(token.value(session).getBytesNoCopy())); + read(); + return v; } - p = new Parameter(parameters.size()); } - parameters.add(p); - r = p; break; - case KEYWORD: - if (isToken("SELECT") || isToken("FROM")) { - Query query = parseSelect(); - r = new Subquery(query); - } else { - throw getSyntaxError(); + case 'J': + if (currentTokenType == LITERAL) { + int t = token.value(session).getValueType(); + if (t == Value.VARCHAR && equalsToken("JSON", name)) { + ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getString())); + read(); + return v; + } else if (t == Value.VARBINARY && equalsToken("JSON", name)) { + ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getBytesNoCopy())); + read(); + return v; + } } break; - case IDENTIFIER: - String name = currentToken; - if (currentTokenQuoted) { - read(); - if (readIf("(")) { - r = readFunction(null, name); - } else if (readIf(".")) { - r = readTermObjectDot(name); - } else { - r = new ExpressionColumn(database, null, null, name); + case 'N': + if (equalsToken("NEXT", name)) { + int index = tokenIndex; + if (readIf(VALUE) && readIf(FOR)) { + return new SequenceValue(readSequence(), getCurrentPrepared()); } - } else { - read(); - if (readIf(".")) { - r = readTermObjectDot(name); - } else if (equalsToken("CASE", name)) { - // CASE must be processed before (, - // otherwise CASE(3) would be a function call, which it is - // not - r = readCase(); - } else if (readIf("(")) { - r = readFunction(null, name); - } else if (equalsToken("CURRENT_USER", name)) { - r = readFunctionWithoutParameters("USER"); - } else if (equalsToken("CURRENT", name)) { - if (readIf("TIMESTAMP")) { - r = readFunctionWithoutParameters("CURRENT_TIMESTAMP"); - } else if (readIf("TIME")) { - r = readFunctionWithoutParameters("CURRENT_TIME"); - } else if (readIf("DATE")) { - r = readFunctionWithoutParameters("CURRENT_DATE"); - } else { - r = new 
ExpressionColumn(database, null, null, name); + setTokenIndex(index); + } + break; + case 'T': + if (equalsToken("TIME", name)) { + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { + throw getSyntaxError(); } - } else if (equalsToken("NEXT", name) && readIf("VALUE")) { - read("FOR"); - Sequence sequence = readSequence(); - r = new SequenceValue(sequence); - } else if (currentTokenType == VALUE && - currentValue.getType() == Value.STRING) { - if (equalsToken("DATE", name) || - equalsToken("D", name)) { - String date = currentValue.getString(); - read(); - r = ValueExpression.get(ValueDate.parse(date)); - } else if (equalsToken("TIME", name) || - equalsToken("T", name)) { - String time = currentValue.getString(); - read(); - r = ValueExpression.get(ValueTime.parse(time)); - } else if (equalsToken("TIMESTAMP", name) || - equalsToken("TS", name)) { - String timestamp = currentValue.getString(); - read(); - r = ValueExpression - .get(ValueTimestamp.parse(timestamp)); - } else if (equalsToken("X", name)) { - read(); - byte[] buffer = StringUtils - .convertHexToBytes(currentValue.getString()); - r = ValueExpression.get(ValueBytes.getNoCopy(buffer)); - } else if (equalsToken("E", name)) { - String text = currentValue.getString(); - // the PostgreSQL ODBC driver uses - // LIKE E'PROJECT\\_DATA' instead of LIKE - // 'PROJECT\_DATA' - // N: SQL-92 "National Language" strings - text = StringUtils.replaceAll(text, "\\\\", "\\"); - read(); - r = ValueExpression.get(ValueString.get(text)); - } else if (equalsToken("N", name)) { - // SQL-92 "National Language" strings - String text = currentValue.getString(); + String time = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTimeTimeZone.parse(time)); + } else { + boolean without = readIf("WITHOUT"); + if (without) { + read("TIME"); + read("ZONE"); + } + if (currentTokenType == LITERAL && 
token.value(session).getValueType() == Value.VARCHAR) { + String time = token.value(session).getString(); read(); - r = ValueExpression.get(ValueString.get(text)); - } else { - r = new ExpressionColumn(database, null, null, name); + return ValueExpression.get(ValueTime.parse(time)); + } else if (without) { + throw getSyntaxError(); + } + } + } else if (equalsToken("TIMESTAMP", name)) { + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { + throw getSyntaxError(); } + String timestamp = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTimestampTimeZone.parse(timestamp, session)); } else { - r = new ExpressionColumn(database, null, null, name); + boolean without = readIf("WITHOUT"); + if (without) { + read("TIME"); + read("ZONE"); + } + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) { + String timestamp = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTimestamp.parse(timestamp, session)); + } else if (without) { + throw getSyntaxError(); + } + } + } else if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) { + if (equalsToken("T", name)) { + String time = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTime.parse(time)); + } else if (equalsToken("TS", name)) { + String timestamp = token.value(session).getString(); + read(); + return ValueExpression.get(ValueTimestamp.parse(timestamp, session)); } } break; - case MINUS: - read(); - if (currentTokenType == VALUE) { - r = ValueExpression.get(currentValue.negate()); - if (r.getType() == Value.LONG && - r.getValue(session).getLong() == Integer.MIN_VALUE) { - // convert Integer.MIN_VALUE to type 'int' - // (Integer.MAX_VALUE+1 is of type 'long') - r = ValueExpression.get(ValueInt.get(Integer.MIN_VALUE)); - } else if (r.getType() == Value.DECIMAL && - 
r.getValue(session).getBigDecimal() - .compareTo(ValueLong.MIN_BD) == 0) { - // convert Long.MIN_VALUE to type 'long' - // (Long.MAX_VALUE+1 is of type 'decimal') - r = ValueExpression.get(ValueLong.get(Long.MIN_VALUE)); - } + case 'U': + if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR + && (equalsToken("UUID", name))) { + String uuid = token.value(session).getString(); read(); - } else { - r = new Operation(Operation.NEGATE, readTerm(), null); + return ValueExpression.get(ValueUuid.get(uuid)); } break; - case PLUS: - read(); - r = readTerm(); - break; - case OPEN: + } + return new ExpressionColumn(database, null, null, name, quoted); + } + + private Prepared getCurrentPrepared() { + return currentPrepared; + } + + private Expression readInterval() { + boolean negative = readIf(MINUS_SIGN); + if (!negative) { + readIf(PLUS_SIGN); + } + if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) { + addExpected("string"); + throw getSyntaxError(); + } + String s = token.value(session).getString(); + read(); + IntervalQualifier qualifier; + switch (currentTokenType) { + case YEAR: read(); - if (readIf(")")) { - r = new ExpressionList(new Expression[0]); + if (readIf(TO)) { + read(MONTH); + qualifier = IntervalQualifier.YEAR_TO_MONTH; } else { - r = readExpression(); - if (readIf(",")) { - ArrayList list = New.arrayList(); - list.add(r); - while (!readIf(")")) { - r = readExpression(); - list.add(r); - if (!readIf(",")) { - read(")"); - break; - } - } - Expression[] array = new Expression[list.size()]; - list.toArray(array); - r = new ExpressionList(array); - } else { - read(")"); - } + qualifier = IntervalQualifier.YEAR; } break; - case TRUE: - read(); - r = ValueExpression.get(ValueBoolean.get(true)); - break; - case FALSE: - read(); - r = ValueExpression.get(ValueBoolean.get(false)); - break; - case CURRENT_TIME: - read(); - r = readFunctionWithoutParameters("CURRENT_TIME"); - break; - case 
CURRENT_DATE: + case MONTH: read(); - r = readFunctionWithoutParameters("CURRENT_DATE"); + qualifier = IntervalQualifier.MONTH; break; - case CURRENT_TIMESTAMP: { - Function function = Function.getFunction(database, - "CURRENT_TIMESTAMP"); + case DAY: read(); - if (readIf("(")) { - if (!readIf(")")) { - function.setParameter(0, readExpression()); - read(")"); + if (readIf(TO)) { + switch (currentTokenType) { + case HOUR: + qualifier = IntervalQualifier.DAY_TO_HOUR; + break; + case MINUTE: + qualifier = IntervalQualifier.DAY_TO_MINUTE; + break; + case SECOND: + qualifier = IntervalQualifier.DAY_TO_SECOND; + break; + default: + throw intervalDayError(); } + read(); + } else { + qualifier = IntervalQualifier.DAY; } - function.doneWithParameters(); - r = function; break; - } - case ROWNUM: + case HOUR: read(); - if (readIf("(")) { - read(")"); + if (readIf(TO)) { + switch (currentTokenType) { + case MINUTE: + qualifier = IntervalQualifier.HOUR_TO_MINUTE; + break; + case SECOND: + qualifier = IntervalQualifier.HOUR_TO_SECOND; + break; + default: + throw intervalHourError(); + } + read(); + } else { + qualifier = IntervalQualifier.HOUR; } - r = new Rownum(currentSelect == null ? 
currentPrepared - : currentSelect); break; - case NULL: + case MINUTE: read(); - r = ValueExpression.getNull(); + if (readIf(TO)) { + read(SECOND); + qualifier = IntervalQualifier.MINUTE_TO_SECOND; + } else { + qualifier = IntervalQualifier.MINUTE; + } break; - case VALUE: - r = ValueExpression.get(currentValue); + case SECOND: read(); + qualifier = IntervalQualifier.SECOND; break; default: - throw getSyntaxError(); + throw intervalQualifierError(); } - if (readIf("[")) { - Function function = Function.getFunction(database, "ARRAY_GET"); - function.setParameter(0, r); - r = readExpression(); - r = new Operation(Operation.PLUS, r, ValueExpression.get(ValueInt - .get(1))); - function.setParameter(1, r); - r = function; - read("]"); - } - if (readIf("::")) { - // PostgreSQL compatibility - if (isToken("PG_CATALOG")) { - read("PG_CATALOG"); - read("."); - } - if (readIf("REGCLASS")) { - FunctionAlias f = findFunctionAlias(Constants.SCHEMA_MAIN, - "PG_GET_OID"); - if (f == null) { - throw getSyntaxError(); - } - Expression[] args = { r }; - JavaFunction func = new JavaFunction(f, args); - r = func; - } else { - Column col = parseColumnWithType(null); - Function function = Function.getFunction(database, "CAST"); - function.setDataType(col); - function.setParameter(0, r); - r = function; + try { + return ValueExpression.get(IntervalUtils.parseInterval(qualifier, negative, s)); + } catch (Exception e) { + throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s); + } + } + + private Expression parseDB2SpecialRegisters(String name) { + // Only "CURRENT" name is supported + if (readIf("TIMESTAMP")) { + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, + readIf(OPEN_PAREN), null); } + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN), + null); + } else if (readIf("TIME")) { + // Time with fractional seconds is 
not supported by DB2 + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, null); + } else if (readIf("DATE")) { + return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, false, null); } - return r; + // No match, parse CURRENT as a column + return new ExpressionColumn(database, null, null, name); } private Expression readCase() { - if (readIf("END")) { - readIf("CASE"); - return ValueExpression.getNull(); - } - if (readIf("ELSE")) { - Expression elsePart = readExpression().optimize(session); - read("END"); - readIf("CASE"); - return elsePart; - } - int i; - Function function; - if (readIf("WHEN")) { - function = Function.getFunction(database, "CASE"); - function.setParameter(0, null); - i = 1; + Expression c; + if (readIf(WHEN)) { + SearchedCase searched = new SearchedCase(); do { - function.setParameter(i++, readExpression()); + Expression condition = readExpression(); read("THEN"); - function.setParameter(i++, readExpression()); - } while (readIf("WHEN")); + searched.addParameter(condition); + searched.addParameter(readExpression()); + } while (readIf(WHEN)); + if (readIf(ELSE)) { + searched.addParameter(readExpression()); + } + searched.doneWithParameters(); + c = searched; } else { - Expression expr = readExpression(); - if (readIf("END")) { - readIf("CASE"); - return ValueExpression.getNull(); - } - if (readIf("ELSE")) { - Expression elsePart = readExpression().optimize(session); - read("END"); - readIf("CASE"); - return elsePart; - } - function = Function.getFunction(database, "CASE"); - function.setParameter(0, expr); - i = 1; - read("WHEN"); + Expression caseOperand = readExpression(); + read(WHEN); + SimpleCase.SimpleWhen when = readSimpleWhenClause(caseOperand), current = when; + while (readIf(WHEN)) { + SimpleCase.SimpleWhen next = readSimpleWhenClause(caseOperand); + current.setWhen(next); + current = next; + } + c = new SimpleCase(caseOperand, when, readIf(ELSE) ? 
readExpression() : null); + } + read(END); + return c; + } + + private SimpleCase.SimpleWhen readSimpleWhenClause(Expression caseOperand) { + Expression whenOperand = readWhenOperand(caseOperand); + if (readIf(COMMA)) { + ArrayList operands = Utils.newSmallArrayList(); + operands.add(whenOperand); do { - function.setParameter(i++, readExpression()); - read("THEN"); - function.setParameter(i++, readExpression()); - } while (readIf("WHEN")); + operands.add(readWhenOperand(caseOperand)); + } while (readIf(COMMA)); + read("THEN"); + return new SimpleCase.SimpleWhen(operands.toArray(new Expression[0]), readExpression()); } - if (readIf("ELSE")) { - function.setParameter(i, readExpression()); + read("THEN"); + return new SimpleCase.SimpleWhen(whenOperand, readExpression()); + } + + private Expression readWhenOperand(Expression caseOperand) { + int backup = tokenIndex; + boolean not = readIf(NOT); + Expression whenOperand = readConditionRightHandSide(caseOperand, not, true); + if (whenOperand == null) { + if (not) { + setTokenIndex(backup); + } + whenOperand = readExpression(); } - read("END"); - readIf("CASE"); - function.doneWithParameters(); - return function; + return whenOperand; } - private int readPositiveInt() { + private int readNonNegativeInt() { int v = readInt(); if (v < 0) { - throw DbException.getInvalidValueException("positive integer", v); + throw DbException.getInvalidValueException("non-negative integer", v); } return v; } private int readInt() { boolean minus = false; - if (currentTokenType == MINUS) { + if (currentTokenType == MINUS_SIGN) { minus = true; read(); - } else if (currentTokenType == PLUS) { + } else if (currentTokenType == PLUS_SIGN) { read(); } - if (currentTokenType != VALUE) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "integer"); + if (currentTokenType != LITERAL) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "integer"); } + Value value = token.value(session); if (minus) { // must do that now, otherwise 
Integer.MIN_VALUE would not work - currentValue = currentValue.negate(); + value = value.negate(); } - int i = currentValue.getInt(); + int i = value.getInt(); read(); return i; } + private long readPositiveLong() { + long v = readLong(); + if (v <= 0) { + throw DbException.getInvalidValueException("positive long", v); + } + return v; + } + private long readLong() { boolean minus = false; - if (currentTokenType == MINUS) { + if (currentTokenType == MINUS_SIGN) { minus = true; read(); - } else if (currentTokenType == PLUS) { + } else if (currentTokenType == PLUS_SIGN) { read(); } - if (currentTokenType != VALUE) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "long"); + if (currentTokenType != LITERAL) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "long"); } + Value value = token.value(session); if (minus) { // must do that now, otherwise Long.MIN_VALUE would not work - currentValue = currentValue.negate(); + value = value.negate(); } - long i = currentValue.getLong(); + long i = value.getLong(); read(); return i; } private boolean readBooleanSetting() { - if (currentTokenType == VALUE) { - boolean result = currentValue.getBoolean().booleanValue(); + switch (currentTokenType) { + case ON: + case TRUE: + read(); + return true; + case FALSE: + read(); + return false; + case LITERAL: + boolean result = token.value(session).getBoolean(); read(); return result; } - if (readIf("TRUE") || readIf("ON")) { - return true; - } else if (readIf("FALSE") || readIf("OFF")) { + if (readIf("OFF")) { return false; } else { + if (expectedList != null) { + addMultipleExpected(ON, TRUE, FALSE); + } throw getSyntaxError(); } } private String readString() { - Expression expr = readExpression().optimize(session); - if (!(expr instanceof ValueExpression)) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, "string"); + int sqlIndex = token.start(); + Expression expr = readExpression(); + try { + String s = 
expr.optimize(session).getValue(session).getString(); + if (s == null || s.length() <= Constants.MAX_STRING_LENGTH) { + return s; + } + } catch (DbException e) { } - String s = expr.getValue(session).getString(); - return s; + throw DbException.getSyntaxError(sqlCommand, sqlIndex, "character string"); } + // TODO: why does this function allow defaultSchemaName=null - which resets + // the parser schemaName for everyone ? private String readIdentifierWithSchema(String defaultSchemaName) { - if (currentTokenType != IDENTIFIER) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "identifier"); - } - String s = currentToken; - read(); + String s = readIdentifier(); schemaName = defaultSchemaName; - if (readIf(".")) { - schemaName = s; - if (currentTokenType != IDENTIFIER) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "identifier"); - } - s = currentToken; - read(); + if (readIf(DOT)) { + s = readIdentifierWithSchema2(s); } - if (equalsToken(".", currentToken)) { - if (equalsToken(schemaName, database.getShortName())) { - read("."); - schemaName = s; - if (currentTokenType != IDENTIFIER) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "identifier"); + return s; + } + + private String readIdentifierWithSchema2(String s) { + schemaName = s; + if (database.getMode().allowEmptySchemaValuesAsDefaultSchema && readIf(DOT)) { + if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) { + schemaName = session.getCurrentSchemaName(); + s = readIdentifier(); + } + } else { + s = readIdentifier(); + if (currentTokenType == DOT) { + if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) { + read(); + schemaName = s; + s = readIdentifier(); } - s = currentToken; - read(); } } return s; @@ -3092,18 +5748,16 @@ private String readIdentifierWithSchema() { return readIdentifierWithSchema(session.getCurrentSchemaName()); } - private String readAliasIdentifier() { - return 
readColumnIdentifier(); - } - - private String readUniqueIdentifier() { - return readColumnIdentifier(); - } - - private String readColumnIdentifier() { - if (currentTokenType != IDENTIFIER) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "identifier"); + private String readIdentifier() { + if (!isIdentifier()) { + /* + * Sometimes a new keywords are introduced. During metadata + * initialization phase keywords are accepted as identifiers to + * allow migration from older versions. + */ + if (!session.isQuirksMode() || !isKeyword(currentTokenType)) { + throw DbException.getSyntaxError(sqlCommand, token.start(), "identifier"); + } } String s = currentToken; read(); @@ -3111,989 +5765,959 @@ private String readColumnIdentifier() { } private void read(String expected) { - if (currentTokenQuoted || !equalsToken(expected, currentToken)) { + if (token.isQuoted() || !equalsToken(expected, currentToken)) { addExpected(expected); throw getSyntaxError(); } read(); } - private boolean readIf(String token) { - if (!currentTokenQuoted && equalsToken(token, currentToken)) { + private void read(int tokenType) { + if (tokenType != currentTokenType) { + addExpected(tokenType); + throw getSyntaxError(); + } + read(); + } + + private boolean readIf(String tokenName) { + if (!token.isQuoted() && equalsToken(tokenName, currentToken)) { read(); return true; } - addExpected(token); + addExpected(tokenName); return false; } - private boolean isToken(String token) { - boolean result = equalsToken(token, currentToken) && - !currentTokenQuoted; - if (result) { + private boolean readIf(int tokenType) { + if (tokenType == currentTokenType) { + read(); return true; } - addExpected(token); + addExpected(tokenType); return false; } - private boolean equalsToken(String a, String b) { - if (a == null) { - return b == null; - } else if (a.equals(b)) { + private boolean isToken(String tokenName) { + if (!token.isQuoted() && equalsToken(tokenName, currentToken)) { return true; - } else 
if (!identifiersToUpper && a.equalsIgnoreCase(b)) { + } + addExpected(tokenName); + return false; + } + + private boolean isToken(int tokenType) { + if (tokenType == currentTokenType) { return true; } + addExpected(tokenType); return false; } + private boolean equalsToken(String a, String b) { + if (a == null) { + return b == null; + } else + return a.equals(b) || !identifiersToUpper && a.equalsIgnoreCase(b); + } + + private boolean isIdentifier() { + return currentTokenType == IDENTIFIER || nonKeywords != null && nonKeywords.get(currentTokenType); + } + private void addExpected(String token) { if (expectedList != null) { expectedList.add(token); } } + private void addExpected(int tokenType) { + if (expectedList != null) { + expectedList.add(TOKENS[tokenType]); + } + } + + private void addMultipleExpected(int ... tokenTypes) { + for (int tokenType : tokenTypes) { + expectedList.add(TOKENS[tokenType]); + } + } + private void read() { - currentTokenQuoted = false; if (expectedList != null) { expectedList.clear(); } - int[] types = characterTypes; - lastParseIndex = parseIndex; - int i = parseIndex; - int type = types[i]; - while (type == 0) { - type = types[++i]; - } - int start = i; - char[] chars = sqlCommandChars; - char c = chars[i++]; - currentToken = ""; - switch (type) { - case CHAR_NAME: - while (true) { - type = types[i]; - if (type != CHAR_NAME && type != CHAR_VALUE) { - break; - } - i++; + int size = tokens.size(); + if (tokenIndex + 1 < size) { + token = tokens.get(++tokenIndex); + currentTokenType = token.tokenType(); + currentToken = token.asIdentifier(); + if (currentToken != null && currentToken.length() > Constants.MAX_IDENTIFIER_LENGTH) { + throw DbException.get(ErrorCode.NAME_TOO_LONG_2, currentToken.substring(0, 32), + "" + Constants.MAX_IDENTIFIER_LENGTH); + } else if (currentTokenType == LITERAL) { + checkLiterals(); } - currentToken = StringUtils.fromCacheOrNew(sqlCommand.substring( - start, i)); - currentTokenType = getTokenType(currentToken); 
- parseIndex = i; - return; - case CHAR_QUOTED: { - String result = null; - while (true) { - for (int begin = i;; i++) { - if (chars[i] == '\"') { - if (result == null) { - result = sqlCommand.substring(begin, i); - } else { - result += sqlCommand.substring(begin - 1, i); - } - break; - } - } - if (chars[++i] != '\"') { - break; - } - i++; + } else { + throw getSyntaxError(); + } + } + + private void checkLiterals() { + if (!literalsChecked && session != null && !session.getAllowLiterals()) { + int allowed = database.getAllowLiterals(); + if (allowed == Constants.ALLOW_LITERALS_NONE + || ((token instanceof Token.CharacterStringToken || token instanceof Token.BinaryStringToken) + && allowed != Constants.ALLOW_LITERALS_ALL)) { + throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED); } - currentToken = StringUtils.fromCacheOrNew(result); - parseIndex = i; - currentTokenQuoted = true; - currentTokenType = IDENTIFIER; - return; } - case CHAR_SPECIAL_2: - if (types[i] == CHAR_SPECIAL_2) { - i++; + } + + private void initialize(String sql, ArrayList tokens, boolean stopOnCloseParen) { + if (sql == null) { + sql = ""; + } + sqlCommand = sql; + this.tokens = tokens == null ? 
new Tokenizer(database, identifiersToUpper, identifiersToLower, nonKeywords) + .tokenize(sql, stopOnCloseParen) : tokens; + resetTokenIndex(); + } + + private void resetTokenIndex() { + tokenIndex = -1; + token = null; + currentTokenType = -1; + currentToken = null; + } + + void setTokenIndex(int index) { + if (index != tokenIndex) { + if (expectedList != null) { + expectedList.clear(); } - currentToken = sqlCommand.substring(start, i); - currentTokenType = getSpecialType(currentToken); - parseIndex = i; - return; - case CHAR_SPECIAL_1: - currentToken = sqlCommand.substring(start, i); - currentTokenType = getSpecialType(currentToken); - parseIndex = i; - return; - case CHAR_VALUE: - if (c == '0' && chars[i] == 'X') { - // hex number - long number = 0; - start += 2; - i++; - while (true) { - c = chars[i]; - if ((c < '0' || c > '9') && (c < 'A' || c > 'F')) { - checkLiterals(false); - currentValue = ValueInt.get((int) number); - currentTokenType = VALUE; - currentToken = "0"; - parseIndex = i; - return; - } - number = (number << 4) + c - - (c >= 'A' ? ('A' - 0xa) : ('0')); - if (number > Integer.MAX_VALUE) { - readHexDecimal(start, i); - return; + token = tokens.get(index); + tokenIndex = index; + currentTokenType = token.tokenType(); + currentToken = token.asIdentifier(); + } + } + + private static boolean isKeyword(int tokenType) { + return tokenType >= FIRST_KEYWORD && tokenType <= LAST_KEYWORD; + } + + private boolean isKeyword(String s) { + return ParserUtil.isKeyword(s, !identifiersToUpper); + } + + private String upperName(String name) { + return identifiersToUpper ? 
name : StringUtils.toUpperEnglish(name); + } + + private Column parseColumnForTable(String columnName, boolean defaultNullable) { + Column column; + Mode mode = database.getMode(); + if (mode.identityDataType && readIf("IDENTITY")) { + column = new Column(columnName, TypeInfo.TYPE_BIGINT); + parseCompatibilityIdentityOptions(column); + column.setPrimaryKey(true); + } else if (mode.serialDataTypes && readIf("BIGSERIAL")) { + column = new Column(columnName, TypeInfo.TYPE_BIGINT); + column.setIdentityOptions(new SequenceOptions(), false); + } else if (mode.serialDataTypes && readIf("SERIAL")) { + column = new Column(columnName, TypeInfo.TYPE_INTEGER); + column.setIdentityOptions(new SequenceOptions(), false); + } else { + column = parseColumnWithType(columnName); + } + if (readIf("INVISIBLE")) { + column.setVisible(false); + } else if (readIf("VISIBLE")) { + column.setVisible(true); + } + boolean defaultOnNull = false; + NullConstraintType nullConstraint = parseNotNullConstraint(); + defaultIdentityGeneration: if (!column.isIdentity()) { + if (readIf(AS)) { + column.setGeneratedExpression(readExpression()); + } else if (readIf(DEFAULT)) { + if (readIf(ON)) { + read(NULL); + defaultOnNull = true; + break defaultIdentityGeneration; + } + column.setDefaultExpression(session, readExpression()); + } else if (readIf("GENERATED")) { + boolean always = readIf("ALWAYS"); + if (!always) { + read("BY"); + read(DEFAULT); + } + read(AS); + if (readIf("IDENTITY")) { + SequenceOptions options = new SequenceOptions(); + if (readIf(OPEN_PAREN)) { + parseSequenceOptions(options, null, false, false); + read(CLOSE_PAREN); } - i++; + column.setIdentityOptions(options, always); + break defaultIdentityGeneration; + } else if (!always) { + throw getSyntaxError(); + } else { + column.setGeneratedExpression(readExpression()); } } - long number = c - '0'; - while (true) { - c = chars[i]; - if (c < '0' || c > '9') { - if (c == '.' 
|| c == 'E' || c == 'L') { - readDecimal(start, i); - break; - } - checkLiterals(false); - currentValue = ValueInt.get((int) number); - currentTokenType = VALUE; - currentToken = "0"; - parseIndex = i; - break; - } - number = number * 10 + (c - '0'); - if (number > Integer.MAX_VALUE) { - readDecimal(start, i); - break; - } - i++; + if (!column.isGenerated() && readIf(ON)) { + read("UPDATE"); + column.setOnUpdateExpression(session, readExpression()); } - return; - case CHAR_DOT: - if (types[i] != CHAR_VALUE) { - currentTokenType = KEYWORD; - currentToken = "."; - parseIndex = i; - return; - } - readDecimal(i - 1, i); - return; - case CHAR_STRING: { - String result = null; - while (true) { - for (int begin = i;; i++) { - if (chars[i] == '\'') { - if (result == null) { - result = sqlCommand.substring(begin, i); - } else { - result += sqlCommand.substring(begin - 1, i); - } - break; - } - } - if (chars[++i] != '\'') { - break; - } - i++; + nullConstraint = parseNotNullConstraint(nullConstraint); + if (parseCompatibilityIdentity(column, mode)) { + nullConstraint = parseNotNullConstraint(nullConstraint); } - currentToken = "'"; - checkLiterals(true); - currentValue = ValueString.get(StringUtils.fromCacheOrNew(result), - database.getMode().treatEmptyStringsAsNull); - parseIndex = i; - currentTokenType = VALUE; - return; - } - case CHAR_DOLLAR_QUOTED_STRING: { - String result = null; - int begin = i - 1; - while (types[i] == CHAR_DOLLAR_QUOTED_STRING) { - i++; - } - result = sqlCommand.substring(begin, i); - currentToken = "'"; - checkLiterals(true); - currentValue = ValueString.get(StringUtils.fromCacheOrNew(result), - database.getMode().treatEmptyStringsAsNull); - parseIndex = i; - currentTokenType = VALUE; - return; } - case CHAR_END: - currentToken = ""; - currentTokenType = END; - parseIndex = i; - return; + switch (nullConstraint) { + case NULL_IS_ALLOWED: + if (column.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); 
+ } + column.setNullable(true); + break; + case NULL_IS_NOT_ALLOWED: + column.setNullable(false); + break; + case NO_NULL_CONSTRAINT_FOUND: + if (!column.isIdentity()) { + column.setNullable(defaultNullable); + } + break; default: - throw getSyntaxError(); + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, + "Internal Error - unhandled case: " + nullConstraint.name()); } - } - - private void checkLiterals(boolean text) { - if (!session.getAllowLiterals()) { - int allowed = database.getAllowLiterals(); - if (allowed == Constants.ALLOW_LITERALS_NONE || - (text && allowed != Constants.ALLOW_LITERALS_ALL)) { - throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED); + if (!defaultOnNull) { + if (readIf(DEFAULT)) { + read(ON); + read(NULL); + defaultOnNull = true; + } else if (readIf("NULL_TO_DEFAULT")) { + defaultOnNull = true; } } - } - - private void readHexDecimal(int start, int i) { - char[] chars = sqlCommandChars; - char c; - do { - c = chars[++i]; - } while ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')); - parseIndex = i; - String sub = sqlCommand.substring(start, i); - BigDecimal bd = new BigDecimal(new BigInteger(sub, 16)); - checkLiterals(false); - currentValue = ValueDecimal.get(bd); - currentTokenType = VALUE; - } - - private void readDecimal(int start, int i) { - char[] chars = sqlCommandChars; - int[] types = characterTypes; - // go until the first non-number - while (true) { - int t = types[i]; - if (t != CHAR_DOT && t != CHAR_VALUE) { - break; - } - i++; + if (defaultOnNull) { + column.setDefaultOnNull(true); } - boolean containsE = false; - if (chars[i] == 'E' || chars[i] == 'e') { - containsE = true; - i++; - if (chars[i] == '+' || chars[i] == '-') { - i++; + if (!column.isGenerated()) { + if (readIf("SEQUENCE")) { + column.setSequence(readSequence(), column.isGeneratedAlways()); } - if (types[i] != CHAR_VALUE) { - throw getSyntaxError(); + } + if (readIf("SELECTIVITY")) { + column.setSelectivity(readNonNegativeInt()); + } + if (mode.getEnum() == 
ModeEnum.MySQL) { + if (readIf("CHARACTER")) { + readIf(SET); + readMySQLCharset(); } - while (types[++i] == CHAR_VALUE) { - // go until the first non-number + if (readIf("COLLATE")) { + readMySQLCharset(); } } - parseIndex = i; - String sub = sqlCommand.substring(start, i); - checkLiterals(false); - if (!containsE && sub.indexOf('.') < 0) { - BigInteger bi = new BigInteger(sub); - if (bi.compareTo(ValueLong.MAX) <= 0) { - // parse constants like "10000000L" - if (chars[i] == 'L') { - parseIndex++; - } - currentValue = ValueLong.get(bi.longValue()); - currentTokenType = VALUE; - return; + String comment = readCommentIf(); + if (comment != null) { + column.setComment(comment); + } + return column; + } + + private void parseCompatibilityIdentityOptions(Column column) { + SequenceOptions options = new SequenceOptions(); + if (readIf(OPEN_PAREN)) { + options.setStartValue(ValueExpression.get(ValueBigint.get(readLong()))); + if (readIf(COMMA)) { + options.setIncrement(ValueExpression.get(ValueBigint.get(readLong()))); } + read(CLOSE_PAREN); } - BigDecimal bd; - try { - bd = new BigDecimal(sub); - } catch (NumberFormatException e) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, sub); + column.setIdentityOptions(options, false); + } + + private String readCommentIf() { + if (readIf("COMMENT")) { + readIf(IS); + return readString(); } - currentValue = ValueDecimal.get(bd); - currentTokenType = VALUE; + return null; } - public Session getSession() { - return session; + private Column parseColumnWithType(String columnName) { + TypeInfo typeInfo = readIfDataType(); + if (typeInfo == null) { + String domainName = readIdentifierWithSchema(); + return getColumnWithDomain(columnName, getSchema().getDomain(domainName)); + } + return new Column(columnName, typeInfo); } - private void initialize(String sql) { - if (sql == null) { - sql = ""; + private TypeInfo parseDataType() { + TypeInfo typeInfo = readIfDataType(); + if (typeInfo == null) { + addExpected("data 
type"); + throw getSyntaxError(); } - originalSQL = sql; - sqlCommand = sql; - int len = sql.length() + 1; - char[] command = new char[len]; - int[] types = new int[len]; - len--; - sql.getChars(0, len, command, 0); - boolean changed = false; - command[len] = ' '; - int startLoop = 0; - int lastType = 0; - for (int i = 0; i < len; i++) { - char c = command[i]; - int type = 0; - switch (c) { - case '/': - if (command[i + 1] == '*') { - // block comment - changed = true; - command[i] = ' '; - command[i + 1] = ' '; - startLoop = i; - i += 2; - checkRunOver(i, len, startLoop); - while (command[i] != '*' || command[i + 1] != '/') { - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - command[i] = ' '; - command[i + 1] = ' '; - i++; - } else if (command[i + 1] == '/') { - // single line comment - changed = true; - startLoop = i; - while (true) { - c = command[i]; - if (c == '\n' || c == '\r' || i >= len - 1) { - break; - } - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - } else { - type = CHAR_SPECIAL_1; - } - break; - case '-': - if (command[i + 1] == '-') { - // single line comment - changed = true; - startLoop = i; - while (true) { - c = command[i]; - if (c == '\n' || c == '\r' || i >= len - 1) { - break; - } - command[i++] = ' '; - checkRunOver(i, len, startLoop); - } - } else { - type = CHAR_SPECIAL_1; - } + return typeInfo; + } + + private TypeInfo readIfDataType() { + TypeInfo typeInfo = readIfDataType1(); + if (typeInfo != null) { + while (readIf(ARRAY)) { + typeInfo = parseArrayType(typeInfo); + } + } + return typeInfo; + } + + private TypeInfo readIfDataType1() { + switch (currentTokenType) { + case IDENTIFIER: + if (token.isQuoted()) { + return null; + } + break; + case INTERVAL: { + read(); + TypeInfo typeInfo = readIntervalQualifier(); + if (typeInfo == null) { + throw intervalQualifierError(); + } + return typeInfo; + } + case NULL: + read(); + return TypeInfo.TYPE_NULL; + case ROW: + read(); + return parseRowType(); + case ARRAY: + 
// Partial compatibility with 1.4.200 and older versions + if (session.isQuirksMode()) { + read(); + return parseArrayType(TypeInfo.TYPE_VARCHAR); + } + addExpected("data type"); + throw getSyntaxError(); + default: + if (isKeyword(currentToken)) { break; - case '$': - if (command[i + 1] == '$' && (i == 0 || command[i - 1] <= ' ')) { - // dollar quoted string - changed = true; - command[i] = ' '; - command[i + 1] = ' '; - startLoop = i; - i += 2; - checkRunOver(i, len, startLoop); - while (command[i] != '$' || command[i + 1] != '$') { - types[i++] = CHAR_DOLLAR_QUOTED_STRING; - checkRunOver(i, len, startLoop); - } - command[i] = ' '; - command[i + 1] = ' '; - i++; + } + addExpected("data type"); + throw getSyntaxError(); + } + int index = tokenIndex; + String originalCase = currentToken; + read(); + if (currentTokenType == DOT) { + setTokenIndex(index); + return null; + } + String original = upperName(originalCase); + switch (original) { + case "BINARY": + if (readIf("VARYING")) { + original = "BINARY VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "BINARY LARGE OBJECT"; + } else if (variableBinary) { + original = "VARBINARY"; + } + break; + case "CHAR": + if (readIf("VARYING")) { + original = "CHAR VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "CHAR LARGE OBJECT"; + } + break; + case "CHARACTER": + if (readIf("VARYING")) { + original = "CHARACTER VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "CHARACTER LARGE OBJECT"; + } + break; + case "DATETIME": + case "DATETIME2": + return parseDateTimeType(false); + case "DEC": + case "DECIMAL": + return parseNumericType(true); + case "DECFLOAT": + return parseDecfloatType(); + case "DOUBLE": + if (readIf("PRECISION")) { + original = "DOUBLE PRECISION"; + } + break; + case "ENUM": + return parseEnumType(); + case "FLOAT": + return parseFloatType(); + case "GEOMETRY": + return parseGeometryType(); + case "LONG": + if (readIf("RAW")) { + original = 
"LONG RAW"; + } + break; + case "NATIONAL": + if (readIf("CHARACTER")) { + if (readIf("VARYING")) { + original = "NATIONAL CHARACTER VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "NATIONAL CHARACTER LARGE OBJECT"; } else { - if (lastType == CHAR_NAME || lastType == CHAR_VALUE) { - // $ inside an identifier is supported - type = CHAR_NAME; - } else { - // but not at the start, to support PostgreSQL $1 - type = CHAR_SPECIAL_1; - } - } - break; - case '(': - case ')': - case '{': - case '}': - case '*': - case ',': - case ';': - case '+': - case '%': - case '?': - case '@': - case ']': - type = CHAR_SPECIAL_1; - break; - case '!': - case '<': - case '>': - case '|': - case '=': - case ':': - case '&': - case '~': - type = CHAR_SPECIAL_2; - break; - case '.': - type = CHAR_DOT; - break; - case '\'': - type = types[i] = CHAR_STRING; - startLoop = i; - while (command[++i] != '\'') { - checkRunOver(i, len, startLoop); + original = "NATIONAL CHARACTER"; } - break; - case '[': - if (database.getMode().squareBracketQuotedNames) { - // SQL Server alias for " - command[i] = '"'; - changed = true; - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != ']') { - checkRunOver(i, len, startLoop); - } - command[i] = '"'; + } else { + read("CHAR"); + if (readIf("VARYING")) { + original = "NATIONAL CHAR VARYING"; } else { - type = CHAR_SPECIAL_1; - } - break; - case '`': - // MySQL alias for ", but not case sensitive - command[i] = '"'; - changed = true; - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != '`') { - checkRunOver(i, len, startLoop); - c = command[i]; - command[i] = Character.toUpperCase(c); - } - command[i] = '"'; - break; - case '\"': - type = types[i] = CHAR_QUOTED; - startLoop = i; - while (command[++i] != '\"') { - checkRunOver(i, len, startLoop); + original = "NATIONAL CHAR"; } - break; - case '_': - type = CHAR_NAME; - break; - default: - if (c >= 'a' && c <= 'z') { - if (identifiersToUpper) { - 
command[i] = (char) (c - ('a' - 'A')); - changed = true; + } + break; + case "NCHAR": + if (readIf("VARYING")) { + original = "NCHAR VARYING"; + } else if (readIf("LARGE")) { + read("OBJECT"); + original = "NCHAR LARGE OBJECT"; + } + break; + case "NUMBER": + if (database.getMode().disallowedTypes.contains("NUMBER")) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NUMBER"); + } + if (!isToken(OPEN_PAREN)) { + return TypeInfo.getTypeInfo(Value.DECFLOAT, 40, -1, null); + } + //$FALL-THROUGH$ + case "NUMERIC": + return parseNumericType(false); + case "SMALLDATETIME": + return parseDateTimeType(true); + case "TIME": + return parseTimeType(); + case "TIMESTAMP": + return parseTimestampType(); + } + // Domain names can't have multiple words without quotes + if (originalCase.length() == original.length()) { + Domain domain = database.getSchema(session.getCurrentSchemaName()).findDomain(originalCase); + if (domain != null) { + setTokenIndex(index); + return null; + } + } + Mode mode = database.getMode(); + DataType dataType = DataType.getTypeByName(original, mode); + if (dataType == null || mode.disallowedTypes.contains(original)) { + throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, original); + } + long precision; + int scale; + if (dataType.specialPrecisionScale) { + precision = dataType.defaultPrecision; + scale = dataType.defaultScale; + } else { + precision = -1L; + scale = -1; + } + int t = dataType.type; + if (database.getIgnoreCase() && t == Value.VARCHAR && !equalsToken("VARCHAR_CASESENSITIVE", original)) { + dataType = DataType.getDataType(t = Value.VARCHAR_IGNORECASE); + } + if ((dataType.supportsPrecision || dataType.supportsScale) && readIf(OPEN_PAREN)) { + if (!readIf("MAX")) { + if (dataType.supportsPrecision) { + precision = readPrecision(t); + if (precision < dataType.minPrecision) { + throw getInvalidPrecisionException(dataType, precision); + } else if (precision > dataType.maxPrecision) + badPrecision: { + if (session.isQuirksMode() || 
session.isTruncateLargeLength()) { + switch (dataType.type) { + case Value.CHAR: + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.BINARY: + case Value.VARBINARY: + case Value.JAVA_OBJECT: + case Value.JSON: + precision = dataType.maxPrecision; + break badPrecision; + } + } + throw getInvalidPrecisionException(dataType, precision); } - type = CHAR_NAME; - } else if (c >= 'A' && c <= 'Z') { - type = CHAR_NAME; - } else if (c >= '0' && c <= '9') { - type = CHAR_VALUE; - } else { - if (c <= ' ' || Character.isSpaceChar(c)) { - // whitespace - } else if (Character.isJavaIdentifierPart(c)) { - type = CHAR_NAME; - if (identifiersToUpper) { - char u = Character.toUpperCase(c); - if (u != c) { - command[i] = u; - changed = true; + if (dataType.supportsScale) { + if (readIf(COMMA)) { + scale = readInt(); + if (scale < dataType.minScale || scale > dataType.maxScale) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale)); } } - } else { - type = CHAR_SPECIAL_1; + } + } else { + scale = readInt(); + if (scale < dataType.minScale || scale > dataType.maxScale) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale)); } } } - types[i] = type; - lastType = type; - } - sqlCommandChars = command; - types[len] = CHAR_END; - characterTypes = types; - if (changed) { - sqlCommand = new String(command); - } - parseIndex = 0; - } - - private void checkRunOver(int i, int len, int startLoop) { - if (i >= len) { - parseIndex = startLoop; - throw getSyntaxError(); - } - } - - private int getSpecialType(String s) { - char c0 = s.charAt(0); - if (s.length() == 1) { - switch (c0) { - case '?': - case '$': - return PARAMETER; - case '@': - return AT; - case '+': - return PLUS; - case '-': - return MINUS; - case '{': - case '}': - case '*': - case '/': - case '%': - case ';': - 
case ',': - case ':': - case '[': - case ']': - case '~': - return KEYWORD; - case '(': - return OPEN; - case ')': - return CLOSE; - case '<': - return SMALLER; - case '>': - return BIGGER; - case '=': - return EQUAL; - default: - break; - } - } else if (s.length() == 2) { - switch (c0) { - case ':': - if ("::".equals(s)) { - return KEYWORD; - } else if (":=".equals(s)) { - return KEYWORD; - } - break; - case '>': - if (">=".equals(s)) { - return BIGGER_EQUAL; - } - break; - case '<': - if ("<=".equals(s)) { - return SMALLER_EQUAL; - } else if ("<>".equals(s)) { - return NOT_EQUAL; - } - break; - case '!': - if ("!=".equals(s)) { - return NOT_EQUAL; - } else if ("!~".equals(s)) { - return KEYWORD; - } - break; - case '|': - if ("||".equals(s)) { - return STRING_CONCAT; - } - break; - case '&': - if ("&&".equals(s)) { - return SPATIAL_INTERSECTS; - } - break; + read(CLOSE_PAREN); + } + if (mode.allNumericTypesHavePrecision && DataType.isNumericType(dataType.type)) { + if (readIf(OPEN_PAREN)) { + // Support for MySQL: INT(11), MEDIUMINT(8) and so on. + // Just ignore the precision. 
+ readNonNegativeInt(); + read(CLOSE_PAREN); } + readIf("UNSIGNED"); } - throw getSyntaxError(); + if (mode.forBitData && DataType.isStringType(t)) { + if (readIf(FOR)) { + read("BIT"); + read("DATA"); + dataType = DataType.getDataType(t = Value.VARBINARY); + } + } + return TypeInfo.getTypeInfo(t, precision, scale, null); } - private int getTokenType(String s) { - int len = s.length(); - if (len == 0) { - throw getSyntaxError(); - } - if (!identifiersToUpper) { - // if not yet converted to uppercase, do it now - s = StringUtils.toUpperEnglish(s); - } - return getSaveTokenType(s, database.getMode().supportOffsetFetch); + private static DbException getInvalidPrecisionException(DataType dataType, long precision) { + return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), + Long.toString(dataType.minPrecision), Long.toString(dataType.maxPrecision)); } - private boolean isKeyword(String s) { - if (!identifiersToUpper) { - // if not yet converted to uppercase, do it now - s = StringUtils.toUpperEnglish(s); - } - return isKeyword(s, false); + private static Column getColumnWithDomain(String columnName, Domain domain) { + Column column = new Column(columnName, domain.getDataType()); + column.setComment(domain.getComment()); + column.setDomain(domain); + return column; } - /** - * Checks if this string is a SQL keyword. 
- * - * @param s the token to check - * @param supportOffsetFetch if OFFSET and FETCH are keywords - * @return true if it is a keyword - */ - public static boolean isKeyword(String s, boolean supportOffsetFetch) { - if (s == null || s.length() == 0) { - return false; + private TypeInfo parseFloatType() { + int type = Value.DOUBLE; + int precision; + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + if (precision < 1 || precision > 53) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1", "53"); + } + if (precision <= 24) { + type = Value.REAL; + } + } else { + precision = 0; } - return getSaveTokenType(s, supportOffsetFetch) != IDENTIFIER; + return TypeInfo.getTypeInfo(type, precision, -1, null); } - private static int getSaveTokenType(String s, boolean supportOffsetFetch) { - switch (s.charAt(0)) { - case 'C': - if (s.equals("CURRENT_TIMESTAMP")) { - return CURRENT_TIMESTAMP; - } else if (s.equals("CURRENT_TIME")) { - return CURRENT_TIME; - } else if (s.equals("CURRENT_DATE")) { - return CURRENT_DATE; - } - return getKeywordOrIdentifier(s, "CROSS", KEYWORD); - case 'D': - return getKeywordOrIdentifier(s, "DISTINCT", KEYWORD); - case 'E': - if ("EXCEPT".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "EXISTS", KEYWORD); - case 'F': - if ("FROM".equals(s)) { - return KEYWORD; - } else if ("FOR".equals(s)) { - return KEYWORD; - } else if ("FULL".equals(s)) { - return KEYWORD; - } else if (supportOffsetFetch && "FETCH".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "FALSE", FALSE); - case 'G': - return getKeywordOrIdentifier(s, "GROUP", KEYWORD); - case 'H': - return getKeywordOrIdentifier(s, "HAVING", KEYWORD); - case 'I': - if ("INNER".equals(s)) { - return KEYWORD; - } else if ("INTERSECT".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "IS", KEYWORD); - case 'J': - return getKeywordOrIdentifier(s, "JOIN", KEYWORD); - case 
'L': - if ("LIMIT".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "LIKE", KEYWORD); - case 'M': - return getKeywordOrIdentifier(s, "MINUS", KEYWORD); - case 'N': - if ("NOT".equals(s)) { - return KEYWORD; - } else if ("NATURAL".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "NULL", NULL); - case 'O': - if ("ON".equals(s)) { - return KEYWORD; - } else if (supportOffsetFetch && "OFFSET".equals(s)) { - return KEYWORD; - } - return getKeywordOrIdentifier(s, "ORDER", KEYWORD); - case 'P': - return getKeywordOrIdentifier(s, "PRIMARY", KEYWORD); - case 'R': - return getKeywordOrIdentifier(s, "ROWNUM", ROWNUM); - case 'S': - if (s.equals("SYSTIMESTAMP")) { - return CURRENT_TIMESTAMP; - } else if (s.equals("SYSTIME")) { - return CURRENT_TIME; - } else if (s.equals("SYSDATE")) { - return CURRENT_TIMESTAMP; - } - return getKeywordOrIdentifier(s, "SELECT", KEYWORD); - case 'T': - if ("TODAY".equals(s)) { - return CURRENT_DATE; - } - return getKeywordOrIdentifier(s, "TRUE", TRUE); - case 'U': - if ("UNIQUE".equals(s)) { - return KEYWORD; + private TypeInfo parseNumericType(boolean decimal) { + long precision = -1L; + int scale = -1; + if (readIf(OPEN_PAREN)) { + precision = readPrecision(Value.NUMERIC); + if (precision < 1) { + throw getInvalidNumericPrecisionException(precision); + } else if (precision > Constants.MAX_NUMERIC_PRECISION) { + if (session.isQuirksMode() || session.isTruncateLargeLength()) { + precision = Constants.MAX_NUMERIC_PRECISION; + } else { + throw getInvalidNumericPrecisionException(precision); + } } - return getKeywordOrIdentifier(s, "UNION", KEYWORD); - case 'W': - if ("WITH".equals(s)) { - return KEYWORD; + if (readIf(COMMA)) { + scale = readInt(); + if (scale < 0 || scale > ValueNumeric.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), + "0", "" + ValueNumeric.MAXIMUM_SCALE); + } } - return getKeywordOrIdentifier(s, "WHERE", KEYWORD); - default: - return 
IDENTIFIER; + read(CLOSE_PAREN); } + return TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, decimal ? ExtTypeInfoNumeric.DECIMAL : null); } - private static int getKeywordOrIdentifier(String s1, String s2, - int keywordType) { - if (s1.equals(s2)) { - return keywordType; + private TypeInfo parseDecfloatType() { + long precision = -1L; + if (readIf(OPEN_PAREN)) { + precision = readPrecision(Value.DECFLOAT); + if (precision < 1 || precision > Constants.MAX_NUMERIC_PRECISION) { + throw getInvalidNumericPrecisionException(precision); + } + read(CLOSE_PAREN); } - return IDENTIFIER; + return TypeInfo.getTypeInfo(Value.DECFLOAT, precision, -1, null); } - private Column parseColumnForTable(String columnName, - boolean defaultNullable) { - Column column; - boolean isIdentity = false; - if (readIf("IDENTITY") || readIf("BIGSERIAL")) { - column = new Column(columnName, Value.LONG); - column.setOriginalSQL("IDENTITY"); - parseAutoIncrement(column); - // PostgreSQL compatibility - if (!database.getMode().serialColumnIsNotPK) { - column.setPrimaryKey(true); - } - } else if (readIf("SERIAL")) { - column = new Column(columnName, Value.INT); - column.setOriginalSQL("SERIAL"); - parseAutoIncrement(column); - // PostgreSQL compatibility - if (!database.getMode().serialColumnIsNotPK) { - column.setPrimaryKey(true); + private static DbException getInvalidNumericPrecisionException(long precision) { + return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), "1", + "" + Constants.MAX_NUMERIC_PRECISION); + } + + private TypeInfo parseTimeType() { + int scale = -1; + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + if (scale > ValueTime.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueTime.MAXIMUM_SCALE); } - } else { - column = parseColumnWithType(columnName); + read(CLOSE_PAREN); } - if (readIf("NOT")) { - read("NULL"); - column.setNullable(false); - } 
else if (readIf("NULL")) { - column.setNullable(true); - } else { - // domains may be defined as not nullable - column.setNullable(defaultNullable & column.isNullable()); - } - if (readIf("AS")) { - if (isIdentity) { - getSyntaxError(); - } - Expression expr = readExpression(); - column.setComputedExpression(expr); - } else if (readIf("DEFAULT")) { - Expression defaultExpression = readExpression(); - column.setDefaultExpression(session, defaultExpression); - } else if (readIf("GENERATED")) { - if (!readIf("ALWAYS")) { - read("BY"); - read("DEFAULT"); - } - read("AS"); - read("IDENTITY"); - long start = 1, increment = 1; - if (readIf("(")) { - read("START"); - readIf("WITH"); - start = readLong(); - readIf(","); - if (readIf("INCREMENT")) { - readIf("BY"); - increment = readLong(); - } - read(")"); - } - column.setPrimaryKey(true); - column.setAutoIncrement(true, start, increment); + int type = Value.TIME; + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + type = Value.TIME_TZ; + } else if (readIf("WITHOUT")) { + read("TIME"); + read("ZONE"); } - if (readIf("NOT")) { - read("NULL"); - column.setNullable(false); + return TypeInfo.getTypeInfo(type, -1L, scale, null); + } + + private TypeInfo parseTimestampType() { + int scale = -1; + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + // Allow non-standard TIMESTAMP(..., ...) 
syntax + if (readIf(COMMA)) { + scale = readNonNegativeInt(); + } + if (scale > ValueTimestamp.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE); + } + read(CLOSE_PAREN); + } + int type = Value.TIMESTAMP; + if (readIf(WITH)) { + read("TIME"); + read("ZONE"); + type = Value.TIMESTAMP_TZ; + } else if (readIf("WITHOUT")) { + read("TIME"); + read("ZONE"); + } + return TypeInfo.getTypeInfo(type, -1L, scale, null); + } + + private TypeInfo parseDateTimeType(boolean smallDateTime) { + int scale; + if (smallDateTime) { + scale = 0; } else { - readIf("NULL"); + scale = -1; + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + if (scale > ValueTimestamp.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE); + } + read(CLOSE_PAREN); + } } - if (readIf("AUTO_INCREMENT") || readIf("BIGSERIAL") || readIf("SERIAL")) { - parseAutoIncrement(column); - if (readIf("NOT")) { - read("NULL"); + return TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, scale, null); + } + + private TypeInfo readIntervalQualifier() { + IntervalQualifier qualifier; + int precision = -1, scale = -1; + switch (currentTokenType) { + case YEAR: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); } - } else if (readIf("IDENTITY")) { - parseAutoIncrement(column); - column.setPrimaryKey(true); - if (readIf("NOT")) { - read("NULL"); + if (readIf(TO)) { + read(MONTH); + qualifier = IntervalQualifier.YEAR_TO_MONTH; + } else { + qualifier = IntervalQualifier.YEAR; + } + break; + case MONTH: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); } + qualifier = IntervalQualifier.MONTH; + break; + case DAY: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if 
(readIf(TO)) { + switch (currentTokenType) { + case HOUR: + read(); + qualifier = IntervalQualifier.DAY_TO_HOUR; + break; + case MINUTE: + read(); + qualifier = IntervalQualifier.DAY_TO_MINUTE; + break; + case SECOND: + read(); + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + read(CLOSE_PAREN); + } + qualifier = IntervalQualifier.DAY_TO_SECOND; + break; + default: + throw intervalDayError(); + } + } else { + qualifier = IntervalQualifier.DAY; + } + break; + case HOUR: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if (readIf(TO)) { + switch (currentTokenType) { + case MINUTE: + read(); + qualifier = IntervalQualifier.HOUR_TO_MINUTE; + break; + case SECOND: + read(); + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + read(CLOSE_PAREN); + } + qualifier = IntervalQualifier.HOUR_TO_SECOND; + break; + default: + throw intervalHourError(); + } + } else { + qualifier = IntervalQualifier.HOUR; + } + break; + case MINUTE: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + read(CLOSE_PAREN); + } + if (readIf(TO)) { + read(SECOND); + if (readIf(OPEN_PAREN)) { + scale = readNonNegativeInt(); + read(CLOSE_PAREN); + } + qualifier = IntervalQualifier.MINUTE_TO_SECOND; + } else { + qualifier = IntervalQualifier.MINUTE; + } + break; + case SECOND: + read(); + if (readIf(OPEN_PAREN)) { + precision = readNonNegativeInt(); + if (readIf(COMMA)) { + scale = readNonNegativeInt(); + } + read(CLOSE_PAREN); + } + qualifier = IntervalQualifier.SECOND; + break; + default: + return null; } - if (readIf("NULL_TO_DEFAULT")) { - column.setConvertNullToDefault(true); + if (precision >= 0) { + if (precision == 0 || precision > ValueInterval.MAXIMUM_PRECISION) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1", + /* Folds to a constant */ "" + ValueInterval.MAXIMUM_PRECISION); + } } - if (readIf("SEQUENCE")) { - Sequence sequence = readSequence(); - 
column.setSequence(sequence); + if (scale >= 0) { + if (scale > ValueInterval.MAXIMUM_SCALE) { + throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0", + /* Folds to a constant */ "" + ValueInterval.MAXIMUM_SCALE); + } } - if (readIf("SELECTIVITY")) { - int value = readPositiveInt(); - column.setSelectivity(value); + return TypeInfo.getTypeInfo(qualifier.ordinal() + Value.INTERVAL_YEAR, precision, scale, null); + } + + private DbException intervalQualifierError() { + if (expectedList != null) { + addMultipleExpected(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND); } - String comment = readCommentIf(); - if (comment != null) { - column.setComment(comment); + return getSyntaxError(); + } + + private DbException intervalDayError() { + if (expectedList != null) { + addMultipleExpected(HOUR, MINUTE, SECOND); } - return column; + return getSyntaxError(); } - private void parseAutoIncrement(Column column) { - long start = 1, increment = 1; - if (readIf("(")) { - start = readLong(); - if (readIf(",")) { - increment = readLong(); - } - read(")"); + private DbException intervalHourError() { + if (expectedList != null) { + addMultipleExpected(MINUTE, SECOND); } - column.setAutoIncrement(true, start, increment); + return getSyntaxError(); } - private String readCommentIf() { - if (readIf("COMMENT")) { - readIf("IS"); - return readString(); + private TypeInfo parseArrayType(TypeInfo componentType) { + int precision = -1; + if (readIf(OPEN_BRACKET)) { + // Maximum cardinality may be zero + precision = readNonNegativeInt(); + if (precision > Constants.MAX_ARRAY_CARDINALITY) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "0", + /* Folds to a constant */ "" + Constants.MAX_ARRAY_CARDINALITY); + } + read(CLOSE_BRACKET); } - return null; + return TypeInfo.getTypeInfo(Value.ARRAY, precision, -1, componentType); } - private Column parseColumnWithType(String columnName) { - String original = currentToken; - boolean regular = 
false; - if (readIf("LONG")) { - if (readIf("RAW")) { - original += " RAW"; + private TypeInfo parseEnumType() { + read(OPEN_PAREN); + ArrayList enumeratorList = new ArrayList<>(); + do { + enumeratorList.add(readString()); + } while (readIfMore()); + return TypeInfo.getTypeInfo(Value.ENUM, -1L, -1, new ExtTypeInfoEnum(enumeratorList.toArray(new String[0]))); + } + + private TypeInfo parseGeometryType() { + ExtTypeInfoGeometry extTypeInfo; + if (readIf(OPEN_PAREN)) { + int type = 0; + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + throw getSyntaxError(); } - } else if (readIf("DOUBLE")) { - if (readIf("PRECISION")) { - original += " PRECISION"; + if (!readIf("GEOMETRY")) { + try { + type = EWKTUtils.parseGeometryType(currentToken); + read(); + if (type / 1_000 == 0 && currentTokenType == IDENTIFIER && !token.isQuoted()) { + type += EWKTUtils.parseDimensionSystem(currentToken) * 1_000; + read(); + } + } catch (IllegalArgumentException ex) { + throw getSyntaxError(); + } } - } else if (readIf("CHARACTER")) { - if (readIf("VARYING")) { - original += " VARYING"; + Integer srid = null; + if (readIf(COMMA)) { + srid = readInt(); } + read(CLOSE_PAREN); + extTypeInfo = new ExtTypeInfoGeometry(type, srid); } else { - regular = true; + extTypeInfo = null; } - long precision = -1; - int displaySize = -1; - int scale = -1; - String comment = null; - Column templateColumn = null; - DataType dataType; - if (!identifiersToUpper) { - original = StringUtils.toUpperEnglish(original); - } - UserDataType userDataType = database.findUserDataType(original); - if (userDataType != null) { - templateColumn = userDataType.getColumn(); - dataType = DataType.getDataType(templateColumn.getType()); - comment = templateColumn.getComment(); - original = templateColumn.getOriginalSQL(); - precision = templateColumn.getPrecision(); - displaySize = templateColumn.getDisplaySize(); - scale = templateColumn.getScale(); - } else { - dataType = DataType.getTypeByName(original); - if 
(dataType == null) { - throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, - currentToken); + return TypeInfo.getTypeInfo(Value.GEOMETRY, -1L, -1, extTypeInfo); + } + + private TypeInfo parseRowType() { + read(OPEN_PAREN); + LinkedHashMap fields = new LinkedHashMap<>(); + do { + String name = readIdentifier(); + if (fields.putIfAbsent(name, parseDataType()) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, name); } - } - if (database.getIgnoreCase() && dataType.type == Value.STRING && - !equalsToken("VARCHAR_CASESENSITIVE", original)) { - original = "VARCHAR_IGNORECASE"; - dataType = DataType.getTypeByName(original); - } - if (regular) { + } while (readIfMore()); + return TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(fields)); + } + + private long readPrecision(int valueType) { + long p = readPositiveLong(); + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + return p; + } + if ((valueType == Value.BLOB || valueType == Value.CLOB) && currentToken.length() == 1) { + long mul; + /* + * Convert a-z to A-Z. This method is safe, because only A-Z + * characters are considered below. + */ + switch (currentToken.charAt(0) & 0xffdf) { + case 'K': + mul = 1L << 10; + break; + case 'M': + mul = 1L << 20; + break; + case 'G': + mul = 1L << 30; + break; + case 'T': + mul = 1L << 40; + break; + case 'P': + mul = 1L << 50; + break; + default: + throw getSyntaxError(); + } + if (p > Long.MAX_VALUE / mul) { + throw DbException.getInvalidValueException("precision", p + currentToken); + } + p *= mul; read(); + if (currentTokenType != IDENTIFIER || token.isQuoted()) { + return p; + } } - precision = precision == -1 ? dataType.defaultPrecision : precision; - displaySize = displaySize == -1 ? dataType.defaultDisplaySize - : displaySize; - scale = scale == -1 ? 
dataType.defaultScale : scale; - if (dataType.supportsPrecision || dataType.supportsScale) { - if (readIf("(")) { - if (!readIf("MAX")) { - long p = readLong(); - if (readIf("K")) { - p *= 1024; - } else if (readIf("M")) { - p *= 1024 * 1024; - } else if (readIf("G")) { - p *= 1024 * 1024 * 1024; - } - if (p > Long.MAX_VALUE) { - p = Long.MAX_VALUE; - } - original += "(" + p; - // Oracle syntax - readIf("CHAR"); - if (dataType.supportsScale) { - if (readIf(",")) { - scale = readInt(); - original += ", " + scale; - } else { - // special case: TIMESTAMP(5) actually means - // TIMESTAMP(23, 5) - if (dataType.type == Value.TIMESTAMP) { - scale = MathUtils.convertLongToInt(p); - p = precision; - } else { - scale = 0; - } - } - } - precision = p; - displaySize = MathUtils.convertLongToInt(precision); - original += ")"; - } - read(")"); - } - } else if (readIf("(")) { - // Support for MySQL: INT(11), MEDIUMINT(8) and so on. - // Just ignore the precision. - readPositiveInt(); - read(")"); - } - if (readIf("FOR")) { - read("BIT"); - read("DATA"); - if (dataType.type == Value.STRING) { - dataType = DataType.getTypeByName("BINARY"); - } - } - // MySQL compatibility - readIf("UNSIGNED"); - int type = dataType.type; - if (scale > precision) { - throw DbException.get(ErrorCode.INVALID_VALUE_2, - Integer.toString(scale), "scale (precision = " + precision + - ")"); - } - Column column = new Column(columnName, type, precision, scale, - displaySize); - if (templateColumn != null) { - column.setNullable(templateColumn.isNullable()); - column.setDefaultExpression(session, - templateColumn.getDefaultExpression()); - int selectivity = templateColumn.getSelectivity(); - if (selectivity != Constants.SELECTIVITY_DEFAULT) { - column.setSelectivity(selectivity); - } - Expression checkConstraint = templateColumn.getCheckConstraint( - session, columnName); - column.addCheckConstraint(session, checkConstraint); - } - column.setComment(comment); - column.setOriginalSQL(original); - return 
column; + switch (valueType) { + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CLOB: + case Value.CHAR: + if (!readIf("CHARACTERS") && !readIf("OCTETS")) { + if (database.getMode().charAndByteLengthUnits && !readIf("CHAR")) { + readIf("BYTE"); + } + } + } + return p; } private Prepared parseCreate() { boolean orReplace = false; - if (readIf("OR")) { + if (readIf(OR)) { read("REPLACE"); orReplace = true; } @@ -4104,7 +6728,7 @@ private Prepared parseCreate() { return parseCreateFunctionAlias(force); } else if (readIf("SEQUENCE")) { return parseCreateSequence(); - } else if (readIf("USER")) { + } else if (readIf(USER)) { return parseCreateUser(); } else if (readIf("TRIGGER")) { return parseCreateTrigger(force); @@ -4114,12 +6738,8 @@ private Prepared parseCreate() { return parseCreateSchema(); } else if (readIf("CONSTANT")) { return parseCreateConstant(); - } else if (readIf("DOMAIN")) { - return parseCreateUserDataType(); - } else if (readIf("TYPE")) { - return parseCreateUserDataType(); - } else if (readIf("DATATYPE")) { - return parseCreateUserDataType(); + } else if (readIf("DOMAIN") || readIf("TYPE") || readIf("DATATYPE")) { + return parseCreateDomain(); } else if (readIf("AGGREGATE")) { return parseCreateAggregate(force); } else if (readIf("LINKED")) { @@ -4137,77 +6757,114 @@ private Prepared parseCreate() { if (readIf("LINKED")) { return parseCreateLinkedTable(true, false, force); } - read("TABLE"); + read(TABLE); return parseCreateTable(true, false, cached); } else if (readIf("GLOBAL")) { read("TEMPORARY"); if (readIf("LINKED")) { return parseCreateLinkedTable(true, true, force); } - read("TABLE"); + read(TABLE); return parseCreateTable(true, true, cached); } else if (readIf("TEMP") || readIf("TEMPORARY")) { if (readIf("LINKED")) { return parseCreateLinkedTable(true, true, force); } - read("TABLE"); + read(TABLE); return parseCreateTable(true, true, cached); - } else if (readIf("TABLE")) { + } else if (readIf(TABLE)) { if (!cached && 
!memory) { cached = database.getDefaultTableType() == Table.TYPE_CACHED; } return parseCreateTable(false, false, cached); + } else if (readIf("SYNONYM")) { + return parseCreateSynonym(orReplace); } else { boolean hash = false, primaryKey = false; boolean unique = false, spatial = false; String indexName = null; Schema oldSchema = null; boolean ifNotExists = false; - if (readIf("PRIMARY")) { - read("KEY"); + if (session.isQuirksMode() && readIf(PRIMARY)) { + read(KEY); if (readIf("HASH")) { hash = true; } primaryKey = true; - if (!isToken("ON")) { - ifNotExists = readIfNoExists(); + if (!isToken(ON)) { + ifNotExists = readIfNotExists(); indexName = readIdentifierWithSchema(null); oldSchema = getSchema(); } } else { - if (readIf("UNIQUE")) { + if (readIf(UNIQUE)) { unique = true; } if (readIf("HASH")) { hash = true; - } - if (readIf("SPATIAL")) { + } else if (!unique && readIf("SPATIAL")) { spatial = true; } - if (readIf("INDEX")) { - if (!isToken("ON")) { - ifNotExists = readIfNoExists(); - indexName = readIdentifierWithSchema(null); - oldSchema = getSchema(); - } - } else { - throw getSyntaxError(); + read("INDEX"); + if (!isToken(ON)) { + ifNotExists = readIfNotExists(); + indexName = readIdentifierWithSchema(null); + oldSchema = getSchema(); } } - read("ON"); + read(ON); String tableName = readIdentifierWithSchema(); checkSchema(oldSchema); + String comment = readCommentIf(); + if (!readIf(OPEN_PAREN)) { + // PostgreSQL compatibility + if (hash || spatial) { + throw getSyntaxError(); + } + read(USING); + if (readIf("BTREE")) { + // default + } else if (readIf("HASH")) { + hash = true; + } else { + read("RTREE"); + spatial = true; + } + read(OPEN_PAREN); + } CreateIndex command = new CreateIndex(session, getSchema()); command.setIfNotExists(ifNotExists); - command.setHash(hash); - command.setSpatial(spatial); command.setPrimaryKey(primaryKey); command.setTableName(tableName); - command.setUnique(unique); + command.setHash(hash); + command.setSpatial(spatial); 
command.setIndexName(indexName); - command.setComment(readCommentIf()); - read("("); - command.setIndexColumns(parseIndexColumnList()); + command.setComment(comment); + IndexColumn[] columns; + int uniqueColumnCount = 0; + if (spatial) { + columns = new IndexColumn[] { new IndexColumn(readIdentifier()) }; + if (unique) { + uniqueColumnCount = 1; + } + read(CLOSE_PAREN); + } else { + columns = parseIndexColumnList(); + if (unique) { + uniqueColumnCount = columns.length; + if (readIf("INCLUDE")) { + read(OPEN_PAREN); + IndexColumn[] columnsToInclude = parseIndexColumnList(); + int nonUniqueCount = columnsToInclude.length; + columns = Arrays.copyOf(columns, uniqueColumnCount + nonUniqueCount); + System.arraycopy(columnsToInclude, 0, columns, uniqueColumnCount, nonUniqueCount); + } + } else if (primaryKey) { + uniqueColumnCount = columns.length; + } + } + command.setIndexColumns(columns); + command.setUniqueColumnCount(uniqueColumnCount); return command; } } @@ -4216,7 +6873,7 @@ private Prepared parseCreate() { * @return true if we expect to see a TABLE clause */ private boolean addRoleOrRight(GrantRevoke command) { - if (readIf("SELECT")) { + if (readIf(SELECT)) { command.addRight(Right.SELECT); return true; } else if (readIf("DELETE")) { @@ -4228,15 +6885,6 @@ private boolean addRoleOrRight(GrantRevoke command) { } else if (readIf("UPDATE")) { command.addRight(Right.UPDATE); return true; - } else if (readIf("ALL")) { - command.addRight(Right.ALL); - return true; - } else if (readIf("ALTER")) { - read("ANY"); - read("SCHEMA"); - command.addRight(Right.ALTER_ANY_SCHEMA); - command.addTable(null); - return false; } else if (readIf("CONNECT")) { // ignore this right return true; @@ -4244,7 +6892,7 @@ private boolean addRoleOrRight(GrantRevoke command) { // ignore this right return true; } else { - command.addRoleName(readUniqueIdentifier()); + command.addRoleName(readIdentifier()); return false; } } @@ -4252,217 +6900,181 @@ private boolean addRoleOrRight(GrantRevoke 
command) { private GrantRevoke parseGrantRevoke(int operationType) { GrantRevoke command = new GrantRevoke(session); command.setOperationType(operationType); - boolean tableClauseExpected = addRoleOrRight(command); - while (readIf(",")) { - addRoleOrRight(command); - if (command.isRightMode() && command.isRoleMode()) { - throw DbException - .get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED); + boolean tableClauseExpected; + if (readIf(ALL)) { + readIf("PRIVILEGES"); + command.addRight(Right.ALL); + tableClauseExpected = true; + } else if (readIf("ALTER")) { + read(ANY); + read("SCHEMA"); + command.addRight(Right.ALTER_ANY_SCHEMA); + command.addTable(null); + tableClauseExpected = false; + } else { + tableClauseExpected = addRoleOrRight(command); + while (readIf(COMMA)) { + if (addRoleOrRight(command) != tableClauseExpected) { + throw DbException.get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED); + } } } if (tableClauseExpected) { - if (readIf("ON")) { - do { - Table table = readTableOrView(); - command.addTable(table); - } while (readIf(",")); + if (readIf(ON)) { + if (readIf("SCHEMA")) { + command.setSchema(database.getSchema(readIdentifier())); + } else { + readIf(TABLE); + do { + Table table = readTableOrView(); + command.addTable(table); + } while (readIf(COMMA)); + } } } - if (operationType == CommandInterface.GRANT) { - read("TO"); - } else { - read("FROM"); - } - command.setGranteeName(readUniqueIdentifier()); - return command; - } - - private Select parseValues() { - Select command = new Select(session); - currentSelect = command; - TableFilter filter = parseValuesTable(); - ArrayList list = New.arrayList(); - list.add(new Wildcard(null, null)); - command.setExpressions(list); - command.addTableFilter(filter, true); - command.init(); + read(operationType == CommandInterface.GRANT ? 
TO : FROM); + command.setGranteeName(readIdentifier()); return command; } - private TableFilter parseValuesTable() { - Schema mainSchema = database.getSchema(Constants.SCHEMA_MAIN); - TableFunction tf = (TableFunction) Function.getFunction(database, - "TABLE"); - ArrayList columns = New.arrayList(); - ArrayList> rows = New.arrayList(); - do { - int i = 0; - ArrayList row = New.arrayList(); - boolean multiColumn = readIf("("); - do { - Expression expr = readExpression(); - expr = expr.optimize(session); - int type = expr.getType(); - long prec; - int scale, displaySize; - Column column; - String columnName = "C" + (i + 1); - if (rows.size() == 0) { - if (type == Value.UNKNOWN) { - type = Value.STRING; - } - DataType dt = DataType.getDataType(type); - prec = dt.defaultPrecision; - scale = dt.defaultScale; - displaySize = dt.defaultDisplaySize; - column = new Column(columnName, type, prec, scale, - displaySize); - columns.add(column); - } - prec = expr.getPrecision(); - scale = expr.getScale(); - displaySize = expr.getDisplaySize(); - if (i >= columns.size()) { - throw DbException - .get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - Column c = columns.get(i); - type = Value.getHigherOrder(c.getType(), type); - prec = Math.max(c.getPrecision(), prec); - scale = Math.max(c.getScale(), scale); - displaySize = Math.max(c.getDisplaySize(), displaySize); - column = new Column(columnName, type, prec, scale, displaySize); - columns.set(i, column); - row.add(expr); - i++; - } while (multiColumn && readIf(",")); - if (multiColumn) { - read(")"); - } - rows.add(row); - } while (readIf(",")); - int columnCount = columns.size(); - int rowCount = rows.size(); - for (int i = 0; i < rowCount; i++) { - if (rows.get(i).size() != columnCount) { + private TableValueConstructor parseValues() { + ArrayList> rows = Utils.newSmallArrayList(); + ArrayList row = parseValuesRow(Utils.newSmallArrayList()); + rows.add(row); + int columnCount = row.size(); + while (readIf(COMMA)) { + row = 
parseValuesRow(new ArrayList<>(columnCount)); + if (row.size() != columnCount) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } + rows.add(row); } - for (int i = 0; i < columnCount; i++) { - Column c = columns.get(i); - if (c.getType() == Value.UNKNOWN) { - c = new Column(c.getName(), Value.STRING, 0, 0, 0); - columns.set(i, c); - } - Expression[] array = new Expression[rowCount]; - for (int j = 0; j < rowCount; j++) { - array[j] = rows.get(j).get(i); - } - ExpressionList list = new ExpressionList(array); - tf.setParameter(i, list); + return new TableValueConstructor(session, rows); + } + + private ArrayList parseValuesRow(ArrayList row) { + if (readIf(ROW)) { + read(OPEN_PAREN); + } else if (!readIf(OPEN_PAREN)) { + row.add(readExpression()); + return row; } - tf.setColumns(columns); - tf.doneWithParameters(); - Table table = new FunctionTable(mainSchema, session, tf, tf); - TableFilter filter = new TableFilter(session, table, null, - rightsChecked, currentSelect); - return filter; + do { + row.add(readExpression()); + } while (readIfMore()); + return row; } private Call parseCall() { Call command = new Call(session); currentPrepared = command; - command.setExpression(readExpression()); + int index = tokenIndex; + boolean canBeFunction; + switch (currentTokenType) { + case IDENTIFIER: + canBeFunction = true; + break; + case TABLE: + read(); + read(OPEN_PAREN); + command.setTableFunction(readTableFunction(ArrayTableFunction.TABLE)); + return command; + default: + canBeFunction = false; + } + try { + command.setExpression(readExpression()); + } catch (DbException e) { + if (canBeFunction && e.getErrorCode() == ErrorCode.FUNCTION_NOT_FOUND_1) { + setTokenIndex(index); + String schemaName = null, name = readIdentifier(); + if (readIf(DOT)) { + schemaName = name; + name = readIdentifier(); + if (readIf(DOT)) { + checkDatabaseName(schemaName); + schemaName = name; + name = readIdentifier(); + } + } + read(OPEN_PAREN); + Schema schema = schemaName != 
null ? database.getSchema(schemaName) : null; + command.setTableFunction(readTableFunction(name, schema)); + return command; + } + throw e; + } return command; } private CreateRole parseCreateRole() { CreateRole command = new CreateRole(session); - command.setIfNotExists(readIfNoExists()); - command.setRoleName(readUniqueIdentifier()); + command.setIfNotExists(readIfNotExists()); + command.setRoleName(readIdentifier()); return command; } private CreateSchema parseCreateSchema() { CreateSchema command = new CreateSchema(session); - command.setIfNotExists(readIfNoExists()); - command.setSchemaName(readUniqueIdentifier()); - if (readIf("AUTHORIZATION")) { - command.setAuthorization(readUniqueIdentifier()); + command.setIfNotExists(readIfNotExists()); + String authorization; + if (readIf(AUTHORIZATION)) { + authorization = readIdentifier(); + command.setSchemaName(authorization); + command.setAuthorization(authorization); } else { - command.setAuthorization(session.getUser().getName()); + command.setSchemaName(readIdentifier()); + if (readIf(AUTHORIZATION)) { + authorization = readIdentifier(); + } else { + authorization = session.getUser().getName(); + } + } + command.setAuthorization(authorization); + if (readIf(WITH)) { + command.setTableEngineParams(readTableEngineParams()); } return command; } + private ArrayList readTableEngineParams() { + ArrayList tableEngineParams = Utils.newSmallArrayList(); + do { + tableEngineParams.add(readIdentifier()); + } while (readIf(COMMA)); + return tableEngineParams; + } + private CreateSequence parseCreateSequence() { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); String sequenceName = readIdentifierWithSchema(); CreateSequence command = new CreateSequence(session, getSchema()); command.setIfNotExists(ifNotExists); command.setSequenceName(sequenceName); - while (true) { - if (readIf("START")) { - readIf("WITH"); - command.setStartWith(readExpression()); - } else if (readIf("INCREMENT")) { - 
readIf("BY"); - command.setIncrement(readExpression()); - } else if (readIf("MINVALUE")) { - command.setMinValue(readExpression()); - } else if (readIf("NOMINVALUE")) { - command.setMinValue(null); - } else if (readIf("MAXVALUE")) { - command.setMaxValue(readExpression()); - } else if (readIf("NOMAXVALUE")) { - command.setMaxValue(null); - } else if (readIf("CYCLE")) { - command.setCycle(true); - } else if (readIf("NOCYCLE")) { - command.setCycle(false); - } else if (readIf("NO")) { - if (readIf("MINVALUE")) { - command.setMinValue(null); - } else if (readIf("MAXVALUE")) { - command.setMaxValue(null); - } else if (readIf("CYCLE")) { - command.setCycle(false); - } else if (readIf("CACHE")) { - command.setCacheSize(ValueExpression.get(ValueLong.get(1))); - } else { - break; - } - } else if (readIf("CACHE")) { - command.setCacheSize(readExpression()); - } else if (readIf("NOCACHE")) { - command.setCacheSize(ValueExpression.get(ValueLong.get(1))); - } else if (readIf("BELONGS_TO_TABLE")) { - command.setBelongsToTable(true); - } else { - break; - } - } + SequenceOptions options = new SequenceOptions(); + parseSequenceOptions(options, command, true, false); + command.setOptions(options); return command; } - private boolean readIfNoExists() { - if (readIf("IF")) { - read("NOT"); - read("EXISTS"); + private boolean readIfNotExists() { + if (readIf(IF)) { + read(NOT); + read(EXISTS); return true; } return false; } private CreateConstant parseCreateConstant() { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); String constantName = readIdentifierWithSchema(); Schema schema = getSchema(); if (isKeyword(constantName)) { throw DbException.get(ErrorCode.CONSTANT_ALREADY_EXISTS_1, constantName); } - read("VALUE"); + read(VALUE); Expression expr = readExpression(); CreateConstant command = new CreateConstant(session, schema); command.setConstantName(constantName); @@ -4472,41 +7084,77 @@ private CreateConstant parseCreateConstant() { } private 
CreateAggregate parseCreateAggregate(boolean force) { - boolean ifNotExists = readIfNoExists(); - CreateAggregate command = new CreateAggregate(session); - command.setForce(force); - String name = readIdentifierWithSchema(); - if (isKeyword(name) || Function.getFunction(database, name) != null || - getAggregateType(name) >= 0) { - throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, - name); + boolean ifNotExists = readIfNotExists(); + String name = readIdentifierWithSchema(), upperName; + if (isKeyword(name) || BuiltinFunctions.isBuiltinFunction(database, upperName = upperName(name)) + || Aggregate.getAggregateType(upperName) != null) { + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); } + CreateAggregate command = new CreateAggregate(session, getSchema()); + command.setForce(force); command.setName(name); - command.setSchema(getSchema()); command.setIfNotExists(ifNotExists); - read("FOR"); - command.setJavaClassMethod(readUniqueIdentifier()); + read(FOR); + command.setJavaClassMethod(readStringOrIdentifier()); return command; } - private CreateUserDataType parseCreateUserDataType() { - boolean ifNotExists = readIfNoExists(); - CreateUserDataType command = new CreateUserDataType(session); - command.setTypeName(readUniqueIdentifier()); - read("AS"); - Column col = parseColumnForTable("VALUE", true); - if (readIf("CHECK")) { - Expression expr = readExpression(); - col.addCheckConstraint(session, expr); - } - col.rename(null); - command.setColumn(col); + private CreateDomain parseCreateDomain() { + boolean ifNotExists = readIfNotExists(); + String domainName = readIdentifierWithSchema(); + Schema schema = getSchema(); + CreateDomain command = new CreateDomain(session, schema); command.setIfNotExists(ifNotExists); + command.setTypeName(domainName); + readIf(AS); + TypeInfo dataType = readIfDataType(); + if (dataType != null) { + command.setDataType(dataType); + } else { + String parentDomainName = readIdentifierWithSchema(); + 
command.setParentDomain(getSchema().getDomain(parentDomainName)); + } + if (readIf(DEFAULT)) { + command.setDefaultExpression(readExpression()); + } + if (readIf(ON)) { + read("UPDATE"); + command.setOnUpdateExpression(readExpression()); + } + // Compatibility with 1.4.200 and older versions + if (readIf("SELECTIVITY")) { + readNonNegativeInt(); + } + String comment = readCommentIf(); + if (comment != null) { + command.setComment(comment); + } + for (;;) { + String constraintName; + if (readIf(CONSTRAINT)) { + constraintName = readIdentifier(); + read(CHECK); + } else if (readIf(CHECK)) { + constraintName = null; + } else { + break; + } + AlterDomainAddConstraint constraint = new AlterDomainAddConstraint(session, schema, ifNotExists); + constraint.setConstraintName(constraintName); + constraint.setDomainName(domainName); + parseDomainConstraint = true; + try { + constraint.setCheckExpression(readExpression()); + } finally { + parseDomainConstraint = false; + } + command.addConstraintCommand(constraint); + } return command; } private CreateTrigger parseCreateTrigger(boolean force) { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); String triggerName = readIdentifierWithSchema(null); Schema schema = getSchema(); boolean insteadOf, isBefore; @@ -4524,6 +7172,7 @@ private CreateTrigger parseCreateTrigger(boolean force) { } int typeMask = 0; boolean onRollback = false; + boolean allowOr = database.getMode().getEnum() == ModeEnum.PostgreSQL; do { if (readIf("INSERT")) { typeMask |= Trigger.INSERT; @@ -4531,15 +7180,15 @@ private CreateTrigger parseCreateTrigger(boolean force) { typeMask |= Trigger.UPDATE; } else if (readIf("DELETE")) { typeMask |= Trigger.DELETE; - } else if (readIf("SELECT")) { + } else if (readIf(SELECT)) { typeMask |= Trigger.SELECT; } else if (readIf("ROLLBACK")) { onRollback = true; } else { throw getSyntaxError(); } - } while (readIf(",")); - read("ON"); + } while (readIf(COMMA) || allowOr && readIf(OR)); + 
read(ON); String tableName = readIdentifierWithSchema(); checkSchema(schema); CreateTrigger command = new CreateTrigger(session, getSchema()); @@ -4551,30 +7200,31 @@ private CreateTrigger parseCreateTrigger(boolean force) { command.setOnRollback(onRollback); command.setTypeMask(typeMask); command.setTableName(tableName); - if (readIf("FOR")) { + if (readIf(FOR)) { read("EACH"); - read("ROW"); - command.setRowBased(true); - } else { - command.setRowBased(false); + if (readIf(ROW)) { + command.setRowBased(true); + } else { + read("STATEMENT"); + } } if (readIf("QUEUE")) { - command.setQueueSize(readPositiveInt()); + command.setQueueSize(readNonNegativeInt()); } command.setNoWait(readIf("NOWAIT")); - if (readIf("AS")) { + if (readIf(AS)) { command.setTriggerSource(readString()); } else { read("CALL"); - command.setTriggerClassName(readUniqueIdentifier()); + command.setTriggerClassName(readStringOrIdentifier()); } return command; } private CreateUser parseCreateUser() { CreateUser command = new CreateUser(session); - command.setIfNotExists(readIfNoExists()); - command.setUserName(readUniqueIdentifier()); + command.setIfNotExists(readIfNotExists()); + command.setUserName(readIdentifier()); command.setComment(readCommentIf()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); @@ -4585,8 +7235,7 @@ private CreateUser parseCreateUser() { } else if (readIf("IDENTIFIED")) { read("BY"); // uppercase if not quoted - command.setPassword(ValueExpression.get(ValueString - .get(readColumnIdentifier()))); + command.setPassword(ValueExpression.get(ValueVarchar.get(readIdentifier()))); } else { throw getSyntaxError(); } @@ -4597,90 +7246,261 @@ private CreateUser parseCreateUser() { } private CreateFunctionAlias parseCreateFunctionAlias(boolean force) { - boolean ifNotExists = readIfNoExists(); - String aliasName = readIdentifierWithSchema(); - if (isKeyword(aliasName) || - Function.getFunction(database, aliasName) != null || - getAggregateType(aliasName) >= 0) { - 
throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, - aliasName); - } - CreateFunctionAlias command = new CreateFunctionAlias(session, - getSchema()); + boolean ifNotExists = readIfNotExists(); + String aliasName; + if (currentTokenType != IDENTIFIER) { + aliasName = currentToken; + read(); + schemaName = session.getCurrentSchemaName(); + } else { + aliasName = readIdentifierWithSchema(); + } + String upperName = upperName(aliasName); + if (isReservedFunctionName(upperName)) { + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); + } + CreateFunctionAlias command = new CreateFunctionAlias(session, getSchema()); command.setForce(force); command.setAliasName(aliasName); command.setIfNotExists(ifNotExists); command.setDeterministic(readIf("DETERMINISTIC")); - command.setBufferResultSetToLocalTemp(!readIf("NOBUFFER")); - if (readIf("AS")) { + // Compatibility with old versions of H2 + readIf("NOBUFFER"); + if (readIf(AS)) { command.setSource(readString()); } else { - read("FOR"); - command.setJavaClassMethod(readUniqueIdentifier()); + read(FOR); + command.setJavaClassMethod(readStringOrIdentifier()); } return command; } - private Query parseWith() { + private String readStringOrIdentifier() { + return currentTokenType != IDENTIFIER ? 
readString() : readIdentifier(); + } + + private boolean isReservedFunctionName(String name) { + int tokenType = ParserUtil.getTokenType(name, false, false); + if (tokenType != ParserUtil.IDENTIFIER) { + if (database.isAllowBuiltinAliasOverride()) { + switch (tokenType) { + case CURRENT_DATE: + case CURRENT_TIME: + case CURRENT_TIMESTAMP: + case DAY: + case HOUR: + case LOCALTIME: + case LOCALTIMESTAMP: + case MINUTE: + case MONTH: + case SECOND: + case YEAR: + return false; + } + } + return true; + } + return Aggregate.getAggregateType(name) != null + || BuiltinFunctions.isBuiltinFunction(database, name) && !database.isAllowBuiltinAliasOverride(); + } + + private Prepared parseWith() { + List viewsCreated = new ArrayList<>(); + try { + return parseWith1(viewsCreated); + } catch (Throwable t) { + CommandContainer.clearCTE(session, viewsCreated); + throw t; + } + } + + private Prepared parseWith1(List viewsCreated) { readIf("RECURSIVE"); - String tempViewName = readIdentifierWithSchema(); + + // This WITH statement is not a temporary view - it is part of a persistent view + // as in CREATE VIEW abc AS WITH my_cte - this auto detects that condition. + final boolean isTemporary = !session.isParsingCreateView(); + + do { + viewsCreated.add(parseSingleCommonTableExpression(isTemporary)); + } while (readIf(COMMA)); + + Prepared p; + // Reverse the order of constructed CTE views - as the destruction order + // (since later created view may depend on previously created views - + // we preserve that dependency order in the destruction sequence ) + // used in setCteCleanups. 
+ Collections.reverse(viewsCreated); + + int start = tokenIndex; + if (isQueryQuick()) { + p = parseWithQuery(); + } else if (readIf("INSERT")) { + p = parseInsert(start); + p.setPrepareAlways(true); + } else if (readIf("UPDATE")) { + p = parseUpdate(start); + p.setPrepareAlways(true); + } else if (readIf("MERGE")) { + p = parseMerge(start); + p.setPrepareAlways(true); + } else if (readIf("DELETE")) { + p = parseDelete(start); + p.setPrepareAlways(true); + } else if (readIf("CREATE")) { + if (!isToken(TABLE)) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, + WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS); + } + p = parseCreate(); + p.setPrepareAlways(true); + } else { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, + WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS); + } + + // Clean up temporary views starting with last to first (in case of + // dependencies) - but only if they are not persistent. + if (isTemporary) { + if (cteCleanups == null) { + cteCleanups = new ArrayList<>(viewsCreated.size()); + } + cteCleanups.addAll(viewsCreated); + } + return p; + } + + private Prepared parseWithQuery() { + Query query = parseQueryExpressionBodyAndEndOfQuery(); + query.setPrepareAlways(true); + query.setNeverLazy(true); + return query; + } + + private TableView parseSingleCommonTableExpression(boolean isTemporary) { + String cteViewName = readIdentifierWithSchema(); Schema schema = getSchema(); - Table recursiveTable; - read("("); - ArrayList columns = New.arrayList(); - String[] cols = parseColumnList(); - for (String c : cols) { - columns.add(new Column(c, Value.STRING)); - } - Table old = session.findLocalTempTable(tempViewName); - if (old != null) { - if (!(old instanceof TableView)) { + ArrayList columns = Utils.newSmallArrayList(); + String[] cols = null; + + // column names are now optional - they can be inferred from the named + // query, if not supplied by user + if (readIf(OPEN_PAREN)) { + cols = parseColumnList(); + for (String c : cols) { + // we don't 
really know the type of the column, so STRING will + // have to do, UNKNOWN does not work here + columns.add(new Column(c, TypeInfo.TYPE_VARCHAR)); + } + } + + Table oldViewFound; + if (!isTemporary) { + oldViewFound = getSchema().findTableOrView(session, cteViewName); + } else { + oldViewFound = session.findLocalTempTable(cteViewName); + } + // this persistent check conflicts with check 10 lines down + if (oldViewFound != null) { + if (!(oldViewFound instanceof TableView)) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, - tempViewName); + cteViewName); } - TableView tv = (TableView) old; + TableView tv = (TableView) oldViewFound; if (!tv.isTableExpression()) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, - tempViewName); - } - session.removeLocalTempTable(old); - } - CreateTableData data = new CreateTableData(); - data.id = database.allocateObjectId(); - data.columns = columns; - data.tableName = tempViewName; - data.temporary = true; - data.persistData = true; - data.persistIndexes = false; - data.create = true; - data.session = session; - recursiveTable = schema.createTable(data); - session.addLocalTempTable(recursiveTable); - String querySQL; + cteViewName); + } + if (!isTemporary) { + oldViewFound.lock(session, Table.EXCLUSIVE_LOCK); + database.removeSchemaObject(session, oldViewFound); + + } else { + session.removeLocalTempTable(oldViewFound); + } + } + /* + * This table is created as a workaround because recursive table + * expressions need to reference something that look like themselves to + * work (its removed after creation in this method). Only create table + * data and table if we don't have a working CTE already. 
+ */ + Table recursiveTable = TableView.createShadowTableForRecursiveTableExpression( + isTemporary, session, cteViewName, schema, columns, database); + List columnTemplateList; + String[] querySQLOutput = new String[1]; try { - read("AS"); - read("("); - Query withQuery = parseSelect(); - read(")"); - withQuery.prepare(); - querySQL = StringUtils.fromCacheOrNew(withQuery.getPlanSQL()); + read(AS); + read(OPEN_PAREN); + Query withQuery = parseQuery(); + if (!isTemporary) { + withQuery.session = session; + } + read(CLOSE_PAREN); + columnTemplateList = TableView.createQueryColumnTemplateList(cols, withQuery, querySQLOutput); + } finally { - session.removeLocalTempTable(recursiveTable); + TableView.destroyShadowTableForRecursiveExpression(isTemporary, session, recursiveTable); } + + return createCTEView(cteViewName, + querySQLOutput[0], columnTemplateList, + true/* allowRecursiveQueryDetection */, + true/* add to session */, + isTemporary); + } + + private TableView createCTEView(String cteViewName, String querySQL, + List columnTemplateList, boolean allowRecursiveQueryDetection, + boolean addViewToSession, boolean isTemporary) { + Schema schema = getSchemaWithDefault(); int id = database.allocateObjectId(); - TableView view = new TableView(schema, id, tempViewName, querySQL, - null, cols, session, true); + Column[] columnTemplateArray = columnTemplateList.toArray(new Column[0]); + + // No easy way to determine if this is a recursive query up front, so we just compile + // it twice - once without the flag set, and if we didn't see a recursive term, + // then we just compile it again. 
+ TableView view; + synchronized (session) { + view = new TableView(schema, id, cteViewName, querySQL, + parameters, columnTemplateArray, session, + allowRecursiveQueryDetection, false /* literalsChecked */, true /* isTableExpression */, + isTemporary); + if (!view.isRecursiveQueryDetected() && allowRecursiveQueryDetection) { + if (!isTemporary) { + database.addSchemaObject(session, view); + view.lock(session, Table.EXCLUSIVE_LOCK); + database.removeSchemaObject(session, view); + } else { + session.removeLocalTempTable(view); + } + view = new TableView(schema, id, cteViewName, querySQL, parameters, + columnTemplateArray, session, + false/* assume recursive */, false /* literalsChecked */, true /* isTableExpression */, + isTemporary); + } + // both removeSchemaObject and removeLocalTempTable hold meta locks + database.unlockMeta(session); + } view.setTableExpression(true); - view.setTemporary(true); - session.addLocalTempTable(view); - view.setOnCommitDrop(true); - Query q = parseSelect(); - q.setPrepareAlways(true); - return q; + view.setTemporary(isTemporary); + view.setHidden(true); + view.setOnCommitDrop(false); + if (addViewToSession) { + if (!isTemporary) { + database.addSchemaObject(session, view); + view.unlock(session); + database.unlockMeta(session); + } else { + session.addLocalTempTable(view); + } + } + return view; } private CreateView parseCreateView(boolean force, boolean orReplace) { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); + boolean isTableExpression = readIf("TABLE_EXPRESSION"); String viewName = readIdentifierWithSchema(); CreateView command = new CreateView(session, getSchema()); this.createView = command; @@ -4689,21 +7509,27 @@ private CreateView parseCreateView(boolean force, boolean orReplace) { command.setComment(readCommentIf()); command.setOrReplace(orReplace); command.setForce(force); - if (readIf("(")) { + command.setTableExpression(isTableExpression); + if (readIf(OPEN_PAREN)) { String[] cols 
= parseColumnList(); command.setColumnNames(cols); } - String select = StringUtils.fromCacheOrNew(sqlCommand - .substring(parseIndex)); - read("AS"); + read(AS); + String select = StringUtils.cache(sqlCommand.substring(token.start())); try { - Query query = parseSelect(); - query.prepare(); + Query query; + session.setParsingCreateView(true); + try { + query = parseQuery(); + query.prepare(); + } finally { + session.setParsingCreateView(false); + } command.setSelect(query); } catch (DbException e) { if (force) { command.setSelectSQL(select); - while (currentTokenType != END) { + while (currentTokenType != END_OF_INPUT) { read(); } } else { @@ -4726,9 +7552,9 @@ private TransactionCommand parseCheckpoint() { } private Prepared parseAlter() { - if (readIf("TABLE")) { + if (readIf(TABLE)) { return parseAlterTable(); - } else if (readIf("USER")) { + } else if (readIf(USER)) { return parseAlterUser(); } else if (readIf("INDEX")) { return parseAlterIndex(); @@ -4738,6 +7564,8 @@ private Prepared parseAlter() { return parseAlterSequence(); } else if (readIf("VIEW")) { return parseAlterView(); + } else if (readIf("DOMAIN")) { + return parseAlterDomain(); } throw getSyntaxError(); } @@ -4749,94 +7577,274 @@ private void checkSchema(Schema old) { } private AlterIndexRename parseAlterIndex() { + boolean ifExists = readIfExists(false); String indexName = readIdentifierWithSchema(); Schema old = getSchema(); AlterIndexRename command = new AlterIndexRename(session); - command.setOldIndex(getSchema().getIndex(indexName)); + command.setOldSchema(old); + command.setOldName(indexName); + command.setIfExists(ifExists); read("RENAME"); - read("TO"); + read(TO); String newName = readIdentifierWithSchema(old.getName()); checkSchema(old); command.setNewName(newName); return command; } - private AlterView parseAlterView() { - AlterView command = new AlterView(session); + private DefineCommand parseAlterDomain() { + boolean ifDomainExists = readIfExists(false); + String domainName = 
readIdentifierWithSchema(); + Schema schema = getSchema(); + if (readIf("ADD")) { + boolean ifNotExists = false; + String constraintName = null; + String comment = null; + if (readIf(CONSTRAINT)) { + ifNotExists = readIfNotExists(); + constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + comment = readCommentIf(); + } + read(CHECK); + AlterDomainAddConstraint command = new AlterDomainAddConstraint(session, schema, ifNotExists); + command.setDomainName(domainName); + command.setConstraintName(constraintName); + parseDomainConstraint = true; + try { + command.setCheckExpression(readExpression()); + } finally { + parseDomainConstraint = false; + } + command.setIfDomainExists(ifDomainExists); + command.setComment(comment); + if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } else { + readIf(CHECK); + command.setCheckExisting(true); + } + return command; + } else if (readIf("DROP")) { + if (readIf(CONSTRAINT)) { + boolean ifConstraintExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterDomainDropConstraint command = new AlterDomainDropConstraint(session, getSchema(), + ifConstraintExists); + command.setConstraintName(constraintName); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + return command; + } else if (readIf(DEFAULT)) { + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_DEFAULT); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(null); + return command; + } else if (readIf(ON)) { + read("UPDATE"); + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_ON_UPDATE); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(null); + return command; + } + } else if (readIf("RENAME")) { + if 
(readIf(CONSTRAINT)) { + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + read(TO); + AlterDomainRenameConstraint command = new AlterDomainRenameConstraint(session, schema); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setConstraintName(constraintName); + command.setNewConstraintName(readIdentifier()); + return command; + } + read(TO); + String newName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterDomainRename command = new AlterDomainRename(session, getSchema()); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setNewDomainName(newName); + return command; + } else { + read(SET); + if (readIf(DEFAULT)) { + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_DEFAULT); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(readExpression()); + return command; + } else if (readIf(ON)) { + read("UPDATE"); + AlterDomainExpressions command = new AlterDomainExpressions(session, schema, + CommandInterface.ALTER_DOMAIN_ON_UPDATE); + command.setDomainName(domainName); + command.setIfDomainExists(ifDomainExists); + command.setExpression(readExpression()); + return command; + } + } + throw getSyntaxError(); + } + + private DefineCommand parseAlterView() { + boolean ifExists = readIfExists(false); String viewName = readIdentifierWithSchema(); - Table tableView = getSchema().findTableOrView(session, viewName); - if (!(tableView instanceof TableView)) { + Schema schema = getSchema(); + Table tableView = schema.findTableOrView(session, viewName); + if (!(tableView instanceof TableView) && !ifExists) { throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName); } - TableView view = (TableView) tableView; - command.setView(view); - read("RECOMPILE"); - return command; + if (readIf("RENAME")) { + read(TO); + String 
newName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterTableRename command = new AlterTableRename(session, getSchema()); + command.setTableName(viewName); + command.setNewTableName(newName); + command.setIfTableExists(ifExists); + return command; + } else { + read("RECOMPILE"); + TableView view = (TableView) tableView; + AlterView command = new AlterView(session); + command.setIfExists(ifExists); + command.setView(view); + return command; + } } - private AlterSchemaRename parseAlterSchema() { + private Prepared parseAlterSchema() { + boolean ifExists = readIfExists(false); String schemaName = readIdentifierWithSchema(); Schema old = getSchema(); - AlterSchemaRename command = new AlterSchemaRename(session); - command.setOldSchema(getSchema(schemaName)); read("RENAME"); - read("TO"); + read(TO); String newName = readIdentifierWithSchema(old.getName()); + Schema schema = findSchema(schemaName); + if (schema == null) { + if (ifExists) { + return new NoOperation(session); + } + throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); + } + AlterSchemaRename command = new AlterSchemaRename(session); + command.setOldSchema(schema); checkSchema(old); command.setNewName(newName); return command; } private AlterSequence parseAlterSequence() { + boolean ifExists = readIfExists(false); String sequenceName = readIdentifierWithSchema(); - Sequence sequence = getSchema().getSequence(sequenceName); - AlterSequence command = new AlterSequence(session, sequence.getSchema()); - command.setSequence(sequence); - while (true) { - if (readIf("RESTART")) { - read("WITH"); - command.setStartWith(readExpression()); - } else if (readIf("INCREMENT")) { - read("BY"); - command.setIncrement(readExpression()); - } else if (readIf("MINVALUE")) { - command.setMinValue(readExpression()); - } else if (readIf("NOMINVALUE")) { - command.setMinValue(null); - } else if (readIf("MAXVALUE")) { - command.setMaxValue(readExpression()); - } else if (readIf("NOMAXVALUE")) 
{ - command.setMaxValue(null); - } else if (readIf("CYCLE")) { - command.setCycle(true); - } else if (readIf("NOCYCLE")) { - command.setCycle(false); - } else if (readIf("NO")) { - if (readIf("MINVALUE")) { - command.setMinValue(null); - } else if (readIf("MAXVALUE")) { - command.setMaxValue(null); - } else if (readIf("CYCLE")) { - command.setCycle(false); - } else if (readIf("CACHE")) { - command.setCacheSize(ValueExpression.get(ValueLong.get(1))); + AlterSequence command = new AlterSequence(session, getSchema()); + command.setSequenceName(sequenceName); + command.setIfExists(ifExists); + SequenceOptions options = new SequenceOptions(); + parseSequenceOptions(options, null, false, false); + command.setOptions(options); + return command; + } + + private boolean parseSequenceOptions(SequenceOptions options, CreateSequence command, boolean allowDataType, + boolean forAlterColumn) { + boolean result = false; + for (;;) { + if (allowDataType && readIf(AS)) { + TypeInfo dataType = parseDataType(); + if (!DataType.isNumericType(dataType.getValueType())) { + throw DbException.getUnsupportedException(dataType + .getSQL(new StringBuilder("CREATE SEQUENCE AS "), HasSQL.TRACE_SQL_FLAGS).toString()); + } + options.setDataType(dataType); + } else if (readIf("START")) { + read(WITH); + options.setStartValue(readExpression()); + } else if (readIf("RESTART")) { + options.setRestartValue(readIf(WITH) ? 
readExpression() : ValueExpression.DEFAULT); + } else if (command != null && parseCreateSequenceOption(command)) { + // + } else if (forAlterColumn) { + int index = tokenIndex; + if (readIf(SET)) { + if (!parseBasicSequenceOption(options)) { + setTokenIndex(index); + break; + } } else { break; } + } else if (!parseBasicSequenceOption(options)) { + break; + } + result = true; + } + return result; + } + + private boolean parseCreateSequenceOption(CreateSequence command) { + if (readIf("BELONGS_TO_TABLE")) { + command.setBelongsToTable(true); + } else if (readIf(ORDER)) { + // Oracle compatibility + } else { + return false; + } + return true; + } + + private boolean parseBasicSequenceOption(SequenceOptions options) { + if (readIf("INCREMENT")) { + readIf("BY"); + options.setIncrement(readExpression()); + } else if (readIf("MINVALUE")) { + options.setMinValue(readExpression()); + } else if (readIf("MAXVALUE")) { + options.setMaxValue(readExpression()); + } else if (readIf("CYCLE")) { + options.setCycle(Sequence.Cycle.CYCLE); + } else if (readIf("NO")) { + if (readIf("MINVALUE")) { + options.setMinValue(ValueExpression.NULL); + } else if (readIf("MAXVALUE")) { + options.setMaxValue(ValueExpression.NULL); + } else if (readIf("CYCLE")) { + options.setCycle(Sequence.Cycle.NO_CYCLE); } else if (readIf("CACHE")) { - command.setCacheSize(readExpression()); - } else if (readIf("NOCACHE")) { - command.setCacheSize(ValueExpression.get(ValueLong.get(1))); + options.setCacheSize(ValueExpression.get(ValueBigint.get(1))); } else { - break; + throw getSyntaxError(); } + } else if (readIf("EXHAUSTED")) { + options.setCycle(Sequence.Cycle.EXHAUSTED); + } else if (readIf("CACHE")) { + options.setCacheSize(readExpression()); + // Various compatibility options + } else if (readIf("NOMINVALUE")) { + options.setMinValue(ValueExpression.NULL); + } else if (readIf("NOMAXVALUE")) { + options.setMaxValue(ValueExpression.NULL); + } else if (readIf("NOCYCLE")) { + 
options.setCycle(Sequence.Cycle.NO_CYCLE); + } else if (readIf("NOCACHE")) { + options.setCacheSize(ValueExpression.get(ValueBigint.get(1))); + } else { + return false; } - return command; + return true; } private AlterUser parseAlterUser() { - String userName = readUniqueIdentifier(); - if (readIf("SET")) { + String userName = readIdentifier(); + if (readIf(SET)) { AlterUser command = new AlterUser(session); command.setType(CommandInterface.ALTER_USER_SET_PASSWORD); command.setUser(database.getUser(userName)); @@ -4851,21 +7859,20 @@ private AlterUser parseAlterUser() { } return command; } else if (readIf("RENAME")) { - read("TO"); + read(TO); AlterUser command = new AlterUser(session); command.setType(CommandInterface.ALTER_USER_RENAME); command.setUser(database.getUser(userName)); - String newName = readUniqueIdentifier(); - command.setNewName(newName); + command.setNewName(readIdentifier()); return command; } else if (readIf("ADMIN")) { AlterUser command = new AlterUser(session); command.setType(CommandInterface.ALTER_USER_ADMIN); User user = database.getUser(userName); command.setUser(user); - if (readIf("TRUE")) { + if (readIf(TRUE)) { command.setAdmin(true); - } else if (readIf("FALSE")) { + } else if (readIf(FALSE)) { command.setAdmin(false); } else { throw getSyntaxError(); @@ -4876,30 +7883,22 @@ private AlterUser parseAlterUser() { } private void readIfEqualOrTo() { - if (!readIf("=")) { - readIf("TO"); + if (!readIf(EQUAL)) { + readIf(TO); } } private Prepared parseSet() { - if (readIf("@")) { + if (readIf(AT)) { Set command = new Set(session, SetTypes.VARIABLE); - command.setString(readAliasIdentifier()); + command.setString(readIdentifier()); readIfEqualOrTo(); command.setExpression(readExpression()); return command; } else if (readIf("AUTOCOMMIT")) { readIfEqualOrTo(); - boolean value = readBooleanSetting(); - int setting = value ? 
CommandInterface.SET_AUTOCOMMIT_TRUE - : CommandInterface.SET_AUTOCOMMIT_FALSE; - return new TransactionCommand(session, setting); - } else if (readIf("MVCC")) { - readIfEqualOrTo(); - boolean value = readBooleanSetting(); - Set command = new Set(session, SetTypes.MVCC); - command.setInt(value ? 1 : 0); - return command; + return new TransactionCommand(session, readBooleanSetting() ? CommandInterface.SET_AUTOCOMMIT_TRUE + : CommandInterface.SET_AUTOCOMMIT_FALSE); } else if (readIf("EXCLUSIVE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.EXCLUSIVE); @@ -4907,9 +7906,8 @@ private Prepared parseSet() { return command; } else if (readIf("IGNORECASE")) { readIfEqualOrTo(); - boolean value = readBooleanSetting(); Set command = new Set(session, SetTypes.IGNORECASE); - command.setInt(value ? 1 : 0); + command.setInt(readBooleanSetting() ? 1 : 0); return command; } else if (readIf("PASSWORD")) { readIfEqualOrTo(); @@ -4930,16 +7928,7 @@ private Prepared parseSet() { } else if (readIf("MODE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.MODE); - command.setString(readAliasIdentifier()); - return command; - } else if (readIf("COMPRESS_LOB")) { - readIfEqualOrTo(); - Set command = new Set(session, SetTypes.COMPRESS_LOB); - if (currentTokenType == VALUE) { - command.setString(readString()); - } else { - command.setString(readUniqueIdentifier()); - } + command.setString(readIdentifier()); return command; } else if (readIf("DATABASE")) { readIfEqualOrTo(); @@ -4948,9 +7937,6 @@ private Prepared parseSet() { } else if (readIf("COLLATION")) { readIfEqualOrTo(); return parseSetCollation(); - } else if (readIf("BINARY_COLLATION")) { - readIfEqualOrTo(); - return parseSetBinaryCollation(); } else if (readIf("CLUSTER")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.CLUSTER); @@ -4964,156 +7950,174 @@ private Prepared parseSet() { } else if (readIf("ALLOW_LITERALS")) { readIfEqualOrTo(); Set command = new Set(session, 
SetTypes.ALLOW_LITERALS); - if (readIf("NONE")) { - command.setInt(Constants.ALLOW_LITERALS_NONE); - } else if (readIf("ALL")) { - command.setInt(Constants.ALLOW_LITERALS_ALL); + int v; + if (readIf(ALL)) { + v = Constants.ALLOW_LITERALS_ALL; + } else if (readIf("NONE")) { + v = Constants.ALLOW_LITERALS_NONE; } else if (readIf("NUMBERS")) { - command.setInt(Constants.ALLOW_LITERALS_NUMBERS); + v = Constants.ALLOW_LITERALS_NUMBERS; } else { - command.setInt(readPositiveInt()); + v = readNonNegativeInt(); } + command.setInt(v); return command; } else if (readIf("DEFAULT_TABLE_TYPE")) { readIfEqualOrTo(); Set command = new Set(session, SetTypes.DEFAULT_TABLE_TYPE); + int v; if (readIf("MEMORY")) { - command.setInt(Table.TYPE_MEMORY); + v = Table.TYPE_MEMORY; } else if (readIf("CACHED")) { - command.setInt(Table.TYPE_CACHED); + v = Table.TYPE_CACHED; } else { - command.setInt(readPositiveInt()); + v = readNonNegativeInt(); } + command.setInt(v); return command; - } else if (readIf("CREATE")) { - readIfEqualOrTo(); - // Derby compatibility (CREATE=TRUE in the database URL) - read(); - return new NoOperation(session); - } else if (readIf("HSQLDB.DEFAULT_TABLE_TYPE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("PAGE_STORE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("CACHE_TYPE")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("FILE_LOCK")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("DB_CLOSE_ON_EXIT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_SERVER")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_SERVER_PORT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("AUTO_RECONNECT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if 
(readIf("ASSERT")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("ACCESS_MODE_DATA")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("OPEN_NEW")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("JMX")) { - readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("PAGE_SIZE")) { + } else if (readIf("SCHEMA")) { readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("RECOVER")) { + Set command = new Set(session, SetTypes.SCHEMA); + command.setExpression(readExpressionOrIdentifier()); + return command; + } else if (readIf("CATALOG")) { readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("NAMES")) { - // Quercus PHP MySQL driver compatibility + Set command = new Set(session, SetTypes.CATALOG); + command.setExpression(readExpressionOrIdentifier()); + return command; + } else if (readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) { readIfEqualOrTo(); - read(); - return new NoOperation(session); - } else if (readIf("SCHEMA")) { + Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH); + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readIdentifier()); + } while (readIf(COMMA)); + command.setStringArray(list.toArray(new String[0])); + return command; + } else if (readIf("JAVA_OBJECT_SERIALIZER")) { readIfEqualOrTo(); - Set command = new Set(session, SetTypes.SCHEMA); - command.setString(readAliasIdentifier()); + Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER); + command.setString(readString()); return command; - } else if (readIf("DATESTYLE")) { - // PostgreSQL compatibility + } else if (readIf("IGNORE_CATALOGS")) { readIfEqualOrTo(); - if (!readIf("ISO")) { - String s = readString(); - if (!equalsToken(s, "ISO")) { - throw getSyntaxError(); - } + Set command = new Set(session, SetTypes.IGNORE_CATALOGS); + 
command.setInt(readBooleanSetting() ? 1 : 0); + return command; + } else if (readIf("SESSION")) { + read("CHARACTERISTICS"); + read(AS); + read("TRANSACTION"); + return parseSetTransactionMode(); + } else if (readIf("TRANSACTION")) { + // TODO should affect only the current transaction + return parseSetTransactionMode(); + } else if (readIf("TIME")) { + read("ZONE"); + Set command = new Set(session, SetTypes.TIME_ZONE); + if (!readIf("LOCAL")) { + command.setExpression(readExpression()); } - return new NoOperation(session); - } else if (readIf("SEARCH_PATH") || - readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) { + return command; + } else if (readIf("NON_KEYWORDS")) { readIfEqualOrTo(); - Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH); - ArrayList list = New.arrayList(); - list.add(readAliasIdentifier()); - while (readIf(",")) { - list.add(readAliasIdentifier()); - } - String[] schemaNames = new String[list.size()]; - list.toArray(schemaNames); - command.setStringArray(schemaNames); + Set command = new Set(session, SetTypes.NON_KEYWORDS); + ArrayList list = Utils.newSmallArrayList(); + if (currentTokenType != END_OF_INPUT && currentTokenType != SEMICOLON) { + do { + if (currentTokenType < IDENTIFIER || currentTokenType > LAST_KEYWORD) { + throw getSyntaxError(); + } + list.add(StringUtils.toUpperEnglish(currentToken)); + read(); + } while (readIf(COMMA)); + } + command.setStringArray(list.toArray(new String[0])); return command; - } else if (readIf("JAVA_OBJECT_SERIALIZER")) { + } else if (readIf("DEFAULT_NULL_ORDERING")) { readIfEqualOrTo(); - return parseSetJavaObjectSerializer(); + Set command = new Set(session, SetTypes.DEFAULT_NULL_ORDERING); + command.setString(readIdentifier()); + return command; + } else if (readIf("LOG")) { + throw DbException.getUnsupportedException("LOG"); } else { - if (isToken("LOGSIZE")) { - // HSQLDB compatibility - currentToken = SetTypes.getTypeName(SetTypes.MAX_LOG_SIZE); + String upperName = 
upperName(currentToken); + if (ConnectionInfo.isIgnoredByParser(upperName)) { + read(); + readIfEqualOrTo(); + read(); + return new NoOperation(session); } - if (isToken("FOREIGN_KEY_CHECKS")) { - // MySQL compatibility - currentToken = SetTypes - .getTypeName(SetTypes.REFERENTIAL_INTEGRITY); + int type = SetTypes.getType(upperName); + if (type >= 0) { + read(); + readIfEqualOrTo(); + Set command = new Set(session, type); + command.setExpression(readExpression()); + return command; } - int type = SetTypes.getType(currentToken); - if (type < 0) { - throw getSyntaxError(); + ModeEnum modeEnum = database.getMode().getEnum(); + if (modeEnum != ModeEnum.REGULAR) { + Prepared command = readSetCompatibility(modeEnum); + if (command != null) { + return command; + } } - read(); - readIfEqualOrTo(); - Set command = new Set(session, type); - command.setExpression(readExpression()); - return command; + if (session.isQuirksMode()) { + switch (upperName) { + case "BINARY_COLLATION": + case "UUID_COLLATION": + read(); + readIfEqualOrTo(); + readIdentifier(); + return new NoOperation(session); + } + } + throw getSyntaxError(); + } + } + + private Prepared parseSetTransactionMode() { + IsolationLevel isolationLevel; + read("ISOLATION"); + read("LEVEL"); + if (readIf("READ")) { + if (readIf("UNCOMMITTED")) { + isolationLevel = IsolationLevel.READ_UNCOMMITTED; + } else { + read("COMMITTED"); + isolationLevel = IsolationLevel.READ_COMMITTED; + } + } else if (readIf("REPEATABLE")) { + read("READ"); + isolationLevel = IsolationLevel.REPEATABLE_READ; + } else if (readIf("SNAPSHOT")) { + isolationLevel = IsolationLevel.SNAPSHOT; + } else { + read("SERIALIZABLE"); + isolationLevel = IsolationLevel.SERIALIZABLE; + } + return new SetSessionCharacteristics(session, isolationLevel); + } + + private Expression readExpressionOrIdentifier() { + if (isIdentifier()) { + return ValueExpression.get(ValueVarchar.get(readIdentifier())); } + return readExpression(); } private Prepared parseUse() { 
readIfEqualOrTo(); Set command = new Set(session, SetTypes.SCHEMA); - command.setString(readAliasIdentifier()); + command.setExpression(ValueExpression.get(ValueVarchar.get(readIdentifier()))); return command; } private Set parseSetCollation() { Set command = new Set(session, SetTypes.COLLATION); - String name = readAliasIdentifier(); + String name = readIdentifier(); command.setString(name); if (equalsToken(name, CompareMode.OFF)) { return command; @@ -5123,7 +8127,7 @@ private Set parseSetCollation() { throw DbException.getInvalidValueException("collation", name); } if (readIf("STRENGTH")) { - if (readIf("PRIMARY")) { + if (readIf(PRIMARY)) { command.setInt(Collator.PRIMARY); } else if (readIf("SECONDARY")) { command.setInt(Collator.SECONDARY); @@ -5138,33 +8142,99 @@ private Set parseSetCollation() { return command; } - private Set parseSetBinaryCollation() { - Set command = new Set(session, SetTypes.BINARY_COLLATION); - String name = readAliasIdentifier(); - command.setString(name); - if (equalsToken(name, CompareMode.UNSIGNED) || - equalsToken(name, CompareMode.SIGNED)) { - return command; + private Prepared readSetCompatibility(ModeEnum modeEnum) { + switch (modeEnum) { + case Derby: + if (readIf("CREATE")) { + readIfEqualOrTo(); + // (CREATE=TRUE in the database URL) + read(); + return new NoOperation(session); + } + break; + case HSQLDB: + if (readIf("LOGSIZE")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.MAX_LOG_SIZE); + command.setExpression(readExpression()); + return command; + } + break; + case MySQL: + if (readIf("FOREIGN_KEY_CHECKS")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.REFERENTIAL_INTEGRITY); + command.setExpression(readExpression()); + return command; + } else if (readIf("NAMES")) { + // Quercus PHP MySQL driver compatibility + readIfEqualOrTo(); + read(); + return new NoOperation(session); + } + break; + case PostgreSQL: + if (readIf("STATEMENT_TIMEOUT")) { + readIfEqualOrTo(); + Set command = new 
Set(session, SetTypes.QUERY_TIMEOUT); + command.setInt(readNonNegativeInt()); + return command; + } else if (readIf("CLIENT_ENCODING") || readIf("CLIENT_MIN_MESSAGES") || readIf("JOIN_COLLAPSE_LIMIT")) { + readIfEqualOrTo(); + read(); + return new NoOperation(session); + } else if (readIf("DATESTYLE")) { + readIfEqualOrTo(); + if (!readIf("ISO")) { + String s = readString(); + if (!equalsToken(s, "ISO")) { + throw getSyntaxError(); + } + } + return new NoOperation(session); + } else if (readIf("SEARCH_PATH")) { + readIfEqualOrTo(); + Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH); + ArrayList list = Utils.newSmallArrayList(); + String pgCatalog = database.sysIdentifier("PG_CATALOG"); + boolean hasPgCatalog = false; + do { + // some PG clients will send single-quoted alias + String s = currentTokenType == LITERAL ? readString() : readIdentifier(); + if ("$user".equals(s)) { + continue; + } + if (pgCatalog.equals(s)) { + hasPgCatalog = true; + } + list.add(s); + } while (readIf(COMMA)); + // If "pg_catalog" is not in the path then it will be searched before + // searching any of the path items. 
See + // https://www.postgresql.org/docs/8.2/runtime-config-client.html + if (!hasPgCatalog) { + if (database.findSchema(pgCatalog) != null) { + list.add(0, pgCatalog); + } + } + command.setStringArray(list.toArray(new String[0])); + return command; + } + break; + default: } - throw DbException.getInvalidValueException("BINARY_COLLATION", name); - } - - private Set parseSetJavaObjectSerializer() { - Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER); - String name = readString(); - command.setString(name); - return command; + return null; } private RunScriptCommand parseRunScript() { RunScriptCommand command = new RunScriptCommand(session); - read("FROM"); + read(FROM); command.setFileNameExpr(readExpression()); if (readIf("COMPRESSION")) { - command.setCompressionAlgorithm(readUniqueIdentifier()); + command.setCompressionAlgorithm(readIdentifier()); } if (readIf("CIPHER")) { - command.setCipher(readUniqueIdentifier()); + command.setCipher(readIdentifier()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); } @@ -5172,18 +8242,32 @@ private RunScriptCommand parseRunScript() { if (readIf("CHARSET")) { command.setCharset(Charset.forName(readString())); } + if (readIf("FROM_1X")) { + command.setFrom1X(); + } else { + if (readIf("QUIRKS_MODE")) { + command.setQuirksMode(true); + } + if (readIf("VARIABLE_BINARY")) { + command.setVariableBinary(true); + } + } return command; } private ScriptCommand parseScript() { ScriptCommand command = new ScriptCommand(session); - boolean data = true, passwords = true, settings = true; - boolean dropTables = false, simple = false; - if (readIf("SIMPLE")) { - simple = true; - } + boolean data = true, passwords = true, settings = true, version = true; + boolean dropTables = false, simple = false, withColumns = false; if (readIf("NODATA")) { data = false; + } else { + if (readIf("SIMPLE")) { + simple = true; + } + if (readIf("COLUMNS")) { + withColumns = true; + } } if (readIf("NOPASSWORDS")) { passwords = 
false; @@ -5191,6 +8275,9 @@ private ScriptCommand parseScript() { if (readIf("NOSETTINGS")) { settings = false; } + if (readIf("NOVERSION")) { + version = false; + } if (readIf("DROP")) { dropTables = true; } @@ -5201,15 +8288,17 @@ private ScriptCommand parseScript() { command.setData(data); command.setPasswords(passwords); command.setSettings(settings); + command.setVersion(version); command.setDrop(dropTables); command.setSimple(simple); - if (readIf("TO")) { + command.setWithColumns(withColumns); + if (readIf(TO)) { command.setFileNameExpr(readExpression()); if (readIf("COMPRESSION")) { - command.setCompressionAlgorithm(readUniqueIdentifier()); + command.setCompressionAlgorithm(readIdentifier()); } if (readIf("CIPHER")) { - command.setCipher(readUniqueIdentifier()); + command.setCipher(readIdentifier()); if (readIf("PASSWORD")) { command.setPassword(readExpression()); } @@ -5219,60 +8308,143 @@ private ScriptCommand parseScript() { } } if (readIf("SCHEMA")) { - HashSet schemaNames = New.hashSet(); + HashSet schemaNames = new HashSet<>(); do { - schemaNames.add(readUniqueIdentifier()); - } while (readIf(",")); + schemaNames.add(readIdentifier()); + } while (readIf(COMMA)); command.setSchemaNames(schemaNames); - } else if (readIf("TABLE")) { - ArrayList tables = New.arrayList(); + } else if (readIf(TABLE)) { + ArrayList
    tables = Utils.newSmallArrayList(); do { tables.add(readTableOrView()); - } while (readIf(",")); + } while (readIf(COMMA)); command.setTables(tables); } return command; } + /** + * Is this the Oracle DUAL table or the IBM/DB2 SYSIBM table? + * + * @param tableName table name. + * @return {@code true} if the table is DUAL special table. Otherwise returns {@code false}. + * @see Wikipedia: DUAL table + */ + private boolean isDualTable(String tableName) { + return ((schemaName == null || equalsToken(schemaName, "SYS")) && equalsToken("DUAL", tableName)) + || (database.getMode().sysDummy1 && (schemaName == null || equalsToken(schemaName, "SYSIBM")) + && equalsToken("SYSDUMMY1", tableName)); + } + private Table readTableOrView() { return readTableOrView(readIdentifierWithSchema(null)); } private Table readTableOrView(String tableName) { - // same algorithm than readSequence if (schemaName != null) { - return getSchema().getTableOrView(session, tableName); + Table table = getSchema().resolveTableOrView(session, tableName); + if (table != null) { + return table; + } + } else { + Table table = database.getSchema(session.getCurrentSchemaName()) + .resolveTableOrView(session, tableName); + if (table != null) { + return table; + } + String[] schemaNames = session.getSchemaSearchPath(); + if (schemaNames != null) { + for (String name : schemaNames) { + Schema s = database.getSchema(name); + table = s.resolveTableOrView(session, tableName); + if (table != null) { + return table; + } + } + } } - Table table = database.getSchema(session.getCurrentSchemaName()) - .findTableOrView(session, tableName); - if (table != null) { - return table; + if (isDualTable(tableName)) { + return new DualTable(database); } - String[] schemaNames = session.getSchemaSearchPath(); - if (schemaNames != null) { - for (String name : schemaNames) { - Schema s = database.getSchema(name); - table = s.findTableOrView(session, tableName); - if (table != null) { - return table; - } + + throw 
getTableOrViewNotFoundDbException(tableName); + } + + private DbException getTableOrViewNotFoundDbException(String tableName) { + if (schemaName != null) { + return getTableOrViewNotFoundDbException(schemaName, tableName); + } + + String currentSchemaName = session.getCurrentSchemaName(); + String[] schemaSearchPath = session.getSchemaSearchPath(); + if (schemaSearchPath == null) { + return getTableOrViewNotFoundDbException(Collections.singleton(currentSchemaName), tableName); + } + + LinkedHashSet schemaNames = new LinkedHashSet<>(); + schemaNames.add(currentSchemaName); + schemaNames.addAll(Arrays.asList(schemaSearchPath)); + return getTableOrViewNotFoundDbException(schemaNames, tableName); + } + + private DbException getTableOrViewNotFoundDbException(String schemaName, String tableName) { + return getTableOrViewNotFoundDbException(Collections.singleton(schemaName), tableName); + } + + private DbException getTableOrViewNotFoundDbException( + java.util.Set schemaNames, String tableName) { + if (database == null || database.getFirstUserTable() == null) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, tableName); + } + + if (database.getSettings().caseInsensitiveIdentifiers) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + + java.util.Set candidates = new TreeSet<>(); + for (String schemaName : schemaNames) { + findTableNameCandidates(schemaName, tableName, candidates); + } + + if (candidates.isEmpty()) { + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + + return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2, + tableName, + String.join(", ", candidates)); + } + + private void findTableNameCandidates(String schemaName, String tableName, java.util.Set candidates) { + Schema schema = database.getSchema(schemaName); + String ucTableName = StringUtils.toUpperEnglish(tableName); + Collection
    allTablesAndViews = schema.getAllTablesAndViews(session); + for (Table candidate : allTablesAndViews) { + String candidateName = candidate.getName(); + if (ucTableName.equals(StringUtils.toUpperEnglish(candidateName))) { + candidates.add(candidateName); } } - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); } - private FunctionAlias findFunctionAlias(String schema, String aliasName) { - FunctionAlias functionAlias = database.getSchema(schema).findFunction( - aliasName); - if (functionAlias != null) { - return functionAlias; + private UserDefinedFunction findUserDefinedFunctionWithinPath(Schema schema, String name) { + if (schema != null) { + return schema.findFunctionOrAggregate(name); + } + schema = database.getSchema(session.getCurrentSchemaName()); + UserDefinedFunction userDefinedFunction = schema.findFunctionOrAggregate(name); + if (userDefinedFunction != null) { + return userDefinedFunction; } String[] schemaNames = session.getSchemaSearchPath(); if (schemaNames != null) { - for (String n : schemaNames) { - functionAlias = database.getSchema(n).findFunction(aliasName); - if (functionAlias != null) { - return functionAlias; + for (String schemaName : schemaNames) { + Schema schemaFromPath = database.getSchema(schemaName); + if (schemaFromPath != schema) { + userDefinedFunction = schemaFromPath.findFunctionOrAggregate(name); + if (userDefinedFunction != null) { + return userDefinedFunction; + } } } } @@ -5312,342 +8484,651 @@ private Sequence readSequence() { } private Prepared parseAlterTable() { - Table table = readTableOrView(); + boolean ifTableExists = readIfExists(false); + String tableName = readIdentifierWithSchema(); + Schema schema = getSchema(); if (readIf("ADD")) { - Prepared command = parseAlterTableAddConstraintIf(table.getName(), - table.getSchema()); + Prepared command = parseTableConstraintIf(tableName, schema, ifTableExists); if (command != null) { return command; } - return parseAlterTableAddColumn(table); - } else 
if (readIf("SET")) { - read("REFERENTIAL_INTEGRITY"); - int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY; - boolean value = readBooleanSetting(); - AlterTableSet command = new AlterTableSet(session, - table.getSchema(), type, value); - command.setTableName(table.getName()); - if (readIf("CHECK")) { - command.setCheckExisting(true); - } else if (readIf("NOCHECK")) { - command.setCheckExisting(false); + return parseAlterTableAddColumn(tableName, schema, ifTableExists); + } else if (readIf(SET)) { + return parseAlterTableSet(schema, tableName, ifTableExists); + } else if (readIf("RENAME")) { + return parseAlterTableRename(schema, tableName, ifTableExists); + } else if (readIf("DROP")) { + return parseAlterTableDrop(schema, tableName, ifTableExists); + } else if (readIf("ALTER")) { + return parseAlterTableAlter(schema, tableName, ifTableExists); + } else { + Mode mode = database.getMode(); + if (mode.alterTableExtensionsMySQL || mode.alterTableModifyColumn) { + return parseAlterTableCompatibility(schema, tableName, ifTableExists, mode); } + } + throw getSyntaxError(); + } + + private Prepared parseAlterTableAlter(Schema schema, String tableName, boolean ifTableExists) { + readIf("COLUMN"); + boolean ifExists = readIfExists(false); + String columnName = readIdentifier(); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); + if (readIf("RENAME")) { + read(TO); + AlterTableRenameColumn command = new AlterTableRenameColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setIfExists(ifExists); + command.setOldColumnName(columnName); + String newName = readIdentifier(); + command.setNewColumnName(newName); return command; - } else if (readIf("RENAME")) { - read("TO"); - String newName = readIdentifierWithSchema(table.getSchema() - .getName()); - checkSchema(table.getSchema()); + } else if (readIf("DROP")) { + if (readIf(DEFAULT)) { + if (readIf(ON)) { + 
read(NULL); + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL); + command.setBooleanFlag(false); + return command; + } + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); + } else if (readIf("EXPRESSION")) { + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION); + } else if (readIf("IDENTITY")) { + return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column, + CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY); + } + if (readIf(ON)) { + read("UPDATE"); + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); + command.setDefaultExpression(null); + return command; + } + read(NOT); + read(NULL); + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); + return command; + } else if (readIf("TYPE")) { + // PostgreSQL compatibility + return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists); + } else if (readIf("SELECTIVITY")) { + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY); + 
command.setOldColumn(column); + command.setSelectivity(readExpression()); + return command; + } + Prepared command = parseAlterTableAlterColumnIdentity(schema, tableName, ifTableExists, column); + if (command != null) { + return command; + } + if (readIf(SET)) { + return parseAlterTableAlterColumnSet(schema, tableName, ifTableExists, ifExists, columnName, column); + } + return parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, ifExists, true); + } + + private Prepared getAlterTableAlterColumnDropDefaultExpression(Schema schema, String tableName, + boolean ifTableExists, Column column, int type) { + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + command.setType(type); + command.setDefaultExpression(null); + return command; + } + + private Prepared parseAlterTableAlterColumnIdentity(Schema schema, String tableName, boolean ifTableExists, + Column column) { + int index = tokenIndex; + Boolean always = null; + if (readIf(SET) && readIf("GENERATED")) { + if (readIf("ALWAYS")) { + always = true; + } else { + read("BY"); + read(DEFAULT); + always = false; + } + } else { + setTokenIndex(index); + } + SequenceOptions options = new SequenceOptions(); + if (!parseSequenceOptions(options, null, false, true) && always == null) { + return null; + } + if (column == null) { + return new NoOperation(session); + } + if (!column.isIdentity()) { + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + parseAlterColumnUsingIf(command); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); + command.setOldColumn(column); + Column newColumn = column.getClone(); + newColumn.setIdentityOptions(options, always != null && always); + command.setNewColumn(newColumn); + return command; + } + AlterSequence 
command = new AlterSequence(session, schema); + command.setColumn(column, always); + command.setOptions(options); + return commandIfTableExists(schema, tableName, ifTableExists, command); + } + + private Prepared parseAlterTableAlterColumnSet(Schema schema, String tableName, boolean ifTableExists, + boolean ifExists, String columnName, Column column) { + if (readIf("DATA")) { + read("TYPE"); + return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists); + } + AlterTableAlterColumn command = new AlterTableAlterColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumn(column); + NullConstraintType nullConstraint = parseNotNullConstraint(); + switch (nullConstraint) { + case NULL_IS_ALLOWED: + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); + break; + case NULL_IS_NOT_ALLOWED: + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL); + break; + case NO_NULL_CONSTRAINT_FOUND: + if (readIf(DEFAULT)) { + if (readIf(ON)) { + read(NULL); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL); + command.setBooleanFlag(true); + break; + } + Expression defaultExpression = readExpression(); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); + command.setDefaultExpression(defaultExpression); + } else if (readIf(ON)) { + read("UPDATE"); + Expression onUpdateExpression = readExpression(); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE); + command.setDefaultExpression(onUpdateExpression); + } else if (readIf("INVISIBLE")) { + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); + command.setBooleanFlag(false); + } else if (readIf("VISIBLE")) { + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY); + command.setBooleanFlag(true); + } + break; + default: + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, + "Internal Error - unhandled case: 
" + nullConstraint.name()); + } + return command; + } + + private Prepared parseAlterTableDrop(Schema schema, String tableName, boolean ifTableExists) { + if (readIf(CONSTRAINT)) { + boolean ifExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + ifExists = readIfExists(ifExists); + checkSchema(schema); + AlterTableDropConstraint command = new AlterTableDropConstraint(session, getSchema(), ifExists); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + ConstraintActionType dropAction = parseCascadeOrRestrict(); + if (dropAction != null) { + command.setDropAction(dropAction); + } + return command; + } else if (readIf(PRIMARY)) { + read(KEY); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + if (table == null) { + return new NoOperation(session); + } + Index idx = table.getPrimaryKey(); + DropIndex command = new DropIndex(session, schema); + command.setIndexName(idx.getName()); + return command; + } else if (database.getMode().alterTableExtensionsMySQL) { + Prepared command = parseAlterTableDropCompatibility(schema, tableName, ifTableExists); + if (command != null) { + return command; + } + } + readIf("COLUMN"); + boolean ifExists = readIfExists(false); + ArrayList columnsToRemove = new ArrayList<>(); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + // For Oracle compatibility - open bracket required + boolean openingBracketDetected = readIf(OPEN_PAREN); + do { + String columnName = readIdentifier(); + if (table != null) { + Column column = table.getColumn(columnName, ifExists); + if (column != null) { + columnsToRemove.add(column); + } + } + } while (readIf(COMMA)); + if (openingBracketDetected) { + // For Oracle compatibility - close bracket + read(CLOSE_PAREN); + } + if (table == null || columnsToRemove.isEmpty()) { + return new NoOperation(session); + } + AlterTableAlterColumn command = new 
AlterTableAlterColumn(session, schema); + command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setColumnsToRemove(columnsToRemove); + return command; + } + + private Prepared parseAlterTableDropCompatibility(Schema schema, String tableName, boolean ifTableExists) { + if (readIf(FOREIGN)) { + read(KEY); + // For MariaDB + boolean ifExists = readIfExists(false); + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + AlterTableDropConstraint command = new AlterTableDropConstraint(session, getSchema(), ifExists); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + return command; + } else if (readIf("INDEX")) { + // For MariaDB + boolean ifExists = readIfExists(false); + String indexOrConstraintName = readIdentifierWithSchema(schema.getName()); + if (schema.findIndex(session, indexOrConstraintName) != null) { + DropIndex dropIndexCommand = new DropIndex(session, getSchema()); + dropIndexCommand.setIndexName(indexOrConstraintName); + return commandIfTableExists(schema, tableName, ifTableExists, dropIndexCommand); + } else { + AlterTableDropConstraint dropCommand = new AlterTableDropConstraint(session, getSchema(), ifExists); + dropCommand.setTableName(tableName); + dropCommand.setIfTableExists(ifTableExists); + dropCommand.setConstraintName(indexOrConstraintName); + return dropCommand; + } + } + return null; + } + + private Prepared parseAlterTableRename(Schema schema, String tableName, boolean ifTableExists) { + if (readIf("COLUMN")) { + // PostgreSQL syntax + String columnName = readIdentifier(); + read(TO); + AlterTableRenameColumn command = new AlterTableRenameColumn( + session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumnName(columnName); + command.setNewColumnName(readIdentifier()); + return 
command; + } else if (readIf(CONSTRAINT)) { + String constraintName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); + read(TO); + AlterTableRenameConstraint command = new AlterTableRenameConstraint(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setConstraintName(constraintName); + command.setNewConstraintName(readIdentifier()); + return command; + } else { + read(TO); + String newName = readIdentifierWithSchema(schema.getName()); + checkSchema(schema); AlterTableRename command = new AlterTableRename(session, getSchema()); - command.setOldTable(table); + command.setTableName(tableName); command.setNewTableName(newName); + command.setIfTableExists(ifTableExists); command.setHidden(readIf("HIDDEN")); return command; - } else if (readIf("DROP")) { - if (readIf("CONSTRAINT")) { - boolean ifExists = readIfExists(false); - String constraintName = readIdentifierWithSchema(table - .getSchema().getName()); - ifExists = readIfExists(ifExists); - checkSchema(table.getSchema()); - AlterTableDropConstraint command = new AlterTableDropConstraint( - session, getSchema(), ifExists); - command.setConstraintName(constraintName); - return command; - } else if (readIf("FOREIGN")) { - // MySQL compatibility - read("KEY"); - String constraintName = readIdentifierWithSchema(table - .getSchema().getName()); - checkSchema(table.getSchema()); - AlterTableDropConstraint command = new AlterTableDropConstraint( - session, getSchema(), false); - command.setConstraintName(constraintName); - return command; - } else if (readIf("INDEX")) { - // MySQL compatibility - String indexName = readIdentifierWithSchema(); - DropIndex command = new DropIndex(session, getSchema()); - command.setIndexName(indexName); - return command; - } else if (readIf("PRIMARY")) { - read("KEY"); - Index idx = table.getPrimaryKey(); - DropIndex command = new DropIndex(session, table.getSchema()); - command.setIndexName(idx.getName()); - return 
command; - } else { - readIf("COLUMN"); - boolean ifExists = readIfExists(false); - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, table.getSchema()); - command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN); - String columnName = readColumnIdentifier(); - command.setTable(table); - if (ifExists && !table.doesColumnExist(columnName)) { + } + } + + private Prepared parseAlterTableSet(Schema schema, String tableName, boolean ifTableExists) { + read("REFERENTIAL_INTEGRITY"); + int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY; + boolean value = readBooleanSetting(); + AlterTableSet command = new AlterTableSet(session, + schema, type, value); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + if (readIf(CHECK)) { + command.setCheckExisting(true); + } else if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } + return command; + } + + private Prepared parseAlterTableCompatibility(Schema schema, String tableName, boolean ifTableExists, Mode mode) { + if (mode.alterTableExtensionsMySQL) { + if (readIf("AUTO_INCREMENT")) { + readIf(EQUAL); + Expression restart = readExpression(); + Table table = tableIfTableExists(schema, tableName, ifTableExists); + if (table == null) { return new NoOperation(session); } - command.setOldColumn(table.getColumn(columnName)); - return command; + Index idx = table.findPrimaryKey(); + if (idx != null) { + for (IndexColumn ic : idx.getIndexColumns()) { + Column column = ic.column; + if (column.isIdentity()) { + AlterSequence command = new AlterSequence(session, schema); + command.setColumn(column, null); + SequenceOptions options = new SequenceOptions(); + options.setRestartValue(restart); + command.setOptions(options); + return command; + } + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY"); + } else if (readIf("CHANGE")) { + readIf("COLUMN"); + String columnName = readIdentifier(); + String newColumnName = 
readIdentifier(); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false); + boolean nullable = column == null ? true : column.isNullable(); + // new column type ignored. RENAME and MODIFY are + // a single command in MySQL but two different commands in H2. + parseColumnForTable(newColumnName, nullable); + AlterTableRenameColumn command = new AlterTableRenameColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setOldColumnName(columnName); + command.setNewColumnName(newColumnName); + return command; + } else if (readIf("CONVERT")) { + readIf(TO); + readIf("CHARACTER"); + readIf(SET); + readMySQLCharset(); + + if (readIf("COLLATE")) { + readMySQLCharset(); + } + + return new NoOperation(session); } - } else if (readIf("CHANGE")) { - // MySQL compatibility - readIf("COLUMN"); - String columnName = readColumnIdentifier(); - Column column = table.getColumn(columnName); - String newColumnName = readColumnIdentifier(); - // new column type ignored. RENAME and MODIFY are - // a single command in MySQL but two different commands in H2. 
- parseColumnForTable(newColumnName, column.isNullable()); - AlterTableRenameColumn command = new AlterTableRenameColumn(session); - command.setTable(table); - command.setColumn(column); - command.setNewColumnName(newColumnName); - return command; - } else if (readIf("MODIFY")) { - // MySQL compatibility - readIf("COLUMN"); - String columnName = readColumnIdentifier(); - Column column = table.getColumn(columnName); - return parseAlterTableAlterColumnType(table, columnName, column); - } else if (readIf("ALTER")) { + } + if (mode.alterTableModifyColumn && readIf("MODIFY")) { + // MySQL compatibility (optional) readIf("COLUMN"); - String columnName = readColumnIdentifier(); - Column column = table.getColumn(columnName); - if (readIf("RENAME")) { - read("TO"); - AlterTableRenameColumn command = new AlterTableRenameColumn( - session); - command.setTable(table); - command.setColumn(column); - String newName = readColumnIdentifier(); - command.setNewColumnName(newName); - return command; - } else if (readIf("DROP")) { - // PostgreSQL compatibility - if (readIf("DEFAULT")) { - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, table.getSchema()); - command.setTable(table); - command.setOldColumn(column); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); - command.setDefaultExpression(null); - return command; - } - read("NOT"); - read("NULL"); - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, table.getSchema()); - command.setTable(table); - command.setOldColumn(column); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL); - return command; - } else if (readIf("TYPE")) { - // PostgreSQL compatibility - return parseAlterTableAlterColumnType(table, columnName, column); - } else if (readIf("SET")) { - if (readIf("DATA")) { - // Derby compatibility - read("TYPE"); - return parseAlterTableAlterColumnType(table, columnName, - column); - } - AlterTableAlterColumn command = new AlterTableAlterColumn( - 
session, table.getSchema()); - command.setTable(table); + // Oracle specifies (but will not require) an opening parenthesis + boolean hasOpeningBracket = readIf(OPEN_PAREN); + String columnName = readIdentifier(); + AlterTableAlterColumn command; + NullConstraintType nullConstraint = parseNotNullConstraint(); + switch (nullConstraint) { + case NULL_IS_ALLOWED: + case NULL_IS_NOT_ALLOWED: + command = new AlterTableAlterColumn(session, schema); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false); command.setOldColumn(column); - if (readIf("NULL")) { - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL); - return command; - } else if (readIf("NOT")) { - read("NULL"); + if (nullConstraint == NullConstraintType.NULL_IS_ALLOWED) { + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL); + } else { command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL); - return command; - } else if (readIf("DEFAULT")) { - Expression defaultExpression = readExpression(); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT); - command.setDefaultExpression(defaultExpression); - return command; } - } else if (readIf("RESTART")) { - readIf("WITH"); - Expression start = readExpression(); - AlterSequence command = new AlterSequence(session, - table.getSchema()); - command.setColumn(column); - command.setStartWith(start); - return command; - } else if (readIf("SELECTIVITY")) { - AlterTableAlterColumn command = new AlterTableAlterColumn( - session, table.getSchema()); - command.setTable(table); - command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY); - command.setOldColumn(column); - command.setSelectivity(readExpression()); - return command; - } else { - return parseAlterTableAlterColumnType(table, columnName, column); + break; + case NO_NULL_CONSTRAINT_FOUND: + command = 
parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, false, + mode.getEnum() != ModeEnum.MySQL); + break; + default: + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, + "Internal Error - unhandled case: " + nullConstraint.name()); } + if (hasOpeningBracket) { + read(CLOSE_PAREN); + } + return command; } throw getSyntaxError(); } - private AlterTableAlterColumn parseAlterTableAlterColumnType(Table table, - String columnName, Column column) { - Column newColumn = parseColumnForTable(columnName, column.isNullable()); - AlterTableAlterColumn command = new AlterTableAlterColumn(session, - table.getSchema()); - command.setTable(table); + private Table tableIfTableExists(Schema schema, String tableName, boolean ifTableExists) { + Table table = schema.resolveTableOrView(session, tableName); + if (table == null && !ifTableExists) { + throw getTableOrViewNotFoundDbException(schema.getName(), tableName); + } + return table; + } + + private Column columnIfTableExists(Schema schema, String tableName, + String columnName, boolean ifTableExists, boolean ifExists) { + Table table = tableIfTableExists(schema, tableName, ifTableExists); + if (table == null) { + return null; + } + return table.getColumn(columnName, ifExists); + } + + private Prepared commandIfTableExists(Schema schema, String tableName, + boolean ifTableExists, Prepared commandIfTableExists) { + return tableIfTableExists(schema, tableName, ifTableExists) == null + ? 
new NoOperation(session) + : commandIfTableExists; + } + + private AlterTableAlterColumn parseAlterTableAlterColumnType(Schema schema, + String tableName, String columnName, boolean ifTableExists, boolean ifExists, boolean preserveNotNull) { + Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); + Column newColumn = parseColumnForTable(columnName, + !preserveNotNull || oldColumn == null || oldColumn.isNullable()); + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + parseAlterColumnUsingIf(command); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); - command.setOldColumn(column); + command.setOldColumn(oldColumn); + command.setNewColumn(newColumn); + return command; + } + + private AlterTableAlterColumn parseAlterTableAlterColumnDataType(Schema schema, + String tableName, String columnName, boolean ifTableExists, boolean ifExists) { + Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists); + Column newColumn = parseColumnWithType(columnName); + if (oldColumn != null) { + if (!oldColumn.isNullable()) { + newColumn.setNullable(false); + } + if (!oldColumn.getVisible()) { + newColumn.setVisible(false); + } + Expression e = oldColumn.getDefaultExpression(); + if (e != null) { + if (oldColumn.isGenerated()) { + newColumn.setGeneratedExpression(e); + } else { + newColumn.setDefaultExpression(session, e); + } + } + e = oldColumn.getOnUpdateExpression(); + if (e != null) { + newColumn.setOnUpdateExpression(session, e); + } + Sequence s = oldColumn.getSequence(); + if (s != null) { + newColumn.setIdentityOptions(new SequenceOptions(s, newColumn.getType()), + oldColumn.isGeneratedAlways()); + } + String c = oldColumn.getComment(); + if (c != null) { + newColumn.setComment(c); + } + } + AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); + 
parseAlterColumnUsingIf(command); + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE); + command.setOldColumn(oldColumn); command.setNewColumn(newColumn); return command; } - private AlterTableAlterColumn parseAlterTableAddColumn(Table table) { + private AlterTableAlterColumn parseAlterTableAddColumn(String tableName, + Schema schema, boolean ifTableExists) { readIf("COLUMN"); - Schema schema = table.getSchema(); AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema); command.setType(CommandInterface.ALTER_TABLE_ADD_COLUMN); - command.setTable(table); - ArrayList columnsToAdd = New.arrayList(); - if (readIf("(")) { + command.setTableName(tableName); + command.setIfTableExists(ifTableExists); + if (readIf(OPEN_PAREN)) { command.setIfNotExists(false); do { - String columnName = readColumnIdentifier(); - Column column = parseColumnForTable(columnName, true); - columnsToAdd.add(column); - } while (readIf(",")); - read(")"); - command.setNewColumns(columnsToAdd); + parseTableColumnDefinition(command, schema, tableName, false); + } while (readIfMore()); } else { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); command.setIfNotExists(ifNotExists); - String columnName = readColumnIdentifier(); - Column column = parseColumnForTable(columnName, true); - columnsToAdd.add(column); - if (readIf("BEFORE")) { - command.setAddBefore(readColumnIdentifier()); - } else if (readIf("AFTER")) { - command.setAddAfter(readColumnIdentifier()); - } + parseTableColumnDefinition(command, schema, tableName, false); + parseAlterColumnUsingIf(command); + } + if (readIf("BEFORE")) { + command.setAddBefore(readIdentifier()); + } else if (readIf("AFTER")) { + command.setAddAfter(readIdentifier()); + } else if (readIf("FIRST")) { + command.setAddFirst(); } - command.setNewColumns(columnsToAdd); return command; } - private int parseAction() { - 
Integer result = parseCascadeOrRestrict(); + private void parseAlterColumnUsingIf(AlterTableAlterColumn command) { + if (readIf(USING)) { + command.setUsingExpression(readExpression()); + } + } + + private ConstraintActionType parseAction() { + ConstraintActionType result = parseCascadeOrRestrict(); if (result != null) { return result; } if (readIf("NO")) { read("ACTION"); - return ConstraintReferential.RESTRICT; + return ConstraintActionType.RESTRICT; } - read("SET"); - if (readIf("NULL")) { - return ConstraintReferential.SET_NULL; + read(SET); + if (readIf(NULL)) { + return ConstraintActionType.SET_NULL; } - read("DEFAULT"); - return ConstraintReferential.SET_DEFAULT; + read(DEFAULT); + return ConstraintActionType.SET_DEFAULT; } - private Integer parseCascadeOrRestrict() { + private ConstraintActionType parseCascadeOrRestrict() { if (readIf("CASCADE")) { - return ConstraintReferential.CASCADE; + return ConstraintActionType.CASCADE; } else if (readIf("RESTRICT")) { - return ConstraintReferential.RESTRICT; + return ConstraintActionType.RESTRICT; } else { return null; } } - private DefineCommand parseAlterTableAddConstraintIf(String tableName, - Schema schema) { + private DefineCommand parseTableConstraintIf(String tableName, Schema schema, boolean ifTableExists) { String constraintName = null, comment = null; boolean ifNotExists = false; - boolean allowIndexDefinition = database.getMode().indexDefinitionInCreateTable; - if (readIf("CONSTRAINT")) { - ifNotExists = readIfNoExists(); + if (readIf(CONSTRAINT)) { + ifNotExists = readIfNotExists(); constraintName = readIdentifierWithSchema(schema.getName()); checkSchema(schema); comment = readCommentIf(); - allowIndexDefinition = true; } - if (readIf("PRIMARY")) { - read("KEY"); - AlterTableAddConstraint command = new AlterTableAddConstraint( - session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); - command.setComment(comment); - 
command.setConstraintName(constraintName); - command.setTableName(tableName); + AlterTableAddConstraint command; + switch (currentTokenType) { + case PRIMARY: + read(); + read(KEY); + command = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, ifNotExists); if (readIf("HASH")) { command.setPrimaryKeyHash(true); } - read("("); + read(OPEN_PAREN); command.setIndexColumns(parseIndexColumnList()); if (readIf("INDEX")) { String indexName = readIdentifierWithSchema(); command.setIndex(getSchema().findIndex(session, indexName)); } - return command; - } else if (allowIndexDefinition && (isToken("INDEX") || isToken("KEY"))) { - // MySQL - // need to read ahead, as it could be a column name - int start = lastParseIndex; + break; + case UNIQUE: read(); - if (DataType.getTypeByName(currentToken) != null) { - // known data type - parseIndex = start; - read(); - return null; - } - CreateIndex command = new CreateIndex(session, schema); - command.setComment(comment); - command.setTableName(tableName); - if (!readIf("(")) { - command.setIndexName(readUniqueIdentifier()); - read("("); - } - command.setIndexColumns(parseIndexColumnList()); // MySQL compatibility - if (readIf("USING")) { - read("BTREE"); + boolean compatibility = database.getMode().indexDefinitionInCreateTable; + if (compatibility) { + if (!readIf(KEY)) { + readIf("INDEX"); + } + if (!isToken(OPEN_PAREN)) { + constraintName = readIdentifier(); + } } - return command; - } - AlterTableAddConstraint command; - if (readIf("CHECK")) { - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK); - command.setCheckExpression(readExpression()); - } else if (readIf("UNIQUE")) { - readIf("KEY"); - readIf("INDEX"); - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE); - if (!readIf("(")) { - constraintName = 
readUniqueIdentifier(); - read("("); + read(OPEN_PAREN); + command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE, + ifNotExists); + if (readIf(VALUE)) { + read(CLOSE_PAREN); + command.setIndexColumns(null); + } else { + command.setIndexColumns(parseIndexColumnList()); } - command.setIndexColumns(parseIndexColumnList()); if (readIf("INDEX")) { String indexName = readIdentifierWithSchema(); command.setIndex(getSchema().findIndex(session, indexName)); } - // MySQL compatibility - if (readIf("USING")) { + if (compatibility && readIf(USING)) { read("BTREE"); } - } else if (readIf("FOREIGN")) { - command = new AlterTableAddConstraint(session, schema, ifNotExists); - command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL); - read("KEY"); - read("("); + break; + case FOREIGN: + read(); + command = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, ifNotExists); + read(KEY); + read(OPEN_PAREN); command.setIndexColumns(parseIndexColumnList()); if (readIf("INDEX")) { String indexName = readIdentifierWithSchema(); @@ -5655,19 +9136,60 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName, } read("REFERENCES"); parseReferences(command, schema, tableName); - } else { - if (constraintName != null) { + break; + case CHECK: + read(); + command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, + ifNotExists); + command.setCheckExpression(readExpression()); + break; + default: + if (constraintName == null) { + Mode mode = database.getMode(); + if (mode.indexDefinitionInCreateTable) { + int start = tokenIndex; + if (readIf(KEY) || readIf("INDEX")) { + // MySQL + // need to read ahead, as it could be a column name + if (DataType.getTypeByName(currentToken, mode) == null) { + CreateIndex createIndex = new CreateIndex(session, schema); + createIndex.setComment(comment); + 
createIndex.setTableName(tableName); + createIndex.setIfTableExists(ifTableExists); + if (!readIf(OPEN_PAREN)) { + createIndex.setIndexName(readIdentifier()); + read(OPEN_PAREN); + } + createIndex.setIndexColumns(parseIndexColumnList()); + // MySQL compatibility + if (readIf(USING)) { + read("BTREE"); + } + return createIndex; + } else { + // known data type + setTokenIndex(start); + } + } + } + return null; + } else { + if (expectedList != null) { + addMultipleExpected(PRIMARY, UNIQUE, FOREIGN, CHECK); + } throw getSyntaxError(); } - return null; } - if (readIf("NOCHECK")) { - command.setCheckExisting(false); - } else { - readIf("CHECK"); - command.setCheckExisting(true); + if (command.getType() != CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) { + if (readIf("NOCHECK")) { + command.setCheckExisting(false); + } else { + readIf(CHECK); + command.setCheckExisting(true); + } } command.setTableName(tableName); + command.setIfTableExists(ifTableExists); command.setConstraintName(constraintName); command.setComment(comment); return command; @@ -5675,13 +9197,13 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName, private void parseReferences(AlterTableAddConstraint command, Schema schema, String tableName) { - if (readIf("(")) { + if (readIf(OPEN_PAREN)) { command.setRefTableName(schema, tableName); command.setRefIndexColumns(parseIndexColumnList()); } else { String refTableName = readIdentifierWithSchema(schema.getName()); command.setRefTableName(getSchema(), refTableName); - if (readIf("(")) { + if (readIf(OPEN_PAREN)) { command.setRefIndexColumns(parseIndexColumnList()); } } @@ -5689,7 +9211,7 @@ private void parseReferences(AlterTableAddConstraint command, String indexName = readIdentifierWithSchema(); command.setRefIndex(getSchema().findIndex(session, indexName)); } - while (readIf("ON")) { + while (readIf(ON)) { if (readIf("DELETE")) { command.setDeleteAction(parseAction()); } else { @@ -5697,7 +9219,7 @@ private void 
parseReferences(AlterTableAddConstraint command, command.setUpdateAction(parseAction()); } } - if (readIf("NOT")) { + if (readIf(NOT)) { read("DEFERRABLE"); } else { readIf("DEFERRABLE"); @@ -5706,8 +9228,8 @@ private void parseReferences(AlterTableAddConstraint command, private CreateLinkedTable parseCreateLinkedTable(boolean temp, boolean globalTemp, boolean force) { - read("TABLE"); - boolean ifNotExists = readIfNoExists(); + read(TABLE); + boolean ifNotExists = readIfNotExists(); String tableName = readIdentifierWithSchema(); CreateLinkedTable command = new CreateLinkedTable(session, getSchema()); command.setTemporary(temp); @@ -5716,34 +9238,45 @@ private CreateLinkedTable parseCreateLinkedTable(boolean temp, command.setIfNotExists(ifNotExists); command.setTableName(tableName); command.setComment(readCommentIf()); - read("("); + read(OPEN_PAREN); command.setDriver(readString()); - read(","); + read(COMMA); command.setUrl(readString()); - read(","); + read(COMMA); command.setUser(readString()); - read(","); + read(COMMA); command.setPassword(readString()); - read(","); + read(COMMA); String originalTable = readString(); - if (readIf(",")) { + if (readIf(COMMA)) { command.setOriginalSchema(originalTable); originalTable = readString(); } command.setOriginalTable(originalTable); - read(")"); + read(CLOSE_PAREN); if (readIf("EMIT")) { read("UPDATES"); command.setEmitUpdates(true); } else if (readIf("READONLY")) { command.setReadOnly(true); } + if (readIf("FETCH_SIZE")) { + command.setFetchSize(readNonNegativeInt()); + } + if(readIf("AUTOCOMMIT")){ + if(readIf("ON")) { + command.setAutoCommit(true); + } + else if(readIf("OFF")){ + command.setAutoCommit(false); + } + } return command; } private CreateTable parseCreateTable(boolean temp, boolean globalTemp, boolean persistIndexes) { - boolean ifNotExists = readIfNoExists(); + boolean ifNotExists = readIfNotExists(); String tableName = readIdentifierWithSchema(); if (temp && globalTemp && equalsToken("SESSION", 
schemaName)) { // support weird syntax: declare global temporary table session.xy @@ -5759,130 +9292,24 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp, command.setIfNotExists(ifNotExists); command.setTableName(tableName); command.setComment(readCommentIf()); - if (readIf("(")) { - if (!readIf(")")) { + if (readIf(OPEN_PAREN)) { + if (!readIf(CLOSE_PAREN)) { do { - DefineCommand c = parseAlterTableAddConstraintIf(tableName, - schema); - if (c != null) { - command.addConstraintCommand(c); - } else { - String columnName = readColumnIdentifier(); - Column column = parseColumnForTable(columnName, true); - if (column.isAutoIncrement() && column.isPrimaryKey()) { - column.setPrimaryKey(false); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = column.getName(); - AlterTableAddConstraint pk = new AlterTableAddConstraint( - session, schema, false); - pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); - pk.setTableName(tableName); - pk.setIndexColumns(cols); - command.addConstraintCommand(pk); - } - command.addColumn(column); - String constraintName = null; - if (readIf("CONSTRAINT")) { - constraintName = readColumnIdentifier(); - } - if (readIf("PRIMARY")) { - read("KEY"); - boolean hash = readIf("HASH"); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = column.getName(); - AlterTableAddConstraint pk = new AlterTableAddConstraint( - session, schema, false); - pk.setPrimaryKeyHash(hash); - pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY); - pk.setTableName(tableName); - pk.setIndexColumns(cols); - command.addConstraintCommand(pk); - if (readIf("AUTO_INCREMENT")) { - parseAutoIncrement(column); - } - } else if (readIf("UNIQUE")) { - AlterTableAddConstraint unique = new AlterTableAddConstraint( - session, schema, false); - unique.setConstraintName(constraintName); - unique.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE); - IndexColumn[] cols = { new IndexColumn() }; 
- cols[0].columnName = columnName; - unique.setIndexColumns(cols); - unique.setTableName(tableName); - command.addConstraintCommand(unique); - } - if (readIf("NOT")) { - read("NULL"); - column.setNullable(false); - } else { - readIf("NULL"); - } - if (readIf("CHECK")) { - Expression expr = readExpression(); - column.addCheckConstraint(session, expr); - } - if (readIf("REFERENCES")) { - AlterTableAddConstraint ref = new AlterTableAddConstraint( - session, schema, false); - ref.setConstraintName(constraintName); - ref.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL); - IndexColumn[] cols = { new IndexColumn() }; - cols[0].columnName = columnName; - ref.setIndexColumns(cols); - ref.setTableName(tableName); - parseReferences(ref, schema, tableName); - command.addConstraintCommand(ref); - } - } + parseTableColumnDefinition(command, schema, tableName, true); } while (readIfMore()); } } - // Allows "COMMENT='comment'" in DDL statements (MySQL syntax) - if (readIf("COMMENT")) { - if (readIf("=")) { - // read the complete string comment, but nothing with it for now - readString(); - } + if (database.getMode().getEnum() == ModeEnum.MySQL) { + parseCreateTableMySQLTableOptions(command); } if (readIf("ENGINE")) { - if (readIf("=")) { - // map MySQL engine types onto H2 behavior - String tableEngine = readUniqueIdentifier(); - if ("InnoDb".equalsIgnoreCase(tableEngine)) { - // ok - } else if (!"MyISAM".equalsIgnoreCase(tableEngine)) { - throw DbException.getUnsupportedException(tableEngine); - } - } else { - command.setTableEngine(readUniqueIdentifier()); - if (readIf("WITH")) { - ArrayList tableEngineParams = New.arrayList(); - do { - tableEngineParams.add(readUniqueIdentifier()); - } while (readIf(",")); - command.setTableEngineParams(tableEngineParams); - } - } - } else if (database.getSettings().defaultTableEngine != null) { - command.setTableEngine(database.getSettings().defaultTableEngine); - } - // MySQL compatibility - if (readIf("AUTO_INCREMENT")) { - 
read("="); - if (currentTokenType != VALUE || - currentValue.getType() != Value.INT) { - throw DbException.getSyntaxError(sqlCommand, parseIndex, - "integer"); - } - read(); + command.setTableEngine(readIdentifier()); } - readIf("DEFAULT"); - if (readIf("CHARSET")) { - read("="); - read("UTF8"); + if (readIf(WITH)) { + command.setTableEngineParams(readTableEngineParams()); } if (temp) { - if (readIf("ON")) { + if (readIf(ON)) { read("COMMIT"); if (readIf("DROP")) { command.setOnCommitDrop(); @@ -5890,7 +9317,7 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp, read("ROWS"); command.setOnCommitTruncate(); } - } else if (readIf("NOT")) { + } else if (readIf(NOT)) { if (readIf("PERSISTENT")) { command.setPersistData(false); } else { @@ -5900,25 +9327,268 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp, if (readIf("TRANSACTIONAL")) { command.setTransactional(true); } - } else if (!persistIndexes && readIf("NOT")) { + } else if (!persistIndexes && readIf(NOT)) { read("PERSISTENT"); command.setPersistData(false); } if (readIf("HIDDEN")) { command.setHidden(true); } - if (readIf("AS")) { - if (readIf("SORTED")) { - command.setSortedInsertMode(true); + if (readIf(AS)) { + readIf("SORTED"); + command.setQuery(parseQuery()); + if (readIf(WITH)) { + command.setWithNoData(readIf("NO")); + read("DATA"); + } + } + return command; + } + + private void parseTableColumnDefinition(CommandWithColumns command, Schema schema, String tableName, + boolean forCreateTable) { + DefineCommand c = parseTableConstraintIf(tableName, schema, false); + if (c != null) { + command.addConstraintCommand(c); + return; + } + String columnName = readIdentifier(); + if (forCreateTable && (currentTokenType == COMMA || currentTokenType == CLOSE_PAREN)) { + command.addColumn(new Column(columnName, TypeInfo.TYPE_UNKNOWN)); + return; + } + Column column = parseColumnForTable(columnName, true); + if (column.hasIdentityOptions() && column.isPrimaryKey()) { + 
command.addConstraintCommand(newPrimaryKeyConstraintCommand(session, schema, tableName, column)); + } + command.addColumn(column); + readColumnConstraints(command, schema, tableName, column); + } + + /** + * Create a new alter table command. + * + * @param session the session + * @param schema the schema + * @param tableName the table + * @param column the column + * @return the command + */ + public static AlterTableAddConstraint newPrimaryKeyConstraintCommand(SessionLocal session, Schema schema, + String tableName, Column column) { + column.setPrimaryKey(false); + AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false); + pk.setTableName(tableName); + pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); + return pk; + } + + private void readColumnConstraints(CommandWithColumns command, Schema schema, String tableName, Column column) { + String comment = column.getComment(); + boolean hasPrimaryKey = false, hasNotNull = false; + NullConstraintType nullType; + Mode mode = database.getMode(); + for (;;) { + String constraintName; + if (readIf(CONSTRAINT)) { + constraintName = readIdentifier(); + } else if (comment == null && (comment = readCommentIf()) != null) { + // Compatibility: COMMENT may be specified appear after some constraint + column.setComment(comment); + continue; + } else { + constraintName = null; + } + if (!hasPrimaryKey && readIf(PRIMARY)) { + read(KEY); + hasPrimaryKey = true; + boolean hash = readIf("HASH"); + AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false); + pk.setConstraintName(constraintName); + pk.setPrimaryKeyHash(hash); + pk.setTableName(tableName); + pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); + command.addConstraintCommand(pk); + } else if (readIf(UNIQUE)) { + AlterTableAddConstraint unique = new 
AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE, false); + unique.setConstraintName(constraintName); + unique.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); + unique.setTableName(tableName); + command.addConstraintCommand(unique); + } else if (!hasNotNull + && (nullType = parseNotNullConstraint()) != NullConstraintType.NO_NULL_CONSTRAINT_FOUND) { + hasNotNull = true; + if (nullType == NullConstraintType.NULL_IS_NOT_ALLOWED) { + column.setNullable(false); + } else if (nullType == NullConstraintType.NULL_IS_ALLOWED) { + if (column.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); + } + column.setNullable(true); + } + } else if (readIf(CHECK)) { + AlterTableAddConstraint check = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, false); + check.setConstraintName(constraintName); + check.setTableName(tableName); + check.setCheckExpression(readExpression()); + command.addConstraintCommand(check); + } else if (readIf("REFERENCES")) { + AlterTableAddConstraint ref = new AlterTableAddConstraint(session, schema, + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, false); + ref.setConstraintName(constraintName); + ref.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) }); + ref.setTableName(tableName); + parseReferences(ref, schema, tableName); + command.addConstraintCommand(ref); + } else if (constraintName == null) { + if (column.getIdentityOptions() != null || !parseCompatibilityIdentity(column, mode)) { + return; + } + } else { + throw getSyntaxError(); + } + } + } + + private boolean parseCompatibilityIdentity(Column column, Mode mode) { + if (mode.autoIncrementClause && readIf("AUTO_INCREMENT")) { + parseCompatibilityIdentityOptions(column); + return true; + } + if (mode.identityClause && readIf("IDENTITY")) { + parseCompatibilityIdentityOptions(column); + return true; + } + 
return false; + } + + private void parseCreateTableMySQLTableOptions(CreateTable command) { + boolean requireNext = false; + for (;;) { + if (readIf("AUTO_INCREMENT")) { + readIf(EQUAL); + Expression value = readExpression(); + set: { + AlterTableAddConstraint primaryKey = command.getPrimaryKey(); + if (primaryKey != null) { + for (IndexColumn ic : primaryKey.getIndexColumns()) { + String columnName = ic.columnName; + for (Column column : command.getColumns()) { + if (database.equalsIdentifiers(column.getName(), columnName)) { + SequenceOptions options = column.getIdentityOptions(); + if (options != null) { + options.setStartValue(value); + break set; + } + } + } + } + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY"); + } + } else if (readIf(DEFAULT)) { + if (readIf("CHARACTER")) { + read(SET); + } else { + readIf("CHARSET"); + readIf("COLLATE"); + } + readMySQLCharset(); + } else if (readIf("CHARACTER")) { + read(SET); + readMySQLCharset(); + } else if (readIf("COLLATE")) { + readMySQLCharset(); + } else if (readIf("CHARSET")) { + readMySQLCharset(); + } else if (readIf("COMMENT")) { + readIf(EQUAL); + command.setComment(readString()); + } else if (readIf("ENGINE")) { + readIf(EQUAL); + readIdentifier(); + } else if (readIf("ROW_FORMAT")) { + readIf(EQUAL); + readIdentifier(); + } else if (requireNext) { + throw getSyntaxError(); + } else { + break; } - command.setQuery(parseSelect()); + requireNext = readIf(COMMA); + } + } + + private void readMySQLCharset() { + readIf(EQUAL); + readIdentifier(); + } + + /** + * Enumeration describing null constraints + */ + private enum NullConstraintType { + NULL_IS_ALLOWED, NULL_IS_NOT_ALLOWED, NO_NULL_CONSTRAINT_FOUND + } + + private NullConstraintType parseNotNullConstraint(NullConstraintType nullConstraint) { + if (nullConstraint == NullConstraintType.NO_NULL_CONSTRAINT_FOUND) { + nullConstraint = parseNotNullConstraint(); + } + return nullConstraint; + } + + private NullConstraintType 
parseNotNullConstraint() { + NullConstraintType nullConstraint; + if (readIf(NOT)) { + read(NULL); + nullConstraint = NullConstraintType.NULL_IS_NOT_ALLOWED; + } else if (readIf(NULL)) { + nullConstraint = NullConstraintType.NULL_IS_ALLOWED; + } else { + return NullConstraintType.NO_NULL_CONSTRAINT_FOUND; + } + if (database.getMode().getEnum() == ModeEnum.Oracle) { + nullConstraint = parseNotNullCompatibility(nullConstraint); } - // for MySQL compatibility - if (readIf("ROW_FORMAT")) { - if (readIf("=")) { - readColumnIdentifier(); + return nullConstraint; + } + + private NullConstraintType parseNotNullCompatibility(NullConstraintType nullConstraint) { + if (readIf("ENABLE")) { + if (!readIf("VALIDATE") && readIf("NOVALIDATE")) { + // Turn off constraint, allow NULLs + nullConstraint = NullConstraintType.NULL_IS_ALLOWED; + } + } else if (readIf("DISABLE")) { + // Turn off constraint, allow NULLs + nullConstraint = NullConstraintType.NULL_IS_ALLOWED; + if (!readIf("VALIDATE")) { + readIf("NOVALIDATE"); } } + return nullConstraint; + } + + private CreateSynonym parseCreateSynonym(boolean orReplace) { + boolean ifNotExists = readIfNotExists(); + String name = readIdentifierWithSchema(); + Schema synonymSchema = getSchema(); + read(FOR); + String tableName = readIdentifierWithSchema(); + + Schema targetSchema = getSchema(); + CreateSynonym command = new CreateSynonym(session, synonymSchema); + command.setName(name); + command.setSynonymFor(tableName); + command.setSynonymForSchema(targetSchema); + command.setComment(readCommentIf()); + command.setIfNotExists(ifNotExists); + command.setOrReplace(orReplace); return command; } @@ -5947,34 +9617,31 @@ private static int getCompareType(int tokenType) { * Add double quotes around an identifier if required. 
* * @param s the identifier + * @param sqlFlags formatting flags * @return the quoted identifier */ - public static String quoteIdentifier(String s) { - if (s == null || s.length() == 0) { + public static String quoteIdentifier(String s, int sqlFlags) { + if (s == null) { return "\"\""; } - char c = s.charAt(0); - // lowercase a-z is quoted as well - if ((!Character.isLetter(c) && c != '_') || Character.isLowerCase(c)) { - return StringUtils.quoteIdentifier(s); - } - for (int i = 1, length = s.length(); i < length; i++) { - c = s.charAt(i); - if ((!Character.isLetterOrDigit(c) && c != '_') || - Character.isLowerCase(c)) { - return StringUtils.quoteIdentifier(s); - } - } - if (isKeyword(s, true)) { - return StringUtils.quoteIdentifier(s); + if ((sqlFlags & HasSQL.QUOTE_ONLY_WHEN_REQUIRED) != 0 && ParserUtil.isSimpleIdentifier(s, false, false)) { + return s; } - return s; + return StringUtils.quoteIdentifier(s); + } + + public void setLiteralsChecked(boolean literalsChecked) { + this.literalsChecked = literalsChecked; } public void setRightsChecked(boolean rightsChecked) { this.rightsChecked = rightsChecked; } + public void setSuppliedParameters(ArrayList suppliedParameters) { + this.suppliedParameters = suppliedParameters; + } + /** * Parse a SQL code snippet that represents an expression. * @@ -5982,12 +9649,30 @@ public void setRightsChecked(boolean rightsChecked) { * @return the expression object */ public Expression parseExpression(String sql) { - parameters = New.arrayList(); - initialize(sql); + parameters = Utils.newSmallArrayList(); + initialize(sql, null, false); read(); return readExpression(); } + /** + * Parse a SQL code snippet that represents an expression for a domain constraint. 
+ * + * @param sql the code snippet + * @return the expression object + */ + public Expression parseDomainConstraintExpression(String sql) { + parameters = Utils.newSmallArrayList(); + initialize(sql, null, false); + read(); + try { + parseDomainConstraint = true; + return readExpression(); + } finally { + parseDomainConstraint = false; + } + } + /** * Parse a SQL code snippet that represents a table name. * @@ -5995,9 +9680,70 @@ public Expression parseExpression(String sql) { * @return the table object */ public Table parseTableName(String sql) { - parameters = New.arrayList(); - initialize(sql); + parameters = Utils.newSmallArrayList(); + initialize(sql, null, false); read(); return readTableOrView(); } + + /** + * Parses a list of column names or numbers in parentheses. + * + * @param sql the source SQL + * @param offset the initial offset + * @return the array of column names ({@code String[]}) or numbers + * ({@code int[]}) + * @throws DbException on syntax error + */ + public Object parseColumnList(String sql, int offset) { + initialize(sql, null, true); + for (int i = 0, l = tokens.size(); i < l; i++) { + if (tokens.get(i).start() >= offset) { + setTokenIndex(i); + break; + } + } + read(OPEN_PAREN); + if (readIf(CLOSE_PAREN)) { + return Utils.EMPTY_INT_ARRAY; + } + if (isIdentifier()) { + ArrayList list = Utils.newSmallArrayList(); + do { + if (!isIdentifier()) { + throw getSyntaxError(); + } + list.add(currentToken); + read(); + } while (readIfMore()); + return list.toArray(new String[0]); + } else if (currentTokenType == LITERAL) { + ArrayList list = Utils.newSmallArrayList(); + do { + list.add(readInt()); + } while (readIfMore()); + int count = list.size(); + int[] array = new int[count]; + for (int i = 0; i < count; i++) { + array[i] = list.get(i); + } + return array; + } else { + throw getSyntaxError(); + } + } + + /** + * Returns the last parse index. 
+ * + * @return the last parse index + */ + public int getLastParseIndex() { + return token.start(); + } + + @Override + public String toString() { + return StringUtils.addAsterisk(sqlCommand, token.start()); + } } diff --git a/h2/src/main/org/h2/command/Prepared.java b/h2/src/main/org/h2/command/Prepared.java index 89b0693a7f..f9a88835d9 100644 --- a/h2/src/main/org/h2/command/Prepared.java +++ b/h2/src/main/org/h2/command/Prepared.java @@ -1,22 +1,25 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command; import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.ResultInterface; -import org.h2.util.StatementBuilder; -import org.h2.value.Value; +import org.h2.table.TableView; +import org.h2.util.HasSQL; /** * A prepared statement. @@ -26,13 +29,18 @@ public abstract class Prepared { /** * The session. */ - protected Session session; + protected SessionLocal session; /** * The SQL string. */ protected String sqlStatement; + /** + * The SQL tokens. + */ + protected ArrayList sqlTokens; + /** * Whether to create a new object (for indexes). */ @@ -52,16 +60,26 @@ public abstract class Prepared { private long modificationMetaId; private Command command; - private int objectId; - private int currentRowNumber; + /** + * Used to preserve object identities on database startup. 
{@code 0} if + * object is not stored, {@code -1} if object is stored and its ID is + * already read, {@code >0} if object is stored and its id is not yet read. + */ + private int persistedObjectId; + private long currentRowNumber; private int rowScanCount; + /** + * Common table expressions (CTE) in queries require us to create temporary views, + * which need to be cleaned up once a command is done executing. + */ + private List cteCleanups; /** * Create a new object. * * @param session the session */ - public Prepared(Session session) { + public Prepared(SessionLocal session) { this.session = session; modificationMetaId = session.getDatabase().getModificationMetaId(); } @@ -158,9 +176,13 @@ public ArrayList getParameters() { * @throws DbException if any parameter has not been set */ protected void checkParameters() { + if (persistedObjectId < 0) { + // restore original persistedObjectId on Command re-run + // i.e. due to concurrent update + persistedObjectId = ~persistedObjectId; + } if (parameters != null) { - for (int i = 0, size = parameters.size(); i < size; i++) { - Parameter param = parameters.get(i); + for (Parameter param : parameters) { param.checkSet(); } } @@ -197,7 +219,7 @@ public void prepare() { * @return the update count * @throws DbException if it is a query */ - public int update() { + public long update() { throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY); } @@ -208,7 +230,8 @@ public int update() { * @return the result set * @throws DbException if it is not a query */ - public ResultInterface query(int maxrows) { + @SuppressWarnings("unused") + public ResultInterface query(long maxrows) { throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY); } @@ -216,9 +239,11 @@ public ResultInterface query(int maxrows) { * Set the SQL statement. 
* * @param sql the SQL statement + * @param sqlTokens the SQL tokens */ - public void setSQL(String sql) { + public final void setSQL(String sql, ArrayList sqlTokens) { this.sqlStatement = sql; + this.sqlTokens = sqlTokens; } /** @@ -226,43 +251,56 @@ public void setSQL(String sql) { * * @return the SQL statement */ - public String getSQL() { + public final String getSQL() { return sqlStatement; } + /** + * Get the SQL tokens. + * + * @return the SQL tokens + */ + public final ArrayList getSQLTokens() { + return sqlTokens; + } + /** * Get the object id to use for the database object that is created in this - * statement. This id is only set when the object is persistent. + * statement. This id is only set when the object is already persisted. * If not set, this method returns 0. * * @return the object id or 0 if not set */ - protected int getCurrentObjectId() { - return objectId; + public int getPersistedObjectId() { + int id = persistedObjectId; + return id >= 0 ? id : 0; } /** * Get the current object id, or get a new id from the database. The object - * id is used when creating new database object (CREATE statement). + * id is used when creating new database object (CREATE statement). This + * method may be called only once. * * @return the object id */ protected int getObjectId() { - int id = objectId; + int id = persistedObjectId; if (id == 0) { id = session.getDatabase().allocateObjectId(); - } else { - objectId = 0; + } else if (id < 0) { + throw DbException.getInternalError("Prepared.getObjectId() was called before"); } + persistedObjectId = ~persistedObjectId; // while negative, it can be restored later return id; } /** * Get the SQL statement with the execution plan. * + * @param sqlFlags formatting flags * @return the execution plan */ - public String getPlanSQL() { + public String getPlanSQL(int sqlFlags) { return null; } @@ -280,12 +318,12 @@ public void checkCanceled() { } /** - * Set the object id for this statement. 
+ * Set the persisted object id for this statement. * * @param i the object id */ - public void setObjectId(int i) { - this.objectId = i; + public void setPersistedObjectId(int i) { + this.persistedObjectId = i; this.create = false; } @@ -294,7 +332,7 @@ public void setObjectId(int i) { * * @param currentSession the new session */ - public void setSession(Session currentSession) { + public void setSession(SessionLocal currentSession) { this.session = currentSession; } @@ -302,19 +340,20 @@ public void setSession(Session currentSession) { * Print information about the statement executed if info trace level is * enabled. * - * @param startTime when the statement was started + * @param startTimeNanos when the statement was started * @param rowCount the query or update row count */ - void trace(long startTime, int rowCount) { - if (session.getTrace().isInfoEnabled() && startTime > 0) { - long deltaTime = System.currentTimeMillis() - startTime; + void trace(long startTimeNanos, long rowCount) { + if (session.getTrace().isInfoEnabled() && startTimeNanos > 0) { + long deltaTimeNanos = System.nanoTime() - startTimeNanos; String params = Trace.formatParams(parameters); - session.getTrace().infoSQL(sqlStatement, params, rowCount, deltaTime); + session.getTrace().infoSQL(sqlStatement, params, rowCount, deltaTimeNanos / 1_000_000L); } - if (session.getDatabase().getQueryStatistics()) { - long deltaTime = System.currentTimeMillis() - startTime; - session.getDatabase().getQueryStatisticsData(). 
- update(toString(), deltaTime, rowCount); + // startTime_nanos can be zero for the command that actually turns on + // statistics + if (session.getDatabase().getQueryStatistics() && startTimeNanos != 0) { + long deltaTimeNanos = System.nanoTime() - startTimeNanos; + session.getDatabase().getQueryStatisticsData().update(toString(), deltaTimeNanos, rowCount); } } @@ -333,7 +372,7 @@ public void setPrepareAlways(boolean prepareAlways) { * * @param rowNumber the row number */ - protected void setCurrentRowNumber(int rowNumber) { + public void setCurrentRowNumber(long rowNumber) { if ((++rowScanCount & 127) == 0) { checkCanceled(); } @@ -346,7 +385,7 @@ protected void setCurrentRowNumber(int rowNumber) { * * @return the row number */ - public int getCurrentRowNumber() { + public long getCurrentRowNumber() { return currentRowNumber; } @@ -355,9 +394,8 @@ public int getCurrentRowNumber() { */ private void setProgress() { if ((currentRowNumber & 127) == 0) { - session.getDatabase().setProgress( - DatabaseEventListener.STATE_STATEMENT_PROGRESS, - sqlStatement, currentRowNumber, 0); + session.getDatabase().setProgress(DatabaseEventListener.STATE_STATEMENT_PROGRESS, sqlStatement, + currentRowNumber, 0L); } } @@ -371,38 +409,14 @@ public String toString() { return sqlStatement; } - /** - * Get the SQL snippet of the value list. - * - * @param values the value list - * @return the SQL snippet - */ - protected static String getSQL(Value[] values) { - StatementBuilder buff = new StatementBuilder(); - for (Value v : values) { - buff.appendExceptFirst(", "); - if (v != null) { - buff.append(v.getSQL()); - } - } - return buff.toString(); - } - /** * Get the SQL snippet of the expression list. 
* * @param list the expression list * @return the SQL snippet */ - protected static String getSQL(Expression[] list) { - StatementBuilder buff = new StatementBuilder(); - for (Expression e : list) { - buff.appendExceptFirst(", "); - if (e != null) { - buff.append(e.getSQL()); - } - } - return buff.toString(); + public static String getSimpleSQL(Expression[] list) { + return Expression.writeExpressions(new StringBuilder(), list, HasSQL.TRACE_SQL_FLAGS).toString(); } /** @@ -413,7 +427,7 @@ protected static String getSQL(Expression[] list) { * @param values the values of the row * @return the exception */ - protected DbException setRow(DbException e, int rowId, String values) { + protected DbException setRow(DbException e, long rowId, String values) { StringBuilder buff = new StringBuilder(); if (sqlStatement != null) { buff.append(sqlStatement); @@ -430,4 +444,30 @@ public boolean isCacheable() { return false; } + /** + * @return the temporary views created for CTE's. + */ + public List getCteCleanups() { + return cteCleanups; + } + + /** + * Set the temporary views created for CTE's. + * + * @param cteCleanups the temporary views + */ + public void setCteCleanups(List cteCleanups) { + this.cteCleanups = cteCleanups; + } + + public final SessionLocal getSession() { + return session; + } + + /** + * Find and collect all DbObjects, this Prepared depends on. + * + * @param dependencies collection of dependencies to populate + */ + public void collectDependencies(HashSet dependencies) {} } diff --git a/h2/src/main/org/h2/command/Token.java b/h2/src/main/org/h2/command/Token.java new file mode 100644 index 0000000000..888a7e776a --- /dev/null +++ b/h2/src/main/org/h2/command/Token.java @@ -0,0 +1,757 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command; + +import static org.h2.util.ParserUtil.IDENTIFIER; +import static org.h2.util.ParserUtil.LAST_KEYWORD; + +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * Token. + */ +public abstract class Token implements Cloneable { + + /** + * Token with parameter. + */ + static final int PARAMETER = LAST_KEYWORD + 1; + + /** + * End of input. + */ + static final int END_OF_INPUT = PARAMETER + 1; + + /** + * Token with literal. + */ + static final int LITERAL = END_OF_INPUT + 1; + + /** + * The token "=". + */ + static final int EQUAL = LITERAL + 1; + + /** + * The token ">=". + */ + static final int BIGGER_EQUAL = EQUAL + 1; + + /** + * The token ">". + */ + static final int BIGGER = BIGGER_EQUAL + 1; + + /** + * The token "<". + */ + static final int SMALLER = BIGGER + 1; + + /** + * The token "<=". + */ + static final int SMALLER_EQUAL = SMALLER + 1; + + /** + * The token "<>" or "!=". + */ + static final int NOT_EQUAL = SMALLER_EQUAL + 1; + + /** + * The token "@". + */ + static final int AT = NOT_EQUAL + 1; + + /** + * The token "-". + */ + static final int MINUS_SIGN = AT + 1; + + /** + * The token "+". + */ + static final int PLUS_SIGN = MINUS_SIGN + 1; + + /** + * The token "||". + */ + static final int CONCATENATION = PLUS_SIGN + 1; + + /** + * The token "(". + */ + static final int OPEN_PAREN = CONCATENATION + 1; + + /** + * The token ")". + */ + static final int CLOSE_PAREN = OPEN_PAREN + 1; + + /** + * The token "&&". + */ + static final int SPATIAL_INTERSECTS = CLOSE_PAREN + 1; + + /** + * The token "*". + */ + static final int ASTERISK = SPATIAL_INTERSECTS + 1; + + /** + * The token ",". + */ + static final int COMMA = ASTERISK + 1; + + /** + * The token ".". 
+ */ + static final int DOT = COMMA + 1; + + /** + * The token "{". + */ + static final int OPEN_BRACE = DOT + 1; + + /** + * The token "}". + */ + static final int CLOSE_BRACE = OPEN_BRACE + 1; + + /** + * The token "/". + */ + static final int SLASH = CLOSE_BRACE + 1; + + /** + * The token "%". + */ + static final int PERCENT = SLASH + 1; + + /** + * The token ";". + */ + static final int SEMICOLON = PERCENT + 1; + + /** + * The token ":". + */ + static final int COLON = SEMICOLON + 1; + + /** + * The token "[". + */ + static final int OPEN_BRACKET = COLON + 1; + + /** + * The token "]". + */ + static final int CLOSE_BRACKET = OPEN_BRACKET + 1; + + /** + * The token "~". + */ + static final int TILDE = CLOSE_BRACKET + 1; + + /** + * The token "::". + */ + static final int COLON_COLON = TILDE + 1; + + /** + * The token ":=". + */ + static final int COLON_EQ = COLON_COLON + 1; + + /** + * The token "!~". + */ + static final int NOT_TILDE = COLON_EQ + 1; + + static final String[] TOKENS = { + // Unused + null, + // KEYWORD + null, + // IDENTIFIER + null, + // ALL + "ALL", + // AND + "AND", + // ANY + "ANY", + // ARRAY + "ARRAY", + // AS + "AS", + // ASYMMETRIC + "ASYMMETRIC", + // AUTHORIZATION + "AUTHORIZATION", + // BETWEEN + "BETWEEN", + // CASE + "CASE", + // CAST + "CAST", + // CHECK + "CHECK", + // CONSTRAINT + "CONSTRAINT", + // CROSS + "CROSS", + // CURRENT_CATALOG + "CURRENT_CATALOG", + // CURRENT_DATE + "CURRENT_DATE", + // CURRENT_PATH + "CURRENT_PATH", + // CURRENT_ROLE + "CURRENT_ROLE", + // CURRENT_SCHEMA + "CURRENT_SCHEMA", + // CURRENT_TIME + "CURRENT_TIME", + // CURRENT_TIMESTAMP + "CURRENT_TIMESTAMP", + // CURRENT_USER + "CURRENT_USER", + // DAY + "DAY", + // DEFAULT + "DEFAULT", + // DISTINCT + "DISTINCT", + // ELSE + "ELSE", + // END + "END", + // EXCEPT + "EXCEPT", + // EXISTS + "EXISTS", + // FALSE + "FALSE", + // FETCH + "FETCH", + // FOR + "FOR", + // FOREIGN + "FOREIGN", + // FROM + "FROM", + // FULL + "FULL", + // GROUP + "GROUP", + // 
HAVING + "HAVING", + // HOUR + "HOUR", + // IF + "IF", + // IN + "IN", + // INNER + "INNER", + // INTERSECT + "INTERSECT", + // INTERVAL + "INTERVAL", + // IS + "IS", + // JOIN + "JOIN", + // KEY + "KEY", + // LEFT + "LEFT", + // LIKE + "LIKE", + // LIMIT + "LIMIT", + // LOCALTIME + "LOCALTIME", + // LOCALTIMESTAMP + "LOCALTIMESTAMP", + // MINUS + "MINUS", + // MINUTE + "MINUTE", + // MONTH + "MONTH", + // NATURAL + "NATURAL", + // NOT + "NOT", + // NULL + "NULL", + // OFFSET + "OFFSET", + // ON + "ON", + // OR + "OR", + // ORDER + "ORDER", + // PRIMARY + "PRIMARY", + // QUALIFY + "QUALIFY", + // RIGHT + "RIGHT", + // ROW + "ROW", + // ROWNUM + "ROWNUM", + // SECOND + "SECOND", + // SELECT + "SELECT", + // SESSION_USER + "SESSION_USER", + // SET + "SET", + // SOME + "SOME", + // SYMMETRIC + "SYMMETRIC", + // SYSTEM_USER + "SYSTEM_USER", + // TABLE + "TABLE", + // TO + "TO", + // TRUE + "TRUE", + // UESCAPE + "UESCAPE", + // UNION + "UNION", + // UNIQUE + "UNIQUE", + // UNKNOWN + "UNKNOWN", + // USER + "USER", + // USING + "USING", + // VALUE + "VALUE", + // VALUES + "VALUES", + // WHEN + "WHEN", + // WHERE + "WHERE", + // WINDOW + "WINDOW", + // WITH + "WITH", + // YEAR + "YEAR", + // _ROWID_ + "_ROWID_", + // PARAMETER + "?", + // END_OF_INPUT + null, + // LITERAL + null, + // EQUAL + "=", + // BIGGER_EQUAL + ">=", + // BIGGER + ">", + // SMALLER + "<", + // SMALLER_EQUAL + "<=", + // NOT_EQUAL + "<>", + // AT + "@", + // MINUS_SIGN + "-", + // PLUS_SIGN + "+", + // CONCATENATION + "||", + // OPEN_PAREN + "(", + // CLOSE_PAREN + ")", + // SPATIAL_INTERSECTS + "&&", + // ASTERISK + "*", + // COMMA + ",", + // DOT + ".", + // OPEN_BRACE + "{", + // CLOSE_BRACE + "}", + // SLASH + "/", + // PERCENT + "%", + // SEMICOLON + ";", + // COLON + ":", + // OPEN_BRACKET + "[", + // CLOSE_BRACKET + "]", + // TILDE + "~", + // COLON_COLON + "::", + // COLON_EQ + ":=", + // NOT_TILDE + "!~", + // End + }; + + static class IdentifierToken extends Token { + + private String 
identifier; + + private final boolean quoted; + + private boolean unicode; + + IdentifierToken(int start, String identifier, boolean quoted, boolean unicode) { + super(start); + this.identifier = identifier; + this.quoted = quoted; + this.unicode = unicode; + } + + @Override + int tokenType() { + return IDENTIFIER; + } + + @Override + String asIdentifier() { + return identifier; + } + + @Override + boolean isQuoted() { + return quoted; + } + + @Override + boolean needsUnicodeConversion() { + return unicode; + } + + @Override + void convertUnicode(int uescape) { + if (unicode) { + identifier = StringUtils.decodeUnicodeStringSQL(identifier, uescape); + unicode = false; + } else { + throw DbException.getInternalError(); + } + } + + @Override + public String toString() { + return quoted ? StringUtils.quoteIdentifier(identifier) : identifier; + } + + } + + static final class KeywordToken extends Token { + + private final int type; + + KeywordToken(int start, int type) { + super(start); + this.type = type; + } + + @Override + int tokenType() { + return type; + } + + @Override + String asIdentifier() { + return TOKENS[type]; + } + + @Override + public String toString() { + return TOKENS[type]; + } + + } + + static final class KeywordOrIdentifierToken extends Token { + + private final int type; + + private final String identifier; + + KeywordOrIdentifierToken(int start, int type, String identifier) { + super(start); + this.type = type; + this.identifier = identifier; + } + + @Override + int tokenType() { + return type; + } + + @Override + String asIdentifier() { + return identifier; + } + + @Override + public String toString() { + return identifier; + } + + } + + static abstract class LiteralToken extends Token { + + Value value; + + LiteralToken(int start) { + super(start); + } + + @Override + final int tokenType() { + return LITERAL; + } + + @Override + public final String toString() { + return value(null).getTraceSQL(); + } + + } + + static final class BinaryStringToken 
extends LiteralToken { + + private final byte[] string; + + BinaryStringToken(int start, byte[] string) { + super(start); + this.string = string; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueVarbinary.getNoCopy(string); + } + return value; + } + + } + + static final class CharacterStringToken extends LiteralToken { + + String string; + + private boolean unicode; + + CharacterStringToken(int start, String string, boolean unicode) { + super(start); + this.string = string; + this.unicode = unicode; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueVarchar.get(string, provider); + } + return value; + } + + @Override + boolean needsUnicodeConversion() { + return unicode; + } + + @Override + void convertUnicode(int uescape) { + if (unicode) { + string = StringUtils.decodeUnicodeStringSQL(string, uescape); + unicode = false; + } else { + throw DbException.getInternalError(); + } + } + + } + + static final class IntegerToken extends LiteralToken { + + private final int number; + + IntegerToken(int start, int number) { + super(start); + this.number = number; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueInteger.get(number); + } + return value; + } + + } + + static final class BigintToken extends LiteralToken { + + private final long number; + + BigintToken(int start, long number) { + super(start); + this.number = number; + } + + @Override + Value value(CastDataProvider provider) { + if (value == null) { + value = ValueBigint.get(number); + } + return value; + } + + } + + static final class ValueToken extends LiteralToken { + + ValueToken(int start, Value value) { + super(start); + this.value = value; + } + + @Override + Value value(CastDataProvider provider) { + return value; + } + + } + + static final class ParameterToken extends Token { + + int index; + + ParameterToken(int start, int index) { + super(start); + 
this.index = index; + } + + @Override + int tokenType() { + return PARAMETER; + } + + @Override + String asIdentifier() { + return "?"; + } + + int index() { + return index; + } + + @Override + public String toString() { + return index == 0 ? "?" : "?" + index; + } + + } + + static final class EndOfInputToken extends Token { + + EndOfInputToken(int start) { + super(start); + } + + @Override + int tokenType() { + return END_OF_INPUT; + } + + } + + private int start; + + Token(int start) { + this.start = start; + } + + final int start() { + return start; + } + + final void setStart(int offset) { + start = offset; + } + + final void subtractFromStart(int offset) { + start -= offset; + } + + abstract int tokenType(); + + String asIdentifier() { + return null; + } + + boolean isQuoted() { + return false; + } + + Value value(CastDataProvider provider) { + return null; + } + + boolean needsUnicodeConversion() { + return false; + } + + void convertUnicode(int uescape) { + throw DbException.getInternalError(); + } + + @Override + protected Token clone() { + try { + return (Token) super.clone(); + } catch (CloneNotSupportedException e) { + throw DbException.getInternalError(); + } + } + +} diff --git a/h2/src/main/org/h2/command/Tokenizer.java b/h2/src/main/org/h2/command/Tokenizer.java new file mode 100644 index 0000000000..f0c413e546 --- /dev/null +++ b/h2/src/main/org/h2/command/Tokenizer.java @@ -0,0 +1,1400 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command; + +import static org.h2.command.Token.ASTERISK; +import static org.h2.command.Token.AT; +import static org.h2.command.Token.BIGGER; +import static org.h2.command.Token.BIGGER_EQUAL; +import static org.h2.command.Token.CLOSE_BRACE; +import static org.h2.command.Token.CLOSE_BRACKET; +import static org.h2.command.Token.CLOSE_PAREN; +import static org.h2.command.Token.COLON; +import static org.h2.command.Token.COLON_COLON; +import static org.h2.command.Token.COLON_EQ; +import static org.h2.command.Token.COMMA; +import static org.h2.command.Token.CONCATENATION; +import static org.h2.command.Token.DOT; +import static org.h2.command.Token.EQUAL; +import static org.h2.command.Token.MINUS_SIGN; +import static org.h2.command.Token.NOT_EQUAL; +import static org.h2.command.Token.NOT_TILDE; +import static org.h2.command.Token.OPEN_BRACE; +import static org.h2.command.Token.OPEN_BRACKET; +import static org.h2.command.Token.OPEN_PAREN; +import static org.h2.command.Token.PERCENT; +import static org.h2.command.Token.PLUS_SIGN; +import static org.h2.command.Token.SEMICOLON; +import static org.h2.command.Token.SLASH; +import static org.h2.command.Token.SMALLER; +import static org.h2.command.Token.SMALLER_EQUAL; +import static org.h2.command.Token.SPATIAL_INTERSECTS; +import static org.h2.command.Token.TILDE; +import static org.h2.util.ParserUtil.ALL; +import static org.h2.util.ParserUtil.AND; +import static org.h2.util.ParserUtil.ANY; +import static org.h2.util.ParserUtil.ARRAY; +import static org.h2.util.ParserUtil.AS; +import static org.h2.util.ParserUtil.ASYMMETRIC; +import static org.h2.util.ParserUtil.AUTHORIZATION; +import static org.h2.util.ParserUtil.BETWEEN; +import static org.h2.util.ParserUtil.CASE; +import static org.h2.util.ParserUtil.CAST; +import static org.h2.util.ParserUtil.CHECK; +import static org.h2.util.ParserUtil.CONSTRAINT; +import static org.h2.util.ParserUtil.CROSS; +import static 
org.h2.util.ParserUtil.CURRENT_CATALOG; +import static org.h2.util.ParserUtil.CURRENT_DATE; +import static org.h2.util.ParserUtil.CURRENT_PATH; +import static org.h2.util.ParserUtil.CURRENT_ROLE; +import static org.h2.util.ParserUtil.CURRENT_SCHEMA; +import static org.h2.util.ParserUtil.CURRENT_TIME; +import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP; +import static org.h2.util.ParserUtil.CURRENT_USER; +import static org.h2.util.ParserUtil.DAY; +import static org.h2.util.ParserUtil.DEFAULT; +import static org.h2.util.ParserUtil.DISTINCT; +import static org.h2.util.ParserUtil.ELSE; +import static org.h2.util.ParserUtil.END; +import static org.h2.util.ParserUtil.EXCEPT; +import static org.h2.util.ParserUtil.EXISTS; +import static org.h2.util.ParserUtil.FALSE; +import static org.h2.util.ParserUtil.FETCH; +import static org.h2.util.ParserUtil.FOR; +import static org.h2.util.ParserUtil.FOREIGN; +import static org.h2.util.ParserUtil.FROM; +import static org.h2.util.ParserUtil.FULL; +import static org.h2.util.ParserUtil.GROUP; +import static org.h2.util.ParserUtil.HAVING; +import static org.h2.util.ParserUtil.HOUR; +import static org.h2.util.ParserUtil.IDENTIFIER; +import static org.h2.util.ParserUtil.IF; +import static org.h2.util.ParserUtil.IN; +import static org.h2.util.ParserUtil.INNER; +import static org.h2.util.ParserUtil.INTERSECT; +import static org.h2.util.ParserUtil.INTERVAL; +import static org.h2.util.ParserUtil.IS; +import static org.h2.util.ParserUtil.JOIN; +import static org.h2.util.ParserUtil.KEY; +import static org.h2.util.ParserUtil.LEFT; +import static org.h2.util.ParserUtil.LIKE; +import static org.h2.util.ParserUtil.LIMIT; +import static org.h2.util.ParserUtil.LOCALTIME; +import static org.h2.util.ParserUtil.LOCALTIMESTAMP; +import static org.h2.util.ParserUtil.MINUS; +import static org.h2.util.ParserUtil.MINUTE; +import static org.h2.util.ParserUtil.MONTH; +import static org.h2.util.ParserUtil.NATURAL; +import static org.h2.util.ParserUtil.NOT; 
+import static org.h2.util.ParserUtil.NULL; +import static org.h2.util.ParserUtil.OFFSET; +import static org.h2.util.ParserUtil.ON; +import static org.h2.util.ParserUtil.OR; +import static org.h2.util.ParserUtil.ORDER; +import static org.h2.util.ParserUtil.PRIMARY; +import static org.h2.util.ParserUtil.QUALIFY; +import static org.h2.util.ParserUtil.RIGHT; +import static org.h2.util.ParserUtil.ROW; +import static org.h2.util.ParserUtil.ROWNUM; +import static org.h2.util.ParserUtil.SECOND; +import static org.h2.util.ParserUtil.SELECT; +import static org.h2.util.ParserUtil.SESSION_USER; +import static org.h2.util.ParserUtil.SET; +import static org.h2.util.ParserUtil.SOME; +import static org.h2.util.ParserUtil.SYMMETRIC; +import static org.h2.util.ParserUtil.SYSTEM_USER; +import static org.h2.util.ParserUtil.TABLE; +import static org.h2.util.ParserUtil.TO; +import static org.h2.util.ParserUtil.TRUE; +import static org.h2.util.ParserUtil.UESCAPE; +import static org.h2.util.ParserUtil.UNION; +import static org.h2.util.ParserUtil.UNIQUE; +import static org.h2.util.ParserUtil.UNKNOWN; +import static org.h2.util.ParserUtil.USER; +import static org.h2.util.ParserUtil.USING; +import static org.h2.util.ParserUtil.VALUE; +import static org.h2.util.ParserUtil.VALUES; +import static org.h2.util.ParserUtil.WHEN; +import static org.h2.util.ParserUtil.WHERE; +import static org.h2.util.ParserUtil.WINDOW; +import static org.h2.util.ParserUtil.WITH; +import static org.h2.util.ParserUtil.YEAR; +import static org.h2.util.ParserUtil._ROWID_; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.ListIterator; + +import org.h2.api.ErrorCode; +import org.h2.engine.CastDataProvider; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueNumeric; + +/** + * Tokenizer. 
+ */ +public final class Tokenizer { + + private final CastDataProvider provider; + + private final boolean identifiersToUpper; + + private final boolean identifiersToLower; + + private final BitSet nonKeywords; + + Tokenizer(CastDataProvider provider, boolean identifiersToUpper, boolean identifiersToLower, BitSet nonKeywords) { + this.provider = provider; + this.identifiersToUpper = identifiersToUpper; + this.identifiersToLower = identifiersToLower; + this.nonKeywords = nonKeywords; + } + + ArrayList tokenize(String sql, boolean stopOnCloseParen) { + ArrayList tokens = new ArrayList<>(); + int end = sql.length() - 1; + boolean foundUnicode = false; + int lastParameter = 0; + loop: for (int i = 0; i <= end;) { + int tokenStart = i; + char c = sql.charAt(i); + Token token; + switch (c) { + case '!': + if (i < end) { + char c2 = sql.charAt(++i); + if (c2 == '=') { + token = new Token.KeywordToken(tokenStart, NOT_EQUAL); + break; + } + if (c2 == '~') { + token = new Token.KeywordToken(tokenStart, NOT_TILDE); + break; + } + } + throw DbException.getSyntaxError(sql, tokenStart); + case '"': + case '`': + i = readQuotedIdentifier(sql, end, tokenStart, i, c, false, tokens); + continue loop; + case '#': + if (provider.getMode().supportPoundSymbolForColumnNames) { + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + continue loop; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '$': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '$') { + i += 2; + int stringEnd = sql.indexOf("$$", i); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.CharacterStringToken(tokenStart, sql.substring(i, stringEnd), false); + i = stringEnd + 1; + } else { + i = parseParameterIndex(sql, end, i, tokens); + lastParameter = assignParameterIndex(tokens, lastParameter); + continue loop; + } + } else { + token = new Token.ParameterToken(tokenStart, 0); + } + break; + case '%': + token = new 
Token.KeywordToken(tokenStart, PERCENT); + break; + case '&': + if (i < end && sql.charAt(i + 1) == '&') { + i++; + token = new Token.KeywordToken(tokenStart, SPATIAL_INTERSECTS); + break; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '\'': + i = readCharacterString(sql, tokenStart, end, i, false, tokens); + continue loop; + case '(': + token = new Token.KeywordToken(tokenStart, OPEN_PAREN); + break; + case ')': + token = new Token.KeywordToken(tokenStart, CLOSE_PAREN); + if (stopOnCloseParen) { + tokens.add(token); + end = skipWhitespace(sql, end, i + 1) - 1; + break loop; + } + break; + case '*': + token = new Token.KeywordToken(tokenStart, ASTERISK); + break; + case '+': + token = new Token.KeywordToken(tokenStart, PLUS_SIGN); + break; + case ',': + token = new Token.KeywordToken(tokenStart, COMMA); + break; + case '-': + if (i < end && sql.charAt(i + 1) == '-') { + i = skipSimpleComment(sql, end, i); + continue loop; + } else { + token = new Token.KeywordToken(tokenStart, MINUS_SIGN); + } + break; + case '.': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 >= '0' && c2 <= '9') { + i = readNumeric(sql, tokenStart, end, i + 1, c2, false, false, tokens); + continue loop; + } + } + token = new Token.KeywordToken(tokenStart, DOT); + break; + case '/': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '*') { + i = skipBracketedComment(sql, tokenStart, end, i); + continue loop; + } else if (c2 == '/') { + i = skipSimpleComment(sql, end, i); + continue loop; + } + } + token = new Token.KeywordToken(tokenStart, SLASH); + break; + case '0': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == 'X' || c2 == 'x') { + i = readHexNumber(sql, provider, tokenStart, end, i + 2, tokens); + continue loop; + } + } + //$FALL-THROUGH$ + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + i = readNumeric(sql, tokenStart, end, i + 1, c, tokens); + continue loop; + case ':': + if (i < 
end) { + char c2 = sql.charAt(i + 1); + if (c2 == ':') { + i++; + token = new Token.KeywordToken(tokenStart, COLON_COLON); + break; + } else if (c2 == '=') { + i++; + token = new Token.KeywordToken(tokenStart, COLON_EQ); + break; + } + } + token = new Token.KeywordToken(tokenStart, COLON); + break; + case ';': + token = new Token.KeywordToken(tokenStart, SEMICOLON); + break; + case '<': + if (i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '=') { + i++; + token = new Token.KeywordToken(tokenStart, SMALLER_EQUAL); + break; + } + if (c2 == '>') { + i++; + token = new Token.KeywordToken(tokenStart, NOT_EQUAL); + break; + } + } + token = new Token.KeywordToken(tokenStart, SMALLER); + break; + case '=': + token = new Token.KeywordToken(tokenStart, EQUAL); + break; + case '>': + if (i < end && sql.charAt(i + 1) == '=') { + i++; + token = new Token.KeywordToken(tokenStart, BIGGER_EQUAL); + break; + } + token = new Token.KeywordToken(tokenStart, BIGGER); + break; + case '?': { + if (i + 1 < end && sql.charAt(i + 1) == '?') { + char c3 = sql.charAt(i + 2); + if (c3 == '(') { + i += 2; + token = new Token.KeywordToken(tokenStart, OPEN_BRACKET); + break; + } + if (c3 == ')') { + i += 2; + token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET); + break; + } + } + i = parseParameterIndex(sql, end, i, tokens); + lastParameter = assignParameterIndex(tokens, lastParameter); + continue loop; + } + case '@': + token = new Token.KeywordToken(tokenStart, AT); + break; + case 'A': + case 'a': + i = readA(sql, end, tokenStart, i, tokens); + continue loop; + case 'B': + case 'b': + i = readB(sql, end, tokenStart, i, tokens); + continue loop; + case 'C': + case 'c': + i = readC(sql, end, tokenStart, i, tokens); + continue loop; + case 'D': + case 'd': + i = readD(sql, end, tokenStart, i, tokens); + continue loop; + case 'E': + case 'e': + i = readE(sql, end, tokenStart, i, tokens); + continue loop; + case 'F': + case 'f': + i = readF(sql, end, tokenStart, i, tokens); + continue 
loop; + case 'G': + case 'g': + i = readG(sql, end, tokenStart, i, tokens); + continue loop; + case 'H': + case 'h': + i = readH(sql, end, tokenStart, i, tokens); + continue loop; + case 'I': + case 'i': + i = readI(sql, end, tokenStart, i, tokens); + continue loop; + case 'J': + case 'j': + i = readJ(sql, end, tokenStart, i, tokens); + continue loop; + case 'K': + case 'k': + i = readK(sql, end, tokenStart, i, tokens); + continue loop; + case 'L': + case 'l': + i = readL(sql, end, tokenStart, i, tokens); + continue loop; + case 'M': + case 'm': + i = readM(sql, end, tokenStart, i, tokens); + continue loop; + case 'N': + case 'n': + if (i < end && sql.charAt(i + 1) == '\'') { + i = readCharacterString(sql, tokenStart, end, i + 1, false, tokens); + } else { + i = readN(sql, end, tokenStart, i, tokens); + } + continue loop; + case 'O': + case 'o': + i = readO(sql, end, tokenStart, i, tokens); + continue loop; + case 'P': + case 'p': + i = readP(sql, end, tokenStart, i, tokens); + continue loop; + case 'Q': + case 'q': + i = readQ(sql, end, tokenStart, i, tokens); + continue loop; + case 'R': + case 'r': + i = readR(sql, end, tokenStart, i, tokens); + continue loop; + case 'S': + case 's': + i = readS(sql, end, tokenStart, i, tokens); + continue loop; + case 'T': + case 't': + i = readT(sql, end, tokenStart, i, tokens); + continue loop; + case 'U': + case 'u': + if (i + 1 < end && sql.charAt(i + 1) == '&') { + char c3 = sql.charAt(i + 2); + if (c3 == '"') { + i = readQuotedIdentifier(sql, end, tokenStart, i + 2, '"', true, tokens); + foundUnicode = true; + continue loop; + } else if (c3 == '\'') { + i = readCharacterString(sql, tokenStart, end, i + 2, true, tokens); + foundUnicode = true; + continue loop; + } + } + i = readU(sql, end, tokenStart, i, tokens); + continue loop; + case 'V': + case 'v': + i = readV(sql, end, tokenStart, i, tokens); + continue loop; + case 'W': + case 'w': + i = readW(sql, end, tokenStart, i, tokens); + continue loop; + case 'X': + case 
'x': + if (i < end && sql.charAt(i + 1) == '\'') { + i = readBinaryString(sql, tokenStart, end, i + 1, tokens); + } else { + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + } + continue loop; + case 'Y': + case 'y': + i = readY(sql, end, tokenStart, i, tokens); + continue loop; + case 'Z': + case 'z': + i = readIdentifier(sql, end, tokenStart, i, c, tokens); + continue loop; + case '[': + if (provider.getMode().squareBracketQuotedNames) { + int identifierEnd = sql.indexOf(']', ++i); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.IdentifierToken(tokenStart, sql.substring(i, identifierEnd), true, false); + i = identifierEnd; + } else { + token = new Token.KeywordToken(tokenStart, OPEN_BRACKET); + } + break; + case ']': + token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET); + break; + case '_': + i = read_(sql, end, tokenStart, i, tokens); + continue loop; + case '{': + token = new Token.KeywordToken(tokenStart, OPEN_BRACE); + break; + case '|': + if (i < end && sql.charAt(++i) == '|') { + token = new Token.KeywordToken(tokenStart, CONCATENATION); + break; + } + throw DbException.getSyntaxError(sql, tokenStart); + case '}': + token = new Token.KeywordToken(tokenStart, CLOSE_BRACE); + break; + case '~': + token = new Token.KeywordToken(tokenStart, TILDE); + break; + default: + if (c <= ' ') { + i++; + continue loop; + } else { + int cp = Character.isHighSurrogate(c) ? 
sql.codePointAt(i++) : c; + if (Character.isSpaceChar(cp)) { + continue loop; + } + if (Character.isJavaIdentifierStart(cp)) { + i = readIdentifier(sql, end, tokenStart, i, cp, tokens); + continue loop; + } + throw DbException.getSyntaxError(sql, tokenStart); + } + } + tokens.add(token); + i++; + } + if (foundUnicode) { + processUescape(sql, tokens); + } + tokens.add(new Token.EndOfInputToken(end + 1)); + return tokens; + } + + private int readIdentifier(String sql, int end, int tokenStart, int i, int cp, ArrayList tokens) { + if (cp >= Character.MIN_SUPPLEMENTARY_CODE_POINT) { + i++; + } + int endIndex = findIdentifierEnd(sql, end, i + Character.charCount(cp) - 1); + tokens.add(new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false)); + return endIndex; + } + + private int readA(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'S' ? AS : IDENTIFIER; + } else { + if (eq("ALL", sql, tokenStart, length)) { + type = ALL; + } else if (eq("AND", sql, tokenStart, length)) { + type = AND; + } else if (eq("ANY", sql, tokenStart, length)) { + type = ANY; + } else if (eq("ARRAY", sql, tokenStart, length)) { + type = ARRAY; + } else if (eq("ASYMMETRIC", sql, tokenStart, length)) { + type = ASYMMETRIC; + } else if (eq("AUTHORIZATION", sql, tokenStart, length)) { + type = AUTHORIZATION; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readB(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("BETWEEN", sql, tokenStart, length) ? 
BETWEEN : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readC(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("CASE", sql, tokenStart, length)) { + type = CASE; + } else if (eq("CAST", sql, tokenStart, length)) { + type = CAST; + } else if (eq("CHECK", sql, tokenStart, length)) { + type = CHECK; + } else if (eq("CONSTRAINT", sql, tokenStart, length)) { + type = CONSTRAINT; + } else if (eq("CROSS", sql, tokenStart, length)) { + type = CROSS; + } else if (length >= 12 && eq("CURRENT_", sql, tokenStart, 8)) { + type = getTokenTypeCurrent(sql, tokenStart, length); + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private static int getTokenTypeCurrent(String s, int tokenStart, int length) { + tokenStart += 8; + switch (length) { + case 12: + if (eqCurrent("CURRENT_DATE", s, tokenStart, length)) { + return CURRENT_DATE; + } else if (eqCurrent("CURRENT_PATH", s, tokenStart, length)) { + return CURRENT_PATH; + } else if (eqCurrent("CURRENT_ROLE", s, tokenStart, length)) { + return CURRENT_ROLE; + } else if (eqCurrent("CURRENT_TIME", s, tokenStart, length)) { + return CURRENT_TIME; + } else if (eqCurrent("CURRENT_USER", s, tokenStart, length)) { + return CURRENT_USER; + } + break; + case 14: + if (eqCurrent("CURRENT_SCHEMA", s, tokenStart, length)) { + return CURRENT_SCHEMA; + } + break; + case 15: + if (eqCurrent("CURRENT_CATALOG", s, tokenStart, length)) { + return CURRENT_CATALOG; + } + break; + case 17: + if (eqCurrent("CURRENT_TIMESTAMP", s, tokenStart, length)) { + return CURRENT_TIMESTAMP; + } + } + return IDENTIFIER; + } + + private static boolean eqCurrent(String expected, String s, int start, int length) { + for (int i = 8; i < length; i++) { + if (expected.charAt(i) != (s.charAt(start++) & 0xffdf)) { + return 
false; + } + } + return true; + } + + private int readD(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("DAY", sql, tokenStart, length)) { + type = DAY; + } else if (eq("DEFAULT", sql, tokenStart, length)) { + type = DEFAULT; + } else if (eq("DISTINCT", sql, tokenStart, length)) { + type = DISTINCT; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readE(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("ELSE", sql, tokenStart, length)) { + type = ELSE; + } else if (eq("END", sql, tokenStart, length)) { + type = END; + } else if (eq("EXCEPT", sql, tokenStart, length)) { + type = EXCEPT; + } else if (eq("EXISTS", sql, tokenStart, length)) { + type = EXISTS; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readF(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("FETCH", sql, tokenStart, length)) { + type = FETCH; + } else if (eq("FROM", sql, tokenStart, length)) { + type = FROM; + } else if (eq("FOR", sql, tokenStart, length)) { + type = FOR; + } else if (eq("FOREIGN", sql, tokenStart, length)) { + type = FOREIGN; + } else if (eq("FULL", sql, tokenStart, length)) { + type = FULL; + } else if (eq("FALSE", sql, tokenStart, length)) { + type = FALSE; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readG(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = 
eq("GROUP", sql, tokenStart, length) ? GROUP : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readH(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("HAVING", sql, tokenStart, length)) { + type = HAVING; + } else if (eq("HOUR", sql, tokenStart, length)) { + type = HOUR; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readI(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + switch ((sql.charAt(tokenStart + 1) & 0xffdf)) { + case 'F': + type = IF; + break; + case 'N': + type = IN; + break; + case 'S': + type = IS; + break; + default: + type = IDENTIFIER; + } + } else { + if (eq("INNER", sql, tokenStart, length)) { + type = INNER; + } else if (eq("INTERSECT", sql, tokenStart, length)) { + type = INTERSECT; + } else if (eq("INTERVAL", sql, tokenStart, length)) { + type = INTERVAL; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readJ(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("JOIN", sql, tokenStart, length) ? JOIN : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readK(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("KEY", sql, tokenStart, length) ? 
KEY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readL(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("LEFT", sql, tokenStart, length)) { + type = LEFT; + } else if (eq("LIMIT", sql, tokenStart, length)) { + type = provider.getMode().limit ? LIMIT : IDENTIFIER; + } else if (eq("LIKE", sql, tokenStart, length)) { + type = LIKE; + } else if (eq("LOCALTIME", sql, tokenStart, length)) { + type = LOCALTIME; + } else if (eq("LOCALTIMESTAMP", sql, tokenStart, length)) { + type = LOCALTIMESTAMP; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readM(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("MINUS", sql, tokenStart, length)) { + type = provider.getMode().minusIsExcept ? 
MINUS : IDENTIFIER; + } else if (eq("MINUTE", sql, tokenStart, length)) { + type = MINUTE; + } else if (eq("MONTH", sql, tokenStart, length)) { + type = MONTH; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readN(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("NOT", sql, tokenStart, length)) { + type = NOT; + } else if (eq("NATURAL", sql, tokenStart, length)) { + type = NATURAL; + } else if (eq("NULL", sql, tokenStart, length)) { + type = NULL; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readO(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + switch ((sql.charAt(tokenStart + 1) & 0xffdf)) { + case 'N': + type = ON; + break; + case 'R': + type = OR; + break; + default: + type = IDENTIFIER; + } + } else { + if (eq("OFFSET", sql, tokenStart, length)) { + type = OFFSET; + } else if (eq("ORDER", sql, tokenStart, length)) { + type = ORDER; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readP(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("PRIMARY", sql, tokenStart, length) ? PRIMARY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readQ(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("QUALIFY", sql, tokenStart, length) ? 
QUALIFY : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readR(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("RIGHT", sql, tokenStart, length)) { + type = RIGHT; + } else if (eq("ROW", sql, tokenStart, length)) { + type = ROW; + } else if (eq("ROWNUM", sql, tokenStart, length)) { + type = ROWNUM; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readS(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("SECOND", sql, tokenStart, length)) { + type = SECOND; + } else if (eq("SELECT", sql, tokenStart, length)) { + type = SELECT; + } else if (eq("SESSION_USER", sql, tokenStart, length)) { + type = SESSION_USER; + } else if (eq("SET", sql, tokenStart, length)) { + type = SET; + } else if (eq("SOME", sql, tokenStart, length)) { + type = SOME; + } else if (eq("SYMMETRIC", sql, tokenStart, length)) { + type = SYMMETRIC; + } else if (eq("SYSTEM_USER", sql, tokenStart, length)) { + type = SYSTEM_USER; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readT(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (length == 2) { + type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'O' ? 
TO : IDENTIFIER; + } else { + if (eq("TABLE", sql, tokenStart, length)) { + type = TABLE; + } else if (eq("TRUE", sql, tokenStart, length)) { + type = TRUE; + } else { + type = IDENTIFIER; + } + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readU(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("UESCAPE", sql, tokenStart, length)) { + type = UESCAPE; + } else if (eq("UNION", sql, tokenStart, length)) { + type = UNION; + } else if (eq("UNIQUE", sql, tokenStart, length)) { + type = UNIQUE; + } else if (eq("UNKNOWN", sql, tokenStart, length)) { + type = UNKNOWN; + } else if (eq("USER", sql, tokenStart, length)) { + type = USER; + } else if (eq("USING", sql, tokenStart, length)) { + type = USING; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readV(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("VALUE", sql, tokenStart, length)) { + type = VALUE; + } else if (eq("VALUES", sql, tokenStart, length)) { + type = VALUES; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readW(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type; + if (eq("WHEN", sql, tokenStart, length)) { + type = WHEN; + } else if (eq("WHERE", sql, tokenStart, length)) { + type = WHERE; + } else if (eq("WINDOW", sql, tokenStart, length)) { + type = WINDOW; + } else if (eq("WITH", sql, tokenStart, length)) { + type = WITH; + } else { + type = IDENTIFIER; + } + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + 
private int readY(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int length = endIndex - tokenStart; + int type = eq("YEAR", sql, tokenStart, length) ? YEAR : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int read_(String sql, int end, int tokenStart, int i, ArrayList tokens) { + int endIndex = findIdentifierEnd(sql, end, i); + int type = endIndex - tokenStart == 7 && "_ROWID_".regionMatches(true, 1, sql, tokenStart + 1, 6) ? _ROWID_ + : IDENTIFIER; + return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type); + } + + private int readIdentifierOrKeyword(String sql, int tokenStart, ArrayList tokens, int endIndex, int type) { + Token token; + if (type == IDENTIFIER) { + token = new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false); + } else if (nonKeywords != null && nonKeywords.get(type)) { + token = new Token.KeywordOrIdentifierToken(tokenStart, type, extractIdentifier(sql, tokenStart, endIndex)); + } else { + token = new Token.KeywordToken(tokenStart, type); + } + tokens.add(token); + return endIndex; + } + + private static boolean eq(String expected, String s, int start, int length) { + if (length != expected.length()) { + return false; + } + for (int i = 1; i < length; i++) { + if (expected.charAt(i) != (s.charAt(++start) & 0xffdf)) { + return false; + } + } + return true; + } + + private int findIdentifierEnd(String sql, int end, int i) { + i++; + for (;;) { + int cp; + if (i > end || (!Character.isJavaIdentifierPart(cp = sql.codePointAt(i)) + && (cp != '#' || !provider.getMode().supportPoundSymbolForColumnNames))) { + break; + } + i += Character.charCount(cp); + } + return i; + } + + private String extractIdentifier(String sql, int beginIndex, int endIndex) { + return convertCase(sql.substring(beginIndex, endIndex)); + } + + private int readQuotedIdentifier(String sql, int end, int 
tokenStart, int i, char c, boolean unicode, + ArrayList tokens) { + int identifierEnd = sql.indexOf(c, ++i); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + String s = sql.substring(i, identifierEnd); + i = identifierEnd + 1; + if (i <= end && sql.charAt(i) == c) { + StringBuilder builder = new StringBuilder(s); + do { + identifierEnd = sql.indexOf(c, i + 1); + if (identifierEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + builder.append(sql, i, identifierEnd); + i = identifierEnd + 1; + } while (i <= end && sql.charAt(i) == c); + s = builder.toString(); + } + if (c == '`') { + s = convertCase(s); + } + tokens.add(new Token.IdentifierToken(tokenStart, s, true, unicode)); + return i; + } + + private String convertCase(String s) { + if (identifiersToUpper) { + s = StringUtils.toUpperEnglish(s); + } else if (identifiersToLower) { + s = StringUtils.toLowerEnglish(s); + } + return s; + } + + private static int readBinaryString(String sql, int tokenStart, int end, int i, ArrayList tokens) { + ByteArrayOutputStream result = new ByteArrayOutputStream(); + int stringEnd; + do { + stringEnd = sql.indexOf('\'', ++i); + if (stringEnd < 0 || stringEnd < end && sql.charAt(stringEnd + 1) == '\'') { + throw DbException.getSyntaxError(sql, tokenStart); + } + StringUtils.convertHexWithSpacesToBytes(result, sql, i, stringEnd); + i = skipWhitespace(sql, end, stringEnd + 1); + } while (i <= end && sql.charAt(i) == '\''); + tokens.add(new Token.BinaryStringToken(tokenStart, result.toByteArray())); + return i; + } + + private static int readCharacterString(String sql, int tokenStart, int end, int i, boolean unicode, + ArrayList tokens) { + String s = null; + StringBuilder builder = null; + int stringEnd; + do { + stringEnd = sql.indexOf('\'', ++i); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + if (s == null) { + s = sql.substring(i, stringEnd); + } else { + if (builder == null) { + builder = 
new StringBuilder(s); + } + builder.append(sql, i, stringEnd); + } + i = stringEnd + 1; + if (i <= end && sql.charAt(i) == '\'') { + if (builder == null) { + builder = new StringBuilder(s); + } + do { + stringEnd = sql.indexOf('\'', i + 1); + if (stringEnd < 0) { + throw DbException.getSyntaxError(sql, tokenStart); + } + builder.append(sql, i, stringEnd); + i = stringEnd + 1; + } while (i <= end && sql.charAt(i) == '\''); + } + i = skipWhitespace(sql, end, i); + } while (i <= end && sql.charAt(i) == '\''); + if (builder != null) { + s = builder.toString(); + } + tokens.add(new Token.CharacterStringToken(tokenStart, s, unicode)); + return i; + } + + private static int skipWhitespace(String sql, int end, int i) { + while (i <= end) { + int cp = sql.codePointAt(i); + if (!Character.isWhitespace(cp)) { + if (cp == '/' && i < end) { + char c2 = sql.charAt(i + 1); + if (c2 == '*') { + i = skipBracketedComment(sql, i, end, i); + continue; + } else if (c2 == '/') { + i = skipSimpleComment(sql, end, i); + continue; + } + } + break; + } + i += Character.charCount(cp); + } + return i; + } + + private static int readHexNumber(String sql, CastDataProvider provider, int tokenStart, int end, int i, + ArrayList tokens) { + if (provider.getMode().zeroExLiteralsAreBinaryStrings) { + int start = i; + for (char c; i <= end + && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'));) { + i++; + } + if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.get(ErrorCode.HEX_STRING_WRONG_1, sql.substring(start, i + 1)); + } + tokens.add(new Token.BinaryStringToken(start, StringUtils.convertHexToBytes(sql.substring(start, i)))); + return i; + } else { + if (i > end) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + int start = i; + long number = 0; + char c; + do { + c = sql.charAt(i); + if (c >= '0' && c <= '9') { + number = (number << 4) + c - '0'; + // Convert a-z to A-Z + } else if ((c &= 0xffdf) >= 
'A' && c <= 'F') { + number = (number << 4) + c - ('A' - 10); + } else if (i == start) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } else { + break; + } + if (number > Integer.MAX_VALUE) { + while (++i <= end + && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'))) { + } + return finishBigInteger(sql, tokenStart, end, i, start, i <= end && c == 'L', 16, tokens); + } + } while (++i <= end); + + boolean bigint = i <= end && c == 'L'; + if (bigint) { + i++; + } + if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + tokens.add(bigint ? new Token.BigintToken(start, number) : new Token.IntegerToken(start, (int) number)); + return i; + } + } + + private static int readNumeric(String sql, int tokenStart, int end, int i, char c, ArrayList tokens) { + long number = c - '0'; + for (; i <= end; i++) { + c = sql.charAt(i); + if (c < '0' || c > '9') { + switch (c) { + case '.': + return readNumeric(sql, tokenStart, end, i, c, false, false, tokens); + case 'E': + case 'e': + return readNumeric(sql, tokenStart, end, i, c, false, true, tokens); + case 'L': + case 'l': + return finishBigInteger(sql, tokenStart, end, i, tokenStart, true, 10, tokens); + } + break; + } + number = number * 10 + (c - '0'); + if (number > Integer.MAX_VALUE) { + return readNumeric(sql, tokenStart, end, i, c, true, false, tokens); + } + } + tokens.add(new Token.IntegerToken(tokenStart, (int) number)); + return i; + } + + private static int readNumeric(String sql, int tokenStart, int end, int i, char c, boolean integer, + boolean approximate, ArrayList tokens) { + if (!approximate) { + while (++i <= end) { + c = sql.charAt(i); + if (c == '.') { + integer = false; + } else if (c < '0' || c > '9') { + break; + } + } + } + if (i <= end && (c == 'E' || c == 'e')) { + integer = false; + approximate = true; + if (i == end) { + throw DbException.getSyntaxError(sql, 
tokenStart); + } + c = sql.charAt(++i); + if (c == '+' || c == '-') { + if (i == end) { + throw DbException.getSyntaxError(sql, tokenStart); + } + c = sql.charAt(++i); + } + if (c < '0' || c > '9') { + throw DbException.getSyntaxError(sql, tokenStart); + } + while (++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9') { + // go until the first non-number + } + } + if (integer) { + return finishBigInteger(sql, tokenStart, end, i, tokenStart, i < end && c == 'L' || c == 'l', 10, tokens); + } + BigDecimal bd; + String string = sql.substring(tokenStart, i); + try { + bd = new BigDecimal(string); + } catch (NumberFormatException e) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, string); + } + tokens.add(new Token.ValueToken(tokenStart, approximate ? ValueDecfloat.get(bd) : ValueNumeric.get(bd))); + return i; + } + + private static int finishBigInteger(String sql, int tokenStart, int end, int i, int start, boolean asBigint, + int radix, ArrayList tokens) { + int endIndex = i; + if (asBigint) { + i++; + } + if (radix == 16 && i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) { + throw DbException.getSyntaxError(sql, tokenStart, "Hex number"); + } + BigInteger bigInteger = new BigInteger(sql.substring(start, endIndex), radix); + Token token; + if (bigInteger.compareTo(ValueBigint.MAX_BI) > 0) { + if (asBigint) { + throw DbException.getSyntaxError(sql, tokenStart); + } + token = new Token.ValueToken(tokenStart, ValueNumeric.get(bigInteger)); + } else { + token = new Token.BigintToken(start, bigInteger.longValue()); + } + tokens.add(token); + return i; + } + + private static int skipBracketedComment(String sql, int tokenStart, int end, int i) { + i += 2; + for (int level = 1; level > 0;) { + for (;;) { + if (i >= end) { + throw DbException.getSyntaxError(sql, tokenStart); + } + char c = sql.charAt(i++); + if (c == '*') { + if (sql.charAt(i) == '/') { + level--; + i++; + break; + } + } else if (c == '/' && sql.charAt(i) == '*') { + level++; + 
i++; + } + } + } + return i; + } + + private static int skipSimpleComment(String sql, int end, int i) { + i += 2; + for (char c; i <= end && (c = sql.charAt(i)) != '\n' && c != '\r'; i++) { + // + } + return i; + } + + private static int parseParameterIndex(String sql, int end, int i, ArrayList tokens) { + int tokenStart = i; + long number = 0; + for (char c; ++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9';) { + number = number * 10 + (c - '0'); + if (number > Integer.MAX_VALUE) { + throw DbException.getInvalidValueException("parameter index", number); + } + } + if (i > tokenStart + 1 && number == 0) { + throw DbException.getInvalidValueException("parameter index", number); + } + tokens.add(new Token.ParameterToken(tokenStart, (int) number)); + return i; + } + + private static int assignParameterIndex(ArrayList tokens, int lastParameter) { + Token.ParameterToken parameter = (Token.ParameterToken) tokens.get(tokens.size() - 1); + if (parameter.index == 0) { + if (lastParameter < 0) { + throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + } + parameter.index = ++lastParameter; + } else if (lastParameter > 0) { + throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS); + } else { + lastParameter = -1; + } + return lastParameter; + } + + private static void processUescape(String sql, ArrayList tokens) { + ListIterator i = tokens.listIterator(); + while (i.hasNext()) { + Token token = i.next(); + if (token.needsUnicodeConversion()) { + int uescape = '\\'; + condition: if (i.hasNext()) { + Token t2 = i.next(); + if (t2.tokenType() == UESCAPE) { + i.remove(); + if (i.hasNext()) { + Token t3 = i.next(); + i.remove(); + if (t3 instanceof Token.CharacterStringToken) { + String s = ((Token.CharacterStringToken) t3).string; + if (s.codePointCount(0, s.length()) == 1) { + int escape = s.codePointAt(0); + if (!Character.isWhitespace(escape) && (escape < '0' || escape > '9') + && (escape < 'A' || escape > 'F') && (escape < 'a' || 
escape > 'f')) { + switch (escape) { + default: + uescape = escape; + break condition; + case '"': + case '\'': + case '+': + } + } + } + } + } + throw DbException.getSyntaxError(sql, t2.start() + 7, "''"); + } + } + token.convertUnicode(uescape); + } + } + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomain.java b/h2/src/main/org/h2/command/ddl/AlterDomain.java new file mode 100644 index 0000000000..4b96f6828d --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomain.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import java.util.function.BiPredicate; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.Table; + +/** + * The base class for ALTER DOMAIN commands. + */ +public abstract class AlterDomain extends SchemaOwnerCommand { + + /** + * Processes all columns and domains that use the specified domain. 
+ * + * @param session + * the session + * @param domain + * the domain to process + * @param columnProcessor + * column handler + * @param domainProcessor + * domain handler + * @param recompileExpressions + * whether processed expressions need to be recompiled + */ + public static void forAllDependencies(SessionLocal session, Domain domain, + BiPredicate columnProcessor, BiPredicate domainProcessor, + boolean recompileExpressions) { + Database db = session.getDatabase(); + for (Schema schema : db.getAllSchemasNoMeta()) { + for (Domain targetDomain : schema.getAllDomains()) { + if (targetDomain.getDomain() == domain) { + if (domainProcessor == null || domainProcessor.test(domain, targetDomain)) { + if (recompileExpressions) { + domain.prepareExpressions(session); + } + db.updateMeta(session, targetDomain); + } + } + } + for (Table t : schema.getAllTablesAndViews(null)) { + if (forTable(session, domain, columnProcessor, recompileExpressions, t)) { + db.updateMeta(session, t); + } + } + } + for (Table t : session.getLocalTempTables()) { + forTable(session, domain, columnProcessor, recompileExpressions, t); + } + } + + private static boolean forTable(SessionLocal session, Domain domain, BiPredicate columnProcessor, + boolean recompileExpressions, Table t) { + boolean modified = false; + for (Column targetColumn : t.getColumns()) { + if (targetColumn.getDomain() == domain) { + boolean m = columnProcessor == null || columnProcessor.test(domain, targetColumn); + if (m) { + if (recompileExpressions) { + targetColumn.prepareExpressions(session); + } + modified = true; + } + } + } + return modified; + } + + String domainName; + + boolean ifDomainExists; + + AlterDomain(SessionLocal session, Schema schema) { + super(session, schema); + } + + public final void setDomainName(String domainName) { + this.domainName = domainName; + } + + public final void setIfDomainExists(boolean b) { + ifDomainExists = b; + } + + @Override + final long update(Schema schema) { + Domain domain = 
getSchema().findDomain(domainName); + if (domain == null) { + if (ifDomainExists) { + return 0; + } + throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, domainName); + } + return update(schema, domain); + } + + abstract long update(Schema schema, Domain domain); + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java new file mode 100644 index 0000000000..d8b8bcef52 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement ALTER DOMAIN ADD CONSTRAINT + */ +public class AlterDomainAddConstraint extends AlterDomain { + + private String constraintName; + private Expression checkExpression; + private String comment; + private boolean checkExisting; + private final boolean ifNotExists; + + public AlterDomainAddConstraint(SessionLocal session, Schema schema, boolean ifNotExists) { + super(session, schema); + this.ifNotExists = ifNotExists; + } + + private String generateConstraintName(Domain domain) { + if (constraintName == null) { + constraintName = getSchema().getUniqueDomainConstraintName(session, domain); + } + return constraintName; + } + + @Override + long update(Schema schema, Domain domain) { + try { + return tryUpdate(schema, domain); + } finally { + getSchema().freeUniqueName(constraintName); + } + } + + /** + * Try to execute the statement. 
+ * + * @param schema the schema + * @param domain the domain + * @return the update count + */ + private int tryUpdate(Schema schema, Domain domain) { + if (constraintName != null && schema.findConstraint(session, constraintName) != null) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName); + } + Database db = session.getDatabase(); + db.lockMeta(session); + + int id = getObjectId(); + String name = generateConstraintName(domain); + ConstraintDomain constraint = new ConstraintDomain(schema, id, name, domain); + constraint.setExpression(session, checkExpression); + if (checkExisting) { + constraint.checkExistingData(session); + } + constraint.setComment(comment); + db.addSchemaObject(session, constraint); + domain.addConstraint(constraint); + return 0; + } + + public void setConstraintName(String constraintName) { + this.constraintName = constraintName; + } + + public String getConstraintName() { + return constraintName; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_ADD_CONSTRAINT; + } + + public void setCheckExpression(Expression expression) { + this.checkExpression = expression; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public void setCheckExisting(boolean b) { + this.checkExisting = b; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java new file mode 100644 index 0000000000..df9efaa5a8 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java @@ -0,0 +1,54 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement ALTER DOMAIN DROP CONSTRAINT + */ +public class AlterDomainDropConstraint extends AlterDomain { + + private String constraintName; + private final boolean ifConstraintExists; + + public AlterDomainDropConstraint(SessionLocal session, Schema schema, boolean ifConstraintExists) { + super(session, schema); + this.ifConstraintExists = ifConstraintExists; + } + + public void setConstraintName(String string) { + constraintName = string; + } + + @Override + long update(Schema schema, Domain domain) { + Constraint constraint = schema.findConstraint(session, constraintName); + if (constraint == null || constraint.getConstraintType() != Type.DOMAIN + || ((ConstraintDomain) constraint).getDomain() != domain) { + if (!ifConstraintExists) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); + } + } else { + session.getDatabase().removeSchemaObject(session, constraint); + } + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_DROP_CONSTRAINT; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java new file mode 100644 index 0000000000..a5d519e379 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java @@ -0,0 +1,92 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.ColumnTemplate; + +/** + * This class represents the statements + * ALTER DOMAIN SET DEFAULT + * ALTER DOMAIN DROP DEFAULT + * ALTER DOMAIN SET ON UPDATE + * ALTER DOMAIN DROP ON UPDATE + */ +public class AlterDomainExpressions extends AlterDomain { + + private final int type; + + private Expression expression; + + public AlterDomainExpressions(SessionLocal session, Schema schema, int type) { + super(session, schema); + this.type = type; + } + + public void setExpression(Expression expression) { + this.expression = expression; + } + + @Override + long update(Schema schema, Domain domain) { + switch (type) { + case CommandInterface.ALTER_DOMAIN_DEFAULT: + domain.setDefaultExpression(session, expression); + break; + case CommandInterface.ALTER_DOMAIN_ON_UPDATE: + domain.setOnUpdateExpression(session, expression); + break; + default: + throw DbException.getInternalError("type=" + type); + } + if (expression != null) { + forAllDependencies(session, domain, this::copyColumn, this::copyDomain, true); + } + session.getDatabase().updateMeta(session, domain); + return 0; + } + + private boolean copyColumn(Domain domain, Column targetColumn) { + return copyExpressions(session, domain, targetColumn); + } + + private boolean copyDomain(Domain domain, Domain targetDomain) { + return copyExpressions(session, domain, targetDomain); + } + + private boolean copyExpressions(SessionLocal session, Domain domain, ColumnTemplate targetColumn) { + switch (type) { + case CommandInterface.ALTER_DOMAIN_DEFAULT: { + Expression e = domain.getDefaultExpression(); + if (e != null && targetColumn.getDefaultExpression() == null) { + 
targetColumn.setDefaultExpression(session, e); + return true; + } + break; + } + case CommandInterface.ALTER_DOMAIN_ON_UPDATE: { + Expression e = domain.getOnUpdateExpression(); + if (e != null && targetColumn.getOnUpdateExpression() == null) { + targetColumn.setOnUpdateExpression(session, e); + return true; + } + } + } + return false; + } + + @Override + public int getType() { + return type; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainRename.java b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java new file mode 100644 index 0000000000..f0b65e9705 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java @@ -0,0 +1,52 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement + * ALTER DOMAIN RENAME + */ +public class AlterDomainRename extends AlterDomain { + + private String newDomainName; + + public AlterDomainRename(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setNewDomainName(String name) { + newDomainName = name; + } + + @Override + long update(Schema schema, Domain domain) { + Domain d = schema.findDomain(newDomainName); + if (d != null) { + if (domain != d) { + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, newDomainName); + } + if (newDomainName.equals(domain.getName())) { + return 0; + } + } + session.getDatabase().renameSchemaObject(session, domain, newDomainName); + forAllDependencies(session, domain, null, null, false); + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_DOMAIN_RENAME; + } + +} diff --git 
a/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java new file mode 100644 index 0000000000..3f4cfbad23 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; + +/** + * This class represents the statement + * ALTER DOMAIN RENAME CONSTRAINT + */ +public class AlterDomainRenameConstraint extends AlterDomain { + + private String constraintName; + private String newConstraintName; + + public AlterDomainRenameConstraint(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setConstraintName(String string) { + constraintName = string; + } + + public void setNewConstraintName(String newName) { + this.newConstraintName = newName; + } + + @Override + long update(Schema schema, Domain domain) { + Constraint constraint = getSchema().findConstraint(session, constraintName); + if (constraint == null || constraint.getConstraintType() != Type.DOMAIN + || ((ConstraintDomain) constraint).getDomain() != domain) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); + } + if (getSchema().findConstraint(session, newConstraintName) != null + || newConstraintName.equals(constraintName)) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, newConstraintName); + } + session.getDatabase().renameSchemaObject(session, constraint, newConstraintName); + return 0; + } + + @Override + 
public int getType() { + return CommandInterface.ALTER_DOMAIN_RENAME_CONSTRAINT; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java index e09bed3924..a09d820ce2 100644 --- a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -20,15 +20,25 @@ */ public class AlterIndexRename extends DefineCommand { - private Index oldIndex; + private boolean ifExists; + private Schema oldSchema; + private String oldIndexName; private String newIndexName; - public AlterIndexRename(Session session) { + public AlterIndexRename(SessionLocal session) { super(session); } - public void setOldIndex(Index index) { - oldIndex = index; + public void setIfExists(boolean b) { + ifExists = b; + } + + public void setOldSchema(Schema old) { + oldSchema = old; + } + + public void setOldName(String name) { + oldIndexName = name; } public void setNewName(String name) { @@ -36,16 +46,22 @@ public void setNewName(String name) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); - Schema schema = oldIndex.getSchema(); - if (schema.findIndex(session, newIndexName) != null || - newIndexName.equals(oldIndex.getName())) { + Index oldIndex = oldSchema.findIndex(session, oldIndexName); + if 
(oldIndex == null) { + if (!ifExists) { + throw DbException.get(ErrorCode.INDEX_NOT_FOUND_1, + newIndexName); + } + return 0; + } + if (oldSchema.findIndex(session, newIndexName) != null || + newIndexName.equals(oldIndexName)) { throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, newIndexName); } - session.getUser().checkRight(oldIndex.getTable(), Right.ALL); + session.getUser().checkTableRight(oldIndex.getTable(), Right.SCHEMA_OWNER); db.renameSchemaObject(session, oldIndex, newIndexName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java index 13ca821104..3ce0b0fb3b 100644 --- a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java @@ -1,20 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; -import java.util.ArrayList; - /** * This class represents the statement * ALTER SCHEMA RENAME @@ -24,7 +23,7 @@ public class AlterSchemaRename extends DefineCommand { private Schema oldSchema; private String newSchemaName; - public AlterSchemaRename(Session session) { + public AlterSchemaRename(SessionLocal session) { super(session); } @@ -37,23 +36,23 @@ public void setNewName(String name) { } @Override - public int update() { - session.commit(true); + public long update() { + session.getUser().checkSchemaAdmin(); Database db = session.getDatabase(); if (!oldSchema.canDrop()) { - throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, - oldSchema.getName()); + throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, oldSchema.getName()); } - if (db.findSchema(newSchemaName) != null || - newSchemaName.equals(oldSchema.getName())) { - throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, - newSchemaName); + if (db.findSchema(newSchemaName) != null || newSchemaName.equals(oldSchema.getName())) { + throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, newSchemaName); } - session.getUser().checkSchemaAdmin(); db.renameDatabaseObject(session, oldSchema, newSchemaName); - ArrayList all = db.getAllSchemaObjects(); - for (SchemaObject schemaObject : all) { - db.updateMeta(session, schemaObject); + ArrayList all = new ArrayList<>(); + for (Schema schema : db.getAllSchemas()) { + schema.getAll(all); + for (SchemaObject schemaObject : all) { + db.updateMeta(session, schemaObject); + } + all.clear(); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterSequence.java 
b/h2/src/main/org/h2/command/ddl/AlterSequence.java new file mode 100644 index 0000000000..706672a7c1 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterSequence.java @@ -0,0 +1,106 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.table.Column; + +/** + * This class represents the statement ALTER SEQUENCE. + */ +public class AlterSequence extends SchemaOwnerCommand { + + private boolean ifExists; + + private Column column; + + private Boolean always; + + private String sequenceName; + + private Sequence sequence; + + private SequenceOptions options; + + public AlterSequence(SessionLocal session, Schema schema) { + super(session, schema); + transactional = true; + } + + public void setIfExists(boolean b) { + ifExists = b; + } + + public void setSequenceName(String sequenceName) { + this.sequenceName = sequenceName; + } + + public void setOptions(SequenceOptions options) { + this.options = options; + } + + @Override + public boolean isTransactional() { + return true; + } + + /** + * Set the column + * + * @param column the column + * @param always whether value should be always generated, or null if "set + * generated is not specified + */ + public void setColumn(Column column, Boolean always) { + this.column = column; + this.always = always; + sequence = column.getSequence(); + if (sequence == null && !ifExists) { + throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, column.getTraceSQL()); + } + } + + @Override + long update(Schema schema) { + if (sequence == null) { + sequence = schema.findSequence(sequenceName); + if (sequence == null) { + if (!ifExists) 
{ + throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName); + } + return 0; + } + } + if (column != null) { + session.getUser().checkTableRight(column.getTable(), Right.SCHEMA_OWNER); + } + options.setDataType(sequence.getDataType()); + Long startValue = options.getStartValue(session); + sequence.modify( + options.getRestartValue(session, startValue != null ? startValue : sequence.getStartValue()), + startValue, + options.getMinValue(sequence, session), options.getMaxValue(sequence, session), + options.getIncrement(session), options.getCycle(), options.getCacheSize(session)); + sequence.flush(session); + if (column != null && always != null) { + column.setSequence(sequence, always); + session.getDatabase().updateMeta(session, column.getTable()); + } + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_SEQUENCE; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterTable.java b/h2/src/main/org/h2/command/ddl/AlterTable.java new file mode 100644 index 0000000000..2cfbd7ff85 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterTable.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.Table; + +/** + * The base class for ALTER TABLE commands. 
+ */ +public abstract class AlterTable extends SchemaCommand { + + String tableName; + + boolean ifTableExists; + + AlterTable(SessionLocal session, Schema schema) { + super(session, schema); + } + + public final void setTableName(String tableName) { + this.tableName = tableName; + } + + public final void setIfTableExists(boolean b) { + ifTableExists = b; + } + + @Override + public final long update() { + Table table = getSchema().findTableOrView(session, tableName); + if (table == null) { + if (ifTableExists) { + return 0; + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + return update(table); + } + + abstract long update(Table table); + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java index 1c74fdc000..05c425b2e0 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java @@ -1,23 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; -import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintActionType; import org.h2.constraint.ConstraintCheck; import org.h2.constraint.ConstraintReferential; import org.h2.constraint.ConstraintUnique; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.index.Index; import org.h2.index.IndexType; @@ -27,20 +27,20 @@ import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.New; +import org.h2.util.HasSQL; +import org.h2.value.DataType; /** * This class represents the statement * ALTER TABLE ADD CONSTRAINT */ -public class AlterTableAddConstraint extends SchemaCommand { +public class AlterTableAddConstraint extends AlterTable { - private int type; + private final int type; private String constraintName; - private String tableName; private IndexColumn[] indexColumns; - private int deleteAction; - private int updateAction; + private ConstraintActionType deleteAction = ConstraintActionType.RESTRICT; + private ConstraintActionType updateAction = ConstraintActionType.RESTRICT; private Schema refSchema; private String refTableName; private IndexColumn[] refIndexColumns; @@ -50,29 +50,38 @@ public class AlterTableAddConstraint extends SchemaCommand { private boolean checkExisting; private boolean primaryKeyHash; private final boolean ifNotExists; - private ArrayList createdIndexes = New.arrayList(); + private final ArrayList createdIndexes = new ArrayList<>(); + private ConstraintUnique createdUniqueConstraint; - public AlterTableAddConstraint(Session session, Schema schema, - boolean ifNotExists) { + public AlterTableAddConstraint(SessionLocal session, Schema schema, 
int type, boolean ifNotExists) { super(session, schema); this.ifNotExists = ifNotExists; + this.type = type; } private String generateConstraintName(Table table) { if (constraintName == null) { - constraintName = getSchema().getUniqueConstraintName( - session, table); + constraintName = getSchema().getUniqueConstraintName(session, table); } return constraintName; } @Override - public int update() { + public long update(Table table) { try { - return tryUpdate(); + return tryUpdate(table); } catch (DbException e) { - for (Index index : createdIndexes) { - session.getDatabase().removeSchemaObject(session, index); + try { + if (createdUniqueConstraint != null) { + Index index = createdUniqueConstraint.getIndex(); + session.getDatabase().removeSchemaObject(session, createdUniqueConstraint); + createdIndexes.remove(index); + } + for (Index index : createdIndexes) { + session.getDatabase().removeSchemaObject(session, index); + } + } catch (Throwable ex) { + e.addSuppressed(ex); } throw e; } finally { @@ -85,22 +94,25 @@ public int update() { * * @return the update count */ - private int tryUpdate() { - if (!transactional) { - session.commit(true); - } - Database db = session.getDatabase(); - Table table = getSchema().getTableOrView(session, tableName); - if (getSchema().findConstraint(session, constraintName) != null) { + private int tryUpdate(Table table) { + if (constraintName != null && getSchema().findConstraint(session, constraintName) != null) { if (ifNotExists) { return 0; } - throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, - constraintName); + /** + * 1.4.200 and older databases don't always have a unique constraint + * for each referential constraint, so these constraints are created + * and they may use the same generated name as some other not yet + * initialized constraint that may lead to a name conflict. 
+ */ + if (!session.isQuirksMode()) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName); + } + constraintName = null; } - session.getUser().checkRight(table, Right.ALL); + Database db = session.getDatabase(); db.lockMeta(session); - table.lock(session, true, true); + table.lock(session, Table.EXCLUSIVE_LOCK); Constraint constraint; switch (type) { case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY: { @@ -109,7 +121,7 @@ private int tryUpdate() { ArrayList constraints = table.getConstraints(); for (int i = 0; constraints != null && i < constraints.size(); i++) { Constraint c = constraints.get(i); - if (Constraint.PRIMARY_KEY.equals(c.getConstraintType())) { + if (Constraint.Type.PRIMARY_KEY == c.getConstraintType()) { throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); } } @@ -125,58 +137,57 @@ private int tryUpdate() { throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); } } - } - if (index == null) { + } else { IndexType indexType = IndexType.createPrimaryKey( table.isPersistIndexes(), primaryKeyHash); String indexName = table.getSchema().getUniqueIndexName( session, table, Constants.PREFIX_PRIMARY_KEY); - int id = getObjectId(); + int indexId = session.getDatabase().allocateObjectId(); try { - index = table.addIndex(session, indexName, id, - indexColumns, indexType, true, null); + index = table.addIndex(session, indexName, indexId, indexColumns, indexColumns.length, indexType, + true, null); } finally { getSchema().freeUniqueName(indexName); } } index.getIndexType().setBelongsToConstraint(true); - int constraintId = getObjectId(); + int id = getObjectId(); String name = generateConstraintName(table); ConstraintUnique pk = new ConstraintUnique(getSchema(), - constraintId, name, table, true); + id, name, table, true); pk.setColumns(indexColumns); pk.setIndex(index, true); constraint = pk; break; } - case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE: { - IndexColumn.mapColumns(indexColumns, table); - boolean isOwner = 
false; - if (index != null && canUseUniqueIndex(index, table, indexColumns)) { - isOwner = true; - index.getIndexType().setBelongsToConstraint(true); - } else { - index = getUniqueIndex(table, indexColumns); - if (index == null) { - index = createIndex(table, indexColumns, true); - isOwner = true; + case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE: + if (indexColumns == null) { + Column[] columns = table.getColumns(); + int columnCount = columns.length; + ArrayList list = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + Column c = columns[i]; + if (c.getVisible()) { + IndexColumn indexColumn = new IndexColumn(c.getName()); + indexColumn.column = c; + list.add(indexColumn); + } + } + if (list.isEmpty()) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "UNIQUE(VALUE) on table without columns"); } + indexColumns = list.toArray(new IndexColumn[0]); + } else { + IndexColumn.mapColumns(indexColumns, table); } - int id = getObjectId(); - String name = generateConstraintName(table); - ConstraintUnique unique = new ConstraintUnique(getSchema(), id, - name, table, false); - unique.setColumns(indexColumns); - unique.setIndex(index, isOwner); - constraint = unique; + constraint = createUniqueConstraint(table, index, indexColumns, false); break; - } case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK: { int id = getObjectId(); String name = generateConstraintName(table); ConstraintCheck check = new ConstraintCheck(getSchema(), id, name, table); - TableFilter filter = new TableFilter(session, table, null, false, null); - checkExpression.mapColumns(filter, 0); + TableFilter filter = new TableFilter(session, table, null, false, null, 0, null); + checkExpression.mapColumns(filter, 0, Expression.MAP_INITIAL); checkExpression = checkExpression.optimize(session); check.setExpression(checkExpression); check.setTableFilter(filter); @@ -187,90 +198,154 @@ private int tryUpdate() { break; } case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL: 
{ - Table refTable = refSchema.getTableOrView(session, refTableName); - session.getUser().checkRight(refTable, Right.ALL); + Table refTable = refSchema.resolveTableOrView(session, refTableName); + if (refTable == null) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, refTableName); + } + if (refTable != table) { + session.getUser().checkTableRight(refTable, Right.SCHEMA_OWNER); + } if (!refTable.canReference()) { - throw DbException.getUnsupportedException("Reference " + - refTable.getSQL()); + StringBuilder builder = new StringBuilder("Reference "); + refTable.getSQL(builder, HasSQL.TRACE_SQL_FLAGS); + throw DbException.getUnsupportedException(builder.toString()); } boolean isOwner = false; IndexColumn.mapColumns(indexColumns, table); - if (index != null && canUseIndex(index, table, indexColumns, false)) { - isOwner = true; - index.getIndexType().setBelongsToConstraint(true); - } else { - if (db.isStarting()) { - // before version 1.3.176, an existing index was used: - // must do the same to avoid - // Unique index or primary key violation: - // "PRIMARY KEY ON """".PAGE_INDEX" - index = getIndex(table, indexColumns, true); - } else { - index = getIndex(table, indexColumns, false); - } - if (index == null) { - index = createIndex(table, indexColumns, false); - isOwner = true; - } - } if (refIndexColumns == null) { - Index refIdx = refTable.getPrimaryKey(); - refIndexColumns = refIdx.getIndexColumns(); + refIndexColumns = refTable.getPrimaryKey().getIndexColumns(); } else { IndexColumn.mapColumns(refIndexColumns, refTable); } - if (refIndexColumns.length != indexColumns.length) { + int columnCount = indexColumns.length; + if (refIndexColumns.length != columnCount) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } - boolean isRefOwner = false; - if (refIndex != null && refIndex.getTable() == refTable && - canUseIndex(refIndex, refTable, refIndexColumns, false)) { - isRefOwner = true; - 
refIndex.getIndexType().setBelongsToConstraint(true); - } else { - refIndex = null; + for (IndexColumn indexColumn : indexColumns) { + Column column = indexColumn.column; + if (column.isGeneratedAlways()) { + switch (deleteAction) { + case SET_DEFAULT: + case SET_NULL: + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2, + column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(), + "ON DELETE " + deleteAction.getSqlName()); + default: + // All other actions are allowed + } + switch (updateAction) { + case CASCADE: + case SET_DEFAULT: + case SET_NULL: + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2, + column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(), + "ON UPDATE " + updateAction.getSqlName()); + default: + // All other actions are allowed + } + } } - if (refIndex == null) { - refIndex = getIndex(refTable, refIndexColumns, false); - if (refIndex == null) { - refIndex = createIndex(refTable, refIndexColumns, true); - isRefOwner = true; + for (int i = 0; i < columnCount; i++) { + Column column1 = indexColumns[i].column, column2 = refIndexColumns[i].column; + if (!DataType.areStableComparable(column1.getType(), column2.getType())) { + throw DbException.get(ErrorCode.UNCOMPARABLE_REFERENCED_COLUMN_2, column1.getCreateSQL(), + column2.getCreateSQL()); + } + } + ConstraintUnique unique = getUniqueConstraint(refTable, refIndexColumns); + if (unique == null && !session.isQuirksMode() + && !session.getMode().createUniqueConstraintForReferencedColumns) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, IndexColumn.writeColumns( + new StringBuilder("PRIMARY KEY | UNIQUE ("), refIndexColumns, HasSQL.TRACE_SQL_FLAGS) + .append(')').toString()); + } + if (index != null && canUseIndex(index, table, indexColumns, false)) { + isOwner = true; + index.getIndexType().setBelongsToConstraint(true); + } else { + index = getIndex(table, indexColumns, 
false); + if (index == null) { + index = createIndex(table, indexColumns, false); + isOwner = true; } } int id = getObjectId(); String name = generateConstraintName(table); - ConstraintReferential ref = new ConstraintReferential(getSchema(), + ConstraintReferential refConstraint = new ConstraintReferential(getSchema(), id, name, table); - ref.setColumns(indexColumns); - ref.setIndex(index, isOwner); - ref.setRefTable(refTable); - ref.setRefColumns(refIndexColumns); - ref.setRefIndex(refIndex, isRefOwner); + refConstraint.setColumns(indexColumns); + refConstraint.setIndex(index, isOwner); + refConstraint.setRefTable(refTable); + refConstraint.setRefColumns(refIndexColumns); + if (unique == null) { + unique = createUniqueConstraint(refTable, refIndex, refIndexColumns, true); + addConstraintToTable(db, refTable, unique); + createdUniqueConstraint = unique; + } + refConstraint.setRefConstraint(unique); if (checkExisting) { - ref.checkExistingData(session); + refConstraint.checkExistingData(session); } - constraint = ref; - refTable.addConstraint(constraint); - ref.setDeleteAction(deleteAction); - ref.setUpdateAction(updateAction); + refTable.addConstraint(refConstraint); + refConstraint.setDeleteAction(deleteAction); + refConstraint.setUpdateAction(updateAction); + constraint = refConstraint; break; } default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } // parent relationship is already set with addConstraint constraint.setComment(comment); + addConstraintToTable(db, table, constraint); + return 0; + } + + private ConstraintUnique createUniqueConstraint(Table table, Index index, IndexColumn[] indexColumns, + boolean forForeignKey) { + boolean isOwner = false; + if (index != null && canUseIndex(index, table, indexColumns, true)) { + isOwner = true; + index.getIndexType().setBelongsToConstraint(true); + } else { + index = getIndex(table, indexColumns, true); + if (index == null) { + index = 
createIndex(table, indexColumns, true); + isOwner = true; + } + } + int id; + String name; + Schema tableSchema = table.getSchema(); + if (forForeignKey) { + id = session.getDatabase().allocateObjectId(); + try { + tableSchema.reserveUniqueName(constraintName); + name = tableSchema.getUniqueConstraintName(session, table); + } finally { + tableSchema.freeUniqueName(constraintName); + } + } else { + id = getObjectId(); + name = generateConstraintName(table); + } + ConstraintUnique unique = new ConstraintUnique(tableSchema, id, name, table, false); + unique.setColumns(indexColumns); + unique.setIndex(index, isOwner); + return unique; + } + + private void addConstraintToTable(Database db, Table table, Constraint constraint) { if (table.isTemporary() && !table.isGlobalTemporary()) { session.addLocalTempTableConstraint(constraint); } else { db.addSchemaObject(session, constraint); } table.addConstraint(constraint); - return 0; } private Index createIndex(Table t, IndexColumn[] cols, boolean unique) { - int indexId = getObjectId(); + int indexId = session.getDatabase().allocateObjectId(); IndexType indexType; if (unique) { // for unique constraints @@ -284,8 +359,8 @@ private Index createIndex(Table t, IndexColumn[] cols, boolean unique) { String indexName = t.getSchema().getUniqueIndexName(session, t, prefix + "_INDEX_"); try { - Index index = t.addIndex(session, indexName, indexId, cols, - indexType, true, null); + Index index = t.addIndex(session, indexName, indexId, cols, unique ? 
cols.length : 0, indexType, true, + null); createdIndexes.add(index); return index; } finally { @@ -293,87 +368,66 @@ private Index createIndex(Table t, IndexColumn[] cols, boolean unique) { } } - public void setDeleteAction(int action) { + public void setDeleteAction(ConstraintActionType action) { this.deleteAction = action; } - public void setUpdateAction(int action) { + public void setUpdateAction(ConstraintActionType action) { this.updateAction = action; } - private static Index getUniqueIndex(Table t, IndexColumn[] cols) { - for (Index idx : t.getIndexes()) { - if (canUseUniqueIndex(idx, t, cols)) { - return idx; - } - } - return null; - } - - private static Index getIndex(Table t, IndexColumn[] cols, boolean moreColumnOk) { - for (Index idx : t.getIndexes()) { - if (canUseIndex(idx, t, cols, moreColumnOk)) { - return idx; + private static ConstraintUnique getUniqueConstraint(Table t, IndexColumn[] cols) { + ArrayList constraints = t.getConstraints(); + if (constraints != null) { + for (Constraint constraint : constraints) { + if (constraint.getTable() == t) { + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.PRIMARY_KEY || constraintType == Constraint.Type.UNIQUE) { + if (canUseIndex(constraint.getIndex(), t, cols, true)) { + return (ConstraintUnique) constraint; + } + } + } } } return null; } - private static boolean canUseUniqueIndex(Index idx, Table table, - IndexColumn[] cols) { - if (idx.getTable() != table || !idx.getIndexType().isUnique()) { - return false; - } - Column[] indexCols = idx.getColumns(); - if (indexCols.length > cols.length) { - return false; - } - HashSet set = New.hashSet(); - for (IndexColumn c : cols) { - set.add(c.column); - } - for (Column c : indexCols) { - // all columns of the index must be part of the list, - // but not all columns of the list need to be part of the index - if (!set.contains(c)) { - return false; + private static Index getIndex(Table t, IndexColumn[] cols, 
boolean unique) { + ArrayList indexes = t.getIndexes(); + Index index = null; + if (indexes != null) { + for (Index idx : indexes) { + if (canUseIndex(idx, t, cols, unique)) { + if (index == null || idx.getIndexColumns().length < index.getIndexColumns().length) { + index = idx; + } + } } } - return true; + return index; } - private static boolean canUseIndex(Index existingIndex, Table table, - IndexColumn[] cols, boolean moreColumnsOk) { - if (existingIndex.getTable() != table || existingIndex.getCreateSQL() == null) { - // can't use the scan index or index of another table + private static boolean canUseIndex(Index index, Table table, IndexColumn[] cols, boolean unique) { + if (index.getTable() != table) { return false; } - Column[] indexCols = existingIndex.getColumns(); - - if (moreColumnsOk) { - if (indexCols.length < cols.length) { + int allowedColumns; + if (unique) { + allowedColumns = index.getUniqueColumnCount(); + if (allowedColumns != cols.length) { return false; } - for (IndexColumn col : cols) { - // all columns of the list must be part of the index, - // but not all columns of the index need to be part of the list - // holes are not allowed (index=a,b,c & list=a,b is ok; - // but list=a,c is not) - int idx = existingIndex.getColumnIndex(col.column); - if (idx < 0 || idx >= cols.length) { - return false; - } - } } else { - if (indexCols.length != cols.length) { + if (index.getCreateSQL() == null || (allowedColumns = index.getColumns().length) != cols.length) { return false; } - for (IndexColumn col : cols) { - // all columns of the list must be part of the index - int idx = existingIndex.getColumnIndex(col.column); - if (idx < 0) { - return false; - } + } + for (IndexColumn col : cols) { + // all columns of the list must be part of the index + int i = index.getColumnIndex(col.column); + if (i < 0 || i >= allowedColumns) { + return false; } } return true; @@ -383,8 +437,8 @@ public void setConstraintName(String constraintName) { this.constraintName = 
constraintName; } - public void setType(int type) { - this.type = type; + public String getConstraintName() { + return constraintName; } @Override @@ -396,10 +450,6 @@ public void setCheckExpression(Expression expression) { this.checkExpression = expression; } - public void setTableName(String tableName) { - this.tableName = tableName; - } - public void setIndexColumns(IndexColumn[] indexColumns) { this.indexColumns = indexColumns; } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java index 90f91965cc..ebb8baa2ef 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java @@ -1,75 +1,102 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; import java.util.HashSet; - import org.h2.api.ErrorCode; +import org.h2.command.CommandContainer; import org.h2.command.CommandInterface; import org.h2.command.Parser; import org.h2.command.Prepared; import org.h2.constraint.Constraint; import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintUnique; +import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.message.DbException; import org.h2.result.ResultInterface; +import org.h2.result.SearchRow; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.schema.TriggerObject; import org.h2.table.Column; import org.h2.table.Table; +import org.h2.table.TableBase; import org.h2.table.TableView; -import org.h2.util.New; +import org.h2.util.HasSQL; +import org.h2.util.Utils; /** * This class represents the statements * ALTER TABLE ADD, * ALTER TABLE ADD IF NOT EXISTS, * ALTER TABLE ALTER COLUMN, - * ALTER TABLE ALTER COLUMN RESTART, * ALTER TABLE ALTER COLUMN SELECTIVITY, * ALTER TABLE ALTER COLUMN SET DEFAULT, - * ALTER TABLE ALTER COLUMN SET NOT NULL, + * ALTER TABLE ALTER COLUMN DROP DEFAULT, + * ALTER TABLE ALTER COLUMN DROP EXPRESSION, * ALTER TABLE ALTER COLUMN SET NULL, + * ALTER TABLE ALTER COLUMN DROP NULL, + * ALTER TABLE ALTER COLUMN SET VISIBLE, + * ALTER TABLE ALTER COLUMN SET INVISIBLE, * ALTER TABLE DROP COLUMN */ -public class AlterTableAlterColumn extends SchemaCommand { +public class AlterTableAlterColumn extends CommandWithColumns { - private Table table; + private String tableName; private Column oldColumn; private Column newColumn; private int type; + /** 
+ * Default or on update expression. + */ private Expression defaultExpression; private Expression newSelectivity; + private Expression usingExpression; + private boolean addFirst; private String addBefore; private String addAfter; + private boolean ifTableExists; private boolean ifNotExists; private ArrayList columnsToAdd; + private ArrayList columnsToRemove; + private boolean booleanFlag; - public AlterTableAlterColumn(Session session, Schema schema) { + public AlterTableAlterColumn(SessionLocal session, Schema schema) { super(session, schema); } - public void setTable(Table table) { - this.table = table; + public void setIfTableExists(boolean b) { + ifTableExists = b; + } + + public void setTableName(String tableName) { + this.tableName = tableName; } public void setOldColumn(Column oldColumn) { this.oldColumn = oldColumn; } + /** + * Add the column as the first column of the table. + */ + public void setAddFirst() { + addFirst = true; + } + public void setAddBefore(String before) { this.addBefore = before; } @@ -79,136 +106,219 @@ public void setAddAfter(String after) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); - session.getUser().checkRight(table, Right.ALL); + Table table = getSchema().resolveTableOrView(session, tableName); + if (table == null) { + if (ifTableExists) { + return 0; + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); table.checkSupportAlter(); - table.lock(session, true, true); - Sequence sequence = oldColumn == null ? 
null : oldColumn.getSequence(); + table.lock(session, Table.EXCLUSIVE_LOCK); if (newColumn != null) { - checkDefaultReferencesTable(newColumn.getDefaultExpression()); + checkDefaultReferencesTable(table, newColumn.getDefaultExpression()); + checkClustering(newColumn); } if (columnsToAdd != null) { for (Column column : columnsToAdd) { - checkDefaultReferencesTable(column.getDefaultExpression()); + checkDefaultReferencesTable(table, column.getDefaultExpression()); + checkClustering(column); } } switch (type) { case CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL: { - if (!oldColumn.isNullable()) { + if (oldColumn == null || !oldColumn.isNullable()) { // no change break; } - checkNoNullValues(); + checkNoNullValues(table); oldColumn.setNullable(false); db.updateMeta(session, table); break; } - case CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL: { - if (oldColumn.isNullable()) { + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL: { + if (oldColumn == null || oldColumn.isNullable()) { // no change break; } - checkNullable(); + checkNullable(table); oldColumn.setNullable(true); db.updateMeta(session, table); break; } - case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT: { - checkDefaultReferencesTable(defaultExpression); - oldColumn.setSequence(null); - oldColumn.setDefaultExpression(session, defaultExpression); - removeSequence(sequence); + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT: + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION: { + if (oldColumn == null) { + break; + } + if (oldColumn.isIdentity()) { + break; + } + if (defaultExpression != null) { + if (oldColumn.isGenerated()) { + break; + } + checkDefaultReferencesTable(table, defaultExpression); + oldColumn.setDefaultExpression(session, defaultExpression); + } else { + if (type == CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION != oldColumn.isGenerated()) { + break; + } + oldColumn.setDefaultExpression(session, null); + } + db.updateMeta(session, table); 
+ break; + } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY: { + if (oldColumn == null) { + break; + } + Sequence sequence = oldColumn.getSequence(); + if (sequence == null) { + break; + } + oldColumn.setSequence(null, false); + removeSequence(table, sequence); + db.updateMeta(session, table); + break; + } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE: { + if (oldColumn == null) { + break; + } + if (defaultExpression != null) { + if (oldColumn.isIdentity() || oldColumn.isGenerated()) { + break; + } + checkDefaultReferencesTable(table, defaultExpression); + oldColumn.setOnUpdateExpression(session, defaultExpression); + } else { + oldColumn.setOnUpdateExpression(session, null); + } db.updateMeta(session, table); break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: { + if (oldColumn == null) { + break; + } // if the change is only increasing the precision, then we don't // need to copy the table because the length is only a constraint, // and does not affect the storage structure. 
- if (oldColumn.isWideningConversion(newColumn)) { - convertAutoIncrementColumn(newColumn); + if (oldColumn.isWideningConversion(newColumn) && usingExpression == null) { + convertIdentityColumn(table, newColumn); oldColumn.copy(newColumn); db.updateMeta(session, table); } else { - oldColumn.setSequence(null); + oldColumn.setSequence(null, false); oldColumn.setDefaultExpression(session, null); - oldColumn.setConvertNullToDefault(false); if (oldColumn.isNullable() && !newColumn.isNullable()) { - checkNoNullValues(); + checkNoNullValues(table); } else if (!oldColumn.isNullable() && newColumn.isNullable()) { - checkNullable(); + checkNullable(table); } - convertAutoIncrementColumn(newColumn); - copyData(); + if (oldColumn.getVisible() ^ newColumn.getVisible()) { + oldColumn.setVisible(newColumn.getVisible()); + } + convertIdentityColumn(table, newColumn); + copyData(table, null, true); } + table.setModified(); break; } case CommandInterface.ALTER_TABLE_ADD_COLUMN: { // ifNotExists only supported for single column add - if (ifNotExists && columnsToAdd.size() == 1 && + if (ifNotExists && columnsToAdd != null && columnsToAdd.size() == 1 && table.doesColumnExist(columnsToAdd.get(0).getName())) { break; } - for (Column column : columnsToAdd) { - if (column.isAutoIncrement()) { - int objId = getObjectId(); - column.convertAutoIncrementToSequence(session, getSchema(), objId, - table.isTemporary()); - } + ArrayList sequences = generateSequences(columnsToAdd, false); + if (columnsToAdd != null) { + changePrimaryKeysToNotNull(columnsToAdd); } - copyData(); + copyData(table, sequences, true); break; } case CommandInterface.ALTER_TABLE_DROP_COLUMN: { - if (table.getColumns().length == 1) { - throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN, - oldColumn.getSQL()); + if (table.getColumns().length - columnsToRemove.size() < 1) { + throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN, columnsToRemove.get(0).getTraceSQL()); } - 
table.dropSingleColumnConstraintsAndIndexes(session, oldColumn); - copyData(); + table.dropMultipleColumnsConstraintsAndIndexes(session, columnsToRemove); + copyData(table, null, false); break; } case CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY: { + if (oldColumn == null) { + break; + } int value = newSelectivity.optimize(session).getValue(session).getInt(); oldColumn.setSelectivity(value); db.updateMeta(session, table); break; } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY: + if (oldColumn == null) { + break; + } + if (oldColumn.getVisible() != booleanFlag) { + oldColumn.setVisible(booleanFlag); + table.setModified(); + db.updateMeta(session, table); + } + break; + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL: + if (oldColumn == null) { + break; + } + if (oldColumn.isDefaultOnNull() != booleanFlag) { + oldColumn.setDefaultOnNull(booleanFlag); + table.setModified(); + db.updateMeta(session, table); + } + break; default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } return 0; } - private void checkDefaultReferencesTable(Expression defaultExpression) { + private static void checkDefaultReferencesTable(Table table, Expression defaultExpression) { if (defaultExpression == null) { return; } - HashSet dependencies = New.hashSet(); + HashSet dependencies = new HashSet<>(); ExpressionVisitor visitor = ExpressionVisitor .getDependenciesVisitor(dependencies); defaultExpression.isEverything(visitor); if (dependencies.contains(table)) { - throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, - defaultExpression.getSQL()); + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, defaultExpression.getTraceSQL()); } } - private void convertAutoIncrementColumn(Column c) { - if (c.isAutoIncrement()) { + private void checkClustering(Column c) { + if (!Constants.CLUSTERING_DISABLED + .equals(session.getDatabase().getCluster()) + && c.hasIdentityOptions()) { + throw 
DbException.getUnsupportedException( + "CLUSTERING && identity columns"); + } + } + + private void convertIdentityColumn(Table table, Column c) { + if (c.hasIdentityOptions()) { if (c.isPrimaryKey()) { - c.setOriginalSQL("IDENTITY"); - } else { - int objId = getObjectId(); - c.convertAutoIncrementToSequence(session, getSchema(), objId, - table.isTemporary()); + addConstraintCommand( + Parser.newPrimaryKeyConstraintCommand(session, table.getSchema(), table.getName(), c)); } + int objId = getObjectId(); + c.initializeSequence(session, getSchema(), objId, table.isTemporary()); } } - private void removeSequence(Sequence sequence) { + private void removeSequence(Table table, Sequence sequence) { if (sequence != null) { table.removeSequence(sequence); sequence.setBelongsToTable(false); @@ -217,7 +327,7 @@ private void removeSequence(Sequence sequence) { } } - private void copyData() { + private void copyData(Table table, ArrayList sequences, boolean createConstraints) { if (table.isTemporary()) { throw DbException.getUnsupportedException("TEMP TABLE"); } @@ -225,25 +335,31 @@ private void copyData() { String baseName = table.getName(); String tempName = db.getTempTableName(baseName, session); Column[] columns = table.getColumns(); - ArrayList newColumns = New.arrayList(); - Table newTable = cloneTableStructure(columns, db, tempName, newColumns); + ArrayList newColumns = new ArrayList<>(columns.length); + Table newTable = cloneTableStructure(table, columns, db, tempName, newColumns); + if (sequences != null) { + for (Sequence sequence : sequences) { + table.addSequence(sequence); + } + } try { // check if a view would become invalid // (because the column to drop is referenced or so) checkViews(table, newTable); } catch (DbException e) { - execute("DROP TABLE " + newTable.getName(), true); - throw DbException.get(ErrorCode.VIEW_IS_INVALID_2, e, getSQL(), e.getMessage()); + StringBuilder builder = new StringBuilder("DROP TABLE "); + newTable.getSQL(builder, 
HasSQL.DEFAULT_SQL_FLAGS); + execute(builder.toString()); + throw e; } String tableName = table.getName(); - ArrayList views = table.getViews(); - if (views != null) { - views = New.arrayList(views); - for (TableView view : views) { - table.removeView(view); - } + ArrayList dependentViews = new ArrayList<>(table.getDependentViews()); + for (TableView view : dependentViews) { + table.removeDependentView(view); } - execute("DROP TABLE " + table.getSQL() + " IGNORE", true); + StringBuilder builder = new StringBuilder("DROP TABLE "); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" IGNORE"); + execute(builder.toString()); db.renameSchemaObject(session, newTable, tableName); for (DbObject child : newTable.getChildren()) { if (child instanceof Sequence) { @@ -268,38 +384,56 @@ private void copyData() { db.renameSchemaObject(session, so, name); } } - if (views != null) { - for (TableView view : views) { - String sql = view.getCreateSQL(true, true); - execute(sql, true); - } + if (createConstraints) { + createConstraints(); + } + for (TableView view : dependentViews) { + String sql = view.getCreateSQL(true, true); + execute(sql); } } - private Table cloneTableStructure(Column[] columns, Database db, + private Table cloneTableStructure(Table table, Column[] columns, Database db, String tempName, ArrayList newColumns) { for (Column col : columns) { newColumns.add(col.getClone()); } - if (type == CommandInterface.ALTER_TABLE_DROP_COLUMN) { - int position = oldColumn.getColumnId(); - newColumns.remove(position); - } else if (type == CommandInterface.ALTER_TABLE_ADD_COLUMN) { + switch (type) { + case CommandInterface.ALTER_TABLE_DROP_COLUMN: + for (Column removeCol : columnsToRemove) { + Column foundCol = null; + for (Column newCol : newColumns) { + if (newCol.getName().equals(removeCol.getName())) { + foundCol = newCol; + break; + } + } + if (foundCol == null) { + throw DbException.getInternalError(removeCol.getCreateSQL()); + } + newColumns.remove(foundCol); + } + 
break; + case CommandInterface.ALTER_TABLE_ADD_COLUMN: { int position; - if (addBefore != null) { + if (addFirst) { + position = 0; + } else if (addBefore != null) { position = table.getColumn(addBefore).getColumnId(); } else if (addAfter != null) { position = table.getColumn(addAfter).getColumnId() + 1; } else { position = columns.length; } - for (Column column : columnsToAdd) { - newColumns.add(position++, column); + if (columnsToAdd != null) { + for (Column column : columnsToAdd) { + newColumns.add(position++, column); + } } - } else if (type == CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE) { - int position = oldColumn.getColumnId(); - newColumns.remove(position); - newColumns.add(position, newColumn); + break; + } + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: + newColumns.set(oldColumn.getColumnId(), newColumn); } // create a table object in order to get the SQL statement @@ -316,41 +450,44 @@ private Table cloneTableStructure(Column[] columns, Database db, data.persistData = table.isPersistData(); data.persistIndexes = table.isPersistIndexes(); data.isHidden = table.isHidden(); - data.create = true; data.session = session; Table newTable = getSchema().createTable(data); newTable.setComment(table.getComment()); - StringBuilder buff = new StringBuilder(); - buff.append(newTable.getCreateSQL()); - StringBuilder columnList = new StringBuilder(); + String newTableSQL = newTable.getCreateSQLForMeta(); + StringBuilder columnNames = new StringBuilder(); + StringBuilder columnValues = new StringBuilder(); for (Column nc : newColumns) { - if (columnList.length() > 0) { - columnList.append(", "); + if (nc.isGenerated()) { + continue; } - if (type == CommandInterface.ALTER_TABLE_ADD_COLUMN && - columnsToAdd.contains(nc)) { - Expression def = nc.getDefaultExpression(); - columnList.append(def == null ? 
"NULL" : def.getSQL()); - } else { - columnList.append(nc.getSQL()); + switch (type) { + case CommandInterface.ALTER_TABLE_ADD_COLUMN: + if (columnsToAdd != null && columnsToAdd.contains(nc)) { + if (usingExpression != null) { + usingExpression.getUnenclosedSQL(addColumn(nc, columnNames, columnValues), + HasSQL.DEFAULT_SQL_FLAGS); + } + continue; + } + break; + case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: + if (nc.equals(newColumn) && usingExpression != null) { + usingExpression.getUnenclosedSQL(addColumn(nc, columnNames, columnValues), + HasSQL.DEFAULT_SQL_FLAGS); + continue; + } } + nc.getSQL(addColumn(nc, columnNames, columnValues), HasSQL.DEFAULT_SQL_FLAGS); } - buff.append(" AS SELECT "); - if (columnList.length() == 0) { - // special case: insert into test select * from - buff.append('*'); - } else { - buff.append(columnList); - } - buff.append(" FROM ").append(table.getSQL()); - String newTableSQL = buff.toString(); String newTableName = newTable.getName(); Schema newTableSchema = newTable.getSchema(); newTable.removeChildrenAndResources(session); - execute(newTableSQL, true); + execute(newTableSQL); newTable = newTableSchema.getTableOrView(session, newTableName); - ArrayList triggers = New.arrayList(); + ArrayList children = Utils.newSmallArrayList(); + ArrayList triggers = Utils.newSmallArrayList(); + boolean hasDelegateIndex = false; for (DbObject child : table.getChildren()) { if (child instanceof Sequence) { continue; @@ -367,9 +504,9 @@ private Table cloneTableStructure(Column[] columns, Database db, if (child instanceof TableView) { continue; } else if (child.getType() == DbObject.TABLE_OR_VIEW) { - DbException.throwInternalError(); + throw DbException.getInternalError(); } - String quotedName = Parser.quoteIdentifier(tempName + "_" + child.getName()); + String quotedName = Parser.quoteIdentifier(tempName + "_" + child.getName(), HasSQL.DEFAULT_SQL_FLAGS); String sql = null; if (child instanceof ConstraintReferential) { 
ConstraintReferential r = (ConstraintReferential) child; @@ -384,10 +521,51 @@ private Table cloneTableStructure(Column[] columns, Database db, if (child instanceof TriggerObject) { triggers.add(sql); } else { - execute(sql, true); + if (!hasDelegateIndex) { + Index index = null; + if (child instanceof ConstraintUnique) { + ConstraintUnique constraint = (ConstraintUnique) child; + if (constraint.getConstraintType() == Constraint.Type.PRIMARY_KEY) { + index = constraint.getIndex(); + } + } else if (child instanceof Index) { + index = (Index) child; + } + if (index != null + && TableBase.getMainIndexColumn(index.getIndexType(), index.getIndexColumns()) + != SearchRow.ROWID_INDEX) { + execute(sql); + hasDelegateIndex = true; + continue; + } + } + children.add(sql); } } } + StringBuilder builder = newTable.getSQL(new StringBuilder(128).append("INSERT INTO "), // + HasSQL.DEFAULT_SQL_FLAGS) + .append('(').append(columnNames).append(") OVERRIDING SYSTEM VALUE SELECT "); + if (columnValues.length() == 0) { + // special case: insert into test select * from + builder.append('*'); + } else { + builder.append(columnValues); + } + table.getSQL(builder.append(" FROM "), HasSQL.DEFAULT_SQL_FLAGS); + try { + execute(builder.toString()); + } catch (Throwable t) { + // data was not inserted due to data conversion error or some + // unexpected reason + builder = new StringBuilder("DROP TABLE "); + newTable.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + execute(builder.toString()); + throw t; + } + for (String sql : children) { + execute(sql); + } table.setModified(); // remove the sequences from the columns (except dropped columns) // otherwise the sequence is dropped if the table is dropped @@ -395,15 +573,26 @@ private Table cloneTableStructure(Column[] columns, Database db, Sequence seq = col.getSequence(); if (seq != null) { table.removeSequence(seq); - col.setSequence(null); + col.setSequence(null, false); } } for (String sql : triggers) { - execute(sql, true); + execute(sql); } 
return newTable; } + private static StringBuilder addColumn(Column column, StringBuilder columnNames, StringBuilder columnValues) { + if (columnNames.length() > 0) { + columnNames.append(", "); + } + column.getSQL(columnNames, HasSQL.DEFAULT_SQL_FLAGS); + if (columnValues.length() > 0) { + columnValues.append(", "); + } + return columnValues; + } + /** * Check that all views and other dependent objects. */ @@ -440,44 +629,47 @@ private void checkViewsAreValid(DbObject tableOrView) { // check if the query is still valid // do not execute, not even with limit 1, because that could // have side effects or take a very long time - session.prepare(sql); + try { + session.prepare(sql); + } catch (DbException e) { + throw DbException.get(ErrorCode.COLUMN_IS_REFERENCED_1, e, view.getTraceSQL()); + } checkViewsAreValid(view); } } } - private void execute(String sql, boolean ddl) { + private void execute(String sql) { Prepared command = session.prepare(sql); - command.update(); - if (ddl) { - session.commit(true); - } + CommandContainer commandContainer = new CommandContainer(session, sql, command); + commandContainer.executeUpdate(null); } - private void checkNullable() { + private void checkNullable(Table table) { + if (oldColumn.isIdentity()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, oldColumn.getName()); + } for (Index index : table.getIndexes()) { if (index.getColumnIndex(oldColumn) < 0) { continue; } IndexType indexType = index.getIndexType(); - if (indexType.isPrimaryKey() || indexType.isHash()) { - throw DbException.get( - ErrorCode.COLUMN_IS_PART_OF_INDEX_1, index.getSQL()); + if (indexType.isPrimaryKey()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, oldColumn.getName()); } } } - private void checkNoNullValues() { - String sql = "SELECT COUNT(*) FROM " + - table.getSQL() + " WHERE " + - oldColumn.getSQL() + " IS NULL"; + private void checkNoNullValues(Table table) { + StringBuilder builder = new StringBuilder("SELECT 
COUNT(*) FROM "); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" WHERE "); + oldColumn.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" IS NULL"); + String sql = builder.toString(); Prepared command = session.prepare(sql); ResultInterface result = command.query(0); result.next(); if (result.currentRow()[0].getInt() > 0) { - throw DbException.get( - ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, - oldColumn.getSQL()); + throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, oldColumn.getTraceSQL()); } } @@ -489,10 +681,24 @@ public void setSelectivity(Expression selectivity) { newSelectivity = selectivity; } + /** + * Set default or on update expression. + * + * @param defaultExpression default or on update expression + */ public void setDefaultExpression(Expression defaultExpression) { this.defaultExpression = defaultExpression; } + /** + * Set using expression. + * + * @param usingExpression using expression + */ + public void setUsingExpression(Expression usingExpression) { + this.usingExpression = usingExpression; + } + public void setNewColumn(Column newColumn) { this.newColumn = newColumn; } @@ -506,7 +712,19 @@ public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } - public void setNewColumns(ArrayList columnsToAdd) { - this.columnsToAdd = columnsToAdd; + @Override + public void addColumn(Column column) { + if (columnsToAdd == null) { + columnsToAdd = new ArrayList<>(); + } + columnsToAdd.add(column); + } + + public void setColumnsToRemove(ArrayList columnsToRemove) { + this.columnsToRemove = columnsToRemove; + } + + public void setBooleanFlag(boolean booleanFlag) { + this.booleanFlag = booleanFlag; } } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java index 549f4e802a..32a7390e02 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableDropConstraint.java @@ -1,6 
+1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,41 +8,67 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.constraint.ConstraintActionType; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.table.Table; /** * This class represents the statement * ALTER TABLE DROP CONSTRAINT */ -public class AlterTableDropConstraint extends SchemaCommand { +public class AlterTableDropConstraint extends AlterTable { private String constraintName; private final boolean ifExists; + private ConstraintActionType dropAction; - public AlterTableDropConstraint(Session session, Schema schema, - boolean ifExists) { + public AlterTableDropConstraint(SessionLocal session, Schema schema, boolean ifExists) { super(session, schema); this.ifExists = ifExists; + dropAction = session.getDatabase().getSettings().dropRestrict ? 
+ ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; } public void setConstraintName(String string) { constraintName = string; } + public void setDropAction(ConstraintActionType dropAction) { + this.dropAction = dropAction; + } + @Override - public int update() { - session.commit(true); + public long update(Table table) { Constraint constraint = getSchema().findConstraint(session, constraintName); - if (constraint == null) { + Type constraintType; + if (constraint == null || (constraintType = constraint.getConstraintType()) == Type.DOMAIN + || constraint.getTable() != table) { if (!ifExists) { throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); } } else { - session.getUser().checkRight(constraint.getTable(), Right.ALL); - session.getUser().checkRight(constraint.getRefTable(), Right.ALL); + Table refTable = constraint.getRefTable(); + if (refTable != table) { + session.getUser().checkTableRight(refTable, Right.SCHEMA_OWNER); + } + if (constraintType == Type.PRIMARY_KEY || constraintType == Type.UNIQUE) { + for (Constraint c : constraint.getTable().getConstraints()) { + if (c.getReferencedConstraint() == constraint) { + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CONSTRAINT_IS_USED_BY_CONSTRAINT_2, + constraint.getTraceSQL(), c.getTraceSQL()); + } + Table t = c.getTable(); + if (t != table && t != refTable) { + session.getUser().checkTableRight(t, Right.SCHEMA_OWNER); + } + } + } + } session.getDatabase().removeSchemaObject(session, constraint); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRename.java b/h2/src/main/org/h2/command/ddl/AlterTableRename.java index ffea8da9b5..948b4878d2 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableRename.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableRename.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,8 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -18,45 +17,38 @@ * This class represents the statement * ALTER TABLE RENAME */ -public class AlterTableRename extends SchemaCommand { +public class AlterTableRename extends AlterTable { - private Table oldTable; private String newTableName; private boolean hidden; - public AlterTableRename(Session session, Schema schema) { + public AlterTableRename(SessionLocal session, Schema schema) { super(session, schema); } - public void setOldTable(Table table) { - oldTable = table; - } - public void setNewTableName(String name) { newTableName = name; } @Override - public int update() { - session.commit(true); + public long update(Table table) { Database db = session.getDatabase(); - session.getUser().checkRight(oldTable, Right.ALL); Table t = getSchema().findTableOrView(session, newTableName); - if (t != null && hidden && newTableName.equals(oldTable.getName())) { + if (t != null && hidden && newTableName.equals(table.getName())) { if (!t.isHidden()) { t.setHidden(hidden); - oldTable.setHidden(true); - db.updateMeta(session, oldTable); + table.setHidden(true); + db.updateMeta(session, table); } return 0; } - if (t != null || newTableName.equals(oldTable.getName())) { + if (t != null || newTableName.equals(table.getName())) { throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, newTableName); } - if (oldTable.isTemporary()) { + if (table.isTemporary()) { throw DbException.getUnsupportedException("temp table"); } - db.renameSchemaObject(session, oldTable, newTableName); + 
db.renameSchemaObject(session, table, newTableName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java b/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java index f4fa14526b..104d514108 100644 --- a/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java +++ b/h2/src/main/org/h2/command/ddl/AlterTableRenameColumn.java @@ -1,16 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.CommandInterface; +import org.h2.constraint.ConstraintReferential; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.expression.Expression; +import org.h2.engine.SessionLocal; +import org.h2.schema.Schema; import org.h2.table.Column; import org.h2.table.Table; @@ -18,22 +18,22 @@ * This class represents the statement * ALTER TABLE ALTER COLUMN RENAME */ -public class AlterTableRenameColumn extends DefineCommand { +public class AlterTableRenameColumn extends AlterTable { - private Table table; - private Column column; + private boolean ifExists; + private String oldName; private String newName; - public AlterTableRenameColumn(Session session) { - super(session); + public AlterTableRenameColumn(SessionLocal session, Schema schema) { + super(session, schema); } - public void setTable(Table table) { - this.table = table; + public void setIfExists(boolean b) { + this.ifExists = b; } - public void setColumn(Column column) { - this.column = column; + public void setOldColumnName(String oldName) { + this.oldName = oldName; } public void setNewColumnName(String newName) { @@ -41,19 +41,25 @@ public void setNewColumnName(String newName) { } @Override - public int 
update() { - session.commit(true); - Database db = session.getDatabase(); - session.getUser().checkRight(table, Right.ALL); + public long update(Table table) { + Column column = table.getColumn(oldName, ifExists); + if (column == null) { + return 0; + } table.checkSupportAlter(); - // we need to update CHECK constraint - // since it might reference the name of the column - Expression newCheckExpr = column.getCheckConstraint(session, newName); table.renameColumn(column, newName); - column.removeCheckConstraint(); - column.addCheckConstraint(session, newCheckExpr); table.setModified(); + Database db = session.getDatabase(); db.updateMeta(session, table); + + // if we have foreign key constraints pointing at this table, we need to update them + for (DbObject childDbObject : table.getChildren()) { + if (childDbObject instanceof ConstraintReferential) { + ConstraintReferential ref = (ConstraintReferential) childDbObject; + ref.updateOnTableColumnRename(); + } + } + for (DbObject child : table.getChildren()) { if (child.getCreateSQL() != null) { db.updateMeta(session, child); diff --git a/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java new file mode 100644 index 0000000000..3dce7f3a6c --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/AlterTableRenameConstraint.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.engine.Database; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.Table; + +/** + * This class represents the statement + * ALTER TABLE RENAME CONSTRAINT + */ +public class AlterTableRenameConstraint extends AlterTable { + + private String constraintName; + private String newConstraintName; + + public AlterTableRenameConstraint(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setConstraintName(String string) { + constraintName = string; + } + + public void setNewConstraintName(String newName) { + this.newConstraintName = newName; + } + + @Override + public long update(Table table) { + Constraint constraint = getSchema().findConstraint(session, constraintName); + Database db = session.getDatabase(); + if (constraint == null || constraint.getConstraintType() == Type.DOMAIN || constraint.getTable() != table) { + throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName); + } + if (getSchema().findConstraint(session, newConstraintName) != null + || newConstraintName.equals(constraintName)) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, newConstraintName); + } + User user = session.getUser(); + Table refTable = constraint.getRefTable(); + if (refTable != table) { + user.checkTableRight(refTable, Right.SCHEMA_OWNER); + } + db.renameSchemaObject(session, constraint, newConstraintName); + return 0; + } + + @Override + public int getType() { + return CommandInterface.ALTER_TABLE_RENAME_CONSTRAINT; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/AlterUser.java b/h2/src/main/org/h2/command/ddl/AlterUser.java index 9a6f00492b..adaf83ea64 100644 
--- a/h2/src/main/org/h2/command/ddl/AlterUser.java +++ b/h2/src/main/org/h2/command/ddl/AlterUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,12 +8,10 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.expression.Expression; import org.h2.message.DbException; -import org.h2.security.SHA256; -import org.h2.util.StringUtils; /** * This class represents the statements @@ -31,7 +29,7 @@ public class AlterUser extends DefineCommand { private Expression hash; private boolean admin; - public AlterUser(Session session) { + public AlterUser(SessionLocal session) { super(session); } @@ -63,18 +61,8 @@ public void setPassword(Expression password) { this.password = password; } - private char[] getCharArray(Expression e) { - return e.optimize(session).getValue(session).getString().toCharArray(); - } - - private byte[] getByteArray(Expression e) { - return StringUtils.convertHexToBytes( - e.optimize(session).getValue(session).getString()); - } - @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); switch (type) { case CommandInterface.ALTER_USER_SET_PASSWORD: @@ -82,12 +70,9 @@ public int update() { session.getUser().checkAdmin(); } if (hash != null && salt != null) { - user.setSaltAndHash(getByteArray(salt), getByteArray(hash)); + CreateUser.setSaltAndHash(user, session, salt, hash); } else { - String name = newName == null ? 
user.getName() : newName; - char[] passwordChars = getCharArray(password); - byte[] userPasswordHash = SHA256.getKeyPasswordHash(name, passwordChars); - user.setUserPasswordHash(userPasswordHash); + CreateUser.setPassword(user, session, password); } break; case CommandInterface.ALTER_USER_RENAME: @@ -99,13 +84,10 @@ public int update() { break; case CommandInterface.ALTER_USER_ADMIN: session.getUser().checkAdmin(); - if (!admin) { - user.checkOwnsNoSchemas(); - } user.setAdmin(admin); break; default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } db.updateMeta(session, user); return 0; diff --git a/h2/src/main/org/h2/command/ddl/AlterView.java b/h2/src/main/org/h2/command/ddl/AlterView.java index 019be6566b..27360167c4 100644 --- a/h2/src/main/org/h2/command/ddl/AlterView.java +++ b/h2/src/main/org/h2/command/ddl/AlterView.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.CommandInterface; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.table.TableView; @@ -17,21 +16,28 @@ */ public class AlterView extends DefineCommand { + private boolean ifExists; private TableView view; - public AlterView(Session session) { + public AlterView(SessionLocal session) { super(session); } + public void setIfExists(boolean b) { + ifExists = b; + } + public void setView(TableView view) { this.view = view; } @Override - public int update() { - session.commit(true); - session.getUser().checkRight(view, Right.ALL); - DbException e = view.recompile(session, false); + public long update() { + if (view == null && ifExists) { + return 0; + } + session.getUser().checkSchemaOwner(view.getSchema()); + DbException e = view.recompile(session, false, true); if (e != null) { throw e; } diff --git a/h2/src/main/org/h2/command/ddl/Analyze.java b/h2/src/main/org/h2/command/ddl/Analyze.java index 11bf9ceff8..166d319685 100644 --- a/h2/src/main/org/h2/command/ddl/Analyze.java +++ b/h2/src/main/org/h2/command/ddl/Analyze.java @@ -1,48 +1,161 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; -import java.util.ArrayList; +import java.util.Arrays; + import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.expression.Parameter; -import org.h2.result.ResultInterface; +import org.h2.engine.SessionLocal; +import org.h2.index.Cursor; +import org.h2.result.Row; +import org.h2.schema.Schema; import org.h2.table.Column; import org.h2.table.Table; -import org.h2.util.StatementBuilder; +import org.h2.table.TableType; +import org.h2.value.DataType; import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; /** - * This class represents the statement - * ANALYZE + * This class represents the statements + * ANALYZE and ANALYZE TABLE */ public class Analyze extends DefineCommand { + private static final class SelectivityData { + + private long distinctCount; + + /** + * The number of occupied slots, excluding the zero element (if any). + */ + private int size; + + private int[] elements; + + /** + * Whether the zero element is present. 
+ */ + private boolean zeroElement; + + private int maxSize; + + SelectivityData() { + elements = new int[8]; + maxSize = 7; + } + + void add(Value v) { + int currentSize = currentSize(); + if (currentSize >= Constants.SELECTIVITY_DISTINCT_COUNT) { + size = 0; + Arrays.fill(elements, 0); + zeroElement = false; + distinctCount += currentSize; + } + int hash = v.hashCode(); + if (hash == 0) { + zeroElement = true; + } else { + if (size >= maxSize) { + rehash(); + } + add(hash); + } + } + + int getSelectivity(long count) { + int s; + if (count == 0) { + s = 0; + } else { + s = (int) (100 * (distinctCount + currentSize()) / count); + if (s <= 0) { + s = 1; + } + } + return s; + } + + private int currentSize() { + int size = this.size; + if (zeroElement) { + size++; + } + return size; + } + + private void add(int element) { + int len = elements.length; + int mask = len - 1; + int index = element & mask; + int plus = 1; + do { + int k = elements[index]; + if (k == 0) { + // found an empty record + size++; + elements[index] = element; + return; + } else if (k == element) { + // existing element + return; + } + index = (index + plus++) & mask; + } while (plus <= len); + // no space, ignore + } + + private void rehash() { + size = 0; + int[] oldElements = elements; + int len = oldElements.length << 1; + elements = new int[len]; + maxSize = (int) (len * 90L / 100); + for (int k : oldElements) { + if (k != 0) { + add(k); + } + } + } + + } + /** * The sample size. */ private int sampleRows; + /** + * used in ANALYZE TABLE... 
+ */ + private Table table; - public Analyze(Session session) { + public Analyze(SessionLocal session) { super(session); sampleRows = session.getDatabase().getSettings().analyzeSample; } + public void setTable(Table table) { + this.table = table; + } + @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); - for (Table table : db.getAllTablesAndViews(false)) { + if (table != null) { analyzeTable(session, table, sampleRows, true); + } else { + for (Schema schema : db.getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(null)) { + analyzeTable(session, table, sampleRows, true); + } + } } return 0; } @@ -55,88 +168,57 @@ public int update() { * @param sample the number of sample rows * @param manual whether the command was called by the user */ - public static void analyzeTable(Session session, Table table, int sample, - boolean manual) { - if (!(table.getTableType().equals(Table.TABLE)) || - table.isHidden() || session == null) { - return; - } - if (!manual) { - if (session.getDatabase().isSysTableLocked()) { - return; - } - if (table.hasSelectTrigger()) { - return; - } - } - if (table.isTemporary() && !table.isGlobalTemporary() - && session.findLocalTempTable(table.getName()) == null) { - return; - } - if (table.isLockedExclusively() && !table.isLockedExclusivelyBy(session)) { - return; - } - if (!session.getUser().hasRight(table, Right.SELECT)) { - return; - } - if (session.getCancel() != 0) { - // if the connection is closed and there is something to undo + public static void analyzeTable(SessionLocal session, Table table, int sample, boolean manual) { + if (table.getTableType() != TableType.TABLE // + || table.isHidden() // + || session == null // + || !manual && (session.getDatabase().isSysTableLocked() || table.hasSelectTrigger()) // + || table.isTemporary() && !table.isGlobalTemporary() // + && session.findLocalTempTable(table.getName()) == 
null // + || table.isLockedExclusively() && !table.isLockedExclusivelyBy(session) + || !session.getUser().hasTableRight(table, Right.SELECT) // + // if the connection is closed and there is something to undo + || session.getCancel() != 0) { return; } + table.lock(session, Table.READ_LOCK); Column[] columns = table.getColumns(); - if (columns.length == 0) { + int columnCount = columns.length; + if (columnCount == 0) { return; } - Database db = session.getDatabase(); - StatementBuilder buff = new StatementBuilder("SELECT "); - for (Column col : columns) { - buff.appendExceptFirst(", "); - int type = col.getType(); - if (type == Value.BLOB || type == Value.CLOB) { - // can not index LOB columns, so calculating - // the selectivity is not required - buff.append("MAX(NULL)"); - } else { - buff.append("SELECTIVITY(").append(col.getSQL()).append(')'); - } - } - buff.append(" FROM ").append(table.getSQL()); - if (sample > 0) { - buff.append(" LIMIT ? SAMPLE_SIZE ? "); - } - String sql = buff.toString(); - Prepared command = session.prepare(sql); - if (sample > 0) { - ArrayList params = command.getParameters(); - params.get(0).setValue(ValueInt.get(1)); - params.get(1).setValue(ValueInt.get(sample)); - } - ResultInterface result = command.query(0); - result.next(); - for (int j = 0; j < columns.length; j++) { - Value v = result.currentRow()[j]; - if (v != ValueNull.INSTANCE) { - int selectivity = v.getInt(); - columns[j].setSelectivity(selectivity); + Cursor cursor = table.getScanIndex(session).find(session, null, null); + if (cursor.next()) { + SelectivityData[] array = new SelectivityData[columnCount]; + for (int i = 0; i < columnCount; i++) { + Column col = columns[i]; + if (!DataType.isLargeObject(col.getType().getValueType())) { + array[i] = new SelectivityData(); + } } - } - if (manual) { - db.updateMeta(session, table); - } else { - Session sysSession = db.getSystemSession(); - if (sysSession != session) { - // if the current session is the system session - // (which 
is the case if we are within a trigger) - // then we can't update the statistics because - // that would unlock all locked objects - synchronized (sysSession) { - synchronized (db) { - db.updateMeta(sysSession, table); - sysSession.commit(true); + long rowNumber = 0; + do { + Row row = cursor.get(); + for (int i = 0; i < columnCount; i++) { + SelectivityData selectivity = array[i]; + if (selectivity != null) { + selectivity.add(row.getValue(i)); } } + rowNumber++; + } while ((sample <= 0 || rowNumber < sample) && cursor.next()); + for (int i = 0; i < columnCount; i++) { + SelectivityData selectivity = array[i]; + if (selectivity != null) { + columns[i].setSelectivity(selectivity.getSelectivity(rowNumber)); + } + } + } else { + for (int i = 0; i < columnCount; i++) { + columns[i].setSelectivity(0); } } + session.getDatabase().updateMeta(session, table); } public void setTop(int top) { diff --git a/h2/src/main/org/h2/command/ddl/CommandWithColumns.java b/h2/src/main/org/h2/command/ddl/CommandWithColumns.java new file mode 100644 index 0000000000..b8cb76ec80 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/CommandWithColumns.java @@ -0,0 +1,165 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.table.Column; +import org.h2.table.IndexColumn; + +public abstract class CommandWithColumns extends SchemaCommand { + + private ArrayList constraintCommands; + + private AlterTableAddConstraint primaryKey; + + protected CommandWithColumns(SessionLocal session, Schema schema) { + super(session, schema); + } + + /** + * Add a column to this table. + * + * @param column + * the column to add + */ + public abstract void addColumn(Column column); + + /** + * Add a constraint statement to this statement. The primary key definition is + * one possible constraint statement. + * + * @param command + * the statement to add + */ + public void addConstraintCommand(DefineCommand command) { + if (!(command instanceof CreateIndex)) { + AlterTableAddConstraint con = (AlterTableAddConstraint) command; + if (con.getType() == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) { + if (setPrimaryKey(con)) { + return; + } + } + } + getConstraintCommands().add(command); + } + + /** + * For the given list of columns, disable "nullable" for those columns that + * are primary key columns. + * + * @param columns the list of columns + */ + protected void changePrimaryKeysToNotNull(ArrayList columns) { + if (primaryKey != null) { + IndexColumn[] pkColumns = primaryKey.getIndexColumns(); + for (Column c : columns) { + for (IndexColumn idxCol : pkColumns) { + if (c.getName().equals(idxCol.columnName)) { + c.setNullable(false); + } + } + } + } + } + + /** + * Create the constraints. 
+ */ + protected void createConstraints() { + if (constraintCommands != null) { + for (DefineCommand command : constraintCommands) { + command.setTransactional(transactional); + command.update(); + } + } + } + + /** + * For the given list of columns, create sequences for identity + * columns (if needed), and then get the list of all sequences of the + * columns. + * + * @param columns the columns + * @param temporary whether generated sequences should be temporary + * @return the list of sequences (may be empty) + */ + protected ArrayList generateSequences(ArrayList columns, boolean temporary) { + ArrayList sequences = new ArrayList<>(columns == null ? 0 : columns.size()); + if (columns != null) { + for (Column c : columns) { + if (c.hasIdentityOptions()) { + int objId = session.getDatabase().allocateObjectId(); + c.initializeSequence(session, getSchema(), objId, temporary); + if (!Constants.CLUSTERING_DISABLED.equals(session.getDatabase().getCluster())) { + throw DbException.getUnsupportedException("CLUSTERING && identity columns"); + } + } + Sequence seq = c.getSequence(); + if (seq != null) { + sequences.add(seq); + } + } + } + return sequences; + } + + private ArrayList getConstraintCommands() { + if (constraintCommands == null) { + constraintCommands = new ArrayList<>(); + } + return constraintCommands; + } + + /** + * Set the primary key, but also check if a primary key with different + * columns is already defined. + *

    + * If an unnamed primary key with the same columns is already defined it is + * removed from the list of constraints and this method returns + * {@code false}. + *

    + * + * @param primaryKey + * the primary key + * @return whether another primary key with the same columns was already set + * and the specified primary key should be ignored + */ + private boolean setPrimaryKey(AlterTableAddConstraint primaryKey) { + if (this.primaryKey != null) { + IndexColumn[] oldColumns = this.primaryKey.getIndexColumns(); + IndexColumn[] newColumns = primaryKey.getIndexColumns(); + int len = newColumns.length; + if (len != oldColumns.length) { + throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); + } + for (int i = 0; i < len; i++) { + if (!newColumns[i].columnName.equals(oldColumns[i].columnName)) { + throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); + } + } + if (this.primaryKey.getConstraintName() != null) { + return true; + } + // Remove unnamed primary key + constraintCommands.remove(this.primaryKey); + } + this.primaryKey = primaryKey; + return false; + } + + public AlterTableAddConstraint getPrimaryKey() { + return primaryKey; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/CreateAggregate.java b/h2/src/main/org/h2/command/ddl/CreateAggregate.java index 7944b64568..000f09fe05 100644 --- a/h2/src/main/org/h2/command/ddl/CreateAggregate.java +++ b/h2/src/main/org/h2/command/ddl/CreateAggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,50 +8,43 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.schema.UserAggregate; /** * This class represents the statement * CREATE AGGREGATE */ -public class CreateAggregate extends DefineCommand { +public class CreateAggregate extends SchemaCommand { - private Schema schema; private String name; private String javaClassMethod; private boolean ifNotExists; private boolean force; - public CreateAggregate(Session session) { - super(session); + public CreateAggregate(SessionLocal session, Schema schema) { + super(session, schema); } @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); - if (db.findAggregate(name) != null || schema.findFunction(name) != null) { + Schema schema = getSchema(); + if (schema.findFunctionOrAggregate(name) != null) { if (!ifNotExists) { - throw DbException.get( - ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name); } } else { int id = getObjectId(); - UserAggregate aggregate = new UserAggregate( - db, id, name, javaClassMethod, force); - db.addDatabaseObject(session, aggregate); + UserAggregate aggregate = new UserAggregate(schema, id, name, javaClassMethod, force); + db.addSchemaObject(session, aggregate); } return 0; } - public void setSchema(Schema schema) { - this.schema = schema; - } - public void setName(String name) { this.name = name; } diff --git a/h2/src/main/org/h2/command/ddl/CreateConstant.java b/h2/src/main/org/h2/command/ddl/CreateConstant.java index ff8e2712e3..a66b8c3a23 100644 --- a/h2/src/main/org/h2/command/ddl/CreateConstant.java +++ 
b/h2/src/main/org/h2/command/ddl/CreateConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.schema.Constant; @@ -19,13 +19,13 @@ * This class represents the statement * CREATE CONSTANT */ -public class CreateConstant extends SchemaCommand { +public class CreateConstant extends SchemaOwnerCommand { private String constantName; private Expression expression; private boolean ifNotExists; - public CreateConstant(Session session, Schema schema) { + public CreateConstant(SessionLocal session, Schema schema) { super(session, schema); } @@ -34,18 +34,16 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { - session.commit(true); - session.getUser().checkAdmin(); + long update(Schema schema) { Database db = session.getDatabase(); - if (getSchema().findConstant(constantName) != null) { + if (schema.findConstant(constantName) != null) { if (ifNotExists) { return 0; } throw DbException.get(ErrorCode.CONSTANT_ALREADY_EXISTS_1, constantName); } int id = getObjectId(); - Constant constant = new Constant(getSchema(), id, constantName); + Constant constant = new Constant(schema, id, constantName); expression = expression.optimize(session); Value value = expression.getValue(session); constant.setValue(value); diff --git a/h2/src/main/org/h2/command/ddl/CreateDomain.java b/h2/src/main/org/h2/command/ddl/CreateDomain.java new file mode 100644 index 0000000000..2af747f546 --- 
/dev/null +++ b/h2/src/main/org/h2/command/ddl/CreateDomain.java @@ -0,0 +1,131 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import java.util.ArrayList; +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * This class represents the statement + * CREATE DOMAIN + */ +public class CreateDomain extends SchemaOwnerCommand { + + private String typeName; + private boolean ifNotExists; + + private TypeInfo dataType; + + private Domain parentDomain; + + private Expression defaultExpression; + + private Expression onUpdateExpression; + + private String comment; + + private ArrayList constraintCommands; + + public CreateDomain(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setTypeName(String name) { + this.typeName = name; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + public void setDataType(TypeInfo dataType) { + this.dataType = dataType; + } + + public void setParentDomain(Domain parentDomain) { + this.parentDomain = parentDomain; + } + + public void setDefaultExpression(Expression defaultExpression) { + this.defaultExpression = defaultExpression; + } + + public void setOnUpdateExpression(Expression onUpdateExpression) { + this.onUpdateExpression = onUpdateExpression; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Override + long update(Schema schema) { + if (schema.findDomain(typeName) != null) { + if (ifNotExists) { + 
return 0; + } + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, typeName); + } + if (typeName.indexOf(' ') < 0) { + DataType builtIn = DataType.getTypeByName(typeName, session.getDatabase().getMode()); + if (builtIn != null) { + if (session.getDatabase().equalsIdentifiers(typeName, Value.getTypeName(builtIn.type))) { + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, typeName); + } + Table table = session.getDatabase().getFirstUserTable(); + if (table != null) { + StringBuilder builder = new StringBuilder(typeName).append(" ("); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS).append(')'); + throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, builder.toString()); + } + } + } + int id = getObjectId(); + Domain domain = new Domain(schema, id, typeName); + domain.setDataType(dataType != null ? dataType : parentDomain.getDataType()); + domain.setDomain(parentDomain); + domain.setDefaultExpression(session, defaultExpression); + domain.setOnUpdateExpression(session, onUpdateExpression); + domain.setComment(comment); + schema.getDatabase().addSchemaObject(session, domain); + if (constraintCommands != null) { + for (AlterDomainAddConstraint command : constraintCommands) { + command.update(); + } + } + return 0; + } + + @Override + public int getType() { + return CommandInterface.CREATE_DOMAIN; + } + + /** + * Add a constraint command. + * + * @param command the command to add + */ + public void addConstraintCommand(AlterDomainAddConstraint command) { + if (constraintCommands == null) { + constraintCommands = Utils.newSmallArrayList(); + } + constraintCommands.add(command); + } + +} diff --git a/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java b/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java index 909e0727cf..0641dbce33 100644 --- a/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java +++ b/h2/src/main/org/h2/command/ddl/CreateFunctionAlias.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,9 +8,9 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; import org.h2.util.StringUtils; @@ -26,33 +26,27 @@ public class CreateFunctionAlias extends SchemaCommand { private boolean ifNotExists; private boolean force; private String source; - private boolean bufferResultSetToLocalTemp = true; - public CreateFunctionAlias(Session session, Schema schema) { + public CreateFunctionAlias(SessionLocal session, Schema schema) { super(session, schema); } @Override - public int update() { - session.commit(true); + public long update() { session.getUser().checkAdmin(); Database db = session.getDatabase(); - if (getSchema().findFunction(aliasName) != null) { + Schema schema = getSchema(); + if (schema.findFunctionOrAggregate(aliasName) != null) { if (!ifNotExists) { - throw DbException.get( - ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); + throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName); } } else { int id = getObjectId(); FunctionAlias functionAlias; if (javaClassMethod != null) { - functionAlias = FunctionAlias.newInstance(getSchema(), id, - aliasName, javaClassMethod, force, - bufferResultSetToLocalTemp); + functionAlias = FunctionAlias.newInstance(schema, id, aliasName, javaClassMethod, force); } else { - functionAlias = FunctionAlias.newInstanceFromSource( - getSchema(), id, aliasName, source, force, - bufferResultSetToLocalTemp); + functionAlias = 
FunctionAlias.newInstanceFromSource(schema, id, aliasName, source, force); } functionAlias.setDeterministic(deterministic); db.addSchemaObject(session, functionAlias); @@ -85,15 +79,6 @@ public void setDeterministic(boolean deterministic) { this.deterministic = deterministic; } - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @param b the new value - */ - public void setBufferResultSetToLocalTemp(boolean b) { - this.bufferResultSetToLocalTemp = b; - } - public void setSource(String source) { this.source = source; } diff --git a/h2/src/main/org/h2/command/ddl/CreateIndex.java b/h2/src/main/org/h2/command/ddl/CreateIndex.java index 3cf2f9f374..cf00511c40 100644 --- a/h2/src/main/org/h2/command/ddl/CreateIndex.java +++ b/h2/src/main/org/h2/command/ddl/CreateIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,7 +10,7 @@ import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.IndexType; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -26,14 +26,20 @@ public class CreateIndex extends SchemaCommand { private String tableName; private String indexName; private IndexColumn[] indexColumns; - private boolean primaryKey, unique, hash, spatial; + private int uniqueColumnCount; + private boolean primaryKey, hash, spatial; + private boolean ifTableExists; private boolean ifNotExists; private String comment; - public CreateIndex(Session session, Schema schema) { + public CreateIndex(SessionLocal session, Schema schema) { super(session, schema); } + public void setIfTableExists(boolean b) { + this.ifTableExists = b; + } + public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } @@ -51,21 +57,24 @@ public void setIndexColumns(IndexColumn[] columns) { } @Override - public int update() { - if (!transactional) { - session.commit(true); - } + public long update() { Database db = session.getDatabase(); boolean persistent = db.isPersistent(); - Table table = getSchema().getTableOrView(session, tableName); - if (getSchema().findIndex(session, indexName) != null) { + Table table = getSchema().findTableOrView(session, tableName); + if (table == null) { + if (ifTableExists) { + return 0; + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + if (indexName != null && getSchema().findIndex(session, indexName) != null) { if (ifNotExists) { return 0; } throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, indexName); } - session.getUser().checkRight(table, Right.ALL); - table.lock(session, true, true); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + table.lock(session, Table.EXCLUSIVE_LOCK); if 
(!table.isPersistIndexes()) { persistent = false; } @@ -85,14 +94,13 @@ public int update() { throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); } indexType = IndexType.createPrimaryKey(persistent, hash); - } else if (unique) { + } else if (uniqueColumnCount > 0) { indexType = IndexType.createUnique(persistent, hash); } else { indexType = IndexType.createNonUnique(persistent, hash, spatial); } IndexColumn.mapColumns(indexColumns, table); - table.addIndex(session, indexName, id, indexColumns, indexType, create, - comment); + table.addIndex(session, indexName, id, indexColumns, uniqueColumnCount, indexType, create, comment); return 0; } @@ -100,8 +108,8 @@ public void setPrimaryKey(boolean b) { this.primaryKey = b; } - public void setUnique(boolean b) { - this.unique = b; + public void setUniqueColumnCount(int uniqueColumnCount) { + this.uniqueColumnCount = uniqueColumnCount; } public void setHash(boolean b) { diff --git a/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java b/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java index 0201ff7888..d7ea31eaac 100644 --- a/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java +++ b/h2/src/main/org/h2/command/ddl/CreateLinkedTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.TableLink; @@ -28,8 +28,10 @@ public class CreateLinkedTable extends SchemaCommand { private boolean temporary; private boolean globalTemporary; private boolean readOnly; + private int fetchSize; + private boolean autocommit = true; - public CreateLinkedTable(Session session, Schema schema) { + public CreateLinkedTable(SessionLocal session, Schema schema) { super(session, schema); } @@ -61,12 +63,29 @@ public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + /** + * Specify the number of rows fetched by the linked table command + * + * @param fetchSize to set + */ + public void setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + } + + /** + * Specify if the autocommit mode is activated or not + * + * @param mode to set + */ + public void setAutoCommit(boolean mode) { + this.autocommit= mode; + } + @Override - public int update() { - session.commit(true); - Database db = session.getDatabase(); + public long update() { session.getUser().checkAdmin(); - if (getSchema().findTableOrView(session, tableName) != null) { + Database db = session.getDatabase(); + if (getSchema().resolveTableOrView(session, tableName) != null) { if (ifNotExists) { return 0; } @@ -80,6 +99,10 @@ public int update() { table.setGlobalTemporary(globalTemporary); table.setComment(comment); table.setReadOnly(readOnly); + if (fetchSize > 0) { + table.setFetchSize(fetchSize); + } + table.setAutoCommit(autocommit); if (temporary && !globalTemporary) { session.addLocalTempTable(table); } else { diff --git a/h2/src/main/org/h2/command/ddl/CreateRole.java b/h2/src/main/org/h2/command/ddl/CreateRole.java index 73d8361c7c..3add534252 100644 --- 
a/h2/src/main/org/h2/command/ddl/CreateRole.java +++ b/h2/src/main/org/h2/command/ddl/CreateRole.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,8 +8,9 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; /** @@ -21,7 +22,7 @@ public class CreateRole extends DefineCommand { private String roleName; private boolean ifNotExists; - public CreateRole(Session session) { + public CreateRole(SessionLocal session) { super(session); } @@ -34,18 +35,18 @@ public void setRoleName(String name) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (db.findUser(roleName) != null) { - throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, roleName); - } - if (db.findRole(roleName) != null) { - if (ifNotExists) { - return 0; + RightOwner rightOwner = db.findUserOrRole(roleName); + if (rightOwner != null) { + if (rightOwner instanceof Role) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, roleName); } - throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, roleName); + throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, roleName); } int id = getObjectId(); Role role = new Role(db, id, roleName, false); diff --git a/h2/src/main/org/h2/command/ddl/CreateSchema.java b/h2/src/main/org/h2/command/ddl/CreateSchema.java index 57c50afcb9..fbab006152 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSchema.java +++ 
b/h2/src/main/org/h2/command/ddl/CreateSchema.java @@ -1,15 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.User; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -22,8 +23,9 @@ public class CreateSchema extends DefineCommand { private String schemaName; private String authorization; private boolean ifNotExists; + private ArrayList tableEngineParams; - public CreateSchema(Session session) { + public CreateSchema(SessionLocal session) { super(session); } @@ -32,14 +34,12 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { + public long update() { session.getUser().checkSchemaAdmin(); - session.commit(true); Database db = session.getDatabase(); - User user = db.getUser(authorization); - // during DB startup, the Right/Role records have not yet been loaded - if (!db.isStarting()) { - user.checkSchemaAdmin(); + RightOwner owner = db.findUserOrRole(authorization); + if (owner == null) { + throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, authorization); } if (db.findSchema(schemaName) != null) { if (ifNotExists) { @@ -48,7 +48,8 @@ public int update() { throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, schemaName); } int id = getObjectId(); - Schema schema = new Schema(db, id, schemaName, user, false); + Schema schema = new Schema(db, id, schemaName, owner, false); + schema.setTableEngineParams(tableEngineParams); db.addDatabaseObject(session, schema); return 0; } 
@@ -61,6 +62,10 @@ public void setAuthorization(String userName) { this.authorization = userName; } + public void setTableEngineParams(ArrayList tableEngineParams) { + this.tableEngineParams = tableEngineParams; + } + @Override public int getType() { return CommandInterface.CREATE_SCHEMA; diff --git a/h2/src/main/org/h2/command/ddl/CreateSequence.java b/h2/src/main/org/h2/command/ddl/CreateSequence.java index 2e01d451be..896a326337 100644 --- a/h2/src/main/org/h2/command/ddl/CreateSequence.java +++ b/h2/src/main/org/h2/command/ddl/CreateSequence.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,30 +8,27 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.expression.Expression; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; /** - * This class represents the statement - * CREATE SEQUENCE + * This class represents the statement CREATE SEQUENCE. 
*/ -public class CreateSequence extends SchemaCommand { +public class CreateSequence extends SchemaOwnerCommand { private String sequenceName; + private boolean ifNotExists; - private boolean cycle; - private Expression minValue; - private Expression maxValue; - private Expression start; - private Expression increment; - private Expression cacheSize; + + private SequenceOptions options; + private boolean belongsToTable; - public CreateSequence(Session session, Schema schema) { + public CreateSequence(SessionLocal session, Schema schema) { super(session, schema); + transactional = true; } public void setSequenceName(String sequenceName) { @@ -42,63 +39,29 @@ public void setIfNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } - public void setCycle(boolean cycle) { - this.cycle = cycle; + public void setOptions(SequenceOptions options) { + this.options = options; } @Override - public int update() { - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - if (getSchema().findSequence(sequenceName) != null) { + if (schema.findSequence(sequenceName) != null) { if (ifNotExists) { return 0; } throw DbException.get(ErrorCode.SEQUENCE_ALREADY_EXISTS_1, sequenceName); } int id = getObjectId(); - Long startValue = getLong(start); - Long inc = getLong(increment); - Long cache = getLong(cacheSize); - Long min = getLong(minValue); - Long max = getLong(maxValue); - Sequence sequence = new Sequence(getSchema(), id, sequenceName, startValue, inc, - cache, min, max, cycle, belongsToTable); + Sequence sequence = new Sequence(session, schema, id, sequenceName, options, belongsToTable); db.addSchemaObject(session, sequence); return 0; } - private Long getLong(Expression expr) { - if (expr == null) { - return null; - } - return expr.optimize(session).getValue(session).getLong(); - } - - public void setStartWith(Expression start) { - this.start = start; - } - - public void setIncrement(Expression increment) { - this.increment = 
increment; - } - - public void setMinValue(Expression minValue) { - this.minValue = minValue; - } - - public void setMaxValue(Expression maxValue) { - this.maxValue = maxValue; - } - public void setBelongsToTable(boolean belongsToTable) { this.belongsToTable = belongsToTable; } - public void setCacheSize(Expression cacheSize) { - this.cacheSize = cacheSize; - } - @Override public int getType() { return CommandInterface.CREATE_SEQUENCE; diff --git a/h2/src/main/org/h2/command/ddl/CreateSynonym.java b/h2/src/main/org/h2/command/ddl/CreateSynonym.java new file mode 100644 index 0000000000..5f94ad93b4 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/CreateSynonym.java @@ -0,0 +1,110 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.TableSynonym; + +/** + * This class represents the statement + * CREATE SYNONYM + */ +public class CreateSynonym extends SchemaOwnerCommand { + + private final CreateSynonymData data = new CreateSynonymData(); + private boolean ifNotExists; + private boolean orReplace; + private String comment; + + public CreateSynonym(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setName(String name) { + data.synonymName = name; + } + + public void setSynonymFor(String tableName) { + data.synonymFor = tableName; + } + + public void setSynonymForSchema(Schema synonymForSchema) { + data.synonymForSchema = synonymForSchema; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + public void setOrReplace(boolean orReplace) { this.orReplace = orReplace; } + + @Override + long update(Schema schema) { 
+ Database db = session.getDatabase(); + data.session = session; + db.lockMeta(session); + + if (schema.findTableOrView(session, data.synonymName) != null) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, data.synonymName); + } + + if (data.synonymForSchema.findTableOrView(session, data.synonymFor) != null) { + return createTableSynonym(db); + } + + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, + data.synonymForSchema.getName() + "." + data.synonymFor); + + } + + private int createTableSynonym(Database db) { + + TableSynonym old = getSchema().getSynonym(data.synonymName); + if (old != null) { + if (orReplace) { + // ok, we replacing the existing synonym + } else if (ifNotExists) { + return 0; + } else { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, data.synonymName); + } + } + + TableSynonym table; + if (old != null) { + table = old; + data.schema = table.getSchema(); + table.updateData(data); + table.setComment(comment); + table.setModified(); + db.updateMeta(session, table); + } else { + data.id = getObjectId(); + table = getSchema().createSynonym(data); + table.setComment(comment); + db.addSchemaObject(session, table); + } + + table.updateSynonymFor(); + return 0; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Override + public int getType() { + return CommandInterface.CREATE_SYNONYM; + } + + +} diff --git a/h2/src/main/org/h2/command/ddl/CreateSynonymData.java b/h2/src/main/org/h2/command/ddl/CreateSynonymData.java new file mode 100644 index 0000000000..6e1122d749 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/CreateSynonymData.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.engine.SessionLocal; +import org.h2.schema.Schema; + +/** + * The data required to create a synonym. 
+ */ +public class CreateSynonymData { + + /** + * The schema. + */ + public Schema schema; + + /** + * The synonyms name. + */ + public String synonymName; + + /** + * The name of the table the synonym is created for. + */ + public String synonymFor; + + /** Schema synonymFor is located in. */ + public Schema synonymForSchema; + + /** + * The object id. + */ + public int id; + + /** + * The session. + */ + public SessionLocal session; + +} diff --git a/h2/src/main/org/h2/command/ddl/CreateTable.java b/h2/src/main/org/h2/command/ddl/CreateTable.java index 7e439ad27c..213b178702 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTable.java +++ b/h2/src/main/org/h2/command/ddl/CreateTable.java @@ -1,47 +1,42 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; import java.util.HashSet; - import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.dml.Insert; -import org.h2.command.dml.Query; +import org.h2.command.query.Query; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; import org.h2.table.Column; -import org.h2.table.IndexColumn; import org.h2.table.Table; -import org.h2.util.New; -import org.h2.value.DataType; +import org.h2.value.Value; /** * This class represents the statement * CREATE TABLE */ -public class CreateTable extends SchemaCommand { +public class CreateTable extends CommandWithColumns { private final CreateTableData data = new CreateTableData(); - private final ArrayList constraintCommands = 
New.arrayList(); - private IndexColumn[] pkColumns; private boolean ifNotExists; private boolean onCommitDrop; private boolean onCommitTruncate; private Query asQuery; private String comment; - private boolean sortedInsertMode; + private boolean withNoData; - public CreateTable(Session session, Schema schema) { + public CreateTable(SessionLocal session, Schema schema) { super(session, schema); data.persistIndexes = true; data.persistData = true; @@ -59,36 +54,13 @@ public void setTableName(String tableName) { data.tableName = tableName; } - /** - * Add a column to this table. - * - * @param column the column to add - */ + @Override public void addColumn(Column column) { data.columns.add(column); } - /** - * Add a constraint statement to this statement. - * The primary key definition is one possible constraint statement. - * - * @param command the statement to add - */ - public void addConstraintCommand(DefineCommand command) { - if (command instanceof CreateIndex) { - constraintCommands.add(command); - } else { - AlterTableAddConstraint con = (AlterTableAddConstraint) command; - boolean alreadySet; - if (con.getType() == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) { - alreadySet = setPrimaryKeyColumns(con.getIndexColumns()); - } else { - alreadySet = false; - } - if (!alreadySet) { - constraintCommands.add(command); - } - } + public ArrayList getColumns() { + return data.columns; } public void setIfNotExists(boolean ifNotExists) { @@ -96,19 +68,20 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { - if (!transactional) { - session.commit(true); + public long update() { + Schema schema = getSchema(); + boolean isSessionTemporary = data.temporary && !data.globalTemporary; + if (!isSessionTemporary) { + session.getUser().checkSchemaOwner(schema); } Database db = session.getDatabase(); if (!db.isPersistent()) { data.persistIndexes = false; } - boolean isSessionTemporary = data.temporary && !data.globalTemporary; if 
(!isSessionTemporary) { db.lockMeta(session); } - if (getSchema().findTableOrView(session, data.tableName) != null) { + if (schema.resolveTableOrView(session, data.tableName) != null) { if (ifNotExists) { return 0; } @@ -116,36 +89,25 @@ public int update() { } if (asQuery != null) { asQuery.prepare(); - if (data.columns.size() == 0) { + if (data.columns.isEmpty()) { generateColumnsFromQuery(); } else if (data.columns.size() != asQuery.getColumnCount()) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - } - if (pkColumns != null) { - for (Column c : data.columns) { - for (IndexColumn idxCol : pkColumns) { - if (c.getName().equals(idxCol.columnName)) { - c.setNullable(false); + } else { + ArrayList columns = data.columns; + for (int i = 0; i < columns.size(); i++) { + Column column = columns.get(i); + if (column.getType().getValueType() == Value.UNKNOWN) { + columns.set(i, new Column(column.getName(), asQuery.getExpressions().get(i).getType())); } } } } + changePrimaryKeysToNotNull(data.columns); data.id = getObjectId(); - data.create = create; data.session = session; - Table table = getSchema().createTable(data); - ArrayList sequences = New.arrayList(); - for (Column c : data.columns) { - if (c.isAutoIncrement()) { - int objId = getObjectId(); - c.convertAutoIncrementToSequence(session, getSchema(), objId, data.temporary); - } - Sequence seq = c.getSequence(); - if (seq != null) { - sequences.add(seq); - } - } + Table table = schema.createTable(data); + ArrayList sequences = generateSequences(data.columns, data.temporary); table.setComment(comment); if (isSessionTemporary) { if (onCommitDrop) { @@ -161,34 +123,13 @@ public int update() { } try { for (Column c : data.columns) { - c.prepareExpression(session); + c.prepareExpressions(session); } for (Sequence sequence : sequences) { table.addSequence(sequence); } - for (DefineCommand command : constraintCommands) { - command.setTransactional(transactional); - command.update(); - } - if (asQuery != 
null) { - boolean old = session.isUndoLogEnabled(); - try { - session.setUndoLogEnabled(false); - session.startStatementWithinTransaction(); - Insert insert = null; - insert = new Insert(session); - insert.setSortedInsertMode(sortedInsertMode); - insert.setQuery(asQuery); - insert.setTable(table); - insert.setInsertFromSelect(true); - insert.prepare(); - insert.update(); - } finally { - session.setUndoLogEnabled(old); - } - } - HashSet set = New.hashSet(); - set.clear(); + createConstraints(); + HashSet set = new HashSet<>(); table.addDependencies(set); for (DbObject obj : set) { if (obj == table) { @@ -209,11 +150,49 @@ public int update() { } } } + if (asQuery != null && !withNoData) { + boolean flushSequences = false; + if (!isSessionTemporary) { + db.unlockMeta(session); + for (Column c : table.getColumns()) { + Sequence s = c.getSequence(); + if (s != null) { + flushSequences = true; + s.setTemporary(true); + } + } + } + try { + session.startStatementWithinTransaction(null); + Insert insert = new Insert(session); + insert.setQuery(asQuery); + insert.setTable(table); + insert.setInsertFromSelect(true); + insert.prepare(); + insert.update(); + } finally { + session.endStatement(); + } + if (flushSequences) { + db.lockMeta(session); + for (Column c : table.getColumns()) { + Sequence s = c.getSequence(); + if (s != null) { + s.setTemporary(false); + s.flush(session); + } + } + } + } } catch (DbException e) { - db.checkPowerOff(); - db.removeSchemaObject(session, table); - if (!transactional) { - session.commit(true); + try { + db.checkPowerOff(); + db.removeSchemaObject(session, table); + if (!transactional) { + session.commit(true); + } + } catch (Throwable ex) { + e.addSuppressed(ex); } throw e; } @@ -225,51 +204,8 @@ private void generateColumnsFromQuery() { ArrayList expressions = asQuery.getExpressions(); for (int i = 0; i < columnCount; i++) { Expression expr = expressions.get(i); - int type = expr.getType(); - String name = expr.getAlias(); - long precision 
= expr.getPrecision(); - int displaySize = expr.getDisplaySize(); - DataType dt = DataType.getDataType(type); - if (precision > 0 && (dt.defaultPrecision == 0 || - (dt.defaultPrecision > precision && dt.defaultPrecision < Byte.MAX_VALUE))) { - // dont' set precision to MAX_VALUE if this is the default - precision = dt.defaultPrecision; - } - int scale = expr.getScale(); - if (scale > 0 && (dt.defaultScale == 0 || - (dt.defaultScale > scale && dt.defaultScale < precision))) { - scale = dt.defaultScale; - } - if (scale > precision) { - precision = scale; - } - Column col = new Column(name, type, precision, scale, displaySize); - addColumn(col); - } - } - - /** - * Sets the primary key columns, but also check if a primary key - * with different columns is already defined. - * - * @param columns the primary key columns - * @return true if the same primary key columns where already set - */ - private boolean setPrimaryKeyColumns(IndexColumn[] columns) { - if (pkColumns != null) { - int len = columns.length; - if (len != pkColumns.length) { - throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); - } - for (int i = 0; i < len; i++) { - if (!columns[i].columnName.equals(pkColumns[i].columnName)) { - throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY); - } - } - return true; + addColumn(new Column(expr.getColumnNameForView(session, i), expr.getType())); } - this.pkColumns = columns; - return false; } public void setPersistIndexes(boolean persistIndexes) { @@ -305,8 +241,8 @@ public void setPersistData(boolean persistData) { } } - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; + public void setWithNoData(boolean withNoData) { + this.withNoData = withNoData; } public void setTableEngine(String tableEngine) { diff --git a/h2/src/main/org/h2/command/ddl/CreateTableData.java b/h2/src/main/org/h2/command/ddl/CreateTableData.java index 9c1ec3e7bb..7549b15175 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTableData.java +++ 
b/h2/src/main/org/h2/command/ddl/CreateTableData.java @@ -1,15 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; -import org.h2.engine.Session; + +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; import org.h2.table.Column; -import org.h2.util.New; /** * The data required to create a table. @@ -34,7 +34,7 @@ public class CreateTableData { /** * The column list. */ - public ArrayList columns = New.arrayList(); + public ArrayList columns = new ArrayList<>(); /** * Whether this is a temporary table. @@ -56,15 +56,10 @@ public class CreateTableData { */ public boolean persistData; - /** - * Whether to create a new table. - */ - public boolean create; - /** * The session. */ - public Session session; + public SessionLocal session; /** * The table engine to use for creating the table. @@ -80,5 +75,4 @@ public class CreateTableData { * The table is hidden. */ public boolean isHidden; - } diff --git a/h2/src/main/org/h2/command/ddl/CreateTrigger.java b/h2/src/main/org/h2/command/ddl/CreateTrigger.java index aac4c1f25d..9b098fe3e8 100644 --- a/h2/src/main/org/h2/command/ddl/CreateTrigger.java +++ b/h2/src/main/org/h2/command/ddl/CreateTrigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.api.Trigger; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.TriggerObject; @@ -36,7 +36,7 @@ public class CreateTrigger extends SchemaCommand { private boolean force; private boolean onRollback; - public CreateTrigger(Session session, Schema schema) { + public CreateTrigger(SessionLocal session, Schema schema) { super(session, schema); } @@ -85,8 +85,8 @@ public void setIfNotExists(boolean ifNotExists) { } @Override - public int update() { - session.commit(true); + public long update() { + session.getUser().checkAdmin(); Database db = session.getDatabase(); if (getSchema().findTrigger(triggerName) != null) { if (ifNotExists) { @@ -96,10 +96,18 @@ public int update() { ErrorCode.TRIGGER_ALREADY_EXISTS_1, triggerName); } - if ((typeMask & Trigger.SELECT) == Trigger.SELECT && rowBased) { - throw DbException.get( - ErrorCode.TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED, - triggerName); + if ((typeMask & Trigger.SELECT) != 0) { + if (rowBased) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "SELECT + FOR EACH ROW"); + } + if (onRollback) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "SELECT + ROLLBACK"); + } + } else if ((typeMask & (Trigger.INSERT | Trigger.UPDATE | Trigger.DELETE)) == 0) { + if (onRollback) { + throw DbException.get(ErrorCode.INVALID_TRIGGER_FLAGS_1, "(!INSERT & !UPDATE & !DELETE) + ROLLBACK"); + } + throw DbException.getInternalError(); } int id = getObjectId(); Table table = getSchema().getTableOrView(session, tableName); diff --git a/h2/src/main/org/h2/command/ddl/CreateUser.java b/h2/src/main/org/h2/command/ddl/CreateUser.java index fd2b1b22a2..17983aad07 100644 --- a/h2/src/main/org/h2/command/ddl/CreateUser.java +++ 
b/h2/src/main/org/h2/command/ddl/CreateUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,12 +8,15 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.security.SHA256; import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.Value; /** * This class represents the statement @@ -29,7 +32,7 @@ public class CreateUser extends DefineCommand { private boolean ifNotExists; private String comment; - public CreateUser(Session session) { + public CreateUser(SessionLocal session) { super(session); } @@ -45,46 +48,72 @@ public void setPassword(Expression password) { this.password = password; } - private char[] getCharArray(Expression e) { - return e.optimize(session).getValue(session).getString().toCharArray(); + /** + * Set the salt and hash for the given user. 
+ * + * @param user the user + * @param session the session + * @param salt the salt + * @param hash the hash + */ + static void setSaltAndHash(User user, SessionLocal session, Expression salt, Expression hash) { + user.setSaltAndHash(getByteArray(session, salt), getByteArray(session, hash)); } - private byte[] getByteArray(Expression e) { - return StringUtils.convertHexToBytes( - e.optimize(session).getValue(session).getString()); + private static byte[] getByteArray(SessionLocal session, Expression e) { + Value value = e.optimize(session).getValue(session); + if (DataType.isBinaryStringType(value.getValueType())) { + byte[] b = value.getBytes(); + return b == null ? new byte[0] : b; + } + String s = value.getString(); + return s == null ? new byte[0] : StringUtils.convertHexToBytes(s); + } + + /** + * Set the password for the given user. + * + * @param user the user + * @param session the session + * @param password the password + */ + static void setPassword(User user, SessionLocal session, Expression password) { + String pwd = password.optimize(session).getValue(session).getString(); + char[] passwordChars = pwd == null ? 
new char[0] : pwd.toCharArray(); + byte[] userPasswordHash; + String userName = user.getName(); + if (userName.isEmpty() && passwordChars.length == 0) { + userPasswordHash = new byte[0]; + } else { + userPasswordHash = SHA256.getKeyPasswordHash(userName, passwordChars); + } + user.setUserPasswordHash(userPasswordHash); } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (db.findRole(userName) != null) { - throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, userName); - } - if (db.findUser(userName) != null) { - if (ifNotExists) { - return 0; + RightOwner rightOwner = db.findUserOrRole(userName); + if (rightOwner != null) { + if (rightOwner instanceof User) { + if (ifNotExists) { + return 0; + } + throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, userName); } - throw DbException.get(ErrorCode.USER_ALREADY_EXISTS_1, userName); + throw DbException.get(ErrorCode.ROLE_ALREADY_EXISTS_1, userName); } int id = getObjectId(); User user = new User(db, id, userName, false); user.setAdmin(admin); user.setComment(comment); if (hash != null && salt != null) { - user.setSaltAndHash(getByteArray(salt), getByteArray(hash)); + setSaltAndHash(user, session, salt, hash); } else if (password != null) { - char[] passwordChars = getCharArray(password); - byte[] userPasswordHash; - if (userName.length() == 0 && passwordChars.length == 0) { - userPasswordHash = new byte[0]; - } else { - userPasswordHash = SHA256.getKeyPasswordHash(userName, passwordChars); - } - user.setUserPasswordHash(userPasswordHash); + setPassword(user, session, password); } else { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } db.addDatabaseObject(session, user); return 0; diff --git a/h2/src/main/org/h2/command/ddl/CreateUserDataType.java b/h2/src/main/org/h2/command/ddl/CreateUserDataType.java deleted file mode 100644 index 0cd3a5eb2b..0000000000 --- 
a/h2/src/main/org/h2/command/ddl/CreateUserDataType.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.ddl; - -import org.h2.api.ErrorCode; -import org.h2.command.CommandInterface; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserDataType; -import org.h2.message.DbException; -import org.h2.table.Column; -import org.h2.table.Table; -import org.h2.value.DataType; - -/** - * This class represents the statement - * CREATE DOMAIN - */ -public class CreateUserDataType extends DefineCommand { - - private String typeName; - private Column column; - private boolean ifNotExists; - - public CreateUserDataType(Session session) { - super(session); - } - - public void setTypeName(String name) { - this.typeName = name; - } - - public void setColumn(Column column) { - this.column = column; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - - @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - session.getUser().checkAdmin(); - if (db.findUserDataType(typeName) != null) { - if (ifNotExists) { - return 0; - } - throw DbException.get( - ErrorCode.USER_DATA_TYPE_ALREADY_EXISTS_1, - typeName); - } - DataType builtIn = DataType.getTypeByName(typeName); - if (builtIn != null) { - if (!builtIn.hidden) { - throw DbException.get( - ErrorCode.USER_DATA_TYPE_ALREADY_EXISTS_1, - typeName); - } - Table table = session.getDatabase().getFirstUserTable(); - if (table != null) { - throw DbException.get( - ErrorCode.USER_DATA_TYPE_ALREADY_EXISTS_1, - typeName + " (" + table.getSQL() + ")"); - } - } - int id = getObjectId(); - UserDataType type = new UserDataType(db, id, typeName); - type.setColumn(column); - db.addDatabaseObject(session, type); - return 
0; - } - - @Override - public int getType() { - return CommandInterface.CREATE_DOMAIN; - } - -} diff --git a/h2/src/main/org/h2/command/ddl/CreateView.java b/h2/src/main/org/h2/command/ddl/CreateView.java index 0942f40a0d..dc397ae3da 100644 --- a/h2/src/main/org/h2/command/ddl/CreateView.java +++ b/h2/src/main/org/h2/command/ddl/CreateView.java @@ -1,29 +1,31 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; - import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.command.dml.Query; -import org.h2.engine.Constants; +import org.h2.command.query.Query; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.table.Column; import org.h2.table.Table; +import org.h2.table.TableType; import org.h2.table.TableView; +import org.h2.util.HasSQL; +import org.h2.value.TypeInfo; /** * This class represents the statement * CREATE VIEW */ -public class CreateView extends SchemaCommand { +public class CreateView extends SchemaOwnerCommand { private Query select; private String viewName; @@ -33,8 +35,9 @@ public class CreateView extends SchemaCommand { private String comment; private boolean orReplace; private boolean force; + private boolean isTableExpression; - public CreateView(Session session, Schema schema) { + public CreateView(SessionLocal session, Schema schema) { super(session, schema); } @@ -70,18 +73,20 @@ public void setForce(boolean force) { this.force = force; } + public void setTableExpression(boolean isTableExpression) { + this.isTableExpression = isTableExpression; + } + 
@Override - public int update() { - session.commit(true); - session.getUser().checkAdmin(); + long update(Schema schema) { Database db = session.getDatabase(); TableView view = null; - Table old = getSchema().findTableOrView(session, viewName); + Table old = schema.findTableOrView(session, viewName); if (old != null) { if (ifNotExists) { return 0; } - if (!orReplace || !Table.VIEW.equals(old.getTableType())) { + if (!orReplace || TableType.VIEW != old.getTableType()) { throw DbException.get(ErrorCode.VIEW_ALREADY_EXISTS_1, viewName); } view = (TableView) old; @@ -92,35 +97,50 @@ public int update() { querySQL = selectSQL; } else { ArrayList params = select.getParameters(); - if (params != null && params.size() > 0) { + if (params != null && !params.isEmpty()) { throw DbException.getUnsupportedException("parameters in views"); } - querySQL = select.getPlanSQL(); + querySQL = select.getPlanSQL(HasSQL.DEFAULT_SQL_FLAGS); + } + Column[] columnTemplatesAsUnknowns = null; + Column[] columnTemplatesAsStrings = null; + if (columnNames != null) { + columnTemplatesAsUnknowns = new Column[columnNames.length]; + columnTemplatesAsStrings = new Column[columnNames.length]; + for (int i = 0; i < columnNames.length; ++i) { + // non table expressions are fine to use unknown column type + columnTemplatesAsUnknowns[i] = new Column(columnNames[i], TypeInfo.TYPE_UNKNOWN); + // table expressions can't have unknown types - so we use string instead + columnTemplatesAsStrings[i] = new Column(columnNames[i], TypeInfo.TYPE_VARCHAR); + } } - // The view creates a Prepared command object, which belongs to a - // session, so we pass the system session down. 
- Session sysSession = db.getSystemSession(); - try { - if (view == null) { - Schema schema = session.getDatabase().getSchema(session.getCurrentSchemaName()); - sysSession.setCurrentSchema(schema); - view = new TableView(getSchema(), id, viewName, querySQL, null, - columnNames, sysSession, false); + if (view == null) { + if (isTableExpression) { + view = TableView.createTableViewMaybeRecursive(schema, id, viewName, querySQL, null, + columnTemplatesAsStrings, session, false /* literalsChecked */, isTableExpression, + false/*isTemporary*/, db); } else { - view.replace(querySQL, columnNames, sysSession, false, force); - view.setModified(); + view = new TableView(schema, id, viewName, querySQL, null, columnTemplatesAsUnknowns, session, + false/* allow recursive */, false/* literalsChecked */, isTableExpression, false/*temporary*/); } - } finally { - sysSession.setCurrentSchema(db.getSchema(Constants.SCHEMA_MAIN)); + } else { + // TODO support isTableExpression in replace function... + view.replace(querySQL, columnTemplatesAsUnknowns, session, false, force, false); + view.setModified(); } if (comment != null) { view.setComment(comment); } if (old == null) { db.addSchemaObject(session, view); + db.unlockMeta(session); } else { db.updateMeta(session, view); } + + // TODO: if we added any table expressions that aren't used by this view, detect them + // and drop them - otherwise they will leak and never get cleaned up. + return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java b/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java index bc94b26e85..dad6d054cb 100644 --- a/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java +++ b/h2/src/main/org/h2/command/ddl/DeallocateProcedure.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.CommandInterface; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; /** * This class represents the statement @@ -16,12 +16,12 @@ public class DeallocateProcedure extends DefineCommand { private String procedureName; - public DeallocateProcedure(Session session) { + public DeallocateProcedure(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { session.removeProcedure(procedureName); return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DefineCommand.java b/h2/src/main/org/h2/command/ddl/DefineCommand.java index b1a65f2f66..cf10794d56 100644 --- a/h2/src/main/org/h2/command/ddl/DefineCommand.java +++ b/h2/src/main/org/h2/command/ddl/DefineCommand.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.result.ResultInterface; /** @@ -26,7 +26,7 @@ public abstract class DefineCommand extends Prepared { * * @param session the session */ - DefineCommand(Session session) { + DefineCommand(SessionLocal session) { super(session); } diff --git a/h2/src/main/org/h2/command/ddl/DropAggregate.java b/h2/src/main/org/h2/command/ddl/DropAggregate.java index 96970eec34..08cd6d5741 100644 --- a/h2/src/main/org/h2/command/ddl/DropAggregate.java +++ b/h2/src/main/org/h2/command/ddl/DropAggregate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,35 +8,34 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.schema.UserAggregate; /** * This class represents the statement * DROP AGGREGATE */ -public class DropAggregate extends DefineCommand { +public class DropAggregate extends SchemaOwnerCommand { private String name; private boolean ifExists; - public DropAggregate(Session session) { - super(session); + public DropAggregate(SessionLocal session, Schema schema) { + super(session, schema); } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - UserAggregate aggregate = db.findAggregate(name); + UserAggregate aggregate = schema.findAggregate(name); if (aggregate == null) { if (!ifExists) { throw DbException.get(ErrorCode.AGGREGATE_NOT_FOUND_1, name); } } else { - db.removeDatabaseObject(session, aggregate); + db.removeSchemaObject(session, aggregate); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DropConstant.java b/h2/src/main/org/h2/command/ddl/DropConstant.java index f8a72a2440..565031ee60 100644 --- a/h2/src/main/org/h2/command/ddl/DropConstant.java +++ b/h2/src/main/org/h2/command/ddl/DropConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,7 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Constant; import org.h2.schema.Schema; @@ -17,12 +17,12 @@ * This class represents the statement * DROP CONSTANT */ -public class DropConstant extends SchemaCommand { +public class DropConstant extends SchemaOwnerCommand { private String constantName; private boolean ifExists; - public DropConstant(Session session, Schema schema) { + public DropConstant(SessionLocal session, Schema schema) { super(session, schema); } @@ -35,11 +35,9 @@ public void setConstantName(String constantName) { } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - Constant constant = getSchema().findConstant(constantName); + Constant constant = schema.findConstant(constantName); if (constant == null) { if (!ifExists) { throw DbException.get(ErrorCode.CONSTANT_NOT_FOUND_1, constantName); diff --git a/h2/src/main/org/h2/command/ddl/DropDatabase.java b/h2/src/main/org/h2/command/ddl/DropDatabase.java index 59e2c99a52..a46fae9f6a 100644 --- a/h2/src/main/org/h2/command/ddl/DropDatabase.java +++ b/h2/src/main/org/h2/command/ddl/DropDatabase.java @@ -1,21 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; +import java.util.Collection; + import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.DbObject; +import org.h2.engine.Right; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; +import org.h2.schema.Sequence; import org.h2.table.Table; -import org.h2.util.New; +import org.h2.table.TableType; +import org.h2.value.ValueNull; /** * This class represents the statement @@ -26,12 +32,12 @@ public class DropDatabase extends DefineCommand { private boolean dropAllObjects; private boolean deleteFiles; - public DropDatabase(Session session) { + public DropDatabase(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { if (dropAllObjects) { dropAllObjects(); } @@ -42,8 +48,8 @@ public int update() { } private void dropAllObjects() { - session.getUser().checkAdmin(); - session.commit(true); + User user = session.getUser(); + user.checkAdmin(); Database db = session.getDatabase(); db.lockMeta(session); @@ -51,30 +57,30 @@ private void dropAllObjects() { // so we might need to loop over them multiple times. boolean runLoopAgain; do { - ArrayList
    tables = db.getAllTablesAndViews(false); - ArrayList
    toRemove = New.arrayList(); + ArrayList
    tables = db.getAllTablesAndViews(); + ArrayList
    toRemove = new ArrayList<>(tables.size()); for (Table t : tables) { if (t.getName() != null && - Table.VIEW.equals(t.getTableType())) { + TableType.VIEW == t.getTableType()) { toRemove.add(t); } } for (Table t : tables) { if (t.getName() != null && - Table.TABLE_LINK.equals(t.getTableType())) { + TableType.TABLE_LINK == t.getTableType()) { toRemove.add(t); } } for (Table t : tables) { if (t.getName() != null && - Table.TABLE.equals(t.getTableType()) && + TableType.TABLE == t.getTableType() && !t.isHidden()) { toRemove.add(t); } } for (Table t : tables) { if (t.getName() != null && - Table.EXTERNAL_TABLE_ENGINE.equals(t.getTableType()) && + TableType.EXTERNAL_TABLE_ENGINE == t.getTableType() && !t.isHidden()) { toRemove.add(t); } @@ -82,7 +88,7 @@ private void dropAllObjects() { runLoopAgain = false; for (Table t : toRemove) { if (t.getName() == null) { - // ignore + // ignore, already dead } else if (db.getDependentTable(t, t) == null) { db.removeSchemaObject(session, t); } else { @@ -91,49 +97,55 @@ private void dropAllObjects() { } } while (runLoopAgain); - // TODO local temp tables are not removed - for (Schema schema : db.getAllSchemas()) { + // TODO session-local temp tables are not removed + Collection schemas = db.getAllSchemasNoMeta(); + for (Schema schema : schemas) { if (schema.canDrop()) { db.removeDatabaseObject(session, schema); } } - session.findLocalTempTable(null); - ArrayList list = New.arrayList(); - list.addAll(db.getAllSchemaObjects(DbObject.SEQUENCE)); + ArrayList list = new ArrayList<>(); + for (Schema schema : schemas) { + for (Sequence sequence : schema.getAllSequences()) { + // ignore these. the ones we want to drop will get dropped when we + // drop their associated tables, and we will ignore the problematic + // ones that belong to session-local temp tables. 
+ if (!sequence.getBelongsToTable()) { + list.add(sequence); + } + } + } // maybe constraints and triggers on system tables will be allowed in // the future - list.addAll(db.getAllSchemaObjects(DbObject.CONSTRAINT)); - list.addAll(db.getAllSchemaObjects(DbObject.TRIGGER)); - list.addAll(db.getAllSchemaObjects(DbObject.CONSTANT)); - list.addAll(db.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)); + addAll(schemas, DbObject.CONSTRAINT, list); + addAll(schemas, DbObject.TRIGGER, list); + addAll(schemas, DbObject.CONSTANT, list); + // Function aliases and aggregates are stored together + addAll(schemas, DbObject.FUNCTION_ALIAS, list); + addAll(schemas, DbObject.DOMAIN, list); for (SchemaObject obj : list) { - if (obj.isHidden()) { + if (!obj.getSchema().isValid() || obj.isHidden()) { continue; } db.removeSchemaObject(session, obj); } - for (User user : db.getAllUsers()) { - if (user != session.getUser()) { - db.removeDatabaseObject(session, user); + Role publicRole = db.getPublicRole(); + for (RightOwner rightOwner : db.getAllUsersAndRoles()) { + if (rightOwner != user && rightOwner != publicRole) { + db.removeDatabaseObject(session, rightOwner); } } - for (Role role : db.getAllRoles()) { - String sql = role.getCreateSQL(); - // the role PUBLIC must not be dropped - if (sql != null) { - db.removeDatabaseObject(session, role); - } + for (Right right : db.getAllRights()) { + db.removeDatabaseObject(session, right); } - ArrayList dbObjects = New.arrayList(); - dbObjects.addAll(db.getAllRights()); - dbObjects.addAll(db.getAllAggregates()); - dbObjects.addAll(db.getAllUserDataTypes()); - for (DbObject obj : dbObjects) { - String sql = obj.getCreateSQL(); - // the role PUBLIC must not be dropped - if (sql != null) { - db.removeDatabaseObject(session, obj); - } + for (SessionLocal s : db.getSessions(false)) { + s.setLastIdentity(ValueNull.INSTANCE); + } + } + + private static void addAll(Collection schemas, int type, ArrayList list) { + for (Schema schema : schemas) { + 
schema.getAll(type, list); } } diff --git a/h2/src/main/org/h2/command/ddl/DropDomain.java b/h2/src/main/org/h2/command/ddl/DropDomain.java new file mode 100644 index 0000000000..8426dc2390 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/DropDomain.java @@ -0,0 +1,108 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.constraint.ConstraintActionType; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.ColumnTemplate; +import org.h2.table.Table; + +/** + * This class represents the statement DROP DOMAIN + */ +public class DropDomain extends AlterDomain { + + private ConstraintActionType dropAction; + + public DropDomain(SessionLocal session, Schema schema) { + super(session, schema); + dropAction = session.getDatabase().getSettings().dropRestrict ? 
ConstraintActionType.RESTRICT + : ConstraintActionType.CASCADE; + } + + public void setDropAction(ConstraintActionType dropAction) { + this.dropAction = dropAction; + } + + @Override + long update(Schema schema, Domain domain) { + forAllDependencies(session, domain, this::copyColumn, this::copyDomain, true); + session.getDatabase().removeSchemaObject(session, domain); + return 0; + } + + private boolean copyColumn(Domain domain, Column targetColumn) { + Table targetTable = targetColumn.getTable(); + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, domainName, targetTable.getCreateSQL()); + } + String columnName = targetColumn.getName(); + ArrayList constraints = domain.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints) { + Expression checkCondition = constraint.getCheckConstraint(session, columnName); + AlterTableAddConstraint check = new AlterTableAddConstraint(session, targetTable.getSchema(), + CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, false); + check.setTableName(targetTable.getName()); + check.setCheckExpression(checkCondition); + check.update(); + } + } + copyExpressions(session, domain, targetColumn); + return true; + } + + private boolean copyDomain(Domain domain, Domain targetDomain) { + if (dropAction == ConstraintActionType.RESTRICT) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, domainName, targetDomain.getTraceSQL()); + } + ArrayList constraints = domain.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints) { + Expression checkCondition = constraint.getCheckConstraint(session, null); + AlterDomainAddConstraint check = new AlterDomainAddConstraint(session, targetDomain.getSchema(), // + false); + check.setDomainName(targetDomain.getName()); + check.setCheckExpression(checkCondition); + check.update(); + } + } + copyExpressions(session, domain, 
targetDomain); + return true; + } + + private static boolean copyExpressions(SessionLocal session, Domain domain, ColumnTemplate targetColumn) { + targetColumn.setDomain(domain.getDomain()); + Expression e = domain.getDefaultExpression(); + boolean modified = false; + if (e != null && targetColumn.getDefaultExpression() == null) { + targetColumn.setDefaultExpression(session, e); + modified = true; + } + e = domain.getOnUpdateExpression(); + if (e != null && targetColumn.getOnUpdateExpression() == null) { + targetColumn.setOnUpdateExpression(session, e); + modified = true; + } + return modified; + } + + @Override + public int getType() { + return CommandInterface.DROP_DOMAIN; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java b/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java index e120835759..2a9fb641de 100644 --- a/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java +++ b/h2/src/main/org/h2/command/ddl/DropFunctionAlias.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,30 +8,28 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.FunctionAlias; import org.h2.schema.Schema; /** * This class represents the statement * DROP ALIAS */ -public class DropFunctionAlias extends SchemaCommand { +public class DropFunctionAlias extends SchemaOwnerCommand { private String aliasName; private boolean ifExists; - public DropFunctionAlias(Session session, Schema schema) { + public DropFunctionAlias(SessionLocal session, Schema schema) { super(session, schema); } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + long update(Schema schema) { Database db = session.getDatabase(); - FunctionAlias functionAlias = getSchema().findFunction(aliasName); + FunctionAlias functionAlias = schema.findFunction(aliasName); if (functionAlias == null) { if (!ifExists) { throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1, aliasName); diff --git a/h2/src/main/org/h2/command/ddl/DropIndex.java b/h2/src/main/org/h2/command/ddl/DropIndex.java index f401b3a3ab..37b66aa011 100644 --- a/h2/src/main/org/h2/command/ddl/DropIndex.java +++ b/h2/src/main/org/h2/command/ddl/DropIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -12,7 +12,7 @@ import org.h2.constraint.Constraint; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.schema.Schema; @@ -27,7 +27,7 @@ public class DropIndex extends SchemaCommand { private String indexName; private boolean ifExists; - public DropIndex(Session session, Schema schema) { + public DropIndex(SessionLocal session, Schema schema) { super(session, schema); } @@ -40,8 +40,7 @@ public void setIndexName(String indexName) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); Index index = getSchema().findIndex(session, indexName); if (index == null) { @@ -50,19 +49,23 @@ public int update() { } } else { Table table = index.getTable(); - session.getUser().checkRight(index.getTable(), Right.ALL); + session.getUser().checkTableRight(index.getTable(), Right.SCHEMA_OWNER); Constraint pkConstraint = null; ArrayList constraints = table.getConstraints(); for (int i = 0; constraints != null && i < constraints.size(); i++) { Constraint cons = constraints.get(i); if (cons.usesIndex(index)) { // can drop primary key index (for compatibility) - if (Constraint.PRIMARY_KEY.equals(cons.getConstraintType())) { + if (Constraint.Type.PRIMARY_KEY == cons.getConstraintType()) { + for (Constraint c : constraints) { + if (c.getReferencedConstraint() == cons) { + throw DbException.get(ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, indexName, + cons.getName()); + } + } pkConstraint = cons; } else { - throw DbException.get( - ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, - indexName, cons.getName()); + throw DbException.get(ErrorCode.INDEX_BELONGS_TO_CONSTRAINT_2, indexName, cons.getName()); } } } diff --git a/h2/src/main/org/h2/command/ddl/DropRole.java b/h2/src/main/org/h2/command/ddl/DropRole.java index 
9a64d70faf..5fdac3838c 100644 --- a/h2/src/main/org/h2/command/ddl/DropRole.java +++ b/h2/src/main/org/h2/command/ddl/DropRole.java @@ -1,16 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; /** @@ -22,7 +21,7 @@ public class DropRole extends DefineCommand { private String roleName; private boolean ifExists; - public DropRole(Session session) { + public DropRole(SessionLocal session) { super(session); } @@ -31,19 +30,19 @@ public void setRoleName(String roleName) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); - if (roleName.equals(Constants.PUBLIC_ROLE_NAME)) { - throw DbException.get(ErrorCode.ROLE_CAN_NOT_BE_DROPPED_1, roleName); - } Role role = db.findRole(roleName); if (role == null) { if (!ifExists) { throw DbException.get(ErrorCode.ROLE_NOT_FOUND_1, roleName); } } else { + if (role == db.getPublicRole()) { + throw DbException.get(ErrorCode.ROLE_CAN_NOT_BE_DROPPED_1, roleName); + } + role.checkOwnsNoSchemas(); db.removeDatabaseObject(session, role); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/DropSchema.java b/h2/src/main/org/h2/command/ddl/DropSchema.java index c18d54e1fd..3a8ea29ce1 100644 --- a/h2/src/main/org/h2/command/ddl/DropSchema.java +++ b/h2/src/main/org/h2/command/ddl/DropSchema.java @@ -1,16 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; +import org.h2.constraint.ConstraintActionType; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; +import org.h2.schema.SchemaObject; /** * This class represents the statement @@ -20,9 +23,12 @@ public class DropSchema extends DefineCommand { private String schemaName; private boolean ifExists; + private ConstraintActionType dropAction; - public DropSchema(Session session) { + public DropSchema(SessionLocal session) { super(session); + dropAction = session.getDatabase().getSettings().dropRestrict ? 
+ ConstraintActionType.RESTRICT : ConstraintActionType.CASCADE; } public void setSchemaName(String name) { @@ -30,9 +36,7 @@ public void setSchemaName(String name) { } @Override - public int update() { - session.getUser().checkSchemaAdmin(); - session.commit(true); + public long update() { Database db = session.getDatabase(); Schema schema = db.findSchema(schemaName); if (schema == null) { @@ -40,9 +44,24 @@ public int update() { throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName); } } else { + session.getUser().checkSchemaOwner(schema); if (!schema.canDrop()) { throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, schemaName); } + if (dropAction == ConstraintActionType.RESTRICT && !schema.isEmpty()) { + ArrayList all = schema.getAll(null); + int size = all.size(); + if (size > 0) { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < size; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(all.get(i).getName()); + } + throw DbException.get(ErrorCode.CANNOT_DROP_2, schemaName, builder.toString()); + } + } db.removeDatabaseObject(session, schema); } return 0; @@ -52,6 +71,10 @@ public void setIfExists(boolean ifExists) { this.ifExists = ifExists; } + public void setDropAction(ConstraintActionType dropAction) { + this.dropAction = dropAction; + } + @Override public int getType() { return CommandInterface.DROP_SCHEMA; diff --git a/h2/src/main/org/h2/command/ddl/DropSequence.java b/h2/src/main/org/h2/command/ddl/DropSequence.java index c0c1321b3f..451c628fee 100644 --- a/h2/src/main/org/h2/command/ddl/DropSequence.java +++ b/h2/src/main/org/h2/command/ddl/DropSequence.java @@ -1,14 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.Sequence; @@ -17,12 +16,12 @@ * This class represents the statement * DROP SEQUENCE */ -public class DropSequence extends SchemaCommand { +public class DropSequence extends SchemaOwnerCommand { private String sequenceName; private boolean ifExists; - public DropSequence(Session session, Schema schema) { + public DropSequence(SessionLocal session, Schema schema) { super(session, schema); } @@ -35,11 +34,8 @@ public void setSequenceName(String sequenceName) { } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - Sequence sequence = getSchema().findSequence(sequenceName); + long update(Schema schema) { + Sequence sequence = schema.findSequence(sequenceName); if (sequence == null) { if (!ifExists) { throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName); @@ -48,7 +44,7 @@ public int update() { if (sequence.getBelongsToTable()) { throw DbException.get(ErrorCode.SEQUENCE_BELONGS_TO_A_TABLE_1, sequenceName); } - db.removeSchemaObject(session, sequence); + session.getDatabase().removeSchemaObject(session, sequence); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/DropSynonym.java b/h2/src/main/org/h2/command/ddl/DropSynonym.java new file mode 100644 index 0000000000..fcab524f5e --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/DropSynonym.java @@ -0,0 +1,54 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.TableSynonym; + +/** + * This class represents the statement + * DROP SYNONYM + */ +public class DropSynonym extends SchemaOwnerCommand { + + private String synonymName; + private boolean ifExists; + + public DropSynonym(SessionLocal session, Schema schema) { + super(session, schema); + } + + public void setSynonymName(String name) { + this.synonymName = name; + } + + @Override + long update(Schema schema) { + TableSynonym synonym = schema.getSynonym(synonymName); + if (synonym == null) { + if (!ifExists) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, synonymName); + } + } else { + session.getDatabase().removeSchemaObject(session, synonym); + } + return 0; + } + + public void setIfExists(boolean ifExists) { + this.ifExists = ifExists; + } + + @Override + public int getType() { + return CommandInterface.DROP_SYNONYM; + } + +} diff --git a/h2/src/main/org/h2/command/ddl/DropTable.java b/h2/src/main/org/h2/command/ddl/DropTable.java index d19d26b928..c907d56e2b 100644 --- a/h2/src/main/org/h2/command/ddl/DropTable.java +++ b/h2/src/main/org/h2/command/ddl/DropTable.java @@ -1,125 +1,131 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; - +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintActionType; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; import org.h2.table.TableView; -import org.h2.util.StatementBuilder; +import org.h2.util.Utils; /** * This class represents the statement * DROP TABLE */ -public class DropTable extends SchemaCommand { +public class DropTable extends DefineCommand { private boolean ifExists; - private String tableName; - private Table table; - private DropTable next; - private int dropAction; + private ConstraintActionType dropAction; - public DropTable(Session session, Schema schema) { - super(session, schema); - dropAction = session.getDatabase().getSettings().dropRestrict ? - ConstraintReferential.RESTRICT : - ConstraintReferential.CASCADE; - } + private final ArrayList tables = Utils.newSmallArrayList(); - /** - * Chain another drop table statement to this statement. - * - * @param drop the statement to add - */ - public void addNextDropTable(DropTable drop) { - if (next == null) { - next = drop; - } else { - next.addNextDropTable(drop); - } + public DropTable(SessionLocal session) { + super(session); + dropAction = session.getDatabase().getSettings().dropRestrict ? + ConstraintActionType.RESTRICT : + ConstraintActionType.CASCADE; } public void setIfExists(boolean b) { ifExists = b; - if (next != null) { - next.setIfExists(b); - } } - public void setTableName(String tableName) { - this.tableName = tableName; + /** + * Add a table to drop. 
+ * + * @param schema the schema + * @param tableName the table name + */ + public void addTable(Schema schema, String tableName) { + tables.add(new SchemaAndTable(schema, tableName)); } - private void prepareDrop() { - table = getSchema().findTableOrView(session, tableName); - if (table == null) { - if (!ifExists) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); - } - } else { - session.getUser().checkRight(table, Right.ALL); - if (!table.canDrop()) { - throw DbException.get(ErrorCode.CANNOT_DROP_TABLE_1, tableName); + private boolean prepareDrop() { + HashSet
    tablesToDrop = new HashSet<>(); + for (SchemaAndTable schemaAndTable : tables) { + String tableName = schemaAndTable.tableName; + Table table = schemaAndTable.schema.findTableOrView(session, tableName); + if (table == null) { + if (!ifExists) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + } else { + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + if (!table.canDrop()) { + throw DbException.get(ErrorCode.CANNOT_DROP_TABLE_1, tableName); + } + tablesToDrop.add(table); } - if (dropAction == ConstraintReferential.RESTRICT) { - ArrayList views = table.getViews(); - if (views != null && views.size() > 0) { - StatementBuilder buff = new StatementBuilder(); - for (TableView v : views) { - buff.appendExceptFirst(", "); - buff.append(v.getName()); + } + if (tablesToDrop.isEmpty()) { + return false; + } + for (Table table : tablesToDrop) { + ArrayList dependencies = new ArrayList<>(); + if (dropAction == ConstraintActionType.RESTRICT) { + CopyOnWriteArrayList dependentViews = table.getDependentViews(); + if (dependentViews != null && !dependentViews.isEmpty()) { + for (TableView v : dependentViews) { + if (!tablesToDrop.contains(v)) { + dependencies.add(v.getName()); + } + } + } + final List constraints = table.getConstraints(); + if (constraints != null && !constraints.isEmpty()) { + for (Constraint c : constraints) { + if (!tablesToDrop.contains(c.getTable())) { + dependencies.add(c.getName()); + } } - throw DbException.get(ErrorCode.CANNOT_DROP_2, tableName, buff.toString()); + } + if (!dependencies.isEmpty()) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, table.getName(), String.join(", ", dependencies)); } } - table.lock(session, true, true); - } - if (next != null) { - next.prepareDrop(); + table.lock(session, Table.EXCLUSIVE_LOCK); } + return true; } private void executeDrop() { - // need to get the table again, because it may be dropped already - // meanwhile (dependent object, or same object) - table = 
getSchema().findTableOrView(session, tableName); - - if (table != null) { - table.setModified(); - Database db = session.getDatabase(); - db.lockMeta(session); - db.removeSchemaObject(session, table); - } - if (next != null) { - next.executeDrop(); + for (SchemaAndTable schemaAndTable : tables) { + // need to get the table again, because it may be dropped already + // meanwhile (dependent object, or same object) + Table table = schemaAndTable.schema.findTableOrView(session, schemaAndTable.tableName); + if (table != null) { + table.setModified(); + Database db = session.getDatabase(); + db.lockMeta(session); + db.removeSchemaObject(session, table); + } } } @Override - public int update() { - session.commit(true); - prepareDrop(); - executeDrop(); + public long update() { + if (prepareDrop()) { + executeDrop(); + } return 0; } - public void setDropAction(int dropAction) { + public void setDropAction(ConstraintActionType dropAction) { this.dropAction = dropAction; - if (next != null) { - next.setDropAction(dropAction); - } } @Override @@ -127,4 +133,17 @@ public int getType() { return CommandInterface.DROP_TABLE; } -} \ No newline at end of file + private static final class SchemaAndTable { + + final Schema schema; + + final String tableName; + + SchemaAndTable(Schema schema, String tableName) { + this.schema = schema; + this.tableName = tableName; + } + + } + +} diff --git a/h2/src/main/org/h2/command/ddl/DropTrigger.java b/h2/src/main/org/h2/command/ddl/DropTrigger.java index 1a3d6193d0..3e304bd5ce 100644 --- a/h2/src/main/org/h2/command/ddl/DropTrigger.java +++ b/h2/src/main/org/h2/command/ddl/DropTrigger.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -9,7 +9,7 @@ import org.h2.command.CommandInterface; import org.h2.engine.Database; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.schema.TriggerObject; @@ -24,7 +24,7 @@ public class DropTrigger extends SchemaCommand { private String triggerName; private boolean ifExists; - public DropTrigger(Session session, Schema schema) { + public DropTrigger(SessionLocal session, Schema schema) { super(session, schema); } @@ -37,8 +37,7 @@ public void setTriggerName(String triggerName) { } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); TriggerObject trigger = getSchema().findTrigger(triggerName); if (trigger == null) { @@ -47,7 +46,7 @@ public int update() { } } else { Table table = trigger.getTable(); - session.getUser().checkRight(table, Right.ALL); + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); db.removeSchemaObject(session, trigger); } return 0; diff --git a/h2/src/main/org/h2/command/ddl/DropUser.java b/h2/src/main/org/h2/command/ddl/DropUser.java index 907aa1eb53..3f72099e46 100644 --- a/h2/src/main/org/h2/command/ddl/DropUser.java +++ b/h2/src/main/org/h2/command/ddl/DropUser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,7 +8,8 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.User; import org.h2.message.DbException; @@ -21,7 +22,7 @@ public class DropUser extends DefineCommand { private boolean ifExists; private String userName; - public DropUser(Session session) { + public DropUser(SessionLocal session) { super(session); } @@ -34,9 +35,8 @@ public void setUserName(String userName) { } @Override - public int update() { + public long update() { session.getUser().checkAdmin(); - session.commit(true); Database db = session.getDatabase(); User user = db.findUser(userName); if (user == null) { @@ -46,8 +46,8 @@ public int update() { } else { if (user == session.getUser()) { int adminUserCount = 0; - for (User u : db.getAllUsers()) { - if (u.isAdmin()) { + for (RightOwner rightOwner : db.getAllUsersAndRoles()) { + if (rightOwner instanceof User && ((User) rightOwner).isAdmin()) { adminUserCount++; } } diff --git a/h2/src/main/org/h2/command/ddl/DropUserDataType.java b/h2/src/main/org/h2/command/ddl/DropUserDataType.java deleted file mode 100644 index 616654eeab..0000000000 --- a/h2/src/main/org/h2/command/ddl/DropUserDataType.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.command.ddl; - -import org.h2.api.ErrorCode; -import org.h2.command.CommandInterface; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.UserDataType; -import org.h2.message.DbException; - -/** - * This class represents the statement - * DROP DOMAIN - */ -public class DropUserDataType extends DefineCommand { - - private String typeName; - private boolean ifExists; - - public DropUserDataType(Session session) { - super(session); - } - - public void setIfExists(boolean ifExists) { - this.ifExists = ifExists; - } - - @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); - Database db = session.getDatabase(); - UserDataType type = db.findUserDataType(typeName); - if (type == null) { - if (!ifExists) { - throw DbException.get(ErrorCode.USER_DATA_TYPE_NOT_FOUND_1, typeName); - } - } else { - db.removeDatabaseObject(session, type); - } - return 0; - } - - public void setTypeName(String name) { - this.typeName = name; - } - - @Override - public int getType() { - return CommandInterface.DROP_DOMAIN; - } - -} diff --git a/h2/src/main/org/h2/command/ddl/DropView.java b/h2/src/main/org/h2/command/ddl/DropView.java index d5c7a7e68e..35c8462e4b 100644 --- a/h2/src/main/org/h2/command/ddl/DropView.java +++ b/h2/src/main/org/h2/command/ddl/DropView.java @@ -1,19 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; +import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintActionType; import org.h2.engine.DbObject; -import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; +import org.h2.table.TableType; import org.h2.table.TableView; /** @@ -24,20 +25,20 @@ public class DropView extends SchemaCommand { private String viewName; private boolean ifExists; - private int dropAction; + private ConstraintActionType dropAction; - public DropView(Session session, Schema schema) { + public DropView(SessionLocal session, Schema schema) { super(session, schema); dropAction = session.getDatabase().getSettings().dropRestrict ? - ConstraintReferential.RESTRICT : - ConstraintReferential.CASCADE; + ConstraintActionType.RESTRICT : + ConstraintActionType.CASCADE; } public void setIfExists(boolean b) { ifExists = b; } - public void setDropAction(int dropAction) { + public void setDropAction(ConstraintActionType dropAction) { this.dropAction = dropAction; } @@ -46,20 +47,19 @@ public void setViewName(String viewName) { } @Override - public int update() { - session.commit(true); + public long update() { Table view = getSchema().findTableOrView(session, viewName); if (view == null) { if (!ifExists) { throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName); } } else { - if (!Table.VIEW.equals(view.getTableType())) { + if (TableType.VIEW != view.getTableType()) { throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName); } - session.getUser().checkRight(view, Right.ALL); + session.getUser().checkSchemaOwner(view.getSchema()); - if (dropAction == ConstraintReferential.RESTRICT) { + if (dropAction == ConstraintActionType.RESTRICT) { for (DbObject child : view.getChildren()) { if (child 
instanceof TableView) { throw DbException.get(ErrorCode.CANNOT_DROP_2, viewName, child.getName()); @@ -67,8 +67,26 @@ public int update() { } } - view.lock(session, true, true); + // TODO: Where is the ConstraintReferential.CASCADE style drop processing ? It's + // supported from imported keys - but not for dependent db objects + + TableView tableView = (TableView) view; + ArrayList
    copyOfDependencies = new ArrayList<>(tableView.getTables()); + + view.lock(session, Table.EXCLUSIVE_LOCK); session.getDatabase().removeSchemaObject(session, view); + + // remove dependent table expressions + for (Table childTable: copyOfDependencies) { + if (TableType.VIEW == childTable.getTableType()) { + TableView childTableView = (TableView) childTable; + if (childTableView.isTableExpression() && childTableView.getName() != null) { + session.getDatabase().removeSchemaObject(session, childTableView); + } + } + } + // make sure its all unlocked + session.getDatabase().unlockMeta(session); } return 0; } diff --git a/h2/src/main/org/h2/command/ddl/GrantRevoke.java b/h2/src/main/org/h2/command/ddl/GrantRevoke.java index 7b6cd6c4f3..3fc52cf5d0 100644 --- a/h2/src/main/org/h2/command/ddl/GrantRevoke.java +++ b/h2/src/main/org/h2/command/ddl/GrantRevoke.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,13 +10,16 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Database; +import org.h2.engine.DbObject; import org.h2.engine.Right; import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; import org.h2.message.DbException; +import org.h2.schema.Schema; import org.h2.table.Table; -import org.h2.util.New; +import org.h2.util.Utils; /** * This class represents the statements @@ -30,10 +33,11 @@ public class GrantRevoke extends DefineCommand { private ArrayList roleNames; private int operationType; private int rightMask; - private final ArrayList
    tables = New.arrayList(); + private final ArrayList
    tables = Utils.newSmallArrayList(); + private Schema schema; private RightOwner grantee; - public GrantRevoke(Session session) { + public GrantRevoke(SessionLocal session) { super(session); } @@ -57,28 +61,25 @@ public void addRight(int right) { */ public void addRoleName(String roleName) { if (roleNames == null) { - roleNames = New.arrayList(); + roleNames = Utils.newSmallArrayList(); } roleNames.add(roleName); } public void setGranteeName(String granteeName) { Database db = session.getDatabase(); - grantee = db.findUser(granteeName); + grantee = db.findUserOrRole(granteeName); if (grantee == null) { - grantee = db.findRole(granteeName); - if (grantee == null) { - throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, granteeName); - } + throw DbException.get(ErrorCode.USER_OR_ROLE_NOT_FOUND_1, granteeName); } } @Override - public int update() { - session.getUser().checkAdmin(); - session.commit(true); + public long update() { Database db = session.getDatabase(); + User user = session.getUser(); if (roleNames != null) { + user.checkAdmin(); for (String name : roleNames) { Role grantedRole = db.findRole(name); if (grantedRole == null) { @@ -89,34 +90,54 @@ public int update() { } else if (operationType == CommandInterface.REVOKE) { revokeRole(grantedRole); } else { - DbException.throwInternalError("type=" + operationType); + throw DbException.getInternalError("type=" + operationType); } } } else { + if ((rightMask & Right.ALTER_ANY_SCHEMA) != 0) { + user.checkAdmin(); + } else { + if (schema != null) { + user.checkSchemaOwner(schema); + } + for (Table table : tables) { + user.checkSchemaOwner(table.getSchema()); + } + } if (operationType == CommandInterface.GRANT) { grantRight(); } else if (operationType == CommandInterface.REVOKE) { revokeRight(); } else { - DbException.throwInternalError("type=" + operationType); + throw DbException.getInternalError("type=" + operationType); } } return 0; } private void grantRight() { - Database db = session.getDatabase(); 
+ if (schema != null) { + grantRight(schema); + } for (Table table : tables) { - Right right = grantee.getRightForTable(table); - if (right == null) { - int id = getObjectId(); - right = new Right(db, id, grantee, rightMask, table); - grantee.grantRight(table, right); - db.addDatabaseObject(session, right); - } else { - right.setRightMask(right.getRightMask() | rightMask); - db.updateMeta(session, right); + grantRight(table); + } + } + + private void grantRight(DbObject object) { + Database db = session.getDatabase(); + Right right = grantee.getRightForObject(object); + if (right == null) { + int id = getPersistedObjectId(); + if (id == 0) { + id = session.getDatabase().allocateObjectId(); } + right = new Right(db, id, grantee, rightMask, object); + grantee.grantRight(object, right); + db.addDatabaseObject(session, right); + } else { + right.setRightMask(right.getRightMask() | rightMask); + db.updateMeta(session, right); } } @@ -128,7 +149,7 @@ private void grantRole(Role grantedRole) { Role granteeRole = (Role) grantee; if (grantedRole.isRoleGranted(granteeRole)) { // cyclic role grants are not allowed - throw DbException.get(ErrorCode.ROLE_ALREADY_GRANTED_1, grantedRole.getSQL()); + throw DbException.get(ErrorCode.ROLE_ALREADY_GRANTED_1, grantedRole.getTraceSQL()); } } Database db = session.getDatabase(); @@ -139,23 +160,31 @@ private void grantRole(Role grantedRole) { } private void revokeRight() { + if (schema != null) { + revokeRight(schema); + } for (Table table : tables) { - Right right = grantee.getRightForTable(table); - if (right == null) { - continue; - } - int mask = right.getRightMask(); - int newRight = mask & ~rightMask; - Database db = session.getDatabase(); - if (newRight == 0) { - db.removeDatabaseObject(session, right); - } else { - right.setRightMask(newRight); - db.updateMeta(session, right); - } + revokeRight(table); } } + private void revokeRight(DbObject object) { + Right right = grantee.getRightForObject(object); + if (right == null) { + 
return; + } + int mask = right.getRightMask(); + int newRight = mask & ~rightMask; + Database db = session.getDatabase(); + if (newRight == 0) { + db.removeDatabaseObject(session, right); + } else { + right.setRightMask(newRight); + db.updateMeta(session, right); + } + } + + private void revokeRole(Role grantedRole) { Right right = grantee.getRightForRole(grantedRole); if (right == null) { @@ -179,22 +208,18 @@ public void addTable(Table table) { tables.add(table); } - @Override - public int getType() { - return operationType; - } - /** - * @return true if this command is using Roles + * Set the specified schema + * + * @param schema the schema */ - public boolean isRoleMode() { - return roleNames != null; + public void setSchema(Schema schema) { + this.schema = schema; } - /** - * @return true if this command is using Rights - */ - public boolean isRightMode() { - return rightMask != 0; + @Override + public int getType() { + return operationType; } + } diff --git a/h2/src/main/org/h2/command/ddl/PrepareProcedure.java b/h2/src/main/org/h2/command/ddl/PrepareProcedure.java index f9c332a022..028ab2fcae 100644 --- a/h2/src/main/org/h2/command/ddl/PrepareProcedure.java +++ b/h2/src/main/org/h2/command/ddl/PrepareProcedure.java @@ -1,17 +1,17 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; import java.util.ArrayList; + import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Procedure; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; -import org.h2.util.New; /** * This class represents the statement @@ -22,7 +22,7 @@ public class PrepareProcedure extends DefineCommand { private String procedureName; private Prepared prepared; - public PrepareProcedure(Session session) { + public PrepareProcedure(SessionLocal session) { super(session); } @@ -32,7 +32,7 @@ public void checkParameters() { } @Override - public int update() { + public long update() { Procedure proc = new Procedure(procedureName, prepared); prepared.setParameterList(parameters); prepared.setPrepareAlways(prepareAlways); @@ -51,7 +51,7 @@ public void setPrepared(Prepared prep) { @Override public ArrayList getParameters() { - return New.arrayList(); + return new ArrayList<>(0); } @Override diff --git a/h2/src/main/org/h2/command/ddl/SchemaCommand.java b/h2/src/main/org/h2/command/ddl/SchemaCommand.java index cb445f66ba..14cf2c772c 100644 --- a/h2/src/main/org/h2/command/ddl/SchemaCommand.java +++ b/h2/src/main/org/h2/command/ddl/SchemaCommand.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.schema.Schema; /** @@ -21,7 +21,7 @@ public abstract class SchemaCommand extends DefineCommand { * @param session the session * @param schema the schema */ - public SchemaCommand(Session session, Schema schema) { + public SchemaCommand(SessionLocal session, Schema schema) { super(session); this.schema = schema; } @@ -31,7 +31,7 @@ public SchemaCommand(Session session, Schema schema) { * * @return the schema */ - protected Schema getSchema() { + protected final Schema getSchema() { return schema; } diff --git a/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java b/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java new file mode 100644 index 0000000000..28d432e625 --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/SchemaOwnerCommand.java @@ -0,0 +1,38 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.engine.SessionLocal; +import org.h2.schema.Schema; + +/** + * This class represents a non-transaction statement that involves a schema and + * requires schema owner rights. + */ +abstract class SchemaOwnerCommand extends SchemaCommand { + + /** + * Create a new command. 
+ * + * @param session + * the session + * @param schema + * the schema + */ + SchemaOwnerCommand(SessionLocal session, Schema schema) { + super(session, schema); + } + + @Override + public final long update() { + Schema schema = getSchema(); + session.getUser().checkSchemaOwner(schema); + return update(schema); + } + + abstract long update(Schema schema); + +} diff --git a/h2/src/main/org/h2/command/ddl/SequenceOptions.java b/h2/src/main/org/h2/command/ddl/SequenceOptions.java new file mode 100644 index 0000000000..801db6e1bd --- /dev/null +++ b/h2/src/main/org/h2/command/ddl/SequenceOptions.java @@ -0,0 +1,362 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.ddl; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.schema.Sequence; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * Sequence options. + */ +public class SequenceOptions { + + private TypeInfo dataType; + + private Expression start; + + private Expression restart; + + private Expression increment; + + private Expression maxValue; + + private Expression minValue; + + private Sequence.Cycle cycle; + + private Expression cacheSize; + + private long[] bounds; + + private final Sequence oldSequence; + + private static Long getLong(SessionLocal session, Expression expr) { + if (expr != null) { + Value value = expr.optimize(session).getValue(session); + if (value != ValueNull.INSTANCE) { + return value.getLong(); + } + } + return null; + } + + /** + * Creates new instance of sequence options. + */ + public SequenceOptions() { + oldSequence = null; + } + + /** + * Creates new instance of sequence options. 
+ * + * @param oldSequence + * the sequence to copy options from + * @param dataType + * the new data type + */ + public SequenceOptions(Sequence oldSequence, TypeInfo dataType) { + this.oldSequence = oldSequence; + this.dataType = dataType; + // Check data type correctness immediately + getBounds(); + } + + public TypeInfo getDataType() { + if (oldSequence != null) { + synchronized (oldSequence) { + copyFromOldSequence(); + } + } + return dataType; + } + + private void copyFromOldSequence() { + long bounds[] = getBounds(); + long min = Math.max(oldSequence.getMinValue(), bounds[0]); + long max = Math.min(oldSequence.getMaxValue(), bounds[1]); + if (max < min) { + min = bounds[0]; + max = bounds[1]; + } + minValue = ValueExpression.get(ValueBigint.get(min)); + maxValue = ValueExpression.get(ValueBigint.get(max)); + long v = oldSequence.getStartValue(); + if (v >= min && v <= max) { + start = ValueExpression.get(ValueBigint.get(v)); + } + v = oldSequence.getBaseValue(); + if (v >= min && v <= max) { + restart = ValueExpression.get(ValueBigint.get(v)); + } + increment = ValueExpression.get(ValueBigint.get(oldSequence.getIncrement())); + cycle = oldSequence.getCycle(); + cacheSize = ValueExpression.get(ValueBigint.get(oldSequence.getCacheSize())); + } + + public void setDataType(TypeInfo dataType) { + this.dataType = dataType; + } + + /** + * Gets start value. + * + * @param session The session to calculate the value. + * @return start value or {@code null} if value is not defined. + */ + public Long getStartValue(SessionLocal session) { + return check(getLong(session, start)); + } + + /** + * Sets start value expression. + * + * @param start START WITH value expression. + */ + public void setStartValue(Expression start) { + this.start = start; + } + + /** + * Gets restart value. 
+ * + * @param session + * the session to calculate the value + * @param startValue + * the start value to use if restart without value is specified + * @return restart value or {@code null} if value is not defined. + */ + public Long getRestartValue(SessionLocal session, long startValue) { + return check(restart == ValueExpression.DEFAULT ? (Long) startValue : getLong(session, restart)); + } + + /** + * Sets restart value expression, or {@link ValueExpression#DEFAULT}. + * + * @param restart + * RESTART WITH value expression, or + * {@link ValueExpression#DEFAULT} for simple RESTART + */ + public void setRestartValue(Expression restart) { + this.restart = restart; + } + + /** + * Gets increment value. + * + * @param session The session to calculate the value. + * @return increment value or {@code null} if value is not defined. + */ + public Long getIncrement(SessionLocal session) { + return check(getLong(session, increment)); + } + + /** + * Sets increment value expression. + * + * @param increment INCREMENT BY value expression. + */ + public void setIncrement(Expression increment) { + this.increment = increment; + } + + /** + * Gets max value. + * + * @param sequence the sequence to get default max value. + * @param session The session to calculate the value. + * @return max value when the MAXVALUE expression is set, otherwise returns default max value. + */ + public Long getMaxValue(Sequence sequence, SessionLocal session) { + Long v; + if (maxValue == ValueExpression.NULL && sequence != null) { + v = Sequence.getDefaultMaxValue(getCurrentStart(sequence, session), + increment != null ? getIncrement(session) : sequence.getIncrement(), getBounds()); + } else { + v = getLong(session, maxValue); + } + return check(v); + } + + /** + * Sets max value expression. + * + * @param maxValue MAXVALUE expression. + */ + public void setMaxValue(Expression maxValue) { + this.maxValue = maxValue; + } + + /** + * Gets min value. 
+ * + * @param sequence the sequence to get default min value. + * @param session The session to calculate the value. + * @return min value when the MINVALUE expression is set, otherwise returns default min value. + */ + public Long getMinValue(Sequence sequence, SessionLocal session) { + Long v; + if (minValue == ValueExpression.NULL && sequence != null) { + v = Sequence.getDefaultMinValue(getCurrentStart(sequence, session), + increment != null ? getIncrement(session) : sequence.getIncrement(), getBounds()); + } else { + v = getLong(session, minValue); + } + return check(v); + } + + /** + * Sets min value expression. + * + * @param minValue MINVALUE expression. + */ + public void setMinValue(Expression minValue) { + this.minValue = minValue; + } + + private Long check(Long value) { + if (value == null) { + return null; + } else { + long[] bounds = getBounds(); + long v = value; + if (v < bounds[0] || v > bounds[1]) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, Long.toString(v)); + } + } + return value; + } + + public long[] getBounds() { + long[] bounds = this.bounds; + if (bounds == null) { + this.bounds = bounds = getBounds(dataType); + } + return bounds; + } + + /** + * Get the bounds (min, max) of a data type. 
+ * + * @param dataType the data type + * @return the bounds (an array with 2 elements) + */ + public static long[] getBounds(TypeInfo dataType) { + long min, max; + switch (dataType.getValueType()) { + case Value.TINYINT: + min = Byte.MIN_VALUE; + max = Byte.MAX_VALUE; + break; + case Value.SMALLINT: + min = Short.MIN_VALUE; + max = Short.MAX_VALUE; + break; + case Value.INTEGER: + min = Integer.MIN_VALUE; + max = Integer.MAX_VALUE; + break; + case Value.BIGINT: + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + break; + case Value.REAL: + min = -0x100_0000; + max = 0x100_0000; + break; + case Value.DOUBLE: + min = -0x20_0000_0000_0000L; + max = 0x20_0000_0000_0000L; + break; + case Value.NUMERIC: { + if (dataType.getScale() != 0) { + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } + long p = (dataType.getPrecision() - dataType.getScale()); + if (p <= 0) { + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } else if (p > 18) { + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + } else { + max = 10; + for (int i = 1; i < p; i++) { + max *= 10; + } + min = - --max; + } + break; + } + case Value.DECFLOAT: { + long p = dataType.getPrecision(); + if (p > 18) { + min = Long.MIN_VALUE; + max = Long.MAX_VALUE; + } else { + max = 10; + for (int i = 1; i < p; i++) { + max *= 10; + } + min = -max; + } + break; + } + default: + throw DbException.getUnsupportedException(dataType.getTraceSQL()); + } + long bounds[] = { min, max }; + return bounds; + } + + /** + * Gets cycle option. + * + * @return cycle option value or {@code null} if is not defined. + */ + public Sequence.Cycle getCycle() { + return cycle; + } + + /** + * Sets cycle option. + * + * @param cycle option value. + */ + public void setCycle(Sequence.Cycle cycle) { + this.cycle = cycle; + } + + /** + * Gets cache size. + * + * @param session The session to calculate the value. + * @return cache size or {@code null} if value is not defined. 
+ */ + public Long getCacheSize(SessionLocal session) { + return getLong(session, cacheSize); + } + + /** + * Sets cache size. + * + * @param cacheSize cache size. + */ + public void setCacheSize(Expression cacheSize) { + this.cacheSize = cacheSize; + } + + private long getCurrentStart(Sequence sequence, SessionLocal session) { + return start != null ? getStartValue(session) : sequence.getBaseValue(); + } +} diff --git a/h2/src/main/org/h2/command/ddl/SetComment.java b/h2/src/main/org/h2/command/ddl/SetComment.java index 717f33ca85..ba936cc766 100644 --- a/h2/src/main/org/h2/command/ddl/SetComment.java +++ b/h2/src/main/org/h2/command/ddl/SetComment.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -10,9 +10,10 @@ import org.h2.engine.Comment; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; +import org.h2.schema.Schema; import org.h2.table.Table; /** @@ -28,69 +29,97 @@ public class SetComment extends DefineCommand { private int objectType; private Expression expr; - public SetComment(Session session) { + public SetComment(SessionLocal session) { super(session); } @Override - public int update() { - session.commit(true); + public long update() { Database db = session.getDatabase(); - session.getUser().checkAdmin(); DbObject object = null; int errorCode = ErrorCode.GENERAL_ERROR_1; if (schemaName == null) { schemaName = session.getCurrentSchemaName(); } switch (objectType) { - case DbObject.CONSTANT: - object = db.getSchema(schemaName).getConstant(objectName); + case DbObject.CONSTANT: { + Schema schema = 
db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getConstant(objectName); break; - case DbObject.CONSTRAINT: - object = db.getSchema(schemaName).getConstraint(objectName); + } + case DbObject.CONSTRAINT: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getConstraint(objectName); break; - case DbObject.FUNCTION_ALIAS: - object = db.getSchema(schemaName).findFunction(objectName); + } + case DbObject.FUNCTION_ALIAS: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findFunction(objectName); errorCode = ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1; break; - case DbObject.INDEX: - object = db.getSchema(schemaName).getIndex(objectName); + } + case DbObject.INDEX: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getIndex(objectName); break; + } case DbObject.ROLE: + session.getUser().checkAdmin(); schemaName = null; object = db.findRole(objectName); errorCode = ErrorCode.ROLE_NOT_FOUND_1; break; - case DbObject.SCHEMA: + case DbObject.SCHEMA: { schemaName = null; - object = db.findSchema(objectName); - errorCode = ErrorCode.SCHEMA_NOT_FOUND_1; + Schema schema = db.getSchema(objectName); + session.getUser().checkSchemaOwner(schema); + object = schema; break; - case DbObject.SEQUENCE: - object = db.getSchema(schemaName).getSequence(objectName); + } + case DbObject.SEQUENCE: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getSequence(objectName); break; - case DbObject.TABLE_OR_VIEW: - object = db.getSchema(schemaName).getTableOrView(session, objectName); + } + case DbObject.TABLE_OR_VIEW: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.getTableOrView(session, objectName); break; - case DbObject.TRIGGER: - object = 
db.getSchema(schemaName).findTrigger(objectName); + } + case DbObject.TRIGGER: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findTrigger(objectName); errorCode = ErrorCode.TRIGGER_NOT_FOUND_1; break; + } case DbObject.USER: + session.getUser().checkAdmin(); schemaName = null; object = db.getUser(objectName); break; - case DbObject.USER_DATATYPE: - schemaName = null; - object = db.findUserDataType(objectName); - errorCode = ErrorCode.USER_DATA_TYPE_ALREADY_EXISTS_1; + case DbObject.DOMAIN: { + Schema schema = db.getSchema(schemaName); + session.getUser().checkSchemaOwner(schema); + object = schema.findDomain(objectName); + errorCode = ErrorCode.DOMAIN_NOT_FOUND_1; break; + } default: } if (object == null) { throw DbException.get(errorCode, objectName); } String text = expr.optimize(session).getValue(session).getString(); + if (text != null && text.isEmpty()) { + text = null; + } if (column) { Table table = (Table) object; table.getColumn(columnName).setComment(text); diff --git a/h2/src/main/org/h2/command/ddl/TruncateTable.java b/h2/src/main/org/h2/command/ddl/TruncateTable.java index 0473123724..6bb244f6b7 100644 --- a/h2/src/main/org/h2/command/ddl/TruncateTable.java +++ b/h2/src/main/org/h2/command/ddl/TruncateTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.ddl; @@ -8,8 +8,10 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.schema.Sequence; +import org.h2.table.Column; import org.h2.table.Table; /** @@ -20,7 +22,9 @@ public class TruncateTable extends DefineCommand { private Table table; - public TruncateTable(Session session) { + private boolean restart; + + public TruncateTable(SessionLocal session) { super(session); } @@ -28,16 +32,28 @@ public void setTable(Table table) { this.table = table; } + public void setRestart(boolean restart) { + this.restart = restart; + } + @Override - public int update() { - session.commit(true); + public long update() { if (!table.canTruncate()) { - throw DbException.get(ErrorCode.CANNOT_TRUNCATE_1, table.getSQL()); + throw DbException.get(ErrorCode.CANNOT_TRUNCATE_1, table.getTraceSQL()); + } + session.getUser().checkTableRight(table, Right.DELETE); + table.lock(session, Table.EXCLUSIVE_LOCK); + long result = table.truncate(session); + if (restart) { + for (Column column : table.getColumns()) { + Sequence sequence = column.getSequence(); + if (sequence != null) { + sequence.modify(sequence.getStartValue(), null, null, null, null, null, null); + session.getDatabase().updateMeta(session, sequence); + } + } } - session.getUser().checkRight(table, Right.DELETE); - table.lock(session, true, true); - table.truncate(session); - return 0; + return result; } @Override diff --git a/h2/src/main/org/h2/command/ddl/package.html b/h2/src/main/org/h2/command/ddl/package.html index 9c88dc0ddc..9862a68694 100644 --- a/h2/src/main/org/h2/command/ddl/package.html +++ b/h2/src/main/org/h2/command/ddl/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/dml/AlterSequence.java b/h2/src/main/org/h2/command/dml/AlterSequence.java deleted file mode 100644 index 
57c5232ddf..0000000000 --- a/h2/src/main/org/h2/command/dml/AlterSequence.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import org.h2.api.ErrorCode; -import org.h2.command.CommandInterface; -import org.h2.command.ddl.SchemaCommand; -import org.h2.engine.Database; -import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.message.DbException; -import org.h2.schema.Schema; -import org.h2.schema.Sequence; -import org.h2.table.Column; -import org.h2.table.Table; - -/** - * This class represents the statement - * ALTER SEQUENCE - */ -public class AlterSequence extends SchemaCommand { - - private Table table; - private Sequence sequence; - private Expression start; - private Expression increment; - private Boolean cycle; - private Expression minValue; - private Expression maxValue; - private Expression cacheSize; - - public AlterSequence(Session session, Schema schema) { - super(session, schema); - } - - public void setSequence(Sequence sequence) { - this.sequence = sequence; - } - - @Override - public boolean isTransactional() { - return true; - } - - public void setColumn(Column column) { - table = column.getTable(); - sequence = column.getSequence(); - if (sequence == null) { - throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, column.getSQL()); - } - } - - public void setStartWith(Expression start) { - this.start = start; - } - - public void setIncrement(Expression increment) { - this.increment = increment; - } - - public void setCycle(Boolean cycle) { - this.cycle = cycle; - } - - public void setMinValue(Expression minValue) { - this.minValue = minValue; - } - - public void setMaxValue(Expression maxValue) { - this.maxValue = maxValue; - } - - public void setCacheSize(Expression cacheSize) { - this.cacheSize = 
cacheSize; - } - - @Override - public int update() { - Database db = session.getDatabase(); - if (table != null) { - session.getUser().checkRight(table, Right.ALL); - } - if (cycle != null) { - sequence.setCycle(cycle); - } - if (cacheSize != null) { - long size = cacheSize.optimize(session).getValue(session).getLong(); - sequence.setCacheSize(size); - } - if (start != null || minValue != null || - maxValue != null || increment != null) { - Long startValue = getLong(start); - Long min = getLong(minValue); - Long max = getLong(maxValue); - Long inc = getLong(increment); - sequence.modify(startValue, min, max, inc); - } - // need to use the system session, so that the update - // can be committed immediately - not committing it - // would keep other transactions from using the sequence - Session sysSession = db.getSystemSession(); - synchronized (sysSession) { - synchronized (db) { - db.updateMeta(sysSession, sequence); - sysSession.commit(true); - } - } - return 0; - } - - private Long getLong(Expression expr) { - if (expr == null) { - return null; - } - return expr.optimize(session).getValue(session).getLong(); - } - - @Override - public int getType() { - return CommandInterface.ALTER_SEQUENCE; - } - -} diff --git a/h2/src/main/org/h2/command/dml/AlterTableSet.java b/h2/src/main/org/h2/command/dml/AlterTableSet.java index 78c6e75647..9d3a3c1a14 100644 --- a/h2/src/main/org/h2/command/dml/AlterTableSet.java +++ b/h2/src/main/org/h2/command/dml/AlterTableSet.java @@ -1,14 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; +import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.ddl.SchemaCommand; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.schema.Schema; import org.h2.table.Table; @@ -19,13 +20,14 @@ */ public class AlterTableSet extends SchemaCommand { + private boolean ifTableExists; private String tableName; private final int type; private final boolean value; private boolean checkExisting; - public AlterTableSet(Session session, Schema schema, int type, boolean value) { + public AlterTableSet(SessionLocal session, Schema schema, int type, boolean value) { super(session, schema); this.type = type; this.value = value; @@ -40,22 +42,32 @@ public boolean isTransactional() { return true; } + public void setIfTableExists(boolean b) { + this.ifTableExists = b; + } + public void setTableName(String tableName) { this.tableName = tableName; } @Override - public int update() { - Table table = getSchema().getTableOrView(session, tableName); - session.getUser().checkRight(table, Right.ALL); - table.lock(session, true, true); - switch(type) { + public long update() { + Table table = getSchema().resolveTableOrView(session, tableName); + if (table == null) { + if (ifTableExists) { + return 0; + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName); + } + session.getUser().checkTableRight(table, Right.SCHEMA_OWNER); + table.lock(session, Table.EXCLUSIVE_LOCK); + switch (type) { case CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY: table.setCheckForeignKeyConstraints(session, value, value ? 
checkExisting : false); break; default: - DbException.throwInternalError("type="+type); + throw DbException.getInternalError("type="+type); } return 0; } diff --git a/h2/src/main/org/h2/command/dml/BackupCommand.java b/h2/src/main/org/h2/command/dml/BackupCommand.java index 282c206f6d..709147da4d 100644 --- a/h2/src/main/org/h2/command/dml/BackupCommand.java +++ b/h2/src/main/org/h2/command/dml/BackupCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; @@ -11,20 +11,18 @@ import java.util.ArrayList; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; -import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine.Store; +import org.h2.mvstore.db.Store; import org.h2.result.ResultInterface; import org.h2.store.FileLister; -import org.h2.store.PageStore; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; @@ -36,7 +34,7 @@ public class BackupCommand extends Prepared { private Expression fileNameExpr; - public BackupCommand(Session session) { + public BackupCommand(SessionLocal session) { super(session); } @@ -45,7 +43,7 @@ public void setFileName(Expression fileName) { } @Override - public int update() { + public long update() { String name = fileNameExpr.getValue(session).getString(); session.getUser().checkAdmin(); backupTo(name); @@ -58,85 +56,48 @@ private void backupTo(String fileName) { 
throw DbException.get(ErrorCode.DATABASE_IS_NOT_PERSISTENT); } try { - Store mvStore = db.getMvStore(); - if (mvStore != null) { - mvStore.flush(); - } + Store store = db.getStore(); + store.flush(); String name = db.getName(); name = FileUtils.getName(name); - OutputStream zip = FileUtils.newOutputStream(fileName, false); - ZipOutputStream out = new ZipOutputStream(zip); - db.flush(); - if (db.getPageStore() != null) { - String fn = db.getName() + Constants.SUFFIX_PAGE_FILE; - backupPageStore(out, fn, db.getPageStore()); - } - // synchronize on the database, to avoid concurrent temp file - // creation / deletion / backup - String base = FileUtils.getParent(db.getName()); - synchronized (db.getLobSyncObject()) { - String prefix = db.getDatabasePath(); - String dir = FileUtils.getParent(prefix); - dir = FileLister.getDir(dir); - ArrayList fileList = FileLister.getDatabaseFiles(dir, name, true); - for (String n : fileList) { - if (n.endsWith(Constants.SUFFIX_LOB_FILE)) { - backupFile(out, base, n); - } - if (n.endsWith(Constants.SUFFIX_MV_FILE) && mvStore != null) { - MVStore s = mvStore.getStore(); - boolean before = s.getReuseSpace(); - s.setReuseSpace(false); - try { - InputStream in = mvStore.getInputStream(); - backupFile(out, base, n, in); - } finally { - s.setReuseSpace(before); + try (OutputStream zip = FileUtils.newOutputStream(fileName, false)) { + ZipOutputStream out = new ZipOutputStream(zip); + db.flush(); + // synchronize on the database, to avoid concurrent temp file + // creation / deletion / backup + String base = FileUtils.getParent(db.getName()); + synchronized (db.getLobSyncObject()) { + String prefix = db.getDatabasePath(); + String dir = FileUtils.getParent(prefix); + dir = FileLister.getDir(dir); + ArrayList fileList = FileLister.getDatabaseFiles(dir, name, true); + for (String n : fileList) { + if (n.endsWith(Constants.SUFFIX_MV_FILE)) { + MVStore s = store.getMvStore(); + boolean before = s.getReuseSpace(); + s.setReuseSpace(false); + try { + 
InputStream in = store.getInputStream(); + backupFile(out, base, n, in); + } finally { + s.setReuseSpace(before); + } } } } + out.close(); } - out.close(); - zip.close(); } catch (IOException e) { throw DbException.convertIOException(e, fileName); } } - private void backupPageStore(ZipOutputStream out, String fileName, - PageStore store) throws IOException { - Database db = session.getDatabase(); - fileName = FileUtils.getName(fileName); - out.putNextEntry(new ZipEntry(fileName)); - int pos = 0; - try { - store.setBackup(true); - while (true) { - pos = store.copyDirect(pos, out); - if (pos < 0) { - break; - } - int max = store.getPageCount(); - db.setProgress(DatabaseEventListener.STATE_BACKUP_FILE, fileName, pos, max); - } - } finally { - store.setBackup(false); - } - out.closeEntry(); - } - - private static void backupFile(ZipOutputStream out, String base, String fn) - throws IOException { - InputStream in = FileUtils.newInputStream(fn); - backupFile(out, base, fn, in); - } - private static void backupFile(ZipOutputStream out, String base, String fn, InputStream in) throws IOException { String f = FileUtils.toRealPath(fn); base = FileUtils.toRealPath(base); if (!f.startsWith(base)) { - DbException.throwInternalError(f + " does not start with " + base); + throw DbException.getInternalError(f + " does not start with " + base); } f = f.substring(base.length()); f = correctFileName(f); diff --git a/h2/src/main/org/h2/command/dml/Call.java b/h2/src/main/org/h2/command/dml/Call.java index 4eb647fb43..7302298328 100644 --- a/h2/src/main/org/h2/command/dml/Call.java +++ b/h2/src/main/org/h2/command/dml/Call.java @@ -1,18 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.sql.ResultSet; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.function.table.TableFunction; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; +import org.h2.table.Column; import org.h2.value.Value; /** @@ -21,36 +24,34 @@ */ public class Call extends Prepared { - private boolean isResultSet; private Expression expression; + + private TableFunction tableFunction; + private Expression[] expressions; - public Call(Session session) { + public Call(SessionLocal session) { super(session); } @Override public ResultInterface queryMeta() { - LocalResult result; - if (isResultSet) { - Expression[] expr = expression.getExpressionColumns(session); - result = new LocalResult(session, expr, expr.length); - } else { - result = new LocalResult(session, expressions, 1); - } + int columnCount = expressions.length; + LocalResult result = new LocalResult(session, expressions, columnCount, columnCount); result.done(); return result; } @Override - public int update() { - Value v = expression.getValue(session); - int type = v.getType(); - switch(type) { - case Value.RESULT_SET: + public long update() { + if (tableFunction != null) { // this will throw an exception // methods returning a result set may not be called like this. 
return super.update(); + } + Value v = expression.getValue(session); + int type = v.getValueType(); + switch (type) { case Value.UNKNOWN: case Value.NULL: return 0; @@ -60,28 +61,36 @@ public int update() { } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { setCurrentRowNumber(1); - Value v = expression.getValue(session); - if (isResultSet) { - v = v.convertTo(Value.RESULT_SET); - ResultSet rs = v.getResultSet(); - return LocalResult.read(session, rs, maxrows); + if (tableFunction != null) { + return tableFunction.getValue(session); } - LocalResult result = new LocalResult(session, expressions, 1); - Value[] row = { v }; - result.addRow(row); + LocalResult result = new LocalResult(session, expressions, 1, 1); + result.addRow(expression.getValue(session)); result.done(); return result; } @Override public void prepare() { - expression = expression.optimize(session); - expressions = new Expression[] { expression }; - isResultSet = expression.getType() == Value.RESULT_SET; - if (isResultSet) { + if (tableFunction != null) { prepareAlways = true; + tableFunction.optimize(session); + ResultInterface result = tableFunction.getValueTemplate(session); + int columnCount = result.getVisibleColumnCount(); + expressions = new Expression[columnCount]; + for (int i = 0; i < columnCount; i++) { + String name = result.getColumnName(i); + String alias = result.getAlias(i); + Expression e = new ExpressionColumn(session.getDatabase(), new Column(name, result.getColumnType(i))); + if (!alias.equals(name)) { + e = new Alias(e, alias, false); + } + expressions[i] = e; + } + } else { + expressions = new Expression[] { expression = expression.optimize(session) }; } } @@ -89,6 +98,10 @@ public void setExpression(Expression expression) { this.expression = expression; } + public void setTableFunction(TableFunction tableFunction) { + this.tableFunction = tableFunction; + } + @Override public boolean isQuery() { return true; @@ -101,7 +114,7 
@@ public boolean isTransactional() { @Override public boolean isReadOnly() { - return expression.isEverything(ExpressionVisitor.READONLY_VISITOR); + return tableFunction == null && expression.isEverything(ExpressionVisitor.READONLY_VISITOR); } @@ -112,7 +125,7 @@ public int getType() { @Override public boolean isCacheable() { - return !isResultSet; + return tableFunction == null; } } diff --git a/h2/src/main/org/h2/command/dml/CommandWithValues.java b/h2/src/main/org/h2/command/dml/CommandWithValues.java new file mode 100644 index 0000000000..592981ae33 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/CommandWithValues.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.util.Utils; + +/** + * Command that supports VALUES clause. + */ +public abstract class CommandWithValues extends DataChangeStatement { + + /** + * Expression data for the VALUES clause. + */ + protected final ArrayList valuesExpressionList = Utils.newSmallArrayList(); + + /** + * Creates new instance of command with VALUES clause. + * + * @param session + * the session + */ + protected CommandWithValues(SessionLocal session) { + super(session); + } + + /** + * Add a row to this command. + * + * @param expr + * the list of values + */ + public void addRow(Expression[] expr) { + valuesExpressionList.add(expr); + } + +} diff --git a/h2/src/main/org/h2/command/dml/DataChangeStatement.java b/h2/src/main/org/h2/command/dml/DataChangeStatement.java new file mode 100644 index 0000000000..a2b53970f4 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/DataChangeStatement.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; + +/** + * Data change statement. + */ +public abstract class DataChangeStatement extends Prepared { + + /** + * Creates new instance of DataChangeStatement. + * + * @param session + * the session + */ + protected DataChangeStatement(SessionLocal session) { + super(session); + } + + /** + * Return the name of this statement. + * + * @return the short name of this statement. + */ + public abstract String getStatementName(); + + /** + * Return the target table. + * + * @return the target table + */ + public abstract Table getTable(); + + @Override + public final boolean isTransactional() { + return true; + } + + @Override + public final ResultInterface queryMeta() { + return null; + } + + @Override + public boolean isCacheable() { + return true; + } + + @Override + public final long update() { + return update(null, null); + } + + /** + * Execute the statement with specified delta change collector and collection mode. + * + * @param deltaChangeCollector + * target result + * @param deltaChangeCollectionMode + * collection mode + * @return the update count + */ + public abstract long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode); + +} diff --git a/h2/src/main/org/h2/command/dml/Delete.java b/h2/src/main/org/h2/command/dml/Delete.java index 4f37a360dc..832ba22dc2 100644 --- a/h2/src/main/org/h2/command/dml/Delete.java +++ b/h2/src/main/org/h2/command/dml/Delete.java @@ -1,24 +1,28 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; +import java.util.HashSet; + import org.h2.api.Trigger; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.result.ResultInterface; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; import org.h2.result.Row; -import org.h2.result.RowList; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.StringUtils; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -26,123 +30,94 @@ * This class represents the statement * DELETE */ -public class Delete extends Prepared { - - private Expression condition; - private TableFilter tableFilter; - - /** - * The limit expression as specified in the LIMIT or TOP clause. 
- */ - private Expression limitExpr; +public final class Delete extends FilteredDataChangeStatement { - public Delete(Session session) { + public Delete(SessionLocal session) { super(session); } - public void setTableFilter(TableFilter tableFilter) { - this.tableFilter = tableFilter; - } - - public void setCondition(Expression condition) { - this.condition = condition; - } @Override - public int update() { - tableFilter.startQuery(session); - tableFilter.reset(); - Table table = tableFilter.getTable(); - session.getUser().checkRight(table, Right.DELETE); + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + targetTableFilter.startQuery(session); + targetTableFilter.reset(); + Table table = targetTableFilter.getTable(); + session.getUser().checkTableRight(table, Right.DELETE); table.fire(session, Trigger.DELETE, true); - table.lock(session, true, false); - RowList rows = new RowList(session); - int limitRows = -1; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - limitRows = v.getInt(); + table.lock(session, Table.WRITE_LOCK); + long limitRows = -1; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + if (v == ValueNull.INSTANCE || (limitRows = v.getLong()) < 0) { + throw DbException.getInvalidValueException("FETCH", v); } } - try { + try (LocalResult rows = LocalResult.forTable(session, table)) { setCurrentRowNumber(0); - int count = 0; - while (limitRows != 0 && tableFilter.next()) { - setCurrentRowNumber(rows.size() + 1); - if (condition == null || Boolean.TRUE.equals( - condition.getBooleanValue(session))) { - Row row = tableFilter.get(); - boolean done = false; - if (table.fireRow()) { - done = table.fireBeforeRow(session, row, null); + long count = 0; + while (nextRow(limitRows, count)) { + Row row = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, row); + if (lockedRow == null) { + continue; } - if 
(!done) { - rows.add(row); - } - count++; - if (limitRows >= 0 && count >= limitRows) { - break; + if (!row.hasSharedData(lockedRow)) { + row = lockedRow; + targetTableFilter.set(row); + if (condition != null && !condition.getBooleanValue(session)) { + continue; + } } } + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(row.getValueList()); + } + if (!table.fireRow() || !table.fireBeforeRow(session, row, null)) { + rows.addRowForTable(row); + } + count++; } - int rowScanCount = 0; - for (rows.reset(); rows.hasNext();) { + rows.done(); + long rowScanCount = 0; + while (rows.next()) { if ((++rowScanCount & 127) == 0) { checkCanceled(); } - Row row = rows.next(); + Row row = rows.currentRowForTable(); table.removeRow(session, row); - session.log(table, UndoLogRecord.DELETE, row); } if (table.fireRow()) { - for (rows.reset(); rows.hasNext();) { - Row row = rows.next(); - table.fireAfterRow(session, row, null, false); + for (rows.reset(); rows.next();) { + table.fireAfterRow(session, rows.currentRowForTable(), null, false); } } table.fire(session, Trigger.DELETE, false); return count; - } finally { - rows.close(); } } @Override - public String getPlanSQL() { - StringBuilder buff = new StringBuilder(); - buff.append("DELETE "); - buff.append("FROM ").append(tableFilter.getPlanSQL(false)); - if (condition != null) { - buff.append("\nWHERE ").append(StringUtils.unEnclose( - condition.getSQL())); - } - if (limitExpr != null) { - buff.append("\nLIMIT (").append(StringUtils.unEnclose( - limitExpr.getSQL())).append(')'); - } - return buff.toString(); + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("DELETE FROM "); + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + appendFilterCondition(builder, sqlFlags); + return builder.toString(); } @Override public void prepare() { if (condition != null) { - condition.mapColumns(tableFilter, 0); - condition = condition.optimize(session); - 
condition.createIndexConditions(session, tableFilter); + condition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + condition = condition.optimizeCondition(session); + if (condition != null) { + condition.createIndexConditions(session, targetTableFilter); + } } - PlanItem item = tableFilter.getBestPlanItem(session, 1); - tableFilter.setPlanItem(item); - tableFilter.prepare(); - } - - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; + TableFilter[] filters = new TableFilter[] { targetTableFilter }; + PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); + targetTableFilter.setPlanItem(item); + targetTableFilter.prepare(); } @Override @@ -150,13 +125,16 @@ public int getType() { return CommandInterface.DELETE; } - public void setLimit(Expression limit) { - this.limitExpr = limit; + @Override + public String getStatementName() { + return "DELETE"; } @Override - public boolean isCacheable() { - return true; + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (condition != null) { + condition.isEverything(visitor); + } } - } diff --git a/h2/src/main/org/h2/command/dml/ExecuteImmediate.java b/h2/src/main/org/h2/command/dml/ExecuteImmediate.java new file mode 100644 index 0000000000..b9e5cfe66e --- /dev/null +++ b/h2/src/main/org/h2/command/dml/ExecuteImmediate.java @@ -0,0 +1,57 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; + +/** + * This class represents the statement + * EXECUTE IMMEDIATE. + */ +public class ExecuteImmediate extends Prepared { + + private Expression statement; + + public ExecuteImmediate(SessionLocal session, Expression statement) { + super(session); + this.statement = statement.optimize(session); + } + + @Override + public long update() { + String sql = statement.getValue(session).getString(); + if (sql == null) { + throw DbException.getInvalidValueException("SQL command", null); + } + Prepared command = session.prepare(sql); + if (command.isQuery()) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_2, sql, ""); + } + return command.update(); + } + + @Override + public boolean isTransactional() { + return true; + } + + @Override + public int getType() { + return CommandInterface.EXECUTE_IMMEDIATELY; + } + + @Override + public ResultInterface queryMeta() { + return null; + } + +} diff --git a/h2/src/main/org/h2/command/dml/ExecuteProcedure.java b/h2/src/main/org/h2/command/dml/ExecuteProcedure.java index e612b6bdb3..0313ea51fd 100644 --- a/h2/src/main/org/h2/command/dml/ExecuteProcedure.java +++ b/h2/src/main/org/h2/command/dml/ExecuteProcedure.java @@ -1,19 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; + import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Procedure; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.result.ResultInterface; -import org.h2.util.New; +import org.h2.util.Utils; /** * This class represents the statement @@ -21,10 +22,10 @@ */ public class ExecuteProcedure extends Prepared { - private final ArrayList expressions = New.arrayList(); + private final ArrayList expressions = Utils.newSmallArrayList(); private Procedure procedure; - public ExecuteProcedure(Session session) { + public ExecuteProcedure(SessionLocal session) { super(session); } @@ -60,14 +61,14 @@ public boolean isQuery() { } @Override - public int update() { + public long update() { setParameters(); Prepared prepared = procedure.getPrepared(); return prepared.update(); } @Override - public ResultInterface query(int limit) { + public ResultInterface query(long limit) { setParameters(); Prepared prepared = procedure.getPrepared(); return prepared.query(limit); diff --git a/h2/src/main/org/h2/command/dml/Explain.java b/h2/src/main/org/h2/command/dml/Explain.java index 0065056e4e..ea677f528f 100644 --- a/h2/src/main/org/h2/command/dml/Explain.java +++ b/h2/src/main/org/h2/command/dml/Explain.java @@ -1,26 +1,28 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; +import java.util.HashSet; import java.util.Map; -import java.util.TreeMap; import java.util.Map.Entry; +import java.util.TreeMap; import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; -import org.h2.mvstore.db.MVTableEngine.Store; +import org.h2.mvstore.db.Store; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; -import org.h2.store.PageStore; import org.h2.table.Column; -import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.util.HasSQL; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; /** * This class represents the statement @@ -32,7 +34,7 @@ public class Explain extends Prepared { private LocalResult result; private boolean executeCommand; - public Explain(Session session) { + public Explain(SessionLocal session) { super(session); } @@ -40,6 +42,10 @@ public void setCommand(Prepared command) { this.command = command; } + public Prepared getCommand() { + return command; + } + @Override public void prepare() { command.prepare(); @@ -55,38 +61,36 @@ public ResultInterface queryMeta() { } @Override - public ResultInterface query(int maxrows) { - Column column = new Column("PLAN", Value.STRING); + protected void checkParameters() { + // Check params only in case of EXPLAIN ANALYZE + if (executeCommand) { + super.checkParameters(); + } + } + + @Override + public ResultInterface query(long maxrows) { Database db = session.getDatabase(); - ExpressionColumn expr = new ExpressionColumn(db, column); - Expression[] expressions = { expr }; - result = new LocalResult(session, expressions, 1); + Expression[] expressions = { new ExpressionColumn(db, new Column("PLAN", TypeInfo.TYPE_VARCHAR)) }; + result = new LocalResult(session, 
expressions, 1, 1); + int sqlFlags = HasSQL.ADD_PLAN_INFORMATION; if (maxrows >= 0) { String plan; if (executeCommand) { - PageStore store = null; - Store mvStore = null; + Store store = null; if (db.isPersistent()) { - store = db.getPageStore(); - if (store != null) { - store.statisticsStart(); - } - mvStore = db.getMvStore(); - if (mvStore != null) { - mvStore.statisticsStart(); - } + store = db.getStore(); + store.statisticsStart(); } if (command.isQuery()) { command.query(maxrows); } else { command.update(); } - plan = command.getPlanSQL(); + plan = command.getPlanSQL(sqlFlags); Map statistics = null; if (store != null) { statistics = store.statisticsEnd(); - } else if (mvStore != null) { - statistics = mvStore.statisticsEnd(); } if (statistics != null) { int total = 0; @@ -94,7 +98,7 @@ public ResultInterface query(int maxrows) { total += e.getValue(); } if (total > 0) { - statistics = new TreeMap(statistics); + statistics = new TreeMap<>(statistics); StringBuilder buff = new StringBuilder(); if (statistics.size() > 1) { buff.append("total: ").append(total).append('\n'); @@ -112,7 +116,7 @@ public ResultInterface query(int maxrows) { } } } else { - plan = command.getPlanSQL(); + plan = command.getPlanSQL(sqlFlags); } add(plan); } @@ -121,8 +125,7 @@ public ResultInterface query(int maxrows) { } private void add(String text) { - Value[] row = { ValueString.get(text) }; - result.addRow(row); + result.addRow(ValueVarchar.get(text)); } @Override @@ -142,6 +145,12 @@ public boolean isReadOnly() { @Override public int getType() { - return CommandInterface.EXPLAIN; + return executeCommand ? 
CommandInterface.EXPLAIN_ANALYZE : CommandInterface.EXPLAIN; } + + @Override + public void collectDependencies(HashSet dependencies) { + command.collectDependencies(dependencies); + } + } diff --git a/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java b/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java new file mode 100644 index 0000000000..81995ce801 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/FilteredDataChangeStatement.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.table.Table; +import org.h2.table.TableFilter; + +/** + * Data change statement with WHERE criteria and possibly limited number of + * rows. + */ +abstract class FilteredDataChangeStatement extends DataChangeStatement { + + /** + * The WHERE criteria. + */ + Expression condition; + + /** + * The target table filter. + */ + TableFilter targetTableFilter; + + /** + * The expression with optional maximum number of rows. + */ + Expression fetchExpr; + + /** + * Creates new instance of FilteredDataChangeStatement. 
+ * + * @param session + * the session + */ + FilteredDataChangeStatement(SessionLocal session) { + super(session); + } + + @Override + public final Table getTable() { + return targetTableFilter.getTable(); + } + + public final void setTableFilter(TableFilter tableFilter) { + this.targetTableFilter = tableFilter; + } + + public final TableFilter getTableFilter() { + return targetTableFilter; + } + + public final void setCondition(Expression condition) { + this.condition = condition; + } + + public final Expression getCondition() { + return this.condition; + } + + public void setFetch(Expression fetch) { + this.fetchExpr = fetch; + } + + final boolean nextRow(long limitRows, long count) { + if (limitRows < 0 || count < limitRows) { + while (targetTableFilter.next()) { + setCurrentRowNumber(count + 1); + if (condition == null || condition.getBooleanValue(session)) { + return true; + } + } + } + return false; + } + + final void appendFilterCondition(StringBuilder builder, int sqlFlags) { + if (condition != null) { + builder.append("\nWHERE "); + condition.getUnenclosedSQL(builder, sqlFlags); + } + if (fetchExpr != null) { + builder.append("\nFETCH FIRST "); + String count = fetchExpr.getSQL(sqlFlags, Expression.WITHOUT_PARENTHESES); + if ("1".equals(count)) { + builder.append("ROW ONLY"); + } else { + builder.append(count).append(" ROWS ONLY"); + } + } + } + +} diff --git a/h2/src/main/org/h2/command/dml/Help.java b/h2/src/main/org/h2/command/dml/Help.java new file mode 100644 index 0000000000..528909e31d --- /dev/null +++ b/h2/src/main/org/h2/command/dml/Help.java @@ -0,0 +1,161 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.sql.ResultSet; + +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.Column; +import org.h2.tools.Csv; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; + +/** + * This class represents the statement CALL. + */ +public class Help extends Prepared { + + private final String[] conditions; + + private final Expression[] expressions; + + public Help(SessionLocal session, String[] conditions) { + super(session); + this.conditions = conditions; + Database db = session.getDatabase(); + expressions = new Expression[] { // + new ExpressionColumn(db, new Column("SECTION", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("TOPIC", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("SYNTAX", TypeInfo.TYPE_VARCHAR)), // + new ExpressionColumn(db, new Column("TEXT", TypeInfo.TYPE_VARCHAR)), // + }; + } + + @Override + public ResultInterface queryMeta() { + LocalResult result = new LocalResult(session, expressions, 4, 4); + result.done(); + return result; + } + + @Override + public ResultInterface query(long maxrows) { + LocalResult result = new LocalResult(session, expressions, 4, 4); + try { + ResultSet rs = getTable(); + loop: while (rs.next()) { + String topic = rs.getString(2).trim(); + for (String condition : conditions) { + if (!topic.contains(condition)) { + continue loop; + } + } + result.addRow( + // SECTION + ValueVarchar.get(rs.getString(1).trim(), session), + // TOPIC + ValueVarchar.get(topic, 
session), + // SYNTAX + ValueVarchar.get(stripAnnotationsFromSyntax(rs.getString(3)), session), + // TEXT + ValueVarchar.get(processHelpText(rs.getString(4)), session)); + } + } catch (Exception e) { + throw DbException.convert(e); + } + result.done(); + return result; + } + + /** + * Strip out the special annotations we use to help build the railroad/BNF diagrams + * @param s to process + * @return cleaned text + */ + public static String stripAnnotationsFromSyntax(String s) { + // SYNTAX column - Strip out the special annotations we use to + // help build the railroad/BNF diagrams. + return s.replaceAll("@c@ ", "").replaceAll("@h2@ ", "") + .replaceAll("@c@", "").replaceAll("@h2@", "").trim(); + } + + /** + * Sanitize value read from csv file (i.e. help.csv) + * @param s text to process + * @return text without wrapping quotes and trimmed + */ + public static String processHelpText(String s) { + int len = s.length(); + int end = 0; + for (; end < len; end++) { + char ch = s.charAt(end); + if (ch == '.') { + end++; + break; + } + if (ch == '"') { + do { + end++; + } while (end < len && s.charAt(end) != '"'); + } + } + s = s.substring(0, end); + return s.trim(); + } + + /** + * Returns HELP table. 
+ * + * @return HELP table with columns SECTION,TOPIC,SYNTAX,TEXT + * @throws IOException + * on I/O exception + */ + public static ResultSet getTable() throws IOException { + Reader reader = new InputStreamReader(new ByteArrayInputStream(Utils.getResource("/org/h2/res/help.csv"))); + Csv csv = new Csv(); + csv.setLineCommentCharacter('#'); + return csv.read(reader, null); + } + + @Override + public boolean isQuery() { + return true; + } + + @Override + public boolean isTransactional() { + return true; + } + + @Override + public boolean isReadOnly() { + return true; + } + + @Override + public int getType() { + return CommandInterface.CALL; + } + + @Override + public boolean isCacheable() { + return true; + } + +} diff --git a/h2/src/main/org/h2/command/dml/Insert.java b/h2/src/main/org/h2/command/dml/Insert.java index 76e4edf6be..aa350cc3ee 100644 --- a/h2/src/main/org/h2/command/dml/Insert.java +++ b/h2/src/main/org/h2/command/dml/Insert.java @@ -1,58 +1,74 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.Command; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.command.query.Query; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; -import org.h2.expression.Comparison; -import org.h2.expression.ConditionAndOr; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; +import org.h2.expression.condition.Comparison; +import org.h2.expression.condition.ConditionAndOr; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mvstore.db.MVPrimaryIndex; import org.h2.result.ResultInterface; import org.h2.result.ResultTarget; import org.h2.result.Row; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.StatementBuilder; +import org.h2.util.HasSQL; import org.h2.value.Value; -import org.h2.value.ValueNull; /** * This class represents the statement * INSERT */ -public class Insert extends Prepared implements ResultTarget { +public final class Insert extends CommandWithValues implements ResultTarget { private Table table; private Column[] columns; - private final ArrayList list = New.arrayList(); private Query query; - private boolean sortedInsertMode; - private int rowNumber; + private long rowNumber; private boolean insertFromSelect; + private Boolean overridingSystem; + /** * For MySQL-style INSERT ... ON DUPLICATE KEY UPDATE .... 
*/ private HashMap duplicateKeyAssignmentMap; - public Insert(Session session) { + private Value[] onDuplicateKeyRow; + + /** + * For MySQL-style INSERT IGNORE and PostgreSQL-style ON CONFLICT DO + * NOTHING. + */ + private boolean ignore; + + private ResultTarget deltaChangeCollector; + + private ResultOption deltaChangeCollectionMode; + + public Insert(SessionLocal session) { super(session); } @@ -64,6 +80,11 @@ public void setCommand(Command command) { } } + @Override + public Table getTable() { + return table; + } + public void setTable(Table table) { this.table = table; } @@ -72,10 +93,24 @@ public void setColumns(Column[] columns) { this.columns = columns; } + /** + * Sets MySQL-style INSERT IGNORE mode or PostgreSQL-style ON CONFLICT + * DO NOTHING. + * + * @param ignore ignore duplicates + */ + public void setIgnore(boolean ignore) { + this.ignore = ignore; + } + public void setQuery(Query query) { this.query = query; } + public void setOverridingSystem(Boolean overridingSystem) { + this.overridingSystem = overridingSystem; + } + /** * Keep a collection of the columns to pass to update if a duplicate key * happens, for MySQL-style INSERT ... ON DUPLICATE KEY UPDATE .... @@ -85,91 +120,97 @@ public void setQuery(Query query) { */ public void addAssignmentForDuplicate(Column column, Expression expression) { if (duplicateKeyAssignmentMap == null) { - duplicateKeyAssignmentMap = New.hashMap(); + duplicateKeyAssignmentMap = new HashMap<>(); } - if (duplicateKeyAssignmentMap.containsKey(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, - column.getName()); + if (duplicateKeyAssignmentMap.putIfAbsent(column, expression) != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); } - duplicateKeyAssignmentMap.put(column, expression); - } - - /** - * Add a row to this merge statement. 
- * - * @param expr the list of values - */ - public void addRow(Expression[] expr) { - list.add(expr); } @Override - public int update() { - Index index = null; - if (sortedInsertMode) { - index = table.getScanIndex(session); - index.setSortedInsertMode(true); - } + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + this.deltaChangeCollector = deltaChangeCollector; + this.deltaChangeCollectionMode = deltaChangeCollectionMode; try { return insertRows(); } finally { - if (index != null) { - index.setSortedInsertMode(false); - } + this.deltaChangeCollector = null; + this.deltaChangeCollectionMode = null; } } - private int insertRows() { - session.getUser().checkRight(table, Right.INSERT); + private long insertRows() { + session.getUser().checkTableRight(table, Right.INSERT); setCurrentRowNumber(0); table.fire(session, Trigger.INSERT, true); rowNumber = 0; - int listSize = list.size(); + int listSize = valuesExpressionList.size(); if (listSize > 0) { int columnLen = columns.length; for (int x = 0; x < listSize; x++) { - session.startStatementWithinTransaction(); Row newRow = table.getTemplateRow(); - Expression[] expr = list.get(x); + Expression[] expr = valuesExpressionList.get(x); setCurrentRowNumber(x + 1); for (int i = 0; i < columnLen; i++) { Column c = columns[i]; int index = c.getColumnId(); Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) - e = e.optimize(session); + if (e != ValueExpression.DEFAULT) { try { - Value v = c.convert(e.getValue(session)); - newRow.setValue(index, v); + newRow.setValue(index, e.getValue(session)); } catch (DbException ex) { - throw setRow(ex, x, getSQL(expr)); + throw setRow(ex, x, getSimpleSQL(expr)); } } } rowNumber++; - table.validateConvertUpdateSequence(session, newRow); - boolean done = table.fireBeforeRow(session, null, newRow); - if (!done) { - table.lock(session, true, false); + table.convertInsertRow(session, newRow, overridingSystem); + if 
(deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, newRow)) { + table.lock(session, Table.WRITE_LOCK); try { table.addRow(session, newRow); } catch (DbException de) { - handleOnDuplicate(de); + if (handleOnDuplicate(de, null)) { + // MySQL returns 2 for updated row + // TODO: detect no-op change + rowNumber++; + } else { + // INSERT IGNORE case + rowNumber--; + } + continue; } - session.log(table, UndoLogRecord.INSERT, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); table.fireAfterRow(session, null, newRow, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); } } } else { - table.lock(session, true, false); + table.lock(session, Table.WRITE_LOCK); if (insertFromSelect) { query.query(0, this); } else { ResultInterface rows = query.query(0); while (rows.next()) { Value[] r = rows.currentRow(); - addRow(r); + try { + addRow(r); + } catch (DbException de) { + if (handleOnDuplicate(de, r)) { + // MySQL returns 2 for updated row + // TODO: detect no-op change + rowNumber++; + } else { + // INSERT IGNORE case + rowNumber--; + } + } } rows.close(); } @@ -179,88 +220,77 @@ private int insertRows() { } @Override - public void addRow(Value[] values) { + public void addRow(Value... 
values) { Row newRow = table.getTemplateRow(); setCurrentRowNumber(++rowNumber); for (int j = 0, len = columns.length; j < len; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = c.convert(values[j]); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, rowNumber, getSQL(values)); - } + newRow.setValue(columns[j].getColumnId(), values[j]); + } + table.convertInsertRow(session, newRow, overridingSystem); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); } - table.validateConvertUpdateSequence(session, newRow); - boolean done = table.fireBeforeRow(session, null, newRow); - if (!done) { + if (!table.fireBeforeRow(session, null, newRow)) { table.addRow(session, newRow); - session.log(table, UndoLogRecord.INSERT, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); table.fireAfterRow(session, null, newRow, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); } } @Override - public int getRowCount() { + public long getRowCount() { + // This method is not used in this class return rowNumber; } @Override - public String getPlanSQL() { - StatementBuilder buff = new StatementBuilder("INSERT INTO "); - buff.append(table.getSQL()).append('('); - for (Column c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(")\n"); + public void limitsWereApplied() { + // Nothing to do + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("INSERT INTO "); + table.getSQL(builder, sqlFlags).append('('); + Column.writeColumns(builder, columns, sqlFlags); + builder.append(")\n"); if (insertFromSelect) { - buff.append("DIRECT "); - } - if (sortedInsertMode) { - buff.append("SORTED "); + builder.append("DIRECT "); } - if 
(list.size() > 0) { - buff.append("VALUES "); + if (!valuesExpressionList.isEmpty()) { + builder.append("VALUES "); int row = 0; - if (list.size() > 1) { - buff.append('\n'); + if (valuesExpressionList.size() > 1) { + builder.append('\n'); } - for (Expression[] expr : list) { + for (Expression[] expr : valuesExpressionList) { if (row++ > 0) { - buff.append(",\n"); + builder.append(",\n"); } - buff.append('('); - buff.resetCount(); - for (Expression e : expr) { - buff.appendExceptFirst(", "); - if (e == null) { - buff.append("DEFAULT"); - } else { - buff.append(e.getSQL()); - } - } - buff.append(')'); + Expression.writeExpressions(builder.append('('), expr, sqlFlags).append(')'); } } else { - buff.append(query.getPlanSQL()); + builder.append(query.getPlanSQL(sqlFlags)); } - return buff.toString(); + return builder.toString(); } @Override public void prepare() { if (columns == null) { - if (list.size() > 0 && list.get(0).length == 0) { + if (!valuesExpressionList.isEmpty() && valuesExpressionList.get(0).length == 0) { // special case where table is used as a sequence columns = new Column[0]; } else { columns = table.getColumns(); } } - if (list.size() > 0) { - for (Expression[] expr : list) { + if (!valuesExpressionList.isEmpty()) { + for (Expression[] expr : valuesExpressionList) { if (expr.length != columns.length) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } @@ -285,22 +315,13 @@ public void prepare() { } @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; + public int getType() { + return CommandInterface.INSERT; } @Override - public int getType() { - return CommandInterface.INSERT; + public String getStatementName() { + return "INSERT"; } public void setInsertFromSelect(boolean value) { @@ -309,98 +330,126 @@ public void setInsertFromSelect(boolean 
value) { @Override public boolean isCacheable() { - return duplicateKeyAssignmentMap == null || - duplicateKeyAssignmentMap.isEmpty(); + return duplicateKeyAssignmentMap == null; } - private void handleOnDuplicate(DbException de) { + /** + * @param de duplicate key exception + * @param currentRow current row values (optional) + * @return {@code true} if row was updated, {@code false} if row was ignored + */ + private boolean handleOnDuplicate(DbException de, Value[] currentRow) { if (de.getErrorCode() != ErrorCode.DUPLICATE_KEY_1) { throw de; } - if (duplicateKeyAssignmentMap == null || - duplicateKeyAssignmentMap.isEmpty()) { + if (duplicateKeyAssignmentMap == null) { + if (ignore) { + return false; + } throw de; } - ArrayList variableNames = new ArrayList( - duplicateKeyAssignmentMap.size()); - for (int i = 0; i < columns.length; i++) { - String key = table.getSchema().getName() + "." + - table.getName() + "." + columns[i].getName(); - variableNames.add(key); - session.setVariable(key, - list.get(getCurrentRowNumber() - 1)[i].getValue(session)); + int columnCount = columns.length; + Expression[] row = (currentRow == null) ? 
valuesExpressionList.get((int) getCurrentRowNumber() - 1) + : new Expression[columnCount]; + onDuplicateKeyRow = new Value[table.getColumns().length]; + for (int i = 0; i < columnCount; i++) { + Value value; + if (currentRow != null) { + value = currentRow[i]; + row[i] = ValueExpression.get(value); + } else { + value = row[i].getValue(session); + } + onDuplicateKeyRow[columns[i].getColumnId()] = value; } - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(table.getSQL()).append(" SET "); - for (Column column : duplicateKeyAssignmentMap.keySet()) { - buff.appendExceptFirst(", "); - Expression ex = duplicateKeyAssignmentMap.get(column); - buff.append(column.getSQL()).append("=").append(ex.getSQL()); + StringBuilder builder = new StringBuilder("UPDATE "); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append(" SET "); + boolean f = false; + for (Entry entry : duplicateKeyAssignmentMap.entrySet()) { + if (f) { + builder.append(", "); + } + f = true; + entry.getKey().getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append('='); + entry.getValue().getUnenclosedSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); } - buff.append(" WHERE "); - Index foundIndex = searchForUpdateIndex(); + builder.append(" WHERE "); + Index foundIndex = (Index) de.getSource(); if (foundIndex == null) { throw DbException.getUnsupportedException( "Unable to apply ON DUPLICATE KEY UPDATE, no index found!"); } - buff.append(prepareUpdateCondition(foundIndex).getSQL()); - String sql = buff.toString(); - Prepared command = session.prepare(sql); + prepareUpdateCondition(foundIndex, row).getUnenclosedSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + String sql = builder.toString(); + Update command = (Update) session.prepare(sql); + command.setOnDuplicateKeyInsert(this); for (Parameter param : command.getParameters()) { Parameter insertParam = parameters.get(param.getIndex()); param.setValue(insertParam.getValue(session)); } - command.update(); - for (String variableName : variableNames) { - 
session.setVariable(variableName, ValueNull.INSTANCE); - } + boolean result = command.update() > 0; + onDuplicateKeyRow = null; + return result; } - private Index searchForUpdateIndex() { - Index foundIndex = null; - for (Index index : table.getIndexes()) { - if (index.getIndexType().isPrimaryKey() || index.getIndexType().isUnique()) { - for (Column indexColumn : index.getColumns()) { - for (Column insertColumn : columns) { - if (indexColumn.getName().equals(insertColumn.getName())) { - foundIndex = index; - break; - } - foundIndex = null; - } - if (foundIndex == null) { - break; - } - } - if (foundIndex != null) { - break; - } - } + private Expression prepareUpdateCondition(Index foundIndex, Expression[] row) { + // MVPrimaryIndex is playing fast and loose with it's implementation of + // the Index interface. + // It returns all of the columns in the table when we call + // getIndexColumns() or getColumns(). + // Don't have time right now to fix that, so just special-case it. + // PageDataIndex has the same problem. 
+ final Column[] indexedColumns; + if (foundIndex instanceof MVPrimaryIndex) { + MVPrimaryIndex foundMV = (MVPrimaryIndex) foundIndex; + indexedColumns = new Column[] { foundMV.getIndexColumns()[foundMV + .getMainIndexColumn()].column }; + } else { + indexedColumns = foundIndex.getColumns(); } - return foundIndex; - } - private Expression prepareUpdateCondition(Index foundIndex) { Expression condition = null; - for (Column column : foundIndex.getColumns()) { + for (Column column : indexedColumns) { ExpressionColumn expr = new ExpressionColumn(session.getDatabase(), table.getSchema().getName(), table.getName(), column.getName()); for (int i = 0; i < columns.length; i++) { - if (expr.getColumnName().equals(columns[i].getName())) { + if (expr.getColumnName(session, i).equals(columns[i].getName())) { if (condition == null) { - condition = new Comparison(session, Comparison.EQUAL, - expr, list.get(getCurrentRowNumber() - 1)[i++]); + condition = new Comparison(Comparison.EQUAL, expr, row[i], false); } else { condition = new ConditionAndOr(ConditionAndOr.AND, condition, - new Comparison(session, Comparison.EQUAL, - expr, list.get(0)[i++])); + new Comparison(Comparison.EQUAL, expr, row[i], false)); } + break; } } } return condition; } + /** + * Get the value to use for the specified column in case of a duplicate key. 
+ * + * @param columnIndex the column index + * @return the value + */ + public Value getOnDuplicateKeyValue(int columnIndex) { + return onDuplicateKeyRow[columnIndex]; + } + + @Override + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (!valuesExpressionList.isEmpty()) { + for (Expression[] expr : valuesExpressionList) { + for (Expression e : expr) { + e.isEverything(visitor); + } + } + } else { + query.isEverything(visitor); + } + } } diff --git a/h2/src/main/org/h2/command/dml/Merge.java b/h2/src/main/org/h2/command/dml/Merge.java index f295762780..7931be7085 100644 --- a/h2/src/main/org/h2/command/dml/Merge.java +++ b/h2/src/main/org/h2/command/dml/Merge.java @@ -1,47 +1,56 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; - +import java.util.HashSet; import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.Command; import org.h2.command.CommandInterface; -import org.h2.command.Prepared; +import org.h2.command.query.Query; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mvstore.db.MVPrimaryIndex; import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; import org.h2.result.Row; import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.StatementBuilder; +import org.h2.util.HasSQL; import org.h2.value.Value; +import org.h2.value.ValueNull; /** * This class represents the statement * MERGE + * or the MySQL compatibility statement + * REPLACE */ -public class Merge extends Prepared { +public final class Merge extends CommandWithValues { + + private boolean isReplace; private Table table; private Column[] columns; private Column[] keys; - private final ArrayList list = New.arrayList(); private Query query; - private Prepared update; + private Update update; - public Merge(Session session) { + public Merge(SessionLocal session, boolean isReplace) { super(session); + this.isReplace = isReplace; } @Override @@ -52,6 +61,11 @@ public void setCommand(Command command) { } } + @Override + public Table getTable() { + return table; + } + public void setTable(Table table) { this.table = table; } @@ -68,65 +82,46 @@ public void setQuery(Query query) { this.query = query; } - /** - * Add a row to this merge statement. 
- * - * @param expr the list of values - */ - public void addRow(Expression[] expr) { - list.add(expr); - } - @Override - public int update() { - int count; - session.getUser().checkRight(table, Right.INSERT); - session.getUser().checkRight(table, Right.UPDATE); + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + long count = 0; + session.getUser().checkTableRight(table, Right.INSERT); + session.getUser().checkTableRight(table, Right.UPDATE); setCurrentRowNumber(0); - if (list.size() > 0) { - count = 0; - for (int x = 0, size = list.size(); x < size; x++) { + if (!valuesExpressionList.isEmpty()) { + // process values in list + for (int x = 0, size = valuesExpressionList.size(); x < size; x++) { setCurrentRowNumber(x + 1); - Expression[] expr = list.get(x); + Expression[] expr = valuesExpressionList.get(x); Row newRow = table.getTemplateRow(); for (int i = 0, len = columns.length; i < len; i++) { Column c = columns[i]; int index = c.getColumnId(); Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) + if (e != ValueExpression.DEFAULT) { try { - Value v = c.convert(e.getValue(session)); - newRow.setValue(index, v); + newRow.setValue(index, e.getValue(session)); } catch (DbException ex) { - throw setRow(ex, count, getSQL(expr)); + throw setRow(ex, count, getSimpleSQL(expr)); } } } - merge(newRow); - count++; + count += merge(newRow, expr, deltaChangeCollector, deltaChangeCollectionMode); } } else { + // process select data for list + query.setNeverLazy(true); ResultInterface rows = query.query(0); - count = 0; table.fire(session, Trigger.UPDATE | Trigger.INSERT, true); - table.lock(session, true, false); + table.lock(session, Table.WRITE_LOCK); while (rows.next()) { - count++; Value[] r = rows.currentRow(); Row newRow = table.getTemplateRow(); setCurrentRowNumber(count); for (int j = 0; j < columns.length; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = 
c.convert(r[j]); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSQL(r)); - } + newRow.setValue(columns[j].getColumnId(), r[j]); } - merge(newRow); + count += merge(newRow, null, deltaChangeCollector, deltaChangeCollectionMode); } rows.close(); table.fire(session, Trigger.UPDATE | Trigger.INSERT, false); @@ -134,49 +129,94 @@ public int update() { return count; } - private void merge(Row row) { - ArrayList k = update.getParameters(); - for (int i = 0; i < columns.length; i++) { - Column col = columns[i]; - Value v = row.getValue(col.getColumnId()); - Parameter p = k.get(i); - p.setValue(v); - } - for (int i = 0; i < keys.length; i++) { - Column col = keys[i]; - Value v = row.getValue(col.getColumnId()); - if (v == null) { - throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getSQL()); + /** + * Updates an existing row or inserts a new one. + * + * @param row row to replace + * @param expressions source expressions, or null + * @param deltaChangeCollector target result + * @param deltaChangeCollectionMode collection mode + * @return 1 if row was inserted, 1 if row was updated by a MERGE statement, + * and 2 if row was updated by a REPLACE statement + */ + private int merge(Row row, Expression[] expressions, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode) { + long count; + if (update == null) { + // if there is no valid primary key, + // the REPLACE statement degenerates to an INSERT + count = 0; + } else { + ArrayList k = update.getParameters(); + int j = 0; + for (int i = 0, l = columns.length; i < l; i++) { + Column col = columns[i]; + if (col.isGeneratedAlways()) { + if (expressions == null || expressions[i] != ValueExpression.DEFAULT) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + col.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString()); + } + } else { + Value v = row.getValue(col.getColumnId()); + if (v == null) { + Expression 
defaultExpression = col.getEffectiveDefaultExpression(); + v = defaultExpression != null ? defaultExpression.getValue(session) : ValueNull.INSTANCE; + } + k.get(j++).setValue(v); + } } - Parameter p = k.get(columns.length + i); - p.setValue(v); + for (Column col : keys) { + Value v = row.getValue(col.getColumnId()); + if (v == null) { + throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getTraceSQL()); + } + k.get(j++).setValue(v); + } + count = update.update(deltaChangeCollector, deltaChangeCollectionMode); } - int count = update.update(); + // if update fails try an insert if (count == 0) { try { - table.validateConvertUpdateSequence(session, row); - boolean done = table.fireBeforeRow(session, null, row); - if (!done) { - table.lock(session, true, false); + table.convertInsertRow(session, row, null); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(row.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, row)) { + table.lock(session, Table.WRITE_LOCK); table.addRow(session, row); - session.log(table, UndoLogRecord.INSERT, row); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, row); table.fireAfterRow(session, null, row, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, row); } + return 1; } catch (DbException e) { if (e.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { // possibly a concurrent merge or insert Index index = (Index) e.getSource(); if (index != null) { // verify the index columns match the key - Column[] indexColumns = index.getColumns(); - boolean indexMatchesKeys = false; + Column[] indexColumns; + if (index instanceof MVPrimaryIndex) { + MVPrimaryIndex foundMV = (MVPrimaryIndex) index; + indexColumns = new Column[] { + foundMV.getIndexColumns()[foundMV.getMainIndexColumn()].column }; + } else { + indexColumns = index.getColumns(); + } 
+ boolean indexMatchesKeys; if (indexColumns.length <= keys.length) { + indexMatchesKeys = true; for (int i = 0; i < indexColumns.length; i++) { if (indexColumns[i] != keys[i]) { indexMatchesKeys = false; break; } } + } else { + indexMatchesKeys = false; } if (indexMatchesKeys) { throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName()); @@ -185,67 +225,51 @@ private void merge(Row row) { } throw e; } - } else if (count != 1) { - throw DbException.get(ErrorCode.DUPLICATE_KEY_1, table.getSQL()); + } else if (count == 1) { + return isReplace ? 2 : 1; } + throw DbException.get(ErrorCode.DUPLICATE_KEY_1, table.getTraceSQL()); } @Override - public String getPlanSQL() { - StatementBuilder buff = new StatementBuilder("MERGE INTO "); - buff.append(table.getSQL()).append('('); - for (Column c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(')'); - if (keys != null) { - buff.append(" KEY("); - buff.resetCount(); - for (Column c : keys) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(')'); + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder(isReplace ? 
"REPLACE INTO " : "MERGE INTO "); + table.getSQL(builder, sqlFlags).append('('); + Column.writeColumns(builder, columns, sqlFlags); + builder.append(')'); + if (!isReplace && keys != null) { + builder.append(" KEY("); + Column.writeColumns(builder, keys, sqlFlags); + builder.append(')'); } - buff.append('\n'); - if (list.size() > 0) { - buff.append("VALUES "); + builder.append('\n'); + if (!valuesExpressionList.isEmpty()) { + builder.append("VALUES "); int row = 0; - for (Expression[] expr : list) { + for (Expression[] expr : valuesExpressionList) { if (row++ > 0) { - buff.append(", "); + builder.append(", "); } - buff.append('('); - buff.resetCount(); - for (Expression e : expr) { - buff.appendExceptFirst(", "); - if (e == null) { - buff.append("DEFAULT"); - } else { - buff.append(e.getSQL()); - } - } - buff.append(')'); + Expression.writeExpressions(builder.append('('), expr, sqlFlags).append(')'); } } else { - buff.append(query.getPlanSQL()); + builder.append(query.getPlanSQL(sqlFlags)); } - return buff.toString(); + return builder.toString(); } @Override public void prepare() { if (columns == null) { - if (list.size() > 0 && list.get(0).length == 0) { + if (!valuesExpressionList.isEmpty() && valuesExpressionList.get(0).length == 0) { // special case where table is used as a sequence columns = new Column[0]; } else { columns = table.getColumns(); } } - if (list.size() > 0) { - for (Expression[] expr : list) { + if (!valuesExpressionList.isEmpty()) { + for (Expression[] expr : valuesExpressionList) { if (expr.length != columns.length) { throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); } @@ -269,40 +293,56 @@ public void prepare() { } keys = idx.getColumns(); } - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(table.getSQL()).append(" SET "); - for (Column c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()).append("=?"); + if (isReplace) { + // if there is no valid primary key, + // the REPLACE statement 
degenerates to an INSERT + for (Column key : keys) { + boolean found = false; + for (Column column : columns) { + if (column.getColumnId() == key.getColumnId()) { + found = true; + break; + } + } + if (!found) { + return; + } + } } - buff.append(" WHERE "); - buff.resetCount(); - for (Column c : keys) { - buff.appendExceptFirst(" AND "); - buff.append(c.getSQL()).append("=?"); + StringBuilder builder = table.getSQL(new StringBuilder("UPDATE "), HasSQL.DEFAULT_SQL_FLAGS).append(" SET "); + boolean hasColumn = false; + for (int i = 0, l = columns.length; i < l; i++) { + Column column = columns[i]; + if (!column.isGeneratedAlways()) { + if (hasColumn) { + builder.append(", "); + } + hasColumn = true; + column.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS).append("=?"); + } } - String sql = buff.toString(); - update = session.prepare(sql); - } - - @Override - public boolean isTransactional() { - return true; + if (!hasColumn) { + throw DbException.getSyntaxError(sqlStatement, sqlStatement.length(), + "Valid MERGE INTO statement with at least one updatable column"); + } + Column.writeColumns(builder.append(" WHERE "), keys, " AND ", "=?", HasSQL.DEFAULT_SQL_FLAGS); + update = (Update) session.prepare(builder.toString()); } @Override - public ResultInterface queryMeta() { - return null; + public int getType() { + return isReplace ? CommandInterface.REPLACE : CommandInterface.MERGE; } @Override - public int getType() { - return CommandInterface.MERGE; + public String getStatementName() { + return isReplace ? 
"REPLACE" : "MERGE"; } @Override - public boolean isCacheable() { - return true; + public void collectDependencies(HashSet dependencies) { + if (query != null) { + query.collectDependencies(dependencies); + } } - } diff --git a/h2/src/main/org/h2/command/dml/MergeUsing.java b/h2/src/main/org/h2/command/dml/MergeUsing.java new file mode 100644 index 0000000000..0dab851782 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/MergeUsing.java @@ -0,0 +1,570 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; + +import org.h2.api.ErrorCode; +import org.h2.api.Trigger; +import org.h2.command.CommandInterface; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; +import org.h2.result.Row; +import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.PlanItem; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.util.HasSQL; +import org.h2.util.Utils; + +/** + * This class represents the statement syntax + * MERGE INTO table alias USING... + * + * It does not replace the MERGE INTO... KEYS... form. + */ +public final class MergeUsing extends DataChangeStatement { + + /** + * Target table filter. + */ + TableFilter targetTableFilter; + + /** + * Source table filter. + */ + TableFilter sourceTableFilter; + + /** + * ON condition expression. 
+ */ + Expression onCondition; + + private ArrayList when = Utils.newSmallArrayList(); + + /** + * Contains _ROWID_ of processed rows. Row + * identities are remembered to prevent duplicate updates of the same row. + */ + private final HashSet targetRowidsRemembered = new HashSet<>(); + + public MergeUsing(SessionLocal session, TableFilter targetTableFilter) { + super(session); + this.targetTableFilter = targetTableFilter; + } + + @Override + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + long countUpdatedRows = 0; + targetRowidsRemembered.clear(); + checkRights(); + setCurrentRowNumber(0); + sourceTableFilter.startQuery(session); + sourceTableFilter.reset(); + Table table = targetTableFilter.getTable(); + table.fire(session, evaluateTriggerMasks(), true); + table.lock(session, Table.WRITE_LOCK); + setCurrentRowNumber(0); + long count = 0; + Row previousSource = null, missedSource = null; + boolean hasRowId = table.getRowIdColumn() != null; + while (sourceTableFilter.next()) { + Row source = sourceTableFilter.get(); + if (missedSource != null) { + if (source != missedSource) { + Row backupTarget = targetTableFilter.get(); + sourceTableFilter.set(missedSource); + targetTableFilter.set(table.getNullRow()); + countUpdatedRows += merge(true, deltaChangeCollector, deltaChangeCollectionMode); + sourceTableFilter.set(source); + targetTableFilter.set(backupTarget); + count++; + } + missedSource = null; + } + setCurrentRowNumber(count + 1); + boolean nullRow = targetTableFilter.isNullRow(); + if (!nullRow) { + Row targetRow = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, targetRow); + if (lockedRow == null) { + if (previousSource != source) { + missedSource = source; + } + continue; + } + if (!targetRow.hasSharedData(lockedRow)) { + targetRow = lockedRow; + targetTableFilter.set(targetRow); + if (!onCondition.getBooleanValue(session)) { + if (previousSource != source) { + 
missedSource = source; + } + continue; + } + } + } + if (hasRowId) { + long targetRowId = targetRow.getKey(); + if (!targetRowidsRemembered.add(targetRowId)) { + throw DbException.get(ErrorCode.DUPLICATE_KEY_1, + "Merge using ON column expression, " + + "duplicate _ROWID_ target record already processed:_ROWID_=" + + targetRowId + ":in:" + + targetTableFilter.getTable()); + } + } + } + countUpdatedRows += merge(nullRow, deltaChangeCollector, deltaChangeCollectionMode); + count++; + previousSource = source; + } + if (missedSource != null) { + sourceTableFilter.set(missedSource); + targetTableFilter.set(table.getNullRow()); + countUpdatedRows += merge(true, deltaChangeCollector, deltaChangeCollectionMode); + } + targetRowidsRemembered.clear(); + table.fire(session, evaluateTriggerMasks(), false); + return countUpdatedRows; + } + + private int merge(boolean nullRow, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + for (When w : when) { + if (w.getClass() == WhenNotMatched.class == nullRow) { + Expression condition = w.andCondition; + if (condition == null || condition.getBooleanValue(session)) { + w.merge(session, deltaChangeCollector, deltaChangeCollectionMode); + return 1; + } + } + } + return 0; + } + + private int evaluateTriggerMasks() { + int masks = 0; + for (When w : when) { + masks |= w.evaluateTriggerMasks(); + } + return masks; + } + + private void checkRights() { + for (When w : when) { + w.checkRights(); + } + session.getUser().checkTableRight(targetTableFilter.getTable(), Right.SELECT); + session.getUser().checkTableRight(sourceTableFilter.getTable(), Right.SELECT); + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("MERGE INTO "); + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + builder.append('\n').append("USING "); + sourceTableFilter.getPlanSQL(builder, false, sqlFlags); + for (When w : when) { + w.getSQL(builder.append('\n'), sqlFlags); + } + return 
builder.toString(); + } + + @Override + public void prepare() { + onCondition.addFilterConditions(sourceTableFilter); + onCondition.addFilterConditions(targetTableFilter); + + onCondition.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + onCondition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + + onCondition = onCondition.optimize(session); + // Create conditions only for target table + onCondition.createIndexConditions(session, targetTableFilter); + + TableFilter[] filters = new TableFilter[] { sourceTableFilter, targetTableFilter }; + sourceTableFilter.addJoin(targetTableFilter, true, onCondition); + PlanItem item = sourceTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); + sourceTableFilter.setPlanItem(item); + sourceTableFilter.prepare(); + + boolean hasFinalNotMatched = false, hasFinalMatched = false; + for (Iterator i = when.iterator(); i.hasNext();) { + When w = i.next(); + if (!w.prepare(session)) { + i.remove(); + } else if (w.getClass() == WhenNotMatched.class) { + if (hasFinalNotMatched) { + i.remove(); + } else if (w.andCondition == null) { + hasFinalNotMatched = true; + } + } else { + if (hasFinalMatched) { + i.remove(); + } else if (w.andCondition == null) { + hasFinalMatched = true; + } + } + } + } + + public void setSourceTableFilter(TableFilter sourceTableFilter) { + this.sourceTableFilter = sourceTableFilter; + } + + public TableFilter getSourceTableFilter() { + return sourceTableFilter; + } + + public void setOnCondition(Expression condition) { + this.onCondition = condition; + } + + public Expression getOnCondition() { + return onCondition; + } + + public ArrayList getWhen() { + return when; + } + + /** + * Adds WHEN command. + * + * @param w new WHEN command to add (update, delete or insert). 
+ */ + public void addWhen(When w) { + when.add(w); + } + + @Override + public Table getTable() { + return targetTableFilter.getTable(); + } + + public void setTargetTableFilter(TableFilter targetTableFilter) { + this.targetTableFilter = targetTableFilter; + } + + public TableFilter getTargetTableFilter() { + return targetTableFilter; + } + + // Prepared interface implementations + + @Override + public int getType() { + return CommandInterface.MERGE; + } + + @Override + public String getStatementName() { + return "MERGE"; + } + + @Override + public void collectDependencies(HashSet dependencies) { + dependencies.add(targetTableFilter.getTable()); + dependencies.add(sourceTableFilter.getTable()); + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + for (When w : when) { + w.collectDependencies(visitor); + } + onCondition.isEverything(visitor); + } + + /** + * Abstract WHEN command of the MERGE statement. + */ + public abstract class When implements HasSQL { + + /** + * AND condition of the command. + */ + Expression andCondition; + + When() { + } + + /** + * Sets the specified AND condition. + * + * @param andCondition AND condition to set + */ + public void setAndCondition(Expression andCondition) { + this.andCondition = andCondition; + } + + /** + * Merges rows. + * + * @param session + * the session + * @param deltaChangeCollector + * target result + * @param deltaChangeCollectionMode + * collection mode + */ + abstract void merge(SessionLocal session, ResultTarget deltaChangeCollector, + ResultOption deltaChangeCollectionMode); + + /** + * Prepares WHEN command. 
+ * + * @param session + * the session + * @return {@code false} if this clause may be removed + */ + boolean prepare(SessionLocal session) { + if (andCondition != null) { + andCondition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + andCondition.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + andCondition = andCondition.optimize(session); + if (andCondition.isConstant()) { + if (andCondition.getBooleanValue(session)) { + andCondition = null; + } else { + return false; + } + } + } + return true; + } + + /** + * Evaluates trigger mask (UPDATE, INSERT, DELETE). + * + * @return the trigger mask. + */ + abstract int evaluateTriggerMasks(); + + /** + * Checks user's INSERT, UPDATE, DELETE permission in appropriate cases. + */ + abstract void checkRights(); + + /** + * Find and collect all DbObjects, this When object depends on. + * + * @param visitor the expression visitor + */ + void collectDependencies(ExpressionVisitor visitor) { + if (andCondition != null) { + andCondition.isEverything(visitor); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append("WHEN "); + if (getClass() == WhenNotMatched.class) { + builder.append("NOT "); + } + builder.append("MATCHED"); + if (andCondition != null) { + andCondition.getUnenclosedSQL(builder.append(" AND "), sqlFlags); + } + return builder.append(" THEN "); + } + + } + + public final class WhenMatchedThenDelete extends When { + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + TableFilter targetTableFilter = MergeUsing.this.targetTableFilter; + Table table = targetTableFilter.getTable(); + Row row = targetTableFilter.get(); + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(row.getValueList()); + } + if (!table.fireRow() || !table.fireBeforeRow(session, row, null)) { + table.removeRow(session, row); + table.fireAfterRow(session, row, null, 
false); + } + } + + @Override + int evaluateTriggerMasks() { + return Trigger.DELETE; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.DELETE); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return super.getSQL(builder, sqlFlags).append("DELETE"); + } + + } + + public final class WhenMatchedThenUpdate extends When { + + private SetClauseList setClauseList; + + public void setSetClauseList(SetClauseList setClauseList) { + this.setClauseList = setClauseList; + } + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + TableFilter targetTableFilter = MergeUsing.this.targetTableFilter; + Table table = targetTableFilter.getTable(); + try (LocalResult rows = LocalResult.forTable(session, table)) { + setClauseList.prepareUpdate(table, session, deltaChangeCollector, deltaChangeCollectionMode, rows, + targetTableFilter.get(), false); + Update.doUpdate(MergeUsing.this, session, table, rows); + } + } + + @Override + boolean prepare(SessionLocal session) { + boolean result = super.prepare(session); + setClauseList.mapAndOptimize(session, targetTableFilter, sourceTableFilter); + return result; + } + + @Override + int evaluateTriggerMasks() { + return Trigger.UPDATE; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.UPDATE); + } + + @Override + void collectDependencies(ExpressionVisitor visitor) { + super.collectDependencies(visitor); + setClauseList.isEverything(visitor); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return setClauseList.getSQL(super.getSQL(builder, sqlFlags).append("UPDATE"), sqlFlags); + } + + } + + public final class WhenNotMatched extends When { + + private Column[] columns; + + private final Boolean overridingSystem; + + private final Expression[] values; + + public 
WhenNotMatched(Column[] columns, Boolean overridingSystem, Expression[] values) { + this.columns = columns; + this.overridingSystem = overridingSystem; + this.values = values; + } + + @Override + void merge(SessionLocal session, ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + Table table = targetTableFilter.getTable(); + Row newRow = table.getTemplateRow(); + Expression[] expr = values; + for (int i = 0, len = columns.length; i < len; i++) { + Column c = columns[i]; + int index = c.getColumnId(); + Expression e = expr[i]; + if (e != ValueExpression.DEFAULT) { + try { + newRow.setValue(index, e.getValue(session)); + } catch (DbException ex) { + ex.addSQL("INSERT -- " + getSimpleSQL(expr)); + throw ex; + } + } + } + table.convertInsertRow(session, newRow, overridingSystem); + if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if (!table.fireBeforeRow(session, null, newRow)) { + table.addRow(session, newRow); + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); + table.fireAfterRow(session, null, newRow, false); + } else { + DataChangeDeltaTable.collectInsertedFinalRow(session, table, deltaChangeCollector, + deltaChangeCollectionMode, newRow); + } + } + + @Override + boolean prepare(SessionLocal session) { + boolean result = super.prepare(session); + TableFilter targetTableFilter = MergeUsing.this.targetTableFilter, + sourceTableFilter = MergeUsing.this.sourceTableFilter; + if (columns == null) { + columns = targetTableFilter.getTable().getColumns(); + } + if (values.length != columns.length) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + for (int i = 0, len = values.length; i < len; i++) { + Expression e = values[i]; + e.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + e.mapColumns(sourceTableFilter, 0, Expression.MAP_INITIAL); + e = e.optimize(session); + if (e 
instanceof Parameter) { + ((Parameter) e).setColumn(columns[i]); + } + values[i] = e; + } + return result; + } + + @Override + int evaluateTriggerMasks() { + return Trigger.INSERT; + } + + @Override + void checkRights() { + getSession().getUser().checkTableRight(targetTableFilter.getTable(), Right.INSERT); + } + + @Override + void collectDependencies(ExpressionVisitor visitor) { + super.collectDependencies(visitor); + for (Expression e : values) { + e.isEverything(visitor); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + super.getSQL(builder, sqlFlags).append("INSERT ("); + Column.writeColumns(builder, columns, sqlFlags).append(")\nVALUES ("); + return Expression.writeExpressions(builder, values, sqlFlags).append(')'); + } + + } + +} diff --git a/h2/src/main/org/h2/command/dml/NoOperation.java b/h2/src/main/org/h2/command/dml/NoOperation.java index 0f4cda4477..803c52003d 100644 --- a/h2/src/main/org/h2/command/dml/NoOperation.java +++ b/h2/src/main/org/h2/command/dml/NoOperation.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.result.ResultInterface; /** @@ -15,20 +15,15 @@ */ public class NoOperation extends Prepared { - public NoOperation(Session session) { + public NoOperation(SessionLocal session) { super(session); } @Override - public int update() { + public long update() { return 0; } - @Override - public boolean isQuery() { - return false; - } - @Override public boolean isTransactional() { return true; diff --git a/h2/src/main/org/h2/command/dml/Query.java b/h2/src/main/org/h2/command/dml/Query.java deleted file mode 100644 index e2c6b8df7e..0000000000 --- a/h2/src/main/org/h2/command/dml/Query.java +++ /dev/null @@ -1,550 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import java.util.ArrayList; -import java.util.HashSet; - -import org.h2.api.ErrorCode; -import org.h2.command.Prepared; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.expression.Alias; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.ExpressionVisitor; -import org.h2.expression.Parameter; -import org.h2.expression.ValueExpression; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.result.ResultTarget; -import org.h2.result.SortOrder; -import org.h2.table.ColumnResolver; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; - -/** - * Represents a SELECT statement (simple, or union). 
- */ -public abstract class Query extends Prepared { - - /** - * The limit expression as specified in the LIMIT or TOP clause. - */ - protected Expression limitExpr; - - /** - * The offset expression as specified in the LIMIT .. OFFSET clause. - */ - protected Expression offsetExpr; - - /** - * The sample size expression as specified in the SAMPLE_SIZE clause. - */ - protected Expression sampleSizeExpr; - - /** - * Whether the result must only contain distinct rows. - */ - protected boolean distinct; - - /** - * Whether the result needs to support random access. - */ - protected boolean randomAccessResult; - - private boolean noCache; - private int lastLimit; - private long lastEvaluated; - private LocalResult lastResult; - private Value[] lastParameters; - private boolean cacheableChecked; - - Query(Session session) { - super(session); - } - - /** - * Execute the query without checking the cache. If a target is specified, - * the results are written to it, and the method returns null. If no target - * is specified, a new LocalResult is created and returned. - * - * @param limit the limit as specified in the JDBC method call - * @param target the target to write results to - * @return the result - */ - protected abstract LocalResult queryWithoutCache(int limit, - ResultTarget target); - - /** - * Initialize the query. - */ - public abstract void init(); - - /** - * The the list of select expressions. - * This may include invisible expressions such as order by expressions. - * - * @return the list of expressions - */ - public abstract ArrayList getExpressions(); - - /** - * Calculate the cost to execute this query. - * - * @return the cost - */ - public abstract double getCost(); - - /** - * Calculate the cost when used as a subquery. - * This method returns a value between 10 and 1000000, - * to ensure adding other values can't result in an integer overflow. 
- * - * @return the estimated cost as an integer - */ - public int getCostAsExpression() { - // ensure the cost is not larger than 1 million, - // so that adding other values can't overflow - return (int) Math.min(1000000.0, 10.0 + 10.0 * getCost()); - } - - /** - * Get all tables that are involved in this query. - * - * @return the set of tables - */ - public abstract HashSet
    getTables(); - - /** - * Set the order by list. - * - * @param order the order by list - */ - public abstract void setOrder(ArrayList order); - - /** - * Set the 'for update' flag. - * - * @param forUpdate the new setting - */ - public abstract void setForUpdate(boolean forUpdate); - - /** - * Get the column count of this query. - * - * @return the column count - */ - public abstract int getColumnCount(); - - /** - * Map the columns to the given column resolver. - * - * @param resolver - * the resolver - * @param level - * the subquery level (0 is the top level query, 1 is the first - * subquery level) - */ - public abstract void mapColumns(ColumnResolver resolver, int level); - - /** - * Change the evaluatable flag. This is used when building the execution - * plan. - * - * @param tableFilter the table filter - * @param b the new value - */ - public abstract void setEvaluatable(TableFilter tableFilter, boolean b); - - /** - * Add a condition to the query. This is used for views. - * - * @param param the parameter - * @param columnId the column index (0 meaning the first column) - * @param comparisonType the comparison type - */ - public abstract void addGlobalCondition(Parameter param, int columnId, - int comparisonType); - - /** - * Check whether adding condition to the query is allowed. This is not - * allowed for views that have an order by and a limit, as it would affect - * the returned results. - * - * @return true if adding global conditions is allowed - */ - public abstract boolean allowGlobalConditions(); - - /** - * Check if this expression and all sub-expressions can fulfill a criteria. - * If any part returns false, the result is false. - * - * @param visitor the visitor - * @return if the criteria can be fulfilled - */ - public abstract boolean isEverything(ExpressionVisitor visitor); - - /** - * Update all aggregate function values. 
- * - * @param s the session - */ - public abstract void updateAggregate(Session s); - - /** - * Call the before triggers on all tables. - */ - public abstract void fireBeforeSelectTriggers(); - - /** - * Set the distinct flag. - * - * @param b the new value - */ - public void setDistinct(boolean b) { - distinct = b; - } - - public boolean isDistinct() { - return distinct; - } - - /** - * Whether results need to support random access. - * - * @param b the new value - */ - public void setRandomAccessResult(boolean b) { - randomAccessResult = b; - } - - @Override - public boolean isQuery() { - return true; - } - - @Override - public boolean isTransactional() { - return true; - } - - /** - * Disable caching of result sets. - */ - public void disableCache() { - this.noCache = true; - } - - private boolean sameResultAsLast(Session s, Value[] params, - Value[] lastParams, long lastEval) { - if (!cacheableChecked) { - long max = getMaxDataModificationId(); - noCache = max == Long.MAX_VALUE; - cacheableChecked = true; - } - if (noCache) { - return false; - } - Database db = s.getDatabase(); - for (int i = 0; i < params.length; i++) { - Value a = lastParams[i], b = params[i]; - if (a.getType() != b.getType() || !db.areEqual(a, b)) { - return false; - } - } - if (!isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR) || - !isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { - return false; - } - if (db.getModificationDataId() > lastEval && - getMaxDataModificationId() > lastEval) { - return false; - } - return true; - } - - public final Value[] getParameterValues() { - ArrayList list = getParameters(); - if (list == null) { - list = New.arrayList(); - } - int size = list.size(); - Value[] params = new Value[size]; - for (int i = 0; i < size; i++) { - Value v = list.get(i).getParamValue(); - params[i] = v; - } - return params; - } - - @Override - public LocalResult query(int maxrows) { - return query(maxrows, null); - } - - /** - * Execute the query, writing the result to 
the target result. - * - * @param limit the maximum number of rows to return - * @param target the target result (null will return the result) - * @return the result set (if the target is not set). - */ - LocalResult query(int limit, ResultTarget target) { - fireBeforeSelectTriggers(); - if (noCache || !session.getDatabase().getOptimizeReuseResults()) { - return queryWithoutCache(limit, target); - } - Value[] params = getParameterValues(); - long now = session.getDatabase().getModificationDataId(); - if (isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { - if (lastResult != null && !lastResult.isClosed() && - limit == lastLimit) { - if (sameResultAsLast(session, params, lastParameters, - lastEvaluated)) { - lastResult = lastResult.createShallowCopy(session); - if (lastResult != null) { - lastResult.reset(); - return lastResult; - } - } - } - } - lastParameters = params; - closeLastResult(); - LocalResult r = queryWithoutCache(limit, target); - lastResult = r; - this.lastEvaluated = now; - lastLimit = limit; - return r; - } - - private void closeLastResult() { - if (lastResult != null) { - lastResult.close(); - } - } - - /** - * Initialize the order by list. This call may extend the expressions list. 
- * - * @param session the session - * @param expressions the select list expressions - * @param expressionSQL the select list SQL snippets - * @param orderList the order by list - * @param visible the number of visible columns in the select list - * @param mustBeInResult all order by expressions must be in the select list - * @param filters the table filters - */ - static void initOrder(Session session, - ArrayList expressions, - ArrayList expressionSQL, - ArrayList orderList, - int visible, - boolean mustBeInResult, - ArrayList filters) { - Database db = session.getDatabase(); - for (SelectOrderBy o : orderList) { - Expression e = o.expression; - if (e == null) { - continue; - } - // special case: SELECT 1 AS A FROM DUAL ORDER BY A - // (oracle supports it, but only in order by, not in group by and - // not in having): - // SELECT 1 AS A FROM DUAL ORDER BY -A - boolean isAlias = false; - int idx = expressions.size(); - if (e instanceof ExpressionColumn) { - // order by expression - ExpressionColumn exprCol = (ExpressionColumn) e; - String tableAlias = exprCol.getOriginalTableAliasName(); - String col = exprCol.getOriginalColumnName(); - for (int j = 0; j < visible; j++) { - boolean found = false; - Expression ec = expressions.get(j); - if (ec instanceof ExpressionColumn) { - // select expression - ExpressionColumn c = (ExpressionColumn) ec; - found = db.equalsIdentifiers(col, c.getColumnName()); - if (found && tableAlias != null) { - String ca = c.getOriginalTableAliasName(); - if (ca == null) { - found = false; - if (filters != null) { - // select id from test order by test.id - for (int i = 0, size = filters.size(); i < size; i++) { - TableFilter f = filters.get(i); - if (db.equalsIdentifiers(f.getTableAlias(), tableAlias)) { - found = true; - break; - } - } - } - } else { - found = db.equalsIdentifiers(ca, tableAlias); - } - } - } else if (!(ec instanceof Alias)) { - continue; - } else if (tableAlias == null && db.equalsIdentifiers(col, ec.getAlias())) { - 
found = true; - } else { - Expression ec2 = ec.getNonAliasExpression(); - if (ec2 instanceof ExpressionColumn) { - ExpressionColumn c2 = (ExpressionColumn) ec2; - String ta = exprCol.getSQL(); - String tb = c2.getSQL(); - String s2 = c2.getColumnName(); - found = db.equalsIdentifiers(col, s2); - if (!db.equalsIdentifiers(ta, tb)) { - found = false; - } - } - } - if (found) { - idx = j; - isAlias = true; - break; - } - } - } else { - String s = e.getSQL(); - if (expressionSQL != null) { - for (int j = 0, size = expressionSQL.size(); j < size; j++) { - String s2 = expressionSQL.get(j); - if (db.equalsIdentifiers(s2, s)) { - idx = j; - isAlias = true; - break; - } - } - } - } - if (!isAlias) { - if (mustBeInResult) { - throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, - e.getSQL()); - } - expressions.add(e); - String sql = e.getSQL(); - expressionSQL.add(sql); - } - o.columnIndexExpr = ValueExpression.get(ValueInt.get(idx + 1)); - Expression expr = expressions.get(idx).getNonAliasExpression(); - o.expression = expr; - } - } - - /** - * Create a {@link SortOrder} object given the list of {@link SelectOrderBy} - * objects. The expression list is extended if necessary. 
- * - * @param orderList a list of {@link SelectOrderBy} elements - * @param expressionCount the number of columns in the query - * @return the {@link SortOrder} object - */ - public SortOrder prepareOrder(ArrayList orderList, - int expressionCount) { - int size = orderList.size(); - int[] index = new int[size]; - int[] sortType = new int[size]; - for (int i = 0; i < size; i++) { - SelectOrderBy o = orderList.get(i); - int idx; - boolean reverse = false; - Expression expr = o.columnIndexExpr; - Value v = expr.getValue(null); - if (v == ValueNull.INSTANCE) { - // parameter not yet set - order by first column - idx = 0; - } else { - idx = v.getInt(); - if (idx < 0) { - reverse = true; - idx = -idx; - } - idx -= 1; - if (idx < 0 || idx >= expressionCount) { - throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, "" + (idx + 1)); - } - } - index[i] = idx; - boolean desc = o.descending; - if (reverse) { - desc = !desc; - } - int type = desc ? SortOrder.DESCENDING : SortOrder.ASCENDING; - if (o.nullsFirst) { - type += SortOrder.NULLS_FIRST; - } else if (o.nullsLast) { - type += SortOrder.NULLS_LAST; - } - sortType[i] = type; - } - return new SortOrder(session.getDatabase(), index, sortType, orderList); - } - - public void setOffset(Expression offset) { - this.offsetExpr = offset; - } - - public Expression getOffset() { - return offsetExpr; - } - - public void setLimit(Expression limit) { - this.limitExpr = limit; - } - - public Expression getLimit() { - return limitExpr; - } - - /** - * Add a parameter to the parameter list. - * - * @param param the parameter to add - */ - void addParameter(Parameter param) { - if (parameters == null) { - parameters = New.arrayList(); - } - parameters.add(param); - } - - public void setSampleSize(Expression sampleSize) { - this.sampleSizeExpr = sampleSize; - } - - /** - * Get the sample size, if set. 
- * - * @param session the session - * @return the sample size - */ - int getSampleSizeValue(Session session) { - if (sampleSizeExpr == null) { - return 0; - } - Value v = sampleSizeExpr.optimize(session).getValue(session); - if (v == ValueNull.INSTANCE) { - return 0; - } - return v.getInt(); - } - - public final long getMaxDataModificationId() { - ExpressionVisitor visitor = ExpressionVisitor.getMaxModificationIdVisitor(); - isEverything(visitor); - return visitor.getMaxDataModificationId(); - } - -} diff --git a/h2/src/main/org/h2/command/dml/Replace.java b/h2/src/main/org/h2/command/dml/Replace.java deleted file mode 100644 index 4c6cc87d83..0000000000 --- a/h2/src/main/org/h2/command/dml/Replace.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import org.h2.api.ErrorCode; -import org.h2.api.Trigger; -import org.h2.command.Command; -import org.h2.command.CommandInterface; -import org.h2.command.Prepared; -import org.h2.engine.Right; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; -import org.h2.expression.Expression; -import org.h2.expression.Parameter; -import org.h2.index.Index; -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.Row; -import org.h2.table.Column; -import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.value.Value; - -import java.util.ArrayList; - -/** - * This class represents the MySQL-compatibility REPLACE statement - */ -public class Replace extends Prepared { - - private Table table; - private Column[] columns; - private Column[] keys; - private final ArrayList list = New.arrayList(); - private Query query; - private Prepared update; - - public Replace(Session session) { - super(session); - } - - @Override - public void 
setCommand(Command command) { - super.setCommand(command); - if (query != null) { - query.setCommand(command); - } - } - - public void setTable(Table table) { - this.table = table; - } - - public void setColumns(Column[] columns) { - this.columns = columns; - } - - public void setKeys(Column[] keys) { - this.keys = keys; - } - - public void setQuery(Query query) { - this.query = query; - } - - /** - * Add a row to this replace statement. - * - * @param expr the list of values - */ - public void addRow(Expression[] expr) { - list.add(expr); - } - - @Override - public int update() { - int count; - session.getUser().checkRight(table, Right.INSERT); - session.getUser().checkRight(table, Right.UPDATE); - setCurrentRowNumber(0); - if (list.size() > 0) { - count = 0; - for (int x = 0, size = list.size(); x < size; x++) { - setCurrentRowNumber(x + 1); - Expression[] expr = list.get(x); - Row newRow = table.getTemplateRow(); - for (int i = 0, len = columns.length; i < len; i++) { - Column c = columns[i]; - int index = c.getColumnId(); - Expression e = expr[i]; - if (e != null) { - // e can be null (DEFAULT) - try { - Value v = c.convert(e.getValue(session)); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSQL(expr)); - } - } - } - replace(newRow); - count++; - } - } else { - ResultInterface rows = query.query(0); - count = 0; - table.fire(session, Trigger.UPDATE | Trigger.INSERT, true); - table.lock(session, true, false); - while (rows.next()) { - count++; - Value[] r = rows.currentRow(); - Row newRow = table.getTemplateRow(); - setCurrentRowNumber(count); - for (int j = 0; j < columns.length; j++) { - Column c = columns[j]; - int index = c.getColumnId(); - try { - Value v = c.convert(r[j]); - newRow.setValue(index, v); - } catch (DbException ex) { - throw setRow(ex, count, getSQL(r)); - } - } - replace(newRow); - } - rows.close(); - table.fire(session, Trigger.UPDATE | Trigger.INSERT, false); - } - return count; - } - - private void 
replace(Row row) { - int count = update(row); - if (count == 0) { - try { - table.validateConvertUpdateSequence(session, row); - boolean done = table.fireBeforeRow(session, null, row); - if (!done) { - table.lock(session, true, false); - table.addRow(session, row); - session.log(table, UndoLogRecord.INSERT, row); - table.fireAfterRow(session, null, row, false); - } - } catch (DbException e) { - if (e.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { - // possibly a concurrent replace or insert - Index index = (Index) e.getSource(); - if (index != null) { - // verify the index columns match the key - Column[] indexColumns = index.getColumns(); - boolean indexMatchesKeys = false; - if (indexColumns.length <= keys.length) { - for (int i = 0; i < indexColumns.length; i++) { - if (indexColumns[i] != keys[i]) { - indexMatchesKeys = false; - break; - } - } - } - if (indexMatchesKeys) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName()); - } - } - } - throw e; - } - } else if (count != 1) { - throw DbException.get(ErrorCode.DUPLICATE_KEY_1, table.getSQL()); - } - } - - private int update(Row row) { - // if there is no valid primary key, - // the statement degenerates to an INSERT - if (update == null) { - return 0; - } - ArrayList k = update.getParameters(); - for (int i = 0; i < columns.length; i++) { - Column col = columns[i]; - Value v = row.getValue(col.getColumnId()); - Parameter p = k.get(i); - p.setValue(v); - } - for (int i = 0; i < keys.length; i++) { - Column col = keys[i]; - Value v = row.getValue(col.getColumnId()); - if (v == null) { - throw DbException.get(ErrorCode.COLUMN_CONTAINS_NULL_VALUES_1, col.getSQL()); - } - Parameter p = k.get(columns.length + i); - p.setValue(v); - } - return update.update(); - } - - @Override - public String getPlanSQL() { - StatementBuilder buff = new StatementBuilder("REPLACE INTO "); - buff.append(table.getSQL()).append('('); - for (Column c : columns) { - buff.appendExceptFirst(", "); - 
buff.append(c.getSQL()); - } - buff.append(')'); - buff.append('\n'); - if (list.size() > 0) { - buff.append("VALUES "); - int row = 0; - for (Expression[] expr : list) { - if (row++ > 0) { - buff.append(", "); - } - buff.append('('); - buff.resetCount(); - for (Expression e : expr) { - buff.appendExceptFirst(", "); - if (e == null) { - buff.append("DEFAULT"); - } else { - buff.append(e.getSQL()); - } - } - buff.append(')'); - } - } else { - buff.append(query.getPlanSQL()); - } - return buff.toString(); - } - - @Override - public void prepare() { - if (columns == null) { - if (list.size() > 0 && list.get(0).length == 0) { - // special case where table is used as a sequence - columns = new Column[0]; - } else { - columns = table.getColumns(); - } - } - if (list.size() > 0) { - for (Expression[] expr : list) { - if (expr.length != columns.length) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - for (int i = 0; i < expr.length; i++) { - Expression e = expr[i]; - if (e != null) { - expr[i] = e.optimize(session); - } - } - } - } else { - query.prepare(); - if (query.getColumnCount() != columns.length) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - } - if (keys == null) { - Index idx = table.getPrimaryKey(); - if (idx == null) { - throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, "PRIMARY KEY"); - } - keys = idx.getColumns(); - } - // if there is no valid primary key, the statement degenerates to an - // INSERT - for (Column key : keys) { - boolean found = false; - for (Column column : columns) { - if (column.getColumnId() == key.getColumnId()) { - found = true; - break; - } - } - if (!found) { - return; - } - } - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(table.getSQL()).append(" SET "); - for (Column c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()).append("=?"); - } - buff.append(" WHERE "); - buff.resetCount(); - for (Column c : keys) { - buff.appendExceptFirst(" 
AND "); - buff.append(c.getSQL()).append("=?"); - } - String sql = buff.toString(); - update = session.prepare(sql); - } - - @Override - public boolean isTransactional() { - return true; - } - - @Override - public ResultInterface queryMeta() { - return null; - } - - @Override - public int getType() { - return CommandInterface.REPLACE; - } - - @Override - public boolean isCacheable() { - return true; - } - -} diff --git a/h2/src/main/org/h2/command/dml/RunScriptCommand.java b/h2/src/main/org/h2/command/dml/RunScriptCommand.java index 0c862f5508..1040e3d6e2 100644 --- a/h2/src/main/org/h2/command/dml/RunScriptCommand.java +++ b/h2/src/main/org/h2/command/dml/RunScriptCommand.java @@ -1,18 +1,18 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import org.h2.command.CommandContainer; import org.h2.command.CommandInterface; import org.h2.command.Prepared; -import org.h2.engine.Constants; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.util.ScriptReader; @@ -30,24 +30,37 @@ public class RunScriptCommand extends ScriptBase { */ private static final char UTF8_BOM = '\uFEFF'; - private Charset charset = Constants.UTF8; + private Charset charset = StandardCharsets.UTF_8; + + private boolean quirksMode; + + private boolean variableBinary; - public RunScriptCommand(Session session) { + private boolean from1X; + + public RunScriptCommand(SessionLocal session) { super(session); } @Override - public int update() { + public 
long update() { session.getUser().checkAdmin(); int count = 0; + boolean oldQuirksMode = session.isQuirksMode(); + boolean oldVariableBinary = session.isVariableBinary(); try { - openInput(); - BufferedReader reader = new BufferedReader(new InputStreamReader(in, charset)); + openInput(charset); // if necessary, strip the BOM from the front of the file reader.mark(1); if (reader.read() != UTF8_BOM) { reader.reset(); } + if (quirksMode) { + session.setQuirksMode(true); + } + if (variableBinary) { + session.setVariableBinary(true); + } ScriptReader r = new ScriptReader(reader); while (true) { String sql = r.readStatement(); @@ -60,25 +73,39 @@ public int update() { checkCanceled(); } } - reader.close(); + r.close(); } catch (IOException e) { throw DbException.convertIOException(e, null); } finally { + if (quirksMode) { + session.setQuirksMode(oldQuirksMode); + } + if (variableBinary) { + session.setVariableBinary(oldVariableBinary); + } closeIO(); } return count; } private void execute(String sql) { + if (from1X) { + sql = sql.trim(); + if (sql.startsWith("INSERT INTO SYSTEM_LOB_STREAM VALUES(")) { + int idx = sql.indexOf(", NULL, '"); + if (idx >= 0) { + sql = new StringBuilder(sql.length() + 1).append(sql, 0, idx + 8).append("X'") + .append(sql, idx + 9, sql.length()).toString(); + } + } + } try { Prepared command = session.prepare(sql); - if (command.isQuery()) { - command.query(0); + CommandContainer commandContainer = new CommandContainer(session, sql, command); + if (commandContainer.isQuery()) { + commandContainer.executeQuery(0, false); } else { - command.update(); - } - if (session.getAutoCommit()) { - session.commit(false); + commandContainer.executeUpdate(null); } } catch (DbException e) { throw e.addSQL(sql); @@ -89,6 +116,34 @@ public void setCharset(Charset charset) { this.charset = charset; } + /** + * Enables or disables the quirks mode. 
+ * + * @param quirksMode + * whether quirks mode should be enabled + */ + public void setQuirksMode(boolean quirksMode) { + this.quirksMode = quirksMode; + } + + /** + * Changes parsing of a BINARY data type. + * + * @param variableBinary + * {@code true} to parse BINARY as VARBINARY, {@code false} to + * parse it as is + */ + public void setVariableBinary(boolean variableBinary) { + this.variableBinary = variableBinary; + } + + /** + * Enables quirks for parsing scripts from H2 1.*.*. + */ + public void setFrom1X() { + variableBinary = quirksMode = from1X = true; + } + @Override public ResultInterface queryMeta() { return null; diff --git a/h2/src/main/org/h2/command/dml/ScriptBase.java b/h2/src/main/org/h2/command/dml/ScriptBase.java index 0598333a7f..e1b99c039f 100644 --- a/h2/src/main/org/h2/command/dml/ScriptBase.java +++ b/h2/src/main/org/h2/command/dml/ScriptBase.java @@ -1,41 +1,38 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.io.BufferedInputStream; import java.io.BufferedOutputStream; +import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.io.OutputStream; - +import java.nio.charset.Charset; import org.h2.api.ErrorCode; -import org.h2.api.JavaObjectSerializer; import org.h2.command.Prepared; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; import org.h2.expression.Expression; import org.h2.message.DbException; import org.h2.security.SHA256; -import org.h2.store.DataHandler; import org.h2.store.FileStore; import org.h2.store.FileStoreInputStream; import org.h2.store.FileStoreOutputStream; -import org.h2.store.LobStorageBackend; import org.h2.store.fs.FileUtils; import org.h2.tools.CompressTool; import org.h2.util.IOUtils; -import org.h2.util.SmallLRUCache; -import org.h2.util.TempFileDeleter; +import org.h2.util.StringUtils; /** * This class is the base for RunScriptCommand and ScriptCommand. */ -abstract class ScriptBase extends Prepared implements DataHandler { +abstract class ScriptBase extends Prepared { /** * The default name of the script file if .zip compression is used. @@ -48,9 +45,9 @@ abstract class ScriptBase extends Prepared implements DataHandler { protected OutputStream out; /** - * The input stream. + * The input reader. */ - protected InputStream in; + protected BufferedReader reader; /** * The file name (if set). 
@@ -65,7 +62,7 @@ abstract class ScriptBase extends Prepared implements DataHandler { private FileStore store; private String compressionAlgorithm; - ScriptBase(Session session) { + ScriptBase(SessionLocal session) { super(session); } @@ -88,7 +85,7 @@ public void setFileNameExpr(Expression file) { protected String getFileName() { if (fileNameExpr != null && fileName == null) { fileName = fileNameExpr.optimize(session).getValue(session).getString(); - if (fileName == null || fileName.trim().length() == 0) { + if (fileName == null || StringUtils.isWhitespaceOrEmpty(fileName)) { fileName = "script.sql"; } fileName = SysProperties.getScriptDirectory() + fileName; @@ -135,7 +132,7 @@ void openOutput() { } if (isEncrypted()) { initStore(); - out = new FileStoreOutputStream(store, this, compressionAlgorithm); + out = new FileStoreOutputStream(store, compressionAlgorithm); // always use a big buffer, otherwise end-of-block is written a lot out = new BufferedOutputStream(out, Constants.IO_BUFFER_SIZE_COMPRESS); } else { @@ -152,28 +149,30 @@ void openOutput() { /** * Open the input stream. 
+ * + * @param charset the charset to use */ - void openInput() { + void openInput(Charset charset) { String file = getFileName(); if (file == null) { return; } + InputStream in; if (isEncrypted()) { initStore(); - in = new FileStoreInputStream(store, this, compressionAlgorithm != null, false); + in = new FileStoreInputStream(store, compressionAlgorithm != null, false); } else { - InputStream inStream; try { - inStream = FileUtils.newInputStream(file); + in = FileUtils.newInputStream(file); } catch (IOException e) { throw DbException.convertIOException(e, file); } - in = new BufferedInputStream(inStream, Constants.IO_BUFFER_SIZE); in = CompressTool.wrapInputStream(in, compressionAlgorithm, SCRIPT_SQL); if (in == null) { throw DbException.get(ErrorCode.FILE_NOT_FOUND_1, SCRIPT_SQL + " in " + file); } } + reader = new BufferedReader(new InputStreamReader(in, charset), Constants.IO_BUFFER_SIZE); } /** @@ -182,8 +181,8 @@ void openInput() { void closeIO() { IOUtils.closeSilently(out); out = null; - IOUtils.closeSilently(in); - in = null; + IOUtils.closeSilently(reader); + reader = null; if (store != null) { store.closeSilently(); store = null; @@ -195,68 +194,8 @@ public boolean needRecompile() { return false; } - @Override - public String getDatabasePath() { - return null; - } - - @Override - public FileStore openFile(String name, String mode, boolean mustExist) { - return null; - } - - @Override - public void checkPowerOff() { - session.getDatabase().checkPowerOff(); - } - - @Override - public void checkWritingAllowed() { - session.getDatabase().checkWritingAllowed(); - } - - @Override - public int getMaxLengthInplaceLob() { - return session.getDatabase().getMaxLengthInplaceLob(); - } - - @Override - public TempFileDeleter getTempFileDeleter() { - return session.getDatabase().getTempFileDeleter(); - } - - @Override - public String getLobCompressionAlgorithm(int type) { - return session.getDatabase().getLobCompressionAlgorithm(type); - } - public void 
setCompressionAlgorithm(String algorithm) { this.compressionAlgorithm = algorithm; } - @Override - public Object getLobSyncObject() { - return this; - } - - @Override - public SmallLRUCache getLobFileListCache() { - return null; - } - - @Override - public LobStorageBackend getLobStorage() { - return null; - } - - @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - throw DbException.throwInternalError(); - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return session.getDataHandler().getJavaObjectSerializer(); - } } diff --git a/h2/src/main/org/h2/command/dml/ScriptCommand.java b/h2/src/main/org/h2/command/dml/ScriptCommand.java index 1b11b90a27..d613e45079 100644 --- a/h2/src/main/org/h2/command/dml/ScriptCommand.java +++ b/h2/src/main/org/h2/command/dml/ScriptCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -11,55 +11,60 @@ import java.io.InputStream; import java.io.Reader; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.Set; - +import java.util.TreeMap; +import java.util.TreeSet; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.command.Parser; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; import org.h2.engine.Comment; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.DbObject; import org.h2.engine.Right; +import org.h2.engine.RightOwner; import org.h2.engine.Role; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.Setting; -import org.h2.engine.SysProperties; import org.h2.engine.User; -import org.h2.engine.UserAggregate; -import org.h2.engine.UserDataType; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.result.LocalResult; import org.h2.result.ResultInterface; import org.h2.result.Row; import org.h2.schema.Constant; +import org.h2.schema.Domain; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.schema.TriggerObject; +import org.h2.schema.UserDefinedFunction; import org.h2.table.Column; import org.h2.table.PlanItem; import org.h2.table.Table; +import org.h2.table.TableType; +import org.h2.util.HasSQL; import org.h2.util.IOUtils; import org.h2.util.MathUtils; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; import 
org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * This class represents the statement @@ -67,7 +72,17 @@ */ public class ScriptCommand extends ScriptBase { - private Charset charset = Constants.UTF8; + private static final Comparator BY_NAME_COMPARATOR = (o1, o2) -> { + if (o1 instanceof SchemaObject && o2 instanceof SchemaObject) { + int cmp = ((SchemaObject) o1).getSchema().getName().compareTo(((SchemaObject) o2).getSchema().getName()); + if (cmp != 0) { + return cmp; + } + } + return o1.getName().compareTo(o2.getName()); + }; + + private Charset charset = StandardCharsets.UTF_8; private Set schemaNames; private Collection
    tables; private boolean passwords; @@ -79,6 +94,9 @@ public class ScriptCommand extends ScriptBase { // true if we're generating the DROP statements private boolean drop; private boolean simple; + private boolean withColumns; + private boolean version = true; + private LocalResult result; private String lineSeparatorString; private byte[] lineSeparator; @@ -87,7 +105,7 @@ public class ScriptCommand extends ScriptBase { private int nextLobId; private int lobBlockSize = Constants.IO_BUFFER_SIZE; - public ScriptCommand(Session session) { + public ScriptCommand(SessionLocal session) { super(session); } @@ -134,13 +152,12 @@ public ResultInterface queryMeta() { } private LocalResult createResult() { - Expression[] expressions = { new ExpressionColumn( - session.getDatabase(), new Column("SCRIPT", Value.STRING)) }; - return new LocalResult(session, expressions, 1); + return new LocalResult(session, new Expression[] { + new ExpressionColumn(session.getDatabase(), new Column("SCRIPT", TypeInfo.TYPE_VARCHAR)) }, 1, 1); } @Override - public ResultInterface query(int maxrows) { + public ResultInterface query(long maxrows) { session.getUser().checkAdmin(); reset(); Database db = session.getDatabase(); @@ -160,6 +177,9 @@ public ResultInterface query(int maxrows) { if (out != null) { buffer = new byte[Constants.IO_BUFFER_SIZE]; } + if (version) { + add("-- H2 " + Constants.VERSION, true); + } if (settings) { for (Setting setting : db.getAllSettings()) { if (setting.getName().equals(SetTypes.getTypeName( @@ -174,42 +194,47 @@ public ResultInterface query(int maxrows) { if (out != null) { add("", true); } - for (User user : db.getAllUsers()) { - add(user.getCreateSQL(passwords), false); - } - for (Role role : db.getAllRoles()) { - add(role.getCreateSQL(true), false); + RightOwner[] rightOwners = db.getAllUsersAndRoles().toArray(new RightOwner[0]); + // ADMIN users first, other users next, roles last + Arrays.sort(rightOwners, (o1, o2) -> { + boolean b = o1 instanceof User; + 
if (b != o2 instanceof User) { + return b ? -1 : 1; + } + if (b) { + b = ((User) o1).isAdmin(); + if (b != ((User) o2).isAdmin()) { + return b ? -1 : 1; + } + } + return o1.getName().compareTo(o2.getName()); + }); + for (RightOwner rightOwner : rightOwners) { + if (rightOwner instanceof User) { + add(((User) rightOwner).getCreateSQL(passwords), false); + } else { + add(((Role) rightOwner).getCreateSQL(true), false); + } } + ArrayList schemas = new ArrayList<>(); for (Schema schema : db.getAllSchemas()) { if (excludeSchema(schema)) { continue; } + schemas.add(schema); add(schema.getCreateSQL(), false); } - for (UserDataType datatype : db.getAllUserDataTypes()) { - if (drop) { - add(datatype.getDropSQL(), false); + dumpDomains(schemas); + for (Schema schema : schemas) { + for (Constant constant : sorted(schema.getAllConstants(), Constant.class)) { + add(constant.getCreateSQL(), false); } - add(datatype.getCreateSQL(), false); - } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.CONSTANT)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Constant constant = (Constant) obj; - add(constant.getCreateSQL(), false); } - final ArrayList
    tables = db.getAllTablesAndViews(false); + final ArrayList
    tables = db.getAllTablesAndViews(); // sort by id, so that views are after tables and views on views // after the base views - Collections.sort(tables, new Comparator
    () { - @Override - public int compare(Table t1, Table t2) { - return t1.getId() - t2.getId(); - } - }); + tables.sort(Comparator.comparingInt(Table::getId)); // Generate the DROP XXX ... IF EXISTS for (Table table : tables) { @@ -222,7 +247,7 @@ public int compare(Table t1, Table t2) { if (table.isHidden()) { continue; } - table.lock(session, false, false); + table.lock(session, Table.READ_LOCK); String sql = table.getCreateSQL(); if (sql == null) { // null for metadata tables @@ -232,32 +257,25 @@ public int compare(Table t1, Table t2) { add(table.getDropSQL(), false); } } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.FUNCTION_ALIAS)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - if (drop) { - add(obj.getDropSQL(), false); - } - add(obj.getCreateSQL(), false); - } - for (UserAggregate agg : db.getAllAggregates()) { - if (drop) { - add(agg.getDropSQL(), false); + for (Schema schema : schemas) { + for (UserDefinedFunction userDefinedFunction : sorted(schema.getAllFunctionsAndAggregates(), + UserDefinedFunction.class)) { + if (drop) { + add(userDefinedFunction.getDropSQL(), false); + } + add(userDefinedFunction.getCreateSQL(), false); } - add(agg.getCreateSQL(), false); } - for (SchemaObject obj : db.getAllSchemaObjects( - DbObject.SEQUENCE)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Sequence sequence = (Sequence) obj; - if (drop) { - add(sequence.getDropSQL(), false); + for (Schema schema : schemas) { + for (Sequence sequence : sorted(schema.getAllSequences(), Sequence.class)) { + if (sequence.getBelongsToTable()) { + continue; + } + if (drop) { + add(sequence.getDropSQL(), false); + } + add(sequence.getCreateSQL(), false); } - add(sequence.getCreateSQL(), false); } // Generate CREATE TABLE and INSERT...VALUES @@ -272,29 +290,29 @@ public int compare(Table t1, Table t2) { if (table.isHidden()) { continue; } - table.lock(session, false, false); + table.lock(session, Table.READ_LOCK); String createTableSql = 
table.getCreateSQL(); if (createTableSql == null) { // null for metadata tables continue; } - final String tableType = table.getTableType(); + final TableType tableType = table.getTableType(); add(createTableSql, false); final ArrayList constraints = table.getConstraints(); if (constraints != null) { for (Constraint constraint : constraints) { - if (Constraint.PRIMARY_KEY.equals( - constraint.getConstraintType())) { + if (Constraint.Type.PRIMARY_KEY == constraint.getConstraintType()) { add(constraint.getCreateSQLWithoutIndexes(), false); } } } - if (Table.TABLE.equals(tableType)) { - if (table.canGetRowCount()) { - String rowcount = "-- " + - table.getRowCountApproximation() + - " +/- SELECT COUNT(*) FROM " + table.getSQL(); - add(rowcount, false); + if (TableType.TABLE == tableType) { + if (table.canGetRowCount(session)) { + StringBuilder builder = new StringBuilder("-- ") + .append(table.getRowCountApproximation(session)) + .append(" +/- SELECT COUNT(*) FROM "); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS); + add(builder.toString(), false); } if (data) { count = generateInsertValues(count, table); @@ -310,59 +328,41 @@ public int compare(Table t1, Table t2) { } if (tempLobTableCreated) { add("DROP TABLE IF EXISTS SYSTEM_LOB_STREAM", true); - add("CALL SYSTEM_COMBINE_BLOB(-1)", true); add("DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB", true); add("DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB", true); tempLobTableCreated = false; } // Generate CREATE CONSTRAINT ... 
- final ArrayList constraints = db.getAllSchemaObjects( - DbObject.CONSTRAINT); - Collections.sort(constraints, new Comparator() { - @Override - public int compare(SchemaObject c1, SchemaObject c2) { - return ((Constraint) c1).compareTo((Constraint) c2); - } - }); - for (SchemaObject obj : constraints) { - if (excludeSchema(obj.getSchema())) { - continue; - } - Constraint constraint = (Constraint) obj; - if (excludeTable(constraint.getTable())) { - continue; - } - if (constraint.getTable().isHidden()) { - continue; - } - if (!Constraint.PRIMARY_KEY.equals(constraint.getConstraintType())) { - add(constraint.getCreateSQLWithoutIndexes(), false); + ArrayList constraints = new ArrayList<>(); + for (Schema schema : schemas) { + for (Constraint constraint : schema.getAllConstraints()) { + if (excludeTable(constraint.getTable())) { + continue; + } + Type constraintType = constraint.getConstraintType(); + if (constraintType != Type.DOMAIN && constraint.getTable().isHidden()) { + continue; + } + if (constraintType != Constraint.Type.PRIMARY_KEY) { + constraints.add(constraint); + } } } - // Generate CREATE TRIGGER ... - for (SchemaObject obj : db.getAllSchemaObjects(DbObject.TRIGGER)) { - if (excludeSchema(obj.getSchema())) { - continue; - } - TriggerObject trigger = (TriggerObject) obj; - if (excludeTable(trigger.getTable())) { - continue; - } - add(trigger.getCreateSQL(), false); + constraints.sort(null); + for (Constraint constraint : constraints) { + add(constraint.getCreateSQLWithoutIndexes(), false); } - // Generate GRANT ... - for (Right right : db.getAllRights()) { - Table table = right.getGrantedTable(); - if (table != null) { - if (excludeSchema(table.getSchema())) { - continue; - } - if (excludeTable(table)) { + // Generate CREATE TRIGGER ... 
+ for (Schema schema : schemas) { + for (TriggerObject trigger : schema.getAllTriggers()) { + if (excludeTable(trigger.getTable())) { continue; } + add(trigger.getCreateSQL(), false); } - add(right.getCreateSQL(), false); } + // Generate GRANT ... + dumpRights(db); // Generate COMMENT ON ... for (Comment comment : db.getAllComments()) { add(comment.getCreateSQL(), false); @@ -381,127 +381,251 @@ public int compare(SchemaObject c1, SchemaObject c2) { return r; } + private void dumpDomains(ArrayList schemas) throws IOException { + TreeMap> referencingDomains = new TreeMap<>(BY_NAME_COMPARATOR); + TreeSet known = new TreeSet<>(BY_NAME_COMPARATOR); + for (Schema schema : schemas) { + for (Domain domain : sorted(schema.getAllDomains(), Domain.class)) { + Domain parent = domain.getDomain(); + if (parent == null) { + addDomain(domain); + } else { + TreeSet set = referencingDomains.get(parent); + if (set == null) { + set = new TreeSet<>(BY_NAME_COMPARATOR); + referencingDomains.put(parent, set); + } + set.add(domain); + if (parent.getDomain() == null || !schemas.contains(parent.getSchema())) { + known.add(parent); + } + } + } + } + while (!referencingDomains.isEmpty()) { + TreeSet known2 = new TreeSet<>(BY_NAME_COMPARATOR); + for (Domain d : known) { + TreeSet set = referencingDomains.remove(d); + if (set != null) { + for (Domain d2 : set) { + addDomain(d2); + known2.add(d2); + } + } + } + known = known2; + } + } + + private void dumpRights(Database db) throws IOException { + Right[] rights = db.getAllRights().toArray(new Right[0]); + Arrays.sort(rights, (o1, o2) -> { + Role r1 = o1.getGrantedRole(), r2 = o2.getGrantedRole(); + if ((r1 == null) != (r2 == null)) { + return r1 == null ? -1 : 1; + } + if (r1 == null) { + DbObject g1 = o1.getGrantedObject(), g2 = o2.getGrantedObject(); + if ((g1 == null) != (g2 == null)) { + return g1 == null ? -1 : 1; + } + if (g1 != null) { + if (g1 instanceof Schema != g2 instanceof Schema) { + return g1 instanceof Schema ? 
-1 : 1; + } + int cmp = g1.getName().compareTo(g2.getName()); + if (cmp != 0) { + return cmp; + } + } + } else { + int cmp = r1.getName().compareTo(r2.getName()); + if (cmp != 0) { + return cmp; + } + } + return o1.getGrantee().getName().compareTo(o2.getGrantee().getName()); + }); + for (Right right : rights) { + DbObject object = right.getGrantedObject(); + if (object != null) { + if (object instanceof Schema) { + if (excludeSchema((Schema) object)) { + continue; + } + } else if (object instanceof Table) { + Table table = (Table) object; + if (excludeSchema(table.getSchema())) { + continue; + } + if (excludeTable(table)) { + continue; + } + } + } + add(right.getCreateSQL(), false); + } + } + + private void addDomain(Domain domain) throws IOException { + if (drop) { + add(domain.getDropSQL(), false); + } + add(domain.getCreateSQL(), false); + } + + private static T[] sorted(Collection collection, Class clazz) { + @SuppressWarnings("unchecked") + T[] array = collection.toArray((T[]) java.lang.reflect.Array.newInstance(clazz, 0)); + Arrays.sort(array, BY_NAME_COMPARATOR); + return array; + } + private int generateInsertValues(int count, Table table) throws IOException { - PlanItem plan = table.getBestPlanItem(session, null, null, null); + PlanItem plan = table.getBestPlanItem(session, null, null, -1, null, null); Index index = plan.getIndex(); Cursor cursor = index.find(session, null, null); Column[] columns = table.getColumns(); - StatementBuilder buff = new StatementBuilder("INSERT INTO "); - buff.append(table.getSQL()).append('('); - for (Column col : columns) { - buff.appendExceptFirst(", "); - buff.append(Parser.quoteIdentifier(col.getName())); + boolean withGenerated = false, withGeneratedAlwaysAsIdentity = false; + for (Column c : columns) { + if (c.isGeneratedAlways()) { + if (c.isIdentity()) { + withGeneratedAlwaysAsIdentity = true; + } else { + withGenerated = true; + } + } } - buff.append(") VALUES"); + StringBuilder builder = new StringBuilder("INSERT 
INTO "); + table.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + if (withGenerated || withGeneratedAlwaysAsIdentity || withColumns) { + builder.append('('); + boolean needComma = false; + for (Column column : columns) { + if (!column.isGenerated()) { + if (needComma) { + builder.append(", "); + } + needComma = true; + column.getSQL(builder, HasSQL.DEFAULT_SQL_FLAGS); + } + } + builder.append(')'); + if (withGeneratedAlwaysAsIdentity) { + builder.append(" OVERRIDING SYSTEM VALUE"); + } + } + builder.append(" VALUES"); if (!simple) { - buff.append('\n'); + builder.append('\n'); } - buff.append('('); - String ins = buff.toString(); - buff = null; + builder.append('('); + String ins = builder.toString(); + builder = null; + int columnCount = columns.length; while (cursor.next()) { Row row = cursor.get(); - if (buff == null) { - buff = new StatementBuilder(ins); + if (builder == null) { + builder = new StringBuilder(ins); } else { - buff.append(",\n("); + builder.append(",\n("); } - for (int j = 0; j < row.getColumnCount(); j++) { - if (j > 0) { - buff.append(", "); + boolean needComma = false; + for (int i = 0; i < columnCount; i++) { + if (columns[i].isGenerated()) { + continue; + } + if (needComma) { + builder.append(", "); } - Value v = row.getValue(j); - if (v.getPrecision() > lobBlockSize) { + needComma = true; + Value v = row.getValue(i); + if (v.getType().getPrecision() > lobBlockSize) { int id; - if (v.getType() == Value.CLOB) { + if (v.getValueType() == Value.CLOB) { id = writeLobStream(v); - buff.append("SYSTEM_COMBINE_CLOB(" + id + ")"); - } else if (v.getType() == Value.BLOB) { + builder.append("SYSTEM_COMBINE_CLOB(").append(id).append(')'); + } else if (v.getValueType() == Value.BLOB) { id = writeLobStream(v); - buff.append("SYSTEM_COMBINE_BLOB(" + id + ")"); + builder.append("SYSTEM_COMBINE_BLOB(").append(id).append(')'); } else { - buff.append(v.getSQL()); + v.getSQL(builder, HasSQL.NO_CASTS); } } else { - buff.append(v.getSQL()); + v.getSQL(builder, 
HasSQL.NO_CASTS); } } - buff.append(')'); + builder.append(')'); count++; if ((count & 127) == 0) { checkCanceled(); } - if (simple || buff.length() > Constants.IO_BUFFER_SIZE) { - add(buff.toString(), true); - buff = null; + if (simple || builder.length() > Constants.IO_BUFFER_SIZE) { + add(builder.toString(), true); + builder = null; } } - if (buff != null) { - add(buff.toString(), true); + if (builder != null) { + add(builder.toString(), true); } return count; } private int writeLobStream(Value v) throws IOException { if (!tempLobTableCreated) { - add("CREATE TABLE IF NOT EXISTS SYSTEM_LOB_STREAM" + + add("CREATE CACHED LOCAL TEMPORARY TABLE IF NOT EXISTS SYSTEM_LOB_STREAM" + "(ID INT NOT NULL, PART INT NOT NULL, " + - "CDATA VARCHAR, BDATA BINARY)", + "CDATA VARCHAR, BDATA VARBINARY)", true); - add("CREATE PRIMARY KEY SYSTEM_LOB_STREAM_PRIMARY_KEY " + - "ON SYSTEM_LOB_STREAM(ID, PART)", true); - add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_CLOB FOR \"" + - this.getClass().getName() + ".combineClob\"", true); - add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_BLOB FOR \"" + - this.getClass().getName() + ".combineBlob\"", true); + add("ALTER TABLE SYSTEM_LOB_STREAM ADD CONSTRAINT SYSTEM_LOB_STREAM_PRIMARY_KEY PRIMARY KEY(ID, PART)", + true); + String className = getClass().getName(); + add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_CLOB FOR '" + className + ".combineClob'", true); + add("CREATE ALIAS IF NOT EXISTS " + "SYSTEM_COMBINE_BLOB FOR '" + className + ".combineBlob'", true); tempLobTableCreated = true; } int id = nextLobId++; - switch (v.getType()) { + switch (v.getValueType()) { case Value.BLOB: { byte[] bytes = new byte[lobBlockSize]; - InputStream input = v.getInputStream(); - try { + try (InputStream input = v.getInputStream()) { for (int i = 0;; i++) { StringBuilder buff = new StringBuilder(lobBlockSize * 2); - buff.append("INSERT INTO SYSTEM_LOB_STREAM VALUES(" + id + - ", " + i + ", NULL, '"); + buff.append("INSERT INTO 
SYSTEM_LOB_STREAM VALUES(").append(id) + .append(", ").append(i).append(", NULL, X'"); int len = IOUtils.readFully(input, bytes, lobBlockSize); if (len <= 0) { break; } - buff.append(StringUtils.convertBytesToHex(bytes, len)).append("')"); + StringUtils.convertBytesToHex(buff, bytes, len).append("')"); String sql = buff.toString(); add(sql, true); } - } finally { - IOUtils.closeSilently(input); } break; } case Value.CLOB: { char[] chars = new char[lobBlockSize]; - Reader reader = v.getReader(); - try { + + try (Reader reader = v.getReader()) { for (int i = 0;; i++) { StringBuilder buff = new StringBuilder(lobBlockSize * 2); - buff.append("INSERT INTO SYSTEM_LOB_STREAM VALUES(" + id + ", " + i + ", "); + buff.append("INSERT INTO SYSTEM_LOB_STREAM VALUES(").append(id).append(", ").append(i) + .append(", "); int len = IOUtils.readFully(reader, chars, lobBlockSize); if (len == 0) { break; } - buff.append(StringUtils.quoteStringSQL(new String(chars, 0, len))). + StringUtils.quoteStringSQL(buff, new String(chars, 0, len)). 
append(", NULL)"); String sql = buff.toString(); add(sql, true); } - } finally { - IOUtils.closeSilently(reader); } break; } default: - DbException.throwInternalError("type:" + v.getType()); + throw DbException.getInternalError("type:" + v.getValueType()); } return id; } @@ -514,6 +638,7 @@ private int writeLobStream(Value v) throws IOException { * @param conn a connection * @param id the lob id * @return a stream for the combined data + * @throws SQLException on failure */ public static InputStream combineBlob(Connection conn, int id) throws SQLException { @@ -545,7 +670,7 @@ public int read() throws IOException { } current = null; } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -558,7 +683,7 @@ public void close() throws IOException { try { rs.close(); } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } }; @@ -571,6 +696,7 @@ public void close() throws IOException { * @param conn a connection * @param id the lob id * @return a reader for the combined data + * @throws SQLException on failure */ public static Reader combineClob(Connection conn, int id) throws SQLException { if (id < 0) { @@ -601,7 +727,7 @@ public int read() throws IOException { } current = null; } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } } @@ -614,7 +740,7 @@ public void close() throws IOException { try { rs.close(); } catch (SQLException e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @Override @@ -651,7 +777,7 @@ private static ResultSet getLobStream(Connection conn, String column, int id) private void reset() { result = null; buffer = null; - lineSeparatorString = SysProperties.LINE_SEPARATOR; + lineSeparatorString = System.lineSeparator(); lineSeparator = lineSeparatorString.getBytes(charset); } @@ -661,7 +787,7 @@ private boolean 
excludeSchema(Schema schema) { } if (tables != null) { // if filtering on specific tables, only include those schemas - for (Table table : schema.getAllTablesAndViews()) { + for (Table table : schema.getAllTablesAndViews(session)) { if (tables.contains(table)) { return false; } @@ -701,12 +827,10 @@ private void add(String s, boolean insert) throws IOException { } out.write(buffer, 0, len); if (!insert) { - Value[] row = { ValueString.get(s) }; - result.addRow(row); + result.addRow(ValueVarchar.get(s)); } } else { - Value[] row = { ValueString.get(s) }; - result.addRow(row); + result.addRow(ValueVarchar.get(s)); } } @@ -714,6 +838,14 @@ public void setSimple(boolean simple) { this.simple = simple; } + public void setWithColumns(boolean withColumns) { + this.withColumns = withColumns; + } + + public void setVersion(boolean version) { + this.version = version; + } + public void setCharset(Charset charset) { this.charset = charset; } diff --git a/h2/src/main/org/h2/command/dml/Select.java b/h2/src/main/org/h2/command/dml/Select.java deleted file mode 100644 index 46fbce8702..0000000000 --- a/h2/src/main/org/h2/command/dml/Select.java +++ /dev/null @@ -1,1398 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; - -import org.h2.api.ErrorCode; -import org.h2.api.Trigger; -import org.h2.command.CommandInterface; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.expression.Comparison; -import org.h2.expression.ConditionAndOr; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.ExpressionVisitor; -import org.h2.expression.Parameter; -import org.h2.index.Cursor; -import org.h2.index.Index; -import org.h2.index.IndexCondition; -import org.h2.index.IndexType; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.result.ResultInterface; -import org.h2.result.ResultTarget; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import org.h2.util.ValueHashMap; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueNull; - -/** - * This class represents a simple SELECT statement. - * - * For each select statement, - * visibleColumnCount <= distinctColumnCount <= expressionCount. - * The expression list count could include ORDER BY and GROUP BY expressions - * that are not in the select list. - * - * The call sequence is init(), mapColumns() if it's a subquery, prepare(). 
- * - * @author Thomas Mueller - * @author Joel Turkel (Group sorted query) - */ -public class Select extends Query { - private TableFilter topTableFilter; - private final ArrayList filters = New.arrayList(); - private final ArrayList topFilters = New.arrayList(); - private ArrayList expressions; - private Expression[] expressionArray; - private Expression having; - private Expression condition; - private int visibleColumnCount, distinctColumnCount; - private ArrayList orderList; - private ArrayList group; - private int[] groupIndex; - private boolean[] groupByExpression; - private HashMap currentGroup; - private int havingIndex; - private boolean isGroupQuery, isGroupSortedQuery; - private boolean isForUpdate, isForUpdateMvcc; - private double cost; - private boolean isQuickAggregateQuery, isDistinctQuery; - private boolean isPrepared, checkInit; - private boolean sortUsingIndex; - private SortOrder sort; - private int currentGroupRowId; - - public Select(Session session) { - super(session); - } - - /** - * Add a table to the query. - * - * @param filter the table to add - * @param isTop if the table can be the first table in the query plan - */ - public void addTableFilter(TableFilter filter, boolean isTop) { - // Oracle doesn't check on duplicate aliases - // String alias = filter.getAlias(); - // if (filterNames.contains(alias)) { - // throw Message.getSQLException( - // ErrorCode.DUPLICATE_TABLE_ALIAS, alias); - // } - // filterNames.add(alias); - filters.add(filter); - if (isTop) { - topFilters.add(filter); - } - } - - public ArrayList getTopFilters() { - return topFilters; - } - - public void setExpressions(ArrayList expressions) { - this.expressions = expressions; - } - - /** - * Called if this query contains aggregate functions. 
- */ - public void setGroupQuery() { - isGroupQuery = true; - } - - public void setGroupBy(ArrayList group) { - this.group = group; - } - - public ArrayList getGroupBy() { - return group; - } - - public HashMap getCurrentGroup() { - return currentGroup; - } - - public int getCurrentGroupRowId() { - return currentGroupRowId; - } - - @Override - public void setOrder(ArrayList order) { - orderList = order; - } - - /** - * Add a condition to the list of conditions. - * - * @param cond the condition to add - */ - public void addCondition(Expression cond) { - if (condition == null) { - condition = cond; - } else { - condition = new ConditionAndOr(ConditionAndOr.AND, cond, condition); - } - } - - private void queryGroupSorted(int columnCount, ResultTarget result) { - int rowNumber = 0; - setCurrentRowNumber(0); - currentGroup = null; - Value[] previousKeyValues = null; - while (topTableFilter.next()) { - setCurrentRowNumber(rowNumber + 1); - if (condition == null || - Boolean.TRUE.equals(condition.getBooleanValue(session))) { - rowNumber++; - Value[] keyValues = new Value[groupIndex.length]; - // update group - for (int i = 0; i < groupIndex.length; i++) { - int idx = groupIndex[i]; - Expression expr = expressions.get(idx); - keyValues[i] = expr.getValue(session); - } - - if (previousKeyValues == null) { - previousKeyValues = keyValues; - currentGroup = New.hashMap(); - } else if (!Arrays.equals(previousKeyValues, keyValues)) { - addGroupSortedRow(previousKeyValues, columnCount, result); - previousKeyValues = keyValues; - currentGroup = New.hashMap(); - } - currentGroupRowId++; - - for (int i = 0; i < columnCount; i++) { - if (groupByExpression == null || !groupByExpression[i]) { - Expression expr = expressions.get(i); - expr.updateAggregate(session); - } - } - } - } - if (previousKeyValues != null) { - addGroupSortedRow(previousKeyValues, columnCount, result); - } - } - - private void addGroupSortedRow(Value[] keyValues, int columnCount, - ResultTarget result) { - 
Value[] row = new Value[columnCount]; - for (int j = 0; groupIndex != null && j < groupIndex.length; j++) { - row[groupIndex[j]] = keyValues[j]; - } - for (int j = 0; j < columnCount; j++) { - if (groupByExpression != null && groupByExpression[j]) { - continue; - } - Expression expr = expressions.get(j); - row[j] = expr.getValue(session); - } - if (isHavingNullOrFalse(row)) { - return; - } - row = keepOnlyDistinct(row, columnCount); - result.addRow(row); - } - - private Value[] keepOnlyDistinct(Value[] row, int columnCount) { - if (columnCount == distinctColumnCount) { - return row; - } - // remove columns so that 'distinct' can filter duplicate rows - Value[] r2 = new Value[distinctColumnCount]; - System.arraycopy(row, 0, r2, 0, distinctColumnCount); - return r2; - } - - private boolean isHavingNullOrFalse(Value[] row) { - if (havingIndex >= 0) { - Value v = row[havingIndex]; - if (v == ValueNull.INSTANCE) { - return true; - } - if (!Boolean.TRUE.equals(v.getBoolean())) { - return true; - } - } - return false; - } - - private Index getGroupSortedIndex() { - if (groupIndex == null || groupByExpression == null) { - return null; - } - ArrayList indexes = topTableFilter.getTable().getIndexes(); - if (indexes != null) { - for (int i = 0, size = indexes.size(); i < size; i++) { - Index index = indexes.get(i); - if (index.getIndexType().isScan()) { - continue; - } - if (index.getIndexType().isHash()) { - // does not allow scanning entries - continue; - } - if (isGroupSortedIndex(topTableFilter, index)) { - return index; - } - } - } - return null; - } - - private boolean isGroupSortedIndex(TableFilter tableFilter, Index index) { - // check that all the GROUP BY expressions are part of the index - Column[] indexColumns = index.getColumns(); - // also check that the first columns in the index are grouped - boolean[] grouped = new boolean[indexColumns.length]; - outerLoop: - for (int i = 0, size = expressions.size(); i < size; i++) { - if (!groupByExpression[i]) { - 
continue; - } - Expression expr = expressions.get(i).getNonAliasExpression(); - if (!(expr instanceof ExpressionColumn)) { - return false; - } - ExpressionColumn exprCol = (ExpressionColumn) expr; - for (int j = 0; j < indexColumns.length; ++j) { - if (tableFilter == exprCol.getTableFilter()) { - if (indexColumns[j].equals(exprCol.getColumn())) { - grouped[j] = true; - continue outerLoop; - } - } - } - // We didn't find a matching index column - // for one group by expression - return false; - } - // check that the first columns in the index are grouped - // good: index(a, b, c); group by b, a - // bad: index(a, b, c); group by a, c - for (int i = 1; i < grouped.length; i++) { - if (!grouped[i - 1] && grouped[i]) { - return false; - } - } - return true; - } - - private int getGroupByExpressionCount() { - if (groupByExpression == null) { - return 0; - } - int count = 0; - for (boolean b : groupByExpression) { - if (b) { - ++count; - } - } - return count; - } - - private void queryGroup(int columnCount, LocalResult result) { - ValueHashMap> groups = - ValueHashMap.newInstance(); - int rowNumber = 0; - setCurrentRowNumber(0); - currentGroup = null; - ValueArray defaultGroup = ValueArray.get(new Value[0]); - int sampleSize = getSampleSizeValue(session); - while (topTableFilter.next()) { - setCurrentRowNumber(rowNumber + 1); - if (condition == null || - Boolean.TRUE.equals(condition.getBooleanValue(session))) { - Value key; - rowNumber++; - if (groupIndex == null) { - key = defaultGroup; - } else { - Value[] keyValues = new Value[groupIndex.length]; - // update group - for (int i = 0; i < groupIndex.length; i++) { - int idx = groupIndex[i]; - Expression expr = expressions.get(idx); - keyValues[i] = expr.getValue(session); - } - key = ValueArray.get(keyValues); - } - HashMap values = groups.get(key); - if (values == null) { - values = new HashMap(); - groups.put(key, values); - } - currentGroup = values; - currentGroupRowId++; - int len = columnCount; - for (int i = 0; i 
< len; i++) { - if (groupByExpression == null || !groupByExpression[i]) { - Expression expr = expressions.get(i); - expr.updateAggregate(session); - } - } - if (sampleSize > 0 && rowNumber >= sampleSize) { - break; - } - } - } - if (groupIndex == null && groups.size() == 0) { - groups.put(defaultGroup, new HashMap()); - } - ArrayList keys = groups.keys(); - for (Value v : keys) { - ValueArray key = (ValueArray) v; - currentGroup = groups.get(key); - Value[] keyValues = key.getList(); - Value[] row = new Value[columnCount]; - for (int j = 0; groupIndex != null && j < groupIndex.length; j++) { - row[groupIndex[j]] = keyValues[j]; - } - for (int j = 0; j < columnCount; j++) { - if (groupByExpression != null && groupByExpression[j]) { - continue; - } - Expression expr = expressions.get(j); - row[j] = expr.getValue(session); - } - if (isHavingNullOrFalse(row)) { - continue; - } - row = keepOnlyDistinct(row, columnCount); - result.addRow(row); - } - } - - /** - * Get the index that matches the ORDER BY list, if one exists. This is to - * avoid running a separate ORDER BY if an index can be used. 
This is - * specially important for large result sets, if only the first few rows are - * important (LIMIT is used) - * - * @return the index if one is found - */ - private Index getSortIndex() { - if (sort == null) { - return null; - } - ArrayList sortColumns = New.arrayList(); - for (int idx : sort.getQueryColumnIndexes()) { - if (idx < 0 || idx >= expressions.size()) { - throw DbException.getInvalidValueException("ORDER BY", idx + 1); - } - Expression expr = expressions.get(idx); - expr = expr.getNonAliasExpression(); - if (expr.isConstant()) { - continue; - } - if (!(expr instanceof ExpressionColumn)) { - return null; - } - ExpressionColumn exprCol = (ExpressionColumn) expr; - if (exprCol.getTableFilter() != topTableFilter) { - return null; - } - sortColumns.add(exprCol.getColumn()); - } - Column[] sortCols = sortColumns.toArray(new Column[sortColumns.size()]); - int[] sortTypes = sort.getSortTypes(); - if (sortCols.length == 0) { - // sort just on constants - can use scan index - return topTableFilter.getTable().getScanIndex(session); - } - ArrayList list = topTableFilter.getTable().getIndexes(); - if (list != null) { - for (int i = 0, size = list.size(); i < size; i++) { - Index index = list.get(i); - if (index.getCreateSQL() == null) { - // can't use the scan index - continue; - } - if (index.getIndexType().isHash()) { - continue; - } - IndexColumn[] indexCols = index.getIndexColumns(); - if (indexCols.length < sortCols.length) { - continue; - } - boolean ok = true; - for (int j = 0; j < sortCols.length; j++) { - // the index and the sort order must start - // with the exact same columns - IndexColumn idxCol = indexCols[j]; - Column sortCol = sortCols[j]; - boolean implicitSortColumn = false; - if (idxCol.column != sortCol) { - implicitSortColumn = isSortColumnImplicit( - topTableFilter, idxCol.column); - if (!implicitSortColumn) { - ok = false; - break; - } - } - if (!implicitSortColumn && idxCol.sortType != sortTypes[j]) { - // NULL FIRST for ascending and 
NULLS LAST - // for descending would actually match the default - ok = false; - break; - } - } - if (ok) { - return index; - } - } - } - if (sortCols.length == 1 && sortCols[0].getColumnId() == -1) { - // special case: order by _ROWID_ - Index index = topTableFilter.getTable().getScanIndex(session); - if (index.isRowIdIndex()) { - return index; - } - } - return null; - } - - /** - * Validates the cases where ORDER BY clause do not contains all indexed - * columns, but the related index path still would be valid for such search. - * Sometimes, the absence of a column in the ORDER BY clause does not alter - * the expected final result, and an index sorted scan could still be used. - *
    -     * CREATE TABLE test(a, b);
    -     * CREATE UNIQUE INDEX idx_test ON test(a, b);
    -     * SELECT b FROM test WHERE a=22 AND b>10 order by b;
    -     * 
    - * More restrictive rule where one table query with indexed column not - * present in the ORDER BY clause is filtered with equality conditions (at - * least one) of type COLUMN = CONSTANT in a conjunctive fashion. - * - * @param sortColumn Column to be validated - * @return true if the column can be used implicitly, or false otherwise. - */ - private boolean isSortColumnImplicit(TableFilter tableFilter, - Column sortColumn) { - if (filters.size() == 1 && condition != null - && !condition.isDisjunctive()) { - ArrayList conditions = tableFilter - .getIndexConditionsForColumn(sortColumn); - if (conditions.isEmpty()) { - return false; - } - for (IndexCondition conditionExp : conditions) { - if (!conditionExp.isEquality(true)) { - return false; - } - } - return true; - } - return false; - } - - private void queryDistinct(ResultTarget result, long limitRows) { - // limitRows must be long, otherwise we get an int overflow - // if limitRows is at or near Integer.MAX_VALUE - // limitRows is never 0 here - if (limitRows > 0 && offsetExpr != null) { - int offset = offsetExpr.getValue(session).getInt(); - if (offset > 0) { - limitRows += offset; - } - } - int rowNumber = 0; - setCurrentRowNumber(0); - Index index = topTableFilter.getIndex(); - SearchRow first = null; - int columnIndex = index.getColumns()[0].getColumnId(); - int sampleSize = getSampleSizeValue(session); - while (true) { - setCurrentRowNumber(rowNumber + 1); - Cursor cursor = index.findNext(session, first, null); - if (!cursor.next()) { - break; - } - SearchRow found = cursor.getSearchRow(); - Value value = found.getValue(columnIndex); - if (first == null) { - first = topTableFilter.getTable().getTemplateSimpleRow(true); - } - first.setValue(columnIndex, value); - Value[] row = { value }; - result.addRow(row); - rowNumber++; - if ((sort == null || sortUsingIndex) && limitRows > 0 && - rowNumber >= limitRows) { - break; - } - if (sampleSize > 0 && rowNumber >= sampleSize) { - break; - } - } - } - - private 
void queryFlat(int columnCount, ResultTarget result, long limitRows) { - // limitRows must be long, otherwise we get an int overflow - // if limitRows is at or near Integer.MAX_VALUE - // limitRows is never 0 here - if (limitRows > 0 && offsetExpr != null) { - int offset = offsetExpr.getValue(session).getInt(); - if (offset > 0) { - limitRows += offset; - } - } - int rowNumber = 0; - setCurrentRowNumber(0); - ArrayList forUpdateRows = null; - if (isForUpdateMvcc) { - forUpdateRows = New.arrayList(); - } - int sampleSize = getSampleSizeValue(session); - while (topTableFilter.next()) { - setCurrentRowNumber(rowNumber + 1); - if (condition == null || - Boolean.TRUE.equals(condition.getBooleanValue(session))) { - Value[] row = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - Expression expr = expressions.get(i); - row[i] = expr.getValue(session); - } - if (isForUpdateMvcc) { - topTableFilter.lockRowAdd(forUpdateRows); - } - result.addRow(row); - rowNumber++; - if ((sort == null || sortUsingIndex) && limitRows > 0 && - result.getRowCount() >= limitRows) { - break; - } - if (sampleSize > 0 && rowNumber >= sampleSize) { - break; - } - } - } - if (isForUpdateMvcc) { - topTableFilter.lockRows(forUpdateRows); - } - } - - private void queryQuick(int columnCount, ResultTarget result) { - Value[] row = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - Expression expr = expressions.get(i); - row[i] = expr.getValue(session); - } - result.addRow(row); - } - - @Override - public ResultInterface queryMeta() { - LocalResult result = new LocalResult(session, expressionArray, - visibleColumnCount); - result.done(); - return result; - } - - @Override - protected LocalResult queryWithoutCache(int maxRows, ResultTarget target) { - int limitRows = maxRows == 0 ? -1 : maxRows; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - int l = v == ValueNull.INSTANCE ? 
-1 : v.getInt(); - if (limitRows < 0) { - limitRows = l; - } else if (l >= 0) { - limitRows = Math.min(l, limitRows); - } - } - int columnCount = expressions.size(); - LocalResult result = null; - if (target == null || - !session.getDatabase().getSettings().optimizeInsertFromSelect) { - result = createLocalResult(result); - } - if (sort != null && (!sortUsingIndex || distinct)) { - result = createLocalResult(result); - result.setSortOrder(sort); - } - if (distinct && !isDistinctQuery) { - result = createLocalResult(result); - result.setDistinct(); - } - if (randomAccessResult) { - result = createLocalResult(result); - } - if (isGroupQuery && !isGroupSortedQuery) { - result = createLocalResult(result); - } - if (limitRows >= 0 || offsetExpr != null) { - result = createLocalResult(result); - } - topTableFilter.startQuery(session); - topTableFilter.reset(); - boolean exclusive = isForUpdate && !isForUpdateMvcc; - if (isForUpdateMvcc) { - if (isGroupQuery) { - throw DbException.getUnsupportedException( - "MVCC=TRUE && FOR UPDATE && GROUP"); - } else if (distinct) { - throw DbException.getUnsupportedException( - "MVCC=TRUE && FOR UPDATE && DISTINCT"); - } else if (isQuickAggregateQuery) { - throw DbException.getUnsupportedException( - "MVCC=TRUE && FOR UPDATE && AGGREGATE"); - } else if (topTableFilter.getJoin() != null) { - throw DbException.getUnsupportedException( - "MVCC=TRUE && FOR UPDATE && JOIN"); - } - } - topTableFilter.lock(session, exclusive, exclusive); - ResultTarget to = result != null ? 
result : target; - if (limitRows != 0) { - if (isQuickAggregateQuery) { - queryQuick(columnCount, to); - } else if (isGroupQuery) { - if (isGroupSortedQuery) { - queryGroupSorted(columnCount, to); - } else { - queryGroup(columnCount, result); - } - } else if (isDistinctQuery) { - queryDistinct(to, limitRows); - } else { - queryFlat(columnCount, to, limitRows); - } - } - if (offsetExpr != null) { - result.setOffset(offsetExpr.getValue(session).getInt()); - } - if (limitRows >= 0) { - result.setLimit(limitRows); - } - if (result != null) { - result.done(); - if (target != null) { - while (result.next()) { - target.addRow(result.currentRow()); - } - result.close(); - return null; - } - return result; - } - return null; - } - - private LocalResult createLocalResult(LocalResult old) { - return old != null ? old : new LocalResult(session, expressionArray, - visibleColumnCount); - } - - private void expandColumnList() { - Database db = session.getDatabase(); - - // the expressions may change within the loop - for (int i = 0; i < expressions.size(); i++) { - Expression expr = expressions.get(i); - if (!expr.isWildcard()) { - continue; - } - String schemaName = expr.getSchemaName(); - String tableAlias = expr.getTableAlias(); - if (tableAlias == null) { - expressions.remove(i); - for (TableFilter filter : filters) { - i = expandColumnList(filter, i); - } - i--; - } else { - TableFilter filter = null; - for (TableFilter f : filters) { - if (db.equalsIdentifiers(tableAlias, f.getTableAlias())) { - if (schemaName == null || - db.equalsIdentifiers(schemaName, - f.getSchemaName())) { - filter = f; - break; - } - } - } - if (filter == null) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, - tableAlias); - } - expressions.remove(i); - i = expandColumnList(filter, i); - i--; - } - } - } - - private int expandColumnList(TableFilter filter, int index) { - Table t = filter.getTable(); - String alias = filter.getTableAlias(); - Column[] columns = t.getColumns(); - for 
(Column c : columns) { - if (filter.isNaturalJoinColumn(c)) { - continue; - } - ExpressionColumn ec = new ExpressionColumn( - session.getDatabase(), null, alias, c.getName()); - expressions.add(index++, ec); - } - return index; - } - - @Override - public void init() { - if (SysProperties.CHECK && checkInit) { - DbException.throwInternalError(); - } - expandColumnList(); - visibleColumnCount = expressions.size(); - ArrayList expressionSQL; - if (orderList != null || group != null) { - expressionSQL = New.arrayList(); - for (int i = 0; i < visibleColumnCount; i++) { - Expression expr = expressions.get(i); - expr = expr.getNonAliasExpression(); - String sql = expr.getSQL(); - expressionSQL.add(sql); - } - } else { - expressionSQL = null; - } - if (orderList != null) { - initOrder(session, expressions, expressionSQL, orderList, - visibleColumnCount, distinct, filters); - } - distinctColumnCount = expressions.size(); - if (having != null) { - expressions.add(having); - havingIndex = expressions.size() - 1; - having = null; - } else { - havingIndex = -1; - } - - Database db = session.getDatabase(); - - // first the select list (visible columns), - // then 'ORDER BY' expressions, - // then 'HAVING' expressions, - // and 'GROUP BY' expressions at the end - if (group != null) { - int size = group.size(); - int expSize = expressionSQL.size(); - groupIndex = new int[size]; - for (int i = 0; i < size; i++) { - Expression expr = group.get(i); - String sql = expr.getSQL(); - int found = -1; - for (int j = 0; j < expSize; j++) { - String s2 = expressionSQL.get(j); - if (db.equalsIdentifiers(s2, sql)) { - found = j; - break; - } - } - if (found < 0) { - // special case: GROUP BY a column alias - for (int j = 0; j < expSize; j++) { - Expression e = expressions.get(j); - if (db.equalsIdentifiers(sql, e.getAlias())) { - found = j; - break; - } - sql = expr.getAlias(); - if (db.equalsIdentifiers(sql, e.getAlias())) { - found = j; - break; - } - } - } - if (found < 0) { - int index = 
expressions.size(); - groupIndex[i] = index; - expressions.add(expr); - } else { - groupIndex[i] = found; - } - } - groupByExpression = new boolean[expressions.size()]; - for (int gi : groupIndex) { - groupByExpression[gi] = true; - } - group = null; - } - // map columns in select list and condition - for (TableFilter f : filters) { - mapColumns(f, 0); - } - if (havingIndex >= 0) { - Expression expr = expressions.get(havingIndex); - SelectListColumnResolver res = new SelectListColumnResolver(this); - expr.mapColumns(res, 0); - } - checkInit = true; - } - - @Override - public void prepare() { - if (isPrepared) { - // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) - return; - } - if (SysProperties.CHECK && !checkInit) { - DbException.throwInternalError("not initialized"); - } - if (orderList != null) { - sort = prepareOrder(orderList, expressions.size()); - orderList = null; - } - for (int i = 0; i < expressions.size(); i++) { - Expression e = expressions.get(i); - expressions.set(i, e.optimize(session)); - } - if (condition != null) { - condition = condition.optimize(session); - for (TableFilter f : filters) { - // outer joins: must not add index conditions such as - // "c is null" - example: - // create table parent(p int primary key) as select 1; - // create table child(c int primary key, pc int); - // insert into child values(2, 1); - // select p, c from parent - // left outer join child on p = pc where c is null; - if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { - condition.createIndexConditions(session, f); - } - } - } - if (isGroupQuery && groupIndex == null && - havingIndex < 0 && filters.size() == 1) { - if (condition == null) { - Table t = filters.get(0).getTable(); - ExpressionVisitor optimizable = ExpressionVisitor. 
- getOptimizableVisitor(t); - isQuickAggregateQuery = isEverything(optimizable); - } - } - cost = preparePlan(); - if (distinct && session.getDatabase().getSettings().optimizeDistinct && - !isGroupQuery && filters.size() == 1 && - expressions.size() == 1 && condition == null) { - Expression expr = expressions.get(0); - expr = expr.getNonAliasExpression(); - if (expr instanceof ExpressionColumn) { - Column column = ((ExpressionColumn) expr).getColumn(); - int selectivity = column.getSelectivity(); - Index columnIndex = topTableFilter.getTable(). - getIndexForColumn(column); - if (columnIndex != null && - selectivity != Constants.SELECTIVITY_DEFAULT && - selectivity < 20) { - // the first column must be ascending - boolean ascending = columnIndex. - getIndexColumns()[0].sortType == SortOrder.ASCENDING; - Index current = topTableFilter.getIndex(); - // if another index is faster - if (columnIndex.canFindNext() && ascending && - (current == null || - current.getIndexType().isScan() || - columnIndex == current)) { - IndexType type = columnIndex.getIndexType(); - // hash indexes don't work, and unique single column - // indexes don't work - if (!type.isHash() && (!type.isUnique() || - columnIndex.getColumns().length > 1)) { - topTableFilter.setIndex(columnIndex); - isDistinctQuery = true; - } - } - } - } - } - if (sort != null && !isQuickAggregateQuery && !isGroupQuery) { - Index index = getSortIndex(); - if (index != null) { - Index current = topTableFilter.getIndex(); - if (current.getIndexType().isScan() || current == index) { - topTableFilter.setIndex(index); - if (!topTableFilter.hasInComparisons()) { - // in(select ...) 
and in(1,2,3) may return the key in - // another order - sortUsingIndex = true; - } - } else if (index.getIndexColumns().length >= - current.getIndexColumns().length) { - IndexColumn[] sortColumns = index.getIndexColumns(); - IndexColumn[] currentColumns = current.getIndexColumns(); - boolean swapIndex = false; - for (int i = 0; i < currentColumns.length; i++) { - if (sortColumns[i].column != currentColumns[i].column) { - swapIndex = false; - break; - } - if (sortColumns[i].sortType != currentColumns[i].sortType) { - swapIndex = true; - } - } - if (swapIndex) { - topTableFilter.setIndex(index); - sortUsingIndex = true; - } - } - } - } - if (!isQuickAggregateQuery && isGroupQuery && - getGroupByExpressionCount() > 0) { - Index index = getGroupSortedIndex(); - Index current = topTableFilter.getIndex(); - if (index != null && (current.getIndexType().isScan() || - current == index)) { - topTableFilter.setIndex(index); - isGroupSortedQuery = true; - } - } - expressionArray = new Expression[expressions.size()]; - expressions.toArray(expressionArray); - isPrepared = true; - } - - @Override - public double getCost() { - return cost; - } - - @Override - public HashSet
    getTables() { - HashSet
    set = New.hashSet(); - for (TableFilter filter : filters) { - set.add(filter.getTable()); - } - return set; - } - - @Override - public void fireBeforeSelectTriggers() { - for (int i = 0, size = filters.size(); i < size; i++) { - TableFilter filter = filters.get(i); - filter.getTable().fire(session, Trigger.SELECT, true); - } - } - - private double preparePlan() { - TableFilter[] topArray = topFilters.toArray( - new TableFilter[topFilters.size()]); - for (TableFilter t : topArray) { - t.setFullCondition(condition); - } - - Optimizer optimizer = new Optimizer(topArray, condition, session); - optimizer.optimize(); - topTableFilter = optimizer.getTopFilter(); - double planCost = optimizer.getCost(); - - setEvaluatableRecursive(topTableFilter); - - topTableFilter.prepare(); - return planCost; - } - - private void setEvaluatableRecursive(TableFilter f) { - for (; f != null; f = f.getJoin()) { - f.setEvaluatable(f, true); - if (condition != null) { - condition.setEvaluatable(f, true); - } - TableFilter n = f.getNestedJoin(); - if (n != null) { - setEvaluatableRecursive(n); - } - Expression on = f.getJoinCondition(); - if (on != null) { - if (!on.isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { - if (session.getDatabase().getSettings().nestedJoins) { - // need to check that all added are bound to a table - on = on.optimize(session); - if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { - f.removeJoinCondition(); - addCondition(on); - } - } else { - if (f.isJoinOuter()) { - // this will check if all columns exist - it may or - // may not throw an exception - on = on.optimize(session); - // it is not supported even if the columns exist - throw DbException.get( - ErrorCode.UNSUPPORTED_OUTER_JOIN_CONDITION_1, - on.getSQL()); - } - f.removeJoinCondition(); - // need to check that all added are bound to a table - on = on.optimize(session); - addCondition(on); - } - } - } - on = f.getFilterCondition(); - if (on != null) { - if 
(!on.isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { - f.removeFilterCondition(); - addCondition(on); - } - } - // this is only important for subqueries, so they know - // the result columns are evaluatable - for (Expression e : expressions) { - e.setEvaluatable(f, true); - } - } - } - - @Override - public String getPlanSQL() { - // can not use the field sqlStatement because the parameter - // indexes may be incorrect: ? may be in fact ?2 for a subquery - // but indexes may be set manually as well - Expression[] exprList = expressions.toArray( - new Expression[expressions.size()]); - StatementBuilder buff = new StatementBuilder("SELECT"); - if (distinct) { - buff.append(" DISTINCT"); - } - for (int i = 0; i < visibleColumnCount; i++) { - buff.appendExceptFirst(","); - buff.append('\n'); - buff.append(StringUtils.indent(exprList[i].getSQL(), 4, false)); - } - buff.append("\nFROM "); - TableFilter filter = topTableFilter; - if (filter != null) { - buff.resetCount(); - int i = 0; - do { - buff.appendExceptFirst("\n"); - buff.append(filter.getPlanSQL(i++ > 0)); - filter = filter.getJoin(); - } while (filter != null); - } else { - buff.resetCount(); - int i = 0; - for (TableFilter f : topFilters) { - do { - buff.appendExceptFirst("\n"); - buff.append(f.getPlanSQL(i++ > 0)); - f = f.getJoin(); - } while (f != null); - } - } - if (condition != null) { - buff.append("\nWHERE ").append( - StringUtils.unEnclose(condition.getSQL())); - } - if (groupIndex != null) { - buff.append("\nGROUP BY "); - buff.resetCount(); - for (int gi : groupIndex) { - Expression g = exprList[gi]; - g = g.getNonAliasExpression(); - buff.appendExceptFirst(", "); - buff.append(StringUtils.unEnclose(g.getSQL())); - } - } - if (group != null) { - buff.append("\nGROUP BY "); - buff.resetCount(); - for (Expression g : group) { - buff.appendExceptFirst(", "); - buff.append(StringUtils.unEnclose(g.getSQL())); - } - } - if (having != null) { - // could be set in addGlobalCondition - // in this case 
the query is not run directly, just getPlanSQL is - // called - Expression h = having; - buff.append("\nHAVING ").append( - StringUtils.unEnclose(h.getSQL())); - } else if (havingIndex >= 0) { - Expression h = exprList[havingIndex]; - buff.append("\nHAVING ").append( - StringUtils.unEnclose(h.getSQL())); - } - if (sort != null) { - buff.append("\nORDER BY ").append( - sort.getSQL(exprList, visibleColumnCount)); - } - if (orderList != null) { - buff.append("\nORDER BY "); - buff.resetCount(); - for (SelectOrderBy o : orderList) { - buff.appendExceptFirst(", "); - buff.append(StringUtils.unEnclose(o.getSQL())); - } - } - if (limitExpr != null) { - buff.append("\nLIMIT ").append( - StringUtils.unEnclose(limitExpr.getSQL())); - if (offsetExpr != null) { - buff.append(" OFFSET ").append( - StringUtils.unEnclose(offsetExpr.getSQL())); - } - } - if (sampleSizeExpr != null) { - buff.append("\nSAMPLE_SIZE ").append( - StringUtils.unEnclose(sampleSizeExpr.getSQL())); - } - if (isForUpdate) { - buff.append("\nFOR UPDATE"); - } - if (isQuickAggregateQuery) { - buff.append("\n/* direct lookup */"); - } - if (isDistinctQuery) { - buff.append("\n/* distinct */"); - } - if (sortUsingIndex) { - buff.append("\n/* index sorted */"); - } - if (isGroupQuery) { - if (isGroupSortedQuery) { - buff.append("\n/* group sorted */"); - } - } - // buff.append("\n/* cost: " + cost + " */"); - return buff.toString(); - } - - public void setHaving(Expression having) { - this.having = having; - } - - public Expression getHaving() { - return having; - } - - @Override - public int getColumnCount() { - return visibleColumnCount; - } - - public TableFilter getTopTableFilter() { - return topTableFilter; - } - - @Override - public ArrayList getExpressions() { - return expressions; - } - - @Override - public void setForUpdate(boolean b) { - this.isForUpdate = b; - if (session.getDatabase().getSettings().selectForUpdateMvcc && - session.getDatabase().isMultiVersion()) { - isForUpdateMvcc = b; - } - } - - 
@Override - public void mapColumns(ColumnResolver resolver, int level) { - for (Expression e : expressions) { - e.mapColumns(resolver, level); - } - if (condition != null) { - condition.mapColumns(resolver, level); - } - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - for (Expression e : expressions) { - e.setEvaluatable(tableFilter, b); - } - if (condition != null) { - condition.setEvaluatable(tableFilter, b); - } - } - - /** - * Check if this is an aggregate query with direct lookup, for example a - * query of the type SELECT COUNT(*) FROM TEST or - * SELECT MAX(ID) FROM TEST. - * - * @return true if a direct lookup is possible - */ - public boolean isQuickAggregateQuery() { - return isQuickAggregateQuery; - } - - @Override - public void addGlobalCondition(Parameter param, int columnId, - int comparisonType) { - addParameter(param); - Expression comp; - Expression col = expressions.get(columnId); - col = col.getNonAliasExpression(); - if (col.isEverything(ExpressionVisitor.QUERY_COMPARABLE_VISITOR)) { - comp = new Comparison(session, comparisonType, col, param); - } else { - // this condition will always evaluate to true, but need to - // add the parameter, so it can be set later - comp = new Comparison(session, Comparison.EQUAL_NULL_SAFE, param, param); - } - comp = comp.optimize(session); - boolean addToCondition = true; - if (isGroupQuery) { - addToCondition = false; - for (int i = 0; groupIndex != null && i < groupIndex.length; i++) { - if (groupIndex[i] == columnId) { - addToCondition = true; - break; - } - } - if (!addToCondition) { - if (havingIndex >= 0) { - having = expressions.get(havingIndex); - } - if (having == null) { - having = comp; - } else { - having = new ConditionAndOr(ConditionAndOr.AND, having, comp); - } - } - } - if (addToCondition) { - if (condition == null) { - condition = comp; - } else { - condition = new ConditionAndOr(ConditionAndOr.AND, condition, comp); - } - } - } - - @Override - public void 
updateAggregate(Session s) { - for (Expression e : expressions) { - e.updateAggregate(s); - } - if (condition != null) { - condition.updateAggregate(s); - } - if (having != null) { - having.updateAggregate(s); - } - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.DETERMINISTIC: { - if (isForUpdate) { - return false; - } - for (int i = 0, size = filters.size(); i < size; i++) { - TableFilter f = filters.get(i); - if (!f.getTable().isDeterministic()) { - return false; - } - } - break; - } - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: { - for (int i = 0, size = filters.size(); i < size; i++) { - TableFilter f = filters.get(i); - long m = f.getTable().getMaxDataModificationId(); - visitor.addDataModificationId(m); - } - break; - } - case ExpressionVisitor.EVALUATABLE: { - if (!session.getDatabase().getSettings().optimizeEvaluatableSubqueries) { - return false; - } - break; - } - case ExpressionVisitor.GET_DEPENDENCIES: { - for (int i = 0, size = filters.size(); i < size; i++) { - TableFilter f = filters.get(i); - Table table = f.getTable(); - visitor.addDependency(table); - table.addDependencies(visitor.getDependencies()); - } - break; - } - default: - } - ExpressionVisitor v2 = visitor.incrementQueryLevel(1); - boolean result = true; - for (int i = 0, size = expressions.size(); i < size; i++) { - Expression e = expressions.get(i); - if (!e.isEverything(v2)) { - result = false; - break; - } - } - if (result && condition != null && !condition.isEverything(v2)) { - result = false; - } - if (result && having != null && !having.isEverything(v2)) { - result = false; - } - return result; - } - - @Override - public boolean isReadOnly() { - return isEverything(ExpressionVisitor.READONLY_VISITOR); - } - - - @Override - public boolean isCacheable() { - return !isForUpdate; - } - - @Override - public int getType() { - return CommandInterface.SELECT; - } - - @Override - public boolean 
allowGlobalConditions() { - if (offsetExpr == null && (limitExpr == null || sort == null)) { - return true; - } - return false; - } - - public SortOrder getSortOrder() { - return sort; - } - -} diff --git a/h2/src/main/org/h2/command/dml/SelectListColumnResolver.java b/h2/src/main/org/h2/command/dml/SelectListColumnResolver.java deleted file mode 100644 index 82b647e8ba..0000000000 --- a/h2/src/main/org/h2/command/dml/SelectListColumnResolver.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import java.util.ArrayList; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.value.Value; - -/** - * This class represents a column resolver for the column list of a SELECT - * statement. It is used to resolve select column aliases in the HAVING clause. - * Example: - *

    - * SELECT X/3 AS A, COUNT(*) FROM SYSTEM_RANGE(1, 10) GROUP BY A HAVING A>2; - *

    - * - * @author Thomas Mueller - */ -public class SelectListColumnResolver implements ColumnResolver { - - private final Select select; - private final Expression[] expressions; - private final Column[] columns; - - SelectListColumnResolver(Select select) { - this.select = select; - int columnCount = select.getColumnCount(); - columns = new Column[columnCount]; - expressions = new Expression[columnCount]; - ArrayList columnList = select.getExpressions(); - for (int i = 0; i < columnCount; i++) { - Expression expr = columnList.get(i); - Column column = new Column(expr.getAlias(), Value.NULL); - column.setTable(null, i); - columns[i] = column; - expressions[i] = expr.getNonAliasExpression(); - } - } - - @Override - public Column[] getColumns() { - return columns; - } - - @Override - public String getSchemaName() { - return null; - } - - @Override - public Select getSelect() { - return select; - } - - @Override - public Column[] getSystemColumns() { - return null; - } - - @Override - public Column getRowIdColumn() { - return null; - } - - @Override - public String getTableAlias() { - return null; - } - - @Override - public TableFilter getTableFilter() { - return null; - } - - @Override - public Value getValue(Column column) { - return null; - } - - @Override - public Expression optimize(ExpressionColumn expressionColumn, Column column) { - return expressions[column.getColumnId()]; - } - -} diff --git a/h2/src/main/org/h2/command/dml/SelectOrderBy.java b/h2/src/main/org/h2/command/dml/SelectOrderBy.java deleted file mode 100644 index 87edfb7a85..0000000000 --- a/h2/src/main/org/h2/command/dml/SelectOrderBy.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import org.h2.expression.Expression; - -/** - * Describes one element of the ORDER BY clause of a query. 
- */ -public class SelectOrderBy { - - /** - * The order by expression. - */ - public Expression expression; - - /** - * The column index expression. This can be a column index number (1 meaning - * the first column of the select list) or a parameter (the parameter is a - * number representing the column index number). - */ - public Expression columnIndexExpr; - - /** - * If the column should be sorted descending. - */ - public boolean descending; - - /** - * If NULL should be appear first. - */ - public boolean nullsFirst; - - /** - * If NULL should be appear at the end. - */ - public boolean nullsLast; - - public String getSQL() { - StringBuilder buff = new StringBuilder(); - if (expression != null) { - buff.append('=').append(expression.getSQL()); - } else { - buff.append(columnIndexExpr.getSQL()); - } - if (descending) { - buff.append(" DESC"); - } - if (nullsFirst) { - buff.append(" NULLS FIRST"); - } else if (nullsLast) { - buff.append(" NULLS LAST"); - } - return buff.toString(); - } - -} diff --git a/h2/src/main/org/h2/command/dml/SelectUnion.java b/h2/src/main/org/h2/command/dml/SelectUnion.java deleted file mode 100644 index 00af273294..0000000000 --- a/h2/src/main/org/h2/command/dml/SelectUnion.java +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.command.dml; - -import java.util.ArrayList; -import java.util.HashSet; - -import org.h2.api.ErrorCode; -import org.h2.command.CommandInterface; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.expression.Expression; -import org.h2.expression.ExpressionColumn; -import org.h2.expression.ExpressionVisitor; -import org.h2.expression.Parameter; -import org.h2.expression.ValueExpression; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.result.ResultInterface; -import org.h2.result.ResultTarget; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; - -/** - * Represents a union SELECT statement. - */ -public class SelectUnion extends Query { - - /** - * The type of a UNION statement. - */ - public static final int UNION = 0; - - /** - * The type of a UNION ALL statement. - */ - public static final int UNION_ALL = 1; - - /** - * The type of an EXCEPT statement. - */ - public static final int EXCEPT = 2; - - /** - * The type of an INTERSECT statement. 
- */ - public static final int INTERSECT = 3; - - private int unionType; - private final Query left; - private Query right; - private ArrayList expressions; - private Expression[] expressionArray; - private ArrayList orderList; - private SortOrder sort; - private boolean isPrepared, checkInit; - private boolean isForUpdate; - - public SelectUnion(Session session, Query query) { - super(session); - this.left = query; - } - - public void setUnionType(int type) { - this.unionType = type; - } - - public int getUnionType() { - return unionType; - } - - public void setRight(Query select) { - right = select; - } - - public Query getLeft() { - return left; - } - - public Query getRight() { - return right; - } - - @Override - public void setSQL(String sql) { - this.sqlStatement = sql; - } - - @Override - public void setOrder(ArrayList order) { - orderList = order; - } - - private Value[] convert(Value[] values, int columnCount) { - Value[] newValues; - if (columnCount == values.length) { - // re-use the array if possible - newValues = values; - } else { - // create a new array if needed, - // for the value hash set - newValues = new Value[columnCount]; - } - for (int i = 0; i < columnCount; i++) { - Expression e = expressions.get(i); - newValues[i] = values[i].convertTo(e.getType()); - } - return newValues; - } - - @Override - public ResultInterface queryMeta() { - int columnCount = left.getColumnCount(); - LocalResult result = new LocalResult(session, expressionArray, columnCount); - result.done(); - return result; - } - - public LocalResult getEmptyResult() { - int columnCount = left.getColumnCount(); - return new LocalResult(session, expressionArray, columnCount); - } - - @Override - protected LocalResult queryWithoutCache(int maxRows, ResultTarget target) { - if (maxRows != 0) { - // maxRows is set (maxRows 0 means no limit) - int l; - if (limitExpr == null) { - l = -1; - } else { - Value v = limitExpr.getValue(session); - l = v == ValueNull.INSTANCE ? 
-1 : v.getInt(); - } - if (l < 0) { - // for limitExpr, 0 means no rows, and -1 means no limit - l = maxRows; - } else { - l = Math.min(l, maxRows); - } - limitExpr = ValueExpression.get(ValueInt.get(l)); - } - if (session.getDatabase().getSettings().optimizeInsertFromSelect) { - if (unionType == UNION_ALL && target != null) { - if (sort == null && !distinct && maxRows == 0 && - offsetExpr == null && limitExpr == null) { - left.query(0, target); - right.query(0, target); - return null; - } - } - } - int columnCount = left.getColumnCount(); - LocalResult result = new LocalResult(session, expressionArray, columnCount); - if (sort != null) { - result.setSortOrder(sort); - } - if (distinct) { - left.setDistinct(true); - right.setDistinct(true); - result.setDistinct(); - } - if (randomAccessResult) { - result.setRandomAccess(); - } - switch (unionType) { - case UNION: - case EXCEPT: - left.setDistinct(true); - right.setDistinct(true); - result.setDistinct(); - break; - case UNION_ALL: - break; - case INTERSECT: - left.setDistinct(true); - right.setDistinct(true); - break; - default: - DbException.throwInternalError("type=" + unionType); - } - LocalResult l = left.query(0); - LocalResult r = right.query(0); - l.reset(); - r.reset(); - switch (unionType) { - case UNION_ALL: - case UNION: { - while (l.next()) { - result.addRow(convert(l.currentRow(), columnCount)); - } - while (r.next()) { - result.addRow(convert(r.currentRow(), columnCount)); - } - break; - } - case EXCEPT: { - while (l.next()) { - result.addRow(convert(l.currentRow(), columnCount)); - } - while (r.next()) { - result.removeDistinct(convert(r.currentRow(), columnCount)); - } - break; - } - case INTERSECT: { - LocalResult temp = new LocalResult(session, expressionArray, columnCount); - temp.setDistinct(); - temp.setRandomAccess(); - while (l.next()) { - temp.addRow(convert(l.currentRow(), columnCount)); - } - while (r.next()) { - Value[] values = convert(r.currentRow(), columnCount); - if 
(temp.containsDistinct(values)) { - result.addRow(values); - } - } - break; - } - default: - DbException.throwInternalError("type=" + unionType); - } - if (offsetExpr != null) { - result.setOffset(offsetExpr.getValue(session).getInt()); - } - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - result.setLimit(v.getInt()); - } - } - l.close(); - r.close(); - result.done(); - if (target != null) { - while (result.next()) { - target.addRow(result.currentRow()); - } - result.close(); - return null; - } - return result; - } - - @Override - public void init() { - if (SysProperties.CHECK && checkInit) { - DbException.throwInternalError(); - } - checkInit = true; - left.init(); - right.init(); - int len = left.getColumnCount(); - if (len != right.getColumnCount()) { - throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); - } - ArrayList le = left.getExpressions(); - // set the expressions to get the right column count and names, - // but can't validate at this time - expressions = New.arrayList(); - for (int i = 0; i < len; i++) { - Expression l = le.get(i); - expressions.add(l); - } - } - - @Override - public void prepare() { - if (isPrepared) { - // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) - return; - } - if (SysProperties.CHECK && !checkInit) { - DbException.throwInternalError("not initialized"); - } - isPrepared = true; - left.prepare(); - right.prepare(); - int len = left.getColumnCount(); - // set the correct expressions now - expressions = New.arrayList(); - ArrayList le = left.getExpressions(); - ArrayList re = right.getExpressions(); - for (int i = 0; i < len; i++) { - Expression l = le.get(i); - Expression r = re.get(i); - int type = Value.getHigherOrder(l.getType(), r.getType()); - long prec = Math.max(l.getPrecision(), r.getPrecision()); - int scale = Math.max(l.getScale(), r.getScale()); - int displaySize = Math.max(l.getDisplaySize(), r.getDisplaySize()); - Column col = new 
Column(l.getAlias(), type, prec, scale, displaySize); - Expression e = new ExpressionColumn(session.getDatabase(), col); - expressions.add(e); - } - if (orderList != null) { - initOrder(session, expressions, null, orderList, getColumnCount(), true, null); - sort = prepareOrder(orderList, expressions.size()); - orderList = null; - } - expressionArray = new Expression[expressions.size()]; - expressions.toArray(expressionArray); - } - - @Override - public double getCost() { - return left.getCost() + right.getCost(); - } - - @Override - public HashSet
    getTables() { - HashSet
    set = left.getTables(); - set.addAll(right.getTables()); - return set; - } - - @Override - public ArrayList getExpressions() { - return expressions; - } - - @Override - public void setForUpdate(boolean forUpdate) { - left.setForUpdate(forUpdate); - right.setForUpdate(forUpdate); - isForUpdate = forUpdate; - } - - @Override - public int getColumnCount() { - return left.getColumnCount(); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - right.mapColumns(resolver, level); - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - right.setEvaluatable(tableFilter, b); - } - - @Override - public void addGlobalCondition(Parameter param, int columnId, - int comparisonType) { - addParameter(param); - switch (unionType) { - case UNION_ALL: - case UNION: - case INTERSECT: { - left.addGlobalCondition(param, columnId, comparisonType); - right.addGlobalCondition(param, columnId, comparisonType); - break; - } - case EXCEPT: { - left.addGlobalCondition(param, columnId, comparisonType); - break; - } - default: - DbException.throwInternalError("type=" + unionType); - } - } - - @Override - public String getPlanSQL() { - StringBuilder buff = new StringBuilder(); - buff.append('(').append(left.getPlanSQL()).append(')'); - switch (unionType) { - case UNION_ALL: - buff.append("\nUNION ALL\n"); - break; - case UNION: - buff.append("\nUNION\n"); - break; - case INTERSECT: - buff.append("\nINTERSECT\n"); - break; - case EXCEPT: - buff.append("\nEXCEPT\n"); - break; - default: - DbException.throwInternalError("type=" + unionType); - } - buff.append('(').append(right.getPlanSQL()).append(')'); - Expression[] exprList = expressions.toArray(new Expression[expressions.size()]); - if (sort != null) { - buff.append("\nORDER BY ").append(sort.getSQL(exprList, exprList.length)); - } - if (limitExpr != null) { - buff.append("\nLIMIT ").append( - 
StringUtils.unEnclose(limitExpr.getSQL())); - if (offsetExpr != null) { - buff.append("\nOFFSET ").append( - StringUtils.unEnclose(offsetExpr.getSQL())); - } - } - if (sampleSizeExpr != null) { - buff.append("\nSAMPLE_SIZE ").append( - StringUtils.unEnclose(sampleSizeExpr.getSQL())); - } - if (isForUpdate) { - buff.append("\nFOR UPDATE"); - } - return buff.toString(); - } - - @Override - public LocalResult query(int limit, ResultTarget target) { - // union doesn't always know the parameter list of the left and right - // queries - return queryWithoutCache(limit, target); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && right.isEverything(visitor); - } - - @Override - public boolean isReadOnly() { - return left.isReadOnly() && right.isReadOnly(); - } - - @Override - public void updateAggregate(Session s) { - left.updateAggregate(s); - right.updateAggregate(s); - } - - @Override - public void fireBeforeSelectTriggers() { - left.fireBeforeSelectTriggers(); - right.fireBeforeSelectTriggers(); - } - - @Override - public int getType() { - return CommandInterface.SELECT; - } - - @Override - public boolean allowGlobalConditions() { - return left.allowGlobalConditions() && right.allowGlobalConditions(); - } - -} diff --git a/h2/src/main/org/h2/command/dml/Set.java b/h2/src/main/org/h2/command/dml/Set.java index b1d08c42b8..d0020a7307 100644 --- a/h2/src/main/org/h2/command/dml/Set.java +++ b/h2/src/main/org/h2/command/dml/Set.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -9,23 +9,31 @@ import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; +import org.h2.command.Parser; import org.h2.command.Prepared; -import org.h2.compress.Compressor; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.engine.Mode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.Setting; import org.h2.expression.Expression; +import org.h2.expression.TimeZoneOperation; import org.h2.expression.ValueExpression; import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.mode.DefaultNullOrdering; import org.h2.result.ResultInterface; import org.h2.schema.Schema; +import org.h2.security.auth.AuthenticatorFactory; import org.h2.table.Table; -import org.h2.tools.CompressTool; +import org.h2.util.DateTimeUtils; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.CompareMode; -import org.h2.value.ValueInt; +import org.h2.value.DataType; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; /** * This class represents the statement @@ -38,7 +46,7 @@ public class Set extends Prepared { private String stringValue; private String[] stringValueList; - public Set(Session session, int type) { + public Set(SessionLocal session, int type) { super(session); this.type = type; } @@ -59,7 +67,14 @@ public boolean isTransactional() { case SetTypes.THROTTLE: case SetTypes.SCHEMA: case SetTypes.SCHEMA_SEARCH_PATH: + case SetTypes.CATALOG: case SetTypes.RETENTION_TIME: + case SetTypes.LAZY_QUERY_EXECUTION: + case SetTypes.NON_KEYWORDS: + case SetTypes.TIME_ZONE: + case SetTypes.VARIABLE_BINARY: + case SetTypes.TRUNCATE_LARGE_LENGTH: + case SetTypes.WRITE_DELAY: return true; default: } @@ -67,7 +82,7 @@ public boolean isTransactional() { } @Override - public int update() { + public long update() { Database database = session.getDatabase(); String name 
= SetTypes.getTypeName(type); switch (type) { @@ -75,22 +90,26 @@ public int update() { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 2) { - throw DbException.getInvalidValueException("ALLOW_LITERALS", - getIntValue()); + throw DbException.getInvalidValueException("ALLOW_LITERALS", value); + } + synchronized (database) { + database.setAllowLiterals(value); + addOrUpdateSetting(name, null, value); } - database.setAllowLiterals(value); - addOrUpdateSetting(name, null, value); break; } - case SetTypes.CACHE_SIZE: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("CACHE_SIZE", - getIntValue()); - } + case SetTypes.CACHE_SIZE: { session.getUser().checkAdmin(); - database.setCacheSize(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("CACHE_SIZE", value); + } + synchronized (database) { + database.setCacheSize(value); + addOrUpdateSetting(name, null, value); + } break; + } case SetTypes.CLUSTER: { if (Constants.CLUSTERING_ENABLED.equals(stringValue)) { // this value is used when connecting @@ -107,7 +126,7 @@ public int update() { database.setCluster(value); // use the system session so that the current transaction // (if any) is not committed - Session sysSession = database.getSystemSession(); + SessionLocal sysSession = database.getSystemSession(); synchronized (sysSession) { synchronized (database) { addOrUpdateSetting(sysSession, name, value, 0); @@ -119,12 +138,10 @@ public int update() { } case SetTypes.COLLATION: { session.getUser().checkAdmin(); - final boolean binaryUnsigned = database. 
- getCompareMode().isBinaryUnsigned(); CompareMode compareMode; StringBuilder buff = new StringBuilder(stringValue); if (stringValue.equals(CompareMode.OFF)) { - compareMode = CompareMode.getInstance(null, 0, binaryUnsigned); + compareMode = CompareMode.getInstance(null, 0); } else { int strength = getIntValue(); buff.append(" STRENGTH "); @@ -137,53 +154,20 @@ public int update() { } else if (strength == Collator.TERTIARY) { buff.append("TERTIARY"); } - compareMode = CompareMode.getInstance(stringValue, strength, - binaryUnsigned); - } - CompareMode old = database.getCompareMode(); - if (old.equals(compareMode)) { - break; - } - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get( - ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, - table.getSQL()); + compareMode = CompareMode.getInstance(stringValue, strength); } - addOrUpdateSetting(name, buff.toString(), 0); - database.setCompareMode(compareMode); - break; - } - case SetTypes.BINARY_COLLATION: { - session.getUser().checkAdmin(); - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get( - ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, - table.getSQL()); - } - CompareMode currentMode = database.getCompareMode(); - CompareMode newMode; - if (stringValue.equals(CompareMode.SIGNED)) { - newMode = CompareMode.getInstance(currentMode.getName(), - currentMode.getStrength(), false); - } else if (stringValue.equals(CompareMode.UNSIGNED)) { - newMode = CompareMode.getInstance(currentMode.getName(), - currentMode.getStrength(), true); - } else { - throw DbException.getInvalidValueException("BINARY_COLLATION", - stringValue); + synchronized (database) { + CompareMode old = database.getCompareMode(); + if (old.equals(compareMode)) { + break; + } + Table table = database.getFirstUserTable(); + if (table != null) { + throw DbException.get(ErrorCode.COLLATION_CHANGE_WITH_DATA_TABLE_1, table.getTraceSQL()); + } + addOrUpdateSetting(name, buff.toString(), 0); 
+ database.setCompareMode(compareMode); } - addOrUpdateSetting(name, stringValue, 0); - database.setCompareMode(newMode); - break; - } - case SetTypes.COMPRESS_LOB: { - session.getUser().checkAdmin(); - int algo = CompressTool.getCompressAlgorithm(stringValue); - database.setLobCompressionAlgorithm(algo == Compressor.NO ? - null : stringValue); - addOrUpdateSetting(name, stringValue, 0); break; } case SetTypes.CREATE_BUILD: { @@ -192,7 +176,9 @@ public int update() { // just ignore the command if not starting // this avoids problems when running recovery scripts int value = getIntValue(); - addOrUpdateSetting(name, null, value); + synchronized (database) { + addOrUpdateSetting(name, null, value); + } } break; } @@ -202,44 +188,59 @@ public int update() { break; } case SetTypes.DB_CLOSE_DELAY: { - int x = getIntValue(); - if (x == -1) { + session.getUser().checkAdmin(); + int value = getIntValue(); + if (value == -1) { // -1 is a special value for in-memory databases, // which means "keep the DB alive and use the same // DB for all connections" - } else if (x < 0) { - throw DbException.getInvalidValueException("DB_CLOSE_DELAY", x); + } else if (value < 0) { + throw DbException.getInvalidValueException("DB_CLOSE_DELAY", value); + } + synchronized (database) { + database.setCloseDelay(value); + addOrUpdateSetting(name, null, value); } - session.getUser().checkAdmin(); - database.setCloseDelay(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); break; } - case SetTypes.DEFAULT_LOCK_TIMEOUT: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "DEFAULT_LOCK_TIMEOUT", getIntValue()); - } + case SetTypes.DEFAULT_LOCK_TIMEOUT: { session.getUser().checkAdmin(); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("DEFAULT_LOCK_TIMEOUT", value); + } + synchronized (database) { + addOrUpdateSetting(name, null, value); + } break; - case 
SetTypes.DEFAULT_TABLE_TYPE: + } + case SetTypes.DEFAULT_TABLE_TYPE: { session.getUser().checkAdmin(); - database.setDefaultTableType(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setDefaultTableType(value); + addOrUpdateSetting(name, null, value); + } break; + } case SetTypes.EXCLUSIVE: { session.getUser().checkAdmin(); int value = getIntValue(); switch (value) { case 0: - database.setExclusiveSession(null, false); + if (!database.unsetExclusiveSession(session)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; case 1: - database.setExclusiveSession(session, false); + if (!database.setExclusiveSession(session, false)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; case 2: - database.setExclusiveSession(session, true); + if (!database.setExclusiveSession(session, true)) { + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } break; default: throw DbException.getInvalidValueException("EXCLUSIVE", value); @@ -248,91 +249,96 @@ public int update() { } case SetTypes.JAVA_OBJECT_SERIALIZER: { session.getUser().checkAdmin(); - Table table = database.getFirstUserTable(); - if (table != null) { - throw DbException.get(ErrorCode. 
- JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE, - table.getSQL()); + synchronized (database) { + Table table = database.getFirstUserTable(); + if (table != null) { + throw DbException.get(ErrorCode.JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE, + table.getTraceSQL()); + } + database.setJavaObjectSerializerName(stringValue); + addOrUpdateSetting(name, stringValue, 0); } - database.setJavaObjectSerializerName(stringValue); - addOrUpdateSetting(name, stringValue, 0); break; } - case SetTypes.IGNORECASE: + case SetTypes.IGNORECASE: { session.getUser().checkAdmin(); - database.setIgnoreCase(getIntValue() == 1); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setIgnoreCase(value == 1); + addOrUpdateSetting(name, null, value); + } break; - case SetTypes.LOCK_MODE: + } + case SetTypes.LOCK_MODE: { session.getUser().checkAdmin(); - database.setLockMode(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); - break; - case SetTypes.LOCK_TIMEOUT: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("LOCK_TIMEOUT", - getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setLockMode(value); + addOrUpdateSetting(name, null, value); } - session.setLockTimeout(getIntValue()); break; - case SetTypes.LOG: { + } + case SetTypes.LOCK_TIMEOUT: { int value = getIntValue(); - if (database.isPersistent() && value != database.getLogMode()) { - session.getUser().checkAdmin(); - database.setLogMode(value); + if (value < 0) { + throw DbException.getInvalidValueException("LOCK_TIMEOUT", value); } + session.setLockTimeout(value); break; } case SetTypes.MAX_LENGTH_INPLACE_LOB: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "MAX_LENGTH_INPLACE_LOB", getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxLengthInplaceLob(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 
0) { + throw DbException.getInvalidValueException("MAX_LENGTH_INPLACE_LOB", value); + } + synchronized (database) { + database.setMaxLengthInplaceLob(value); + addOrUpdateSetting(name, null, value); + } break; } - case SetTypes.MAX_LOG_SIZE: - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_LOG_SIZE", - getIntValue()); - } + case SetTypes.MAX_LOG_SIZE: { session.getUser().checkAdmin(); - database.setMaxLogSize((long) getIntValue() * 1024 * 1024); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_LOG_SIZE", value); + } break; + } case SetTypes.MAX_MEMORY_ROWS: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_MEMORY_ROWS", - getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxMemoryRows(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_MEMORY_ROWS", value); + } + synchronized (database) { + database.setMaxMemoryRows(value); + addOrUpdateSetting(name, null, value); + } break; } case SetTypes.MAX_MEMORY_UNDO: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("MAX_MEMORY_UNDO", - getIntValue()); - } session.getUser().checkAdmin(); - database.setMaxMemoryUndo(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_MEMORY_UNDO", value); + } + synchronized (database) { + addOrUpdateSetting(name, null, value); + } break; } case SetTypes.MAX_OPERATION_MEMORY: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "MAX_OPERATION_MEMORY", getIntValue()); - } session.getUser().checkAdmin(); int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("MAX_OPERATION_MEMORY", value); + } 
database.setMaxOperationMemory(value); break; } - case SetTypes.MODE: + case SetTypes.MODE: { Mode mode = Mode.getInstance(stringValue); if (mode == null) { throw DbException.get(ErrorCode.UNKNOWN_MODE_1, stringValue); @@ -342,17 +348,6 @@ public int update() { database.setMode(mode); } break; - case SetTypes.MULTI_THREADED: { - session.getUser().checkAdmin(); - database.setMultiThreaded(getIntValue() == 1); - break; - } - case SetTypes.MVCC: { - if (database.isMultiVersion() != (getIntValue() == 1)) { - throw DbException.get( - ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, "MVCC"); - } - break; } case SetTypes.OPTIMIZE_REUSE_RESULTS: { session.getUser().checkAdmin(); @@ -360,25 +355,22 @@ public int update() { break; } case SetTypes.QUERY_TIMEOUT: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("QUERY_TIMEOUT", - getIntValue()); - } int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("QUERY_TIMEOUT", value); + } session.setQueryTimeout(value); break; } case SetTypes.REDO_LOG_BINARY: { - int value = getIntValue(); - session.setRedoLogBinary(value == 1); + DbException.getUnsupportedException("MV_STORE + SET REDO_LOG_BINARY"); break; } case SetTypes.REFERENTIAL_INTEGRITY: { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 1) { - throw DbException.getInvalidValueException( - "REFERENTIAL_INTEGRITY", getIntValue()); + throw DbException.getInvalidValueException("REFERENTIAL_INTEGRITY", value); } database.setReferentialIntegrity(value == 1); break; @@ -387,14 +379,22 @@ public int update() { session.getUser().checkAdmin(); int value = getIntValue(); if (value < 0 || value > 1) { - throw DbException.getInvalidValueException("QUERY_STATISTICS", - getIntValue()); + throw DbException.getInvalidValueException("QUERY_STATISTICS", value); } database.setQueryStatistics(value == 1); break; } + case SetTypes.QUERY_STATISTICS_MAX_ENTRIES: { + session.getUser().checkAdmin(); + int 
value = getIntValue(); + if (value < 1) { + throw DbException.getInvalidValueException("QUERY_STATISTICS_MAX_ENTRIES", value); + } + database.setQueryStatisticsMaxEntries(value); + break; + } case SetTypes.SCHEMA: { - Schema schema = database.getSchema(stringValue); + Schema schema = database.getSchema(expression.optimize(session).getValue(session).getString()); session.setCurrentSchema(schema); break; } @@ -402,9 +402,18 @@ public int update() { session.setSchemaSearchPath(stringValueList); break; } + case SetTypes.CATALOG: { + String shortName = database.getShortName(); + String value = expression.optimize(session).getValue(session).getString(); + if (value == null || !database.equalsIdentifiers(shortName, value) + && !database.equalsIdentifiers(shortName, value.trim())) { + throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, stringValue); + } + break; + } case SetTypes.TRACE_LEVEL_FILE: session.getUser().checkAdmin(); - if (getCurrentObjectId() == 0) { + if (getPersistedObjectId() == 0) { // don't set the property when opening the database // this is for compatibility with older versions, because // this setting was persistent @@ -413,7 +422,7 @@ public int update() { break; case SetTypes.TRACE_LEVEL_SYSTEM_OUT: session.getUser().checkAdmin(); - if (getCurrentObjectId() == 0) { + if (getPersistedObjectId() == 0) { // don't set the property when opening the database // this is for compatibility with older versions, because // this setting was persistent @@ -421,31 +430,24 @@ public int update() { } break; case SetTypes.TRACE_MAX_FILE_SIZE: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException( - "TRACE_MAX_FILE_SIZE", getIntValue()); - } session.getUser().checkAdmin(); - int size = getIntValue() * 1024 * 1024; - database.getTraceSystem().setMaxFileSize(size); - addOrUpdateSetting(name, null, getIntValue()); - break; - } - case SetTypes.THROTTLE: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("THROTTLE", - 
getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("TRACE_MAX_FILE_SIZE", value); + } + int size = value * (1024 * 1024); + synchronized (database) { + database.getTraceSystem().setMaxFileSize(size); + addOrUpdateSetting(name, null, value); } - session.setThrottle(getIntValue()); break; } - case SetTypes.UNDO_LOG: { + case SetTypes.THROTTLE: { int value = getIntValue(); - if (value < 0 || value > 1) { - throw DbException.getInvalidValueException("UNDO_LOG", - getIntValue()); + if (value < 0) { + throw DbException.getInvalidValueException("THROTTLE", value); } - session.setUndoLogEnabled(value == 1); + session.setThrottle(value); break; } case SetTypes.VARIABLE: { @@ -454,27 +456,108 @@ public int update() { break; } case SetTypes.WRITE_DELAY: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("WRITE_DELAY", - getIntValue()); - } session.getUser().checkAdmin(); - database.setWriteDelay(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("WRITE_DELAY", value); + } + synchronized (database) { + database.setWriteDelay(value); + addOrUpdateSetting(name, null, value); + } break; } case SetTypes.RETENTION_TIME: { - if (getIntValue() < 0) { - throw DbException.getInvalidValueException("RETENTION_TIME", - getIntValue()); + session.getUser().checkAdmin(); + int value = getIntValue(); + if (value < 0) { + throw DbException.getInvalidValueException("RETENTION_TIME", value); } + synchronized (database) { + database.setRetentionTime(value); + addOrUpdateSetting(name, null, value); + } + break; + } + case SetTypes.LAZY_QUERY_EXECUTION: { + int value = getIntValue(); + if (value != 0 && value != 1) { + throw DbException.getInvalidValueException("LAZY_QUERY_EXECUTION", + value); + } + session.setLazyQueryExecution(value == 1); + break; + } + case SetTypes.BUILTIN_ALIAS_OVERRIDE: { + 
session.getUser().checkAdmin(); + int value = getIntValue(); + if (value != 0 && value != 1) { + throw DbException.getInvalidValueException("BUILTIN_ALIAS_OVERRIDE", + value); + } + database.setAllowBuiltinAliasOverride(value == 1); + break; + } + case SetTypes.AUTHENTICATOR: { + session.getUser().checkAdmin(); + boolean value = expression.optimize(session).getBooleanValue(session); + try { + synchronized (database) { + if (value) { + database.setAuthenticator(AuthenticatorFactory.createAuthenticator()); + } else { + database.setAuthenticator(null); + } + addOrUpdateSetting(name, value ? "TRUE" : "FALSE", 0); + } + } catch (Exception e) { + // Errors during start are ignored to allow to open the database + if (database.isStarting()) { + database.getTrace(Trace.DATABASE).error(e, + "{0}: failed to set authenticator during database start ", expression.toString()); + } else { + throw DbException.convert(e); + } + } + break; + } + case SetTypes.IGNORE_CATALOGS: { session.getUser().checkAdmin(); - database.setRetentionTime(getIntValue()); - addOrUpdateSetting(name, null, getIntValue()); + int value = getIntValue(); + synchronized (database) { + database.setIgnoreCatalogs(value == 1); + addOrUpdateSetting(name, null, value); + } break; } + case SetTypes.NON_KEYWORDS: + session.setNonKeywords(Parser.parseNonKeywords(stringValueList)); + break; + case SetTypes.TIME_ZONE: + session.setTimeZone(expression == null ? 
DateTimeUtils.getTimeZone() + : parseTimeZone(expression.getValue(session))); + break; + case SetTypes.VARIABLE_BINARY: + session.setVariableBinary(expression.getBooleanValue(session)); + break; + case SetTypes.DEFAULT_NULL_ORDERING: { + DefaultNullOrdering defaultNullOrdering; + try { + defaultNullOrdering = DefaultNullOrdering.valueOf(StringUtils.toUpperEnglish(stringValue)); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("DEFAULT_NULL_ORDERING", stringValue); + } + if (database.getDefaultNullOrdering() != defaultNullOrdering) { + session.getUser().checkAdmin(); + database.setDefaultNullOrdering(defaultNullOrdering); + } + break; + } + case SetTypes.TRUNCATE_LARGE_LENGTH: + session.setTruncateLargeLength(expression.getBooleanValue(session)); + break; default: - DbException.throwInternalError("type="+type); + throw DbException.getInternalError("type="+type); } // the meta data information has changed database.getNextModificationDataId(); @@ -484,13 +567,28 @@ public int update() { return 0; } + private static TimeZoneProvider parseTimeZone(Value v) { + if (DataType.isCharacterStringType(v.getValueType())) { + TimeZoneProvider timeZone; + try { + timeZone = TimeZoneProvider.ofId(v.getString()); + } catch (IllegalArgumentException ex) { + throw DbException.getInvalidValueException("TIME ZONE", v.getTraceSQL()); + } + return timeZone; + } else if (v == ValueNull.INSTANCE) { + throw DbException.getInvalidValueException("TIME ZONE", v); + } + return TimeZoneProvider.ofOffset(TimeZoneOperation.parseInterval(v)); + } + private int getIntValue() { expression = expression.optimize(session); return expression.getValue(session).getInt(); } public void setInt(int value) { - this.expression = ValueExpression.get(ValueInt.get(value)); + this.expression = ValueExpression.get(ValueInteger.get(value)); } public void setExpression(Expression expression) { @@ -501,9 +599,9 @@ private void addOrUpdateSetting(String name, String s, int v) { 
addOrUpdateSetting(session, name, s, v); } - private void addOrUpdateSetting(Session session, String name, String s, - int v) { + private void addOrUpdateSetting(SessionLocal session, String name, String s, int v) { Database database = session.getDatabase(); + assert Thread.holdsLock(database); if (database.isReadOnly()) { return; } diff --git a/h2/src/main/org/h2/command/dml/SetClauseList.java b/h2/src/main/org/h2/command/dml/SetClauseList.java new file mode 100644 index 0000000000..a17d38b825 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/SetClauseList.java @@ -0,0 +1,404 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; +import org.h2.result.Row; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.DataChangeDeltaTable.ResultOption; +import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Set clause list. + */ +public final class SetClauseList implements HasSQL { + + private final Table table; + + private final UpdateAction[] actions; + + private boolean onUpdate; + + public SetClauseList(Table table) { + this.table = table; + actions = new UpdateAction[table.getColumns().length]; + } + + /** + * Add a single column. 
+ * + * @param column the column + * @param expression the expression + */ + public void addSingle(Column column, Expression expression) { + int id = column.getColumnId(); + if (actions[id] != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); + } + if (expression != ValueExpression.DEFAULT) { + actions[id] = new SetSimple(expression); + if (expression instanceof Parameter) { + ((Parameter) expression).setColumn(column); + } + } else { + actions[id] = SetClauseList.UpdateAction.SET_DEFAULT; + } + } + + /** + * Add multiple columns. + * + * @param columns the columns + * @param expression the expression (e.g. an expression list) + */ + public void addMultiple(ArrayList columns, Expression expression) { + int columnCount = columns.size(); + if (expression instanceof ExpressionList) { + ExpressionList expressions = (ExpressionList) expression; + if (!expressions.isArray()) { + if (columnCount != expressions.getSubexpressionCount()) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + for (int i = 0; i < columnCount; i++) { + addSingle(columns.get(i), expressions.getSubexpression(i)); + } + return; + } + } + if (columnCount == 1) { + // Row value special case + addSingle(columns.get(0), expression); + } else { + int[] cols = new int[columnCount]; + RowExpression row = new RowExpression(expression, cols); + int minId = table.getColumns().length - 1, maxId = 0; + for (int i = 0; i < columnCount; i++) { + int id = columns.get(i).getColumnId(); + if (id < minId) { + minId = id; + } + if (id > maxId) { + maxId = id; + } + } + for (int i = 0; i < columnCount; i++) { + Column column = columns.get(i); + int id = column.getColumnId(); + cols[i] = id; + if (actions[id] != null) { + throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getName()); + } + actions[id] = new SetMultiple(row, i, id == minId, id == maxId); + } + } + } + + boolean prepareUpdate(Table table, SessionLocal session, ResultTarget 
deltaChangeCollector, + ResultOption deltaChangeCollectionMode, LocalResult rows, Row oldRow, + boolean updateToCurrentValuesReturnsZero) { + Column[] columns = table.getColumns(); + int columnCount = columns.length; + Row newRow = table.getTemplateRow(); + for (int i = 0; i < columnCount; i++) { + UpdateAction action = actions[i]; + Column column = columns[i]; + Value newValue; + if (action == null || action == UpdateAction.ON_UPDATE) { + newValue = column.isGenerated() ? null : oldRow.getValue(i); + } else if (action == UpdateAction.SET_DEFAULT) { + newValue = !column.isIdentity() ? null : oldRow.getValue(i); + } else { + newValue = action.update(session); + if (newValue == ValueNull.INSTANCE && column.isDefaultOnNull()) { + newValue = !column.isIdentity() ? null : oldRow.getValue(i); + } else if (column.isGeneratedAlways()) { + throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1, + column.getSQLWithTable(new StringBuilder(), TRACE_SQL_FLAGS).toString()); + } + } + newRow.setValue(i, newValue); + } + newRow.setKey(oldRow.getKey()); + table.convertUpdateRow(session, newRow, false); + boolean result = true; + if (onUpdate) { + if (!oldRow.hasSameValues(newRow)) { + for (int i = 0; i < columnCount; i++) { + if (actions[i] == UpdateAction.ON_UPDATE) { + newRow.setValue(i, columns[i].getEffectiveOnUpdateExpression().getValue(session)); + } else if (columns[i].isGenerated()) { + newRow.setValue(i, null); + } + } + // Convert on update expressions and reevaluate + // generated columns + table.convertUpdateRow(session, newRow, false); + } else if (updateToCurrentValuesReturnsZero) { + result = false; + } + } else if (updateToCurrentValuesReturnsZero && oldRow.hasSameValues(newRow)) { + result = false; + } + if (deltaChangeCollectionMode == ResultOption.OLD) { + deltaChangeCollector.addRow(oldRow.getValueList()); + } else if (deltaChangeCollectionMode == ResultOption.NEW) { + deltaChangeCollector.addRow(newRow.getValueList().clone()); + } + if 
(!table.fireRow() || !table.fireBeforeRow(session, oldRow, newRow)) { + rows.addRowForTable(oldRow); + rows.addRowForTable(newRow); + } + if (deltaChangeCollectionMode == ResultOption.FINAL) { + deltaChangeCollector.addRow(newRow.getValueList()); + } + return result; + } + + /** + * Check if this expression and all sub-expressions can fulfill a criteria. + * If any part returns false, the result is false. + * + * @param visitor + * the visitor + * @return if the criteria can be fulfilled + */ + boolean isEverything(ExpressionVisitor visitor) { + for (UpdateAction action : actions) { + if (action != null) { + if (!action.isEverything(visitor)) { + return false; + } + } + } + return true; + } + + /** + * Map the columns and optimize expressions. + * + * @param session + * the session + * @param resolver1 + * the first column resolver + * @param resolver2 + * the second column resolver, or {@code null} + */ + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + Column[] columns = table.getColumns(); + boolean onUpdate = false; + for (int i = 0; i < actions.length; i++) { + UpdateAction action = actions[i]; + if (action != null) { + action.mapAndOptimize(session, resolver1, resolver2); + } else { + Column column = columns[i]; + if (column.getEffectiveOnUpdateExpression() != null) { + actions[i] = UpdateAction.ON_UPDATE; + onUpdate = true; + } + } + } + this.onUpdate = onUpdate; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + Column[] columns = table.getColumns(); + builder.append("\nSET\n "); + boolean f = false; + for (int i = 0; i < actions.length; i++) { + UpdateAction action = actions[i]; + if (action != null && action != UpdateAction.ON_UPDATE) { + if (action.getClass() == SetMultiple.class) { + SetMultiple multiple = (SetMultiple) action; + if (multiple.first) { + if (f) { + builder.append(",\n "); + } + f = true; + RowExpression r = multiple.row; + builder.append('('); + int[] 
cols = r.columns; + for (int j = 0, l = cols.length; j < l; j++) { + if (j > 0) { + builder.append(", "); + } + columns[cols[j]].getSQL(builder, sqlFlags); + } + r.expression.getUnenclosedSQL(builder.append(") = "), sqlFlags); + } + } else { + if (f) { + builder.append(",\n "); + } + f = true; + Column column = columns[i]; + if (action != UpdateAction.SET_DEFAULT) { + action.getSQL(builder, sqlFlags, column); + } else { + column.getSQL(builder, sqlFlags).append(" = DEFAULT"); + } + } + } + } + return builder; + } + + private static class UpdateAction { + + static UpdateAction ON_UPDATE = new UpdateAction(); + + static UpdateAction SET_DEFAULT = new UpdateAction(); + + UpdateAction() { + } + + Value update(SessionLocal session) { + throw DbException.getInternalError(); + } + + boolean isEverything(ExpressionVisitor visitor) { + return true; + } + + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + // Do nothing + } + + void getSQL(StringBuilder builder, int sqlFlags, Column column) { + throw DbException.getInternalError(); + } + + } + + private static final class SetSimple extends UpdateAction { + + private Expression expression; + + SetSimple(Expression expression) { + this.expression = expression; + } + + @Override + Value update(SessionLocal session) { + return expression.getValue(session); + } + + @Override + boolean isEverything(ExpressionVisitor visitor) { + return expression.isEverything(visitor); + } + + @Override + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + expression.mapColumns(resolver1, 0, Expression.MAP_INITIAL); + if (resolver2 != null) { + expression.mapColumns(resolver2, 0, Expression.MAP_INITIAL); + } + expression = expression.optimize(session); + } + + @Override + void getSQL(StringBuilder builder, int sqlFlags, Column column) { + expression.getUnenclosedSQL(column.getSQL(builder, sqlFlags).append(" = "), sqlFlags); + } + + } + + private static 
final class RowExpression { + + Expression expression; + + final int[] columns; + + Value[] values; + + RowExpression(Expression expression, int[] columns) { + this.expression = expression; + this.columns = columns; + } + + boolean isEverything(ExpressionVisitor visitor) { + return expression.isEverything(visitor); + } + + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + expression.mapColumns(resolver1, 0, Expression.MAP_INITIAL); + if (resolver2 != null) { + expression.mapColumns(resolver2, 0, Expression.MAP_INITIAL); + } + expression = expression.optimize(session); + } + } + + private static final class SetMultiple extends UpdateAction { + + final RowExpression row; + + private final int position; + + boolean first; + + private boolean last; + + SetMultiple(RowExpression row, int position, boolean first, boolean last) { + this.row = row; + this.position = position; + this.first = first; + this.last = last; + } + + @Override + Value update(SessionLocal session) { + Value[] v; + if (first) { + Value value = row.expression.getValue(session); + if (value == ValueNull.INSTANCE) { + throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, "NULL to assigned row value"); + } + row.values = v = value.convertToAnyRow().getList(); + if (v.length != row.columns.length) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + } else { + v = row.values; + if (last) { + row.values = null; + } + } + return v[position]; + } + + @Override + boolean isEverything(ExpressionVisitor visitor) { + return !first || row.isEverything(visitor); + } + + @Override + void mapAndOptimize(SessionLocal session, ColumnResolver resolver1, ColumnResolver resolver2) { + if (first) { + row.mapAndOptimize(session, resolver1, resolver2); + } + } + + } + +} diff --git a/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java b/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java new file mode 100644 index 
0000000000..cb5efc62f7 --- /dev/null +++ b/h2/src/main/org/h2/command/dml/SetSessionCharacteristics.java @@ -0,0 +1,52 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.dml; + +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.IsolationLevel; +import org.h2.engine.SessionLocal; +import org.h2.result.ResultInterface; + +/** + * This class represents the statement SET SESSION CHARACTERISTICS + */ +public class SetSessionCharacteristics extends Prepared { + + private final IsolationLevel isolationLevel; + + public SetSessionCharacteristics(SessionLocal session, IsolationLevel isolationLevel) { + super(session); + this.isolationLevel = isolationLevel; + } + + @Override + public boolean isTransactional() { + return false; + } + + @Override + public long update() { + session.setIsolationLevel(isolationLevel); + return 0; + } + + @Override + public boolean needRecompile() { + return false; + } + + @Override + public ResultInterface queryMeta() { + return null; + } + + @Override + public int getType() { + return CommandInterface.SET; + } + +} diff --git a/h2/src/main/org/h2/command/dml/SetTypes.java b/h2/src/main/org/h2/command/dml/SetTypes.java index 91ad3ecd17..464ffc8674 100644 --- a/h2/src/main/org/h2/command/dml/SetTypes.java +++ b/h2/src/main/org/h2/command/dml/SetTypes.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.command.dml; import java.util.ArrayList; -import org.h2.util.New; /** * The list of setting for a SET statement. 
@@ -16,258 +15,291 @@ public class SetTypes { /** * The type of a SET IGNORECASE statement. */ - public static final int IGNORECASE = 1; + public static final int IGNORECASE = 0; /** * The type of a SET MAX_LOG_SIZE statement. */ - public static final int MAX_LOG_SIZE = 2; + public static final int MAX_LOG_SIZE = IGNORECASE + 1; /** * The type of a SET MODE statement. */ - public static final int MODE = 3; + public static final int MODE = MAX_LOG_SIZE + 1; /** * The type of a SET READONLY statement. */ - public static final int READONLY = 4; + public static final int READONLY = MODE + 1; /** * The type of a SET LOCK_TIMEOUT statement. */ - public static final int LOCK_TIMEOUT = 5; + public static final int LOCK_TIMEOUT = READONLY + 1; /** * The type of a SET DEFAULT_LOCK_TIMEOUT statement. */ - public static final int DEFAULT_LOCK_TIMEOUT = 6; + public static final int DEFAULT_LOCK_TIMEOUT = LOCK_TIMEOUT + 1; /** * The type of a SET DEFAULT_TABLE_TYPE statement. */ - public static final int DEFAULT_TABLE_TYPE = 7; + public static final int DEFAULT_TABLE_TYPE = DEFAULT_LOCK_TIMEOUT + 1; /** * The type of a SET CACHE_SIZE statement. */ - public static final int CACHE_SIZE = 8; + public static final int CACHE_SIZE = DEFAULT_TABLE_TYPE + 1; /** * The type of a SET TRACE_LEVEL_SYSTEM_OUT statement. */ - public static final int TRACE_LEVEL_SYSTEM_OUT = 9; + public static final int TRACE_LEVEL_SYSTEM_OUT = CACHE_SIZE + 1; /** * The type of a SET TRACE_LEVEL_FILE statement. */ - public static final int TRACE_LEVEL_FILE = 10; + public static final int TRACE_LEVEL_FILE = TRACE_LEVEL_SYSTEM_OUT + 1; /** * The type of a SET TRACE_MAX_FILE_SIZE statement. */ - public static final int TRACE_MAX_FILE_SIZE = 11; + public static final int TRACE_MAX_FILE_SIZE = TRACE_LEVEL_FILE + 1; /** * The type of a SET COLLATION statement. */ - public static final int COLLATION = 12; + public static final int COLLATION = TRACE_MAX_FILE_SIZE + 1; /** * The type of a SET CLUSTER statement. 
*/ - public static final int CLUSTER = 13; + public static final int CLUSTER = COLLATION + 1; /** * The type of a SET WRITE_DELAY statement. */ - public static final int WRITE_DELAY = 14; + public static final int WRITE_DELAY = CLUSTER + 1; /** * The type of a SET DATABASE_EVENT_LISTENER statement. */ - public static final int DATABASE_EVENT_LISTENER = 15; + public static final int DATABASE_EVENT_LISTENER = WRITE_DELAY + 1; /** * The type of a SET MAX_MEMORY_ROWS statement. */ - public static final int MAX_MEMORY_ROWS = 16; + public static final int MAX_MEMORY_ROWS = DATABASE_EVENT_LISTENER + 1; /** * The type of a SET LOCK_MODE statement. */ - public static final int LOCK_MODE = 17; + public static final int LOCK_MODE = MAX_MEMORY_ROWS + 1; /** * The type of a SET DB_CLOSE_DELAY statement. */ - public static final int DB_CLOSE_DELAY = 18; - - /** - * The type of a SET LOG statement. - */ - public static final int LOG = 19; + public static final int DB_CLOSE_DELAY = LOCK_MODE + 1; /** * The type of a SET THROTTLE statement. */ - public static final int THROTTLE = 20; + public static final int THROTTLE = DB_CLOSE_DELAY + 1; /** * The type of a SET MAX_MEMORY_UNDO statement. */ - public static final int MAX_MEMORY_UNDO = 21; + public static final int MAX_MEMORY_UNDO = THROTTLE + 1; /** * The type of a SET MAX_LENGTH_INPLACE_LOB statement. */ - public static final int MAX_LENGTH_INPLACE_LOB = 22; + public static final int MAX_LENGTH_INPLACE_LOB = MAX_MEMORY_UNDO + 1; /** - * The type of a SET COMPRESS_LOB statement. + * The type of a SET ALLOW_LITERALS statement. */ - public static final int COMPRESS_LOB = 23; + public static final int ALLOW_LITERALS = MAX_LENGTH_INPLACE_LOB + 1; /** - * The type of a SET ALLOW_LITERALS statement. + * The type of a SET SCHEMA statement. */ - public static final int ALLOW_LITERALS = 24; + public static final int SCHEMA = ALLOW_LITERALS + 1; /** - * The type of a SET MULTI_THREADED statement. 
+ * The type of a SET OPTIMIZE_REUSE_RESULTS statement. */ - public static final int MULTI_THREADED = 25; + public static final int OPTIMIZE_REUSE_RESULTS = SCHEMA + 1; /** - * The type of a SET SCHEMA statement. + * The type of a SET SCHEMA_SEARCH_PATH statement. */ - public static final int SCHEMA = 26; + public static final int SCHEMA_SEARCH_PATH = OPTIMIZE_REUSE_RESULTS + 1; /** - * The type of a SET OPTIMIZE_REUSE_RESULTS statement. + * The type of a SET REFERENTIAL_INTEGRITY statement. */ - public static final int OPTIMIZE_REUSE_RESULTS = 27; + public static final int REFERENTIAL_INTEGRITY = SCHEMA_SEARCH_PATH + 1; /** - * The type of a SET SCHEMA_SEARCH_PATH statement. + * The type of a SET MAX_OPERATION_MEMORY statement. */ - public static final int SCHEMA_SEARCH_PATH = 28; + public static final int MAX_OPERATION_MEMORY = REFERENTIAL_INTEGRITY + 1; /** - * The type of a SET UNDO_LOG statement. + * The type of a SET EXCLUSIVE statement. */ - public static final int UNDO_LOG = 29; + public static final int EXCLUSIVE = MAX_OPERATION_MEMORY + 1; /** - * The type of a SET REFERENTIAL_INTEGRITY statement. + * The type of a SET CREATE_BUILD statement. */ - public static final int REFERENTIAL_INTEGRITY = 30; + public static final int CREATE_BUILD = EXCLUSIVE + 1; /** - * The type of a SET MVCC statement. + * The type of a SET \@VARIABLE statement. */ - public static final int MVCC = 31; + public static final int VARIABLE = CREATE_BUILD + 1; /** - * The type of a SET MAX_OPERATION_MEMORY statement. + * The type of a SET QUERY_TIMEOUT statement. */ - public static final int MAX_OPERATION_MEMORY = 32; + public static final int QUERY_TIMEOUT = VARIABLE + 1; /** - * The type of a SET EXCLUSIVE statement. + * The type of a SET REDO_LOG_BINARY statement. */ - public static final int EXCLUSIVE = 33; + public static final int REDO_LOG_BINARY = QUERY_TIMEOUT + 1; /** - * The type of a SET CREATE_BUILD statement. + * The type of a SET JAVA_OBJECT_SERIALIZER statement. 
*/ - public static final int CREATE_BUILD = 34; + public static final int JAVA_OBJECT_SERIALIZER = REDO_LOG_BINARY + 1; /** - * The type of a SET \@VARIABLE statement. + * The type of a SET RETENTION_TIME statement. */ - public static final int VARIABLE = 35; + public static final int RETENTION_TIME = JAVA_OBJECT_SERIALIZER + 1; /** - * The type of a SET QUERY_TIMEOUT statement. + * The type of a SET QUERY_STATISTICS statement. */ - public static final int QUERY_TIMEOUT = 36; + public static final int QUERY_STATISTICS = RETENTION_TIME + 1; /** - * The type of a SET REDO_LOG_BINARY statement. + * The type of a SET QUERY_STATISTICS_MAX_ENTRIES statement. */ - public static final int REDO_LOG_BINARY = 37; + public static final int QUERY_STATISTICS_MAX_ENTRIES = QUERY_STATISTICS + 1; /** - * The type of a SET BINARY_COLLATION statement. + * The type of SET LAZY_QUERY_EXECUTION statement. */ - public static final int BINARY_COLLATION = 38; + public static final int LAZY_QUERY_EXECUTION = QUERY_STATISTICS_MAX_ENTRIES + 1; /** - * The type of a SET JAVA_OBJECT_SERIALIZER statement. + * The type of SET BUILTIN_ALIAS_OVERRIDE statement. */ - public static final int JAVA_OBJECT_SERIALIZER = 39; + public static final int BUILTIN_ALIAS_OVERRIDE = LAZY_QUERY_EXECUTION + 1; /** - * The type of a SET RETENTION_TIME statement. + * The type of a SET AUTHENTICATOR statement. + */ + public static final int AUTHENTICATOR = BUILTIN_ALIAS_OVERRIDE + 1; + + /** + * The type of a SET IGNORE_CATALOGS statement. + */ + public static final int IGNORE_CATALOGS = AUTHENTICATOR + 1; + + /** + * The type of a SET CATALOG statement. + */ + public static final int CATALOG = IGNORE_CATALOGS + 1; + + /** + * The type of a SET NON_KEYWORDS statement. + */ + public static final int NON_KEYWORDS = CATALOG + 1; + + /** + * The type of a SET TIME ZONE statement. + */ + public static final int TIME_ZONE = NON_KEYWORDS + 1; + + /** + * The type of a SET VARIABLE_BINARY statement. 
+ */ + public static final int VARIABLE_BINARY = TIME_ZONE + 1; + + /** + * The type of a SET DEFAULT_NULL_ORDERING statement. */ - public static final int RETENTION_TIME = 40; + public static final int DEFAULT_NULL_ORDERING = VARIABLE_BINARY + 1; /** - * The type of a SET QUERY_STATISTICS_ACTIVE statement. + * The type of a SET TRUNCATE_LARGE_LENGTH statement. */ - public static final int QUERY_STATISTICS = 41; + public static final int TRUNCATE_LARGE_LENGTH = DEFAULT_NULL_ORDERING + 1; + + private static final int COUNT = TRUNCATE_LARGE_LENGTH + 1; - private static final ArrayList TYPES = New.arrayList(); + private static final ArrayList TYPES; private SetTypes() { // utility class } static { - ArrayList list = TYPES; - list.add(null); - list.add(IGNORECASE, "IGNORECASE"); - list.add(MAX_LOG_SIZE, "MAX_LOG_SIZE"); - list.add(MODE, "MODE"); - list.add(READONLY, "READONLY"); - list.add(LOCK_TIMEOUT, "LOCK_TIMEOUT"); - list.add(DEFAULT_LOCK_TIMEOUT, "DEFAULT_LOCK_TIMEOUT"); - list.add(DEFAULT_TABLE_TYPE, "DEFAULT_TABLE_TYPE"); - list.add(CACHE_SIZE, "CACHE_SIZE"); - list.add(TRACE_LEVEL_SYSTEM_OUT, "TRACE_LEVEL_SYSTEM_OUT"); - list.add(TRACE_LEVEL_FILE, "TRACE_LEVEL_FILE"); - list.add(TRACE_MAX_FILE_SIZE, "TRACE_MAX_FILE_SIZE"); - list.add(COLLATION, "COLLATION"); - list.add(CLUSTER, "CLUSTER"); - list.add(WRITE_DELAY, "WRITE_DELAY"); - list.add(DATABASE_EVENT_LISTENER, "DATABASE_EVENT_LISTENER"); - list.add(MAX_MEMORY_ROWS, "MAX_MEMORY_ROWS"); - list.add(LOCK_MODE, "LOCK_MODE"); - list.add(DB_CLOSE_DELAY, "DB_CLOSE_DELAY"); - list.add(LOG, "LOG"); - list.add(THROTTLE, "THROTTLE"); - list.add(MAX_MEMORY_UNDO, "MAX_MEMORY_UNDO"); - list.add(MAX_LENGTH_INPLACE_LOB, "MAX_LENGTH_INPLACE_LOB"); - list.add(COMPRESS_LOB, "COMPRESS_LOB"); - list.add(ALLOW_LITERALS, "ALLOW_LITERALS"); - list.add(MULTI_THREADED, "MULTI_THREADED"); - list.add(SCHEMA, "SCHEMA"); - list.add(OPTIMIZE_REUSE_RESULTS, "OPTIMIZE_REUSE_RESULTS"); - list.add(SCHEMA_SEARCH_PATH, "SCHEMA_SEARCH_PATH"); - 
list.add(UNDO_LOG, "UNDO_LOG"); - list.add(REFERENTIAL_INTEGRITY, "REFERENTIAL_INTEGRITY"); - list.add(MVCC, "MVCC"); - list.add(MAX_OPERATION_MEMORY, "MAX_OPERATION_MEMORY"); - list.add(EXCLUSIVE, "EXCLUSIVE"); - list.add(CREATE_BUILD, "CREATE_BUILD"); - list.add(VARIABLE, "@"); - list.add(QUERY_TIMEOUT, "QUERY_TIMEOUT"); - list.add(REDO_LOG_BINARY, "REDO_LOG_BINARY"); - list.add(BINARY_COLLATION, "BINARY_COLLATION"); - list.add(JAVA_OBJECT_SERIALIZER, "JAVA_OBJECT_SERIALIZER"); - list.add(RETENTION_TIME, "RETENTION_TIME"); - list.add(QUERY_STATISTICS, "QUERY_STATISTICS"); + ArrayList list = new ArrayList<>(COUNT); + list.add("IGNORECASE"); + list.add("MAX_LOG_SIZE"); + list.add("MODE"); + list.add("READONLY"); + list.add("LOCK_TIMEOUT"); + list.add("DEFAULT_LOCK_TIMEOUT"); + list.add("DEFAULT_TABLE_TYPE"); + list.add("CACHE_SIZE"); + list.add("TRACE_LEVEL_SYSTEM_OUT"); + list.add("TRACE_LEVEL_FILE"); + list.add("TRACE_MAX_FILE_SIZE"); + list.add("COLLATION"); + list.add("CLUSTER"); + list.add("WRITE_DELAY"); + list.add("DATABASE_EVENT_LISTENER"); + list.add("MAX_MEMORY_ROWS"); + list.add("LOCK_MODE"); + list.add("DB_CLOSE_DELAY"); + list.add("THROTTLE"); + list.add("MAX_MEMORY_UNDO"); + list.add("MAX_LENGTH_INPLACE_LOB"); + list.add("ALLOW_LITERALS"); + list.add("SCHEMA"); + list.add("OPTIMIZE_REUSE_RESULTS"); + list.add("SCHEMA_SEARCH_PATH"); + list.add("REFERENTIAL_INTEGRITY"); + list.add("MAX_OPERATION_MEMORY"); + list.add("EXCLUSIVE"); + list.add("CREATE_BUILD"); + list.add("@"); + list.add("QUERY_TIMEOUT"); + list.add("REDO_LOG_BINARY"); + list.add("JAVA_OBJECT_SERIALIZER"); + list.add("RETENTION_TIME"); + list.add("QUERY_STATISTICS"); + list.add("QUERY_STATISTICS_MAX_ENTRIES"); + list.add("LAZY_QUERY_EXECUTION"); + list.add("BUILTIN_ALIAS_OVERRIDE"); + list.add("AUTHENTICATOR"); + list.add("IGNORE_CATALOGS"); + list.add("CATALOG"); + list.add("NON_KEYWORDS"); + list.add("TIME ZONE"); + list.add("VARIABLE_BINARY"); + list.add("DEFAULT_NULL_ORDERING"); + 
list.add("TRUNCATE_LARGE_LENGTH"); + TYPES = list; + assert(list.size() == COUNT); } /** @@ -277,12 +309,7 @@ private SetTypes() { * @return the number */ public static int getType(String name) { - for (int i = 0; i < getTypes().size(); i++) { - if (name.equals(getTypes().get(i))) { - return i; - } - } - return -1; + return TYPES.indexOf(name); } public static ArrayList getTypes() { @@ -296,7 +323,7 @@ public static ArrayList getTypes() { * @return the name */ public static String getTypeName(int type) { - return getTypes().get(type); + return TYPES.get(type); } } diff --git a/h2/src/main/org/h2/command/dml/TransactionCommand.java b/h2/src/main/org/h2/command/dml/TransactionCommand.java index 6e46694260..c8fa171126 100644 --- a/h2/src/main/org/h2/command/dml/TransactionCommand.java +++ b/h2/src/main/org/h2/command/dml/TransactionCommand.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; @@ -8,7 +8,7 @@ import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; @@ -21,7 +21,7 @@ public class TransactionCommand extends Prepared { private String savepointName; private String transactionName; - public TransactionCommand(Session session, int type) { + public TransactionCommand(SessionLocal session, int type) { super(session); this.type = type; } @@ -31,7 +31,7 @@ public void setSavepointName(String name) { } @Override - public int update() { + public long update() { switch (type) { case CommandInterface.SET_AUTOCOMMIT_TRUE: session.setAutoCommit(true); @@ -73,46 +73,27 @@ public int update() { session.getUser().checkAdmin(); session.setPreparedTransaction(transactionName, false); break; - case CommandInterface.SHUTDOWN_IMMEDIATELY: - session.getUser().checkAdmin(); - session.getDatabase().shutdownImmediately(); - break; case CommandInterface.SHUTDOWN: case CommandInterface.SHUTDOWN_COMPACT: - case CommandInterface.SHUTDOWN_DEFRAG: { - session.getUser().checkAdmin(); + case CommandInterface.SHUTDOWN_DEFRAG: session.commit(false); - if (type == CommandInterface.SHUTDOWN_COMPACT || - type == CommandInterface.SHUTDOWN_DEFRAG) { - session.getDatabase().setCompactMode(type); - } - // close the database, but don't update the persistent setting - session.getDatabase().setCloseDelay(0); - Database db = session.getDatabase(); + //$FALL-THROUGH$ + case CommandInterface.SHUTDOWN_IMMEDIATELY: { + session.getUser().checkAdmin(); // throttle, to allow testing concurrent // execution of shutdown and query session.throttle(); - for (Session s : db.getSessions(false)) { - if (db.isMultiThreaded()) { - synchronized (s) { - s.rollback(); - } - } else { - // if not multi-threaded, the session could already own - // the lock, which 
would result in a deadlock - // the other session can not concurrently do anything - // because the current session has locked the database - s.rollback(); - } - if (s != session) { - s.close(); - } + Database db = session.getDatabase(); + if (db.setExclusiveSession(session, true)) { + db.setCompactMode(type); + // close the database, but don't update the persistent setting + db.setCloseDelay(0); + session.close(); } - session.close(); break; } default: - DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } return 0; } diff --git a/h2/src/main/org/h2/command/dml/Update.java b/h2/src/main/org/h2/command/dml/Update.java index c5c37daebb..26781c9594 100644 --- a/h2/src/main/org/h2/command/dml/Update.java +++ b/h2/src/main/org/h2/command/dml/Update.java @@ -1,33 +1,29 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.command.dml; -import java.util.ArrayList; -import java.util.HashMap; +import java.util.HashSet; -import org.h2.api.ErrorCode; import org.h2.api.Trigger; import org.h2.command.CommandInterface; import org.h2.command.Prepared; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.DbObject; import org.h2.engine.Right; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; -import org.h2.expression.Parameter; -import org.h2.expression.ValueExpression; +import org.h2.expression.ExpressionVisitor; import org.h2.message.DbException; -import org.h2.result.ResultInterface; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; import org.h2.result.Row; -import org.h2.result.RowList; -import org.h2.table.Column; +import org.h2.table.DataChangeDeltaTable.ResultOption; import org.h2.table.PlanItem; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -35,185 +31,157 @@ * This class represents the statement * UPDATE */ -public class Update extends Prepared { +public final class Update extends FilteredDataChangeStatement { - private Expression condition; - private TableFilter tableFilter; + private SetClauseList setClauseList; - /** The limit expression as specified in the LIMIT clause. 
*/ - private Expression limitExpr; + private Insert onDuplicateKeyInsert; - private final ArrayList columns = New.arrayList(); - private final HashMap expressionMap = New.hashMap(); + private TableFilter fromTableFilter; - public Update(Session session) { + public Update(SessionLocal session) { super(session); } - public void setTableFilter(TableFilter tableFilter) { - this.tableFilter = tableFilter; + public void setSetClauseList(SetClauseList setClauseList) { + this.setClauseList = setClauseList; } - public void setCondition(Expression condition) { - this.condition = condition; - } - - /** - * Add an assignment of the form column = expression. - * - * @param column the column - * @param expression the expression - */ - public void setAssignment(Column column, Expression expression) { - if (expressionMap.containsKey(column)) { - throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column - .getName()); - } - columns.add(column); - expressionMap.put(column, expression); - if (expression instanceof Parameter) { - Parameter p = (Parameter) expression; - p.setColumn(column); - } + public void setFromTableFilter(TableFilter tableFilter) { + this.fromTableFilter = tableFilter; } @Override - public int update() { - tableFilter.startQuery(session); - tableFilter.reset(); - RowList rows = new RowList(session); - try { - Table table = tableFilter.getTable(); - session.getUser().checkRight(table, Right.UPDATE); + public long update(ResultTarget deltaChangeCollector, ResultOption deltaChangeCollectionMode) { + targetTableFilter.startQuery(session); + targetTableFilter.reset(); + Table table = targetTableFilter.getTable(); + try (LocalResult rows = LocalResult.forTable(session, table)) { + session.getUser().checkTableRight(table, Right.UPDATE); table.fire(session, Trigger.UPDATE, true); - table.lock(session, true, false); - int columnCount = table.getColumns().length; + table.lock(session, Table.WRITE_LOCK); // get the old rows, compute the new rows setCurrentRowNumber(0); 
- int count = 0; - Column[] columns = table.getColumns(); - int limitRows = -1; - if (limitExpr != null) { - Value v = limitExpr.getValue(session); - if (v != ValueNull.INSTANCE) { - limitRows = v.getInt(); + long count = 0; + long limitRows = -1; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + if (v == ValueNull.INSTANCE || (limitRows = v.getLong()) < 0) { + throw DbException.getInvalidValueException("FETCH", v); } } - while (tableFilter.next()) { - setCurrentRowNumber(count+1); - if (limitRows >= 0 && count >= limitRows) { - break; - } - if (condition == null || - Boolean.TRUE.equals(condition.getBooleanValue(session))) { - Row oldRow = tableFilter.get(); - Row newRow = table.getTemplateRow(); - for (int i = 0; i < columnCount; i++) { - Expression newExpr = expressionMap.get(columns[i]); - Value newValue; - if (newExpr == null) { - newValue = oldRow.getValue(i); - } else if (newExpr == ValueExpression.getDefault()) { - Column column = table.getColumn(i); - newValue = table.getDefaultValue(session, column); - } else { - Column column = table.getColumn(i); - newValue = column.convert(newExpr.getValue(session)); - } - newRow.setValue(i, newValue); - } - table.validateConvertUpdateSequence(session, newRow); - boolean done = false; - if (table.fireRow()) { - done = table.fireBeforeRow(session, oldRow, newRow); + while (nextRow(limitRows, count)) { + Row oldRow = targetTableFilter.get(); + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, oldRow); + if (lockedRow == null) { + continue; } - if (!done) { - rows.add(oldRow); - rows.add(newRow); + if (!oldRow.hasSharedData(lockedRow)) { + oldRow = lockedRow; + targetTableFilter.set(oldRow); + if (condition != null && !condition.getBooleanValue(session)) { + continue; + } } - count++; } - } - // TODO self referencing referential integrity constraints - // don't work if update is multi-row and 'inversed' the condition! 
- // probably need multi-row triggers with 'deleted' and 'inserted' - // at the same time. anyway good for sql compatibility - // TODO update in-place (but if the key changes, - // we need to update all indexes) before row triggers - - // the cached row is already updated - we need the old values - table.updateRows(this, session, rows); - if (table.fireRow()) { - rows.invalidateCache(); - for (rows.reset(); rows.hasNext();) { - Row o = rows.next(); - Row n = rows.next(); - table.fireAfterRow(session, o, n, false); + if (setClauseList.prepareUpdate(table, session, deltaChangeCollector, deltaChangeCollectionMode, + rows, oldRow, onDuplicateKeyInsert != null)) { + count++; } } + doUpdate(this, session, table, rows); table.fire(session, Trigger.UPDATE, false); return count; - } finally { - rows.close(); } } - @Override - public String getPlanSQL() { - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(tableFilter.getPlanSQL(false)).append("\nSET\n "); - for (int i = 0, size = columns.size(); i < size; i++) { - Column c = columns.get(i); - Expression e = expressionMap.get(c); - buff.appendExceptFirst(",\n "); - buff.append(c.getName()).append(" = ").append(e.getSQL()); + static void doUpdate(Prepared prepared, SessionLocal session, Table table, LocalResult rows) { + rows.done(); + // TODO self referencing referential integrity constraints + // don't work if update is multi-row and 'inversed' the condition! + // probably need multi-row triggers with 'deleted' and 'inserted' + // at the same time. 
anyway good for sql compatibility + // TODO update in-place (but if the key changes, + // we need to update all indexes) before row triggers + + // the cached row is already updated - we need the old values + table.updateRows(prepared, session, rows); + if (table.fireRow()) { + for (rows.reset(); rows.next();) { + Row o = rows.currentRowForTable(); + rows.next(); + Row n = rows.currentRowForTable(); + table.fireAfterRow(session, o, n, false); + } } - if (condition != null) { - buff.append("\nWHERE ").append(StringUtils.unEnclose(condition.getSQL())); + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder("UPDATE "); + targetTableFilter.getPlanSQL(builder, false, sqlFlags); + if (fromTableFilter != null) { + builder.append("\nFROM "); + fromTableFilter.getPlanSQL(builder, false, sqlFlags); } - return buff.toString(); + setClauseList.getSQL(builder, sqlFlags); + appendFilterCondition(builder, sqlFlags); + return builder.toString(); } @Override public void prepare() { + if (fromTableFilter != null) { + targetTableFilter.addJoin(fromTableFilter, false, null); + } if (condition != null) { - condition.mapColumns(tableFilter, 0); - condition = condition.optimize(session); - condition.createIndexConditions(session, tableFilter); + condition.mapColumns(targetTableFilter, 0, Expression.MAP_INITIAL); + if (fromTableFilter != null) { + condition.mapColumns(fromTableFilter, 0, Expression.MAP_INITIAL); + } + condition = condition.optimizeCondition(session); + if (condition != null) { + condition.createIndexConditions(session, targetTableFilter); + } } - for (int i = 0, size = columns.size(); i < size; i++) { - Column c = columns.get(i); - Expression e = expressionMap.get(c); - e.mapColumns(tableFilter, 0); - expressionMap.put(c, e.optimize(session)); + setClauseList.mapAndOptimize(session, targetTableFilter, fromTableFilter); + TableFilter[] filters = null; + if (fromTableFilter == null) { + filters = new TableFilter[] { 
targetTableFilter }; + } else { + filters = new TableFilter[] { targetTableFilter, fromTableFilter }; } - PlanItem item = tableFilter.getBestPlanItem(session, 1); - tableFilter.setPlanItem(item); - tableFilter.prepare(); + PlanItem item = targetTableFilter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); + targetTableFilter.setPlanItem(item); + targetTableFilter.prepare(); } @Override - public boolean isTransactional() { - return true; + public int getType() { + return CommandInterface.UPDATE; } @Override - public ResultInterface queryMeta() { - return null; + public String getStatementName() { + return "UPDATE"; } @Override - public int getType() { - return CommandInterface.UPDATE; + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + if (condition != null) { + condition.isEverything(visitor); + } + setClauseList.isEverything(visitor); } - public void setLimit(Expression limit) { - this.limitExpr = limit; + public Insert getOnDuplicateKeyInsert() { + return onDuplicateKeyInsert; } - @Override - public boolean isCacheable() { - return true; + void setOnDuplicateKeyInsert(Insert onDuplicateKeyInsert) { + this.onDuplicateKeyInsert = onDuplicateKeyInsert; } } diff --git a/h2/src/main/org/h2/command/dml/package.html b/h2/src/main/org/h2/command/dml/package.html index 868e8a4ca7..077734e108 100644 --- a/h2/src/main/org/h2/command/dml/package.html +++ b/h2/src/main/org/h2/command/dml/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/package.html b/h2/src/main/org/h2/command/package.html index 807f2bb9db..6003e70e0d 100644 --- a/h2/src/main/org/h2/command/package.html +++ b/h2/src/main/org/h2/command/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/command/query/AllColumnsForPlan.java b/h2/src/main/org/h2/command/query/AllColumnsForPlan.java new file mode 100644 index 0000000000..b5b34e5290 --- /dev/null +++ 
b/h2/src/main/org/h2/command/query/AllColumnsForPlan.java @@ -0,0 +1,58 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import java.util.ArrayList; +import java.util.HashMap; +import org.h2.expression.ExpressionVisitor; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.table.TableFilter; + +/** + * This information is expensive to compute for large queries, so do so + * on-demand. Also store the information pre-mapped by table to avoid expensive + * traversal. + */ +public class AllColumnsForPlan { + + private final TableFilter[] filters; + private HashMap> map; + + public AllColumnsForPlan(TableFilter[] filters) { + this.filters = filters; + } + + /** + * Called by ExpressionVisitor. + * + * @param newCol new column to be added. + */ + public void add(Column newCol) { + ArrayList cols = map.get(newCol.getTable()); + if (cols == null) { + cols = new ArrayList<>(); + map.put(newCol.getTable(), cols); + } + if (!cols.contains(newCol)) + cols.add(newCol); + } + + /** + * Used by index to calculate the cost of a scan. + * + * @param table the table. + * @return all table's referenced columns. + */ + public ArrayList get(Table table) { + if (map == null) { + map = new HashMap<>(); + ExpressionVisitor.allColumnsForTableFilters(filters, this); + } + return map.get(table); + } + +} diff --git a/h2/src/main/org/h2/command/dml/Optimizer.java b/h2/src/main/org/h2/command/query/Optimizer.java similarity index 80% rename from h2/src/main/org/h2/command/dml/Optimizer.java rename to h2/src/main/org/h2/command/query/Optimizer.java index 6d27ec6468..83bd58699f 100644 --- a/h2/src/main/org/h2/command/dml/Optimizer.java +++ b/h2/src/main/org/h2/command/query/Optimizer.java @@ -1,17 +1,17 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ -package org.h2.command.dml; +package org.h2.command.query; +import java.util.BitSet; import java.util.Random; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.table.Plan; import org.h2.table.PlanItem; import org.h2.table.TableFilter; -import org.h2.util.BitField; import org.h2.util.Permutations; /** @@ -23,8 +23,8 @@ class Optimizer { private static final int MAX_BRUTE_FORCE_FILTERS = 7; private static final int MAX_BRUTE_FORCE = 2000; private static final int MAX_GENETIC = 500; - private long start; - private BitField switched; + private long startNs; + private BitSet switched; // possible plans for filters, if using brute force: // 1 filter 1 plan @@ -40,17 +40,19 @@ class Optimizer { private final TableFilter[] filters; private final Expression condition; - private final Session session; + private final SessionLocal session; private Plan bestPlan; private TableFilter topFilter; private double cost; private Random random; + private final AllColumnsForPlan allColumnsSet; - Optimizer(TableFilter[] filters, Expression condition, Session session) { + Optimizer(TableFilter[] filters, Expression condition, SessionLocal session) { this.filters = filters; this.condition = condition; this.session = session; + allColumnsSet = new AllColumnsForPlan(filters); } /** @@ -74,28 +76,32 @@ private static int getMaxBruteForceFilters(int filterCount) { } private void calculateBestPlan() { - start = System.currentTimeMillis(); cost = -1; if (filters.length == 1) { testPlan(filters); - } else if (filters.length <= MAX_BRUTE_FORCE_FILTERS) { - calculateBruteForceAll(); } else { - calculateBruteForceSome(); - random = new Random(0); - calculateGenetic(); 
+ startNs = System.nanoTime(); + if (filters.length <= MAX_BRUTE_FORCE_FILTERS) { + calculateBruteForceAll(); + } else { + calculateBruteForceSome(); + random = new Random(0); + calculateGenetic(); + } } } + private void calculateFakePlan() { + cost = -1; + bestPlan = new Plan(filters, filters.length, condition); + } + private boolean canStop(int x) { - if ((x & 127) == 0) { - long t = System.currentTimeMillis() - start; - // don't calculate for simple queries (no rows or so) - if (cost >= 0 && 10 * t > cost) { - return true; - } - } - return false; + return (x & 127) == 0 + // don't calculate for simple queries (no rows or so) + && cost >= 0 + // 100 microseconds * cost + && System.nanoTime() - startNs > cost * 100_000L; } private void calculateBruteForceAll() { @@ -130,7 +136,7 @@ private void calculateBruteForceSome() { } list[i] = filters[j]; Plan part = new Plan(list, i+1, condition); - double costNow = part.calculateCost(session); + double costNow = part.calculateCost(session, allColumnsSet); if (costPart < 0 || costNow < costPart) { costPart = costNow; bestPart = j; @@ -159,13 +165,13 @@ private void calculateGenetic() { } } if (generateRandom) { - switched = new BitField(); + switched = new BitSet(); System.arraycopy(filters, 0, best, 0, filters.length); shuffleAll(best); System.arraycopy(best, 0, list, 0, filters.length); } if (testPlan(list)) { - switched = new BitField(); + switched = new BitSet(); System.arraycopy(list, 0, best, 0, filters.length); } } @@ -173,7 +179,7 @@ private void calculateGenetic() { private boolean testPlan(TableFilter[] list) { Plan p = new Plan(list, list.length, condition); - double costNow = p.calculateCost(session); + double costNow = p.calculateCost(session, allColumnsSet); if (cost < 0 || costNow < cost) { cost = costNow; bestPlan = p; @@ -224,14 +230,24 @@ private boolean shuffleTwo(TableFilter[] f) { /** * Calculate the best query plan to use. 
+ * + * @param parse If we do not need to really get the best plan because it is + * a view parsing stage. */ - void optimize() { - calculateBestPlan(); - bestPlan.removeUnusableIndexConditions(); + void optimize(boolean parse) { + if (parse) { + calculateFakePlan(); + } else { + calculateBestPlan(); + bestPlan.removeUnusableIndexConditions(); + } TableFilter[] f2 = bestPlan.getFilters(); topFilter = f2[0]; for (int i = 0; i < f2.length - 1; i++) { - f2[i].addJoin(f2[i + 1], false, false, null); + f2[i].addJoin(f2[i + 1], false, null); + } + if (parse) { + return; } for (TableFilter f : f2) { PlanItem item = bestPlan.getItem(f); diff --git a/h2/src/main/org/h2/command/query/Query.java b/h2/src/main/org/h2/command/query/Query.java new file mode 100644 index 0000000000..227e15a472 --- /dev/null +++ b/h2/src/main/org/h2/command/query/Query.java @@ -0,0 +1,1018 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.result.SortOrder; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.table.TableView; +import org.h2.util.Utils; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * Represents a SELECT statement (simple, or union). + */ +public abstract class Query extends Prepared { + + /** + * Evaluated values of OFFSET and FETCH clauses. + */ + static final class OffsetFetch { + + /** + * OFFSET value. + */ + final long offset; + + /** + * FETCH value. + */ + final long fetch; + + /** + * Whether FETCH value is a PERCENT value. + */ + final boolean fetchPercent; + + OffsetFetch(long offset, long fetch, boolean fetchPercent) { + this.offset = offset; + this.fetch = fetch; + this.fetchPercent = fetchPercent; + } + + } + + /** + * The column list, including invisible expressions such as order by expressions. + */ + ArrayList expressions; + + /** + * Array of expressions. 
+ * + * @see #expressions + */ + Expression[] expressionArray; + + /** + * Describes elements of the ORDER BY clause of a query. + */ + ArrayList orderList; + + /** + * A sort order represents an ORDER BY clause in a query. + */ + SortOrder sort; + + /** + * The fetch expression as specified in the FETCH, LIMIT, or TOP clause. + */ + Expression fetchExpr; + + /** + * Whether limit expression specifies percentage of rows. + */ + boolean fetchPercent; + + /** + * Whether tied rows should be included in result too. + */ + boolean withTies; + + /** + * The offset expression as specified in the OFFSET clause. + */ + Expression offsetExpr; + + /** + * Whether the result must only contain distinct rows. + */ + boolean distinct; + + /** + * Whether the result needs to support random access. + */ + boolean randomAccessResult; + + /** + * The visible columns (the ones required in the result). + */ + int visibleColumnCount; + + /** + * Number of columns including visible columns and additional virtual + * columns for ORDER BY and DISTINCT ON clauses. This number does not + * include virtual columns for HAVING and QUALIFY. + */ + int resultColumnCount; + + private boolean noCache; + private long lastLimit; + private long lastEvaluated; + private ResultInterface lastResult; + private Boolean lastExists; + private Value[] lastParameters; + private boolean cacheableChecked; + private boolean neverLazy; + + boolean checkInit; + + boolean isPrepared; + + Query(SessionLocal session) { + super(session); + } + + public void setNeverLazy(boolean b) { + this.neverLazy = b; + } + + public boolean isNeverLazy() { + return neverLazy; + } + + /** + * Check if this is a UNION query. 
+ * + * @return {@code true} if this is a UNION query + */ + public abstract boolean isUnion(); + + @Override + public ResultInterface queryMeta() { + LocalResult result = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + result.done(); + return result; + } + + /** + * Execute the query without checking the cache. If a target is specified, + * the results are written to it, and the method returns null. If no target + * is specified, a new LocalResult is created and returned. + * + * @param limit the limit as specified in the JDBC method call + * @param target the target to write results to + * @return the result + */ + protected abstract ResultInterface queryWithoutCache(long limit, ResultTarget target); + + private ResultInterface queryWithoutCacheLazyCheck(long limit, ResultTarget target) { + boolean disableLazy = neverLazy && session.isLazyQueryExecution(); + if (disableLazy) { + session.setLazyQueryExecution(false); + } + try { + return queryWithoutCache(limit, target); + } finally { + if (disableLazy) { + session.setLazyQueryExecution(true); + } + } + } + + /** + * Initialize the query. + */ + public abstract void init(); + + /** + * The the list of select expressions. + * This may include invisible expressions such as order by expressions. + * + * @return the list of expressions + */ + public ArrayList getExpressions() { + return expressions; + } + + /** + * Calculate the cost to execute this query. + * + * @return the cost + */ + public abstract double getCost(); + + /** + * Calculate the cost when used as a subquery. + * This method returns a value between 10 and 1000000, + * to ensure adding other values can't result in an integer overflow. 
+ * + * @return the estimated cost as an integer + */ + public int getCostAsExpression() { + // ensure the cost is not larger than 1 million, + // so that adding other values can't overflow + return (int) Math.min(1_000_000d, 10d + 10d * getCost()); + } + + /** + * Get all tables that are involved in this query. + * + * @return the set of tables + */ + public abstract HashSet
    getTables(); + + /** + * Set the order by list. + * + * @param order the order by list + */ + public void setOrder(ArrayList order) { + orderList = order; + } + + /** + * Whether the query has an order. + * + * @return true if it has + */ + public boolean hasOrder() { + return orderList != null || sort != null; + } + + /** + * Set the 'for update' flag. + * + * @param forUpdate the new setting + */ + public abstract void setForUpdate(boolean forUpdate); + + /** + * Get the column count of this query. + * + * @return the column count + */ + public int getColumnCount() { + return visibleColumnCount; + } + + /** + * Returns data type of rows. + * + * @return data type of rows + */ + public TypeInfo getRowDataType() { + if (visibleColumnCount == 1) { + return expressionArray[0].getType(); + } + return TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(expressionArray, visibleColumnCount)); + } + + /** + * Map the columns to the given column resolver. + * + * @param resolver + * the resolver + * @param level + * the subquery level (0 is the top level query, 1 is the first + * subquery level) + */ + public abstract void mapColumns(ColumnResolver resolver, int level); + + /** + * Change the evaluatable flag. This is used when building the execution + * plan. + * + * @param tableFilter the table filter + * @param b the new value + */ + public abstract void setEvaluatable(TableFilter tableFilter, boolean b); + + /** + * Add a condition to the query. This is used for views. + * + * @param param the parameter + * @param columnId the column index (0 meaning the first column) + * @param comparisonType the comparison type + */ + public abstract void addGlobalCondition(Parameter param, int columnId, + int comparisonType); + + /** + * Check whether adding condition to the query is allowed. This is not + * allowed for views that have an order by and a limit, as it would affect + * the returned results. 
+ * + * @return true if adding global conditions is allowed + */ + public abstract boolean allowGlobalConditions(); + + /** + * Check if this expression and all sub-expressions can fulfill a criteria. + * If any part returns false, the result is false. + * + * @param visitor the visitor + * @return if the criteria can be fulfilled + */ + public abstract boolean isEverything(ExpressionVisitor visitor); + + @Override + public boolean isReadOnly() { + return isEverything(ExpressionVisitor.READONLY_VISITOR); + } + + /** + * Update all aggregate function values. + * + * @param s the session + * @param stage select stage + */ + public abstract void updateAggregate(SessionLocal s, int stage); + + /** + * Call the before triggers on all tables. + */ + public abstract void fireBeforeSelectTriggers(); + + /** + * Set the distinct flag only if it is possible, may be used as a possible + * optimization only. + */ + public void setDistinctIfPossible() { + if (!isAnyDistinct() && offsetExpr == null && fetchExpr == null) { + distinct = true; + } + } + + /** + * @return whether this query is a plain {@code DISTINCT} query + */ + public boolean isStandardDistinct() { + return distinct; + } + + /** + * @return whether this query is a {@code DISTINCT} or + * {@code DISTINCT ON (...)} query + */ + public boolean isAnyDistinct() { + return distinct; + } + + /** + * Returns whether results support random access. + * + * @return whether results support random access + */ + public boolean isRandomAccessResult() { + return randomAccessResult; + } + + /** + * Whether results need to support random access. + * + * @param b the new value + */ + public void setRandomAccessResult(boolean b) { + randomAccessResult = b; + } + + @Override + public boolean isQuery() { + return true; + } + + @Override + public boolean isTransactional() { + return true; + } + + /** + * Disable caching of result sets. 
+ */ + public void disableCache() { + this.noCache = true; + } + + private boolean sameResultAsLast(Value[] params, Value[] lastParams, long lastEval) { + if (!cacheableChecked) { + long max = getMaxDataModificationId(); + noCache = max == Long.MAX_VALUE; + if (!isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR) || + !isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { + noCache = true; + } + cacheableChecked = true; + } + if (noCache) { + return false; + } + for (int i = 0; i < params.length; i++) { + Value a = lastParams[i], b = params[i]; + if (a.getValueType() != b.getValueType() || !session.areEqual(a, b)) { + return false; + } + } + return getMaxDataModificationId() <= lastEval; + } + + private Value[] getParameterValues() { + ArrayList list = getParameters(); + if (list == null) { + return Value.EMPTY_VALUES; + } + int size = list.size(); + Value[] params = new Value[size]; + for (int i = 0; i < size; i++) { + Value v = list.get(i).getParamValue(); + params[i] = v; + } + return params; + } + + @Override + public final ResultInterface query(long maxrows) { + return query(maxrows, null); + } + + /** + * Execute the query, writing the result to the target result. + * + * @param limit the maximum number of rows to return + * @param target the target result (null will return the result) + * @return the result set (if the target is not set). 
+ */ + public final ResultInterface query(long limit, ResultTarget target) { + if (isUnion()) { + // union doesn't always know the parameter list of the left and + // right queries + return queryWithoutCacheLazyCheck(limit, target); + } + fireBeforeSelectTriggers(); + if (noCache || !session.getDatabase().getOptimizeReuseResults() || + (session.isLazyQueryExecution() && !neverLazy)) { + return queryWithoutCacheLazyCheck(limit, target); + } + Value[] params = getParameterValues(); + long now = session.getDatabase().getModificationDataId(); + if (isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + if (lastResult != null && !lastResult.isClosed() && + limit == lastLimit) { + if (sameResultAsLast(params, lastParameters, lastEvaluated)) { + lastResult = lastResult.createShallowCopy(session); + if (lastResult != null) { + lastResult.reset(); + return lastResult; + } + } + } + } + lastParameters = params; + closeLastResult(); + ResultInterface r = queryWithoutCacheLazyCheck(limit, target); + lastResult = r; + lastExists = null; + lastEvaluated = now; + lastLimit = limit; + return r; + } + + private void closeLastResult() { + if (lastResult != null) { + lastResult.close(); + } + } + + /** + * Execute the EXISTS predicate over the query. 
+ * + * @return EXISTS predicate result + */ + public final boolean exists() { + if (isUnion()) { + // union doesn't always know the parameter list of the left and + // right queries + return executeExists(); + } + fireBeforeSelectTriggers(); + if (noCache || !session.getDatabase().getOptimizeReuseResults()) { + return executeExists(); + } + Value[] params = getParameterValues(); + long now = session.getDatabase().getModificationDataId(); + if (isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + if (lastExists != null) { + if (sameResultAsLast(params, lastParameters, lastEvaluated)) { + return lastExists; + } + } + } + lastParameters = params; + boolean exists = executeExists(); + lastExists = exists; + lastResult = null; + lastEvaluated = now; + return exists; + } + + private boolean executeExists() { + ResultInterface r = queryWithoutCacheLazyCheck(1L, null); + boolean exists = r.hasNext(); + r.close(); + return exists; + } + + /** + * Initialize the order by list. This call may extend the expressions list. + * + * @param expressionSQL the select list SQL snippets + * @param mustBeInResult all order by expressions must be in the select list + * @param filters the table filters + * @return {@code true} if ORDER BY clause is preserved, {@code false} + * otherwise + */ + boolean initOrder(ArrayList expressionSQL, boolean mustBeInResult, ArrayList filters) { + for (Iterator i = orderList.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); + Expression e = o.expression; + if (e == null) { + continue; + } + if (e.isConstant()) { + i.remove(); + continue; + } + int idx = initExpression(expressionSQL, e, mustBeInResult, filters); + o.columnIndexExpr = ValueExpression.get(ValueInteger.get(idx + 1)); + o.expression = expressions.get(idx).getNonAliasExpression(); + } + if (orderList.isEmpty()) { + orderList = null; + return false; + } + return true; + } + + /** + * Initialize the 'ORDER BY' or 'DISTINCT' expressions. 
+ * + * @param expressionSQL the select list SQL snippets + * @param e the expression. + * @param mustBeInResult all order by expressions must be in the select list + * @param filters the table filters. + * @return index on the expression in the {@link #expressions} list. + */ + int initExpression(ArrayList expressionSQL, Expression e, boolean mustBeInResult, + ArrayList filters) { + Database db = session.getDatabase(); + // special case: SELECT 1 AS A FROM DUAL ORDER BY A + // (oracle supports it, but only in order by, not in group by and + // not in having): + // SELECT 1 AS A FROM DUAL ORDER BY -A + if (e instanceof ExpressionColumn) { + // order by expression + ExpressionColumn exprCol = (ExpressionColumn) e; + String tableAlias = exprCol.getOriginalTableAliasName(); + String col = exprCol.getOriginalColumnName(); + for (int j = 0, visible = getColumnCount(); j < visible; j++) { + Expression ec = expressions.get(j); + if (ec instanceof ExpressionColumn) { + // select expression + ExpressionColumn c = (ExpressionColumn) ec; + if (!db.equalsIdentifiers(col, c.getColumnName(session, j))) { + continue; + } + if (tableAlias == null) { + return j; + } + String ca = c.getOriginalTableAliasName(); + if (ca != null) { + if (db.equalsIdentifiers(ca, tableAlias)) { + return j; + } + } else if (filters != null) { + // select id from test order by test.id + for (TableFilter f : filters) { + if (db.equalsIdentifiers(f.getTableAlias(), tableAlias)) { + return j; + } + } + } + } else if (ec instanceof Alias) { + if (tableAlias == null && db.equalsIdentifiers(col, ec.getAlias(session, j))) { + return j; + } + Expression ec2 = ec.getNonAliasExpression(); + if (ec2 instanceof ExpressionColumn) { + ExpressionColumn c2 = (ExpressionColumn) ec2; + String ta = exprCol.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + String tb = c2.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + String s2 = c2.getColumnName(session, j); + if (db.equalsIdentifiers(col, s2) && 
db.equalsIdentifiers(ta, tb)) { + return j; + } + } + } + } + } else if (expressionSQL != null) { + String s = e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + for (int j = 0, size = expressionSQL.size(); j < size; j++) { + if (db.equalsIdentifiers(expressionSQL.get(j), s)) { + return j; + } + } + } + if (expressionSQL == null + || mustBeInResult && !db.getMode().allowUnrelatedOrderByExpressionsInDistinctQueries + && !checkOrderOther(session, e, expressionSQL)) { + throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, e.getTraceSQL()); + } + int idx = expressions.size(); + expressions.add(e); + expressionSQL.add(e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + return idx; + } + + /** + * An additional check for expression in ORDER BY list for DISTINCT selects + * that was not matched with selected expressions in regular way. This + * method allows expressions based only on selected expressions in different + * complicated ways with functions, comparisons, or operators. + * + * @param session session + * @param expr expression to check + * @param expressionSQL SQL of allowed expressions + * @return whether the specified expression should be allowed in ORDER BY + * list of DISTINCT select + */ + private static boolean checkOrderOther(SessionLocal session, Expression expr, ArrayList expressionSQL) { + if (expr == null || expr.isConstant()) { + // ValueExpression, null expression in CASE, or other + return true; + } + String exprSQL = expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + for (String sql: expressionSQL) { + if (session.getDatabase().equalsIdentifiers(exprSQL, sql)) { + return true; + } + } + int count = expr.getSubexpressionCount(); + if (!expr.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + return false; + } else if (count <= 0) { + // Expression is an ExpressionColumn, Parameter, SequenceValue or + // has other unsupported type without subexpressions + return false; + } + for (int i = 0; i < count; i++) { + if 
(!checkOrderOther(session, expr.getSubexpression(i), expressionSQL)) { + return false; + } + } + return true; + } + + /** + * Create a {@link SortOrder} object given the list of {@link QueryOrderBy} + * objects. + * + * @param orderList a list of {@link QueryOrderBy} elements + * @param expressionCount the number of columns in the query + */ + void prepareOrder(ArrayList orderList, int expressionCount) { + int size = orderList.size(); + int[] index = new int[size]; + int[] sortType = new int[size]; + for (int i = 0; i < size; i++) { + QueryOrderBy o = orderList.get(i); + int idx; + boolean reverse = false; + Value v = o.columnIndexExpr.getValue(null); + if (v == ValueNull.INSTANCE) { + // parameter not yet set - order by first column + idx = 0; + } else { + idx = v.getInt(); + if (idx < 0) { + reverse = true; + idx = -idx; + } + idx -= 1; + if (idx < 0 || idx >= expressionCount) { + throw DbException.get(ErrorCode.ORDER_BY_NOT_IN_RESULT, Integer.toString(idx + 1)); + } + } + index[i] = idx; + int type = o.sortType; + if (reverse) { + // TODO NULLS FIRST / LAST should be inverted too? + type ^= SortOrder.DESCENDING; + } + sortType[i] = type; + } + sort = new SortOrder(session, index, sortType, orderList); + this.orderList = null; + } + + /** + * Removes constant expressions from the sort order. + * + * Some constants are detected only after optimization of expressions, this + * method removes them from the sort order only. They are currently + * preserved in the list of expressions. 
+ */ + void cleanupOrder() { + int sourceIndexes[] = sort.getQueryColumnIndexes(); + int count = sourceIndexes.length; + int constants = 0; + for (int i = 0; i < count; i++) { + if (expressions.get(sourceIndexes[i]).isConstant()) { + constants++; + } + } + if (constants == 0) { + return; + } + if (constants == count) { + sort = null; + return; + } + int size = count - constants; + int[] indexes = new int[size]; + int[] sortTypes = new int[size]; + int[] sourceSortTypes = sort.getSortTypes(); + ArrayList orderList = sort.getOrderList(); + for (int i = 0, j = 0; j < size; i++) { + if (!expressions.get(sourceIndexes[i]).isConstant()) { + indexes[j] = sourceIndexes[i]; + sortTypes[j] = sourceSortTypes[i]; + j++; + } else { + orderList.remove(j); + } + } + sort = new SortOrder(session, indexes, sortTypes, orderList); + } + + @Override + public int getType() { + return CommandInterface.SELECT; + } + + public void setOffset(Expression offset) { + this.offsetExpr = offset; + } + + public Expression getOffset() { + return offsetExpr; + } + + public void setFetch(Expression fetch) { + this.fetchExpr = fetch; + } + + public Expression getFetch() { + return fetchExpr; + } + + public void setFetchPercent(boolean fetchPercent) { + this.fetchPercent = fetchPercent; + } + + public boolean isFetchPercent() { + return fetchPercent; + } + + public void setWithTies(boolean withTies) { + this.withTies = withTies; + } + + public boolean isWithTies() { + return withTies; + } + + /** + * Add a parameter to the parameter list. 
+ * + * @param param the parameter to add + */ + void addParameter(Parameter param) { + if (parameters == null) { + parameters = Utils.newSmallArrayList(); + } + parameters.add(param); + } + + public final long getMaxDataModificationId() { + ExpressionVisitor visitor = ExpressionVisitor.getMaxModificationIdVisitor(); + isEverything(visitor); + return Math.max(visitor.getMaxDataModificationId(), session.getSnapshotDataModificationId()); + } + + /** + * Appends ORDER BY, OFFSET, and FETCH clauses to the plan. + * + * @param builder query plan string builder. + * @param sqlFlags formatting flags + * @param expressions the array of expressions + */ + void appendEndOfQueryToSQL(StringBuilder builder, int sqlFlags, Expression[] expressions) { + if (sort != null) { + sort.getSQL(builder.append("\nORDER BY "), expressions, visibleColumnCount, sqlFlags); + } else if (orderList != null) { + builder.append("\nORDER BY "); + for (int i = 0, l = orderList.size(); i < l; i++) { + if (i > 0) { + builder.append(", "); + } + orderList.get(i).getSQL(builder, sqlFlags); + } + } + if (offsetExpr != null) { + String count = offsetExpr.getSQL(sqlFlags, WITHOUT_PARENTHESES); + builder.append("\nOFFSET ").append(count).append("1".equals(count) ? " ROW" : " ROWS"); + } + if (fetchExpr != null) { + builder.append("\nFETCH ").append(offsetExpr != null ? "NEXT" : "FIRST"); + String count = fetchExpr.getSQL(sqlFlags, WITHOUT_PARENTHESES); + boolean withCount = fetchPercent || !"1".equals(count); + if (withCount) { + builder.append(' ').append(count); + if (fetchPercent) { + builder.append(" PERCENT"); + } + } + builder.append(!withCount ? " ROW" : " ROWS") + .append(withTies ? " WITH TIES" : " ONLY"); + } + } + + /** + * Evaluates OFFSET and FETCH expressions. 
+ * + * @param maxRows + * additional limit + * @return the evaluated values + */ + OffsetFetch getOffsetFetch(long maxRows) { + long offset; + if (offsetExpr != null) { + Value v = offsetExpr.getValue(session); + if (v == ValueNull.INSTANCE || (offset = v.getLong()) < 0) { + throw DbException.getInvalidValueException("result OFFSET", v); + } + } else { + offset = 0; + } + long fetch = maxRows == 0 ? -1 : maxRows; + if (fetchExpr != null) { + Value v = fetchExpr.getValue(session); + long l; + if (v == ValueNull.INSTANCE || (l = v.getLong()) < 0) { + throw DbException.getInvalidValueException("result FETCH", v); + } + fetch = fetch < 0 ? l : Math.min(l, fetch); + } + boolean fetchPercent = this.fetchPercent; + if (fetchPercent) { + if (fetch > 100) { + throw DbException.getInvalidValueException("result FETCH PERCENT", fetch); + } + // 0 PERCENT means 0 + if (fetch == 0) { + fetchPercent = false; + } + } + return new OffsetFetch(offset, fetch, fetchPercent); + } + + /** + * Applies limits, if any, to a result and makes it ready for value + * retrieval. + * + * @param result + * the result + * @param offset + * OFFSET value + * @param fetch + * FETCH value + * @param fetchPercent + * whether FETCH value is a PERCENT value + * @param target + * target result or null + * @return the result or null + */ + LocalResult finishResult(LocalResult result, long offset, long fetch, boolean fetchPercent, ResultTarget target) { + if (offset != 0) { + result.setOffset(offset); + } + if (fetch >= 0) { + result.setLimit(fetch); + result.setFetchPercent(fetchPercent); + if (withTies) { + result.setWithTies(sort); + } + } + result.done(); + if (randomAccessResult && !distinct) { + result = convertToDistinct(result); + } + if (target != null) { + while (result.next()) { + target.addRow(result.currentRow()); + } + result.close(); + return null; + } + return result; + } + + /** + * Convert a result into a distinct result, using the current columns. 
+ * + * @param result the source + * @return the distinct result + */ + LocalResult convertToDistinct(ResultInterface result) { + LocalResult distinctResult = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + distinctResult.setDistinct(); + result.reset(); + while (result.next()) { + distinctResult.addRow(result.currentRow()); + } + result.close(); + distinctResult.done(); + return distinctResult; + } + + /** + * Converts this query to a table or a view. + * + * @param alias alias name for the view + * @param columnTemplates column templates, or {@code null} + * @param parameters the parameters + * @param forCreateView if true, a system session will be used for the view + * @param topQuery the top level query + * @return the table or the view + */ + public Table toTable(String alias, Column[] columnTemplates, ArrayList parameters, + boolean forCreateView, Query topQuery) { + setParameterList(new ArrayList<>(parameters)); + if (!checkInit) { + init(); + } + return TableView.createTempView(forCreateView ? session.getDatabase().getSystemSession() : session, + session.getUser(), alias, columnTemplates, this, topQuery); + } + + @Override + public void collectDependencies(HashSet dependencies) { + ExpressionVisitor visitor = ExpressionVisitor.getDependenciesVisitor(dependencies); + isEverything(visitor); + } + + /** + * Check if this query will always return the same value and has no side + * effects. + * + * @return if this query will always return the same value and has no side + * effects. + */ + public boolean isConstantQuery() { + return !hasOrder() && (offsetExpr == null || offsetExpr.isConstant()) + && (fetchExpr == null || fetchExpr.isConstant()); + } + + /** + * If this query is determined as a single-row query, returns a replacement + * expression. 
+ * + * @return the expression, or {@code null} + */ + public Expression getIfSingleRow() { + return null; + } + +} diff --git a/h2/src/main/org/h2/command/query/QueryOrderBy.java b/h2/src/main/org/h2/command/query/QueryOrderBy.java new file mode 100644 index 0000000000..8606f30a69 --- /dev/null +++ b/h2/src/main/org/h2/command/query/QueryOrderBy.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import org.h2.expression.Expression; +import org.h2.result.SortOrder; + +/** + * Describes one element of the ORDER BY clause of a query. + */ +public class QueryOrderBy { + + /** + * The order by expression. + */ + public Expression expression; + + /** + * The column index expression. This can be a column index number (1 meaning + * the first column of the select list) or a parameter (the parameter is a + * number representing the column index number). + */ + public Expression columnIndexExpr; + + /** + * Sort type for this column. + */ + public int sortType; + + /** + * Appends the order by expression to the specified builder. + * + * @param builder the string builder + * @param sqlFlags formatting flags + */ + public void getSQL(StringBuilder builder, int sqlFlags) { + (expression != null ? expression : columnIndexExpr).getUnenclosedSQL(builder, sqlFlags); + SortOrder.typeToString(builder, sortType); + } + +} diff --git a/h2/src/main/org/h2/command/query/Select.java b/h2/src/main/org/h2/command/query/Select.java new file mode 100644 index 0000000000..5b1b730dd1 --- /dev/null +++ b/h2/src/main/org/h2/command/query/Select.java @@ -0,0 +1,1927 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.ADD_PLAN_INFORMATION; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.Map.Entry; +import org.h2.api.ErrorCode; +import org.h2.api.Trigger; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.Mode.ExpressionNames; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.Wildcard; +import org.h2.expression.analysis.DataAnalysisOperation; +import org.h2.expression.analysis.Window; +import org.h2.expression.condition.Comparison; +import org.h2.expression.condition.ConditionAndOr; +import org.h2.expression.condition.ConditionLocalAndGlobal; +import org.h2.expression.function.CoalesceFunction; +import org.h2.index.Cursor; +import org.h2.index.Index; +import org.h2.index.ViewIndex; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.LazyResult; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.IndexColumn; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.table.TableType; +import org.h2.table.TableView; +import org.h2.util.ParserUtil; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import 
org.h2.value.Value; +import org.h2.value.ValueRow; + +/** + * This class represents a simple SELECT statement. + * + * For each select statement, + * visibleColumnCount <= distinctColumnCount <= expressionCount. + * The expression list count could include ORDER BY and GROUP BY expressions + * that are not in the select list. + * + * The call sequence is init(), mapColumns() if it's a subquery, prepare(). + * + * @author Thomas Mueller + * @author Joel Turkel (Group sorted query) + */ +public class Select extends Query { + + /** + * The main (top) table filter. + */ + TableFilter topTableFilter; + + private final ArrayList filters = Utils.newSmallArrayList(); + private final ArrayList topFilters = Utils.newSmallArrayList(); + + /** + * Parent select for selects in table filters. + */ + private Select parentSelect; + + /** + * WHERE condition. + */ + private Expression condition; + + /** + * HAVING condition. + */ + private Expression having; + + /** + * QUALIFY condition. + */ + private Expression qualify; + + /** + * {@code DISTINCT ON(...)} expressions. + */ + private Expression[] distinctExpressions; + + private int[] distinctIndexes; + + private ArrayList group; + + /** + * The indexes of the group-by columns. + */ + int[] groupIndex; + + /** + * Whether a column in the expression list is part of a group-by. + */ + boolean[] groupByExpression; + + /** + * Grouped data for aggregates. + */ + SelectGroups groupData; + + private int havingIndex; + + private int qualifyIndex; + + private int[] groupByCopies; + + /** + * Whether this SELECT is an explicit table (TABLE tableName). It is used in + * {@link #getPlanSQL(int)} to generate SQL similar to original query. + */ + private boolean isExplicitTable; + + /** + * This flag is set when SELECT statement contains (non-window) aggregate + * functions, GROUP BY clause or HAVING clause. 
+ */ + boolean isGroupQuery; + private boolean isGroupSortedQuery; + private boolean isWindowQuery; + private boolean isForUpdate; + private double cost; + private boolean isQuickAggregateQuery, isDistinctQuery; + private boolean sortUsingIndex; + + private boolean isGroupWindowStage2; + + private HashMap windows; + + public Select(SessionLocal session, Select parentSelect) { + super(session); + this.parentSelect = parentSelect; + } + + @Override + public boolean isUnion() { + return false; + } + + /** + * Add a table to the query. + * + * @param filter the table to add + * @param isTop if the table can be the first table in the query plan + */ + public void addTableFilter(TableFilter filter, boolean isTop) { + // Oracle doesn't check on duplicate aliases + // String alias = filter.getAlias(); + // if (filterNames.contains(alias)) { + // throw Message.getSQLException( + // ErrorCode.DUPLICATE_TABLE_ALIAS, alias); + // } + // filterNames.add(alias); + filters.add(filter); + if (isTop) { + topFilters.add(filter); + } + } + + public ArrayList getTopFilters() { + return topFilters; + } + + public void setExpressions(ArrayList expressions) { + this.expressions = expressions; + } + + /** + * Convert this SELECT to an explicit table (TABLE tableName). + */ + public void setExplicitTable() { + setWildcard(); + isExplicitTable = true; + } + + /** + * Sets a wildcard expression as in "SELECT * FROM TEST". + */ + public void setWildcard() { + expressions = new ArrayList<>(1); + expressions.add(new Wildcard(null, null)); + } + + /** + * Set when SELECT statement contains (non-window) aggregate functions, + * GROUP BY clause or HAVING clause. + */ + public void setGroupQuery() { + isGroupQuery = true; + } + + /** + * Called if this query contains window functions. 
+ */ + public void setWindowQuery() { + isWindowQuery = true; + } + + public void setGroupBy(ArrayList group) { + this.group = group; + } + + public ArrayList getGroupBy() { + return group; + } + + /** + * Get the group data if there is currently a group-by active. + * + * @param window is this a window function + * @return the grouped data + */ + public SelectGroups getGroupDataIfCurrent(boolean window) { + return groupData != null && (window || groupData.isCurrentGroup()) ? groupData : null; + } + + /** + * Set the distinct flag. + */ + public void setDistinct() { + if (distinctExpressions != null) { + throw DbException.getUnsupportedException("DISTINCT ON together with DISTINCT"); + } + distinct = true; + } + + /** + * Set the DISTINCT ON expressions. + * + * @param distinctExpressions array of expressions + */ + public void setDistinct(Expression[] distinctExpressions) { + if (distinct) { + throw DbException.getUnsupportedException("DISTINCT ON together with DISTINCT"); + } + this.distinctExpressions = distinctExpressions; + } + + @Override + public boolean isAnyDistinct() { + return distinct || distinctExpressions != null; + } + + /** + * Adds a named window definition. + * + * @param name name + * @param window window definition + * @return true if a new definition was added, false if old definition was replaced + */ + public boolean addWindow(String name, Window window) { + if (windows == null) { + windows = new HashMap<>(); + } + return windows.put(name, window) == null; + } + + /** + * Returns a window with specified name, or null. + * + * @param name name of the window + * @return the window with specified name, or null + */ + public Window getWindow(String name) { + return windows != null ? windows.get(name) : null; + } + + /** + * Add a condition to the list of conditions. 
+ * + * @param cond the condition to add + */ + public void addCondition(Expression cond) { + if (condition == null) { + condition = cond; + } else { + condition = new ConditionAndOr(ConditionAndOr.AND, cond, condition); + } + } + + public Expression getCondition() { + return condition; + } + + private LazyResult queryGroupSorted(int columnCount, ResultTarget result, long offset, boolean quickOffset) { + LazyResultGroupSorted lazyResult = new LazyResultGroupSorted(expressionArray, columnCount); + skipOffset(lazyResult, offset, quickOffset); + if (result == null) { + return lazyResult; + } + while (lazyResult.next()) { + result.addRow(lazyResult.currentRow()); + } + return null; + } + + /** + * Create a row with the current values, for queries with group-sort. + * + * @param keyValues the key values + * @param columnCount the number of columns + * @return the row + */ + Value[] createGroupSortedRow(Value[] keyValues, int columnCount) { + Value[] row = constructGroupResultRow(keyValues, columnCount); + if (isHavingNullOrFalse(row)) { + return null; + } + return rowForResult(row, columnCount); + } + + /** + * Removes HAVING and QUALIFY columns from the row. 
+ * + * @param row + * the complete row + * @param columnCount + * the number of columns to keep + * @return the same or the truncated row + */ + private Value[] rowForResult(Value[] row, int columnCount) { + if (columnCount == resultColumnCount) { + return row; + } + return Arrays.copyOf(row, resultColumnCount); + } + + private boolean isHavingNullOrFalse(Value[] row) { + return havingIndex >= 0 && !row[havingIndex].isTrue(); + } + + private Index getGroupSortedIndex() { + if (groupIndex == null || groupByExpression == null) { + return null; + } + ArrayList indexes = topTableFilter.getTable().getIndexes(); + if (indexes != null) { + for (Index index : indexes) { + if (index.getIndexType().isScan()) { + continue; + } + if (index.getIndexType().isHash()) { + // does not allow scanning entries + continue; + } + if (isGroupSortedIndex(topTableFilter, index)) { + return index; + } + } + } + return null; + } + + private boolean isGroupSortedIndex(TableFilter tableFilter, Index index) { + // check that all the GROUP BY expressions are part of the index + Column[] indexColumns = index.getColumns(); + // also check that the first columns in the index are grouped + boolean[] grouped = new boolean[indexColumns.length]; + outerLoop: + for (int i = 0, size = expressions.size(); i < size; i++) { + if (!groupByExpression[i]) { + continue; + } + Expression expr = expressions.get(i).getNonAliasExpression(); + if (!(expr instanceof ExpressionColumn)) { + return false; + } + ExpressionColumn exprCol = (ExpressionColumn) expr; + for (int j = 0; j < indexColumns.length; ++j) { + if (tableFilter == exprCol.getTableFilter()) { + if (indexColumns[j].equals(exprCol.getColumn())) { + grouped[j] = true; + continue outerLoop; + } + } + } + // We didn't find a matching index column + // for one group by expression + return false; + } + // check that the first columns in the index are grouped + // good: index(a, b, c); group by b, a + // bad: index(a, b, c); group by a, c + for (int i = 1; i < 
grouped.length; i++) { + if (!grouped[i - 1] && grouped[i]) { + return false; + } + } + return true; + } + + boolean isConditionMetForUpdate() { + if (isConditionMet()) { + int count = filters.size(); + boolean notChanged = true; + for (int i = 0; i < count; i++) { + TableFilter tableFilter = filters.get(i); + if (!tableFilter.isJoinOuter() && !tableFilter.isJoinOuterIndirect()) { + Row row = tableFilter.get(); + Table table = tableFilter.getTable(); + // Views, function tables, links, etc. do not support locks + if (table.isRowLockable()) { + Row lockedRow = table.lockRow(session, row); + if (lockedRow == null) { + return false; + } + if (!row.hasSharedData(lockedRow)) { + tableFilter.set(lockedRow); + notChanged = false; + } + } + } + } + return notChanged || isConditionMet(); + } + return false; + } + + boolean isConditionMet() { + return condition == null || condition.getBooleanValue(session); + } + + private void queryWindow(int columnCount, LocalResult result, long offset, boolean quickOffset) { + initGroupData(columnCount); + try { + gatherGroup(columnCount, DataAnalysisOperation.STAGE_WINDOW); + processGroupResult(columnCount, result, offset, quickOffset, false); + } finally { + groupData.reset(); + } + } + + private void queryGroupWindow(int columnCount, LocalResult result, long offset, boolean quickOffset) { + initGroupData(columnCount); + try { + gatherGroup(columnCount, DataAnalysisOperation.STAGE_GROUP); + try { + isGroupWindowStage2 = true; + while (groupData.next() != null) { + if (havingIndex < 0 || expressions.get(havingIndex).getBooleanValue(session)) { + updateAgg(columnCount, DataAnalysisOperation.STAGE_WINDOW); + } else { + groupData.remove(); + } + } + groupData.done(); + processGroupResult(columnCount, result, offset, quickOffset, /* Having was performed earlier */ false); + } finally { + isGroupWindowStage2 = false; + } + } finally { + groupData.reset(); + } + } + + private void queryGroup(int columnCount, LocalResult result, long offset, 
boolean quickOffset) { + initGroupData(columnCount); + try { + gatherGroup(columnCount, DataAnalysisOperation.STAGE_GROUP); + processGroupResult(columnCount, result, offset, quickOffset, true); + } finally { + groupData.reset(); + } + } + + private void initGroupData(int columnCount) { + if (groupData == null) { + setGroupData(SelectGroups.getInstance(session, expressions, isGroupQuery, groupIndex)); + } else { + updateAgg(columnCount, DataAnalysisOperation.STAGE_RESET); + } + groupData.reset(); + } + + void setGroupData(final SelectGroups groupData) { + this.groupData = groupData; + topTableFilter.visit(f -> { + Select s = f.getSelect(); + if (s != null) { + s.groupData = groupData; + } + }); + } + + private void gatherGroup(int columnCount, int stage) { + long rowNumber = 0; + setCurrentRowNumber(0); + while (topTableFilter.next()) { + setCurrentRowNumber(rowNumber + 1); + if (isForUpdate ? isConditionMetForUpdate() : isConditionMet()) { + rowNumber++; + groupData.nextSource(); + updateAgg(columnCount, stage); + } + } + groupData.done(); + } + + + /** + * Update any aggregate expressions with the query stage. 
+ * @param columnCount number of columns + * @param stage see STAGE_RESET/STAGE_GROUP/STAGE_WINDOW in DataAnalysisOperation + */ + void updateAgg(int columnCount, int stage) { + for (int i = 0; i < columnCount; i++) { + if ((groupByExpression == null || !groupByExpression[i]) + && (groupByCopies == null || groupByCopies[i] < 0)) { + Expression expr = expressions.get(i); + expr.updateAggregate(session, stage); + } + } + } + + private void processGroupResult(int columnCount, LocalResult result, long offset, boolean quickOffset, + boolean withHaving) { + for (ValueRow currentGroupsKey; (currentGroupsKey = groupData.next()) != null;) { + Value[] row = constructGroupResultRow(currentGroupsKey.getList(), columnCount); + if (withHaving && isHavingNullOrFalse(row)) { + continue; + } + if (qualifyIndex >= 0 && !row[qualifyIndex].isTrue()) { + continue; + } + if (quickOffset && offset > 0) { + offset--; + continue; + } + result.addRow(rowForResult(row, columnCount)); + } + } + + private Value[] constructGroupResultRow(Value[] keyValues, int columnCount) { + Value[] row = new Value[columnCount]; + if (groupIndex != null) { + for (int i = 0, l = groupIndex.length; i < l; i++) { + row[groupIndex[i]] = keyValues[i]; + } + } + for (int i = 0; i < columnCount; i++) { + if (groupByExpression != null && groupByExpression[i]) { + continue; + } + if (groupByCopies != null) { + int original = groupByCopies[i]; + if (original >= 0) { + row[i] = row[original]; + continue; + } + } + row[i] = expressions.get(i).getValue(session); + } + return row; + } + + /** + * Get the index that matches the ORDER BY list, if one exists. This is to + * avoid running a separate ORDER BY if an index can be used. 
This is + * specially important for large result sets, if only the first few rows are + * important (LIMIT is used) + * + * @return the index if one is found + */ + private Index getSortIndex() { + if (sort == null) { + return null; + } + ArrayList sortColumns = Utils.newSmallArrayList(); + int[] queryColumnIndexes = sort.getQueryColumnIndexes(); + int queryIndexesLength = queryColumnIndexes.length; + int[] sortIndex = new int[queryIndexesLength]; + for (int i = 0, j = 0; i < queryIndexesLength; i++) { + int idx = queryColumnIndexes[i]; + if (idx < 0 || idx >= expressions.size()) { + throw DbException.getInvalidValueException("ORDER BY", idx + 1); + } + Expression expr = expressions.get(idx); + expr = expr.getNonAliasExpression(); + if (expr.isConstant()) { + continue; + } + if (!(expr instanceof ExpressionColumn)) { + return null; + } + ExpressionColumn exprCol = (ExpressionColumn) expr; + if (exprCol.getTableFilter() != topTableFilter) { + return null; + } + sortColumns.add(exprCol.getColumn()); + sortIndex[j++] = i; + } + Column[] sortCols = sortColumns.toArray(new Column[0]); + if (sortCols.length == 0) { + // sort just on constants - can use scan index + return topTableFilter.getTable().getScanIndex(session); + } + ArrayList list = topTableFilter.getTable().getIndexes(); + if (list != null) { + int[] sortTypes = sort.getSortTypesWithNullOrdering(); + DefaultNullOrdering defaultNullOrdering = session.getDatabase().getDefaultNullOrdering(); + loop: for (Index index : list) { + if (index.getCreateSQL() == null) { + // can't use the scan index + continue; + } + if (index.getIndexType().isHash()) { + continue; + } + IndexColumn[] indexCols = index.getIndexColumns(); + if (indexCols.length < sortCols.length) { + continue; + } + for (int j = 0; j < sortCols.length; j++) { + // the index and the sort order must start + // with the exact same columns + IndexColumn idxCol = indexCols[j]; + Column sortCol = sortCols[j]; + if (idxCol.column != sortCol) { + continue loop; 
+ } + int sortType = sortTypes[sortIndex[j]]; + if (sortCol.isNullable() + ? defaultNullOrdering.addExplicitNullOrdering(idxCol.sortType) != sortType + : (idxCol.sortType & SortOrder.DESCENDING) != (sortType & SortOrder.DESCENDING)) { + continue loop; + } + } + return index; + } + } + if (sortCols.length == 1 && sortCols[0].getColumnId() == -1) { + // special case: order by _ROWID_ + Index index = topTableFilter.getTable().getScanIndex(session); + if (index.isRowIdIndex()) { + return index; + } + } + return null; + } + + private void queryDistinct(ResultTarget result, long offset, long limitRows, boolean withTies, + boolean quickOffset) { + if (limitRows > 0 && offset > 0) { + limitRows += offset; + if (limitRows < 0) { + // Overflow + limitRows = Long.MAX_VALUE; + } + } + long rowNumber = 0; + setCurrentRowNumber(0); + Index index = topTableFilter.getIndex(); + SearchRow first = null; + int columnIndex = index.getColumns()[0].getColumnId(); + if (!quickOffset) { + offset = 0; + } + while (true) { + setCurrentRowNumber(++rowNumber); + Cursor cursor = index.findNext(session, first, null); + if (!cursor.next()) { + break; + } + SearchRow found = cursor.getSearchRow(); + Value value = found.getValue(columnIndex); + if (first == null) { + first = index.getRowFactory().createRow(); + } + first.setValue(columnIndex, value); + if (offset > 0) { + offset--; + continue; + } + result.addRow(value); + if ((sort == null || sortUsingIndex) && limitRows > 0 && rowNumber >= limitRows && !withTies) { + break; + } + } + } + + private LazyResult queryFlat(int columnCount, ResultTarget result, long offset, long limitRows, boolean withTies, + boolean quickOffset) { + if (limitRows > 0 && offset > 0 && !quickOffset) { + limitRows += offset; + if (limitRows < 0) { + // Overflow + limitRows = Long.MAX_VALUE; + } + } + LazyResultQueryFlat lazyResult = new LazyResultQueryFlat(expressionArray, columnCount, isForUpdate); + skipOffset(lazyResult, offset, quickOffset); + if (result == null) { 
+ return lazyResult; + } + if (limitRows < 0 || sort != null && !sortUsingIndex || withTies && !quickOffset) { + limitRows = Long.MAX_VALUE; + } + Value[] row = null; + while (result.getRowCount() < limitRows && lazyResult.next()) { + row = lazyResult.currentRow(); + result.addRow(row); + } + if (limitRows != Long.MAX_VALUE && withTies && sort != null && row != null) { + Value[] expected = row; + while (lazyResult.next()) { + row = lazyResult.currentRow(); + if (sort.compare(expected, row) != 0) { + break; + } + result.addRow(row); + } + result.limitsWereApplied(); + } + return null; + } + + private static void skipOffset(LazyResultSelect lazyResult, long offset, boolean quickOffset) { + if (quickOffset) { + while (offset > 0 && lazyResult.skip()) { + offset--; + } + } + } + + private void queryQuick(int columnCount, ResultTarget result, boolean skipResult) { + Value[] row = new Value[columnCount]; + for (int i = 0; i < columnCount; i++) { + Expression expr = expressions.get(i); + row[i] = expr.getValue(session); + } + if (!skipResult) { + result.addRow(row); + } + } + + @Override + protected ResultInterface queryWithoutCache(long maxRows, ResultTarget target) { + disableLazyForJoinSubqueries(topTableFilter); + OffsetFetch offsetFetch = getOffsetFetch(maxRows); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; + boolean lazy = session.isLazyQueryExecution() && + target == null && !isForUpdate && !isQuickAggregateQuery && + fetch != 0 && !fetchPercent && !withTies && offset == 0 && isReadOnly(); + int columnCount = expressions.size(); + LocalResult result = null; + if (!lazy && (target == null || + !session.getDatabase().getSettings().optimizeInsertFromSelect)) { + result = createLocalResult(result); + } + // Do not add rows before OFFSET to result if possible + boolean quickOffset = !fetchPercent; + if (sort != null && (!sortUsingIndex || isAnyDistinct())) { + result = createLocalResult(result); 
+ result.setSortOrder(sort); + if (!sortUsingIndex) { + quickOffset = false; + } + } + if (distinct) { + if (!isDistinctQuery) { + quickOffset = false; + result = createLocalResult(result); + result.setDistinct(); + } + } else if (distinctExpressions != null) { + quickOffset = false; + result = createLocalResult(result); + result.setDistinct(distinctIndexes); + } + if (isWindowQuery || isGroupQuery && !isGroupSortedQuery) { + result = createLocalResult(result); + } + if (!lazy && (fetch >= 0 || offset > 0)) { + result = createLocalResult(result); + } + topTableFilter.startQuery(session); + topTableFilter.reset(); + topTableFilter.lock(session); + ResultTarget to = result != null ? result : target; + lazy &= to == null; + LazyResult lazyResult = null; + if (fetch != 0) { + // Cannot apply limit now if percent is specified + long limit = fetchPercent ? -1 : fetch; + if (isQuickAggregateQuery) { + queryQuick(columnCount, to, quickOffset && offset > 0); + } else if (isWindowQuery) { + if (isGroupQuery) { + queryGroupWindow(columnCount, result, offset, quickOffset); + } else { + queryWindow(columnCount, result, offset, quickOffset); + } + } else if (isGroupQuery) { + if (isGroupSortedQuery) { + lazyResult = queryGroupSorted(columnCount, to, offset, quickOffset); + } else { + queryGroup(columnCount, result, offset, quickOffset); + } + } else if (isDistinctQuery) { + queryDistinct(to, offset, limit, withTies, quickOffset); + } else { + lazyResult = queryFlat(columnCount, to, offset, limit, withTies, quickOffset); + } + if (quickOffset) { + offset = 0; + } + } + assert lazy == (lazyResult != null) : lazy; + if (lazyResult != null) { + if (fetch > 0) { + lazyResult.setLimit(fetch); + } + if (randomAccessResult) { + return convertToDistinct(lazyResult); + } else { + return lazyResult; + } + } + if (result != null) { + return finishResult(result, offset, fetch, fetchPercent, target); + } + return null; + } + + private void disableLazyForJoinSubqueries(final TableFilter top) { 
+ if (session.isLazyQueryExecution()) { + top.visit(f -> { + if (f != top && f.getTable().getTableType() == TableType.VIEW) { + ViewIndex idx = (ViewIndex) f.getIndex(); + if (idx != null && idx.getQuery() != null) { + idx.getQuery().setNeverLazy(true); + } + } + }); + } + } + + private LocalResult createLocalResult(LocalResult old) { + return old != null ? old : new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + } + + private void expandColumnList() { + // the expressions may change within the loop + for (int i = 0; i < expressions.size();) { + Expression expr = expressions.get(i); + if (!(expr instanceof Wildcard)) { + i++; + continue; + } + expressions.remove(i); + Wildcard w = (Wildcard) expr; + String tableAlias = w.getTableAlias(); + boolean hasExceptColumns = w.getExceptColumns() != null; + HashMap exceptTableColumns = null; + if (tableAlias == null) { + if (hasExceptColumns) { + for (TableFilter filter : filters) { + w.mapColumns(filter, 1, Expression.MAP_INITIAL); + } + exceptTableColumns = w.mapExceptColumns(); + } + for (TableFilter filter : filters) { + i = expandColumnList(filter, i, false, exceptTableColumns); + } + } else { + Database db = session.getDatabase(); + String schemaName = w.getSchemaName(); + TableFilter filter = null; + for (TableFilter f : filters) { + if (db.equalsIdentifiers(tableAlias, f.getTableAlias())) { + if (schemaName == null || db.equalsIdentifiers(schemaName, f.getSchemaName())) { + if (hasExceptColumns) { + w.mapColumns(f, 1, Expression.MAP_INITIAL); + exceptTableColumns = w.mapExceptColumns(); + } + filter = f; + break; + } + } + } + if (filter == null) { + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias); + } + i = expandColumnList(filter, i, true, exceptTableColumns); + } + } + } + + private int expandColumnList(TableFilter filter, int index, boolean forAlias, + HashMap except) { + String schema = filter.getSchemaName(); + String alias = filter.getTableAlias(); + if 
(forAlias) { + for (Column c : filter.getTable().getColumns()) { + index = addExpandedColumn(filter, index, except, schema, alias, c); + } + } else { + LinkedHashMap commonJoinColumns = filter.getCommonJoinColumns(); + if (commonJoinColumns != null) { + TableFilter replacementFilter = filter.getCommonJoinColumnsFilter(); + String replacementSchema = replacementFilter.getSchemaName(); + String replacementAlias = replacementFilter.getTableAlias(); + for (Entry entry : commonJoinColumns.entrySet()) { + Column left = entry.getKey(), right = entry.getValue(); + if (!filter.isCommonJoinColumnToExclude(right) + && (except == null || except.remove(left) == null && except.remove(right) == null)) { + Database database = session.getDatabase(); + Expression e; + if (left == right + || DataType.hasTotalOrdering(left.getType().getValueType()) + && DataType.hasTotalOrdering(right.getType().getValueType())) { + e = new ExpressionColumn(database, replacementSchema, replacementAlias, + replacementFilter.getColumnName(right)); + } else { + e = new Alias(new CoalesceFunction(CoalesceFunction.COALESCE, + new ExpressionColumn(database, schema, alias, filter.getColumnName(left)), + new ExpressionColumn(database, replacementSchema, replacementAlias, + replacementFilter.getColumnName(right))), // + left.getName(), true); + } + expressions.add(index++, e); + } + } + } + for (Column c : filter.getTable().getColumns()) { + if (commonJoinColumns == null || !commonJoinColumns.containsKey(c)) { + if (!filter.isCommonJoinColumnToExclude(c)) { + index = addExpandedColumn(filter, index, except, schema, alias, c); + } + } + } + } + return index; + } + + private int addExpandedColumn(TableFilter filter, int index, HashMap except, + String schema, String alias, Column c) { + if ((except == null || except.remove(c) == null) && c.getVisible()) { + ExpressionColumn ec = new ExpressionColumn(session.getDatabase(), schema, alias, filter.getColumnName(c)); + expressions.add(index++, ec); + } + return index; 
+ } + + @Override + public void init() { + if (checkInit) { + throw DbException.getInternalError(); + } + filters.sort(TableFilter.ORDER_IN_FROM_COMPARATOR); + expandColumnList(); + if ((visibleColumnCount = expressions.size()) > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + ArrayList expressionSQL; + if (distinctExpressions != null || orderList != null || group != null) { + expressionSQL = new ArrayList<>(visibleColumnCount); + for (int i = 0; i < visibleColumnCount; i++) { + Expression expr = expressions.get(i); + expr = expr.getNonAliasExpression(); + expressionSQL.add(expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + } + } else { + expressionSQL = null; + } + if (distinctExpressions != null) { + BitSet set = new BitSet(); + for (Expression e : distinctExpressions) { + set.set(initExpression(expressionSQL, e, false, filters)); + } + int idx = 0, cnt = set.cardinality(); + distinctIndexes = new int[cnt]; + for (int i = 0; i < cnt; i++) { + idx = set.nextSetBit(idx); + distinctIndexes[i] = idx; + idx++; + } + } + if (orderList != null) { + initOrder(expressionSQL, isAnyDistinct(), filters); + } + resultColumnCount = expressions.size(); + if (having != null) { + expressions.add(having); + havingIndex = expressions.size() - 1; + having = null; + } else { + havingIndex = -1; + } + if (qualify != null) { + expressions.add(qualify); + qualifyIndex = expressions.size() - 1; + qualify = null; + } else { + qualifyIndex = -1; + } + + if (withTies && !hasOrder()) { + throw DbException.get(ErrorCode.WITH_TIES_WITHOUT_ORDER_BY); + } + + Database db = session.getDatabase(); + + // first the select list (visible columns), + // then 'ORDER BY' expressions, + // then 'HAVING' expressions, + // and 'GROUP BY' expressions at the end + if (group != null) { + int size = group.size(); + int expSize = expressionSQL.size(); + int fullExpSize = expressions.size(); + if (fullExpSize > expSize) { + 
expressionSQL.ensureCapacity(fullExpSize); + for (int i = expSize; i < fullExpSize; i++) { + expressionSQL.add(expressions.get(i).getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + } + } + groupIndex = new int[size]; + for (int i = 0; i < size; i++) { + Expression expr = group.get(i); + String sql = expr.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES); + int found = -1; + for (int j = 0; j < expSize; j++) { + String s2 = expressionSQL.get(j); + if (db.equalsIdentifiers(s2, sql)) { + found = mergeGroupByExpressions(db, j, expressionSQL, false); + break; + } + } + if (found < 0) { + // special case: GROUP BY a column alias + for (int j = 0; j < expSize; j++) { + Expression e = expressions.get(j); + if (db.equalsIdentifiers(sql, e.getAlias(session, j))) { + found = mergeGroupByExpressions(db, j, expressionSQL, true); + break; + } + sql = expr.getAlias(session, j); + if (db.equalsIdentifiers(sql, e.getAlias(session, j))) { + found = mergeGroupByExpressions(db, j, expressionSQL, true); + break; + } + } + } + if (found < 0) { + int index = expressions.size(); + groupIndex[i] = index; + expressions.add(expr); + } else { + groupIndex[i] = found; + } + } + checkUsed: if (groupByCopies != null) { + for (int i : groupByCopies) { + if (i >= 0) { + break checkUsed; + } + } + groupByCopies = null; + } + groupByExpression = new boolean[expressions.size()]; + for (int gi : groupIndex) { + groupByExpression[gi] = true; + } + group = null; + } + // map columns in select list and condition + for (TableFilter f : filters) { + mapColumns(f, 0); + } + mapCondition(havingIndex); + mapCondition(qualifyIndex); + checkInit = true; + } + + private void mapCondition(int index) { + if (index >= 0) { + Expression expr = expressions.get(index); + SelectListColumnResolver res = new SelectListColumnResolver(this); + expr.mapColumns(res, 0, Expression.MAP_INITIAL); + } + } + + private int mergeGroupByExpressions(Database db, int index, ArrayList expressionSQL, // + boolean scanPrevious) { + + /* + * 
-1: uniqueness of expression is not known yet + * + * -2: expression that is used as a source for a copy or does not have + * copies + * + * >=0: expression is a copy of expression at this index + */ + if (groupByCopies != null) { + int c = groupByCopies[index]; + if (c >= 0) { + return c; + } else if (c == -2) { + return index; + } + } else { + groupByCopies = new int[expressionSQL.size()]; + Arrays.fill(groupByCopies, -1); + } + String sql = expressionSQL.get(index); + if (scanPrevious) { + /* + * If expression was matched using an alias previous expressions may + * be identical. + */ + for (int i = 0; i < index; i++) { + if (db.equalsIdentifiers(sql, expressionSQL.get(i))) { + index = i; + break; + } + } + } + int l = expressionSQL.size(); + for (int i = index + 1; i < l; i++) { + if (db.equalsIdentifiers(sql, expressionSQL.get(i))) { + groupByCopies[i] = index; + } + } + groupByCopies[index] = -2; + return index; + } + + @Override + public void prepare() { + if (isPrepared) { + // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) + return; + } + if (!checkInit) { + throw DbException.getInternalError("not initialized"); + } + if (orderList != null) { + prepareOrder(orderList, expressions.size()); + } + ExpressionNames expressionNames = session.getMode().expressionNames; + if (expressionNames == ExpressionNames.ORIGINAL_SQL || expressionNames == ExpressionNames.POSTGRESQL_STYLE) { + optimizeExpressionsAndPreserveAliases(); + } else { + for (int i = 0; i < expressions.size(); i++) { + expressions.set(i, expressions.get(i).optimize(session)); + } + } + if (sort != null) { + cleanupOrder(); + } + if (condition != null) { + condition = condition.optimizeCondition(session); + if (condition != null) { + for (TableFilter f : filters) { + // outer joins: must not add index conditions such as + // "c is null" - example: + // create table parent(p int primary key) as select 1; + // create table child(c int primary key, pc int); + // insert into child 
values(2, 1); + // select p, c from parent + // left outer join child on p = pc where c is null; + if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { + condition.createIndexConditions(session, f); + } + } + } + } + if (isGroupQuery && groupIndex == null && havingIndex < 0 && qualifyIndex < 0 && condition == null + && filters.size() == 1) { + isQuickAggregateQuery = isEverything(ExpressionVisitor.getOptimizableVisitor(filters.get(0).getTable())); + } + cost = preparePlan(session.isParsingCreateView()); + if (distinct && session.getDatabase().getSettings().optimizeDistinct && + !isGroupQuery && filters.size() == 1 && + expressions.size() == 1 && condition == null) { + Expression expr = expressions.get(0); + expr = expr.getNonAliasExpression(); + if (expr instanceof ExpressionColumn) { + Column column = ((ExpressionColumn) expr).getColumn(); + int selectivity = column.getSelectivity(); + Index columnIndex = topTableFilter.getTable(). + getIndexForColumn(column, false, true); + if (columnIndex != null && + selectivity != Constants.SELECTIVITY_DEFAULT && + selectivity < 20) { + Index current = topTableFilter.getIndex(); + // if another index is faster + if (current == null || current.getIndexType().isScan() || columnIndex == current) { + topTableFilter.setIndex(columnIndex); + isDistinctQuery = true; + } + } + } + } + if (sort != null && !isQuickAggregateQuery && !isGroupQuery) { + Index index = getSortIndex(); + Index current = topTableFilter.getIndex(); + if (index != null && current != null) { + if (current.getIndexType().isScan() || current == index) { + topTableFilter.setIndex(index); + if (!topTableFilter.hasInComparisons()) { + // in(select ...) 
and in(1,2,3) may return the key in + // another order + sortUsingIndex = true; + } + } else if (index.getIndexColumns() != null + && index.getIndexColumns().length >= current + .getIndexColumns().length) { + IndexColumn[] sortColumns = index.getIndexColumns(); + IndexColumn[] currentColumns = current.getIndexColumns(); + boolean swapIndex = false; + for (int i = 0; i < currentColumns.length; i++) { + if (sortColumns[i].column != currentColumns[i].column) { + swapIndex = false; + break; + } + if (sortColumns[i].sortType != currentColumns[i].sortType) { + swapIndex = true; + } + } + if (swapIndex) { + topTableFilter.setIndex(index); + sortUsingIndex = true; + } + } + } + if (sortUsingIndex && isForUpdate && !topTableFilter.getIndex().isRowIdIndex()) { + sortUsingIndex = false; + } + } + if (!isQuickAggregateQuery && isGroupQuery) { + Index index = getGroupSortedIndex(); + if (index != null) { + Index current = topTableFilter.getIndex(); + if (current != null && (current.getIndexType().isScan() || current == index)) { + topTableFilter.setIndex(index); + isGroupSortedQuery = true; + } + } + } + expressionArray = expressions.toArray(new Expression[0]); + isPrepared = true; + } + + private void optimizeExpressionsAndPreserveAliases() { + for (int i = 0; i < expressions.size(); i++) { + Expression e = expressions.get(i); + String alias = e.getAlias(session, i); + e = e.optimize(session); + if (!e.getAlias(session, i).equals(alias)) { + e = new Alias(e, alias, true); + } + expressions.set(i, e); + } + } + + @Override + public double getCost() { + return cost; + } + + @Override + public HashSet
<Table> getTables() { + HashSet<Table>
    set = new HashSet<>(); + for (TableFilter filter : filters) { + set.add(filter.getTable()); + } + return set; + } + + @Override + public void fireBeforeSelectTriggers() { + for (TableFilter filter : filters) { + filter.getTable().fire(session, Trigger.SELECT, true); + } + } + + private double preparePlan(boolean parse) { + TableFilter[] topArray = topFilters.toArray(new TableFilter[0]); + for (TableFilter t : topArray) { + t.createIndexConditions(); + t.setFullCondition(condition); + } + + Optimizer optimizer = new Optimizer(topArray, condition, session); + optimizer.optimize(parse); + topTableFilter = optimizer.getTopFilter(); + double planCost = optimizer.getCost(); + + setEvaluatableRecursive(topTableFilter); + + if (!parse) { + topTableFilter.prepare(); + } + return planCost; + } + + private void setEvaluatableRecursive(TableFilter f) { + for (; f != null; f = f.getJoin()) { + f.setEvaluatable(f, true); + if (condition != null) { + condition.setEvaluatable(f, true); + } + TableFilter n = f.getNestedJoin(); + if (n != null) { + setEvaluatableRecursive(n); + } + Expression on = f.getJoinCondition(); + if (on != null) { + if (!on.isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { + // need to check that all added are bound to a table + on = on.optimize(session); + if (!f.isJoinOuter() && !f.isJoinOuterIndirect()) { + f.removeJoinCondition(); + addCondition(on); + } + } + } + on = f.getFilterCondition(); + if (on != null) { + if (!on.isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { + f.removeFilterCondition(); + addCondition(on); + } + } + // this is only important for subqueries, so they know + // the result columns are evaluatable + for (Expression e : expressions) { + e.setEvaluatable(f, true); + } + } + } + + @Override + public String getPlanSQL(int sqlFlags) { + // can not use the field sqlStatement because the parameter + // indexes may be incorrect: ? 
may be in fact ?2 for a subquery + // but indexes may be set manually as well + Expression[] exprList = expressions.toArray(new Expression[0]); + StringBuilder builder = new StringBuilder(); + for (TableFilter f : topFilters) { + Table t = f.getTable(); + TableView tableView = t instanceof TableView ? (TableView) t : null; + if (tableView != null && tableView.isRecursive() && tableView.isTableExpression()) { + + if (!tableView.isTemporary()) { + // skip the generation of plan SQL for this already recursive persistent CTEs, + // since using a with statement will re-create the common table expression + // views. + } else { + builder.append("WITH RECURSIVE "); + t.getSchema().getSQL(builder, sqlFlags).append('.'); + ParserUtil.quoteIdentifier(builder, t.getName(), sqlFlags).append('('); + Column.writeColumns(builder, t.getColumns(), sqlFlags); + builder.append(") AS "); + t.getSQL(builder, sqlFlags).append('\n'); + } + } + } + if (isExplicitTable) { + builder.append("TABLE "); + filters.get(0).getPlanSQL(builder, false, sqlFlags); + } else { + builder.append("SELECT"); + if (isAnyDistinct()) { + builder.append(" DISTINCT"); + if (distinctExpressions != null) { + Expression.writeExpressions(builder.append(" ON("), distinctExpressions, sqlFlags).append(')'); + } + } + for (int i = 0; i < visibleColumnCount; i++) { + if (i > 0) { + builder.append(','); + } + builder.append('\n'); + StringUtils.indent(builder, exprList[i].getSQL(sqlFlags, WITHOUT_PARENTHESES), 4, false); + } + TableFilter filter = topTableFilter; + if (filter == null) { + int count = topFilters.size(); + if (count != 1 || !topFilters.get(0).isNoFromClauseFilter()) { + builder.append("\nFROM "); + boolean isJoin = false; + for (int i = 0; i < count; i++) { + isJoin = getPlanFromFilter(builder, sqlFlags, topFilters.get(i), isJoin); + } + } + } else if (!filter.isNoFromClauseFilter()) { + getPlanFromFilter(builder.append("\nFROM "), sqlFlags, filter, false); + } + if (condition != null) { + 
getFilterSQL(builder, "\nWHERE ", condition, sqlFlags); + } + if (groupIndex != null) { + builder.append("\nGROUP BY "); + for (int i = 0, l = groupIndex.length; i < l; i++) { + if (i > 0) { + builder.append(", "); + } + exprList[groupIndex[i]].getNonAliasExpression().getUnenclosedSQL(builder, sqlFlags); + } + } else if (group != null) { + builder.append("\nGROUP BY "); + for (int i = 0, l = group.size(); i < l; i++) { + if (i > 0) { + builder.append(", "); + } + group.get(i).getUnenclosedSQL(builder, sqlFlags); + } + } else emptyGroupingSet: if (isGroupQuery && having == null && havingIndex < 0) { + for (int i = 0; i < visibleColumnCount; i++) { + if (containsAggregate(exprList[i])) { + break emptyGroupingSet; + } + } + builder.append("\nGROUP BY ()"); + } + getFilterSQL(builder, "\nHAVING ", exprList, having, havingIndex, sqlFlags); + getFilterSQL(builder, "\nQUALIFY ", exprList, qualify, qualifyIndex, sqlFlags); + } + appendEndOfQueryToSQL(builder, sqlFlags, exprList); + if (isForUpdate) { + builder.append("\nFOR UPDATE"); + } + if ((sqlFlags & ADD_PLAN_INFORMATION) != 0) { + if (isQuickAggregateQuery) { + builder.append("\n/* direct lookup */"); + } + if (isDistinctQuery) { + builder.append("\n/* distinct */"); + } + if (sortUsingIndex) { + builder.append("\n/* index sorted */"); + } + if (isGroupQuery) { + if (isGroupSortedQuery) { + builder.append("\n/* group sorted */"); + } + } + // builder.append("\n/* cost: " + cost + " */"); + } + return builder.toString(); + } + + private static boolean getPlanFromFilter(StringBuilder builder, int sqlFlags, TableFilter f, boolean isJoin) { + do { + if (isJoin) { + builder.append('\n'); + } + f.getPlanSQL(builder, isJoin, sqlFlags); + isJoin = true; + } while ((f = f.getJoin()) != null); + return isJoin; + } + + private static void getFilterSQL(StringBuilder builder, String sql, Expression[] exprList, Expression condition, + int conditionIndex, int sqlFlags) { + if (condition != null) { + getFilterSQL(builder, sql, 
condition, sqlFlags); + } else if (conditionIndex >= 0) { + getFilterSQL(builder, sql, exprList[conditionIndex], sqlFlags); + } + } + + private static void getFilterSQL(StringBuilder builder, String sql, Expression condition, int sqlFlags) { + condition.getUnenclosedSQL(builder.append(sql), sqlFlags); + } + + private static boolean containsAggregate(Expression expression) { + if (expression instanceof DataAnalysisOperation) { + if (((DataAnalysisOperation) expression).isAggregate()) { + return true; + } + } + for (int i = 0, l = expression.getSubexpressionCount(); i < l; i++) { + if (containsAggregate(expression.getSubexpression(i))) { + return true; + } + } + return false; + } + + public void setHaving(Expression having) { + this.having = having; + } + + public Expression getHaving() { + return having; + } + + public void setQualify(Expression qualify) { + this.qualify = qualify; + } + + public Expression getQualify() { + return qualify; + } + + public TableFilter getTopTableFilter() { + return topTableFilter; + } + + @Override + public void setForUpdate(boolean b) { + if (b && (isAnyDistinct() || isGroupQuery)) { + throw DbException.get(ErrorCode.FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT); + } + this.isForUpdate = b; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level) { + for (Expression e : expressions) { + e.mapColumns(resolver, level, Expression.MAP_INITIAL); + } + if (condition != null) { + condition.mapColumns(resolver, level, Expression.MAP_INITIAL); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + for (Expression e : expressions) { + e.setEvaluatable(tableFilter, b); + } + if (condition != null) { + condition.setEvaluatable(tableFilter, b); + } + } + + /** + * Check if this is an aggregate query with direct lookup, for example a + * query of the type SELECT COUNT(*) FROM TEST or + * SELECT MAX(ID) FROM TEST. 
+ * + * @return true if a direct lookup is possible + */ + public boolean isQuickAggregateQuery() { + return isQuickAggregateQuery; + } + + /** + * Checks if this query is a group query. + * + * @return whether this query is a group query. + */ + public boolean isGroupQuery() { + return isGroupQuery; + } + + /** + * Checks if this query contains window functions. + * + * @return whether this query contains window functions + */ + public boolean isWindowQuery() { + return isWindowQuery; + } + + /** + * Checks if window stage of group window query is performed. If true, + * column resolver may not be used. + * + * @return true if window stage of group window query is performed + */ + public boolean isGroupWindowStage2() { + return isGroupWindowStage2; + } + + @Override + public void addGlobalCondition(Parameter param, int columnId, int comparisonType) { + addParameter(param); + Expression comp; + Expression col = expressions.get(columnId); + col = col.getNonAliasExpression(); + if (col.isEverything(ExpressionVisitor.QUERY_COMPARABLE_VISITOR)) { + comp = new Comparison(comparisonType, col, param, false); + } else { + // this condition will always evaluate to true, but need to + // add the parameter, so it can be set later + comp = new Comparison(Comparison.EQUAL_NULL_SAFE, param, param, false); + } + comp = comp.optimize(session); + if (isWindowQuery) { + qualify = addGlobalCondition(qualify, comp); + } else if (isGroupQuery) { + for (int i = 0; groupIndex != null && i < groupIndex.length; i++) { + if (groupIndex[i] == columnId) { + condition = addGlobalCondition(condition, comp); + return; + } + } + if (havingIndex >= 0) { + having = expressions.get(havingIndex); + } + having = addGlobalCondition(having, comp); + } else { + condition = addGlobalCondition(condition, comp); + } + } + + private static Expression addGlobalCondition(Expression condition, Expression additional) { + if (!(condition instanceof ConditionLocalAndGlobal)) { + return new 
ConditionLocalAndGlobal(condition, additional); + } + Expression oldLocal, oldGlobal; + if (condition.getSubexpressionCount() == 1) { + oldLocal = null; + oldGlobal = condition.getSubexpression(0); + } else { + oldLocal = condition.getSubexpression(0); + oldGlobal = condition.getSubexpression(1); + } + return new ConditionLocalAndGlobal(oldLocal, new ConditionAndOr(ConditionAndOr.AND, oldGlobal, additional)); + } + + @Override + public void updateAggregate(SessionLocal s, int stage) { + for (Expression e : expressions) { + e.updateAggregate(s, stage); + } + if (condition != null) { + condition.updateAggregate(s, stage); + } + if (having != null) { + having.updateAggregate(s, stage); + } + if (qualify != null) { + qualify.updateAggregate(s, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: { + if (isForUpdate) { + return false; + } + for (TableFilter f : filters) { + if (!f.getTable().isDeterministic()) { + return false; + } + } + break; + } + case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: { + for (TableFilter f : filters) { + long m = f.getTable().getMaxDataModificationId(); + visitor.addDataModificationId(m); + } + break; + } + case ExpressionVisitor.EVALUATABLE: { + if (!session.getDatabase().getSettings().optimizeEvaluatableSubqueries) { + return false; + } + break; + } + case ExpressionVisitor.GET_DEPENDENCIES: { + for (TableFilter f : filters) { + Table table = f.getTable(); + visitor.addDependency(table); + table.addDependencies(visitor.getDependencies()); + } + break; + } + default: + } + ExpressionVisitor v2 = visitor.incrementQueryLevel(1); + for (Expression e : expressions) { + if (!e.isEverything(v2)) { + return false; + } + } + if (condition != null && !condition.isEverything(v2)) { + return false; + } + if (having != null && !having.isEverything(v2)) { + return false; + } + if (qualify != null && !qualify.isEverything(v2)) { + return 
false; + } + return true; + } + + + @Override + public boolean isCacheable() { + return !isForUpdate; + } + + @Override + public boolean allowGlobalConditions() { + return offsetExpr == null && fetchExpr == null && distinctExpressions == null; + } + + public SortOrder getSortOrder() { + return sort; + } + + /** + * Returns parent select, or null. + * + * @return parent select, or null + */ + public Select getParentSelect() { + return parentSelect; + } + + @Override + public boolean isConstantQuery() { + if (!super.isConstantQuery() || distinctExpressions != null || condition != null || isGroupQuery + || isWindowQuery || !isNoFromClause()) { + return false; + } + for (int i = 0; i < visibleColumnCount; i++) { + if (!expressions.get(i).isConstant()) { + return false; + } + } + return true; + } + + @Override + public Expression getIfSingleRow() { + if (offsetExpr != null || fetchExpr != null || condition != null || isGroupQuery || isWindowQuery + || !isNoFromClause()) { + return null; + } + if (visibleColumnCount == 1) { + return expressions.get(0); + } + Expression[] array = new Expression[visibleColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + array[i] = expressions.get(i); + } + return new ExpressionList(array, false); + } + + private boolean isNoFromClause() { + if (topTableFilter != null) { + return topTableFilter.isNoFromClauseFilter(); + } else if (topFilters.size() == 1) { + return topFilters.get(0).isNoFromClauseFilter(); + } + return false; + } + + /** + * Lazy execution for this select. 
+ */ + private abstract class LazyResultSelect extends LazyResult { + + long rowNumber; + int columnCount; + + LazyResultSelect(Expression[] expressions, int columnCount) { + super(getSession(), expressions); + this.columnCount = columnCount; + setCurrentRowNumber(0); + } + + @Override + public final int getVisibleColumnCount() { + return visibleColumnCount; + } + + @Override + public void reset() { + super.reset(); + topTableFilter.reset(); + setCurrentRowNumber(0); + rowNumber = 0; + } + } + + /** + * Lazy execution for a flat query. + */ + private final class LazyResultQueryFlat extends LazyResultSelect { + + private boolean forUpdate; + + LazyResultQueryFlat(Expression[] expressions, int columnCount, boolean forUpdate) { + super(expressions, columnCount); + this.forUpdate = forUpdate; + } + + @Override + protected Value[] fetchNextRow() { + while (topTableFilter.next()) { + setCurrentRowNumber(rowNumber + 1); + // This method may lock rows + if (forUpdate ? isConditionMetForUpdate() : isConditionMet()) { + ++rowNumber; + Value[] row = new Value[columnCount]; + for (int i = 0; i < columnCount; i++) { + Expression expr = expressions.get(i); + row[i] = expr.getValue(getSession()); + } + return row; + } + } + return null; + } + + @Override + protected boolean skipNextRow() { + while (topTableFilter.next()) { + setCurrentRowNumber(rowNumber + 1); + // This method does not lock rows + if (isConditionMet()) { + ++rowNumber; + return true; + } + } + return false; + } + + } + + /** + * Lazy execution for a group sorted query. 
+ */ + private final class LazyResultGroupSorted extends LazyResultSelect { + + private Value[] previousKeyValues; + + LazyResultGroupSorted(Expression[] expressions, int columnCount) { + super(expressions, columnCount); + if (groupData == null) { + setGroupData(SelectGroups.getInstance(getSession(), Select.this.expressions, isGroupQuery, + groupIndex)); + } else { + updateAgg(columnCount, DataAnalysisOperation.STAGE_RESET); + groupData.resetLazy(); + } + } + + @Override + public void reset() { + super.reset(); + groupData.resetLazy(); + previousKeyValues = null; + } + + @Override + protected Value[] fetchNextRow() { + while (topTableFilter.next()) { + setCurrentRowNumber(rowNumber + 1); + if (isConditionMet()) { + rowNumber++; + int groupSize = groupIndex.length; + Value[] keyValues = new Value[groupSize]; + // update group + for (int i = 0; i < groupSize; i++) { + int idx = groupIndex[i]; + Expression expr = expressions.get(idx); + keyValues[i] = expr.getValue(getSession()); + } + + Value[] row = null; + if (previousKeyValues == null) { + previousKeyValues = keyValues; + groupData.nextLazyGroup(); + } else { + SessionLocal session = getSession(); + for (int i = 0; i < groupSize; i++) { + if (session.compare(previousKeyValues[i], keyValues[i]) != 0) { + row = createGroupSortedRow(previousKeyValues, columnCount); + previousKeyValues = keyValues; + groupData.nextLazyGroup(); + break; + } + } + } + groupData.nextLazyRow(); + updateAgg(columnCount, DataAnalysisOperation.STAGE_GROUP); + if (row != null) { + return row; + } + } + } + Value[] row = null; + if (previousKeyValues != null) { + row = createGroupSortedRow(previousKeyValues, columnCount); + previousKeyValues = null; + } + return row; + } + } + +} diff --git a/h2/src/main/org/h2/command/query/SelectGroups.java b/h2/src/main/org/h2/command/query/SelectGroups.java new file mode 100644 index 0000000000..ef5e1572ab --- /dev/null +++ b/h2/src/main/org/h2/command/query/SelectGroups.java @@ -0,0 +1,433 @@ +/* + * 
Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.analysis.DataAnalysisOperation; +import org.h2.expression.analysis.PartitionData; +import org.h2.value.Value; +import org.h2.value.ValueRow; + +/** + * Grouped data for aggregates. + * + *
<p> + * Call sequence: + * </p> + * <ul> + * <li>{@link #reset()}.</li> + * <li>For each source row {@link #nextSource()} should be invoked.</li> + * <li>{@link #done()}.</li> + * <li>{@link #next()} is invoked inside a loop until it returns null.</li> + * </ul> + * <p> + * Call sequence for lazy group sorted result: + * </p> + * <ul> + * <li>{@link #resetLazy()} (not required before the first execution).</li> + * <li>For each source group {@link #nextLazyGroup()} should be invoked.</li> + * <li>For each source row {@link #nextLazyRow()} should be invoked. Each group + * can have one or more rows.</li> + * </ul>
    + */ +public abstract class SelectGroups { + + private static final class Grouped extends SelectGroups { + + private final int[] groupIndex; + + /** + * Map of group-by key to group-by expression data e.g. AggregateData + */ + private TreeMap groupByData; + + /** + * Key into groupByData that produces currentGroupByExprData. Not used + * in lazy mode. + */ + private ValueRow currentGroupsKey; + + /** + * Cursor for {@link #next()} method. + */ + private Iterator> cursor; + + Grouped(SessionLocal session, ArrayList expressions, int[] groupIndex) { + super(session, expressions); + this.groupIndex = groupIndex; + } + + @Override + public void reset() { + super.reset(); + groupByData = new TreeMap<>(session.getDatabase().getCompareMode()); + currentGroupsKey = null; + cursor = null; + } + + @Override + public void nextSource() { + if (groupIndex == null) { + currentGroupsKey = ValueRow.EMPTY; + } else { + Value[] keyValues = new Value[groupIndex.length]; + // update group + for (int i = 0; i < groupIndex.length; i++) { + int idx = groupIndex[i]; + Expression expr = expressions.get(idx); + keyValues[i] = expr.getValue(session); + } + currentGroupsKey = ValueRow.get(keyValues); + } + Object[] values = groupByData.get(currentGroupsKey); + if (values == null) { + values = createRow(); + groupByData.put(currentGroupsKey, values); + } + currentGroupByExprData = values; + currentGroupRowId++; + } + + @Override + void updateCurrentGroupExprData() { + // this can be null in lazy mode + if (currentGroupsKey != null) { + // since we changed the size of the array, update the object in + // the groups map + groupByData.put(currentGroupsKey, currentGroupByExprData); + } + } + + @Override + public void done() { + super.done(); + if (groupIndex == null && groupByData.size() == 0) { + groupByData.put(ValueRow.EMPTY, createRow()); + } + cursor = groupByData.entrySet().iterator(); + } + + @Override + public ValueRow next() { + if (cursor.hasNext()) { + Map.Entry entry = 
cursor.next(); + currentGroupByExprData = entry.getValue(); + currentGroupRowId++; + return entry.getKey(); + } + return null; + } + + @Override + public void remove() { + cursor.remove(); + currentGroupByExprData = null; + currentGroupRowId--; + } + + @Override + public void resetLazy() { + super.resetLazy(); + currentGroupsKey = null; + } + } + + private static final class Plain extends SelectGroups { + + private ArrayList rows; + + /** + * Cursor for {@link #next()} method. + */ + private Iterator cursor; + + Plain(SessionLocal session, ArrayList expressions) { + super(session, expressions); + } + + @Override + public void reset() { + super.reset(); + rows = new ArrayList<>(); + cursor = null; + } + + @Override + public void nextSource() { + Object[] values = createRow(); + rows.add(values); + currentGroupByExprData = values; + currentGroupRowId++; + } + + @Override + void updateCurrentGroupExprData() { + rows.set(rows.size() - 1, currentGroupByExprData); + } + + @Override + public void done() { + super.done(); + cursor = rows.iterator(); + } + + @Override + public ValueRow next() { + if (cursor.hasNext()) { + currentGroupByExprData = cursor.next(); + currentGroupRowId++; + return ValueRow.EMPTY; + } + return null; + } + } + + /** + * The database session. + */ + final SessionLocal session; + + /** + * The query's column list, including invisible expressions such as order by expressions. + */ + final ArrayList expressions; + + /** + * The array of current group-by expression data e.g. AggregateData. + */ + Object[] currentGroupByExprData; + + /** + * Maps an expression object to an index, to use in accessing the Object[] + * pointed to by groupByData. + */ + private final HashMap exprToIndexInGroupByData = new HashMap<>(); + + /** + * Maps an window expression object to its data. + */ + private final HashMap windowData = new HashMap<>(); + + /** + * Maps an partitioned window expression object to its data. 
+ */ + private final HashMap> windowPartitionData = new HashMap<>(); + + /** + * The id of the current group. + */ + int currentGroupRowId; + + /** + * Creates new instance of grouped data. + * + * @param session + * the session + * @param expressions + * the expressions + * @param isGroupQuery + * is this query is a group query + * @param groupIndex + * the indexes of group expressions, or null + * @return new instance of the grouped data. + */ + public static SelectGroups getInstance(SessionLocal session, ArrayList expressions, + boolean isGroupQuery, int[] groupIndex) { + return isGroupQuery ? new Grouped(session, expressions, groupIndex) : new Plain(session, expressions); + } + + SelectGroups(SessionLocal session, ArrayList expressions) { + this.session = session; + this.expressions = expressions; + } + + /** + * Is there currently a group-by active. + * + * @return {@code true} if there is currently a group-by active, + * otherwise returns {@code false}. + */ + public boolean isCurrentGroup() { + return currentGroupByExprData != null; + } + + /** + * Get the group-by data for the current group and the passed in expression. + * + * @param expr + * expression + * @return expression data or null + */ + public final Object getCurrentGroupExprData(Expression expr) { + Integer index = exprToIndexInGroupByData.get(expr); + if (index == null) { + return null; + } + return currentGroupByExprData[index]; + } + + /** + * Set the group-by data for the current group and the passed in expression. 
+ * + * @param expr + * expression + * @param obj + * expression data to set + */ + public final void setCurrentGroupExprData(Expression expr, Object obj) { + Integer index = exprToIndexInGroupByData.get(expr); + if (index != null) { + assert currentGroupByExprData[index] == null; + currentGroupByExprData[index] = obj; + return; + } + index = exprToIndexInGroupByData.size(); + exprToIndexInGroupByData.put(expr, index); + if (index >= currentGroupByExprData.length) { + currentGroupByExprData = Arrays.copyOf(currentGroupByExprData, currentGroupByExprData.length * 2); + updateCurrentGroupExprData(); + } + currentGroupByExprData[index] = obj; + } + + /** + * Creates new object arrays to holds group-by data. + * + * @return new object array to holds group-by data. + */ + final Object[] createRow() { + return new Object[Math.max(exprToIndexInGroupByData.size(), expressions.size())]; + } + + /** + * Get the window data for the specified expression. + * + * @param expr + * expression + * @param partitionKey + * a key of partition + * @return expression data or null + */ + public final PartitionData getWindowExprData(DataAnalysisOperation expr, Value partitionKey) { + if (partitionKey == null) { + return windowData.get(expr); + } else { + TreeMap map = windowPartitionData.get(expr); + return map != null ? map.get(partitionKey) : null; + } + } + + /** + * Set the window data for the specified expression. 
+ * + * @param expr + * expression + * @param partitionKey + * a key of partition + * @param obj + * window expression data to set + */ + public final void setWindowExprData(DataAnalysisOperation expr, Value partitionKey, PartitionData obj) { + if (partitionKey == null) { + Object old = windowData.put(expr, obj); + assert old == null; + } else { + TreeMap map = windowPartitionData.get(expr); + if (map == null) { + map = new TreeMap<>(session.getDatabase().getCompareMode()); + windowPartitionData.put(expr, map); + } + map.put(partitionKey, obj); + } + } + + /** + * Update group-by data specified by implementation. + */ + abstract void updateCurrentGroupExprData(); + + /** + * Returns identity of the current row. Used by aggregates to check whether + * they already processed this row or not. + * + * @return identity of the current row + */ + public int getCurrentGroupRowId() { + return currentGroupRowId; + } + + /** + * Resets this group data for reuse. + */ + public void reset() { + currentGroupByExprData = null; + exprToIndexInGroupByData.clear(); + windowData.clear(); + windowPartitionData.clear(); + currentGroupRowId = 0; + } + + /** + * Invoked for each source row to evaluate group key and setup all necessary + * data for aggregates. + */ + public abstract void nextSource(); + + /** + * Invoked after all source rows are evaluated. + */ + public void done() { + currentGroupRowId = 0; + } + + /** + * Returns the key of the next group. + * + * @return the key of the next group, or null + */ + public abstract ValueRow next(); + + /** + * Removes the data for the current key. + * + * @see #next() + */ + public void remove() { + throw new UnsupportedOperationException(); + } + + /** + * Resets this group data for reuse in lazy mode. + */ + public void resetLazy() { + currentGroupByExprData = null; + currentGroupRowId = 0; + } + + /** + * Moves group data to the next group in lazy mode. 
+ */ + public void nextLazyGroup() { + currentGroupByExprData = new Object[Math.max(exprToIndexInGroupByData.size(), expressions.size())]; + } + + /** + * Moves group data to the next row in lazy mode. + */ + public void nextLazyRow() { + currentGroupRowId++; + } + +} diff --git a/h2/src/main/org/h2/command/query/SelectListColumnResolver.java b/h2/src/main/org/h2/command/query/SelectListColumnResolver.java new file mode 100644 index 0000000000..ec62787f09 --- /dev/null +++ b/h2/src/main/org/h2/command/query/SelectListColumnResolver.java @@ -0,0 +1,80 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import java.util.ArrayList; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * This class represents a column resolver for the column list of a SELECT + * statement. It is used to resolve select column aliases in the HAVING clause. + * Example: + *
<pre> + * SELECT X/3 AS A, COUNT(*) FROM SYSTEM_RANGE(1, 10) GROUP BY A HAVING A > 2; + * </pre>
    + * + * @author Thomas Mueller + */ +public class SelectListColumnResolver implements ColumnResolver { + + private final Select select; + private final Expression[] expressions; + private final Column[] columns; + + SelectListColumnResolver(Select select) { + this.select = select; + int columnCount = select.getColumnCount(); + columns = new Column[columnCount]; + expressions = new Expression[columnCount]; + ArrayList columnList = select.getExpressions(); + SessionLocal session = select.getSession(); + for (int i = 0; i < columnCount; i++) { + Expression expr = columnList.get(i); + columns[i] = new Column(expr.getAlias(session, i), TypeInfo.TYPE_NULL, null, i); + expressions[i] = expr.getNonAliasExpression(); + } + } + + @Override + public Column[] getColumns() { + return columns; + } + + @Override + public Column findColumn(String name) { + Database db = select.getSession().getDatabase(); + for (Column column : columns) { + if (db.equalsIdentifiers(column.getName(), name)) { + return column; + } + } + return null; + } + + @Override + public Select getSelect() { + return select; + } + + @Override + public Value getValue(Column column) { + return null; + } + + @Override + public Expression optimize(ExpressionColumn expressionColumn, Column column) { + return expressions[column.getColumnId()]; + } + +} diff --git a/h2/src/main/org/h2/command/query/SelectUnion.java b/h2/src/main/org/h2/command/query/SelectUnion.java new file mode 100644 index 0000000000..a1388eccfe --- /dev/null +++ b/h2/src/main/org/h2/command/query/SelectUnion.java @@ -0,0 +1,460 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import java.util.ArrayList; +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.message.DbException; +import org.h2.result.LazyResult; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Represents a union SELECT statement. + */ +public class SelectUnion extends Query { + + public enum UnionType { + /** + * The type of a UNION statement. + */ + UNION, + + /** + * The type of a UNION ALL statement. + */ + UNION_ALL, + + /** + * The type of an EXCEPT statement. + */ + EXCEPT, + + /** + * The type of an INTERSECT statement. + */ + INTERSECT + } + + private final UnionType unionType; + + /** + * The left hand side of the union (the first subquery). + */ + final Query left; + + /** + * The right hand side of the union (the second subquery). 
+ */ + final Query right; + + private boolean isForUpdate; + + public SelectUnion(SessionLocal session, UnionType unionType, Query query, Query right) { + super(session); + this.unionType = unionType; + this.left = query; + this.right = right; + } + + @Override + public boolean isUnion() { + return true; + } + + public UnionType getUnionType() { + return unionType; + } + + public Query getLeft() { + return left; + } + + public Query getRight() { + return right; + } + + private Value[] convert(Value[] values, int columnCount) { + Value[] newValues; + if (columnCount == values.length) { + // re-use the array if possible + newValues = values; + } else { + // create a new array if needed, + // for the value hash set + newValues = new Value[columnCount]; + } + for (int i = 0; i < columnCount; i++) { + Expression e = expressions.get(i); + newValues[i] = values[i].convertTo(e.getType(), session); + } + return newValues; + } + + public LocalResult getEmptyResult() { + int columnCount = left.getColumnCount(); + return createLocalResult(columnCount); + } + + @Override + protected ResultInterface queryWithoutCache(long maxRows, ResultTarget target) { + OffsetFetch offsetFetch = getOffsetFetch(maxRows); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; + Database db = session.getDatabase(); + if (db.getSettings().optimizeInsertFromSelect) { + if (unionType == UnionType.UNION_ALL && target != null) { + if (sort == null && !distinct && fetch < 0 && offset == 0) { + left.query(0, target); + right.query(0, target); + return null; + } + } + } + int columnCount = left.getColumnCount(); + if (session.isLazyQueryExecution() && unionType == UnionType.UNION_ALL && !distinct && + sort == null && !randomAccessResult && !isForUpdate && + offset == 0 && !fetchPercent && !withTies && isReadOnly()) { + // limit 0 means no rows + if (fetch != 0) { + LazyResultUnion lazyResult = new LazyResultUnion(expressionArray, 
columnCount); + if (fetch > 0) { + lazyResult.setLimit(fetch); + } + return lazyResult; + } + } + LocalResult result = createLocalResult(columnCount); + if (sort != null) { + result.setSortOrder(sort); + } + if (distinct) { + left.setDistinctIfPossible(); + right.setDistinctIfPossible(); + result.setDistinct(); + } + switch (unionType) { + case UNION: + case EXCEPT: + left.setDistinctIfPossible(); + right.setDistinctIfPossible(); + result.setDistinct(); + break; + case UNION_ALL: + break; + case INTERSECT: + left.setDistinctIfPossible(); + right.setDistinctIfPossible(); + break; + default: + throw DbException.getInternalError("type=" + unionType); + } + ResultInterface l = left.query(0); + ResultInterface r = right.query(0); + l.reset(); + r.reset(); + switch (unionType) { + case UNION_ALL: + case UNION: { + while (l.next()) { + result.addRow(convert(l.currentRow(), columnCount)); + } + while (r.next()) { + result.addRow(convert(r.currentRow(), columnCount)); + } + break; + } + case EXCEPT: { + while (l.next()) { + result.addRow(convert(l.currentRow(), columnCount)); + } + while (r.next()) { + result.removeDistinct(convert(r.currentRow(), columnCount)); + } + break; + } + case INTERSECT: { + LocalResult temp = createLocalResult(columnCount); + temp.setDistinct(); + while (l.next()) { + temp.addRow(convert(l.currentRow(), columnCount)); + } + while (r.next()) { + Value[] values = convert(r.currentRow(), columnCount); + if (temp.containsDistinct(values)) { + result.addRow(values); + } + } + temp.close(); + break; + } + default: + throw DbException.getInternalError("type=" + unionType); + } + l.close(); + r.close(); + return finishResult(result, offset, fetch, fetchPercent, target); + } + + private LocalResult createLocalResult(int columnCount) { + return new LocalResult(session, expressionArray, columnCount, columnCount); + } + + @Override + public void init() { + if (checkInit) { + throw DbException.getInternalError(); + } + checkInit = true; + left.init(); + 
right.init(); + int len = left.getColumnCount(); + if (len != right.getColumnCount()) { + throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH); + } + ArrayList le = left.getExpressions(); + // set the expressions to get the right column count and names, + // but can't validate at this time + expressions = new ArrayList<>(len); + for (int i = 0; i < len; i++) { + Expression l = le.get(i); + expressions.add(l); + } + visibleColumnCount = len; + if (withTies && !hasOrder()) { + throw DbException.get(ErrorCode.WITH_TIES_WITHOUT_ORDER_BY); + } + } + + @Override + public void prepare() { + if (isPrepared) { + // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) + return; + } + if (!checkInit) { + throw DbException.getInternalError("not initialized"); + } + isPrepared = true; + left.prepare(); + right.prepare(); + int len = left.getColumnCount(); + // set the correct expressions now + expressions = new ArrayList<>(len); + ArrayList le = left.getExpressions(); + ArrayList re = right.getExpressions(); + for (int i = 0; i < len; i++) { + Expression l = le.get(i); + Expression r = re.get(i); + Column col = new Column(l.getAlias(session, i), TypeInfo.getHigherType(l.getType(), r.getType())); + Expression e = new ExpressionColumn(session.getDatabase(), col); + expressions.add(e); + } + if (orderList != null) { + if (initOrder(null, true, null)) { + prepareOrder(orderList, expressions.size()); + cleanupOrder(); + } + } + resultColumnCount = expressions.size(); + expressionArray = expressions.toArray(new Expression[0]); + } + + @Override + public double getCost() { + return left.getCost() + right.getCost(); + } + + @Override + public HashSet
<Table> getTables() { + HashSet<Table>
    set = left.getTables(); + set.addAll(right.getTables()); + return set; + } + + @Override + public void setForUpdate(boolean forUpdate) { + left.setForUpdate(forUpdate); + right.setForUpdate(forUpdate); + isForUpdate = forUpdate; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level) { + left.mapColumns(resolver, level); + right.mapColumns(resolver, level); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + right.setEvaluatable(tableFilter, b); + } + + @Override + public void addGlobalCondition(Parameter param, int columnId, + int comparisonType) { + addParameter(param); + switch (unionType) { + case UNION_ALL: + case UNION: + case INTERSECT: { + left.addGlobalCondition(param, columnId, comparisonType); + right.addGlobalCondition(param, columnId, comparisonType); + break; + } + case EXCEPT: { + left.addGlobalCondition(param, columnId, comparisonType); + break; + } + default: + throw DbException.getInternalError("type=" + unionType); + } + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder buff = new StringBuilder(); + buff.append('(').append(left.getPlanSQL(sqlFlags)).append(')'); + switch (unionType) { + case UNION_ALL: + buff.append("\nUNION ALL\n"); + break; + case UNION: + buff.append("\nUNION\n"); + break; + case INTERSECT: + buff.append("\nINTERSECT\n"); + break; + case EXCEPT: + buff.append("\nEXCEPT\n"); + break; + default: + throw DbException.getInternalError("type=" + unionType); + } + buff.append('(').append(right.getPlanSQL(sqlFlags)).append(')'); + appendEndOfQueryToSQL(buff, sqlFlags, expressions.toArray(new Expression[0])); + if (isForUpdate) { + buff.append("\nFOR UPDATE"); + } + return buff.toString(); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor); + } + + @Override + public void updateAggregate(SessionLocal s, int stage) { + 
left.updateAggregate(s, stage); + right.updateAggregate(s, stage); + } + + @Override + public void fireBeforeSelectTriggers() { + left.fireBeforeSelectTriggers(); + right.fireBeforeSelectTriggers(); + } + + @Override + public boolean allowGlobalConditions() { + return left.allowGlobalConditions() && right.allowGlobalConditions(); + } + + @Override + public boolean isConstantQuery() { + return super.isConstantQuery() && left.isConstantQuery() && right.isConstantQuery(); + } + + /** + * Lazy execution for this union. + */ + private final class LazyResultUnion extends LazyResult { + + int columnCount; + ResultInterface l; + ResultInterface r; + boolean leftDone; + boolean rightDone; + + LazyResultUnion(Expression[] expressions, int columnCount) { + super(getSession(), expressions); + this.columnCount = columnCount; + } + + @Override + public int getVisibleColumnCount() { + return columnCount; + } + + @Override + protected Value[] fetchNextRow() { + if (rightDone) { + return null; + } + if (!leftDone) { + if (l == null) { + l = left.query(0); + l.reset(); + } + if (l.next()) { + return l.currentRow(); + } + leftDone = true; + } + if (r == null) { + r = right.query(0); + r.reset(); + } + if (r.next()) { + return r.currentRow(); + } + rightDone = true; + return null; + } + + @Override + public void close() { + super.close(); + if (l != null) { + l.close(); + } + if (r != null) { + r.close(); + } + } + + @Override + public void reset() { + super.reset(); + if (l != null) { + l.reset(); + } + if (r != null) { + r.reset(); + } + leftDone = false; + rightDone = false; + } + } +} diff --git a/h2/src/main/org/h2/command/query/TableValueConstructor.java b/h2/src/main/org/h2/command/query/TableValueConstructor.java new file mode 100644 index 0000000000..82d171fa3c --- /dev/null +++ b/h2/src/main/org/h2/command/query/TableValueConstructor.java @@ -0,0 +1,400 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.command.query; + +import static org.h2.expression.Expression.WITHOUT_PARENTHESES; +import static org.h2.util.HasSQL.DEFAULT_SQL_FLAGS; + +import java.util.ArrayList; +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.result.ResultTarget; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.table.TableValueConstructorTable; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Table value constructor. + */ +public class TableValueConstructor extends Query { + + private final ArrayList> rows; + + /** + * The table. + */ + TableValueConstructorTable table; + + private TableValueColumnResolver columnResolver; + + private double cost; + + /** + * Creates new instance of table value constructor. + * + * @param session + * the session + * @param rows + * the rows + */ + public TableValueConstructor(SessionLocal session, ArrayList> rows) { + super(session); + this.rows = rows; + if ((visibleColumnCount = rows.get(0).size()) > Constants.MAX_COLUMNS) { + throw DbException.get(ErrorCode.TOO_MANY_COLUMNS_1, "" + Constants.MAX_COLUMNS); + } + for (ArrayList row : rows) { + for (Expression column : row) { + if (!column.isConstant()) { + return; + } + } + } + createTable(); + } + + /** + * Appends visible columns of all rows to the specified result. 
+ * + * @param session + * the session + * @param result + * the result + * @param columns + * the columns + * @param rows + * the rows with data + */ + public static void getVisibleResult(SessionLocal session, ResultTarget result, Column[] columns, + ArrayList> rows) { + int count = columns.length; + for (ArrayList row : rows) { + Value[] values = new Value[count]; + for (int i = 0; i < count; i++) { + values[i] = row.get(i).getValue(session).convertTo(columns[i].getType(), session); + } + result.addRow(values); + } + } + + /** + * Appends the SQL of the values to the specified string builder.. + * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @param rows + * the values + */ + public static void getValuesSQL(StringBuilder builder, int sqlFlags, ArrayList> rows) { + builder.append("VALUES "); + int rowCount = rows.size(); + for (int i = 0; i < rowCount; i++) { + if (i > 0) { + builder.append(", "); + } + Expression.writeExpressions(builder.append('('), rows.get(i), sqlFlags).append(')'); + } + } + + @Override + public boolean isUnion() { + return false; + } + + @Override + protected ResultInterface queryWithoutCache(long limit, ResultTarget target) { + OffsetFetch offsetFetch = getOffsetFetch(limit); + long offset = offsetFetch.offset; + long fetch = offsetFetch.fetch; + boolean fetchPercent = offsetFetch.fetchPercent; + int visibleColumnCount = this.visibleColumnCount, resultColumnCount = this.resultColumnCount; + LocalResult result = new LocalResult(session, expressionArray, visibleColumnCount, resultColumnCount); + if (sort != null) { + result.setSortOrder(sort); + } + if (distinct) { + result.setDistinct(); + } + Column[] columns = table.getColumns(); + if (visibleColumnCount == resultColumnCount) { + getVisibleResult(session, result, columns, rows); + } else { + for (ArrayList row : rows) { + Value[] values = new Value[resultColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + values[i] = 
row.get(i).getValue(session).convertTo(columns[i].getType(), session); + } + columnResolver.currentRow = values; + for (int i = visibleColumnCount; i < resultColumnCount; i++) { + values[i] = expressionArray[i].getValue(session); + } + result.addRow(values); + } + columnResolver.currentRow = null; + } + return finishResult(result, offset, fetch, fetchPercent, target); + } + + @Override + public void init() { + if (checkInit) { + throw DbException.getInternalError(); + } + checkInit = true; + if (withTies && !hasOrder()) { + throw DbException.get(ErrorCode.WITH_TIES_WITHOUT_ORDER_BY); + } + } + + @Override + public void prepare() { + if (isPrepared) { + // sometimes a subquery is prepared twice (CREATE TABLE AS SELECT) + return; + } + if (!checkInit) { + throw DbException.getInternalError("not initialized"); + } + isPrepared = true; + if (columnResolver == null) { + createTable(); + } + if (orderList != null) { + ArrayList expressionsSQL = new ArrayList<>(); + for (Expression e : expressions) { + expressionsSQL.add(e.getSQL(DEFAULT_SQL_FLAGS, WITHOUT_PARENTHESES)); + } + if (initOrder(expressionsSQL, false, null)) { + prepareOrder(orderList, expressions.size()); + } + } + resultColumnCount = expressions.size(); + for (int i = 0; i < resultColumnCount; i++) { + expressions.get(i).mapColumns(columnResolver, 0, Expression.MAP_INITIAL); + } + for (int i = visibleColumnCount; i < resultColumnCount; i++) { + expressions.set(i, expressions.get(i).optimize(session)); + } + if (sort != null) { + cleanupOrder(); + } + expressionArray = expressions.toArray(new Expression[0]); + double cost = 0; + int columnCount = visibleColumnCount; + for (ArrayList r : rows) { + for (int i = 0; i < columnCount; i++) { + cost += r.get(i).getCost(); + } + } + this.cost = cost + rows.size(); + } + + private void createTable() { + int rowCount = rows.size(); + ArrayList row = rows.get(0); + int columnCount = row.size(); + TypeInfo[] types = new TypeInfo[columnCount]; + for (int c = 0; c < 
columnCount; c++) { + Expression e = row.get(c).optimize(session); + row.set(c, e); + TypeInfo type = e.getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; + } + types[c] = type; + } + for (int r = 1; r < rowCount; r++) { + row = rows.get(r); + for (int c = 0; c < columnCount; c++) { + Expression e = row.get(c).optimize(session); + row.set(c, e); + types[c] = TypeInfo.getHigherType(types[c], e.getType()); + } + } + Column[] columns = new Column[columnCount]; + for (int c = 0; c < columnCount;) { + TypeInfo type = types[c]; + columns[c] = new Column("C" + ++c, type); + } + Database database = session.getDatabase(); + ArrayList expressions = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + expressions.add(new ExpressionColumn(database, null, null, columns[i].getName())); + } + this.expressions = expressions; + table = new TableValueConstructorTable(session.getDatabase().getMainSchema(), session, columns, rows); + columnResolver = new TableValueColumnResolver(); + } + + @Override + public double getCost() { + return cost; + } + + @Override + public HashSet
<Table> getTables() { + HashSet<Table>
    tables = new HashSet<>(1, 1f); + tables.add(table); + return tables; + } + + @Override + public void setForUpdate(boolean forUpdate) { + throw DbException.get(ErrorCode.RESULT_SET_READONLY); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).mapColumns(resolver, level, Expression.MAP_INITIAL); + } + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).setEvaluatable(tableFilter, b); + } + } + } + + @Override + public void addGlobalCondition(Parameter param, int columnId, int comparisonType) { + // Can't add + } + + @Override + public boolean allowGlobalConditions() { + return false; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + ExpressionVisitor v2 = visitor.incrementQueryLevel(1); + for (Expression e : expressionArray) { + if (!e.isEverything(v2)) { + return false; + } + } + return true; + } + + @Override + public void updateAggregate(SessionLocal s, int stage) { + int columnCount = visibleColumnCount; + for (ArrayList row : rows) { + for (int i = 0; i < columnCount; i++) { + row.get(i).updateAggregate(s, stage); + } + } + } + + @Override + public void fireBeforeSelectTriggers() { + // Nothing to do + } + + @Override + public String getPlanSQL(int sqlFlags) { + StringBuilder builder = new StringBuilder(); + getValuesSQL(builder, sqlFlags, rows); + appendEndOfQueryToSQL(builder, sqlFlags, expressionArray); + return builder.toString(); + } + + @Override + public Table toTable(String alias, Column[] columnTemplates, ArrayList parameters, + boolean forCreateView, Query topQuery) { + if (!hasOrder() && offsetExpr == null && fetchExpr == null && table != null) { + return table; + } + return super.toTable(alias, 
columnTemplates, parameters, forCreateView, topQuery); + } + + @Override + public boolean isConstantQuery() { + if (!super.isConstantQuery()) { + return false; + } + for (ArrayList row : rows) { + for (int i = 0; i < visibleColumnCount; i++) { + if (!row.get(i).isConstant()) { + return false; + } + } + } + return true; + } + + @Override + public Expression getIfSingleRow() { + if (offsetExpr != null || fetchExpr != null || rows.size() != 1) { + return null; + } + ArrayList row = rows.get(0); + if (visibleColumnCount == 1) { + return row.get(0); + } + Expression[] array = new Expression[visibleColumnCount]; + for (int i = 0; i < visibleColumnCount; i++) { + array[i] = row.get(i); + } + return new ExpressionList(array, false); + } + + private final class TableValueColumnResolver implements ColumnResolver { + + Value[] currentRow; + + TableValueColumnResolver() { + } + + @Override + public Column[] getColumns() { + return table.getColumns(); + } + + @Override + public Column findColumn(String name) { + return table.findColumn(name); + } + + @Override + public Value getValue(Column column) { + return currentRow[column.getColumnId()]; + } + + @Override + public Expression optimize(ExpressionColumn expressionColumn, Column column) { + return expressions.get(column.getColumnId()); + } + + } + +} diff --git a/h2/src/main/org/h2/command/query/package.html b/h2/src/main/org/h2/command/query/package.html new file mode 100644 index 0000000000..80f0d16539 --- /dev/null +++ b/h2/src/main/org/h2/command/query/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Contains queries. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/compress/CompressDeflate.java b/h2/src/main/org/h2/compress/CompressDeflate.java index aaafb741cc..0a1f722a05 100644 --- a/h2/src/main/org/h2/compress/CompressDeflate.java +++ b/h2/src/main/org/h2/compress/CompressDeflate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -11,7 +11,7 @@ import java.util.zip.Inflater; import org.h2.api.ErrorCode; -import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; /** * This is a wrapper class for the Deflater class. @@ -47,24 +47,24 @@ public void setOptions(String options) { deflater.setStrategy(strategy); } } catch (Exception e) { - throw DbException.get(ErrorCode.UNSUPPORTED_COMPRESSION_OPTIONS_1, options); + throw DataUtils.newMVStoreException(ErrorCode.UNSUPPORTED_COMPRESSION_OPTIONS_1, options); } } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { Deflater deflater = new Deflater(level); deflater.setStrategy(strategy); - deflater.setInput(in, 0, inLen); + deflater.setInput(in, inPos, inLen); deflater.finish(); int compressed = deflater.deflate(out, outPos, out.length - outPos); - while (compressed == 0) { + if (compressed == 0) { // the compressed length is 0, meaning compression didn't work // (sounds like a JDK bug) // try again, using the default strategy and compression level strategy = Deflater.DEFAULT_STRATEGY; level = Deflater.DEFAULT_COMPRESSION; - return compress(in, inLen, out, outPos); + return compress(in, inPos, inLen, out, outPos); } deflater.end(); return outPos + compressed; @@ -87,7 +87,7 @@ public void expand(byte[] in, 
int inPos, int inLen, byte[] out, int outPos, throw new DataFormatException(len + " " + outLen); } } catch (DataFormatException e) { - throw DbException.get(ErrorCode.COMPRESSION_ERROR, e); + throw DataUtils.newMVStoreException(ErrorCode.COMPRESSION_ERROR, e.getMessage(), e); } decompresser.end(); } diff --git a/h2/src/main/org/h2/compress/CompressLZF.java b/h2/src/main/org/h2/compress/CompressLZF.java index 274e2e24de..952a4e53b8 100644 --- a/h2/src/main/org/h2/compress/CompressLZF.java +++ b/h2/src/main/org/h2/compress/CompressLZF.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * * This code is based on the LZF algorithm from Marc Lehmann. It is a * re-implementation of the C code: @@ -74,7 +74,7 @@ * *

    * The first byte of the compressed stream is the control byte. For literal - * runs, the highest three bits of the control byte are not set, the the lower + * runs, the highest three bits of the control byte are not set, the lower * bits are the literal run length, and the next bytes are data to copy directly * into the output. For back-references, the highest three bits of the control * byte are the back-reference length. If all three bits are set, then the @@ -155,15 +155,16 @@ private static int hash(int h) { } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { - int inPos = 0; + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { + int offset = inPos; + inLen += inPos; if (cachedHashTable == null) { cachedHashTable = new int[HASH_SIZE]; } int[] hashTab = cachedHashTable; int literals = 0; outPos++; - int future = first(in, 0); + int future = first(in, inPos); while (inPos < inLen - 4) { byte p2 = in[inPos + 2]; // next @@ -178,7 +179,7 @@ public int compress(byte[] in, int inLen, byte[] out, int outPos) { // && (((in[ref] & 255) << 8) | (in[ref + 1] & 255)) == // ((future >> 8) & 0xffff)) { if (ref < inPos - && ref > 0 + && ref > offset && (off = inPos - ref - 1) < MAX_OFF && in[ref + 2] == p2 && in[ref + 1] == (byte) (future >> 8) @@ -265,14 +266,15 @@ public int compress(byte[] in, int inLen, byte[] out, int outPos) { * @return the end position */ public int compress(ByteBuffer in, int inPos, byte[] out, int outPos) { - int inLen = in.capacity() - inPos; + int offset = inPos; + int inLen = in.capacity(); if (cachedHashTable == null) { cachedHashTable = new int[HASH_SIZE]; } int[] hashTab = cachedHashTable; int literals = 0; outPos++; - int future = first(in, 0); + int future = first(in, inPos); while (inPos < inLen - 4) { byte p2 = in.get(inPos + 2); // next @@ -287,7 +289,7 @@ public int compress(ByteBuffer in, int inPos, byte[] out, int outPos) { // && (((in[ref] & 255) << 8) | (in[ref + 1] & 255)) 
== // ((future >> 8) & 0xffff)) { if (ref < inPos - && ref > 0 + && ref > offset && (off = inPos - ref - 1) < MAX_OFF && in.get(ref + 2) == p2 && in.get(ref + 1) == (byte) (future >> 8) diff --git a/h2/src/main/org/h2/compress/CompressNo.java b/h2/src/main/org/h2/compress/CompressNo.java index fe2cc0625b..df7c1fb4f9 100644 --- a/h2/src/main/org/h2/compress/CompressNo.java +++ b/h2/src/main/org/h2/compress/CompressNo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -23,8 +23,8 @@ public void setOptions(String options) { } @Override - public int compress(byte[] in, int inLen, byte[] out, int outPos) { - System.arraycopy(in, 0, out, outPos, inLen); + public int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos) { + System.arraycopy(in, inPos, out, outPos, inLen); return outPos + inLen; } diff --git a/h2/src/main/org/h2/compress/Compressor.java b/h2/src/main/org/h2/compress/Compressor.java index 088d6f2bcb..4970ff0b57 100644 --- a/h2/src/main/org/h2/compress/Compressor.java +++ b/h2/src/main/org/h2/compress/Compressor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -37,12 +37,13 @@ public interface Compressor { * Compress a number of bytes. 
* * @param in the input data + * @param inPos the offset at the input array * @param inLen the number of bytes to compress * @param out the output area * @param outPos the offset at the output array * @return the end position */ - int compress(byte[] in, int inLen, byte[] out, int outPos); + int compress(byte[] in, int inPos, int inLen, byte[] out, int outPos); /** * Expand a number of compressed bytes. diff --git a/h2/src/main/org/h2/compress/LZFInputStream.java b/h2/src/main/org/h2/compress/LZFInputStream.java index ef6468b819..5586841b86 100644 --- a/h2/src/main/org/h2/compress/LZFInputStream.java +++ b/h2/src/main/org/h2/compress/LZFInputStream.java @@ -1,14 +1,14 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; import java.io.IOException; import java.io.InputStream; -import org.h2.message.DbException; import org.h2.mvstore.DataUtils; +import org.h2.util.Utils; /** * An input stream to read from an LZF stream. @@ -31,7 +31,7 @@ public LZFInputStream(InputStream in) throws IOException { } private static byte[] ensureSize(byte[] buff, int len) { - return buff == null || buff.length < len ? DataUtils.newBytes(len) : buff; + return buff == null || buff.length < len ? 
Utils.newBytes(len) : buff; } private void fillBuffer() throws IOException { @@ -55,7 +55,7 @@ private void fillBuffer() throws IOException { try { decompress.expand(inBuffer, 0, len, buffer, 0, size); } catch (ArrayIndexOutOfBoundsException e) { - DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } this.bufferLength = size; } diff --git a/h2/src/main/org/h2/compress/LZFOutputStream.java b/h2/src/main/org/h2/compress/LZFOutputStream.java index 24d85d273d..e2b7aa2a04 100644 --- a/h2/src/main/org/h2/compress/LZFOutputStream.java +++ b/h2/src/main/org/h2/compress/LZFOutputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.compress; @@ -54,7 +54,7 @@ public void write(int b) throws IOException { private void compressAndWrite(byte[] buff, int len) throws IOException { if (len > 0) { ensureOutput(len); - int compressed = compress.compress(buff, len, outBuffer, 0); + int compressed = compress.compress(buff, 0, len, outBuffer, 0); if (compressed > len) { writeInt(-len); out.write(buff, 0, len); diff --git a/h2/src/main/org/h2/compress/package.html b/h2/src/main/org/h2/compress/package.html index 050b0d831e..3c1c6d9b1f 100644 --- a/h2/src/main/org/h2/compress/package.html +++ b/h2/src/main/org/h2/compress/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/constraint/Constraint.java b/h2/src/main/org/h2/constraint/Constraint.java index 6342cd5e34..762b267643 100644 --- a/h2/src/main/org/h2/constraint/Constraint.java +++ b/h2/src/main/org/h2/constraint/Constraint.java @@ -1,48 +1,66 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.constraint; import java.util.HashSet; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; -import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.Row; import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; +import org.h2.schema.SchemaObject; import org.h2.table.Column; import org.h2.table.Table; /** * The base class for constraint checking. */ -public abstract class Constraint extends SchemaObjectBase implements - Comparable { - - /** - * The constraint type name for check constraints. - */ - public static final String CHECK = "CHECK"; - - /** - * The constraint type name for referential constraints. - */ - public static final String REFERENTIAL = "REFERENTIAL"; - - /** - * The constraint type name for unique constraints. - */ - public static final String UNIQUE = "UNIQUE"; +public abstract class Constraint extends SchemaObject implements Comparable { + + public enum Type { + /** + * The constraint type for check constraints. + */ + CHECK, + /** + * The constraint type for primary key constraints. + */ + PRIMARY_KEY, + /** + * The constraint type for unique constraints. + */ + UNIQUE, + /** + * The constraint type for referential constraints. + */ + REFERENTIAL, + /** + * The constraint type for domain constraints. + */ + DOMAIN; + + /** + * Get standard SQL type name. + * + * @return standard SQL type name + */ + public String getSqlName() { + if (this == Constraint.Type.PRIMARY_KEY) { + return "PRIMARY KEY"; + } + if (this == Constraint.Type.REFERENTIAL) { + return "FOREIGN KEY"; + } + return name(); + } - /** - * The constraint type name for primary key constraints. 
- */ - public static final String PRIMARY_KEY = "PRIMARY KEY"; + } /** * The table for which this constraint is defined. @@ -50,9 +68,11 @@ public abstract class Constraint extends SchemaObjectBase implements protected Table table; Constraint(Schema schema, int id, String name, Table table) { - initSchemaObjectBase(schema, id, name, Trace.CONSTRAINT); + super(schema, id, name, Trace.CONSTRAINT); this.table = table; - this.setTemporary(table.isTemporary()); + if (table != null) { + this.setTemporary(table.isTemporary()); + } } /** @@ -60,7 +80,7 @@ public abstract class Constraint extends SchemaObjectBase implements * * @return the name */ - public abstract String getConstraintType(); + public abstract Type getConstraintType(); /** * Check if this row fulfils the constraint. @@ -71,7 +91,7 @@ public abstract class Constraint extends SchemaObjectBase implements * @param oldRow the old row * @param newRow the new row */ - public abstract void checkRow(Session session, Table t, Row oldRow, Row newRow); + public abstract void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow); /** * Check if this constraint needs the specified index. @@ -96,6 +116,15 @@ public abstract class Constraint extends SchemaObjectBase implements */ public abstract HashSet getReferencedColumns(Table table); + /** + * Returns the CHECK expression or null. + * + * @return the CHECK expression or null. + */ + public Expression getExpression() { + return null; + } + /** * Get the SQL statement to create this constraint. 
* @@ -116,7 +145,7 @@ public abstract class Constraint extends SchemaObjectBase implements * * @param session the session */ - public abstract void checkExistingData(Session session); + public abstract void checkExistingData(SessionLocal session); /** * This method is called after a related table has changed @@ -125,16 +154,22 @@ public abstract class Constraint extends SchemaObjectBase implements public abstract void rebuild(); /** - * Get the unique index used to enforce this constraint, or null if no index + * Get the index of this constraint in the source table, or null if no index * is used. * * @return the index */ - public abstract Index getUniqueIndex(); + public Index getIndex() { + return null; + } - @Override - public void checkRename() { - // ok + /** + * Returns the referenced unique constraint, or null. + * + * @return the referenced unique constraint, or null + */ + public ConstraintUnique getReferencedConstraint() { + return null; } @Override @@ -150,39 +185,17 @@ public Table getRefTable() { return table; } - @Override - public String getDropSQL() { - return null; - } - - private int getConstraintTypeOrder() { - String constraintType = getConstraintType(); - if (CHECK.equals(constraintType)) { - return 0; - } else if (PRIMARY_KEY.equals(constraintType)) { - return 1; - } else if (UNIQUE.equals(constraintType)) { - return 2; - } else if (REFERENTIAL.equals(constraintType)) { - return 3; - } else { - throw DbException.throwInternalError("type: " + constraintType); - } - } - @Override public int compareTo(Constraint other) { if (this == other) { return 0; } - int thisType = getConstraintTypeOrder(); - int otherType = other.getConstraintTypeOrder(); - return thisType - otherType; + return Integer.compare(getConstraintType().ordinal(), other.getConstraintType().ordinal()); } @Override public boolean isHidden() { - return table.isHidden(); + return table != null && table.isHidden(); } /** @@ -192,7 +205,7 @@ public boolean isHidden() { * @return true if 
every visited expression returned true, or if there are * no expressions */ - public boolean isEverything(ExpressionVisitor visitor) { + public boolean isEverything(@SuppressWarnings("unused") ExpressionVisitor visitor) { return true; } diff --git a/h2/src/main/org/h2/constraint/ConstraintActionType.java b/h2/src/main/org/h2/constraint/ConstraintActionType.java new file mode 100644 index 0000000000..b5e3b8fc6c --- /dev/null +++ b/h2/src/main/org/h2/constraint/ConstraintActionType.java @@ -0,0 +1,44 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.constraint; + +public enum ConstraintActionType { + /** + * The action is to restrict the operation. + */ + RESTRICT, + + /** + * The action is to cascade the operation. + */ + CASCADE, + + /** + * The action is to set the value to the default value. + */ + SET_DEFAULT, + + /** + * The action is to set the value to NULL. + */ + SET_NULL; + + /** + * Get standard SQL type name. + * + * @return standard SQL type name + */ + public String getSqlName() { + if (this == ConstraintActionType.SET_DEFAULT) { + return "SET DEFAULT"; + } + if (this == SET_NULL) { + return "SET NULL"; + } + return name(); + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/constraint/ConstraintCheck.java b/h2/src/main/org/h2/constraint/ConstraintCheck.java index c6034e1636..a453b23705 100644 --- a/h2/src/main/org/h2/constraint/ConstraintCheck.java +++ b/h2/src/main/org/h2/constraint/ConstraintCheck.java @@ -1,15 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.constraint; import java.util.HashSet; -import java.util.Iterator; - import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionVisitor; import org.h2.index.Index; @@ -20,8 +18,8 @@ import org.h2.table.Column; import org.h2.table.Table; import org.h2.table.TableFilter; -import org.h2.util.New; import org.h2.util.StringUtils; +import org.h2.value.Value; /** * A check constraint. @@ -36,8 +34,8 @@ public ConstraintCheck(Schema schema, int id, String name, Table table) { } @Override - public String getConstraintType() { - return Constraint.CHECK; + public Type getConstraintType() { + return Constraint.Type.CHECK; } public void setTableFilter(TableFilter filter) { @@ -51,21 +49,24 @@ public void setExpression(Expression expr) { @Override public String getCreateSQLForCopy(Table forTable, String quotedName) { StringBuilder buff = new StringBuilder("ALTER TABLE "); - buff.append(forTable.getSQL()).append(" ADD CONSTRAINT "); + forTable.getSQL(buff, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { buff.append("IF NOT EXISTS "); } buff.append(quotedName); if (comment != null) { - buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment)); + buff.append(" COMMENT "); + StringUtils.quoteStringSQL(buff, comment); } - buff.append(" CHECK").append(StringUtils.enclose(expr.getSQL())) - .append(" NOCHECK"); + buff.append(" CHECK"); + expr.getEnclosedSQL(buff, DEFAULT_SQL_FLAGS).append(" NOCHECK"); return buff.toString(); } private String getShortDescription() { - return getName() + ": " + expr.getSQL(); + StringBuilder builder = new StringBuilder().append(getName()).append(": "); + expr.getTraceSQL(); + return builder.toString(); } @Override @@ -75,11 +76,11 @@ public String getCreateSQLWithoutIndexes() { @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL()); + 
return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeConstraint(this); database.removeMeta(session, getId()); filter = null; @@ -89,22 +90,24 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { if (newRow == null) { return; } - filter.set(newRow); - Boolean b; + boolean b; try { - b = expr.getValue(session).getBoolean(); + Value v; + synchronized (this) { + filter.set(newRow); + v = expr.getValue(session); + } + // Both TRUE and NULL are ok + b = v.isFalse(); } catch (DbException ex) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_INVALID, ex, - getShortDescription()); + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_INVALID, ex, getShortDescription()); } - // Both TRUE and NULL are ok - if (Boolean.FALSE.equals(b)) { - throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, - getShortDescription()); + if (b) { + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, getShortDescription()); } } @@ -115,21 +118,17 @@ public boolean usesIndex(Index index) { @Override public void setIndexOwner(Index index) { - DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } @Override public HashSet getReferencedColumns(Table table) { - HashSet columns = New.hashSet(); - expr.isEverything(ExpressionVisitor.getColumnsVisitor(columns)); - for (Iterator it = columns.iterator(); it.hasNext();) { - if (it.next().getTable() != table) { - it.remove(); - } - } + HashSet columns = new HashSet<>(); + expr.isEverything(ExpressionVisitor.getColumnsVisitor(columns, table)); return columns; } + @Override public Expression getExpression() { return expr; } @@ -140,24 +139,21 @@ public boolean isBefore() { } @Override - 
public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { if (session.getDatabase().isStarting()) { // don't check at startup return; } - String sql = "SELECT 1 FROM " + filter.getTable().getSQL() + - " WHERE NOT(" + expr.getSQL() + ")"; + StringBuilder builder = new StringBuilder().append("SELECT NULL FROM "); + filter.getTable().getSQL(builder, DEFAULT_SQL_FLAGS).append(" WHERE NOT "); + expr.getSQL(builder, DEFAULT_SQL_FLAGS, Expression.AUTO_PARENTHESES); + String sql = builder.toString(); ResultInterface r = session.prepare(sql).query(1); if (r.next()) { throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, getName()); } } - @Override - public Index getUniqueIndex() { - return null; - } - @Override public void rebuild() { // nothing to do diff --git a/h2/src/main/org/h2/constraint/ConstraintDomain.java b/h2/src/main/org/h2/constraint/ConstraintDomain.java new file mode 100644 index 0000000000..c866c808bb --- /dev/null +++ b/h2/src/main/org/h2/constraint/ConstraintDomain.java @@ -0,0 +1,240 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.constraint; + +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.command.Parser; +import org.h2.command.ddl.AlterDomain; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.schema.Domain; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.PlanItem; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A domain constraint. 
+ */ +public class ConstraintDomain extends Constraint { + + private Domain domain; + + private Expression expr; + + private DomainColumnResolver resolver; + + public ConstraintDomain(Schema schema, int id, String name, Domain domain) { + super(schema, id, name, null); + this.domain = domain; + resolver = new DomainColumnResolver(domain.getDataType()); + } + + @Override + public Type getConstraintType() { + return Constraint.Type.DOMAIN; + } + + /** + * Returns the domain of this constraint. + * + * @return the domain + */ + public Domain getDomain() { + return domain; + } + + /** + * Set the expression. + * + * @param session the session + * @param expr the expression + */ + public void setExpression(SessionLocal session, Expression expr) { + expr.mapColumns(resolver, 0, Expression.MAP_INITIAL); + expr = expr.optimize(session); + // check if the column is mapped + synchronized (this) { + resolver.setValue(ValueNull.INSTANCE); + expr.getValue(session); + } + this.expr = expr; + } + + @Override + public String getCreateSQLForCopy(Table forTable, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public String getCreateSQLWithoutIndexes() { + return getCreateSQL(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = new StringBuilder("ALTER DOMAIN "); + domain.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); + getSQL(builder, DEFAULT_SQL_FLAGS); + if (comment != null) { + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); + } + builder.append(" CHECK"); + expr.getEnclosedSQL(builder, DEFAULT_SQL_FLAGS).append(" NOCHECK"); + return builder.toString(); + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + domain.removeConstraint(this); + database.removeMeta(session, getId()); + domain = null; + expr = null; + invalidate(); + } + + @Override + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { + throw 
DbException.getInternalError(toString()); + } + + /** + * Check the specified value. + * + * @param session + * the session + * @param value + * the value to check + */ + public void check(SessionLocal session, Value value) { + Value v; + synchronized (this) { + resolver.setValue(value); + v = expr.getValue(session); + } + // Both TRUE and NULL are OK + if (v.isFalse()) { + throw DbException.get(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, expr.getTraceSQL()); + } + } + + /** + * Get the check constraint expression for this column. + * + * @param session the session + * @param columnName the column name + * @return the expression + */ + public Expression getCheckConstraint(SessionLocal session, String columnName) { + String sql; + if (columnName != null) { + synchronized (this) { + try { + resolver.setColumnName(columnName); + sql = expr.getSQL(DEFAULT_SQL_FLAGS); + } finally { + resolver.resetColumnName(); + } + } + return new Parser(session).parseExpression(sql); + } else { + synchronized (this) { + sql = expr.getSQL(DEFAULT_SQL_FLAGS); + } + return new Parser(session).parseDomainConstraintExpression(sql); + } + } + + @Override + public boolean usesIndex(Index index) { + return false; + } + + @Override + public void setIndexOwner(Index index) { + throw DbException.getInternalError(toString()); + } + + @Override + public HashSet getReferencedColumns(Table table) { + HashSet columns = new HashSet<>(); + expr.isEverything(ExpressionVisitor.getColumnsVisitor(columns, table)); + return columns; + } + + @Override + public Expression getExpression() { + return expr; + } + + @Override + public boolean isBefore() { + return true; + } + + @Override + public void checkExistingData(SessionLocal session) { + if (session.getDatabase().isStarting()) { + // don't check at startup + return; + } + new CheckExistingData(session, domain); + } + + @Override + public void rebuild() { + // nothing to do + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return 
expr.isEverything(visitor); + } + + private class CheckExistingData { + + private final SessionLocal session; + + CheckExistingData(SessionLocal session, Domain domain) { + this.session = session; + checkDomain(null, domain); + } + + private boolean checkColumn(Domain domain, Column targetColumn) { + Table table = targetColumn.getTable(); + TableFilter filter = new TableFilter(session, table, null, true, null, 0, null); + TableFilter[] filters = { filter }; + PlanItem item = filter.getBestPlanItem(session, filters, 0, new AllColumnsForPlan(filters)); + filter.setPlanItem(item); + filter.prepare(); + filter.startQuery(session); + filter.reset(); + while (filter.next()) { + check(session, filter.getValue(targetColumn)); + } + return false; + } + + private boolean checkDomain(Domain domain, Domain targetDomain) { + AlterDomain.forAllDependencies(session, targetDomain, this::checkColumn, this::checkDomain, false); + return false; + } + + } + +} diff --git a/h2/src/main/org/h2/constraint/ConstraintReferential.java b/h2/src/main/org/h2/constraint/ConstraintReferential.java index bfff4fefe2..7bdde5c130 100644 --- a/h2/src/main/org/h2/constraint/ConstraintReferential.java +++ b/h2/src/main/org/h2/constraint/ConstraintReferential.java @@ -1,17 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.constraint; import java.util.ArrayList; import java.util.HashSet; - import org.h2.api.ErrorCode; -import org.h2.command.Parser; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.Parameter; import org.h2.index.Cursor; @@ -24,8 +22,6 @@ import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -35,35 +31,14 @@ */ public class ConstraintReferential extends Constraint { - /** - * The action is to restrict the operation. - */ - public static final int RESTRICT = 0; - - /** - * The action is to cascade the operation. - */ - public static final int CASCADE = 1; - - /** - * The action is to set the value to the default value. - */ - public static final int SET_DEFAULT = 2; - - /** - * The action is to set the value to NULL. 
- */ - public static final int SET_NULL = 3; - private IndexColumn[] columns; private IndexColumn[] refColumns; - private int deleteAction; - private int updateAction; + private ConstraintActionType deleteAction = ConstraintActionType.RESTRICT; + private ConstraintActionType updateAction = ConstraintActionType.RESTRICT; private Table refTable; private Index index; - private Index refIndex; + private ConstraintUnique refConstraint; private boolean indexOwner; - private boolean refIndexOwner; private String deleteSQL, updateSQL; private boolean skipOwnTable; @@ -72,24 +47,8 @@ public ConstraintReferential(Schema schema, int id, String name, Table table) { } @Override - public String getConstraintType() { - return Constraint.REFERENTIAL; - } - - private static void appendAction(StatementBuilder buff, int action) { - switch (action) { - case CASCADE: - buff.append("CASCADE"); - break; - case SET_DEFAULT: - buff.append("SET DEFAULT"); - break; - case SET_NULL: - buff.append("SET NULL"); - break; - default: - DbException.throwInternalError("action=" + action); - } + public Type getConstraintType() { + return Constraint.Type.REFERENTIAL; } /** @@ -117,54 +76,42 @@ public String getCreateSQLForCopy(Table forTable, String quotedName) { */ public String getCreateSQLForCopy(Table forTable, Table forRefTable, String quotedName, boolean internalIndex) { - StatementBuilder buff = new StatementBuilder("ALTER TABLE "); - String mainTable = forTable.getSQL(); - buff.append(mainTable).append(" ADD CONSTRAINT "); + StringBuilder builder = new StringBuilder("ALTER TABLE "); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { - buff.append("IF NOT EXISTS "); + builder.append("IF NOT EXISTS "); } - buff.append(quotedName); + builder.append(quotedName); if (comment != null) { - buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment)); + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); } IndexColumn[] 
cols = columns; IndexColumn[] refCols = refColumns; - buff.append(" FOREIGN KEY("); - for (IndexColumn c : cols) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(')'); + builder.append(" FOREIGN KEY("); + IndexColumn.writeColumns(builder, cols, DEFAULT_SQL_FLAGS); + builder.append(')'); if (internalIndex && indexOwner && forTable == this.table) { - buff.append(" INDEX ").append(index.getSQL()); + builder.append(" INDEX "); + index.getSQL(builder, DEFAULT_SQL_FLAGS); } - buff.append(" REFERENCES "); - String quotedRefTable; + builder.append(" REFERENCES "); if (this.table == this.refTable) { // self-referencing constraints: need to use new table - quotedRefTable = forTable.getSQL(); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - quotedRefTable = forRefTable.getSQL(); - } - buff.append(quotedRefTable).append('('); - buff.resetCount(); - for (IndexColumn r : refCols) { - buff.appendExceptFirst(", "); - buff.append(r.getSQL()); + forRefTable.getSQL(builder, DEFAULT_SQL_FLAGS); } - buff.append(')'); - if (internalIndex && refIndexOwner && forTable == this.table) { - buff.append(" INDEX ").append(refIndex.getSQL()); + builder.append('('); + IndexColumn.writeColumns(builder, refCols, DEFAULT_SQL_FLAGS); + builder.append(')'); + if (deleteAction != ConstraintActionType.RESTRICT) { + builder.append(" ON DELETE ").append(deleteAction.getSqlName()); } - if (deleteAction != RESTRICT) { - buff.append(" ON DELETE "); - appendAction(buff, deleteAction); + if (updateAction != ConstraintActionType.RESTRICT) { + builder.append(" ON UPDATE ").append(updateAction.getSqlName()); } - if (updateAction != RESTRICT) { - buff.append(" ON UPDATE "); - appendAction(buff, updateAction); - } - return buff.append(" NOCHECK").toString(); + return builder.append(" NOCHECK").toString(); } @@ -177,43 +124,38 @@ public String getCreateSQLForCopy(Table forTable, Table forRefTable, * @return the description */ private String getShortDescription(Index 
searchIndex, SearchRow check) { - StatementBuilder buff = new StatementBuilder(getName()); - buff.append(": ").append(table.getSQL()).append(" FOREIGN KEY("); - for (IndexColumn c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(") REFERENCES ").append(refTable.getSQL()).append('('); - buff.resetCount(); - for (IndexColumn r : refColumns) { - buff.appendExceptFirst(", "); - buff.append(r.getSQL()); - } - buff.append(')'); + StringBuilder builder = new StringBuilder(getName()).append(": "); + table.getSQL(builder, TRACE_SQL_FLAGS).append(" FOREIGN KEY("); + IndexColumn.writeColumns(builder, columns, TRACE_SQL_FLAGS); + builder.append(") REFERENCES "); + refTable.getSQL(builder, TRACE_SQL_FLAGS).append('('); + IndexColumn.writeColumns(builder, refColumns, TRACE_SQL_FLAGS); + builder.append(')'); if (searchIndex != null && check != null) { - buff.append(" ("); - buff.resetCount(); + builder.append(" ("); Column[] cols = searchIndex.getColumns(); int len = Math.min(columns.length, cols.length); for (int i = 0; i < len; i++) { int idx = cols[i].getColumnId(); Value c = check.getValue(idx); - buff.appendExceptFirst(", "); - buff.append(c == null ? "" : c.toString()); + if (i > 0) { + builder.append(", "); + } + builder.append(c == null ? 
"" : c.toString()); } - buff.append(')'); + builder.append(')'); } - return buff.toString(); + return builder.toString(); } @Override public String getCreateSQLWithoutIndexes() { - return getCreateSQLForCopy(table, refTable, getSQL(), false); + return getCreateSQLForCopy(table, refTable, getSQL(DEFAULT_SQL_FLAGS), false); } @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL()); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } public void setColumns(IndexColumn[] cols) { @@ -226,7 +168,7 @@ public IndexColumn[] getColumns() { @Override public HashSet getReferencedColumns(Table table) { - HashSet result = New.hashSet(); + HashSet result = new HashSet<>(); if (table == this.table) { for (IndexColumn c : columns) { result.add(c.column); @@ -267,31 +209,27 @@ public void setIndex(Index index, boolean isOwner) { } /** - * Set the index of the referenced table to use for this constraint. + * Set the unique constraint of the referenced table to use for this + * constraint. 
* - * @param refIndex the index - * @param isRefOwner true if the index is generated by the system and - * belongs to this constraint + * @param refConstraint + * the unique constraint */ - public void setRefIndex(Index refIndex, boolean isRefOwner) { - this.refIndex = refIndex; - this.refIndexOwner = isRefOwner; + public void setRefConstraint(ConstraintUnique refConstraint) { + this.refConstraint = refConstraint; } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeConstraint(this); refTable.removeConstraint(this); if (indexOwner) { table.removeIndexOrTransferOwnership(session, index); } - if (refIndexOwner) { - refTable.removeIndexOrTransferOwnership(session, refIndex); - } database.removeMeta(session, getId()); refTable = null; index = null; - refIndex = null; + refConstraint = null; columns = null; refColumns = null; deleteSQL = null; @@ -301,7 +239,7 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { if (!database.getReferentialIntegrity()) { return; } @@ -319,7 +257,7 @@ public void checkRow(Session session, Table t, Row oldRow, Row newRow) { } } - private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { + private void checkRowOwnTable(SessionLocal session, Row oldRow, Row newRow) { if (newRow == null) { return; } @@ -332,7 +270,7 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { return; } if (constraintColumnsEqual) { - if (!database.areEqual(v, oldRow.getValue(idx))) { + if (!session.areEqual(v, oldRow.getValue(idx))) { constraintColumnsEqual = false; } } @@ -351,7 +289,7 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); Value r = 
newRow.getValue(refIdx); - if (!database.areEqual(r, v)) { + if (!session.areEqual(r, v)) { self = false; break; } @@ -366,18 +304,19 @@ private void checkRowOwnTable(Session session, Row oldRow, Row newRow) { Value v = newRow.getValue(idx); Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); - check.setValue(refIdx, refCol.convert(v)); + check.setValue(refIdx, refCol.convert(session, v)); } + Index refIndex = refConstraint.getIndex(); if (!existsRow(session, refIndex, check, null)) { throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, getShortDescription(refIndex, check)); } } - private boolean existsRow(Session session, Index searchIndex, + private boolean existsRow(SessionLocal session, Index searchIndex, SearchRow check, Row excluding) { Table searchTable = searchIndex.getTable(); - searchTable.lock(session, false, false); + searchTable.lock(session, Table.READ_LOCK); Cursor cursor = searchIndex.find(session, check, check); while (cursor.next()) { SearchRow found; @@ -392,7 +331,7 @@ private boolean existsRow(Session session, Index searchIndex, int idx = cols[i].getColumnId(); Value c = check.getValue(idx); Value f = found.getValue(idx); - if (searchTable.compareTypeSave(c, f) != 0) { + if (searchTable.compareValues(session, c, f) != 0) { allEqual = false; break; } @@ -405,16 +344,16 @@ private boolean existsRow(Session session, Index searchIndex, } private boolean isEqual(Row oldRow, Row newRow) { - return refIndex.compareRows(oldRow, newRow) == 0; + return refConstraint.getIndex().compareRows(oldRow, newRow) == 0; } - private void checkRow(Session session, Row oldRow) { - SearchRow check = table.getTemplateSimpleRow(false); + private void checkRow(SessionLocal session, Row oldRow) { + SearchRow check = table.getRowFactory().createRow(); for (int i = 0, len = columns.length; i < len; i++) { Column refCol = refColumns[i].column; int refIdx = refCol.getColumnId(); Column col = columns[i].column; - Value v = 
col.convert(oldRow.getValue(refIdx)); + Value v = col.convert(session, oldRow.getValue(refIdx)); if (v == ValueNull.INSTANCE) { return; } @@ -428,7 +367,7 @@ private void checkRow(Session session, Row oldRow) { } } - private void checkRowRefTable(Session session, Row oldRow, Row newRow) { + private void checkRowRefTable(SessionLocal session, Row oldRow, Row newRow) { if (oldRow == null) { // this is an insert return; @@ -439,21 +378,21 @@ private void checkRowRefTable(Session session, Row oldRow, Row newRow) { } if (newRow == null) { // this is a delete - if (deleteAction == RESTRICT) { + if (deleteAction == ConstraintActionType.RESTRICT) { checkRow(session, oldRow); } else { - int i = deleteAction == CASCADE ? 0 : columns.length; + int i = deleteAction == ConstraintActionType.CASCADE ? 0 : columns.length; Prepared deleteCommand = getDelete(session); setWhere(deleteCommand, i, oldRow); updateWithSkipCheck(deleteCommand); } } else { // this is an update - if (updateAction == RESTRICT) { + if (updateAction == ConstraintActionType.RESTRICT) { checkRow(session, oldRow); } else { Prepared updateCommand = getUpdate(session); - if (updateAction == CASCADE) { + if (updateAction == ConstraintActionType.CASCADE) { ArrayList params = updateCommand.getParameters(); for (int i = 0, len = columns.length; i < len; i++) { Parameter param = params.get(i); @@ -490,7 +429,7 @@ private void setWhere(Prepared command, int pos, Row row) { } } - public int getDeleteAction() { + public ConstraintActionType getDeleteAction() { return deleteAction; } @@ -499,40 +438,55 @@ public int getDeleteAction() { * * @param action the action */ - public void setDeleteAction(int action) { + public void setDeleteAction(ConstraintActionType action) { if (action == deleteAction && deleteSQL == null) { return; } - if (deleteAction != RESTRICT) { + if (deleteAction != ConstraintActionType.RESTRICT) { throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, "ON DELETE"); } this.deleteAction = action; 
buildDeleteSQL(); } + /** + * Update the constraint SQL when a referenced column is renamed. + */ + public void updateOnTableColumnRename() { + if (deleteAction != null) { + deleteSQL = null; + buildDeleteSQL(); + } + if (updateAction != null) { + updateSQL = null; + buildUpdateSQL(); + } + } + private void buildDeleteSQL() { - if (deleteAction == RESTRICT) { + if (deleteAction == ConstraintActionType.RESTRICT) { return; } - StatementBuilder buff = new StatementBuilder(); - if (deleteAction == CASCADE) { - buff.append("DELETE FROM ").append(table.getSQL()); + StringBuilder builder = new StringBuilder(); + if (deleteAction == ConstraintActionType.CASCADE) { + builder.append("DELETE FROM "); + table.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - appendUpdate(buff); + appendUpdate(builder); } - appendWhere(buff); - deleteSQL = buff.toString(); + appendWhere(builder); + deleteSQL = builder.toString(); } - private Prepared getUpdate(Session session) { + private Prepared getUpdate(SessionLocal session) { return prepare(session, updateSQL, updateAction); } - private Prepared getDelete(Session session) { + private Prepared getDelete(SessionLocal session) { return prepare(session, deleteSQL, deleteAction); } - public int getUpdateAction() { + public ConstraintActionType getUpdateAction() { return updateAction; } @@ -541,11 +495,11 @@ public int getUpdateAction() { * * @param action the action */ - public void setUpdateAction(int action) { + public void setUpdateAction(ConstraintActionType action) { if (action == updateAction && updateSQL == null) { return; } - if (updateAction != RESTRICT) { + if (updateAction != ConstraintActionType.RESTRICT) { throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, "ON UPDATE"); } this.updateAction = action; @@ -553,13 +507,13 @@ public void setUpdateAction(int action) { } private void buildUpdateSQL() { - if (updateAction == RESTRICT) { + if (updateAction == ConstraintActionType.RESTRICT) { return; } - StatementBuilder buff = new 
StatementBuilder(); - appendUpdate(buff); - appendWhere(buff); - updateSQL = buff.toString(); + StringBuilder builder = new StringBuilder(); + appendUpdate(builder); + appendWhere(builder); + updateSQL = builder.toString(); } @Override @@ -568,18 +522,18 @@ public void rebuild() { buildDeleteSQL(); } - private Prepared prepare(Session session, String sql, int action) { + private Prepared prepare(SessionLocal session, String sql, ConstraintActionType action) { Prepared command = session.prepare(sql); - if (action != CASCADE) { + if (action != ConstraintActionType.CASCADE) { ArrayList params = command.getParameters(); for (int i = 0, len = columns.length; i < len; i++) { Column column = columns[i].column; Parameter param = params.get(i); Value value; - if (action == SET_NULL) { + if (action == ConstraintActionType.SET_NULL) { value = ValueNull.INSTANCE; } else { - Expression expr = column.getDefaultExpression(); + Expression expr = column.getEffectiveDefaultExpression(); if (expr == null) { throw DbException.get(ErrorCode.NO_DEFAULT_SET_1, column.getName()); } @@ -591,22 +545,15 @@ private Prepared prepare(Session session, String sql, int action) { return command; } - private void appendUpdate(StatementBuilder buff) { - buff.append("UPDATE ").append(table.getSQL()).append(" SET "); - buff.resetCount(); - for (IndexColumn c : columns) { - buff.appendExceptFirst(" , "); - buff.append(Parser.quoteIdentifier(c.column.getName())).append("=?"); - } + private void appendUpdate(StringBuilder builder) { + builder.append("UPDATE "); + table.getSQL(builder, DEFAULT_SQL_FLAGS).append(" SET "); + IndexColumn.writeColumns(builder, columns, ", ", "=?", IndexColumn.SQL_NO_ORDER); } - private void appendWhere(StatementBuilder buff) { - buff.append(" WHERE "); - buff.resetCount(); - for (IndexColumn c : columns) { - buff.appendExceptFirst(" AND "); - buff.append(Parser.quoteIdentifier(c.column.getName())).append("=?"); - } + private void appendWhere(StringBuilder builder) { + 
builder.append(" WHERE "); + IndexColumn.writeColumns(builder, columns, " AND ", "=?", IndexColumn.SQL_NO_ORDER); } @Override @@ -616,17 +563,15 @@ public Table getRefTable() { @Override public boolean usesIndex(Index idx) { - return idx == index || idx == refIndex; + return idx == index; } @Override public void setIndexOwner(Index index) { if (this.index == index) { indexOwner = true; - } else if (this.refIndex == index) { - refIndexOwner = true; } else { - DbException.throwInternalError(); + throw DbException.getInternalError(index + " " + toString()); } } @@ -636,50 +581,50 @@ public boolean isBefore() { } @Override - public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { if (session.getDatabase().isStarting()) { // don't check at startup return; } - session.startStatementWithinTransaction(); - StatementBuilder buff = new StatementBuilder("SELECT 1 FROM (SELECT "); - for (IndexColumn c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(" FROM ").append(table.getSQL()).append(" WHERE "); - buff.resetCount(); - for (IndexColumn c : columns) { - buff.appendExceptFirst(" AND "); - buff.append(c.getSQL()).append(" IS NOT NULL "); - } - buff.append(" ORDER BY "); - buff.resetCount(); - for (IndexColumn c : columns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - buff.append(") C WHERE NOT EXISTS(SELECT 1 FROM "). - append(refTable.getSQL()).append(" P WHERE "); - buff.resetCount(); - int i = 0; - for (IndexColumn c : columns) { - buff.appendExceptFirst(" AND "); - buff.append("C.").append(c.getSQL()).append('='). 
- append("P.").append(refColumns[i++].getSQL()); - } - buff.append(')'); - String sql = buff.toString(); - ResultInterface r = session.prepare(sql).query(1); - if (r.next()) { - throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - getShortDescription(null, null)); + StringBuilder builder = new StringBuilder("SELECT 1 FROM (SELECT "); + IndexColumn.writeColumns(builder, columns, IndexColumn.SQL_NO_ORDER); + builder.append(" FROM "); + table.getSQL(builder, DEFAULT_SQL_FLAGS).append(" WHERE "); + IndexColumn.writeColumns(builder, columns, " AND ", " IS NOT NULL ", IndexColumn.SQL_NO_ORDER); + builder.append(" ORDER BY "); + IndexColumn.writeColumns(builder, columns, DEFAULT_SQL_FLAGS); + builder.append(") C WHERE NOT EXISTS(SELECT 1 FROM "); + refTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" P WHERE "); + for (int i = 0, l = columns.length; i < l; i++) { + if (i > 0) { + builder.append(" AND "); + } + builder.append("C."); + columns[i].column.getSQL(builder, DEFAULT_SQL_FLAGS).append('=').append("P."); + refColumns[i].column.getSQL(builder, DEFAULT_SQL_FLAGS); } + builder.append(')'); + + session.startStatementWithinTransaction(null); + try { + ResultInterface r = session.prepare(builder.toString()).query(1); + if (r.next()) { + throw DbException.get(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, + getShortDescription(null, null)); + } + } finally { + session.endStatement(); + } + } + + @Override + public Index getIndex() { + return index; } @Override - public Index getUniqueIndex() { - return refIndex; + public ConstraintUnique getReferencedConstraint() { + return refConstraint; } } diff --git a/h2/src/main/org/h2/constraint/ConstraintUnique.java b/h2/src/main/org/h2/constraint/ConstraintUnique.java index 2e480a38c0..3da09e09e8 100644 --- a/h2/src/main/org/h2/constraint/ConstraintUnique.java +++ b/h2/src/main/org/h2/constraint/ConstraintUnique.java @@ -1,21 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.constraint; +import java.util.ArrayList; import java.util.HashSet; -import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.index.Index; import org.h2.result.Row; import org.h2.schema.Schema; import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; /** @@ -35,8 +33,8 @@ public ConstraintUnique(Schema schema, int id, String name, Table table, } @Override - public String getConstraintType() { - return primaryKey ? Constraint.PRIMARY_KEY : Constraint.UNIQUE; + public Type getConstraintType() { + return primaryKey ? Constraint.Type.PRIMARY_KEY : Constraint.Type.UNIQUE; } @Override @@ -44,44 +42,34 @@ public String getCreateSQLForCopy(Table forTable, String quotedName) { return getCreateSQLForCopy(forTable, quotedName, true); } - private String getCreateSQLForCopy(Table forTable, String quotedName, - boolean internalIndex) { - StatementBuilder buff = new StatementBuilder("ALTER TABLE "); - buff.append(forTable.getSQL()).append(" ADD CONSTRAINT "); + private String getCreateSQLForCopy(Table forTable, String quotedName, boolean internalIndex) { + StringBuilder builder = new StringBuilder("ALTER TABLE "); + forTable.getSQL(builder, DEFAULT_SQL_FLAGS).append(" ADD CONSTRAINT "); if (forTable.isHidden()) { - buff.append("IF NOT EXISTS "); + builder.append("IF NOT EXISTS "); } - buff.append(quotedName); + builder.append(quotedName); if (comment != null) { - buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment)); + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); } - buff.append(' 
').append(getTypeName()).append('('); - for (IndexColumn c : columns) { - buff.appendExceptFirst(", "); - buff.append(Parser.quoteIdentifier(c.column.getName())); - } - buff.append(')'); + builder.append(' ').append(getConstraintType().getSqlName()).append('('); + IndexColumn.writeColumns(builder, columns, DEFAULT_SQL_FLAGS).append(')'); if (internalIndex && indexOwner && forTable == this.table) { - buff.append(" INDEX ").append(index.getSQL()); - } - return buff.toString(); - } - - private String getTypeName() { - if (primaryKey) { - return "PRIMARY KEY"; + builder.append(" INDEX "); + index.getSQL(builder, DEFAULT_SQL_FLAGS); } - return "UNIQUE"; + return builder.toString(); } @Override public String getCreateSQLWithoutIndexes() { - return getCreateSQLForCopy(table, getSQL(), false); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS), false); } @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL()); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } public void setColumns(IndexColumn[] columns) { @@ -105,7 +93,16 @@ public void setIndex(Index index, boolean isOwner) { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { + ArrayList constraints = table.getConstraints(); + if (constraints != null) { + constraints = new ArrayList<>(table.getConstraints()); + for (Constraint c : constraints) { + if (c.getReferencedConstraint() == this) { + database.removeSchemaObject(session, c); + } + } + } table.removeConstraint(this); if (indexOwner) { table.removeIndexOrTransferOwnership(session, index); @@ -118,7 +115,7 @@ public void removeChildrenAndResources(Session session) { } @Override - public void checkRow(Session session, Table t, Row oldRow, Row newRow) { + public void checkRow(SessionLocal session, Table t, Row oldRow, Row newRow) { // unique index check is enough } @@ -134,7 +131,7 @@ public void setIndexOwner(Index index) { 
@Override public HashSet getReferencedColumns(Table table) { - HashSet result = New.hashSet(); + HashSet result = new HashSet<>(); for (IndexColumn c : columns) { result.add(c.column); } @@ -147,13 +144,13 @@ public boolean isBefore() { } @Override - public void checkExistingData(Session session) { + public void checkExistingData(SessionLocal session) { // no need to check: when creating the unique index any problems are // found } @Override - public Index getUniqueIndex() { + public Index getIndex() { return index; } diff --git a/h2/src/main/org/h2/constraint/DomainColumnResolver.java b/h2/src/main/org/h2/constraint/DomainColumnResolver.java new file mode 100644 index 0000000000..1d01e1afe5 --- /dev/null +++ b/h2/src/main/org/h2/constraint/DomainColumnResolver.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.constraint; + +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * The single column resolver resolves the VALUE column. + * It is used to parse a domain constraint. + */ +public class DomainColumnResolver implements ColumnResolver { + + private final Column column; + private Value value; + private String name; + + public DomainColumnResolver(TypeInfo typeInfo) { + this.column = new Column("VALUE", typeInfo); + } + + public void setValue(Value value) { + this.value = value; + } + + @Override + public Value getValue(Column col) { + return value; + } + + @Override + public Column[] getColumns() { + return new Column[] { column }; + } + + @Override + public Column findColumn(String name) { + return null; + } + + void setColumnName(String newName) { + name = newName; + } + + void resetColumnName() { + name = null; + } + + /** + * Return column name to use or null. 
+ * + * @return column name to use or null + */ + public String getColumnName() { + return name; + } + + /** + * Return the type of the column. + * + * @return the type of the column + */ + public TypeInfo getValueType() { + return column.getType(); + } + +} diff --git a/h2/src/main/org/h2/constraint/package.html b/h2/src/main/org/h2/constraint/package.html index e3d1c798a6..a7e1d88a70 100644 --- a/h2/src/main/org/h2/constraint/package.html +++ b/h2/src/main/org/h2/constraint/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/engine/CastDataProvider.java b/h2/src/main/org/h2/engine/CastDataProvider.java new file mode 100644 index 0000000000..9682dda61a --- /dev/null +++ b/h2/src/main/org/h2/engine/CastDataProvider.java @@ -0,0 +1,53 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import org.h2.api.JavaObjectSerializer; +import org.h2.util.TimeZoneProvider; +import org.h2.value.ValueTimestampTimeZone; + +/** + * Provides information for type casts and comparison operations. + */ +public interface CastDataProvider { + + /** + * Returns the current timestamp with maximum resolution. The value must be + * the same within a transaction or within execution of a command. + * + * @return the current timestamp for CURRENT_TIMESTAMP(9) + */ + ValueTimestampTimeZone currentTimestamp(); + + /** + * Returns the current time zone. + * + * @return the current time zone + */ + TimeZoneProvider currentTimeZone(); + + /** + * Returns the database mode. + * + * @return the database mode + */ + Mode getMode(); + + /** + * Returns the custom Java object serializer, or {@code null}. + * + * @return the custom Java object serializer, or {@code null} + */ + JavaObjectSerializer getJavaObjectSerializer(); + + /** + * Returns are ENUM values 0-based. 
+ * + * @return are ENUM values 0-based + */ + boolean zeroBasedEnums(); + +} diff --git a/h2/src/main/org/h2/engine/Comment.java b/h2/src/main/org/h2/engine/Comment.java index 066681f32e..e3af80fb67 100644 --- a/h2/src/main/org/h2/engine/Comment.java +++ b/h2/src/main/org/h2/engine/Comment.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -13,25 +13,25 @@ /** * Represents a database object comment. */ -public class Comment extends DbObjectBase { +public final class Comment extends DbObject { private final int objectType; - private final String objectName; + private final String quotedObjectName; private String commentText; public Comment(Database database, int id, DbObject obj) { - initDbObjectBase(database, id, getKey(obj), Trace.DATABASE); + super(database, id, getKey(obj), Trace.DATABASE); this.objectType = obj.getType(); - this.objectName = obj.getSQL(); + this.quotedObjectName = obj.getSQL(DEFAULT_SQL_FLAGS); } @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } private static String getTypeName(int type) { - switch(type) { + switch (type) { case DbObject.CONSTANT: return "CONSTANT"; case DbObject.CONSTRAINT: @@ -52,7 +52,7 @@ private static String getTypeName(int type) { return "TRIGGER"; case DbObject.USER: return "USER"; - case DbObject.USER_DATATYPE: + case DbObject.DOMAIN: return "DOMAIN"; default: // not supported by parser, but required when trying to find a @@ -61,20 +61,15 @@ private static String getTypeName(int type) { } } - @Override - public String getDropSQL() { - return null; - } - @Override public String getCreateSQL() { 
StringBuilder buff = new StringBuilder("COMMENT ON "); buff.append(getTypeName(objectType)).append(' '). - append(objectName).append(" IS "); + append(quotedObjectName).append(" IS "); if (commentText == null) { buff.append("NULL"); } else { - buff.append(StringUtils.quoteStringSQL(commentText)); + StringUtils.quoteStringSQL(buff, commentText); } return buff.toString(); } @@ -85,13 +80,13 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); } @Override public void checkRename() { - DbException.throwInternalError(); + throw DbException.getInternalError(); } /** @@ -102,7 +97,9 @@ public void checkRename() { * @return the key name */ static String getKey(DbObject obj) { - return getTypeName(obj.getType()) + " " + obj.getSQL(); + StringBuilder builder = new StringBuilder(getTypeName(obj.getType())).append(' '); + obj.getSQL(builder, DEFAULT_SQL_FLAGS); + return builder.toString(); } /** diff --git a/h2/src/main/org/h2/engine/ConnectionInfo.java b/h2/src/main/org/h2/engine/ConnectionInfo.java index d1d821ebce..fdd0ee260a 100644 --- a/h2/src/main/org/h2/engine/ConnectionInfo.java +++ b/h2/src/main/org/h2/engine/ConnectionInfo.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; +import java.io.File; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -16,19 +16,24 @@ import org.h2.command.dml.SetTypes; import org.h2.message.DbException; import org.h2.security.SHA256; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; -import org.h2.util.New; +import org.h2.store.fs.encrypt.FilePathEncrypt; +import org.h2.store.fs.rec.FilePathRec; +import org.h2.util.IOUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; /** * Encapsulates the connection settings, including user name and password. */ public class ConnectionInfo implements Cloneable { - private static final HashSet KNOWN_SETTINGS = New.hashSet(); + + private static final HashSet KNOWN_SETTINGS; + + private static final HashSet IGNORED_BY_PARSER; private Properties prop = new Properties(); private String originalURL; @@ -38,6 +43,8 @@ public class ConnectionInfo implements Cloneable { private byte[] fileEncryptionKey; private byte[] userPasswordHash; + private TimeZoneProvider timeZone; + /** * The database name */ @@ -48,6 +55,8 @@ public class ConnectionInfo implements Cloneable { private boolean persistent; private boolean unnamed; + private NetworkConnectionInfo networkConnectionInfo; + /** * Create a connection info object. * @@ -64,21 +73,36 @@ public ConnectionInfo(String name) { * Create a connection info object. 
* * @param u the database URL (must start with jdbc:h2:) - * @param info the connection properties + * @param info the connection properties or {@code null} + * @param user the user name or {@code null} + * @param password + * the password as {@code String} or {@code char[]}, or + * {@code null} */ - public ConnectionInfo(String u, Properties info) { + public ConnectionInfo(String u, Properties info, String user, Object password) { u = remapURL(u); - this.originalURL = u; + originalURL = url = u; if (!u.startsWith(Constants.START_URL)) { - throw DbException.getInvalidValueException("url", u); + throw getFormatException(); + } + if (info != null) { + readProperties(info); + } + if (user != null) { + prop.put("USER", user); + } + if (password != null) { + prop.put("PASSWORD", password); } - this.url = u; - readProperties(info); readSettingsFromURL(); + Object timeZoneName = prop.remove("TIME ZONE"); + if (timeZoneName != null) { + timeZone = TimeZoneProvider.ofId(timeZoneName.toString()); + } setUserName(removeProperty("USER", "")); - convertPasswords(); name = url.substring(Constants.START_URL.length()); parseName(); + convertPasswords(); String recoverTest = removeProperty("RECOVER_TEST", null); if (recoverTest != null) { FilePathRec.register(); @@ -92,26 +116,73 @@ public ConnectionInfo(String u, Properties info) { } static { - ArrayList list = SetTypes.getTypes(); - HashSet set = KNOWN_SETTINGS; - set.addAll(list); - String[] connectionTime = { "ACCESS_MODE_DATA", "AUTOCOMMIT", "CIPHER", - "CREATE", "CACHE_TYPE", "FILE_LOCK", "IGNORE_UNKNOWN_SETTINGS", - "IFEXISTS", "INIT", "PASSWORD", "RECOVER", "RECOVER_TEST", - "USER", "AUTO_SERVER", "AUTO_SERVER_PORT", "NO_UPGRADE", - "AUTO_RECONNECT", "OPEN_NEW", "PAGE_SIZE", "PASSWORD_HASH", "JMX" }; - for (String key : connectionTime) { - if (SysProperties.CHECK && set.contains(key)) { - DbException.throwInternalError(key); + String[] commonSettings = { // + "ACCESS_MODE_DATA", "AUTO_RECONNECT", "AUTO_SERVER", 
"AUTO_SERVER_PORT", // + "CACHE_TYPE", // + "FILE_LOCK", // + "JMX", // + "NETWORK_TIMEOUT", // + "OLD_INFORMATION_SCHEMA", "OPEN_NEW", // + "PAGE_SIZE", // + "RECOVER", // + }; + String[] settings = { // + "AUTHREALM", "AUTHZPWD", "AUTOCOMMIT", // + "CIPHER", "CREATE", // + "FORBID_CREATION", // + "IGNORE_UNKNOWN_SETTINGS", "IFEXISTS", "INIT", // + "NO_UPGRADE", // + "PASSWORD", "PASSWORD_HASH", // + "RECOVER_TEST", // + "USER" // + }; + HashSet set = new HashSet<>(128); + set.addAll(SetTypes.getTypes()); + for (String setting : commonSettings) { + if (!set.add(setting)) { + throw DbException.getInternalError(setting); + } + } + for (String setting : settings) { + if (!set.add(setting)) { + throw DbException.getInternalError(setting); } - set.add(key); } + KNOWN_SETTINGS = set; + settings = new String[] { // + "ASSERT", // + "BINARY_COLLATION", // + "DB_CLOSE_ON_EXIT", // + "PAGE_STORE", // + "UUID_COLLATION", // + }; + set = new HashSet<>(32); + for (String setting : commonSettings) { + set.add(setting); + } + for (String setting : settings) { + set.add(setting); + } + IGNORED_BY_PARSER = set; } private static boolean isKnownSetting(String s) { return KNOWN_SETTINGS.contains(s); } + /** + * Returns whether setting with the specified name should be ignored by + * parser. 
+ * + * @param name + * the name of the setting + * @return whether setting with the specified name should be ignored by + * parser + */ + public static boolean isIgnoredByParser(String name) { + return IGNORED_BY_PARSER.contains(name); + } + @Override public ConnectionInfo clone() throws CloneNotSupportedException { ConnectionInfo clone = (ConnectionInfo) super.clone(); @@ -145,11 +216,7 @@ private void parseName() { persistent = true; } if (persistent && !remote) { - if ("/".equals(SysProperties.FILE_SEPARATOR)) { - name = name.replace('\\', '/'); - } else { - name = name.replace('/', '\\'); - } + name = IOUtils.nameSeparatorsToNative(name); } } @@ -165,7 +232,7 @@ public void setBaseDir(String dir) { boolean absolute = FileUtils.isAbsolute(name); String n; String prefix = null; - if (dir.endsWith(SysProperties.FILE_SEPARATOR)) { + if (dir.endsWith(File.separator)) { dir = dir.substring(0, dir.length() - 1); } if (absolute) { @@ -173,7 +240,7 @@ public void setBaseDir(String dir) { } else { n = FileUtils.unwrap(name); prefix = name.substring(0, name.length() - n.length()); - n = dir + SysProperties.FILE_SEPARATOR + n; + n = dir + File.separatorChar + n; } String normalizedName = FileUtils.unwrap(FileUtils.toRealPath(n)); if (normalizedName.equals(absDir) || !normalizedName.startsWith(absDir)) { @@ -192,7 +259,7 @@ public void setBaseDir(String dir) { absDir); } if (!absolute) { - name = prefix + dir + SysProperties.FILE_SEPARATOR + FileUtils.unwrap(name); + name = prefix + dir + File.separatorChar + FileUtils.unwrap(name); } } } @@ -225,8 +292,7 @@ boolean isUnnamedInMemory() { } private void readProperties(Properties info) { - Object[] list = new Object[info.size()]; - info.keySet().toArray(list); + Object[] list = info.keySet().toArray(); DbSettings s = null; for (Object k : list) { String key = StringUtils.toUpperEnglish(k.toString()); @@ -248,14 +314,15 @@ private void readProperties(Properties info) { } private void readSettingsFromURL() { - DbSettings 
defaultSettings = DbSettings.getDefaultSettings(); + DbSettings defaultSettings = DbSettings.DEFAULT; int idx = url.indexOf(';'); if (idx >= 0) { String settings = url.substring(idx + 1); url = url.substring(0, idx); + String unknownSetting = null; String[] list = StringUtils.arraySplit(settings, ';', false); for (String setting : list) { - if (setting.length() == 0) { + if (setting.isEmpty()) { continue; } int equal = setting.indexOf('='); @@ -265,20 +332,32 @@ private void readSettingsFromURL() { String value = setting.substring(equal + 1); String key = setting.substring(0, equal); key = StringUtils.toUpperEnglish(key); - if (!isKnownSetting(key) && !defaultSettings.containsKey(key)) { - throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, key); - } - String old = prop.getProperty(key); - if (old != null && !old.equals(value)) { - throw DbException.get(ErrorCode.DUPLICATE_PROPERTY_1, key); + if (isKnownSetting(key) || defaultSettings.containsKey(key)) { + String old = prop.getProperty(key); + if (old != null && !old.equals(value)) { + throw DbException.get(ErrorCode.DUPLICATE_PROPERTY_1, key); + } + prop.setProperty(key, value); + } else { + unknownSetting = key; } - prop.setProperty(key, value); + } + if (unknownSetting != null // + && !Utils.parseBoolean(prop.getProperty("IGNORE_UNKNOWN_SETTINGS"), false, false)) { + throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, unknownSetting); } } } + private void preservePasswordForAuthentication(Object password) { + if ((!isRemote() || isSSL()) && prop.containsKey("AUTHREALM") && password!=null) { + prop.put("AUTHZPWD",password instanceof char[] ? 
new String((char[])password) : password); + } + } + private char[] removePassword() { Object p = prop.remove("PASSWORD"); + preservePasswordForAuthentication(p); if (p == null) { return new char[0]; } else if (p instanceof char[]) { @@ -307,10 +386,8 @@ private void convertPasswords() { if (space < 0) { throw DbException.get(ErrorCode.WRONG_PASSWORD_FORMAT); } - char[] np = new char[password.length - space - 1]; - char[] filePassword = new char[space]; - System.arraycopy(password, space + 1, np, 0, np.length); - System.arraycopy(password, 0, filePassword, 0, space); + char[] np = Arrays.copyOfRange(password, space + 1, password.length); + char[] filePassword = Arrays.copyOf(password, space); Arrays.fill(password, (char) 0); password = np; fileEncryptionKey = FilePathEncrypt.getPasswordBytes(filePassword); @@ -324,7 +401,7 @@ private static byte[] hashPassword(boolean passwordHash, String userName, if (passwordHash) { return StringUtils.convertHexToBytes(new String(password)); } - if (userName.length() == 0 && password.length == 0) { + if (userName.isEmpty() && password.length == 0) { return new byte[0]; } return SHA256.getKeyPasswordHash(userName, password); @@ -337,16 +414,8 @@ private static byte[] hashPassword(boolean passwordHash, String userName, * @param defaultValue the default value * @return the value */ - boolean getProperty(String key, boolean defaultValue) { - String x = getProperty(key, null); - if (x == null) { - return defaultValue; - } - // support 0 / 1 (like the parser) - if (x.length() == 1 && Character.isDigit(x.charAt(0))) { - return Integer.parseInt(x) != 0; - } - return Boolean.parseBoolean(x); + public boolean getProperty(String key, boolean defaultValue) { + return Utils.parseBoolean(getProperty(key, null), defaultValue, false); } /** @@ -357,8 +426,7 @@ boolean getProperty(String key, boolean defaultValue) { * @return the value */ public boolean removeProperty(String key, boolean defaultValue) { - String x = removeProperty(key, null); - 
return x == null ? defaultValue : Boolean.parseBoolean(x); + return Utils.parseBoolean(removeProperty(key, null), defaultValue, false); } /** @@ -370,7 +438,7 @@ public boolean removeProperty(String key, boolean defaultValue) { */ String removeProperty(String key, String defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } Object x = prop.remove(key); return x == null ? defaultValue : x.toString(); @@ -382,42 +450,28 @@ String removeProperty(String key, String defaultValue) { * @return the database name */ public String getName() { - if (persistent) { - if (nameNormalized == null) { - if (!SysProperties.IMPLICIT_RELATIVE_PATH) { - if (!FileUtils.isAbsolute(name)) { - if (name.indexOf("./") < 0 && - name.indexOf(".\\") < 0 && - name.indexOf(":/") < 0 && - name.indexOf(":\\") < 0) { - // the name could start with "./", or - // it could start with a prefix such as "nio:./" - // for Windows, the path "\test" is not considered - // absolute as the drive letter is missing, - // but we consider it absolute - throw DbException.get( - ErrorCode.URL_RELATIVE_TO_CWD, - originalURL); - } - } - } - String suffix = Constants.SUFFIX_PAGE_FILE; - String n; - if (FileUtils.exists(name + suffix)) { - n = FileUtils.toRealPath(name + suffix); - } else { - suffix = Constants.SUFFIX_MV_FILE; - n = FileUtils.toRealPath(name + suffix); - } - String fileName = FileUtils.getName(n); - if (fileName.length() < suffix.length() + 1) { - throw DbException.get(ErrorCode.INVALID_DATABASE_NAME_1, name); - } - nameNormalized = n.substring(0, n.length() - suffix.length()); + if (!persistent) { + return name; + } + if (nameNormalized == null) { + if (!FileUtils.isAbsolute(name) && !name.contains("./") && !name.contains(".\\") && !name.contains(":/") + && !name.contains(":\\")) { + // the name could start with "./", or + // it could start with a prefix such as "nioMapped:./" + // for Windows, the path 
"\test" is not considered + // absolute as the drive letter is missing, + // but we consider it absolute + throw DbException.get(ErrorCode.URL_RELATIVE_TO_CWD, originalURL); } - return nameNormalized; + String suffix = Constants.SUFFIX_MV_FILE; + String n = FileUtils.toRealPath(name + suffix); + String fileName = FileUtils.getName(n); + if (fileName.length() < suffix.length() + 1) { + throw DbException.get(ErrorCode.INVALID_DATABASE_NAME_1, name); + } + nameNormalized = n.substring(0, n.length() - suffix.length()); } - return name; + return nameNormalized; } /** @@ -457,9 +511,7 @@ byte[] getUserPasswordHash() { * @return the property keys */ String[] getKeys() { - String[] keys = new String[prop.size()]; - prop.keySet().toArray(keys); - return keys; + return prop.keySet().toArray(new String[prop.size()]); } /** @@ -470,7 +522,7 @@ String[] getKeys() { */ String getProperty(String key) { Object value = prop.get(key); - if (value == null || !(value instanceof String)) { + if (!(value instanceof String)) { return null; } return value.toString(); @@ -485,7 +537,7 @@ String getProperty(String key) { */ int getProperty(String key, int defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } String s = getProperty(key); return s == null ? defaultValue : Integer.parseInt(s); @@ -500,7 +552,7 @@ int getProperty(String key, int defaultValue) { */ public String getProperty(String key, String defaultValue) { if (SysProperties.CHECK && !isKnownSetting(key)) { - DbException.throwInternalError(key); + throw DbException.getInternalError(key); } String s = getProperty(key); return s == null ? defaultValue : s; @@ -618,13 +670,21 @@ public void setOriginalURL(String url) { } /** - * Generate an URL format exception. + * Returns the time zone. + * + * @return the time zone + */ + public TimeZoneProvider getTimeZone() { + return timeZone; + } + + /** + * Generate a URL format exception. 
* * @return the exception */ DbException getFormatException() { - String format = Constants.URL_FORMAT; - return DbException.get(ErrorCode.URL_FORMAT_ERROR_2, format, url); + return DbException.get(ErrorCode.URL_FORMAT_ERROR_2, Constants.URL_FORMAT, url); } /** @@ -638,9 +698,27 @@ public void setServerKey(String serverKey) { this.name = serverKey; } + /** + * Returns the network connection information, or {@code null}. + * + * @return the network connection information, or {@code null} + */ + public NetworkConnectionInfo getNetworkConnectionInfo() { + return networkConnectionInfo; + } + + /** + * Sets the network connection information. + * + * @param networkConnectionInfo the network connection information + */ + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + this.networkConnectionInfo = networkConnectionInfo; + } + public DbSettings getDbSettings() { - DbSettings defaultSettings = DbSettings.getDefaultSettings(); - HashMap s = New.hashMap(); + DbSettings defaultSettings = DbSettings.DEFAULT; + HashMap s = new HashMap<>(DbSettings.TABLE_SIZE); for (Object k : prop.keySet()) { String key = k.toString(); if (!isKnownSetting(key) && defaultSettings.containsKey(key)) { @@ -652,7 +730,7 @@ public DbSettings getDbSettings() { private static String remapURL(String url) { String urlMap = SysProperties.URL_MAP; - if (urlMap != null && urlMap.length() > 0) { + if (urlMap != null && !urlMap.isEmpty()) { try { SortedProperties prop; prop = SortedProperties.loadProperties(urlMap); @@ -662,7 +740,7 @@ private static String remapURL(String url) { prop.store(urlMap); } else { url2 = url2.trim(); - if (url2.length() > 0) { + if (!url2.isEmpty()) { return url2; } } @@ -673,4 +751,11 @@ private static String remapURL(String url) { return url; } + /** + * Clear authentication properties. 
+ */ + public void cleanAuthenticationInfo() { + removeProperty("AUTHREALM", false); + removeProperty("AUTHZPWD", false); + } } diff --git a/h2/src/main/org/h2/engine/Constants.java b/h2/src/main/org/h2/engine/Constants.java index f3428a7248..d71cf6b656 100644 --- a/h2/src/main/org/h2/engine/Constants.java +++ b/h2/src/main/org/h2/engine/Constants.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; -import java.nio.charset.Charset; import java.sql.ResultSet; /** @@ -16,22 +15,13 @@ public class Constants { /** * The build date is updated for each public release. */ - public static final String BUILD_DATE = "2015-04-10"; + public static final String BUILD_DATE = "2022-01-17"; /** - * The build date of the last stable release. + * Sequential version number. Even numbers are used for official releases, + * odd numbers are used for development builds. */ - public static final String BUILD_DATE_STABLE = "2014-04-05"; - - /** - * The build id is incremented for each public release. - */ - public static final int BUILD_ID = 187; - - /** - * The build id of the last stable release. - */ - public static final int BUILD_ID_STABLE = 176; + public static final int BUILD_ID = 210; /** * Whether this is a snapshot version. @@ -47,64 +37,48 @@ public class Constants { public static final String BUILD_VENDOR_AND_VERSION = null; /** - * The TCP protocol version number 6. + * The TCP protocol version number 17. + * @since 1.4.197 (2018-03-18) */ - public static final int TCP_PROTOCOL_VERSION_6 = 6; + public static final int TCP_PROTOCOL_VERSION_17 = 17; /** - * The TCP protocol version number 7. + * The TCP protocol version number 18. 
+ * @since 1.4.198 (2019-02-22) */ - public static final int TCP_PROTOCOL_VERSION_7 = 7; + public static final int TCP_PROTOCOL_VERSION_18 = 18; /** - * The TCP protocol version number 8. + * The TCP protocol version number 19. + * @since 1.4.200 (2019-10-14) */ - public static final int TCP_PROTOCOL_VERSION_8 = 8; + public static final int TCP_PROTOCOL_VERSION_19 = 19; /** - * The TCP protocol version number 9. + * The TCP protocol version number 20. + * @since 2.0.202 (2021-11-25) */ - public static final int TCP_PROTOCOL_VERSION_9 = 9; + public static final int TCP_PROTOCOL_VERSION_20 = 20; /** - * The TCP protocol version number 10. + * Minimum supported version of TCP protocol. */ - public static final int TCP_PROTOCOL_VERSION_10 = 10; + public static final int TCP_PROTOCOL_VERSION_MIN_SUPPORTED = TCP_PROTOCOL_VERSION_17; /** - * The TCP protocol version number 11. + * Maximum supported version of TCP protocol. */ - public static final int TCP_PROTOCOL_VERSION_11 = 11; - - /** - * The TCP protocol version number 12. - */ - public static final int TCP_PROTOCOL_VERSION_12 = 12; - - /** - * The TCP protocol version number 13. - */ - public static final int TCP_PROTOCOL_VERSION_13 = 13; - - /** - * The TCP protocol version number 14. - */ - public static final int TCP_PROTOCOL_VERSION_14 = 14; - - /** - * The TCP protocol version number 15. - */ - public static final int TCP_PROTOCOL_VERSION_15 = 15; + public static final int TCP_PROTOCOL_VERSION_MAX_SUPPORTED = TCP_PROTOCOL_VERSION_20; /** * The major version of this database. */ - public static final int VERSION_MAJOR = 1; + public static final int VERSION_MAJOR = 2; /** * The minor version of this database. */ - public static final int VERSION_MINOR = 4; + public static final int VERSION_MINOR = 1; /** * The lock mode that means no locking is used at all. @@ -146,6 +120,11 @@ public class Constants { */ public static final int ALLOW_LITERALS_NUMBERS = 1; + /** + * SNAPSHOT isolation level of transaction. 
+ */ + public static final int TRANSACTION_SNAPSHOT = 6; + /** * Whether searching in Blob values should be supported. */ @@ -156,11 +135,6 @@ public class Constants { */ public static final int CACHE_MIN_RECORDS = 16; - /** - * The default cache size in KB for each GB of RAM. - */ - public static final int CACHE_SIZE_DEFAULT = 64 * 1024; - /** * The default cache type. */ @@ -219,20 +193,10 @@ public class Constants { */ public static final int DEFAULT_MAX_LENGTH_INPLACE_LOB = 256; - /** - * The default value for the maximum transaction log size. - */ - public static final long DEFAULT_MAX_LOG_SIZE = 16 * 1024 * 1024; - - /** - * The default value for the MAX_MEMORY_UNDO setting. - */ - public static final int DEFAULT_MAX_MEMORY_UNDO = 50000; - /** * The default for the setting MAX_OPERATION_MEMORY. */ - public static final int DEFAULT_MAX_OPERATION_MEMORY = 100000; + public static final int DEFAULT_MAX_OPERATION_MEMORY = 100_000; /** * The default page size to use for new databases. @@ -291,43 +255,47 @@ public class Constants { public static final int LOCK_SLEEP = 1000; /** - * The highest possible parameter index. + * The maximum allowed length of identifiers. */ - public static final int MAX_PARAMETER_INDEX = 100000; + public static final int MAX_IDENTIFIER_LENGTH = 256; /** - * The memory needed by a object of class Data + * The maximum number of columns in a table, select statement or row value. */ - public static final int MEMORY_DATA = 24; + public static final int MAX_COLUMNS = 16_384; /** - * This value is used to calculate the average memory usage. + * The maximum allowed length for character string, binary string, and other + * data types based on them; excluding LOB data types. */ - public static final int MEMORY_FACTOR = 64; + public static final int MAX_STRING_LENGTH = 1024 * 1024; /** - * The memory needed by a regular object with at least one field. + * The maximum allowed precision of numeric data types. 
*/ - // Java 6, 64 bit: 24 - // Java 6, 32 bit: 12 - public static final int MEMORY_OBJECT = 24; + public static final int MAX_NUMERIC_PRECISION = 100_000; /** - * The memory needed by an object of class PageBtree. + * The maximum allowed cardinality of array. */ - public static final int MEMORY_PAGE_BTREE = - 112 + MEMORY_DATA + 2 * MEMORY_OBJECT; + public static final int MAX_ARRAY_CARDINALITY = 65_536; /** - * The memory needed by an object of class PageData. + * The highest possible parameter index. */ - public static final int MEMORY_PAGE_DATA = - 144 + MEMORY_DATA + 3 * MEMORY_OBJECT; + public static final int MAX_PARAMETER_INDEX = 100_000; /** - * The memory needed by an object of class PageDataOverflow. + * The memory needed by a regular object with at least one field. */ - public static final int MEMORY_PAGE_DATA_OVERFLOW = 96 + MEMORY_DATA; + // Java 6, 64 bit: 24 + // Java 6, 32 bit: 12 + public static final int MEMORY_OBJECT = 24; + + /** + * The memory needed by an array. + */ + public static final int MEMORY_ARRAY = 24; /** * The memory needed by a pointer. @@ -341,11 +309,6 @@ public class Constants { */ public static final int MEMORY_ROW = 40; - /** - * The minimum write delay that causes commits to be delayed. - */ - public static final int MIN_WRITE_DELAY = 5; - /** * The name prefix used for indexes that are not explicitly named. */ @@ -362,6 +325,11 @@ public class Constants { */ public static final String PREFIX_PRIMARY_KEY = "PRIMARY_KEY_"; + /** + * The name prefix used for query aliases that are not explicitly named. + */ + public static final String PREFIX_QUERY_ALIAS = "QUERY_ALIAS_"; + /** * Every user belongs to this role. */ @@ -372,11 +340,31 @@ public class Constants { */ public static final int SALT_LEN = 8; + /** + * The identity of INFORMATION_SCHEMA. + */ + public static final int INFORMATION_SCHEMA_ID = -1; + + /** + * The identity of PUBLIC schema. 
+ */ + public static final int MAIN_SCHEMA_ID = 0; + /** * The name of the default schema. */ public static final String SCHEMA_MAIN = "PUBLIC"; + /** + * The identity of pg_catalog schema. + */ + public static final int PG_CATALOG_SCHEMA_ID = -1_000; + + /** + * The name of the pg_catalog schema. + */ + public static final String SCHEMA_PG_CATALOG = "PG_CATALOG"; + /** * The default selectivity (used if the selectivity is not calculated). */ @@ -385,7 +373,7 @@ public class Constants { /** * The number of distinct values to keep in memory when running ANALYZE. */ - public static final int SELECTIVITY_DISTINCT_COUNT = 10000; + public static final int SELECTIVITY_DISTINCT_COUNT = 10_000; /** * The default directory name of the server properties file for the H2 @@ -409,22 +397,6 @@ public class Constants { */ public static final String START_URL = "jdbc:h2:"; - /** - * The file name suffix of all database files. - */ - public static final String SUFFIX_DB_FILE = ".db"; - - /** - * The file name suffix of large object files. - */ - public static final String SUFFIX_LOB_FILE = ".lob.db"; - - /** - * The suffix of the directory name used if LOB objects are stored in a - * directory. - */ - public static final String SUFFIX_LOBS_DIRECTORY = ".lobs.db"; - /** * The file name suffix of file lock files that are used to make sure a * database is open by only one process at any time. @@ -436,10 +408,6 @@ public class Constants { */ public static final String SUFFIX_OLD_DATABASE_FILE = ".data.db"; - /** - * The file name suffix of page files. - */ - public static final String SUFFIX_PAGE_FILE = ".h2.db"; /** * The file name suffix of a MVStore file. */ @@ -467,15 +435,11 @@ public class Constants { public static final String SUFFIX_TRACE_FILE = ".trace.db"; /** - * The delay that is to be used if throttle has been enabled. + * How often we check to see if we need to apply a throttling delay if SET + * THROTTLE has been used. 
*/ public static final int THROTTLE_DELAY = 50; - /** - * The maximum size of an undo log block. - */ - public static final int UNDO_BLOCK_SIZE = 1024 * 1024; - /** * The database URL format in simplified Backus-Naur form. */ @@ -488,16 +452,11 @@ public class Constants { */ public static final String USER_PACKAGE = "org.h2.dynamic"; - /** - * Name of the character encoding format. - */ - public static final Charset UTF8 = Charset.forName("UTF-8"); - /** * The maximum time in milliseconds to keep the cost of a view. * 10000 means 10 seconds. */ - public static final int VIEW_COST_CACHE_MAX_AGE = 10000; + public static final int VIEW_COST_CACHE_MAX_AGE = 10_000; /** * The name of the index cache that is used for temporary view (subqueries @@ -505,44 +464,42 @@ public class Constants { */ public static final int VIEW_INDEX_CACHE_SIZE = 64; - private Constants() { - // utility class - } + /** + * The maximum number of entries in query statistics. + */ + public static final int QUERY_STATISTICS_MAX_ENTRIES = 100; + + /** + * Announced version for PgServer. + */ + public static final String PG_VERSION = "8.2.23"; /** - * Get the version of this product, consisting of major version, minor + * The version of this product, consisting of major version, minor * version, and build id. - * - * @return the version number */ - public static String getVersion() { - String version = VERSION_MAJOR + "." + VERSION_MINOR + "." + BUILD_ID; + public static final String VERSION; + + /** + * The complete version number of this database, consisting of + * the major version, the minor version, the build id, and the build date. + */ + public static final String FULL_VERSION; + + static { + String version = VERSION_MAJOR + "." + VERSION_MINOR + '.' 
+ BUILD_ID; if (BUILD_VENDOR_AND_VERSION != null) { - version += "_" + BUILD_VENDOR_AND_VERSION; + version += '_' + BUILD_VENDOR_AND_VERSION; } if (BUILD_SNAPSHOT) { version += "-SNAPSHOT"; } - return version; - } - - /** - * Get the last stable version name. - * - * @return the version number - */ - public static Object getVersionStable() { - return "1.3." + BUILD_ID_STABLE; + VERSION = version; + FULL_VERSION = version + (" (" + BUILD_DATE + ')'); } - /** - * Get the complete version number of this database, consisting of - * the major version, the minor version, the build id, and the build date. - * - * @return the complete version - */ - public static String getFullVersion() { - return getVersion() + " (" + BUILD_DATE + ")"; + private Constants() { + // utility class } } diff --git a/h2/src/main/org/h2/engine/Database.java b/h2/src/main/org/h2/engine/Database.java index f3ed06594e..f7d6958c4e 100644 --- a/h2/src/main/org/h2/engine/Database.java +++ b/h2/src/main/org/h2/engine/Database.java @@ -1,73 +1,88 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; -import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Properties; +import java.util.Map; +import java.util.Objects; import java.util.Set; -import java.util.StringTokenizer; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; +import org.h2.api.TableEngine; import org.h2.command.CommandInterface; +import org.h2.command.Prepared; import org.h2.command.ddl.CreateTableData; import org.h2.command.dml.SetTypes; import org.h2.constraint.Constraint; +import org.h2.constraint.Constraint.Type; +import org.h2.engine.Mode.ModeEnum; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.index.IndexType; -import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceSystem; -import org.h2.mvstore.db.MVTableEngine; +import org.h2.mode.DefaultNullOrdering; +import org.h2.mode.PgCatalogSchema; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.db.LobStorageMap; +import org.h2.mvstore.db.Store; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; +import org.h2.schema.InformationSchema; import org.h2.schema.Schema; import org.h2.schema.SchemaObject; import org.h2.schema.Sequence; import org.h2.schema.TriggerObject; +import org.h2.security.auth.Authenticator; import org.h2.store.DataHandler; import org.h2.store.FileLock; +import org.h2.store.FileLockMethod; import org.h2.store.FileStore; import org.h2.store.InDoubtTransaction; -import org.h2.store.LobStorageBackend; import org.h2.store.LobStorageFrontend; import 
org.h2.store.LobStorageInterface; -import org.h2.store.LobStorageMap; -import org.h2.store.PageStore; -import org.h2.store.WriterThread; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FileEncrypt; import org.h2.table.Column; import org.h2.table.IndexColumn; -import org.h2.table.MetaTable; import org.h2.table.Table; import org.h2.table.TableLinkConnection; +import org.h2.table.TableSynonym; +import org.h2.table.TableType; import org.h2.table.TableView; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Server; -import org.h2.util.BitField; import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; -import org.h2.util.New; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.SourceCompiler; import org.h2.util.StringUtils; import org.h2.util.TempFileDeleter; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; +import org.h2.value.CaseInsensitiveConcurrentMap; import org.h2.value.CaseInsensitiveMap; import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.TypeInfo; +import org.h2.value.ValueInteger; +import org.h2.value.ValueTimestampTimeZone; /** * There is one database object per open database. 
@@ -77,10 +92,33 @@ * * @since 2004-04-15 22:49 */ -public class Database implements DataHandler { +public final class Database implements DataHandler, CastDataProvider { private static int initialPowerOffCount; + private static final boolean ASSERT; + + private static final ThreadLocal META_LOCK_DEBUGGING; + private static final ThreadLocal META_LOCK_DEBUGGING_DB; + private static final ThreadLocal META_LOCK_DEBUGGING_STACK; + private static final SessionLocal[] EMPTY_SESSION_ARRAY = new SessionLocal[0]; + + static { + boolean a = false; + // Intentional side-effect + assert a = true; + ASSERT = a; + if (a) { + META_LOCK_DEBUGGING = new ThreadLocal<>(); + META_LOCK_DEBUGGING_DB = new ThreadLocal<>(); + META_LOCK_DEBUGGING_STACK = new ThreadLocal<>(); + } else { + META_LOCK_DEBUGGING = null; + META_LOCK_DEBUGGING_DB = null; + META_LOCK_DEBUGGING_STACK = null; + } + } + /** * The default name of the system user. This name is only used as long as * there is no administrator user registered. 
@@ -95,65 +133,61 @@ public class Database implements DataHandler { private final byte[] filePasswordHash; private final byte[] fileEncryptionKey; - private final HashMap roles = New.hashMap(); - private final HashMap users = New.hashMap(); - private final HashMap settings = New.hashMap(); - private final HashMap schemas = New.hashMap(); - private final HashMap rights = New.hashMap(); - private final HashMap userDataTypes = New.hashMap(); - private final HashMap aggregates = New.hashMap(); - private final HashMap comments = New.hashMap(); - - private final Set userSessions = - Collections.synchronizedSet(new HashSet()); - private Session exclusiveSession; - private final BitField objectIds = new BitField(); + private final ConcurrentHashMap usersAndRoles = new ConcurrentHashMap<>(); + private final ConcurrentHashMap settings = new ConcurrentHashMap<>(); + private final ConcurrentHashMap schemas = new ConcurrentHashMap<>(); + private final ConcurrentHashMap rights = new ConcurrentHashMap<>(); + private final ConcurrentHashMap comments = new ConcurrentHashMap<>(); + + private final HashMap tableEngines = new HashMap<>(); + + private final Set userSessions = Collections.synchronizedSet(new HashSet<>()); + private final AtomicReference exclusiveSession = new AtomicReference<>(); + private final BitSet objectIds = new BitSet(); private final Object lobSyncObject = new Object(); - private Schema mainSchema; - private Schema infoSchema; + private final Schema mainSchema; + private final Schema infoSchema; + private final Schema pgCatalogSchema; private int nextSessionId; private int nextTempTableId; - private User systemUser; - private Session systemSession; - private Session lobSession; - private Table meta; - private Index metaIdIndex; + private final User systemUser; + private SessionLocal systemSession; + private SessionLocal lobSession; + private final Table meta; + private final Index metaIdIndex; private FileLock lock; - private WriterThread writer; - private 
boolean starting; - private TraceSystem traceSystem; - private Trace trace; - private final int fileLockMethod; - private Role publicRole; - private long modificationDataId; - private long modificationMetaId; + private volatile boolean starting; + private final TraceSystem traceSystem; + private final Trace trace; + private final FileLockMethod fileLockMethod; + private final Role publicRole; + private final AtomicLong modificationDataId = new AtomicLong(); + private final AtomicLong modificationMetaId = new AtomicLong(); + /** + * Used to trigger the client side to reload some of the settings. + */ + private final AtomicLong remoteSettingsId = new AtomicLong(); private CompareMode compareMode; private String cluster = Constants.CLUSTERING_DISABLED; private boolean readOnly; - private int writeDelay = Constants.DEFAULT_WRITE_DELAY; private DatabaseEventListener eventListener; private int maxMemoryRows = SysProperties.MAX_MEMORY_ROWS; - private int maxMemoryUndo = Constants.DEFAULT_MAX_MEMORY_UNDO; - private int lockMode = Constants.DEFAULT_LOCK_MODE; + private int lockMode; private int maxLengthInplaceLob; private int allowLiterals = Constants.ALLOW_LITERALS_ALL; private int powerOffCount = initialPowerOffCount; - private int closeDelay; - private DatabaseCloser delayedCloser; + private volatile int closeDelay; + private DelayedDatabaseCloser delayedCloser; private volatile boolean closing; private boolean ignoreCase; private boolean deleteFilesOnDisconnect; - private String lobCompressionAlgorithm; private boolean optimizeReuseResults = true; private final String cacheType; - private final String accessModeData; private boolean referentialIntegrity = true; - private boolean multiVersion; - private DatabaseCloser closeOnExit; - private Mode mode = Mode.getInstance(Mode.REGULAR); - private boolean multiThreaded; + private Mode mode = Mode.getRegular(); + private DefaultNullOrdering defaultNullOrdering = DefaultNullOrdering.LOW; private int maxOperationMemory = 
Constants.DEFAULT_MAX_OPERATION_MEMORY; private SmallLRUCache lobFileListCache; @@ -162,142 +196,227 @@ public class Database implements DataHandler { private Server server; private HashMap linkConnections; private final TempFileDeleter tempFileDeleter = TempFileDeleter.getInstance(); - private PageStore pageStore; - private Properties reconnectLastLock; - private volatile long reconnectCheckNext; - private volatile boolean reconnectChangePending; - private volatile int checkpointAllowed; - private volatile boolean checkpointRunning; - private final Object reconnectSync = new Object(); - private int cacheSize; private int compactMode; private SourceCompiler compiler; - private volatile boolean metaTablesInitialized; - private boolean flushOnEachCommit; - private LobStorageInterface lobStorage; + private final LobStorageInterface lobStorage; private final int pageSize; private int defaultTableType = Table.TYPE_CACHED; private final DbSettings dbSettings; - private final int reconnectCheckDelay; - private int logMode; - private MVTableEngine.Store mvStore; - private int retentionTime; - private DbException backgroundException; + private final Store store; + private boolean allowBuiltinAliasOverride; + private final AtomicReference backgroundException = new AtomicReference<>(); private JavaObjectSerializer javaObjectSerializer; private String javaObjectSerializerName; private volatile boolean javaObjectSerializerInitialized; private boolean queryStatistics; + private int queryStatisticsMaxEntries = Constants.QUERY_STATISTICS_MAX_ENTRIES; private QueryStatisticsData queryStatisticsData; + private RowFactory rowFactory = RowFactory.getRowFactory(); + private boolean ignoreCatalogs; + + private Authenticator authenticator; public Database(ConnectionInfo ci, String cipher) { - String name = ci.getName(); + if (ASSERT) { + META_LOCK_DEBUGGING.set(null); + META_LOCK_DEBUGGING_DB.set(null); + META_LOCK_DEBUGGING_STACK.set(null); + } + String databaseName = ci.getName(); 
this.dbSettings = ci.getDbSettings(); - this.reconnectCheckDelay = dbSettings.reconnectCheckDelay; this.compareMode = CompareMode.getInstance(null, 0); this.persistent = ci.isPersistent(); this.filePasswordHash = ci.getFilePasswordHash(); this.fileEncryptionKey = ci.getFileEncryptionKey(); - this.databaseName = name; + this.databaseName = databaseName; this.databaseShortName = parseDatabaseShortName(); this.maxLengthInplaceLob = Constants.DEFAULT_MAX_LENGTH_INPLACE_LOB; this.cipher = cipher; - String lockMethodName = ci.getProperty("FILE_LOCK", null); - this.accessModeData = StringUtils.toLowerEnglish( - ci.getProperty("ACCESS_MODE_DATA", "rw")); this.autoServerMode = ci.getProperty("AUTO_SERVER", false); this.autoServerPort = ci.getProperty("AUTO_SERVER_PORT", 0); - int defaultCacheSize = Utils.scaleForAvailableMemory( - Constants.CACHE_SIZE_DEFAULT); - this.cacheSize = - ci.getProperty("CACHE_SIZE", defaultCacheSize); - this.pageSize = ci.getProperty("PAGE_SIZE", - Constants.DEFAULT_PAGE_SIZE); + pageSize = ci.getProperty("PAGE_SIZE", Constants.DEFAULT_PAGE_SIZE); + if (cipher != null && pageSize % FileEncrypt.BLOCK_SIZE != 0) { + throw DbException.getUnsupportedException("CIPHER && PAGE_SIZE=" + pageSize); + } + String accessModeData = StringUtils.toLowerEnglish(ci.getProperty("ACCESS_MODE_DATA", "rw")); if ("r".equals(accessModeData)) { readOnly = true; } - if (dbSettings.mvStore && lockMethodName == null) { - if (autoServerMode) { - fileLockMethod = FileLock.LOCK_FILE; - } else { - fileLockMethod = FileLock.LOCK_FS; + String lockMethodName = ci.getProperty("FILE_LOCK", null); + fileLockMethod = lockMethodName != null ? FileLock.getFileLockMethod(lockMethodName) : + autoServerMode ? 
FileLockMethod.FILE : FileLockMethod.FS; + this.databaseURL = ci.getURL(); + String s = ci.removeProperty("DATABASE_EVENT_LISTENER", null); + if (s != null) { + setEventListenerClass(StringUtils.trim(s, true, true, "'")); + } + s = ci.removeProperty("MODE", null); + if (s != null) { + mode = Mode.getInstance(s); + if (mode == null) { + throw DbException.get(ErrorCode.UNKNOWN_MODE_1, s); } - } else { - fileLockMethod = FileLock.getFileLockMethod(lockMethodName); } - if (dbSettings.mvStore && fileLockMethod == FileLock.LOCK_SERIALIZED) { - throw DbException.getUnsupportedException( - "MV_STORE combined with FILE_LOCK=SERIALIZED"); + s = ci.removeProperty("DEFAULT_NULL_ORDERING", null); + if (s != null) { + try { + defaultNullOrdering = DefaultNullOrdering.valueOf(StringUtils.toUpperEnglish(s)); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("DEFAULT_NULL_ORDERING", s); + } } - this.databaseURL = ci.getURL(); - String listener = ci.removeProperty("DATABASE_EVENT_LISTENER", null); - if (listener != null) { - listener = StringUtils.trim(listener, true, true, "'"); - setEventListenerClass(listener); - } - String modeName = ci.removeProperty("MODE", null); - if (modeName != null) { - this.mode = Mode.getInstance(modeName); - } - this.multiVersion = - ci.getProperty("MVCC", dbSettings.mvStore); - this.logMode = - ci.getProperty("LOG", PageStore.LOG_MODE_SYNC); - this.javaObjectSerializerName = - ci.getProperty("JAVA_OBJECT_SERIALIZER", null); - this.multiThreaded = - ci.getProperty("MULTI_THREADED", false); - - boolean closeAtVmShutdown = - dbSettings.dbCloseOnExit; - int traceLevelFile = - ci.getIntProperty(SetTypes.TRACE_LEVEL_FILE, - TraceSystem.DEFAULT_TRACE_LEVEL_FILE); - int traceLevelSystemOut = - ci.getIntProperty(SetTypes.TRACE_LEVEL_SYSTEM_OUT, + s = ci.getProperty("JAVA_OBJECT_SERIALIZER", null); + if (s != null) { + s = StringUtils.trim(s, true, true, "'"); + javaObjectSerializerName = s; + } + this.allowBuiltinAliasOverride = 
ci.getProperty("BUILTIN_ALIAS_OVERRIDE", false); + boolean closeAtVmShutdown = dbSettings.dbCloseOnExit; + int traceLevelFile = ci.getIntProperty(SetTypes.TRACE_LEVEL_FILE, TraceSystem.DEFAULT_TRACE_LEVEL_FILE); + int traceLevelSystemOut = ci.getIntProperty(SetTypes.TRACE_LEVEL_SYSTEM_OUT, TraceSystem.DEFAULT_TRACE_LEVEL_SYSTEM_OUT); - this.cacheType = StringUtils.toUpperEnglish( - ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT)); - openDatabase(traceLevelFile, traceLevelSystemOut, closeAtVmShutdown); - } - - private void openDatabase(int traceLevelFile, int traceLevelSystemOut, - boolean closeAtVmShutdown) { + this.cacheType = StringUtils.toUpperEnglish(ci.removeProperty("CACHE_TYPE", Constants.CACHE_TYPE_DEFAULT)); + this.ignoreCatalogs = ci.getProperty("IGNORE_CATALOGS", dbSettings.ignoreCatalogs); + this.lockMode = ci.getProperty("LOCK_MODE", Constants.DEFAULT_LOCK_MODE); + String traceFile; + if (persistent) { + if (readOnly) { + if (traceLevelFile >= TraceSystem.DEBUG) { + traceFile = Utils.getProperty("java.io.tmpdir", ".") + "/h2_" + System.currentTimeMillis() + + Constants.SUFFIX_TRACE_FILE; + } else { + traceFile = null; + } + } else { + traceFile = databaseName + Constants.SUFFIX_TRACE_FILE; + } + } else { + traceFile = null; + } + traceSystem = new TraceSystem(traceFile); + traceSystem.setLevelFile(traceLevelFile); + traceSystem.setLevelSystemOut(traceLevelSystemOut); + trace = traceSystem.getTrace(Trace.DATABASE); + trace.info("opening {0} (build {1})", databaseName, Constants.BUILD_ID); try { - open(traceLevelFile, traceLevelSystemOut); - if (closeAtVmShutdown) { - try { - closeOnExit = new DatabaseCloser(this, 0, true); - Runtime.getRuntime().addShutdownHook(closeOnExit); - } catch (IllegalStateException e) { - // shutdown in progress - just don't register the handler - // (maybe an application wants to write something into a - // database at shutdown time) - } catch (SecurityException e) { - // applets may not do that - ignore - // 
Google App Engine doesn't allow - // to instantiate classes that extend Thread + if (autoServerMode && (readOnly || !persistent || fileLockMethod == FileLockMethod.NO + || fileLockMethod == FileLockMethod.FS)) { + throw DbException.getUnsupportedException( + "AUTO_SERVER=TRUE && (readOnly || inMemory || FILE_LOCK=NO || FILE_LOCK=FS)"); + } + if (persistent) { + String lockFileName = databaseName + Constants.SUFFIX_LOCK_FILE; + if (readOnly) { + if (FileUtils.exists(lockFileName)) { + throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, "Lock file exists: " + lockFileName); + } + } else if (fileLockMethod != FileLockMethod.NO && fileLockMethod != FileLockMethod.FS) { + lock = new FileLock(traceSystem, lockFileName, Constants.LOCK_SLEEP); + lock.lock(fileLockMethod); + if (autoServerMode) { + startServer(lock.getUniqueId()); + } + } + deleteOldTempFiles(); + } + starting = true; + if (dbSettings.mvStore) { + store = new Store(this); + } else { + throw new UnsupportedOperationException(); + } + starting = false; + systemUser = new User(this, 0, SYSTEM_USER_NAME, true); + systemUser.setAdmin(true); + mainSchema = new Schema(this, Constants.MAIN_SCHEMA_ID, sysIdentifier(Constants.SCHEMA_MAIN), systemUser, + true); + infoSchema = new InformationSchema(this, systemUser); + schemas.put(mainSchema.getName(), mainSchema); + schemas.put(infoSchema.getName(), infoSchema); + if (mode.getEnum() == ModeEnum.PostgreSQL) { + pgCatalogSchema = new PgCatalogSchema(this, systemUser); + schemas.put(pgCatalogSchema.getName(), pgCatalogSchema); + } else { + pgCatalogSchema = null; + } + publicRole = new Role(this, 0, sysIdentifier(Constants.PUBLIC_ROLE_NAME), true); + usersAndRoles.put(publicRole.getName(), publicRole); + systemSession = createSession(systemUser); + lobSession = createSession(systemUser); + Set settingKeys = dbSettings.getSettings().keySet(); + store.getTransactionStore().init(lobSession); + settingKeys.removeIf(name -> name.startsWith("PAGE_STORE_")); + 
CreateTableData data = createSysTableData(); + starting = true; + meta = mainSchema.createTable(data); + IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { data.columns.get(0) }); + metaIdIndex = meta.addIndex(systemSession, "SYS_ID", 0, pkCols, 1, + IndexType.createPrimaryKey(false, false), true, null); + systemSession.commit(true); + objectIds.set(0); + executeMeta(); + systemSession.commit(true); + store.getTransactionStore().endLeftoverTransactions(); + store.removeTemporaryMaps(objectIds); + recompileInvalidViews(); + starting = false; + if (!readOnly) { + // set CREATE_BUILD in a new database + String settingName = SetTypes.getTypeName(SetTypes.CREATE_BUILD); + Setting setting = settings.get(settingName); + if (setting == null) { + setting = new Setting(this, allocateObjectId(), settingName); + setting.setIntValue(Constants.BUILD_ID); + lockMeta(systemSession); + addDatabaseObject(systemSession, setting); } } + lobStorage = new LobStorageMap(this); + lobSession.commit(true); + systemSession.commit(true); + trace.info("opened {0}", databaseName); + if (persistent) { + int writeDelay = ci.getProperty("WRITE_DELAY", Constants.DEFAULT_WRITE_DELAY); + setWriteDelay(writeDelay); + } + if (closeAtVmShutdown) { + OnExitDatabaseCloser.register(this); + } } catch (Throwable e) { - if (e instanceof OutOfMemoryError) { - e.fillInStackTrace(); - } - if (traceSystem != null) { - if (e instanceof SQLException) { - SQLException e2 = (SQLException) e; - if (e2.getErrorCode() != ErrorCode. 
- DATABASE_ALREADY_OPEN_1) { + try { + if (e instanceof OutOfMemoryError) { + e.fillInStackTrace(); + } + if (e instanceof DbException) { + if (((DbException) e).getErrorCode() == ErrorCode.DATABASE_ALREADY_OPEN_1) { + stopServer(); + } else { // only write if the database is not already in use trace.error(e, "opening {0}", databaseName); } } traceSystem.close(); + closeOpenFilesAndUnlock(); + } catch (Throwable ex) { + e.addSuppressed(ex); } - closeOpenFilesAndUnlock(false); throw DbException.convert(e); } } + public int getLockTimeout() { + Setting setting = findSetting(SetTypes.getTypeName(SetTypes.DEFAULT_LOCK_TIMEOUT)); + return setting == null ? Constants.INITIAL_LOCK_TIMEOUT : setting.getIntValue(); + } + + public RowFactory getRowFactory() { + return rowFactory; + } + + public void setRowFactory(RowFactory rowFactory) { + this.rowFactory = rowFactory; + } + public static void setInitialPowerOffCount(int count) { initialPowerOffCount = count; } @@ -309,144 +428,35 @@ public void setPowerOffCount(int count) { powerOffCount = count; } - public MVTableEngine.Store getMvStore() { - return mvStore; - } - - public void setMvStore(MVTableEngine.Store mvStore) { - this.mvStore = mvStore; - this.retentionTime = mvStore.getStore().getRetentionTime(); - } - - /** - * Check if two values are equal with the current comparison mode. - * - * @param a the first value - * @param b the second value - * @return true if both objects are equal - */ - public boolean areEqual(Value a, Value b) { - // can not use equals because ValueDecimal 0.0 is not equal to 0.00. - return a.compareTo(b, compareMode) == 0; - } - - /** - * Compare two values with the current comparison mode. The values may not - * be of the same type. 
- * - * @param a the first value - * @param b the second value - * @return 0 if both values are equal, -1 if the first value is smaller, and - * 1 otherwise - */ - public int compare(Value a, Value b) { - return a.compareTo(b, compareMode); - } - - /** - * Compare two values with the current comparison mode. The values must be - * of the same type. - * - * @param a the first value - * @param b the second value - * @return 0 if both values are equal, -1 if the first value is smaller, and - * 1 otherwise - */ - public int compareTypeSave(Value a, Value b) { - return a.compareTypeSave(b, compareMode); + public Store getStore() { + return store; } public long getModificationDataId() { - return modificationDataId; - } - - /** - * Set or reset the pending change flag in the .lock.db file. - * - * @param pending the new value of the flag - * @return true if the call was successful, - * false if another connection was faster - */ - private synchronized boolean reconnectModified(boolean pending) { - if (readOnly || lock == null || - fileLockMethod != FileLock.LOCK_SERIALIZED) { - return true; - } - try { - if (pending == reconnectChangePending) { - long now = System.currentTimeMillis(); - if (now > reconnectCheckNext) { - if (pending) { - String pos = pageStore == null ? - null : "" + pageStore.getWriteCountTotal(); - lock.setProperty("logPos", pos); - lock.save(); - } - reconnectCheckNext = now + reconnectCheckDelay; - } - return true; - } - Properties old = lock.load(); - if (pending) { - if (old.getProperty("changePending") != null) { - return false; - } - trace.debug("wait before writing"); - Thread.sleep((int) (reconnectCheckDelay * 1.1)); - Properties now = lock.load(); - if (!now.equals(old)) { - // somebody else was faster - return false; - } - } - String pos = pageStore == null ? 
- null : "" + pageStore.getWriteCountTotal(); - lock.setProperty("logPos", pos); - if (pending) { - lock.setProperty("changePending", "true-" + Math.random()); - } else { - lock.setProperty("changePending", null); - } - // ensure that the writer thread will - // not reset the flag before we are done - reconnectCheckNext = System.currentTimeMillis() + - 2 * reconnectCheckDelay; - old = lock.save(); - if (pending) { - trace.debug("wait before writing again"); - Thread.sleep((int) (reconnectCheckDelay * 1.1)); - Properties now = lock.load(); - if (!now.equals(old)) { - // somebody else was faster - return false; - } - } else { - Thread.sleep(1); - } - reconnectLastLock = old; - reconnectChangePending = pending; - reconnectCheckNext = System.currentTimeMillis() + - reconnectCheckDelay; - return true; - } catch (Exception e) { - trace.error(e, "pending {0}", pending); - return false; - } + return modificationDataId.get(); } public long getNextModificationDataId() { - return ++modificationDataId; + return modificationDataId.incrementAndGet(); } public long getModificationMetaId() { - return modificationMetaId; + return modificationMetaId.get(); } public long getNextModificationMetaId() { // if the meta data has been modified, the data is modified as well // (because MetaTable returns modificationDataId) - modificationDataId++; - return modificationMetaId++; + modificationDataId.incrementAndGet(); + return modificationMetaId.incrementAndGet() - 1; + } + + public long getRemoteSettingsId() { + return remoteSettingsId.get(); + } + + public long getNextRemoteSettingsId() { + return remoteSettingsId.incrementAndGet(); } public int getPowerOffCount() { @@ -455,9 +465,12 @@ public int getPowerOffCount() { @Override public void checkPowerOff() { - if (powerOffCount == 0) { - return; + if (powerOffCount != 0) { + checkPowerOff2(); } + } + + private void checkPowerOff2() { if (powerOffCount > 1) { powerOffCount--; return; @@ -465,24 +478,11 @@ public void checkPowerOff() { if 
(powerOffCount != -1) { try { powerOffCount = -1; - stopWriter(); - if (mvStore != null) { - mvStore.closeImmediately(); - } - if (pageStore != null) { - try { - pageStore.close(); - } catch (DbException e) { - // ignore - } - pageStore = null; - } + store.closeImmediately(); if (lock != null) { stopServer(); - if (fileLockMethod != FileLock.LOCK_SERIALIZED) { - // allow testing shutdown - lock.unlock(); - } + // allow testing shutdown + lock.unlock(); lock = null; } if (traceSystem != null) { @@ -492,31 +492,18 @@ public void checkPowerOff() { DbException.traceThrowable(e); } } - Engine.getInstance().close(databaseName); + Engine.close(databaseName); throw DbException.get(ErrorCode.DATABASE_IS_CLOSED); } /** - * Check if a database with the given name exists. - * - * @param name the name of the database (including path) - * @return true if one exists - */ - static boolean exists(String name) { - if (FileUtils.exists(name + Constants.SUFFIX_PAGE_FILE)) { - return true; - } - return FileUtils.exists(name + Constants.SUFFIX_MV_FILE); - } - - /** - * Get the trace object for the given module. + * Get the trace object for the given module id. 
* - * @param module the module name + * @param moduleId the module id * @return the trace object */ - public Trace getTrace(String module) { - return traceSystem.getTrace(module); + public Trace getTrace(int moduleId) { + return traceSystem.getTrace(moduleId); } @Override @@ -543,7 +530,7 @@ public FileStore openFile(String name, String openMode, boolean mustExist) { * @return true if the cipher algorithm and the password match */ boolean validateFilePasswordHash(String testCipher, byte[] testHash) { - if (!StringUtils.equals(testCipher, this.cipher)) { + if (!Objects.equals(testCipher, this.cipher)) { return false; } return Utils.compareSecure(testHash, filePasswordHash); @@ -551,222 +538,127 @@ boolean validateFilePasswordHash(String testCipher, byte[] testHash) { private String parseDatabaseShortName() { String n = databaseName; - if (n.endsWith(":")) { - n = null; - } - if (n != null) { - StringTokenizer tokenizer = new StringTokenizer(n, "/\\:,;"); - while (tokenizer.hasMoreTokens()) { - n = tokenizer.nextToken(); + int l = n.length(), i = l; + loop: while (--i >= 0) { + char ch = n.charAt(i); + switch (ch) { + case '/': + case ':': + case '\\': + break loop; } } - if (n == null || n.length() == 0) { - n = "unnamed"; - } - return dbSettings.databaseToUpper ? StringUtils.toUpperEnglish(n) : n; + n = ++i == l ? "UNNAMED" : n.substring(i); + return StringUtils.truncateString( + dbSettings.databaseToUpper ? StringUtils.toUpperEnglish(n) + : dbSettings.databaseToLower ? 
StringUtils.toLowerEnglish(n) : n, + Constants.MAX_IDENTIFIER_LENGTH); } - private synchronized void open(int traceLevelFile, int traceLevelSystemOut) { - if (persistent) { - String dataFileName = databaseName + Constants.SUFFIX_OLD_DATABASE_FILE; - boolean existsData = FileUtils.exists(dataFileName); - String pageFileName = databaseName + Constants.SUFFIX_PAGE_FILE; - String mvFileName = databaseName + Constants.SUFFIX_MV_FILE; - boolean existsPage = FileUtils.exists(pageFileName); - boolean existsMv = FileUtils.exists(mvFileName); - if (existsData && (!existsPage && !existsMv)) { - throw DbException.get( - ErrorCode.FILE_VERSION_ERROR_1, "Old database: " + - dataFileName + - " - please convert the database " + - "to a SQL script and re-create it."); - } - if (existsPage && !FileUtils.canWrite(pageFileName)) { - readOnly = true; - } - if (existsMv && !FileUtils.canWrite(mvFileName)) { - readOnly = true; - } - if (existsPage && !existsMv) { - dbSettings.mvStore = false; - } - if (readOnly) { - if (traceLevelFile >= TraceSystem.DEBUG) { - String traceFile = Utils.getProperty("java.io.tmpdir", ".") + - "/" + "h2_" + System.currentTimeMillis(); - traceSystem = new TraceSystem(traceFile + - Constants.SUFFIX_TRACE_FILE); - } else { - traceSystem = new TraceSystem(null); - } - } else { - traceSystem = new TraceSystem(databaseName + - Constants.SUFFIX_TRACE_FILE); - } - traceSystem.setLevelFile(traceLevelFile); - traceSystem.setLevelSystemOut(traceLevelSystemOut); - trace = traceSystem.getTrace(Trace.DATABASE); - trace.info("opening {0} (build {1})", databaseName, Constants.BUILD_ID); - if (autoServerMode) { - if (readOnly || - fileLockMethod == FileLock.LOCK_NO || - fileLockMethod == FileLock.LOCK_SERIALIZED || - fileLockMethod == FileLock.LOCK_FS || - !persistent) { - throw DbException.getUnsupportedException( - "autoServerMode && (readOnly || " + - "fileLockMethod == NO || " + - "fileLockMethod == SERIALIZED || " + - "fileLockMethod == FS || " + - "inMemory)"); - } - } 
- String lockFileName = databaseName + Constants.SUFFIX_LOCK_FILE; - if (readOnly) { - if (FileUtils.exists(lockFileName)) { - throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, - "Lock file exists: " + lockFileName); - } - } - if (!readOnly && fileLockMethod != FileLock.LOCK_NO) { - if (fileLockMethod != FileLock.LOCK_FS) { - lock = new FileLock(traceSystem, lockFileName, Constants.LOCK_SLEEP); - lock.lock(fileLockMethod); - if (autoServerMode) { - startServer(lock.getUniqueId()); - } - } - } - if (SysProperties.MODIFY_ON_WRITE) { - while (isReconnectNeeded()) { - // wait until others stopped writing - } - } else { - while (isReconnectNeeded() && !beforeWriting()) { - // wait until others stopped writing and - // until we can write (the file is not yet open - - // no need to re-connect) - } - } - deleteOldTempFiles(); - starting = true; - if (SysProperties.MODIFY_ON_WRITE) { - try { - getPageStore(); - } catch (DbException e) { - if (e.getErrorCode() != ErrorCode.DATABASE_IS_READ_ONLY) { - throw e; - } - pageStore = null; - while (!beforeWriting()) { - // wait until others stopped writing and - // until we can write (the file is not yet open - - // no need to re-connect) - } - getPageStore(); - } - } else { - getPageStore(); - } - starting = false; - if (mvStore == null) { - writer = WriterThread.create(this, writeDelay); - } else { - setWriteDelay(writeDelay); - } - } else { - if (autoServerMode) { - throw DbException.getUnsupportedException( - "autoServerMode && inMemory"); - } - traceSystem = new TraceSystem(null); - trace = traceSystem.getTrace(Trace.DATABASE); - if (dbSettings.mvStore) { - getPageStore(); - } - } - systemUser = new User(this, 0, SYSTEM_USER_NAME, true); - mainSchema = new Schema(this, 0, Constants.SCHEMA_MAIN, systemUser, true); - infoSchema = new Schema(this, -1, "INFORMATION_SCHEMA", systemUser, true); - schemas.put(mainSchema.getName(), mainSchema); - schemas.put(infoSchema.getName(), infoSchema); - publicRole = new Role(this, 0, 
Constants.PUBLIC_ROLE_NAME, true); - roles.put(Constants.PUBLIC_ROLE_NAME, publicRole); - systemUser.setAdmin(true); - systemSession = new Session(this, systemUser, ++nextSessionId); - lobSession = new Session(this, systemUser, ++nextSessionId); + private CreateTableData createSysTableData() { CreateTableData data = new CreateTableData(); ArrayList cols = data.columns; - Column columnId = new Column("ID", Value.INT); + Column columnId = new Column("ID", TypeInfo.TYPE_INTEGER); columnId.setNullable(false); cols.add(columnId); - cols.add(new Column("HEAD", Value.INT)); - cols.add(new Column("TYPE", Value.INT)); - cols.add(new Column("SQL", Value.STRING)); - boolean create = true; - if (pageStore != null) { - create = pageStore.isNew(); - } + cols.add(new Column("HEAD", TypeInfo.TYPE_INTEGER)); + cols.add(new Column("TYPE", TypeInfo.TYPE_INTEGER)); + cols.add(new Column("SQL", TypeInfo.TYPE_VARCHAR)); data.tableName = "SYS"; data.id = 0; data.temporary = false; data.persistData = persistent; data.persistIndexes = persistent; - data.create = create; data.isHidden = true; data.session = systemSession; - meta = mainSchema.createTable(data); - IndexColumn[] pkCols = IndexColumn.wrap(new Column[] { columnId }); - metaIdIndex = meta.addIndex(systemSession, "SYS_ID", - 0, pkCols, IndexType.createPrimaryKey( - false, false), true, null); - objectIds.set(0); - starting = true; + return data; + } + + private void executeMeta() { Cursor cursor = metaIdIndex.find(systemSession, null, null); - ArrayList records = New.arrayList(); + ArrayList firstRecords = new ArrayList<>(), domainRecords = new ArrayList<>(), + middleRecords = new ArrayList<>(), constraintRecords = new ArrayList<>(), + lastRecords = new ArrayList<>(); while (cursor.next()) { MetaRecord rec = new MetaRecord(cursor.get()); objectIds.set(rec.getId()); - records.add(rec); + switch (rec.getObjectType()) { + case DbObject.SETTING: + case DbObject.USER: + case DbObject.SCHEMA: + case DbObject.FUNCTION_ALIAS: + 
firstRecords.add(rec); + break; + case DbObject.DOMAIN: + domainRecords.add(rec); + break; + case DbObject.SEQUENCE: + case DbObject.CONSTANT: + case DbObject.TABLE_OR_VIEW: + case DbObject.INDEX: + middleRecords.add(rec); + break; + case DbObject.CONSTRAINT: + constraintRecords.add(rec); + break; + default: + lastRecords.add(rec); + } } - Collections.sort(records); synchronized (systemSession) { - for (MetaRecord rec : records) { - rec.execute(this, systemSession, eventListener); + executeMeta(firstRecords); + // Domains may depend on other domains + int count = domainRecords.size(); + if (count > 0) { + for (int j = 0;; count = j) { + DbException exception = null; + for (int i = 0; i < count; i++) { + MetaRecord rec = domainRecords.get(i); + try { + rec.prepareAndExecute(this, systemSession, eventListener); + } catch (DbException ex) { + if (exception == null) { + exception = ex; + } + domainRecords.set(j++, rec); + } + } + if (exception == null) { + break; + } + if (count == j) { + throw exception; + } + } } - } - if (mvStore != null) { - mvStore.initTransactions(); - mvStore.removeTemporaryMaps(objectIds); - } - recompileInvalidViews(systemSession); - starting = false; - if (!readOnly) { - // set CREATE_BUILD in a new database - String name = SetTypes.getTypeName(SetTypes.CREATE_BUILD); - if (settings.get(name) == null) { - Setting setting = new Setting(this, allocateObjectId(), name); - setting.setIntValue(Constants.BUILD_ID); - lockMeta(systemSession); - addDatabaseObject(systemSession, setting); - } - // mark all ids used in the page store - if (pageStore != null) { - BitField f = pageStore.getObjectIds(); - for (int i = 0, len = f.length(); i < len; i++) { - if (f.get(i) && !objectIds.get(i)) { - trace.info("unused object id: " + i); - objectIds.set(i); + executeMeta(middleRecords); + // Prepare, but don't create all constraints and sort them + count = constraintRecords.size(); + if (count > 0) { + ArrayList constraints = new ArrayList<>(count); + for (int 
i = 0; i < count; i++) { + Prepared prepared = constraintRecords.get(i).prepare(this, systemSession, eventListener); + if (prepared != null) { + constraints.add(prepared); } } + constraints.sort(MetaRecord.CONSTRAINTS_COMPARATOR); + // Create constraints in order (unique and primary key before + // all others) + for (Prepared constraint : constraints) { + MetaRecord.execute(this, constraint, eventListener, constraint.getSQL()); + } } + executeMeta(lastRecords); } - getLobStorage().init(); - systemSession.commit(true); + } - trace.info("opened {0}", databaseName); - if (checkpointAllowed > 0) { - afterWriting(); + private void executeMeta(ArrayList records) { + if (!records.isEmpty()) { + records.sort(null); + for (MetaRecord rec : records) { + rec.prepareAndExecute(this, systemSession, eventListener); + } } } @@ -800,66 +692,51 @@ private void stopServer() { } } - private void recompileInvalidViews(Session session) { - boolean recompileSuccessful; + private void recompileInvalidViews() { + boolean atLeastOneRecompiledSuccessfully; do { - recompileSuccessful = false; - for (Table obj : getAllTablesAndViews(false)) { - if (obj instanceof TableView) { - TableView view = (TableView) obj; - if (view.isInvalid()) { - view.recompile(session, true); - if (!view.isInvalid()) { - recompileSuccessful = true; + atLeastOneRecompiledSuccessfully = false; + for (Schema schema : schemas.values()) { + for (Table obj : schema.getAllTablesAndViews(null)) { + if (obj instanceof TableView) { + TableView view = (TableView) obj; + if (view.isInvalid()) { + view.recompile(systemSession, true, false); + if (!view.isInvalid()) { + atLeastOneRecompiledSuccessfully = true; + } } } } } - } while (recompileSuccessful); - // when opening a database, views are initialized before indexes, - // so they may not have the optimal plan yet - // this is not a problem, it is just nice to see the newest plan - for (Table obj : getAllTablesAndViews(false)) { - if (obj instanceof TableView) { - TableView 
view = (TableView) obj; - if (!view.isInvalid()) { - view.recompile(systemSession, true); - } - } - } - } - - private void initMetaTables() { - if (metaTablesInitialized) { - return; - } - synchronized (infoSchema) { - if (!metaTablesInitialized) { - for (int type = 0, count = MetaTable.getMetaTableTypeCount(); - type < count; type++) { - MetaTable m = new MetaTable(infoSchema, -1 - type, type); - infoSchema.add(m); - } - metaTablesInitialized = true; - } - } + } while (atLeastOneRecompiledSuccessfully); + TableView.clearIndexCaches(this); } - private synchronized void addMeta(Session session, DbObject obj) { + private void addMeta(SessionLocal session, DbObject obj) { + assert Thread.holdsLock(this); int id = obj.getId(); - if (id > 0 && !starting && !obj.isTemporary()) { - Row r = meta.getTemplateRow(); - MetaRecord rec = new MetaRecord(obj); - rec.setRecord(r); - objectIds.set(id); - if (SysProperties.CHECK) { - verifyMetaLocked(session); - } - meta.addRow(session, r); - if (isMultiVersion()) { - // TODO this should work without MVCC, but avoid risks at the - // moment - session.log(meta, UndoLogRecord.INSERT, r); + if (id > 0 && !obj.isTemporary()) { + if (!isReadOnly()) { + Row r = meta.getTemplateRow(); + MetaRecord.populateRowFromDBObject(obj, r); + assert objectIds.get(id); + if (SysProperties.CHECK) { + verifyMetaLocked(session); + } + Cursor cursor = metaIdIndex.find(session, r, r); + if (!cursor.next()) { + meta.addRow(session, r); + } else { + assert starting; + Row oldRow = cursor.get(); + MetaRecord rec = new MetaRecord(oldRow); + assert rec.getId() == obj.getId(); + assert rec.getObjectType() == obj.getType(); + if (!rec.getSQL().equals(obj.getCreateSQLForMeta())) { + meta.updateRow(session, oldRow, r); + } + } } } } @@ -869,10 +746,9 @@ private synchronized void addMeta(Session session, DbObject obj) { * * @param session the session */ - public void verifyMetaLocked(Session session) { - if (meta != null && !meta.isLockedExclusivelyBy(session) - && 
lockMode != Constants.LOCK_MODE_OFF) { - throw DbException.throwInternalError(); + public void verifyMetaLocked(SessionLocal session) { + if (lockMode != Constants.LOCK_MODE_OFF && meta != null && !meta.isLockedExclusivelyBy(session)) { + throw DbException.getInternalError(); } } @@ -882,7 +758,7 @@ public void verifyMetaLocked(Session session) { * @param session the session * @return whether it was already locked before by this session */ - public boolean lockMeta(Session session) { + public boolean lockMeta(SessionLocal session) { // this method can not be synchronized on the database object, // as unlocking is also synchronized on the database object - // so if locking starts just before unlocking, locking could @@ -890,8 +766,59 @@ public boolean lockMeta(Session session) { if (meta == null) { return true; } - boolean wasLocked = meta.lock(session, true, true); - return wasLocked; + if (ASSERT) { + lockMetaAssertion(session); + } + return meta.lock(session, Table.EXCLUSIVE_LOCK); + } + + private void lockMetaAssertion(SessionLocal session) { + // If we are locking two different databases in the same stack, just ignore it. + // This only happens in TestLinkedTable where we connect to another h2 DB in the + // same process. 
+ if (META_LOCK_DEBUGGING_DB.get() != null && META_LOCK_DEBUGGING_DB.get() != this) { + final SessionLocal prev = META_LOCK_DEBUGGING.get(); + if (prev == null) { + META_LOCK_DEBUGGING.set(session); + META_LOCK_DEBUGGING_DB.set(this); + META_LOCK_DEBUGGING_STACK.set(new Throwable("Last meta lock granted in this stack trace, " + + "this is debug information for following IllegalStateException")); + } else if (prev != session) { + META_LOCK_DEBUGGING_STACK.get().printStackTrace(); + throw new IllegalStateException("meta currently locked by " + prev + ", sessionid=" + prev.getId() + + " and trying to be locked by different session, " + session + ", sessionid=" // + + session.getId() + " on same thread"); + } + } + } + + /** + * Unlock the metadata table. + * + * @param session the session + */ + public void unlockMeta(SessionLocal session) { + if (meta != null) { + unlockMetaDebug(session); + meta.unlock(session); + session.unlock(meta); + } + } + + /** + * This method doesn't actually unlock the metadata table, all it does it + * reset the debugging flags. 
+ * + * @param session the session + */ + static void unlockMetaDebug(SessionLocal session) { + if (ASSERT) { + if (META_LOCK_DEBUGGING.get() == session) { + META_LOCK_DEBUGGING.set(null); + META_LOCK_DEBUGGING_DB.set(null); + META_LOCK_DEBUGGING_STACK.set(null); + } + } } /** @@ -900,70 +827,69 @@ public boolean lockMeta(Session session) { * @param session the session * @param id the id of the object to remove */ - public synchronized void removeMeta(Session session, int id) { + public void removeMeta(SessionLocal session, int id) { if (id > 0 && !starting) { - SearchRow r = meta.getTemplateSimpleRow(false); - r.setValue(0, ValueInt.get(id)); + SearchRow r = meta.getRowFactory().createRow(); + r.setValue(0, ValueInteger.get(id)); boolean wasLocked = lockMeta(session); - Cursor cursor = metaIdIndex.find(session, r, r); - if (cursor.next()) { - if (SysProperties.CHECK) { - if (lockMode != Constants.LOCK_MODE_OFF && !wasLocked) { - throw DbException.throwInternalError(); + try { + Cursor cursor = metaIdIndex.find(session, r, r); + if (cursor.next()) { + Row found = cursor.get(); + meta.removeRow(session, found); + if (SysProperties.CHECK) { + checkMetaFree(session, id); } } - Row found = cursor.get(); - meta.removeRow(session, found); - if (isMultiVersion()) { - // TODO this should work without MVCC, but avoid risks at - // the moment - session.log(meta, UndoLogRecord.DELETE, found); - } - objectIds.clear(id); - if (SysProperties.CHECK) { - checkMetaFree(session, id); + } finally { + if (!wasLocked) { + // must not keep the lock if it was not locked + // otherwise updating sequences may cause a deadlock + unlockMeta(session); } - } else if (!wasLocked) { - // must not keep the lock if it was not locked - // otherwise updating sequences may cause a deadlock - meta.unlock(session); - session.unlock(meta); } + // release of the object id has to be postponed until the end of the transaction, + // otherwise it might be re-used prematurely, and it would make + // rollback 
impossible or lead to MVMaps name collision, + // so until then ids are accumulated within session + session.scheduleDatabaseObjectIdForRelease(id); + } + } + + /** + * Mark some database ids as unused. + * @param idsToRelease the ids to release + */ + public void releaseDatabaseObjectIds(BitSet idsToRelease) { + synchronized (objectIds) { + objectIds.andNot(idsToRelease); } } @SuppressWarnings("unchecked") - private HashMap getMap(int type) { - HashMap result; + private Map getMap(int type) { + Map result; switch (type) { case DbObject.USER: - result = users; + case DbObject.ROLE: + result = usersAndRoles; break; case DbObject.SETTING: result = settings; break; - case DbObject.ROLE: - result = roles; - break; case DbObject.RIGHT: result = rights; break; case DbObject.SCHEMA: result = schemas; break; - case DbObject.USER_DATATYPE: - result = userDataTypes; - break; case DbObject.COMMENT: result = comments; break; - case DbObject.AGGREGATE: - result = aggregates; - break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } - return (HashMap) result; + return (Map) result; } /** @@ -972,14 +898,16 @@ private HashMap getMap(int type) { * @param session the session * @param obj the object to add */ - public synchronized void addSchemaObject(Session session, SchemaObject obj) { + public void addSchemaObject(SessionLocal session, SchemaObject obj) { int id = obj.getId(); if (id > 0 && !starting) { checkWritingAllowed(); } lockMeta(session); - obj.getSchema().add(obj); - addMeta(session, obj); + synchronized (this) { + obj.getSchema().add(obj); + addMeta(session, obj); + } } /** @@ -988,12 +916,12 @@ public synchronized void addSchemaObject(Session session, SchemaObject obj) { * @param session the session * @param obj the object to add */ - public synchronized void addDatabaseObject(Session session, DbObject obj) { + public synchronized void addDatabaseObject(SessionLocal session, DbObject obj) { int id 
= obj.getId(); if (id > 0 && !starting) { checkWritingAllowed(); } - HashMap map = getMap(obj.getType()); + Map map = getMap(obj.getType()); if (obj.getType() == DbObject.USER) { User user = (User) obj; if (user.isAdmin() && systemUser.getName().equals(SYSTEM_USER_NAME)) { @@ -1002,23 +930,13 @@ public synchronized void addDatabaseObject(Session session, DbObject obj) { } String name = obj.getName(); if (SysProperties.CHECK && map.get(name) != null) { - DbException.throwInternalError("object already exists"); + throw DbException.getInternalError("object already exists"); } lockMeta(session); addMeta(session, obj); map.put(name, obj); } - /** - * Get the user defined aggregate function if it exists, or null if not. - * - * @param name the name of the user defined aggregate function - * @return the aggregate function or null - */ - public UserAggregate findAggregate(String name) { - return aggregates.get(name); - } - /** * Get the comment for the given database object if one exists, or null if * not. @@ -1041,7 +959,8 @@ public Comment findComment(DbObject object) { * @return the role or null */ public Role findRole(String roleName) { - return roles.get(roleName); + RightOwner rightOwner = findUserOrRole(roleName); + return rightOwner instanceof Role ? (Role) rightOwner : null; } /** @@ -1051,11 +970,10 @@ public Role findRole(String roleName) { * @return the schema or null */ public Schema findSchema(String schemaName) { - Schema schema = schemas.get(schemaName); - if (schema == infoSchema) { - initMetaTables(); + if (schemaName == null) { + return null; } - return schema; + return schemas.get(schemaName); } /** @@ -1075,17 +993,8 @@ public Setting findSetting(String name) { * @return the user or null */ public User findUser(String name) { - return users.get(name); - } - - /** - * Get the user defined data type if it exists, or null if not. 
- * - * @param name the name of the user defined data type - * @return the user defined data type or null - */ - public UserDataType findUserDataType(String name) { - return userDataTypes.get(name); + RightOwner rightOwner = findUserOrRole(name); + return rightOwner instanceof User ? (User) rightOwner : null; } /** @@ -1104,18 +1013,33 @@ public User getUser(String name) { return user; } + /** + * Get the user or role if it exists, or {@code null} if not. + * + * @param name the name of the user or role + * @return the user, the role, or {@code null} + */ + public RightOwner findUserOrRole(String name) { + return usersAndRoles.get(StringUtils.toUpperEnglish(name)); + } + /** * Create a session for the given user. * * @param user the user - * @return the session + * @param networkConnectionInfo the network connection information, or {@code null} + * @return the session, or null if the database is currently closing * @throws DbException if the database is in exclusive mode */ - synchronized Session createSession(User user) { - if (exclusiveSession != null) { + synchronized SessionLocal createSession(User user, NetworkConnectionInfo networkConnectionInfo) { + if (closing) { + return null; + } + if (exclusiveSession.get() != null) { throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); } - Session session = new Session(this, user, ++nextSessionId); + SessionLocal session = createSession(user); + session.setNetworkConnectionInfo(networkConnectionInfo); userSessions.add(session); trace.info("connecting session #{0} to {1}", session.getId(), databaseName); if (delayedCloser != null) { @@ -1125,53 +1049,84 @@ synchronized Session createSession(User user) { return session; } + private SessionLocal createSession(User user) { + int id = ++nextSessionId; + return new SessionLocal(this, user, id); + } + /** * Remove a session. This method is called after the user has disconnected. 
* * @param session the session */ - public synchronized void removeSession(Session session) { + public synchronized void removeSession(SessionLocal session) { if (session != null) { - if (exclusiveSession == session) { - exclusiveSession = null; - } - userSessions.remove(session); - if (session != systemSession && session != lobSession) { + exclusiveSession.compareAndSet(session, null); + if (userSessions.remove(session)) { trace.info("disconnecting session #{0}", session.getId()); } } - if (userSessions.size() == 0 && - session != systemSession && session != lobSession) { - if (closeDelay == 0) { - close(false); - } else if (closeDelay < 0) { - return; - } else { - delayedCloser = new DatabaseCloser(this, closeDelay * 1000, false); - delayedCloser.setName("H2 Close Delay " + getShortName()); - delayedCloser.setDaemon(true); - delayedCloser.start(); + if (isUserSession(session)) { + if (userSessions.isEmpty()) { + if (closeDelay == 0) { + close(false); + } else if (closeDelay < 0) { + return; + } else { + delayedCloser = new DelayedDatabaseCloser(this, closeDelay * 1000); + } + } + if (session != null) { + trace.info("disconnected session #{0}", session.getId()); } } - if (session != systemSession && - session != lobSession && session != null) { - trace.info("disconnected session #{0}", session.getId()); - } } - private synchronized void closeAllSessionsException(Session except) { - Session[] all = new Session[userSessions.size()]; - userSessions.toArray(all); - for (Session s : all) { + boolean isUserSession(SessionLocal session) { + return session != systemSession && session != lobSession; + } + + private synchronized void closeAllSessionsExcept(SessionLocal except) { + SessionLocal[] all = userSessions.toArray(EMPTY_SESSION_ARRAY); + for (SessionLocal s : all) { if (s != except) { - try { - // must roll back, otherwise the session is removed and - // the transaction log that contains its uncommitted - // operations as well - s.rollback(); - s.close(); - } catch 
(DbException e) { - trace.error(e, "disconnecting session #{0}", s.getId()); + // indicate that session need to be closed ASAP + s.suspend(); + } + } + + int timeout = 2 * getLockTimeout(); + long start = System.currentTimeMillis(); + // 'sleep' should be strictly greater than zero, otherwise real time is not taken into consideration + // and the thread simply waits until notified + long sleep = Math.max(timeout / 20, 1); + boolean done = false; + while (!done) { + try { + // although nobody going to notify us + // it is vital to give up lock on a database + wait(sleep); + } catch (InterruptedException e1) { + // ignore + } + if (System.currentTimeMillis() - start > timeout) { + for (SessionLocal s : all) { + if (s != except && !s.isClosed()) { + try { + // this will rollback outstanding transaction + s.close(); + } catch (Throwable e) { + trace.error(e, "disconnecting session #{0}", s.getId()); + } + } + } + break; + } + done = true; + for (SessionLocal s : all) { + if (s != except && !s.isClosed()) { + done = false; + break; } } } @@ -1183,109 +1138,118 @@ private synchronized void closeAllSessionsException(Session except) { * @param fromShutdownHook true if this method is called from the shutdown * hook */ - synchronized void close(boolean fromShutdownHook) { - if (closing) { - return; - } - throwLastBackgroundException(); - if (fileLockMethod == FileLock.LOCK_SERIALIZED && - !reconnectChangePending) { - // another connection may have written something - don't write - try { - closeOpenFilesAndUnlock(false); - } catch (DbException e) { - // ignore + void close(boolean fromShutdownHook) { + DbException b = backgroundException.getAndSet(null); + try { + closeImpl(fromShutdownHook); + } catch (Throwable t) { + if (b != null) { + t.addSuppressed(b); } - traceSystem.close(); - Engine.getInstance().close(databaseName); - return; + throw t; } - closing = true; - stopServer(); - if (userSessions.size() > 0) { - if (!fromShutdownHook) { - return; - } - trace.info("closing 
{0} from shutdown hook", databaseName); - closeAllSessionsException(null); + if (b != null) { + // wrap the exception, so we see it was thrown here + throw DbException.get(b.getErrorCode(), b, b.getMessage()); } - trace.info("closing {0}", databaseName); - if (eventListener != null) { - // allow the event listener to connect to the database - closing = false; - DatabaseEventListener e = eventListener; - // set it to null, to make sure it's called only once - eventListener = null; - e.closingDatabase(); - if (userSessions.size() > 0) { - // if a connection was opened, we can't close the database + } + + private void closeImpl(boolean fromShutdownHook) { + synchronized (this) { + if (closing || !fromShutdownHook && !userSessions.isEmpty()) { return; } closing = true; + stopServer(); + if (!userSessions.isEmpty()) { + assert fromShutdownHook; + trace.info("closing {0} from shutdown hook", databaseName); + closeAllSessionsExcept(null); + } + trace.info("closing {0}", databaseName); + if (eventListener != null) { + // allow the event listener to connect to the database + closing = false; + DatabaseEventListener e = eventListener; + // set it to null, to make sure it's called only once + eventListener = null; + e.closingDatabase(); + closing = true; + if (!userSessions.isEmpty()) { + trace.info("event listener {0} left connection open", e.getClass().getName()); + // if listener left an open connection + closeAllSessionsExcept(null); + } + } + if (!this.isReadOnly()) { + removeOrphanedLobs(); + } } - removeOrphanedLobs(); try { - if (systemSession != null) { - if (powerOffCount != -1) { - for (Table table : getAllTablesAndViews(false)) { - if (table.isGlobalTemporary()) { - table.removeChildrenAndResources(systemSession); - } else { - table.close(systemSession); + try { + if (systemSession != null) { + if (powerOffCount != -1) { + for (Schema schema : schemas.values()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table.isGlobalTemporary()) { + 
table.removeChildrenAndResources(systemSession); + } else { + table.close(systemSession); + } + } + } + for (Schema schema : schemas.values()) { + for (Sequence sequence : schema.getAllSequences()) { + sequence.close(); + } } } - for (SchemaObject obj : getAllSchemaObjects( - DbObject.SEQUENCE)) { - Sequence sequence = (Sequence) obj; - sequence.close(); + for (Schema schema : schemas.values()) { + for (TriggerObject trigger : schema.getAllTriggers()) { + try { + trigger.close(); + } catch (SQLException e) { + trace.error(e, "close"); + } + } } - } - for (SchemaObject obj : getAllSchemaObjects( - DbObject.TRIGGER)) { - TriggerObject trigger = (TriggerObject) obj; - try { - trigger.close(); - } catch (SQLException e) { - trace.error(e, "close"); + if (powerOffCount != -1) { + meta.close(systemSession); + systemSession.commit(true); } } - if (powerOffCount != -1) { - meta.close(systemSession); - systemSession.commit(true); - } + } catch (DbException e) { + trace.error(e, "close"); } - } catch (DbException e) { - trace.error(e, "close"); - } - tempFileDeleter.deleteAll(); - try { - closeOpenFilesAndUnlock(true); - } catch (DbException e) { - trace.error(e, "close"); - } - trace.info("closed"); - traceSystem.close(); - if (closeOnExit != null) { - closeOnExit.reset(); + tempFileDeleter.deleteAll(); try { - Runtime.getRuntime().removeShutdownHook(closeOnExit); - } catch (IllegalStateException e) { - // ignore - } catch (SecurityException e) { - // applets may not do that - ignore + if (lobSession != null) { + lobSession.close(); + lobSession = null; + } + if (systemSession != null) { + systemSession.close(); + systemSession = null; + } + closeOpenFilesAndUnlock(); + } catch (DbException e) { + trace.error(e, "close"); } - closeOnExit = null; - } - Engine.getInstance().close(databaseName); - if (deleteFilesOnDisconnect && persistent) { - deleteFilesOnDisconnect = false; - try { - String directory = FileUtils.getParent(databaseName); - String name = 
FileUtils.getName(databaseName); - DeleteDbFiles.execute(directory, name, true); - } catch (Exception e) { - // ignore (the trace is closed already) + trace.info("closed"); + traceSystem.close(); + OnExitDatabaseCloser.unregister(this); + if (deleteFilesOnDisconnect && persistent) { + deleteFilesOnDisconnect = false; + try { + String directory = FileUtils.getParent(databaseName); + String name = FileUtils.getName(databaseName); + DeleteDbFiles.execute(directory, name, true); + } catch (Exception e) { + // ignore (the trace is closed already) + } } + } finally { + Engine.close(databaseName); } } @@ -1294,130 +1258,59 @@ private void removeOrphanedLobs() { if (!persistent) { return; } - boolean lobStorageIsUsed = infoSchema.findTableOrView( - systemSession, LobStorageBackend.LOB_DATA_TABLE) != null; - lobStorageIsUsed |= mvStore != null; - if (!lobStorageIsUsed) { - return; - } try { - getLobStorage(); - lobStorage.removeAllForTable( - LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); + lobStorage.removeAllForTable(LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); } catch (DbException e) { trace.error(e, "close"); } } - private void stopWriter() { - if (writer != null) { - writer.stopThread(); - writer = null; - } - } - /** * Close all open files and unlock the database. 
- * - * @param flush whether writing is allowed */ - private synchronized void closeOpenFilesAndUnlock(boolean flush) { - stopWriter(); - if (pageStore != null) { - if (flush) { - try { - pageStore.checkpoint(); - if (!readOnly) { - lockMeta(pageStore.getPageStoreSession()); - pageStore.compact(compactMode); - } - } catch (DbException e) { - if (SysProperties.CHECK2) { - int code = e.getErrorCode(); - if (code != ErrorCode.DATABASE_IS_CLOSED && - code != ErrorCode.LOCK_TIMEOUT_1 && - code != ErrorCode.IO_EXCEPTION_2) { - e.printStackTrace(); - } - } - trace.error(e, "close"); - } catch (Throwable t) { - if (SysProperties.CHECK2) { - t.printStackTrace(); - } - trace.error(t, "close"); + private synchronized void closeOpenFilesAndUnlock() { + try { + if (!store.getMvStore().isClosed()) { + if (compactMode == CommandInterface.SHUTDOWN_IMMEDIATELY) { + store.closeImmediately(); + } else { + int allowedCompactionTime = + compactMode == CommandInterface.SHUTDOWN_COMPACT || + compactMode == CommandInterface.SHUTDOWN_DEFRAG || + dbSettings.defragAlways ? 
-1 : dbSettings.maxCompactTime; + store.close(allowedCompactionTime); } } - } - reconnectModified(false); - if (mvStore != null) { - long maxCompactTime = dbSettings.maxCompactTime; - if (compactMode == CommandInterface.SHUTDOWN_COMPACT) { - mvStore.compactFile(dbSettings.maxCompactTime); - } else if (compactMode == CommandInterface.SHUTDOWN_DEFRAG) { - maxCompactTime = Long.MAX_VALUE; - } else if (getSettings().defragAlways) { - maxCompactTime = Long.MAX_VALUE; - } - mvStore.close(maxCompactTime); - } - closeFiles(); - if (persistent && lock == null && - fileLockMethod != FileLock.LOCK_NO && - fileLockMethod != FileLock.LOCK_FS) { - // everything already closed (maybe in checkPowerOff) - // don't delete temp files in this case because - // the database could be open now (even from within another process) - return; - } - if (persistent) { - deleteOldTempFiles(); - } - if (systemSession != null) { - systemSession.close(); - systemSession = null; - } - if (lobSession != null) { - lobSession.close(); - lobSession = null; - } - if (lock != null) { - if (fileLockMethod == FileLock.LOCK_SERIALIZED) { - // wait before deleting the .lock file, - // otherwise other connections can not detect that - if (lock.load().containsKey("changePending")) { - try { - Thread.sleep((int) (reconnectCheckDelay * 1.1)); - } catch (InterruptedException e) { - trace.error(e, "close"); - } + if (persistent) { + // Don't delete temp files if everything is already closed + // (maybe in checkPowerOff), the database could be open now + // (even from within another process). 
+ if (lock != null || fileLockMethod == FileLockMethod.NO || fileLockMethod == FileLockMethod.FS) { + deleteOldTempFiles(); } } - lock.unlock(); - lock = null; + } finally { + if (lock != null) { + lock.unlock(); + lock = null; + } } } private synchronized void closeFiles() { try { - if (mvStore != null) { - mvStore.closeImmediately(); - } - if (pageStore != null) { - pageStore.close(); - pageStore = null; - } + store.closeImmediately(); } catch (DbException e) { trace.error(e, "close"); } } - private void checkMetaFree(Session session, int id) { - SearchRow r = meta.getTemplateSimpleRow(false); - r.setValue(0, ValueInt.get(id)); + private void checkMetaFree(SessionLocal session, int id) { + SearchRow r = meta.getRowFactory().createRow(); + r.setValue(0, ValueInteger.get(id)); Cursor cursor = metaIdIndex.find(session, r, r); if (cursor.next()) { - DbException.throwInternalError(); + throw DbException.getInternalError(); } } @@ -1426,18 +1319,35 @@ private void checkMetaFree(Session session, int id) { * * @return the id */ - public synchronized int allocateObjectId() { - int i = objectIds.nextClearBit(0); - objectIds.set(i); + public int allocateObjectId() { + int i; + synchronized (objectIds) { + i = objectIds.nextClearBit(0); + objectIds.set(i); + } return i; } - public ArrayList getAllAggregates() { - return New.arrayList(aggregates.values()); + /** + * Returns system user. + * + * @return system user + */ + public User getSystemUser() { + return systemUser; + } + + /** + * Returns main schema (usually PUBLIC). 
+ * + * @return main schema (usually PUBLIC) + */ + public Schema getMainSchema() { + return mainSchema; } public ArrayList getAllComments() { - return New.arrayList(comments.values()); + return new ArrayList<>(comments.values()); } public int getAllowLiterals() { @@ -1448,78 +1358,49 @@ public int getAllowLiterals() { } public ArrayList getAllRights() { - return New.arrayList(rights.values()); - } - - public ArrayList getAllRoles() { - return New.arrayList(roles.values()); + return new ArrayList<>(rights.values()); } /** - * Get all schema objects. + * Get all tables and views. Meta data tables may be excluded. * - * @return all objects of all types - */ - public ArrayList getAllSchemaObjects() { - initMetaTables(); - ArrayList list = New.arrayList(); - for (Schema schema : schemas.values()) { - list.addAll(schema.getAll()); - } - return list; - } - - /** - * Get all schema objects of the given type. - * - * @param type the object type * @return all objects of that type */ - public ArrayList getAllSchemaObjects(int type) { - if (type == DbObject.TABLE_OR_VIEW) { - initMetaTables(); - } - ArrayList list = New.arrayList(); + public ArrayList

    getAllTablesAndViews() { + ArrayList
    list = new ArrayList<>(); for (Schema schema : schemas.values()) { - list.addAll(schema.getAll(type)); + list.addAll(schema.getAllTablesAndViews(null)); } return list; } /** - * Get all tables and views. + * Get all synonyms. * - * @param includeMeta whether to force including the meta data tables (if - * true, metadata tables are always included; if false, metadata - * tables are only included if they are already initialized) * @return all objects of that type */ - public ArrayList
    getAllTablesAndViews(boolean includeMeta) { - if (includeMeta) { - initMetaTables(); - } - ArrayList
    list = New.arrayList(); + public ArrayList getAllSynonyms() { + ArrayList list = new ArrayList<>(); for (Schema schema : schemas.values()) { - list.addAll(schema.getAllTablesAndViews()); + list.addAll(schema.getAllSynonyms()); } return list; } - public ArrayList getAllSchemas() { - initMetaTables(); - return New.arrayList(schemas.values()); + public Collection getAllSchemas() { + return schemas.values(); } - public ArrayList getAllSettings() { - return New.arrayList(settings.values()); + public Collection getAllSchemasNoMeta() { + return schemas.values(); } - public ArrayList getAllUserDataTypes() { - return New.arrayList(userDataTypes.values()); + public Collection getAllSettings() { + return settings.values(); } - public ArrayList getAllUsers() { - return New.arrayList(users.values()); + public Collection getAllUsersAndRoles() { + return usersAndRoles.values(); } public String getCacheType() { @@ -1530,6 +1411,7 @@ public String getCluster() { return cluster; } + @Override public CompareMode getCompareMode() { return compareMode; } @@ -1557,25 +1439,25 @@ public String getName() { * included * @return the list of sessions */ - public Session[] getSessions(boolean includingSystemSession) { - ArrayList list; - // need to synchronized on userSession, otherwise the list - // may contain null elements - synchronized (userSessions) { - list = New.arrayList(userSessions); - } - // copy, to ensure the reference is stable - Session sys = systemSession; - Session lob = lobSession; - if (includingSystemSession && sys != null) { - list.add(sys); + public SessionLocal[] getSessions(boolean includingSystemSession) { + ArrayList list; + // need to synchronized on this database, + // otherwise the list may contain null elements + synchronized (this) { + list = new ArrayList<>(userSessions); } - if (includingSystemSession && lob != null) { - list.add(lob); + if (includingSystemSession) { + // copy, to ensure the reference is stable + SessionLocal s = systemSession; + if (s != 
null) { + list.add(s); + } + s = lobSession; + if (s != null) { + list.add(s); + } } - Session[] array = new Session[list.size()]; - list.toArray(array); - return array; + return list.toArray(new SessionLocal[0]); } /** @@ -1584,11 +1466,22 @@ public Session[] getSessions(boolean includingSystemSession) { * @param session the session * @param obj the database object */ - public synchronized void updateMeta(Session session, DbObject obj) { - lockMeta(session); + public void updateMeta(SessionLocal session, DbObject obj) { int id = obj.getId(); - removeMeta(session, id); - addMeta(session, obj); + if (id > 0) { + if (!starting && !obj.isTemporary()) { + Row newRow = meta.getTemplateRow(); + MetaRecord.populateRowFromDBObject(obj, newRow); + Row oldRow = metaIdIndex.getRow(session, id); + if (oldRow != null) { + meta.updateRow(session, oldRow, newRow); + } + } + // for temporary objects + synchronized (objectIds) { + objectIds.set(id); + } + } } /** @@ -1598,18 +1491,18 @@ public synchronized void updateMeta(Session session, DbObject obj) { * @param obj the object * @param newName the new name */ - public synchronized void renameSchemaObject(Session session, + public synchronized void renameSchemaObject(SessionLocal session, SchemaObject obj, String newName) { checkWritingAllowed(); obj.getSchema().rename(obj, newName); updateMetaAndFirstLevelChildren(session, obj); } - private synchronized void updateMetaAndFirstLevelChildren(Session session, DbObject obj) { + private synchronized void updateMetaAndFirstLevelChildren(SessionLocal session, DbObject obj) { ArrayList list = obj.getChildren(); Comment comment = findComment(obj); if (comment != null) { - DbException.throwInternalError(); + throw DbException.getInternalError(comment.toString()); } updateMeta(session, obj); // remember that this scans only one level deep! 
@@ -1629,48 +1522,26 @@ private synchronized void updateMetaAndFirstLevelChildren(Session session, DbObj * @param obj the object * @param newName the new name */ - public synchronized void renameDatabaseObject(Session session, + public synchronized void renameDatabaseObject(SessionLocal session, DbObject obj, String newName) { checkWritingAllowed(); int type = obj.getType(); - HashMap map = getMap(type); + Map map = getMap(type); if (SysProperties.CHECK) { if (!map.containsKey(obj.getName())) { - DbException.throwInternalError("not found: " + obj.getName()); + throw DbException.getInternalError("not found: " + obj.getName()); } if (obj.getName().equals(newName) || map.containsKey(newName)) { - DbException.throwInternalError("object already exists: " + newName); + throw DbException.getInternalError("object already exists: " + newName); } } obj.checkRename(); - int id = obj.getId(); - lockMeta(session); - removeMeta(session, id); map.remove(obj.getName()); obj.rename(newName); map.put(newName, obj); updateMetaAndFirstLevelChildren(session, obj); } - /** - * Create a temporary file in the database folder. 
- * - * @return the file name - */ - public String createTempFile() { - try { - boolean inTempDir = readOnly; - String name = databaseName; - if (!persistent) { - name = "memFS:" + name; - } - return FileUtils.createTempFile(name, - Constants.SUFFIX_TEMP_FILE, true, inTempDir); - } catch (IOException e) { - throw DbException.convertIOException(e, databaseName); - } - } - private void deleteOldTempFiles() { String path = FileUtils.getParent(databaseName); for (String name : FileUtils.newDirectoryStream(path)) { @@ -1703,13 +1574,13 @@ public Schema getSchema(String schemaName) { * @param session the session * @param obj the object to remove */ - public synchronized void removeDatabaseObject(Session session, DbObject obj) { + public synchronized void removeDatabaseObject(SessionLocal session, DbObject obj) { checkWritingAllowed(); String objName = obj.getName(); int type = obj.getType(); - HashMap map = getMap(type); + Map map = getMap(type); if (SysProperties.CHECK && !map.containsKey(objName)) { - DbException.throwInternalError("not found: " + objName); + throw DbException.getInternalError("not found: " + objName); } Comment comment = findComment(obj); lockMeta(session); @@ -1740,17 +1611,17 @@ public Table getDependentTable(SchemaObject obj, Table except) { return null; default: } - HashSet set = New.hashSet(); - for (Table t : getAllTablesAndViews(false)) { - if (except == t) { - continue; - } else if (Table.VIEW.equals(t.getTableType())) { - continue; - } - set.clear(); - t.addDependencies(set); - if (set.contains(obj)) { - return t; + HashSet set = new HashSet<>(); + for (Schema schema : schemas.values()) { + for (Table t : schema.getAllTablesAndViews(null)) { + if (except == t || TableType.VIEW == t.getTableType()) { + continue; + } + set.clear(); + t.addDependencies(set); + if (set.contains(obj)) { + return t; + } } } return null; @@ -1762,7 +1633,7 @@ public Table getDependentTable(SchemaObject obj, Table except) { * @param session the session * @param obj 
the object to be removed */ - public synchronized void removeSchemaObject(Session session, + public void removeSchemaObject(SessionLocal session, SchemaObject obj) { int type = obj.getType(); if (type == DbObject.TABLE_OR_VIEW) { @@ -1780,36 +1651,39 @@ public synchronized void removeSchemaObject(Session session, } } else if (type == DbObject.CONSTRAINT) { Constraint constraint = (Constraint) obj; - Table table = constraint.getTable(); - if (table.isTemporary() && !table.isGlobalTemporary()) { - session.removeLocalTempTableConstraint(constraint); - return; + if (constraint.getConstraintType() != Type.DOMAIN) { + Table table = constraint.getTable(); + if (table.isTemporary() && !table.isGlobalTemporary()) { + session.removeLocalTempTableConstraint(constraint); + return; + } } } checkWritingAllowed(); lockMeta(session); - Comment comment = findComment(obj); - if (comment != null) { - removeDatabaseObject(session, comment); - } - obj.getSchema().remove(obj); - int id = obj.getId(); - if (!starting) { - Table t = getDependentTable(obj, null); - if (t != null) { - obj.getSchema().add(obj); - throw DbException.get(ErrorCode.CANNOT_DROP_2, obj.getSQL(), - t.getSQL()); + synchronized (this) { + Comment comment = findComment(obj); + if (comment != null) { + removeDatabaseObject(session, comment); + } + obj.getSchema().remove(obj); + int id = obj.getId(); + if (!starting) { + Table t = getDependentTable(obj, null); + if (t != null) { + obj.getSchema().add(obj); + throw DbException.get(ErrorCode.CANNOT_DROP_2, obj.getTraceSQL(), t.getTraceSQL()); + } + obj.removeChildrenAndResources(session); } - obj.removeChildrenAndResources(session); + removeMeta(session, id); } - removeMeta(session, id); } /** - * Check if this database disk-based. + * Check if this database is disk-based. * - * @return true if it is disk-based, false it it is in-memory only. + * @return true if it is disk-based, false if it is in-memory only. 
*/ public boolean isPersistent() { return persistent; @@ -1824,13 +1698,7 @@ public synchronized void setCacheSize(int kb) { int max = MathUtils.convertLongToInt(Utils.getMemoryMax()) / 2; kb = Math.min(kb, max); } - cacheSize = kb; - if (pageStore != null) { - pageStore.getCache().setMaxMemory(kb); - } - if (mvStore != null) { - mvStore.setCacheSize(Math.max(1, kb)); - } + store.setCacheSize(Math.max(1, kb)); } public synchronized void setMasterUser(User user) { @@ -1850,7 +1718,7 @@ public Role getPublicRole() { * @param session the session * @return a unique name */ - public synchronized String getTempTableName(String baseName, Session session) { + public synchronized String getTempTableName(String baseName, SessionLocal session) { String tempName; do { tempName = baseName + "_COPY_" + session.getId() + @@ -1872,11 +1740,6 @@ public void checkWritingAllowed() { if (readOnly) { throw DbException.get(ErrorCode.DATABASE_IS_READ_ONLY); } - if (fileLockMethod == FileLock.LOCK_SERIALIZED) { - if (!reconnectChangePending) { - throw DbException.get(ErrorCode.DATABASE_IS_READ_ONLY); - } - } } public boolean isReadOnly() { @@ -1884,36 +1747,23 @@ public boolean isReadOnly() { } public void setWriteDelay(int value) { - writeDelay = value; - if (writer != null) { - writer.setWriteDelay(value); - // TODO check if MIN_WRITE_DELAY is a good value - flushOnEachCommit = writeDelay < Constants.MIN_WRITE_DELAY; - } - if (mvStore != null) { - int millis = value < 0 ? 0 : value; - mvStore.getStore().setAutoCommitDelay(millis); - } + store.getMvStore().setAutoCommitDelay(value < 0 ? 0 : value); } public int getRetentionTime() { - return retentionTime; + return store.getMvStore().getRetentionTime(); } public void setRetentionTime(int value) { - retentionTime = value; - if (mvStore != null) { - mvStore.getStore().setRetentionTime(value); - } + store.getMvStore().setRetentionTime(value); } - /** - * Check if flush-on-each-commit is enabled. 
- * - * @return true if it is - */ - public boolean getFlushOnEachCommit() { - return flushOnEachCommit; + public void setAllowBuiltinAliasOverride(boolean b) { + allowBuiltinAliasOverride = b; + } + + public boolean isAllowBuiltinAliasOverride() { + return allowBuiltinAliasOverride; } /** @@ -1922,10 +1772,7 @@ public boolean getFlushOnEachCommit() { * @return the list */ public ArrayList getInDoubtTransactions() { - if (mvStore != null) { - return mvStore.getInDoubtTransactions(); - } - return pageStore == null ? null : pageStore.getInDoubtTransactions(); + return store.getInDoubtTransactions(); } /** @@ -1934,52 +1781,28 @@ public ArrayList getInDoubtTransactions() { * @param session the session * @param transaction the name of the transaction */ - synchronized void prepareCommit(Session session, String transaction) { - if (readOnly) { - return; - } - if (mvStore != null) { - mvStore.prepareCommit(session, transaction); - return; - } - if (pageStore != null) { - pageStore.flushLog(); - pageStore.prepareCommit(session, transaction); + synchronized void prepareCommit(SessionLocal session, String transaction) { + if (!readOnly) { + store.prepareCommit(session, transaction); } } /** - * Commit the current transaction of the given session. - * - * @param session the session + * If there is a background store thread, and if there wasn an exception in + * that thread, throw it now. 
*/ - synchronized void commit(Session session) { - throwLastBackgroundException(); - if (readOnly) { - return; - } - if (pageStore != null) { - pageStore.commit(session); - } - session.setAllCommitted(); - } - - private void throwLastBackgroundException() { - if (backgroundException != null) { - // we don't care too much about concurrency here, - // we just want to make sure the exception is _normally_ - // not just logged to the .trace.db file - DbException b = backgroundException; - backgroundException = null; + void throwLastBackgroundException() { + if (!store.getMvStore().isBackgroundThread()) { + DbException b = backgroundException.getAndSet(null); if (b != null) { - throw b; + // wrap the exception, so we see it was thrown here + throw DbException.get(b.getErrorCode(), b, b.getMessage()); } } } public void setBackgroundException(DbException e) { - if (backgroundException == null) { - backgroundException = e; + if (backgroundException.compareAndSet(null, e)) { TraceSystem t = getTraceSystem(); if (t != null) { t.getTrace(Trace.DATABASE).error(e, "flush"); @@ -1987,21 +1810,24 @@ public void setBackgroundException(DbException e) { } } + public Throwable getBackgroundException() { + MVStoreException exception = store.getMvStore().getPanicException(); + if(exception != null) { + return exception; + } + return backgroundException.getAndSet(null); + } + + /** * Flush all pending changes to the transaction log. 
*/ public synchronized void flush() { - if (readOnly) { - return; - } - if (pageStore != null) { - pageStore.flushLog(); - } - if (mvStore != null) { + if (!readOnly) { try { - mvStore.flush(); + store.flush(); } catch (RuntimeException e) { - backgroundException = DbException.convert(e); + backgroundException.compareAndSet(null, DbException.convert(e)); throw e; } } @@ -2012,12 +1838,12 @@ public void setEventListener(DatabaseEventListener eventListener) { } public void setEventListenerClass(String className) { - if (className == null || className.length() == 0) { + if (className == null || className.isEmpty()) { eventListener = null; } else { try { eventListener = (DatabaseEventListener) - JdbcUtils.loadUserClass(className).newInstance(); + JdbcUtils.loadUserClass(className).getDeclaredConstructor().newInstance(); String url = databaseURL; if (cipher != null) { url += ";CIPHER=" + cipher; @@ -2038,9 +1864,9 @@ public void setEventListenerClass(String className) { * @param state the {@link DatabaseEventListener} state * @param name the object name * @param x the current position - * @param max the highest value + * @param max the highest value or 0 if unknown */ - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (eventListener != null) { try { eventListener.setProgress(state, name, x, max); @@ -2075,12 +1901,7 @@ public synchronized void sync() { if (readOnly) { return; } - if (mvStore != null) { - mvStore.sync(); - } - if (pageStore != null) { - pageStore.sync(); - } + store.sync(); } public int getMaxMemoryRows() { @@ -2091,29 +1912,14 @@ public void setMaxMemoryRows(int value) { this.maxMemoryRows = value; } - public void setMaxMemoryUndo(int value) { - this.maxMemoryUndo = value; - } - - public int getMaxMemoryUndo() { - return maxMemoryUndo; - } - public void setLockMode(int lockMode) { switch (lockMode) { case Constants.LOCK_MODE_OFF: - if (multiThreaded) { - // 
currently the combination of LOCK_MODE=0 and MULTI_THREADED - // is not supported. also see code in - // JdbcDatabaseMetaData#supportsTransactionIsolationLevel(int) - throw DbException.get( - ErrorCode.UNSUPPORTED_SETTING_COMBINATION, - "LOCK_MODE=0 & MULTI_THREADED"); - } - break; case Constants.LOCK_MODE_READ_COMMITTED: + break; case Constants.LOCK_MODE_TABLE: case Constants.LOCK_MODE_TABLE_GC: + lockMode = Constants.LOCK_MODE_READ_COMMITTED; break; default: throw DbException.getInvalidValueException("lock mode", lockMode); @@ -2125,11 +1931,11 @@ public int getLockMode() { return lockMode; } - public synchronized void setCloseDelay(int value) { + public void setCloseDelay(int value) { this.closeDelay = value; } - public Session getSystemSession() { + public SessionLocal getSystemSession() { return systemSession; } @@ -2163,23 +1969,17 @@ public boolean getIgnoreCase() { return ignoreCase; } - public synchronized void setDeleteFilesOnDisconnect(boolean b) { - this.deleteFilesOnDisconnect = b; + public void setIgnoreCatalogs(boolean b) { + ignoreCatalogs = b; } - @Override - public String getLobCompressionAlgorithm(int type) { - return lobCompressionAlgorithm; + public boolean getIgnoreCatalogs() { + return ignoreCatalogs; } - public void setLobCompressionAlgorithm(String stringValue) { - this.lobCompressionAlgorithm = stringValue; - } - public synchronized void setMaxLogSize(long value) { - if (pageStore != null) { - pageStore.setMaxLogSize(value); - } + public synchronized void setDeleteFilesOnDisconnect(boolean b) { + this.deleteFilesOnDisconnect = b; } public void setAllowLiterals(int value) { @@ -2214,7 +2014,9 @@ public boolean getReferentialIntegrity() { public void setQueryStatistics(boolean b) { queryStatistics = b; synchronized (this) { - queryStatisticsData = null; + if (!b) { + queryStatisticsData = null; + } } } @@ -2222,6 +2024,17 @@ public boolean getQueryStatistics() { return queryStatistics; } + public void setQueryStatisticsMaxEntries(int n) { + 
queryStatisticsMaxEntries = n; + if (queryStatisticsData != null) { + synchronized (this) { + if (queryStatisticsData != null) { + queryStatisticsData.setMaxQueryEntries(queryStatisticsMaxEntries); + } + } + } + } + public QueryStatisticsData getQueryStatisticsData() { if (!queryStatistics) { return null; @@ -2229,7 +2042,7 @@ public QueryStatisticsData getQueryStatisticsData() { if (queryStatisticsData == null) { synchronized (this) { if (queryStatisticsData == null) { - queryStatisticsData = new QueryStatisticsData(); + queryStatisticsData = new QueryStatisticsData(queryStatisticsMaxEntries); } } } @@ -2246,15 +2059,6 @@ public boolean isStarting() { return starting; } - /** - * Check if multi version concurrency is enabled for this database. - * - * @return true if it is enabled - */ - public boolean isMultiVersion() { - return multiVersion; - } - /** * Called after the database has been opened and initialized. This method * notifies the event listener if one has been set. @@ -2263,41 +2067,24 @@ void opened() { if (eventListener != null) { eventListener.opened(); } - if (writer != null) { - writer.startThread(); - } } public void setMode(Mode mode) { this.mode = mode; + getNextRemoteSettingsId(); } + @Override public Mode getMode() { return mode; } - public boolean isMultiThreaded() { - return multiThreaded; + public void setDefaultNullOrdering(DefaultNullOrdering defaultNullOrdering) { + this.defaultNullOrdering = defaultNullOrdering; } - public void setMultiThreaded(boolean multiThreaded) { - if (multiThreaded && this.multiThreaded != multiThreaded) { - if (multiVersion && mvStore == null) { - // currently the combination of MVCC and MULTI_THREADED is not - // supported - throw DbException.get( - ErrorCode.UNSUPPORTED_SETTING_COMBINATION, - "MVCC & MULTI_THREADED"); - } - if (lockMode == 0) { - // currently the combination of LOCK_MODE=0 and MULTI_THREADED - // is not supported - throw DbException.get( - ErrorCode.UNSUPPORTED_SETTING_COMBINATION, - 
"LOCK_MODE=0 & MULTI_THREADED"); - } - } - this.multiThreaded = multiThreaded; + public DefaultNullOrdering getDefaultNullOrdering() { + return defaultNullOrdering; } public void setMaxOperationMemory(int maxOperationMemory) { @@ -2308,8 +2095,8 @@ public int getMaxOperationMemory() { return maxOperationMemory; } - public Session getExclusiveSession() { - return exclusiveSession; + public SessionLocal getExclusiveSession() { + return exclusiveSession.get(); } /** @@ -2317,12 +2104,30 @@ public Session getExclusiveSession() { * * @param session the session * @param closeOthers whether other sessions are closed + * @return true if success or if database is in exclusive mode + * set by this session already, false otherwise */ - public void setExclusiveSession(Session session, boolean closeOthers) { - this.exclusiveSession = session; + public boolean setExclusiveSession(SessionLocal session, boolean closeOthers) { + if (exclusiveSession.get() != session && + !exclusiveSession.compareAndSet(null, session)) { + return false; + } if (closeOthers) { - closeAllSessionsException(session); + closeAllSessionsExcept(session); } + return true; + } + + /** + * Stop exclusive access the database by provided session. + * + * @param session the session + * @return true if success or if database is in non-exclusive mode already, + * false otherwise + */ + public boolean unsetExclusiveSession(SessionLocal session) { + return exclusiveSession.get() == null + || exclusiveSession.compareAndSet(session, null); } @Override @@ -2342,6 +2147,17 @@ public boolean isSysTableLocked() { return meta == null || meta.isLockedExclusively(); } + /** + * Checks if the system table (containing the catalog) is locked by the + * given session. 
+ * + * @param session the session + * @return true if it is currently locked + */ + public boolean isSysTableLockedBy(SessionLocal session) { + return meta == null || meta.isLockedExclusivelyBy(session); + } + /** * Open a new connection or get an existing connection to another database. * @@ -2354,7 +2170,7 @@ public boolean isSysTableLocked() { public TableLinkConnection getLinkConnection(String driver, String url, String user, String password) { if (linkConnections == null) { - linkConnections = New.hashMap(); + linkConnections = new HashMap<>(); } return TableLinkConnection.open(linkConnections, driver, url, user, password, dbSettings.shareLinkedConnections); @@ -2369,6 +2185,7 @@ public String toString() { * Immediately close the database. */ public void shutdownImmediately() { + closing = true; setPowerOffCount(1); try { checkPowerOff(); @@ -2376,6 +2193,7 @@ public void shutdownImmediately() { // ignore } closeFiles(); + powerOffCount = 0; } @Override @@ -2383,38 +2201,21 @@ public TempFileDeleter getTempFileDeleter() { return tempFileDeleter; } - public PageStore getPageStore() { - if (dbSettings.mvStore) { - if (mvStore == null) { - mvStore = MVTableEngine.init(this); - } - return null; - } - if (pageStore == null) { - pageStore = new PageStore(this, databaseName + - Constants.SUFFIX_PAGE_FILE, accessModeData, cacheSize); - if (pageSize != Constants.DEFAULT_PAGE_SIZE) { - pageStore.setPageSize(pageSize); - } - if (!readOnly && fileLockMethod == FileLock.LOCK_FS) { - pageStore.setLockFile(true); - } - pageStore.setLogMode(logMode); - pageStore.open(); - } - return pageStore; - } - /** - * Get the first user defined table. + * Get the first user defined table, excluding the LOB_BLOCKS table that the + * Recover tool creates. 
* * @return the table or null if no table is defined */ public Table getFirstUserTable() { - for (Table table : getAllTablesAndViews(false)) { - if (table.getCreateSQL() != null) { - if (table.isHidden()) { - // LOB tables + for (Schema schema : schemas.values()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table.getCreateSQL() == null || table.isHidden()) { + continue; + } + // exclude the LOB_MAP that the Recover tool creates + if (schema.getId() == Constants.INFORMATION_SCHEMA_ID + && table.getName().equalsIgnoreCase("LOB_BLOCKS")) { continue; } return table; @@ -2423,172 +2224,16 @@ public Table getFirstUserTable() { return null; } - /** - * Check if the contents of the database was changed and therefore it is - * required to re-connect. This method waits until pending changes are - * completed. If a pending change takes too long (more than 2 seconds), the - * pending change is broken (removed from the properties file). - * - * @return true if reconnecting is required - */ - public boolean isReconnectNeeded() { - if (fileLockMethod != FileLock.LOCK_SERIALIZED) { - return false; - } - if (reconnectChangePending) { - return false; - } - long now = System.currentTimeMillis(); - if (now < reconnectCheckNext) { - return false; - } - reconnectCheckNext = now + reconnectCheckDelay; - if (lock == null) { - lock = new FileLock(traceSystem, databaseName + - Constants.SUFFIX_LOCK_FILE, Constants.LOCK_SLEEP); - } - try { - Properties prop = lock.load(), first = prop; - while (true) { - if (prop.equals(reconnectLastLock)) { - return false; - } - if (prop.getProperty("changePending", null) == null) { - break; - } - if (System.currentTimeMillis() > - now + reconnectCheckDelay * 10) { - if (first.equals(prop)) { - // the writing process didn't update the file - - // it may have terminated - lock.setProperty("changePending", null); - lock.save(); - break; - } - } - trace.debug("delay (change pending)"); - Thread.sleep(reconnectCheckDelay); - prop = 
lock.load(); - } - reconnectLastLock = prop; - } catch (Exception e) { - // DbException, InterruptedException - trace.error(e, "readOnly {0}", readOnly); - // ignore - } - return true; - } - - /** - * Flush all changes when using the serialized mode, and if there are - * pending changes, and some time has passed. This switches to a new - * transaction log and resets the change pending flag in - * the .lock.db file. - */ - public void checkpointIfRequired() { - if (fileLockMethod != FileLock.LOCK_SERIALIZED || - readOnly || !reconnectChangePending || closing) { - return; - } - long now = System.currentTimeMillis(); - if (now > reconnectCheckNext + reconnectCheckDelay) { - if (SysProperties.CHECK && checkpointAllowed < 0) { - DbException.throwInternalError(); - } - synchronized (reconnectSync) { - if (checkpointAllowed > 0) { - return; - } - checkpointRunning = true; - } - synchronized (this) { - trace.debug("checkpoint start"); - flushSequences(); - checkpoint(); - reconnectModified(false); - trace.debug("checkpoint end"); - } - synchronized (reconnectSync) { - checkpointRunning = false; - } - } - } - - public boolean isFileLockSerialized() { - return fileLockMethod == FileLock.LOCK_SERIALIZED; - } - - private void flushSequences() { - for (SchemaObject obj : getAllSchemaObjects(DbObject.SEQUENCE)) { - Sequence sequence = (Sequence) obj; - sequence.flushWithoutMargin(); - } - } - /** * Flush all changes and open a new transaction log. */ public void checkpoint() { if (persistent) { - synchronized (this) { - if (pageStore != null) { - pageStore.checkpoint(); - } - } - if (mvStore != null) { - mvStore.flush(); - } + store.flush(); } getTempFileDeleter().deleteUnused(); } - /** - * This method is called before writing to the transaction log. 
- * - * @return true if the call was successful and writing is allowed, - * false if another connection was faster - */ - public boolean beforeWriting() { - if (fileLockMethod != FileLock.LOCK_SERIALIZED) { - return true; - } - while (checkpointRunning) { - try { - Thread.sleep(10 + (int) (Math.random() * 10)); - } catch (Exception e) { - // ignore InterruptedException - } - } - synchronized (reconnectSync) { - if (reconnectModified(true)) { - checkpointAllowed++; - if (SysProperties.CHECK && checkpointAllowed > 20) { - throw DbException.throwInternalError(); - } - return true; - } - } - // make sure the next call to isReconnectNeeded() returns true - reconnectCheckNext = System.currentTimeMillis() - 1; - reconnectLastLock = null; - return false; - } - - /** - * This method is called after updates are finished. - */ - public void afterWriting() { - if (fileLockMethod != FileLock.LOCK_SERIALIZED) { - return; - } - synchronized (reconnectSync) { - checkpointAllowed--; - } - if (SysProperties.CHECK && checkpointAllowed < 0) { - throw DbException.throwInternalError(); - } - } - /** * Switch the database to read-only mode. 
* @@ -2611,65 +2256,13 @@ public SourceCompiler getCompiler() { @Override public LobStorageInterface getLobStorage() { - if (lobStorage == null) { - if (dbSettings.mvStore) { - lobStorage = new LobStorageMap(this); - } else { - lobStorage = new LobStorageBackend(this); - } - } return lobStorage; } - public JdbcConnection getLobConnectionForInit() { - String url = Constants.CONN_URL_INTERNAL; - JdbcConnection conn = new JdbcConnection( - systemSession, systemUser.getName(), url); - conn.setTraceLevel(TraceSystem.OFF); - return conn; - } - - public JdbcConnection getLobConnectionForRegularUse() { - String url = Constants.CONN_URL_INTERNAL; - JdbcConnection conn = new JdbcConnection( - lobSession, systemUser.getName(), url); - conn.setTraceLevel(TraceSystem.OFF); - return conn; - } - - public Session getLobSession() { + public SessionLocal getLobSession() { return lobSession; } - public void setLogMode(int log) { - if (log < 0 || log > 2) { - throw DbException.getInvalidValueException("LOG", log); - } - if (pageStore != null) { - if (log != PageStore.LOG_MODE_SYNC || - pageStore.getLogMode() != PageStore.LOG_MODE_SYNC) { - // write the log mode in the trace file when enabling or - // disabling a dangerous mode - trace.error(null, "log {0}", log); - } - this.logMode = log; - pageStore.setLogMode(log); - } - if (mvStore != null) { - this.logMode = log; - } - } - - public int getLogMode() { - if (pageStore != null) { - return pageStore.getLogMode(); - } - if (mvStore != null) { - return logMode; - } - return PageStore.LOG_MODE_OFF; - } - public int getDefaultTableType() { return defaultTableType; } @@ -2678,10 +2271,6 @@ public void setDefaultTableType(int defaultTableType) { this.defaultTableType = defaultTableType; } - public void setMultiVersion(boolean multiVersion) { - this.multiVersion = multiVersion; - } - public DbSettings getSettings() { return dbSettings; } @@ -2694,9 +2283,32 @@ public DbSettings getSettings() { * @return the hash map */ public HashMap 
newStringMap() { - return dbSettings.databaseToUpper ? - new HashMap() : - new CaseInsensitiveMap(); + return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveMap<>() : new HashMap<>(); + } + + /** + * Create a new hash map. Depending on the configuration, the key is case + * sensitive or case insensitive. + * + * @param the value type + * @param initialCapacity the initial capacity + * @return the hash map + */ + public HashMap newStringMap(int initialCapacity) { + return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveMap<>(initialCapacity) + : new HashMap<>(initialCapacity); + } + + /** + * Create a new hash map. Depending on the configuration, the key is case + * sensitive or case insensitive. + * + * @param the value type + * @return the hash map + */ + public ConcurrentHashMap newConcurrentStringMap() { + return dbSettings.caseInsensitiveIdentifiers ? new CaseInsensitiveConcurrentMap<>() + : new ConcurrentHashMap<>(); } /** @@ -2708,19 +2320,49 @@ public HashMap newStringMap() { * @return true if they match */ public boolean equalsIdentifiers(String a, String b) { - if (a == b || a.equals(b)) { - return true; + return a.equals(b) || dbSettings.caseInsensitiveIdentifiers && a.equalsIgnoreCase(b); + } + + /** + * Returns identifier in upper or lower case depending on database settings. + * + * @param upperName + * identifier in the upper case + * @return identifier in upper or lower case + */ + public String sysIdentifier(String upperName) { + assert isUpperSysIdentifier(upperName); + return dbSettings.databaseToLower ? 
StringUtils.toLowerEnglish(upperName) : upperName; + } + + private static boolean isUpperSysIdentifier(String upperName) { + int l = upperName.length(); + if (l == 0) { + return false; } - if (!dbSettings.databaseToUpper && a.equalsIgnoreCase(b)) { - return true; + char c = upperName.charAt(0); + if (c < 'A' || c > 'Z') { + return false; + } + l--; + for (int i = 1; i < l; i++) { + c = upperName.charAt(i); + if ((c < 'A' || c > 'Z') && c != '_') { + return false; + } } - return false; + if (l > 0) { + c = upperName.charAt(l); + if (c < 'A' || c > 'Z') { + return false; + } + } + return true; } @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - throw DbException.throwInternalError(); + public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off, int length) { + throw DbException.getInternalError(); } public byte[] getFileEncryptionKey() { @@ -2752,7 +2394,7 @@ private void initJavaObjectSerializer() { !serializerName.equals("null")) { try { javaObjectSerializer = (JavaObjectSerializer) - JdbcUtils.loadUserClass(serializerName).newInstance(); + JdbcUtils.loadUserClass(serializerName).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw DbException.convert(e); } @@ -2766,7 +2408,72 @@ public void setJavaObjectSerializerName(String serializerName) { synchronized (this) { javaObjectSerializerInitialized = false; javaObjectSerializerName = serializerName; + getNextRemoteSettingsId(); + } + } + + /** + * Get the table engine class, loading it if needed. 
+ * + * @param tableEngine the table engine name + * @return the class + */ + public TableEngine getTableEngine(String tableEngine) { + assert Thread.holdsLock(this); + + TableEngine engine = tableEngines.get(tableEngine); + if (engine == null) { + try { + engine = (TableEngine) JdbcUtils.loadUserClass(tableEngine).getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw DbException.convert(e); + } + tableEngines.put(tableEngine, engine); + } + return engine; + } + + /** + * get authenticator for database users + * @return authenticator set for database + */ + public Authenticator getAuthenticator() { + return authenticator; + } + + /** + * Set current database authenticator + * + * @param authenticator = authenticator to set, null to revert to the Internal authenticator + */ + public void setAuthenticator(Authenticator authenticator) { + if (authenticator!=null) { + authenticator.init(this); + } + this.authenticator=authenticator; + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + Session session = SessionLocal.getThreadLocalSession(); + if (session != null) { + return session.currentTimestamp(); } + throw DbException.getUnsupportedException("Unsafe comparison or cast"); + } + + @Override + public TimeZoneProvider currentTimeZone() { + Session session = SessionLocal.getThreadLocalSession(); + if (session != null) { + return session.currentTimeZone(); + } + throw DbException.getUnsupportedException("Unsafe comparison or cast"); + } + + @Override + public boolean zeroBasedEnums() { + return dbSettings.zeroBasedEnums; } } diff --git a/h2/src/main/org/h2/engine/DatabaseCloser.java b/h2/src/main/org/h2/engine/DatabaseCloser.java deleted file mode 100644 index 10158488f7..0000000000 --- a/h2/src/main/org/h2/engine/DatabaseCloser.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.lang.ref.WeakReference; - -import org.h2.message.Trace; - -/** - * This class is responsible to close a database if the application did not - * close a connection. A database closer object only exists if there is no user - * connected to the database. - */ -class DatabaseCloser extends Thread { - - private final boolean shutdownHook; - private final Trace trace; - private volatile WeakReference databaseRef; - private int delayInMillis; - - DatabaseCloser(Database db, int delayInMillis, boolean shutdownHook) { - this.databaseRef = new WeakReference(db); - this.delayInMillis = delayInMillis; - this.shutdownHook = shutdownHook; - trace = db.getTrace(Trace.DATABASE); - } - - /** - * Stop and disable the database closer. This method is called after the - * database has been closed, or after a session has been created. - */ - void reset() { - synchronized (this) { - databaseRef = null; - } - } - - @Override - public void run() { - while (delayInMillis > 0) { - try { - int step = 100; - Thread.sleep(step); - delayInMillis -= step; - } catch (Exception e) { - // ignore InterruptedException - } - if (databaseRef == null) { - return; - } - } - Database database = null; - synchronized (this) { - if (databaseRef != null) { - database = databaseRef.get(); - } - } - if (database != null) { - try { - database.close(shutdownHook); - } catch (RuntimeException e) { - // this can happen when stopping a web application, - // if loading classes is no longer allowed - // it would throw an IllegalStateException - try { - trace.error(e, "could not close the database"); - // if this was successful, we ignore the exception - // otherwise not - } catch (RuntimeException e2) { - throw e; - } - } - } - } - -} diff --git a/h2/src/main/org/h2/engine/DbObject.java b/h2/src/main/org/h2/engine/DbObject.java index d50ac34016..7464f97794 100644 --- a/h2/src/main/org/h2/engine/DbObject.java +++ 
b/h2/src/main/org/h2/engine/DbObject.java @@ -1,129 +1,224 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.ArrayList; + +import org.h2.command.Parser; +import org.h2.message.DbException; +import org.h2.message.Trace; import org.h2.table.Table; +import org.h2.util.HasSQL; +import org.h2.util.ParserUtil; /** * A database object such as a table, an index, or a user. */ -public interface DbObject { +public abstract class DbObject implements HasSQL { /** * The object is of the type table or view. */ - int TABLE_OR_VIEW = 0; + public static final int TABLE_OR_VIEW = 0; /** * This object is an index. */ - int INDEX = 1; + public static final int INDEX = 1; /** * This object is a user. */ - int USER = 2; + public static final int USER = 2; /** * This object is a sequence. */ - int SEQUENCE = 3; + public static final int SEQUENCE = 3; /** * This object is a trigger. */ - int TRIGGER = 4; + public static final int TRIGGER = 4; /** * This object is a constraint (check constraint, unique constraint, or * referential constraint). */ - int CONSTRAINT = 5; + public static final int CONSTRAINT = 5; /** * This object is a setting. */ - int SETTING = 6; + public static final int SETTING = 6; /** * This object is a role. */ - int ROLE = 7; + public static final int ROLE = 7; /** * This object is a right. */ - int RIGHT = 8; + public static final int RIGHT = 8; /** * This object is an alias for a Java function. */ - int FUNCTION_ALIAS = 9; + public static final int FUNCTION_ALIAS = 9; /** * This object is a schema. */ - int SCHEMA = 10; + public static final int SCHEMA = 10; /** * This object is a constant. 
*/ - int CONSTANT = 11; + public static final int CONSTANT = 11; /** - * This object is a user data type (domain). + * This object is a domain. */ - int USER_DATATYPE = 12; + public static final int DOMAIN = 12; /** * This object is a comment. */ - int COMMENT = 13; + public static final int COMMENT = 13; /** * This object is a user-defined aggregate function. */ - int AGGREGATE = 14; + public static final int AGGREGATE = 14; + + /** + * This object is a synonym. + */ + public static final int SYNONYM = 15; + + /** + * The database. + */ + protected Database database; + + /** + * The trace module. + */ + protected Trace trace; + + /** + * The comment (if set). + */ + protected String comment; + + private int id; + + private String objectName; + + private long modificationId; + + private boolean temporary; /** - * Get the SQL name of this object (may be quoted). + * Initialize some attributes of this object. * - * @return the SQL name + * @param db the database + * @param objectId the object id + * @param name the name + * @param traceModuleId the trace module id + */ + protected DbObject(Database db, int objectId, String name, int traceModuleId) { + this.database = db; + this.trace = db.getTrace(traceModuleId); + this.id = objectId; + this.objectName = name; + this.modificationId = db.getModificationMetaId(); + } + + /** + * Tell the object that is was modified. */ - String getSQL(); + public final void setModified() { + this.modificationId = database == null ? 
-1 : database.getNextModificationMetaId(); + } + + public final long getModificationId() { + return modificationId; + } + + protected final void setObjectName(String name) { + objectName = name; + } + + @Override + public String getSQL(int sqlFlags) { + return Parser.quoteIdentifier(objectName, sqlFlags); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(builder, objectName, sqlFlags); + } /** * Get the list of dependent children (for tables, this includes indexes and * so on). * - * @return the list of children + * @return the list of children, or {@code null} */ - ArrayList getChildren(); + public ArrayList getChildren() { + return null; + } /** * Get the database. * * @return the database */ - Database getDatabase(); + public final Database getDatabase() { + return database; + } /** * Get the unique object id. * * @return the object id */ - int getId(); + public final int getId() { + return id; + } /** * Get the name. * * @return the name */ - String getName(); + public final String getName() { + return objectName; + } + + /** + * Set the main attributes to null to make sure the object is no longer + * used. + */ + protected void invalidate() { + if (id == -1) { + throw DbException.getInternalError(); + } + setModified(); + id = -1; + database = null; + trace = null; + objectName = null; + } + + public final boolean isValid() { + return id != -1; + } /** * Build a SQL statement to re-create the object, or to create a copy of the @@ -133,74 +228,104 @@ public interface DbObject { * @param quotedName the quoted name * @return the SQL statement */ - String getCreateSQLForCopy(Table table, String quotedName); + public abstract String getCreateSQLForCopy(Table table, String quotedName); + + /** + * Construct the CREATE ... SQL statement for this object for meta table. 
+ * + * @return the SQL statement + */ + public String getCreateSQLForMeta() { + return getCreateSQL(); + } /** - * Construct the original CREATE ... SQL statement for this object. + * Construct the CREATE ... SQL statement for this object. * * @return the SQL statement */ - String getCreateSQL(); + public abstract String getCreateSQL(); /** * Construct a DROP ... SQL statement for this object. * * @return the SQL statement */ - String getDropSQL(); + public String getDropSQL() { + return null; + } /** * Get the object type. * * @return the object type */ - int getType(); + public abstract int getType(); /** * Delete all dependent children objects and resources of this object. * * @param session the session */ - void removeChildrenAndResources(Session session); + public abstract void removeChildrenAndResources(SessionLocal session); /** * Check if renaming is allowed. Does nothing when allowed. */ - void checkRename(); + public void checkRename() { + // Allowed by default + } /** * Rename the object. * * @param newName the new name */ - void rename(String newName); + public void rename(String newName) { + checkRename(); + objectName = newName; + setModified(); + } /** * Check if this object is temporary (for example, a temporary table). * * @return true if is temporary */ - boolean isTemporary(); + public boolean isTemporary() { + return temporary; + } /** * Tell this object that it is temporary or not. * * @param temporary the new value */ - void setTemporary(boolean temporary); + public void setTemporary(boolean temporary) { + this.temporary = temporary; + } /** * Change the comment of this object. * * @param comment the new comment, or null for no comment */ - void setComment(String comment); + public void setComment(String comment) { + this.comment = comment != null && !comment.isEmpty() ? comment : null; + } /** * Get the current comment of this object. 
* * @return the comment, or null if not set */ - String getComment(); + public String getComment() { + return comment; + } + + @Override + public String toString() { + return objectName + ":" + id + ":" + super.toString(); + } } diff --git a/h2/src/main/org/h2/engine/DbObjectBase.java b/h2/src/main/org/h2/engine/DbObjectBase.java deleted file mode 100644 index e0ad9f657b..0000000000 --- a/h2/src/main/org/h2/engine/DbObjectBase.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import org.h2.command.Parser; -import org.h2.message.Trace; - -/** - * The base class for all database objects. - */ -public abstract class DbObjectBase implements DbObject { - - /** - * The database. - */ - protected Database database; - - /** - * The trace module. - */ - protected Trace trace; - - /** - * The comment (if set). - */ - protected String comment; - - private int id; - private String objectName; - private long modificationId; - private boolean temporary; - - /** - * Initialize some attributes of this object. - * - * @param db the database - * @param objectId the object id - * @param name the name - * @param traceModule the trace module name - */ - protected void initDbObjectBase(Database db, int objectId, String name, - String traceModule) { - this.database = db; - this.trace = db.getTrace(traceModule); - this.id = objectId; - this.objectName = name; - this.modificationId = db.getModificationMetaId(); - } - - /** - * Build a SQL statement to re-create this object. - * - * @return the SQL statement - */ - @Override - public abstract String getCreateSQL(); - - /** - * Build a SQL statement to drop this object. 
- * - * @return the SQL statement - */ - @Override - public abstract String getDropSQL(); - - /** - * Remove all dependent objects and free all resources (files, blocks in - * files) of this object. - * - * @param session the session - */ - @Override - public abstract void removeChildrenAndResources(Session session); - - /** - * Check if this object can be renamed. System objects may not be renamed. - */ - @Override - public abstract void checkRename(); - - /** - * Tell the object that is was modified. - */ - public void setModified() { - this.modificationId = database == null ? - -1 : database.getNextModificationMetaId(); - } - - public long getModificationId() { - return modificationId; - } - - protected void setObjectName(String name) { - objectName = name; - } - - @Override - public String getSQL() { - return Parser.quoteIdentifier(objectName); - } - - @Override - public ArrayList getChildren() { - return null; - } - - @Override - public Database getDatabase() { - return database; - } - - @Override - public int getId() { - return id; - } - - @Override - public String getName() { - return objectName; - } - - /** - * Set the main attributes to null to make sure the object is no longer - * used. 
- */ - protected void invalidate() { - setModified(); - id = -1; - database = null; - trace = null; - objectName = null; - } - - @Override - public void rename(String newName) { - checkRename(); - objectName = newName; - setModified(); - } - - @Override - public boolean isTemporary() { - return temporary; - } - - @Override - public void setTemporary(boolean temporary) { - this.temporary = temporary; - } - - @Override - public void setComment(String comment) { - this.comment = comment; - } - - @Override - public String getComment() { - return comment; - } - - @Override - public String toString() { - return objectName + ":" + id + ":" + super.toString(); - } - -} diff --git a/h2/src/main/org/h2/engine/DbSettings.java b/h2/src/main/org/h2/engine/DbSettings.java index 5eb19cbd42..c4baedefe3 100644 --- a/h2/src/main/org/h2/engine/DbSettings.java +++ b/h2/src/main/org/h2/engine/DbSettings.java @@ -1,16 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.HashMap; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; + /** * This class contains various database-level settings. To override the * documented default value for a database, append the setting in the database - * URL: "jdbc:h2:test;ALIAS_COLUMN_NAME=TRUE" when opening the first connection + * URL: "jdbc:h2:./test;ANALYZE_SAMPLE=1000" when opening the first connection * to the database. The settings can not be changed once the database is open. *

    * Some settings are a last resort and temporary solution to work around a @@ -21,23 +24,19 @@ */ public class DbSettings extends SettingsBase { - private static DbSettings defaultSettings; + /** + * The initial size of the hash table. + */ + static final int TABLE_SIZE = 64; /** - * Database setting ALIAS_COLUMN_NAME (default: false).
    - * When enabled, aliased columns (as in SELECT ID AS I FROM TEST) return the - * alias (I in this case) in ResultSetMetaData.getColumnName() and 'null' in - * getTableName(). If disabled, the real column name (ID in this case) and - * table name is returned. - *
    - * This setting only affects the default and the MySQL mode. When using - * any other mode, this feature is enabled for compatibility, even if this - * database setting is not enabled explicitly. + * INTERNAL. + * The default settings. Those must not be modified. */ - public final boolean aliasColumnName = get("ALIAS_COLUMN_NAME", false); + public static final DbSettings DEFAULT = new DbSettings(new HashMap<>(TABLE_SIZE)); /** - * Database setting ANALYZE_AUTO (default: 2000).
    + * Database setting ANALYZE_AUTO (default: 2000). * After changing this many rows, ANALYZE is automatically run for a table. * Automatically running ANALYZE is disabled if set to 0. If set to 1000, * then ANALYZE will run against each user table after about 1000 changes to @@ -48,47 +47,73 @@ public class DbSettings extends SettingsBase { public final int analyzeAuto = get("ANALYZE_AUTO", 2000); /** - * Database setting ANALYZE_SAMPLE (default: 10000).
    + * Database setting ANALYZE_SAMPLE (default: 10000). * The default sample size when analyzing a table. */ - public final int analyzeSample = get("ANALYZE_SAMPLE", 10000); + public final int analyzeSample = get("ANALYZE_SAMPLE", 10_000); + + /** + * Database setting AUTO_COMPACT_FILL_RATE + * (default: 90, which means 90%, 0 disables auto-compacting). + * Set the auto-compact target fill rate. If the average fill rate (the + * percentage of the storage space that contains active data) of the + * chunks is lower, then the chunks with a low fill rate are re-written. + * Also, if the percentage of empty space between chunks is higher than + * this value, then chunks at the end of the file are moved. Compaction + * stops if the target fill rate is reached. + * This setting only affects MVStore engine. + */ + public final int autoCompactFillRate = get("AUTO_COMPACT_FILL_RATE", 90); + + /** + * Database setting DATABASE_TO_LOWER (default: false). + * When set to true unquoted identifiers and short name of database are + * converted to lower case. Value of this setting should not be changed + * after creation of database. Setting this to "true" is experimental. + */ + public final boolean databaseToLower; + + /** + * Database setting DATABASE_TO_UPPER (default: true). + * When set to true unquoted identifiers and short name of database are + * converted to upper case. + */ + public final boolean databaseToUpper; /** - * Database setting DATABASE_TO_UPPER (default: true).
    - * Database short names are converted to uppercase for the DATABASE() - * function, and in the CATALOG column of all database meta data methods. - * Setting this to "false" is experimental. When set to false, all - * identifier names (table names, column names) are case sensitive (except - * aggregate, built-in functions, data types, and keywords). + * Database setting CASE_INSENSITIVE_IDENTIFIERS (default: + * false). + * When set to true, all identifier names (table names, column names) are + * case insensitive. Setting this to "true" is experimental. */ - public final boolean databaseToUpper = get("DATABASE_TO_UPPER", true); + public final boolean caseInsensitiveIdentifiers = get("CASE_INSENSITIVE_IDENTIFIERS", false); /** - * Database setting DB_CLOSE_ON_EXIT (default: true).
    + * Database setting DB_CLOSE_ON_EXIT (default: true). * Close the database when the virtual machine exits normally, using a * shutdown hook. */ public final boolean dbCloseOnExit = get("DB_CLOSE_ON_EXIT", true); /** - * Database setting DEFAULT_CONNECTION (default: false).
    + * Database setting DEFAULT_CONNECTION (default: false). * Whether Java functions can use * DriverManager.getConnection("jdbc:default:connection") to * get a database connection. This feature is disabled by default for * performance reasons. Please note the Oracle JDBC driver will try to * resolve this database URL if it is loaded before the H2 driver. */ - public boolean defaultConnection = get("DEFAULT_CONNECTION", false); + public final boolean defaultConnection = get("DEFAULT_CONNECTION", false); /** - * Database setting DEFAULT_ESCAPE (default: \).
    + * Database setting DEFAULT_ESCAPE (default: \). * The default escape character for LIKE comparisons. To select no escape * character, use an empty string. */ public final String defaultEscape = get("DEFAULT_ESCAPE", "\\"); /** - * Database setting DEFRAG_ALWAYS (default: false).
    + * Database setting DEFRAG_ALWAYS (default: false) * Each time the database is closed normally, it is fully defragmented (the * same as SHUTDOWN DEFRAG). If you execute SHUTDOWN COMPACT, then this * setting is ignored. @@ -96,82 +121,46 @@ public class DbSettings extends SettingsBase { public final boolean defragAlways = get("DEFRAG_ALWAYS", false); /** - * Database setting DROP_RESTRICT (default: true).
    - * Whether the default action for DROP TABLE and DROP VIEW is RESTRICT. + * Database setting DROP_RESTRICT (default: true) + * Whether the default action for DROP TABLE, DROP VIEW, DROP SCHEMA, DROP + * DOMAIN, and DROP CONSTRAINT is RESTRICT. */ public final boolean dropRestrict = get("DROP_RESTRICT", true); - /** - * Database setting EARLY_FILTER (default: false).
    - * This setting allows table implementations to apply filter conditions - * early on. - */ - public final boolean earlyFilter = get("EARLY_FILTER", false); - /** * Database setting ESTIMATED_FUNCTION_TABLE_ROWS (default: - * 1000).
    + * 1000). * The estimated number of rows in a function table (for example, CSVREAD or * FTL_SEARCH). This value is used by the optimizer. */ public final int estimatedFunctionTableRows = get( "ESTIMATED_FUNCTION_TABLE_ROWS", 1000); - /** - * Database setting FUNCTIONS_IN_SCHEMA - * (default: true).
    - * If set, all functions are stored in a schema. Specially, the SCRIPT - * statement will always include the schema name in the CREATE ALIAS - * statement. This is not backward compatible with H2 versions 1.2.134 and - * older. - */ - public final boolean functionsInSchema = get("FUNCTIONS_IN_SCHEMA", true); - - /** - * Database setting LARGE_TRANSACTIONS (default: true).
    - * Support very large transactions - */ - public final boolean largeTransactions = get("LARGE_TRANSACTIONS", true); - /** * Database setting LOB_TIMEOUT (default: 300000, - * which means 5 minutes).
    + * which means 5 minutes). * The number of milliseconds a temporary LOB reference is kept until it * times out. After the timeout, the LOB is no longer accessible using this * reference. */ - public final int lobTimeout = get("LOB_TIMEOUT", 300000); - - /** - * Database setting MAX_COMPACT_COUNT - * (default: Integer.MAX_VALUE).
    - * The maximum number of pages to move when closing a database. - */ - public final int maxCompactCount = get("MAX_COMPACT_COUNT", - Integer.MAX_VALUE); + public final int lobTimeout = get("LOB_TIMEOUT", 300_000); /** - * Database setting MAX_COMPACT_TIME (default: 200).
    + * Database setting MAX_COMPACT_TIME (default: 200). * The maximum time in milliseconds used to compact a database when closing. */ public final int maxCompactTime = get("MAX_COMPACT_TIME", 200); /** - * Database setting MAX_QUERY_TIMEOUT (default: 0).
    + * Database setting MAX_QUERY_TIMEOUT (default: 0). * The maximum timeout of a query in milliseconds. The default is 0, meaning * no limit. Please note the actual query timeout may be set to a lower * value. */ - public int maxQueryTimeout = get("MAX_QUERY_TIMEOUT", 0); - - /** - * Database setting NESTED_JOINS (default: true).
    - * Whether nested joins should be supported. - */ - public final boolean nestedJoins = get("NESTED_JOINS", true); + public final int maxQueryTimeout = get("MAX_QUERY_TIMEOUT", 0); /** - * Database setting OPTIMIZE_DISTINCT (default: true).
    + * Database setting OPTIMIZE_DISTINCT (default: true). * Improve the performance of simple DISTINCT queries if an index is * available for the given column. The optimization is used if: *

      @@ -186,7 +175,7 @@ public class DbSettings extends SettingsBase { /** * Database setting OPTIMIZE_EVALUATABLE_SUBQUERIES (default: - * true).
      + * true). * Optimize subqueries that are not dependent on the outer query. */ public final boolean optimizeEvaluatableSubqueries = get( @@ -194,7 +183,7 @@ public class DbSettings extends SettingsBase { /** * Database setting OPTIMIZE_INSERT_FROM_SELECT - * (default: true).
      + * (default: true). * Insert into table from query directly bypassing temporary disk storage. * This also applies to create table as select. */ @@ -202,69 +191,40 @@ public class DbSettings extends SettingsBase { "OPTIMIZE_INSERT_FROM_SELECT", true); /** - * Database setting OPTIMIZE_IN_LIST (default: true).
      + * Database setting OPTIMIZE_IN_LIST (default: true). * Optimize IN(...) and IN(SELECT ...) comparisons. This includes * optimization for SELECT, DELETE, and UPDATE. */ public final boolean optimizeInList = get("OPTIMIZE_IN_LIST", true); /** - * Database setting OPTIMIZE_IN_SELECT (default: true).
      + * Database setting OPTIMIZE_IN_SELECT (default: true). * Optimize IN(SELECT ...) comparisons. This includes * optimization for SELECT, DELETE, and UPDATE. */ public final boolean optimizeInSelect = get("OPTIMIZE_IN_SELECT", true); /** - * Database setting OPTIMIZE_IS_NULL (default: false).
      - * Use an index for condition of the form columnName IS NULL. - */ - public final boolean optimizeIsNull = get("OPTIMIZE_IS_NULL", true); - - /** - * Database setting OPTIMIZE_OR (default: true).
      + * Database setting OPTIMIZE_OR (default: true). * Convert (C=? OR C=?) to (C IN(?, ?)). */ public final boolean optimizeOr = get("OPTIMIZE_OR", true); /** - * Database setting OPTIMIZE_TWO_EQUALS (default: true).
      + * Database setting OPTIMIZE_TWO_EQUALS (default: true). * Optimize expressions of the form A=B AND B=1. In this case, AND A=1 is * added so an index on A can be used. */ public final boolean optimizeTwoEquals = get("OPTIMIZE_TWO_EQUALS", true); /** - * Database setting OPTIMIZE_UPDATE (default: true).
      - * Speed up inserts, updates, and deletes by not reading all rows from a - * page unless necessary. - */ - public final boolean optimizeUpdate = get("OPTIMIZE_UPDATE", true); - - /** - * Database setting PAGE_STORE_MAX_GROWTH - * (default: 128 * 1024).
      - * The maximum number of pages the file grows at any time. - */ - public final int pageStoreMaxGrowth = get("PAGE_STORE_MAX_GROWTH", - 128 * 1024); - - /** - * Database setting PAGE_STORE_INTERNAL_COUNT - * (default: false).
      - * Update the row counts on a node level. - */ - public final boolean pageStoreInternalCount = get( - "PAGE_STORE_INTERNAL_COUNT", false); - - /** - * Database setting PAGE_STORE_TRIM (default: true).
      - * Trim the database size when closing. + * Database setting OPTIMIZE_SIMPLE_SINGLE_ROW_SUBQUERIES (default: true). + * Optimize expressions of the form (SELECT A) to A. */ - public final boolean pageStoreTrim = get("PAGE_STORE_TRIM", true); + public final boolean optimizeSimpleSingleRowSubqueries = get("OPTIMIZE_SIMPLE_SINGLE_ROW_SUBQUERIES", true); /** - * Database setting QUERY_CACHE_SIZE (default: 8).
      + * Database setting QUERY_CACHE_SIZE (default: 8). * The size of the query cache, in number of cached statements. Each session * has it's own cache with the given size. The cache is only used if the SQL * statement and all parameters match. Only the last returned result per @@ -277,45 +237,22 @@ public class DbSettings extends SettingsBase { public final int queryCacheSize = get("QUERY_CACHE_SIZE", 8); /** - * Database setting RECOMPILE_ALWAYS (default: false).
      + * Database setting RECOMPILE_ALWAYS (default: false). * Always recompile prepared statements. */ public final boolean recompileAlways = get("RECOMPILE_ALWAYS", false); /** - * Database setting RECONNECT_CHECK_DELAY (default: 200).
      - * Check the .lock.db file every this many milliseconds to detect that the - * database was changed. The process writing to the database must first - * notify a change in the .lock.db file, then wait twice this many - * milliseconds before updating the database. - */ - public final int reconnectCheckDelay = get("RECONNECT_CHECK_DELAY", 200); - - /** - * Database setting REUSE_SPACE (default: true).
      + * Database setting REUSE_SPACE (default: true). * If disabled, all changes are appended to the database file, and existing * content is never overwritten. This setting has no effect if the database * is already open. */ public final boolean reuseSpace = get("REUSE_SPACE", true); - /** - * Database setting ROWID (default: true).
      - * If set, each table has a pseudo-column _ROWID_. - */ - public final boolean rowId = get("ROWID", true); - - /** - * Database setting SELECT_FOR_UPDATE_MVCC - * (default: true).
      - * If set, SELECT .. FOR UPDATE queries lock only the selected rows when - * using MVCC. - */ - public final boolean selectForUpdateMvcc = get("SELECT_FOR_UPDATE_MVCC", true); - /** * Database setting SHARE_LINKED_CONNECTIONS - * (default: true).
      + * (default: true). * Linked connections should be shared, that means connections to the same * database should be used for all linked tables that connect to the same * database. @@ -325,27 +262,57 @@ public class DbSettings extends SettingsBase { /** * Database setting DEFAULT_TABLE_ENGINE - * (default: null).
      + * (default: null). * The default table engine to use for new tables. */ - public String defaultTableEngine = get("DEFAULT_TABLE_ENGINE", null); + public final String defaultTableEngine = get("DEFAULT_TABLE_ENGINE", null); /** * Database setting MV_STORE - * (default: false for version 1.3, true for version 1.4).
      + * (default: true). * Use the MVStore storage engine. */ - public boolean mvStore = get("MV_STORE", Constants.VERSION_MINOR >= 4); + public final boolean mvStore = get("MV_STORE", true); /** * Database setting COMPRESS - * (default: false).
      + * (default: false). * Compress data when storing. */ public final boolean compressData = get("COMPRESS", false); + /** + * Database setting IGNORE_CATALOGS + * (default: false). + * If set, all catalog names in identifiers are silently accepted + * without comparing them with the short name of the database. + */ + public final boolean ignoreCatalogs = get("IGNORE_CATALOGS", false); + + /** + * Database setting ZERO_BASED_ENUMS + * (default: false). + * If set, ENUM ordinal values are 0-based. + */ + public final boolean zeroBasedEnums = get("ZERO_BASED_ENUMS", false); + private DbSettings(HashMap s) { super(s); + boolean lower = get("DATABASE_TO_LOWER", false); + boolean upperSet = containsKey("DATABASE_TO_UPPER"); + boolean upper = get("DATABASE_TO_UPPER", true); + if (lower && upper) { + if (upperSet) { + throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_COMBINATION, + "DATABASE_TO_LOWER & DATABASE_TO_UPPER"); + } + upper = false; + } + databaseToLower = lower; + databaseToUpper = upper; + HashMap settings = getSettings(); + settings.put("DATABASE_TO_LOWER", Boolean.toString(lower)); + settings.put("DATABASE_TO_UPPER", Boolean.toString(upper)); } /** @@ -355,21 +322,8 @@ private DbSettings(HashMap s) { * @param s the settings * @return the settings */ - public static DbSettings getInstance(HashMap s) { + static DbSettings getInstance(HashMap s) { return new DbSettings(s); } - /** - * INTERNAL. - * Get the default settings. Those must not be modified. - * - * @return the settings - */ - public static DbSettings getDefaultSettings() { - if (defaultSettings == null) { - defaultSettings = new DbSettings(new HashMap()); - } - return defaultSettings; - } - } diff --git a/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java b/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java new file mode 100644 index 0000000000..2e6083f260 --- /dev/null +++ b/h2/src/main/org/h2/engine/DelayedDatabaseCloser.java @@ -0,0 +1,76 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.lang.ref.WeakReference; + +import org.h2.message.Trace; + +/** + * This class is responsible to close a database after the specified delay. A + * database closer object only exists if there is no user connected to the + * database. + */ +class DelayedDatabaseCloser extends Thread { + + private final Trace trace; + private volatile WeakReference databaseRef; + private int delayInMillis; + + DelayedDatabaseCloser(Database db, int delayInMillis) { + databaseRef = new WeakReference<>(db); + this.delayInMillis = delayInMillis; + trace = db.getTrace(Trace.DATABASE); + setName("H2 Close Delay " + db.getShortName()); + setDaemon(true); + start(); + } + + /** + * Stop and disable the database closer. This method is called after a session + * has been created. + */ + void reset() { + databaseRef = null; + } + + @Override + public void run() { + while (delayInMillis > 0) { + try { + int step = 100; + Thread.sleep(step); + delayInMillis -= step; + } catch (Exception e) { + // ignore InterruptedException + } + WeakReference ref = databaseRef; + if (ref == null || ref.get() == null) { + return; + } + } + Database database; + WeakReference ref = databaseRef; + if (ref != null && (database = ref.get()) != null) { + try { + database.close(false); + } catch (RuntimeException e) { + // this can happen when stopping a web application, + // if loading classes is no longer allowed + // it would throw an IllegalStateException + try { + trace.error(e, "could not close the database"); + // if this was successful, we ignore the exception + // otherwise not + } catch (Throwable e2) { + e.addSuppressed(e2); + throw e; + } + } + } + } + +} diff --git a/h2/src/main/org/h2/engine/Engine.java b/h2/src/main/org/h2/engine/Engine.java index 04a7284991..2ee7732178 100644 --- a/h2/src/main/org/h2/engine/Engine.java +++ 
b/h2/src/main/org/h2/engine/Engine.java @@ -1,21 +1,28 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.HashMap; - +import java.util.Map; +import java.util.Objects; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.command.Parser; import org.h2.command.dml.SetTypes; import org.h2.message.DbException; -import org.h2.store.FileLock; +import org.h2.message.Trace; +import org.h2.security.auth.AuthenticationException; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.Authenticator; +import org.h2.store.fs.FileUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.MathUtils; -import org.h2.util.New; +import org.h2.util.ParserUtil; import org.h2.util.StringUtils; +import org.h2.util.ThreadDeadlockDetector; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; /** @@ -23,97 +30,163 @@ * It is also responsible for opening and creating new databases. * This is a singleton class. 
*/ -public class Engine implements SessionFactory { +public final class Engine { - private static final Engine INSTANCE = new Engine(); - private static final HashMap DATABASES = New.hashMap(); + private static final Map DATABASES = new HashMap<>(); - private volatile long wrongPasswordDelay = - SysProperties.DELAY_WRONG_PASSWORD_MIN; - private boolean jmx; + private static volatile long WRONG_PASSWORD_DELAY = SysProperties.DELAY_WRONG_PASSWORD_MIN; - private Engine() { - // use getInstance() - } + private static boolean JMX; - public static Engine getInstance() { - return INSTANCE; + static { + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + ThreadDeadlockDetector.init(); + } } - private Session openSession(ConnectionInfo ci, boolean ifExists, + private static SessionLocal openSession(ConnectionInfo ci, boolean ifExists, boolean forbidCreation, String cipher) { String name = ci.getName(); Database database; ci.removeProperty("NO_UPGRADE", false); boolean openNew = ci.getProperty("OPEN_NEW", false); - if (openNew || ci.isUnnamedInMemory()) { - database = null; - } else { - database = DATABASES.get(name); - } - User user = null; boolean opened = false; - if (database == null) { - if (ifExists && !Database.exists(name)) { - throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, name); - } - database = new Database(ci, cipher); - opened = true; - if (database.getAllUsers().size() == 0) { - // users is the last thing we add, so if no user is around, - // the database is new (or not initialized correctly) - user = new User(database, database.allocateObjectId(), - ci.getUserName(), false); - user.setAdmin(true); - user.setUserPasswordHash(ci.getUserPasswordHash()); - database.setMasterUser(user); - } - if (!ci.isUnnamedInMemory()) { - DATABASES.put(name, database); + User user = null; + DatabaseHolder databaseHolder; + if (!ci.isUnnamedInMemory()) { + synchronized (DATABASES) { + databaseHolder = DATABASES.computeIfAbsent(name, (key) -> new DatabaseHolder()); } + } else { 
+ databaseHolder = new DatabaseHolder(); } - synchronized (database) { - if (opened) { - // start the thread when already synchronizing on the database - // otherwise a deadlock can occur when the writer thread - // opens a new database (as in recovery testing) - database.opened(); - } - if (database.isClosing()) { - return null; + synchronized (databaseHolder) { + database = databaseHolder.database; + if (database == null || openNew) { + if (ci.isPersistent()) { + String p = ci.getProperty("MV_STORE"); + String fileName; + if (p == null) { + fileName = name + Constants.SUFFIX_MV_FILE; + if (!FileUtils.exists(fileName)) { + throwNotFound(ifExists, forbidCreation, name); + fileName = name + Constants.SUFFIX_OLD_DATABASE_FILE; + if (FileUtils.exists(fileName)) { + throw DbException.getFileVersionError(fileName); + } + fileName = null; + } + } else { + fileName = name + Constants.SUFFIX_MV_FILE; + if (!FileUtils.exists(fileName)) { + throwNotFound(ifExists, forbidCreation, name); + fileName = null; + } + } + if (fileName != null && !FileUtils.canWrite(fileName)) { + ci.setProperty("ACCESS_MODE_DATA", "r"); + } + } else { + throwNotFound(ifExists, forbidCreation, name); + } + database = new Database(ci, cipher); + opened = true; + boolean found = false; + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + found = true; + break; + } + } + if (!found) { + // users is the last thing we add, so if no user is around, + // the database is new (or not initialized correctly) + user = new User(database, database.allocateObjectId(), ci.getUserName(), false); + user.setAdmin(true); + user.setUserPasswordHash(ci.getUserPasswordHash()); + database.setMasterUser(user); + } + databaseHolder.database = database; } - if (user == null) { - if (database.validateFilePasswordHash(cipher, ci.getFilePasswordHash())) { + } + + if (opened) { + // start the thread when already synchronizing on the database + // otherwise a deadlock can occur 
when the writer thread + // opens a new database (as in recovery testing) + database.opened(); + } + if (database.isClosing()) { + return null; + } + if (user == null) { + if (database.validateFilePasswordHash(cipher, ci.getFilePasswordHash())) { + if (ci.getProperty("AUTHREALM")== null) { user = database.findUser(ci.getUserName()); if (user != null) { if (!user.validateUserPasswordHash(ci.getUserPasswordHash())) { user = null; } } - } - if (opened && (user == null || !user.isAdmin())) { - // reset - because the user is not an admin, and has no - // right to listen to exceptions - database.setEventListener(null); + } else { + Authenticator authenticator = database.getAuthenticator(); + if (authenticator==null) { + throw DbException.get(ErrorCode.AUTHENTICATOR_NOT_AVAILABLE, name); + } else { + try { + AuthenticationInfo authenticationInfo=new AuthenticationInfo(ci); + user = database.getAuthenticator().authenticate(authenticationInfo, database); + } catch (AuthenticationException authenticationError) { + database.getTrace(Trace.DATABASE).error(authenticationError, + "an error occurred during authentication; user: \"" + + ci.getUserName() + "\""); + } + } } } - if (user == null) { - database.removeSession(null); - throw DbException.get(ErrorCode.WRONG_USER_OR_PASSWORD); + if (opened && (user == null || !user.isAdmin())) { + // reset - because the user is not an admin, and has no + // right to listen to exceptions + database.setEventListener(null); } - checkClustering(ci, database); - Session session = database.createSession(user); - if (ci.getProperty("JMX", false)) { - try { - Utils.callStaticMethod( - "org.h2.jmx.DatabaseInfo.registerMBean", ci, database); - } catch (Exception e) { - database.removeSession(session); - throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, e, "JMX"); - } - jmx = true; + } + if (user == null) { + DbException er = DbException.get(ErrorCode.WRONG_USER_OR_PASSWORD); + database.getTrace(Trace.DATABASE).error(er, "wrong user or 
password; user: \"" + + ci.getUserName() + "\""); + database.removeSession(null); + throw er; + } + //Prevent to set _PASSWORD + ci.cleanAuthenticationInfo(); + checkClustering(ci, database); + SessionLocal session = database.createSession(user, ci.getNetworkConnectionInfo()); + if (session == null) { + // concurrently closing + return null; + } + if (ci.getProperty("OLD_INFORMATION_SCHEMA", false)) { + session.setOldInformationSchema(true); + } + if (ci.getProperty("JMX", false)) { + try { + Utils.callStaticMethod( + "org.h2.jmx.DatabaseInfo.registerMBean", ci, database); + } catch (Exception e) { + database.removeSession(session); + throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, e, "JMX"); } - return session; + JMX = true; + } + return session; + } + + private static void throwNotFound(boolean ifExists, boolean forbidCreation, String name) { + if (ifExists) { + throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_WITH_IF_EXISTS_1, name); + } + if (forbidCreation) { + throw DbException.get(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, name); } } @@ -123,30 +196,10 @@ private Session openSession(ConnectionInfo ci, boolean ifExists, * @param ci the connection information * @return the session */ - @Override - public Session createSession(ConnectionInfo ci) { - return INSTANCE.createSessionAndValidate(ci); - } - - private Session createSessionAndValidate(ConnectionInfo ci) { + public static SessionLocal createSession(ConnectionInfo ci) { try { - ConnectionInfo backup = null; - String lockMethodName = ci.getProperty("FILE_LOCK", null); - int fileLockMethod = FileLock.getFileLockMethod(lockMethodName); - if (fileLockMethod == FileLock.LOCK_SERIALIZED) { - // In serialized mode, database instance sharing is not possible - ci.setProperty("OPEN_NEW", "TRUE"); - try { - backup = ci.clone(); - } catch (CloneNotSupportedException e) { - throw DbException.convert(e); - } - } - Session session = openSession(ci); + SessionLocal session = openSession(ci); 
validateUserAndPassword(true); - if (backup != null) { - session.setConnectionInfo(backup); - } return session; } catch (DbException e) { if (e.getErrorCode() == ErrorCode.WRONG_USER_OR_PASSWORD) { @@ -156,65 +209,84 @@ private Session createSessionAndValidate(ConnectionInfo ci) { } } - private synchronized Session openSession(ConnectionInfo ci) { + private static SessionLocal openSession(ConnectionInfo ci) { boolean ifExists = ci.removeProperty("IFEXISTS", false); + boolean forbidCreation = ci.removeProperty("FORBID_CREATION", false); boolean ignoreUnknownSetting = ci.removeProperty( "IGNORE_UNKNOWN_SETTINGS", false); String cipher = ci.removeProperty("CIPHER", null); String init = ci.removeProperty("INIT", null); - Session session; - for (int i = 0;; i++) { - session = openSession(ci, ifExists, cipher); + SessionLocal session; + long start = System.nanoTime(); + for (;;) { + session = openSession(ci, ifExists, forbidCreation, cipher); if (session != null) { break; } // we found a database that is currently closing // wait a bit to avoid a busy loop (the method is synchronized) - if (i > 60 * 1000) { - // retry at most 1 minute + if (System.nanoTime() - start > DateTimeUtils.NANOS_PER_MINUTE) { throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, "Waited for database closing longer than 1 minute"); } try { Thread.sleep(1); } catch (InterruptedException e) { - // ignore + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); } } - session.setAllowLiterals(true); - DbSettings defaultSettings = DbSettings.getDefaultSettings(); - for (String setting : ci.getKeys()) { - if (defaultSettings.containsKey(setting)) { - // database setting are only used when opening the database - continue; - } - String value = ci.getProperty(setting); - try { - CommandInterface command = session.prepareCommand( - "SET " + Parser.quoteIdentifier(setting) + " " + value, - Integer.MAX_VALUE); - command.executeUpdate(); - } catch (DbException e) { - if (!ignoreUnknownSetting) { - 
session.close(); - throw e; + synchronized (session) { + session.setAllowLiterals(true); + DbSettings defaultSettings = DbSettings.DEFAULT; + for (String setting : ci.getKeys()) { + if (defaultSettings.containsKey(setting)) { + // database setting are only used when opening the database + continue; + } + String value = ci.getProperty(setting); + StringBuilder builder = new StringBuilder("SET ").append(setting).append(' '); + if (!ParserUtil.isSimpleIdentifier(setting, false, false)) { + if (!setting.equalsIgnoreCase("TIME ZONE")) { + throw DbException.get(ErrorCode.UNSUPPORTED_SETTING_1, setting); + } + StringUtils.quoteStringSQL(builder, value); + } else { + builder.append(value); + } + try { + CommandInterface command = session.prepareLocal(builder.toString()); + command.executeUpdate(null); + } catch (DbException e) { + if (e.getErrorCode() == ErrorCode.ADMIN_RIGHTS_REQUIRED) { + session.getTrace().error(e, "admin rights required; user: \"" + + ci.getUserName() + "\""); + } else { + session.getTrace().error(e, ""); + } + if (!ignoreUnknownSetting) { + session.close(); + throw e; + } } } - } - if (init != null) { - try { - CommandInterface command = session.prepareCommand(init, - Integer.MAX_VALUE); - command.executeUpdate(); - } catch (DbException e) { - if (!ignoreUnknownSetting) { - session.close(); - throw e; + TimeZoneProvider timeZone = ci.getTimeZone(); + if (timeZone != null) { + session.setTimeZone(timeZone); + } + if (init != null) { + try { + CommandInterface command = session.prepareLocal(init); + command.executeUpdate(null); + } catch (DbException e) { + if (!ignoreUnknownSetting) { + session.close(); + throw e; + } } } + session.setAllowLiterals(false); + session.commit(true); } - session.setAllowLiterals(false); - session.commit(true); return session; } @@ -228,7 +300,7 @@ private static void checkClustering(ConnectionInfo ci, Database database) { String clusterDb = database.getCluster(); if (!Constants.CLUSTERING_DISABLED.equals(clusterDb)) { if 
(!Constants.CLUSTERING_ENABLED.equals(clusterSession)) { - if (!StringUtils.equals(clusterSession, clusterDb)) { + if (!Objects.equals(clusterSession, clusterDb)) { if (clusterDb.equals(Constants.CLUSTERING_DISABLED)) { throw DbException.get( ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_ALONE); @@ -247,15 +319,17 @@ private static void checkClustering(ConnectionInfo ci, Database database) { * * @param name the database name */ - void close(String name) { - if (jmx) { + static void close(String name) { + if (JMX) { try { Utils.callStaticMethod("org.h2.jmx.DatabaseInfo.unregisterMBean", name); } catch (Exception e) { throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, e, "JMX"); } } - DATABASES.remove(name); + synchronized (DATABASES) { + DATABASES.remove(name); + } } /** @@ -275,14 +349,14 @@ void close(String name) { * @param correct if the user name or the password was correct * @throws DbException the exception 'wrong user or password' */ - private void validateUserAndPassword(boolean correct) { + private static void validateUserAndPassword(boolean correct) { int min = SysProperties.DELAY_WRONG_PASSWORD_MIN; if (correct) { - long delay = wrongPasswordDelay; + long delay = WRONG_PASSWORD_DELAY; if (delay > min && delay > 0) { // the first correct password must be blocked, // otherwise parallel attacks are possible - synchronized (INSTANCE) { + synchronized (Engine.class) { // delay up to the last delay // an attacker can't know how long it will be delay = MathUtils.secureRandomInt((int) delay); @@ -291,21 +365,21 @@ private void validateUserAndPassword(boolean correct) { } catch (InterruptedException e) { // ignore } - wrongPasswordDelay = min; + WRONG_PASSWORD_DELAY = min; } } } else { // this method is not synchronized on the Engine, so that // regular successful attempts are not blocked - synchronized (INSTANCE) { - long delay = wrongPasswordDelay; + synchronized (Engine.class) { + long delay = WRONG_PASSWORD_DELAY; int max = 
SysProperties.DELAY_WRONG_PASSWORD_MAX; if (max <= 0) { max = Integer.MAX_VALUE; } - wrongPasswordDelay += wrongPasswordDelay; - if (wrongPasswordDelay > max || wrongPasswordDelay < 0) { - wrongPasswordDelay = max; + WRONG_PASSWORD_DELAY += WRONG_PASSWORD_DELAY; + if (WRONG_PASSWORD_DELAY > max || WRONG_PASSWORD_DELAY < 0) { + WRONG_PASSWORD_DELAY = max; } if (min > 0) { // a bit more to protect against timing attacks @@ -321,4 +395,14 @@ private void validateUserAndPassword(boolean correct) { } } + private Engine() { + } + + private static final class DatabaseHolder { + + DatabaseHolder() { + } + + volatile Database database; + } } diff --git a/h2/src/main/org/h2/engine/FunctionAlias.java b/h2/src/main/org/h2/engine/FunctionAlias.java deleted file mode 100644 index 34cc7daa1c..0000000000 --- a/h2/src/main/org/h2/engine/FunctionAlias.java +++ /dev/null @@ -1,543 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.lang.reflect.Array; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Arrays; - -import org.h2.Driver; -import org.h2.api.ErrorCode; -import org.h2.command.Parser; -import org.h2.expression.Expression; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.schema.Schema; -import org.h2.schema.SchemaObjectBase; -import org.h2.table.Table; -import org.h2.util.JdbcUtils; -import org.h2.util.New; -import org.h2.util.SourceCompiler; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueNull; - -/** - * Represents a user-defined function, or alias. 
- * - * @author Thomas Mueller - * @author Gary Tong - */ -public class FunctionAlias extends SchemaObjectBase { - - private String className; - private String methodName; - private String source; - private JavaMethod[] javaMethods; - private boolean deterministic; - private boolean bufferResultSetToLocalTemp = true; - - private FunctionAlias(Schema schema, int id, String name) { - initSchemaObjectBase(schema, id, name, Trace.FUNCTION); - } - - /** - * Create a new alias based on a method name. - * - * @param schema the schema - * @param id the id - * @param name the name - * @param javaClassMethod the class and method name - * @param force create the object even if the class or method does not exist - * @param bufferResultSetToLocalTemp whether the result should be buffered - * @return the database object - */ - public static FunctionAlias newInstance( - Schema schema, int id, String name, String javaClassMethod, - boolean force, boolean bufferResultSetToLocalTemp) { - FunctionAlias alias = new FunctionAlias(schema, id, name); - int paren = javaClassMethod.indexOf('('); - int lastDot = javaClassMethod.lastIndexOf('.', paren < 0 ? - javaClassMethod.length() : paren); - if (lastDot < 0) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, javaClassMethod); - } - alias.className = javaClassMethod.substring(0, lastDot); - alias.methodName = javaClassMethod.substring(lastDot + 1); - alias.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; - alias.init(force); - return alias; - } - - /** - * Create a new alias based on source code. 
- * - * @param schema the schema - * @param id the id - * @param name the name - * @param source the source code - * @param force create the object even if the class or method does not exist - * @param bufferResultSetToLocalTemp whether the result should be buffered - * @return the database object - */ - public static FunctionAlias newInstanceFromSource( - Schema schema, int id, String name, String source, boolean force, - boolean bufferResultSetToLocalTemp) { - FunctionAlias alias = new FunctionAlias(schema, id, name); - alias.source = source; - alias.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; - alias.init(force); - return alias; - } - - private void init(boolean force) { - try { - // at least try to compile the class, otherwise the data type is not - // initialized if it could be - load(); - } catch (DbException e) { - if (!force) { - throw e; - } - } - } - - private synchronized void load() { - if (javaMethods != null) { - return; - } - if (source != null) { - loadFromSource(); - } else { - loadClass(); - } - } - - private void loadFromSource() { - SourceCompiler compiler = database.getCompiler(); - synchronized (compiler) { - String fullClassName = Constants.USER_PACKAGE + "." 
+ getName(); - compiler.setSource(fullClassName, source); - try { - Method m = compiler.getMethod(fullClassName); - JavaMethod method = new JavaMethod(m, 0); - javaMethods = new JavaMethod[] { - method - }; - } catch (DbException e) { - throw e; - } catch (Exception e) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, e, source); - } - } - } - - private void loadClass() { - Class javaClass = JdbcUtils.loadUserClass(className); - Method[] methods = javaClass.getMethods(); - ArrayList list = New.arrayList(); - for (int i = 0, len = methods.length; i < len; i++) { - Method m = methods[i]; - if (!Modifier.isStatic(m.getModifiers())) { - continue; - } - if (m.getName().equals(methodName) || - getMethodSignature(m).equals(methodName)) { - JavaMethod javaMethod = new JavaMethod(m, i); - for (JavaMethod old : list) { - if (old.getParameterCount() == javaMethod.getParameterCount()) { - throw DbException.get(ErrorCode. - METHODS_MUST_HAVE_DIFFERENT_PARAMETER_COUNTS_2, - old.toString(), javaMethod.toString()); - } - } - list.add(javaMethod); - } - } - if (list.size() == 0) { - throw DbException.get( - ErrorCode.PUBLIC_STATIC_JAVA_METHOD_NOT_FOUND_1, - methodName + " (" + className + ")"); - } - javaMethods = new JavaMethod[list.size()]; - list.toArray(javaMethods); - // Sort elements. Methods with a variable number of arguments must be at - // the end. Reason: there could be one method without parameters and one - // with a variable number. The one without parameters needs to be used - // if no parameters are given. 
- Arrays.sort(javaMethods); - } - - private static String getMethodSignature(Method m) { - StatementBuilder buff = new StatementBuilder(m.getName()); - buff.append('('); - for (Class p : m.getParameterTypes()) { - // do not use a space here, because spaces are removed - // in CreateFunctionAlias.setJavaClassMethod() - buff.appendExceptFirst(","); - if (p.isArray()) { - buff.append(p.getComponentType().getName()).append("[]"); - } else { - buff.append(p.getName()); - } - } - return buff.append(')').toString(); - } - - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return "DROP ALIAS IF EXISTS " + getSQL(); - } - - @Override - public String getSQL() { - // TODO can remove this method once FUNCTIONS_IN_SCHEMA is enabled - if (database.getSettings().functionsInSchema || - !getSchema().getName().equals(Constants.SCHEMA_MAIN)) { - return super.getSQL(); - } - return Parser.quoteIdentifier(getName()); - } - - @Override - public String getCreateSQL() { - StringBuilder buff = new StringBuilder("CREATE FORCE ALIAS "); - buff.append(getSQL()); - if (deterministic) { - buff.append(" DETERMINISTIC"); - } - if (!bufferResultSetToLocalTemp) { - buff.append(" NOBUFFER"); - } - if (source != null) { - buff.append(" AS ").append(StringUtils.quoteStringSQL(source)); - } else { - buff.append(" FOR ").append(Parser.quoteIdentifier( - className + "." + methodName)); - } - return buff.toString(); - } - - @Override - public int getType() { - return DbObject.FUNCTION_ALIAS; - } - - @Override - public synchronized void removeChildrenAndResources(Session session) { - database.removeMeta(session, getId()); - className = null; - methodName = null; - javaMethods = null; - invalidate(); - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("RENAME"); - } - - /** - * Find the Java method that matches the arguments. 
- * - * @param args the argument list - * @return the Java method - * @throws DbException if no matching method could be found - */ - public JavaMethod findJavaMethod(Expression[] args) { - load(); - int parameterCount = args.length; - for (JavaMethod m : javaMethods) { - int count = m.getParameterCount(); - if (count == parameterCount || (m.isVarArgs() && - count <= parameterCount + 1)) { - return m; - } - } - throw DbException.get(ErrorCode.METHOD_NOT_FOUND_1, getName() + " (" + - className + ", parameter count: " + parameterCount + ")"); - } - - public String getJavaClassName() { - return this.className; - } - - public String getJavaMethodName() { - return this.methodName; - } - - /** - * Get the Java methods mapped by this function. - * - * @return the Java methods. - */ - public JavaMethod[] getJavaMethods() { - load(); - return javaMethods; - } - - public void setDeterministic(boolean deterministic) { - this.deterministic = deterministic; - } - - public boolean isDeterministic() { - return deterministic; - } - - public String getSource() { - return source; - } - - /** - * Checks if the given method takes a variable number of arguments. For Java - * 1.4 and older, false is returned. Example: - *
      -     * public static double mean(double... values)
      -     * 
      - * - * @param m the method to test - * @return true if the method takes a variable number of arguments. - */ - static boolean isVarArgs(Method m) { - if ("1.5".compareTo(SysProperties.JAVA_SPECIFICATION_VERSION) > 0) { - return false; - } - try { - Method isVarArgs = m.getClass().getMethod("isVarArgs"); - Boolean result = (Boolean) isVarArgs.invoke(m); - return result.booleanValue(); - } catch (Exception e) { - return false; - } - } - - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @return true if yes - */ - public boolean isBufferResultSetToLocalTemp() { - return bufferResultSetToLocalTemp; - } - - /** - * There may be multiple Java methods that match a function name. - * Each method must have a different number of parameters however. - * This helper class represents one such method. - */ - public static class JavaMethod implements Comparable { - private final int id; - private final Method method; - private final int dataType; - private boolean hasConnectionParam; - private boolean varArgs; - private Class varArgClass; - private int paramCount; - - JavaMethod(Method method, int id) { - this.method = method; - this.id = id; - Class[] paramClasses = method.getParameterTypes(); - paramCount = paramClasses.length; - if (paramCount > 0) { - Class paramClass = paramClasses[0]; - if (Connection.class.isAssignableFrom(paramClass)) { - hasConnectionParam = true; - paramCount--; - } - } - if (paramCount > 0) { - Class lastArg = paramClasses[paramClasses.length - 1]; - if (lastArg.isArray() && FunctionAlias.isVarArgs(method)) { - varArgs = true; - varArgClass = lastArg.getComponentType(); - } - } - Class returnClass = method.getReturnType(); - dataType = DataType.getTypeFromClass(returnClass); - } - - @Override - public String toString() { - return method.toString(); - } - - /** - * Check if this function requires a database connection. 
- * - * @return if the function requires a connection - */ - public boolean hasConnectionParam() { - return this.hasConnectionParam; - } - - /** - * Call the user-defined function and return the value. - * - * @param session the session - * @param args the argument list - * @param columnList true if the function should only return the column - * list - * @return the value - */ - public Value getValue(Session session, Expression[] args, - boolean columnList) { - Class[] paramClasses = method.getParameterTypes(); - Object[] params = new Object[paramClasses.length]; - int p = 0; - if (hasConnectionParam && params.length > 0) { - params[p++] = session.createConnection(columnList); - } - - // allocate array for varArgs parameters - Object varArg = null; - if (varArgs) { - int len = args.length - params.length + 1 + - (hasConnectionParam ? 1 : 0); - varArg = Array.newInstance(varArgClass, len); - params[params.length - 1] = varArg; - } - - for (int a = 0, len = args.length; a < len; a++, p++) { - boolean currentIsVarArg = varArgs && - p >= paramClasses.length - 1; - Class paramClass; - if (currentIsVarArg) { - paramClass = varArgClass; - } else { - paramClass = paramClasses[p]; - } - int type = DataType.getTypeFromClass(paramClass); - Value v = args[a].getValue(session); - Object o; - if (Value.class.isAssignableFrom(paramClass)) { - o = v; - } else if (v.getType() == Value.ARRAY && - paramClass.isArray() && - paramClass.getComponentType() != Object.class) { - Value[] array = ((ValueArray) v).getList(); - Object[] objArray = (Object[]) Array.newInstance( - paramClass.getComponentType(), array.length); - int componentType = DataType.getTypeFromClass( - paramClass.getComponentType()); - for (int i = 0; i < objArray.length; i++) { - objArray[i] = array[i].convertTo(componentType).getObject(); - } - o = objArray; - } else { - v = v.convertTo(type); - o = v.getObject(); - } - if (o == null) { - if (paramClass.isPrimitive()) { - if (columnList) { - // If the column list is 
requested, the parameters - // may be null. Need to set to default value, - // otherwise the function can't be called at all. - o = DataType.getDefaultForPrimitiveType(paramClass); - } else { - // NULL for a java primitive: return NULL - return ValueNull.INSTANCE; - } - } - } else { - if (!paramClass.isAssignableFrom(o.getClass()) && !paramClass.isPrimitive()) { - o = DataType.convertTo(session.createConnection(false), v, paramClass); - } - } - if (currentIsVarArg) { - Array.set(varArg, p - params.length + 1, o); - } else { - params[p] = o; - } - } - boolean old = session.getAutoCommit(); - Value identity = session.getLastScopeIdentity(); - boolean defaultConnection = session.getDatabase(). - getSettings().defaultConnection; - try { - session.setAutoCommit(false); - Object returnValue; - try { - if (defaultConnection) { - Driver.setDefaultConnection( - session.createConnection(columnList)); - } - returnValue = method.invoke(null, params); - if (returnValue == null) { - return ValueNull.INSTANCE; - } - } catch (InvocationTargetException e) { - StatementBuilder buff = new StatementBuilder(method.getName()); - buff.append('('); - for (Object o : params) { - buff.appendExceptFirst(", "); - buff.append(o == null ? 
"null" : o.toString()); - } - buff.append(')'); - throw DbException.convertInvocation(e, buff.toString()); - } catch (Exception e) { - throw DbException.convert(e); - } - if (Value.class.isAssignableFrom(method.getReturnType())) { - return (Value) returnValue; - } - Value ret = DataType.convertToValue(session, returnValue, dataType); - return ret.convertTo(dataType); - } finally { - session.setLastScopeIdentity(identity); - session.setAutoCommit(old); - if (defaultConnection) { - Driver.setDefaultConnection(null); - } - } - } - - public Class[] getColumnClasses() { - return method.getParameterTypes(); - } - - public int getDataType() { - return dataType; - } - - public int getParameterCount() { - return paramCount; - } - - public boolean isVarArgs() { - return varArgs; - } - - @Override - public int compareTo(JavaMethod m) { - if (varArgs != m.varArgs) { - return varArgs ? 1 : -1; - } - if (paramCount != m.paramCount) { - return paramCount - m.paramCount; - } - if (hasConnectionParam != m.hasConnectionParam) { - return hasConnectionParam ? 1 : -1; - } - return id - m.id; - } - - } - -} diff --git a/h2/src/main/org/h2/engine/GeneratedKeysMode.java b/h2/src/main/org/h2/engine/GeneratedKeysMode.java new file mode 100644 index 0000000000..bf5f707b7c --- /dev/null +++ b/h2/src/main/org/h2/engine/GeneratedKeysMode.java @@ -0,0 +1,65 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import org.h2.message.DbException; + +/** + * Modes of generated keys' gathering. + */ +public final class GeneratedKeysMode { + + /** + * Generated keys are not needed. + */ + public static final int NONE = 0; + + /** + * Generated keys should be configured automatically. + */ + public static final int AUTO = 1; + + /** + * Use specified column indices to return generated keys from. 
+ */ + public static final int COLUMN_NUMBERS = 2; + + /** + * Use specified column names to return generated keys from. + */ + public static final int COLUMN_NAMES = 3; + + /** + * Determines mode of generated keys' gathering. + * + * @param generatedKeysRequest + * {@code null} or {@code false} if generated keys are not + * needed, {@code true} if generated keys should be configured + * automatically, {@code int[]} to specify column indices to + * return generated keys from, or {@code String[]} to specify + * column names to return generated keys from + * @return mode for the specified generated keys request + */ + public static int valueOf(Object generatedKeysRequest) { + if (generatedKeysRequest == null || Boolean.FALSE.equals(generatedKeysRequest)) { + return NONE; + } + if (Boolean.TRUE.equals(generatedKeysRequest)) { + return AUTO; + } + if (generatedKeysRequest instanceof int[]) { + return ((int[]) generatedKeysRequest).length > 0 ? COLUMN_NUMBERS : NONE; + } + if (generatedKeysRequest instanceof String[]) { + return ((String[]) generatedKeysRequest).length > 0 ? COLUMN_NAMES : NONE; + } + throw DbException.getInternalError(); + } + + private GeneratedKeysMode() { + } + +} diff --git a/h2/src/main/org/h2/engine/IsolationLevel.java b/h2/src/main/org/h2/engine/IsolationLevel.java new file mode 100644 index 0000000000..26309cbdca --- /dev/null +++ b/h2/src/main/org/h2/engine/IsolationLevel.java @@ -0,0 +1,162 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.sql.Connection; + +import org.h2.message.DbException; + +/** + * Level of isolation. + */ +public enum IsolationLevel { + + /** + * Dirty reads, non-repeatable reads and phantom reads are allowed. 
+ */ + READ_UNCOMMITTED(Connection.TRANSACTION_READ_UNCOMMITTED, Constants.LOCK_MODE_OFF), + + /** + * Dirty reads aren't allowed; non-repeatable reads and phantom reads are + * allowed. + */ + READ_COMMITTED(Connection.TRANSACTION_READ_COMMITTED, Constants.LOCK_MODE_READ_COMMITTED), + + /** + * Dirty reads and non-repeatable reads aren't allowed; phantom reads are + * allowed. + */ + REPEATABLE_READ(Connection.TRANSACTION_REPEATABLE_READ, Constants.LOCK_MODE_TABLE), + + /** + * Dirty reads, non-repeatable reads and phantom reads are'n allowed. + */ + SNAPSHOT(Constants.TRANSACTION_SNAPSHOT, Constants.LOCK_MODE_TABLE), + + /** + * Dirty reads, non-repeatable reads and phantom reads are'n allowed. + * Concurrent and serial execution of transactions with this isolation level + * should have the same effect. + */ + SERIALIZABLE(Connection.TRANSACTION_SERIALIZABLE, Constants.LOCK_MODE_TABLE); + + /** + * Returns the isolation level from LOCK_MODE equivalent for PageStore and + * old versions of H2. + * + * @param level + * the LOCK_MODE value + * @return the isolation level + */ + public static IsolationLevel fromJdbc(int level) { + switch (level) { + case Connection.TRANSACTION_READ_UNCOMMITTED: + return IsolationLevel.READ_UNCOMMITTED; + case Connection.TRANSACTION_READ_COMMITTED: + return IsolationLevel.READ_COMMITTED; + case Connection.TRANSACTION_REPEATABLE_READ: + return IsolationLevel.REPEATABLE_READ; + case Constants.TRANSACTION_SNAPSHOT: + return IsolationLevel.SNAPSHOT; + case Connection.TRANSACTION_SERIALIZABLE: + return IsolationLevel.SERIALIZABLE; + default: + throw DbException.getInvalidValueException("isolation level", level); + } + } + + /** + * Returns the isolation level from LOCK_MODE equivalent for PageStore and + * old versions of H2. 
+ * + * @param lockMode + * the LOCK_MODE value + * @return the isolation level + */ + public static IsolationLevel fromLockMode(int lockMode) { + switch (lockMode) { + case Constants.LOCK_MODE_OFF: + return IsolationLevel.READ_UNCOMMITTED; + case Constants.LOCK_MODE_READ_COMMITTED: + default: + return IsolationLevel.READ_COMMITTED; + case Constants.LOCK_MODE_TABLE: + case Constants.LOCK_MODE_TABLE_GC: + return IsolationLevel.SERIALIZABLE; + } + } + + /** + * Returns the isolation level from its SQL name. + * + * @param sql + * the SQL name + * @return the isolation level from its SQL name + */ + public static IsolationLevel fromSql(String sql) { + switch (sql) { + case "READ UNCOMMITTED": + return READ_UNCOMMITTED; + case "READ COMMITTED": + return READ_COMMITTED; + case "REPEATABLE READ": + return REPEATABLE_READ; + case "SNAPSHOT": + return SNAPSHOT; + case "SERIALIZABLE": + return SERIALIZABLE; + default: + throw DbException.getInvalidValueException("isolation level", sql); + } + } + + private final String sql; + + private final int jdbc, lockMode; + + private IsolationLevel(int jdbc, int lockMode) { + sql = name().replace('_', ' ').intern(); + this.jdbc = jdbc; + this.lockMode = lockMode; + } + + /** + * Returns the SQL representation of this isolation level. + * + * @return SQL representation of this isolation level + */ + public String getSQL() { + return sql; + } + + /** + * Returns the JDBC constant for this isolation level. + * + * @return the JDBC constant for this isolation level + */ + public int getJdbc() { + return jdbc; + } + + /** + * Returns the LOCK_MODE equivalent for PageStore and old versions of H2. + * + * @return the LOCK_MODE equivalent + */ + public int getLockMode() { + return lockMode; + } + + /** + * Returns whether a non-repeatable read phenomena is allowed. 
+ * + * @return whether a non-repeatable read phenomena is allowed + */ + public boolean allowNonRepeatableRead() { + return ordinal() < REPEATABLE_READ.ordinal(); + } + +} diff --git a/h2/src/main/org/h2/engine/MetaRecord.java b/h2/src/main/org/h2/engine/MetaRecord.java index a09b0d1633..b0016e4202 100644 --- a/h2/src/main/org/h2/engine/MetaRecord.java +++ b/h2/src/main/org/h2/engine/MetaRecord.java @@ -1,18 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.sql.SQLException; +import java.util.Comparator; import org.h2.api.DatabaseEventListener; +import org.h2.command.CommandInterface; import org.h2.command.Prepared; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.SearchRow; -import org.h2.value.ValueInt; -import org.h2.value.ValueString; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; /** * A record in the system table of the database. @@ -20,52 +22,108 @@ */ public class MetaRecord implements Comparable { + /** + * Comparator for prepared constraints, sorts unique and primary key + * constraints first. + */ + static final Comparator CONSTRAINTS_COMPARATOR = (o1, o2) -> { + int t1 = o1.getType(), t2 = o2.getType(); + boolean u1 = t1 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY + || t1 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE; + boolean u2 = t2 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY + || t2 == CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE; + if (u1 == u2) { + return o1.getPersistedObjectId() - o2.getPersistedObjectId(); + } + return u1 ? 
-1 : 1; + }; + private final int id; private final int objectType; private final String sql; + /** + * Copy metadata from the specified object into specified search row. + * + * @param obj + * database object + * @param r + * search row + */ + public static void populateRowFromDBObject(DbObject obj, SearchRow r) { + r.setValue(0, ValueInteger.get(obj.getId())); + r.setValue(1, ValueInteger.get(0)); + r.setValue(2, ValueInteger.get(obj.getType())); + r.setValue(3, ValueVarchar.get(obj.getCreateSQLForMeta())); + } + public MetaRecord(SearchRow r) { id = r.getValue(0).getInt(); objectType = r.getValue(2).getInt(); sql = r.getValue(3).getString(); } - MetaRecord(DbObject obj) { - id = obj.getId(); - objectType = obj.getType(); - sql = obj.getCreateSQL(); + /** + * Execute the meta data statement. + * + * @param db the database + * @param systemSession the system session + * @param listener the database event listener + */ + void prepareAndExecute(Database db, SessionLocal systemSession, DatabaseEventListener listener) { + try { + Prepared command = systemSession.prepare(sql); + command.setPersistedObjectId(id); + command.update(); + } catch (DbException e) { + throwException(db, listener, e, sql); + } } - void setRecord(SearchRow r) { - r.setValue(0, ValueInt.get(id)); - r.setValue(1, ValueInt.get(0)); - r.setValue(2, ValueInt.get(objectType)); - r.setValue(3, ValueString.get(sql)); + /** + * Prepares the meta data statement. + * + * @param db the database + * @param systemSession the system session + * @param listener the database event listener + * @return the prepared command + */ + Prepared prepare(Database db, SessionLocal systemSession, DatabaseEventListener listener) { + try { + Prepared command = systemSession.prepare(sql); + command.setPersistedObjectId(id); + return command; + } catch (DbException e) { + throwException(db, listener, e, sql); + return null; + } } /** * Execute the meta data statement. 
* * @param db the database - * @param systemSession the system session + * @param command the prepared command * @param listener the database event listener + * @param sql SQL */ - void execute(Database db, Session systemSession, - DatabaseEventListener listener) { + static void execute(Database db, Prepared command, DatabaseEventListener listener, String sql) { try { - Prepared command = systemSession.prepare(sql); - command.setObjectId(id); command.update(); } catch (DbException e) { - e = e.addSQL(sql); - SQLException s = e.getSQLException(); - db.getTrace(Trace.DATABASE).error(s, sql); - if (listener != null) { - listener.exceptionThrown(s, sql); - // continue startup in this case - } else { - throw e; - } + throwException(db, listener, e, sql); + } + } + + private static void throwException(Database db, DatabaseEventListener listener, DbException e, String sql) { + e = e.addSQL(sql); + SQLException s = e.getSQLException(); + db.getTrace(Trace.DATABASE).error(s, sql); + if (listener != null) { + listener.exceptionThrown(s, sql); + // continue startup in this case + } else { + throw e; } } @@ -104,7 +162,7 @@ public int compareTo(MetaRecord other) { * @return the sort index */ private int getCreateOrder() { - switch(objectType) { + switch (objectType) { case DbObject.SETTING: return 0; case DbObject.USER: @@ -113,7 +171,7 @@ private int getCreateOrder() { return 2; case DbObject.FUNCTION_ALIAS: return 3; - case DbObject.USER_DATATYPE: + case DbObject.DOMAIN: return 4; case DbObject.SEQUENCE: return 5; @@ -127,23 +185,24 @@ private int getCreateOrder() { return 9; case DbObject.TRIGGER: return 10; - case DbObject.ROLE: + case DbObject.SYNONYM: return 11; - case DbObject.RIGHT: + case DbObject.ROLE: return 12; - case DbObject.AGGREGATE: + case DbObject.RIGHT: return 13; - case DbObject.COMMENT: + case DbObject.AGGREGATE: return 14; + case DbObject.COMMENT: + return 15; default: - throw DbException.throwInternalError("type="+objectType); + throw 
DbException.getInternalError("type=" + objectType); } } @Override public String toString() { - return "MetaRecord [id=" + id + ", objectType=" + objectType + - ", sql=" + sql + "]"; + return "MetaRecord [id=" + id + ", objectType=" + objectType + ", sql=" + sql + ']'; } } diff --git a/h2/src/main/org/h2/engine/Mode.java b/h2/src/main/org/h2/engine/Mode.java index 74fa2dab45..26f875b976 100644 --- a/h2/src/main/org/h2/engine/Mode.java +++ b/h2/src/main/org/h2/engine/Mode.java @@ -1,13 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; +import java.sql.Types; +import java.util.Collections; import java.util.HashMap; -import org.h2.util.New; +import java.util.Set; +import java.util.regex.Pattern; + import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.Value; /** * The compatibility modes. There is a fixed set of modes (for example @@ -15,12 +21,113 @@ */ public class Mode { + public enum ModeEnum { + REGULAR, STRICT, LEGACY, DB2, Derby, MariaDB, MSSQLServer, HSQLDB, MySQL, Oracle, PostgreSQL + } + + /** + * Determines how rows with {@code NULL} values in indexed columns are handled + * in unique indexes. + */ + public enum UniqueIndexNullsHandling { + /** + * Multiple rows with identical values in indexed columns with at least one + * indexed {@code NULL} value are allowed in unique index. + */ + ALLOW_DUPLICATES_WITH_ANY_NULL, + + /** + * Multiple rows with identical values in indexed columns with all indexed + * {@code NULL} values are allowed in unique index. + */ + ALLOW_DUPLICATES_WITH_ALL_NULLS, + + /** + * Multiple rows with identical values in indexed columns are not allowed in + * unique index. 
+ */ + FORBID_ANY_DUPLICATES + } + /** - * The name of the default mode. + * Generation of column names for expressions. */ - static final String REGULAR = "REGULAR"; + public enum ExpressionNames { + /** + * Use optimized SQL representation of expression. + */ + OPTIMIZED_SQL, + + /** + * Use original SQL representation of expression. + */ + ORIGINAL_SQL, + + /** + * Generate empty name. + */ + EMPTY, + + /** + * Use ordinal number of a column. + */ + NUMBER, + + /** + * Use ordinal number of a column with C prefix. + */ + C_NUMBER, + + /** + * Use function name for functions and ?column? for other expressions + */ + POSTGRESQL_STYLE, + } - private static final HashMap MODES = New.hashMap(); + /** + * Generation of column names for expressions to be used in a view. + */ + public enum ViewExpressionNames { + /** + * Use both specified and generated names as is. + */ + AS_IS, + + /** + * Throw exception for unspecified names. + */ + EXCEPTION, + + /** + * Use both specified and generated names as is, but replace too long + * generated names with {@code Name_exp_###}. + */ + MYSQL_STYLE, + } + + /** + * When CHAR values are right-padded with spaces. + */ + public enum CharPadding { + /** + * CHAR values are always right-padded with spaces. + */ + ALWAYS, + + /** + * Spaces are trimmed from the right side of CHAR values, but CHAR + * values in result sets are right-padded with spaces to the declared + * length + */ + IN_RESULT_SETS, + + /** + * Spaces are trimmed from the right side of CHAR values. + */ + NEVER + } + + private static final HashMap MODES = new HashMap<>(); // Modes are also documented in the features section @@ -32,14 +139,6 @@ public class Mode { */ public boolean aliasColumnName; - /** - * When inserting data, if a column is defined to be NOT NULL and NULL is - * inserted, then a 0 (or empty string, or the current timestamp for - * timestamp columns) value is used. Usually, this operation is not allowed - * and an exception is thrown. 
- */ - public boolean convertInsertNullToZero; - /** * When converting the scale of decimal data, the number is only converted * if the new scale is smaller than the current scale. Usually, the scale is @@ -55,47 +154,21 @@ public class Mode { */ public boolean indexDefinitionInCreateTable; - /** - * Meta data calls return identifiers in lower case. - */ - public boolean lowerCaseIdentifiers; - - /** - * Concatenation with NULL results in NULL. Usually, NULL is treated as an - * empty string if only one of the operands is NULL, and NULL is only - * returned if both operands are NULL. - */ - public boolean nullConcatIsNull; - /** * Identifiers may be quoted using square brackets as in [Test]. */ public boolean squareBracketQuotedNames; /** - * Support for the syntax - * [OFFSET .. ROW|ROWS] [FETCH FIRST .. ROW|ROWS ONLY] - * as an alternative for LIMIT .. OFFSET. - */ - public boolean supportOffsetFetch = Constants.VERSION_MINOR >= 4 ? true : false; - - /** - * The system columns 'CTID' and 'OID' are supported. + * The system columns 'ctid' and 'oid' are supported. */ public boolean systemColumns; /** - * For unique indexes, NULL is distinct. That means only one row with NULL - * in one of the columns is allowed. + * Determines how rows with {@code NULL} values in indexed columns are handled + * in unique indexes. */ - public boolean uniqueIndexSingleNull; - - /** - * When using unique indexes, multiple rows with NULL in all columns - * are allowed, however it is not allowed to have multiple rows with the - * same values otherwise. - */ - public boolean uniqueIndexSingleNullExceptAllColumnsAreNull; + public UniqueIndexNullsHandling uniqueIndexNullsHandling = UniqueIndexNullsHandling.ALLOW_DUPLICATES_WITH_ANY_NULL; /** * Empty strings are treated like NULL values. Useful for Oracle emulation. @@ -113,14 +186,19 @@ public class Mode { public boolean allowPlusForStringConcat; /** - * The function LOG() uses base 10 instead of E. 
+ * The single-argument function LOG() uses base 10 instead of E. */ public boolean logIsLogBase10; /** - * SERIAL and BIGSERIAL columns are not automatically primary keys. + * Swap the parameters of LOG() function. + */ + public boolean swapLogFunctionParameters; + + /** + * The function REGEXP_REPLACE() uses \ for back-references. */ - public boolean serialColumnIsNotPK; + public boolean regexpReplaceBackslashReferences; /** * Swap the parameters of the CONVERT function. @@ -133,74 +211,491 @@ public class Mode { public boolean isolationLevelInSelectOrInsertStatement; /** - * MySQL style INSERT ... ON DUPLICATE KEY UPDATE ... + * MySQL style INSERT ... ON DUPLICATE KEY UPDATE ... and INSERT IGNORE. */ public boolean onDuplicateKeyUpdate; + /** + * MySQL style REPLACE INTO. + */ + public boolean replaceInto; + + /** + * PostgreSQL style INSERT ... ON CONFLICT DO NOTHING. + */ + public boolean insertOnConflict; + + /** + * Pattern describing the keys the java.sql.Connection.setClientInfo() + * method accepts. + */ + public Pattern supportedClientInfoPropertiesRegEx; + + /** + * Support the # for column names + */ + public boolean supportPoundSymbolForColumnNames; + + /** + * Whether IN predicate may have an empty value list. + */ + public boolean allowEmptyInPredicate; + + /** + * How to pad or trim CHAR values. + */ + public CharPadding charPadding = CharPadding.ALWAYS; + + /** + * Whether DB2 TIMESTAMP formats are allowed. + */ + public boolean allowDB2TimestampFormat; + + /** + * Discard SQLServer table hints (e.g. "SELECT * FROM table WITH (NOLOCK)") + */ + public boolean discardWithTableHints; + + /** + * If {@code true}, datetime value function return the same value within a + * transaction, if {@code false} datetime value functions return the same + * value within a command. 
+ */ + public boolean dateTimeValueWithinTransaction; + + /** + * If {@code true} {@code 0x}-prefixed numbers are parsed as binary string + * literals, if {@code false} they are parsed as hexadecimal numeric values. + */ + public boolean zeroExLiteralsAreBinaryStrings; + + /** + * If {@code true} unrelated ORDER BY expression are allowed in DISTINCT + * queries, if {@code false} they are disallowed. + */ + public boolean allowUnrelatedOrderByExpressionsInDistinctQueries; + + /** + * If {@code true} some additional non-standard ALTER TABLE commands are allowed. + */ + public boolean alterTableExtensionsMySQL; + + /** + * If {@code true} non-standard ALTER TABLE MODIFY COLUMN is allowed. + */ + public boolean alterTableModifyColumn; + + /** + * If {@code true} TRUNCATE TABLE uses RESTART IDENTITY by default. + */ + public boolean truncateTableRestartIdentity; + + /** + * If {@code true} NEXT VALUE FOR SEQUENCE, CURRENT VALUE FOR SEQUENCE, + * SEQUENCE.NEXTVAL, and SEQUENCE.CURRVAL return values with DECIMAL/NUMERIC + * data type instead of BIGINT. + */ + public boolean decimalSequences; + + /** + * If {@code true} constructs like 'CREATE TABLE CATALOG..TABLE_NAME' are allowed, + * the default schema is used. + */ + public boolean allowEmptySchemaValuesAsDefaultSchema; + + /** + * If {@code true} all numeric data types may have precision and 'UNSIGNED' + * clause. + */ + public boolean allNumericTypesHavePrecision; + + /** + * If {@code true} 'FOR BIT DATA' clauses are allowed for character string + * data types. + */ + public boolean forBitData; + + /** + * If {@code true} 'CHAR' and 'BYTE' length units are allowed. + */ + public boolean charAndByteLengthUnits; + + /** + * If {@code true}, sequence.NEXTVAL and sequence.CURRVAL pseudo columns are + * supported. + */ + public boolean nextvalAndCurrvalPseudoColumns; + + /** + * If {@code true}, the next value expression returns different values when + * invoked multiple times within a row. 
This setting does not affect + * NEXTVAL() function. + */ + public boolean nextValueReturnsDifferentValues; + + /** + * If {@code true}, sequences of generated by default identity columns are + * updated when value is provided by user. + */ + public boolean updateSequenceOnManualIdentityInsertion; + + /** + * If {@code true}, last identity of the session is updated on insertion of + * a new value into identity column. + */ + public boolean takeInsertedIdentity; + + /** + * If {@code true}, last identity of the session is updated on generation of + * a new sequence value. + */ + public boolean takeGeneratedSequenceValue; + + /** + * If {@code true}, identity columns have DEFAULT ON NULL clause. + */ + public boolean identityColumnsHaveDefaultOnNull; + + /** + * If {@code true}, merge when matched clause may have WHERE clause. + */ + public boolean mergeWhere; + + /** + * If {@code true}, allow using from clause in update statement. + */ + public boolean allowUsingFromClauseInUpdateStatement; + + /** + * If {@code true}, referential constraints will create a unique constraint + * on referenced columns if it doesn't exist instead of throwing an + * exception. + */ + public boolean createUniqueConstraintForReferencedColumns; + + /** + * How column names are generated for expressions. + */ + public ExpressionNames expressionNames = ExpressionNames.OPTIMIZED_SQL; + + /** + * How column names are generated for views. + */ + public ViewExpressionNames viewExpressionNames = ViewExpressionNames.AS_IS; + + /** + * Whether TOP clause in SELECT queries is supported. + */ + public boolean topInSelect; + + /** + * Whether TOP clause in DML commands is supported. + */ + public boolean topInDML; + + /** + * Whether LIMIT / OFFSET clauses are supported. + */ + public boolean limit; + + /** + * Whether MINUS can be used as EXCEPT. + */ + public boolean minusIsExcept; + + /** + * Whether IDENTITY pseudo data type is supported. 
+ */ + public boolean identityDataType; + + /** + * Whether SERIAL and BIGSERIAL pseudo data types are supported. + */ + public boolean serialDataTypes; + + /** + * Whether SQL Server-style IDENTITY clause is supported. + */ + public boolean identityClause; + + /** + * Whether MySQL-style AUTO_INCREMENT clause is supported. + */ + public boolean autoIncrementClause; + + /** + * An optional Set of hidden/disallowed column types. + * Certain DBMSs don't support all column types provided by H2, such as + * "NUMBER" when using PostgreSQL mode. + */ + public Set disallowedTypes = Collections.emptySet(); + + /** + * Custom mappings from type names to data types. + */ + public HashMap typeByNameMap = new HashMap<>(); + + /** + * Allow to use GROUP BY n, where n is column index in the SELECT list, similar to ORDER BY + */ + public boolean groupByColumnIndex; + + /** + * Allow to compare numeric with BOOLEAN. + */ + public boolean numericWithBooleanComparison; + private final String name; + private final ModeEnum modeEnum; + static { - Mode mode = new Mode(REGULAR); - mode.nullConcatIsNull = true; + Mode mode = new Mode(ModeEnum.REGULAR); + mode.allowEmptyInPredicate = true; + mode.dateTimeValueWithinTransaction = true; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.identityDataType = true; + mode.serialDataTypes = true; + mode.autoIncrementClause = true; add(mode); - mode = new Mode("DB2"); + mode = new Mode(ModeEnum.STRICT); + mode.dateTimeValueWithinTransaction = true; + add(mode); + + mode = new Mode(ModeEnum.LEGACY); + // Features of REGULAR mode + mode.allowEmptyInPredicate = true; + mode.dateTimeValueWithinTransaction = true; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.identityDataType = true; + mode.serialDataTypes = true; + mode.autoIncrementClause = true; + // Legacy identity and sequence features + mode.identityClause = true; + mode.updateSequenceOnManualIdentityInsertion = true; + 
mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = true; + mode.nextvalAndCurrvalPseudoColumns = true; + // Legacy DML features + mode.topInDML = true; + mode.mergeWhere = true; + // Legacy DDL features + mode.createUniqueConstraintForReferencedColumns = true; + // Legacy numeric with boolean comparison + mode.numericWithBooleanComparison = true; + add(mode); + + mode = new Mode(ModeEnum.DB2); mode.aliasColumnName = true; - mode.supportOffsetFetch = true; mode.sysDummy1 = true; mode.isolationLevelInSelectOrInsertStatement = true; + // See + // https://www.ibm.com/support/knowledgecenter/SSEPEK_11.0.0/ + // com.ibm.db2z11.doc.java/src/tpc/imjcc_r0052001.dita + mode.supportedClientInfoPropertiesRegEx = + Pattern.compile("ApplicationName|ClientAccountingInformation|" + + "ClientUser|ClientCorrelationToken"); + mode.allowDB2TimestampFormat = true; + mode.forBitData = true; + mode.takeInsertedIdentity = true; + mode.expressionNames = ExpressionNames.NUMBER; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.limit = true; + mode.minusIsExcept = true; + mode.numericWithBooleanComparison = true; add(mode); - mode = new Mode("Derby"); + mode = new Mode(ModeEnum.Derby); mode.aliasColumnName = true; - mode.uniqueIndexSingleNull = true; - mode.supportOffsetFetch = true; + mode.uniqueIndexNullsHandling = UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES; mode.sysDummy1 = true; mode.isolationLevelInSelectOrInsertStatement = true; + // Derby does not support client info properties as of version 10.12.1.1 + mode.supportedClientInfoPropertiesRegEx = null; + mode.forBitData = true; + mode.takeInsertedIdentity = true; + mode.expressionNames = ExpressionNames.NUMBER; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; add(mode); - mode = new Mode("HSQLDB"); - mode.aliasColumnName = true; - mode.convertOnlyToSmallerScale = true; - mode.nullConcatIsNull = true; - mode.uniqueIndexSingleNull = true; + mode = new Mode(ModeEnum.HSQLDB); 
mode.allowPlusForStringConcat = true; + mode.identityColumnsHaveDefaultOnNull = true; + // HSQLDB does not support client info properties. See + // http://hsqldb.org/doc/apidocs/org/hsqldb/jdbc/JDBCConnection.html#setClientInfo-java.lang.String-java.lang.String- + mode.supportedClientInfoPropertiesRegEx = null; + mode.expressionNames = ExpressionNames.C_NUMBER; + mode.topInSelect = true; + mode.limit = true; + mode.minusIsExcept = true; + mode.numericWithBooleanComparison = true; add(mode); - mode = new Mode("MSSQLServer"); + mode = new Mode(ModeEnum.MSSQLServer); mode.aliasColumnName = true; mode.squareBracketQuotedNames = true; - mode.uniqueIndexSingleNull = true; + mode.uniqueIndexNullsHandling = UniqueIndexNullsHandling.FORBID_ANY_DUPLICATES; mode.allowPlusForStringConcat = true; + mode.swapLogFunctionParameters = true; mode.swapConvertFunctionParameters = true; + mode.supportPoundSymbolForColumnNames = true; + mode.discardWithTableHints = true; + // MS SQL Server does not support client info properties. 
See + // https://msdn.microsoft.com/en-Us/library/dd571296%28v=sql.110%29.aspx + mode.supportedClientInfoPropertiesRegEx = null; + mode.zeroExLiteralsAreBinaryStrings = true; + mode.truncateTableRestartIdentity = true; + mode.takeInsertedIdentity = true; + DataType dt = DataType.createNumeric(19, 4); + dt.type = Value.NUMERIC; + dt.sqlType = Types.NUMERIC; + dt.specialPrecisionScale = true; + mode.typeByNameMap.put("MONEY", dt); + dt = DataType.createNumeric(10, 4); + dt.type = Value.NUMERIC; + dt.sqlType = Types.NUMERIC; + dt.specialPrecisionScale = true; + mode.typeByNameMap.put("SMALLMONEY", dt); + mode.typeByNameMap.put("UNIQUEIDENTIFIER", DataType.getDataType(Value.UUID)); + mode.allowEmptySchemaValuesAsDefaultSchema = true; + mode.expressionNames = ExpressionNames.EMPTY; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.topInSelect = true; + mode.topInDML = true; + mode.identityClause = true; + mode.numericWithBooleanComparison = true; add(mode); - mode = new Mode("MySQL"); - mode.convertInsertNullToZero = true; + mode = new Mode(ModeEnum.MariaDB); mode.indexDefinitionInCreateTable = true; - mode.lowerCaseIdentifiers = true; + mode.regexpReplaceBackslashReferences = true; mode.onDuplicateKeyUpdate = true; + mode.replaceInto = true; + mode.charPadding = CharPadding.NEVER; + mode.supportedClientInfoPropertiesRegEx = Pattern.compile(".*"); + mode.zeroExLiteralsAreBinaryStrings = true; + mode.allowUnrelatedOrderByExpressionsInDistinctQueries = true; + mode.alterTableExtensionsMySQL = true; + mode.alterTableModifyColumn = true; + mode.truncateTableRestartIdentity = true; + mode.allNumericTypesHavePrecision = true; + mode.nextValueReturnsDifferentValues = true; + mode.updateSequenceOnManualIdentityInsertion = true; + mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + mode.viewExpressionNames = ViewExpressionNames.MYSQL_STYLE; + mode.limit = true; + 
mode.autoIncrementClause = true; + mode.typeByNameMap.put("YEAR", DataType.getDataType(Value.SMALLINT)); + mode.groupByColumnIndex = true; + mode.numericWithBooleanComparison = true; add(mode); - mode = new Mode("Oracle"); + mode = new Mode(ModeEnum.MySQL); + mode.indexDefinitionInCreateTable = true; + mode.regexpReplaceBackslashReferences = true; + mode.onDuplicateKeyUpdate = true; + mode.replaceInto = true; + mode.charPadding = CharPadding.NEVER; + // MySQL allows to use any key for client info entries. See + // https://github.com/mysql/mysql-connector-j/blob/5.1.47/src/com/mysql/jdbc/JDBC4CommentClientInfoProvider.java + mode.supportedClientInfoPropertiesRegEx = + Pattern.compile(".*"); + mode.zeroExLiteralsAreBinaryStrings = true; + mode.allowUnrelatedOrderByExpressionsInDistinctQueries = true; + mode.alterTableExtensionsMySQL = true; + mode.alterTableModifyColumn = true; + mode.truncateTableRestartIdentity = true; + mode.allNumericTypesHavePrecision = true; + mode.updateSequenceOnManualIdentityInsertion = true; + mode.takeInsertedIdentity = true; + mode.identityColumnsHaveDefaultOnNull = true; + mode.createUniqueConstraintForReferencedColumns = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + mode.viewExpressionNames = ViewExpressionNames.MYSQL_STYLE; + mode.limit = true; + mode.autoIncrementClause = true; + mode.typeByNameMap.put("YEAR", DataType.getDataType(Value.SMALLINT)); + mode.groupByColumnIndex = true; + mode.numericWithBooleanComparison = true; + add(mode); + + mode = new Mode(ModeEnum.Oracle); mode.aliasColumnName = true; mode.convertOnlyToSmallerScale = true; - mode.uniqueIndexSingleNullExceptAllColumnsAreNull = true; + mode.uniqueIndexNullsHandling = UniqueIndexNullsHandling.ALLOW_DUPLICATES_WITH_ALL_NULLS; mode.treatEmptyStringsAsNull = true; + mode.regexpReplaceBackslashReferences = true; + mode.supportPoundSymbolForColumnNames = true; + // Oracle accepts keys of the form .*. 
See + // https://docs.oracle.com/database/121/JJDBC/jdbcvers.htm#JJDBC29006 + mode.supportedClientInfoPropertiesRegEx = + Pattern.compile(".*\\..*"); + mode.alterTableModifyColumn = true; + mode.decimalSequences = true; + mode.charAndByteLengthUnits = true; + mode.nextvalAndCurrvalPseudoColumns = true; + mode.mergeWhere = true; + mode.minusIsExcept = true; + mode.expressionNames = ExpressionNames.ORIGINAL_SQL; + mode.viewExpressionNames = ViewExpressionNames.EXCEPTION; + mode.typeByNameMap.put("BINARY_FLOAT", DataType.getDataType(Value.REAL)); + mode.typeByNameMap.put("BINARY_DOUBLE", DataType.getDataType(Value.DOUBLE)); + dt = DataType.createDate(/* 2001-01-01 23:59:59 */ 19, 19, "DATE", false, 0, 0); + dt.type = Value.TIMESTAMP; + dt.sqlType = Types.TIMESTAMP; + dt.specialPrecisionScale = true; + mode.typeByNameMap.put("DATE", dt); add(mode); - mode = new Mode("PostgreSQL"); + mode = new Mode(ModeEnum.PostgreSQL); mode.aliasColumnName = true; - mode.nullConcatIsNull = true; - mode.supportOffsetFetch = true; mode.systemColumns = true; mode.logIsLogBase10 = true; - mode.serialColumnIsNotPK = true; + mode.regexpReplaceBackslashReferences = true; + mode.insertOnConflict = true; + // PostgreSQL only supports the ApplicationName property. 
See + // https://github.com/hhru/postgres-jdbc/blob/master/postgresql-jdbc-9.2-1002.src/ + // org/postgresql/jdbc4/AbstractJdbc4Connection.java + mode.supportedClientInfoPropertiesRegEx = + Pattern.compile("ApplicationName"); + mode.charPadding = CharPadding.IN_RESULT_SETS; + mode.nextValueReturnsDifferentValues = true; + mode.takeGeneratedSequenceValue = true; + mode.expressionNames = ExpressionNames.POSTGRESQL_STYLE; + mode.allowUsingFromClauseInUpdateStatement = true; + mode.limit = true; + mode.serialDataTypes = true; + // Enumerate all H2 types NOT supported by PostgreSQL: + Set disallowedTypes = new java.util.HashSet<>(); + disallowedTypes.add("NUMBER"); + disallowedTypes.add("TINYINT"); + disallowedTypes.add("BLOB"); + disallowedTypes.add("VARCHAR_IGNORECASE"); + mode.disallowedTypes = disallowedTypes; + dt = DataType.getDataType(Value.JSON); + mode.typeByNameMap.put("JSONB", dt); + dt = DataType.createNumeric(19, 2); + dt.type = Value.NUMERIC; + dt.sqlType = Types.NUMERIC; + dt.specialPrecisionScale = true; + mode.typeByNameMap.put("MONEY", dt); + dt = DataType.getDataType(Value.INTEGER); + mode.typeByNameMap.put("OID", dt); + mode.dateTimeValueWithinTransaction = true; + mode.groupByColumnIndex = true; add(mode); } - private Mode(String name) { - this.name = name; + private Mode(ModeEnum modeEnum) { + this.name = modeEnum.name(); + this.modeEnum = modeEnum; } private static void add(Mode mode) { @@ -217,8 +712,21 @@ public static Mode getInstance(String name) { return MODES.get(StringUtils.toUpperEnglish(name)); } + public static Mode getRegular() { + return getInstance(ModeEnum.REGULAR.name()); + } + public String getName() { return name; } + public ModeEnum getEnum() { + return this.modeEnum; + } + + @Override + public String toString() { + return name; + } + } diff --git a/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java b/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java new file mode 100644 index 0000000000..d8022ac6e2 --- /dev/null +++ 
b/h2/src/main/org/h2/engine/OnExitDatabaseCloser.java @@ -0,0 +1,117 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.util.WeakHashMap; + +import org.h2.message.Trace; + +/** + * This class is responsible to close a database on JVM shutdown. + */ +class OnExitDatabaseCloser extends Thread { + + private static final WeakHashMap DATABASES = new WeakHashMap<>(); + + private static final Thread INSTANCE = new OnExitDatabaseCloser(); + + private static boolean registered; + + private static boolean terminated; + + /** + * Register database instance to close one on the JVM process shutdown. + * + * @param db Database instance. + */ + static synchronized void register(Database db) { + if (terminated) { + // Shutdown in progress + return; + } + DATABASES.put(db, null); + if (!registered) { + // Mark as registered unconditionally to avoid further attempts to register a + // shutdown hook in case of exception. + registered = true; + try { + Runtime.getRuntime().addShutdownHook(INSTANCE); + } catch (IllegalStateException e) { + // shutdown in progress - just don't register the handler + // (maybe an application wants to write something into a + // database at shutdown time) + } catch (SecurityException e) { + // applets may not do that - ignore + // Google App Engine doesn't allow + // to instantiate classes that extend Thread + } + } + } + + /** + * Unregister database instance. + * + * @param db Database instance. 
+ */ + static synchronized void unregister(Database db) { + if (terminated) { + // Shutdown in progress, do nothing + // This method can be called from the onShutdown() + return; + } + DATABASES.remove(db); + if (DATABASES.isEmpty() && registered) { + try { + Runtime.getRuntime().removeShutdownHook(INSTANCE); + } catch (IllegalStateException e) { + // ignore + } catch (SecurityException e) { + // applets may not do that - ignore + } + registered = false; + } + } + + private static void onShutdown() { + synchronized(OnExitDatabaseCloser.class) { + terminated = true; + } + RuntimeException root = null; + for (Database database : DATABASES.keySet()) { + try { + database.close(true); + } catch (RuntimeException e) { + // this can happen when stopping a web application, + // if loading classes is no longer allowed + // it would throw an IllegalStateException + try { + database.getTrace(Trace.DATABASE).error(e, "could not close the database"); + // if this was successful, we ignore the exception + // otherwise not + } catch (Throwable e2) { + e.addSuppressed(e2); + if (root == null) { + root = e; + } else { + root.addSuppressed(e); + } + } + } + } + if (root != null) { + throw root; + } + } + + private OnExitDatabaseCloser() { + } + + @Override + public void run() { + onShutdown(); + } + +} diff --git a/h2/src/main/org/h2/engine/Procedure.java b/h2/src/main/org/h2/engine/Procedure.java index 36fb8f5bac..899309b6f6 100644 --- a/h2/src/main/org/h2/engine/Procedure.java +++ b/h2/src/main/org/h2/engine/Procedure.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; diff --git a/h2/src/main/org/h2/engine/QueryStatisticsData.java b/h2/src/main/org/h2/engine/QueryStatisticsData.java index f537745ef0..9d805e8a5f 100644 --- a/h2/src/main/org/h2/engine/QueryStatisticsData.java +++ b/h2/src/main/org/h2/engine/QueryStatisticsData.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -19,57 +18,55 @@ */ public class QueryStatisticsData { - private static final int MAX_QUERY_ENTRIES = 100; - private static final Comparator QUERY_ENTRY_COMPARATOR = - new Comparator() { - @Override - public int compare(QueryEntry o1, QueryEntry o2) { - return (int) Math.signum(o1.lastUpdateTime - o2.lastUpdateTime); - } - }; + Comparator.comparingLong(q -> q.lastUpdateTime); + + private final HashMap map = new HashMap<>(); + + private int maxQueryEntries; + + public QueryStatisticsData(int maxQueryEntries) { + this.maxQueryEntries = maxQueryEntries; + } - private final HashMap map = - new HashMap(); + public synchronized void setMaxQueryEntries(int maxQueryEntries) { + this.maxQueryEntries = maxQueryEntries; + } public synchronized List getQueries() { // return a copy of the map so we don't have to // worry about external synchronization - ArrayList list = new ArrayList(); - list.addAll(map.values()); + ArrayList list = new ArrayList<>(map.values()); // only return the newest 100 entries - Collections.sort(list, QUERY_ENTRY_COMPARATOR); - return list.subList(0, Math.min(list.size(), MAX_QUERY_ENTRIES)); + list.sort(QUERY_ENTRY_COMPARATOR); + return list.subList(0, 
Math.min(list.size(), maxQueryEntries)); } /** * Update query statistics. * * @param sqlStatement the statement being executed - * @param executionTime the time in milliseconds the query/update took to - * execute + * @param executionTimeNanos the time in nanoseconds the query/update took + * to execute * @param rowCount the query or update row count */ - public synchronized void update(String sqlStatement, long executionTime, - int rowCount) { + public synchronized void update(String sqlStatement, long executionTimeNanos, long rowCount) { QueryEntry entry = map.get(sqlStatement); if (entry == null) { - entry = new QueryEntry(); - entry.sqlStatement = sqlStatement; + entry = new QueryEntry(sqlStatement); map.put(sqlStatement, entry); } - entry.update(executionTime, rowCount); + entry.update(executionTimeNanos, rowCount); // Age-out the oldest entries if the map gets too big. // Test against 1.5 x max-size so we don't do this too often - if (map.size() > MAX_QUERY_ENTRIES * 1.5f) { + if (map.size() > maxQueryEntries * 1.5f) { // Sort the entries by age - ArrayList list = new ArrayList(); - list.addAll(map.values()); - Collections.sort(list, QUERY_ENTRY_COMPARATOR); + ArrayList list = new ArrayList<>(map.values()); + list.sort(QUERY_ENTRY_COMPARATOR); // Create a set of the oldest 1/3 of the entries HashSet oldestSet = - new HashSet(list.subList(0, list.size() / 3)); + new HashSet<>(list.subList(0, list.size() / 3)); // Loop over the map using the set and remove // the oldest 1/3 of the entries. for (Iterator> it = @@ -90,7 +87,7 @@ public static final class QueryEntry { /** * The SQL statement. */ - public String sqlStatement; + public final String sqlStatement; /** * The number of times the statement was executed. @@ -104,29 +101,29 @@ public static final class QueryEntry { public long lastUpdateTime; /** - * The minimum execution time, in milliseconds. + * The minimum execution time, in nanoseconds. 
*/ - public long executionTimeMin; + public long executionTimeMinNanos; /** - * The maximum execution time, in milliseconds. + * The maximum execution time, in nanoseconds. */ - public long executionTimeMax; + public long executionTimeMaxNanos; /** * The total execution time. */ - public long executionTimeCumulative; + public long executionTimeCumulativeNanos; /** * The minimum number of rows. */ - public int rowCountMin; + public long rowCountMin; /** * The maximum number of rows. */ - public int rowCountMax; + public long rowCountMax; /** * The total number of rows. @@ -136,7 +133,7 @@ public static final class QueryEntry { /** * The mean execution time. */ - public double executionTimeMean; + public double executionTimeMeanNanos; /** * The mean number of rows. @@ -144,42 +141,45 @@ public static final class QueryEntry { public double rowCountMean; // Using Welford's method, see also - // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - // http://www.johndcook.com/standard_deviation.html + // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + // https://www.johndcook.com/blog/standard_deviation/ - private double executionTimeM2; + private double executionTimeM2Nanos; private double rowCountM2; + public QueryEntry(String sql) { + this.sqlStatement = sql; + } + /** * Update the statistics entry. 
* - * @param time the execution time + * @param timeNanos the execution time in nanos * @param rows the number of rows */ - void update(long time, int rows) { + void update(long timeNanos, long rows) { count++; - executionTimeMin = Math.min(time, executionTimeMin); - executionTimeMax = Math.max(time, executionTimeMax); + executionTimeMinNanos = Math.min(timeNanos, executionTimeMinNanos); + executionTimeMaxNanos = Math.max(timeNanos, executionTimeMaxNanos); rowCountMin = Math.min(rows, rowCountMin); rowCountMax = Math.max(rows, rowCountMax); - double delta = rows - rowCountMean; - rowCountMean += delta / count; - rowCountM2 += delta * (rows - rowCountMean); + double rowDelta = rows - rowCountMean; + rowCountMean += rowDelta / count; + rowCountM2 += rowDelta * (rows - rowCountMean); - delta = time - executionTimeMean; - executionTimeMean += delta / count; - executionTimeM2 += delta * (time - executionTimeMean); + double timeDelta = timeNanos - executionTimeMeanNanos; + executionTimeMeanNanos += timeDelta / count; + executionTimeM2Nanos += timeDelta * (timeNanos - executionTimeMeanNanos); - executionTimeCumulative += time; + executionTimeCumulativeNanos += timeNanos; rowCountCumulative += rows; lastUpdateTime = System.currentTimeMillis(); - } public double getExecutionTimeStandardDeviation() { // population standard deviation - return Math.sqrt(executionTimeM2 / count); + return Math.sqrt(executionTimeM2Nanos / count); } public double getRowCountStandardDeviation() { diff --git a/h2/src/main/org/h2/engine/Right.java b/h2/src/main/org/h2/engine/Right.java index b55d0ebf3b..3f171b7559 100644 --- a/h2/src/main/org/h2/engine/Right.java +++ b/h2/src/main/org/h2/engine/Right.java @@ -1,19 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; import org.h2.message.DbException; import org.h2.message.Trace; +import org.h2.schema.Schema; import org.h2.table.Table; /** * An access right. Rights are regular database objects, but have generated * names. */ -public class Right extends DbObjectBase { +public final class Right extends DbObject { /** * The right bit mask that means: selecting from a table is allowed. @@ -40,33 +41,52 @@ public class Right extends DbObjectBase { */ public static final int ALTER_ANY_SCHEMA = 16; + /** + * The right bit mask that means: user is a schema owner. This mask isn't + * used in GRANT / REVOKE statements. + */ + public static final int SCHEMA_OWNER = 32; + /** * The right bit mask that means: select, insert, update, delete, and update * for this object is allowed. */ public static final int ALL = SELECT | DELETE | INSERT | UPDATE; + /** + * To whom the right is granted. + */ + private RightOwner grantee; + + /** + * The granted role, or null if a right was granted. + */ private Role grantedRole; + + /** + * The granted right. + */ private int grantedRight; - private Table grantedTable; - private RightOwner grantee; + + /** + * The object. If the right is global, this is null. 
+ */ + private DbObject grantedObject; public Right(Database db, int id, RightOwner grantee, Role grantedRole) { - initDbObjectBase(db, id, "RIGHT_" + id, Trace.USER); + super(db, id, "RIGHT_" + id, Trace.USER); this.grantee = grantee; this.grantedRole = grantedRole; } - public Right(Database db, int id, RightOwner grantee, int grantedRight, - Table grantedRightOnTable) { - initDbObjectBase(db, id, "" + id, Trace.USER); + public Right(Database db, int id, RightOwner grantee, int grantedRight, DbObject grantedObject) { + super(db, id, Integer.toString(id), Trace.USER); this.grantee = grantee; this.grantedRight = grantedRight; - this.grantedTable = grantedRightOnTable; + this.grantedObject = grantedObject; } - private static boolean appendRight(StringBuilder buff, int right, int mask, - String name, boolean comma) { + private static boolean appendRight(StringBuilder buff, int right, int mask, String name, boolean comma) { if ((right & mask) != 0) { if (comma) { buff.append(", "); @@ -86,9 +106,8 @@ public String getRights() { comma = appendRight(buff, grantedRight, SELECT, "SELECT", comma); comma = appendRight(buff, grantedRight, DELETE, "DELETE", comma); comma = appendRight(buff, grantedRight, INSERT, "INSERT", comma); - comma = appendRight(buff, grantedRight, ALTER_ANY_SCHEMA, - "ALTER ANY SCHEMA", comma); - appendRight(buff, grantedRight, UPDATE, "UPDATE", comma); + comma = appendRight(buff, grantedRight, UPDATE, "UPDATE", comma); + appendRight(buff, grantedRight, ALTER_ANY_SCHEMA, "ALTER ANY SCHEMA", comma); } return buff.toString(); } @@ -97,8 +116,8 @@ public Role getGrantedRole() { return grantedRole; } - public Table getGrantedTable() { - return grantedTable; + public DbObject getGrantedObject() { + return grantedObject; } public DbObject getGrantee() { @@ -106,29 +125,35 @@ public DbObject getGrantee() { } @Override - public String getDropSQL() { - return null; + public String getCreateSQLForCopy(Table table, String quotedName) { + return 
getCreateSQLForCopy(table); } - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - StringBuilder buff = new StringBuilder(); - buff.append("GRANT "); + private String getCreateSQLForCopy(DbObject object) { + StringBuilder builder = new StringBuilder(); + builder.append("GRANT "); if (grantedRole != null) { - buff.append(grantedRole.getSQL()); + grantedRole.getSQL(builder, DEFAULT_SQL_FLAGS); } else { - buff.append(getRights()); - if (table != null) { - buff.append(" ON ").append(table.getSQL()); + builder.append(getRights()); + if (object != null) { + if (object instanceof Schema) { + builder.append(" ON SCHEMA "); + object.getSQL(builder, DEFAULT_SQL_FLAGS); + } else if (object instanceof Table) { + builder.append(" ON "); + object.getSQL(builder, DEFAULT_SQL_FLAGS); + } } } - buff.append(" TO ").append(grantee.getSQL()); - return buff.toString(); + builder.append(" TO "); + grantee.getSQL(builder, DEFAULT_SQL_FLAGS); + return builder.toString(); } @Override public String getCreateSQL() { - return getCreateSQLForCopy(grantedTable, null); + return getCreateSQLForCopy(grantedObject); } @Override @@ -137,22 +162,22 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { - if (grantedTable != null) { - grantee.revokeRight(grantedTable); - } else { + public void removeChildrenAndResources(SessionLocal session) { + if (grantedRole != null) { grantee.revokeRole(grantedRole); + } else { + grantee.revokeRight(grantedObject); } database.removeMeta(session, getId()); grantedRole = null; - grantedTable = null; + grantedObject = null; grantee = null; invalidate(); } @Override public void checkRename() { - DbException.throwInternalError(); + throw DbException.getInternalError(); } public void setRightMask(int rightMask) { diff --git a/h2/src/main/org/h2/engine/RightOwner.java b/h2/src/main/org/h2/engine/RightOwner.java index 4687ad4f23..bcd5e0ebfc 100644 --- a/h2/src/main/org/h2/engine/RightOwner.java +++ 
b/h2/src/main/org/h2/engine/RightOwner.java @@ -1,19 +1,25 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; +import org.h2.schema.Schema; import org.h2.table.Table; -import org.h2.util.New; +import org.h2.util.StringUtils; /** * A right owner (sometimes called principal). */ -public abstract class RightOwner extends DbObjectBase { +public abstract class RightOwner extends DbObject { /** * The map of granted roles. @@ -23,11 +29,15 @@ public abstract class RightOwner extends DbObjectBase { /** * The map of granted rights. */ - private HashMap grantedRights; + private HashMap grantedRights; - protected RightOwner(Database database, int id, String name, - String traceModule) { - initDbObjectBase(database, id, name, traceModule); + protected RightOwner(Database database, int id, String name, int traceModuleId) { + super(database, id, StringUtils.toUpperEnglish(name), traceModuleId); + } + + @Override + public void rename(String newName) { + super.rename(StringUtils.toUpperEnglish(newName)); } /** @@ -54,26 +64,69 @@ public boolean isRoleGranted(Role grantedRole) { } /** - * Check if a right is already granted to this object or to objects that - * were granted to this object. + * Checks if a right is already granted to this object or to objects that + * were granted to this object. The rights of schemas will be valid for + * every each table in the related schema. The ALTER ANY SCHEMA right gives + * all rights to all tables. 
* - * @param table the table to check - * @param rightMask the right mask to check + * @param table + * the table to check + * @param rightMask + * the right mask to check * @return true if the right was already granted */ - boolean isRightGrantedRecursive(Table table, int rightMask) { - Right right; + final boolean isTableRightGrantedRecursive(Table table, int rightMask) { + Schema schema = table.getSchema(); + if (schema.getOwner() == this) { + return true; + } if (grantedRights != null) { + Right right = grantedRights.get(null); + if (right != null && (right.getRightMask() & Right.ALTER_ANY_SCHEMA) == Right.ALTER_ANY_SCHEMA) { + return true; + } + right = grantedRights.get(schema); + if (right != null && (right.getRightMask() & rightMask) == rightMask) { + return true; + } right = grantedRights.get(table); - if (right != null) { - if ((right.getRightMask() & rightMask) == rightMask) { + if (right != null && (right.getRightMask() & rightMask) == rightMask) { + return true; + } + } + if (grantedRoles != null) { + for (Role role : grantedRoles.keySet()) { + if (role.isTableRightGrantedRecursive(table, rightMask)) { return true; } } } + return false; + } + + /** + * Checks if a schema owner right is already granted to this object or to + * objects that were granted to this object. The ALTER ANY SCHEMA right + * gives rights to all schemas. 
+ * + * @param schema + * the schema to check, or {@code null} to check for ALTER ANY + * SCHEMA right only + * @return true if the right was already granted + */ + final boolean isSchemaRightGrantedRecursive(Schema schema) { + if (schema != null && schema.getOwner() == this) { + return true; + } + if (grantedRights != null) { + Right right = grantedRights.get(null); + if (right != null && (right.getRightMask() & Right.ALTER_ANY_SCHEMA) == Right.ALTER_ANY_SCHEMA) { + return true; + } + } if (grantedRoles != null) { - for (RightOwner role : grantedRoles.keySet()) { - if (role.isRightGrantedRecursive(table, rightMask)) { + for (Role role : grantedRoles.keySet()) { + if (role.isSchemaRightGrantedRecursive(schema)) { return true; } } @@ -85,26 +138,26 @@ boolean isRightGrantedRecursive(Table table, int rightMask) { * Grant a right for the given table. Only one right object per table is * supported. * - * @param table the table + * @param object the object (table or schema) * @param right the right */ - public void grantRight(Table table, Right right) { + public void grantRight(DbObject object, Right right) { if (grantedRights == null) { - grantedRights = New.hashMap(); + grantedRights = new HashMap<>(); } - grantedRights.put(table, right); + grantedRights.put(object, right); } /** - * Revoke the right for the given table. + * Revoke the right for the given object (table or schema). 
* - * @param table the table + * @param object the object */ - void revokeRight(Table table) { + void revokeRight(DbObject object) { if (grantedRights == null) { return; } - grantedRights.remove(table); + grantedRights.remove(object); if (grantedRights.size() == 0) { grantedRights = null; } @@ -118,7 +171,7 @@ void revokeRight(Table table) { */ public void grantRole(Role role, Right right) { if (grantedRoles == null) { - grantedRoles = New.hashMap(); + grantedRoles = new HashMap<>(); } grantedRoles.put(role, right); } @@ -143,16 +196,36 @@ void revokeRole(Role role) { } /** - * Get the 'grant table' right of this object. + * Remove all the temporary rights granted on roles + */ + public void revokeTemporaryRightsOnRoles() { + if (grantedRoles == null) { + return; + } + List rolesToRemove= new ArrayList<>(); + for (Entry currentEntry : grantedRoles.entrySet()) { + if ( currentEntry.getValue().isTemporary() || !currentEntry.getValue().isValid()) { + rolesToRemove.add(currentEntry.getKey()); + } + } + for (Role currentRoleToRemove : rolesToRemove) { + revokeRole(currentRoleToRemove); + } + } + + + + /** + * Get the 'grant schema' right of this object. * - * @param table the granted table + * @param object the granted object (table or schema) * @return the right or null if the right has not been granted */ - public Right getRightForTable(Table table) { + public Right getRightForObject(DbObject object) { if (grantedRights == null) { return null; } - return grantedRights.get(table); + return grantedRights.get(object); } /** @@ -168,4 +241,19 @@ public Right getRightForRole(Role role) { return grantedRoles.get(role); } + /** + * Check that this right owner does not own any schema. An exception is + * thrown if it owns one or more schemas. 
+ * + * @throws DbException + * if this right owner owns a schema + */ + public final void checkOwnsNoSchemas() { + for (Schema s : database.getAllSchemas()) { + if (this == s.getOwner()) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, getName(), s.getName()); + } + } + } + } diff --git a/h2/src/main/org/h2/engine/Role.java b/h2/src/main/org/h2/engine/Role.java index 561dd3b7b2..7fec06ca11 100644 --- a/h2/src/main/org/h2/engine/Role.java +++ b/h2/src/main/org/h2/engine/Role.java @@ -1,18 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; +import java.util.ArrayList; + import org.h2.message.DbException; import org.h2.message.Trace; +import org.h2.schema.Schema; import org.h2.table.Table; /** * Represents a role. Roles can be granted to users, and to other roles. 
*/ -public class Role extends RightOwner { +public final class Role extends RightOwner { private final boolean system; @@ -23,12 +26,7 @@ public Role(Database database, int id, String roleName, boolean system) { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } /** @@ -41,12 +39,11 @@ public String getCreateSQL(boolean ifNotExists) { if (system) { return null; } - StringBuilder buff = new StringBuilder("CREATE ROLE "); + StringBuilder builder = new StringBuilder("CREATE ROLE "); if (ifNotExists) { - buff.append("IF NOT EXISTS "); + builder.append("IF NOT EXISTS "); } - buff.append(getSQL()); - return buff.toString(); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override @@ -60,15 +57,20 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { - for (User user : database.getAllUsers()) { - Right right = user.getRightForRole(this); - if (right != null) { - database.removeDatabaseObject(session, right); + public ArrayList getChildren() { + ArrayList children = new ArrayList<>(); + for (Schema schema : database.getAllSchemas()) { + if (schema.getOwner() == this) { + children.add(schema); } } - for (Role r2 : database.getAllRoles()) { - Right right = r2.getRightForRole(this); + return children; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + Right right = rightOwner.getRightForRole(this); if (right != null) { database.removeDatabaseObject(session, right); } @@ -82,9 +84,4 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // ok - } - } diff --git a/h2/src/main/org/h2/engine/Session.java b/h2/src/main/org/h2/engine/Session.java index 417eff89a9..654458ceee 100644 
--- a/h2/src/main/org/h2/engine/Session.java +++ b/h2/src/main/org/h2/engine/Session.java @@ -1,1535 +1,310 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Random; -import org.h2.api.ErrorCode; -import org.h2.command.Command; import org.h2.command.CommandInterface; -import org.h2.command.Parser; -import org.h2.command.Prepared; -import org.h2.command.dml.SetTypes; -import org.h2.constraint.Constraint; -import org.h2.index.Index; -import org.h2.jdbc.JdbcConnection; -import org.h2.message.DbException; +import org.h2.jdbc.meta.DatabaseMeta; import org.h2.message.Trace; -import org.h2.message.TraceSystem; -import org.h2.mvstore.db.MVTable; -import org.h2.mvstore.db.TransactionStore.Change; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.result.LocalResult; -import org.h2.result.Row; -import org.h2.schema.Schema; +import org.h2.result.ResultInterface; import org.h2.store.DataHandler; -import org.h2.store.InDoubtTransaction; -import org.h2.store.LobStorageFrontend; -import org.h2.table.Table; -import org.h2.util.New; -import org.h2.util.SmallLRUCache; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.value.ValueLob; /** - * A session represents an embedded database connection. 
When using the server - * mode, this object resides on the server side and communicates with a - * SessionRemote object on the client side. + * A local or remote session. A session represents a database connection. */ -public class Session extends SessionWithState { +public abstract class Session implements CastDataProvider, AutoCloseable { /** - * This special log position means that the log entry has been written. + * Static settings. */ - public static final int LOG_WRITTEN = -1; + public static final class StaticSettings { - /** - * The prefix of generated identifiers. It may not have letters, because - * they are case sensitive. - */ - private static final String SYSTEM_IDENTIFIER_PREFIX = "_"; - private static int nextSerialId; - - private final int serialId = nextSerialId++; - private final Database database; - private ConnectionInfo connectionInfo; - private final User user; - private final int id; - private final ArrayList
    locks = New.arrayList(); - private final UndoLog undoLog; - private boolean autoCommit = true; - private Random random; - private int lockTimeout; - private Value lastIdentity = ValueLong.get(0); - private Value lastScopeIdentity = ValueLong.get(0); - private int firstUncommittedLog = Session.LOG_WRITTEN; - private int firstUncommittedPos = Session.LOG_WRITTEN; - private HashMap savepoints; - private HashMap localTempTables; - private HashMap localTempTableIndexes; - private HashMap localTempTableConstraints; - private int throttle; - private long lastThrottle; - private Command currentCommand; - private boolean allowLiterals; - private String currentSchemaName; - private String[] schemaSearchPath; - private Trace trace; - private HashMap unlinkLobMap; - private int systemIdentifier; - private HashMap procedures; - private boolean undoLogEnabled = true; - private boolean redoLogBinary = true; - private boolean autoCommitAtTransactionEnd; - private String currentTransactionName; - private volatile long cancelAt; - private boolean closed; - private final long sessionStart = System.currentTimeMillis(); - private long transactionStart; - private long currentCommandStart; - private HashMap variables; - private HashSet temporaryResults; - private int queryTimeout; - private boolean commitOrRollbackDisabled; - private Table waitForLock; - private Thread waitForLockThread; - private int modificationId; - private int objectId; - private final int queryCacheSize; - private SmallLRUCache queryCache; - private long modificationMetaID = -1; - - /** - * Temporary LOBs from result sets. Those are kept for some time. The - * problem is that transactions are committed before the result is returned, - * and in some cases the next transaction is already started before the - * result is read (for example when using the server mode, when accessing - * metadata methods). 
We can't simply free those values up when starting the - * next transaction, because they would be removed too early. - */ - private LinkedList temporaryResultLobs; - - /** - * The temporary LOBs that need to be removed on commit. - */ - private ArrayList temporaryLobs; - - private Transaction transaction; - private long startStatement = -1; - - public Session(Database database, User user, int id) { - this.database = database; - this.queryTimeout = database.getSettings().maxQueryTimeout; - this.queryCacheSize = database.getSettings().queryCacheSize; - this.undoLog = new UndoLog(this); - this.user = user; - this.id = id; - Setting setting = database.findSetting( - SetTypes.getTypeName(SetTypes.DEFAULT_LOCK_TIMEOUT)); - this.lockTimeout = setting == null ? - Constants.INITIAL_LOCK_TIMEOUT : setting.getIntValue(); - this.currentSchemaName = Constants.SCHEMA_MAIN; - } - - @Override - public ArrayList getClusterServers() { - return new ArrayList(); - } - - public boolean setCommitOrRollbackDisabled(boolean x) { - boolean old = commitOrRollbackDisabled; - commitOrRollbackDisabled = x; - return old; - } - - private void initVariables() { - if (variables == null) { - variables = database.newStringMap(); - } - } - - /** - * Set the value of the given variable for this session. - * - * @param name the name of the variable (may not be null) - * @param value the new value (may not be null) - */ - public void setVariable(String name, Value value) { - initVariables(); - modificationId++; - Value old; - if (value == ValueNull.INSTANCE) { - old = variables.remove(name); - } else { - // link LOB values, to make sure we have our own object - value = value.link(database, - LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); - old = variables.put(name, value); - } - if (old != null) { - // close the old value (in case it is a lob) - old.unlink(database); - old.close(); - } - } - - /** - * Get the value of the specified user defined variable. 
This method always - * returns a value; it returns ValueNull.INSTANCE if the variable doesn't - * exist. - * - * @param name the variable name - * @return the value, or NULL - */ - public Value getVariable(String name) { - initVariables(); - Value v = variables.get(name); - return v == null ? ValueNull.INSTANCE : v; - } - - /** - * Get the list of variable names that are set for this session. - * - * @return the list of names - */ - public String[] getVariableNames() { - if (variables == null) { - return new String[0]; - } - String[] list = new String[variables.size()]; - variables.keySet().toArray(list); - return list; - } - - /** - * Get the local temporary table if one exists with that name, or null if - * not. - * - * @param name the table name - * @return the table, or null - */ - public Table findLocalTempTable(String name) { - if (localTempTables == null) { - return null; - } - return localTempTables.get(name); - } - - public ArrayList
    getLocalTempTables() { - if (localTempTables == null) { - return New.arrayList(); - } - return New.arrayList(localTempTables.values()); - } - - /** - * Add a local temporary table to this session. - * - * @param table the table to add - * @throws DbException if a table with this name already exists - */ - public void addLocalTempTable(Table table) { - if (localTempTables == null) { - localTempTables = database.newStringMap(); - } - if (localTempTables.get(table.getName()) != null) { - throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, - table.getSQL()); - } - modificationId++; - localTempTables.put(table.getName(), table); - } - - /** - * Drop and remove the given local temporary table from this session. - * - * @param table the table - */ - public void removeLocalTempTable(Table table) { - modificationId++; - localTempTables.remove(table.getName()); - synchronized (database) { - table.removeChildrenAndResources(this); - } - } + /** + * Whether unquoted identifiers are converted to upper case. + */ + public final boolean databaseToUpper; - /** - * Get the local temporary index if one exists with that name, or null if - * not. - * - * @param name the table name - * @return the table, or null - */ - public Index findLocalTempTableIndex(String name) { - if (localTempTableIndexes == null) { - return null; - } - return localTempTableIndexes.get(name); - } + /** + * Whether unquoted identifiers are converted to lower case. + */ + public final boolean databaseToLower; - public HashMap getLocalTempTableIndexes() { - if (localTempTableIndexes == null) { - return New.hashMap(); - } - return localTempTableIndexes; - } + /** + * Whether all identifiers are case insensitive. + */ + public final boolean caseInsensitiveIdentifiers; - /** - * Add a local temporary index to this session. 
- * - * @param index the index to add - * @throws DbException if a index with this name already exists - */ - public void addLocalTempTableIndex(Index index) { - if (localTempTableIndexes == null) { - localTempTableIndexes = database.newStringMap(); - } - if (localTempTableIndexes.get(index.getName()) != null) { - throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, - index.getSQL()); + /** + * Creates new instance of static settings. + * + * @param databaseToUpper + * whether unquoted identifiers are converted to upper case + * @param databaseToLower + * whether unquoted identifiers are converted to lower case + * @param caseInsensitiveIdentifiers + * whether all identifiers are case insensitive + */ + public StaticSettings(boolean databaseToUpper, boolean databaseToLower, boolean caseInsensitiveIdentifiers) { + this.databaseToUpper = databaseToUpper; + this.databaseToLower = databaseToLower; + this.caseInsensitiveIdentifiers = caseInsensitiveIdentifiers; } - localTempTableIndexes.put(index.getName(), index); - } - /** - * Drop and remove the given local temporary index from this session. - * - * @param index the index - */ - public void removeLocalTempTableIndex(Index index) { - if (localTempTableIndexes != null) { - localTempTableIndexes.remove(index.getName()); - synchronized (database) { - index.removeChildrenAndResources(this); - } - } } /** - * Get the local temporary constraint if one exists with that name, or - * null if not. - * - * @param name the constraint name - * @return the constraint, or null + * Dynamic settings. */ - public Constraint findLocalTempTableConstraint(String name) { - if (localTempTableConstraints == null) { - return null; - } - return localTempTableConstraints.get(name); - } + public static final class DynamicSettings { - /** - * Get the map of constraints for all constraints on local, temporary - * tables, if any. The map's keys are the constraints' names. 
- * - * @return the map of constraints, or null - */ - public HashMap getLocalTempTableConstraints() { - if (localTempTableConstraints == null) { - return New.hashMap(); - } - return localTempTableConstraints; - } + /** + * The database mode. + */ + public final Mode mode; - /** - * Add a local temporary constraint to this session. - * - * @param constraint the constraint to add - * @throws DbException if a constraint with the same name already exists - */ - public void addLocalTempTableConstraint(Constraint constraint) { - if (localTempTableConstraints == null) { - localTempTableConstraints = database.newStringMap(); - } - String name = constraint.getName(); - if (localTempTableConstraints.get(name) != null) { - throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, - constraint.getSQL()); - } - localTempTableConstraints.put(name, constraint); - } + /** + * The current time zone. + */ + public final TimeZoneProvider timeZone; - /** - * Drop and remove the given local temporary constraint from this session. - * - * @param constraint the constraint - */ - void removeLocalTempTableConstraint(Constraint constraint) { - if (localTempTableConstraints != null) { - localTempTableConstraints.remove(constraint.getName()); - synchronized (database) { - constraint.removeChildrenAndResources(this); - } + /** + * Creates new instance of dynamic settings. 
+ * + * @param mode + * the database mode + * @param timeZone + * the current time zone + */ + public DynamicSettings(Mode mode, TimeZoneProvider timeZone) { + this.mode = mode; + this.timeZone = timeZone; } - } - @Override - public boolean getAutoCommit() { - return autoCommit; } - public User getUser() { - return user; - } + private ArrayList sessionState; - @Override - public void setAutoCommit(boolean b) { - autoCommit = b; - } + boolean sessionStateChanged; - public int getLockTimeout() { - return lockTimeout; - } + private boolean sessionStateUpdating; - public void setLockTimeout(int lockTimeout) { - this.lockTimeout = lockTimeout; - } + volatile StaticSettings staticSettings; - @Override - public synchronized CommandInterface prepareCommand(String sql, - int fetchSize) { - return prepareLocal(sql); + Session() { } /** - * Parse and prepare the given SQL statement. This method also checks the - * rights. + * Get the list of the cluster servers for this session. * - * @param sql the SQL statement - * @return the prepared statement + * @return A list of "ip:port" strings for the cluster servers in this + * session. */ - public Prepared prepare(String sql) { - return prepare(sql, false); - } + public abstract ArrayList getClusterServers(); /** - * Parse and prepare the given SQL statement. + * Parse a command and prepare it for execution. * * @param sql the SQL statement - * @param rightsChecked true if the rights have already been checked - * @return the prepared statement + * @param fetchSize the number of rows to fetch in one step + * @return the prepared command */ - public Prepared prepare(String sql, boolean rightsChecked) { - Parser parser = new Parser(this); - parser.setRightsChecked(rightsChecked); - return parser.prepare(sql); - } + public abstract CommandInterface prepareCommand(String sql, int fetchSize); /** - * Parse and prepare the given SQL statement. - * This method also checks if the connection has been closed. 
- * - * @param sql the SQL statement - * @return the prepared statement + * Roll back pending transactions and close the session. */ - public Command prepareLocal(String sql) { - if (closed) { - throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, - "session closed"); - } - Command command; - if (queryCacheSize > 0) { - if (queryCache == null) { - queryCache = SmallLRUCache.newInstance(queryCacheSize); - modificationMetaID = database.getModificationMetaId(); - } else { - long newModificationMetaID = database.getModificationMetaId(); - if (newModificationMetaID != modificationMetaID) { - queryCache.clear(); - modificationMetaID = newModificationMetaID; - } - command = queryCache.get(sql); - if (command != null && command.canReuse()) { - command.reuse(); - return command; - } - } - } - Parser parser = new Parser(this); - command = parser.prepareCommand(sql); - if (queryCache != null) { - if (command.isCacheable()) { - queryCache.put(sql, command); - } - } - return command; - } - - public Database getDatabase() { - return database; - } - - @Override - public int getPowerOffCount() { - return database.getPowerOffCount(); - } - @Override - public void setPowerOffCount(int count) { - database.setPowerOffCount(count); - } + public abstract void close(); /** - * Commit the current transaction. If the statement was not a data - * definition statement, and if there are temporary tables that should be - * dropped or truncated at commit, this is done as well. 
+ * Get the trace object * - * @param ddl if the statement was a data definition statement + * @return the trace object */ - public void commit(boolean ddl) { - checkCommitRollback(); - currentTransactionName = null; - transactionStart = 0; - if (transaction != null) { - // increment the data mod count, so that other sessions - // see the changes - // TODO should not rely on locking - if (locks.size() > 0) { - for (int i = 0, size = locks.size(); i < size; i++) { - Table t = locks.get(i); - if (t instanceof MVTable) { - ((MVTable) t).commit(); - } - } - } - transaction.commit(); - transaction = null; - } - if (containsUncommitted()) { - // need to commit even if rollback is not possible - // (create/drop table and so on) - database.commit(this); - } - removeTemporaryLobs(true); - if (undoLog.size() > 0) { - // commit the rows when using MVCC - if (database.isMultiVersion()) { - ArrayList rows = New.arrayList(); - synchronized (database) { - while (undoLog.size() > 0) { - UndoLogRecord entry = undoLog.getLast(); - entry.commit(); - rows.add(entry.getRow()); - undoLog.removeLast(false); - } - for (int i = 0, size = rows.size(); i < size; i++) { - Row r = rows.get(i); - r.commit(); - } - } - } - undoLog.clear(); - } - if (!ddl) { - // do not clean the temp tables if the last command was a - // create/drop - cleanTempTables(false); - if (autoCommitAtTransactionEnd) { - autoCommit = true; - autoCommitAtTransactionEnd = false; - } - } - endTransaction(); - } - - private void removeTemporaryLobs(boolean onTimeout) { - if (temporaryLobs != null) { - for (Value v : temporaryLobs) { - if (!v.isLinked()) { - v.close(); - } - } - temporaryLobs.clear(); - } - if (temporaryResultLobs != null && temporaryResultLobs.size() > 0) { - long keepYoungerThan = System.currentTimeMillis() - - database.getSettings().lobTimeout; - while (temporaryResultLobs.size() > 0) { - TimeoutValue tv = temporaryResultLobs.getFirst(); - if (onTimeout && tv.created >= keepYoungerThan) { - break; - } - 
Value v = temporaryResultLobs.removeFirst().value; - if (!v.isLinked()) { - v.close(); - } - } - } - } - - private void checkCommitRollback() { - if (commitOrRollbackDisabled && locks.size() > 0) { - throw DbException.get(ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED); - } - } - - private void endTransaction() { - if (unlinkLobMap != null && unlinkLobMap.size() > 0) { - if (database.getMvStore() == null) { - // need to flush the transaction log, because we can't unlink - // lobs if the commit record is not written - database.flush(); - } - for (Value v : unlinkLobMap.values()) { - v.unlink(database); - v.close(); - } - unlinkLobMap = null; - } - unlockAll(); - } - - /** - * Fully roll back the current transaction. - */ - public void rollback() { - checkCommitRollback(); - currentTransactionName = null; - boolean needCommit = false; - if (undoLog.size() > 0) { - rollbackTo(null, false); - needCommit = true; - } - if (transaction != null) { - rollbackTo(null, false); - needCommit = true; - // rollback stored the undo operations in the transaction - // committing will end the transaction - transaction.commit(); - transaction = null; - } - if (locks.size() > 0 || needCommit) { - database.commit(this); - } - cleanTempTables(false); - if (autoCommitAtTransactionEnd) { - autoCommit = true; - autoCommitAtTransactionEnd = false; - } - endTransaction(); - } + public abstract Trace getTrace(); /** - * Partially roll back the current transaction. + * Check if close was called. * - * @param savepoint the savepoint to which should be rolled back - * @param trimToSize if the list should be trimmed + * @return if the session has been closed */ - public void rollbackTo(Savepoint savepoint, boolean trimToSize) { - int index = savepoint == null ? 0 : savepoint.logIndex; - while (undoLog.size() > index) { - UndoLogRecord entry = undoLog.getLast(); - entry.undo(this); - undoLog.removeLast(trimToSize); - } - if (transaction != null) { - long savepointId = savepoint == null ? 
0 : savepoint.transactionSavepoint; - HashMap tableMap = - database.getMvStore().getTables(); - Iterator it = transaction.getChanges(savepointId); - while (it.hasNext()) { - Change c = it.next(); - MVTable t = tableMap.get(c.mapName); - if (t != null) { - long key = ((ValueLong) c.key).getLong(); - ValueArray value = (ValueArray) c.value; - short op; - Row row; - if (value == null) { - op = UndoLogRecord.INSERT; - row = t.getRow(this, key); - } else { - op = UndoLogRecord.DELETE; - row = new Row(value.getList(), Row.MEMORY_CALCULATE); - } - row.setKey(key); - UndoLogRecord log = new UndoLogRecord(t, op, row); - log.undo(this); - } - } - } - if (savepoints != null) { - String[] names = new String[savepoints.size()]; - savepoints.keySet().toArray(names); - for (String name : names) { - Savepoint sp = savepoints.get(name); - int savepointIndex = sp.logIndex; - if (savepointIndex > index) { - savepoints.remove(name); - } - } - } - } - - @Override - public boolean hasPendingTransaction() { - return undoLog.size() > 0; - } + public abstract boolean isClosed(); /** - * Create a savepoint to allow rolling back to this state. + * Get the data handler object. * - * @return the savepoint + * @return the data handler */ - public Savepoint setSavepoint() { - Savepoint sp = new Savepoint(); - sp.logIndex = undoLog.size(); - if (database.getMvStore() != null) { - sp.transactionSavepoint = getStatementSavepoint(); - } - return sp; - } - - public int getId() { - return id; - } - - @Override - public void cancel() { - cancelAt = System.currentTimeMillis(); - } - - @Override - public void close() { - if (!closed) { - try { - database.checkPowerOff(); - removeTemporaryLobs(false); - cleanTempTables(true); - undoLog.clear(); - database.removeSession(this); - } finally { - closed = true; - } - } - } - - /** - * Add a lock for the given table. The object is unlocked on commit or - * rollback. 
- * - * @param table the table that is locked - */ - public void addLock(Table table) { - if (SysProperties.CHECK) { - if (locks.contains(table)) { - DbException.throwInternalError(); - } - } - locks.add(table); - } + public abstract DataHandler getDataHandler(); /** - * Add an undo log entry to this session. + * Check whether this session has a pending transaction. * - * @param table the table - * @param operation the operation type (see {@link UndoLogRecord}) - * @param row the row + * @return true if it has */ - public void log(Table table, short operation, Row row) { - if (table.isMVStore()) { - return; - } - if (undoLogEnabled) { - UndoLogRecord log = new UndoLogRecord(table, operation, row); - // called _after_ the row was inserted successfully into the table, - // otherwise rollback will try to rollback a not-inserted row - if (SysProperties.CHECK) { - int lockMode = database.getLockMode(); - if (lockMode != Constants.LOCK_MODE_OFF && - !database.isMultiVersion()) { - String tableType = log.getTable().getTableType(); - if (locks.indexOf(log.getTable()) < 0 - && !Table.TABLE_LINK.equals(tableType) - && !Table.EXTERNAL_TABLE_ENGINE.equals(tableType)) { - DbException.throwInternalError(); - } - } - } - undoLog.add(log); - } else { - if (database.isMultiVersion()) { - // see also UndoLogRecord.commit - ArrayList indexes = table.getIndexes(); - for (int i = 0, size = indexes.size(); i < size; i++) { - Index index = indexes.get(i); - index.commit(operation, row); - } - row.commit(); - } - } - } + public abstract boolean hasPendingTransaction(); /** - * Unlock all read locks. This is done if the transaction isolation mode is - * READ_COMMITTED. + * Cancel the current or next command (called when closing a connection). 
*/ - public void unlockReadLocks() { - if (database.isMultiVersion()) { - // MVCC: keep shared locks (insert / update / delete) - return; - } - // locks is modified in the loop - for (int i = 0; i < locks.size(); i++) { - Table t = locks.get(i); - if (!t.isLockedExclusively()) { - synchronized (database) { - t.unlock(this); - locks.remove(i); - } - i--; - } - } - } + public abstract void cancel(); /** - * Unlock just this table. + * Check if this session is in auto-commit mode. * - * @param t the table to unlock + * @return true if the session is in auto-commit mode */ - void unlock(Table t) { - locks.remove(t); - } - - private void unlockAll() { - if (SysProperties.CHECK) { - if (undoLog.size() > 0) { - DbException.throwInternalError(); - } - } - if (locks.size() > 0) { - // don't use the enhanced for loop to save memory - for (int i = 0, size = locks.size(); i < size; i++) { - Table t = locks.get(i); - t.unlock(this); - } - locks.clear(); - } - savepoints = null; - sessionStateChanged = true; - } - - private void cleanTempTables(boolean closeSession) { - if (localTempTables != null && localTempTables.size() > 0) { - synchronized (database) { - for (Table table : New.arrayList(localTempTables.values())) { - if (closeSession || table.getOnCommitDrop()) { - modificationId++; - table.setModified(); - localTempTables.remove(table.getName()); - table.removeChildrenAndResources(this); - if (closeSession) { - // need to commit, otherwise recovery might - // ignore the table removal - database.commit(this); - } - } else if (table.getOnCommitTruncate()) { - table.truncate(this); - } - } - } - } - } - - public Random getRandom() { - if (random == null) { - random = new Random(); - } - return random; - } - - @Override - public Trace getTrace() { - if (trace != null && !closed) { - return trace; - } - String traceModuleName = Trace.JDBC + "[" + id + "]"; - if (closed) { - return new TraceSystem(null).getTrace(traceModuleName); - } - trace = database.getTrace(traceModuleName); 
- return trace; - } - - public void setLastIdentity(Value last) { - this.lastIdentity = last; - this.lastScopeIdentity = last; - } - - public Value getLastIdentity() { - return lastIdentity; - } - - public void setLastScopeIdentity(Value last) { - this.lastScopeIdentity = last; - } - - public Value getLastScopeIdentity() { - return lastScopeIdentity; - } + public abstract boolean getAutoCommit(); /** - * Called when a log entry for this session is added. The session keeps - * track of the first entry in the transaction log that is not yet - * committed. + * Set the auto-commit mode. This call doesn't commit the current + * transaction. * - * @param logId the transaction log id - * @param pos the position of the log entry in the transaction log - */ - public void addLogPos(int logId, int pos) { - if (firstUncommittedLog == Session.LOG_WRITTEN) { - firstUncommittedLog = logId; - firstUncommittedPos = pos; - } - } - - public int getFirstUncommittedLog() { - return firstUncommittedLog; - } - - /** - * This method is called after the transaction log has written the commit - * entry for this session. + * @param autoCommit the new value */ - void setAllCommitted() { - firstUncommittedLog = Session.LOG_WRITTEN; - firstUncommittedPos = Session.LOG_WRITTEN; - } + public abstract void setAutoCommit(boolean autoCommit); /** - * Whether the session contains any uncommitted changes. + * Add a temporary LOB, which is closed when the session commits. * - * @return true if yes + * @param v the value + * @return the specified value */ - public boolean containsUncommitted() { - if (database.getMvStore() != null) { - return transaction != null; - } - return firstUncommittedLog != Session.LOG_WRITTEN; - } + public abstract ValueLob addTemporaryLob(ValueLob v); /** - * Create a savepoint that is linked to the current log position. + * Check if this session is remote or embedded. 
* - * @param name the savepoint name + * @return true if this session is remote */ - public void addSavepoint(String name) { - if (savepoints == null) { - savepoints = database.newStringMap(); - } - Savepoint sp = new Savepoint(); - sp.logIndex = undoLog.size(); - if (database.getMvStore() != null) { - sp.transactionSavepoint = getStatementSavepoint(); - } - savepoints.put(name, sp); - } + public abstract boolean isRemote(); /** - * Undo all operations back to the log position of the given savepoint. + * Set current schema. * - * @param name the savepoint name + * @param schema the schema name */ - public void rollbackToSavepoint(String name) { - checkCommitRollback(); - if (savepoints == null) { - throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); - } - Savepoint savepoint = savepoints.get(name); - if (savepoint == null) { - throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); - } - rollbackTo(savepoint, false); - } + public abstract void setCurrentSchemaName(String schema); /** - * Prepare the given transaction. + * Get current schema. * - * @param transactionName the name of the transaction + * @return the current schema name */ - public void prepareCommit(String transactionName) { - if (transaction != null) { - database.prepareCommit(this, transactionName); - } - if (containsUncommitted()) { - // need to commit even if rollback is not possible (create/drop - // table and so on) - database.prepareCommit(this, transactionName); - } - currentTransactionName = transactionName; - } + public abstract String getCurrentSchemaName(); /** - * Commit or roll back the given transaction. + * Sets the network connection information if possible. 
* - * @param transactionName the name of the transaction - * @param commit true for commit, false for rollback + * @param networkConnectionInfo the network connection information */ - public void setPreparedTransaction(String transactionName, boolean commit) { - if (currentTransactionName != null && - currentTransactionName.equals(transactionName)) { - if (commit) { - commit(false); - } else { - rollback(); - } - } else { - ArrayList list = database - .getInDoubtTransactions(); - int state = commit ? InDoubtTransaction.COMMIT - : InDoubtTransaction.ROLLBACK; - boolean found = false; - if (list != null) { - for (InDoubtTransaction p: list) { - if (p.getTransactionName().equals(transactionName)) { - p.setState(state); - found = true; - break; - } - } - } - if (!found) { - throw DbException.get(ErrorCode.TRANSACTION_NOT_FOUND_1, - transactionName); - } - } - } - - @Override - public boolean isClosed() { - return closed; - } - - public void setThrottle(int throttle) { - this.throttle = throttle; - } + public abstract void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo); /** - * Wait for some time if this session is throttled (slowed down). - */ - public void throttle() { - if (currentCommandStart == 0) { - currentCommandStart = System.currentTimeMillis(); - } - if (throttle == 0) { - return; - } - long time = System.currentTimeMillis(); - if (lastThrottle + Constants.THROTTLE_DELAY > time) { - return; - } - lastThrottle = time + throttle; - try { - Thread.sleep(throttle); - } catch (Exception e) { - // ignore InterruptedException - } - } - - /** - * Set the current command of this session. This is done just before - * executing the statement. + * Returns the isolation level. 
* - * @param command the command + * @return the isolation level */ - public void setCurrentCommand(Command command) { - this.currentCommand = command; - if (queryTimeout > 0 && command != null) { - long now = System.currentTimeMillis(); - currentCommandStart = now; - cancelAt = now + queryTimeout; - } - } + public abstract IsolationLevel getIsolationLevel(); /** - * Check if the current transaction is canceled by calling - * Statement.cancel() or because a session timeout was set and expired. + * Sets the isolation level. * - * @throws DbException if the transaction is canceled + * @param isolationLevel the isolation level to set */ - public void checkCanceled() { - throttle(); - if (cancelAt == 0) { - return; - } - long time = System.currentTimeMillis(); - if (time >= cancelAt) { - cancelAt = 0; - throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); - } - } + public abstract void setIsolationLevel(IsolationLevel isolationLevel); /** - * Get the cancel time. + * Returns static settings. These settings cannot be changed during + * lifecycle of session. * - * @return the time or 0 if not set + * @return static settings */ - public long getCancel() { - return cancelAt; - } - - public Command getCurrentCommand() { - return currentCommand; - } - - public long getCurrentCommandStart() { - return currentCommandStart; - } - - public boolean getAllowLiterals() { - return allowLiterals; - } - - public void setAllowLiterals(boolean b) { - this.allowLiterals = b; - } - - public void setCurrentSchema(Schema schema) { - modificationId++; - this.currentSchemaName = schema.getName(); - } - - public String getCurrentSchemaName() { - return currentSchemaName; - } - - /** - * Create an internal connection. This connection is used when initializing - * triggers, and when calling user defined functions. 
- * - * @param columnList if the url should be 'jdbc:columnlist:connection' - * @return the internal connection - */ - public JdbcConnection createConnection(boolean columnList) { - String url; - if (columnList) { - url = Constants.CONN_URL_COLUMNLIST; - } else { - url = Constants.CONN_URL_INTERNAL; - } - return new JdbcConnection(this, getUser().getName(), url); - } - - @Override - public DataHandler getDataHandler() { - return database; - } - - /** - * Remember that the given LOB value must be un-linked (disconnected from - * the table) at commit. - * - * @param v the value - */ - public void unlinkAtCommit(Value v) { - if (SysProperties.CHECK && !v.isLinked()) { - DbException.throwInternalError(); - } - if (unlinkLobMap == null) { - unlinkLobMap = New.hashMap(); - } - unlinkLobMap.put(v.toString(), v); - } + public abstract StaticSettings getStaticSettings(); /** - * Do not unlink this LOB value at commit any longer. + * Returns dynamic settings. These settings can be changed during lifecycle + * of session. * - * @param v the value + * @return dynamic settings */ - public void unlinkAtCommitStop(Value v) { - if (unlinkLobMap != null) { - unlinkLobMap.remove(v.toString()); - } - } + public abstract DynamicSettings getDynamicSettings(); /** - * Get the next system generated identifiers. The identifier returned does - * not occur within the given SQL statement. + * Returns database meta information. * - * @param sql the SQL statement - * @return the new identifier + * @return database meta information */ - public String getNextSystemIdentifier(String sql) { - String identifier; - do { - identifier = SYSTEM_IDENTIFIER_PREFIX + systemIdentifier++; - } while (sql.contains(identifier)); - return identifier; - } + public abstract DatabaseMeta getDatabaseMeta(); /** - * Add a procedure to this session. + * Returns whether INFORMATION_SCHEMA contains old-style tables. 
* - * @param procedure the procedure to add + * @return whether INFORMATION_SCHEMA contains old-style tables */ - public void addProcedure(Procedure procedure) { - if (procedures == null) { - procedures = database.newStringMap(); - } - procedures.put(procedure.getName(), procedure); - } + public abstract boolean isOldInformationSchema(); /** - * Remove a procedure from this session. - * - * @param name the name of the procedure to remove - */ - public void removeProcedure(String name) { - if (procedures != null) { - procedures.remove(name); - } - } - - /** - * Get the procedure with the given name, or null - * if none exists. - * - * @param name the procedure name - * @return the procedure or null + * Re-create the session state using the stored sessionState list. */ - public Procedure getProcedure(String name) { - if (procedures == null) { - return null; - } - return procedures.get(name); - } - - public void setSchemaSearchPath(String[] schemas) { - modificationId++; - this.schemaSearchPath = schemas; - } - - public String[] getSchemaSearchPath() { - return schemaSearchPath; - } - - @Override - public int hashCode() { - return serialId; - } - - @Override - public String toString() { - return "#" + serialId + " (user: " + user.getName() + ")"; - } - - public void setUndoLogEnabled(boolean b) { - this.undoLogEnabled = b; - } - - public void setRedoLogBinary(boolean b) { - this.redoLogBinary = b; - } - - public boolean isUndoLogEnabled() { - return undoLogEnabled; - } - - /** - * Begin a transaction. - */ - public void begin() { - autoCommitAtTransactionEnd = true; - autoCommit = false; - } - - public long getSessionStart() { - return sessionStart; - } - - public long getTransactionStart() { - if (transactionStart == 0) { - transactionStart = System.currentTimeMillis(); - } - return transactionStart; - } - - public Table[] getLocks() { - // copy the data without synchronizing - ArrayList
    copy = New.arrayList(); - for (int i = 0; i < locks.size(); i++) { + void recreateSessionState() { + if (sessionState != null && !sessionState.isEmpty()) { + sessionStateUpdating = true; try { - copy.add(locks.get(i)); - } catch (Exception e) { - // ignore - break; - } - } - Table[] list = new Table[copy.size()]; - copy.toArray(list); - return list; - } - - /** - * Wait if the exclusive mode has been enabled for another session. This - * method returns as soon as the exclusive mode has been disabled. - */ - public void waitIfExclusiveModeEnabled() { - // Even in exclusive mode, we have to let the LOB session proceed, or we - // will get deadlocks. - if (database.getLobSession() == this) { - return; - } - while (true) { - Session exclusive = database.getExclusiveSession(); - if (exclusive == null || exclusive == this) { - break; - } - if (Thread.holdsLock(exclusive)) { - // if another connection is used within the connection - break; - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - // ignore + for (String sql : sessionState) { + CommandInterface ci = prepareCommand(sql, Integer.MAX_VALUE); + ci.executeUpdate(null); + } + } finally { + sessionStateUpdating = false; + sessionStateChanged = false; } } } /** - * Remember the result set and close it as soon as the transaction is - * committed (if it needs to be closed). This is done to delete temporary - * files as soon as possible, and free object ids of temporary tables. - * - * @param result the temporary result set + * Read the session state if necessary. 
*/ - public void addTemporaryResult(LocalResult result) { - if (!result.needToClose()) { + public void readSessionState() { + if (!sessionStateChanged || sessionStateUpdating) { return; } - if (temporaryResults == null) { - temporaryResults = New.hashSet(); - } - if (temporaryResults.size() < 100) { - // reference at most 100 result sets to avoid memory problems - temporaryResults.add(result); - } - } - - private void closeTemporaryResults() { - if (temporaryResults != null) { - for (LocalResult result : temporaryResults) { - result.close(); - } - temporaryResults = null; - } - } - - public void setQueryTimeout(int queryTimeout) { - int max = database.getSettings().maxQueryTimeout; - if (max != 0 && (max < queryTimeout || queryTimeout == 0)) { - // the value must be at most max - queryTimeout = max; - } - this.queryTimeout = queryTimeout; - // must reset the cancel at here, - // otherwise it is still used - this.cancelAt = 0; - } - - public int getQueryTimeout() { - return queryTimeout; - } - - /** - * Set the table this session is waiting for, and the thread that is - * waiting. 
- * - * @param waitForLock the table - * @param waitForLockThread the current thread (the one that is waiting) - */ - public void setWaitForLock(Table waitForLock, Thread waitForLockThread) { - this.waitForLock = waitForLock; - this.waitForLockThread = waitForLockThread; - } - - public Table getWaitForLock() { - return waitForLock; - } - - public Thread getWaitForLockThread() { - return waitForLockThread; - } - - public int getModificationId() { - return modificationId; - } - - @Override - public boolean isReconnectNeeded(boolean write) { - while (true) { - boolean reconnect = database.isReconnectNeeded(); - if (reconnect) { - return true; - } - if (write) { - if (database.beforeWriting()) { - return false; - } - } else { - return false; - } - } - } - - @Override - public void afterWriting() { - database.afterWriting(); - } - - @Override - public SessionInterface reconnect(boolean write) { - readSessionState(); - close(); - Session newSession = Engine.getInstance().createSession(connectionInfo); - newSession.sessionState = sessionState; - newSession.recreateSessionState(); - if (write) { - while (!newSession.database.beforeWriting()) { - // wait until we are allowed to write - } - } - return newSession; - } - - public void setConnectionInfo(ConnectionInfo ci) { - connectionInfo = ci; - } - - public Value getTransactionId() { - if (database.getMvStore() != null) { - if (transaction == null) { - return ValueNull.INSTANCE; - } - return ValueString.get(Long.toString(getTransaction().getId())); + sessionStateChanged = false; + sessionState = Utils.newSmallArrayList(); + CommandInterface ci = prepareCommand(!isOldInformationSchema() + ? 
"SELECT STATE_COMMAND FROM INFORMATION_SCHEMA.SESSION_STATE" + : "SELECT SQL FROM INFORMATION_SCHEMA.SESSION_STATE", Integer.MAX_VALUE); + ResultInterface result = ci.executeQuery(0, false); + while (result.next()) { + sessionState.add(result.currentRow()[0].getString()); } - if (!database.isPersistent()) { - return ValueNull.INSTANCE; - } - if (undoLog.size() == 0) { - return ValueNull.INSTANCE; - } - return ValueString.get(firstUncommittedLog + "-" + firstUncommittedPos + - "-" + id); } /** - * Get the next object id. + * Sets this session as thread local session, if this session is a local + * session. * - * @return the next object id + * @return old thread local session, or {@code null} */ - public int nextObjectId() { - return objectId++; - } - - public boolean isRedoLogBinaryEnabled() { - return redoLogBinary; + public Session setThreadLocalSession() { + return null; } /** - * Get the transaction to use for this session. + * Resets old thread local session. * - * @return the transaction - */ - public Transaction getTransaction() { - if (transaction == null) { - if (database.getMvStore().getStore().isClosed()) { - database.shutdownImmediately(); - throw DbException.get(ErrorCode.DATABASE_IS_CLOSED); - } - transaction = database.getMvStore().getTransactionStore().begin(); - startStatement = -1; - } - return transaction; - } - - public long getStatementSavepoint() { - if (startStatement == -1) { - startStatement = getTransaction().setSavepoint(); - } - return startStatement; - } - - /** - * Start a new statement within a transaction. - */ - public void startStatementWithinTransaction() { - startStatement = -1; - } - - /** - * Mark the statement as completed. This also close all temporary result - * set, and deletes all temporary files held by the result sets. 
+ * @param oldSession + * the old thread local session, or {@code null} */ - public void endStatement() { - startStatement = -1; - closeTemporaryResults(); - } - - @Override - public void addTemporaryLob(Value v) { - if (v.getTableId() == LobStorageFrontend.TABLE_RESULT) { - if (temporaryResultLobs == null) { - temporaryResultLobs = new LinkedList(); - } - temporaryResultLobs.add(new TimeoutValue(v)); - } else { - if (temporaryLobs == null) { - temporaryLobs = new ArrayList(); - } - temporaryLobs.add(v); - } - } - - /** - * Represents a savepoint (a position in a transaction to where one can roll - * back to). - */ - public static class Savepoint { - - /** - * The undo log index. - */ - int logIndex; - - /** - * The transaction savepoint id. - */ - long transactionSavepoint; - } - - /** - * An object with a timeout. - */ - public static class TimeoutValue { - - /** - * The time when this object was created. - */ - final long created = System.currentTimeMillis(); - - /** - * The value. - */ - final Value value; - - TimeoutValue(Value v) { - this.value = v; - } - + public void resetThreadLocalSession(Session oldSession) { } } diff --git a/h2/src/main/org/h2/engine/SessionFactory.java b/h2/src/main/org/h2/engine/SessionFactory.java deleted file mode 100644 index 045254eb07..0000000000 --- a/h2/src/main/org/h2/engine/SessionFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.sql.SQLException; - -/** - * A class that implements this interface can create new database sessions. This - * exists so that the JDBC layer (the client) can be compiled without dependency - * to the core database engine. - */ -interface SessionFactory { - - /** - * Create a new session. 
- * - * @param ci the connection parameters - * @return the new session - */ - SessionInterface createSession(ConnectionInfo ci) throws SQLException; - -} diff --git a/h2/src/main/org/h2/engine/SessionInterface.java b/h2/src/main/org/h2/engine/SessionInterface.java deleted file mode 100644 index 876e9e78b4..0000000000 --- a/h2/src/main/org/h2/engine/SessionInterface.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.io.Closeable; -import java.util.ArrayList; - -import org.h2.command.CommandInterface; -import org.h2.message.Trace; -import org.h2.store.DataHandler; -import org.h2.value.Value; - -/** - * A local or remote session. A session represents a database connection. - */ -public interface SessionInterface extends Closeable { - - /** - * Get the list of the cluster servers for this session. - * - * @return A list of "ip:port" strings for the cluster servers in this - * session. - */ - ArrayList getClusterServers(); - - /** - * Parse a command and prepare it for execution. - * - * @param sql the SQL statement - * @param fetchSize the number of rows to fetch in one step - * @return the prepared command - */ - CommandInterface prepareCommand(String sql, int fetchSize); - - /** - * Roll back pending transactions and close the session. - */ - @Override - void close(); - - /** - * Get the trace object - * - * @return the trace object - */ - Trace getTrace(); - - /** - * Check if close was called. - * - * @return if the session has been closed - */ - boolean isClosed(); - - /** - * Get the number of disk operations before power failure is simulated. - * This is used for testing. If not set, 0 is returned - * - * @return the number of operations, or 0 - */ - int getPowerOffCount(); - - /** - * Set the number of disk operations before power failure is simulated. 
- * To disable the countdown, use 0. - * - * @param i the number of operations - */ - void setPowerOffCount(int i); - - /** - * Get the data handler object. - * - * @return the data handler - */ - DataHandler getDataHandler(); - - /** - * Check whether this session has a pending transaction. - * - * @return true if it has - */ - boolean hasPendingTransaction(); - - /** - * Cancel the current or next command (called when closing a connection). - */ - void cancel(); - - /** - * Check if the database changed and therefore reconnecting is required. - * - * @param write if the next operation may be writing - * @return true if reconnecting is required - */ - boolean isReconnectNeeded(boolean write); - - /** - * Close the connection and open a new connection. - * - * @param write if the next operation may be writing - * @return the new connection - */ - SessionInterface reconnect(boolean write); - - /** - * Called after writing has ended. It needs to be called after - * isReconnectNeeded(true) returned false. - */ - void afterWriting(); - - /** - * Check if this session is in auto-commit mode. - * - * @return true if the session is in auto-commit mode - */ - boolean getAutoCommit(); - - /** - * Set the auto-commit mode. This call doesn't commit the current - * transaction. - * - * @param autoCommit the new value - */ - void setAutoCommit(boolean autoCommit); - - /** - * Add a temporary LOB, which is closed when the session commits. - * - * @param v the value - */ - void addTemporaryLob(Value v); - -} diff --git a/h2/src/main/org/h2/engine/SessionLocal.java b/h2/src/main/org/h2/engine/SessionLocal.java new file mode 100644 index 0000000000..8117c628da --- /dev/null +++ b/h2/src/main/org/h2/engine/SessionLocal.java @@ -0,0 +1,2069 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.engine; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.WeakHashMap; +import java.util.concurrent.atomic.AtomicReference; +import org.h2.api.ErrorCode; +import org.h2.api.JavaObjectSerializer; +import org.h2.command.Command; +import org.h2.command.CommandInterface; +import org.h2.command.Parser; +import org.h2.command.Prepared; +import org.h2.command.ddl.Analyze; +import org.h2.constraint.Constraint; +import org.h2.index.Index; +import org.h2.index.ViewIndex; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLocal; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.message.TraceSystem; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.db.MVIndex; +import org.h2.mvstore.db.MVTable; +import org.h2.mvstore.db.Store; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.result.Row; +import org.h2.schema.Schema; +import org.h2.schema.Sequence; +import org.h2.store.DataHandler; +import org.h2.store.InDoubtTransaction; +import org.h2.store.LobStorageFrontend; +import org.h2.table.Table; +import org.h2.util.DateTimeUtils; +import org.h2.util.HasSQL; +import org.h2.util.NetworkConnectionInfo; +import org.h2.util.SmallLRUCache; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.value.Value; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; +import org.h2.value.VersionedValue; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import 
org.h2.value.lob.LobDataInMemory; + +/** + * A session represents an embedded database connection. When using the server + * mode, this object resides on the server side and communicates with a + * SessionRemote object on the client side. + */ +public final class SessionLocal extends Session implements TransactionStore.RollbackListener { + + public enum State { INIT, RUNNING, BLOCKED, SLEEP, THROTTLED, SUSPENDED, CLOSED } + + private static final class SequenceAndPrepared { + + private final Sequence sequence; + + private final Prepared prepared; + + SequenceAndPrepared(Sequence sequence, Prepared prepared) { + this.sequence = sequence; + this.prepared = prepared; + } + + @Override + public int hashCode() { + return 31 * (31 + prepared.hashCode()) + sequence.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != SequenceAndPrepared.class) { + return false; + } + SequenceAndPrepared other = (SequenceAndPrepared) obj; + return sequence == other.sequence && prepared == other.prepared; + } + + } + + private static final class RowNumberAndValue { + + long rowNumber; + + Value nextValue; + + RowNumberAndValue(long rowNumber, Value nextValue) { + this.rowNumber = rowNumber; + this.nextValue = nextValue; + } + + } + + /** + * The prefix of generated identifiers. It may not have letters, because + * they are case sensitive. + */ + private static final String SYSTEM_IDENTIFIER_PREFIX = "_"; + private static int nextSerialId; + + /** + * Thread local session for comparison operations between different data types. 
+ */ + private static final ThreadLocal THREAD_LOCAL_SESSION = new ThreadLocal<>(); + + static Session getThreadLocalSession() { + Session session = THREAD_LOCAL_SESSION.get(); + if (session == null) { + THREAD_LOCAL_SESSION.remove(); + } + return session; + } + + private final int serialId = nextSerialId++; + private final Database database; + private final User user; + private final int id; + + private NetworkConnectionInfo networkConnectionInfo; + + private final ArrayList
    locks = Utils.newSmallArrayList(); + private boolean autoCommit = true; + private Random random; + private int lockTimeout; + + private HashMap nextValueFor; + private WeakHashMap currentValueFor; + private Value lastIdentity = ValueNull.INSTANCE; + + private HashMap savepoints; + private HashMap localTempTables; + private HashMap localTempTableIndexes; + private HashMap localTempTableConstraints; + private int throttleMs; + private long lastThrottleNs; + private Command currentCommand; + private boolean allowLiterals; + private String currentSchemaName; + private String[] schemaSearchPath; + private Trace trace; + private HashMap removeLobMap; + private int systemIdentifier; + private HashMap procedures; + private boolean autoCommitAtTransactionEnd; + private String currentTransactionName; + private volatile long cancelAtNs; + private final ValueTimestampTimeZone sessionStart; + private Instant commandStartOrEnd; + private ValueTimestampTimeZone currentTimestamp; + private HashMap variables; + private int queryTimeout; + private boolean commitOrRollbackDisabled; + private Table waitForLock; + private Thread waitForLockThread; + private int modificationId; + private int objectId; + private final int queryCacheSize; + private SmallLRUCache queryCache; + private long modificationMetaID = -1; + private int createViewLevel; + private volatile SmallLRUCache viewIndexCache; + private HashMap subQueryIndexCache; + private boolean lazyQueryExecution; + + private BitSet nonKeywords; + + private TimeZoneProvider timeZone; + + /** + * Tables marked for ANALYZE after the current transaction is committed. + * Prevents us calling ANALYZE repeatedly in large transactions. + */ + private HashSet
    tablesToAnalyze; + + /** + * Temporary LOBs from result sets. Those are kept for some time. The + * problem is that transactions are committed before the result is returned, + * and in some cases the next transaction is already started before the + * result is read (for example when using the server mode, when accessing + * metadata methods). We can't simply free those values up when starting the + * next transaction, because they would be removed too early. + */ + private LinkedList temporaryResultLobs; + + /** + * The temporary LOBs that need to be removed on commit. + */ + private ArrayList temporaryLobs; + + private Transaction transaction; + private final AtomicReference state = new AtomicReference<>(State.INIT); + private long startStatement = -1; + + /** + * Isolation level. + */ + private IsolationLevel isolationLevel = IsolationLevel.READ_COMMITTED; + + /** + * The snapshot data modification id. If isolation level doesn't allow + * non-repeatable reads the session uses snapshot versions of data. After + * commit or rollback these snapshots are discarded and cached results of + * queries may become invalid. Commit and rollback allocate a new data + * modification id and store it here to forbid usage of older results. + */ + private long snapshotDataModificationId; + + /** + * Set of database object ids to be released at the end of transaction + */ + private BitSet idsToRelease; + + /** + * Whether length in definitions of data types is truncated. + */ + private boolean truncateLargeLength; + + /** + * Whether BINARY is parsed as VARBINARY. + */ + private boolean variableBinary; + + /** + * Whether INFORMATION_SCHEMA contains old-style tables. + */ + private boolean oldInformationSchema; + + /** + * Whether commands are executed in quirks mode to support scripts from older versions of H2. 
+ */ + private boolean quirksMode; + + public SessionLocal(Database database, User user, int id) { + this.database = database; + this.queryTimeout = database.getSettings().maxQueryTimeout; + this.queryCacheSize = database.getSettings().queryCacheSize; + this.user = user; + this.id = id; + this.lockTimeout = database.getLockTimeout(); + Schema mainSchema = database.getMainSchema(); + this.currentSchemaName = mainSchema != null ? mainSchema.getName() + : database.sysIdentifier(Constants.SCHEMA_MAIN); + timeZone = DateTimeUtils.getTimeZone(); + sessionStart = DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd = Instant.now()); + } + + public void setLazyQueryExecution(boolean lazyQueryExecution) { + this.lazyQueryExecution = lazyQueryExecution; + } + + public boolean isLazyQueryExecution() { + return lazyQueryExecution; + } + + /** + * This method is called before and after parsing of view definition and may + * be called recursively. + * + * @param parsingView + * {@code true} if this method is called before parsing of view + * definition, {@code false} if it is called after it. + */ + public void setParsingCreateView(boolean parsingView) { + createViewLevel += parsingView ? 1 : -1; + } + + public boolean isParsingCreateView() { + return createViewLevel != 0; + } + + @Override + public ArrayList getClusterServers() { + return new ArrayList<>(); + } + + public boolean setCommitOrRollbackDisabled(boolean x) { + boolean old = commitOrRollbackDisabled; + commitOrRollbackDisabled = x; + return old; + } + + private void initVariables() { + if (variables == null) { + variables = database.newStringMap(); + } + } + + /** + * Set the value of the given variable for this session. 
+ * + * @param name the name of the variable (may not be null) + * @param value the new value (may not be null) + */ + public void setVariable(String name, Value value) { + initVariables(); + modificationId++; + Value old; + if (value == ValueNull.INSTANCE) { + old = variables.remove(name); + } else { + if (value instanceof ValueLob) { + // link LOB values, to make sure we have our own object + value = ((ValueLob) value).copy(database, LobStorageFrontend.TABLE_ID_SESSION_VARIABLE); + } + old = variables.put(name, value); + } + if (old instanceof ValueLob) { + ((ValueLob) old).remove(); + } + } + + /** + * Get the value of the specified user defined variable. This method always + * returns a value; it returns ValueNull.INSTANCE if the variable doesn't + * exist. + * + * @param name the variable name + * @return the value, or NULL + */ + public Value getVariable(String name) { + initVariables(); + Value v = variables.get(name); + return v == null ? ValueNull.INSTANCE : v; + } + + /** + * Get the list of variable names that are set for this session. + * + * @return the list of names + */ + public String[] getVariableNames() { + if (variables == null) { + return new String[0]; + } + return variables.keySet().toArray(new String[0]); + } + + /** + * Get the local temporary table if one exists with that name, or null if + * not. + * + * @param name the table name + * @return the table, or null + */ + public Table findLocalTempTable(String name) { + if (localTempTables == null) { + return null; + } + return localTempTables.get(name); + } + + public List
    getLocalTempTables() { + if (localTempTables == null) { + return Collections.emptyList(); + } + return new ArrayList<>(localTempTables.values()); + } + + /** + * Add a local temporary table to this session. + * + * @param table the table to add + * @throws DbException if a table with this name already exists + */ + public void addLocalTempTable(Table table) { + if (localTempTables == null) { + localTempTables = database.newStringMap(); + } + if (localTempTables.putIfAbsent(table.getName(), table) != null) { + StringBuilder builder = new StringBuilder(); + table.getSQL(builder, HasSQL.TRACE_SQL_FLAGS).append(" AS "); + Parser.quoteIdentifier(table.getName(), HasSQL.TRACE_SQL_FLAGS); + throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, builder.toString()); + } + modificationId++; + } + + /** + * Drop and remove the given local temporary table from this session. + * + * @param table the table + */ + public void removeLocalTempTable(Table table) { + modificationId++; + if (localTempTables != null) { + localTempTables.remove(table.getName()); + } + synchronized (database) { + table.removeChildrenAndResources(this); + } + } + + /** + * Get the local temporary index if one exists with that name, or null if + * not. + * + * @param name the table name + * @return the table, or null + */ + public Index findLocalTempTableIndex(String name) { + if (localTempTableIndexes == null) { + return null; + } + return localTempTableIndexes.get(name); + } + + public HashMap getLocalTempTableIndexes() { + if (localTempTableIndexes == null) { + return new HashMap<>(); + } + return localTempTableIndexes; + } + + /** + * Add a local temporary index to this session. 
+ * + * @param index the index to add + * @throws DbException if a index with this name already exists + */ + public void addLocalTempTableIndex(Index index) { + if (localTempTableIndexes == null) { + localTempTableIndexes = database.newStringMap(); + } + if (localTempTableIndexes.putIfAbsent(index.getName(), index) != null) { + throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1, index.getTraceSQL()); + } + } + + /** + * Drop and remove the given local temporary index from this session. + * + * @param index the index + */ + public void removeLocalTempTableIndex(Index index) { + if (localTempTableIndexes != null) { + localTempTableIndexes.remove(index.getName()); + synchronized (database) { + index.removeChildrenAndResources(this); + } + } + } + + /** + * Get the local temporary constraint if one exists with that name, or + * null if not. + * + * @param name the constraint name + * @return the constraint, or null + */ + public Constraint findLocalTempTableConstraint(String name) { + if (localTempTableConstraints == null) { + return null; + } + return localTempTableConstraints.get(name); + } + + /** + * Get the map of constraints for all constraints on local, temporary + * tables, if any. The map's keys are the constraints' names. + * + * @return the map of constraints, or null + */ + public HashMap getLocalTempTableConstraints() { + if (localTempTableConstraints == null) { + return new HashMap<>(); + } + return localTempTableConstraints; + } + + /** + * Add a local temporary constraint to this session. 
+ * + * @param constraint the constraint to add + * @throws DbException if a constraint with the same name already exists + */ + public void addLocalTempTableConstraint(Constraint constraint) { + if (localTempTableConstraints == null) { + localTempTableConstraints = database.newStringMap(); + } + String name = constraint.getName(); + if (localTempTableConstraints.putIfAbsent(name, constraint) != null) { + throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraint.getTraceSQL()); + } + } + + /** + * Drop and remove the given local temporary constraint from this session. + * + * @param constraint the constraint + */ + void removeLocalTempTableConstraint(Constraint constraint) { + if (localTempTableConstraints != null) { + localTempTableConstraints.remove(constraint.getName()); + synchronized (database) { + constraint.removeChildrenAndResources(this); + } + } + } + + @Override + public boolean getAutoCommit() { + return autoCommit; + } + + public User getUser() { + return user; + } + + @Override + public void setAutoCommit(boolean b) { + autoCommit = b; + } + + public int getLockTimeout() { + return lockTimeout; + } + + public void setLockTimeout(int lockTimeout) { + this.lockTimeout = lockTimeout; + if (hasTransaction()) { + transaction.setTimeoutMillis(lockTimeout); + } + } + + @Override + public synchronized CommandInterface prepareCommand(String sql, + int fetchSize) { + return prepareLocal(sql); + } + + /** + * Parse and prepare the given SQL statement. This method also checks the + * rights. + * + * @param sql the SQL statement + * @return the prepared statement + */ + public Prepared prepare(String sql) { + return prepare(sql, false, false); + } + + /** + * Parse and prepare the given SQL statement. + * + * @param sql the SQL statement + * @param rightsChecked true if the rights have already been checked + * @param literalsChecked true if the sql string has already been checked + * for literals (only used if ALLOW_LITERALS NONE is set). 
+ * @return the prepared statement + */ + public Prepared prepare(String sql, boolean rightsChecked, boolean literalsChecked) { + Parser parser = new Parser(this); + parser.setRightsChecked(rightsChecked); + parser.setLiteralsChecked(literalsChecked); + return parser.prepare(sql); + } + + /** + * Parse and prepare the given SQL statement. + * This method also checks if the connection has been closed. + * + * @param sql the SQL statement + * @return the prepared statement + */ + public Command prepareLocal(String sql) { + if (isClosed()) { + throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, + "session closed"); + } + Command command; + if (queryCacheSize > 0) { + if (queryCache == null) { + queryCache = SmallLRUCache.newInstance(queryCacheSize); + modificationMetaID = database.getModificationMetaId(); + } else { + long newModificationMetaID = database.getModificationMetaId(); + if (newModificationMetaID != modificationMetaID) { + queryCache.clear(); + modificationMetaID = newModificationMetaID; + } + command = queryCache.get(sql); + if (command != null && command.canReuse()) { + command.reuse(); + return command; + } + } + } + Parser parser = new Parser(this); + try { + command = parser.prepareCommand(sql); + } finally { + // we can't reuse sub-query indexes, so just drop the whole cache + subQueryIndexCache = null; + } + if (queryCache != null) { + if (command.isCacheable()) { + queryCache.put(sql, command); + } + } + return command; + } + + /** + * Arranges for the specified database object id to be released + * at the end of the current transaction. + * @param id to be scheduled + */ + protected void scheduleDatabaseObjectIdForRelease(int id) { + if (idsToRelease == null) { + idsToRelease = new BitSet(); + } + idsToRelease.set(id); + } + + public Database getDatabase() { + return database; + } + + /** + * Commit the current transaction. 
If the statement was not a data + * definition statement, and if there are temporary tables that should be + * dropped or truncated at commit, this is done as well. + * + * @param ddl if the statement was a data definition statement + */ + public void commit(boolean ddl) { + beforeCommitOrRollback(); + if (hasTransaction()) { + try { + markUsedTablesAsUpdated(); + transaction.commit(); + removeTemporaryLobs(true); + endTransaction(); + } finally { + transaction = null; + } + if (!ddl) { + // do not clean the temp tables if the last command was a + // create/drop + cleanTempTables(false); + if (autoCommitAtTransactionEnd) { + autoCommit = true; + autoCommitAtTransactionEnd = false; + } + } + analyzeTables(); + } + } + + private void markUsedTablesAsUpdated() { + // TODO should not rely on locking + if (!locks.isEmpty()) { + for (Table t : locks) { + if (t instanceof MVTable) { + ((MVTable) t).commit(); + } + } + } + } + + private void analyzeTables() { + // On rare occasions it can be called concurrently (i.e. from close()) + // without proper locking, but instead of oversynchronizing + // we just skip this optional operation in such case + if (tablesToAnalyze != null && + Thread.holdsLock(this)) { + // take a local copy and clear because in rare cases we can call + // back into markTableForAnalyze while iterating here + HashSet
    tablesToAnalyzeLocal = tablesToAnalyze; + tablesToAnalyze = null; + int rowCount = getDatabase().getSettings().analyzeSample / 10; + for (Table table : tablesToAnalyzeLocal) { + Analyze.analyzeTable(this, table, rowCount, false); + } + // analyze can lock the meta + database.unlockMeta(this); + // table analysis opens a new transaction(s), + // so we need to commit afterwards whatever leftovers might be + commit(true); + } + } + + private void removeTemporaryLobs(boolean onTimeout) { + if (temporaryLobs != null) { + for (ValueLob v : temporaryLobs) { + if (!v.isLinkedToTable()) { + v.remove(); + } + } + temporaryLobs.clear(); + } + if (temporaryResultLobs != null && !temporaryResultLobs.isEmpty()) { + long keepYoungerThan = System.nanoTime() - database.getSettings().lobTimeout * 1_000_000L; + while (!temporaryResultLobs.isEmpty()) { + TimeoutValue tv = temporaryResultLobs.getFirst(); + if (onTimeout && tv.created - keepYoungerThan >= 0) { + break; + } + ValueLob v = temporaryResultLobs.removeFirst().value; + if (!v.isLinkedToTable()) { + v.remove(); + } + } + } + } + + private void beforeCommitOrRollback() { + if (commitOrRollbackDisabled && !locks.isEmpty()) { + throw DbException.get(ErrorCode.COMMIT_ROLLBACK_NOT_ALLOWED); + } + currentTransactionName = null; + currentTimestamp = null; + database.throwLastBackgroundException(); + } + + private void endTransaction() { + if (removeLobMap != null && !removeLobMap.isEmpty()) { + for (ValueLob v : removeLobMap.values()) { + v.remove(); + } + removeLobMap = null; + } + unlockAll(); + if (idsToRelease != null) { + database.releaseDatabaseObjectIds(idsToRelease); + idsToRelease = null; + } + if (hasTransaction() && !transaction.allowNonRepeatableRead()) { + snapshotDataModificationId = database.getNextModificationDataId(); + } + } + + /** + * Returns the data modification id of transaction's snapshot, or 0 if + * isolation level doesn't use snapshots. 
+ * + * @return the data modification id of transaction's snapshot, or 0 + */ + public long getSnapshotDataModificationId() { + return snapshotDataModificationId; + } + + /** + * Fully roll back the current transaction. + */ + public void rollback() { + beforeCommitOrRollback(); + if (hasTransaction()) { + rollbackTo(null); + } + idsToRelease = null; + cleanTempTables(false); + if (autoCommitAtTransactionEnd) { + autoCommit = true; + autoCommitAtTransactionEnd = false; + } + endTransaction(); + } + + /** + * Partially roll back the current transaction. + * + * @param savepoint the savepoint to which should be rolled back + */ + public void rollbackTo(Savepoint savepoint) { + int index = savepoint == null ? 0 : savepoint.logIndex; + if (hasTransaction()) { + markUsedTablesAsUpdated(); + if (savepoint == null) { + transaction.rollback(); + transaction = null; + } else { + transaction.rollbackToSavepoint(savepoint.transactionSavepoint); + } + } + if (savepoints != null) { + String[] names = savepoints.keySet().toArray(new String[0]); + for (String name : names) { + Savepoint sp = savepoints.get(name); + int savepointIndex = sp.logIndex; + if (savepointIndex > index) { + savepoints.remove(name); + } + } + } + + // Because cache may have captured query result (in Query.lastResult), + // which is based on data from uncommitted transaction., + // It is not valid after rollback, therefore cache has to be cleared. + if (queryCache != null) { + queryCache.clear(); + } + } + + @Override + public boolean hasPendingTransaction() { + return hasTransaction() && transaction.hasChanges() && transaction.getStatus() != Transaction.STATUS_PREPARED; + } + + /** + * Create a savepoint to allow rolling back to this state. 
+ * + * @return the savepoint + */ + public Savepoint setSavepoint() { + Savepoint sp = new Savepoint(); + sp.transactionSavepoint = getStatementSavepoint(); + return sp; + } + + public int getId() { + return id; + } + + @Override + public void cancel() { + cancelAtNs = Utils.currentNanoTime(); + } + + /** + * Cancel the transaction and close the session if needed. + */ + void suspend() { + cancel(); + if (transitionToState(State.SUSPENDED, false) == State.SLEEP) { + close(); + } + } + + @Override + public void close() { + // this is the only operation that can be invoked concurrently + // so, we should prevent double-closure + if (state.getAndSet(State.CLOSED) != State.CLOSED) { + try { + database.throwLastBackgroundException(); + + database.checkPowerOff(); + + // release any open table locks + if (hasPreparedTransaction()) { + if (currentTransactionName != null) { + removeLobMap = null; + } + endTransaction(); + } else { + rollback(); + removeTemporaryLobs(false); + cleanTempTables(true); + commit(true); // temp table removal may have opened new transaction + } + + // Table#removeChildrenAndResources can take the meta lock, + // and we need to unlock before we call removeSession(), which might + // want to take the meta lock using the system session. + database.unlockMeta(this); + } finally { + database.removeSession(this); + } + } + } + + /** + * Register table as locked within current transaction. + * Table is unlocked on commit or rollback. + * It also assumes that table will be modified by transaction. + * + * @param table the table that is locked + */ + public void registerTableAsLocked(Table table) { + if (SysProperties.CHECK) { + if (locks.contains(table)) { + throw DbException.getInternalError(table.toString()); + } + } + locks.add(table); + } + + /** + * Register table as updated within current transaction. + * This is used instead of table locking when lock mode is LOCK_MODE_OFF. 
+ * + * @param table to register + */ + public void registerTableAsUpdated(Table table) { + if (!locks.contains(table)) { + locks.add(table); + } + } + + /** + * Unlock just this table. + * + * @param t the table to unlock + */ + void unlock(Table t) { + locks.remove(t); + } + + + private boolean hasTransaction() { + return transaction != null; + } + + private void unlockAll() { + if (!locks.isEmpty()) { + Table[] array = locks.toArray(new Table[0]); + for (Table t : array) { + if (t != null) { + t.unlock(this); + } + } + locks.clear(); + } + Database.unlockMetaDebug(this); + savepoints = null; + sessionStateChanged = true; + } + + private void cleanTempTables(boolean closeSession) { + if (localTempTables != null && !localTempTables.isEmpty()) { + Iterator
    it = localTempTables.values().iterator(); + while (it.hasNext()) { + Table table = it.next(); + if (closeSession || table.getOnCommitDrop()) { + modificationId++; + table.setModified(); + it.remove(); + // Exception thrown in org.h2.engine.Database.removeMeta + // if line below is missing with TestDeadlock + database.lockMeta(this); + table.removeChildrenAndResources(this); + if (closeSession) { + database.throwLastBackgroundException(); + } + } else if (table.getOnCommitTruncate()) { + table.truncate(this); + } + } + } + } + + public Random getRandom() { + if (random == null) { + random = new Random(); + } + return random; + } + + @Override + public Trace getTrace() { + if (trace != null && !isClosed()) { + return trace; + } + String traceModuleName = "jdbc[" + id + "]"; + if (isClosed()) { + return new TraceSystem(null).getTrace(traceModuleName); + } + trace = database.getTraceSystem().getTrace(traceModuleName); + return trace; + } + + /** + * Returns the next value of the sequence in this session. 
+ * + * @param sequence + * the sequence + * @param prepared + * current prepared command, select, or {@code null} + * @return the next value of the sequence in this session + */ + public Value getNextValueFor(Sequence sequence, Prepared prepared) { + Value value; + Mode mode = database.getMode(); + if (mode.nextValueReturnsDifferentValues || prepared == null) { + value = sequence.getNext(this); + } else { + if (nextValueFor == null) { + nextValueFor = new HashMap<>(); + } + SequenceAndPrepared key = new SequenceAndPrepared(sequence, prepared); + RowNumberAndValue data = nextValueFor.get(key); + long rowNumber = prepared.getCurrentRowNumber(); + if (data != null) { + if (data.rowNumber == rowNumber) { + value = data.nextValue; + } else { + data.nextValue = value = sequence.getNext(this); + data.rowNumber = rowNumber; + } + } else { + value = sequence.getNext(this); + nextValueFor.put(key, new RowNumberAndValue(rowNumber, value)); + } + } + WeakHashMap currentValueFor = this.currentValueFor; + if (currentValueFor == null) { + this.currentValueFor = currentValueFor = new WeakHashMap<>(); + } + currentValueFor.put(sequence, value); + if (mode.takeGeneratedSequenceValue) { + lastIdentity = value; + } + return value; + } + + /** + * Returns the current value of the sequence in this session. 
+ * + * @param sequence + * the sequence + * @return the current value of the sequence in this session + * @throws DbException + * if current value is not defined + */ + public Value getCurrentValueFor(Sequence sequence) { + WeakHashMap currentValueFor = this.currentValueFor; + if (currentValueFor != null) { + Value value = currentValueFor.get(sequence); + if (value != null) { + return value; + } + } + throw DbException.get(ErrorCode.CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1, sequence.getTraceSQL()); + } + + public void setLastIdentity(Value last) { + this.lastIdentity = last; + } + + public Value getLastIdentity() { + return lastIdentity; + } + + /** + * Whether the session contains any uncommitted changes. + * + * @return true if yes + */ + public boolean containsUncommitted() { + return transaction != null && transaction.hasChanges(); + } + + /** + * Create a savepoint that is linked to the current log position. + * + * @param name the savepoint name + */ + public void addSavepoint(String name) { + if (savepoints == null) { + savepoints = database.newStringMap(); + } + savepoints.put(name, setSavepoint()); + } + + /** + * Undo all operations back to the log position of the given savepoint. + * + * @param name the savepoint name + */ + public void rollbackToSavepoint(String name) { + beforeCommitOrRollback(); + Savepoint savepoint; + if (savepoints == null || (savepoint = savepoints.get(name)) == null) { + throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, name); + } + rollbackTo(savepoint); + } + + /** + * Prepare the given transaction. + * + * @param transactionName the name of the transaction + */ + public void prepareCommit(String transactionName) { + if (hasPendingTransaction()) { + // need to commit even if rollback is not possible (create/drop + // table and so on) + database.prepareCommit(this, transactionName); + } + currentTransactionName = transactionName; + } + + /** + * Checks presence of prepared transaction in this session. 
+ * + * @return {@code true} if there is a prepared transaction, + * {@code false} otherwise + */ + public boolean hasPreparedTransaction() { + return currentTransactionName != null; + } + + /** + * Commit or roll back the given transaction. + * + * @param transactionName the name of the transaction + * @param commit true for commit, false for rollback + */ + public void setPreparedTransaction(String transactionName, boolean commit) { + if (hasPreparedTransaction() && currentTransactionName.equals(transactionName)) { + if (commit) { + commit(false); + } else { + rollback(); + } + } else { + ArrayList list = database.getInDoubtTransactions(); + int state = commit ? InDoubtTransaction.COMMIT : InDoubtTransaction.ROLLBACK; + boolean found = false; + for (InDoubtTransaction p: list) { + if (p.getTransactionName().equals(transactionName)) { + p.setState(state); + found = true; + break; + } + } + if (!found) { + throw DbException.get(ErrorCode.TRANSACTION_NOT_FOUND_1, + transactionName); + } + } + } + + @Override + public boolean isClosed() { + return state.get() == State.CLOSED; + } + + public boolean isOpen() { + State current = state.get(); + checkSuspended(current); + return current != State.CLOSED; + } + + public void setThrottle(int throttle) { + this.throttleMs = throttle; + } + + /** + * Wait for some time if this session is throttled (slowed down). + */ + public void throttle() { + if (throttleMs == 0) { + return; + } + long time = System.nanoTime(); + if (lastThrottleNs != 0L && time - lastThrottleNs < Constants.THROTTLE_DELAY * 1_000_000L) { + return; + } + lastThrottleNs = Utils.nanoTimePlusMillis(time, throttleMs); + State prevState = transitionToState(State.THROTTLED, false); + try { + Thread.sleep(throttleMs); + } catch (InterruptedException ignore) { + } finally { + transitionToState(prevState, false); + } + } + + /** + * Set the current command of this session. This is done just before + * executing the statement. 
+ * + * @param command the command + */ + private void setCurrentCommand(Command command) { + State targetState = command == null ? State.SLEEP : State.RUNNING; + transitionToState(targetState, true); + if (isOpen()) { + currentCommand = command; + commandStartOrEnd = Instant.now(); + if (command != null) { + if (queryTimeout > 0) { + cancelAtNs = Utils.currentNanoTimePlusMillis(queryTimeout); + } + } else { + if (currentTimestamp != null && !database.getMode().dateTimeValueWithinTransaction) { + currentTimestamp = null; + } + if (nextValueFor != null) { + nextValueFor.clear(); + } + } + } + } + + private State transitionToState(State targetState, boolean checkSuspended) { + State currentState; + while((currentState = state.get()) != State.CLOSED && + (!checkSuspended || checkSuspended(currentState)) && + !state.compareAndSet(currentState, targetState)) {/**/} + return currentState; + } + + private boolean checkSuspended(State currentState) { + if (currentState == State.SUSPENDED) { + close(); + throw DbException.get(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE); + } + return true; + } + + /** + * Check if the current transaction is canceled by calling + * Statement.cancel() or because a session timeout was set and expired. + * + * @throws DbException if the transaction is canceled + */ + public void checkCanceled() { + throttle(); + long cancel = cancelAtNs; + if (cancel == 0L) { + return; + } + if (System.nanoTime() - cancel >= 0L) { + cancelAtNs = 0L; + throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); + } + } + + /** + * Get the cancel time. 
+ * + * @return the time or 0 if not set + */ + public long getCancel() { + return cancelAtNs; + } + + public Command getCurrentCommand() { + return currentCommand; + } + + public ValueTimestampTimeZone getCommandStartOrEnd() { + return DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd); + } + + public boolean getAllowLiterals() { + return allowLiterals; + } + + public void setAllowLiterals(boolean b) { + this.allowLiterals = b; + } + + public void setCurrentSchema(Schema schema) { + modificationId++; + if (queryCache != null) { + queryCache.clear(); + } + this.currentSchemaName = schema.getName(); + } + + @Override + public String getCurrentSchemaName() { + return currentSchemaName; + } + + @Override + public void setCurrentSchemaName(String schemaName) { + Schema schema = database.getSchema(schemaName); + setCurrentSchema(schema); + } + + /** + * Create an internal connection. This connection is used when initializing + * triggers, and when calling user defined functions. + * + * @param columnList if the url should be 'jdbc:columnlist:connection' + * @return the internal connection + */ + public JdbcConnection createConnection(boolean columnList) { + String url; + if (columnList) { + url = Constants.CONN_URL_COLUMNLIST; + } else { + url = Constants.CONN_URL_INTERNAL; + } + return new JdbcConnection(this, getUser().getName(), url); + } + + @Override + public DataHandler getDataHandler() { + return database; + } + + /** + * Remember that the given LOB value must be removed at commit. + * + * @param v the value + */ + public void removeAtCommit(ValueLob v) { + if (v.isLinkedToTable()) { + if (removeLobMap == null) { + removeLobMap = new HashMap<>(); + } + removeLobMap.put(v.toString(), v); + } + } + + /** + * Do not remove this LOB value at commit any longer. 
+ * + * @param v the value + */ + public void removeAtCommitStop(ValueLob v) { + if (v.isLinkedToTable() && removeLobMap != null) { + removeLobMap.remove(v.toString()); + } + } + + /** + * Get the next system generated identifiers. The identifier returned does + * not occur within the given SQL statement. + * + * @param sql the SQL statement + * @return the new identifier + */ + public String getNextSystemIdentifier(String sql) { + String identifier; + do { + identifier = SYSTEM_IDENTIFIER_PREFIX + systemIdentifier++; + } while (sql.contains(identifier)); + return identifier; + } + + /** + * Add a procedure to this session. + * + * @param procedure the procedure to add + */ + public void addProcedure(Procedure procedure) { + if (procedures == null) { + procedures = database.newStringMap(); + } + procedures.put(procedure.getName(), procedure); + } + + /** + * Remove a procedure from this session. + * + * @param name the name of the procedure to remove + */ + public void removeProcedure(String name) { + if (procedures != null) { + procedures.remove(name); + } + } + + /** + * Get the procedure with the given name, or null + * if none exists. + * + * @param name the procedure name + * @return the procedure or null + */ + public Procedure getProcedure(String name) { + if (procedures == null) { + return null; + } + return procedures.get(name); + } + + public void setSchemaSearchPath(String[] schemas) { + modificationId++; + this.schemaSearchPath = schemas; + } + + public String[] getSchemaSearchPath() { + return schemaSearchPath; + } + + @Override + public int hashCode() { + return serialId; + } + + @Override + public String toString() { + return "#" + serialId + " (user: " + (user == null ? "" : user.getName()) + ", " + state.get() + ")"; + } + + /** + * Begin a transaction. + */ + public void begin() { + autoCommitAtTransactionEnd = true; + autoCommit = false; + } + + public ValueTimestampTimeZone getSessionStart() { + return sessionStart; + } + + public Set
    getLocks() { + /* + * This implementation needs to be lock-free. + */ + if (database.getLockMode() == Constants.LOCK_MODE_OFF || locks.isEmpty()) { + return Collections.emptySet(); + } + /* + * Do not use ArrayList.toArray(T[]) here, its implementation is not + * thread-safe. + */ + Object[] array = locks.toArray(); + /* + * The returned array may contain null elements and may contain + * duplicates due to concurrent remove(). + */ + switch (array.length) { + case 1: { + Object table = array[0]; + if (table != null) { + return Collections.singleton((Table) table); + } + } + //$FALL-THROUGH$ + case 0: + return Collections.emptySet(); + default: { + HashSet
    set = new HashSet<>(); + for (Object table : array) { + if (table != null) { + set.add((Table) table); + } + } + return set; + } + } + } + + /** + * Wait if the exclusive mode has been enabled for another session. This + * method returns as soon as the exclusive mode has been disabled. + */ + public void waitIfExclusiveModeEnabled() { + transitionToState(State.RUNNING, true); + // Even in exclusive mode, we have to let the LOB session proceed, or we + // will get deadlocks. + if (database.getLobSession() == this) { + return; + } + while (isOpen()) { + SessionLocal exclusive = database.getExclusiveSession(); + if (exclusive == null || exclusive == this) { + break; + } + if (Thread.holdsLock(exclusive)) { + // if another connection is used within the connection + break; + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + // ignore + } + } + } + + /** + * Get the view cache for this session. There are two caches: the subquery + * cache (which is only use for a single query, has no bounds, and is + * cleared after use), and the cache for regular views. 
+ * + * @param subQuery true to get the subquery cache + * @return the view cache + */ + public Map getViewIndexCache(boolean subQuery) { + if (subQuery) { + // for sub-queries we don't need to use LRU because the cache should + // not grow too large for a single query (we drop the whole cache in + // the end of prepareLocal) + if (subQueryIndexCache == null) { + subQueryIndexCache = new HashMap<>(); + } + return subQueryIndexCache; + } + SmallLRUCache cache = viewIndexCache; + if (cache == null) { + viewIndexCache = cache = SmallLRUCache.newInstance(Constants.VIEW_INDEX_CACHE_SIZE); + } + return cache; + } + + public void setQueryTimeout(int queryTimeout) { + int max = database.getSettings().maxQueryTimeout; + if (max != 0 && (max < queryTimeout || queryTimeout == 0)) { + // the value must be at most max + queryTimeout = max; + } + this.queryTimeout = queryTimeout; + // must reset the cancel at here, + // otherwise it is still used + cancelAtNs = 0L; + } + + public int getQueryTimeout() { + return queryTimeout; + } + + /** + * Set the table this session is waiting for, and the thread that is + * waiting. + * + * @param waitForLock the table + * @param waitForLockThread the current thread (the one that is waiting) + */ + public void setWaitForLock(Table waitForLock, Thread waitForLockThread) { + this.waitForLock = waitForLock; + this.waitForLockThread = waitForLockThread; + } + + public Table getWaitForLock() { + return waitForLock; + } + + public Thread getWaitForLockThread() { + return waitForLockThread; + } + + public int getModificationId() { + return modificationId; + } + + public Value getTransactionId() { + if (transaction == null || !transaction.hasChanges()) { + return ValueNull.INSTANCE; + } + return ValueVarchar.get(Long.toString(transaction.getSequenceNum())); + } + + /** + * Get the next object id. + * + * @return the next object id + */ + public int nextObjectId() { + return objectId++; + } + + /** + * Get the transaction to use for this session. 
+ * + * @return the transaction + */ + public Transaction getTransaction() { + if (transaction == null) { + Store store = database.getStore(); + if (store.getMvStore().isClosed()) { + Throwable backgroundException = database.getBackgroundException(); + database.shutdownImmediately(); + throw DbException.get(ErrorCode.DATABASE_IS_CLOSED, backgroundException); + } + transaction = store.getTransactionStore().begin(this, this.lockTimeout, id, isolationLevel); + startStatement = -1; + } + return transaction; + } + + private long getStatementSavepoint() { + if (startStatement == -1) { + startStatement = getTransaction().setSavepoint(); + } + return startStatement; + } + + /** + * Start a new statement within a transaction. + * @param command about to be started + */ + @SuppressWarnings("incomplete-switch") + public void startStatementWithinTransaction(Command command) { + Transaction transaction = getTransaction(); + if (transaction != null) { + HashSet>> maps = new HashSet<>(); + if (command != null) { + Set dependencies = command.getDependencies(); + switch (transaction.getIsolationLevel()) { + case SNAPSHOT: + case SERIALIZABLE: + if (!transaction.hasStatementDependencies()) { + for (Schema schema : database.getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(null)) { + if (table instanceof MVTable) { + addTableToDependencies((MVTable)table, maps); + } + } + } + break; + } + //$FALL-THROUGH$ + case READ_COMMITTED: + case READ_UNCOMMITTED: + for (DbObject dependency : dependencies) { + if (dependency instanceof MVTable) { + addTableToDependencies((MVTable)dependency, maps); + } + } + break; + case REPEATABLE_READ: + HashSet processed = new HashSet<>(); + for (DbObject dependency : dependencies) { + if (dependency instanceof MVTable) { + addTableToDependencies((MVTable)dependency, maps, processed); + } + } + break; + } + } + transaction.markStatementStart(maps); + } + startStatement = -1; + if (command != null) { + setCurrentCommand(command); + } + 
} + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static void addTableToDependencies(MVTable table, HashSet>> maps) { + for (Index index : table.getIndexes()) { + if (index instanceof MVIndex) { + maps.add(((MVIndex) index).getMVMap()); + } + } + } + + private static void addTableToDependencies(MVTable table, HashSet>> maps, + HashSet processed) { + if (!processed.add(table)) { + return; + } + addTableToDependencies(table, maps); + ArrayList constraints = table.getConstraints(); + if (constraints != null) { + for (Constraint constraint : constraints) { + Table ref = constraint.getTable(); + if (ref != table && ref instanceof MVTable) { + addTableToDependencies((MVTable) ref, maps, processed); + } + } + } + } + + /** + * Mark the statement as completed. This also close all temporary result + * set, and deletes all temporary files held by the result sets. + */ + public void endStatement() { + setCurrentCommand(null); + if (hasTransaction()) { + transaction.markStatementEnd(); + } + startStatement = -1; + } + + /** + * Clear the view cache for this session. + */ + public void clearViewIndexCache() { + viewIndexCache = null; + } + + @Override + public ValueLob addTemporaryLob(ValueLob v) { + LobData lobData = v.getLobData(); + if (lobData instanceof LobDataInMemory) { + return v; + } + int tableId = ((LobDataDatabase) lobData).getTableId(); + if (tableId == LobStorageFrontend.TABLE_RESULT || tableId == LobStorageFrontend.TABLE_TEMP) { + if (temporaryResultLobs == null) { + temporaryResultLobs = new LinkedList<>(); + } + temporaryResultLobs.add(new TimeoutValue(v)); + } else { + if (temporaryLobs == null) { + temporaryLobs = new ArrayList<>(); + } + temporaryLobs.add(v); + } + return v; + } + + @Override + public boolean isRemote() { + return false; + } + + /** + * Mark that the given table needs to be analyzed on commit. 
+ * + * @param table the table + */ + public void markTableForAnalyze(Table table) { + if (tablesToAnalyze == null) { + tablesToAnalyze = new HashSet<>(); + } + tablesToAnalyze.add(table); + } + + public State getState() { + return getBlockingSessionId() != 0 ? State.BLOCKED : state.get(); + } + + public int getBlockingSessionId() { + return transaction == null ? 0 : transaction.getBlockerId(); + } + + @Override + public void onRollback(MVMap> map, Object key, + VersionedValue existingValue, + VersionedValue restoredValue) { + // Here we are relying on the fact that map which backs table's primary index + // has the same name as the table itself + Store store = database.getStore(); + MVTable table = store.getTable(map.getName()); + if (table != null) { + Row oldRow = existingValue == null ? null : (Row) existingValue.getCurrentValue(); + Row newRow = restoredValue == null ? null : (Row) restoredValue.getCurrentValue(); + table.fireAfterRow(this, oldRow, newRow, true); + + if (table.getContainsLargeObject()) { + if (oldRow != null) { + for (int i = 0, len = oldRow.getColumnCount(); i < len; i++) { + Value v = oldRow.getValue(i); + if (v instanceof ValueLob) { + removeAtCommit((ValueLob) v); + } + } + } + if (newRow != null) { + for (int i = 0, len = newRow.getColumnCount(); i < len; i++) { + Value v = newRow.getValue(i); + if (v instanceof ValueLob) { + removeAtCommitStop((ValueLob) v); + } + } + } + } + } + } + + /** + * Represents a savepoint (a position in a transaction to where one can roll + * back to). + */ + public static class Savepoint { + + /** + * The undo log index. + */ + int logIndex; + + /** + * The transaction savepoint id. + */ + long transactionSavepoint; + } + + /** + * An LOB object with a timeout. + */ + public static class TimeoutValue { + + /** + * The time when this object was created. + */ + final long created = System.nanoTime(); + + /** + * The value. 
+ */ + final ValueLob value; + + TimeoutValue(ValueLob v) { + this.value = v; + } + + } + + /** + * Returns the network connection information, or {@code null}. + * + * @return the network connection information, or {@code null} + */ + public NetworkConnectionInfo getNetworkConnectionInfo() { + return networkConnectionInfo; + } + + @Override + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + this.networkConnectionInfo = networkConnectionInfo; + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + ValueTimestampTimeZone ts = currentTimestamp; + if (ts == null) { + currentTimestamp = ts = DateTimeUtils.currentTimestamp(timeZone, commandStartOrEnd); + } + return ts; + } + + @Override + public Mode getMode() { + return database.getMode(); + } + + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + return database.getJavaObjectSerializer(); + } + + @Override + public IsolationLevel getIsolationLevel() { + return isolationLevel; + } + + @Override + public void setIsolationLevel(IsolationLevel isolationLevel) { + commit(false); + this.isolationLevel = isolationLevel; + } + + /** + * Gets bit set of non-keywords. + * + * @return set of non-keywords, or {@code null} + */ + public BitSet getNonKeywords() { + return nonKeywords; + } + + /** + * Sets bit set of non-keywords. 
+ * + * @param nonKeywords set of non-keywords, or {@code null} + */ + public void setNonKeywords(BitSet nonKeywords) { + this.nonKeywords = nonKeywords; + } + + @Override + public StaticSettings getStaticSettings() { + StaticSettings settings = staticSettings; + if (settings == null) { + DbSettings dbSettings = database.getSettings(); + staticSettings = settings = new StaticSettings(dbSettings.databaseToUpper, dbSettings.databaseToLower, + dbSettings.caseInsensitiveIdentifiers); + } + return settings; + } + + @Override + public DynamicSettings getDynamicSettings() { + return new DynamicSettings(database.getMode(), timeZone); + } + + @Override + public TimeZoneProvider currentTimeZone() { + return timeZone; + } + + /** + * Sets current time zone. + * + * @param timeZone time zone + */ + public void setTimeZone(TimeZoneProvider timeZone) { + if (!timeZone.equals(this.timeZone)) { + this.timeZone = timeZone; + ValueTimestampTimeZone ts = currentTimestamp; + if (ts != null) { + long dateValue = ts.getDateValue(); + long timeNanos = ts.getTimeNanos(); + int offsetSeconds = ts.getTimeZoneOffsetSeconds(); + currentTimestamp = DateTimeUtils.timestampTimeZoneAtOffset(dateValue, timeNanos, offsetSeconds, // + timeZone.getTimeZoneOffsetUTC( + DateTimeUtils.getEpochSeconds(dateValue, timeNanos, offsetSeconds))); + } + modificationId++; + } + } + + /** + * Check if two values are equal with the current comparison mode. + * + * @param a the first value + * @param b the second value + * @return true if both objects are equal + */ + public boolean areEqual(Value a, Value b) { + // can not use equals because ValueDecimal 0.0 is not equal to 0.00. + return a.compareTo(b, this, database.getCompareMode()) == 0; + } + + /** + * Compare two values with the current comparison mode. The values may have + * different data types including NULL. 
+ * + * @param a the first value + * @param b the second value + * @return 0 if both values are equal, -1 if the first value is smaller, and + * 1 otherwise + */ + public int compare(Value a, Value b) { + return a.compareTo(b, this, database.getCompareMode()); + } + + /** + * Compare two values with the current comparison mode. The values may have + * different data types including NULL. + * + * @param a the first value + * @param b the second value + * @param forEquality perform only check for equality (= or <>) + * @return 0 if both values are equal, -1 if the first value is smaller, 1 + * if the second value is larger, {@link Integer#MIN_VALUE} if order + * is not defined due to NULL comparison + */ + public int compareWithNull(Value a, Value b, boolean forEquality) { + return a.compareWithNull(b, forEquality, this, database.getCompareMode()); + } + + /** + * Compare two values with the current comparison mode. The values must be + * of the same type. + * + * @param a the first value + * @param b the second value + * @return 0 if both values are equal, -1 if the first value is smaller, and + * 1 otherwise + */ + public int compareTypeSafe(Value a, Value b) { + return a.compareTypeSafe(b, database.getCompareMode(), this); + } + + /** + * Changes parsing mode of data types with too large length. + * + * @param truncateLargeLength + * {@code true} to truncate to valid bound, {@code false} to + * throw an exception + */ + public void setTruncateLargeLength(boolean truncateLargeLength) { + this.truncateLargeLength = truncateLargeLength; + } + + /** + * Returns parsing mode of data types with too large length. + * + * @return {@code true} if large length is truncated, {@code false} if an + * exception is thrown + */ + public boolean isTruncateLargeLength() { + return truncateLargeLength; + } + + /** + * Changes parsing of a BINARY data type. 
+ * + * @param variableBinary + * {@code true} to parse BINARY as VARBINARY, {@code false} to + * parse it as is + */ + public void setVariableBinary(boolean variableBinary) { + this.variableBinary = variableBinary; + } + + /** + * Returns BINARY data type parsing mode. + * + * @return {@code true} if BINARY should be parsed as VARBINARY, + * {@code false} if it should be parsed as is + */ + public boolean isVariableBinary() { + return variableBinary; + } + + /** + * Changes INFORMATION_SCHEMA content. + * + * @param oldInformationSchema + * {@code true} to have old-style tables in INFORMATION_SCHEMA, + * {@code false} to have modern tables + */ + public void setOldInformationSchema(boolean oldInformationSchema) { + this.oldInformationSchema = oldInformationSchema; + } + + @Override + public boolean isOldInformationSchema() { + return oldInformationSchema; + } + + @Override + public DatabaseMeta getDatabaseMeta() { + return new DatabaseMetaLocal(this); + } + + @Override + public boolean zeroBasedEnums() { + return database.zeroBasedEnums(); + } + + /** + * Enables or disables the quirks mode. + * + * @param quirksMode + * whether quirks mode should be enabled + */ + public void setQuirksMode(boolean quirksMode) { + this.quirksMode = quirksMode; + } + + /** + * Returns whether quirks mode is enabled explicitly or implicitly. 
+ * + * @return {@code true} if database is starting or quirks mode was enabled + * explicitly, {@code false} otherwise + */ + public boolean isQuirksMode() { + return quirksMode || database.isStarting(); + } + + @Override + public Session setThreadLocalSession() { + Session oldSession = THREAD_LOCAL_SESSION.get(); + THREAD_LOCAL_SESSION.set(this); + return oldSession; + } + + @Override + public void resetThreadLocalSession(Session oldSession) { + if (oldSession == null) { + THREAD_LOCAL_SESSION.remove(); + } else { + THREAD_LOCAL_SESSION.set(oldSession); + } + } + +} diff --git a/h2/src/main/org/h2/engine/SessionRemote.java b/h2/src/main/org/h2/engine/SessionRemote.java index 918dc7e34a..6045e111c1 100644 --- a/h2/src/main/org/h2/engine/SessionRemote.java +++ b/h2/src/main/org/h2/engine/SessionRemote.java @@ -1,21 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; import java.io.IOException; import java.net.Socket; +import java.sql.SQLException; import java.util.ArrayList; - import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; import org.h2.command.CommandInterface; import org.h2.command.CommandRemote; import org.h2.command.dml.SetTypes; -import org.h2.jdbc.JdbcSQLException; +import org.h2.engine.Mode.ModeEnum; +import org.h2.expression.ParameterInterface; +import org.h2.jdbc.JdbcException; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLegacy; +import org.h2.jdbc.meta.DatabaseMetaRemote; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceSystem; @@ -23,23 +28,30 @@ import org.h2.store.DataHandler; import org.h2.store.FileStore; import org.h2.store.LobStorageFrontend; -import org.h2.store.LobStorageInterface; import org.h2.store.fs.FileUtils; +import org.h2.util.DateTimeUtils; import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; -import org.h2.util.New; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.StringUtils; import org.h2.util.TempFileDeleter; +import org.h2.util.TimeZoneProvider; +import org.h2.util.Utils; +import org.h2.value.CompareMode; import org.h2.value.Transfer; import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueLob; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; /** * The client side part of a session when using the server mode. This object * communicates with a Session on the server side. 
*/ -public class SessionRemote extends SessionWithState implements DataHandler { +public final class SessionRemote extends Session implements DataHandler { public static final int SESSION_PREPARE = 0; public static final int SESSION_CLOSE = 1; @@ -52,27 +64,26 @@ public class SessionRemote extends SessionWithState implements DataHandler { public static final int COMMAND_COMMIT = 8; public static final int CHANGE_ID = 9; public static final int COMMAND_GET_META_DATA = 10; - public static final int SESSION_PREPARE_READ_PARAMS = 11; + // 11 was used for SESSION_PREPARE_READ_PARAMS public static final int SESSION_SET_ID = 12; public static final int SESSION_CANCEL_STATEMENT = 13; public static final int SESSION_CHECK_KEY = 14; public static final int SESSION_SET_AUTOCOMMIT = 15; public static final int SESSION_HAS_PENDING_TRANSACTION = 16; public static final int LOB_READ = 17; + public static final int SESSION_PREPARE_READ_PARAMS2 = 18; + public static final int GET_JDBC_META = 19; public static final int STATUS_ERROR = 0; public static final int STATUS_OK = 1; public static final int STATUS_CLOSED = 2; public static final int STATUS_OK_STATE_CHANGED = 3; - private static SessionFactory sessionFactory; - private TraceSystem traceSystem; private Trace trace; - private ArrayList transferList = New.arrayList(); + private ArrayList transferList = Utils.newSmallArrayList(); private int nextId; private boolean autoCommit = true; - private CommandInterface autoCommitFalse, autoCommitTrue; private ConnectionInfo connectionInfo; private String databaseName; private String cipher; @@ -82,24 +93,31 @@ public class SessionRemote extends SessionWithState implements DataHandler { private int clientVersion; private boolean autoReconnect; private int lastReconnect; - private SessionInterface embedded; + private Session embedded; private DatabaseEventListener eventListener; private LobStorageFrontend lobStorage; private boolean cluster; private TempFileDeleter tempFileDeleter; private 
JavaObjectSerializer javaObjectSerializer; - private volatile boolean javaObjectSerializerInitialized; + + private final CompareMode compareMode = CompareMode.getInstance(null, 0); + + private final boolean oldInformationSchema; + + private String currentSchemaName; + + private volatile DynamicSettings dynamicSettings; public SessionRemote(ConnectionInfo ci) { this.connectionInfo = ci; + oldInformationSchema = ci.getProperty("OLD_INFORMATION_SCHEMA", false); } @Override public ArrayList getClusterServers() { - ArrayList serverList = new ArrayList(); - for (int i = 0; i < transferList.size(); i++) { - Transfer transfer = transferList.get(i); + ArrayList serverList = new ArrayList<>(); + for (Transfer transfer : transferList) { serverList.add(transfer.getSocket().getInetAddress(). getHostAddress() + ":" + transfer.getSocket().getPort()); @@ -109,14 +127,13 @@ public ArrayList getClusterServers() { private Transfer initTransfer(ConnectionInfo ci, String db, String server) throws IOException { - Socket socket = NetUtils.createSocket(server, - Constants.DEFAULT_TCP_PORT, ci.isSSL()); - Transfer trans = new Transfer(this); - trans.setSocket(socket); + Socket socket = NetUtils.createSocket(server, Constants.DEFAULT_TCP_PORT, ci.isSSL(), + ci.getProperty("NETWORK_TIMEOUT", 0)); + Transfer trans = new Transfer(this, socket); trans.setSSL(ci.isSSL()); trans.init(); - trans.writeInt(Constants.TCP_PROTOCOL_VERSION_6); - trans.writeInt(Constants.TCP_PROTOCOL_VERSION_15); + trans.writeInt(Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED); + trans.writeInt(Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED); trans.writeString(db); trans.writeString(ci.getOriginalURL()); trans.writeString(ci.getUserName()); @@ -131,19 +148,20 @@ private Transfer initTransfer(ConnectionInfo ci, String db, String server) done(trans); clientVersion = trans.readInt(); trans.setVersion(clientVersion); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_14) { - if (ci.getFileEncryptionKey() != null) { - 
trans.writeBytes(ci.getFileEncryptionKey()); - } + if (ci.getFileEncryptionKey() != null) { + trans.writeBytes(ci.getFileEncryptionKey()); } trans.writeInt(SessionRemote.SESSION_SET_ID); trans.writeString(sessionId); - done(trans); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_15) { - autoCommit = trans.readBoolean(); - } else { - autoCommit = true; + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_20) { + TimeZoneProvider timeZone = ci.getTimeZone(); + if (timeZone == null) { + timeZone = DateTimeUtils.getTimeZone(); + } + trans.writeString(timeZone.getId()); } + done(trans); + autoCommit = trans.readBoolean(); return trans; } catch (DbException e) { trans.close(); @@ -153,9 +171,6 @@ private Transfer initTransfer(ConnectionInfo ci, String db, String server) @Override public boolean hasPendingTransaction() { - if (clientVersion < Constants.TCP_PROTOCOL_VERSION_10) { - return true; - } for (int i = 0, count = 0; i < transferList.size(); i++) { Transfer transfer = transferList.get(i); try { @@ -208,13 +223,22 @@ private void checkClusterDisableAutoCommit(String serverList) { CommandInterface c = prepareCommand( "SET CLUSTER " + serverList, Integer.MAX_VALUE); // this will set autoCommit to false - c.executeUpdate(); + c.executeUpdate(null); // so we need to switch it on autoCommit = true; cluster = true; } } + /** + * Returns the TCP protocol version of remote connection. + * + * @return the TCP protocol version + */ + public int getClientVersion() { + return clientVersion; + } + @Override public boolean getAutoCommit() { return autoCommit; @@ -240,32 +264,16 @@ public void setAutoCommitFromServer(boolean autoCommit) { } } - private void setAutoCommitSend(boolean autoCommit) { - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_8) { - for (int i = 0, count = 0; i < transferList.size(); i++) { - Transfer transfer = transferList.get(i); - try { - traceOperation("SESSION_SET_AUTOCOMMIT", autoCommit ? 
1 : 0); - transfer.writeInt(SessionRemote.SESSION_SET_AUTOCOMMIT). - writeBoolean(autoCommit); - done(transfer); - } catch (IOException e) { - removeServer(e, i--, ++count); - } - } - } else { - if (autoCommit) { - if (autoCommitTrue == null) { - autoCommitTrue = prepareCommand( - "SET AUTOCOMMIT TRUE", Integer.MAX_VALUE); - } - autoCommitTrue.executeUpdate(); - } else { - if (autoCommitFalse == null) { - autoCommitFalse = prepareCommand( - "SET AUTOCOMMIT FALSE", Integer.MAX_VALUE); - } - autoCommitFalse.executeUpdate(); + private synchronized void setAutoCommitSend(boolean autoCommit) { + for (int i = 0, count = 0; i < transferList.size(); i++) { + Transfer transfer = transferList.get(i); + try { + traceOperation("SESSION_SET_AUTOCOMMIT", autoCommit ? 1 : 0); + transfer.writeInt(SessionRemote.SESSION_SET_AUTOCOMMIT). + writeBoolean(autoCommit); + done(transfer); + } catch (IOException e) { + removeServer(e, i--, ++count); } } } @@ -305,32 +313,19 @@ private String getFilePrefix(String dir) { return buff.toString(); } - @Override - public int getPowerOffCount() { - return 0; - } - - @Override - public void setPowerOffCount(int count) { - throw DbException.getUnsupportedException("remote"); - } - /** * Open a new (remote or embedded) session. 
* * @param openNew whether to open a new session in any case * @return the session */ - public SessionInterface connectEmbeddedOrServer(boolean openNew) { + public Session connectEmbeddedOrServer(boolean openNew) { ConnectionInfo ci = connectionInfo; if (ci.isRemote()) { connectServer(ci); return this; } - // create the session using reflection, - // so that the JDBC layer can be compiled without it - boolean autoServerMode = Boolean.parseBoolean( - ci.getProperty("AUTO_SERVER", "false")); + boolean autoServerMode = ci.getProperty("AUTO_SERVER", false); ConnectionInfo backup = null; try { if (autoServerMode) { @@ -340,17 +335,12 @@ public SessionInterface connectEmbeddedOrServer(boolean openNew) { if (openNew) { ci.setProperty("OPEN_NEW", "true"); } - if (sessionFactory == null) { - sessionFactory = (SessionFactory) Class.forName( - "org.h2.engine.Engine").getMethod("getInstance").invoke(null); - } - return sessionFactory.createSession(ci); + return Engine.createSession(ci); } catch (Exception re) { DbException e = DbException.convert(re); if (e.getErrorCode() == ErrorCode.DATABASE_ALREADY_OPEN_1) { if (autoServerMode) { - String serverKey = ((JdbcSQLException) e.getSQLException()). 
- getSQL(); + String serverKey = ((JdbcException) e.getSQLException()).getSQL(); if (serverKey != null) { backup.setServerKey(serverKey); // OPEN_NEW must be removed now, otherwise @@ -388,7 +378,7 @@ private void connectServer(ConnectionInfo ci) { traceSystem.setLevelFile(level); if (level > 0 && level < 4) { String file = FileUtils.createTempFile(prefix, - Constants.SUFFIX_TRACE_FILE, false, false); + Constants.SUFFIX_TRACE_FILE, false); traceSystem.setFileName(file); } } catch (IOException e) { @@ -407,11 +397,9 @@ private void connectServer(ConnectionInfo ci) { serverList = StringUtils.quoteStringSQL(server); ci.setProperty("CLUSTER", Constants.CLUSTERING_ENABLED); } - autoReconnect = Boolean.parseBoolean(ci.getProperty( - "AUTO_RECONNECT", "false")); + autoReconnect = ci.getProperty("AUTO_RECONNECT", false); // AUTO_SERVER implies AUTO_RECONNECT - boolean autoServer = Boolean.parseBoolean(ci.getProperty( - "AUTO_SERVER", "false")); + boolean autoServer = ci.getProperty("AUTO_SERVER", false); if (autoServer && serverList != null) { throw DbException .getUnsupportedException("autoServer && serverList != null"); @@ -423,7 +411,7 @@ private void connectServer(ConnectionInfo ci) { className = StringUtils.trim(className, true, true, "'"); try { eventListener = (DatabaseEventListener) JdbcUtils - .loadUserClass(className).newInstance(); + .loadUserClass(className).getDeclaredConstructor().newInstance(); } catch (Throwable e) { throw DbException.convert(e); } @@ -440,8 +428,7 @@ private void connectServer(ConnectionInfo ci) { // TODO cluster: support more than 2 connections boolean switchOffCluster = false; try { - for (int i = 0; i < len; i++) { - String s = servers[i]; + for (String s : servers) { try { Transfer trans = initTransfer(ci, databaseName, s); transferList.add(trans); @@ -461,11 +448,12 @@ private void connectServer(ConnectionInfo ci) { traceSystem.close(); throw e; } + getDynamicSettings(); } private void switchOffCluster() { CommandInterface ci = 
prepareCommand("SET CLUSTER ''", Integer.MAX_VALUE); - ci.executeUpdate(); + ci.executeUpdate(null); } /** @@ -479,7 +467,7 @@ private void switchOffCluster() { public void removeServer(IOException e, int i, int count) { trace.debug(e, "removing server because of exception"); transferList.remove(i); - if (transferList.size() == 0 && autoReconnect(count)) { + if (transferList.isEmpty() && autoReconnect(count)) { return; } checkClosed(); @@ -613,32 +601,47 @@ public int getCurrentId() { public void done(Transfer transfer) throws IOException { transfer.flush(); int status = transfer.readInt(); - if (status == STATUS_ERROR) { - String sqlstate = transfer.readString(); - String message = transfer.readString(); - String sql = transfer.readString(); - int errorCode = transfer.readInt(); - String stackTrace = transfer.readString(); - JdbcSQLException s = new JdbcSQLException(message, sql, sqlstate, - errorCode, null, stackTrace); - if (errorCode == ErrorCode.CONNECTION_BROKEN_1) { - // allow re-connect - IOException e = new IOException(s.toString(), s); - throw e; - } - throw DbException.convert(s); - } else if (status == STATUS_CLOSED) { + switch (status) { + case STATUS_ERROR: + throw readException(transfer); + case STATUS_OK: + break; + case STATUS_CLOSED: transferList = null; - } else if (status == STATUS_OK_STATE_CHANGED) { + break; + case STATUS_OK_STATE_CHANGED: sessionStateChanged = true; - } else if (status == STATUS_OK) { - // ok - } else { - throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, - "unexpected status " + status); + currentSchemaName = null; + dynamicSettings = null; + break; + default: + throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, "unexpected status " + status); } } + /** + * Reads an exception. 
+ * + * @param transfer + * the transfer object + * @return the exception + * @throws IOException + * on I/O exception + */ + public static DbException readException(Transfer transfer) throws IOException { + String sqlstate = transfer.readString(); + String message = transfer.readString(); + String sql = transfer.readString(); + int errorCode = transfer.readInt(); + String stackTrace = transfer.readString(); + SQLException s = DbException.getJdbcSQLException(message, sql, sqlstate, errorCode, null, stackTrace); + if (errorCode == ErrorCode.CONNECTION_BROKEN_1) { + // allow re-connect + throw new IOException(s.toString(), s); + } + return DbException.convert(s); + } + /** * Returns true if the connection was opened in cluster mode. * @@ -650,7 +653,7 @@ public boolean isClustered() { @Override public boolean isClosed() { - return transferList == null || transferList.size() == 0; + return transferList == null || transferList.isEmpty(); } /** @@ -680,11 +683,6 @@ public String getDatabasePath() { return ""; } - @Override - public String getLobCompressionAlgorithm(int type) { - return null; - } - @Override public int getMaxLengthInplaceLob() { return SysProperties.LOB_CLIENT_MAX_SIZE_MEMORY; @@ -739,22 +737,7 @@ public TempFileDeleter getTempFileDeleter() { } @Override - public boolean isReconnectNeeded(boolean write) { - return false; - } - - @Override - public SessionInterface reconnect(boolean write) { - return this; - } - - @Override - public void afterWriting() { - // nothing to do - } - - @Override - public LobStorageInterface getLobStorage() { + public LobStorageFrontend getLobStorage() { if (lobStorage == null) { lobStorage = new LobStorageFrontend(this); } @@ -771,9 +754,7 @@ public synchronized int readLob(long lobId, byte[] hmac, long offset, traceOperation("LOB_READ", (int) lobId); transfer.writeInt(SessionRemote.LOB_READ); transfer.writeLong(lobId); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - transfer.writeBytes(hmac); - } + 
transfer.writeBytes(hmac); transfer.writeLong(offset); transfer.writeInt(length); done(transfer); @@ -792,58 +773,215 @@ public synchronized int readLob(long lobId, byte[] hmac, long offset, @Override public JavaObjectSerializer getJavaObjectSerializer() { - initJavaObjectSerializer(); + if (dynamicSettings == null) { + getDynamicSettings(); + } return javaObjectSerializer; } - private void initJavaObjectSerializer() { - if (javaObjectSerializerInitialized) { - return; + @Override + public ValueLob addTemporaryLob(ValueLob v) { + // do nothing + return v; + } + + @Override + public CompareMode getCompareMode() { + return compareMode; + } + + @Override + public boolean isRemote() { + return true; + } + + @Override + public String getCurrentSchemaName() { + String schema = currentSchemaName; + if (schema == null) { + synchronized (this) { + try (CommandInterface command = prepareCommand("CALL SCHEMA()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + currentSchemaName = schema = result.currentRow()[0].getString(); + } + } + } + return schema; + } + + @Override + public synchronized void setCurrentSchemaName(String schema) { + currentSchemaName = null; + try (CommandInterface command = prepareCommand( + StringUtils.quoteIdentifier(new StringBuilder("SET SCHEMA "), schema).toString(), 0)) { + command.executeUpdate(null); + currentSchemaName = schema; + } + } + + @Override + public void setNetworkConnectionInfo(NetworkConnectionInfo networkConnectionInfo) { + // Not supported + } + + @Override + public IsolationLevel getIsolationLevel() { + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_19) { + try (CommandInterface command = prepareCommand(!isOldInformationSchema() + ? 
"SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID()" + : "SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE ID = SESSION_ID()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + return IsolationLevel.fromSql(result.currentRow()[0].getString()); + } + } else { + try (CommandInterface command = prepareCommand("CALL LOCK_MODE()", 1); + ResultInterface result = command.executeQuery(1, false)) { + result.next(); + return IsolationLevel.fromLockMode(result.currentRow()[0].getInt()); + } } - synchronized (this) { - if (javaObjectSerializerInitialized) { - return; + } + + @Override + public void setIsolationLevel(IsolationLevel isolationLevel) { + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_19) { + try (CommandInterface command = prepareCommand( + "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " + isolationLevel.getSQL(), 0)) { + command.executeUpdate(null); } - String serializerFQN = readSerializationSettings(); - if (serializerFQN != null) { - serializerFQN = serializerFQN.trim(); - if (!serializerFQN.isEmpty() && !serializerFQN.equals("null")) { - try { - javaObjectSerializer = (JavaObjectSerializer) JdbcUtils - .loadUserClass(serializerFQN).newInstance(); - } catch (Exception e) { - throw DbException.convert(e); + } else { + try (CommandInterface command = prepareCommand("SET LOCK_MODE ?", 0)) { + command.getParameters().get(0).setValue(ValueInteger.get(isolationLevel.getLockMode()), false); + command.executeUpdate(null); + } + } + } + + @Override + public StaticSettings getStaticSettings() { + StaticSettings settings = staticSettings; + if (settings == null) { + boolean databaseToUpper = true, databaseToLower = false, caseInsensitiveIdentifiers = false; + try (CommandInterface command = getSettingsCommand(" IN (?, ?, ?)")) { + ArrayList parameters = command.getParameters(); + parameters.get(0).setValue(ValueVarchar.get("DATABASE_TO_UPPER"), false); + 
parameters.get(1).setValue(ValueVarchar.get("DATABASE_TO_LOWER"), false); + parameters.get(2).setValue(ValueVarchar.get("CASE_INSENSITIVE_IDENTIFIERS"), false); + try (ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + Value[] row = result.currentRow(); + String value = row[1].getString(); + switch (row[0].getString()) { + case "DATABASE_TO_UPPER": + databaseToUpper = Boolean.valueOf(value); + break; + case "DATABASE_TO_LOWER": + databaseToLower = Boolean.valueOf(value); + break; + case "CASE_INSENSITIVE_IDENTIFIERS": + caseInsensitiveIdentifiers = Boolean.valueOf(value); + } } } } - javaObjectSerializerInitialized = true; + if (clientVersion < Constants.TCP_PROTOCOL_VERSION_18) { + caseInsensitiveIdentifiers = !databaseToUpper; + } + staticSettings = settings = new StaticSettings(databaseToUpper, databaseToLower, + caseInsensitiveIdentifiers); } + return settings; } - /** - * Read the serializer name from the persistent database settings. - * - * @return the serializer - */ - private String readSerializationSettings() { - String javaObjectSerializerFQN = null; - CommandInterface ci = prepareCommand( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS "+ - " WHERE NAME='JAVA_OBJECT_SERIALIZER'", Integer.MAX_VALUE); - try { - ResultInterface result = ci.executeQuery(0, false); - if (result.next()) { - Value[] row = result.currentRow(); - javaObjectSerializerFQN = row[0].getString(); + @Override + public DynamicSettings getDynamicSettings() { + DynamicSettings settings = dynamicSettings; + if (settings == null) { + String modeName = ModeEnum.REGULAR.name(); + TimeZoneProvider timeZone = DateTimeUtils.getTimeZone(); + String javaObjectSerializerName = null; + try (CommandInterface command = getSettingsCommand(" IN (?, ?, ?)")) { + ArrayList parameters = command.getParameters(); + parameters.get(0).setValue(ValueVarchar.get("MODE"), false); + parameters.get(1).setValue(ValueVarchar.get("TIME ZONE"), false); + 
parameters.get(2).setValue(ValueVarchar.get("JAVA_OBJECT_SERIALIZER"), false); + try (ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + Value[] row = result.currentRow(); + String value = row[1].getString(); + switch (row[0].getString()) { + case "MODE": + modeName = value; + break; + case "TIME ZONE": + timeZone = TimeZoneProvider.ofId(value); + break; + case "JAVA_OBJECT_SERIALIZER": + javaObjectSerializerName = value; + } + } + } + } + Mode mode = Mode.getInstance(modeName); + if (mode == null) { + mode = Mode.getRegular(); + } + dynamicSettings = settings = new DynamicSettings(mode, timeZone); + if (javaObjectSerializerName != null + && !(javaObjectSerializerName = javaObjectSerializerName.trim()).isEmpty() + && !javaObjectSerializerName.equals("null")) { + try { + javaObjectSerializer = (JavaObjectSerializer) JdbcUtils + .loadUserClass(javaObjectSerializerName).getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw DbException.convert(e); + } + } else { + javaObjectSerializer = null; } - } finally { - ci.close(); } - return javaObjectSerializerFQN; + return settings; + } + + private CommandInterface getSettingsCommand(String args) { + return prepareCommand( + (!isOldInformationSchema() + ? "SELECT SETTING_NAME, SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME" + : "SELECT NAME, `VALUE` FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME") + args, + Integer.MAX_VALUE); } @Override - public void addTemporaryLob(Value v) { - // do nothing + public ValueTimestampTimeZone currentTimestamp() { + return DateTimeUtils.currentTimestamp(getDynamicSettings().timeZone); + } + + @Override + public TimeZoneProvider currentTimeZone() { + return getDynamicSettings().timeZone; + } + + @Override + public Mode getMode() { + return getDynamicSettings().mode; + } + + @Override + public DatabaseMeta getDatabaseMeta() { + return clientVersion >= Constants.TCP_PROTOCOL_VERSION_20 ? 
new DatabaseMetaRemote(this, transferList) + : new DatabaseMetaLegacy(this); } + + @Override + public boolean isOldInformationSchema() { + return oldInformationSchema || clientVersion < Constants.TCP_PROTOCOL_VERSION_20; + } + + @Override + public boolean zeroBasedEnums() { + return false; + } + } diff --git a/h2/src/main/org/h2/engine/SessionWithState.java b/h2/src/main/org/h2/engine/SessionWithState.java deleted file mode 100644 index f011192a53..0000000000 --- a/h2/src/main/org/h2/engine/SessionWithState.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import org.h2.command.CommandInterface; -import org.h2.result.ResultInterface; -import org.h2.util.New; -import org.h2.value.Value; - -/** - * The base class for both remote and embedded sessions. - */ -abstract class SessionWithState implements SessionInterface { - - protected ArrayList sessionState; - protected boolean sessionStateChanged; - private boolean sessionStateUpdating; - - /** - * Re-create the session state using the stored sessionState list. - */ - protected void recreateSessionState() { - if (sessionState != null && sessionState.size() > 0) { - sessionStateUpdating = true; - try { - for (String sql : sessionState) { - CommandInterface ci = prepareCommand(sql, Integer.MAX_VALUE); - ci.executeUpdate(); - } - } finally { - sessionStateUpdating = false; - sessionStateChanged = false; - } - } - } - - /** - * Read the session state if necessary. 
- */ - public void readSessionState() { - if (!sessionStateChanged || sessionStateUpdating) { - return; - } - sessionStateChanged = false; - sessionState = New.arrayList(); - CommandInterface ci = prepareCommand( - "SELECT * FROM INFORMATION_SCHEMA.SESSION_STATE", - Integer.MAX_VALUE); - ResultInterface result = ci.executeQuery(0, false); - while (result.next()) { - Value[] row = result.currentRow(); - sessionState.add(row[1].getString()); - } - } - -} diff --git a/h2/src/main/org/h2/engine/Setting.java b/h2/src/main/org/h2/engine/Setting.java index e0942716f8..3d8cc24576 100644 --- a/h2/src/main/org/h2/engine/Setting.java +++ b/h2/src/main/org/h2/engine/Setting.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -12,13 +12,23 @@ /** * A persistent database setting. 
*/ -public class Setting extends DbObjectBase { +public final class Setting extends DbObject { private int intValue; private String stringValue; public Setting(Database database, int id, String settingName) { - initDbObjectBase(database, id, settingName, Trace.SETTING); + super(database, id, settingName, Trace.SETTING); + } + + @Override + public String getSQL(int sqlFlags) { + return getName(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return builder.append(getName()); } public void setIntValue(int value) { @@ -39,18 +49,13 @@ public String getStringValue() { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override public String getCreateSQL() { StringBuilder buff = new StringBuilder("SET "); - buff.append(getSQL()).append(' '); + getSQL(buff, DEFAULT_SQL_FLAGS).append(' '); if (stringValue != null) { buff.append(stringValue); } else { @@ -65,7 +70,7 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } diff --git a/h2/src/main/org/h2/engine/SettingsBase.java b/h2/src/main/org/h2/engine/SettingsBase.java index 13d38f815d..2059dfdbb6 100644 --- a/h2/src/main/org/h2/engine/SettingsBase.java +++ b/h2/src/main/org/h2/engine/SettingsBase.java @@ -1,11 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.engine; +import java.util.Arrays; +import java.util.Comparator; import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; import org.h2.api.ErrorCode; import org.h2.message.DbException; @@ -30,15 +34,25 @@ protected SettingsBase(HashMap s) { * @return the setting */ protected boolean get(String key, boolean defaultValue) { - String s = get(key, "" + defaultValue); + String s = get(key, Boolean.toString(defaultValue)); try { - return Boolean.parseBoolean(s); - } catch (NumberFormatException e) { + return Utils.parseBoolean(s, defaultValue, true); + } catch (IllegalArgumentException e) { throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, "key:" + key + " value:" + s); } } + /** + * Set an entry in the key-value pair. + * + * @param key the key + * @param value the value + */ + void set(String key, boolean value) { + settings.put(key, Boolean.toString(value)); + } + /** * Get the setting for the given key. * @@ -47,7 +61,7 @@ protected boolean get(String key, boolean defaultValue) { * @return the setting */ protected int get(String key, int defaultValue) { - String s = get(key, "" + defaultValue); + String s = get(key, Integer.toString(defaultValue)); try { return Integer.decode(s); } catch (NumberFormatException e) { @@ -64,6 +78,10 @@ protected int get(String key, int defaultValue) { * @return the setting */ protected String get(String key, String defaultValue) { + String v = settings.get(key); + if (v != null) { + return v; + } StringBuilder buff = new StringBuilder("h2."); boolean nextUpper = false; for (char c : key.toCharArray()) { @@ -76,11 +94,8 @@ protected String get(String key, String defaultValue) { } } String sysProperty = buff.toString(); - String v = settings.get(key); - if (v == null) { - v = Utils.getProperty(sysProperty, defaultValue); - settings.put(key, v); - } + v = Utils.getProperty(sysProperty, defaultValue); + settings.put(key, v); return v; } @@ -103,4 +118,16 @@ 
public HashMap getSettings() { return settings; } + /** + * Get all settings in alphabetical order. + * + * @return the settings + */ + public Entry[] getSortedSettings() { + @SuppressWarnings("unchecked") + Map.Entry[] entries = settings.entrySet().toArray(new Map.Entry[0]); + Arrays.sort(entries, Comparator.comparing(Entry::getKey)); + return entries; + } + } diff --git a/h2/src/main/org/h2/engine/SysProperties.java b/h2/src/main/org/h2/engine/SysProperties.java index 35757a7b55..bf07188c88 100644 --- a/h2/src/main/org/h2/engine/SysProperties.java +++ b/h2/src/main/org/h2/engine/SysProperties.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -42,36 +42,7 @@ public class SysProperties { public static final String H2_BROWSER = "h2.browser"; /** - * System property file.encoding (default: Cp1252).
    - * It is usually set by the system and is the default encoding used for the - * RunScript and CSV tool. - */ - public static final String FILE_ENCODING = - Utils.getProperty("file.encoding", "Cp1252"); - - /** - * System property file.separator (default: /).
    - * It is usually set by the system, and used to build absolute file names. - */ - public static final String FILE_SEPARATOR = - Utils.getProperty("file.separator", "/"); - - /** - * System property java.specification.version.
    - * It is set by the system. Examples: 1.4, 1.5, 1.6. - */ - public static final String JAVA_SPECIFICATION_VERSION = - Utils.getProperty("java.specification.version", "1.4"); - - /** - * System property line.separator (default: \n).
    - * It is usually set by the system, and used by the script and trace tools. - */ - public static final String LINE_SEPARATOR = - Utils.getProperty("line.separator", "\n"); - - /** - * System property user.home (empty string if not set).
    + * System property user.home (empty string if not set). * It is usually set by the system, and used as a replacement for ~ in file * names. */ @@ -79,62 +50,37 @@ public class SysProperties { Utils.getProperty("user.home", ""); /** - * System property h2.allowedClasses (default: *).
    + * System property h2.allowedClasses (default: *). * Comma separated list of class names or prefixes. */ public static final String ALLOWED_CLASSES = Utils.getProperty("h2.allowedClasses", "*"); /** - * System property h2.browser (default: null).
    - * The preferred browser to use. If not set, the default browser is used. - * For Windows, to use the Internet Explorer, set this property to - * 'explorer'. For Mac OS, if the default browser is not Safari and you want - * to use Safari, use: - * java -Dh2.browser="open,-a,Safari,%url" .... - */ - public static final String BROWSER = - Utils.getProperty(H2_BROWSER, null); - - /** - * System property h2.enableAnonymousTLS (default: true).
    + * System property h2.enableAnonymousTLS (default: true). * When using TLS connection, the anonymous cipher suites should be enabled. */ public static final boolean ENABLE_ANONYMOUS_TLS = Utils.getProperty("h2.enableAnonymousTLS", true); /** - * System property h2.bindAddress (default: null).
    + * System property h2.bindAddress (default: null). * The bind address to use. */ public static final String BIND_ADDRESS = Utils.getProperty("h2.bindAddress", null); /** - * System property h2.check (default: true).
    - * Assertions in the database engine. + * System property h2.check + * (default: true for JDK/JRE, false for Android). + * Optional additional checks in the database engine. */ - //## CHECK ## public static final boolean CHECK = - Utils.getProperty("h2.check", true); - /*/ - public static final boolean CHECK = false; - //*/ - - /** - * System property h2.check2 (default: true).
    - * Additional assertions in the database engine. - */ - //## CHECK ## - public static final boolean CHECK2 = - Utils.getProperty("h2.check2", false); - /*/ - public static final boolean CHECK2 = false; - //*/ + Utils.getProperty("h2.check", !"0.9".equals(Utils.getProperty("java.specification.version", null))); /** * System property h2.clientTraceDirectory (default: - * trace.db/).
    + * trace.db/). * Directory where the trace files of the JDBC client are stored (only for * client / server). */ @@ -142,16 +88,17 @@ public class SysProperties { Utils.getProperty("h2.clientTraceDirectory", "trace.db/"); /** - * System property h2.collatorCacheSize (default: 32000).
    + * System property h2.collatorCacheSize (default: 3 + * 2000). * The cache size for collation keys (in elements). Used when a collator has * been set for the database. */ public static final int COLLATOR_CACHE_SIZE = - Utils.getProperty("h2.collatorCacheSize", 32000); + Utils.getProperty("h2.collatorCacheSize", 32_000); /** * System property h2.consoleTableIndexes - * (default: 100).
    + * (default: 100). * Up to this many tables, the column type and indexes are listed. */ public static final int CONSOLE_MAX_TABLES_LIST_INDEXES = @@ -159,36 +106,36 @@ public class SysProperties { /** * System property h2.consoleTableColumns - * (default: 500).
    + * (default: 500). * Up to this many tables, the column names are listed. */ public static final int CONSOLE_MAX_TABLES_LIST_COLUMNS = - Utils.getProperty("h2.consoleTableColumns", 300); + Utils.getProperty("h2.consoleTableColumns", 500); /** * System property h2.consoleProcedureColumns - * (default: 500).
    + * (default: 500). * Up to this many procedures, the column names are listed. */ public static final int CONSOLE_MAX_PROCEDURES_LIST_COLUMNS = Utils.getProperty("h2.consoleProcedureColumns", 300); /** - * System property h2.consoleStream (default: true).
    + * System property h2.consoleStream (default: true). * H2 Console: stream query results. */ public static final boolean CONSOLE_STREAM = Utils.getProperty("h2.consoleStream", true); /** - * System property h2.consoleTimeout (default: 1800000).
    + * System property h2.consoleTimeout (default: 1800000). * H2 Console: session timeout in milliseconds. The default is 30 minutes. */ public static final int CONSOLE_TIMEOUT = Utils.getProperty("h2.consoleTimeout", 30 * 60 * 1000); /** - * System property h2.dataSourceTraceLevel (default: 1).
    + * System property h2.dataSourceTraceLevel (default: 1). * The trace level of the data source implementation. Default is 1 for * error. */ @@ -197,7 +144,7 @@ public class SysProperties { /** * System property h2.delayWrongPasswordMin - * (default: 250).
    + * (default: 250). * The minimum delay in milliseconds before an exception is thrown for using * the wrong user name or password. This slows down brute force attacks. The * delay is reset to this value after a successful login. Unsuccessful @@ -209,7 +156,7 @@ public class SysProperties { /** * System property h2.delayWrongPasswordMax - * (default: 4000).
    + * (default: 4000). * The maximum delay in milliseconds before an exception is thrown for using * the wrong user name or password. This slows down brute force attacks. The * delay is reset after a successful login. The value 0 means there is no @@ -219,7 +166,7 @@ public class SysProperties { Utils.getProperty("h2.delayWrongPasswordMax", 4000); /** - * System property h2.javaSystemCompiler (default: true).
    + * System property h2.javaSystemCompiler (default: true). * Whether to use the Java system compiler * (ToolProvider.getSystemJavaCompiler()) if it is available to compile user * defined functions. If disabled or if the system compiler is not @@ -231,23 +178,15 @@ public class SysProperties { /** * System property h2.lobCloseBetweenReads - * (default: false).
    + * (default: false). * Close LOB files between read operations. */ public static boolean lobCloseBetweenReads = Utils.getProperty("h2.lobCloseBetweenReads", false); - /** - * System property h2.lobFilesPerDirectory - * (default: 256).
    - * Maximum number of LOB files per directory. - */ - public static final int LOB_FILES_PER_DIRECTORY = - Utils.getProperty("h2.lobFilesPerDirectory", 256); - /** * System property h2.lobClientMaxSizeMemory (default: - * 1048576).
    + * 1048576). * The maximum size of a LOB object to keep in memory on the client side * when using the server mode. */ @@ -255,7 +194,7 @@ public class SysProperties { Utils.getProperty("h2.lobClientMaxSizeMemory", 1024 * 1024); /** - * System property h2.maxFileRetry (default: 16).
    + * System property h2.maxFileRetry (default: 16). * Number of times to retry file delete and rename. in Windows, files can't * be deleted if they are open. Waiting a bit can help (sometimes the * Windows Explorer opens the files for a short time) may help. Sometimes, @@ -266,7 +205,7 @@ public class SysProperties { Math.max(1, Utils.getProperty("h2.maxFileRetry", 16)); /** - * System property h2.maxReconnect (default: 3).
    + * System property h2.maxReconnect (default: 3). * The maximum number of tries to reconnect in a row. */ public static final int MAX_RECONNECT = @@ -274,15 +213,15 @@ public class SysProperties { /** * System property h2.maxMemoryRows - * (default: 40000 per GB of available RAM).
    + * (default: 40000 per GB of available RAM). * The default maximum number of rows to be kept in memory in a result set. */ public static final int MAX_MEMORY_ROWS = - getAutoScaledForMemoryProperty("h2.maxMemoryRows", 40000); + getAutoScaledForMemoryProperty("h2.maxMemoryRows", 40_000); /** * System property h2.maxTraceDataLength - * (default: 65535).
    + * (default: 65535). * The maximum size of a LOB value that is written as data to the trace * system. */ @@ -290,17 +229,7 @@ public class SysProperties { Utils.getProperty("h2.maxTraceDataLength", 65535); /** - * System property h2.modifyOnWrite (default: false).
    - * Only modify the database file when recovery is necessary, or when writing - * to the database. If disabled, opening the database always writes to the - * file (except if the database is read-only). When enabled, the serialized - * file lock is faster. - */ - public static final boolean MODIFY_ON_WRITE = - Utils.getProperty("h2.modifyOnWrite", false); - - /** - * System property h2.nioLoadMapped (default: false).
    + * System property h2.nioLoadMapped (default: false). * If the mapped buffer should be loaded when the file is opened. * This can improve performance. */ @@ -308,17 +237,17 @@ public class SysProperties { Utils.getProperty("h2.nioLoadMapped", false); /** - * System property h2.nioCleanerHack (default: false).
    + * System property h2.nioCleanerHack (default: false). * If enabled, use the reflection hack to un-map the mapped file if * possible. If disabled, System.gc() is called in a loop until the object * is garbage collected. See also - * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038 + * https://bugs.openjdk.java.net/browse/JDK-4724038 */ public static final boolean NIO_CLEANER_HACK = Utils.getProperty("h2.nioCleanerHack", false); /** - * System property h2.objectCache (default: true).
    + * System property h2.objectCache (default: true). * Cache commonly used values (numbers, strings). There is a shared cache * for all values. */ @@ -327,31 +256,29 @@ public class SysProperties { /** * System property h2.objectCacheMaxPerElementSize (default: - * 4096).
    + * 4096). * The maximum size (precision) of an object in the cache. */ public static final int OBJECT_CACHE_MAX_PER_ELEMENT_SIZE = Utils.getProperty("h2.objectCacheMaxPerElementSize", 4096); /** - * System property h2.objectCacheSize (default: 1024).
    + * System property h2.objectCacheSize (default: 1024). * The maximum number of objects in the cache. * This value must be a power of 2. */ - public static final int OBJECT_CACHE_SIZE = - MathUtils.nextPowerOf2(Utils.getProperty("h2.objectCacheSize", 1024)); - - /** - * System property h2.oldStyleOuterJoin - * (default: true for version 1.3, false for version 1.4).
    - * Limited support for the old-style Oracle outer join with "(+)". - */ - public static final boolean OLD_STYLE_OUTER_JOIN = - Utils.getProperty("h2.oldStyleOuterJoin", - Constants.VERSION_MINOR >= 4 ? false : true); + public static final int OBJECT_CACHE_SIZE; + static { + try { + OBJECT_CACHE_SIZE = MathUtils.nextPowerOf2( + Utils.getProperty("h2.objectCacheSize", 1024)); + } catch (IllegalArgumentException e) { + throw new IllegalStateException("Invalid h2.objectCacheSize", e); + } + } /** - * System property h2.pgClientEncoding (default: UTF-8).
    + * System property h2.pgClientEncoding (default: UTF-8). * Default client encoding for PG server. It is used if the client does not * sends his encoding. */ @@ -359,14 +286,21 @@ public class SysProperties { Utils.getProperty("h2.pgClientEncoding", "UTF-8"); /** - * System property h2.prefixTempFile (default: h2.temp).
    + * System property h2.prefixTempFile (default: h2.temp). * The prefix for temporary files in the temp directory. */ public static final String PREFIX_TEMP_FILE = Utils.getProperty("h2.prefixTempFile", "h2.temp"); /** - * System property h2.serverCachedObjects (default: 64).
    + * System property h2.forceAutoCommitOffOnCommit (default: false). + * Throw error if transaction's auto-commit property is true when a commit is executed. + */ + public static boolean FORCE_AUTOCOMMIT_OFF_ON_COMMIT = + Utils.getProperty("h2.forceAutoCommitOffOnCommit", false); + + /** + * System property h2.serverCachedObjects (default: 64). * TCP Server: number of cached objects per session. */ public static final int SERVER_CACHED_OBJECTS = @@ -374,97 +308,53 @@ public class SysProperties { /** * System property h2.serverResultSetFetchSize - * (default: 100).
    + * (default: 100). * The default result set fetch size when using the server mode. */ public static final int SERVER_RESULT_SET_FETCH_SIZE = Utils.getProperty("h2.serverResultSetFetchSize", 100); /** - * System property h2.socketConnectRetry (default: 16).
    + * System property h2.socketConnectRetry (default: 16). * The number of times to retry opening a socket. Windows sometimes fails * to open a socket, see bug - * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6213296 + * https://bugs.openjdk.java.net/browse/JDK-6213296 */ public static final int SOCKET_CONNECT_RETRY = Utils.getProperty("h2.socketConnectRetry", 16); /** * System property h2.socketConnectTimeout - * (default: 2000).
    + * (default: 2000). * The timeout in milliseconds to connect to a server. */ public static final int SOCKET_CONNECT_TIMEOUT = Utils.getProperty("h2.socketConnectTimeout", 2000); /** - * System property h2.sortBinaryUnsigned - * (default: false with version 1.3, true with version 1.4).
    - * Whether binary data should be sorted in unsigned mode - * (0xff is larger than 0x00). - */ - public static final boolean SORT_BINARY_UNSIGNED = - Utils.getProperty("h2.sortBinaryUnsigned", - Constants.VERSION_MINOR >= 4 ? true : false); - - /** - * System property h2.sortNullsHigh (default: false).
    - * Invert the default sorting behavior for NULL, such that NULL - * is at the end of a result set in an ascending sort and at - * the beginning of a result set in a descending sort. - */ - public static final boolean SORT_NULLS_HIGH = - Utils.getProperty("h2.sortNullsHigh", false); - - /** - * System property h2.splitFileSizeShift (default: 30).
    + * System property h2.splitFileSizeShift (default: 30). * The maximum file size of a split file is 1L << x. */ public static final long SPLIT_FILE_SIZE_SHIFT = Utils.getProperty("h2.splitFileSizeShift", 30); /** - * System property h2.storeLocalTime - * (default: false for version 1.3, true for version 1.4).
    - * Store the local time. If disabled, the daylight saving offset is not - * taken into account. - */ - public static final boolean STORE_LOCAL_TIME = - Utils.getProperty("h2.storeLocalTime", - Constants.VERSION_MINOR >= 4 ? true : false); - - /** - * System property h2.syncMethod (default: sync).
    - * What method to call when closing the database, on checkpoint, and on - * CHECKPOINT SYNC. The following options are supported: - * "sync" (default): RandomAccessFile.getFD().sync(); - * "force": RandomAccessFile.getChannel().force(true); - * "forceFalse": RandomAccessFile.getChannel().force(false); - * "": do not call a method (fast but there is a risk of data loss - * on power failure). - */ - public static final String SYNC_METHOD = - Utils.getProperty("h2.syncMethod", "sync"); - - /** - * System property h2.traceIO (default: false).
    + * System property h2.traceIO (default: false). * Trace all I/O operations. */ public static final boolean TRACE_IO = Utils.getProperty("h2.traceIO", false); /** - * System property h2.implicitRelativePath - * (default: true for version 1.3, false for version 1.4).
    - * If disabled, relative paths in database URLs need to be written as - * jdbc:h2:./test instead of jdbc:h2:test. + * System property h2.threadDeadlockDetector + * (default: false). + * Detect thread deadlocks in a background thread. */ - public static final boolean IMPLICIT_RELATIVE_PATH = - Utils.getProperty("h2.implicitRelativePath", - Constants.VERSION_MINOR >= 4 ? false : true); + public static final boolean THREAD_DEADLOCK_DETECTOR = + Utils.getProperty("h2.threadDeadlockDetector", false); /** - * System property h2.urlMap (default: null).
    + * System property h2.urlMap (default: null). * A properties file that contains a mapping between database URLs. New * connections are written into the file. An empty value in the map means no * redirection is used for the given URL. @@ -474,49 +364,16 @@ public class SysProperties { /** * System property h2.useThreadContextClassLoader - * (default: false).
    + * (default: false). * Instead of using the default class loader when deserializing objects, the * current thread-context class loader will be used. */ public static final boolean USE_THREAD_CONTEXT_CLASS_LOADER = Utils.getProperty("h2.useThreadContextClassLoader", false); - /** - * System property h2.serializeJavaObject - * (default: true).
    - * If true, values of type OTHER will be stored in serialized form - * and have the semantics of binary data for all operations (such as sorting - * and conversion to string). - *
    - * If false, the objects will be serialized only for I/O operations - * and a few other special cases (for example when someone tries to get the - * value in binary form or when comparing objects that are not comparable - * otherwise). - *
    - * If the object implements the Comparable interface, the method compareTo - * will be used for sorting (but only if objects being compared have a - * common comparable super type). Otherwise the objects will be compared by - * type, and if they are the same by hashCode, and if the hash codes are - * equal, but objects are not, the serialized forms (the byte arrays) are - * compared. - *
    - * The string representation of the values use the toString method of - * object. - *
    - * In client-server mode, the server must have all required classes in the - * class path. On the client side, this setting is required to be disabled - * as well, to have correct string representation and display size. - *
    - * In embedded mode, no data copying occurs, so the user has to make - * defensive copy himself before storing, or ensure that the value object is - * immutable. - */ - public static boolean serializeJavaObject = - Utils.getProperty("h2.serializeJavaObject", true); - /** * System property h2.javaObjectSerializer - * (default: null).
    + * (default: null). * The JavaObjectSerializer class name for java objects being stored in * column of type OTHER. It must be the same on client and server to work * correctly. @@ -524,6 +381,16 @@ public class SysProperties { public static final String JAVA_OBJECT_SERIALIZER = Utils.getProperty("h2.javaObjectSerializer", null); + /** + * System property h2.authConfigFile + * (default: null). + * authConfigFile define the URL of configuration file + * of {@link org.h2.security.auth.DefaultAuthenticator} + * + */ + public static final String AUTH_CONFIG_FILE = + Utils.getProperty("h2.authConfigFile", null); + private static final String H2_BASE_DIR = "h2.baseDir"; private SysProperties() { @@ -532,6 +399,7 @@ private SysProperties() { /** * INTERNAL + * @param dir base directory */ public static void setBaseDir(String dir) { if (!dir.endsWith("/")) { @@ -542,6 +410,7 @@ public static void setBaseDir(String dir) { /** * INTERNAL + * @return base directory */ public static String getBaseDir() { return Utils.getProperty(H2_BASE_DIR, null); @@ -549,7 +418,7 @@ public static String getBaseDir() { /** * System property h2.scriptDirectory (default: empty - * string).
    + * string). * Relative or absolute directory where the script files are stored to or * read from. * @@ -569,7 +438,7 @@ private static int getAutoScaledForMemoryProperty(String key, int defaultValue) String s = Utils.getProperty(key, null); if (s != null) { try { - return Integer.decode(s).intValue(); + return Integer.decode(s); } catch (NumberFormatException e) { // ignore } diff --git a/h2/src/main/org/h2/engine/UndoLog.java b/h2/src/main/org/h2/engine/UndoLog.java deleted file mode 100644 index f29246f147..0000000000 --- a/h2/src/main/org/h2/engine/UndoLog.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.util.ArrayList; -import java.util.HashMap; -import org.h2.message.DbException; -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.table.Table; -import org.h2.util.New; - -/** - * Each session keeps a undo log if rollback is required. - */ -public class UndoLog { - - private final Database database; - private final ArrayList storedEntriesPos = New.arrayList(); - private final ArrayList records = New.arrayList(); - private FileStore file; - private Data rowBuff; - private int memoryUndo; - private int storedEntries; - private HashMap tables; - private final boolean largeTransactions; - - /** - * Create a new undo log for the given session. - * - * @param session the session - */ - UndoLog(Session session) { - this.database = session.getDatabase(); - largeTransactions = database.getSettings().largeTransactions; - } - - /** - * Get the number of active rows in this undo log. 
- * - * @return the number of rows - */ - int size() { - if (largeTransactions) { - return storedEntries + records.size(); - } - if (SysProperties.CHECK && memoryUndo > records.size()) { - DbException.throwInternalError(); - } - return records.size(); - } - - /** - * Clear the undo log. This method is called after the transaction is - * committed. - */ - void clear() { - records.clear(); - storedEntries = 0; - storedEntriesPos.clear(); - memoryUndo = 0; - if (file != null) { - file.closeAndDeleteSilently(); - file = null; - rowBuff = null; - } - } - - /** - * Get the last record and remove it from the list of operations. - * - * @return the last record - */ - public UndoLogRecord getLast() { - int i = records.size() - 1; - if (largeTransactions) { - if (i < 0 && storedEntries > 0) { - int last = storedEntriesPos.size() - 1; - long pos = storedEntriesPos.get(last); - storedEntriesPos.remove(last); - long end = file.length(); - int bufferLength = (int) (end - pos); - Data buff = Data.create(database, bufferLength); - file.seek(pos); - file.readFully(buff.getBytes(), 0, bufferLength); - while (buff.length() < bufferLength) { - UndoLogRecord e = UndoLogRecord.loadFromBuffer(buff, this); - records.add(e); - memoryUndo++; - } - storedEntries -= records.size(); - file.setLength(pos); - file.seek(pos); - } - i = records.size() - 1; - } - UndoLogRecord entry = records.get(i); - if (entry.isStored()) { - int start = Math.max(0, i - database.getMaxMemoryUndo() / 2); - UndoLogRecord first = null; - for (int j = start; j <= i; j++) { - UndoLogRecord e = records.get(j); - if (e.isStored()) { - e.load(rowBuff, file, this); - memoryUndo++; - if (first == null) { - first = e; - } - } - } - for (int k = 0; k < i; k++) { - UndoLogRecord e = records.get(k); - e.invalidatePos(); - } - seek(first.getFilePos()); - } - return entry; - } - - /** - * Go to the right position in the file. 
- * - * @param filePos the position in the file - */ - void seek(long filePos) { - file.seek(filePos * Constants.FILE_BLOCK_SIZE); - } - - /** - * Remove the last record from the list of operations. - * - * @param trimToSize if the undo array should shrink to conserve memory - */ - void removeLast(boolean trimToSize) { - int i = records.size() - 1; - UndoLogRecord r = records.remove(i); - if (!r.isStored()) { - memoryUndo--; - } - if (trimToSize && i > 1024 && (i & 1023) == 0) { - records.trimToSize(); - } - } - - /** - * Append an undo log entry to the log. - * - * @param entry the entry - */ - void add(UndoLogRecord entry) { - records.add(entry); - if (largeTransactions) { - memoryUndo++; - if (memoryUndo > database.getMaxMemoryUndo() && - database.isPersistent() && - !database.isMultiVersion()) { - if (file == null) { - String fileName = database.createTempFile(); - file = database.openFile(fileName, "rw", false); - file.setCheckedWriting(false); - file.setLength(FileStore.HEADER_LENGTH); - } - Data buff = Data.create(database, Constants.DEFAULT_PAGE_SIZE); - for (int i = 0; i < records.size(); i++) { - UndoLogRecord r = records.get(i); - buff.checkCapacity(Constants.DEFAULT_PAGE_SIZE); - r.append(buff, this); - if (i == records.size() - 1 || buff.length() > Constants.UNDO_BLOCK_SIZE) { - storedEntriesPos.add(file.getFilePointer()); - file.write(buff.getBytes(), 0, buff.length()); - buff.reset(); - } - } - storedEntries += records.size(); - memoryUndo = 0; - records.clear(); - file.autoDelete(); - return; - } - } else { - if (!entry.isStored()) { - memoryUndo++; - } - if (memoryUndo > database.getMaxMemoryUndo() && - database.isPersistent() && - !database.isMultiVersion()) { - if (file == null) { - String fileName = database.createTempFile(); - file = database.openFile(fileName, "rw", false); - file.setCheckedWriting(false); - file.seek(FileStore.HEADER_LENGTH); - rowBuff = Data.create(database, Constants.DEFAULT_PAGE_SIZE); - Data buff = rowBuff; - for (int i = 
0; i < records.size(); i++) { - UndoLogRecord r = records.get(i); - saveIfPossible(r, buff); - } - } else { - saveIfPossible(entry, rowBuff); - } - file.autoDelete(); - } - } - } - - private void saveIfPossible(UndoLogRecord r, Data buff) { - if (!r.isStored() && r.canStore()) { - r.save(buff, file, this); - memoryUndo--; - } - } - - /** - * Get the table id for this undo log. If the table is not registered yet, - * this is done as well. - * - * @param table the table - * @return the id - */ - int getTableId(Table table) { - int id = table.getId(); - if (tables == null) { - tables = New.hashMap(); - } - // need to overwrite the old entry, because the old object - // might be deleted in the meantime - tables.put(id, table); - return id; - } - - /** - * Get the table for this id. The table must be registered for this undo log - * first by calling getTableId. - * - * @param id the table id - * @return the table object - */ - Table getTable(int id) { - return tables.get(id); - } - -} diff --git a/h2/src/main/org/h2/engine/UndoLogRecord.java b/h2/src/main/org/h2/engine/UndoLogRecord.java deleted file mode 100644 index 5ae9d16041..0000000000 --- a/h2/src/main/org/h2/engine/UndoLogRecord.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import org.h2.api.ErrorCode; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.table.Table; -import org.h2.value.Value; - -/** - * An entry in a undo log. - */ -public class UndoLogRecord { - - /** - * Operation type meaning the row was inserted. - */ - public static final short INSERT = 0; - - /** - * Operation type meaning the row was deleted. 
- */ - public static final short DELETE = 1; - - private static final int IN_MEMORY = 0, STORED = 1, IN_MEMORY_INVALID = 2; - private Table table; - private Row row; - private short operation; - private short state; - private int filePos; - - /** - * Create a new undo log record - * - * @param table the table - * @param op the operation type - * @param row the row that was deleted or inserted - */ - UndoLogRecord(Table table, short op, Row row) { - this.table = table; - this.row = row; - this.operation = op; - this.state = IN_MEMORY; - } - - /** - * Check if the log record is stored in the file. - * - * @return true if it is - */ - boolean isStored() { - return state == STORED; - } - - /** - * Check if this undo log record can be store. Only record can be stored if - * the table has a unique index. - * - * @return if it can be stored - */ - boolean canStore() { - // if large transactions are enabled, this method is not called - if (table.getUniqueIndex() != null) { - return true; - } - return false; - } - - /** - * Un-do the operation. If the row was inserted before, it is deleted now, - * and vice versa. 
- * - * @param session the session - */ - void undo(Session session) { - Database db = session.getDatabase(); - switch (operation) { - case INSERT: - if (state == IN_MEMORY_INVALID) { - state = IN_MEMORY; - } - if (db.getLockMode() == Constants.LOCK_MODE_OFF) { - if (row.isDeleted()) { - // it might have been deleted by another thread - return; - } - } - try { - row.setDeleted(false); - table.removeRow(session, row); - table.fireAfterRow(session, row, null, true); - } catch (DbException e) { - if (session.getDatabase().getLockMode() == Constants.LOCK_MODE_OFF - && e.getErrorCode() == ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) { - // it might have been deleted by another thread - // ignore - } else { - throw e; - } - } - break; - case DELETE: - try { - table.addRow(session, row); - table.fireAfterRow(session, null, row, true); - // reset session id, otherwise other sessions think - // that this row was inserted by this session - row.commit(); - } catch (DbException e) { - if (session.getDatabase().getLockMode() == Constants.LOCK_MODE_OFF - && e.getSQLException().getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { - // it might have been added by another thread - // ignore - } else { - throw e; - } - } - break; - default: - DbException.throwInternalError("op=" + operation); - } - } - - /** - * Append the row to the buffer. - * - * @param buff the buffer - * @param log the undo log - */ - void append(Data buff, UndoLog log) { - int p = buff.length(); - buff.writeInt(0); - buff.writeInt(operation); - buff.writeByte(row.isDeleted() ? 
(byte) 1 : (byte) 0); - buff.writeInt(log.getTableId(table)); - buff.writeLong(row.getKey()); - buff.writeInt(row.getSessionId()); - int count = row.getColumnCount(); - buff.writeInt(count); - for (int i = 0; i < count; i++) { - Value v = row.getValue(i); - buff.checkCapacity(buff.getValueLen(v)); - buff.writeValue(v); - } - buff.fillAligned(); - buff.setInt(p, (buff.length() - p) / Constants.FILE_BLOCK_SIZE); - } - - /** - * Save the row in the file using a buffer. - * - * @param buff the buffer - * @param file the file - * @param log the undo log - */ - void save(Data buff, FileStore file, UndoLog log) { - buff.reset(); - append(buff, log); - filePos = (int) (file.getFilePointer() / Constants.FILE_BLOCK_SIZE); - file.write(buff.getBytes(), 0, buff.length()); - row = null; - state = STORED; - } - - /** - * Load an undo log record row using a buffer. - * - * @param buff the buffer - * @param log the log - * @return the undo log record - */ - static UndoLogRecord loadFromBuffer(Data buff, UndoLog log) { - UndoLogRecord rec = new UndoLogRecord(null, (short) 0, null); - int pos = buff.length(); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - rec.load(buff, log); - buff.setPos(pos + len); - return rec; - } - - /** - * Load an undo log record row using a buffer. 
- * - * @param buff the buffer - * @param file the source file - * @param log the log - */ - void load(Data buff, FileStore file, UndoLog log) { - int min = Constants.FILE_BLOCK_SIZE; - log.seek(filePos); - buff.reset(); - file.readFully(buff.getBytes(), 0, min); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - buff.checkCapacity(len); - if (len - min > 0) { - file.readFully(buff.getBytes(), min, len - min); - } - int oldOp = operation; - load(buff, log); - if (SysProperties.CHECK) { - if (operation != oldOp) { - DbException.throwInternalError("operation=" + operation + " op=" + oldOp); - } - } - } - - private void load(Data buff, UndoLog log) { - operation = (short) buff.readInt(); - boolean deleted = buff.readByte() == 1; - table = log.getTable(buff.readInt()); - long key = buff.readLong(); - int sessionId = buff.readInt(); - int columnCount = buff.readInt(); - Value[] values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - values[i] = buff.readValue(); - } - row = new Row(values, Row.MEMORY_CALCULATE); - row.setKey(key); - row.setDeleted(deleted); - row.setSessionId(sessionId); - state = IN_MEMORY_INVALID; - } - - /** - * Get the table. - * - * @return the table - */ - public Table getTable() { - return table; - } - - /** - * Get the position in the file. - * - * @return the file position - */ - public long getFilePos() { - return filePos; - } - - /** - * This method is called after the operation was committed. - * It commits the change to the indexes. - */ - void commit() { - table.commit(operation, row); - } - - /** - * Get the row that was deleted or inserted. - * - * @return the row - */ - public Row getRow() { - return row; - } - - /** - * Change the state from IN_MEMORY to IN_MEMORY_INVALID. This method is - * called if a later record was read from the temporary file, and therefore - * the position could have changed. 
- */ - void invalidatePos() { - if (this.state == IN_MEMORY) { - state = IN_MEMORY_INVALID; - } - } -} diff --git a/h2/src/main/org/h2/engine/User.java b/h2/src/main/org/h2/engine/User.java index 3752bc215e..312516a84f 100644 --- a/h2/src/main/org/h2/engine/User.java +++ b/h2/src/main/org/h2/engine/User.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.engine; @@ -13,19 +13,20 @@ import org.h2.message.Trace; import org.h2.schema.Schema; import org.h2.security.SHA256; +import org.h2.table.DualTable; import org.h2.table.MetaTable; import org.h2.table.RangeTable; import org.h2.table.Table; +import org.h2.table.TableType; import org.h2.table.TableView; import org.h2.util.MathUtils; -import org.h2.util.New; import org.h2.util.StringUtils; import org.h2.util.Utils; /** * Represents a user object. */ -public class User extends RightOwner { +public final class User extends RightOwner { private final boolean systemUser; private byte[] salt; @@ -76,7 +77,7 @@ public void setUserPasswordHash(byte[] userPasswordHash) { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } @Override @@ -84,73 +85,6 @@ public String getCreateSQL() { return getCreateSQL(true); } - @Override - public String getDropSQL() { - return null; - } - - /** - * Checks that this user has the given rights for this database object. 
- * - * @param table the database object - * @param rightMask the rights required - * @throws DbException if this user does not have the required rights - */ - public void checkRight(Table table, int rightMask) { - if (!hasRight(table, rightMask)) { - throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, table.getSQL()); - } - } - - /** - * See if this user has the given rights for this database object. - * - * @param table the database object, or null for schema-only check - * @param rightMask the rights required - * @return true if the user has the rights - */ - public boolean hasRight(Table table, int rightMask) { - if (rightMask != Right.SELECT && !systemUser && table != null) { - table.checkWritingAllowed(); - } - if (admin) { - return true; - } - Role publicRole = database.getPublicRole(); - if (publicRole.isRightGrantedRecursive(table, rightMask)) { - return true; - } - if (table instanceof MetaTable || table instanceof RangeTable) { - // everybody has access to the metadata information - return true; - } - if (table != null) { - if (hasRight(null, Right.ALTER_ANY_SCHEMA)) { - return true; - } - String tableType = table.getTableType(); - if (Table.VIEW.equals(tableType)) { - TableView v = (TableView) table; - if (v.getOwner() == this) { - // the owner of a view has access: - // SELECT * FROM (SELECT * FROM ...) - return true; - } - } else if (tableType == null) { - // function table - return true; - } - if (table.isTemporary() && !table.isGlobalTemporary()) { - // the owner has all rights on local temporary tables - return true; - } - } - if (isRightGrantedRecursive(table, rightMask)) { - return true; - } - return false; - } - /** * Get the CREATE SQL statement for this object. 
* @@ -160,15 +94,16 @@ public boolean hasRight(Table table, int rightMask) { */ public String getCreateSQL(boolean password) { StringBuilder buff = new StringBuilder("CREATE USER IF NOT EXISTS "); - buff.append(getSQL()); + getSQL(buff, DEFAULT_SQL_FLAGS); if (comment != null) { - buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment)); + buff.append(" COMMENT "); + StringUtils.quoteStringSQL(buff, comment); } if (password) { - buff.append(" SALT '"). - append(StringUtils.convertBytesToHex(salt)). - append("' HASH '"). - append(StringUtils.convertBytesToHex(passwordHash)). + buff.append(" SALT '"); + StringUtils.convertBytesToHex(buff, salt). + append("' HASH '"); + StringUtils.convertBytesToHex(buff, passwordHash). append('\''); } else { buff.append(" PASSWORD ''"); @@ -197,8 +132,8 @@ boolean validateUserPasswordHash(byte[] userPasswordHash) { } /** - * Check if this user has admin rights. An exception is thrown if he does - * not have them. + * Checks if this user has admin rights. An exception is thrown if user + * doesn't have them. * * @throws DbException if this user is not an admin */ @@ -209,17 +144,101 @@ public void checkAdmin() { } /** - * Check if this user has schema admin rights. An exception is thrown if he - * does not have them. + * Checks if this user has schema admin rights for every schema. An + * exception is thrown if user doesn't have them. * * @throws DbException if this user is not a schema admin */ public void checkSchemaAdmin() { - if (!hasRight(null, Right.ALTER_ANY_SCHEMA)) { + if (!hasSchemaRight(null)) { throw DbException.get(ErrorCode.ADMIN_RIGHTS_REQUIRED); } } + /** + * Checks if this user has schema owner rights for the specified schema. An + * exception is thrown if user doesn't have them. 
+ * + * @param schema the schema + * @throws DbException if this user is not a schema owner + */ + public void checkSchemaOwner(Schema schema) { + if (!hasSchemaRight(schema)) { + throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, schema.getTraceSQL()); + } + } + + /** + * See if this user has owner rights for the specified schema + * + * @param schema the schema + * @return true if the user has the rights + */ + private boolean hasSchemaRight(Schema schema) { + if (admin) { + return true; + } + Role publicRole = database.getPublicRole(); + if (publicRole.isSchemaRightGrantedRecursive(schema)) { + return true; + } + return isSchemaRightGrantedRecursive(schema); + } + + /** + * Checks that this user has the given rights for the specified table. + * + * @param table the table + * @param rightMask the rights required + * @throws DbException if this user does not have the required rights + */ + public void checkTableRight(Table table, int rightMask) { + if (!hasTableRight(table, rightMask)) { + throw DbException.get(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, table.getTraceSQL()); + } + } + + /** + * See if this user has the given rights for this database object. 
+ * + * @param table the database object, or null for schema-only check + * @param rightMask the rights required + * @return true if the user has the rights + */ + public boolean hasTableRight(Table table, int rightMask) { + if (rightMask != Right.SELECT && !systemUser) { + table.checkWritingAllowed(); + } + if (admin) { + return true; + } + Role publicRole = database.getPublicRole(); + if (publicRole.isTableRightGrantedRecursive(table, rightMask)) { + return true; + } + if (table instanceof MetaTable || table instanceof DualTable || table instanceof RangeTable) { + // everybody has access to the metadata information + return true; + } + TableType tableType = table.getTableType(); + if (TableType.VIEW == tableType) { + TableView v = (TableView) table; + if (v.getOwner() == this) { + // the owner of a view has access: + // SELECT * FROM (SELECT * FROM ...) + return true; + } + } else if (tableType == null) { + // function table + return true; + } + if (table.isTemporary() && !table.isGlobalTemporary()) { + // the owner has all rights on local temporary tables + return true; + } + return isTableRightGrantedRecursive(table, rightMask); + } + @Override public int getType() { return DbObject.USER; @@ -227,7 +246,7 @@ public int getType() { @Override public ArrayList getChildren() { - ArrayList children = New.arrayList(); + ArrayList children = new ArrayList<>(); for (Right right : database.getAllRights()) { if (right.getGrantee() == this) { children.add(right); @@ -242,7 +261,7 @@ public ArrayList getChildren() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { for (Right right : database.getAllRights()) { if (right.getGrantee() == this) { database.removeDatabaseObject(session, right); @@ -255,23 +274,4 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // ok - } - - /** - * Check that this user does not own any 
schema. An exception is thrown if - * he owns one or more schemas. - * - * @throws DbException if this user owns a schema - */ - public void checkOwnsNoSchemas() { - for (Schema s : database.getAllSchemas()) { - if (this == s.getOwner()) { - throw DbException.get(ErrorCode.CANNOT_DROP_2, getName(), s.getName()); - } - } - } - } diff --git a/h2/src/main/org/h2/engine/UserAggregate.java b/h2/src/main/org/h2/engine/UserAggregate.java deleted file mode 100644 index d0b6636d77..0000000000 --- a/h2/src/main/org/h2/engine/UserAggregate.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import java.sql.Connection; -import java.sql.SQLException; - -import org.h2.api.Aggregate; -import org.h2.api.AggregateFunction; -import org.h2.command.Parser; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.table.Table; -import org.h2.util.JdbcUtils; -import org.h2.value.DataType; - -/** - * Represents a user-defined aggregate function. 
- */ -public class UserAggregate extends DbObjectBase { - - private String className; - private Class javaClass; - - public UserAggregate(Database db, int id, String name, String className, - boolean force) { - initDbObjectBase(db, id, name, Trace.FUNCTION); - this.className = className; - if (!force) { - getInstance(); - } - } - - public Aggregate getInstance() { - if (javaClass == null) { - javaClass = JdbcUtils.loadUserClass(className); - } - Object obj; - try { - obj = javaClass.newInstance(); - Aggregate agg; - if (obj instanceof Aggregate) { - agg = (Aggregate) obj; - } else { - agg = new AggregateWrapper((AggregateFunction) obj); - } - return agg; - } catch (Exception e) { - throw DbException.convert(e); - } - } - - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return "DROP AGGREGATE IF EXISTS " + getSQL(); - } - - @Override - public String getCreateSQL() { - return "CREATE FORCE AGGREGATE " + getSQL() + - " FOR " + Parser.quoteIdentifier(className); - } - - @Override - public int getType() { - return DbObject.AGGREGATE; - } - - @Override - public synchronized void removeChildrenAndResources(Session session) { - database.removeMeta(session, getId()); - className = null; - javaClass = null; - invalidate(); - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("AGGREGATE"); - } - - public String getJavaClassName() { - return this.className; - } - - /** - * Wrap {@link AggregateFunction} in order to behave as - * {@link org.h2.api.Aggregate} - **/ - private static class AggregateWrapper implements Aggregate { - private final AggregateFunction aggregateFunction; - - AggregateWrapper(AggregateFunction aggregateFunction) { - this.aggregateFunction = aggregateFunction; - } - - @Override - public void init(Connection conn) throws SQLException { - aggregateFunction.init(conn); - } - - @Override - public 
int getInternalType(int[] inputTypes) throws SQLException { - int[] sqlTypes = new int[inputTypes.length]; - for (int i = 0; i < inputTypes.length; i++) { - sqlTypes[i] = DataType.convertTypeToSQLType(inputTypes[i]); - } - return DataType.convertSQLTypeToValueType(aggregateFunction.getType(sqlTypes)); - } - - @Override - public void add(Object value) throws SQLException { - aggregateFunction.add(value); - } - - @Override - public Object getResult() throws SQLException { - return aggregateFunction.getResult(); - } - } - -} diff --git a/h2/src/main/org/h2/engine/UserBuilder.java b/h2/src/main/org/h2/engine/UserBuilder.java new file mode 100644 index 0000000000..658c80581d --- /dev/null +++ b/h2/src/main/org/h2/engine/UserBuilder.java @@ -0,0 +1,36 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.engine; + +import org.h2.security.auth.AuthenticationInfo; +import org.h2.util.MathUtils; + +public class UserBuilder { + + /** + * Build the database user starting from authentication informations. + * + * @param authenticationInfo + * authentication info + * @param database + * target database + * @param persistent + * true if the user will be persisted in the database + * @return user bean + */ + public static User buildUser(AuthenticationInfo authenticationInfo, Database database, boolean persistent) { + User user = new User(database, persistent ? database.allocateObjectId() : -1, + authenticationInfo.getFullyQualifiedName(), false); + // In case of external authentication fill the password hash with random + // data + user.setUserPasswordHash( + authenticationInfo.getRealm() == null ? 
authenticationInfo.getConnectionInfo().getUserPasswordHash() + : MathUtils.secureRandomBytes(64)); + user.setTemporary(!persistent); + return user; + } + +} diff --git a/h2/src/main/org/h2/engine/UserDataType.java b/h2/src/main/org/h2/engine/UserDataType.java deleted file mode 100644 index 34854cd05f..0000000000 --- a/h2/src/main/org/h2/engine/UserDataType.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.engine; - -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.table.Column; -import org.h2.table.Table; - -/** - * Represents a domain (user-defined data type). - */ -public class UserDataType extends DbObjectBase { - - private Column column; - - public UserDataType(Database database, int id, String name) { - initDbObjectBase(database, id, name, Trace.DATABASE); - } - - @Override - public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return "DROP DOMAIN IF EXISTS " + getSQL(); - } - - @Override - public String getCreateSQL() { - return "CREATE DOMAIN " + getSQL() + " AS " + column.getCreateSQL(); - } - - public Column getColumn() { - return column; - } - - @Override - public int getType() { - return DbObject.USER_DATATYPE; - } - - @Override - public void removeChildrenAndResources(Session session) { - database.removeMeta(session, getId()); - } - - @Override - public void checkRename() { - // ok - } - - public void setColumn(Column column) { - this.column = column; - } - -} diff --git a/h2/src/main/org/h2/engine/package.html b/h2/src/main/org/h2/engine/package.html index 5c6128996c..09d0a56fed 100644 --- a/h2/src/main/org/h2/engine/package.html +++ b/h2/src/main/org/h2/engine/package.html @@ -1,7 +1,7 @@ diff --git 
a/h2/src/main/org/h2/expression/Aggregate.java b/h2/src/main/org/h2/expression/Aggregate.java deleted file mode 100644 index adb18aa807..0000000000 --- a/h2/src/main/org/h2/expression/Aggregate.java +++ /dev/null @@ -1,634 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectOrderBy; -import org.h2.engine.Session; -import org.h2.index.Cursor; -import org.h2.index.Index; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; - -/** - * Implements the integrated aggregate functions, such as COUNT, MAX, SUM. - */ -public class Aggregate extends Expression { - - /** - * The aggregate type for COUNT(*). - */ - public static final int COUNT_ALL = 0; - - /** - * The aggregate type for COUNT(expression). - */ - public static final int COUNT = 1; - - /** - * The aggregate type for GROUP_CONCAT(...). - */ - public static final int GROUP_CONCAT = 2; - - /** - * The aggregate type for SUM(expression). - */ - static final int SUM = 3; - - /** - * The aggregate type for MIN(expression). 
- */ - static final int MIN = 4; - - /** - * The aggregate type for MAX(expression). - */ - static final int MAX = 5; - - /** - * The aggregate type for AVG(expression). - */ - static final int AVG = 6; - - /** - * The aggregate type for STDDEV_POP(expression). - */ - static final int STDDEV_POP = 7; - - /** - * The aggregate type for STDDEV_SAMP(expression). - */ - static final int STDDEV_SAMP = 8; - - /** - * The aggregate type for VAR_POP(expression). - */ - static final int VAR_POP = 9; - - /** - * The aggregate type for VAR_SAMP(expression). - */ - static final int VAR_SAMP = 10; - - /** - * The aggregate type for BOOL_OR(expression). - */ - static final int BOOL_OR = 11; - - /** - * The aggregate type for BOOL_AND(expression). - */ - static final int BOOL_AND = 12; - - /** - * The aggregate type for BOOL_OR(expression). - */ - static final int BIT_OR = 13; - - /** - * The aggregate type for BOOL_AND(expression). - */ - static final int BIT_AND = 14; - - /** - * The aggregate type for SELECTIVITY(expression). - */ - static final int SELECTIVITY = 15; - - /** - * The aggregate type for HISTOGRAM(expression). - */ - static final int HISTOGRAM = 16; - - private static final HashMap AGGREGATES = New.hashMap(); - - private final int type; - private final Select select; - private final boolean distinct; - - private Expression on; - private Expression groupConcatSeparator; - private ArrayList groupConcatOrderList; - private SortOrder groupConcatSort; - private int dataType, scale; - private long precision; - private int displaySize; - private int lastGroupRowId; - - /** - * Create a new aggregate object. 
- * - * @param type the aggregate type - * @param on the aggregated expression - * @param select the select statement - * @param distinct if distinct is used - */ - public Aggregate(int type, Expression on, Select select, boolean distinct) { - this.type = type; - this.on = on; - this.select = select; - this.distinct = distinct; - } - - static { - addAggregate("COUNT", COUNT); - addAggregate("SUM", SUM); - addAggregate("MIN", MIN); - addAggregate("MAX", MAX); - addAggregate("AVG", AVG); - addAggregate("GROUP_CONCAT", GROUP_CONCAT); - // PostgreSQL compatibility: string_agg(expression, delimiter) - addAggregate("STRING_AGG", GROUP_CONCAT); - addAggregate("STDDEV_SAMP", STDDEV_SAMP); - addAggregate("STDDEV", STDDEV_SAMP); - addAggregate("STDDEV_POP", STDDEV_POP); - addAggregate("STDDEVP", STDDEV_POP); - addAggregate("VAR_POP", VAR_POP); - addAggregate("VARP", VAR_POP); - addAggregate("VAR_SAMP", VAR_SAMP); - addAggregate("VAR", VAR_SAMP); - addAggregate("VARIANCE", VAR_SAMP); - addAggregate("BOOL_OR", BOOL_OR); - // HSQLDB compatibility, but conflicts with x > EVERY(...) - addAggregate("SOME", BOOL_OR); - addAggregate("BOOL_AND", BOOL_AND); - // HSQLDB compatibility, but conflicts with x > SOME(...) - addAggregate("EVERY", BOOL_AND); - addAggregate("SELECTIVITY", SELECTIVITY); - addAggregate("HISTOGRAM", HISTOGRAM); - addAggregate("BIT_OR", BIT_OR); - addAggregate("BIT_AND", BIT_AND); - } - - private static void addAggregate(String name, int type) { - AGGREGATES.put(name, type); - } - - /** - * Get the aggregate type for this name, or -1 if no aggregate has been - * found. - * - * @param name the aggregate function name - * @return -1 if no aggregate function has been found, or the aggregate type - */ - public static int getAggregateType(String name) { - Integer type = AGGREGATES.get(name); - return type == null ? -1 : type.intValue(); - } - - /** - * Set the order for GROUP_CONCAT() aggregate. 
- * - * @param orderBy the order by list - */ - public void setGroupConcatOrder(ArrayList orderBy) { - this.groupConcatOrderList = orderBy; - } - - /** - * Set the separator for the GROUP_CONCAT() aggregate. - * - * @param separator the separator expression - */ - public void setGroupConcatSeparator(Expression separator) { - this.groupConcatSeparator = separator; - } - - private SortOrder initOrder(Session session) { - int size = groupConcatOrderList.size(); - int[] index = new int[size]; - int[] sortType = new int[size]; - for (int i = 0; i < size; i++) { - SelectOrderBy o = groupConcatOrderList.get(i); - index[i] = i + 1; - int order = o.descending ? SortOrder.DESCENDING : SortOrder.ASCENDING; - sortType[i] = order; - } - return new SortOrder(session.getDatabase(), index, sortType, null); - } - - @Override - public void updateAggregate(Session session) { - // TODO aggregates: check nested MIN(MAX(ID)) and so on - // if (on != null) { - // on.updateAggregate(); - // } - HashMap group = select.getCurrentGroup(); - if (group == null) { - // this is a different level (the enclosing query) - return; - } - - int groupRowId = select.getCurrentGroupRowId(); - if (lastGroupRowId == groupRowId) { - // already visited - return; - } - lastGroupRowId = groupRowId; - - AggregateData data = (AggregateData) group.get(this); - if (data == null) { - data = AggregateData.create(type); - group.put(this, data); - } - Value v = on == null ? 
null : on.getValue(session); - if (type == GROUP_CONCAT) { - if (v != ValueNull.INSTANCE) { - v = v.convertTo(Value.STRING); - if (groupConcatOrderList != null) { - int size = groupConcatOrderList.size(); - Value[] array = new Value[1 + size]; - array[0] = v; - for (int i = 0; i < size; i++) { - SelectOrderBy o = groupConcatOrderList.get(i); - array[i + 1] = o.expression.getValue(session); - } - v = ValueArray.get(array); - } - } - } - data.add(session.getDatabase(), dataType, distinct, v); - } - - @Override - public Value getValue(Session session) { - if (select.isQuickAggregateQuery()) { - switch (type) { - case COUNT: - case COUNT_ALL: - Table table = select.getTopTableFilter().getTable(); - return ValueLong.get(table.getRowCount(session)); - case MIN: - case MAX: - boolean first = type == MIN; - Index index = getColumnIndex(); - int sortType = index.getIndexColumns()[0].sortType; - if ((sortType & SortOrder.DESCENDING) != 0) { - first = !first; - } - Cursor cursor = index.findFirstOrLast(session, first); - SearchRow row = cursor.getSearchRow(); - Value v; - if (row == null) { - v = ValueNull.INSTANCE; - } else { - v = row.getValue(index.getColumns()[0].getColumnId()); - } - return v; - default: - DbException.throwInternalError("type=" + type); - } - } - HashMap group = select.getCurrentGroup(); - if (group == null) { - throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getSQL()); - } - AggregateData data = (AggregateData) group.get(this); - if (data == null) { - data = AggregateData.create(type); - } - Value v = data.getValue(session.getDatabase(), dataType, distinct); - if (type == GROUP_CONCAT) { - ArrayList list = ((AggregateDataGroupConcat) data).getList(); - if (list == null || list.size() == 0) { - return ValueNull.INSTANCE; - } - if (groupConcatOrderList != null) { - final SortOrder sortOrder = groupConcatSort; - Collections.sort(list, new Comparator() { - @Override - public int compare(Value v1, Value v2) { - Value[] a1 = ((ValueArray) 
v1).getList(); - Value[] a2 = ((ValueArray) v2).getList(); - return sortOrder.compare(a1, a2); - } - }); - } - StatementBuilder buff = new StatementBuilder(); - String sep = groupConcatSeparator == null ? - "," : groupConcatSeparator.getValue(session).getString(); - for (Value val : list) { - String s; - if (val.getType() == Value.ARRAY) { - s = ((ValueArray) val).getList()[0].getString(); - } else { - s = val.getString(); - } - if (s == null) { - continue; - } - if (sep != null) { - buff.appendExceptFirst(sep); - } - buff.append(s); - } - v = ValueString.get(buff.toString()); - } - return v; - } - - @Override - public int getType() { - return dataType; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - if (on != null) { - on.mapColumns(resolver, level); - } - if (groupConcatOrderList != null) { - for (SelectOrderBy o : groupConcatOrderList) { - o.expression.mapColumns(resolver, level); - } - } - if (groupConcatSeparator != null) { - groupConcatSeparator.mapColumns(resolver, level); - } - } - - @Override - public Expression optimize(Session session) { - if (on != null) { - on = on.optimize(session); - dataType = on.getType(); - scale = on.getScale(); - precision = on.getPrecision(); - displaySize = on.getDisplaySize(); - } - if (groupConcatOrderList != null) { - for (SelectOrderBy o : groupConcatOrderList) { - o.expression = o.expression.optimize(session); - } - groupConcatSort = initOrder(session); - } - if (groupConcatSeparator != null) { - groupConcatSeparator = groupConcatSeparator.optimize(session); - } - switch (type) { - case GROUP_CONCAT: - dataType = Value.STRING; - scale = 0; - precision = displaySize = Integer.MAX_VALUE; - break; - case COUNT_ALL: - case COUNT: - dataType = Value.LONG; - scale = 0; - precision = ValueLong.PRECISION; - displaySize = ValueLong.DISPLAY_SIZE; - break; - case SELECTIVITY: - dataType = Value.INT; - scale = 0; - precision = ValueInt.PRECISION; - displaySize = ValueInt.DISPLAY_SIZE; - break; - 
case HISTOGRAM: - dataType = Value.ARRAY; - scale = 0; - precision = displaySize = Integer.MAX_VALUE; - break; - case SUM: - if (dataType == Value.BOOLEAN) { - // example: sum(id > 3) (count the rows) - dataType = Value.LONG; - } else if (!DataType.supportsAdd(dataType)) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL()); - } else { - dataType = DataType.getAddProofType(dataType); - } - break; - case AVG: - if (!DataType.supportsAdd(dataType)) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL()); - } - break; - case MIN: - case MAX: - break; - case STDDEV_POP: - case STDDEV_SAMP: - case VAR_POP: - case VAR_SAMP: - dataType = Value.DOUBLE; - precision = ValueDouble.PRECISION; - displaySize = ValueDouble.DISPLAY_SIZE; - scale = 0; - break; - case BOOL_AND: - case BOOL_OR: - dataType = Value.BOOLEAN; - precision = ValueBoolean.PRECISION; - displaySize = ValueBoolean.DISPLAY_SIZE; - scale = 0; - break; - case BIT_AND: - case BIT_OR: - if (!DataType.supportsAdd(dataType)) { - throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getSQL()); - } - break; - default: - DbException.throwInternalError("type=" + type); - } - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - if (on != null) { - on.setEvaluatable(tableFilter, b); - } - if (groupConcatOrderList != null) { - for (SelectOrderBy o : groupConcatOrderList) { - o.expression.setEvaluatable(tableFilter, b); - } - } - if (groupConcatSeparator != null) { - groupConcatSeparator.setEvaluatable(tableFilter, b); - } - } - - @Override - public int getScale() { - return scale; - } - - @Override - public long getPrecision() { - return precision; - } - - @Override - public int getDisplaySize() { - return displaySize; - } - - private String getSQLGroupConcat() { - StatementBuilder buff = new StatementBuilder("GROUP_CONCAT("); - if (distinct) { - buff.append("DISTINCT "); - } - buff.append(on.getSQL()); - if 
(groupConcatOrderList != null) { - buff.append(" ORDER BY "); - for (SelectOrderBy o : groupConcatOrderList) { - buff.appendExceptFirst(", "); - buff.append(o.expression.getSQL()); - if (o.descending) { - buff.append(" DESC"); - } - } - } - if (groupConcatSeparator != null) { - buff.append(" SEPARATOR ").append(groupConcatSeparator.getSQL()); - } - return buff.append(')').toString(); - } - - @Override - public String getSQL() { - String text; - switch (type) { - case GROUP_CONCAT: - return getSQLGroupConcat(); - case COUNT_ALL: - return "COUNT(*)"; - case COUNT: - text = "COUNT"; - break; - case SELECTIVITY: - text = "SELECTIVITY"; - break; - case HISTOGRAM: - text = "HISTOGRAM"; - break; - case SUM: - text = "SUM"; - break; - case MIN: - text = "MIN"; - break; - case MAX: - text = "MAX"; - break; - case AVG: - text = "AVG"; - break; - case STDDEV_POP: - text = "STDDEV_POP"; - break; - case STDDEV_SAMP: - text = "STDDEV_SAMP"; - break; - case VAR_POP: - text = "VAR_POP"; - break; - case VAR_SAMP: - text = "VAR_SAMP"; - break; - case BOOL_AND: - text = "BOOL_AND"; - break; - case BOOL_OR: - text = "BOOL_OR"; - break; - case BIT_AND: - text = "BIT_AND"; - break; - case BIT_OR: - text = "BIT_OR"; - break; - default: - throw DbException.throwInternalError("type=" + type); - } - if (distinct) { - return text + "(DISTINCT " + on.getSQL() + ")"; - } - return text + StringUtils.enclose(on.getSQL()); - } - - private Index getColumnIndex() { - if (on instanceof ExpressionColumn) { - ExpressionColumn col = (ExpressionColumn) on; - Column column = col.getColumn(); - TableFilter filter = col.getTableFilter(); - if (filter != null) { - Table table = filter.getTable(); - Index index = table.getIndexForColumn(column); - return index; - } - } - return null; - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - if (visitor.getType() == ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL) { - switch (type) { - case COUNT: - if (!distinct && on.getNullable() == 
Column.NOT_NULLABLE) { - return visitor.getTable().canGetRowCount(); - } - return false; - case COUNT_ALL: - return visitor.getTable().canGetRowCount(); - case MIN: - case MAX: - Index index = getColumnIndex(); - return index != null; - default: - return false; - } - } - if (on != null && !on.isEverything(visitor)) { - return false; - } - if (groupConcatSeparator != null && - !groupConcatSeparator.isEverything(visitor)) { - return false; - } - if (groupConcatOrderList != null) { - for (int i = 0, size = groupConcatOrderList.size(); i < size; i++) { - SelectOrderBy o = groupConcatOrderList.get(i); - if (!o.expression.isEverything(visitor)) { - return false; - } - } - } - return true; - } - - @Override - public int getCost() { - return (on == null) ? 1 : on.getCost() + 1; - } - -} diff --git a/h2/src/main/org/h2/expression/AggregateData.java b/h2/src/main/org/h2/expression/AggregateData.java deleted file mode 100644 index a0ff6578c5..0000000000 --- a/h2/src/main/org/h2/expression/AggregateData.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Database; -import org.h2.value.Value; - -/** - * Abstract class for the computation of an aggregate. - */ -abstract class AggregateData { - - /** - * Create an AggregateData object of the correct sub-type. 
- * - * @param aggregateType the type of the aggregate operation - * @return the aggregate data object of the specified type - */ - static AggregateData create(int aggregateType) { - if (aggregateType == Aggregate.SELECTIVITY) { - return new AggregateDataSelectivity(); - } else if (aggregateType == Aggregate.GROUP_CONCAT) { - return new AggregateDataGroupConcat(); - } else if (aggregateType == Aggregate.COUNT_ALL) { - return new AggregateDataCountAll(); - } else if (aggregateType == Aggregate.COUNT) { - return new AggregateDataCount(); - } else if (aggregateType == Aggregate.HISTOGRAM) { - return new AggregateDataHistogram(); - } else { - return new AggregateDataDefault(aggregateType); - } - } - - /** - * Add a value to this aggregate. - * - * @param database the database - * @param dataType the datatype of the computed result - * @param distinct if the calculation should be distinct - * @param v the value - */ - abstract void add(Database database, int dataType, boolean distinct, Value v); - - /** - * Get the aggregate result. - * - * @param database the database - * @param dataType the datatype of the computed result - * @param distinct if distinct is used - * @return the value - */ - abstract Value getValue(Database database, int dataType, boolean distinct); -} diff --git a/h2/src/main/org/h2/expression/AggregateDataCount.java b/h2/src/main/org/h2/expression/AggregateDataCount.java deleted file mode 100644 index dac272f5bf..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataCount.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Database; -import org.h2.util.ValueHashMap; -import org.h2.value.Value; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; - -/** - * Data stored while calculating an aggregate. 
- */ -class AggregateDataCount extends AggregateData { - private long count; - private ValueHashMap distinctValues; - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - if (v == ValueNull.INSTANCE) { - return; - } - count++; - if (distinct) { - if (distinctValues == null) { - distinctValues = ValueHashMap.newInstance(); - } - distinctValues.put(v, this); - return; - } - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - if (distinctValues != null) { - count = distinctValues.size(); - } else { - count = 0; - } - } - Value v = ValueLong.get(count); - return v.convertTo(dataType); - } - -} diff --git a/h2/src/main/org/h2/expression/AggregateDataCountAll.java b/h2/src/main/org/h2/expression/AggregateDataCountAll.java deleted file mode 100644 index 2c647e5105..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataCountAll.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Database; -import org.h2.message.DbException; -import org.h2.value.Value; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; - -/** - * Data stored while calculating a COUNT(*) aggregate. - */ -class AggregateDataCountAll extends AggregateData { - private long count; - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - if (distinct) { - throw DbException.throwInternalError(); - } - count++; - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - throw DbException.throwInternalError(); - } - Value v = ValueLong.get(count); - return v == null ? 
ValueNull.INSTANCE : v.convertTo(dataType); - } - -} diff --git a/h2/src/main/org/h2/expression/AggregateDataDefault.java b/h2/src/main/org/h2/expression/AggregateDataDefault.java deleted file mode 100644 index aafe23c150..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataDefault.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Database; -import org.h2.message.DbException; -import org.h2.util.ValueHashMap; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueDouble; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; - -/** - * Data stored while calculating an aggregate. - */ -class AggregateDataDefault extends AggregateData { - private final int aggregateType; - private long count; - private ValueHashMap distinctValues; - private Value value; - private double m2, mean; - - /** - * @param aggregateType the type of the aggregate operation - */ - AggregateDataDefault(int aggregateType) { - this.aggregateType = aggregateType; - } - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - if (v == ValueNull.INSTANCE) { - return; - } - count++; - if (distinct) { - if (distinctValues == null) { - distinctValues = ValueHashMap.newInstance(); - } - distinctValues.put(v, this); - return; - } - switch (aggregateType) { - case Aggregate.SUM: - if (value == null) { - value = v.convertTo(dataType); - } else { - v = v.convertTo(value.getType()); - value = value.add(v); - } - break; - case Aggregate.AVG: - if (value == null) { - value = v.convertTo(DataType.getAddProofType(dataType)); - } else { - v = v.convertTo(value.getType()); - value = value.add(v); - } - break; - case Aggregate.MIN: - if (value == null || database.compare(v, value) < 0) { - value 
= v; - } - break; - case Aggregate.MAX: - if (value == null || database.compare(v, value) > 0) { - value = v; - } - break; - case Aggregate.STDDEV_POP: - case Aggregate.STDDEV_SAMP: - case Aggregate.VAR_POP: - case Aggregate.VAR_SAMP: { - // Using Welford's method, see also - // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - // http://www.johndcook.com/standard_deviation.html - double x = v.getDouble(); - if (count == 1) { - mean = x; - m2 = 0; - } else { - double delta = x - mean; - mean += delta / count; - m2 += delta * (x - mean); - } - break; - } - case Aggregate.BOOL_AND: - v = v.convertTo(Value.BOOLEAN); - if (value == null) { - value = v; - } else { - value = ValueBoolean.get(value.getBoolean().booleanValue() && - v.getBoolean().booleanValue()); - } - break; - case Aggregate.BOOL_OR: - v = v.convertTo(Value.BOOLEAN); - if (value == null) { - value = v; - } else { - value = ValueBoolean.get(value.getBoolean().booleanValue() || - v.getBoolean().booleanValue()); - } - break; - case Aggregate.BIT_AND: - if (value == null) { - value = v.convertTo(dataType); - } else { - value = ValueLong.get(value.getLong() & v.getLong()).convertTo(dataType); - } - break; - case Aggregate.BIT_OR: - if (value == null) { - value = v.convertTo(dataType); - } else { - value = ValueLong.get(value.getLong() | v.getLong()).convertTo(dataType); - } - break; - default: - DbException.throwInternalError("type=" + aggregateType); - } - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - count = 0; - groupDistinct(database, dataType); - } - Value v = null; - switch (aggregateType) { - case Aggregate.SUM: - case Aggregate.MIN: - case Aggregate.MAX: - case Aggregate.BIT_OR: - case Aggregate.BIT_AND: - case Aggregate.BOOL_OR: - case Aggregate.BOOL_AND: - v = value; - break; - case Aggregate.AVG: - if (value != null) { - v = divide(value, count); - } - break; - case Aggregate.STDDEV_POP: { - if (count < 1) { - return 
ValueNull.INSTANCE; - } - v = ValueDouble.get(Math.sqrt(m2 / count)); - break; - } - case Aggregate.STDDEV_SAMP: { - if (count < 2) { - return ValueNull.INSTANCE; - } - v = ValueDouble.get(Math.sqrt(m2 / (count - 1))); - break; - } - case Aggregate.VAR_POP: { - if (count < 1) { - return ValueNull.INSTANCE; - } - v = ValueDouble.get(m2 / count); - break; - } - case Aggregate.VAR_SAMP: { - if (count < 2) { - return ValueNull.INSTANCE; - } - v = ValueDouble.get(m2 / (count - 1)); - break; - } - default: - DbException.throwInternalError("type=" + aggregateType); - } - return v == null ? ValueNull.INSTANCE : v.convertTo(dataType); - } - - private static Value divide(Value a, long by) { - if (by == 0) { - return ValueNull.INSTANCE; - } - int type = Value.getHigherOrder(a.getType(), Value.LONG); - Value b = ValueLong.get(by).convertTo(type); - a = a.convertTo(type).divide(b); - return a; - } - - private void groupDistinct(Database database, int dataType) { - if (distinctValues == null) { - return; - } - count = 0; - for (Value v : distinctValues.keys()) { - add(database, dataType, false, v); - } - } - -} diff --git a/h2/src/main/org/h2/expression/AggregateDataGroupConcat.java b/h2/src/main/org/h2/expression/AggregateDataGroupConcat.java deleted file mode 100644 index 308958c9eb..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataGroupConcat.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.ArrayList; -import org.h2.engine.Database; -import org.h2.util.New; -import org.h2.util.ValueHashMap; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Data stored while calculating a GROUP_CONCAT aggregate. 
- */ -class AggregateDataGroupConcat extends AggregateData { - private ArrayList list; - private ValueHashMap distinctValues; - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - if (v == ValueNull.INSTANCE) { - return; - } - if (distinct) { - if (distinctValues == null) { - distinctValues = ValueHashMap.newInstance(); - } - distinctValues.put(v, this); - return; - } - if (list == null) { - list = New.arrayList(); - } - list.add(v); - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - groupDistinct(database, dataType); - } - return null; - } - - ArrayList getList() { - return list; - } - - private void groupDistinct(Database database, int dataType) { - if (distinctValues == null) { - return; - } - for (Value v : distinctValues.keys()) { - add(database, dataType, false, v); - } - } -} diff --git a/h2/src/main/org/h2/expression/AggregateDataHistogram.java b/h2/src/main/org/h2/expression/AggregateDataHistogram.java deleted file mode 100644 index 3709b1910d..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataHistogram.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.Arrays; -import java.util.Comparator; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.util.ValueHashMap; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; - -/** - * Data stored while calculating a HISTOGRAM aggregate. 
- */ -class AggregateDataHistogram extends AggregateData { - private long count; - private ValueHashMap distinctValues; - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - if (distinctValues == null) { - distinctValues = ValueHashMap.newInstance(); - } - AggregateDataHistogram a = distinctValues.get(v); - if (a == null) { - if (distinctValues.size() < Constants.SELECTIVITY_DISTINCT_COUNT) { - a = new AggregateDataHistogram(); - distinctValues.put(v, a); - } - } - if (a != null) { - a.count++; - } - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - count = 0; - groupDistinct(database, dataType); - } - ValueArray[] values = new ValueArray[distinctValues.size()]; - int i = 0; - for (Value dv : distinctValues.keys()) { - AggregateDataHistogram d = distinctValues.get(dv); - values[i] = ValueArray.get(new Value[] { dv, ValueLong.get(d.count) }); - i++; - } - final CompareMode compareMode = database.getCompareMode(); - Arrays.sort(values, new Comparator() { - @Override - public int compare(ValueArray v1, ValueArray v2) { - Value a1 = v1.getList()[0]; - Value a2 = v2.getList()[0]; - return a1.compareTo(a2, compareMode); - } - }); - Value v = ValueArray.get(values); - return v.convertTo(dataType); - } - - private void groupDistinct(Database database, int dataType) { - if (distinctValues == null) { - return; - } - count = 0; - for (Value v : distinctValues.keys()) { - add(database, dataType, false, v); - } - } - -} diff --git a/h2/src/main/org/h2/expression/AggregateDataSelectivity.java b/h2/src/main/org/h2/expression/AggregateDataSelectivity.java deleted file mode 100644 index 118219972b..0000000000 --- a/h2/src/main/org/h2/expression/AggregateDataSelectivity.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.util.IntIntHashMap; -import org.h2.value.Value; -import org.h2.value.ValueInt; - -/** - * Data stored while calculating a SELECTIVITY aggregate. - */ -class AggregateDataSelectivity extends AggregateData { - private long count; - private IntIntHashMap distinctHashes; - private double m2; - - @Override - void add(Database database, int dataType, boolean distinct, Value v) { - count++; - if (distinctHashes == null) { - distinctHashes = new IntIntHashMap(); - } - int size = distinctHashes.size(); - if (size > Constants.SELECTIVITY_DISTINCT_COUNT) { - distinctHashes = new IntIntHashMap(); - m2 += size; - } - int hash = v.hashCode(); - // the value -1 is not supported - distinctHashes.put(hash, 1); - } - - @Override - Value getValue(Database database, int dataType, boolean distinct) { - if (distinct) { - count = 0; - } - Value v = null; - int s = 0; - if (count == 0) { - s = 0; - } else { - m2 += distinctHashes.size(); - m2 = 100 * m2 / count; - s = (int) m2; - s = s <= 0 ? 1 : s > 100 ? 100 : s; - } - v = ValueInt.get(s); - return v.convertTo(dataType); - } -} diff --git a/h2/src/main/org/h2/expression/Alias.java b/h2/src/main/org/h2/expression/Alias.java index 800d3cdd6e..afae60cf28 100644 --- a/h2/src/main/org/h2/expression/Alias.java +++ b/h2/src/main/org/h2/expression/Alias.java @@ -1,20 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.util.ParserUtil; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** * A column alias as in SELECT 'Hello' AS NAME ... */ -public class Alias extends Expression { +public final class Alias extends Expression { private final String alias; private Expression expr; @@ -32,22 +33,22 @@ public Expression getNonAliasExpression() { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return expr.getValue(session); } @Override - public int getType() { + public TypeInfo getType() { return expr.getType(); } @Override - public void mapColumns(ColumnResolver resolver, int level) { - expr.mapColumns(resolver, level); + public void mapColumns(ColumnResolver resolver, int level, int state) { + expr.mapColumns(resolver, level, state); } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { expr = expr.optimize(session); return this; } @@ -58,37 +59,28 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public int getScale() { - return expr.getScale(); + public boolean isIdentity() { + return expr.isIdentity(); } @Override - public long getPrecision() { - return expr.getPrecision(); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + expr.getUnenclosedSQL(builder, sqlFlags).append(" AS "); + return ParserUtil.quoteIdentifier(builder, alias, sqlFlags); } @Override - public int getDisplaySize() { - return expr.getDisplaySize(); + public void updateAggregate(SessionLocal session, int stage) { + expr.updateAggregate(session, stage); } @Override - public boolean isAutoIncrement() { - return expr.isAutoIncrement(); - } - - @Override - public String getSQL() { - return 
expr.getSQL() + " AS " + Parser.quoteIdentifier(alias); - } - - @Override - public void updateAggregate(Session session) { - expr.updateAggregate(session); + public String getAlias(SessionLocal session, int columnIndex) { + return alias; } @Override - public String getAlias() { + public String getColumnNameForView(SessionLocal session, int columnIndex) { return alias; } @@ -107,20 +99,28 @@ public int getCost() { return expr.getCost(); } + @Override + public String getSchemaName() { + if (aliasColumnName) { + return null; + } + return expr.getSchemaName(); + } + @Override public String getTableName() { if (aliasColumnName) { - return super.getTableName(); + return null; } return expr.getTableName(); } @Override - public String getColumnName() { + public String getColumnName(SessionLocal session, int columnIndex) { if (!(expr instanceof ExpressionColumn) || aliasColumnName) { - return super.getColumnName(); + return alias; } - return expr.getColumnName(); + return expr.getColumnName(session, columnIndex); } } diff --git a/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java b/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java new file mode 100644 index 0000000000..9ed16bd3e5 --- /dev/null +++ b/h2/src/main/org/h2/expression/ArrayConstructorByQuery.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; + +/** + * Array value constructor by query. 
+ */ +public final class ArrayConstructorByQuery extends Expression { + + /** + * The subquery. + */ + private final Query query; + + private TypeInfo componentType, type; + + /** + * Creates new instance of array value constructor by query. + * + * @param query + * the query + */ + public ArrayConstructorByQuery(Query query) { + this.query = query; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.indent(builder.append("ARRAY ("), query.getPlanSQL(sqlFlags), 4, false).append(')'); + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + ArrayList values = new ArrayList<>(); + try (ResultInterface result = query.query(0)) { + while (result.next()) { + values.add(result.currentRow()[0]); + } + } + return ValueArray.get(componentType, values.toArray(new Value[0]), session); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + query.mapColumns(resolver, level + 1); + } + + @Override + public Expression optimize(SessionLocal session) { + query.prepare(); + if (query.getColumnCount() != 1) { + throw DbException.get(ErrorCode.SUBQUERY_IS_NOT_SINGLE_COLUMN); + } + componentType = query.getExpressions().get(0).getType(); + type = TypeInfo.getTypeInfo(Value.ARRAY, -1L, -1, componentType); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + query.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + query.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return query.isEverything(visitor); + } + + @Override + public int getCost() { + return query.getCostAsExpression(); + } + +} diff --git a/h2/src/main/org/h2/expression/ArrayElementReference.java 
b/h2/src/main/org/h2/expression/ArrayElementReference.java new file mode 100644 index 0000000000..d02245e968 --- /dev/null +++ b/h2/src/main/org/h2/expression/ArrayElementReference.java @@ -0,0 +1,67 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueNull; + +/** + * Array element reference. + */ +public final class ArrayElementReference extends Operation2 { + + public ArrayElementReference(Expression left, Expression right) { + super(left, right); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append('['); + return right.getUnenclosedSQL(builder, sqlFlags).append(']'); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r = right.getValue(session); + if (l != ValueNull.INSTANCE && r != ValueNull.INSTANCE) { + Value[] list = ((ValueArray) l).getList(); + int element = r.getInt(); + int cardinality = list.length; + if (element >= 1 && element <= cardinality) { + return list[element - 1]; + } + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(element), "1.." 
+ cardinality); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + TypeInfo leftType = left.getType(); + switch (leftType.getValueType()) { + case Value.NULL: + return ValueExpression.NULL; + case Value.ARRAY: + type = (TypeInfo) leftType.getExtTypeInfo(); + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.get(getValue(session), type); + } + break; + default: + throw DbException.getInvalidExpressionTypeException("Array", left); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/BinaryOperation.java b/h2/src/main/org/h2/expression/BinaryOperation.java new file mode 100644 index 0000000000..9c910515e6 --- /dev/null +++ b/h2/src/main/org/h2/expression/BinaryOperation.java @@ -0,0 +1,447 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.expression.IntervalOperation.IntervalOpType; +import org.h2.expression.function.DateTimeFunction; +import org.h2.message.DbException; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; + +/** + * A mathematical expression, or string concatenation. + */ +public class BinaryOperation extends Operation2 { + + public enum OpType { + /** + * This operation represents an addition as in 1 + 2. + */ + PLUS, + + /** + * This operation represents a subtraction as in 2 - 1. + */ + MINUS, + + /** + * This operation represents a multiplication as in 2 * 3. + */ + MULTIPLY, + + /** + * This operation represents a division as in 4 / 2. 
+ */ + DIVIDE + } + + private OpType opType; + private TypeInfo forcedType; + private boolean convertRight = true; + + public BinaryOperation(OpType opType, Expression left, Expression right) { + super(left, right); + this.opType = opType; + } + + /** + * Sets a forced data type of a datetime minus datetime operation. + * + * @param forcedType the forced data type + */ + public void setForcedType(TypeInfo forcedType) { + if (opType != OpType.MINUS) { + throw getUnexpectedForcedTypeException(); + } + this.forcedType = forcedType; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + // don't remove the space, otherwise it might end up some thing like + // --1 which is a line remark + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(' ').append(getOperationToken()).append(' '); + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + private String getOperationToken() { + switch (opType) { + case PLUS: + return "+"; + case MINUS: + return "-"; + case MULTIPLY: + return "*"; + case DIVIDE: + return "/"; + default: + throw DbException.getInternalError("opType=" + opType); + } + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session).convertTo(type, session); + Value r = right.getValue(session); + if (convertRight) { + r = r.convertTo(type, session); + } + switch (opType) { + case PLUS: + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return l.add(r); + case MINUS: + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return l.subtract(r); + case MULTIPLY: + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return l.multiply(r); + case DIVIDE: + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return l.divide(r, type); + 
default: + throw DbException.getInternalError("type=" + opType); + } + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + TypeInfo leftType = left.getType(), rightType = right.getType(); + int l = leftType.getValueType(), r = rightType.getValueType(); + if ((l == Value.NULL && r == Value.NULL) || (l == Value.UNKNOWN && r == Value.UNKNOWN)) { + // (? + ?) - use decimal by default (the most safe data type) or + // string when text concatenation with + is enabled + if (opType == OpType.PLUS && session.getDatabase().getMode().allowPlusForStringConcat) { + return new ConcatenationOperation(left, right).optimize(session); + } else { + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; + } + } else if (DataType.isIntervalType(l) || DataType.isIntervalType(r)) { + if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } + return optimizeInterval(l, r); + } else if (DataType.isDateTimeType(l) || DataType.isDateTimeType(r)) { + return optimizeDateTime(session, l, r); + } else if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } else { + int dataType = Value.getHigherOrder(l, r); + if (dataType == Value.NUMERIC) { + optimizeNumeric(leftType, rightType); + } else if (dataType == Value.DECFLOAT) { + optimizeDecfloat(leftType, rightType); + } else if (dataType == Value.ENUM) { + type = TypeInfo.TYPE_INTEGER; + } else if (DataType.isCharacterStringType(dataType) + && opType == OpType.PLUS && session.getDatabase().getMode().allowPlusForStringConcat) { + return new ConcatenationOperation(left, right).optimize(session); + } else { + type = TypeInfo.getTypeInfo(dataType); + } + } + if (left.isConstant() && right.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + private void optimizeNumeric(TypeInfo leftType, TypeInfo rightType) { + leftType = leftType.toNumericType(); + rightType = rightType.toNumericType(); + long 
leftPrecision = leftType.getPrecision(), rightPrecision = rightType.getPrecision(); + int leftScale = leftType.getScale(), rightScale = rightType.getScale(); + long precision; + int scale; + switch (opType) { + case PLUS: + case MINUS: + // Precision is implementation-defined. + // Scale must be max(leftScale, rightScale). + // Choose the largest scale and adjust the precision of other + // argument. + if (leftScale < rightScale) { + leftPrecision += rightScale - leftScale; + scale = rightScale; + } else { + rightPrecision += leftScale - rightScale; + scale = leftScale; + } + // Add one extra digit to the largest precision. + precision = Math.max(leftPrecision, rightPrecision) + 1; + break; + case MULTIPLY: + // Precision is implementation-defined. + // Scale must be leftScale + rightScale. + // Use sum of precisions. + precision = leftPrecision + rightPrecision; + scale = leftScale + rightScale; + break; + case DIVIDE: { + // Precision and scale are implementation-defined. + long scaleAsLong = leftScale - rightScale + rightPrecision * 2; + if (scaleAsLong >= ValueNumeric.MAXIMUM_SCALE) { + scale = ValueNumeric.MAXIMUM_SCALE; + } else if (scaleAsLong <= 0) { + scale = 0; + } else { + scale = (int) scaleAsLong; + } + // Divider can be effectively multiplied by no more than + // 10^rightScale, so add rightScale to its precision and adjust the + // result to the changes in scale. + precision = leftPrecision + rightScale - leftScale + scale; + break; + } + default: + throw DbException.getInternalError("type=" + opType); + } + type = TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, null); + } + + private void optimizeDecfloat(TypeInfo leftType, TypeInfo rightType) { + leftType = leftType.toDecfloatType(); + rightType = rightType.toDecfloatType(); + long leftPrecision = leftType.getPrecision(), rightPrecision = rightType.getPrecision(); + long precision; + switch (opType) { + case PLUS: + case MINUS: + case DIVIDE: + // Add one extra digit to the largest precision. 
+ precision = Math.max(leftPrecision, rightPrecision) + 1; + break; + case MULTIPLY: + // Use sum of precisions. + precision = leftPrecision + rightPrecision; + break; + default: + throw DbException.getInternalError("type=" + opType); + } + type = TypeInfo.getTypeInfo(Value.DECFLOAT, precision, 0, null); + } + + private Expression optimizeInterval(int l, int r) { + boolean lInterval = false, lNumeric = false, lDateTime = false; + if (DataType.isIntervalType(l)) { + lInterval = true; + } else if (DataType.isNumericType(l)) { + lNumeric = true; + } else if (DataType.isDateTimeType(l)) { + lDateTime = true; + } else { + throw getUnsupported(l, r); + } + boolean rInterval = false, rNumeric = false, rDateTime = false; + if (DataType.isIntervalType(r)) { + rInterval = true; + } else if (DataType.isNumericType(r)) { + rNumeric = true; + } else if (DataType.isDateTimeType(r)) { + rDateTime = true; + } else { + throw getUnsupported(l, r); + } + switch (opType) { + case PLUS: + if (lInterval && rInterval) { + if (DataType.isYearMonthIntervalType(l) == DataType.isYearMonthIntervalType(r)) { + return new IntervalOperation(IntervalOpType.INTERVAL_PLUS_INTERVAL, left, right); + } + } else if (lInterval && rDateTime) { + if (r == Value.TIME && DataType.isYearMonthIntervalType(l)) { + break; + } + return new IntervalOperation(IntervalOpType.DATETIME_PLUS_INTERVAL, right, left); + } else if (lDateTime && rInterval) { + if (l == Value.TIME && DataType.isYearMonthIntervalType(r)) { + break; + } + return new IntervalOperation(IntervalOpType.DATETIME_PLUS_INTERVAL, left, right); + } + break; + case MINUS: + if (lInterval && rInterval) { + if (DataType.isYearMonthIntervalType(l) == DataType.isYearMonthIntervalType(r)) { + return new IntervalOperation(IntervalOpType.INTERVAL_MINUS_INTERVAL, left, right); + } + } else if (lDateTime && rInterval) { + if (l == Value.TIME && DataType.isYearMonthIntervalType(r)) { + break; + } + return new 
IntervalOperation(IntervalOpType.DATETIME_MINUS_INTERVAL, left, right); + } + break; + case MULTIPLY: + if (lInterval && rNumeric) { + return new IntervalOperation(IntervalOpType.INTERVAL_MULTIPLY_NUMERIC, left, right); + } else if (lNumeric && rInterval) { + return new IntervalOperation(IntervalOpType.INTERVAL_MULTIPLY_NUMERIC, right, left); + } + break; + case DIVIDE: + if (lInterval) { + if (rNumeric) { + return new IntervalOperation(IntervalOpType.INTERVAL_DIVIDE_NUMERIC, left, right); + } else if (rInterval && DataType.isYearMonthIntervalType(l) == DataType.isYearMonthIntervalType(r)) { + // Non-standard + return new IntervalOperation(IntervalOpType.INTERVAL_DIVIDE_INTERVAL, left, right); + } + } + break; + default: + } + throw getUnsupported(l, r); + } + + private Expression optimizeDateTime(SessionLocal session, int l, int r) { + switch (opType) { + case PLUS: { + if (DataType.isDateTimeType(l)) { + if (DataType.isDateTimeType(r)) { + if (l > r) { + swap(); + int t = l; + l = r; + r = t; + } + return new CompatibilityDatePlusTimeOperation(right, left).optimize(session); + } + swap(); + int t = l; + l = r; + r = t; + } + switch (l) { + case Value.INTEGER: + // Oracle date add + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.DAY, left, right) + .optimize(session); + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: + // Oracle date add + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.SECOND, + new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInteger.get(60 * 60 * 24)), + left), right).optimize(session); + } + break; + } + case MINUS: + switch (l) { + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + switch (r) { + case Value.INTEGER: { + if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } + // Oracle date subtract + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.DAY, + new UnaryOperation(right), 
left).optimize(session); + } + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: { + if (forcedType != null) { + throw getUnexpectedForcedTypeException(); + } + // Oracle date subtract + return new DateTimeFunction(DateTimeFunction.DATEADD, DateTimeFunction.SECOND, + new BinaryOperation(OpType.MULTIPLY, ValueExpression.get(ValueInteger.get(-60 * 60 * 24)), + right), left).optimize(session); + } + case Value.TIME: + case Value.TIME_TZ: + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right, forcedType); + } + break; + case Value.TIME: + case Value.TIME_TZ: + if (DataType.isDateTimeType(r)) { + return new IntervalOperation(IntervalOpType.DATETIME_MINUS_DATETIME, left, right, forcedType); + } + break; + } + break; + case MULTIPLY: + if (l == Value.TIME) { + type = TypeInfo.TYPE_TIME; + convertRight = false; + return this; + } else if (r == Value.TIME) { + swap(); + type = TypeInfo.TYPE_TIME; + convertRight = false; + return this; + } + break; + case DIVIDE: + if (l == Value.TIME) { + type = TypeInfo.TYPE_TIME; + convertRight = false; + return this; + } + break; + default: + } + throw getUnsupported(l, r); + } + + private DbException getUnsupported(int l, int r) { + return DbException.getUnsupportedException( + Value.getTypeName(l) + ' ' + getOperationToken() + ' ' + Value.getTypeName(r)); + } + + private DbException getUnexpectedForcedTypeException() { + StringBuilder builder = getUnenclosedSQL(new StringBuilder(), TRACE_SQL_FLAGS); + int index = builder.length(); + return DbException.getSyntaxError( + IntervalOperation.getForcedTypeSQL(builder.append(' '), forcedType).toString(), index, ""); + } + + private void swap() { + Expression temp = left; + left = right; + right = temp; + } + + /** + * Returns the type of this binary operation. 
+ * + * @return the type of this binary operation + */ + public OpType getOperationType() { + return opType; + } + +} diff --git a/h2/src/main/org/h2/expression/CompareLike.java b/h2/src/main/org/h2/expression/CompareLike.java deleted file mode 100644 index 89e33bb9c3..0000000000 --- a/h2/src/main/org/h2/expression/CompareLike.java +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.regex.Pattern; -import java.util.regex.PatternSyntaxException; - -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; - -/** - * Pattern matching comparison expression: WHERE NAME LIKE ? 
- */ -public class CompareLike extends Condition { - - private static final int MATCH = 0, ONE = 1, ANY = 2; - - private final CompareMode compareMode; - private final String defaultEscape; - private Expression left; - private Expression right; - private Expression escape; - - private boolean isInit; - - private char[] patternChars; - private String patternString; - private int[] patternTypes; - private int patternLength; - - private final boolean regexp; - private Pattern patternRegexp; - - private boolean ignoreCase; - private boolean fastCompare; - private boolean invalidPattern; - - public CompareLike(Database db, Expression left, Expression right, - Expression escape, boolean regexp) { - this(db.getCompareMode(), db.getSettings().defaultEscape, left, right, - escape, regexp); - } - - public CompareLike(CompareMode compareMode, String defaultEscape, - Expression left, Expression right, Expression escape, boolean regexp) { - this.compareMode = compareMode; - this.defaultEscape = defaultEscape; - this.regexp = regexp; - this.left = left; - this.right = right; - this.escape = escape; - } - - private static Character getEscapeChar(String s) { - return s == null || s.length() == 0 ? 
null : s.charAt(0); - } - - @Override - public String getSQL() { - String sql; - if (regexp) { - sql = left.getSQL() + " REGEXP " + right.getSQL(); - } else { - sql = left.getSQL() + " LIKE " + right.getSQL(); - if (escape != null) { - sql += " ESCAPE " + escape.getSQL(); - } - } - return "(" + sql + ")"; - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - right = right.optimize(session); - if (left.getType() == Value.STRING_IGNORECASE) { - ignoreCase = true; - } - if (left.isValueSet()) { - Value l = left.getValue(session); - if (l == ValueNull.INSTANCE) { - // NULL LIKE something > NULL - return ValueExpression.getNull(); - } - } - if (escape != null) { - escape = escape.optimize(session); - } - if (right.isValueSet() && (escape == null || escape.isValueSet())) { - if (left.isValueSet()) { - return ValueExpression.get(getValue(session)); - } - Value r = right.getValue(session); - if (r == ValueNull.INSTANCE) { - // something LIKE NULL > NULL - return ValueExpression.getNull(); - } - Value e = escape == null ? 
null : escape.getValue(session); - if (e == ValueNull.INSTANCE) { - return ValueExpression.getNull(); - } - String p = r.getString(); - initPattern(p, getEscapeChar(e)); - if (invalidPattern) { - return ValueExpression.getNull(); - } - if ("%".equals(p)) { - // optimization for X LIKE '%': convert to X IS NOT NULL - return new Comparison(session, - Comparison.IS_NOT_NULL, left, null).optimize(session); - } - if (isFullMatch()) { - // optimization for X LIKE 'Hello': convert to X = 'Hello' - Value value = ValueString.get(patternString); - Expression expr = ValueExpression.get(value); - return new Comparison(session, - Comparison.EQUAL, left, expr).optimize(session); - } - isInit = true; - } - return this; - } - - private Character getEscapeChar(Value e) { - if (e == null) { - return getEscapeChar(defaultEscape); - } - String es = e.getString(); - Character esc; - if (es == null) { - esc = getEscapeChar(defaultEscape); - } else if (es.length() == 0) { - esc = null; - } else if (es.length() > 1) { - throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, es); - } else { - esc = es.charAt(0); - } - return esc; - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (regexp) { - return; - } - if (!(left instanceof ExpressionColumn)) { - return; - } - ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - return; - } - // parameters are always evaluatable, but - // we need to check if the value is set - // (at prepare time) - // otherwise we would need to prepare at execute time, - // which may be slower (possibly not in this case) - if (!right.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { - return; - } - if (escape != null && - !escape.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { - return; - } - String p = right.getValue(session).getString(); - Value e = escape == null ? 
null : escape.getValue(session); - if (e == ValueNull.INSTANCE) { - // should already be optimized - DbException.throwInternalError(); - } - initPattern(p, getEscapeChar(e)); - if (invalidPattern) { - return; - } - if (patternLength <= 0 || patternTypes[0] != MATCH) { - // can't use an index - return; - } - int dataType = l.getColumn().getType(); - if (dataType != Value.STRING && dataType != Value.STRING_IGNORECASE && - dataType != Value.STRING_FIXED) { - // column is not a varchar - can't use the index - return; - } - int maxMatch = 0; - StringBuilder buff = new StringBuilder(); - while (maxMatch < patternLength && patternTypes[maxMatch] == MATCH) { - buff.append(patternChars[maxMatch++]); - } - String begin = buff.toString(); - if (maxMatch == patternLength) { - filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL, l, - ValueExpression.get(ValueString.get(begin)))); - } else { - // TODO check if this is correct according to Unicode rules - // (code points) - String end; - if (begin.length() > 0) { - filter.addIndexCondition(IndexCondition.get( - Comparison.BIGGER_EQUAL, l, - ValueExpression.get(ValueString.get(begin)))); - char next = begin.charAt(begin.length() - 1); - // search the 'next' unicode character (or at least a character - // that is higher) - for (int i = 1; i < 2000; i++) { - end = begin.substring(0, begin.length() - 1) + (char) (next + i); - if (compareMode.compareString(begin, end, ignoreCase) == -1) { - filter.addIndexCondition(IndexCondition.get( - Comparison.SMALLER, l, - ValueExpression.get(ValueString.get(end)))); - break; - } - } - } - } - } - - @Override - public Value getValue(Session session) { - Value l = left.getValue(session); - if (l == ValueNull.INSTANCE) { - return l; - } - if (!isInit) { - Value r = right.getValue(session); - if (r == ValueNull.INSTANCE) { - return r; - } - String p = r.getString(); - Value e = escape == null ? 
null : escape.getValue(session); - if (e == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - initPattern(p, getEscapeChar(e)); - } - if (invalidPattern) { - return ValueNull.INSTANCE; - } - String value = l.getString(); - boolean result; - if (regexp) { - // result = patternRegexp.matcher(value).matches(); - result = patternRegexp.matcher(value).find(); - } else { - result = compareAt(value, 0, 0, value.length(), patternChars, patternTypes); - } - return ValueBoolean.get(result); - } - - private boolean compare(char[] pattern, String s, int pi, int si) { - return pattern[pi] == s.charAt(si) || - (!fastCompare && compareMode.equalsChars(patternString, pi, s, - si, ignoreCase)); - } - - private boolean compareAt(String s, int pi, int si, int sLen, - char[] pattern, int[] types) { - for (; pi < patternLength; pi++) { - switch (types[pi]) { - case MATCH: - if ((si >= sLen) || !compare(pattern, s, pi, si++)) { - return false; - } - break; - case ONE: - if (si++ >= sLen) { - return false; - } - break; - case ANY: - if (++pi >= patternLength) { - return true; - } - while (si < sLen) { - if (compare(pattern, s, pi, si) && - compareAt(s, pi, si, sLen, pattern, types)) { - return true; - } - si++; - } - return false; - default: - DbException.throwInternalError(); - } - } - return si == sLen; - } - - /** - * Test if the value matches the pattern. 
- * - * @param testPattern the pattern - * @param value the value - * @param escapeChar the escape character - * @return true if the value matches - */ - public boolean test(String testPattern, String value, char escapeChar) { - initPattern(testPattern, escapeChar); - if (invalidPattern) { - return false; - } - return compareAt(value, 0, 0, value.length(), patternChars, patternTypes); - } - - private void initPattern(String p, Character escapeChar) { - if (compareMode.getName().equals(CompareMode.OFF) && !ignoreCase) { - fastCompare = true; - } - if (regexp) { - patternString = p; - try { - if (ignoreCase) { - patternRegexp = Pattern.compile(p, Pattern.CASE_INSENSITIVE); - } else { - patternRegexp = Pattern.compile(p); - } - } catch (PatternSyntaxException e) { - throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, p); - } - return; - } - patternLength = 0; - if (p == null) { - patternTypes = null; - patternChars = null; - return; - } - int len = p.length(); - patternChars = new char[len]; - patternTypes = new int[len]; - boolean lastAny = false; - for (int i = 0; i < len; i++) { - char c = p.charAt(i); - int type; - if (escapeChar != null && escapeChar == c) { - if (i >= len - 1) { - invalidPattern = true; - return; - } - c = p.charAt(++i); - type = MATCH; - lastAny = false; - } else if (c == '%') { - if (lastAny) { - continue; - } - type = ANY; - lastAny = true; - } else if (c == '_') { - type = ONE; - } else { - type = MATCH; - lastAny = false; - } - patternTypes[patternLength] = type; - patternChars[patternLength++] = c; - } - for (int i = 0; i < patternLength - 1; i++) { - if ((patternTypes[i] == ANY) && (patternTypes[i + 1] == ONE)) { - patternTypes[i] = ONE; - patternTypes[i + 1] = ANY; - } - } - patternString = new String(patternChars, 0, patternLength); - } - - private boolean isFullMatch() { - if (patternTypes == null) { - return false; - } - for (int type : patternTypes) { - if (type != MATCH) { - return false; - } - } - return true; - } - - @Override 
- public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - right.mapColumns(resolver, level); - if (escape != null) { - escape.mapColumns(resolver, level); - } - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - right.setEvaluatable(tableFilter, b); - if (escape != null) { - escape.setEvaluatable(tableFilter, b); - } - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - right.updateAggregate(session); - if (escape != null) { - escape.updateAggregate(session); - } - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && right.isEverything(visitor) - && (escape == null || escape.isEverything(visitor)); - } - - @Override - public int getCost() { - return left.getCost() + right.getCost() + 3; - } - -} diff --git a/h2/src/main/org/h2/expression/Comparison.java b/h2/src/main/org/h2/expression/Comparison.java deleted file mode 100644 index 3caaa836f3..0000000000 --- a/h2/src/main/org/h2/expression/Comparison.java +++ /dev/null @@ -1,583 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.Arrays; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueGeometry; -import org.h2.value.ValueNull; - -/** - * Example comparison expressions are ID=1, NAME=NAME, NAME IS NULL. 
- * - * @author Thomas Mueller - * @author Noel Grandin - * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 - */ -public class Comparison extends Condition { - - /** - * This is a flag meaning the comparison is null safe (meaning never returns - * NULL even if one operand is NULL). Only EQUAL and NOT_EQUAL are supported - * currently. - */ - public static final int NULL_SAFE = 16; - - /** - * The comparison type meaning = as in ID=1. - */ - public static final int EQUAL = 0; - - /** - * The comparison type meaning ID IS 1 (ID IS NOT DISTINCT FROM 1). - */ - public static final int EQUAL_NULL_SAFE = EQUAL | NULL_SAFE; - - /** - * The comparison type meaning >= as in ID>=1. - */ - public static final int BIGGER_EQUAL = 1; - - /** - * The comparison type meaning > as in ID>1. - */ - public static final int BIGGER = 2; - - /** - * The comparison type meaning <= as in ID<=1. - */ - public static final int SMALLER_EQUAL = 3; - - /** - * The comparison type meaning < as in ID<1. - */ - public static final int SMALLER = 4; - - /** - * The comparison type meaning <> as in ID<>1. - */ - public static final int NOT_EQUAL = 5; - - /** - * The comparison type meaning ID IS NOT 1 (ID IS DISTINCT FROM 1). - */ - public static final int NOT_EQUAL_NULL_SAFE = NOT_EQUAL | NULL_SAFE; - - /** - * The comparison type meaning IS NULL as in NAME IS NULL. - */ - public static final int IS_NULL = 6; - - /** - * The comparison type meaning IS NOT NULL as in NAME IS NOT NULL. - */ - public static final int IS_NOT_NULL = 7; - - /** - * This is a pseudo comparison type that is only used for index conditions. - * It means the comparison will always yield FALSE. Example: 1=0. - */ - public static final int FALSE = 8; - - /** - * This is a pseudo comparison type that is only used for index conditions. - * It means equals any value of a list. Example: IN(1, 2, 3). 
- */ - public static final int IN_LIST = 9; - - /** - * This is a pseudo comparison type that is only used for index conditions. - * It means equals any value of a list. Example: IN(SELECT ...). - */ - public static final int IN_QUERY = 10; - - /** - * This is a comparison type that is only used for spatial index - * conditions (operator "&&"). - */ - public static final int SPATIAL_INTERSECTS = 11; - - private final Database database; - private int compareType; - private Expression left; - private Expression right; - - public Comparison(Session session, int compareType, Expression left, - Expression right) { - this.database = session.getDatabase(); - this.left = left; - this.right = right; - this.compareType = compareType; - } - - @Override - public String getSQL() { - String sql; - switch (compareType) { - case IS_NULL: - sql = left.getSQL() + " IS NULL"; - break; - case IS_NOT_NULL: - sql = left.getSQL() + " IS NOT NULL"; - break; - case SPATIAL_INTERSECTS: - sql = "INTERSECTS(" + left.getSQL() + ", " + right.getSQL() + ")"; - break; - default: - sql = left.getSQL() + " " + getCompareOperator(compareType) + - " " + right.getSQL(); - } - return "(" + sql + ")"; - } - - /** - * Get the comparison operator string ("=", ">",...). 
- * - * @param compareType the compare type - * @return the string - */ - static String getCompareOperator(int compareType) { - switch (compareType) { - case EQUAL: - return "="; - case EQUAL_NULL_SAFE: - return "IS"; - case BIGGER_EQUAL: - return ">="; - case BIGGER: - return ">"; - case SMALLER_EQUAL: - return "<="; - case SMALLER: - return "<"; - case NOT_EQUAL: - return "<>"; - case NOT_EQUAL_NULL_SAFE: - return "IS NOT"; - case SPATIAL_INTERSECTS: - return "&&"; - default: - throw DbException.throwInternalError("compareType=" + compareType); - } - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - if (right != null) { - right = right.optimize(session); - if (right instanceof ExpressionColumn) { - if (left.isConstant() || left instanceof Parameter) { - Expression temp = left; - left = right; - right = temp; - compareType = getReversedCompareType(compareType); - } - } - if (left instanceof ExpressionColumn) { - if (right.isConstant()) { - Value r = right.getValue(session); - if (r == ValueNull.INSTANCE) { - if ((compareType & NULL_SAFE) == 0) { - return ValueExpression.getNull(); - } - } - } else if (right instanceof Parameter) { - ((Parameter) right).setColumn( - ((ExpressionColumn) left).getColumn()); - } - } - } - if (compareType == IS_NULL || compareType == IS_NOT_NULL) { - if (left.isConstant()) { - return ValueExpression.get(getValue(session)); - } - } else { - if (SysProperties.CHECK && (left == null || right == null)) { - DbException.throwInternalError(); - } - if (left == ValueExpression.getNull() || - right == ValueExpression.getNull()) { - // TODO NULL handling: maybe issue a warning when comparing with - // a NULL constants - if ((compareType & NULL_SAFE) == 0) { - return ValueExpression.getNull(); - } - } - if (left.isConstant() && right.isConstant()) { - return ValueExpression.get(getValue(session)); - } - } - return this; - } - - @Override - public Value getValue(Session session) { - Value l = 
left.getValue(session); - if (right == null) { - boolean result; - switch (compareType) { - case IS_NULL: - result = l == ValueNull.INSTANCE; - break; - case IS_NOT_NULL: - result = !(l == ValueNull.INSTANCE); - break; - default: - throw DbException.throwInternalError("type=" + compareType); - } - return ValueBoolean.get(result); - } - if (l == ValueNull.INSTANCE) { - if ((compareType & NULL_SAFE) == 0) { - return ValueNull.INSTANCE; - } - } - Value r = right.getValue(session); - if (r == ValueNull.INSTANCE) { - if ((compareType & NULL_SAFE) == 0) { - return ValueNull.INSTANCE; - } - } - int dataType = Value.getHigherOrder(left.getType(), right.getType()); - l = l.convertTo(dataType); - r = r.convertTo(dataType); - boolean result = compareNotNull(database, l, r, compareType); - return ValueBoolean.get(result); - } - - /** - * Compare two values, given the values are not NULL. - * - * @param database the database - * @param l the first value - * @param r the second value - * @param compareType the compare type - * @return true if the comparison indicated by the comparison type evaluates - * to true - */ - static boolean compareNotNull(Database database, Value l, Value r, - int compareType) { - boolean result; - switch (compareType) { - case EQUAL: - case EQUAL_NULL_SAFE: - result = database.areEqual(l, r); - break; - case NOT_EQUAL: - case NOT_EQUAL_NULL_SAFE: - result = !database.areEqual(l, r); - break; - case BIGGER_EQUAL: - result = database.compare(l, r) >= 0; - break; - case BIGGER: - result = database.compare(l, r) > 0; - break; - case SMALLER_EQUAL: - result = database.compare(l, r) <= 0; - break; - case SMALLER: - result = database.compare(l, r) < 0; - break; - case SPATIAL_INTERSECTS: { - ValueGeometry lg = (ValueGeometry) l.convertTo(Value.GEOMETRY); - ValueGeometry rg = (ValueGeometry) r.convertTo(Value.GEOMETRY); - result = lg.intersectsBoundingBox(rg); - break; - } - default: - throw DbException.throwInternalError("type=" + compareType); - } - return 
result; - } - - private int getReversedCompareType(int type) { - switch (compareType) { - case EQUAL: - case EQUAL_NULL_SAFE: - case NOT_EQUAL: - case NOT_EQUAL_NULL_SAFE: - case SPATIAL_INTERSECTS: - return type; - case BIGGER_EQUAL: - return SMALLER_EQUAL; - case BIGGER: - return SMALLER; - case SMALLER_EQUAL: - return BIGGER_EQUAL; - case SMALLER: - return BIGGER; - default: - throw DbException.throwInternalError("type=" + compareType); - } - } - - @Override - public Expression getNotIfPossible(Session session) { - if (compareType == SPATIAL_INTERSECTS) { - return null; - } - int type = getNotCompareType(); - return new Comparison(session, type, left, right); - } - - private int getNotCompareType() { - switch (compareType) { - case EQUAL: - return NOT_EQUAL; - case EQUAL_NULL_SAFE: - return NOT_EQUAL_NULL_SAFE; - case NOT_EQUAL: - return EQUAL; - case NOT_EQUAL_NULL_SAFE: - return EQUAL_NULL_SAFE; - case BIGGER_EQUAL: - return SMALLER; - case BIGGER: - return SMALLER_EQUAL; - case SMALLER_EQUAL: - return BIGGER; - case SMALLER: - return BIGGER_EQUAL; - case IS_NULL: - return IS_NOT_NULL; - case IS_NOT_NULL: - return IS_NULL; - default: - throw DbException.throwInternalError("type=" + compareType); - } - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!filter.getTable().isQueryComparable()) { - return; - } - ExpressionColumn l = null; - if (left instanceof ExpressionColumn) { - l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - l = null; - } - } - if (right == null) { - if (l != null) { - switch (compareType) { - case IS_NULL: - if (session.getDatabase().getSettings().optimizeIsNull) { - filter.addIndexCondition( - IndexCondition.get( - Comparison.EQUAL_NULL_SAFE, l, - ValueExpression.getNull())); - } - } - } - return; - } - ExpressionColumn r = null; - if (right instanceof ExpressionColumn) { - r = (ExpressionColumn) right; - if (filter != r.getTableFilter()) { - r = null; - } - } - // one side 
must be from the current filter - if (l == null && r == null) { - return; - } - if (l != null && r != null) { - return; - } - if (l == null) { - ExpressionVisitor visitor = - ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!left.isEverything(visitor)) { - return; - } - } else if (r == null) { - ExpressionVisitor visitor = - ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!right.isEverything(visitor)) { - return; - } - } else { - // if both sides are part of the same filter, it can't be used for - // index lookup - return; - } - boolean addIndex; - switch (compareType) { - case NOT_EQUAL: - case NOT_EQUAL_NULL_SAFE: - addIndex = false; - break; - case EQUAL: - case EQUAL_NULL_SAFE: - case BIGGER: - case BIGGER_EQUAL: - case SMALLER_EQUAL: - case SMALLER: - case SPATIAL_INTERSECTS: - addIndex = true; - break; - default: - throw DbException.throwInternalError("type=" + compareType); - } - if (addIndex) { - if (l != null) { - filter.addIndexCondition( - IndexCondition.get(compareType, l, right)); - } else if (r != null) { - int compareRev = getReversedCompareType(compareType); - filter.addIndexCondition( - IndexCondition.get(compareRev, r, left)); - } - } - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - if (right != null) { - right.setEvaluatable(tableFilter, b); - } - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - if (right != null) { - right.updateAggregate(session); - } - } - - @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (compareType == IS_NULL && outerJoin) { - // can not optimize: - // select * from test t1 left join test t2 on t1.id = t2.id - // where t2.id is null - // to - // select * from test t1 left join test t2 - // on t1.id = t2.id and t2.id is null - return; - } - super.addFilterConditions(filter, outerJoin); - } - - @Override - public void 
mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - if (right != null) { - right.mapColumns(resolver, level); - } - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && - (right == null || right.isEverything(visitor)); - } - - @Override - public int getCost() { - return left.getCost() + (right == null ? 0 : right.getCost()) + 1; - } - - /** - * Get the other expression if this is an equals comparison and the other - * expression matches. - * - * @param match the expression that should match - * @return null if no match, the other expression if there is a match - */ - Expression getIfEquals(Expression match) { - if (compareType == EQUAL) { - String sql = match.getSQL(); - if (left.getSQL().equals(sql)) { - return right; - } else if (right.getSQL().equals(sql)) { - return left; - } - } - return null; - } - - /** - * Get an additional condition if possible. Example: given two conditions - * A=B AND B=C, the new condition A=C is returned. Given the two conditions - * A=1 OR A=2, the new condition A IN(1, 2) is returned. - * - * @param session the session - * @param other the second condition - * @param and true for AND, false for OR - * @return null or the third condition - */ - Expression getAdditional(Session session, Comparison other, boolean and) { - if (compareType == other.compareType && compareType == EQUAL) { - boolean lc = left.isConstant(); - boolean rc = right.isConstant(); - boolean l2c = other.left.isConstant(); - boolean r2c = other.right.isConstant(); - String l = left.getSQL(); - String l2 = other.left.getSQL(); - String r = right.getSQL(); - String r2 = other.right.getSQL(); - if (and) { - // a=b AND a=c - // must not compare constants. 
example: NOT(B=2 AND B=3) - if (!(rc && r2c) && l.equals(l2)) { - return new Comparison(session, EQUAL, right, other.right); - } else if (!(rc && l2c) && l.equals(r2)) { - return new Comparison(session, EQUAL, right, other.left); - } else if (!(lc && r2c) && r.equals(l2)) { - return new Comparison(session, EQUAL, left, other.right); - } else if (!(lc && l2c) && r.equals(r2)) { - return new Comparison(session, EQUAL, left, other.left); - } - } else { - // a=b OR a=c - Database db = session.getDatabase(); - if (rc && r2c && l.equals(l2)) { - return new ConditionIn(db, left, - New.arrayList(Arrays.asList(right, other.right))); - } else if (rc && l2c && l.equals(r2)) { - return new ConditionIn(db, left, - New.arrayList(Arrays.asList(right, other.left))); - } else if (lc && r2c && r.equals(l2)) { - return new ConditionIn(db, right, - New.arrayList(Arrays.asList(left, other.right))); - } else if (lc && l2c && r.equals(r2)) { - return new ConditionIn(db, right, - New.arrayList(Arrays.asList(left, other.left))); - } - } - } - return null; - } - - /** - * Get the left or the right sub-expression of this condition. - * - * @param getLeft true to get the left sub-expression, false to get the - * right sub-expression. - * @return the sub-expression - */ - public Expression getExpression(boolean getLeft) { - return getLeft ? this.left : right; - } - -} diff --git a/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java b/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java new file mode 100644 index 0000000000..f1f4132788 --- /dev/null +++ b/h2/src/main/org/h2/expression/CompatibilityDatePlusTimeOperation.java @@ -0,0 +1,117 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueNull; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A compatibility mathematical operation with datetime values. + */ +public class CompatibilityDatePlusTimeOperation extends Operation2 { + + public CompatibilityDatePlusTimeOperation(Expression left, Expression right) { + super(left, right); + TypeInfo l = left.getType(), r = right.getType(); + int t; + switch (l.getValueType()) { + case Value.TIMESTAMP_TZ: + if (r.getValueType() == Value.TIME_TZ) { + throw DbException.getUnsupportedException("TIMESTAMP WITH TIME ZONE + TIME WITH TIME ZONE"); + } + //$FALL-THROUGH$ + case Value.TIME: + t = r.getValueType() == Value.DATE ? Value.TIMESTAMP : l.getValueType(); + break; + case Value.TIME_TZ: + if (r.getValueType() == Value.TIME_TZ) { + throw DbException.getUnsupportedException("TIME WITH TIME ZONE + TIME WITH TIME ZONE"); + } + t = r.getValueType() == Value.DATE ? Value.TIMESTAMP_TZ : l.getValueType(); + break; + case Value.TIMESTAMP: + t = r.getValueType() == Value.TIME_TZ ? 
Value.TIMESTAMP_TZ : Value.TIMESTAMP; + break; + default: + throw DbException.getUnsupportedException( + Value.getTypeName(l.getValueType()) + " + " + Value.getTypeName(r.getValueType())); + } + type = TypeInfo.getTypeInfo(t, 0L, Math.max(l.getScale(), r.getScale()), null); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" + "); + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r = right.getValue(session); + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + switch (l.getValueType()) { + case Value.TIME: + if (r.getValueType() == Value.DATE) { + return ValueTimestamp.fromDateValueAndNanos(((ValueDate) r).getDateValue(), // + ((ValueTime) l).getNanos()); + } + break; + case Value.TIME_TZ: + if (r.getValueType() == Value.DATE) { + ValueTimeTimeZone t = (ValueTimeTimeZone) l; + return ValueTimestampTimeZone.fromDateValueAndNanos(((ValueDate) r).getDateValue(), t.getNanos(), + t.getTimeZoneOffsetSeconds()); + } + break; + case Value.TIMESTAMP: { + if (r.getValueType() == Value.TIME_TZ) { + ValueTimestamp ts = (ValueTimestamp) l; + l = ValueTimestampTimeZone.fromDateValueAndNanos(ts.getDateValue(), ts.getTimeNanos(), + ((ValueTimeTimeZone) r).getTimeZoneOffsetSeconds()); + } + break; + } + } + long[] a = DateTimeUtils.dateAndTimeFromValue(l, session); + long dateValue = a[0], timeNanos = a[1] + + (r instanceof ValueTime ? 
((ValueTime) r).getNanos() : ((ValueTimeTimeZone) r).getNanos()); + if (timeNanos >= NANOS_PER_DAY) { + timeNanos -= NANOS_PER_DAY; + dateValue = DateTimeUtils.incrementDateValue(dateValue); + } + return DateTimeUtils.dateTimeToValue(l, dateValue, timeNanos); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + if (left.isConstant() && right.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/ConcatenationOperation.java b/h2/src/main/org/h2/expression/ConcatenationOperation.java new file mode 100644 index 0000000000..18baaceb53 --- /dev/null +++ b/h2/src/main/org/h2/expression/ConcatenationOperation.java @@ -0,0 +1,281 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.function.CastSpecification; +import org.h2.expression.function.ConcatFunction; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * Character string concatenation as in {@code 'Hello' || 'World'}, binary + * string concatenation as in {@code X'01' || X'AB'} or an array concatenation + * as in {@code ARRAY[1, 2] || 3}. 
+ */ +public final class ConcatenationOperation extends OperationN { + + public ConcatenationOperation() { + super(new Expression[4]); + } + + public ConcatenationOperation(Expression op1, Expression op2) { + super(new Expression[] { op1, op2 }); + argsCount = 2; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + for (int i = 0, l = args.length; i < l; i++) { + if (i > 0) { + builder.append(" || "); + } + args[i].getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + return builder; + } + + @Override + public Value getValue(SessionLocal session) { + int l = args.length; + if (l == 2) { + Value v1 = args[0].getValue(session); + v1 = v1.convertTo(type, session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + Value v2 = args[1].getValue(session); + v2 = v2.convertTo(type, session); + if (v2 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, v1, v2); + } + return getValue(session, l); + } + + private Value getValue(SessionLocal session, Value l, Value r) { + int valueType = type.getValueType(); + if (valueType == Value.VARCHAR) { + String s1 = l.getString(), s2 = r.getString(); + return ValueVarchar.get(new StringBuilder(s1.length() + s2.length()).append(s1).append(s2).toString()); + } else if (valueType == Value.VARBINARY) { + byte[] leftBytes = l.getBytesNoCopy(), rightBytes = r.getBytesNoCopy(); + int leftLength = leftBytes.length, rightLength = rightBytes.length; + byte[] bytes = Arrays.copyOf(leftBytes, leftLength + rightLength); + System.arraycopy(rightBytes, 0, bytes, leftLength, rightLength); + return ValueVarbinary.getNoCopy(bytes); + } else { + Value[] leftValues = ((ValueArray) l).getList(), rightValues = ((ValueArray) r).getList(); + int leftLength = leftValues.length, rightLength = rightValues.length; + Value[] values = Arrays.copyOf(leftValues, leftLength + rightLength); + 
System.arraycopy(rightValues, 0, values, leftLength, rightLength); + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), values, session); + } + } + + private Value getValue(SessionLocal session, int l) { + Value[] values = new Value[l]; + for (int i = 0; i < l; i++) { + Value v = args[i].getValue(session).convertTo(type, session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + values[i] = v; + } + int valueType = type.getValueType(); + if (valueType == Value.VARCHAR) { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < l; i++) { + builder.append(values[i].getString()); + } + return ValueVarchar.get(builder.toString(), session); + } else if (valueType == Value.VARBINARY) { + int totalLength = 0; + for (int i = 0; i < l; i++) { + totalLength += values[i].getBytesNoCopy().length; + } + byte[] v = new byte[totalLength]; + int offset = 0; + for (int i = 0; i < l; i++) { + byte[] a = values[i].getBytesNoCopy(); + int length = a.length; + System.arraycopy(a, 0, v, offset, length); + offset += length; + } + return ValueVarbinary.getNoCopy(v); + } else { + int totalLength = 0; + for (int i = 0; i < l; i++) { + totalLength += ((ValueArray) values[i]).getList().length; + } + Value[] v = new Value[totalLength]; + int offset = 0; + for (int i = 0; i < l; i++) { + Value[] a = ((ValueArray) values[i]).getList(); + int length = a.length; + System.arraycopy(a, 0, v, offset, length); + offset += length; + } + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), v, session); + } + } + + @Override + public Expression optimize(SessionLocal session) { + determineType(session); + inlineArguments(); + if (type.getValueType() == Value.VARCHAR && session.getMode().treatEmptyStringsAsNull) { + return new ConcatFunction(ConcatFunction.CONCAT, args).optimize(session); + } + int l = args.length; + boolean allConst = true, anyConst = false; + for (int i = 0; i < l; i++) { + if (args[i].isConstant()) { + anyConst = true; + } else { + allConst = 
false; + } + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + if (anyConst) { + int offset = 0; + for (int i = 0; i < l; i++) { + Expression arg1 = args[i]; + if (arg1.isConstant()) { + Value v1 = arg1.getValue(session).convertTo(type, session); + if (v1 == ValueNull.INSTANCE) { + return TypedValueExpression.get(ValueNull.INSTANCE, type); + } + if (isEmpty(v1)) { + continue; + } + for (Expression arg2; i + 1 < l && (arg2 = args[i + 1]).isConstant(); i++) { + Value v2 = arg2.getValue(session).convertTo(type, session); + if (v2 == ValueNull.INSTANCE) { + return TypedValueExpression.get(ValueNull.INSTANCE, type); + } + if (!isEmpty(v2)) { + v1 = getValue(session, v1, v2); + } + } + arg1 = ValueExpression.get(v1); + } + args[offset++] = arg1; + } + if (offset == 1) { + Expression arg = args[0]; + TypeInfo argType = arg.getType(); + if (TypeInfo.areSameTypes(type, argType)) { + return arg; + } + return new CastSpecification(arg, type); + } + argsCount = offset; + doneWithParameters(); + } + return this; + } + + private void determineType(SessionLocal session) { + int l = args.length; + boolean anyArray = false, allBinary = true, allCharacter = true; + for (int i = 0; i < l; i++) { + Expression arg = args[i].optimize(session); + args[i] = arg; + int t = arg.getType().getValueType(); + if (t == Value.ARRAY) { + anyArray = true; + allBinary = allCharacter = false; + } else if (t == Value.NULL) { + // Ignore NULL literals + } else if (DataType.isBinaryStringType(t)) { + allCharacter = false; + } else if (DataType.isCharacterStringType(t)) { + allBinary = false; + } else { + allBinary = allCharacter = false; + } + } + if (anyArray) { + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, TypeInfo.getHigherType(args).getExtTypeInfo()); + } else if (allBinary) { + long precision = getPrecision(0); + for (int i = 1; i < l; i++) { + precision = DataType.addPrecision(precision, getPrecision(i)); + } + type = 
TypeInfo.getTypeInfo(Value.VARBINARY, precision, 0, null); + } else if (allCharacter) { + long precision = getPrecision(0); + for (int i = 1; i < l; i++) { + precision = DataType.addPrecision(precision, getPrecision(i)); + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, precision, 0, null); + } else { + type = TypeInfo.TYPE_VARCHAR; + } + } + + private long getPrecision(int i) { + TypeInfo t = args[i].getType(); + return t.getValueType() != Value.NULL ? t.getPrecision() : 0L; + } + + private void inlineArguments() { + int valueType = type.getValueType(); + int l = args.length; + int count = l; + for (int i = 0; i < l; i++) { + Expression arg = args[i]; + if (arg instanceof ConcatenationOperation && arg.getType().getValueType() == valueType) { + count += arg.getSubexpressionCount() - 1; + } + } + if (count > l) { + Expression[] newArguments = new Expression[count]; + for (int i = 0, offset = 0; i < l; i++) { + Expression arg = args[i]; + if (arg instanceof ConcatenationOperation && arg.getType().getValueType() == valueType) { + ConcatenationOperation c = (ConcatenationOperation) arg; + Expression[] innerArgs = c.args; + int innerLength = innerArgs.length; + System.arraycopy(innerArgs, 0, newArguments, offset, innerLength); + offset += innerLength; + } else { + newArguments[offset++] = arg; + } + } + args = newArguments; + argsCount = count; + } + } + + private static boolean isEmpty(Value v) { + int valueType = v.getValueType(); + if (valueType == Value.VARCHAR) { + return v.getString().isEmpty(); + } else if (valueType == Value.VARBINARY) { + return v.getBytesNoCopy().length == 0; + } else { + return ((ValueArray) v).getList().length == 0; + } + } + +} diff --git a/h2/src/main/org/h2/expression/Condition.java b/h2/src/main/org/h2/expression/Condition.java deleted file mode 100644 index 26c254a5bb..0000000000 --- a/h2/src/main/org/h2/expression/Condition.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.value.Value; -import org.h2.value.ValueBoolean; - -/** - * Represents a condition returning a boolean value, or NULL. - */ -abstract class Condition extends Expression { - - @Override - public int getType() { - return Value.BOOLEAN; - } - - @Override - public int getScale() { - return 0; - } - - @Override - public long getPrecision() { - return ValueBoolean.PRECISION; - } - - @Override - public int getDisplaySize() { - return ValueBoolean.DISPLAY_SIZE; - } - -} diff --git a/h2/src/main/org/h2/expression/ConditionAndOr.java b/h2/src/main/org/h2/expression/ConditionAndOr.java deleted file mode 100644 index 911ae4fb26..0000000000 --- a/h2/src/main/org/h2/expression/ConditionAndOr.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; - -/** - * An 'and' or 'or' condition as in WHERE ID=1 AND NAME=? - */ -public class ConditionAndOr extends Condition { - - /** - * The AND condition type as in ID=1 AND NAME='Hello'. - */ - public static final int AND = 0; - - /** - * The OR condition type as in ID=1 OR NAME='Hello'. 
- */ - public static final int OR = 1; - - private final int andOrType; - private Expression left, right; - - public ConditionAndOr(int andOrType, Expression left, Expression right) { - this.andOrType = andOrType; - this.left = left; - this.right = right; - if (SysProperties.CHECK && (left == null || right == null)) { - DbException.throwInternalError(); - } - } - - @Override - public String getSQL() { - String sql; - switch (andOrType) { - case AND: - sql = left.getSQL() + "\n AND " + right.getSQL(); - break; - case OR: - sql = left.getSQL() + "\n OR " + right.getSQL(); - break; - default: - throw DbException.throwInternalError("andOrType=" + andOrType); - } - return "(" + sql + ")"; - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (andOrType == AND) { - left.createIndexConditions(session, filter); - right.createIndexConditions(session, filter); - } - } - - @Override - public Expression getNotIfPossible(Session session) { - // (NOT (A OR B)): (NOT(A) AND NOT(B)) - // (NOT (A AND B)): (NOT(A) OR NOT(B)) - Expression l = left.getNotIfPossible(session); - if (l == null) { - l = new ConditionNot(left); - } - Expression r = right.getNotIfPossible(session); - if (r == null) { - r = new ConditionNot(right); - } - int reversed = andOrType == AND ? 
OR : AND; - return new ConditionAndOr(reversed, l, r); - } - - @Override - public Value getValue(Session session) { - Value l = left.getValue(session); - Value r; - switch (andOrType) { - case AND: { - if (Boolean.FALSE.equals(l.getBoolean())) { - return l; - } - r = right.getValue(session); - if (Boolean.FALSE.equals(r.getBoolean())) { - return r; - } - if (l == ValueNull.INSTANCE) { - return l; - } - if (r == ValueNull.INSTANCE) { - return r; - } - return ValueBoolean.get(true); - } - case OR: { - if (Boolean.TRUE.equals(l.getBoolean())) { - return l; - } - r = right.getValue(session); - if (Boolean.TRUE.equals(r.getBoolean())) { - return r; - } - if (l == ValueNull.INSTANCE) { - return l; - } - if (r == ValueNull.INSTANCE) { - return r; - } - return ValueBoolean.get(false); - } - default: - throw DbException.throwInternalError("type=" + andOrType); - } - } - - @Override - public Expression optimize(Session session) { - // NULL handling: see wikipedia, - // http://www-cs-students.stanford.edu/~wlam/compsci/sqlnulls - left = left.optimize(session); - right = right.optimize(session); - int lc = left.getCost(), rc = right.getCost(); - if (rc < lc) { - Expression t = left; - left = right; - right = t; - } - // this optimization does not work in the following case, - // but NOT is optimized before: - // CREATE TABLE TEST(A INT, B INT); - // INSERT INTO TEST VALUES(1, NULL); - // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows - // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, NULL - if (session.getDatabase().getSettings().optimizeTwoEquals && - andOrType == AND) { - // try to add conditions (A=B AND B=1: add A=1) - if (left instanceof Comparison && right instanceof Comparison) { - Comparison compLeft = (Comparison) left; - Comparison compRight = (Comparison) right; - Expression added = compLeft.getAdditional( - session, compRight, true); - if (added != null) { - added = added.optimize(session); - ConditionAndOr a = new ConditionAndOr(AND, this, 
added); - return a; - } - } - } - // TODO optimization: convert ((A=1 AND B=2) OR (A=1 AND B=3)) to - // (A=1 AND (B=2 OR B=3)) - if (andOrType == OR && - session.getDatabase().getSettings().optimizeOr) { - // try to add conditions (A=B AND B=1: add A=1) - if (left instanceof Comparison && - right instanceof Comparison) { - Comparison compLeft = (Comparison) left; - Comparison compRight = (Comparison) right; - Expression added = compLeft.getAdditional( - session, compRight, false); - if (added != null) { - return added.optimize(session); - } - } else if (left instanceof ConditionIn && - right instanceof Comparison) { - Expression added = ((ConditionIn) left). - getAdditional((Comparison) right); - if (added != null) { - return added.optimize(session); - } - } else if (right instanceof ConditionIn && - left instanceof Comparison) { - Expression added = ((ConditionIn) right). - getAdditional((Comparison) left); - if (added != null) { - return added.optimize(session); - } - } else if (left instanceof ConditionInConstantSet && - right instanceof Comparison) { - Expression added = ((ConditionInConstantSet) left). - getAdditional(session, (Comparison) right); - if (added != null) { - return added.optimize(session); - } - } else if (right instanceof ConditionInConstantSet && - left instanceof Comparison) { - Expression added = ((ConditionInConstantSet) right). - getAdditional(session, (Comparison) left); - if (added != null) { - return added.optimize(session); - } - } - } - // TODO optimization: convert .. OR .. to UNION if the cost is lower - Value l = left.isConstant() ? left.getValue(session) : null; - Value r = right.isConstant() ? 
right.getValue(session) : null; - if (l == null && r == null) { - return this; - } - if (l != null && r != null) { - return ValueExpression.get(getValue(session)); - } - switch (andOrType) { - case AND: - if (l != null) { - if (Boolean.FALSE.equals(l.getBoolean())) { - return ValueExpression.get(l); - } else if (Boolean.TRUE.equals(l.getBoolean())) { - return right; - } - } else if (r != null) { - if (Boolean.FALSE.equals(r.getBoolean())) { - return ValueExpression.get(r); - } else if (Boolean.TRUE.equals(r.getBoolean())) { - return left; - } - } - break; - case OR: - if (l != null) { - if (Boolean.TRUE.equals(l.getBoolean())) { - return ValueExpression.get(l); - } else if (Boolean.FALSE.equals(l.getBoolean())) { - return right; - } - } else if (r != null) { - if (Boolean.TRUE.equals(r.getBoolean())) { - return ValueExpression.get(r); - } else if (Boolean.FALSE.equals(r.getBoolean())) { - return left; - } - } - break; - default: - DbException.throwInternalError("type=" + andOrType); - } - return this; - } - - @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (andOrType == AND) { - left.addFilterConditions(filter, outerJoin); - right.addFilterConditions(filter, outerJoin); - } else { - super.addFilterConditions(filter, outerJoin); - } - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - right.mapColumns(resolver, level); - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - right.setEvaluatable(tableFilter, b); - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - right.updateAggregate(session); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && right.isEverything(visitor); - } - - @Override - public int getCost() { - return left.getCost() + right.getCost(); - } - - @Override - 
public boolean isDisjunctive() { - return andOrType == OR || left.isDisjunctive() || right.isDisjunctive(); - } - - /** - * Get the left or the right sub-expression of this condition. - * - * @param getLeft true to get the left sub-expression, false to get the - * right sub-expression. - * @return the sub-expression - */ - public Expression getExpression(boolean getLeft) { - return getLeft ? this.left : right; - } - -} diff --git a/h2/src/main/org/h2/expression/ConditionExists.java b/h2/src/main/org/h2/expression/ConditionExists.java deleted file mode 100644 index c00ee880ab..0000000000 --- a/h2/src/main/org/h2/expression/ConditionExists.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.command.dml.Query; -import org.h2.engine.Session; -import org.h2.result.LocalResult; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; - -/** - * An 'exists' condition as in WHERE EXISTS(SELECT ...) - */ -public class ConditionExists extends Condition { - - private final Query query; - - public ConditionExists(Query query) { - this.query = query; - } - - @Override - public Value getValue(Session session) { - query.setSession(session); - LocalResult result = query.query(1); - session.addTemporaryResult(result); - boolean r = result.getRowCount() > 0; - return ValueBoolean.get(r); - } - - @Override - public Expression optimize(Session session) { - query.prepare(); - return this; - } - - @Override - public String getSQL() { - return "EXISTS(\n" + StringUtils.indent(query.getPlanSQL(), 4, false) + ")"; - } - - @Override - public void updateAggregate(Session session) { - // TODO exists: is it allowed that the subquery contains aggregates? 
- // probably not - // select id from test group by id having exists (select * from test2 - // where id=count(test.id)) - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - query.mapColumns(resolver, level + 1); - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - query.setEvaluatable(tableFilter, b); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return query.isEverything(visitor); - } - - @Override - public int getCost() { - return query.getCostAsExpression(); - } - -} diff --git a/h2/src/main/org/h2/expression/ConditionIn.java b/h2/src/main/org/h2/expression/ConditionIn.java deleted file mode 100644 index abd3598bb2..0000000000 --- a/h2/src/main/org/h2/expression/ConditionIn.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.ArrayList; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.IndexCondition; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; - -/** - * An 'in' condition with a list of values, as in WHERE NAME IN(...) - */ -public class ConditionIn extends Condition { - - private final Database database; - private Expression left; - private final ArrayList valueList; - private int queryLevel; - - /** - * Create a new IN(..) condition. 
- * - * @param database the database - * @param left the expression before IN - * @param values the value list (at least one element) - */ - public ConditionIn(Database database, Expression left, - ArrayList values) { - this.database = database; - this.left = left; - this.valueList = values; - } - - @Override - public Value getValue(Session session) { - Value l = left.getValue(session); - if (l == ValueNull.INSTANCE) { - return l; - } - boolean result = false; - boolean hasNull = false; - for (Expression e : valueList) { - Value r = e.getValue(session); - if (r == ValueNull.INSTANCE) { - hasNull = true; - } else { - r = r.convertTo(l.getType()); - result = Comparison.compareNotNull(database, l, r, Comparison.EQUAL); - if (result) { - break; - } - } - } - if (!result && hasNull) { - return ValueNull.INSTANCE; - } - return ValueBoolean.get(result); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - for (Expression e : valueList) { - e.mapColumns(resolver, level); - } - this.queryLevel = Math.max(level, this.queryLevel); - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - boolean constant = left.isConstant(); - if (constant && left == ValueExpression.getNull()) { - return left; - } - boolean allValuesConstant = true; - boolean allValuesNull = true; - int size = valueList.size(); - for (int i = 0; i < size; i++) { - Expression e = valueList.get(i); - e = e.optimize(session); - if (e.isConstant() && e.getValue(session) != ValueNull.INSTANCE) { - allValuesNull = false; - } - if (allValuesConstant && !e.isConstant()) { - allValuesConstant = false; - } - if (left instanceof ExpressionColumn && e instanceof Parameter) { - ((Parameter) e) - .setColumn(((ExpressionColumn) left).getColumn()); - } - valueList.set(i, e); - } - if (constant && allValuesConstant) { - return ValueExpression.get(getValue(session)); - } - if (size == 1) { - Expression right = 
valueList.get(0); - Expression expr = new Comparison(session, Comparison.EQUAL, left, right); - expr = expr.optimize(session); - return expr; - } - if (allValuesConstant && !allValuesNull) { - int leftType = left.getType(); - if (leftType == Value.UNKNOWN) { - return this; - } - Expression expr = new ConditionInConstantSet(session, left, valueList); - expr = expr.optimize(session); - return expr; - } - return this; - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!(left instanceof ExpressionColumn)) { - return; - } - ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - return; - } - if (session.getDatabase().getSettings().optimizeInList) { - ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); - for (Expression e : valueList) { - if (!e.isEverything(visitor)) { - return; - } - } - filter.addIndexCondition(IndexCondition.getInList(l, valueList)); - return; - } - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - for (Expression e : valueList) { - e.setEvaluatable(tableFilter, b); - } - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder("("); - buff.append(left.getSQL()).append(" IN("); - for (Expression e : valueList) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - return buff.append("))").toString(); - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - for (Expression e : valueList) { - e.updateAggregate(session); - } - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - if (!left.isEverything(visitor)) { - return false; - } - return areAllValues(visitor); - } - - private boolean areAllValues(ExpressionVisitor visitor) { - for (Expression e : valueList) { - if (!e.isEverything(visitor)) { - return false; - } - } - return true; - } - - @Override - public 
int getCost() { - int cost = left.getCost(); - for (Expression e : valueList) { - cost += e.getCost(); - } - return cost; - } - - @Override - public boolean isDisjunctive() { - return true; - } - - /** - * Add an additional element if possible. Example: given two conditions - * A IN(1, 2) OR A=3, the constant 3 is added: A IN(1, 2, 3). - * - * @param other the second condition - * @return null if the condition was not added, or the new condition - */ - Expression getAdditional(Comparison other) { - Expression add = other.getIfEquals(left); - if (add != null) { - valueList.add(add); - return this; - } - return null; - } -} diff --git a/h2/src/main/org/h2/expression/ConditionInConstantSet.java b/h2/src/main/org/h2/expression/ConditionInConstantSet.java deleted file mode 100644 index 46121622a8..0000000000 --- a/h2/src/main/org/h2/expression/ConditionInConstantSet.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.TreeSet; -import org.h2.engine.Session; -import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; - -/** - * Used for optimised IN(...) queries where the contents of the IN list are all - * constant and of the same type. - *

    - * Checking using a HashSet is has time complexity O(1), instead of O(n) for - * checking using an array. - */ -public class ConditionInConstantSet extends Condition { - - private Expression left; - private int queryLevel; - private final ArrayList valueList; - private final TreeSet valueSet; - - /** - * Create a new IN(..) condition. - * - * @param session the session - * @param left the expression before IN - * @param valueList the value list (at least two elements) - */ - public ConditionInConstantSet(final Session session, Expression left, - ArrayList valueList) { - this.left = left; - this.valueList = valueList; - this.valueSet = new TreeSet(new Comparator() { - @Override - public int compare(Value o1, Value o2) { - return session.getDatabase().compare(o1, o2); - } - }); - int type = left.getType(); - for (Expression expression : valueList) { - valueSet.add(expression.getValue(session).convertTo(type)); - } - } - - @Override - public Value getValue(Session session) { - Value x = left.getValue(session); - if (x == ValueNull.INSTANCE) { - return x; - } - boolean result = valueSet.contains(x); - if (!result) { - boolean setHasNull = valueSet.contains(ValueNull.INSTANCE); - if (setHasNull) { - return ValueNull.INSTANCE; - } - } - return ValueBoolean.get(result); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - this.queryLevel = Math.max(level, this.queryLevel); - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - return this; - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!(left instanceof ExpressionColumn)) { - return; - } - ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - return; - } - if (session.getDatabase().getSettings().optimizeInList) { - filter.addIndexCondition(IndexCondition.getInList(l, valueList)); - return; - } - } - - @Override - public void 
setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder("("); - buff.append(left.getSQL()).append(" IN("); - for (Expression e : valueList) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - return buff.append("))").toString(); - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - if (!left.isEverything(visitor)) { - return false; - } - switch (visitor.getType()) { - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_COLUMNS: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } - } - - @Override - public int getCost() { - int cost = left.getCost(); - return cost; - } - - @Override - public boolean isDisjunctive() { - return true; - } - - /** - * Add an additional element if possible. Example: given two conditions - * A IN(1, 2) OR A=3, the constant 3 is added: A IN(1, 2, 3). 
- * - * @param session the session - * @param other the second condition - * @return null if the condition was not added, or the new condition - */ - Expression getAdditional(Session session, Comparison other) { - Expression add = other.getIfEquals(left); - if (add != null) { - if (add.isConstant()) { - valueList.add(add); - valueSet.add(add.getValue(session).convertTo(left.getType())); - return this; - } - } - return null; - } -} diff --git a/h2/src/main/org/h2/expression/ConditionInSelect.java b/h2/src/main/org/h2/expression/ConditionInSelect.java deleted file mode 100644 index 4e11db23a7..0000000000 --- a/h2/src/main/org/h2/expression/ConditionInSelect.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.api.ErrorCode; -import org.h2.command.dml.Query; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueNull; - -/** - * An 'in' condition with a subquery, as in WHERE ID IN(SELECT ...) 
- */ -public class ConditionInSelect extends Condition { - - private final Database database; - private Expression left; - private final Query query; - private final boolean all; - private final int compareType; - private int queryLevel; - - public ConditionInSelect(Database database, Expression left, Query query, - boolean all, int compareType) { - this.database = database; - this.left = left; - this.query = query; - this.all = all; - this.compareType = compareType; - } - - @Override - public Value getValue(Session session) { - query.setSession(session); - query.setDistinct(true); - LocalResult rows = query.query(0); - try { - Value l = left.getValue(session); - if (rows.getRowCount() == 0) { - return ValueBoolean.get(all); - } else if (l == ValueNull.INSTANCE) { - return l; - } - if (!session.getDatabase().getSettings().optimizeInSelect) { - return getValueSlow(rows, l); - } - if (all || (compareType != Comparison.EQUAL && - compareType != Comparison.EQUAL_NULL_SAFE)) { - return getValueSlow(rows, l); - } - int dataType = rows.getColumnType(0); - if (dataType == Value.NULL) { - return ValueBoolean.get(false); - } - l = l.convertTo(dataType); - if (rows.containsDistinct(new Value[] { l })) { - return ValueBoolean.get(true); - } - if (rows.containsDistinct(new Value[] { ValueNull.INSTANCE })) { - return ValueNull.INSTANCE; - } - return ValueBoolean.get(false); - } finally { - rows.close(); - } - } - - private Value getValueSlow(LocalResult rows, Value l) { - // this only returns the correct result if the result has at least one - // row, and if l is not null - boolean hasNull = false; - boolean result = all; - while (rows.next()) { - boolean value; - Value r = rows.currentRow()[0]; - if (r == ValueNull.INSTANCE) { - value = false; - hasNull = true; - } else { - value = Comparison.compareNotNull(database, l, r, compareType); - } - if (!value && all) { - result = false; - break; - } else if (value && !all) { - result = true; - break; - } - } - if (!result && hasNull) 
{ - return ValueNull.INSTANCE; - } - return ValueBoolean.get(result); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - query.mapColumns(resolver, level + 1); - this.queryLevel = Math.max(level, this.queryLevel); - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - query.setRandomAccessResult(true); - query.prepare(); - if (query.getColumnCount() != 1) { - throw DbException.get(ErrorCode.SUBQUERY_IS_NOT_SINGLE_COLUMN); - } - // Can not optimize: the data may change - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - query.setEvaluatable(tableFilter, b); - } - - @Override - public String getSQL() { - StringBuilder buff = new StringBuilder(); - buff.append('(').append(left.getSQL()).append(' '); - if (all) { - buff.append(Comparison.getCompareOperator(compareType)). - append(" ALL"); - } else { - if (compareType == Comparison.EQUAL) { - buff.append("IN"); - } else { - buff.append(Comparison.getCompareOperator(compareType)). - append(" ANY"); - } - } - buff.append("(\n").append(StringUtils.indent(query.getPlanSQL(), 4, false)). 
- append("))"); - return buff.toString(); - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - query.updateAggregate(session); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && query.isEverything(visitor); - } - - @Override - public int getCost() { - return left.getCost() + query.getCostAsExpression(); - } - - @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (!session.getDatabase().getSettings().optimizeInList) { - return; - } - if (!(left instanceof ExpressionColumn)) { - return; - } - ExpressionColumn l = (ExpressionColumn) left; - if (filter != l.getTableFilter()) { - return; - } - ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); - if (!query.isEverything(visitor)) { - return; - } - filter.addIndexCondition(IndexCondition.getInQuery(l, query)); - } - - @Override - public boolean isDisjunctive() { - return true; - } - -} diff --git a/h2/src/main/org/h2/expression/ConditionNot.java b/h2/src/main/org/h2/expression/ConditionNot.java deleted file mode 100644 index 4bb113258c..0000000000 --- a/h2/src/main/org/h2/expression/ConditionNot.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Session; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * A NOT condition. 
- */ -public class ConditionNot extends Condition { - - private Expression condition; - - public ConditionNot(Expression condition) { - this.condition = condition; - } - - @Override - public Expression getNotIfPossible(Session session) { - return condition; - } - - @Override - public Value getValue(Session session) { - Value v = condition.getValue(session); - if (v == ValueNull.INSTANCE) { - return v; - } - return v.convertTo(Value.BOOLEAN).negate(); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - condition.mapColumns(resolver, level); - } - - @Override - public Expression optimize(Session session) { - Expression e2 = condition.getNotIfPossible(session); - if (e2 != null) { - return e2.optimize(session); - } - Expression expr = condition.optimize(session); - if (expr.isConstant()) { - Value v = expr.getValue(session); - if (v == ValueNull.INSTANCE) { - return ValueExpression.getNull(); - } - return ValueExpression.get(v.convertTo(Value.BOOLEAN).negate()); - } - condition = expr; - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - condition.setEvaluatable(tableFilter, b); - } - - @Override - public String getSQL() { - return "(NOT " + condition.getSQL() + ")"; - } - - @Override - public void updateAggregate(Session session) { - condition.updateAggregate(session); - } - - @Override - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (outerJoin) { - // can not optimize: - // select * from test t1 left join test t2 on t1.id = t2.id where - // not t2.id is not null - // to - // select * from test t1 left join test t2 on t1.id = t2.id and - // t2.id is not null - return; - } - super.addFilterConditions(filter, outerJoin); - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return condition.isEverything(visitor); - } - - @Override - public int getCost() { - return condition.getCost(); - } - -} diff --git 
a/h2/src/main/org/h2/expression/DomainValueExpression.java b/h2/src/main/org/h2/expression/DomainValueExpression.java new file mode 100644 index 0000000000..e1831203e0 --- /dev/null +++ b/h2/src/main/org/h2/expression/DomainValueExpression.java @@ -0,0 +1,78 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.api.ErrorCode; +import org.h2.constraint.DomainColumnResolver; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.util.ParserUtil; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * An expression representing a value for domain constraint. + */ +public final class DomainValueExpression extends Operation0 { + + private DomainColumnResolver columnResolver; + + public DomainValueExpression() { + } + + @Override + public Value getValue(SessionLocal session) { + return columnResolver.getValue(null); + } + + @Override + public TypeInfo getType() { + return columnResolver.getValueType(); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (resolver instanceof DomainColumnResolver) { + columnResolver = (DomainColumnResolver) resolver; + } + } + + @Override + public Expression optimize(SessionLocal session) { + if (columnResolver == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "VALUE"); + } + return this; + } + + @Override + public boolean isValueSet() { + return columnResolver.getValue(null) != null; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (columnResolver != null) { + String name = columnResolver.getColumnName(); + if (name != null) { + return ParserUtil.quoteIdentifier(builder, name, sqlFlags); + } + } + return builder.append("VALUE"); + } + + @Override + public boolean 
isEverything(ExpressionVisitor visitor) { + return true; + } + + @Override + public int getCost() { + return 1; + } + +} diff --git a/h2/src/main/org/h2/expression/Expression.java b/h2/src/main/org/h2/expression/Expression.java index b340d7ca3a..7718e6e6f0 100644 --- a/h2/src/main/org/h2/expression/Expression.java +++ b/h2/src/main/org/h2/expression/Expression.java @@ -1,54 +1,134 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import org.h2.engine.Database; -import org.h2.engine.Session; +import java.util.List; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.NamedExpression; import org.h2.message.DbException; import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.util.HasSQL; import org.h2.util.StringUtils; -import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Typed; import org.h2.value.Value; -import org.h2.value.ValueArray; /** * An expression is a operation, a value, or a function in a query. */ -public abstract class Expression { +public abstract class Expression implements HasSQL, Typed { + + /** + * Initial state for {@link #mapColumns(ColumnResolver, int, int)}. + */ + public static final int MAP_INITIAL = 0; + + /** + * State for expressions inside a window function for + * {@link #mapColumns(ColumnResolver, int, int)}. + */ + public static final int MAP_IN_WINDOW = 1; + + /** + * State for expressions inside an aggregate for + * {@link #mapColumns(ColumnResolver, int, int)}. 
+ */ + public static final int MAP_IN_AGGREGATE = 2; + + /** + * Wrap expression in parentheses only if it can't be safely included into + * other expressions without them. + */ + public static final int AUTO_PARENTHESES = 0; + + /** + * Wrap expression in parentheses unconditionally. + */ + public static final int WITH_PARENTHESES = 1; + + /** + * Do not wrap expression in parentheses. + */ + public static final int WITHOUT_PARENTHESES = 2; private boolean addedToFilter; + /** + * Get the SQL snippet for a list of expressions. + * + * @param builder the builder to append the SQL to + * @param expressions the list of expressions + * @param sqlFlags formatting flags + * @return the specified string builder + */ + public static StringBuilder writeExpressions(StringBuilder builder, List expressions, + int sqlFlags) { + for (int i = 0, length = expressions.size(); i < length; i++) { + if (i > 0) { + builder.append(", "); + } + expressions.get(i).getUnenclosedSQL(builder, sqlFlags); + } + return builder; + } + + /** + * Get the SQL snippet for an array of expressions. + * + * @param builder the builder to append the SQL to + * @param expressions the list of expressions + * @param sqlFlags formatting flags + * @return the specified string builder + */ + public static StringBuilder writeExpressions(StringBuilder builder, Expression[] expressions, int sqlFlags) { + for (int i = 0, length = expressions.length; i < length; i++) { + if (i > 0) { + builder.append(", "); + } + Expression e = expressions[i]; + if (e == null) { + builder.append("DEFAULT"); + } else { + e.getUnenclosedSQL(builder, sqlFlags); + } + } + return builder; + } + /** * Return the resulting value for the current row. * * @param session the session * @return the result */ - public abstract Value getValue(Session session); + public abstract Value getValue(SessionLocal session); /** - * Return the data type. The data type may not be known before the + * Returns the data type. 
The data type may be unknown before the * optimization phase. * - * @return the type + * @return the data type */ - public abstract int getType(); + @Override + public abstract TypeInfo getType(); /** * Map the columns of the resolver to expression columns. * * @param resolver the column resolver * @param level the subquery nesting level + * @param state current state for nesting checks, initial value is + * {@link #MAP_INITIAL} */ - public abstract void mapColumns(ColumnResolver resolver, int level); + public abstract void mapColumns(ColumnResolver resolver, int level, int state); /** * Try to optimize the expression. @@ -56,7 +136,21 @@ public abstract class Expression { * @param session the session * @return the optimized expression */ - public abstract Expression optimize(Session session); + public abstract Expression optimize(SessionLocal session); + + /** + * Try to optimize or remove the condition. + * + * @param session the session + * @return the optimized condition, or {@code null} + */ + public final Expression optimizeCondition(SessionLocal session) { + Expression e = optimize(session); + if (e.isConstant()) { + return e.getBooleanValue(session) ? null : ValueExpression.FALSE; + } + return e; + } /** * Tell the expression columns whether the table filter can return values @@ -67,35 +161,85 @@ public abstract class Expression { */ public abstract void setEvaluatable(TableFilter tableFilter, boolean value); + @Override + public final String getSQL(int sqlFlags) { + return getSQL(new StringBuilder(), sqlFlags, AUTO_PARENTHESES).toString(); + } + + @Override + public final StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + /** + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. 
+ * + * @param sqlFlags + * formatting flags + * @param parentheses + * parentheses mode + * @return the SQL statement + */ + public final String getSQL(int sqlFlags, int parentheses) { + return getSQL(new StringBuilder(), sqlFlags, parentheses).toString(); + } + /** - * Get the scale of this expression. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. * - * @return the scale + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @param parentheses + * parentheses mode + * @return the specified string builder */ - public abstract int getScale(); + public final StringBuilder getSQL(StringBuilder builder, int sqlFlags, int parentheses) { + return parentheses == WITH_PARENTHESES || parentheses != WITHOUT_PARENTHESES && needParentheses() + ? getUnenclosedSQL(builder.append('('), sqlFlags).append(')') + : getUnenclosedSQL(builder, sqlFlags); + } /** - * Get the precision of this expression. + * Returns whether this expressions needs to be wrapped in parentheses when + * it is used as an argument of other expressions. * - * @return the precision + * @return {@code true} if it is */ - public abstract long getPrecision(); + public boolean needParentheses() { + return false; + } /** - * Get the display size of this expression. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. Enclosing '(' and + * ')' are always appended. * - * @return the display size + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder */ - public abstract int getDisplaySize(); + public final StringBuilder getEnclosedSQL(StringBuilder builder, int sqlFlags) { + return getUnenclosedSQL(builder.append('('), sqlFlags).append(')'); + } /** - * Get the SQL statement of this expression. 
- * This may not always be the original SQL statement, - * specially after optimization. + * Get the SQL statement of this expression. This may not always be the + * original SQL statement, especially after optimization. Enclosing '(' and + * ')' are never appended. * - * @return the SQL statement + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder */ - public abstract String getSQL(); + public abstract StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags); /** * Update an aggregate value. This method is called at statement execution @@ -105,8 +249,9 @@ public abstract class Expression { * be used to make sure the internal state is only updated once. * * @param session the session + * @param stage select stage */ - public abstract void updateAggregate(Session session); + public abstract void updateAggregate(SessionLocal session, int stage); /** * Check if this expression and all sub-expressions can fulfill a criteria. @@ -128,13 +273,13 @@ public abstract class Expression { /** * If it is possible, return the negated expression. This is used - * to optimize NOT expressions: NOT ID>10 can be converted to + * to optimize NOT expressions: NOT ID>10 can be converted to * ID<=10. Returns null if negating is not possible. * * @param session the session * @return the negated expression, or null */ - public Expression getNotIfPossible(Session session) { + public Expression getNotIfPossible(@SuppressWarnings("unused") SessionLocal session) { // by default it is not possible return null; } @@ -148,6 +293,15 @@ public boolean isConstant() { return false; } + /** + * Check if this expression will always return the NULL value. + * + * @return if the expression is constant NULL value + */ + public boolean isNullConstant() { + return false; + } + /** * Is the value of a parameter set. * @@ -158,24 +312,24 @@ public boolean isValueSet() { } /** - * Check if this is an auto-increment column. 
+ * Check if this is an identity column. * - * @return true if it is an auto-increment column + * @return true if it is an identity column */ - public boolean isAutoIncrement() { + public boolean isIdentity() { return false; } /** * Get the value in form of a boolean expression. - * Returns true, false, or null. + * Returns true or false. * In this database, everything can be a condition. * * @param session the session * @return the result */ - public Boolean getBooleanValue(Session session) { - return getValue(session).getBoolean(); + public boolean getBooleanValue(SessionLocal session) { + return getValue(session).isTrue(); } /** @@ -184,17 +338,20 @@ public Boolean getBooleanValue(Session session) { * @param session the session * @param filter the table filter */ - public void createIndexConditions(Session session, TableFilter filter) { + @SuppressWarnings("unused") + public void createIndexConditions(SessionLocal session, TableFilter filter) { // default is do nothing } /** * Get the column name or alias name of this expression. * + * @param session the session + * @param columnIndex 0-based column index * @return the column name */ - public String getColumnName() { - return getAlias(); + public String getColumnName(SessionLocal session, int columnIndex) { + return getAlias(session, columnIndex); } /** @@ -238,19 +395,55 @@ public String getTableAlias() { * Get the alias name of a column or SQL expression * if it is not an aliased expression. 
* + * @param session the session + * @param columnIndex 0-based column index * @return the alias name */ - public String getAlias() { - return StringUtils.unEnclose(getSQL()); + public String getAlias(SessionLocal session, int columnIndex) { + switch (session.getMode().expressionNames) { + default: { + String sql = getSQL(QUOTE_ONLY_WHEN_REQUIRED | NO_CASTS, WITHOUT_PARENTHESES); + if (sql.length() <= Constants.MAX_IDENTIFIER_LENGTH) { + return sql; + } + } + //$FALL-THROUGH$ + case C_NUMBER: + return "C" + (columnIndex + 1); + case EMPTY: + return ""; + case NUMBER: + return Integer.toString(columnIndex + 1); + case POSTGRESQL_STYLE: + if (this instanceof NamedExpression) { + return StringUtils.toLowerEnglish(((NamedExpression) this).getName()); + } + return "?column?"; + } } /** - * Only returns true if the expression is a wildcard. + * Get the column name of this expression for a view. * - * @return if this expression is a wildcard - */ - public boolean isWildcard() { - return false; + * @param session the session + * @param columnIndex 0-based column index + * @return the column name for a view + */ + public String getColumnNameForView(SessionLocal session, int columnIndex) { + switch (session.getMode().viewExpressionNames) { + case AS_IS: + default: + return getAlias(session, columnIndex); + case EXCEPTION: + throw DbException.get(ErrorCode.COLUMN_ALIAS_IS_NOT_SPECIFIED_1, getTraceSQL()); + case MYSQL_STYLE: { + String name = getSQL(QUOTE_ONLY_WHEN_REQUIRED | NO_CASTS, WITHOUT_PARENTHESES); + if (name.length() > 64) { + name = "Name_exp_" + (columnIndex + 1); + } + return name; + } + } } /** @@ -262,24 +455,13 @@ public Expression getNonAliasExpression() { return this; } - /** - * Allows to check if the related expression is under conjunctive format. - * - * @return if the related expression has the logic operator OR. - */ - public boolean isDisjunctive() { - return false; - } - /** * Add conditions to a table filter if they can be evaluated. 
* * @param filter the table filter - * @param outerJoin if the expression is part of an outer join */ - public void addFilterConditions(TableFilter filter, boolean outerJoin) { - if (!addedToFilter && !outerJoin && - isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { + public void addFilterConditions(TableFilter filter) { + if (!addedToFilter && isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) { filter.addFilterCondition(this, false); addedToFilter = true; } @@ -292,66 +474,63 @@ public void addFilterConditions(TableFilter filter, boolean outerJoin) { */ @Override public String toString() { - return getSQL(); + return getTraceSQL(); } /** - * If this expression consists of column expressions it should return them. + * Returns count of subexpressions. * - * @param session the session - * @return array of expression columns if applicable, null otherwise + * @return count of subexpressions */ - public Expression[] getExpressionColumns(Session session) { - return null; + public int getSubexpressionCount() { + return 0; } /** - * Extracts expression columns from ValueArray + * Returns subexpression with specified index. 
* - * @param session the current session - * @param value the value to extract columns from - * @return array of expression columns + * @param index 0-based index + * @return subexpression with specified index, may be null + * @throws IndexOutOfBoundsException if specified index is not valid */ - static Expression[] getExpressionColumns(Session session, ValueArray value) { - Value[] list = value.getList(); - ExpressionColumn[] expr = new ExpressionColumn[list.length]; - for (int i = 0, len = list.length; i < len; i++) { - Value v = list[i]; - Column col = new Column("C" + (i + 1), v.getType(), - v.getPrecision(), v.getScale(), - v.getDisplaySize()); - expr[i] = new ExpressionColumn(session.getDatabase(), col); - } - return expr; + public Expression getSubexpression(int index) { + throw new IndexOutOfBoundsException(); } /** - * Extracts expression columns from the given result set. + * Return the resulting value of when operand for the current row. * - * @param session the session - * @param rs the result set - * @return an array of expression columns - */ - public static Expression[] getExpressionColumns(Session session, ResultSet rs) { - try { - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount(); - Expression[] expressions = new Expression[columnCount]; - Database db = session == null ? 
null : session.getDatabase(); - for (int i = 0; i < columnCount; i++) { - String name = meta.getColumnLabel(i + 1); - int type = DataType.getValueTypeFromResultSet(meta, i + 1); - int precision = meta.getPrecision(i + 1); - int scale = meta.getScale(i + 1); - int displaySize = meta.getColumnDisplaySize(i + 1); - Column col = new Column(name, type, precision, scale, displaySize); - Expression expr = new ExpressionColumn(db, col); - expressions[i] = expr; - } - return expressions; - } catch (SQLException e) { - throw DbException.convert(e); - } + * @param session + * the session + * @param left + * value on the left side + * @return the result + */ + public boolean getWhenValue(SessionLocal session, Value left) { + return session.compareWithNull(left, getValue(session), true) == 0; + } + + /** + * Appends the SQL statement of this when operand to the specified builder. + * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @return the specified string builder + */ + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return getUnenclosedSQL(builder.append(' '), sqlFlags); + } + + /** + * Returns whether this expression is a right side of condition in a when + * operand. + * + * @return {@code true} if it is, {@code false} otherwise + */ + public boolean isWhenConditionOperand() { + return false; } } diff --git a/h2/src/main/org/h2/expression/ExpressionColumn.java b/h2/src/main/org/h2/expression/ExpressionColumn.java index cc6517efe1..6a207b29cf 100644 --- a/h2/src/main/org/h2/expression/ExpressionColumn.java +++ b/h2/src/main/org/h2/expression/ExpressionColumn.java @@ -1,18 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; -import java.util.HashMap; - import org.h2.api.ErrorCode; -import org.h2.command.Parser; -import org.h2.command.dml.Select; -import org.h2.command.dml.SelectListColumnResolver; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.command.query.SelectListColumnResolver; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.analysis.DataAnalysisOperation; +import org.h2.expression.condition.Comparison; +import org.h2.expression.function.CurrentDateTimeValueFunction; import org.h2.index.IndexCondition; import org.h2.message.DbException; import org.h2.schema.Constant; @@ -21,57 +22,133 @@ import org.h2.table.ColumnResolver; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.ParserUtil; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; /** - * A expression that represents a column of a table or view. + * A column reference expression that represents a column of a table or view. */ -public class ExpressionColumn extends Expression { +public final class ExpressionColumn extends Expression { private final Database database; private final String schemaName; private final String tableAlias; private final String columnName; + private final boolean rowId; + private final boolean quotedName; private ColumnResolver columnResolver; private int queryLevel; private Column column; - private boolean evaluatable; + /** + * Creates a new column reference for metadata of queries; should not be + * used as normal expression. 
+ * + * @param database + * the database + * @param column + * the column + */ public ExpressionColumn(Database database, Column column) { this.database = database; this.column = column; - this.schemaName = null; - this.tableAlias = null; - this.columnName = null; + columnName = tableAlias = schemaName = null; + rowId = column.isRowId(); + quotedName = true; + } + + /** + * Creates a new instance of column reference for regular columns as normal + * expression. + * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + * @param columnName + * the column name + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias, String columnName) { + this(database, schemaName, tableAlias, columnName, true); } - public ExpressionColumn(Database database, String schemaName, - String tableAlias, String columnName) { + /** + * Creates a new instance of column reference for regular columns as normal + * expression. + * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + * @param columnName + * the column name + * @param quotedName + * whether name was quoted + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias, String columnName, + boolean quotedName) { this.database = database; this.schemaName = schemaName; this.tableAlias = tableAlias; this.columnName = columnName; + rowId = false; + this.quotedName = quotedName; + } + + /** + * Creates a new instance of column reference for {@code _ROWID_} column as + * normal expression. 
+ * + * @param database + * the database + * @param schemaName + * the schema name, or {@code null} + * @param tableAlias + * the table alias name, table name, or {@code null} + */ + public ExpressionColumn(Database database, String schemaName, String tableAlias) { + this.database = database; + this.schemaName = schemaName; + this.tableAlias = tableAlias; + columnName = Column.ROWID; + quotedName = rowId = true; } @Override - public String getSQL() { - String sql; - boolean quote = database.getSettings().databaseToUpper; - if (column != null) { - sql = column.getSQL(); - } else { - sql = quote ? Parser.quoteIdentifier(columnName) : columnName; + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (schemaName != null) { + ParserUtil.quoteIdentifier(builder, schemaName, sqlFlags).append('.'); } if (tableAlias != null) { - String a = quote ? Parser.quoteIdentifier(tableAlias) : tableAlias; - sql = a + "." + sql; + ParserUtil.quoteIdentifier(builder, tableAlias, sqlFlags).append('.'); } - if (schemaName != null) { - String s = quote ? Parser.quoteIdentifier(schemaName) : schemaName; - sql = s + "." 
+ sql; + if (column != null) { + if (columnResolver != null && columnResolver.hasDerivedColumnList()) { + ParserUtil.quoteIdentifier(builder, columnResolver.getColumnName(column), sqlFlags); + } else { + column.getSQL(builder, sqlFlags); + } + } else if (rowId) { + builder.append(columnName); + } else { + ParserUtil.quoteIdentifier(builder, columnName, sqlFlags); } - return sql; + return builder; } public TableFilter getTableFilter() { @@ -79,32 +156,28 @@ public TableFilter getTableFilter() { } @Override - public void mapColumns(ColumnResolver resolver, int level) { - if (tableAlias != null && !database.equalsIdentifiers( - tableAlias, resolver.getTableAlias())) { + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (tableAlias != null && !database.equalsIdentifiers(tableAlias, resolver.getTableAlias())) { return; } - if (schemaName != null && !database.equalsIdentifiers( - schemaName, resolver.getSchemaName())) { + if (schemaName != null && !database.equalsIdentifiers(schemaName, resolver.getSchemaName())) { return; } - for (Column col : resolver.getColumns()) { - String n = col.getName(); - if (database.equalsIdentifiers(columnName, n)) { - mapColumn(resolver, col, level); - return; - } - } - if (database.equalsIdentifiers(Column.ROWID, columnName)) { + if (rowId) { Column col = resolver.getRowIdColumn(); if (col != null) { mapColumn(resolver, col, level); - return; } + return; + } + Column col = resolver.findColumn(columnName); + if (col != null) { + mapColumn(resolver, col, level); + return; } Column[] columns = resolver.getSystemColumns(); for (int i = 0; columns != null && i < columns.length; i++) { - Column col = columns[i]; + col = columns[i]; if (database.equalsIdentifiers(columnName, col.getName())) { mapColumn(resolver, col, level); return; @@ -127,7 +200,7 @@ private void mapColumn(ColumnResolver resolver, Column col, int level) { } @Override - public Expression optimize(Session session) { + public Expression 
optimize(SessionLocal session) { if (columnResolver == null) { Schema schema = session.getDatabase().findSchema( tableAlias == null ? session.getCurrentSchemaName() : tableAlias); @@ -137,91 +210,107 @@ public Expression optimize(Session session) { return constant.getValue(); } } - String name = columnName; - if (tableAlias != null) { - name = tableAlias + "." + name; - if (schemaName != null) { - name = schemaName + "." + name; - } - } - throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, name); + return optimizeOther(); } return columnResolver.optimize(this, column); } + private Expression optimizeOther() { + if (tableAlias == null && !quotedName) { + switch (StringUtils.toUpperEnglish(columnName)) { + case "SYSDATE": + case "TODAY": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, -1); + case "SYSTIME": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, -1); + case "SYSTIMESTAMP": + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, -1); + } + } + throw getColumnException(ErrorCode.COLUMN_NOT_FOUND_1); + } + + /** + * Get exception to throw, with column and table info added + * + * @param code SQL error code + * @return DbException + */ + public DbException getColumnException(int code) { + String name = columnName; + if (tableAlias != null) { + if (schemaName != null) { + name = schemaName + '.' + tableAlias + '.' + name; + } else { + name = tableAlias + '.' 
+ name; + } + } + return DbException.get(code, name); + } + @Override - public void updateAggregate(Session session) { - Value now = columnResolver.getValue(column); + public void updateAggregate(SessionLocal session, int stage) { Select select = columnResolver.getSelect(); if (select == null) { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL()); + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); + } + if (stage == DataAnalysisOperation.STAGE_RESET) { + return; } - HashMap values = select.getCurrentGroup(); - if (values == null) { + SelectGroups groupData = select.getGroupDataIfCurrent(false); + if (groupData == null) { // this is a different level (the enclosing query) return; } - Value v = (Value) values.get(this); + Value v = (Value) groupData.getCurrentGroupExprData(this); if (v == null) { - values.put(this, now); - } else { - if (!database.areEqual(now, v)) { - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL()); + groupData.setCurrentGroupExprData(this, columnResolver.getValue(column)); + } else if (!select.isGroupWindowStage2()) { + if (!session.areEqual(columnResolver.getValue(column), v)) { + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); } } } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Select select = columnResolver.getSelect(); if (select != null) { - HashMap values = select.getCurrentGroup(); - if (values != null) { - Value v = (Value) values.get(this); + SelectGroups groupData = select.getGroupDataIfCurrent(false); + if (groupData != null) { + Value v = (Value) groupData.getCurrentGroupExprData(this); if (v != null) { return v; } + if (select.isGroupWindowStage2()) { + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); + } } } Value value = columnResolver.getValue(column); if (value == null) { - columnResolver.getValue(column); - throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getSQL()); + 
if (select == null) { + throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, getTraceSQL()); + } else { + throw DbException.get(ErrorCode.MUST_GROUP_BY_COLUMN_1, getTraceSQL()); + } } return value; } @Override - public int getType() { - return column.getType(); + public TypeInfo getType() { + return column != null ? column.getType() : rowId ? TypeInfo.TYPE_BIGINT : TypeInfo.TYPE_UNKNOWN; } @Override public void setEvaluatable(TableFilter tableFilter, boolean b) { - if (columnResolver != null && tableFilter == columnResolver.getTableFilter()) { - evaluatable = b; - } } public Column getColumn() { return column; } - @Override - public int getScale() { - return column.getScale(); - } - - @Override - public long getPrecision() { - return column.getPrecision(); - } - - @Override - public int getDisplaySize() { - return column.getDisplaySize(); - } - public String getOriginalColumnName() { return columnName; } @@ -231,8 +320,14 @@ public String getOriginalTableAliasName() { } @Override - public String getColumnName() { - return columnName != null ? columnName : column.getName(); + public String getColumnName(SessionLocal session, int columnIndex) { + if (column != null) { + if (columnResolver != null) { + return columnResolver.getColumnName(column); + } + return column.getName(); + } + return columnName; } @Override @@ -248,19 +343,27 @@ public String getTableName() { } @Override - public String getAlias() { + public String getAlias(SessionLocal session, int columnIndex) { if (column != null) { + if (columnResolver != null) { + return columnResolver.getColumnName(column); + } return column.getName(); } if (tableAlias != null) { - return tableAlias + "." + columnName; + return tableAlias + '.' 
+ columnName; } return columnName; } @Override - public boolean isAutoIncrement() { - return column.getSequence() != null; + public String getColumnNameForView(SessionLocal session, int columnIndex) { + return getAlias(session, columnIndex); + } + + @Override + public boolean isIdentity() { + return column.isIdentity(); } @Override @@ -271,28 +374,20 @@ public int getNullable() { @Override public boolean isEverything(ExpressionVisitor visitor) { switch (visitor.getType()) { - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: + case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: return false; - case ExpressionVisitor.READONLY: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.QUERY_COMPARABLE: - return true; case ExpressionVisitor.INDEPENDENT: return this.queryLevel < visitor.getQueryLevel(); case ExpressionVisitor.EVALUATABLE: - // if the current value is known (evaluatable set) - // or if this columns belongs to a 'higher level' query and is + // if this column belongs to a 'higher level' query and is // therefore just a parameter - if (database.getSettings().nestedJoins) { - if (visitor.getQueryLevel() < this.queryLevel) { - return true; - } - if (getTableFilter() == null) { - return false; - } - return getTableFilter().isEvaluatable(); + if (visitor.getQueryLevel() < this.queryLevel) { + return true; + } + if (getTableFilter() == null) { + return false; } - return evaluatable || visitor.getQueryLevel() < this.queryLevel; + return getTableFilter().isEvaluatable(); case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: visitor.addDataModificationId(column.getTable().getMaxDataModificationId()); return true; @@ -303,11 +398,37 @@ public boolean isEverything(ExpressionVisitor visitor) { visitor.addDependency(column.getTable()); } return true; - case ExpressionVisitor.GET_COLUMNS: - visitor.addColumn(column); + case ExpressionVisitor.GET_COLUMNS1: + if (column == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); + } + 
visitor.addColumn1(column); return true; + case ExpressionVisitor.GET_COLUMNS2: + if (column == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); + } + visitor.addColumn2(column); + return true; + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: { + if (column == null) { + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, getTraceSQL()); + } + if (visitor.getColumnResolvers().contains(columnResolver)) { + int decrement = visitor.getQueryLevel(); + if (decrement > 0) { + if (queryLevel > 0) { + queryLevel--; + return true; + } + throw DbException.getInternalError("queryLevel=0"); + } + return queryLevel > 0; + } + } + //$FALL-THROUGH$ default: - throw DbException.throwInternalError("type=" + visitor.getType()); + return true; } } @@ -317,20 +438,57 @@ public int getCost() { } @Override - public void createIndexConditions(Session session, TableFilter filter) { + public void createIndexConditions(SessionLocal session, TableFilter filter) { TableFilter tf = getTableFilter(); - if (filter == tf && column.getType() == Value.BOOLEAN) { - IndexCondition cond = IndexCondition.get( - Comparison.EQUAL, this, ValueExpression.get( - ValueBoolean.get(true))); - filter.addIndexCondition(cond); + if (filter == tf && column.getType().getValueType() == Value.BOOLEAN) { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL, this, ValueExpression.TRUE)); } } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.get(false))); + public Expression getNotIfPossible(SessionLocal session) { + Expression o = optimize(session); + if (o != this) { + return o.getNotIfPossible(session); + } + Value v; + switch (column.getType().getValueType()) { + case Value.BOOLEAN: + v = ValueBoolean.FALSE; + break; + case Value.TINYINT: + v = ValueTinyint.get((byte) 0); + break; + case Value.SMALLINT: + v = ValueSmallint.get((short) 0); + break; + case Value.INTEGER: + v 
= ValueInteger.get(0); + break; + case Value.BIGINT: + v = ValueBigint.get(0L); + break; + case Value.NUMERIC: + v = ValueNumeric.ZERO; + break; + case Value.REAL: + v = ValueReal.ZERO; + break; + case Value.DOUBLE: + v = ValueDouble.ZERO; + break; + case Value.DECFLOAT: + v = ValueDecfloat.ZERO; + break; + default: + /* + * Can be replaced with CAST(column AS BOOLEAN) = FALSE, but this + * replacement can't be optimized further, so it's better to leave + * NOT (column) as is. + */ + return null; + } + return new Comparison(Comparison.EQUAL, this, ValueExpression.get(v), false); } } diff --git a/h2/src/main/org/h2/expression/ExpressionList.java b/h2/src/main/org/h2/expression/ExpressionList.java index 78b5eb0a00..25c38c160b 100644 --- a/h2/src/main/org/h2/expression/ExpressionList.java +++ b/h2/src/main/org/h2/expression/ExpressionList.java @@ -1,67 +1,78 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; -import org.h2.table.Column; +import org.h2.engine.SessionLocal; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; +import org.h2.value.ValueRow; /** * A list of expressions, as in (ID, NAME). - * The result of this expression is an array. + * The result of this expression is a row or an array. 
*/ -public class ExpressionList extends Expression { +public final class ExpressionList extends Expression { private final Expression[] list; + private final boolean isArray; + private TypeInfo type; - public ExpressionList(Expression[] list) { + public ExpressionList(Expression[] list, boolean isArray) { this.list = list; + this.isArray = isArray; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { Value[] v = new Value[list.length]; for (int i = 0; i < list.length; i++) { v[i] = list[i].getValue(session); } - return ValueArray.get(v); + return isArray ? ValueArray.get((TypeInfo) type.getExtTypeInfo(), v, session) : ValueRow.get(type, v); } @Override - public int getType() { - return Value.ARRAY; + public TypeInfo getType() { + return type; } @Override - public void mapColumns(ColumnResolver resolver, int level) { + public void mapColumns(ColumnResolver resolver, int level, int state) { for (Expression e : list) { - e.mapColumns(resolver, level); + e.mapColumns(resolver, level, state); } } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { boolean allConst = true; - for (int i = 0; i < list.length; i++) { + int count = list.length; + for (int i = 0; i < count; i++) { Expression e = list[i].optimize(session); if (!e.isConstant()) { allConst = false; } list[i] = e; } + initializeType(); if (allConst) { return ValueExpression.get(getValue(session)); } return this; } + void initializeType() { + type = isArray ? 
TypeInfo.getTypeInfo(Value.ARRAY, list.length, 0, TypeInfo.getHigherType(list)) + : TypeInfo.getTypeInfo(Value.ROW, 0, 0, new ExtTypeInfoRow(list)); + } + @Override public void setEvaluatable(TableFilter tableFilter, boolean b) { for (Expression e : list) { @@ -70,37 +81,16 @@ public void setEvaluatable(TableFilter tableFilter, boolean b) { } @Override - public int getScale() { - return 0; - } - - @Override - public long getPrecision() { - return Integer.MAX_VALUE; - } - - @Override - public int getDisplaySize() { - return Integer.MAX_VALUE; - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder("("); - for (Expression e: list) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - if (list.length == 1) { - buff.append(','); - } - return buff.append(')').toString(); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return isArray // + ? writeExpressions(builder.append("ARRAY ["), list, sqlFlags).append(']') + : writeExpressions(builder.append("ROW ("), list, sqlFlags).append(')'); } @Override - public void updateAggregate(Session session) { + public void updateAggregate(SessionLocal session, int stage) { for (Expression e : list) { - e.updateAggregate(session); + e.updateAggregate(session, stage); } } @@ -124,16 +114,27 @@ public int getCost() { } @Override - public Expression[] getExpressionColumns(Session session) { - ExpressionColumn[] expr = new ExpressionColumn[list.length]; - for (int i = 0; i < list.length; i++) { - Expression e = list[i]; - Column col = new Column("C" + (i + 1), - e.getType(), e.getPrecision(), e.getScale(), - e.getDisplaySize()); - expr[i] = new ExpressionColumn(session.getDatabase(), col); + public boolean isConstant() { + for (Expression e : list) { + if (!e.isConstant()) { + return false; + } } - return expr; + return true; + } + + @Override + public int getSubexpressionCount() { + return list.length; + } + + @Override + public Expression 
getSubexpression(int index) { + return list[index]; + } + + public boolean isArray() { + return isArray; } } diff --git a/h2/src/main/org/h2/expression/ExpressionVisitor.java b/h2/src/main/org/h2/expression/ExpressionVisitor.java index 6c452efe5e..7f2660fd7b 100644 --- a/h2/src/main/org/h2/expression/ExpressionVisitor.java +++ b/h2/src/main/org/h2/expression/ExpressionVisitor.java @@ -1,21 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import java.util.HashSet; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.DbObject; import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.Table; +import org.h2.table.TableFilter; /** * The visitor pattern is used to iterate through all expressions of a query * to optimize a statement. */ -public class ExpressionVisitor { +public final class ExpressionVisitor { /** * Is the value independent on unset parameters or on columns of a higher @@ -31,10 +33,10 @@ public class ExpressionVisitor { new ExpressionVisitor(INDEPENDENT); /** - * Are all aggregates MIN(column), MAX(column), or COUNT(*) for the given - * table (getTable)? + * Are all aggregates MIN(column), MAX(column), COUNT(*), MEDIAN(column), + * ENVELOPE(count) for the given table (getTable)? */ - public static final int OPTIMIZABLE_MIN_MAX_COUNT_ALL = 1; + public static final int OPTIMIZABLE_AGGREGATE = 1; /** * Does the expression return the same results for the same parameters? @@ -59,6 +61,37 @@ public class ExpressionVisitor { public static final ExpressionVisitor EVALUATABLE_VISITOR = new ExpressionVisitor(EVALUATABLE); + /** + * Count of cached INDEPENDENT and EVALUATABLE visitors with different query + * level. 
+ */ + private static final int CACHED = 8; + + /** + * INDEPENDENT listeners with query level 0, 1, ... + */ + private static final ExpressionVisitor[] INDEPENDENT_VISITORS; + + /** + * EVALUATABLE listeners with query level 0, 1, ... + */ + private static final ExpressionVisitor[] EVALUATABLE_VISITORS; + + static { + ExpressionVisitor[] a = new ExpressionVisitor[CACHED]; + a[0] = INDEPENDENT_VISITOR; + for (int i = 1; i < CACHED; i++) { + a[i] = new ExpressionVisitor(INDEPENDENT, i); + } + INDEPENDENT_VISITORS = a; + a = new ExpressionVisitor[CACHED]; + a[0] = EVALUATABLE_VISITOR; + for (int i = 1; i < CACHED; i++) { + a[i] = new ExpressionVisitor(EVALUATABLE, i); + } + EVALUATABLE_VISITORS = a; + } + /** * Request to set the latest modification id (addDataModificationId). */ @@ -94,10 +127,20 @@ public class ExpressionVisitor { */ public static final int QUERY_COMPARABLE = 8; + /** + * Get all referenced columns for the optimiser. + */ + public static final int GET_COLUMNS1 = 9; + /** * Get all referenced columns. */ - public static final int GET_COLUMNS = 9; + public static final int GET_COLUMNS2 = 10; + + /** + * Decrement query level of all expression columns. + */ + public static final int DECREMENT_QUERY_LEVEL = 11; /** * The visitor singleton for the type QUERY_COMPARABLE. 
@@ -107,22 +150,21 @@ public class ExpressionVisitor { private final int type; private final int queryLevel; - private final HashSet dependencies; - private final HashSet columns; + private final HashSet set; + private final AllColumnsForPlan columns1; private final Table table; private final long[] maxDataModificationId; private final ColumnResolver resolver; private ExpressionVisitor(int type, int queryLevel, - HashSet dependencies, - HashSet columns, - Table table, ColumnResolver resolver, + HashSet set, + AllColumnsForPlan columns1, Table table, ColumnResolver resolver, long[] maxDataModificationId) { this.type = type; this.queryLevel = queryLevel; - this.dependencies = dependencies; - this.columns = columns; + this.set = set; + this.columns1 = columns1; this.table = table; this.resolver = resolver; this.maxDataModificationId = maxDataModificationId; @@ -131,8 +173,18 @@ private ExpressionVisitor(int type, private ExpressionVisitor(int type) { this.type = type; this.queryLevel = 0; - this.dependencies = null; - this.columns = null; + this.set = null; + this.columns1 = null; + this.table = null; + this.resolver = null; + this.maxDataModificationId = null; + } + + private ExpressionVisitor(int type, int queryLevel) { + this.type = type; + this.queryLevel = queryLevel; + this.set = null; + this.columns1 = null; this.table = null; this.resolver = null; this.maxDataModificationId = null; @@ -157,7 +209,7 @@ public static ExpressionVisitor getDependenciesVisitor( * @return the new visitor */ public static ExpressionVisitor getOptimizableVisitor(Table table) { - return new ExpressionVisitor(OPTIMIZABLE_MIN_MAX_COUNT_ALL, 0, null, + return new ExpressionVisitor(OPTIMIZABLE_AGGREGATE, 0, null, null, table, null, null); } @@ -168,7 +220,7 @@ public static ExpressionVisitor getOptimizableVisitor(Table table) { * @param resolver the resolver * @return the new visitor */ - static ExpressionVisitor getNotFromResolverVisitor(ColumnResolver resolver) { + public static 
ExpressionVisitor getNotFromResolverVisitor(ColumnResolver resolver) { return new ExpressionVisitor(NOT_FROM_RESOLVER, 0, null, null, null, resolver, null); } @@ -179,8 +231,19 @@ static ExpressionVisitor getNotFromResolverVisitor(ColumnResolver resolver) { * @param columns the columns map * @return the new visitor */ - public static ExpressionVisitor getColumnsVisitor(HashSet columns) { - return new ExpressionVisitor(GET_COLUMNS, 0, null, columns, null, null, null); + public static ExpressionVisitor getColumnsVisitor(AllColumnsForPlan columns) { + return new ExpressionVisitor(GET_COLUMNS1, 0, null, columns, null, null, null); + } + + /** + * Create a new visitor to get all referenced columns. + * + * @param columns the columns map + * @param table table to gather columns from, or {@code null} to gather all columns + * @return the new visitor + */ + public static ExpressionVisitor getColumnsVisitor(HashSet columns, Table table) { + return new ExpressionVisitor(GET_COLUMNS2, 0, columns, null, table, null, null); } public static ExpressionVisitor getMaxModificationIdVisitor() { @@ -188,14 +251,31 @@ public static ExpressionVisitor getMaxModificationIdVisitor() { null, null, null, new long[1]); } + /** + * Create a new visitor to decrement query level in columns with the + * specified resolvers. + * + * @param columnResolvers + * column resolvers + * @param queryDecrement + * 0 to check whether operation is allowed, 1 to actually perform + * the decrement + * @return the new visitor + */ + public static ExpressionVisitor getDecrementQueryLevelVisitor(HashSet columnResolvers, + int queryDecrement) { + return new ExpressionVisitor(DECREMENT_QUERY_LEVEL, queryDecrement, columnResolvers, null, null, null, null); + } + /** * Add a new dependency to the set of dependencies. * This is used for GET_DEPENDENCIES visitors. * * @param obj the additional dependency. 
*/ + @SuppressWarnings("unchecked") public void addDependency(DbObject obj) { - dependencies.add(obj); + ((HashSet) set).add(obj); } /** @@ -204,8 +284,21 @@ public void addDependency(DbObject obj) { * * @param column the additional column. */ - void addColumn(Column column) { - columns.add(column); + void addColumn1(Column column) { + columns1.add(column); + } + + /** + * Add a new column to the set of columns. + * This is used for GET_COLUMNS2 visitors. + * + * @param column the additional column. + */ + @SuppressWarnings("unchecked") + void addColumn2(Column column) { + if (table == null || table == column.getTable()) { + ((HashSet) set).add(column); + } } /** @@ -214,19 +307,27 @@ void addColumn(Column column) { * * @return the set */ + @SuppressWarnings("unchecked") public HashSet getDependencies() { - return dependencies; + return (HashSet) set; } /** * Increment or decrement the query level. * * @param offset 1 to increment, -1 to decrement - * @return a clone of this expression visitor, with the changed query level + * @return this visitor or its clone with the changed query level */ public ExpressionVisitor incrementQueryLevel(int offset) { - return new ExpressionVisitor(type, queryLevel + offset, dependencies, - columns, table, resolver, maxDataModificationId); + if (type == INDEPENDENT) { + offset += queryLevel; + return offset < CACHED ? INDEPENDENT_VISITORS[offset] : new ExpressionVisitor(INDEPENDENT, offset); + } else if (type == EVALUATABLE) { + offset += queryLevel; + return offset < CACHED ? EVALUATABLE_VISITORS[offset] : new ExpressionVisitor(EVALUATABLE, offset); + } else { + return this; + } } /** @@ -239,6 +340,17 @@ public ColumnResolver getResolver() { return resolver; } + /** + * Get the set of column resolvers. + * This is used for {@link #DECREMENT_QUERY_LEVEL} visitors. 
+ * + * @return the set + */ + @SuppressWarnings("unchecked") + public HashSet getColumnResolvers() { + return (HashSet) set; + } + /** * Update the field maxDataModificationId if this value is higher * than the current value. @@ -264,6 +376,7 @@ public long getMaxDataModificationId() { } int getQueryLevel() { + assert type == INDEPENDENT || type == EVALUATABLE || type == DECREMENT_QUERY_LEVEL; return queryLevel; } @@ -286,4 +399,18 @@ public int getType() { return type; } + /** + * Get the set of columns of all tables. + * + * @param filters the filters + * @param allColumnsSet the on-demand all-columns set + */ + public static void allColumnsForTableFilters(TableFilter[] filters, AllColumnsForPlan allColumnsSet) { + for (TableFilter filter : filters) { + if (filter.getSelect() != null) { + filter.getSelect().isEverything(ExpressionVisitor.getColumnsVisitor(allColumnsSet)); + } + } + } + } diff --git a/h2/src/main/org/h2/expression/ExpressionWithFlags.java b/h2/src/main/org/h2/expression/ExpressionWithFlags.java new file mode 100644 index 0000000000..6100d5d550 --- /dev/null +++ b/h2/src/main/org/h2/expression/ExpressionWithFlags.java @@ -0,0 +1,28 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +/** + * Expression with flags. + */ +public interface ExpressionWithFlags { + + /** + * Set the flags for this expression. + * + * @param flags + * the flags to set + */ + void setFlags(int flags); + + /** + * Returns the flags. 
+ * + * @return the flags + */ + int getFlags(); + +} diff --git a/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java b/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java new file mode 100644 index 0000000000..a7c0d54e02 --- /dev/null +++ b/h2/src/main/org/h2/expression/ExpressionWithVariableParameters.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.message.DbException; + +/** + * An expression with variable number of parameters. + */ +public interface ExpressionWithVariableParameters { + + /** + * Adds the parameter expression. + * + * @param param + * the expression + */ + void addParameter(Expression param); + + /** + * This method must be called after all the parameters have been set. It + * checks if the parameter count is correct when required by the + * implementation. + * + * @throws DbException + * if the parameter count is incorrect. + */ + void doneWithParameters() throws DbException; + +} diff --git a/h2/src/main/org/h2/expression/FieldReference.java b/h2/src/main/org/h2/expression/FieldReference.java new file mode 100644 index 0000000000..248b937a55 --- /dev/null +++ b/h2/src/main/org/h2/expression/FieldReference.java @@ -0,0 +1,71 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Map.Entry; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.ParserUtil; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Field reference. 
+ */ +public final class FieldReference extends Operation1 { + + private final String fieldName; + + private int ordinal; + + public FieldReference(Expression arg, String fieldName) { + super(arg); + this.fieldName = fieldName; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(arg.getEnclosedSQL(builder, sqlFlags).append('.'), fieldName, sqlFlags); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = arg.getValue(session); + if (l != ValueNull.INSTANCE) { + return ((ValueRow) l).getList()[ordinal]; + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + TypeInfo type = arg.getType(); + if (type.getValueType() != Value.ROW) { + throw DbException.getInvalidExpressionTypeException("ROW", arg); + } + int ordinal = 0; + for (Entry entry : ((ExtTypeInfoRow) type.getExtTypeInfo()).getFields()) { + if (fieldName.equals(entry.getKey())) { + type = entry.getValue(); + this.type = type; + this.ordinal = ordinal; + if (arg.isConstant()) { + return TypedValueExpression.get(getValue(session), type); + } + return this; + } + ordinal++; + } + throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, fieldName); + } + +} diff --git a/h2/src/main/org/h2/expression/Format.java b/h2/src/main/org/h2/expression/Format.java new file mode 100644 index 0000000000..6ba27eadd5 --- /dev/null +++ b/h2/src/main/org/h2/expression/Format.java @@ -0,0 +1,99 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueJson; + +/** + * A format clause such as FORMAT JSON. + */ +public final class Format extends Operation1 { + + /** + * Supported formats. 
+ */ + public enum FormatEnum { + /** + * JSON. + */ + JSON; + } + + private final FormatEnum format; + + public Format(Expression arg, FormatEnum format) { + super(arg); + this.format = format; + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(arg.getValue(session)); + } + + /** + * Returns the value with applied format. + * + * @param value + * the value + * @return the value with applied format + */ + public Value getValue(Value value) { + switch (value.getValueType()) { + case Value.NULL: + return ValueJson.NULL; + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CHAR: + case Value.CLOB: + return ValueJson.fromJson(value.getString()); + default: + return value.convertTo(TypeInfo.TYPE_JSON); + } + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + return ValueExpression.get(getValue(session)); + } + if (arg instanceof Format && format == ((Format) arg).format) { + return arg; + } + type = TypeInfo.TYPE_JSON; + return this; + } + + @Override + public boolean isIdentity() { + return arg.isIdentity(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return arg.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" FORMAT ").append(format.name()); + } + + @Override + public int getNullable() { + return arg.getNullable(); + } + + @Override + public String getTableName() { + return arg.getTableName(); + } + + @Override + public String getColumnName(SessionLocal session, int columnIndex) { + return arg.getColumnName(session, columnIndex); + } + +} diff --git a/h2/src/main/org/h2/expression/Function.java b/h2/src/main/org/h2/expression/Function.java deleted file mode 100644 index 699833c456..0000000000 --- a/h2/src/main/org/h2/expression/Function.java +++ /dev/null @@ -1,2638 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import static org.h2.util.ToChar.toChar; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.Reader; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.HashMap; -import java.util.Locale; -import java.util.TimeZone; -import java.util.regex.PatternSyntaxException; - -import org.h2.api.ErrorCode; -import org.h2.command.Command; -import org.h2.command.Parser; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Mode; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; -import org.h2.schema.Schema; -import org.h2.schema.Sequence; -import org.h2.security.BlockCipher; -import org.h2.security.CipherFactory; -import org.h2.security.SHA256; -import org.h2.store.fs.FileUtils; -import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.LinkSchema; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.tools.CompressTool; -import org.h2.tools.Csv; -import org.h2.util.AutoCloseInputStream; -import org.h2.util.DateTimeUtils; -import org.h2.util.JdbcUtils; -import org.h2.util.MathUtils; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import org.h2.util.Utils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import 
org.h2.value.ValueResultSet; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueUuid; - -/** - * This class implements most built-in functions of this database. - */ -public class Function extends Expression implements FunctionCall { - public static final int ABS = 0, ACOS = 1, ASIN = 2, ATAN = 3, ATAN2 = 4, - BITAND = 5, BITOR = 6, BITXOR = 7, CEILING = 8, COS = 9, COT = 10, - DEGREES = 11, EXP = 12, FLOOR = 13, LOG = 14, LOG10 = 15, MOD = 16, - PI = 17, POWER = 18, RADIANS = 19, RAND = 20, ROUND = 21, - ROUNDMAGIC = 22, SIGN = 23, SIN = 24, SQRT = 25, TAN = 26, - TRUNCATE = 27, SECURE_RAND = 28, HASH = 29, ENCRYPT = 30, - DECRYPT = 31, COMPRESS = 32, EXPAND = 33, ZERO = 34, - RANDOM_UUID = 35, COSH = 36, SINH = 37, TANH = 38, LN = 39; - - public static final int ASCII = 50, BIT_LENGTH = 51, CHAR = 52, - CHAR_LENGTH = 53, CONCAT = 54, DIFFERENCE = 55, HEXTORAW = 56, - INSERT = 57, INSTR = 58, LCASE = 59, LEFT = 60, LENGTH = 61, - LOCATE = 62, LTRIM = 63, OCTET_LENGTH = 64, RAWTOHEX = 65, - REPEAT = 66, REPLACE = 67, RIGHT = 68, RTRIM = 69, SOUNDEX = 70, - SPACE = 71, SUBSTR = 72, SUBSTRING = 73, UCASE = 74, LOWER = 75, - UPPER = 76, POSITION = 77, TRIM = 78, STRINGENCODE = 79, - STRINGDECODE = 80, STRINGTOUTF8 = 81, UTF8TOSTRING = 82, - XMLATTR = 83, XMLNODE = 84, XMLCOMMENT = 85, XMLCDATA = 86, - XMLSTARTDOC = 87, XMLTEXT = 88, REGEXP_REPLACE = 89, RPAD = 90, - LPAD = 91, CONCAT_WS = 92, TO_CHAR = 93, TRANSLATE = 94; - - public static final int CURDATE = 100, CURTIME = 101, DATE_ADD = 102, - DATE_DIFF = 103, DAY_NAME = 104, DAY_OF_MONTH = 105, - DAY_OF_WEEK = 106, DAY_OF_YEAR = 107, HOUR = 108, MINUTE = 109, - MONTH = 110, MONTH_NAME = 111, NOW = 112, QUARTER = 113, - SECOND = 114, WEEK = 115, YEAR = 116, CURRENT_DATE = 117, - CURRENT_TIME = 118, CURRENT_TIMESTAMP = 119, EXTRACT = 120, - FORMATDATETIME = 121, PARSEDATETIME = 122, ISO_YEAR = 123, - ISO_WEEK = 124, ISO_DAY_OF_WEEK = 125; 
- - public static final int DATABASE = 150, USER = 151, CURRENT_USER = 152, - IDENTITY = 153, SCOPE_IDENTITY = 154, AUTOCOMMIT = 155, - READONLY = 156, DATABASE_PATH = 157, LOCK_TIMEOUT = 158, - DISK_SPACE_USED = 159; - - public static final int IFNULL = 200, CASEWHEN = 201, CONVERT = 202, - CAST = 203, COALESCE = 204, NULLIF = 205, CASE = 206, - NEXTVAL = 207, CURRVAL = 208, ARRAY_GET = 209, CSVREAD = 210, - CSVWRITE = 211, MEMORY_FREE = 212, MEMORY_USED = 213, - LOCK_MODE = 214, SCHEMA = 215, SESSION_ID = 216, - ARRAY_LENGTH = 217, LINK_SCHEMA = 218, GREATEST = 219, LEAST = 220, - CANCEL_SESSION = 221, SET = 222, TABLE = 223, TABLE_DISTINCT = 224, - FILE_READ = 225, TRANSACTION_ID = 226, TRUNCATE_VALUE = 227, - NVL2 = 228, DECODE = 229, ARRAY_CONTAINS = 230; - - /** - * Used in MySQL-style INSERT ... ON DUPLICATE KEY UPDATE ... VALUES - */ - public static final int VALUES = 250; - - /** - * This is called H2VERSION() and not VERSION(), because we return a fake - * value for VERSION() when running under the PostgreSQL ODBC driver. 
- */ - public static final int H2VERSION = 231; - - public static final int ROW_NUMBER = 300; - - private static final int VAR_ARGS = -1; - private static final long PRECISION_UNKNOWN = -1; - - private static final HashMap FUNCTIONS = New.hashMap(); - private static final HashMap DATE_PART = New.hashMap(); - private static final char[] SOUNDEX_INDEX = new char[128]; - - protected Expression[] args; - - private final FunctionInfo info; - private ArrayList varArgs; - private int dataType, scale; - private long precision = PRECISION_UNKNOWN; - private int displaySize; - private final Database database; - - static { - // DATE_PART - DATE_PART.put("SQL_TSI_YEAR", Calendar.YEAR); - DATE_PART.put("YEAR", Calendar.YEAR); - DATE_PART.put("YYYY", Calendar.YEAR); - DATE_PART.put("YY", Calendar.YEAR); - DATE_PART.put("SQL_TSI_MONTH", Calendar.MONTH); - DATE_PART.put("MONTH", Calendar.MONTH); - DATE_PART.put("MM", Calendar.MONTH); - DATE_PART.put("M", Calendar.MONTH); - DATE_PART.put("SQL_TSI_WEEK", Calendar.WEEK_OF_YEAR); - DATE_PART.put("WW", Calendar.WEEK_OF_YEAR); - DATE_PART.put("WK", Calendar.WEEK_OF_YEAR); - DATE_PART.put("WEEK", Calendar.WEEK_OF_YEAR); - DATE_PART.put("DAY", Calendar.DAY_OF_MONTH); - DATE_PART.put("DD", Calendar.DAY_OF_MONTH); - DATE_PART.put("D", Calendar.DAY_OF_MONTH); - DATE_PART.put("SQL_TSI_DAY", Calendar.DAY_OF_MONTH); - DATE_PART.put("DAYOFYEAR", Calendar.DAY_OF_YEAR); - DATE_PART.put("DAY_OF_YEAR", Calendar.DAY_OF_YEAR); - DATE_PART.put("DY", Calendar.DAY_OF_YEAR); - DATE_PART.put("DOY", Calendar.DAY_OF_YEAR); - DATE_PART.put("SQL_TSI_HOUR", Calendar.HOUR_OF_DAY); - DATE_PART.put("HOUR", Calendar.HOUR_OF_DAY); - DATE_PART.put("HH", Calendar.HOUR_OF_DAY); - DATE_PART.put("SQL_TSI_MINUTE", Calendar.MINUTE); - DATE_PART.put("MINUTE", Calendar.MINUTE); - DATE_PART.put("MI", Calendar.MINUTE); - DATE_PART.put("N", Calendar.MINUTE); - DATE_PART.put("SQL_TSI_SECOND", Calendar.SECOND); - DATE_PART.put("SECOND", Calendar.SECOND); - DATE_PART.put("SS", 
Calendar.SECOND); - DATE_PART.put("S", Calendar.SECOND); - DATE_PART.put("MILLISECOND", Calendar.MILLISECOND); - DATE_PART.put("MS", Calendar.MILLISECOND); - - // SOUNDEX_INDEX - String index = "7AEIOUY8HW1BFPV2CGJKQSXZ3DT4L5MN6R"; - char number = 0; - for (int i = 0, length = index.length(); i < length; i++) { - char c = index.charAt(i); - if (c < '9') { - number = c; - } else { - SOUNDEX_INDEX[c] = number; - SOUNDEX_INDEX[Character.toLowerCase(c)] = number; - } - } - - // FUNCTIONS - addFunction("ABS", ABS, 1, Value.NULL); - addFunction("ACOS", ACOS, 1, Value.DOUBLE); - addFunction("ASIN", ASIN, 1, Value.DOUBLE); - addFunction("ATAN", ATAN, 1, Value.DOUBLE); - addFunction("ATAN2", ATAN2, 2, Value.DOUBLE); - addFunction("BITAND", BITAND, 2, Value.LONG); - addFunction("BITOR", BITOR, 2, Value.LONG); - addFunction("BITXOR", BITXOR, 2, Value.LONG); - addFunction("CEILING", CEILING, 1, Value.DOUBLE); - addFunction("CEIL", CEILING, 1, Value.DOUBLE); - addFunction("COS", COS, 1, Value.DOUBLE); - addFunction("COSH", COSH, 1, Value.DOUBLE); - addFunction("COT", COT, 1, Value.DOUBLE); - addFunction("DEGREES", DEGREES, 1, Value.DOUBLE); - addFunction("EXP", EXP, 1, Value.DOUBLE); - addFunction("FLOOR", FLOOR, 1, Value.DOUBLE); - addFunction("LOG", LOG, 1, Value.DOUBLE); - addFunction("LN", LN, 1, Value.DOUBLE); - addFunction("LOG10", LOG10, 1, Value.DOUBLE); - addFunction("MOD", MOD, 2, Value.LONG); - addFunction("PI", PI, 0, Value.DOUBLE); - addFunction("POWER", POWER, 2, Value.DOUBLE); - addFunction("RADIANS", RADIANS, 1, Value.DOUBLE); - // RAND without argument: get the next value - // RAND with one argument: seed the random generator - addFunctionNotDeterministic("RAND", RAND, VAR_ARGS, Value.DOUBLE); - addFunctionNotDeterministic("RANDOM", RAND, VAR_ARGS, Value.DOUBLE); - addFunction("ROUND", ROUND, VAR_ARGS, Value.DOUBLE); - addFunction("ROUNDMAGIC", ROUNDMAGIC, 1, Value.DOUBLE); - addFunction("SIGN", SIGN, 1, Value.INT); - addFunction("SIN", SIN, 1, Value.DOUBLE); - 
addFunction("SINH", SINH, 1, Value.DOUBLE); - addFunction("SQRT", SQRT, 1, Value.DOUBLE); - addFunction("TAN", TAN, 1, Value.DOUBLE); - addFunction("TANH", TANH, 1, Value.DOUBLE); - addFunction("TRUNCATE", TRUNCATE, VAR_ARGS, Value.NULL); - // same as TRUNCATE - addFunction("TRUNC", TRUNCATE, VAR_ARGS, Value.NULL); - addFunction("HASH", HASH, 3, Value.BYTES); - addFunction("ENCRYPT", ENCRYPT, 3, Value.BYTES); - addFunction("DECRYPT", DECRYPT, 3, Value.BYTES); - addFunctionNotDeterministic("SECURE_RAND", SECURE_RAND, 1, Value.BYTES); - addFunction("COMPRESS", COMPRESS, VAR_ARGS, Value.BYTES); - addFunction("EXPAND", EXPAND, 1, Value.BYTES); - addFunction("ZERO", ZERO, 0, Value.INT); - addFunctionNotDeterministic("RANDOM_UUID", RANDOM_UUID, 0, Value.UUID); - addFunctionNotDeterministic("SYS_GUID", RANDOM_UUID, 0, Value.UUID); - // string - addFunction("ASCII", ASCII, 1, Value.INT); - addFunction("BIT_LENGTH", BIT_LENGTH, 1, Value.LONG); - addFunction("CHAR", CHAR, 1, Value.STRING); - addFunction("CHR", CHAR, 1, Value.STRING); - addFunction("CHAR_LENGTH", CHAR_LENGTH, 1, Value.INT); - // same as CHAR_LENGTH - addFunction("CHARACTER_LENGTH", CHAR_LENGTH, 1, Value.INT); - addFunctionWithNull("CONCAT", CONCAT, VAR_ARGS, Value.STRING); - addFunctionWithNull("CONCAT_WS", CONCAT_WS, VAR_ARGS, Value.STRING); - addFunction("DIFFERENCE", DIFFERENCE, 2, Value.INT); - addFunction("HEXTORAW", HEXTORAW, 1, Value.STRING); - addFunctionWithNull("INSERT", INSERT, 4, Value.STRING); - addFunction("LCASE", LCASE, 1, Value.STRING); - addFunction("LEFT", LEFT, 2, Value.STRING); - addFunction("LENGTH", LENGTH, 1, Value.LONG); - // 2 or 3 arguments - addFunction("LOCATE", LOCATE, VAR_ARGS, Value.INT); - // alias for MSSQLServer - addFunction("CHARINDEX", LOCATE, VAR_ARGS, Value.INT); - // same as LOCATE with 2 arguments - addFunction("POSITION", LOCATE, 2, Value.INT); - addFunction("INSTR", INSTR, VAR_ARGS, Value.INT); - addFunction("LTRIM", LTRIM, VAR_ARGS, Value.STRING); - 
addFunction("OCTET_LENGTH", OCTET_LENGTH, 1, Value.LONG); - addFunction("RAWTOHEX", RAWTOHEX, 1, Value.STRING); - addFunction("REPEAT", REPEAT, 2, Value.STRING); - addFunction("REPLACE", REPLACE, VAR_ARGS, Value.STRING); - addFunction("RIGHT", RIGHT, 2, Value.STRING); - addFunction("RTRIM", RTRIM, VAR_ARGS, Value.STRING); - addFunction("SOUNDEX", SOUNDEX, 1, Value.STRING); - addFunction("SPACE", SPACE, 1, Value.STRING); - addFunction("SUBSTR", SUBSTR, VAR_ARGS, Value.STRING); - addFunction("SUBSTRING", SUBSTRING, VAR_ARGS, Value.STRING); - addFunction("UCASE", UCASE, 1, Value.STRING); - addFunction("LOWER", LOWER, 1, Value.STRING); - addFunction("UPPER", UPPER, 1, Value.STRING); - addFunction("POSITION", POSITION, 2, Value.INT); - addFunction("TRIM", TRIM, VAR_ARGS, Value.STRING); - addFunction("STRINGENCODE", STRINGENCODE, 1, Value.STRING); - addFunction("STRINGDECODE", STRINGDECODE, 1, Value.STRING); - addFunction("STRINGTOUTF8", STRINGTOUTF8, 1, Value.BYTES); - addFunction("UTF8TOSTRING", UTF8TOSTRING, 1, Value.STRING); - addFunction("XMLATTR", XMLATTR, 2, Value.STRING); - addFunctionWithNull("XMLNODE", XMLNODE, VAR_ARGS, Value.STRING); - addFunction("XMLCOMMENT", XMLCOMMENT, 1, Value.STRING); - addFunction("XMLCDATA", XMLCDATA, 1, Value.STRING); - addFunction("XMLSTARTDOC", XMLSTARTDOC, 0, Value.STRING); - addFunction("XMLTEXT", XMLTEXT, VAR_ARGS, Value.STRING); - addFunction("REGEXP_REPLACE", REGEXP_REPLACE, 3, Value.STRING); - addFunction("RPAD", RPAD, VAR_ARGS, Value.STRING); - addFunction("LPAD", LPAD, VAR_ARGS, Value.STRING); - addFunction("TO_CHAR", TO_CHAR, VAR_ARGS, Value.STRING); - addFunction("TRANSLATE", TRANSLATE, 3, Value.STRING); - - // date - addFunctionNotDeterministic("CURRENT_DATE", CURRENT_DATE, - 0, Value.DATE); - addFunctionNotDeterministic("CURDATE", CURDATE, - 0, Value.DATE); - // alias for MSSQLServer - addFunctionNotDeterministic("GETDATE", CURDATE, - 0, Value.DATE); - addFunctionNotDeterministic("CURRENT_TIME", CURRENT_TIME, - 0, 
Value.TIME); - addFunctionNotDeterministic("CURTIME", CURTIME, - 0, Value.TIME); - addFunctionNotDeterministic("CURRENT_TIMESTAMP", CURRENT_TIMESTAMP, - VAR_ARGS, Value.TIMESTAMP); - addFunctionNotDeterministic("NOW", NOW, - VAR_ARGS, Value.TIMESTAMP); - addFunction("DATEADD", DATE_ADD, - 3, Value.TIMESTAMP); - addFunction("TIMESTAMPADD", DATE_ADD, - 3, Value.LONG); - addFunction("DATEDIFF", DATE_DIFF, - 3, Value.LONG); - addFunction("TIMESTAMPDIFF", DATE_DIFF, - 3, Value.LONG); - addFunction("DAYNAME", DAY_NAME, - 1, Value.STRING); - addFunction("DAYNAME", DAY_NAME, - 1, Value.STRING); - addFunction("DAY", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAY_OF_MONTH", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAY_OF_WEEK", DAY_OF_WEEK, - 1, Value.INT); - addFunction("DAY_OF_YEAR", DAY_OF_YEAR, - 1, Value.INT); - addFunction("DAYOFMONTH", DAY_OF_MONTH, - 1, Value.INT); - addFunction("DAYOFWEEK", DAY_OF_WEEK, - 1, Value.INT); - addFunction("DAYOFYEAR", DAY_OF_YEAR, - 1, Value.INT); - addFunction("HOUR", HOUR, - 1, Value.INT); - addFunction("MINUTE", MINUTE, - 1, Value.INT); - addFunction("MONTH", MONTH, - 1, Value.INT); - addFunction("MONTHNAME", MONTH_NAME, - 1, Value.STRING); - addFunction("QUARTER", QUARTER, - 1, Value.INT); - addFunction("SECOND", SECOND, - 1, Value.INT); - addFunction("WEEK", WEEK, - 1, Value.INT); - addFunction("YEAR", YEAR, - 1, Value.INT); - addFunction("EXTRACT", EXTRACT, - 2, Value.INT); - addFunctionWithNull("FORMATDATETIME", FORMATDATETIME, - VAR_ARGS, Value.STRING); - addFunctionWithNull("PARSEDATETIME", PARSEDATETIME, - VAR_ARGS, Value.TIMESTAMP); - addFunction("ISO_YEAR", ISO_YEAR, - 1, Value.INT); - addFunction("ISO_WEEK", ISO_WEEK, - 1, Value.INT); - addFunction("ISO_DAY_OF_WEEK", ISO_DAY_OF_WEEK, - 1, Value.INT); - // system - addFunctionNotDeterministic("DATABASE", DATABASE, - 0, Value.STRING); - addFunctionNotDeterministic("USER", USER, - 0, Value.STRING); - addFunctionNotDeterministic("CURRENT_USER", CURRENT_USER, - 0, 
Value.STRING); - addFunctionNotDeterministic("IDENTITY", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("SCOPE_IDENTITY", SCOPE_IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("IDENTITY_VAL_LOCAL", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("LAST_INSERT_ID", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("LASTVAL", IDENTITY, - 0, Value.LONG); - addFunctionNotDeterministic("AUTOCOMMIT", AUTOCOMMIT, - 0, Value.BOOLEAN); - addFunctionNotDeterministic("READONLY", READONLY, - 0, Value.BOOLEAN); - addFunction("DATABASE_PATH", DATABASE_PATH, - 0, Value.STRING); - addFunctionNotDeterministic("LOCK_TIMEOUT", LOCK_TIMEOUT, - 0, Value.INT); - addFunctionWithNull("IFNULL", IFNULL, - 2, Value.NULL); - addFunctionWithNull("ISNULL", IFNULL, - 2, Value.NULL); - addFunctionWithNull("CASEWHEN", CASEWHEN, - 3, Value.NULL); - addFunctionWithNull("CONVERT", CONVERT, - 1, Value.NULL); - addFunctionWithNull("CAST", CAST, - 1, Value.NULL); - addFunctionWithNull("TRUNCATE_VALUE", TRUNCATE_VALUE, - 3, Value.NULL); - addFunctionWithNull("COALESCE", COALESCE, - VAR_ARGS, Value.NULL); - addFunctionWithNull("NVL", COALESCE, - VAR_ARGS, Value.NULL); - addFunctionWithNull("NVL2", NVL2, - 3, Value.NULL); - addFunctionWithNull("NULLIF", NULLIF, - 2, Value.NULL); - addFunctionWithNull("CASE", CASE, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("NEXTVAL", NEXTVAL, - VAR_ARGS, Value.LONG); - addFunctionNotDeterministic("CURRVAL", CURRVAL, - VAR_ARGS, Value.LONG); - addFunction("ARRAY_GET", ARRAY_GET, - 2, Value.STRING); - addFunction("ARRAY_CONTAINS", ARRAY_CONTAINS, - 2, Value.BOOLEAN, false, true, true); - addFunction("CSVREAD", CSVREAD, - VAR_ARGS, Value.RESULT_SET, false, false, false); - addFunction("CSVWRITE", CSVWRITE, - VAR_ARGS, Value.INT, false, false, true); - addFunctionNotDeterministic("MEMORY_FREE", MEMORY_FREE, - 0, Value.INT); - addFunctionNotDeterministic("MEMORY_USED", MEMORY_USED, - 0, Value.INT); - 
addFunctionNotDeterministic("LOCK_MODE", LOCK_MODE, - 0, Value.INT); - addFunctionNotDeterministic("SCHEMA", SCHEMA, - 0, Value.STRING); - addFunctionNotDeterministic("SESSION_ID", SESSION_ID, - 0, Value.INT); - addFunction("ARRAY_LENGTH", ARRAY_LENGTH, - 1, Value.INT); - addFunctionNotDeterministic("LINK_SCHEMA", LINK_SCHEMA, - 6, Value.RESULT_SET); - addFunctionWithNull("LEAST", LEAST, - VAR_ARGS, Value.NULL); - addFunctionWithNull("GREATEST", GREATEST, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("CANCEL_SESSION", CANCEL_SESSION, - 1, Value.BOOLEAN); - addFunction("SET", SET, - 2, Value.NULL, false, false, true); - addFunction("FILE_READ", FILE_READ, - VAR_ARGS, Value.NULL, false, false, true); - addFunctionNotDeterministic("TRANSACTION_ID", TRANSACTION_ID, - 0, Value.STRING); - addFunctionWithNull("DECODE", DECODE, - VAR_ARGS, Value.NULL); - addFunctionNotDeterministic("DISK_SPACE_USED", DISK_SPACE_USED, - 1, Value.LONG); - addFunction("H2VERSION", H2VERSION, 0, Value.STRING); - - // TableFunction - addFunctionWithNull("TABLE", TABLE, - VAR_ARGS, Value.RESULT_SET); - addFunctionWithNull("TABLE_DISTINCT", TABLE_DISTINCT, - VAR_ARGS, Value.RESULT_SET); - - // pseudo function - addFunctionWithNull("ROW_NUMBER", ROW_NUMBER, 0, Value.LONG); - - // ON DUPLICATE KEY VALUES function - addFunction("VALUES", VALUES, 1, Value.NULL, false, true, false); - } - - protected Function(Database database, FunctionInfo info) { - this.database = database; - this.info = info; - if (info.parameterCount == VAR_ARGS) { - varArgs = New.arrayList(); - } else { - args = new Expression[info.parameterCount]; - } - } - - private static void addFunction(String name, int type, int parameterCount, - int dataType, boolean nullIfParameterIsNull, boolean deterministic, - boolean bufferResultSetToLocalTemp) { - FunctionInfo info = new FunctionInfo(); - info.name = name; - info.type = type; - info.parameterCount = parameterCount; - info.dataType = dataType; - info.nullIfParameterIsNull = 
nullIfParameterIsNull; - info.deterministic = deterministic; - info.bufferResultSetToLocalTemp = bufferResultSetToLocalTemp; - FUNCTIONS.put(name, info); - } - - private static void addFunctionNotDeterministic(String name, int type, - int parameterCount, int dataType) { - addFunction(name, type, parameterCount, dataType, true, false, true); - } - - private static void addFunction(String name, int type, int parameterCount, - int dataType) { - addFunction(name, type, parameterCount, dataType, true, true, true); - } - - private static void addFunctionWithNull(String name, int type, - int parameterCount, int dataType) { - addFunction(name, type, parameterCount, dataType, false, true, true); - } - - /** - * Get the function info object for this function, or null if there is no - * such function. - * - * @param name the function name - * @return the function info - */ - private static FunctionInfo getFunctionInfo(String name) { - return FUNCTIONS.get(name); - } - - /** - * Get an instance of the given function for this database. - * If no function with this name is found, null is returned. - * - * @param database the database - * @param name the function name - * @return the function object or null - */ - public static Function getFunction(Database database, String name) { - if (!database.getSettings().databaseToUpper) { - // if not yet converted to uppercase, do it now - name = StringUtils.toUpperEnglish(name); - } - FunctionInfo info = getFunctionInfo(name); - if (info == null) { - return null; - } - switch(info.type) { - case TABLE: - case TABLE_DISTINCT: - return new TableFunction(database, info, Long.MAX_VALUE); - default: - return new Function(database, info); - } - } - - /** - * Set the parameter expression at the given index. - * - * @param index the index (0, 1,...) 
- * @param param the expression - */ - public void setParameter(int index, Expression param) { - if (varArgs != null) { - varArgs.add(param); - } else { - if (index >= args.length) { - throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, "" + args.length); - } - args[index] = param; - } - } - - private static strictfp double log10(double value) { - return roundMagic(StrictMath.log(value) / StrictMath.log(10)); - } - - @Override - public Value getValue(Session session) { - return getValueWithArgs(session, args); - } - - private Value getSimpleValue(Session session, Value v0, Expression[] args, - Value[] values) { - Value result; - switch (info.type) { - case ABS: - result = v0.getSignum() > 0 ? v0 : v0.negate(); - break; - case ACOS: - result = ValueDouble.get(Math.acos(v0.getDouble())); - break; - case ASIN: - result = ValueDouble.get(Math.asin(v0.getDouble())); - break; - case ATAN: - result = ValueDouble.get(Math.atan(v0.getDouble())); - break; - case CEILING: - result = ValueDouble.get(Math.ceil(v0.getDouble())); - break; - case COS: - result = ValueDouble.get(Math.cos(v0.getDouble())); - break; - case COSH: - result = ValueDouble.get(Math.cosh(v0.getDouble())); - break; - case COT: { - double d = Math.tan(v0.getDouble()); - if (d == 0.0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - result = ValueDouble.get(1. 
/ d); - break; - } - case DEGREES: - result = ValueDouble.get(Math.toDegrees(v0.getDouble())); - break; - case EXP: - result = ValueDouble.get(Math.exp(v0.getDouble())); - break; - case FLOOR: - result = ValueDouble.get(Math.floor(v0.getDouble())); - break; - case LN: - result = ValueDouble.get(Math.log(v0.getDouble())); - break; - case LOG: - if (database.getMode().logIsLogBase10) { - result = ValueDouble.get(Math.log10(v0.getDouble())); - } else { - result = ValueDouble.get(Math.log(v0.getDouble())); - } - break; - case LOG10: - result = ValueDouble.get(log10(v0.getDouble())); - break; - case PI: - result = ValueDouble.get(Math.PI); - break; - case RADIANS: - result = ValueDouble.get(Math.toRadians(v0.getDouble())); - break; - case RAND: { - if (v0 != null) { - session.getRandom().setSeed(v0.getInt()); - } - result = ValueDouble.get(session.getRandom().nextDouble()); - break; - } - case ROUNDMAGIC: - result = ValueDouble.get(roundMagic(v0.getDouble())); - break; - case SIGN: - result = ValueInt.get(v0.getSignum()); - break; - case SIN: - result = ValueDouble.get(Math.sin(v0.getDouble())); - break; - case SINH: - result = ValueDouble.get(Math.sinh(v0.getDouble())); - break; - case SQRT: - result = ValueDouble.get(Math.sqrt(v0.getDouble())); - break; - case TAN: - result = ValueDouble.get(Math.tan(v0.getDouble())); - break; - case TANH: - result = ValueDouble.get(Math.tanh(v0.getDouble())); - break; - case SECURE_RAND: - result = ValueBytes.getNoCopy( - MathUtils.secureRandomBytes(v0.getInt())); - break; - case EXPAND: - result = ValueBytes.getNoCopy( - CompressTool.getInstance().expand(v0.getBytesNoCopy())); - break; - case ZERO: - result = ValueInt.get(0); - break; - case RANDOM_UUID: - result = ValueUuid.getNewRandom(); - break; - // string - case ASCII: { - String s = v0.getString(); - if (s.length() == 0) { - result = ValueNull.INSTANCE; - } else { - result = ValueInt.get(s.charAt(0)); - } - break; - } - case BIT_LENGTH: - result = ValueLong.get(16 * 
length(v0)); - break; - case CHAR: - result = ValueString.get(String.valueOf((char) v0.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case CHAR_LENGTH: - case LENGTH: - result = ValueLong.get(length(v0)); - break; - case OCTET_LENGTH: - result = ValueLong.get(2 * length(v0)); - break; - case CONCAT_WS: - case CONCAT: { - result = ValueNull.INSTANCE; - int start = 0; - String separator = ""; - if (info.type == CONCAT_WS) { - start = 1; - separator = getNullOrValue(session, args, values, 0).getString(); - } - for (int i = start; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (v == ValueNull.INSTANCE) { - continue; - } - if (result == ValueNull.INSTANCE) { - result = v; - } else { - String tmp = v.getString(); - if (!StringUtils.isNullOrEmpty(separator) - && !StringUtils.isNullOrEmpty(tmp)) { - tmp = separator.concat(tmp); - } - result = ValueString.get(result.getString().concat(tmp), - database.getMode().treatEmptyStringsAsNull); - } - } - if (info.type == CONCAT_WS) { - if (separator != null && result == ValueNull.INSTANCE) { - result = ValueString.get("", - database.getMode().treatEmptyStringsAsNull); - } - } - break; - } - case HEXTORAW: - result = ValueString.get(hexToRaw(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case LOWER: - case LCASE: - // TODO this is locale specific, need to document or provide a way - // to set the locale - result = ValueString.get(v0.getString().toLowerCase(), - database.getMode().treatEmptyStringsAsNull); - break; - case RAWTOHEX: - result = ValueString.get(rawToHex(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SOUNDEX: - result = ValueString.get(getSoundex(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SPACE: { - int len = Math.max(0, v0.getInt()); - char[] chars = new char[len]; - for (int i = len - 1; i >= 0; i--) { - chars[i] = ' '; - } - result = ValueString.get(new 
String(chars), - database.getMode().treatEmptyStringsAsNull); - break; - } - case UPPER: - case UCASE: - // TODO this is locale specific, need to document or provide a way - // to set the locale - result = ValueString.get(v0.getString().toUpperCase(), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGENCODE: - result = ValueString.get(StringUtils.javaEncode(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGDECODE: - result = ValueString.get(StringUtils.javaDecode(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case STRINGTOUTF8: - result = ValueBytes.getNoCopy(v0.getString(). - getBytes(Constants.UTF8)); - break; - case UTF8TOSTRING: - result = ValueString.get(new String(v0.getBytesNoCopy(), - Constants.UTF8), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLCOMMENT: - result = ValueString.get(StringUtils.xmlComment(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLCDATA: - result = ValueString.get(StringUtils.xmlCData(v0.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLSTARTDOC: - result = ValueString.get(StringUtils.xmlStartDoc(), - database.getMode().treatEmptyStringsAsNull); - break; - case DAY_NAME: { - SimpleDateFormat dayName = new SimpleDateFormat( - "EEEE", Locale.ENGLISH); - result = ValueString.get(dayName.format(v0.getDate()), - database.getMode().treatEmptyStringsAsNull); - break; - } - case DAY_OF_MONTH: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.DAY_OF_MONTH)); - break; - case DAY_OF_WEEK: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.DAY_OF_WEEK)); - break; - case DAY_OF_YEAR: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.DAY_OF_YEAR)); - break; - case HOUR: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getTimestamp(), - Calendar.HOUR_OF_DAY)); - break; - case MINUTE: - result = 
ValueInt.get(DateTimeUtils.getDatePart(v0.getTimestamp(), - Calendar.MINUTE)); - break; - case MONTH: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.MONTH)); - break; - case MONTH_NAME: { - SimpleDateFormat monthName = new SimpleDateFormat("MMMM", - Locale.ENGLISH); - result = ValueString.get(monthName.format(v0.getDate()), - database.getMode().treatEmptyStringsAsNull); - break; - } - case QUARTER: - result = ValueInt.get((DateTimeUtils.getDatePart(v0.getDate(), - Calendar.MONTH) - 1) / 3 + 1); - break; - case SECOND: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getTimestamp(), - Calendar.SECOND)); - break; - case WEEK: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.WEEK_OF_YEAR)); - break; - case YEAR: - result = ValueInt.get(DateTimeUtils.getDatePart(v0.getDate(), - Calendar.YEAR)); - break; - case ISO_YEAR: - result = ValueInt.get(DateTimeUtils.getIsoYear(v0.getDate())); - break; - case ISO_WEEK: - result = ValueInt.get(DateTimeUtils.getIsoWeek(v0.getDate())); - break; - case ISO_DAY_OF_WEEK: - result = ValueInt.get(DateTimeUtils.getIsoDayOfWeek(v0.getDate())); - break; - case CURDATE: - case CURRENT_DATE: { - long now = session.getTransactionStart(); - // need to normalize - result = ValueDate.fromMillis(now); - break; - } - case CURTIME: - case CURRENT_TIME: { - long now = session.getTransactionStart(); - // need to normalize - result = ValueTime.fromMillis(now); - break; - } - case NOW: - case CURRENT_TIMESTAMP: { - long now = session.getTransactionStart(); - ValueTimestamp vt = ValueTimestamp.fromMillis(now); - if (v0 != null) { - Mode mode = database.getMode(); - vt = (ValueTimestamp) vt.convertScale( - mode.convertOnlyToSmallerScale, v0.getInt()); - } - result = vt; - break; - } - case DATABASE: - result = ValueString.get(database.getShortName(), - database.getMode().treatEmptyStringsAsNull); - break; - case USER: - case CURRENT_USER: - result = ValueString.get(session.getUser().getName(), - 
database.getMode().treatEmptyStringsAsNull); - break; - case IDENTITY: - result = session.getLastIdentity(); - break; - case SCOPE_IDENTITY: - result = session.getLastScopeIdentity(); - break; - case AUTOCOMMIT: - result = ValueBoolean.get(session.getAutoCommit()); - break; - case READONLY: - result = ValueBoolean.get(database.isReadOnly()); - break; - case DATABASE_PATH: { - String path = database.getDatabasePath(); - result = path == null ? - (Value) ValueNull.INSTANCE : ValueString.get(path, - database.getMode().treatEmptyStringsAsNull); - break; - } - case LOCK_TIMEOUT: - result = ValueInt.get(session.getLockTimeout()); - break; - case DISK_SPACE_USED: - result = ValueLong.get(getDiskSpaceUsed(session, v0)); - break; - case CAST: - case CONVERT: { - v0 = v0.convertTo(dataType); - Mode mode = database.getMode(); - v0 = v0.convertScale(mode.convertOnlyToSmallerScale, scale); - v0 = v0.convertPrecision(getPrecision(), false); - result = v0; - break; - } - case MEMORY_FREE: - session.getUser().checkAdmin(); - result = ValueInt.get(Utils.getMemoryFree()); - break; - case MEMORY_USED: - session.getUser().checkAdmin(); - result = ValueInt.get(Utils.getMemoryUsed()); - break; - case LOCK_MODE: - result = ValueInt.get(database.getLockMode()); - break; - case SCHEMA: - result = ValueString.get(session.getCurrentSchemaName(), - database.getMode().treatEmptyStringsAsNull); - break; - case SESSION_ID: - result = ValueInt.get(session.getId()); - break; - case IFNULL: { - result = v0; - if (v0 == ValueNull.INSTANCE) { - result = getNullOrValue(session, args, values, 1); - } - break; - } - case CASEWHEN: { - Value v; - if (v0 == ValueNull.INSTANCE || - !v0.getBoolean().booleanValue()) { - v = getNullOrValue(session, args, values, 2); - } else { - v = getNullOrValue(session, args, values, 1); - } - result = v.convertTo(dataType); - break; - } - case DECODE: { - int index = -1; - for (int i = 1, len = args.length - 1; i < len; i += 2) { - if (database.areEqual(v0, - 
getNullOrValue(session, args, values, i))) { - index = i + 1; - break; - } - } - if (index < 0 && args.length % 2 == 0) { - index = args.length - 1; - } - Value v = index < 0 ? ValueNull.INSTANCE : - getNullOrValue(session, args, values, index); - result = v.convertTo(dataType); - break; - } - case NVL2: { - Value v; - if (v0 == ValueNull.INSTANCE) { - v = getNullOrValue(session, args, values, 2); - } else { - v = getNullOrValue(session, args, values, 1); - } - result = v.convertTo(dataType); - break; - } - case COALESCE: { - result = v0; - for (int i = 0; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (!(v == ValueNull.INSTANCE)) { - result = v.convertTo(dataType); - break; - } - } - break; - } - case GREATEST: - case LEAST: { - result = ValueNull.INSTANCE; - for (int i = 0; i < args.length; i++) { - Value v = getNullOrValue(session, args, values, i); - if (!(v == ValueNull.INSTANCE)) { - v = v.convertTo(dataType); - if (result == ValueNull.INSTANCE) { - result = v; - } else { - int comp = database.compareTypeSave(result, v); - if (info.type == GREATEST && comp < 0) { - result = v; - } else if (info.type == LEAST && comp > 0) { - result = v; - } - } - } - } - break; - } - case CASE: { - Expression then = null; - if (v0 == null) { - // Searched CASE expression - // (null, when, then) - // (null, when, then, else) - // (null, when, then, when, then) - // (null, when, then, when, then, else) - for (int i = 1, len = args.length - 1; i < len; i += 2) { - Value when = args[i].getValue(session); - if (!(when == ValueNull.INSTANCE) && - when.getBoolean().booleanValue()) { - then = args[i + 1]; - break; - } - } - } else { - // Simple CASE expression - // (expr, when, then) - // (expr, when, then, else) - // (expr, when, then, when, then) - // (expr, when, then, when, then, else) - if (!(v0 == ValueNull.INSTANCE)) { - for (int i = 1, len = args.length - 1; i < len; i += 2) { - Value when = args[i].getValue(session); - if 
(database.areEqual(v0, when)) { - then = args[i + 1]; - break; - } - } - } - } - if (then == null && args.length % 2 == 0) { - // then = elsePart - then = args[args.length - 1]; - } - Value v = then == null ? ValueNull.INSTANCE : then.getValue(session); - result = v.convertTo(dataType); - break; - } - case ARRAY_GET: { - if (v0.getType() == Value.ARRAY) { - Value v1 = getNullOrValue(session, args, values, 1); - int element = v1.getInt(); - Value[] list = ((ValueArray) v0).getList(); - if (element < 1 || element > list.length) { - result = ValueNull.INSTANCE; - } else { - result = list[element - 1]; - } - } else { - result = ValueNull.INSTANCE; - } - break; - } - case ARRAY_LENGTH: { - if (v0.getType() == Value.ARRAY) { - Value[] list = ((ValueArray) v0).getList(); - result = ValueInt.get(list.length); - } else { - result = ValueNull.INSTANCE; - } - break; - } - case ARRAY_CONTAINS: { - result = ValueBoolean.get(false); - if (v0.getType() == Value.ARRAY) { - Value v1 = getNullOrValue(session, args, values, 1); - Value[] list = ((ValueArray) v0).getList(); - for (Value v : list) { - if (v.equals(v1)) { - result = ValueBoolean.get(true); - break; - } - } - } - break; - } - case CANCEL_SESSION: { - result = ValueBoolean.get(cancelStatement(session, v0.getInt())); - break; - } - case TRANSACTION_ID: { - result = session.getTransactionId(); - break; - } - default: - result = null; - } - return result; - } - - private static boolean cancelStatement(Session session, int targetSessionId) { - session.getUser().checkAdmin(); - Session[] sessions = session.getDatabase().getSessions(false); - for (Session s : sessions) { - if (s.getId() == targetSessionId) { - Command c = s.getCurrentCommand(); - if (c == null) { - return false; - } - c.cancel(); - return true; - } - } - return false; - } - - private static long getDiskSpaceUsed(Session session, Value v0) { - Parser p = new Parser(session); - String sql = v0.getString(); - Table table = p.parseTableName(sql); - return 
table.getDiskSpaceUsed(); - } - - private static Value getNullOrValue(Session session, Expression[] args, - Value[] values, int i) { - if (i >= args.length) { - return null; - } - Value v = values[i]; - if (v == null) { - Expression e = args[i]; - if (e == null) { - return null; - } - v = values[i] = e.getValue(session); - } - return v; - } - - private Value getValueWithArgs(Session session, Expression[] args) { - Value[] values = new Value[args.length]; - if (info.nullIfParameterIsNull) { - for (int i = 0; i < args.length; i++) { - Expression e = args[i]; - Value v = e.getValue(session); - if (v == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - values[i] = v; - } - } - Value v0 = getNullOrValue(session, args, values, 0); - Value resultSimple = getSimpleValue(session, v0, args, values); - if (resultSimple != null) { - return resultSimple; - } - Value v1 = getNullOrValue(session, args, values, 1); - Value v2 = getNullOrValue(session, args, values, 2); - Value v3 = getNullOrValue(session, args, values, 3); - Value v4 = getNullOrValue(session, args, values, 4); - Value v5 = getNullOrValue(session, args, values, 5); - Value result; - switch (info.type) { - case ATAN2: - result = ValueDouble.get( - Math.atan2(v0.getDouble(), v1.getDouble())); - break; - case BITAND: - result = ValueLong.get(v0.getLong() & v1.getLong()); - break; - case BITOR: - result = ValueLong.get(v0.getLong() | v1.getLong()); - break; - case BITXOR: - result = ValueLong.get(v0.getLong() ^ v1.getLong()); - break; - case MOD: { - long x = v1.getLong(); - if (x == 0) { - throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getSQL()); - } - result = ValueLong.get(v0.getLong() % x); - break; - } - case POWER: - result = ValueDouble.get(Math.pow( - v0.getDouble(), v1.getDouble())); - break; - case ROUND: { - double f = v1 == null ? 1. 
: Math.pow(10., v1.getDouble()); - result = ValueDouble.get(Math.round(v0.getDouble() * f) / f); - break; - } - case TRUNCATE: { - if (v0.getType() == Value.TIMESTAMP) { - java.sql.Timestamp d = v0.getTimestamp(); - Calendar c = Calendar.getInstance(); - c.setTime(d); - c.set(Calendar.HOUR_OF_DAY, 0); - c.set(Calendar.MINUTE, 0); - c.set(Calendar.SECOND, 0); - c.set(Calendar.MILLISECOND, 0); - result = ValueTimestamp.fromMillis(c.getTimeInMillis()); - } else { - double d = v0.getDouble(); - int p = v1 == null ? 0 : v1.getInt(); - double f = Math.pow(10., p); - double g = d * f; - result = ValueDouble.get(((d < 0) ? Math.ceil(g) : Math.floor(g)) / f); - } - break; - } - case HASH: - result = ValueBytes.getNoCopy(getHash(v0.getString(), - v1.getBytesNoCopy(), v2.getInt())); - break; - case ENCRYPT: - result = ValueBytes.getNoCopy(encrypt(v0.getString(), - v1.getBytesNoCopy(), v2.getBytesNoCopy())); - break; - case DECRYPT: - result = ValueBytes.getNoCopy(decrypt(v0.getString(), - v1.getBytesNoCopy(), v2.getBytesNoCopy())); - break; - case COMPRESS: { - String algorithm = null; - if (v1 != null) { - algorithm = v1.getString(); - } - result = ValueBytes.getNoCopy(CompressTool.getInstance(). - compress(v0.getBytesNoCopy(), algorithm)); - break; - } - case DIFFERENCE: - result = ValueInt.get(getDifference( - v0.getString(), v1.getString())); - break; - case INSERT: { - if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) { - result = v1; - } else { - result = ValueString.get(insert(v0.getString(), - v1.getInt(), v2.getInt(), v3.getString()), - database.getMode().treatEmptyStringsAsNull); - } - break; - } - case LEFT: - result = ValueString.get(left(v0.getString(), v1.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case LOCATE: { - int start = v2 == null ? 0 : v2.getInt(); - result = ValueInt.get(locate(v0.getString(), v1.getString(), start)); - break; - } - case INSTR: { - int start = v2 == null ? 
0 : v2.getInt(); - result = ValueInt.get(locate(v1.getString(), v0.getString(), start)); - break; - } - case REPEAT: { - int count = Math.max(0, v1.getInt()); - result = ValueString.get(repeat(v0.getString(), count), - database.getMode().treatEmptyStringsAsNull); - break; - } - case REPLACE: { - String s0 = v0.getString(); - String s1 = v1.getString(); - String s2 = (v2 == null) ? "" : v2.getString(); - result = ValueString.get(replace(s0, s1, s2), - database.getMode().treatEmptyStringsAsNull); - break; - } - case RIGHT: - result = ValueString.get(right(v0.getString(), v1.getInt()), - database.getMode().treatEmptyStringsAsNull); - break; - case LTRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - true, false, v1 == null ? " " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case TRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - true, true, v1 == null ? " " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case RTRIM: - result = ValueString.get(StringUtils.trim(v0.getString(), - false, true, v1 == null ? " " : v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case SUBSTR: - case SUBSTRING: { - String s = v0.getString(); - int offset = v1.getInt(); - if (offset < 0) { - offset = s.length() + offset + 1; - } - int length = v2 == null ? s.length() : v2.getInt(); - result = ValueString.get(substring(s, offset, length), - database.getMode().treatEmptyStringsAsNull); - break; - } - case POSITION: - result = ValueInt.get(locate(v0.getString(), v1.getString(), 0)); - break; - case XMLATTR: - result = ValueString.get( - StringUtils.xmlAttr(v0.getString(), v1.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case XMLNODE: { - String attr = v1 == null ? - null : v1 == ValueNull.INSTANCE ? null : v1.getString(); - String content = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - boolean indent = v3 == null ? 
- true : v3.getBoolean(); - result = ValueString.get(StringUtils.xmlNode( - v0.getString(), attr, content, indent), - database.getMode().treatEmptyStringsAsNull); - break; - } - case REGEXP_REPLACE: { - String regexp = v1.getString(); - String replacement = v2.getString(); - try { - result = ValueString.get( - v0.getString().replaceAll(regexp, replacement), - database.getMode().treatEmptyStringsAsNull); - } catch (StringIndexOutOfBoundsException e) { - throw DbException.get( - ErrorCode.LIKE_ESCAPE_ERROR_1, e, replacement); - } catch (PatternSyntaxException e) { - throw DbException.get( - ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp); - } - break; - } - case RPAD: - result = ValueString.get(StringUtils.pad(v0.getString(), - v1.getInt(), v2 == null ? null : v2.getString(), true), - database.getMode().treatEmptyStringsAsNull); - break; - case LPAD: - result = ValueString.get(StringUtils.pad(v0.getString(), - v1.getInt(), v2 == null ? null : v2.getString(), false), - database.getMode().treatEmptyStringsAsNull); - break; - case TO_CHAR: - switch(v0.getType()){ - case Value.TIME: - case Value.DATE: - case Value.TIMESTAMP: - result = ValueString.get(toChar(v0.getTimestamp(), - v1 == null ? null : v1.getString(), - v2 == null ? null : v2.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - case Value.SHORT: - case Value.INT: - case Value.LONG: - case Value.DECIMAL: - case Value.DOUBLE: - case Value.FLOAT: - result = ValueString.get(toChar(v0.getBigDecimal(), - v1 == null ? null : v1.getString(), - v2 == null ? 
null : v2.getString()), - database.getMode().treatEmptyStringsAsNull); - break; - default: - result = ValueString.get(v0.getString(), - database.getMode().treatEmptyStringsAsNull); - } - break; - case TRANSLATE: { - String matching = v1.getString(); - String replacement = v2.getString(); - result = ValueString.get( - translate(v0.getString(), matching, replacement), - database.getMode().treatEmptyStringsAsNull); - break; - } - case H2VERSION: - result = ValueString.get(Constants.getVersion(), - database.getMode().treatEmptyStringsAsNull); - break; - case DATE_ADD: - result = ValueTimestamp.get(dateadd( - v0.getString(), v1.getLong(), v2.getTimestamp())); - break; - case DATE_DIFF: - result = ValueLong.get(datediff( - v0.getString(), v1.getTimestamp(), v2.getTimestamp())); - break; - case EXTRACT: { - int field = getDatePart(v0.getString()); - result = ValueInt.get(DateTimeUtils.getDatePart( - v1.getTimestamp(), field)); - break; - } - case FORMATDATETIME: { - if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { - result = ValueNull.INSTANCE; - } else { - String locale = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - String tz = v3 == null ? - null : v3 == ValueNull.INSTANCE ? null : v3.getString(); - result = ValueString.get(DateTimeUtils.formatDateTime( - v0.getTimestamp(), v1.getString(), locale, tz), - database.getMode().treatEmptyStringsAsNull); - } - break; - } - case PARSEDATETIME: { - if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { - result = ValueNull.INSTANCE; - } else { - String locale = v2 == null ? - null : v2 == ValueNull.INSTANCE ? null : v2.getString(); - String tz = v3 == null ? - null : v3 == ValueNull.INSTANCE ? null : v3.getString(); - java.util.Date d = DateTimeUtils.parseDateTime( - v0.getString(), v1.getString(), locale, tz); - result = ValueTimestamp.fromMillis(d.getTime()); - } - break; - } - case NULLIF: - result = database.areEqual(v0, v1) ? 
ValueNull.INSTANCE : v0; - break; - // system - case NEXTVAL: { - Sequence sequence = getSequence(session, v0, v1); - SequenceValue value = new SequenceValue(sequence); - result = value.getValue(session); - break; - } - case CURRVAL: { - Sequence sequence = getSequence(session, v0, v1); - result = ValueLong.get(sequence.getCurrentValue()); - break; - } - case CSVREAD: { - String fileName = v0.getString(); - String columnList = v1 == null ? null : v1.getString(); - Csv csv = new Csv(); - String options = v2 == null ? null : v2.getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorRead = v3 == null ? null : v3.getString(); - String fieldDelimiter = v4 == null ? null : v4.getString(); - String escapeCharacter = v5 == null ? null : v5.getString(); - Value v6 = getNullOrValue(session, args, values, 6); - String nullString = v6 == null ? null : v6.getString(); - setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, - escapeCharacter); - csv.setNullString(nullString); - } - char fieldSeparator = csv.getFieldSeparatorRead(); - String[] columns = StringUtils.arraySplit(columnList, - fieldSeparator, true); - try { - ValueResultSet vr = ValueResultSet.get(csv.read(fileName, - columns, charset)); - result = vr; - } catch (SQLException e) { - throw DbException.convert(e); - } - break; - } - case LINK_SCHEMA: { - session.getUser().checkAdmin(); - Connection conn = session.createConnection(false); - ResultSet rs = LinkSchema.linkSchema(conn, v0.getString(), - v1.getString(), v2.getString(), v3.getString(), - v4.getString(), v5.getString()); - result = ValueResultSet.get(rs); - break; - } - case CSVWRITE: { - session.getUser().checkAdmin(); - Connection conn = session.createConnection(false); - Csv csv = new Csv(); - String options = v2 == null ? 
null : v2.getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorWrite = v3 == null ? null : v3.getString(); - String fieldDelimiter = v4 == null ? null : v4.getString(); - String escapeCharacter = v5 == null ? null : v5.getString(); - Value v6 = getNullOrValue(session, args, values, 6); - String nullString = v6 == null ? null : v6.getString(); - Value v7 = getNullOrValue(session, args, values, 7); - String lineSeparator = v7 == null ? null : v7.getString(); - setCsvDelimiterEscape(csv, fieldSeparatorWrite, fieldDelimiter, - escapeCharacter); - csv.setNullString(nullString); - if (lineSeparator != null) { - csv.setLineSeparator(lineSeparator); - } - } - try { - int rows = csv.write(conn, v0.getString(), v1.getString(), - charset); - result = ValueInt.get(rows); - } catch (SQLException e) { - throw DbException.convert(e); - } - break; - } - case SET: { - Variable var = (Variable) args[0]; - session.setVariable(var.getName(), v1); - result = v1; - break; - } - case FILE_READ: { - session.getUser().checkAdmin(); - String fileName = v0.getString(); - boolean blob = args.length == 1; - try { - InputStream in = new AutoCloseInputStream( - FileUtils.newInputStream(fileName)); - if (blob) { - result = database.getLobStorage().createBlob(in, -1); - } else { - Reader reader; - if (v1 == ValueNull.INSTANCE) { - reader = new InputStreamReader(in); - } else { - reader = new InputStreamReader(in, v1.getString()); - } - result = database.getLobStorage().createClob(reader, -1); - } - } catch (IOException e) { - throw DbException.convertIOException(e, fileName); - } - break; - } - case TRUNCATE_VALUE: { - result = v0.convertPrecision(v1.getLong(), v2.getBoolean()); - break; - } - case XMLTEXT: - if (v1 == null) { - result = ValueString.get(StringUtils.xmlText( - v0.getString()), - database.getMode().treatEmptyStringsAsNull); - } else { - result = 
ValueString.get(StringUtils.xmlText( - v0.getString(), v1.getBoolean()), - database.getMode().treatEmptyStringsAsNull); - } - break; - case VALUES: - result = session.getVariable(args[0].getSchemaName() + "." + - args[0].getTableName() + "." + args[0].getColumnName()); - break; - default: - throw DbException.throwInternalError("type=" + info.type); - } - return result; - } - - private Sequence getSequence(Session session, Value v0, Value v1) { - String schemaName, sequenceName; - if (v1 == null) { - Parser p = new Parser(session); - String sql = v0.getString(); - Expression expr = p.parseExpression(sql); - if (expr instanceof ExpressionColumn) { - ExpressionColumn seq = (ExpressionColumn) expr; - schemaName = seq.getOriginalTableAliasName(); - if (schemaName == null) { - schemaName = session.getCurrentSchemaName(); - sequenceName = sql; - } else { - sequenceName = seq.getColumnName(); - } - } else { - throw DbException.getSyntaxError(sql, 1); - } - } else { - schemaName = v0.getString(); - sequenceName = v1.getString(); - } - Schema s = database.findSchema(schemaName); - if (s == null) { - schemaName = StringUtils.toUpperEnglish(schemaName); - s = database.getSchema(schemaName); - } - Sequence seq = s.findSequence(sequenceName); - if (seq == null) { - sequenceName = StringUtils.toUpperEnglish(sequenceName); - seq = s.getSequence(sequenceName); - } - return seq; - } - - private static long length(Value v) { - switch (v.getType()) { - case Value.BLOB: - case Value.CLOB: - case Value.BYTES: - case Value.JAVA_OBJECT: - return v.getPrecision(); - default: - return v.getString().length(); - } - } - - private static byte[] getPaddedArrayCopy(byte[] data, int blockSize) { - int size = MathUtils.roundUpInt(data.length, blockSize); - byte[] newData = DataUtils.newBytes(size); - System.arraycopy(data, 0, newData, 0, data.length); - return newData; - } - - private static byte[] decrypt(String algorithm, byte[] key, byte[] data) { - BlockCipher cipher = 
CipherFactory.getBlockCipher(algorithm); - byte[] newKey = getPaddedArrayCopy(key, cipher.getKeyLength()); - cipher.setKey(newKey); - byte[] newData = getPaddedArrayCopy(data, BlockCipher.ALIGN); - cipher.decrypt(newData, 0, newData.length); - return newData; - } - - private static byte[] encrypt(String algorithm, byte[] key, byte[] data) { - BlockCipher cipher = CipherFactory.getBlockCipher(algorithm); - byte[] newKey = getPaddedArrayCopy(key, cipher.getKeyLength()); - cipher.setKey(newKey); - byte[] newData = getPaddedArrayCopy(data, BlockCipher.ALIGN); - cipher.encrypt(newData, 0, newData.length); - return newData; - } - - private static byte[] getHash(String algorithm, byte[] bytes, int iterations) { - if (!"SHA256".equalsIgnoreCase(algorithm)) { - throw DbException.getInvalidValueException("algorithm", algorithm); - } - for (int i = 0; i < iterations; i++) { - bytes = SHA256.getHash(bytes, false); - } - return bytes; - } - - /** - * Check if a given string is a valid date part string. - * - * @param part the string - * @return true if it is - */ - public static boolean isDatePart(String part) { - Integer p = DATE_PART.get(StringUtils.toUpperEnglish(part)); - return p != null; - } - - private static int getDatePart(String part) { - Integer p = DATE_PART.get(StringUtils.toUpperEnglish(part)); - if (p == null) { - throw DbException.getInvalidValueException("date part", part); - } - return p.intValue(); - } - - private static Timestamp dateadd(String part, long count, Timestamp d) { - int field = getDatePart(part); - if (field == Calendar.MILLISECOND) { - Timestamp ts = new Timestamp(d.getTime() + count); - ts.setNanos(ts.getNanos() + (d.getNanos() % 1000000)); - return ts; - } - // We allow long for manipulating the millisecond component, - // for the rest we only allow int. 
- if (count > Integer.MAX_VALUE) { - throw DbException.getInvalidValueException("DATEADD count", count); - } - Calendar calendar = Calendar.getInstance(); - int nanos = d.getNanos() % 1000000; - calendar.setTime(d); - calendar.add(field, (int) count); - long t = calendar.getTime().getTime(); - Timestamp ts = new Timestamp(t); - ts.setNanos(ts.getNanos() + nanos); - return ts; - } - - /** - * Calculate the number of crossed unit boundaries between two timestamps. - * This method is supported for MS SQL Server compatibility. - *

    -     * DATEDIFF(YEAR, '2004-12-31', '2005-01-01') = 1
    -     * 
    - * - * @param part the part - * @param d1 the first date - * @param d2 the second date - * @return the number of crossed boundaries - */ - private static long datediff(String part, Timestamp d1, Timestamp d2) { - int field = getDatePart(part); - Calendar calendar = Calendar.getInstance(); - long t1 = d1.getTime(), t2 = d2.getTime(); - // need to convert to UTC, otherwise we get inconsistent results with - // certain time zones (those that are 30 minutes off) - TimeZone zone = calendar.getTimeZone(); - calendar.setTime(d1); - t1 += zone.getOffset(calendar.get(Calendar.ERA), - calendar.get(Calendar.YEAR), calendar.get(Calendar.MONTH), - calendar.get(Calendar.DAY_OF_MONTH), - calendar.get(Calendar.DAY_OF_WEEK), - calendar.get(Calendar.MILLISECOND)); - calendar.setTime(d2); - t2 += zone.getOffset(calendar.get(Calendar.ERA), - calendar.get(Calendar.YEAR), calendar.get(Calendar.MONTH), - calendar.get(Calendar.DAY_OF_MONTH), - calendar.get(Calendar.DAY_OF_WEEK), - calendar.get(Calendar.MILLISECOND)); - switch (field) { - case Calendar.MILLISECOND: - return t2 - t1; - case Calendar.SECOND: - case Calendar.MINUTE: - case Calendar.HOUR_OF_DAY: { - // first 'normalize' the numbers so both are not negative - long hour = 60 * 60 * 1000; - long add = Math.min(t1 / hour * hour, t2 / hour * hour); - t1 -= add; - t2 -= add; - switch (field) { - case Calendar.SECOND: - return t2 / 1000 - t1 / 1000; - case Calendar.MINUTE: - return t2 / (60 * 1000) - t1 / (60 * 1000); - case Calendar.HOUR_OF_DAY: - return t2 / hour - t1 / hour; - default: - throw DbException.throwInternalError("field:" + field); - } - } - case Calendar.DATE: - return t2 / (24 * 60 * 60 * 1000) - t1 / (24 * 60 * 60 * 1000); - default: - break; - } - calendar.setTimeInMillis(t1); - int year1 = calendar.get(Calendar.YEAR); - int month1 = calendar.get(Calendar.MONTH); - calendar.setTimeInMillis(t2); - int year2 = calendar.get(Calendar.YEAR); - int month2 = calendar.get(Calendar.MONTH); - int result = year2 - year1; 
- if (field == Calendar.MONTH) { - result = 12 * result + (month2 - month1); - } - return result; - } - - private static String substring(String s, int start, int length) { - int len = s.length(); - start--; - if (start < 0) { - start = 0; - } - if (length < 0) { - length = 0; - } - start = (start > len) ? len : start; - if (start + length > len) { - length = len - start; - } - return s.substring(start, start + length); - } - - private static String replace(String s, String replace, String with) { - if (s == null || replace == null || with == null) { - return null; - } - if (replace.length() == 0) { - // avoid out of memory - return s; - } - StringBuilder buff = new StringBuilder(s.length()); - int start = 0; - int len = replace.length(); - while (true) { - int i = s.indexOf(replace, start); - if (i == -1) { - break; - } - buff.append(s.substring(start, i)).append(with); - start = i + len; - } - buff.append(s.substring(start)); - return buff.toString(); - } - - private static String repeat(String s, int count) { - StringBuilder buff = new StringBuilder(s.length() * count); - while (count-- > 0) { - buff.append(s); - } - return buff.toString(); - } - - private static String rawToHex(String s) { - int length = s.length(); - StringBuilder buff = new StringBuilder(4 * length); - for (int i = 0; i < length; i++) { - String hex = Integer.toHexString(s.charAt(i) & 0xffff); - for (int j = hex.length(); j < 4; j++) { - buff.append('0'); - } - buff.append(hex); - } - return buff.toString(); - } - - private static int locate(String search, String s, int start) { - if (start < 0) { - int i = s.length() + start; - return s.lastIndexOf(search, i) + 1; - } - int i = (start == 0) ? 
0 : start - 1; - return s.indexOf(search, i) + 1; - } - - private static String right(String s, int count) { - if (count < 0) { - count = 0; - } else if (count > s.length()) { - count = s.length(); - } - return s.substring(s.length() - count); - } - - private static String left(String s, int count) { - if (count < 0) { - count = 0; - } else if (count > s.length()) { - count = s.length(); - } - return s.substring(0, count); - } - - private static String insert(String s1, int start, int length, String s2) { - if (s1 == null) { - return s2; - } - if (s2 == null) { - return s1; - } - int len1 = s1.length(); - int len2 = s2.length(); - start--; - if (start < 0 || length <= 0 || len2 == 0 || start > len1) { - return s1; - } - if (start + length > len1) { - length = len1 - start; - } - return s1.substring(0, start) + s2 + s1.substring(start + length); - } - - private static String hexToRaw(String s) { - // TODO function hextoraw compatibility with oracle - int len = s.length(); - if (len % 4 != 0) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); - } - StringBuilder buff = new StringBuilder(len / 4); - for (int i = 0; i < len; i += 4) { - try { - char raw = (char) Integer.parseInt(s.substring(i, i + 4), 16); - buff.append(raw); - } catch (NumberFormatException e) { - throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s); - } - } - return buff.toString(); - } - - private static int getDifference(String s1, String s2) { - // TODO function difference: compatibility with SQL Server and HSQLDB - s1 = getSoundex(s1); - s2 = getSoundex(s2); - int e = 0; - for (int i = 0; i < 4; i++) { - if (s1.charAt(i) == s2.charAt(i)) { - e++; - } - } - return e; - } - - private static String translate(String original, String findChars, - String replaceChars) { - if (StringUtils.isNullOrEmpty(original) || - StringUtils.isNullOrEmpty(findChars)) { - return original; - } - // if it stays null, then no replacements have been made - StringBuilder buff = null; - // if shorter 
than findChars, then characters are removed - // (if null, we don't access replaceChars at all) - int replaceSize = replaceChars == null ? 0 : replaceChars.length(); - for (int i = 0, size = original.length(); i < size; i++) { - char ch = original.charAt(i); - int index = findChars.indexOf(ch); - if (index >= 0) { - if (buff == null) { - buff = new StringBuilder(size); - if (i > 0) { - buff.append(original.substring(0, i)); - } - } - if (index < replaceSize) { - ch = replaceChars.charAt(index); - } - } - if (buff != null) { - buff.append(ch); - } - } - return buff == null ? original : buff.toString(); - } - - private static double roundMagic(double d) { - if ((d < 0.0000000000001) && (d > -0.0000000000001)) { - return 0.0; - } - if ((d > 1000000000000.) || (d < -1000000000000.)) { - return d; - } - StringBuilder s = new StringBuilder(); - s.append(d); - if (s.toString().indexOf('E') >= 0) { - return d; - } - int len = s.length(); - if (len < 16) { - return d; - } - if (s.toString().indexOf('.') > len - 3) { - return d; - } - s.delete(len - 2, len); - len -= 2; - char c1 = s.charAt(len - 2); - char c2 = s.charAt(len - 3); - char c3 = s.charAt(len - 4); - if ((c1 == '0') && (c2 == '0') && (c3 == '0')) { - s.setCharAt(len - 1, '0'); - } else if ((c1 == '9') && (c2 == '9') && (c3 == '9')) { - s.setCharAt(len - 1, '9'); - s.append('9'); - s.append('9'); - s.append('9'); - } - return Double.parseDouble(s.toString()); - } - - private static String getSoundex(String s) { - int len = s.length(); - char[] chars = { '0', '0', '0', '0' }; - char lastDigit = '0'; - for (int i = 0, j = 0; i < len && j < 4; i++) { - char c = s.charAt(i); - char newDigit = c > SOUNDEX_INDEX.length ? 
- 0 : SOUNDEX_INDEX[c]; - if (newDigit != 0) { - if (j == 0) { - chars[j++] = c; - lastDigit = newDigit; - } else if (newDigit <= '6') { - if (newDigit != lastDigit) { - chars[j++] = newDigit; - lastDigit = newDigit; - } - } else if (newDigit == '7') { - lastDigit = newDigit; - } - } - } - return new String(chars); - } - - @Override - public int getType() { - return dataType; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - for (Expression e : args) { - if (e != null) { - e.mapColumns(resolver, level); - } - } - } - - /** - * Check if the parameter count is correct. - * - * @param len the number of parameters set - * @throws DbException if the parameter count is incorrect - */ - protected void checkParameterCount(int len) { - int min = 0, max = Integer.MAX_VALUE; - switch (info.type) { - case COALESCE: - case CSVREAD: - case LEAST: - case GREATEST: - min = 1; - break; - case NOW: - case CURRENT_TIMESTAMP: - case RAND: - max = 1; - break; - case COMPRESS: - case LTRIM: - case RTRIM: - case TRIM: - case FILE_READ: - case ROUND: - case XMLTEXT: - case TRUNCATE: - min = 1; - max = 2; - break; - case TO_CHAR: - min = 1; - max = 3; - break; - case REPLACE: - case LOCATE: - case INSTR: - case SUBSTR: - case SUBSTRING: - case LPAD: - case RPAD: - min = 2; - max = 3; - break; - case CONCAT: - case CONCAT_WS: - case CSVWRITE: - min = 2; - break; - case XMLNODE: - min = 1; - max = 4; - break; - case FORMATDATETIME: - case PARSEDATETIME: - min = 2; - max = 4; - break; - case CURRVAL: - case NEXTVAL: - min = 1; - max = 2; - break; - case DECODE: - case CASE: - min = 3; - break; - default: - DbException.throwInternalError("type=" + info.type); - } - boolean ok = (len >= min) && (len <= max); - if (!ok) { - throw DbException.get( - ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, min + ".." + max); - } - } - - /** - * This method is called after all the parameters have been set. - * It checks if the parameter count is correct. 
- * - * @throws DbException if the parameter count is incorrect. - */ - public void doneWithParameters() { - if (info.parameterCount == VAR_ARGS) { - int len = varArgs.size(); - checkParameterCount(len); - args = new Expression[len]; - varArgs.toArray(args); - varArgs = null; - } else { - int len = args.length; - if (len > 0 && args[len - 1] == null) { - throw DbException.get( - ErrorCode.INVALID_PARAMETER_COUNT_2, - info.name, "" + len); - } - } - } - - public void setDataType(Column col) { - dataType = col.getType(); - precision = col.getPrecision(); - displaySize = col.getDisplaySize(); - scale = col.getScale(); - } - - @Override - public Expression optimize(Session session) { - boolean allConst = info.deterministic; - for (int i = 0; i < args.length; i++) { - Expression e = args[i]; - if (e == null) { - continue; - } - e = e.optimize(session); - args[i] = e; - if (!e.isConstant()) { - allConst = false; - } - } - int t, s, d; - long p; - Expression p0 = args.length < 1 ? null : args[0]; - switch (info.type) { - case IFNULL: - case NULLIF: - case COALESCE: - case LEAST: - case GREATEST: { - t = Value.UNKNOWN; - s = 0; - p = 0; - d = 0; - for (Expression e : args) { - if (e != ValueExpression.getNull()) { - int type = e.getType(); - if (type != Value.UNKNOWN && type != Value.NULL) { - t = Value.getHigherOrder(t, type); - s = Math.max(s, e.getScale()); - p = Math.max(p, e.getPrecision()); - d = Math.max(d, e.getDisplaySize()); - } - } - } - if (t == Value.UNKNOWN) { - t = Value.STRING; - s = 0; - p = Integer.MAX_VALUE; - d = Integer.MAX_VALUE; - } - break; - } - case CASE: - case DECODE: { - t = Value.UNKNOWN; - s = 0; - p = 0; - d = 0; - // (expr, when, then) - // (expr, when, then, else) - // (expr, when, then, when, then) - // (expr, when, then, when, then, else) - for (int i = 2, len = args.length; i < len; i += 2) { - Expression then = args[i]; - if (then != ValueExpression.getNull()) { - int type = then.getType(); - if (type != Value.UNKNOWN && type != 
Value.NULL) { - t = Value.getHigherOrder(t, type); - s = Math.max(s, then.getScale()); - p = Math.max(p, then.getPrecision()); - d = Math.max(d, then.getDisplaySize()); - } - } - } - if (args.length % 2 == 0) { - Expression elsePart = args[args.length - 1]; - if (elsePart != ValueExpression.getNull()) { - int type = elsePart.getType(); - if (type != Value.UNKNOWN && type != Value.NULL) { - t = Value.getHigherOrder(t, type); - s = Math.max(s, elsePart.getScale()); - p = Math.max(p, elsePart.getPrecision()); - d = Math.max(d, elsePart.getDisplaySize()); - } - } - } - if (t == Value.UNKNOWN) { - t = Value.STRING; - s = 0; - p = Integer.MAX_VALUE; - d = Integer.MAX_VALUE; - } - break; - } - case CASEWHEN: - t = Value.getHigherOrder(args[1].getType(), args[2].getType()); - p = Math.max(args[1].getPrecision(), args[2].getPrecision()); - d = Math.max(args[1].getDisplaySize(), args[2].getDisplaySize()); - s = Math.max(args[1].getScale(), args[2].getScale()); - break; - case NVL2: - switch (args[1].getType()) { - case Value.STRING: - case Value.CLOB: - case Value.STRING_FIXED: - case Value.STRING_IGNORECASE: - t = args[1].getType(); - break; - default: - t = Value.getHigherOrder(args[1].getType(), args[2].getType()); - break; - } - p = Math.max(args[1].getPrecision(), args[2].getPrecision()); - d = Math.max(args[1].getDisplaySize(), args[2].getDisplaySize()); - s = Math.max(args[1].getScale(), args[2].getScale()); - break; - case CAST: - case CONVERT: - case TRUNCATE_VALUE: - // data type, precision and scale is already set - t = dataType; - p = precision; - s = scale; - d = displaySize; - break; - case TRUNCATE: - t = p0.getType(); - s = p0.getScale(); - p = p0.getPrecision(); - d = p0.getDisplaySize(); - if (t == Value.NULL) { - t = Value.INT; - p = ValueInt.PRECISION; - d = ValueInt.DISPLAY_SIZE; - s = 0; - } else if (t == Value.TIMESTAMP) { - t = Value.DATE; - p = ValueDate.PRECISION; - s = 0; - d = ValueDate.DISPLAY_SIZE; - } - break; - case ABS: - case FLOOR: - case 
ROUND: - t = p0.getType(); - s = p0.getScale(); - p = p0.getPrecision(); - d = p0.getDisplaySize(); - if (t == Value.NULL) { - t = Value.INT; - p = ValueInt.PRECISION; - d = ValueInt.DISPLAY_SIZE; - s = 0; - } - break; - case SET: { - Expression p1 = args[1]; - t = p1.getType(); - p = p1.getPrecision(); - s = p1.getScale(); - d = p1.getDisplaySize(); - if (!(p0 instanceof Variable)) { - throw DbException.get( - ErrorCode.CAN_ONLY_ASSIGN_TO_VARIABLE_1, p0.getSQL()); - } - break; - } - case FILE_READ: { - if (args.length == 1) { - t = Value.BLOB; - } else { - t = Value.CLOB; - } - p = Integer.MAX_VALUE; - s = 0; - d = Integer.MAX_VALUE; - break; - } - case SUBSTRING: - case SUBSTR: { - t = info.dataType; - p = args[0].getPrecision(); - s = 0; - if (args[1].isConstant()) { - // if only two arguments are used, - // subtract offset from first argument length - p -= args[1].getValue(session).getLong() - 1; - } - if (args.length == 3 && args[2].isConstant()) { - // if the third argument is constant it is at most this value - p = Math.min(p, args[2].getValue(session).getLong()); - } - p = Math.max(0, p); - d = MathUtils.convertLongToInt(p); - break; - } - default: - t = info.dataType; - DataType type = DataType.getDataType(t); - p = PRECISION_UNKNOWN; - d = 0; - s = type.defaultScale; - } - dataType = t; - precision = p; - scale = s; - displaySize = d; - if (allConst) { - Value v = getValue(session); - if (v == ValueNull.INSTANCE) { - if (info.type == CAST || info.type == CONVERT) { - return this; - } - } - return ValueExpression.get(v); - } - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - for (Expression e : args) { - if (e != null) { - e.setEvaluatable(tableFilter, b); - } - } - } - - @Override - public int getScale() { - return scale; - } - - @Override - public long getPrecision() { - if (precision == PRECISION_UNKNOWN) { - calculatePrecisionAndDisplaySize(); - } - return precision; - } - - @Override - public int 
getDisplaySize() { - if (precision == PRECISION_UNKNOWN) { - calculatePrecisionAndDisplaySize(); - } - return displaySize; - } - - private void calculatePrecisionAndDisplaySize() { - switch (info.type) { - case ENCRYPT: - case DECRYPT: - precision = args[2].getPrecision(); - displaySize = args[2].getDisplaySize(); - break; - case COMPRESS: - precision = args[0].getPrecision(); - displaySize = args[0].getDisplaySize(); - break; - case CHAR: - precision = 1; - displaySize = 1; - break; - case CONCAT: - precision = 0; - displaySize = 0; - for (Expression e : args) { - precision += e.getPrecision(); - displaySize = MathUtils.convertLongToInt( - (long) displaySize + e.getDisplaySize()); - if (precision < 0) { - precision = Long.MAX_VALUE; - } - } - break; - case HEXTORAW: - precision = (args[0].getPrecision() + 3) / 4; - displaySize = MathUtils.convertLongToInt(precision); - break; - case LCASE: - case LTRIM: - case RIGHT: - case RTRIM: - case UCASE: - case LOWER: - case UPPER: - case TRIM: - case STRINGDECODE: - case UTF8TOSTRING: - case TRUNCATE: - precision = args[0].getPrecision(); - displaySize = args[0].getDisplaySize(); - break; - case RAWTOHEX: - precision = args[0].getPrecision() * 4; - displaySize = MathUtils.convertLongToInt(precision); - break; - case SOUNDEX: - precision = 4; - displaySize = (int) precision; - break; - case DAY_NAME: - case MONTH_NAME: - // day and month names may be long in some languages - precision = 20; - displaySize = (int) precision; - break; - default: - DataType type = DataType.getDataType(dataType); - precision = type.defaultPrecision; - displaySize = type.defaultDisplaySize; - } - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder(info.name); - if (info.type == CASE) { - if (args[0] != null) { - buff.append(" ").append(args[0].getSQL()); - } - for (int i = 1, len = args.length - 1; i < len; i += 2) { - buff.append(" WHEN ").append(args[i].getSQL()); - buff.append(" THEN ").append(args[i + 
1].getSQL()); - } - if (args.length % 2 == 0) { - buff.append(" ELSE ").append(args[args.length - 1].getSQL()); - } - return buff.append(" END").toString(); - } - buff.append('('); - switch (info.type) { - case CAST: { - buff.append(args[0].getSQL()).append(" AS "). - append(new Column(null, dataType, precision, - scale, displaySize).getCreateSQL()); - break; - } - case CONVERT: { - buff.append(args[0].getSQL()).append(','). - append(new Column(null, dataType, precision, - scale, displaySize).getCreateSQL()); - break; - } - case EXTRACT: { - ValueString v = (ValueString) ((ValueExpression) args[0]).getValue(null); - buff.append(v.getString()).append(" FROM ").append(args[1].getSQL()); - break; - } - default: { - for (Expression e : args) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - } - } - return buff.append(')').toString(); - } - - @Override - public void updateAggregate(Session session) { - for (Expression e : args) { - if (e != null) { - e.updateAggregate(session); - } - } - } - - public int getFunctionType() { - return info.type; - } - - @Override - public String getName() { - return info.name; - } - - @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] argList) { - switch (info.type) { - case CSVREAD: { - String fileName = argList[0].getValue(session).getString(); - if (fileName == null) { - throw DbException.get(ErrorCode.PARAMETER_NOT_SET_1, "fileName"); - } - String columnList = argList.length < 2 ? - null : argList[1].getValue(session).getString(); - Csv csv = new Csv(); - String options = argList.length < 3 ? - null : argList[2].getValue(session).getString(); - String charset = null; - if (options != null && options.indexOf('=') >= 0) { - charset = csv.setOptions(options); - } else { - charset = options; - String fieldSeparatorRead = argList.length < 4 ? - null : argList[3].getValue(session).getString(); - String fieldDelimiter = argList.length < 5 ? 
- null : argList[4].getValue(session).getString(); - String escapeCharacter = argList.length < 6 ? - null : argList[5].getValue(session).getString(); - setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, - escapeCharacter); - } - char fieldSeparator = csv.getFieldSeparatorRead(); - String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); - ResultSet rs = null; - ValueResultSet x; - try { - rs = csv.read(fileName, columns, charset); - x = ValueResultSet.getCopy(rs, 0); - } catch (SQLException e) { - throw DbException.convert(e); - } finally { - csv.close(); - JdbcUtils.closeSilently(rs); - } - return x; - } - default: - break; - } - return (ValueResultSet) getValueWithArgs(session, argList); - } - - private static void setCsvDelimiterEscape(Csv csv, String fieldSeparator, - String fieldDelimiter, String escapeCharacter) { - if (fieldSeparator != null) { - csv.setFieldSeparatorWrite(fieldSeparator); - if (fieldSeparator.length() > 0) { - char fs = fieldSeparator.charAt(0); - csv.setFieldSeparatorRead(fs); - } - } - if (fieldDelimiter != null) { - char fd = fieldDelimiter.length() == 0 ? - 0 : fieldDelimiter.charAt(0); - csv.setFieldDelimiter(fd); - } - if (escapeCharacter != null) { - char ec = escapeCharacter.length() == 0 ? 
- 0 : escapeCharacter.charAt(0); - csv.setEscapeCharacter(ec); - } - } - - @Override - public Expression[] getArgs() { - return args; - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - for (Expression e : args) { - if (e != null && !e.isEverything(visitor)) { - return false; - } - } - switch (visitor.getType()) { - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.READONLY: - return info.deterministic; - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.GET_COLUMNS: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } - } - - @Override - public int getCost() { - int cost = 3; - for (Expression e : args) { - if (e != null) { - cost += e.getCost(); - } - } - return cost; - } - - @Override - public boolean isDeterministic() { - return info.deterministic; - } - - @Override - public boolean isBufferResultSetToLocalTemp() { - return info.bufferResultSetToLocalTemp; - } - -} diff --git a/h2/src/main/org/h2/expression/FunctionCall.java b/h2/src/main/org/h2/expression/FunctionCall.java deleted file mode 100644 index 674e958dae..0000000000 --- a/h2/src/main/org/h2/expression/FunctionCall.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Session; -import org.h2.value.ValueResultSet; - -/** - * This interface is used by the built-in functions, - * as well as the user-defined functions. - */ -public interface FunctionCall { - - /** - * Get the name of the function. 
- * - * @return the name - */ - String getName(); - - /** - * Get an empty result set with the column names set. - * - * @param session the session - * @param nullArgs the argument list (some arguments may be null) - * @return the empty result set - */ - ValueResultSet getValueForColumnList(Session session, Expression[] nullArgs); - - /** - * Get the data type. - * - * @return the data type - */ - int getType(); - - /** - * Optimize the function if possible. - * - * @param session the session - * @return the optimized expression - */ - Expression optimize(Session session); - - /** - * Get the function arguments. - * - * @return argument list - */ - Expression[] getArgs(); - - /** - * Get the SQL snippet of the function (including arguments). - * - * @return the SQL snippet. - */ - String getSQL(); - - /** - * Whether the function always returns the same result for the same - * parameters. - * - * @return true if it does - */ - boolean isDeterministic(); - - /** - * Should the return value ResultSet be buffered in a local temporary file? - * - * @return true if it should be. - */ - boolean isBufferResultSetToLocalTemp(); - -} diff --git a/h2/src/main/org/h2/expression/FunctionInfo.java b/h2/src/main/org/h2/expression/FunctionInfo.java deleted file mode 100644 index b6dbc9d200..0000000000 --- a/h2/src/main/org/h2/expression/FunctionInfo.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -/** - * This class contains information about a built-in function. - */ -class FunctionInfo { - - /** - * The name of the function. - */ - String name; - - /** - * The function type. - */ - int type; - - /** - * The data type of the return value. - */ - int dataType; - - /** - * The number of parameters. 
- */ - int parameterCount; - - /** - * If the result of the function is NULL if any of the parameters is NULL. - */ - boolean nullIfParameterIsNull; - - /** - * If this function always returns the same value for the same parameters. - */ - boolean deterministic; - - /** - * Should the return value ResultSet be buffered in a local temporary file? - */ - boolean bufferResultSetToLocalTemp = true; - -} diff --git a/h2/src/main/org/h2/expression/IntervalOperation.java b/h2/src/main/org/h2/expression/IntervalOperation.java new file mode 100644 index 0000000000..8182b9c8e3 --- /dev/null +++ b/h2/src/main/org/h2/expression/IntervalOperation.java @@ -0,0 +1,380 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_HOUR; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; +import static org.h2.util.DateTimeUtils.absoluteDayFromDateValue; +import static org.h2.util.DateTimeUtils.dateAndTimeFromValue; +import static org.h2.util.DateTimeUtils.dateTimeToValue; +import static org.h2.util.DateTimeUtils.dateValueFromAbsoluteDay; +import static org.h2.util.IntervalUtils.NANOS_PER_DAY_BI; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import org.h2.api.ErrorCode; +import org.h2.api.IntervalQualifier; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.DateTimeFunction; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.IntervalUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import 
org.h2.value.ValueTimestampTimeZone; + +/** + * A mathematical operation with intervals. + */ +public class IntervalOperation extends Operation2 { + + public enum IntervalOpType { + /** + * Interval plus interval. + */ + INTERVAL_PLUS_INTERVAL, + + /** + * Interval minus interval. + */ + INTERVAL_MINUS_INTERVAL, + + /** + * Interval divided by interval (non-standard). + */ + INTERVAL_DIVIDE_INTERVAL, + + /** + * Date-time plus interval. + */ + DATETIME_PLUS_INTERVAL, + + /** + * Date-time minus interval. + */ + DATETIME_MINUS_INTERVAL, + + /** + * Interval multiplied by numeric. + */ + INTERVAL_MULTIPLY_NUMERIC, + + /** + * Interval divided by numeric. + */ + INTERVAL_DIVIDE_NUMERIC, + + /** + * Date-time minus date-time. + */ + DATETIME_MINUS_DATETIME + } + + /** + * Number of digits enough to hold + * {@code INTERVAL '999999999999999999' YEAR / INTERVAL '1' MONTH}. + */ + private static final int INTERVAL_YEAR_DIGITS = 20; + + /** + * Number of digits enough to hold + * {@code INTERVAL '999999999999999999' DAY / INTERVAL '0.000000001' SECOND}. 
+ */ + private static final int INTERVAL_DAY_DIGITS = 32; + + private static final TypeInfo INTERVAL_DIVIDE_INTERVAL_YEAR_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, + INTERVAL_YEAR_DIGITS * 3, INTERVAL_YEAR_DIGITS * 2, null); + + private static final TypeInfo INTERVAL_DIVIDE_INTERVAL_DAY_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, + INTERVAL_DAY_DIGITS * 3, INTERVAL_DAY_DIGITS * 2, null); + + private final IntervalOpType opType; + + private TypeInfo forcedType; + + private static BigInteger nanosFromValue(SessionLocal session, Value v) { + long[] a = dateAndTimeFromValue(v, session); + return BigInteger.valueOf(absoluteDayFromDateValue(a[0])).multiply(NANOS_PER_DAY_BI) + .add(BigInteger.valueOf(a[1])); + } + + public IntervalOperation(IntervalOpType opType, Expression left, Expression right, TypeInfo forcedType) { + this(opType, left, right); + this.forcedType = forcedType; + } + + public IntervalOperation(IntervalOpType opType, Expression left, Expression right) { + super(left, right); + this.opType = opType; + int l = left.getType().getValueType(), r = right.getType().getValueType(); + switch (opType) { + case INTERVAL_PLUS_INTERVAL: + case INTERVAL_MINUS_INTERVAL: + type = TypeInfo.getTypeInfo(Value.getHigherOrder(l, r)); + break; + case INTERVAL_DIVIDE_INTERVAL: + type = DataType.isYearMonthIntervalType(l) ? 
INTERVAL_DIVIDE_INTERVAL_YEAR_TYPE + : INTERVAL_DIVIDE_INTERVAL_DAY_TYPE; + break; + case DATETIME_PLUS_INTERVAL: + case DATETIME_MINUS_INTERVAL: + case INTERVAL_MULTIPLY_NUMERIC: + case INTERVAL_DIVIDE_NUMERIC: + type = left.getType(); + break; + case DATETIME_MINUS_DATETIME: + if (forcedType != null) { + type = forcedType; + } else if ((l == Value.TIME || l == Value.TIME_TZ) && (r == Value.TIME || r == Value.TIME_TZ)) { + type = TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND; + } else if (l == Value.DATE && r == Value.DATE) { + type = TypeInfo.TYPE_INTERVAL_DAY; + } else { + type = TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND; + } + } + } + + @Override + public boolean needParentheses() { + return forcedType == null; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (forcedType != null) { + getInnerSQL2(builder.append('('), sqlFlags); + getForcedTypeSQL(builder.append(") "), forcedType); + } else { + getInnerSQL2(builder, sqlFlags); + } + return builder; + } + + private void getInnerSQL2(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(' ').append(getOperationToken()).append(' '); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + static StringBuilder getForcedTypeSQL(StringBuilder builder, TypeInfo forcedType) { + int precision = (int) forcedType.getPrecision(); + int scale = forcedType.getScale(); + return IntervalQualifier.valueOf(forcedType.getValueType() - Value.INTERVAL_YEAR).getTypeName(builder, + precision == ValueInterval.DEFAULT_PRECISION ? -1 : (int) precision, + scale == ValueInterval.DEFAULT_SCALE ? 
-1 : scale, true); + } + + private char getOperationToken() { + switch (opType) { + case INTERVAL_PLUS_INTERVAL: + case DATETIME_PLUS_INTERVAL: + return '+'; + case INTERVAL_MINUS_INTERVAL: + case DATETIME_MINUS_INTERVAL: + case DATETIME_MINUS_DATETIME: + return '-'; + case INTERVAL_MULTIPLY_NUMERIC: + return '*'; + case INTERVAL_DIVIDE_INTERVAL: + case INTERVAL_DIVIDE_NUMERIC: + return '/'; + default: + throw DbException.getInternalError("opType=" + opType); + } + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r = right.getValue(session); + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int lType = l.getValueType(), rType = r.getValueType(); + switch (opType) { + case INTERVAL_PLUS_INTERVAL: + case INTERVAL_MINUS_INTERVAL: { + BigInteger a1 = IntervalUtils.intervalToAbsolute((ValueInterval) l); + BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); + return IntervalUtils.intervalFromAbsolute( + IntervalQualifier.valueOf(Value.getHigherOrder(lType, rType) - Value.INTERVAL_YEAR), + opType == IntervalOpType.INTERVAL_PLUS_INTERVAL ? a1.add(a2) : a1.subtract(a2)); + } + case INTERVAL_DIVIDE_INTERVAL: + return ValueNumeric.get(IntervalUtils.intervalToAbsolute((ValueInterval) l)) + .divide(ValueNumeric.get(IntervalUtils.intervalToAbsolute((ValueInterval) r)), type); + case DATETIME_PLUS_INTERVAL: + case DATETIME_MINUS_INTERVAL: + return getDateTimeWithInterval(session, l, r, lType, rType); + case INTERVAL_MULTIPLY_NUMERIC: + case INTERVAL_DIVIDE_NUMERIC: { + BigDecimal a1 = new BigDecimal(IntervalUtils.intervalToAbsolute((ValueInterval) l)); + BigDecimal a2 = r.getBigDecimal(); + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(lType - Value.INTERVAL_YEAR), + (opType == IntervalOpType.INTERVAL_MULTIPLY_NUMERIC ? 
a1.multiply(a2) : a1.divide(a2)) + .toBigInteger()); + } + case DATETIME_MINUS_DATETIME: { + Value result; + if ((lType == Value.TIME || lType == Value.TIME_TZ) && (rType == Value.TIME || rType == Value.TIME_TZ)) { + long diff; + if (lType == Value.TIME && rType == Value.TIME) { + diff = ((ValueTime) l).getNanos() - ((ValueTime) r).getNanos(); + } else { + ValueTimeTimeZone left = (ValueTimeTimeZone) l.convertTo(TypeInfo.TYPE_TIME_TZ, session), + right = (ValueTimeTimeZone) r.convertTo(TypeInfo.TYPE_TIME_TZ, session); + diff = left.getNanos() - right.getNanos() + + (right.getTimeZoneOffsetSeconds() - left.getTimeZoneOffsetSeconds()) + * DateTimeUtils.NANOS_PER_SECOND; + } + boolean negative = diff < 0; + if (negative) { + diff = -diff; + } + result = ValueInterval.from(IntervalQualifier.HOUR_TO_SECOND, negative, diff / NANOS_PER_HOUR, + diff % NANOS_PER_HOUR); + } else if (forcedType != null && DataType.isYearMonthIntervalType(forcedType.getValueType())) { + long[] dt1 = dateAndTimeFromValue(l, session), dt2 = dateAndTimeFromValue(r, session); + long dateValue1 = lType == Value.TIME || lType == Value.TIME_TZ + ? session.currentTimestamp().getDateValue() + : dt1[0]; + long dateValue2 = rType == Value.TIME || rType == Value.TIME_TZ + ? 
session.currentTimestamp().getDateValue() + : dt2[0]; + long leading = 12L + * (DateTimeUtils.yearFromDateValue(dateValue1) - DateTimeUtils.yearFromDateValue(dateValue2)) + + DateTimeUtils.monthFromDateValue(dateValue1) - DateTimeUtils.monthFromDateValue(dateValue2); + int d1 = DateTimeUtils.dayFromDateValue(dateValue1); + int d2 = DateTimeUtils.dayFromDateValue(dateValue2); + if (leading >= 0) { + if (d1 < d2 || d1 == d2 && dt1[1] < dt2[1]) { + leading--; + } + } else if (d1 > d2 || d1 == d2 && dt1[1] > dt2[1]) { + leading++; + } + boolean negative; + if (leading < 0) { + negative = true; + leading = -leading; + } else { + negative = false; + } + result = ValueInterval.from(IntervalQualifier.MONTH, negative, leading, 0L); + } else if (lType == Value.DATE && rType == Value.DATE) { + long diff = absoluteDayFromDateValue(((ValueDate) l).getDateValue()) + - absoluteDayFromDateValue(((ValueDate) r).getDateValue()); + boolean negative = diff < 0; + if (negative) { + diff = -diff; + } + result = ValueInterval.from(IntervalQualifier.DAY, negative, diff, 0L); + } else { + BigInteger diff = nanosFromValue(session, l).subtract(nanosFromValue(session, r)); + if (lType == Value.TIMESTAMP_TZ || rType == Value.TIMESTAMP_TZ) { + l = l.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, session); + r = r.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, session); + diff = diff.add(BigInteger.valueOf((((ValueTimestampTimeZone) r).getTimeZoneOffsetSeconds() + - ((ValueTimestampTimeZone) l).getTimeZoneOffsetSeconds()) * NANOS_PER_SECOND)); + } + result = IntervalUtils.intervalFromAbsolute(IntervalQualifier.DAY_TO_SECOND, diff); + } + if (forcedType != null) { + result = result.castTo(forcedType, session); + } + return result; + } + } + throw DbException.getInternalError("type=" + opType); + } + + private Value getDateTimeWithInterval(SessionLocal session, Value l, Value r, int lType, int rType) { + switch (lType) { + case Value.TIME: + if (DataType.isYearMonthIntervalType(rType)) { + throw 
DbException.getInternalError("type=" + rType); + } + return ValueTime.fromNanos(getTimeWithInterval(r, ((ValueTime) l).getNanos())); + case Value.TIME_TZ: { + if (DataType.isYearMonthIntervalType(rType)) { + throw DbException.getInternalError("type=" + rType); + } + ValueTimeTimeZone t = (ValueTimeTimeZone) l; + return ValueTimeTimeZone.fromNanos(getTimeWithInterval(r, t.getNanos()), t.getTimeZoneOffsetSeconds()); + } + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + if (DataType.isYearMonthIntervalType(rType)) { + long m = IntervalUtils.intervalToAbsolute((ValueInterval) r).longValue(); + if (opType == IntervalOpType.DATETIME_MINUS_INTERVAL) { + m = -m; + } + return DateTimeFunction.dateadd(session, DateTimeFunction.MONTH, m, l); + } else { + BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); + if (lType == Value.DATE) { + BigInteger a1 = BigInteger.valueOf(absoluteDayFromDateValue(((ValueDate) l).getDateValue())); + a2 = a2.divide(NANOS_PER_DAY_BI); + BigInteger n = opType == IntervalOpType.DATETIME_PLUS_INTERVAL ? 
a1.add(a2) : a1.subtract(a2); + return ValueDate.fromDateValue(dateValueFromAbsoluteDay(n.longValue())); + } else { + long[] a = dateAndTimeFromValue(l, session); + long absoluteDay = absoluteDayFromDateValue(a[0]); + long timeNanos = a[1]; + BigInteger[] dr = a2.divideAndRemainder(NANOS_PER_DAY_BI); + if (opType == IntervalOpType.DATETIME_PLUS_INTERVAL) { + absoluteDay += dr[0].longValue(); + timeNanos += dr[1].longValue(); + } else { + absoluteDay -= dr[0].longValue(); + timeNanos -= dr[1].longValue(); + } + if (timeNanos >= NANOS_PER_DAY) { + timeNanos -= NANOS_PER_DAY; + absoluteDay++; + } else if (timeNanos < 0) { + timeNanos += NANOS_PER_DAY; + absoluteDay--; + } + return dateTimeToValue(l, dateValueFromAbsoluteDay(absoluteDay), timeNanos); + } + } + } + throw DbException.getInternalError("type=" + opType); + } + + private long getTimeWithInterval(Value r, long nanos) { + BigInteger a1 = BigInteger.valueOf(nanos); + BigInteger a2 = IntervalUtils.intervalToAbsolute((ValueInterval) r); + BigInteger n = opType == IntervalOpType.DATETIME_PLUS_INTERVAL ? a1.add(a2) : a1.subtract(a2); + if (n.signum() < 0 || n.compareTo(NANOS_PER_DAY_BI) >= 0) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, n.toString()); + } + nanos = n.longValue(); + return nanos; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + if (left.isConstant() && right.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/JavaAggregate.java b/h2/src/main/org/h2/expression/JavaAggregate.java deleted file mode 100644 index bcee065b51..0000000000 --- a/h2/src/main/org/h2/expression/JavaAggregate.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.HashMap; -import org.h2.api.Aggregate; -import org.h2.api.ErrorCode; -import org.h2.command.Parser; -import org.h2.command.dml.Select; -import org.h2.engine.Session; -import org.h2.engine.UserAggregate; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * This class wraps a user-defined aggregate. - */ -public class JavaAggregate extends Expression { - - private final UserAggregate userAggregate; - private final Select select; - private final Expression[] args; - private int[] argTypes; - private int dataType; - private Connection userConnection; - private int lastGroupRowId; - - public JavaAggregate(UserAggregate userAggregate, Expression[] args, - Select select) { - this.userAggregate = userAggregate; - this.args = args; - this.select = select; - } - - @Override - public int getCost() { - int cost = 5; - for (Expression e : args) { - cost += e.getCost(); - } - return cost; - } - - @Override - public long getPrecision() { - return Integer.MAX_VALUE; - } - - @Override - public int getDisplaySize() { - return Integer.MAX_VALUE; - } - - @Override - public int getScale() { - return DataType.getDataType(dataType).defaultScale; - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder(); - buff.append(Parser.quoteIdentifier(userAggregate.getName())).append('('); - for (Expression e : args) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - return buff.append(')').toString(); - } - - @Override - public int getType() { - return dataType; - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.DETERMINISTIC: - // 
TODO optimization: some functions are deterministic, but we don't - // know (no setting for that) - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - // user defined aggregate functions can not be optimized - return false; - case ExpressionVisitor.GET_DEPENDENCIES: - visitor.addDependency(userAggregate); - break; - default: - } - for (Expression e : args) { - if (e != null && !e.isEverything(visitor)) { - return false; - } - } - return true; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - for (Expression arg : args) { - arg.mapColumns(resolver, level); - } - } - - @Override - public Expression optimize(Session session) { - userConnection = session.createConnection(false); - int len = args.length; - argTypes = new int[len]; - for (int i = 0; i < len; i++) { - Expression expr = args[i]; - args[i] = expr.optimize(session); - int type = expr.getType(); - argTypes[i] = type; - } - try { - Aggregate aggregate = getInstance(); - dataType = aggregate.getInternalType(argTypes); - } catch (SQLException e) { - throw DbException.convert(e); - } - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - for (Expression e : args) { - e.setEvaluatable(tableFilter, b); - } - } - - private Aggregate getInstance() throws SQLException { - Aggregate agg = userAggregate.getInstance(); - agg.init(userConnection); - return agg; - } - - @Override - public Value getValue(Session session) { - HashMap group = select.getCurrentGroup(); - if (group == null) { - throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getSQL()); - } - try { - Aggregate agg = (Aggregate) group.get(this); - if (agg == null) { - agg = getInstance(); - } - Object obj = agg.getResult(); - if (obj == null) { - return ValueNull.INSTANCE; - } - return DataType.convertToValue(session, obj, dataType); - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - @Override - public void updateAggregate(Session 
session) { - HashMap group = select.getCurrentGroup(); - if (group == null) { - // this is a different level (the enclosing query) - return; - } - - int groupRowId = select.getCurrentGroupRowId(); - if (lastGroupRowId == groupRowId) { - // already visited - return; - } - lastGroupRowId = groupRowId; - - Aggregate agg = (Aggregate) group.get(this); - try { - if (agg == null) { - agg = getInstance(); - group.put(this, agg); - } - Object[] argValues = new Object[args.length]; - Object arg = null; - for (int i = 0, len = args.length; i < len; i++) { - Value v = args[i].getValue(session); - v = v.convertTo(argTypes[i]); - arg = v.getObject(); - argValues[i] = arg; - } - if (args.length == 1) { - agg.add(arg); - } else { - agg.add(argValues); - } - } catch (SQLException e) { - throw DbException.convert(e); - } - } - -} diff --git a/h2/src/main/org/h2/expression/JavaFunction.java b/h2/src/main/org/h2/expression/JavaFunction.java deleted file mode 100644 index 6ef3c16147..0000000000 --- a/h2/src/main/org/h2/expression/JavaFunction.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.command.Parser; -import org.h2.engine.Constants; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; - -/** - * This class wraps a user-defined function. 
- */ -public class JavaFunction extends Expression implements FunctionCall { - - private final FunctionAlias functionAlias; - private final FunctionAlias.JavaMethod javaMethod; - private final Expression[] args; - - public JavaFunction(FunctionAlias functionAlias, Expression[] args) { - this.functionAlias = functionAlias; - this.javaMethod = functionAlias.findJavaMethod(args); - this.args = args; - } - - @Override - public Value getValue(Session session) { - return javaMethod.getValue(session, args, false); - } - - @Override - public int getType() { - return javaMethod.getDataType(); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - for (Expression e : args) { - e.mapColumns(resolver, level); - } - } - - @Override - public Expression optimize(Session session) { - boolean allConst = isDeterministic(); - for (int i = 0, len = args.length; i < len; i++) { - Expression e = args[i].optimize(session); - args[i] = e; - allConst &= e.isConstant(); - } - if (allConst) { - return ValueExpression.get(getValue(session)); - } - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - for (Expression e : args) { - if (e != null) { - e.setEvaluatable(tableFilter, b); - } - } - } - - @Override - public int getScale() { - return DataType.getDataType(getType()).defaultScale; - } - - @Override - public long getPrecision() { - return Integer.MAX_VALUE; - } - - @Override - public int getDisplaySize() { - return Integer.MAX_VALUE; - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder(); - // TODO always append the schema once FUNCTIONS_IN_SCHEMA is enabled - if (functionAlias.getDatabase().getSettings().functionsInSchema || - !functionAlias.getSchema().getName().equals(Constants.SCHEMA_MAIN)) { - buff.append( - Parser.quoteIdentifier(functionAlias.getSchema().getName())) - .append('.'); - } - buff.append(Parser.quoteIdentifier(functionAlias.getName())).append('('); - for 
(Expression e : args) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - return buff.append(')').toString(); - } - - @Override - public void updateAggregate(Session session) { - for (Expression e : args) { - if (e != null) { - e.updateAggregate(session); - } - } - } - - @Override - public String getName() { - return functionAlias.getName(); - } - - @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] argList) { - Value v = javaMethod.getValue(session, argList, true); - return v == ValueNull.INSTANCE ? null : (ValueResultSet) v; - } - - @Override - public Expression[] getArgs() { - return args; - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.DETERMINISTIC: - if (!isDeterministic()) { - return false; - } - // only if all parameters are deterministic as well - break; - case ExpressionVisitor.GET_DEPENDENCIES: - visitor.addDependency(functionAlias); - break; - default: - } - for (Expression e : args) { - if (e != null && !e.isEverything(visitor)) { - return false; - } - } - return true; - } - - @Override - public int getCost() { - int cost = javaMethod.hasConnectionParam() ? 
25 : 5; - for (Expression e : args) { - cost += e.getCost(); - } - return cost; - } - - @Override - public boolean isDeterministic() { - return functionAlias.isDeterministic(); - } - - @Override - public Expression[] getExpressionColumns(Session session) { - switch (getType()) { - case Value.RESULT_SET: - ValueResultSet rs = getValueForColumnList(session, getArgs()); - return getExpressionColumns(session, rs.getResultSet()); - case Value.ARRAY: - return getExpressionColumns(session, (ValueArray) getValue(session)); - } - return super.getExpressionColumns(session); - } - - @Override - public boolean isBufferResultSetToLocalTemp() { - return functionAlias.isBufferResultSetToLocalTemp(); - } - -} diff --git a/h2/src/main/org/h2/expression/Operation.java b/h2/src/main/org/h2/expression/Operation.java deleted file mode 100644 index 96aa5b88e6..0000000000 --- a/h2/src/main/org/h2/expression/Operation.java +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.expression; - -import org.h2.engine.Mode; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; - -/** - * A mathematical expression, or string concatenation. - */ -public class Operation extends Expression { - - /** - * This operation represents a string concatenation as in - * 'Hello' || 'World'. - */ - public static final int CONCAT = 0; - - /** - * This operation represents an addition as in 1 + 2. - */ - public static final int PLUS = 1; - - /** - * This operation represents a subtraction as in 2 - 1. 
- */ - public static final int MINUS = 2; - - /** - * This operation represents a multiplication as in 2 * 3. - */ - public static final int MULTIPLY = 3; - - /** - * This operation represents a division as in 4 * 2. - */ - public static final int DIVIDE = 4; - - /** - * This operation represents a negation as in - ID. - */ - public static final int NEGATE = 5; - - /** - * This operation represents a modulus as in 5 % 2. - */ - public static final int MODULUS = 6; - - private int opType; - private Expression left, right; - private int dataType; - private boolean convertRight = true; - - public Operation(int opType, Expression left, Expression right) { - this.opType = opType; - this.left = left; - this.right = right; - } - - @Override - public String getSQL() { - String sql; - if (opType == NEGATE) { - // don't remove the space, otherwise it might end up some thing like - // --1 which is a line remark - sql = "- " + left.getSQL(); - } else { - // don't remove the space, otherwise it might end up some thing like - // --1 which is a line remark - sql = left.getSQL() + " " + getOperationToken() + " " + right.getSQL(); - } - return "(" + sql + ")"; - } - - private String getOperationToken() { - switch (opType) { - case NEGATE: - return "-"; - case CONCAT: - return "||"; - case PLUS: - return "+"; - case MINUS: - return "-"; - case MULTIPLY: - return "*"; - case DIVIDE: - return "/"; - case MODULUS: - return "%"; - default: - throw DbException.throwInternalError("opType=" + opType); - } - } - - @Override - public Value getValue(Session session) { - Value l = left.getValue(session).convertTo(dataType); - Value r; - if (right == null) { - r = null; - } else { - r = right.getValue(session); - if (convertRight) { - r = r.convertTo(dataType); - } - } - switch (opType) { - case NEGATE: - return l == ValueNull.INSTANCE ? 
l : l.negate(); - case CONCAT: { - Mode mode = session.getDatabase().getMode(); - if (l == ValueNull.INSTANCE) { - if (mode.nullConcatIsNull) { - return ValueNull.INSTANCE; - } - return r; - } else if (r == ValueNull.INSTANCE) { - if (mode.nullConcatIsNull) { - return ValueNull.INSTANCE; - } - return l; - } - String s1 = l.getString(), s2 = r.getString(); - StringBuilder buff = new StringBuilder(s1.length() + s2.length()); - buff.append(s1).append(s2); - return ValueString.get(buff.toString()); - } - case PLUS: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.add(r); - case MINUS: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.subtract(r); - case MULTIPLY: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.multiply(r); - case DIVIDE: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.divide(r); - case MODULUS: - if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { - return ValueNull.INSTANCE; - } - return l.modulus(r); - default: - throw DbException.throwInternalError("type=" + opType); - } - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - left.mapColumns(resolver, level); - if (right != null) { - right.mapColumns(resolver, level); - } - } - - @Override - public Expression optimize(Session session) { - left = left.optimize(session); - switch (opType) { - case NEGATE: - dataType = left.getType(); - if (dataType == Value.UNKNOWN) { - dataType = Value.DECIMAL; - } - break; - case CONCAT: - right = right.optimize(session); - dataType = Value.STRING; - if (left.isConstant() && right.isConstant()) { - return ValueExpression.get(getValue(session)); - } - break; - case PLUS: - case MINUS: - case MULTIPLY: - case DIVIDE: - case MODULUS: - right = right.optimize(session); - int l = left.getType(); - int r = right.getType(); 
- if ((l == Value.NULL && r == Value.NULL) || - (l == Value.UNKNOWN && r == Value.UNKNOWN)) { - // (? + ?) - use decimal by default (the most safe data type) or - // string when text concatenation with + is enabled - if (opType == PLUS && session.getDatabase(). - getMode().allowPlusForStringConcat) { - dataType = Value.STRING; - opType = CONCAT; - } else { - dataType = Value.DECIMAL; - } - } else if (l == Value.DATE || l == Value.TIMESTAMP || - l == Value.TIME || r == Value.DATE || - r == Value.TIMESTAMP || r == Value.TIME) { - if (opType == PLUS) { - if (r != Value.getHigherOrder(l, r)) { - // order left and right: INT < TIME < DATE < TIMESTAMP - swap(); - int t = l; - l = r; - r = t; - } - if (l == Value.INT) { - // Oracle date add - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("DAY"))); - f.setParameter(1, left); - f.setParameter(2, right); - f.doneWithParameters(); - return f.optimize(session); - } else if (l == Value.DECIMAL || l == Value.FLOAT || l == Value.DOUBLE) { - // Oracle date add - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("SECOND"))); - left = new Operation(Operation.MULTIPLY, ValueExpression.get(ValueInt - .get(60 * 60 * 24)), left); - f.setParameter(1, left); - f.setParameter(2, right); - f.doneWithParameters(); - return f.optimize(session); - } else if (l == Value.TIME && r == Value.TIME) { - dataType = Value.TIME; - return this; - } else if (l == Value.TIME) { - dataType = Value.TIMESTAMP; - return this; - } - } else if (opType == MINUS) { - if ((l == Value.DATE || l == Value.TIMESTAMP) && r == Value.INT) { - // Oracle date subtract - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("DAY"))); - right = new Operation(NEGATE, right, null); - right = right.optimize(session); - f.setParameter(1, right); - 
f.setParameter(2, left); - f.doneWithParameters(); - return f.optimize(session); - } else if ((l == Value.DATE || l == Value.TIMESTAMP) && - (r == Value.DECIMAL || r == Value.FLOAT || r == Value.DOUBLE)) { - // Oracle date subtract - Function f = Function.getFunction(session.getDatabase(), "DATEADD"); - f.setParameter(0, ValueExpression.get(ValueString.get("SECOND"))); - right = new Operation(Operation.MULTIPLY, ValueExpression.get(ValueInt - .get(60 * 60 * 24)), right); - right = new Operation(NEGATE, right, null); - right = right.optimize(session); - f.setParameter(1, right); - f.setParameter(2, left); - f.doneWithParameters(); - return f.optimize(session); - } else if (l == Value.DATE || l == Value.TIMESTAMP) { - if (r == Value.TIME) { - dataType = Value.TIMESTAMP; - return this; - } else if (r == Value.DATE || r == Value.TIMESTAMP) { - // Oracle date subtract - Function f = Function.getFunction(session.getDatabase(), "DATEDIFF"); - f.setParameter(0, ValueExpression.get(ValueString.get("DAY"))); - f.setParameter(1, right); - f.setParameter(2, left); - f.doneWithParameters(); - return f.optimize(session); - } - } else if (l == Value.TIME && r == Value.TIME) { - dataType = Value.TIME; - return this; - } - } else if (opType == MULTIPLY) { - if (l == Value.TIME) { - dataType = Value.TIME; - convertRight = false; - return this; - } else if (r == Value.TIME) { - swap(); - dataType = Value.TIME; - convertRight = false; - return this; - } - } else if (opType == DIVIDE) { - if (l == Value.TIME) { - dataType = Value.TIME; - convertRight = false; - return this; - } - } - throw DbException.getUnsupportedException( - DataType.getDataType(l).name + " " + - getOperationToken() + " " + - DataType.getDataType(r).name); - } else { - dataType = Value.getHigherOrder(l, r); - if (DataType.isStringType(dataType) && - session.getDatabase().getMode().allowPlusForStringConcat) { - opType = CONCAT; - } - } - break; - default: - DbException.throwInternalError("type=" + opType); - } - if 
(left.isConstant() && (right == null || right.isConstant())) { - return ValueExpression.get(getValue(session)); - } - return this; - } - - private void swap() { - Expression temp = left; - left = right; - right = temp; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - left.setEvaluatable(tableFilter, b); - if (right != null) { - right.setEvaluatable(tableFilter, b); - } - } - - @Override - public int getType() { - return dataType; - } - - @Override - public long getPrecision() { - if (right != null) { - switch (opType) { - case CONCAT: - return left.getPrecision() + right.getPrecision(); - default: - return Math.max(left.getPrecision(), right.getPrecision()); - } - } - return left.getPrecision(); - } - - @Override - public int getDisplaySize() { - if (right != null) { - switch (opType) { - case CONCAT: - return MathUtils.convertLongToInt((long) left.getDisplaySize() + - (long) right.getDisplaySize()); - default: - return Math.max(left.getDisplaySize(), right.getDisplaySize()); - } - } - return left.getDisplaySize(); - } - - @Override - public int getScale() { - if (right != null) { - return Math.max(left.getScale(), right.getScale()); - } - return left.getScale(); - } - - @Override - public void updateAggregate(Session session) { - left.updateAggregate(session); - if (right != null) { - right.updateAggregate(session); - } - } - - @Override - public boolean isEverything(ExpressionVisitor visitor) { - return left.isEverything(visitor) && - (right == null || right.isEverything(visitor)); - } - - @Override - public int getCost() { - return left.getCost() + 1 + (right == null ? 0 : right.getCost()); - } - -} diff --git a/h2/src/main/org/h2/expression/Operation0.java b/h2/src/main/org/h2/expression/Operation0.java new file mode 100644 index 0000000000..23349d23a1 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation0.java @@ -0,0 +1,40 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; + +/** + * Operation without subexpressions. + */ +public abstract class Operation0 extends Expression { + + protected Operation0() { + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + // Nothing to do + } + + @Override + public Expression optimize(SessionLocal session) { + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + // Nothing to do + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + // Nothing to do + } + +} diff --git a/h2/src/main/org/h2/expression/Operation1.java b/h2/src/main/org/h2/expression/Operation1.java new file mode 100644 index 0000000000..a4ff48cca5 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation1.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with one argument. + */ +public abstract class Operation1 extends Expression { + + /** + * The argument of the operation. + */ + protected Expression arg; + + /** + * The type of the result. 
+ */ + protected TypeInfo type; + + protected Operation1(Expression arg) { + this.arg = arg; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + arg.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + arg.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + arg.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return arg.isEverything(visitor); + } + + @Override + public int getCost() { + return arg.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return arg; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Operation1_2.java b/h2/src/main/org/h2/expression/Operation1_2.java new file mode 100644 index 0000000000..78bed3190a --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation1_2.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with one or two arguments. + */ +public abstract class Operation1_2 extends Expression { + + /** + * The left part of the operation (the first argument). + */ + protected Expression left; + + /** + * The right part of the operation (the second argument). + */ + protected Expression right; + + /** + * The type of the result. 
+ */ + protected TypeInfo type; + + protected Operation1_2(Expression left, Expression right) { + this.left = left; + this.right = right; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + if (right != null) { + right.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + if (right != null) { + right.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + if (right != null) { + right.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && (right == null || right.isEverything(visitor)); + } + + @Override + public int getCost() { + int cost = left.getCost() + 1; + if (right != null) { + cost += right.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return right != null ? 2 : 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + if (index == 1 && right != null) { + return right; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Operation2.java b/h2/src/main/org/h2/expression/Operation2.java new file mode 100644 index 0000000000..d729157712 --- /dev/null +++ b/h2/src/main/org/h2/expression/Operation2.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with two arguments. + */ +public abstract class Operation2 extends Expression { + + /** + * The left part of the operation (the first argument). + */ + protected Expression left; + + /** + * The right part of the operation (the second argument). + */ + protected Expression right; + + /** + * The type of the result. + */ + protected TypeInfo type; + + protected Operation2(Expression left, Expression right) { + this.left = left; + this.right = right; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + right.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + right.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + right.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + right.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return right; + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/OperationN.java b/h2/src/main/org/h2/expression/OperationN.java new file mode 100644 index 0000000000..ff964ea697 --- /dev/null +++ b/h2/src/main/org/h2/expression/OperationN.java @@ -0,0 
+1,132 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; + +/** + * Operation with many arguments. + */ +public abstract class OperationN extends Expression implements ExpressionWithVariableParameters { + + /** + * The array of arguments. + */ + protected Expression[] args; + + /** + * The number of arguments. + */ + protected int argsCount; + + /** + * The type of the result. + */ + protected TypeInfo type; + + protected OperationN(Expression[] args) { + this.args = args; + } + + @Override + public void addParameter(Expression param) { + int capacity = args.length; + if (argsCount >= capacity) { + args = Arrays.copyOf(args, capacity * 2); + } + args[argsCount++] = param; + } + + @Override + public void doneWithParameters() throws DbException { + if (args.length != argsCount) { + args = Arrays.copyOf(args, argsCount); + } + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + for (Expression e : args) { + e.mapColumns(resolver, level, state); + } + } + + /** + * Optimizes arguments. 
+ * + * @param session + * the session + * @param allConst + * whether operation is deterministic + * @return whether operation is deterministic and all arguments are + * constants + */ + protected boolean optimizeArguments(SessionLocal session, boolean allConst) { + for (int i = 0, l = args.length; i < l; i++) { + Expression e = args[i].optimize(session); + args[i] = e; + if (allConst && !e.isConstant()) { + allConst = false; + } + } + return allConst; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + for (Expression e : args) { + e.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + for (Expression e : args) { + e.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + for (Expression e : args) { + if (!e.isEverything(visitor)) { + return false; + } + } + return true; + } + + @Override + public int getCost() { + int cost = args.length + 1; + for (Expression e : args) { + cost += e.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return args.length; + } + + @Override + public Expression getSubexpression(int index) { + return args[index]; + } + +} diff --git a/h2/src/main/org/h2/expression/Parameter.java b/h2/src/main/org/h2/expression/Parameter.java index 88075780c5..5c30d6facc 100644 --- a/h2/src/main/org/h2/expression/Parameter.java +++ b/h2/src/main/org/h2/expression/Parameter.java @@ -1,24 +1,24 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.table.Column; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; /** * A parameter of a prepared statement. */ -public class Parameter extends Expression implements ParameterInterface { +public final class Parameter extends Operation0 implements ParameterInterface { private Value value; private Column column; @@ -29,8 +29,8 @@ public Parameter(int index) { } @Override - public String getSQL() { - return "?" + (index + 1); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append('?').append(index + 1); } @Override @@ -54,24 +54,19 @@ public Value getParamValue() { } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { return getParamValue(); } @Override - public int getType() { + public TypeInfo getType() { if (value != null) { return value.getType(); } if (column != null) { return column.getType(); } - return Value.UNKNOWN; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - // can't map + return TypeInfo.TYPE_UNKNOWN; } @Override @@ -82,83 +77,27 @@ public void checkSet() { } @Override - public Expression optimize(Session session) { + public Expression optimize(SessionLocal session) { + if (session.getDatabase().getMode().treatEmptyStringsAsNull) { + if (value instanceof ValueVarchar && value.getString().isEmpty()) { + value = ValueNull.INSTANCE; + } + } return this; } - @Override - public boolean isConstant() { - return false; - } - @Override public boolean isValueSet() { return value != null; } - @Override - public 
void setEvaluatable(TableFilter tableFilter, boolean b) { - // not bound - } - - @Override - public int getScale() { - if (value != null) { - return value.getScale(); - } - if (column != null) { - return column.getScale(); - } - return 0; - } - - @Override - public long getPrecision() { - if (value != null) { - return value.getPrecision(); - } - if (column != null) { - return column.getPrecision(); - } - return 0; - } - - @Override - public int getDisplaySize() { - if (value != null) { - return value.getDisplaySize(); - } - if (column != null) { - return column.getDisplaySize(); - } - return 0; - } - - @Override - public void updateAggregate(Session session) { - // nothing to do - } - @Override public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - // the parameter _will_be_ evaluatable at execute time - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - // it is checked independently if the value is the same as the last - // time - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.GET_COLUMNS: - return true; + switch (visitor.getType()) { case ExpressionVisitor.INDEPENDENT: return value != null; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } @@ -168,9 +107,8 @@ public int getCost() { } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.get(false))); + public Expression getNotIfPossible(SessionLocal session) { + return new Comparison(Comparison.EQUAL, this, ValueExpression.FALSE, false); } public void setColumn(Column column) { diff --git a/h2/src/main/org/h2/expression/ParameterInterface.java 
b/h2/src/main/org/h2/expression/ParameterInterface.java index c2e404122d..2f8405213d 100644 --- a/h2/src/main/org/h2/expression/ParameterInterface.java +++ b/h2/src/main/org/h2/expression/ParameterInterface.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import org.h2.message.DbException; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -43,26 +44,12 @@ public interface ParameterInterface { boolean isValueSet(); /** - * Get the expected data type of the parameter if no value is set, or the + * Returns the expected data type if no value is set, or the * data type of the value if one is set. * * @return the data type */ - int getType(); - - /** - * Get the expected precision of this parameter. - * - * @return the expected precision - */ - long getPrecision(); - - /** - * Get the expected scale of this parameter. - * - * @return the expected scale - */ - int getScale(); + TypeInfo getType(); /** * Check if this column is nullable. diff --git a/h2/src/main/org/h2/expression/ParameterRemote.java b/h2/src/main/org/h2/expression/ParameterRemote.java index d299b42e25..fe6a46b9e5 100644 --- a/h2/src/main/org/h2/expression/ParameterRemote.java +++ b/h2/src/main/org/h2/expression/ParameterRemote.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; @@ -11,7 +11,9 @@ import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.value.Transfer; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueLob; /** * A client side (remote) parameter. @@ -20,9 +22,7 @@ public class ParameterRemote implements ParameterInterface { private Value value; private final int index; - private int dataType = Value.UNKNOWN; - private long precision; - private int scale; + private TypeInfo type = TypeInfo.TYPE_UNKNOWN; private int nullable = ResultSetMetaData.columnNullableUnknown; public ParameterRemote(int index) { @@ -31,8 +31,8 @@ public ParameterRemote(int index) { @Override public void setValue(Value newValue, boolean closeOld) { - if (closeOld && value != null) { - value.close(); + if (closeOld && value instanceof ValueLob) { + ((ValueLob) value).remove(); } value = newValue; } @@ -55,18 +55,8 @@ public boolean isValueSet() { } @Override - public int getType() { - return value == null ? dataType : value.getType(); - } - - @Override - public long getPrecision() { - return value == null ? precision : value.getPrecision(); - } - - @Override - public int getScale() { - return value == null ? scale : value.getScale(); + public TypeInfo getType() { + return value == null ? type : value.getType(); } @Override @@ -75,14 +65,13 @@ public int getNullable() { } /** - * Write the parameter meta data from the transfer object. + * Read the parameter meta data from the transfer object. 
* * @param transfer the transfer object + * @throws IOException on failure */ public void readMetaData(Transfer transfer) throws IOException { - dataType = transfer.readInt(); - precision = transfer.readLong(); - scale = transfer.readInt(); + type = transfer.readTypeInfo(); nullable = transfer.readInt(); } @@ -91,13 +80,10 @@ public void readMetaData(Transfer transfer) throws IOException { * * @param transfer the transfer object * @param p the parameter + * @throws IOException on failure */ - public static void writeMetaData(Transfer transfer, ParameterInterface p) - throws IOException { - transfer.writeInt(p.getType()); - transfer.writeLong(p.getPrecision()); - transfer.writeInt(p.getScale()); - transfer.writeInt(p.getNullable()); + public static void writeMetaData(Transfer transfer, ParameterInterface p) throws IOException { + transfer.writeTypeInfo(p.getType()).writeInt(p.getNullable()); } } diff --git a/h2/src/main/org/h2/expression/Rownum.java b/h2/src/main/org/h2/expression/Rownum.java index e2527182bc..0b7db71504 100644 --- a/h2/src/main/org/h2/expression/Rownum.java +++ b/h2/src/main/org/h2/expression/Rownum.java @@ -1,97 +1,69 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import org.h2.command.Prepared; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueBigint; /** * Represents the ROWNUM function. 
*/ -public class Rownum extends Expression { +public final class Rownum extends Operation0 { private final Prepared prepared; + private boolean singleRow; + public Rownum(Prepared prepared) { + if (prepared == null) { + throw DbException.getInternalError(); + } this.prepared = prepared; } @Override - public Value getValue(Session session) { - return ValueInt.get(prepared.getCurrentRowNumber()); + public Value getValue(SessionLocal session) { + return ValueBigint.get(prepared.getCurrentRowNumber()); } @Override - public int getType() { - return Value.INT; + public TypeInfo getType() { + return TypeInfo.TYPE_BIGINT; } @Override - public void mapColumns(ColumnResolver resolver, int level) { - // nothing to do + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append("ROWNUM()"); } @Override - public Expression optimize(Session session) { - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do - } - - @Override - public int getScale() { - return 0; - } - - @Override - public long getPrecision() { - return ValueInt.PRECISION; - } - - @Override - public int getDisplaySize() { - return ValueInt.DISPLAY_SIZE; - } - - @Override - public String getSQL() { - return "ROWNUM()"; - } - - @Override - public void updateAggregate(Session session) { - // nothing to do + public Expression optimize(SessionLocal session) { + return singleRow ? 
ValueExpression.get(ValueBigint.get(1L)) : this; } @Override public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { + switch (visitor.getType()) { case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: + case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: case ExpressionVisitor.DETERMINISTIC: case ExpressionVisitor.INDEPENDENT: - return false; case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.GET_COLUMNS: - // if everything else is the same, the rownum is the same - return true; + return false; + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: + if (visitor.getQueryLevel() > 0) { + singleRow = true; + } + //$FALL-THROUGH$ default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } diff --git a/h2/src/main/org/h2/expression/SearchedCase.java b/h2/src/main/org/h2/expression/SearchedCase.java new file mode 100644 index 0000000000..05ba3454a8 --- /dev/null +++ b/h2/src/main/org/h2/expression/SearchedCase.java @@ -0,0 +1,95 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A searched case. 
+ */ +public final class SearchedCase extends OperationN { + + public SearchedCase() { + super(new Expression[4]); + } + + public SearchedCase(Expression[] args) { + super(args); + } + + @Override + public Value getValue(SessionLocal session) { + int len = args.length - 1; + for (int i = 0; i < len; i += 2) { + if (args[i].getBooleanValue(session)) { + return args[i + 1].getValue(session).convertTo(type, session); + } + } + if ((len & 1) == 0) { + return args[len].getValue(session).convertTo(type, session); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + TypeInfo typeInfo = TypeInfo.TYPE_UNKNOWN; + int len = args.length - 1; + boolean allConst = true; + for (int i = 0; i < len; i += 2) { + Expression condition = args[i].optimize(session); + Expression result = args[i + 1].optimize(session); + if (allConst) { + if (condition.isConstant()) { + if (condition.getBooleanValue(session)) { + return result; + } + } else { + allConst = false; + } + } + args[i] = condition; + args[i + 1] = result; + typeInfo = SimpleCase.combineTypes(typeInfo, result); + } + if ((len & 1) == 0) { + Expression result = args[len].optimize(session); + if (allConst) { + return result; + } + args[len] = result; + typeInfo = SimpleCase.combineTypes(typeInfo, result); + } else if (allConst) { + return ValueExpression.NULL; + } + if (typeInfo.getValueType() == Value.UNKNOWN) { + typeInfo = TypeInfo.TYPE_VARCHAR; + } + type = typeInfo; + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append("CASE"); + int len = args.length - 1; + for (int i = 0; i < len; i += 2) { + builder.append(" WHEN "); + args[i].getUnenclosedSQL(builder, sqlFlags); + builder.append(" THEN "); + args[i + 1].getUnenclosedSQL(builder, sqlFlags); + } + if ((len & 1) == 0) { + builder.append(" ELSE "); + args[len].getUnenclosedSQL(builder, sqlFlags); + } + return builder.append(" END"); + } + +} diff 
--git a/h2/src/main/org/h2/expression/SequenceValue.java b/h2/src/main/org/h2/expression/SequenceValue.java index fc12dd78e8..96a4410d4e 100644 --- a/h2/src/main/org/h2/expression/SequenceValue.java +++ b/h2/src/main/org/h2/expression/SequenceValue.java @@ -1,92 +1,73 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; -import org.h2.message.DbException; +import org.h2.command.Prepared; +import org.h2.engine.SessionLocal; import org.h2.schema.Sequence; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; /** * Wraps a sequence when used in a statement. */ -public class SequenceValue extends Expression { +public final class SequenceValue extends Operation0 { private final Sequence sequence; - public SequenceValue(Sequence sequence) { - this.sequence = sequence; - } - - @Override - public Value getValue(Session session) { - long value = sequence.getNext(session); - session.setLastIdentity(ValueLong.get(value)); - return ValueLong.get(value); - } - - @Override - public int getType() { - return Value.LONG; - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - // nothing to do - } - - @Override - public Expression optimize(Session session) { - return this; - } + private final boolean current; - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do - } + private final Prepared prepared; - @Override - public int getScale() { - return 0; + /** + * Creates new instance of NEXT VALUE FOR expression. 
+ * + * @param sequence + * the sequence + * @param prepared + * the owner command, or {@code null} + */ + public SequenceValue(Sequence sequence, Prepared prepared) { + this.sequence = sequence; + current = false; + this.prepared = prepared; } - @Override - public long getPrecision() { - return ValueInt.PRECISION; + /** + * Creates new instance of CURRENT VALUE FOR expression. + * + * @param sequence + * the sequence + */ + public SequenceValue(Sequence sequence) { + this.sequence = sequence; + current = true; + prepared = null; } @Override - public int getDisplaySize() { - return ValueInt.DISPLAY_SIZE; + public Value getValue(SessionLocal session) { + return current ? session.getCurrentValueFor(sequence) : session.getNextValueFor(sequence, prepared); } @Override - public String getSQL() { - return "(NEXT VALUE FOR " + sequence.getSQL() +")"; + public TypeInfo getType() { + return sequence.getDataType(); } @Override - public void updateAggregate(Session session) { - // nothing to do + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(current ? 
"CURRENT" : "NEXT").append(" VALUE FOR "); + return sequence.getSQL(builder, sqlFlags); } @Override public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_COLUMNS: - return true; + switch (visitor.getType()) { case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: case ExpressionVisitor.INDEPENDENT: case ExpressionVisitor.QUERY_COMPARABLE: return false; @@ -96,8 +77,10 @@ public boolean isEverything(ExpressionVisitor visitor) { case ExpressionVisitor.GET_DEPENDENCIES: visitor.addDependency(sequence); return true; + case ExpressionVisitor.READONLY: + return current; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } diff --git a/h2/src/main/org/h2/expression/SimpleCase.java b/h2/src/main/org/h2/expression/SimpleCase.java new file mode 100644 index 0000000000..1fc46fa57e --- /dev/null +++ b/h2/src/main/org/h2/expression/SimpleCase.java @@ -0,0 +1,273 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A simple case. 
+ */ +public final class SimpleCase extends Expression { + + public static final class SimpleWhen { + + Expression[] operands; + + Expression result; + + SimpleWhen next; + + public SimpleWhen(Expression operand, Expression result) { + this(new Expression[] { operand }, result); + } + + public SimpleWhen(Expression[] operands, Expression result) { + this.operands = operands; + this.result = result; + } + + public void setWhen(SimpleWhen next) { + this.next = next; + } + + } + + private Expression operand; + + private SimpleWhen when; + + private Expression elseResult; + + private TypeInfo type; + + public SimpleCase(Expression operand, SimpleWhen when, Expression elseResult) { + this.operand = operand; + this.when = when; + this.elseResult = elseResult; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = operand.getValue(session); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + if (e.getWhenValue(session, v)) { + return when.result.getValue(session).convertTo(type, session); + } + } + } + if (elseResult != null) { + return elseResult.getValue(session).convertTo(type, session); + } + return ValueNull.INSTANCE; + } + + @Override + public Expression optimize(SessionLocal session) { + TypeInfo typeInfo = TypeInfo.TYPE_UNKNOWN; + operand = operand.optimize(session); + boolean allConst = operand.isConstant(); + Value v = null; + if (allConst) { + v = operand.getValue(session); + } + TypeInfo operandType = operand.getType(); + for (SimpleWhen when = this.when; when != null; when = when.next) { + Expression[] operands = when.operands; + for (int i = 0; i < operands.length; i++) { + Expression e = operands[i].optimize(session); + if (!e.isWhenConditionOperand()) { + TypeInfo.checkComparable(operandType, e.getType()); + } + if (allConst) { + if (e.isConstant()) { + if (e.getWhenValue(session, v)) { + return when.result.optimize(session); + } + } else { + allConst = false; + } + } + 
operands[i] = e; + } + when.result = when.result.optimize(session); + typeInfo = combineTypes(typeInfo, when.result); + } + if (elseResult != null) { + elseResult = elseResult.optimize(session); + if (allConst) { + return elseResult; + } + typeInfo = combineTypes(typeInfo, elseResult); + } else if (allConst) { + return ValueExpression.NULL; + } + if (typeInfo.getValueType() == Value.UNKNOWN) { + typeInfo = TypeInfo.TYPE_VARCHAR; + } + type = typeInfo; + return this; + } + + static TypeInfo combineTypes(TypeInfo typeInfo, Expression e) { + if (!e.isNullConstant()) { + TypeInfo type = e.getType(); + int valueType = type.getValueType(); + if (valueType != Value.UNKNOWN && valueType != Value.NULL) { + typeInfo = TypeInfo.getHigherType(typeInfo, type); + } + } + return typeInfo; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + operand.getUnenclosedSQL(builder.append("CASE "), sqlFlags); + for (SimpleWhen when = this.when; when != null; when = when.next) { + builder.append(" WHEN"); + Expression[] operands = when.operands; + for (int i = 0, len = operands.length; i < len; i++) { + if (i > 0) { + builder.append(','); + } + operands[i].getWhenSQL(builder, sqlFlags); + } + when.result.getUnenclosedSQL(builder.append(" THEN "), sqlFlags); + } + if (elseResult != null) { + elseResult.getUnenclosedSQL(builder.append(" ELSE "), sqlFlags); + } + return builder.append(" END"); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + operand.mapColumns(resolver, level, state); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.mapColumns(resolver, level, state); + } + when.result.mapColumns(resolver, level, state); + } + if (elseResult != null) { + elseResult.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, 
boolean value) { + operand.setEvaluatable(tableFilter, value); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.setEvaluatable(tableFilter, value); + } + when.result.setEvaluatable(tableFilter, value); + } + if (elseResult != null) { + elseResult.setEvaluatable(tableFilter, value); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + operand.updateAggregate(session, stage); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + e.updateAggregate(session, stage); + } + when.result.updateAggregate(session, stage); + } + if (elseResult != null) { + elseResult.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!operand.isEverything(visitor)) { + return false; + } + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + if (!e.isEverything(visitor)) { + return false; + } + } + if (!when.result.isEverything(visitor)) { + return false; + } + } + if (elseResult != null && !elseResult.isEverything(visitor)) { + return false; + } + return true; + } + + @Override + public int getCost() { + int cost = 1, resultCost = 0; + cost += operand.getCost(); + for (SimpleWhen when = this.when; when != null; when = when.next) { + for (Expression e : when.operands) { + cost += e.getCost(); + } + resultCost = Math.max(resultCost, when.result.getCost()); + } + if (elseResult != null) { + resultCost = Math.max(resultCost, elseResult.getCost()); + } + return cost + resultCost; + } + + @Override + public int getSubexpressionCount() { + int count = 1; + for (SimpleWhen when = this.when; when != null; when = when.next) { + count += when.operands.length + 1; + } + if (elseResult != null) { + count++; + } + return count; + } + + @Override + public Expression getSubexpression(int index) { + if (index >= 0) { + if (index == 
0) { + return operand; + } + int ptr = 1; + for (SimpleWhen when = this.when; when != null; when = when.next) { + Expression[] operands = when.operands; + int count = operands.length; + int offset = index - ptr; + if (offset < count) { + return operands[offset]; + } + ptr += count; + if (index == ptr++) { + return when.result; + } + } + if (elseResult != null && index == ptr) { + return elseResult; + } + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/Subquery.java b/h2/src/main/org/h2/expression/Subquery.java index 3b49a30e50..236a538b25 100644 --- a/h2/src/main/org/h2/expression/Subquery.java +++ b/h2/src/main/org/h2/expression/Subquery.java @@ -1,124 +1,150 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; import org.h2.api.ErrorCode; -import org.h2.command.dml.Query; -import org.h2.engine.Session; +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueArray; import org.h2.value.ValueNull; +import org.h2.value.ValueRow; /** * A query returning a single value. * Subqueries are used inside other statements. 
*/ -public class Subquery extends Expression { +public final class Subquery extends Expression { private final Query query; + private Expression expression; + private Value nullValue; + + private HashSet outerResolvers = new HashSet<>(); + public Subquery(Query query) { this.query = query; } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { query.setSession(session); - ResultInterface result = query.query(2); - try { - int rowcount = result.getRowCount(); - if (rowcount > 1) { - throw DbException.get(ErrorCode.SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW); - } + try (ResultInterface result = query.query(2)) { Value v; - if (rowcount <= 0) { - v = ValueNull.INSTANCE; + if (!result.next()) { + return nullValue; } else { - result.next(); - Value[] values = result.currentRow(); - if (result.getVisibleColumnCount() == 1) { - v = values[0]; - } else { - v = ValueArray.get(values); + v = readRow(result); + if (result.hasNext()) { + throw DbException.get(ErrorCode.SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW); } } return v; - } finally { - result.close(); } } - @Override - public int getType() { - return getExpression().getType(); + /** + * Evaluates and returns all rows of the subquery. + * + * @param session + * the session + * @return values in all rows + */ + public ArrayList getAllRows(SessionLocal session) { + ArrayList list = new ArrayList<>(); + query.setSession(session); + try (ResultInterface result = query.query(Integer.MAX_VALUE)) { + while (result.next()) { + list.add(readRow(result)); + } + } + return list; } - @Override - public void mapColumns(ColumnResolver resolver, int level) { - query.mapColumns(resolver, level + 1); + private Value readRow(ResultInterface result) { + Value[] values = result.currentRow(); + int visible = result.getVisibleColumnCount(); + return visible == 1 ? values[0] + : ValueRow.get(getType(), visible == values.length ? 
values : Arrays.copyOf(values, visible)); } @Override - public Expression optimize(Session session) { - query.prepare(); - return this; + public TypeInfo getType() { + return expression.getType(); } @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - query.setEvaluatable(tableFilter, b); + public void mapColumns(ColumnResolver resolver, int level, int state) { + outerResolvers.add(resolver); + query.mapColumns(resolver, level + 1); } @Override - public int getScale() { - return getExpression().getScale(); + public Expression optimize(SessionLocal session) { + query.prepare(); + if (query.isConstantQuery()) { + setType(); + return ValueExpression.get(getValue(session)); + } + if (outerResolvers != null && session.getDatabase().getSettings().optimizeSimpleSingleRowSubqueries) { + Expression e = query.getIfSingleRow(); + if (e != null && e.isEverything(ExpressionVisitor.getDecrementQueryLevelVisitor(outerResolvers, 0))) { + e.isEverything(ExpressionVisitor.getDecrementQueryLevelVisitor(outerResolvers, 1)); + return e.optimize(session); + } + } + outerResolvers = null; + setType(); + return this; } - @Override - public long getPrecision() { - return getExpression().getPrecision(); + private void setType() { + ArrayList expressions = query.getExpressions(); + int columnCount = query.getColumnCount(); + if (columnCount == 1) { + expression = expressions.get(0); + nullValue = ValueNull.INSTANCE; + } else { + Expression[] list = new Expression[columnCount]; + Value[] nulls = new Value[columnCount]; + for (int i = 0; i < columnCount; i++) { + list[i] = expressions.get(i); + nulls[i] = ValueNull.INSTANCE; + } + ExpressionList expressionList = new ExpressionList(list, false); + expressionList.initializeType(); + expression = expressionList; + nullValue = ValueRow.get(new ExtTypeInfoRow(list), nulls); + } } @Override - public int getDisplaySize() { - return getExpression().getDisplaySize(); + public void setEvaluatable(TableFilter tableFilter, 
boolean b) { + query.setEvaluatable(tableFilter, b); } @Override - public String getSQL() { - return "(" + query.getPlanSQL() + ")"; + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append('(').append(query.getPlanSQL(sqlFlags)).append(')'); } @Override - public void updateAggregate(Session session) { - query.updateAggregate(session); - } - - private Expression getExpression() { - if (expression == null) { - ArrayList expressions = query.getExpressions(); - int columnCount = query.getColumnCount(); - if (columnCount == 1) { - expression = expressions.get(0); - } else { - Expression[] list = new Expression[columnCount]; - for (int i = 0; i < columnCount; i++) { - list[i] = expressions.get(i); - } - expression = new ExpressionList(list); - } - } - return expression; + public void updateAggregate(SessionLocal session, int stage) { + query.updateAggregate(session, stage); } @Override @@ -136,7 +162,8 @@ public int getCost() { } @Override - public Expression[] getExpressionColumns(Session session) { - return getExpression().getExpressionColumns(session); + public boolean isConstant() { + return query.isConstantQuery(); } + } diff --git a/h2/src/main/org/h2/expression/TableFunction.java b/h2/src/main/org/h2/expression/TableFunction.java deleted file mode 100644 index e9b09b86d2..0000000000 --- a/h2/src/main/org/h2/expression/TableFunction.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.expression; - -import java.util.ArrayList; - -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.LocalResult; -import org.h2.result.ResultInterface; -import org.h2.table.Column; -import org.h2.tools.SimpleResultSet; -import org.h2.util.MathUtils; -import org.h2.util.StatementBuilder; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; - -/** - * Implementation of the functions TABLE(..) and TABLE_DISTINCT(..). - */ -public class TableFunction extends Function { - private final boolean distinct; - private final long rowCount; - private Column[] columnList; - - TableFunction(Database database, FunctionInfo info, long rowCount) { - super(database, info); - distinct = info.type == Function.TABLE_DISTINCT; - this.rowCount = rowCount; - } - - @Override - public Value getValue(Session session) { - return getTable(session, args, false, distinct); - } - - @Override - protected void checkParameterCount(int len) { - if (len < 1) { - throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), ">0"); - } - } - - @Override - public String getSQL() { - StatementBuilder buff = new StatementBuilder(getName()); - buff.append('('); - int i = 0; - for (Expression e : args) { - buff.appendExceptFirst(", "); - buff.append(columnList[i++].getCreateSQL()).append('=').append(e.getSQL()); - } - return buff.append(')').toString(); - } - - - @Override - public String getName() { - return distinct ? 
"TABLE_DISTINCT" : "TABLE"; - } - - @Override - public ValueResultSet getValueForColumnList(Session session, - Expression[] nullArgs) { - return getTable(session, args, true, false); - } - - public void setColumns(ArrayList columns) { - this.columnList = new Column[columns.size()]; - columns.toArray(columnList); - } - - private ValueResultSet getTable(Session session, Expression[] argList, - boolean onlyColumnList, boolean distinctRows) { - int len = columnList.length; - Expression[] header = new Expression[len]; - Database db = session.getDatabase(); - for (int i = 0; i < len; i++) { - Column c = columnList[i]; - ExpressionColumn col = new ExpressionColumn(db, c); - header[i] = col; - } - LocalResult result = new LocalResult(session, header, len); - if (distinctRows) { - result.setDistinct(); - } - if (!onlyColumnList) { - Value[][] list = new Value[len][]; - int rows = 0; - for (int i = 0; i < len; i++) { - Value v = argList[i].getValue(session); - if (v == ValueNull.INSTANCE) { - list[i] = new Value[0]; - } else { - ValueArray array = (ValueArray) v.convertTo(Value.ARRAY); - Value[] l = array.getList(); - list[i] = l; - rows = Math.max(rows, l.length); - } - } - for (int row = 0; row < rows; row++) { - Value[] r = new Value[len]; - for (int j = 0; j < len; j++) { - Value[] l = list[j]; - Value v; - if (l.length <= row) { - v = ValueNull.INSTANCE; - } else { - Column c = columnList[j]; - v = l[row]; - v = c.convert(v); - v = v.convertPrecision(c.getPrecision(), false); - v = v.convertScale(true, c.getScale()); - } - r[j] = v; - } - result.addRow(r); - } - } - result.done(); - ValueResultSet vr = ValueResultSet.get(getSimpleResultSet(result, - Integer.MAX_VALUE)); - return vr; - } - - private static SimpleResultSet getSimpleResultSet(ResultInterface rs, - int maxrows) { - int columnCount = rs.getVisibleColumnCount(); - SimpleResultSet simple = new SimpleResultSet(); - simple.setAutoClose(false); - for (int i = 0; i < columnCount; i++) { - String name = 
rs.getColumnName(i); - int sqlType = DataType.convertTypeToSQLType(rs.getColumnType(i)); - int precision = MathUtils.convertLongToInt(rs.getColumnPrecision(i)); - int scale = rs.getColumnScale(i); - simple.addColumn(name, sqlType, precision, scale); - } - rs.reset(); - for (int i = 0; i < maxrows && rs.next(); i++) { - Object[] list = new Object[columnCount]; - for (int j = 0; j < columnCount; j++) { - list[j] = rs.currentRow()[j].getObject(); - } - simple.addRow(list); - } - return simple; - } - - public long getRowCount() { - return rowCount; - } - - @Override - public Expression[] getExpressionColumns(Session session) { - return getExpressionColumns(session, - getTable(session, getArgs(), true, false).getResultSet()); - } - -} diff --git a/h2/src/main/org/h2/expression/TimeZoneOperation.java b/h2/src/main/org/h2/expression/TimeZoneOperation.java new file mode 100644 index 0000000000..3c7de63b63 --- /dev/null +++ b/h2/src/main/org/h2/expression/TimeZoneOperation.java @@ -0,0 +1,146 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNull; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A time zone specification (AT { TIME ZONE | LOCAL }). 
+ */ +public final class TimeZoneOperation extends Operation1_2 { + + public TimeZoneOperation(Expression left, Expression right) { + super(left, right); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" AT "); + if (right != null) { + right.getSQL(builder.append("TIME ZONE "), sqlFlags, AUTO_PARENTHESES); + } else { + builder.append("LOCAL"); + } + return builder; + } + + @Override + public Value getValue(SessionLocal session) { + Value a = left.getValue(session).convertTo(type, session); + int valueType = a.getValueType(); + if ((valueType == Value.TIMESTAMP_TZ || valueType == Value.TIME_TZ) && right != null) { + Value b = right.getValue(session); + if (b != ValueNull.INSTANCE) { + if (valueType == Value.TIMESTAMP_TZ) { + ValueTimestampTimeZone v = (ValueTimestampTimeZone) a; + long dateValue = v.getDateValue(); + long timeNanos = v.getTimeNanos(); + int offsetSeconds = v.getTimeZoneOffsetSeconds(); + int newOffset = parseTimeZone(b, dateValue, timeNanos, offsetSeconds, true); + if (offsetSeconds != newOffset) { + a = DateTimeUtils.timestampTimeZoneAtOffset(dateValue, timeNanos, offsetSeconds, newOffset); + } + } else { + ValueTimeTimeZone v = (ValueTimeTimeZone) a; + long timeNanos = v.getNanos(); + int offsetSeconds = v.getTimeZoneOffsetSeconds(); + int newOffset = parseTimeZone(b, DateTimeUtils.EPOCH_DATE_VALUE, timeNanos, offsetSeconds, false); + if (offsetSeconds != newOffset) { + timeNanos += (newOffset - offsetSeconds) * DateTimeUtils.NANOS_PER_SECOND; + a = ValueTimeTimeZone.fromNanos(DateTimeUtils.normalizeNanosOfDay(timeNanos), newOffset); + } + } + } else { + a = ValueNull.INSTANCE; + } + } + return a; + } + + private static int parseTimeZone(Value b, long dateValue, long timeNanos, int offsetSeconds, + boolean allowTimeZoneName) { + if (DataType.isCharacterStringType(b.getValueType())) { + TimeZoneProvider timeZone; + try { + timeZone = 
TimeZoneProvider.ofId(b.getString()); + } catch (RuntimeException ex) { + throw DbException.getInvalidValueException("time zone", b.getTraceSQL()); + } + if (!allowTimeZoneName && !timeZone.hasFixedOffset()) { + throw DbException.getInvalidValueException("time zone", b.getTraceSQL()); + } + return timeZone.getTimeZoneOffsetUTC(DateTimeUtils.getEpochSeconds(dateValue, timeNanos, offsetSeconds)); + } + return parseInterval(b); + } + + /** + * Parses a daytime interval as time zone offset. + * + * @param interval the interval + * @return the time zone offset in seconds + */ + public static int parseInterval(Value interval) { + ValueInterval i = (ValueInterval) interval.convertTo(TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND); + long h = i.getLeading(), seconds = i.getRemaining(); + if (h > 18 || h == 18 && seconds != 0 || seconds % DateTimeUtils.NANOS_PER_SECOND != 0) { + throw DbException.getInvalidValueException("time zone", i.getTraceSQL()); + } + int newOffset = (int) (h * 3_600 + seconds / DateTimeUtils.NANOS_PER_SECOND); + if (i.isNegative()) { + newOffset = -newOffset; + } + return newOffset; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + TypeInfo type = left.getType(); + int valueType = Value.TIMESTAMP_TZ, scale = ValueTimestamp.MAXIMUM_SCALE; + switch (type.getValueType()) { + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + scale = type.getScale(); + break; + case Value.TIME: + case Value.TIME_TZ: + valueType = Value.TIME_TZ; + scale = type.getScale(); + break; + default: + StringBuilder builder = left.getSQL(new StringBuilder(), TRACE_SQL_FLAGS, AUTO_PARENTHESES); + int offset = builder.length(); + builder.append(" AT "); + if (right != null) { + right.getSQL(builder.append("TIME ZONE "), TRACE_SQL_FLAGS, AUTO_PARENTHESES); + } else { + builder.append("LOCAL"); + } + throw DbException.getSyntaxError(builder.toString(), offset, "time, 
timestamp"); + } + this.type = TypeInfo.getTypeInfo(valueType, -1, scale, null); + if (left.isConstant() && (right == null || right.isConstant())) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/TypedValueExpression.java b/h2/src/main/org/h2/expression/TypedValueExpression.java new file mode 100644 index 0000000000..dd16296665 --- /dev/null +++ b/h2/src/main/org/h2/expression/TypedValueExpression.java @@ -0,0 +1,103 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression; + +import java.util.Objects; + +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * An expression representing a constant value with a type cast. + */ +public class TypedValueExpression extends ValueExpression { + + /** + * The expression represents the SQL UNKNOWN value. + */ + public static final TypedValueExpression UNKNOWN = new TypedValueExpression(ValueNull.INSTANCE, + TypeInfo.TYPE_BOOLEAN); + + /** + * Create a new expression with the given value and type. + * + * @param value + * the value + * @param type + * the value type + * @return the expression + */ + public static ValueExpression get(Value value, TypeInfo type) { + return getImpl(value, type, true); + } + + /** + * Create a new typed value expression with the given value and type if + * value is {@code NULL}, or a plain value expression otherwise. 
+ * + * @param value + * the value + * @param type + * the value type + * @return the expression + */ + public static ValueExpression getTypedIfNull(Value value, TypeInfo type) { + return getImpl(value, type, false); + } + + private static ValueExpression getImpl(Value value, TypeInfo type, boolean preserveStrictType) { + if (value == ValueNull.INSTANCE) { + switch (type.getValueType()) { + case Value.NULL: + return ValueExpression.NULL; + case Value.BOOLEAN: + return UNKNOWN; + } + return new TypedValueExpression(value, type); + } + if (preserveStrictType) { + DataType dt = DataType.getDataType(type.getValueType()); + TypeInfo vt = value.getType(); + if (dt.supportsPrecision && type.getPrecision() != vt.getPrecision() + || dt.supportsScale && type.getScale() != vt.getScale() + || !Objects.equals(type.getExtTypeInfo(), vt.getExtTypeInfo())) { + return new TypedValueExpression(value, type); + } + } + return ValueExpression.get(value); + } + + private final TypeInfo type; + + private TypedValueExpression(Value value, TypeInfo type) { + super(value); + this.type = type; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (this == UNKNOWN) { + builder.append("UNKNOWN"); + } else { + value.getSQL(builder.append("CAST("), sqlFlags | NO_CASTS).append(" AS "); + type.getSQL(builder, sqlFlags).append(')'); + } + return builder; + } + + @Override + public boolean isNullConstant() { + return value == ValueNull.INSTANCE; + } + +} diff --git a/h2/src/main/org/h2/expression/UnaryOperation.java b/h2/src/main/org/h2/expression/UnaryOperation.java new file mode 100644 index 0000000000..6860d7ebdc --- /dev/null +++ b/h2/src/main/org/h2/expression/UnaryOperation.java @@ -0,0 +1,55 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression; + +import org.h2.engine.SessionLocal; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Unary operation. Only negation operation is currently supported. + */ +public class UnaryOperation extends Operation1 { + + public UnaryOperation(Expression arg) { + super(arg); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + // don't remove the space, otherwise it might end up some thing like + // --1 which is a line remark + return arg.getSQL(builder.append("- "), sqlFlags, AUTO_PARENTHESES); + } + + @Override + public Value getValue(SessionLocal session) { + Value a = arg.getValue(session).convertTo(type, session); + return a == ValueNull.INSTANCE ? a : a.negate(); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = arg.getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; + } else if (type.getValueType() == Value.ENUM) { + type = TypeInfo.TYPE_INTEGER; + } + if (arg.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/expression/ValueExpression.java b/h2/src/main/org/h2/expression/ValueExpression.java index b8ed4f7084..d0515e76aa 100644 --- a/h2/src/main/org/h2/expression/ValueExpression.java +++ b/h2/src/main/org/h2/expression/ValueExpression.java @@ -1,58 +1,53 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.expression.condition.Comparison; import org.h2.index.IndexCondition; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueArray; import org.h2.value.ValueBoolean; import org.h2.value.ValueNull; /** * An expression representing a constant value. */ -public class ValueExpression extends Expression { +public class ValueExpression extends Operation0 { + /** * The expression represents ValueNull.INSTANCE. */ - private static final Object NULL = new ValueExpression(ValueNull.INSTANCE); + public static final ValueExpression NULL = new ValueExpression(ValueNull.INSTANCE); /** * This special expression represents the default value. It is used for * UPDATE statements of the form SET COLUMN = DEFAULT. The value is * ValueNull.INSTANCE, but should never be accessed. */ - private static final Object DEFAULT = new ValueExpression(ValueNull.INSTANCE); - - private final Value value; + public static final ValueExpression DEFAULT = new ValueExpression(ValueNull.INSTANCE); - private ValueExpression(Value value) { - this.value = value; - } + /** + * The expression represents ValueBoolean.TRUE. + */ + public static final ValueExpression TRUE = new ValueExpression(ValueBoolean.TRUE); /** - * Get the NULL expression. - * - * @return the NULL expression + * The expression represents ValueBoolean.FALSE. */ - public static ValueExpression getNull() { - return (ValueExpression) NULL; - } + public static final ValueExpression FALSE = new ValueExpression(ValueBoolean.FALSE); /** - * Get the DEFAULT expression. - * - * @return the DEFAULT expression + * The value. 
*/ - public static ValueExpression getDefault() { - return (ValueExpression) DEFAULT; + final Value value; + + ValueExpression(Value value) { + this.value = value; } /** @@ -63,45 +58,60 @@ public static ValueExpression getDefault() { */ public static ValueExpression get(Value value) { if (value == ValueNull.INSTANCE) { - return getNull(); + return NULL; + } + if (value.getValueType() == Value.BOOLEAN) { + return getBoolean(value.getBoolean()); } return new ValueExpression(value); } - @Override - public Value getValue(Session session) { - return value; + /** + * Create a new expression with the given boolean value. + * + * @param value the boolean value + * @return the expression + */ + public static ValueExpression getBoolean(Value value) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return getBoolean(value.getBoolean()); } - @Override - public int getType() { - return value.getType(); + /** + * Create a new expression with the given boolean value. + * + * @param value the boolean value + * @return the expression + */ + public static ValueExpression getBoolean(boolean value) { + return value ? 
TRUE : FALSE; } @Override - public void createIndexConditions(Session session, TableFilter filter) { - if (value.getType() == Value.BOOLEAN) { - boolean v = ((ValueBoolean) value).getBoolean().booleanValue(); - if (!v) { - filter.addIndexCondition(IndexCondition.get(Comparison.FALSE, null, this)); - } - } + public Value getValue(SessionLocal session) { + return value; } @Override - public Expression getNotIfPossible(Session session) { - return new Comparison(session, Comparison.EQUAL, this, - ValueExpression.get(ValueBoolean.get(false))); + public TypeInfo getType() { + return value.getType(); } @Override - public void mapColumns(ColumnResolver resolver, int level) { - // nothing to do + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (value.getValueType() == Value.BOOLEAN && !value.getBoolean()) { + filter.addIndexCondition(IndexCondition.get(Comparison.FALSE, null, this)); + } } @Override - public Expression optimize(Session session) { - return this; + public Expression getNotIfPossible(SessionLocal session) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return getBoolean(!value.getBoolean()); } @Override @@ -110,60 +120,28 @@ public boolean isConstant() { } @Override - public boolean isValueSet() { - return true; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - // nothing to do - } - - @Override - public int getScale() { - return value.getScale(); + public boolean isNullConstant() { + return this == NULL; } @Override - public long getPrecision() { - return value.getPrecision(); - } - - @Override - public int getDisplaySize() { - return value.getDisplaySize(); + public boolean isValueSet() { + return true; } @Override - public String getSQL() { + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { if (this == DEFAULT) { - return "DEFAULT"; + builder.append("DEFAULT"); + } else { + value.getSQL(builder, sqlFlags); } - return 
value.getSQL(); - } - - @Override - public void updateAggregate(Session session) { - // nothing to do + return builder; } @Override public boolean isEverything(ExpressionVisitor visitor) { - switch (visitor.getType()) { - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.DETERMINISTIC: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.EVALUATABLE: - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_COLUMNS: - return true; - default: - throw DbException.throwInternalError("type=" + visitor.getType()); - } + return true; } @Override @@ -171,11 +149,4 @@ public int getCost() { return 0; } - @Override - public Expression[] getExpressionColumns(Session session) { - if (getType() == Value.ARRAY) { - return getExpressionColumns(session, (ValueArray) getValue(session)); - } - return super.getExpressionColumns(session); - } } diff --git a/h2/src/main/org/h2/expression/Variable.java b/h2/src/main/org/h2/expression/Variable.java index c6c6f190cd..b1d8da2823 100644 --- a/h2/src/main/org/h2/expression/Variable.java +++ b/h2/src/main/org/h2/expression/Variable.java @@ -1,26 +1,24 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.expression; -import org.h2.command.Parser; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.table.ColumnResolver; -import org.h2.table.TableFilter; +import org.h2.engine.SessionLocal; +import org.h2.util.ParserUtil; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** * A user-defined variable, for example: @ID. */ -public class Variable extends Expression { +public final class Variable extends Operation0 { private final String name; private Value lastValue; - public Variable(Session session, String name) { + public Variable(SessionLocal session, String name) { this.name = name; lastValue = session.getVariable(name); } @@ -31,79 +29,31 @@ public int getCost() { } @Override - public int getDisplaySize() { - return lastValue.getDisplaySize(); + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return ParserUtil.quoteIdentifier(builder.append('@'), name, sqlFlags); } @Override - public long getPrecision() { - return lastValue.getPrecision(); - } - - @Override - public String getSQL() { - return "@" + Parser.quoteIdentifier(name); - } - - @Override - public int getScale() { - return lastValue.getScale(); - } - - @Override - public int getType() { + public TypeInfo getType() { return lastValue.getType(); } @Override - public Value getValue(Session session) { + public Value getValue(SessionLocal session) { lastValue = session.getVariable(name); return lastValue; } @Override public boolean isEverything(ExpressionVisitor visitor) { - switch(visitor.getType()) { - case ExpressionVisitor.EVALUATABLE: - // the value will be evaluated at execute time - case ExpressionVisitor.SET_MAX_DATA_MODIFICATION_ID: - // it is checked independently if the value is the same as the last - // time - case ExpressionVisitor.OPTIMIZABLE_MIN_MAX_COUNT_ALL: - case ExpressionVisitor.READONLY: - case ExpressionVisitor.INDEPENDENT: - case ExpressionVisitor.NOT_FROM_RESOLVER: - case 
ExpressionVisitor.QUERY_COMPARABLE: - case ExpressionVisitor.GET_DEPENDENCIES: - case ExpressionVisitor.GET_COLUMNS: - return true; + switch (visitor.getType()) { case ExpressionVisitor.DETERMINISTIC: return false; default: - throw DbException.throwInternalError("type="+visitor.getType()); + return true; } } - @Override - public void mapColumns(ColumnResolver resolver, int level) { - // nothing to do - } - - @Override - public Expression optimize(Session session) { - return this; - } - - @Override - public void setEvaluatable(TableFilter tableFilter, boolean value) { - // nothing to do - } - - @Override - public void updateAggregate(Session session) { - // nothing to do - } - public String getName() { return name; } diff --git a/h2/src/main/org/h2/expression/Wildcard.java b/h2/src/main/org/h2/expression/Wildcard.java index 9571f07abe..17d8cc9997 100644 --- a/h2/src/main/org/h2/expression/Wildcard.java +++ b/h2/src/main/org/h2/expression/Wildcard.java @@ -1,16 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.expression; +import java.util.ArrayList; +import java.util.HashMap; + import org.h2.api.ErrorCode; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; +import org.h2.table.Column; import org.h2.table.ColumnResolver; import org.h2.table.TableFilter; import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -18,58 +23,73 @@ * This object is only used temporarily during the parsing phase, and later * replaced by column expressions. 
*/ -public class Wildcard extends Expression { +public final class Wildcard extends Expression { + private final String schema; private final String table; + private ArrayList exceptColumns; + public Wildcard(String schema, String table) { this.schema = schema; this.table = table; } - @Override - public boolean isWildcard() { - return true; + public ArrayList getExceptColumns() { + return exceptColumns; } - @Override - public Value getValue(Session session) { - throw DbException.throwInternalError(); + public void setExceptColumns(ArrayList exceptColumns) { + this.exceptColumns = exceptColumns; } - @Override - public int getType() { - throw DbException.throwInternalError(); - } - - @Override - public void mapColumns(ColumnResolver resolver, int level) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, table); + /** + * Returns map of excluded table columns to expression columns and validates + * that all columns are resolved and not duplicated. + * + * @return map of excluded table columns to expression columns + */ + public HashMap mapExceptColumns() { + HashMap exceptTableColumns = new HashMap<>(); + for (ExpressionColumn ec : exceptColumns) { + Column column = ec.getColumn(); + if (column == null) { + throw ec.getColumnException(ErrorCode.COLUMN_NOT_FOUND_1); + } + if (exceptTableColumns.putIfAbsent(column, ec) != null) { + throw ec.getColumnException(ErrorCode.DUPLICATE_COLUMN_NAME_1); + } + } + return exceptTableColumns; } @Override - public Expression optimize(Session session) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_1, table); + public Value getValue(SessionLocal session) { + throw DbException.getInternalError(toString()); } @Override - public void setEvaluatable(TableFilter tableFilter, boolean b) { - DbException.throwInternalError(); + public TypeInfo getType() { + throw DbException.getInternalError(toString()); } @Override - public int getScale() { - throw DbException.throwInternalError(); + public void mapColumns(ColumnResolver resolver, int 
level, int state) { + if (exceptColumns != null) { + for (ExpressionColumn column : exceptColumns) { + column.mapColumns(resolver, level, state); + } + } } @Override - public long getPrecision() { - throw DbException.throwInternalError(); + public Expression optimize(SessionLocal session) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, table); } @Override - public int getDisplaySize() { - throw DbException.throwInternalError(); + public void setEvaluatable(TableFilter tableFilter, boolean b) { + throw DbException.getInternalError(toString()); } @Override @@ -83,16 +103,20 @@ public String getSchemaName() { } @Override - public String getSQL() { - if (table == null) { - return "*"; + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (table != null) { + StringUtils.quoteIdentifier(builder, table).append('.'); + } + builder.append('*'); + if (exceptColumns != null) { + writeExpressions(builder.append(" EXCEPT ("), exceptColumns, sqlFlags).append(')'); } - return StringUtils.quoteIdentifier(table) + ".*"; + return builder; } @Override - public void updateAggregate(Session session) { - DbException.throwInternalError(); + public void updateAggregate(SessionLocal session, int stage) { + throw DbException.getInternalError(toString()); } @Override @@ -100,12 +124,12 @@ public boolean isEverything(ExpressionVisitor visitor) { if (visitor.getType() == ExpressionVisitor.QUERY_COMPARABLE) { return true; } - throw DbException.throwInternalError(); + throw DbException.getInternalError(Integer.toString(visitor.getType())); } @Override public int getCost() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java b/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java new file mode 100644 index 0000000000..09dbf84f8c --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AbstractAggregate.java @@ -0,0 +1,324 @@ +/* + * 
Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; + +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.analysis.DataAnalysisOperation; +import org.h2.expression.analysis.WindowFrame; +import org.h2.expression.analysis.WindowFrameBound; +import org.h2.expression.analysis.WindowFrameBoundType; +import org.h2.expression.analysis.WindowFrameExclusion; +import org.h2.expression.analysis.WindowFrameUnits; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * A base class for aggregate functions. + */ +public abstract class AbstractAggregate extends DataAnalysisOperation { + + /** + * is this a DISTINCT aggregate + */ + protected final boolean distinct; + + /** + * The arguments. + */ + protected final Expression[] args; + + /** + * FILTER condition for aggregate + */ + protected Expression filterCondition; + + /** + * The type of the result. + */ + protected TypeInfo type; + + AbstractAggregate(Select select, Expression[] args, boolean distinct) { + super(select); + this.args = args; + this.distinct = distinct; + } + + @Override + public final boolean isAggregate() { + return true; + } + + /** + * Sets the FILTER condition. 
+ * + * @param filterCondition + * FILTER condition + */ + public void setFilterCondition(Expression filterCondition) { + this.filterCondition = filterCondition; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerState) { + for (Expression arg : args) { + arg.mapColumns(resolver, level, innerState); + } + if (filterCondition != null) { + filterCondition.mapColumns(resolver, level, innerState); + } + super.mapColumnsAnalysis(resolver, level, innerState); + } + + @Override + public Expression optimize(SessionLocal session) { + for (int i = 0; i < args.length; i++) { + args[i] = args[i].optimize(session); + } + if (filterCondition != null) { + filterCondition = filterCondition.optimizeCondition(session); + } + return super.optimize(session); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + for (Expression arg : args) { + arg.setEvaluatable(tableFilter, b); + } + if (filterCondition != null) { + filterCondition.setEvaluatable(tableFilter, b); + } + super.setEvaluatable(tableFilter, b); + } + + @Override + protected void getOrderedResultLoop(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { + WindowFrame frame = over.getWindowFrame(); + /* + * With RANGE (default) or GROUPS units and EXCLUDE GROUP or EXCLUDE NO + * OTHERS (default) exclusion all rows in the group have the same value + * of window aggregate function. 
+ */ + boolean grouped = frame == null + || frame.getUnits() != WindowFrameUnits.ROWS && frame.getExclusion().isGroupOrNoOthers(); + if (frame == null) { + aggregateFastPartition(session, result, ordered, rowIdColumn, grouped); + return; + } + boolean variableBounds = frame.isVariableBounds(); + if (variableBounds) { + variableBounds = checkVariableBounds(frame, ordered); + } + if (variableBounds) { + grouped = false; + } else if (frame.getExclusion() == WindowFrameExclusion.EXCLUDE_NO_OTHERS) { + WindowFrameBound following = frame.getFollowing(); + boolean unboundedFollowing = following != null + && following.getType() == WindowFrameBoundType.UNBOUNDED_FOLLOWING; + if (frame.getStarting().getType() == WindowFrameBoundType.UNBOUNDED_PRECEDING) { + if (unboundedFollowing) { + aggregateWholePartition(session, result, ordered, rowIdColumn); + } else { + aggregateFastPartition(session, result, ordered, rowIdColumn, grouped); + } + return; + } + if (unboundedFollowing) { + aggregateFastPartitionInReverse(session, result, ordered, rowIdColumn, grouped); + return; + } + } + // All other types of frames (slow) + int size = ordered.size(); + for (int i = 0; i < size;) { + Object aggregateData = createAggregateData(); + for (Iterator iter = WindowFrame.iterator(over, session, ordered, getOverOrderBySort(), i, + false); iter.hasNext();) { + updateFromExpressions(session, aggregateData, iter.next()); + } + Value r = getAggregatedValue(session, aggregateData); + i = processGroup(result, r, ordered, rowIdColumn, i, size, grouped); + } + } + + private static boolean checkVariableBounds(WindowFrame frame, ArrayList ordered) { + int size = ordered.size(); + WindowFrameBound bound = frame.getStarting(); + if (bound.isVariable()) { + int offset = bound.getExpressionIndex(); + Value v = ordered.get(0)[offset]; + for (int i = 1; i < size; i++) { + if (!v.equals(ordered.get(i)[offset])) { + return true; + } + } + } + bound = frame.getFollowing(); + if (bound != null && 
bound.isVariable()) { + int offset = bound.getExpressionIndex(); + Value v = ordered.get(0)[offset]; + for (int i = 1; i < size; i++) { + if (!v.equals(ordered.get(i)[offset])) { + return true; + } + } + } + return false; + } + + private void aggregateFastPartition(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn, boolean grouped) { + Object aggregateData = createAggregateData(); + int size = ordered.size(); + int lastIncludedRow = -1; + Value r = null; + for (int i = 0; i < size;) { + int newLast = WindowFrame.getEndIndex(over, session, ordered, getOverOrderBySort(), i); + assert newLast >= lastIncludedRow; + if (newLast > lastIncludedRow) { + for (int j = lastIncludedRow + 1; j <= newLast; j++) { + updateFromExpressions(session, aggregateData, ordered.get(j)); + } + lastIncludedRow = newLast; + r = getAggregatedValue(session, aggregateData); + } else if (r == null) { + r = getAggregatedValue(session, aggregateData); + } + i = processGroup(result, r, ordered, rowIdColumn, i, size, grouped); + } + } + + private void aggregateFastPartitionInReverse(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn, boolean grouped) { + Object aggregateData = createAggregateData(); + int firstIncludedRow = ordered.size(); + Value r = null; + for (int i = firstIncludedRow - 1; i >= 0;) { + int newLast = over.getWindowFrame().getStartIndex(session, ordered, getOverOrderBySort(), i); + assert newLast <= firstIncludedRow; + if (newLast < firstIncludedRow) { + for (int j = firstIncludedRow - 1; j >= newLast; j--) { + updateFromExpressions(session, aggregateData, ordered.get(j)); + } + firstIncludedRow = newLast; + r = getAggregatedValue(session, aggregateData); + } else if (r == null) { + r = getAggregatedValue(session, aggregateData); + } + Value[] lastRowInGroup = ordered.get(i), currentRowInGroup = lastRowInGroup; + do { + result.put(currentRowInGroup[rowIdColumn].getInt(), r); + } while (--i >= 0 && grouped + && 
overOrderBySort.compare(lastRowInGroup, currentRowInGroup = ordered.get(i)) == 0); + } + } + + private int processGroup(HashMap result, Value r, ArrayList ordered, + int rowIdColumn, int i, int size, boolean grouped) { + Value[] firstRowInGroup = ordered.get(i), currentRowInGroup = firstRowInGroup; + do { + result.put(currentRowInGroup[rowIdColumn].getInt(), r); + } while (++i < size && grouped + && overOrderBySort.compare(firstRowInGroup, currentRowInGroup = ordered.get(i)) == 0); + return i; + } + + private void aggregateWholePartition(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { + // Aggregate values from the whole partition + Object aggregateData = createAggregateData(); + for (Value[] row : ordered) { + updateFromExpressions(session, aggregateData, row); + } + // All rows have the same value + Value value = getAggregatedValue(session, aggregateData); + for (Value[] row : ordered) { + result.put(row[rowIdColumn].getInt(), value); + } + } + + /** + * Updates the provided aggregate data from the remembered expressions. + * + * @param session + * the session + * @param aggregateData + * aggregate data + * @param array + * values of expressions + */ + protected abstract void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array); + + @Override + protected void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId) { + if (filterCondition == null || filterCondition.getBooleanValue(session)) { + if (over != null) { + if (over.isOrdered()) { + updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); + } else { + updateAggregate(session, getWindowData(session, groupData, false)); + } + } else { + updateAggregate(session, getGroupData(groupData, false)); + } + } else if (over != null && over.isOrdered()) { + updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); + } + } + + /** + * Updates an aggregate value. 
+ * + * @param session + * the session + * @param aggregateData + * aggregate data + */ + protected abstract void updateAggregate(SessionLocal session, Object aggregateData); + + @Override + protected void updateGroupAggregates(SessionLocal session, int stage) { + if (filterCondition != null) { + filterCondition.updateAggregate(session, stage); + } + super.updateGroupAggregates(session, stage); + } + + @Override + protected StringBuilder appendTailConditions(StringBuilder builder, int sqlFlags, boolean forceOrderBy) { + if (filterCondition != null) { + builder.append(" FILTER (WHERE "); + filterCondition.getUnenclosedSQL(builder, sqlFlags).append(')'); + } + return super.appendTailConditions(builder, sqlFlags, forceOrderBy); + } + + @Override + public int getSubexpressionCount() { + return args.length; + } + + @Override + public Expression getSubexpression(int index) { + return args[index]; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/Aggregate.java b/h2/src/main/org/h2/expression/aggregate/Aggregate.java new file mode 100644 index 0000000000..ac8082c354 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/Aggregate.java @@ -0,0 +1,1345 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.io.ByteArrayOutputStream; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.h2.api.ErrorCode; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ExpressionWithFlags; +import org.h2.expression.ValueExpression; +import org.h2.expression.aggregate.AggregateDataCollecting.NullCollectionMode; +import org.h2.expression.analysis.Window; +import org.h2.expression.function.BitFunction; +import org.h2.expression.function.JsonConstructorFunction; +import org.h2.index.Cursor; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.mvstore.db.MVSpatialIndex; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.util.json.JsonConstructorUtils; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; +import org.h2.value.ValueJson; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueRow; +import org.h2.value.ValueVarchar; + +/** + * Implements 
the integrated aggregate functions, such as COUNT, MAX, SUM. + */ +public class Aggregate extends AbstractAggregate implements ExpressionWithFlags { + + /** + * The additional result precision in decimal digits for a SUM aggregate function. + */ + private static final int ADDITIONAL_SUM_PRECISION = 10; + + /** + * The additional precision and scale in decimal digits for an AVG aggregate function. + */ + private static final int ADDITIONAL_AVG_SCALE = 10; + + private static final HashMap AGGREGATES = new HashMap<>(128); + + private final AggregateType aggregateType; + + private ArrayList orderByList; + private SortOrder orderBySort; + + private Object extraArguments; + + private int flags; + + /** + * Create a new aggregate object. + * + * @param aggregateType + * the aggregate type + * @param args + * the aggregated expressions + * @param select + * the select statement + * @param distinct + * if distinct is used + */ + public Aggregate(AggregateType aggregateType, Expression[] args, Select select, boolean distinct) { + super(select, args, distinct); + if (distinct && aggregateType == AggregateType.COUNT_ALL) { + throw DbException.getInternalError(); + } + this.aggregateType = aggregateType; + } + + static { + /* + * Update initial size of AGGREGATES after editing the following list. 
+ */ + addAggregate("COUNT", AggregateType.COUNT); + addAggregate("SUM", AggregateType.SUM); + addAggregate("MIN", AggregateType.MIN); + addAggregate("MAX", AggregateType.MAX); + addAggregate("AVG", AggregateType.AVG); + addAggregate("LISTAGG", AggregateType.LISTAGG); + // MySQL compatibility: group_concat(expression, delimiter) + addAggregate("GROUP_CONCAT", AggregateType.LISTAGG); + // PostgreSQL compatibility: string_agg(expression, delimiter) + addAggregate("STRING_AGG", AggregateType.LISTAGG); + addAggregate("STDDEV_SAMP", AggregateType.STDDEV_SAMP); + addAggregate("STDDEV", AggregateType.STDDEV_SAMP); + addAggregate("STDDEV_POP", AggregateType.STDDEV_POP); + addAggregate("STDDEVP", AggregateType.STDDEV_POP); + addAggregate("VAR_POP", AggregateType.VAR_POP); + addAggregate("VARP", AggregateType.VAR_POP); + addAggregate("VAR_SAMP", AggregateType.VAR_SAMP); + addAggregate("VAR", AggregateType.VAR_SAMP); + addAggregate("VARIANCE", AggregateType.VAR_SAMP); + addAggregate("ANY", AggregateType.ANY); + addAggregate("SOME", AggregateType.ANY); + // PostgreSQL compatibility + addAggregate("BOOL_OR", AggregateType.ANY); + addAggregate("EVERY", AggregateType.EVERY); + // PostgreSQL compatibility + addAggregate("BOOL_AND", AggregateType.EVERY); + addAggregate("HISTOGRAM", AggregateType.HISTOGRAM); + addAggregate("BIT_AND_AGG", AggregateType.BIT_AND_AGG); + addAggregate("BIT_AND", AggregateType.BIT_AND_AGG); + addAggregate("BIT_OR_AGG", AggregateType.BIT_OR_AGG); + addAggregate("BIT_OR", AggregateType.BIT_OR_AGG); + addAggregate("BIT_XOR_AGG", AggregateType.BIT_XOR_AGG); + addAggregate("BIT_NAND_AGG", AggregateType.BIT_NAND_AGG); + addAggregate("BIT_NOR_AGG", AggregateType.BIT_NOR_AGG); + addAggregate("BIT_XNOR_AGG", AggregateType.BIT_XNOR_AGG); + + addAggregate("COVAR_POP", AggregateType.COVAR_POP); + addAggregate("COVAR_SAMP", AggregateType.COVAR_SAMP); + addAggregate("CORR", AggregateType.CORR); + addAggregate("REGR_SLOPE", AggregateType.REGR_SLOPE); + 
addAggregate("REGR_INTERCEPT", AggregateType.REGR_INTERCEPT); + addAggregate("REGR_COUNT", AggregateType.REGR_COUNT); + addAggregate("REGR_R2", AggregateType.REGR_R2); + addAggregate("REGR_AVGX", AggregateType.REGR_AVGX); + addAggregate("REGR_AVGY", AggregateType.REGR_AVGY); + addAggregate("REGR_SXX", AggregateType.REGR_SXX); + addAggregate("REGR_SYY", AggregateType.REGR_SYY); + addAggregate("REGR_SXY", AggregateType.REGR_SXY); + + addAggregate("RANK", AggregateType.RANK); + addAggregate("DENSE_RANK", AggregateType.DENSE_RANK); + addAggregate("PERCENT_RANK", AggregateType.PERCENT_RANK); + addAggregate("CUME_DIST", AggregateType.CUME_DIST); + + addAggregate("PERCENTILE_CONT", AggregateType.PERCENTILE_CONT); + addAggregate("PERCENTILE_DISC", AggregateType.PERCENTILE_DISC); + addAggregate("MEDIAN", AggregateType.MEDIAN); + + addAggregate("ARRAY_AGG", AggregateType.ARRAY_AGG); + addAggregate("MODE", AggregateType.MODE); + // Oracle compatibility + addAggregate("STATS_MODE", AggregateType.MODE); + addAggregate("ENVELOPE", AggregateType.ENVELOPE); + + addAggregate("JSON_OBJECTAGG", AggregateType.JSON_OBJECTAGG); + addAggregate("JSON_ARRAYAGG", AggregateType.JSON_ARRAYAGG); + } + + private static void addAggregate(String name, AggregateType type) { + AGGREGATES.put(name, type); + } + + /** + * Get the aggregate type for this name, or -1 if no aggregate has been + * found. + * + * @param name + * the aggregate function name + * @return null if no aggregate function has been found, or the aggregate + * type + */ + public static AggregateType getAggregateType(String name) { + return AGGREGATES.get(name); + } + + /** + * Set the order for ARRAY_AGG() or GROUP_CONCAT() aggregate. + * + * @param orderByList + * the order by list + */ + public void setOrderByList(ArrayList orderByList) { + this.orderByList = orderByList; + } + + /** + * Returns the type of this aggregate. 
+ * + * @return the type of this aggregate + */ + public AggregateType getAggregateType() { + return aggregateType; + } + + /** + * Sets the additional arguments. + * + * @param extraArguments the additional arguments + */ + public void setExtraArguments(Object extraArguments) { + this.extraArguments = extraArguments; + } + + /** + * Returns the additional arguments. + * + * @return the additional arguments + */ + public Object getExtraArguments() { + return extraArguments; + } + + @Override + public void setFlags(int flags) { + this.flags = flags; + } + + @Override + public int getFlags() { + return flags; + } + + private void sortWithOrderBy(Value[] array) { + final SortOrder sortOrder = orderBySort; + Arrays.sort(array, + sortOrder != null + ? (v1, v2) -> sortOrder.compare(((ValueRow) v1).getList(), ((ValueRow) v2).getList()) + : select.getSession().getDatabase().getCompareMode()); + } + + @Override + protected void updateAggregate(SessionLocal session, Object aggregateData) { + AggregateData data = (AggregateData) aggregateData; + Value v = args.length == 0 ? 
null : args[0].getValue(session); + updateData(session, data, v, null); + } + + private void updateData(SessionLocal session, AggregateData data, Value v, Value[] remembered) { + switch (aggregateType) { + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + case REGR_SXY: { + Value x; + if (v == ValueNull.INSTANCE || (x = getSecondValue(session, remembered)) == ValueNull.INSTANCE) { + return; + } + ((AggregateDataBinarySet) data).add(session, v, x); + return; + } + case REGR_COUNT: + case REGR_AVGY: + case REGR_SYY: + if (v == ValueNull.INSTANCE || getSecondValue(session, remembered) == ValueNull.INSTANCE) { + return; + } + break; + case REGR_AVGX: + case REGR_SXX: + if (v == ValueNull.INSTANCE || (v = getSecondValue(session, remembered)) == ValueNull.INSTANCE) { + return; + } + break; + case LISTAGG: + if (v == ValueNull.INSTANCE) { + return; + } + v = updateCollecting(session, v.convertTo(TypeInfo.TYPE_VARCHAR), remembered); + break; + case ARRAY_AGG: + v = updateCollecting(session, v, remembered); + break; + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + case CUME_DIST: { + int count = args.length; + Value[] a = new Value[count]; + for (int i = 0; i < count; i++) { + a[i] = remembered != null ? remembered[i] : args[i].getValue(session); + } + ((AggregateDataCollecting) data).setSharedArgument(ValueRow.get(a)); + a = new Value[count]; + for (int i = 0; i < count; i++) { + a[i] = remembered != null ? remembered[count + i] : orderByList.get(i).expression.getValue(session); + } + v = ValueRow.get(a); + break; + } + case PERCENTILE_CONT: + case PERCENTILE_DISC: + ((AggregateDataCollecting) data).setSharedArgument(v); + v = remembered != null ? remembered[1] : orderByList.get(0).expression.getValue(session); + break; + case MODE: + v = remembered != null ? 
remembered[0] : orderByList.get(0).expression.getValue(session); + break; + case JSON_ARRAYAGG: + v = updateCollecting(session, v, remembered); + break; + case JSON_OBJECTAGG: { + Value key = v; + Value value = getSecondValue(session, remembered); + if (key == ValueNull.INSTANCE) { + throw DbException.getInvalidValueException("JSON_OBJECTAGG key", "NULL"); + } + v = ValueRow.get(new Value[] { key, value }); + break; + } + default: + // Use argument as is + } + data.add(session, v); + } + + private Value getSecondValue(SessionLocal session, Value[] remembered) { + return remembered != null ? remembered[1] : args[1].getValue(session); + } + + @Override + protected void updateGroupAggregates(SessionLocal session, int stage) { + super.updateGroupAggregates(session, stage); + for (Expression arg : args) { + arg.updateAggregate(session, stage); + } + if (orderByList != null) { + for (QueryOrderBy orderBy : orderByList) { + orderBy.expression.updateAggregate(session, stage); + } + } + } + + private Value updateCollecting(SessionLocal session, Value v, Value[] remembered) { + if (orderByList != null) { + int size = orderByList.size(); + Value[] row = new Value[1 + size]; + row[0] = v; + if (remembered == null) { + for (int i = 0; i < size; i++) { + QueryOrderBy o = orderByList.get(i); + row[i + 1] = o.expression.getValue(session); + } + } else { + System.arraycopy(remembered, 1, row, 1, size); + } + v = ValueRow.get(row); + } + return v; + } + + @Override + protected int getNumExpressions() { + int n = args.length; + if (orderByList != null) { + n += orderByList.size(); + } + if (filterCondition != null) { + n++; + } + return n; + } + + @Override + protected void rememberExpressions(SessionLocal session, Value[] array) { + int offset = 0; + for (Expression arg : args) { + array[offset++] = arg.getValue(session); + } + if (orderByList != null) { + for (QueryOrderBy o : orderByList) { + array[offset++] = o.expression.getValue(session); + } + } + if (filterCondition != null) 
{ + array[offset] = ValueBoolean.get(filterCondition.getBooleanValue(session)); + } + } + + @Override + protected void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array) { + if (filterCondition == null || array[getNumExpressions() - 1].isTrue()) { + AggregateData data = (AggregateData) aggregateData; + Value v = args.length == 0 ? null : array[0]; + updateData(session, data, v, array); + } + } + + @Override + protected Object createAggregateData() { + switch (aggregateType) { + case COUNT_ALL: + case REGR_COUNT: + return new AggregateDataCount(true); + case COUNT: + if (!distinct) { + return new AggregateDataCount(false); + } + break; + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + case CUME_DIST: + case PERCENTILE_CONT: + case PERCENTILE_DISC: + case MEDIAN: + break; + case SUM: + case BIT_XOR_AGG: + case BIT_XNOR_AGG: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case MIN: + case MAX: + case BIT_AND_AGG: + case BIT_OR_AGG: + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case ANY: + case EVERY: + return new AggregateDataDefault(aggregateType, type); + case AVG: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case REGR_AVGX: + case REGR_AVGY: + return new AggregateDataAvg(type); + case STDDEV_POP: + case STDDEV_SAMP: + case VAR_POP: + case VAR_SAMP: + if (distinct) { + break; + } + //$FALL-THROUGH$ + case REGR_SXX: + case REGR_SYY: + return new AggregateDataStdVar(aggregateType); + case HISTOGRAM: + return new AggregateDataDistinctWithCounts(false, Constants.SELECTIVITY_DISTINCT_COUNT); + case COVAR_POP: + case COVAR_SAMP: + case REGR_SXY: + return new AggregateDataCovar(aggregateType); + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + return new AggregateDataCorr(aggregateType); + case LISTAGG: // NULL values are excluded by Aggregate + case ARRAY_AGG: + return new AggregateDataCollecting(distinct, orderByList != null, NullCollectionMode.USED_OR_IMPOSSIBLE); + case MODE: + return new 
AggregateDataDistinctWithCounts(true, Integer.MAX_VALUE); + case ENVELOPE: + return new AggregateDataEnvelope(); + case JSON_ARRAYAGG: + return new AggregateDataCollecting(distinct, orderByList != null, + (flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0 ? NullCollectionMode.EXCLUDED + : NullCollectionMode.USED_OR_IMPOSSIBLE); + case JSON_OBJECTAGG: + // ROW(key, value) are collected, so NULL values can't be passed + return new AggregateDataCollecting(distinct, false, NullCollectionMode.USED_OR_IMPOSSIBLE); + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return new AggregateDataCollecting(distinct, false, NullCollectionMode.IGNORED); + } + + @Override + public Value getValue(SessionLocal session) { + return select.isQuickAggregateQuery() ? getValueQuick(session) : super.getValue(session); + } + + private Value getValueQuick(SessionLocal session) { + switch (aggregateType) { + case COUNT: + case COUNT_ALL: + Table table = select.getTopTableFilter().getTable(); + return ValueBigint.get(table.getRowCount(session)); + case MIN: + case MAX: { + boolean first = aggregateType == AggregateType.MIN; + Index index = getMinMaxColumnIndex(); + int sortType = index.getIndexColumns()[0].sortType; + if ((sortType & SortOrder.DESCENDING) != 0) { + first = !first; + } + Cursor cursor = index.findFirstOrLast(session, first); + SearchRow row = cursor.getSearchRow(); + Value v; + if (row == null) { + v = ValueNull.INSTANCE; + } else { + v = row.getValue(index.getColumns()[0].getColumnId()); + } + return v; + } + case PERCENTILE_CONT: + case PERCENTILE_DISC: { + Value v = args[0].getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + BigDecimal arg = v.getBigDecimal(); + if (arg.signum() >= 0 && arg.compareTo(BigDecimal.ONE) <= 0) { + return Percentile.getFromIndex(session, orderByList.get(0).expression, type.getValueType(), + orderByList, arg, aggregateType == AggregateType.PERCENTILE_CONT); + } else { + throw 
DbException.getInvalidValueException(aggregateType == AggregateType.PERCENTILE_CONT ? + "PERCENTILE_CONT argument" : "PERCENTILE_DISC argument", arg); + } + } + case MEDIAN: + return Percentile.getFromIndex(session, args[0], type.getValueType(), orderByList, Percentile.HALF, true); + case ENVELOPE: + return ((MVSpatialIndex) AggregateDataEnvelope.getGeometryColumnIndex(args[0])).getBounds(session); + default: + throw DbException.getInternalError("type=" + aggregateType); + } + } + + @Override + public Value getAggregatedValue(SessionLocal session, Object aggregateData) { + AggregateData data = (AggregateData) aggregateData; + if (data == null) { + data = (AggregateData) createAggregateData(); + } + switch (aggregateType) { + case COUNT: + if (distinct) { + return ValueBigint.get(((AggregateDataCollecting) data).getCount()); + } + break; + case SUM: + case BIT_XOR_AGG: + case BIT_XNOR_AGG: + if (distinct) { + AggregateDataCollecting c = ((AggregateDataCollecting) data); + if (c.getCount() == 0) { + return ValueNull.INSTANCE; + } + return collect(session, c, new AggregateDataDefault(aggregateType, type)); + } + break; + case AVG: + if (distinct) { + AggregateDataCollecting c = ((AggregateDataCollecting) data); + if (c.getCount() == 0) { + return ValueNull.INSTANCE; + } + return collect(session, c, new AggregateDataAvg(type)); + } + break; + case STDDEV_POP: + case STDDEV_SAMP: + case VAR_POP: + case VAR_SAMP: + if (distinct) { + AggregateDataCollecting c = ((AggregateDataCollecting) data); + if (c.getCount() == 0) { + return ValueNull.INSTANCE; + } + return collect(session, c, new AggregateDataStdVar(aggregateType)); + } + break; + case HISTOGRAM: + return getHistogram(session, data); + case LISTAGG: + return getListagg(session, data); + case ARRAY_AGG: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + if (orderByList != null || distinct) { + sortWithOrderBy(array); + } + if (orderByList != 
null) { + for (int i = 0; i < array.length; i++) { + array[i] = ((ValueRow) array[i]).getList()[0]; + } + } + return ValueArray.get((TypeInfo) type.getExtTypeInfo(), array, session); + } + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + case CUME_DIST: + return getHypotheticalSet(session, data); + case PERCENTILE_CONT: + case PERCENTILE_DISC: { + AggregateDataCollecting collectingData = (AggregateDataCollecting) data; + Value[] array = collectingData.getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + Value v = collectingData.getSharedArgument(); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + BigDecimal arg = v.getBigDecimal(); + if (arg.signum() >= 0 && arg.compareTo(BigDecimal.ONE) <= 0) { + return Percentile.getValue(session, array, type.getValueType(), orderByList, arg, + aggregateType == AggregateType.PERCENTILE_CONT); + } else { + throw DbException.getInvalidValueException(aggregateType == AggregateType.PERCENTILE_CONT ? + "PERCENTILE_CONT argument" : "PERCENTILE_DISC argument", arg); + } + } + case MEDIAN: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + return Percentile.getValue(session, array, type.getValueType(), orderByList, Percentile.HALF, true); + } + case MODE: + return getMode(session, data); + case JSON_ARRAYAGG: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + if (orderByList != null) { + sortWithOrderBy(array); + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('['); + for (Value v : array) { + if (orderByList != null) { + v = ((ValueRow) v).getList()[0]; + } + JsonConstructorUtils.jsonArrayAppend(baos, v != ValueNull.INSTANCE ? 
v : ValueJson.NULL, flags); + } + baos.write(']'); + return ValueJson.getInternal(baos.toByteArray()); + } + case JSON_OBJECTAGG: { + Value[] array = ((AggregateDataCollecting) data).getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('{'); + for (Value v : array) { + Value[] row = ((ValueRow) v).getList(); + String key = row[0].getString(); + if (key == null) { + throw DbException.getInvalidValueException("JSON_OBJECTAGG key", "NULL"); + } + Value value = row[1]; + if (value == ValueNull.INSTANCE) { + if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) { + continue; + } + value = ValueJson.NULL; + } + JsonConstructorUtils.jsonObjectAppend(baos, key, value); + } + return JsonConstructorUtils.jsonObjectFinish(baos, flags); + } + default: + // Avoid compiler warning + } + return data.getValue(session); + } + + private static Value collect(SessionLocal session, AggregateDataCollecting c, AggregateData d) { + for (Value v : c) { + d.add(session, v); + } + return d.getValue(session); + } + + private Value getHypotheticalSet(SessionLocal session, AggregateData data) { + AggregateDataCollecting collectingData = (AggregateDataCollecting) data; + Value arg = collectingData.getSharedArgument(); + if (arg == null) { + switch (aggregateType) { + case RANK: + case DENSE_RANK: + return ValueInteger.get(1); + case PERCENT_RANK: + return ValueDouble.ZERO; + case CUME_DIST: + return ValueDouble.ONE; + default: + throw DbException.getUnsupportedException("aggregateType=" + aggregateType); + } + } + collectingData.add(session, arg); + Value[] array = collectingData.getArray(); + Comparator sort = orderBySort.getRowValueComparator(); + Arrays.sort(array, sort); + return aggregateType == AggregateType.CUME_DIST ? 
getCumeDist(array, arg, sort) : getRank(array, arg, sort); + } + + private Value getRank(Value[] ordered, Value arg, Comparator sort) { + int size = ordered.length; + int number = 0; + for (int i = 0; i < size; i++) { + Value row = ordered[i]; + if (i == 0) { + number = 1; + } else if (sort.compare(ordered[i - 1], row) != 0) { + if (aggregateType == AggregateType.DENSE_RANK) { + number++; + } else { + number = i + 1; + } + } + Value v; + if (aggregateType == AggregateType.PERCENT_RANK) { + int nm = number - 1; + v = nm == 0 ? ValueDouble.ZERO : ValueDouble.get((double) nm / (size - 1)); + } else { + v = ValueBigint.get(number); + } + if (sort.compare(row, arg) == 0) { + return v; + } + } + throw DbException.getInternalError(); + } + + private static Value getCumeDist(Value[] ordered, Value arg, Comparator sort) { + int size = ordered.length; + for (int start = 0; start < size;) { + Value array = ordered[start]; + int end = start + 1; + while (end < size && sort.compare(array, ordered[end]) == 0) { + end++; + } + ValueDouble v = ValueDouble.get((double) end / size); + for (int i = start; i < end; i++) { + if (sort.compare(ordered[i], arg) == 0) { + return v; + } + } + start = end; + } + throw DbException.getInternalError(); + } + + private Value getListagg(SessionLocal session, AggregateData data) { + AggregateDataCollecting collectingData = (AggregateDataCollecting) data; + Value[] array = collectingData.getArray(); + if (array == null) { + return ValueNull.INSTANCE; + } + if (array.length == 1) { + Value v = array[0]; + if (orderByList != null) { + v = ((ValueRow) v).getList()[0]; + } + return v.convertTo(Value.VARCHAR, session); + } + if (orderByList != null || distinct) { + sortWithOrderBy(array); + } + ListaggArguments arguments = (ListaggArguments) extraArguments; + String separator = arguments.getEffectiveSeparator(); + return ValueVarchar + .get((arguments.getOnOverflowTruncate() + ? 
    /**
     * Concatenates all items with the separator, throwing a "value too long"
     * exception as soon as the intermediate result exceeds the maximum string
     * length (LISTAGG ... ON OVERFLOW ERROR behavior).
     *
     * @param array the collected values (at least two)
     * @param separator the separator between items
     * @return the concatenation of all items
     */
    private StringBuilder getListaggError(Value[] array, String separator) {
        StringBuilder builder = new StringBuilder(getListaggItem(array[0]));
        for (int i = 1, count = array.length; i < count; i++) {
            builder.append(separator).append(getListaggItem(array[i]));
            if (builder.length() > Constants.MAX_STRING_LENGTH) {
                // Only a prefix of the overlong value is included in the error
                throw DbException.getValueTooLongException("CHARACTER VARYING", builder.substring(0, 81), -1L);
            }
        }
        return builder;
    }

    /**
     * Concatenates the items with the separator, truncating the result to the
     * maximum string length and appending the truncation filter, optionally
     * followed by the count of omitted items (LISTAGG ... ON OVERFLOW TRUNCATE
     * behavior).
     *
     * @param array the collected values (at least two)
     * @param separator the separator between items
     * @param filter the filler text appended when items are omitted
     * @param withoutCount whether the count of omitted items is suppressed
     * @return the possibly truncated concatenation
     */
    private StringBuilder getListaggTruncate(Value[] array, String separator, String filter,
            boolean withoutCount) {
        int count = array.length;
        // Item strings are remembered so their lengths can be subtracted
        // again when backtracking
        String[] strings = new String[count];
        String s = getListaggItem(array[0]);
        strings[0] = s;
        StringBuilder builder = new StringBuilder(s);
        loop: for (int i = 1; i < count; i++) {
            builder.append(separator).append(strings[i] = s = getListaggItem(array[i]));
            int length = builder.length();
            if (length > Constants.MAX_STRING_LENGTH) {
                // Too long: drop items from the end until the result plus the
                // filter (and optional omitted-item count) fits
                for (; i > 0; i--) {
                    length -= strings[i].length();
                    builder.setLength(length);
                    builder.append(filter);
                    if (!withoutCount) {
                        builder.append('(').append(count - i).append(')');
                    }
                    if (builder.length() <= Constants.MAX_STRING_LENGTH) {
                        break loop;
                    }
                    // Also remove the separator preceding the dropped item
                    length -= separator.length();
                }
                // Not even a single item fits: emit only the filter with the
                // total number of items
                builder.setLength(0);
                builder.append(filter).append('(').append(count).append(')');
                break;
            }
        }
        return builder;
    }

    /**
     * Extracts the string representation of a collected LISTAGG item,
     * unwrapping the ordering row when an ORDER BY clause is present.
     *
     * @param v the collected value
     * @return the string to concatenate
     */
    private String getListaggItem(Value v) {
        if (orderByList != null) {
            v = ((ValueRow) v).getList()[0];
        }
        return v.getString();
    }
ValueRow[] values = new ValueRow[distinctValues.size()]; + int i = 0; + for (Entry entry : distinctValues.entrySet()) { + LongDataCounter d = entry.getValue(); + values[i] = ValueRow.get(rowType, new Value[] { entry.getKey(), ValueBigint.get(d.count) }); + i++; + } + Database db = session.getDatabase(); + CompareMode compareMode = db.getCompareMode(); + Arrays.sort(values, (v1, v2) -> v1.getList()[0].compareTo(v2.getList()[0], session, compareMode)); + return ValueArray.get(rowType, values, session); + } + + private Value getMode(SessionLocal session, AggregateData data) { + Value v = ValueNull.INSTANCE; + TreeMap distinctValues = ((AggregateDataDistinctWithCounts) data).getValues(); + if (distinctValues == null) { + return v; + } + long count = 0L; + if (orderByList != null) { + boolean desc = (orderByList.get(0).sortType & SortOrder.DESCENDING) != 0; + for (Entry entry : distinctValues.entrySet()) { + long c = entry.getValue().count; + if (c > count) { + v = entry.getKey(); + count = c; + } else if (c == count) { + Value v2 = entry.getKey(); + int cmp = session.compareTypeSafe(v, v2); + if (desc) { + if (cmp >= 0) { + continue; + } + } else if (cmp <= 0) { + continue; + } + v = v2; + } + } + } else { + for (Entry entry : distinctValues.entrySet()) { + long c = entry.getValue().count; + if (c > count) { + v = entry.getKey(); + count = c; + } + } + } + return v; + } + + @Override + public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerState) { + if (orderByList != null) { + for (QueryOrderBy o : orderByList) { + o.expression.mapColumns(resolver, level, innerState); + } + } + super.mapColumnsAnalysis(resolver, level, innerState); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + if (args.length == 1) { + type = args[0].getType(); + } + if (orderByList != null) { + int offset; + switch (aggregateType) { + case ARRAY_AGG: + case LISTAGG: + case JSON_ARRAYAGG: + offset = 1; + break; + default: + 
offset = 0; + } + for (Iterator i = orderByList.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); + Expression e = o.expression.optimize(session); + if (offset != 0 && e.isConstant()) { + i.remove(); + } else { + o.expression = e; + } + } + if (orderByList.isEmpty()) { + orderByList = null; + } else { + orderBySort = createOrder(session, orderByList, offset); + } + } + switch (aggregateType) { + case LISTAGG: + type = TypeInfo.TYPE_VARCHAR; + break; + case COUNT: + if (args[0].isConstant()) { + if (args[0].getValue(session) == ValueNull.INSTANCE) { + return ValueExpression.get(ValueBigint.get(0L)); + } + if (!distinct) { + Aggregate aggregate = new Aggregate(AggregateType.COUNT_ALL, new Expression[0], select, false); + aggregate.setFilterCondition(filterCondition); + aggregate.setOverCondition(over); + return aggregate.optimize(session); + } + } + //$FALL-THROUGH$ + case COUNT_ALL: + case REGR_COUNT: + type = TypeInfo.TYPE_BIGINT; + break; + case HISTOGRAM: { + LinkedHashMap fields = new LinkedHashMap<>(4); + fields.put("VALUE", type); + fields.put("COUNT", TypeInfo.TYPE_BIGINT); + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, + TypeInfo.getTypeInfo(Value.ROW, -1, -1, new ExtTypeInfoRow(fields))); + break; + } + case SUM: + if ((type = getSumType(type)) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); + } + break; + case AVG: + if ((type = getAvgType(type)) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); + } + break; + case MIN: + case MAX: + break; + case STDDEV_POP: + case STDDEV_SAMP: + case VAR_POP: + case VAR_SAMP: + case COVAR_POP: + case COVAR_SAMP: + case CORR: + case REGR_SLOPE: + case REGR_INTERCEPT: + case REGR_R2: + case REGR_SXX: + case REGR_SYY: + case REGR_SXY: + type = TypeInfo.TYPE_DOUBLE; + break; + case REGR_AVGX: + if ((type = getAvgType(args[1].getType())) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); 
+ } + break; + case REGR_AVGY: + if ((type = getAvgType(args[0].getType())) == null) { + throw DbException.get(ErrorCode.SUM_OR_AVG_ON_WRONG_DATATYPE_1, getTraceSQL()); + } + break; + case RANK: + case DENSE_RANK: + type = TypeInfo.TYPE_BIGINT; + break; + case PERCENT_RANK: + case CUME_DIST: + type = TypeInfo.TYPE_DOUBLE; + break; + case PERCENTILE_CONT: + type = orderByList.get(0).expression.getType(); + //$FALL-THROUGH$ + case MEDIAN: + switch (type.getValueType()) { + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: + type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT; + break; + } + break; + case PERCENTILE_DISC: + case MODE: + type = orderByList.get(0).expression.getType(); + break; + case EVERY: + case ANY: + type = TypeInfo.TYPE_BOOLEAN; + break; + case BIT_AND_AGG: + case BIT_OR_AGG: + case BIT_XOR_AGG: + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case BIT_XNOR_AGG: + BitFunction.checkArgType(args[0]); + break; + case ARRAY_AGG: + type = TypeInfo.getTypeInfo(Value.ARRAY, -1, 0, args[0].getType()); + break; + case ENVELOPE: + type = TypeInfo.TYPE_GEOMETRY; + break; + case JSON_OBJECTAGG: + case JSON_ARRAYAGG: + type = TypeInfo.TYPE_JSON; + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return this; + } + + private static TypeInfo getSumType(TypeInfo type) { + int valueType = type.getValueType(); + switch (valueType) { + case Value.BOOLEAN: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return TypeInfo.TYPE_BIGINT; + case Value.BIGINT: + return TypeInfo.getTypeInfo(Value.NUMERIC, ValueBigint.DECIMAL_PRECISION + ADDITIONAL_SUM_PRECISION, -1, + null); + case Value.NUMERIC: + return TypeInfo.getTypeInfo(Value.NUMERIC, type.getPrecision() + ADDITIONAL_SUM_PRECISION, + type.getDeclaredScale(), null); + case Value.REAL: + return TypeInfo.TYPE_DOUBLE; + case Value.DOUBLE: + return 
    /**
     * Returns the result type of AVG for the given argument type, or
     * {@code null} when AVG is not defined for that type.
     *
     * Numeric precision/scale is widened to keep fractional digits of the
     * average; interval types are widened to the interval type that can
     * represent the remainder of the division.
     *
     * @param type the argument data type
     * @return the result data type, or {@code null} if unsupported
     */
    private static TypeInfo getAvgType(TypeInfo type) {
        switch (type.getValueType()) {
        case Value.TINYINT:
        case Value.SMALLINT:
        case Value.INTEGER:
        case Value.REAL:
            return TypeInfo.TYPE_DOUBLE;
        case Value.BIGINT:
            return TypeInfo.getTypeInfo(Value.NUMERIC, ValueBigint.DECIMAL_PRECISION + ADDITIONAL_AVG_SCALE,
                    ADDITIONAL_AVG_SCALE, null);
        case Value.NUMERIC: {
            // Add extra scale, but stay within the maximum scale and precision
            int additionalScale = Math.min(ValueNumeric.MAXIMUM_SCALE - type.getScale(),
                    Math.min(Constants.MAX_NUMERIC_PRECISION - (int) type.getPrecision(), ADDITIONAL_AVG_SCALE));
            return TypeInfo.getTypeInfo(Value.NUMERIC, type.getPrecision() + additionalScale,
                    type.getScale() + additionalScale, null);
        }
        case Value.DOUBLE:
            return TypeInfo.getTypeInfo(Value.DECFLOAT, ValueDouble.DECIMAL_PRECISION + ADDITIONAL_AVG_SCALE, -1, //
                    null);
        case Value.DECFLOAT:
            return TypeInfo.getTypeInfo(Value.DECFLOAT, type.getPrecision() + ADDITIONAL_AVG_SCALE, -1, null);
        case Value.INTERVAL_YEAR:
        case Value.INTERVAL_YEAR_TO_MONTH:
            return TypeInfo.getTypeInfo(Value.INTERVAL_YEAR_TO_MONTH, type.getDeclaredPrecision(), 0, null);
        case Value.INTERVAL_MONTH:
            return TypeInfo.getTypeInfo(Value.INTERVAL_MONTH, type.getDeclaredPrecision(), 0, null);
        case Value.INTERVAL_DAY:
        case Value.INTERVAL_DAY_TO_HOUR:
        case Value.INTERVAL_DAY_TO_MINUTE:
        case Value.INTERVAL_DAY_TO_SECOND:
            return TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_SECOND, type.getDeclaredPrecision(),
                    ValueInterval.MAXIMUM_SCALE, null);
        case Value.INTERVAL_HOUR:
        case Value.INTERVAL_HOUR_TO_MINUTE:
        case Value.INTERVAL_HOUR_TO_SECOND:
            return TypeInfo.getTypeInfo(Value.INTERVAL_HOUR_TO_SECOND, type.getDeclaredPrecision(),
                    ValueInterval.MAXIMUM_SCALE, null);
        case Value.INTERVAL_MINUTE:
        case Value.INTERVAL_MINUTE_TO_SECOND:
            return TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE_TO_SECOND, type.getDeclaredPrecision(),
                    ValueInterval.MAXIMUM_SCALE, null);
        case Value.INTERVAL_SECOND:
            return TypeInfo.getTypeInfo(Value.INTERVAL_SECOND, type.getDeclaredPrecision(), //
                    ValueInterval.MAXIMUM_SCALE, null);
        default:
            return null;
        }
    }

    /**
     * Marks expressions as evaluatable, including the expressions of the
     * ORDER BY clause of an ordered aggregate.
     */
    @Override
    public void setEvaluatable(TableFilter tableFilter, boolean b) {
        if (orderByList != null) {
            for (QueryOrderBy o : orderByList) {
                o.expression.setEvaluatable(tableFilter, b);
            }
        }
        super.setEvaluatable(tableFilter, b);
    }

    /**
     * Builds the SQL representation of this aggregate. Aggregates with a
     * special syntax (LISTAGG, ARRAY_AGG, JSON aggregates) are delegated to
     * dedicated methods; the generic form is
     * {@code NAME([DISTINCT ]args)[ WITHIN GROUP (ORDER BY ...)]} followed by
     * the optional FILTER / OVER tail conditions.
     */
    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        switch (aggregateType) {
        case COUNT_ALL:
            return appendTailConditions(builder.append("COUNT(*)"), sqlFlags, false);
        case LISTAGG:
            return getSQLListagg(builder, sqlFlags);
        case ARRAY_AGG:
            return getSQLArrayAggregate(builder, sqlFlags);
        case JSON_OBJECTAGG:
            return getSQLJsonObjectAggregate(builder, sqlFlags);
        case JSON_ARRAYAGG:
            return getSQLJsonArrayAggregate(builder, sqlFlags);
        default:
        }
        builder.append(aggregateType.name());
        if (distinct) {
            builder.append("(DISTINCT ");
        } else {
            builder.append('(');
        }
        writeExpressions(builder, args, sqlFlags).append(')');
        if (orderByList != null) {
            builder.append(" WITHIN GROUP (");
            Window.appendOrderBy(builder, orderByList, sqlFlags, false);
            builder.append(')');
        }
        return appendTailConditions(builder, sqlFlags, false);
    }
builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLListagg(StringBuilder builder, int sqlFlags) { + builder.append("LISTAGG("); + if (distinct) { + builder.append("DISTINCT "); + } + args[0].getUnenclosedSQL(builder, sqlFlags); + ListaggArguments arguments = (ListaggArguments) extraArguments; + String s = arguments.getSeparator(); + if (s != null) { + StringUtils.quoteStringSQL(builder.append(", "), s); + } + if (arguments.getOnOverflowTruncate()) { + builder.append(" ON OVERFLOW TRUNCATE "); + s = arguments.getFilter(); + if (s != null) { + StringUtils.quoteStringSQL(builder, s).append(' '); + } + builder.append(arguments.isWithoutCount() ? "WITHOUT" : "WITH").append(" COUNT"); + } + builder.append(')'); + builder.append(" WITHIN GROUP ("); + Window.appendOrderBy(builder, orderByList, sqlFlags, true); + builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLJsonObjectAggregate(StringBuilder builder, int sqlFlags) { + builder.append("JSON_OBJECTAGG("); + args[0].getUnenclosedSQL(builder, sqlFlags).append(": "); + args[1].getUnenclosedSQL(builder, sqlFlags); + JsonConstructorFunction.getJsonFunctionFlagsSQL(builder, flags, false).append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private StringBuilder getSQLJsonArrayAggregate(StringBuilder builder, int sqlFlags) { + builder.append("JSON_ARRAYAGG("); + if (distinct) { + builder.append("DISTINCT "); + } + args[0].getUnenclosedSQL(builder, sqlFlags); + JsonConstructorFunction.getJsonFunctionFlagsSQL(builder, flags, true); + Window.appendOrderBy(builder, orderByList, sqlFlags, false); + builder.append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + private Index getMinMaxColumnIndex() { + Expression arg = args[0]; + if (arg instanceof ExpressionColumn) { + ExpressionColumn col = (ExpressionColumn) arg; + Column column = col.getColumn(); + TableFilter 
    /**
     * Checks this aggregate, its FILTER condition, its arguments, and its
     * ORDER BY expressions against the visitor. For the
     * {@code OPTIMIZABLE_AGGREGATE} visitor it additionally decides whether
     * the aggregate can be answered directly from table metadata or an index
     * (the "quick aggregate" path used by {@code getValueQuick}).
     */
    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        if (!super.isEverything(visitor)) {
            return false;
        }
        if (filterCondition != null && !filterCondition.isEverything(visitor)) {
            return false;
        }
        if (visitor.getType() == ExpressionVisitor.OPTIMIZABLE_AGGREGATE) {
            switch (aggregateType) {
            case COUNT:
                // COUNT(non-nullable column) behaves like COUNT(*)
                if (distinct || args[0].getNullable() != Column.NOT_NULLABLE) {
                    return false;
                }
                //$FALL-THROUGH$
            case COUNT_ALL:
                // Optimizable when the table can report its row count
                return visitor.getTable().canGetRowCount(select.getSession());
            case MIN:
            case MAX:
                // Optimizable when an index on the column exists
                return getMinMaxColumnIndex() != null;
            case PERCENTILE_CONT:
            case PERCENTILE_DISC:
                // Requires a constant percentile and an index on the ordering column
                return args[0].isConstant() && Percentile.getColumnIndex(select.getSession().getDatabase(),
                        orderByList.get(0).expression) != null;
            case MEDIAN:
                if (distinct) {
                    return false;
                }
                return Percentile.getColumnIndex(select.getSession().getDatabase(), args[0]) != null;
            case ENVELOPE:
                // Optimizable when a spatial index on the geometry column exists
                return AggregateDataEnvelope.getGeometryColumnIndex(args[0]) != null;
            default:
                return false;
            }
        }
        for (Expression arg : args) {
            if (!arg.isEverything(visitor)) {
                return false;
            }
        }
        if (orderByList != null) {
            for (QueryOrderBy o : orderByList) {
                if (!o.expression.isEverything(visitor)) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Estimates the evaluation cost of this aggregate: one plus the cost of
     * the arguments, ORDER BY expressions, and FILTER condition.
     */
    @Override
    public int getCost() {
        int cost = 1;
        for (Expression arg : args) {
            cost += arg.getCost();
        }
        if (orderByList != null) {
            for (QueryOrderBy o : orderByList) {
                cost += o.expression.getCost();
            }
        }
        if (filterCondition != null) {
            cost += filterCondition.getCost();
        }
        return cost;
    }

    /**
     * Returns the select statement.
     *
     * @return the select statement
     */
    public Select getSelect() {
        return select;
    }
+ * + * @return if distinct is used + */ + public boolean isDistinct() { + return distinct; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateData.java b/h2/src/main/org/h2/expression/aggregate/AggregateData.java new file mode 100644 index 0000000000..97986b4838 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateData.java @@ -0,0 +1,32 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.value.Value; + +/** + * Abstract class for the computation of an aggregate. + */ +abstract class AggregateData { + + /** + * Add a value to this aggregate. + * + * @param session the session + * @param v the value + */ + abstract void add(SessionLocal session, Value v); + + /** + * Get the aggregate result. + * + * @param session the session + * @return the value + */ + abstract Value getValue(SessionLocal session); + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java new file mode 100644 index 0000000000..283ad625d8 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataAvg.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
/**
 * Data stored while calculating an AVG aggregate.
 *
 * The running sum is kept in one of three fields depending on the result
 * data type: a double, a BigDecimal, or (for interval types) a BigInteger
 * holding the interval as an absolute magnitude.
 */
final class AggregateDataAvg extends AggregateData {

    // Data type of the computed result; selects the accumulator field
    private final TypeInfo dataType;
    // Number of non-NULL values added so far
    private long count;
    // Running sum for DOUBLE results
    private double doubleValue;
    // Running sum for NUMERIC / DECFLOAT results; null until the first value
    private BigDecimal decimalValue;
    // Running sum for interval results; null until the first value
    private BigInteger integerValue;

    /**
     * @param dataType
     *            the data type of the computed result
     */
    AggregateDataAvg(TypeInfo dataType) {
        this.dataType = dataType;
    }

    @Override
    void add(SessionLocal session, Value v) {
        // NULL values do not contribute to the average
        if (v == ValueNull.INSTANCE) {
            return;
        }
        count++;
        switch (dataType.getValueType()) {
        case Value.DOUBLE:
            doubleValue += v.getDouble();
            break;
        case Value.NUMERIC:
        case Value.DECFLOAT: {
            BigDecimal bd = v.getBigDecimal();
            decimalValue = decimalValue == null ? bd : decimalValue.add(bd);
            break;
        }
        default: {
            // Interval types: accumulate as a single absolute magnitude
            BigInteger bi = IntervalUtils.intervalToAbsolute((ValueInterval) v);
            integerValue = integerValue == null ? bi : integerValue.add(bi);
        }
        }
    }

    @Override
    Value getValue(SessionLocal session) {
        // AVG over zero rows is NULL
        if (count == 0) {
            return ValueNull.INSTANCE;
        }
        Value v;
        int valueType = dataType.getValueType();
        switch (valueType) {
        case Value.DOUBLE:
            v = ValueDouble.get(doubleValue / count);
            break;
        case Value.NUMERIC:
            v = ValueNumeric
                    .get(decimalValue.divide(BigDecimal.valueOf(count), dataType.getScale(), RoundingMode.HALF_DOWN));
            break;
        case Value.DECFLOAT:
            v = ValueDecfloat.divide(decimalValue, BigDecimal.valueOf(count), dataType);
            break;
        default:
            // Convert the averaged absolute magnitude back into an interval
            v = IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(valueType - Value.INTERVAL_YEAR),
                    integerValue.divide(BigInteger.valueOf(count)));
        }
        return v.castTo(dataType, session);
    }

}
+ */ +abstract class AggregateDataBinarySet extends AggregateData { + + abstract void add(SessionLocal session, Value yValue, Value xValue); + + @Override + final void add(SessionLocal session, Value v) { + throw DbException.getInternalError(); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java new file mode 100644 index 0000000000..af1e267fcf --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCollecting.java @@ -0,0 +1,168 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.TreeSet; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Data stored while calculating an aggregate that needs collecting of all + * values or a distinct aggregate. + * + *

    + * NULL values are not collected. {@link #getValue(SessionLocal)} method + * returns {@code null}. Use {@link #getArray()} for instances of this class + * instead. + *

    + */ +final class AggregateDataCollecting extends AggregateData implements Iterable { + + /** + * NULL values collection mode. + */ + enum NullCollectionMode { + + /** + * Rows with NULL value are completely ignored. + */ + IGNORED, + + /** + * Rows with NULL values are processed causing the result to be not + * NULL, but NULL values aren't collected. + */ + EXCLUDED, + + /** + * Rows with NULL values are aggregated just like rows with any other + * values, should also be used when NULL values aren't passed to + * {@linkplain AggregateDataCollecting}. + */ + USED_OR_IMPOSSIBLE; + + } + + private final boolean distinct; + + private final boolean orderedWithOrder; + + private final NullCollectionMode nullCollectionMode; + + Collection values; + + private Value shared; + + /** + * Creates new instance of data for collecting aggregates. + * + * @param distinct + * if distinct is used + * @param orderedWithOrder + * if aggregate is an ordered aggregate with ORDER BY clause + * @param nullCollectionMode + * NULL values collection mode + */ + AggregateDataCollecting(boolean distinct, boolean orderedWithOrder, NullCollectionMode nullCollectionMode) { + this.distinct = distinct; + this.orderedWithOrder = orderedWithOrder; + this.nullCollectionMode = nullCollectionMode; + } + + @Override + void add(SessionLocal session, Value v) { + if (nullCollectionMode == NullCollectionMode.IGNORED && isNull(v)) { + return; + } + Collection c = values; + if (c == null) { + if (distinct) { + Comparator comparator = session.getDatabase().getCompareMode(); + if (orderedWithOrder) { + comparator = Comparator.comparing(t -> ((ValueRow) t).getList()[0], comparator); + } + c = new TreeSet<>(comparator); + } else { + c = new ArrayList<>(); + } + values = c; + } + if (nullCollectionMode == NullCollectionMode.EXCLUDED && isNull(v)) { + return; + } + c.add(v); + } + + private boolean isNull(Value v) { + return (orderedWithOrder ? 
((ValueRow) v).getList()[0] : v) == ValueNull.INSTANCE; + } + + @Override + Value getValue(SessionLocal session) { + return null; + } + + /** + * Returns the count of values. + * + * @return the count of values + */ + int getCount() { + return values != null ? values.size() : 0; + } + + /** + * Returns array with values or {@code null}. + * + * @return array with values or {@code null} + */ + Value[] getArray() { + Collection values = this.values; + if (values == null) { + return null; + } + return values.toArray(Value.EMPTY_VALUES); + } + + @Override + public Iterator iterator() { + return values != null ? values.iterator() : Collections.emptyIterator(); + } + + /** + * Sets value of a shared argument. + * + * @param shared the shared value + */ + void setSharedArgument(Value shared) { + if (this.shared == null) { + this.shared = shared; + } else if (!this.shared.equals(shared)) { + throw DbException.get(ErrorCode.INVALID_VALUE_2, "Inverse distribution function argument", + this.shared.getTraceSQL() + "<>" + shared.getTraceSQL()); + } + } + + /** + * Returns value of a shared argument. + * + * @return value of a shared argument + */ + Value getSharedArgument() { + return shared; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java new file mode 100644 index 0000000000..28b6160b6f --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCorr.java @@ -0,0 +1,96 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a CORR, REG_SLOPE, REG_INTERCEPT, or REGR_R2 + * aggregate. 
+ */ +final class AggregateDataCorr extends AggregateDataBinarySet { + + private final AggregateType aggregateType; + + private long count; + + private double sumY, sumX, sumYX; + + private double m2y, meanY; + + private double m2x, meanX; + + AggregateDataCorr(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value yValue, Value xValue) { + double y = yValue.getDouble(), x = xValue.getDouble(); + sumY += y; + sumX += x; + sumYX += y * x; + if (++count == 1) { + meanY = y; + meanX = x; + m2x = m2y = 0; + } else { + double delta = y - meanY; + meanY += delta / count; + m2y += delta * (y - meanY); + delta = x - meanX; + meanX += delta / count; + m2x += delta * (x - meanX); + } + } + + @Override + Value getValue(SessionLocal session) { + if (count < 1) { + return ValueNull.INSTANCE; + } + double v; + switch (aggregateType) { + case CORR: + if (m2y == 0 || m2x == 0) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / Math.sqrt(m2y * m2x); + break; + case REGR_SLOPE: + if (m2x == 0) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / m2x; + break; + case REGR_INTERCEPT: + if (m2x == 0) { + return ValueNull.INSTANCE; + } + v = meanY - (sumYX - sumX * sumY / count) / m2x * meanX; + break; + case REGR_R2: { + if (m2x == 0) { + return ValueNull.INSTANCE; + } + if (m2y == 0) { + return ValueDouble.ONE; + } + v = sumYX - sumX * sumY / count; + v = v * v / (m2y * m2x); + break; + } + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java new file mode 100644 index 0000000000..b0841b1551 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCount.java @@ -0,0 +1,38 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a COUNT aggregate. + */ +final class AggregateDataCount extends AggregateData { + + private final boolean all; + + private long count; + + AggregateDataCount(boolean all) { + this.all = all; + } + + @Override + void add(SessionLocal session, Value v) { + if (all || v != ValueNull.INSTANCE) { + count++; + } + } + + @Override + Value getValue(SessionLocal session) { + return ValueBigint.get(count); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java new file mode 100644 index 0000000000..acd0031054 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataCovar.java @@ -0,0 +1,70 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a COVAR_POP, COVAR_SAMP, or REGR_SXY aggregate. 
+ */ +final class AggregateDataCovar extends AggregateDataBinarySet { + + private final AggregateType aggregateType; + + private long count; + + private double sumY, sumX, sumYX; + + /** + * @param aggregateType + * the type of the aggregate operation + */ + AggregateDataCovar(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value yValue, Value xValue) { + double y = yValue.getDouble(), x = xValue.getDouble(); + sumY += y; + sumX += x; + sumYX += y * x; + count++; + } + + @Override + Value getValue(SessionLocal session) { + double v; + switch (aggregateType) { + case COVAR_POP: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / count; + break; + case COVAR_SAMP: + if (count < 2) { + return ValueNull.INSTANCE; + } + v = (sumYX - sumX * sumY / count) / (count - 1); + break; + case REGR_SXY: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = sumYX - sumX * sumY / count; + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java new file mode 100644 index 0000000000..0ff71f2270 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataDefault.java @@ -0,0 +1,119 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.expression.function.BitFunction; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating an aggregate. 
+ */ +final class AggregateDataDefault extends AggregateData { + + private final AggregateType aggregateType; + private final TypeInfo dataType; + private Value value; + + /** + * @param aggregateType the type of the aggregate operation + * @param dataType the data type of the computed result + */ + AggregateDataDefault(AggregateType aggregateType, TypeInfo dataType) { + this.aggregateType = aggregateType; + this.dataType = dataType; + } + + @Override + void add(SessionLocal session, Value v) { + if (v == ValueNull.INSTANCE) { + return; + } + switch (aggregateType) { + case SUM: + if (value == null) { + value = v.convertTo(dataType.getValueType()); + } else { + v = v.convertTo(value.getValueType()); + value = value.add(v); + } + break; + case MIN: + if (value == null || session.compare(v, value) < 0) { + value = v; + } + break; + case MAX: + if (value == null || session.compare(v, value) > 0) { + value = v; + } + break; + case EVERY: + v = v.convertToBoolean(); + if (value == null) { + value = v; + } else { + value = ValueBoolean.get(value.getBoolean() && v.getBoolean()); + } + break; + case ANY: + v = v.convertToBoolean(); + if (value == null) { + value = v; + } else { + value = ValueBoolean.get(value.getBoolean() || v.getBoolean()); + } + break; + case BIT_AND_AGG: + case BIT_NAND_AGG: + if (value == null) { + value = v; + } else { + value = BitFunction.getBitwise(BitFunction.BITAND, dataType, value, v); + } + break; + case BIT_OR_AGG: + case BIT_NOR_AGG: + if (value == null) { + value = v; + } else { + value = BitFunction.getBitwise(BitFunction.BITOR, dataType, value, v); + } + break; + case BIT_XOR_AGG: + case BIT_XNOR_AGG: + if (value == null) { + value = v; + } else { + value = BitFunction.getBitwise(BitFunction.BITXOR, dataType, value, v); + } + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + } + + @SuppressWarnings("incomplete-switch") + @Override + Value getValue(SessionLocal session) { + Value v = value; + if (v == 
null) { + return ValueNull.INSTANCE; + } + switch (aggregateType) { + case BIT_NAND_AGG: + case BIT_NOR_AGG: + case BIT_XNOR_AGG: + v = BitFunction.getBitwise(BitFunction.BITNOT, dataType, v, null); + } + return v.convertTo(dataType); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java new file mode 100644 index 0000000000..60bd31ef3f --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataDistinctWithCounts.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.util.TreeMap; +import org.h2.engine.SessionLocal; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating an aggregate that needs distinct values with + * their counts. + */ +final class AggregateDataDistinctWithCounts extends AggregateData { + + private final boolean ignoreNulls; + + private final int maxDistinctCount; + + private TreeMap values; + + /** + * Creates new instance of data for aggregate that needs distinct values + * with their counts. 
+ * + * @param ignoreNulls + * whether NULL values should be ignored + * @param maxDistinctCount + * maximum count of distinct values to collect + */ + AggregateDataDistinctWithCounts(boolean ignoreNulls, int maxDistinctCount) { + this.ignoreNulls = ignoreNulls; + this.maxDistinctCount = maxDistinctCount; + } + + @Override + void add(SessionLocal session, Value v) { + if (ignoreNulls && v == ValueNull.INSTANCE) { + return; + } + if (values == null) { + values = new TreeMap<>(session.getDatabase().getCompareMode()); + } + LongDataCounter a = values.get(v); + if (a == null) { + if (values.size() >= maxDistinctCount) { + return; + } + a = new LongDataCounter(); + values.put(v, a); + } + a.count++; + } + + @Override + Value getValue(SessionLocal session) { + return null; + } + + /** + * Returns map with values and their counts. + * + * @return map with values and their counts + */ + TreeMap getValues() { + return values; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java new file mode 100644 index 0000000000..a2215249d7 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataEnvelope.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.index.Index; +import org.h2.mvstore.db.MVSpatialIndex; +import org.h2.table.Column; +import org.h2.table.TableFilter; +import org.h2.util.geometry.GeometryUtils; +import org.h2.value.Value; +import org.h2.value.ValueGeometry; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating an aggregate. 
+ */ +final class AggregateDataEnvelope extends AggregateData { + + private double[] envelope; + + /** + * Get the index (if any) for the column specified in the geometry + * aggregate. + * + * @param on + * the expression (usually a column expression) + * @return the index, or null + */ + static Index getGeometryColumnIndex(Expression on) { + if (on instanceof ExpressionColumn) { + ExpressionColumn col = (ExpressionColumn) on; + Column column = col.getColumn(); + if (column.getType().getValueType() == Value.GEOMETRY) { + TableFilter filter = col.getTableFilter(); + if (filter != null) { + ArrayList indexes = filter.getTable().getIndexes(); + if (indexes != null) { + for (int i = 1, size = indexes.size(); i < size; i++) { + Index index = indexes.get(i); + if (index instanceof MVSpatialIndex && index.isFirstColumn(column)) { + return index; + } + } + } + } + } + } + return null; + } + + @Override + void add(SessionLocal session, Value v) { + if (v == ValueNull.INSTANCE) { + return; + } + envelope = GeometryUtils.union(envelope, v.convertToGeometry(null).getEnvelopeNoCopy()); + } + + @Override + Value getValue(SessionLocal session) { + return ValueGeometry.fromEnvelope(envelope); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java b/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java new file mode 100644 index 0000000000..2c64503025 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/AggregateDataStdVar.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * Data stored while calculating a STDDEV_POP, STDDEV_SAMP, VAR_SAMP, VAR_POP, + * REGR_SXX, or REGR_SYY aggregate. + */ +final class AggregateDataStdVar extends AggregateData { + + private final AggregateType aggregateType; + + private long count; + + private double m2, mean; + + /** + * @param aggregateType + * the type of the aggregate operation + */ + AggregateDataStdVar(AggregateType aggregateType) { + this.aggregateType = aggregateType; + } + + @Override + void add(SessionLocal session, Value v) { + if (v == ValueNull.INSTANCE) { + return; + } + // Using Welford's method, see also + // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + // https://www.johndcook.com/standard_deviation.html + double x = v.getDouble(); + if (++count == 1) { + mean = x; + m2 = 0; + } else { + double delta = x - mean; + mean += delta / count; + m2 += delta * (x - mean); + } + } + + @Override + Value getValue(SessionLocal session) { + double v; + switch (aggregateType) { + case STDDEV_SAMP: + case VAR_SAMP: + if (count < 2) { + return ValueNull.INSTANCE; + } + v = m2 / (count - 1); + if (aggregateType == AggregateType.STDDEV_SAMP) { + v = Math.sqrt(v); + } + break; + case STDDEV_POP: + case VAR_POP: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = m2 / count; + if (aggregateType == AggregateType.STDDEV_POP) { + v = Math.sqrt(v); + } + break; + case REGR_SXX: + case REGR_SYY: + if (count < 1) { + return ValueNull.INSTANCE; + } + v = m2; + break; + default: + throw DbException.getInternalError("type=" + aggregateType); + } + return ValueDouble.get(v); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/AggregateType.java b/h2/src/main/org/h2/expression/aggregate/AggregateType.java new file mode 100644 
/**
 * The type of an aggregate function. The order of the constants is part of
 * the public contract of this enum and must not be changed.
 */
public enum AggregateType {

    /** COUNT(*). */
    COUNT_ALL,

    /** COUNT(expression). */
    COUNT,

    /** SUM(expression). */
    SUM,

    /** MIN(expression). */
    MIN,

    /** MAX(expression). */
    MAX,

    /** AVG(expression). */
    AVG,

    /** STDDEV_POP(expression). */
    STDDEV_POP,

    /** STDDEV_SAMP(expression). */
    STDDEV_SAMP,

    /** VAR_POP(expression). */
    VAR_POP,

    /** VAR_SAMP(expression). */
    VAR_SAMP,

    /** ANY(expression). */
    ANY,

    /** EVERY(expression). */
    EVERY,

    /** BIT_AND_AGG(expression). */
    BIT_AND_AGG,

    /** BIT_OR_AGG(expression). */
    BIT_OR_AGG,

    /** BIT_XOR_AGG(expression). */
    BIT_XOR_AGG,

    /** BIT_NAND_AGG(expression). */
    BIT_NAND_AGG,

    /** BIT_NOR_AGG(expression). */
    BIT_NOR_AGG,

    /** BIT_XNOR_AGG(expression). */
    BIT_XNOR_AGG,

    /** HISTOGRAM(expression). */
    HISTOGRAM,

    /** COVAR_POP binary set function. */
    COVAR_POP,

    /** COVAR_SAMP binary set function. */
    COVAR_SAMP,

    /** CORR binary set function. */
    CORR,

    /** REGR_SLOPE binary set function. */
    REGR_SLOPE,

    /** REGR_INTERCEPT binary set function. */
    REGR_INTERCEPT,

    /** REGR_COUNT binary set function. */
    REGR_COUNT,

    /** REGR_R2 binary set function. */
    REGR_R2,

    /** REGR_AVGX binary set function. */
    REGR_AVGX,

    /** REGR_AVGY binary set function. */
    REGR_AVGY,

    /** REGR_SXX binary set function. */
    REGR_SXX,

    /** REGR_SYY binary set function. */
    REGR_SYY,

    /** REGR_SXY binary set function. */
    REGR_SXY,

    /** RANK() hypothetical set function. */
    RANK,

    /** DENSE_RANK() hypothetical set function. */
    DENSE_RANK,

    /** PERCENT_RANK() hypothetical set function. */
    PERCENT_RANK,

    /** CUME_DIST() hypothetical set function. */
    CUME_DIST,

    /** PERCENTILE_CONT(expression). */
    PERCENTILE_CONT,

    /** PERCENTILE_DISC(expression). */
    PERCENTILE_DISC,

    /** MEDIAN(expression). */
    MEDIAN,

    /** LISTAGG(...). */
    LISTAGG,

    /** ARRAY_AGG(expression). */
    ARRAY_AGG,

    /** MODE(expression). */
    MODE,

    /** ENVELOPE(expression). */
    ENVELOPE,

    /** JSON_OBJECTAGG(expression: expression). */
    JSON_OBJECTAGG,

    /** JSON_ARRAYAGG(expression). */
    JSON_ARRAYAGG,

}
+ */ +public class JavaAggregate extends AbstractAggregate { + + private final UserAggregate userAggregate; + private int[] argTypes; + private int dataType; + private JdbcConnection userConnection; + + public JavaAggregate(UserAggregate userAggregate, Expression[] args, Select select, boolean distinct) { + super(select, args, distinct); + this.userAggregate = userAggregate; + } + + @Override + public int getCost() { + int cost = 5; + for (Expression e : args) { + cost += e.getCost(); + } + if (filterCondition != null) { + cost += filterCondition.getCost(); + } + return cost; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + ParserUtil.quoteIdentifier(builder, userAggregate.getName(), sqlFlags).append('('); + writeExpressions(builder, args, sqlFlags).append(')'); + return appendTailConditions(builder, sqlFlags, false); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + // TODO optimization: some functions are deterministic, but we don't + // know (no setting for that) + case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: + // user defined aggregate functions can not be optimized + return false; + case ExpressionVisitor.GET_DEPENDENCIES: + visitor.addDependency(userAggregate); + break; + default: + } + for (Expression e : args) { + if (e != null && !e.isEverything(visitor)) { + return false; + } + } + return filterCondition == null || filterCondition.isEverything(visitor); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + userConnection = session.createConnection(false); + int len = args.length; + argTypes = new int[len]; + for (int i = 0; i < len; i++) { + int type = args[i].getType().getValueType(); + argTypes[i] = type; + } + try { + Aggregate aggregate = getInstance(); + dataType = 
aggregate.getInternalType(argTypes); + type = TypeInfo.getTypeInfo(dataType); + } catch (SQLException e) { + throw DbException.convert(e); + } + return this; + } + + private Aggregate getInstance() { + Aggregate agg = userAggregate.getInstance(); + try { + agg.init(userConnection); + } catch (SQLException ex) { + throw DbException.convert(ex); + } + return agg; + } + + @Override + public Value getAggregatedValue(SessionLocal session, Object aggregateData) { + try { + Aggregate agg; + if (distinct) { + agg = getInstance(); + AggregateDataCollecting data = (AggregateDataCollecting) aggregateData; + if (data != null) { + for (Value value : data.values) { + if (args.length == 1) { + agg.add(ValueToObjectConverter.valueToDefaultObject(value, userConnection, false)); + } else { + Value[] values = ((ValueRow) value).getList(); + Object[] argValues = new Object[args.length]; + for (int i = 0, len = args.length; i < len; i++) { + argValues[i] = ValueToObjectConverter.valueToDefaultObject(values[i], userConnection, + false); + } + agg.add(argValues); + } + } + } + } else { + agg = (Aggregate) aggregateData; + if (agg == null) { + agg = getInstance(); + } + } + Object obj = agg.getResult(); + if (obj == null) { + return ValueNull.INSTANCE; + } + return ValueToObjectConverter.objectToValue(session, obj, dataType); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + @Override + protected void updateAggregate(SessionLocal session, Object aggregateData) { + updateData(session, aggregateData, null); + } + + private void updateData(SessionLocal session, Object aggregateData, Value[] remembered) { + try { + if (distinct) { + AggregateDataCollecting data = (AggregateDataCollecting) aggregateData; + Value[] argValues = new Value[args.length]; + Value arg = null; + for (int i = 0, len = args.length; i < len; i++) { + arg = remembered == null ? args[i].getValue(session) : remembered[i]; + argValues[i] = arg; + } + data.add(session, args.length == 1 ? 
arg : ValueRow.get(argValues)); + } else { + Aggregate agg = (Aggregate) aggregateData; + Object[] argValues = new Object[args.length]; + Object arg = null; + for (int i = 0, len = args.length; i < len; i++) { + Value v = remembered == null ? args[i].getValue(session) : remembered[i]; + arg = ValueToObjectConverter.valueToDefaultObject(v, userConnection, false); + argValues[i] = arg; + } + agg.add(args.length == 1 ? arg : argValues); + } + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + @Override + protected void updateGroupAggregates(SessionLocal session, int stage) { + super.updateGroupAggregates(session, stage); + for (Expression expr : args) { + expr.updateAggregate(session, stage); + } + } + + @Override + protected int getNumExpressions() { + int n = args.length; + if (filterCondition != null) { + n++; + } + return n; + } + + @Override + protected void rememberExpressions(SessionLocal session, Value[] array) { + int length = args.length; + for (int i = 0; i < length; i++) { + array[i] = args[i].getValue(session); + } + if (filterCondition != null) { + array[length] = ValueBoolean.get(filterCondition.getBooleanValue(session)); + } + } + + @Override + protected void updateFromExpressions(SessionLocal session, Object aggregateData, Value[] array) { + if (filterCondition == null || array[getNumExpressions() - 1].isTrue()) { + updateData(session, aggregateData, array); + } + } + + @Override + protected Object createAggregateData() { + return distinct ? new AggregateDataCollecting(true, false, NullCollectionMode.IGNORED) : getInstance(); + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java b/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java new file mode 100644 index 0000000000..ee134f7a8c --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/ListaggArguments.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +/** + * Additional arguments of LISTAGG aggregate function. + */ +public final class ListaggArguments { + + private String separator; + + private boolean onOverflowTruncate; + + private String filter; + + private boolean withoutCount; + + /** + * Creates a new instance of additional arguments of LISTAGG aggregate + * function. + */ + public ListaggArguments() { + } + + /** + * Sets the custom LISTAGG separator. + * + * @param separator + * the LISTAGG separator, {@code null} or empty string means no + * separator + */ + public void setSeparator(String separator) { + this.separator = separator != null ? separator : ""; + } + + /** + * Returns the LISTAGG separator. + * + * @return the LISTAGG separator, {@code null} means the default + */ + public String getSeparator() { + return separator; + } + + /** + * Returns the effective LISTAGG separator. + * + * @return the effective LISTAGG separator + */ + public String getEffectiveSeparator() { + return separator != null ? separator : ","; + } + + /** + * Sets the LISTAGG overflow behavior. + * + * @param onOverflowTruncate + * {@code true} for ON OVERFLOW TRUNCATE, {@code false} for ON + * OVERFLOW ERROR + */ + public void setOnOverflowTruncate(boolean onOverflowTruncate) { + this.onOverflowTruncate = onOverflowTruncate; + } + + /** + * Returns the LISTAGG overflow behavior. + * + * @return {@code true} for ON OVERFLOW TRUNCATE, {@code false} for ON + * OVERFLOW ERROR + */ + public boolean getOnOverflowTruncate() { + return onOverflowTruncate; + } + + /** + * Sets the custom LISTAGG truncation filter. + * + * @param filter + * the LISTAGG truncation filter, {@code null} or empty string + * means no truncation filter + */ + public void setFilter(String filter) { + this.filter = filter != null ? 
filter : ""; + } + + /** + * Returns the LISTAGG truncation filter. + * + * @return the LISTAGG truncation filter, {@code null} means the default + */ + public String getFilter() { + return filter; + } + + /** + * Returns the effective LISTAGG truncation filter. + * + * @return the effective LISTAGG truncation filter + */ + public String getEffectiveFilter() { + return filter != null ? filter : "..."; + } + + /** + * Sets the LISTAGG count indication. + * + * @param withoutCount + * {@code true} for WITHOUT COUNT, {@code false} for WITH COUNT + */ + public void setWithoutCount(boolean withoutCount) { + this.withoutCount = withoutCount; + } + + /** + * Returns the LISTAGG count indication. + * + * @return {@code true} for WITHOUT COUNT, {@code false} for WITH COUNT + */ + public boolean isWithoutCount() { + return withoutCount; + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java b/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java new file mode 100644 index 0000000000..2bd5086f19 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/LongDataCounter.java @@ -0,0 +1,18 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +/** + * Counter. + */ +final class LongDataCounter { + + /** + * The value. + */ + long count; + +} diff --git a/h2/src/main/org/h2/expression/aggregate/Percentile.java b/h2/src/main/org/h2/expression/aggregate/Percentile.java new file mode 100644 index 0000000000..39bae3ca73 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/Percentile.java @@ -0,0 +1,384 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.aggregate; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.api.IntervalQualifier; +import org.h2.command.query.QueryOrderBy; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.index.Cursor; +import org.h2.index.Index; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.table.TableFilter; +import org.h2.util.DateTimeUtils; +import org.h2.util.IntervalUtils; +import org.h2.value.CompareMode; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNull; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * PERCENTILE_CONT, PERCENTILE_DISC, and MEDIAN inverse distribution functions. + */ +final class Percentile { + + /** + * BigDecimal value of 0.5. + */ + static final BigDecimal HALF = BigDecimal.valueOf(0.5d); + + private static boolean isNullsLast(DefaultNullOrdering defaultNullOrdering, Index index) { + return defaultNullOrdering.compareNull(true, index.getIndexColumns()[0].sortType) > 0; + } + + /** + * Get the index (if any) for the column specified in the inverse + * distribution function. 
+ * + * @param database the database + * @param on the expression (usually a column expression) + * @return the index, or null + */ + static Index getColumnIndex(Database database, Expression on) { + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); + if (on instanceof ExpressionColumn) { + ExpressionColumn col = (ExpressionColumn) on; + Column column = col.getColumn(); + TableFilter filter = col.getTableFilter(); + if (filter != null) { + Table table = filter.getTable(); + ArrayList indexes = table.getIndexes(); + Index result = null; + if (indexes != null) { + boolean nullable = column.isNullable(); + for (int i = 1, size = indexes.size(); i < size; i++) { + Index index = indexes.get(i); + if (!index.canFindNext()) { + continue; + } + if (!index.isFirstColumn(column)) { + continue; + } + // Prefer index without nulls last for nullable columns + if (result == null || result.getColumns().length > index.getColumns().length + || nullable && isNullsLast(defaultNullOrdering, result) + && !isNullsLast(defaultNullOrdering, index)) { + result = index; + } + } + } + return result; + } + } + return null; + } + + /** + * Get the result from the array of values. 
+ * + * @param session the session + * @param array array with values + * @param dataType the data type + * @param orderByList ORDER BY list + * @param percentile argument of percentile function, or 0.5d for median + * @param interpolate whether value should be interpolated + * @return the result + */ + static Value getValue(SessionLocal session, Value[] array, int dataType, ArrayList orderByList, + BigDecimal percentile, boolean interpolate) { + final CompareMode compareMode = session.getDatabase().getCompareMode(); + Arrays.sort(array, compareMode); + int count = array.length; + boolean reverseIndex = orderByList != null && (orderByList.get(0).sortType & SortOrder.DESCENDING) != 0; + BigDecimal fpRow = BigDecimal.valueOf(count - 1).multiply(percentile); + int rowIdx1 = fpRow.intValue(); + BigDecimal factor = fpRow.subtract(BigDecimal.valueOf(rowIdx1)); + int rowIdx2; + if (factor.signum() == 0) { + interpolate = false; + rowIdx2 = rowIdx1; + } else { + rowIdx2 = rowIdx1 + 1; + if (!interpolate) { + if (factor.compareTo(HALF) > 0) { + rowIdx1 = rowIdx2; + } else { + rowIdx2 = rowIdx1; + } + } + } + if (reverseIndex) { + rowIdx1 = count - 1 - rowIdx1; + rowIdx2 = count - 1 - rowIdx2; + } + Value v = array[rowIdx1]; + if (!interpolate) { + return v; + } + return interpolate(v, array[rowIdx2], factor, dataType, session, compareMode); + } + + /** + * Get the result from the index. 
+ * + * @param session the session + * @param expression the expression + * @param dataType the data type + * @param orderByList ORDER BY list + * @param percentile argument of percentile function, or 0.5d for median + * @param interpolate whether value should be interpolated + * @return the result + */ + static Value getFromIndex(SessionLocal session, Expression expression, int dataType, + ArrayList orderByList, BigDecimal percentile, boolean interpolate) { + Database db = session.getDatabase(); + Index index = getColumnIndex(db, expression); + long count = index.getRowCount(session); + if (count == 0) { + return ValueNull.INSTANCE; + } + Cursor cursor = index.find(session, null, null); + cursor.next(); + int columnId = index.getColumns()[0].getColumnId(); + ExpressionColumn expr = (ExpressionColumn) expression; + if (expr.getColumn().isNullable()) { + boolean hasNulls = false; + SearchRow row; + // Try to skip nulls from the start first with the same cursor that + // will be used to read values. + while (count > 0) { + row = cursor.getSearchRow(); + if (row == null) { + return ValueNull.INSTANCE; + } + if (row.getValue(columnId) == ValueNull.INSTANCE) { + count--; + cursor.next(); + hasNulls = true; + } else { + break; + } + } + if (count == 0) { + return ValueNull.INSTANCE; + } + // If no nulls found and if index orders nulls last create a second + // cursor to count nulls at the end. + if (!hasNulls && isNullsLast(db.getDefaultNullOrdering(), index)) { + TableFilter tableFilter = expr.getTableFilter(); + SearchRow check = tableFilter.getTable().getTemplateSimpleRow(true); + check.setValue(columnId, ValueNull.INSTANCE); + Cursor nullsCursor = index.find(session, check, check); + while (nullsCursor.next()) { + count--; + } + if (count <= 0) { + return ValueNull.INSTANCE; + } + } + } + boolean reverseIndex = (orderByList != null ? 
orderByList.get(0).sortType & SortOrder.DESCENDING : 0) + != (index.getIndexColumns()[0].sortType & SortOrder.DESCENDING); + BigDecimal fpRow = BigDecimal.valueOf(count - 1).multiply(percentile); + long rowIdx1 = fpRow.longValue(); + BigDecimal factor = fpRow.subtract(BigDecimal.valueOf(rowIdx1)); + long rowIdx2; + if (factor.signum() == 0) { + interpolate = false; + rowIdx2 = rowIdx1; + } else { + rowIdx2 = rowIdx1 + 1; + if (!interpolate) { + if (factor.compareTo(HALF) > 0) { + rowIdx1 = rowIdx2; + } else { + rowIdx2 = rowIdx1; + } + } + } + long skip = reverseIndex ? count - 1 - rowIdx2 : rowIdx1; + for (int i = 0; i < skip; i++) { + cursor.next(); + } + SearchRow row = cursor.getSearchRow(); + if (row == null) { + return ValueNull.INSTANCE; + } + Value v = row.getValue(columnId); + if (v == ValueNull.INSTANCE) { + return v; + } + if (interpolate) { + cursor.next(); + row = cursor.getSearchRow(); + if (row == null) { + return v; + } + Value v2 = row.getValue(columnId); + if (v2 == ValueNull.INSTANCE) { + return v; + } + if (reverseIndex) { + Value t = v; + v = v2; + v2 = t; + } + return interpolate(v, v2, factor, dataType, session, db.getCompareMode()); + } + return v; + } + + private static Value interpolate(Value v0, Value v1, BigDecimal factor, int dataType, SessionLocal session, + CompareMode compareMode) { + if (v0.compareTo(v1, session, compareMode) == 0) { + return v0; + } + switch (dataType) { + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + return ValueNumeric.get( + interpolateDecimal(BigDecimal.valueOf(v0.getInt()), BigDecimal.valueOf(v1.getInt()), factor)); + case Value.BIGINT: + return ValueNumeric.get( + interpolateDecimal(BigDecimal.valueOf(v0.getLong()), BigDecimal.valueOf(v1.getLong()), factor)); + case Value.NUMERIC: + case Value.DECFLOAT: + return ValueNumeric.get(interpolateDecimal(v0.getBigDecimal(), v1.getBigDecimal(), factor)); + case Value.REAL: + case Value.DOUBLE: + return ValueNumeric.get( + interpolateDecimal( + 
BigDecimal.valueOf(v0.getDouble()), BigDecimal.valueOf(v1.getDouble()), factor)); + case Value.TIME: { + ValueTime t0 = (ValueTime) v0, t1 = (ValueTime) v1; + BigDecimal n0 = BigDecimal.valueOf(t0.getNanos()); + BigDecimal n1 = BigDecimal.valueOf(t1.getNanos()); + return ValueTime.fromNanos(interpolateDecimal(n0, n1, factor).longValue()); + } + case Value.TIME_TZ: { + ValueTimeTimeZone t0 = (ValueTimeTimeZone) v0, t1 = (ValueTimeTimeZone) v1; + BigDecimal n0 = BigDecimal.valueOf(t0.getNanos()); + BigDecimal n1 = BigDecimal.valueOf(t1.getNanos()); + BigDecimal offset = BigDecimal.valueOf(t0.getTimeZoneOffsetSeconds()) + .multiply(BigDecimal.ONE.subtract(factor)) + .add(BigDecimal.valueOf(t1.getTimeZoneOffsetSeconds()).multiply(factor)); + int intOffset = offset.intValue(); + BigDecimal intOffsetBD = BigDecimal.valueOf(intOffset); + BigDecimal bd = interpolateDecimal(n0, n1, factor); + if (offset.compareTo(intOffsetBD) != 0) { + bd = bd.add( + offset.subtract(intOffsetBD).multiply(BigDecimal.valueOf(DateTimeUtils.NANOS_PER_SECOND))); + } + long timeNanos = bd.longValue(); + if (timeNanos < 0L) { + timeNanos += DateTimeUtils.NANOS_PER_SECOND; + intOffset++; + } else if (timeNanos >= DateTimeUtils.NANOS_PER_DAY) { + timeNanos -= DateTimeUtils.NANOS_PER_SECOND; + intOffset--; + } + return ValueTimeTimeZone.fromNanos(timeNanos, intOffset); + } + case Value.DATE: { + ValueDate d0 = (ValueDate) v0, d1 = (ValueDate) v1; + BigDecimal a0 = BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(d0.getDateValue())); + BigDecimal a1 = BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(d1.getDateValue())); + return ValueDate.fromDateValue( + DateTimeUtils.dateValueFromAbsoluteDay(interpolateDecimal(a0, a1, factor).longValue())); + } + case Value.TIMESTAMP: { + ValueTimestamp ts0 = (ValueTimestamp) v0, ts1 = (ValueTimestamp) v1; + BigDecimal a0 = timestampToDecimal(ts0.getDateValue(), ts0.getTimeNanos()); + BigDecimal a1 = timestampToDecimal(ts1.getDateValue(), 
ts1.getTimeNanos()); + BigInteger[] dr = interpolateDecimal(a0, a1, factor).toBigInteger() + .divideAndRemainder(IntervalUtils.NANOS_PER_DAY_BI); + long absoluteDay = dr[0].longValue(); + long timeNanos = dr[1].longValue(); + if (timeNanos < 0) { + timeNanos += DateTimeUtils.NANOS_PER_DAY; + absoluteDay--; + } + return ValueTimestamp.fromDateValueAndNanos( + DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), timeNanos); + } + case Value.TIMESTAMP_TZ: { + ValueTimestampTimeZone ts0 = (ValueTimestampTimeZone) v0, ts1 = (ValueTimestampTimeZone) v1; + BigDecimal a0 = timestampToDecimal(ts0.getDateValue(), ts0.getTimeNanos()); + BigDecimal a1 = timestampToDecimal(ts1.getDateValue(), ts1.getTimeNanos()); + BigDecimal offset = BigDecimal.valueOf(ts0.getTimeZoneOffsetSeconds()) + .multiply(BigDecimal.ONE.subtract(factor)) + .add(BigDecimal.valueOf(ts1.getTimeZoneOffsetSeconds()).multiply(factor)); + int intOffset = offset.intValue(); + BigDecimal intOffsetBD = BigDecimal.valueOf(intOffset); + BigDecimal bd = interpolateDecimal(a0, a1, factor); + if (offset.compareTo(intOffsetBD) != 0) { + bd = bd.add( + offset.subtract(intOffsetBD).multiply(BigDecimal.valueOf(DateTimeUtils.NANOS_PER_SECOND))); + } + BigInteger[] dr = bd.toBigInteger().divideAndRemainder(IntervalUtils.NANOS_PER_DAY_BI); + long absoluteDay = dr[0].longValue(); + long timeNanos = dr[1].longValue(); + if (timeNanos < 0) { + timeNanos += DateTimeUtils.NANOS_PER_DAY; + absoluteDay--; + } + return ValueTimestampTimeZone.fromDateValueAndNanos(DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay), + timeNanos, intOffset); + } + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + 
case Value.INTERVAL_MINUTE_TO_SECOND: + return IntervalUtils.intervalFromAbsolute(IntervalQualifier.valueOf(dataType - Value.INTERVAL_YEAR), + interpolateDecimal(new BigDecimal(IntervalUtils.intervalToAbsolute((ValueInterval) v0)), + new BigDecimal(IntervalUtils.intervalToAbsolute((ValueInterval) v1)), factor) + .toBigInteger()); + default: + // Use the same rules as PERCENTILE_DISC + return (factor.compareTo(HALF) > 0 ? v1 : v0); + } + } + + private static BigDecimal timestampToDecimal(long dateValue, long timeNanos) { + return new BigDecimal(BigInteger.valueOf(DateTimeUtils.absoluteDayFromDateValue(dateValue)) + .multiply(IntervalUtils.NANOS_PER_DAY_BI).add(BigInteger.valueOf(timeNanos))); + } + + private static BigDecimal interpolateDecimal(BigDecimal d0, BigDecimal d1, BigDecimal factor) { + return d0.multiply(BigDecimal.ONE.subtract(factor)).add(d1.multiply(factor)); + } + + private Percentile() { + } + +} diff --git a/h2/src/main/org/h2/expression/aggregate/package.html b/h2/src/main/org/h2/expression/aggregate/package.html new file mode 100644 index 0000000000..e20a45ac82 --- /dev/null +++ b/h2/src/main/org/h2/expression/aggregate/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Aggregate functions. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java b/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java new file mode 100644 index 0000000000..8cb6ebda12 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/DataAnalysisOperation.java @@ -0,0 +1,536 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import java.util.ArrayList; +import java.util.HashMap; + +import org.h2.api.ErrorCode; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.result.SortOrder; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueInteger; + +/** + * A base class for data analysis operations such as aggregates and window + * functions. + */ +public abstract class DataAnalysisOperation extends Expression { + + /** + * Reset stage. Used to reset internal data to its initial state. + */ + public static final int STAGE_RESET = 0; + + /** + * Group stage, used for explicit or implicit GROUP BY operation. + */ + public static final int STAGE_GROUP = 1; + + /** + * Window processing stage. + */ + public static final int STAGE_WINDOW = 2; + + /** + * SELECT + */ + protected final Select select; + + /** + * OVER clause + */ + protected Window over; + + /** + * Sort order for OVER + */ + protected SortOrder overOrderBySort; + + private int numFrameExpressions; + + private int lastGroupRowId; + + /** + * Create sort order. 
+ * + * @param session + * database session + * @param orderBy + * array of order by expressions + * @param offset + * index offset + * @return the SortOrder + */ + protected static SortOrder createOrder(SessionLocal session, ArrayList orderBy, int offset) { + int size = orderBy.size(); + int[] index = new int[size]; + int[] sortType = new int[size]; + for (int i = 0; i < size; i++) { + QueryOrderBy o = orderBy.get(i); + index[i] = i + offset; + sortType[i] = o.sortType; + } + return new SortOrder(session, index, sortType, null); + } + + protected DataAnalysisOperation(Select select) { + this.select = select; + } + + /** + * Sets the OVER condition. + * + * @param over + * OVER condition + */ + public void setOverCondition(Window over) { + this.over = over; + } + + /** + * Checks whether this expression is an aggregate function. + * + * @return true if this is an aggregate function (including aggregates with + * OVER clause), false if this is a window function + */ + public abstract boolean isAggregate(); + + /** + * Returns the sort order for OVER clause. + * + * @return the sort order for OVER clause + */ + protected SortOrder getOverOrderBySort() { + return overOrderBySort; + } + + @Override + public final void mapColumns(ColumnResolver resolver, int level, int state) { + if (over != null) { + if (state != MAP_INITIAL) { + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); + } + state = MAP_IN_WINDOW; + } else { + if (state == MAP_IN_AGGREGATE) { + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); + } + state = MAP_IN_AGGREGATE; + } + mapColumnsAnalysis(resolver, level, state); + } + + /** + * Map the columns of the resolver to expression columns. 
+ * + * @param resolver + * the column resolver + * @param level + * the subquery nesting level + * @param innerState + * one of the Expression MAP_IN_* values + */ + protected void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerState) { + if (over != null) { + over.mapColumns(resolver, level); + } + } + + @Override + public Expression optimize(SessionLocal session) { + if (over != null) { + over.optimize(session); + ArrayList orderBy = over.getOrderBy(); + if (orderBy != null) { + overOrderBySort = createOrder(session, orderBy, getNumExpressions()); + } else if (!isAggregate()) { + overOrderBySort = new SortOrder(session, new int[getNumExpressions()]); + } + WindowFrame frame = over.getWindowFrame(); + if (frame != null) { + int index = getNumExpressions(); + int orderBySize = 0; + if (orderBy != null) { + orderBySize = orderBy.size(); + index += orderBySize; + } + int n = 0; + WindowFrameBound bound = frame.getStarting(); + if (bound.isParameterized()) { + checkOrderBy(frame.getUnits(), orderBySize); + if (bound.isVariable()) { + bound.setExpressionIndex(index); + n++; + } + } + bound = frame.getFollowing(); + if (bound != null && bound.isParameterized()) { + checkOrderBy(frame.getUnits(), orderBySize); + if (bound.isVariable()) { + bound.setExpressionIndex(index + n); + n++; + } + } + numFrameExpressions = n; + } + } + return this; + } + + private void checkOrderBy(WindowFrameUnits units, int orderBySize) { + switch (units) { + case RANGE: + if (orderBySize != 1) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1, + "exactly one sort key is required for RANGE units"); + } + break; + case GROUPS: + if (orderBySize < 1) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1, + "a sort key is required for GROUPS units"); + } + break; + default: + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + if (over != null) { + 
over.setEvaluatable(tableFilter, b); + } + } + + @Override + public final void updateAggregate(SessionLocal session, int stage) { + if (stage == STAGE_RESET) { + updateGroupAggregates(session, STAGE_RESET); + lastGroupRowId = 0; + return; + } + boolean window = stage == STAGE_WINDOW; + if (window != (over != null)) { + if (!window && select.isWindowQuery()) { + updateGroupAggregates(session, stage); + } + return; + } + SelectGroups groupData = select.getGroupDataIfCurrent(window); + if (groupData == null) { + // this is a different level (the enclosing query) + return; + } + + int groupRowId = groupData.getCurrentGroupRowId(); + if (lastGroupRowId == groupRowId) { + // already visited + return; + } + lastGroupRowId = groupRowId; + + if (over != null) { + if (!select.isGroupQuery()) { + over.updateAggregate(session, stage); + } + } + updateAggregate(session, groupData, groupRowId); + } + + /** + * Update a row of an aggregate. + * + * @param session + * the database session + * @param groupData + * data for the aggregate group + * @param groupRowId + * row id of group + */ + protected abstract void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId); + + /** + * Invoked when processing group stage of grouped window queries to update + * arguments of this aggregate. + * + * @param session + * the session + * @param stage + * select stage + */ + protected void updateGroupAggregates(SessionLocal session, int stage) { + if (over != null) { + over.updateAggregate(session, stage); + } + } + + /** + * Returns the number of expressions, excluding OVER clause. + * + * @return the number of expressions + */ + protected abstract int getNumExpressions(); + + /** + * Returns the number of window frame expressions. + * + * @return the number of window frame expressions + */ + private int getNumFrameExpressions() { + return numFrameExpressions; + } + + /** + * Stores current values of expressions into the specified array. 
+ * + * @param session + * the session + * @param array + * array to store values of expressions + */ + protected abstract void rememberExpressions(SessionLocal session, Value[] array); + + /** + * Get the aggregate data for a window clause. + * + * @param session + * database session + * @param groupData + * aggregate group data + * @param forOrderBy + * true if this is for ORDER BY + * @return the aggregate data object, specific to each kind of aggregate. + */ + protected Object getWindowData(SessionLocal session, SelectGroups groupData, boolean forOrderBy) { + Object data; + Value key = over.getCurrentKey(session); + PartitionData partition = groupData.getWindowExprData(this, key); + if (partition == null) { + data = forOrderBy ? new ArrayList<>() : createAggregateData(); + groupData.setWindowExprData(this, key, new PartitionData(data)); + } else { + data = partition.getData(); + } + return data; + } + + /** + * Get the aggregate group data object from the collector object. + * + * @param groupData + * the collector object + * @param ifExists + * if true, return null if object not found, if false, return new + * object if nothing found + * @return group data object + */ + protected Object getGroupData(SelectGroups groupData, boolean ifExists) { + Object data; + data = groupData.getCurrentGroupExprData(this); + if (data == null) { + if (ifExists) { + return null; + } + data = createAggregateData(); + groupData.setCurrentGroupExprData(this, data); + } + return data; + } + + /** + * Create aggregate data object specific to the subclass. + * + * @return aggregate-specific data object. 
+ */ + protected abstract Object createAggregateData(); + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (over == null) { + return true; + } + switch (visitor.getType()) { + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.OPTIMIZABLE_AGGREGATE: + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.INDEPENDENT: + case ExpressionVisitor.DECREMENT_QUERY_LEVEL: + return false; + default: + return true; + } + } + + @Override + public Value getValue(SessionLocal session) { + SelectGroups groupData = select.getGroupDataIfCurrent(over != null); + if (groupData == null) { + throw DbException.get(ErrorCode.INVALID_USE_OF_AGGREGATE_FUNCTION_1, getTraceSQL()); + } + return over == null ? getAggregatedValue(session, getGroupData(groupData, true)) + : getWindowResult(session, groupData); + } + + /** + * Returns result of this window function or window aggregate. This method + * is not used for plain aggregates. + * + * @param session + * the session + * @param groupData + * the group data + * @return result of this function + */ + private Value getWindowResult(SessionLocal session, SelectGroups groupData) { + PartitionData partition; + Object data; + boolean isOrdered = over.isOrdered(); + Value key = over.getCurrentKey(session); + partition = groupData.getWindowExprData(this, key); + if (partition == null) { + // Window aggregates with FILTER clause may have no collected values + data = isOrdered ? 
new ArrayList<>() : createAggregateData(); + partition = new PartitionData(data); + groupData.setWindowExprData(this, key, partition); + } else { + data = partition.getData(); + } + if (isOrdered || !isAggregate()) { + Value result = getOrderedResult(session, groupData, partition, data); + if (result == null) { + return getAggregatedValue(session, null); + } + return result; + } + // Window aggregate without ORDER BY clause in window specification + Value result = partition.getResult(); + if (result == null) { + result = getAggregatedValue(session, data); + partition.setResult(result); + } + return result; + } + + /*** + * Returns aggregated value. + * + * @param session + * the session + * @param aggregateData + * the aggregate data + * @return aggregated value. + */ + protected abstract Value getAggregatedValue(SessionLocal session, Object aggregateData); + + /** + * Update a row of an ordered aggregate. + * + * @param session + * the database session + * @param groupData + * data for the aggregate group + * @param groupRowId + * row id of group + * @param orderBy + * list of order by expressions + */ + protected void updateOrderedAggregate(SessionLocal session, SelectGroups groupData, int groupRowId, + ArrayList orderBy) { + int ne = getNumExpressions(); + int size = orderBy != null ? 
orderBy.size() : 0; + int frameSize = getNumFrameExpressions(); + Value[] array = new Value[ne + size + frameSize + 1]; + rememberExpressions(session, array); + for (int i = 0; i < size; i++) { + @SuppressWarnings("null") + QueryOrderBy o = orderBy.get(i); + array[ne++] = o.expression.getValue(session); + } + if (frameSize > 0) { + WindowFrame frame = over.getWindowFrame(); + WindowFrameBound bound = frame.getStarting(); + if (bound.isVariable()) { + array[ne++] = bound.getValue().getValue(session); + } + bound = frame.getFollowing(); + if (bound != null && bound.isVariable()) { + array[ne++] = bound.getValue().getValue(session); + } + } + array[ne] = ValueInteger.get(groupRowId); + @SuppressWarnings("unchecked") + ArrayList data = (ArrayList) getWindowData(session, groupData, true); + data.add(array); + } + + private Value getOrderedResult(SessionLocal session, SelectGroups groupData, PartitionData partition, // + Object data) { + HashMap result = partition.getOrderedResult(); + if (result == null) { + result = new HashMap<>(); + @SuppressWarnings("unchecked") + ArrayList orderedData = (ArrayList) data; + int rowIdColumn = getNumExpressions(); + ArrayList orderBy = over.getOrderBy(); + if (orderBy != null) { + rowIdColumn += orderBy.size(); + orderedData.sort(overOrderBySort); + } + rowIdColumn += getNumFrameExpressions(); + getOrderedResultLoop(session, result, orderedData, rowIdColumn); + partition.setOrderedResult(result); + } + return result.get(groupData.getCurrentGroupRowId()); + } + + /** + * Returns result of this window function or window aggregate. This method + * may not be called on window aggregate without window order clause. 
+ * + * @param session + * the session + * @param result + * the map to append result to + * @param ordered + * ordered data + * @param rowIdColumn + * the index of row id value + */ + protected abstract void getOrderedResultLoop(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn); + + /** + * Used to create SQL for the OVER and FILTER clauses. + * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @param forceOrderBy + * whether synthetic ORDER BY clause should be generated when it + * is missing + * @return the builder object + */ + protected StringBuilder appendTailConditions(StringBuilder builder, int sqlFlags, boolean forceOrderBy) { + if (over != null) { + builder.append(' '); + over.getSQL(builder, sqlFlags, forceOrderBy); + } + return builder; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/PartitionData.java b/h2/src/main/org/h2/expression/analysis/PartitionData.java new file mode 100644 index 0000000000..afa7494f9f --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/PartitionData.java @@ -0,0 +1,91 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import java.util.HashMap; + +import org.h2.value.Value; + +/** + * Partition data of a window aggregate. + */ +public final class PartitionData { + + /** + * Aggregate data. + */ + private Object data; + + /** + * Evaluated result. + */ + private Value result; + + /** + * Evaluated ordered result. + */ + private HashMap orderedResult; + + /** + * Creates new instance of partition data. + * + * @param data + * aggregate data + */ + PartitionData(Object data) { + this.data = data; + } + + /** + * Returns the aggregate data. + * + * @return the aggregate data + */ + Object getData() { + return data; + } + + /** + * Returns the result. 
+ * + * @return the result + */ + Value getResult() { + return result; + } + + /** + * Sets the result. + * + * @param result + * the result to set + */ + void setResult(Value result) { + this.result = result; + data = null; + } + + /** + * Returns the ordered result. + * + * @return the ordered result + */ + HashMap getOrderedResult() { + return orderedResult; + } + + /** + * Sets the ordered result. + * + * @param orderedResult + * the ordered result to set + */ + void setOrderedResult(HashMap orderedResult) { + this.orderedResult = orderedResult; + data = null; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/Window.java b/h2/src/main/org/h2/expression/analysis/Window.java new file mode 100644 index 0000000000..7a26d1f18f --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/Window.java @@ -0,0 +1,338 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.ListIterator; + +import org.h2.api.ErrorCode; +import org.h2.command.query.QueryOrderBy; +import org.h2.command.query.Select; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.SortOrder; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.HasSQL; +import org.h2.value.Value; +import org.h2.value.ValueRow; + +/** + * Window clause. + */ +public final class Window { + + private ArrayList partitionBy; + + private ArrayList orderBy; + + private WindowFrame frame; + + private String parent; + + /** + * Appends ORDER BY clause to the specified builder. 
+ * + * @param builder + * string builder + * @param orderBy + * ORDER BY clause, or null + * @param sqlFlags + * formatting flags + * @param forceOrderBy + * whether synthetic ORDER BY clause should be generated when it + * is missing + */ + public static void appendOrderBy(StringBuilder builder, ArrayList orderBy, int sqlFlags, + boolean forceOrderBy) { + if (orderBy != null && !orderBy.isEmpty()) { + appendOrderByStart(builder); + for (int i = 0; i < orderBy.size(); i++) { + QueryOrderBy o = orderBy.get(i); + if (i > 0) { + builder.append(", "); + } + o.expression.getUnenclosedSQL(builder, sqlFlags); + SortOrder.typeToString(builder, o.sortType); + } + } else if (forceOrderBy) { + appendOrderByStart(builder); + builder.append("NULL"); + } + } + + private static void appendOrderByStart(StringBuilder builder) { + if (builder.charAt(builder.length() - 1) != '(') { + builder.append(' '); + } + builder.append("ORDER BY "); + } + + /** + * Creates a new instance of window clause. + * + * @param parent + * name of the parent window + * @param partitionBy + * PARTITION BY clause, or null + * @param orderBy + * ORDER BY clause, or null + * @param frame + * window frame clause, or null + */ + public Window(String parent, ArrayList partitionBy, ArrayList orderBy, + WindowFrame frame) { + this.parent = parent; + this.partitionBy = partitionBy; + this.orderBy = orderBy; + this.frame = frame; + } + + /** + * Map the columns of the resolver to expression columns. 
+ * + * @param resolver + * the column resolver + * @param level + * the subquery nesting level + * @see Expression#mapColumns(ColumnResolver, int, int) + */ + public void mapColumns(ColumnResolver resolver, int level) { + resolveWindows(resolver); + if (partitionBy != null) { + for (Expression e : partitionBy) { + e.mapColumns(resolver, level, Expression.MAP_IN_WINDOW); + } + } + if (orderBy != null) { + for (QueryOrderBy o : orderBy) { + o.expression.mapColumns(resolver, level, Expression.MAP_IN_WINDOW); + } + } + if (frame != null) { + frame.mapColumns(resolver, level, Expression.MAP_IN_WINDOW); + } + } + + private void resolveWindows(ColumnResolver resolver) { + if (parent != null) { + Select select = resolver.getSelect(); + Window p; + while ((p = select.getWindow(parent)) == null) { + select = select.getParentSelect(); + if (select == null) { + throw DbException.get(ErrorCode.WINDOW_NOT_FOUND_1, parent); + } + } + p.resolveWindows(resolver); + if (partitionBy == null) { + partitionBy = p.partitionBy; + } + if (orderBy == null) { + orderBy = p.orderBy; + } + if (frame == null) { + frame = p.frame; + } + parent = null; + } + } + + /** + * Try to optimize the window conditions. 
+ * + * @param session + * the session + */ + public void optimize(SessionLocal session) { + if (partitionBy != null) { + for (ListIterator i = partitionBy.listIterator(); i.hasNext();) { + Expression e = i.next().optimize(session); + if (e.isConstant()) { + i.remove(); + } else { + i.set(e); + } + } + if (partitionBy.isEmpty()) { + partitionBy = null; + } + } + if (orderBy != null) { + for (Iterator i = orderBy.iterator(); i.hasNext();) { + QueryOrderBy o = i.next(); + Expression e = o.expression.optimize(session); + if (e.isConstant()) { + i.remove(); + } else { + o.expression = e; + } + } + if (orderBy.isEmpty()) { + orderBy = null; + } + } + if (frame != null) { + frame.optimize(session); + } + } + + /** + * Tell the expression columns whether the table filter can return values + * now. This is used when optimizing the query. + * + * @param tableFilter + * the table filter + * @param value + * true if the table filter can return value + * @see Expression#setEvaluatable(TableFilter, boolean) + */ + public void setEvaluatable(TableFilter tableFilter, boolean value) { + if (partitionBy != null) { + for (Expression e : partitionBy) { + e.setEvaluatable(tableFilter, value); + } + } + if (orderBy != null) { + for (QueryOrderBy o : orderBy) { + o.expression.setEvaluatable(tableFilter, value); + } + } + } + + /** + * Returns ORDER BY clause. + * + * @return ORDER BY clause, or null + */ + public ArrayList getOrderBy() { + return orderBy; + } + + /** + * Returns window frame, or null. + * + * @return window frame, or null + */ + public WindowFrame getWindowFrame() { + return frame; + } + + /** + * Returns {@code true} if window ordering clause is specified or ROWS unit + * is used. 
+ * + * @return {@code true} if window ordering clause is specified or ROWS unit + * is used + */ + public boolean isOrdered() { + if (orderBy != null) { + return true; + } + if (frame != null && frame.getUnits() == WindowFrameUnits.ROWS) { + if (frame.getStarting().getType() == WindowFrameBoundType.UNBOUNDED_PRECEDING) { + WindowFrameBound following = frame.getFollowing(); + if (following != null && following.getType() == WindowFrameBoundType.UNBOUNDED_FOLLOWING) { + return false; + } + } + return true; + } + return false; + } + + /** + * Returns the key for the current group. + * + * @param session + * session + * @return key for the current group, or null + */ + public Value getCurrentKey(SessionLocal session) { + if (partitionBy == null) { + return null; + } + int len = partitionBy.size(); + if (len == 1) { + return partitionBy.get(0).getValue(session); + } else { + Value[] keyValues = new Value[len]; + // update group + for (int i = 0; i < len; i++) { + Expression expr = partitionBy.get(i); + keyValues[i] = expr.getValue(session); + } + return ValueRow.get(keyValues); + } + } + + /** + * Appends SQL representation to the specified builder. 
+ * + * @param builder + * string builder + * @param sqlFlags + * formatting flags + * @param forceOrderBy + * whether synthetic ORDER BY clause should be generated when it + * is missing + * @return the specified string builder + * @see Expression#getSQL(StringBuilder, int, int) + */ + public StringBuilder getSQL(StringBuilder builder, int sqlFlags, boolean forceOrderBy) { + builder.append("OVER ("); + if (partitionBy != null) { + builder.append("PARTITION BY "); + for (int i = 0; i < partitionBy.size(); i++) { + if (i > 0) { + builder.append(", "); + } + partitionBy.get(i).getUnenclosedSQL(builder, sqlFlags); + } + } + appendOrderBy(builder, orderBy, sqlFlags, forceOrderBy); + if (frame != null) { + if (builder.charAt(builder.length() - 1) != '(') { + builder.append(' '); + } + frame.getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + /** + * Update an aggregate value. + * + * @param session + * the session + * @param stage + * select stage + * @see Expression#updateAggregate(SessionLocal, int) + */ + public void updateAggregate(SessionLocal session, int stage) { + if (partitionBy != null) { + for (Expression expr : partitionBy) { + expr.updateAggregate(session, stage); + } + } + if (orderBy != null) { + for (QueryOrderBy o : orderBy) { + o.expression.updateAggregate(session, stage); + } + } + if (frame != null) { + frame.updateAggregate(session, stage); + } + } + + @Override + public String toString() { + return getSQL(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS, false).toString(); + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrame.java b/h2/src/main/org/h2/expression/analysis/WindowFrame.java new file mode 100644 index 0000000000..a5b40722d9 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFrame.java @@ -0,0 +1,877 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.BinaryOperation; +import org.h2.expression.BinaryOperation.OpType; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.result.SortOrder; +import org.h2.table.ColumnResolver; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Window frame clause. + */ +public final class WindowFrame { + + private abstract static class Itr implements Iterator { + + final ArrayList orderedRows; + + int cursor; + + Itr(ArrayList orderedRows) { + this.orderedRows = orderedRows; + } + + } + + private static class PlainItr extends Itr { + + final int endIndex; + + PlainItr(ArrayList orderedRows, int startIndex, int endIndex) { + super(orderedRows); + this.endIndex = endIndex; + cursor = startIndex; + } + + @Override + public boolean hasNext() { + return cursor <= endIndex; + } + + @Override + public Value[] next() { + if (cursor > endIndex) { + throw new NoSuchElementException(); + } + return orderedRows.get(cursor++); + } + + } + + private static class PlainReverseItr extends Itr { + + final int startIndex; + + PlainReverseItr(ArrayList orderedRows, int startIndex, int endIndex) { + super(orderedRows); + this.startIndex = startIndex; + cursor = endIndex; + } + + @Override + public boolean hasNext() { + return cursor >= startIndex; + } + + @Override + public Value[] next() { + if (cursor < startIndex) { + throw new NoSuchElementException(); + } + return orderedRows.get(cursor--); + } + + } + + private static class BiItr extends PlainItr { + + final int end1, start1; + + BiItr(ArrayList orderedRows, int startIndex1, int endIndex1, int startIndex2, int endIndex2) { + super(orderedRows, startIndex1, 
endIndex2); + end1 = endIndex1; + start1 = startIndex2; + } + + @Override + public Value[] next() { + if (cursor > endIndex) { + throw new NoSuchElementException(); + } + Value[] r = orderedRows.get(cursor); + cursor = cursor != end1 ? cursor + 1 : start1; + return r; + } + + } + + private static class BiReverseItr extends PlainReverseItr { + + final int end1, start1; + + BiReverseItr(ArrayList orderedRows, int startIndex1, int endIndex1, int startIndex2, int endIndex2) { + super(orderedRows, startIndex1, endIndex2); + end1 = endIndex1; + start1 = startIndex2; + } + + @Override + public Value[] next() { + if (cursor < startIndex) { + throw new NoSuchElementException(); + } + Value[] r = orderedRows.get(cursor); + cursor = cursor != start1 ? cursor - 1 : end1; + return r; + } + + } + + private static final class TriItr extends BiItr { + + private final int end2, start2; + + TriItr(ArrayList orderedRows, int startIndex1, int endIndex1, int startIndex2, int endIndex2, + int startIndex3, int endIndex3) { + super(orderedRows, startIndex1, endIndex1, startIndex2, endIndex3); + end2 = endIndex2; + start2 = startIndex3; + } + + @Override + public Value[] next() { + if (cursor > endIndex) { + throw new NoSuchElementException(); + } + Value[] r = orderedRows.get(cursor); + cursor = cursor != end1 ? cursor != end2 ? cursor + 1 : start2 : start1; + return r; + } + + } + + private static final class TriReverseItr extends BiReverseItr { + + private final int end2, start2; + + TriReverseItr(ArrayList orderedRows, int startIndex1, int endIndex1, int startIndex2, int endIndex2, + int startIndex3, int endIndex3) { + super(orderedRows, startIndex1, endIndex1, startIndex2, endIndex3); + end2 = endIndex2; + start2 = startIndex3; + } + + @Override + public Value[] next() { + if (cursor < startIndex) { + throw new NoSuchElementException(); + } + Value[] r = orderedRows.get(cursor); + cursor = cursor != start1 ? cursor != start2 ? 
cursor - 1 : end2 : end1; + return r; + } + + } + + private final WindowFrameUnits units; + + private final WindowFrameBound starting; + + private final WindowFrameBound following; + + private final WindowFrameExclusion exclusion; + + /** + * Returns iterator for the specified frame, or default iterator if frame is + * null. + * + * @param over + * window + * @param session + * the session + * @param orderedRows + * ordered rows + * @param sortOrder + * sort order + * @param currentRow + * index of the current row + * @param reverse + * whether iterator should iterate in reverse order + * @return iterator + */ + public static Iterator iterator(Window over, SessionLocal session, ArrayList orderedRows, + SortOrder sortOrder, int currentRow, boolean reverse) { + WindowFrame frame = over.getWindowFrame(); + if (frame != null) { + return frame.iterator(session, orderedRows, sortOrder, currentRow, reverse); + } + int endIndex = orderedRows.size() - 1; + return plainIterator(orderedRows, 0, + over.getOrderBy() == null ? endIndex : toGroupEnd(orderedRows, sortOrder, currentRow, endIndex), + reverse); + } + + /** + * Returns end index for the specified frame, or default end index if frame + * is null. + * + * @param over + * window + * @param session + * the session + * @param orderedRows + * ordered rows + * @param sortOrder + * sort order + * @param currentRow + * index of the current row + * @return end index + * @throws UnsupportedOperationException + * if over is not null and its exclusion clause is not EXCLUDE + * NO OTHERS + */ + public static int getEndIndex(Window over, SessionLocal session, ArrayList orderedRows, + SortOrder sortOrder, int currentRow) { + WindowFrame frame = over.getWindowFrame(); + if (frame != null) { + return frame.getEndIndex(session, orderedRows, sortOrder, currentRow); + } + int endIndex = orderedRows.size() - 1; + return over.getOrderBy() == null ? 
endIndex : toGroupEnd(orderedRows, sortOrder, currentRow, endIndex); + } + + private static Iterator plainIterator(ArrayList orderedRows, int startIndex, int endIndex, + boolean reverse) { + if (endIndex < startIndex) { + return Collections.emptyIterator(); + } + return reverse ? new PlainReverseItr(orderedRows, startIndex, endIndex) + : new PlainItr(orderedRows, startIndex, endIndex); + } + + private static Iterator biIterator(ArrayList orderedRows, int startIndex1, int endIndex1, + int startIndex2, int endIndex2, boolean reverse) { + return reverse ? new BiReverseItr(orderedRows, startIndex1, endIndex1, startIndex2, endIndex2) + : new BiItr(orderedRows, startIndex1, endIndex1, startIndex2, endIndex2); + } + + private static Iterator triIterator(ArrayList orderedRows, int startIndex1, int endIndex1, + int startIndex2, int endIndex2, int startIndex3, int endIndex3, boolean reverse) { + return reverse ? new TriReverseItr(orderedRows, startIndex1, endIndex1, startIndex2, endIndex2, // + startIndex3, endIndex3) + : new TriItr(orderedRows, startIndex1, endIndex1, startIndex2, endIndex2, startIndex3, endIndex3); + } + + private static int toGroupStart(ArrayList orderedRows, SortOrder sortOrder, int offset, int minOffset) { + Value[] row = orderedRows.get(offset); + while (offset > minOffset && sortOrder.compare(row, orderedRows.get(offset - 1)) == 0) { + offset--; + } + return offset; + } + + private static int toGroupEnd(ArrayList orderedRows, SortOrder sortOrder, int offset, int maxOffset) { + Value[] row = orderedRows.get(offset); + while (offset < maxOffset && sortOrder.compare(row, orderedRows.get(offset + 1)) == 0) { + offset++; + } + return offset; + } + + private static int getIntOffset(WindowFrameBound bound, Value[] values, SessionLocal session) { + Value v = bound.isVariable() ? 
values[bound.getExpressionIndex()] : bound.getValue().getValue(session); + int value; + if (v == ValueNull.INSTANCE || (value = v.getInt()) < 0) { + throw DbException.get(ErrorCode.INVALID_PRECEDING_OR_FOLLOWING_1, v.getTraceSQL()); + } + return value; + } + + /** + * Appends bound value to the current row and produces row for comparison + * operations. + * + * @param session + * the session + * @param orderedRows + * rows in partition + * @param sortOrder + * the sort order + * @param currentRow + * index of the current row + * @param bound + * window frame bound + * @param add + * false for PRECEDING, true for FOLLOWING + * @return row for comparison operations, or null if result is out of range + * and should be treated as UNLIMITED + */ + private static Value[] getCompareRow(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, + int currentRow, WindowFrameBound bound, boolean add) { + int sortIndex = sortOrder.getQueryColumnIndexes()[0]; + Value[] row = orderedRows.get(currentRow); + Value currentValue = row[sortIndex]; + int type = currentValue.getValueType(); + Value newValue; + Value range = getValueOffset(bound, orderedRows.get(currentRow), session); + switch (type) { + case Value.NULL: + newValue = ValueNull.INSTANCE; + break; + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.REAL: + case Value.DOUBLE: + case Value.DECFLOAT: + case Value.TIME: + case Value.TIME_TZ: + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + OpType 
opType = add ^ (sortOrder.getSortTypes()[0] & SortOrder.DESCENDING) != 0 ? OpType.PLUS + : OpType.MINUS; + try { + newValue = new BinaryOperation(opType, ValueExpression.get(currentValue), ValueExpression.get(range)) + .optimize(session).getValue(session).convertTo(type); + } catch (DbException ex) { + switch (ex.getErrorCode()) { + case ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1: + case ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_2: + return null; + } + throw ex; + } + break; + default: + throw DbException.getInvalidValueException("unsupported type of sort key for RANGE units", + currentValue.getTraceSQL()); + } + Value[] newRow = row.clone(); + newRow[sortIndex] = newValue; + return newRow; + } + + private static Value getValueOffset(WindowFrameBound bound, Value[] values, SessionLocal session) { + Value value = bound.isVariable() ? values[bound.getExpressionIndex()] : bound.getValue().getValue(session); + if (value == ValueNull.INSTANCE || value.getSignum() < 0) { + throw DbException.get(ErrorCode.INVALID_PRECEDING_OR_FOLLOWING_1, value.getTraceSQL()); + } + return value; + } + + /** + * Creates new instance of window frame clause. + * + * @param units + * units + * @param starting + * starting clause + * @param following + * following clause + * @param exclusion + * exclusion clause + */ + public WindowFrame(WindowFrameUnits units, WindowFrameBound starting, WindowFrameBound following, + WindowFrameExclusion exclusion) { + this.units = units; + this.starting = starting; + if (following != null && following.getType() == WindowFrameBoundType.CURRENT_ROW) { + following = null; + } + this.following = following; + this.exclusion = exclusion; + } + + /** + * Returns the units. + * + * @return the units + */ + public WindowFrameUnits getUnits() { + return units; + } + + /** + * Returns the starting clause. + * + * @return the starting clause + */ + public WindowFrameBound getStarting() { + return starting; + } + + /** + * Returns the following clause. 
+ * + * @return the following clause, or null + */ + public WindowFrameBound getFollowing() { + return following; + } + + /** + * Returns the exclusion clause. + * + * @return the exclusion clause + */ + public WindowFrameExclusion getExclusion() { + return exclusion; + } + + /** + * Checks validity of this frame. + * + * @return whether bounds of this frame valid + */ + public boolean isValid() { + WindowFrameBoundType s = starting.getType(), + f = following != null ? following.getType() : WindowFrameBoundType.CURRENT_ROW; + return s != WindowFrameBoundType.UNBOUNDED_FOLLOWING && f != WindowFrameBoundType.UNBOUNDED_PRECEDING + && s.compareTo(f) <= 0; + } + + /** + * Check if bounds of this frame has variable expressions. This method may + * be used only after {@link #optimize(SessionLocal)} invocation. + * + * @return if bounds of this frame has variable expressions + */ + public boolean isVariableBounds() { + if (starting.isVariable()) { + return true; + } + if (following != null && following.isVariable()) { + return true; + } + return false; + } + + /** + * Map the columns of the resolver to expression columns. + * + * @param resolver + * the column resolver + * @param level + * the subquery nesting level + * @param state + * current state for nesting checks + */ + void mapColumns(ColumnResolver resolver, int level, int state) { + starting.mapColumns(resolver, level, state); + if (following != null) { + following.mapColumns(resolver, level, state); + } + } + + /** + * Try to optimize bound expressions. + * + * @param session + * the session + */ + void optimize(SessionLocal session) { + starting.optimize(session); + if (following != null) { + following.optimize(session); + } + } + + /** + * Update an aggregate value. 
+ * + * @param session + * the session + * @param stage + * select stage + * @see Expression#updateAggregate(SessionLocal, int) + */ + void updateAggregate(SessionLocal session, int stage) { + starting.updateAggregate(session, stage); + if (following != null) { + following.updateAggregate(session, stage); + } + } + + /** + * Returns iterator. + * + * @param session + * the session + * @param orderedRows + * ordered rows + * @param sortOrder + * sort order + * @param currentRow + * index of the current row + * @param reverse + * whether iterator should iterate in reverse order + * @return iterator + */ + public Iterator iterator(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, + int currentRow, boolean reverse) { + int startIndex = getIndex(session, orderedRows, sortOrder, currentRow, starting, false); + int endIndex = following != null ? getIndex(session, orderedRows, sortOrder, currentRow, following, true) + : units == WindowFrameUnits.ROWS ? currentRow + : toGroupEnd(orderedRows, sortOrder, currentRow, orderedRows.size() - 1); + if (endIndex < startIndex) { + return Collections.emptyIterator(); + } + int size = orderedRows.size(); + if (startIndex >= size || endIndex < 0) { + return Collections.emptyIterator(); + } + if (startIndex < 0) { + startIndex = 0; + } + if (endIndex >= size) { + endIndex = size - 1; + } + return exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS + ? 
complexIterator(orderedRows, sortOrder, currentRow, startIndex, endIndex, reverse) + : plainIterator(orderedRows, startIndex, endIndex, reverse); + } + + /** + * Returns start index of this frame, + * + * @param session + * the session + * @param orderedRows + * ordered rows + * @param sortOrder + * sort order + * @param currentRow + * index of the current row + * @return start index + * @throws UnsupportedOperationException + * if exclusion clause is not EXCLUDE NO OTHERS + */ + public int getStartIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, // + int currentRow) { + if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { + throw new UnsupportedOperationException(); + } + int startIndex = getIndex(session, orderedRows, sortOrder, currentRow, starting, false); + if (startIndex < 0) { + startIndex = 0; + } + return startIndex; + } + + /** + * Returns end index of this frame, + * + * @param session + * the session + * @param orderedRows + * ordered rows + * @param sortOrder + * sort order + * @param currentRow + * index of the current row + * @return end index + * @throws UnsupportedOperationException + * if exclusion clause is not EXCLUDE NO OTHERS + */ + private int getEndIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, // + int currentRow) { + if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { + throw new UnsupportedOperationException(); + } + int endIndex = following != null ? getIndex(session, orderedRows, sortOrder, currentRow, following, true) + : units == WindowFrameUnits.ROWS ? currentRow + : toGroupEnd(orderedRows, sortOrder, currentRow, orderedRows.size() - 1); + int size = orderedRows.size(); + if (endIndex >= size) { + endIndex = size - 1; + } + return endIndex; + } + + /** + * Returns starting or ending index of a window frame. 
+ * + * @param session + * the session + * @param orderedRows + * rows in partition + * @param sortOrder + * the sort order + * @param currentRow + * index of the current row + * @param bound + * window frame bound + * @param forFollowing + * false for start index, true for end index + * @return starting or ending index of a window frame (inclusive), can be 0 + * or be equal to the number of rows if frame is not limited from + * that side + */ + private int getIndex(SessionLocal session, ArrayList orderedRows, SortOrder sortOrder, int currentRow, + WindowFrameBound bound, boolean forFollowing) { + int size = orderedRows.size(); + int last = size - 1; + int index; + switch (bound.getType()) { + case UNBOUNDED_PRECEDING: + index = -1; + break; + case PRECEDING: + switch (units) { + case ROWS: { + int value = getIntOffset(bound, orderedRows.get(currentRow), session); + index = value > currentRow ? -1 : currentRow - value; + break; + } + case GROUPS: { + int value = getIntOffset(bound, orderedRows.get(currentRow), session); + if (!forFollowing) { + index = toGroupStart(orderedRows, sortOrder, currentRow, 0); + while (value > 0 && index > 0) { + value--; + index = toGroupStart(orderedRows, sortOrder, index - 1, 0); + } + if (value > 0) { + index = -1; + } + } else { + if (value == 0) { + index = toGroupEnd(orderedRows, sortOrder, currentRow, last); + } else { + index = currentRow; + while (value > 0 && index >= 0) { + value--; + index = toGroupStart(orderedRows, sortOrder, index, 0) - 1; + } + } + } + break; + } + case RANGE: { + index = currentRow; + Value[] row = getCompareRow(session, orderedRows, sortOrder, index, bound, false); + if (row != null) { + index = Collections.binarySearch(orderedRows, row, sortOrder); + if (index >= 0) { + if (!forFollowing) { + while (index > 0 && sortOrder.compare(row, orderedRows.get(index - 1)) == 0) { + index--; + } + } else { + while (index < last && sortOrder.compare(row, orderedRows.get(index + 1)) == 0) { + index++; + } + } + } 
else { + index = ~index; + if (!forFollowing) { + if (index == 0) { + index = -1; + } + } else { + index--; + } + } + } else { + index = -1; + } + break; + } + default: + throw DbException.getUnsupportedException("units=" + units); + } + break; + case CURRENT_ROW: + switch (units) { + case ROWS: + index = currentRow; + break; + case GROUPS: + case RANGE: + index = forFollowing ? toGroupEnd(orderedRows, sortOrder, currentRow, last) + : toGroupStart(orderedRows, sortOrder, currentRow, 0); + break; + default: + throw DbException.getUnsupportedException("units=" + units); + } + break; + case FOLLOWING: + switch (units) { + case ROWS: { + int value = getIntOffset(bound, orderedRows.get(currentRow), session); + int rem = last - currentRow; + index = value > rem ? size : currentRow + value; + break; + } + case GROUPS: { + int value = getIntOffset(bound, orderedRows.get(currentRow), session); + if (forFollowing) { + index = toGroupEnd(orderedRows, sortOrder, currentRow, last); + while (value > 0 && index < last) { + value--; + index = toGroupEnd(orderedRows, sortOrder, index + 1, last); + } + if (value > 0) { + index = size; + } + } else { + if (value == 0) { + index = toGroupStart(orderedRows, sortOrder, currentRow, 0); + } else { + index = currentRow; + while (value > 0 && index <= last) { + value--; + index = toGroupEnd(orderedRows, sortOrder, index, last) + 1; + } + } + } + break; + } + case RANGE: { + index = currentRow; + Value[] row = getCompareRow(session, orderedRows, sortOrder, index, bound, true); + if (row != null) { + index = Collections.binarySearch(orderedRows, row, sortOrder); + if (index >= 0) { + if (forFollowing) { + while (index < last && sortOrder.compare(row, orderedRows.get(index + 1)) == 0) { + index++; + } + } else { + while (index > 0 && sortOrder.compare(row, orderedRows.get(index - 1)) == 0) { + index--; + } + } + } else { + index = ~index; + if (forFollowing) { + if (index != size) { + index--; + } + } + } + } else { + index = size; + } + 
break; + } + default: + throw DbException.getUnsupportedException("units=" + units); + } + break; + case UNBOUNDED_FOLLOWING: + index = size; + break; + default: + throw DbException.getUnsupportedException("window frame bound type=" + bound.getType()); + } + return index; + } + + private Iterator complexIterator(ArrayList orderedRows, SortOrder sortOrder, int currentRow, + int startIndex, int endIndex, boolean reverse) { + if (exclusion == WindowFrameExclusion.EXCLUDE_CURRENT_ROW) { + if (currentRow < startIndex || currentRow > endIndex) { + // Nothing to exclude + } else if (currentRow == startIndex) { + startIndex++; + } else if (currentRow == endIndex) { + endIndex--; + } else { + return biIterator(orderedRows, startIndex, currentRow - 1, currentRow + 1, endIndex, reverse); + } + } else { + // Do not include previous rows if they are not in the range + int exStart = toGroupStart(orderedRows, sortOrder, currentRow, startIndex); + // Do not include next rows if they are not in the range + int exEnd = toGroupEnd(orderedRows, sortOrder, currentRow, endIndex); + boolean includeCurrentRow = exclusion == WindowFrameExclusion.EXCLUDE_TIES; + if (includeCurrentRow) { + // Simplify exclusion if possible + if (currentRow == exStart) { + exStart++; + includeCurrentRow = false; + } else if (currentRow == exEnd) { + exEnd--; + includeCurrentRow = false; + } + } + if (exStart > exEnd || exEnd < startIndex || exStart > endIndex) { + // Empty range or nothing to exclude + } else if (includeCurrentRow) { + if (startIndex == exStart) { + if (endIndex == exEnd) { + return Collections.singleton(orderedRows.get(currentRow)).iterator(); + } else { + return biIterator(orderedRows, currentRow, currentRow, exEnd + 1, endIndex, reverse); + } + } else { + if (endIndex == exEnd) { + return biIterator(orderedRows, startIndex, exStart - 1, currentRow, currentRow, reverse); + } else { + return triIterator(orderedRows, startIndex, exStart - 1, currentRow, currentRow, exEnd + 1, + endIndex, 
reverse); + } + } + } else { + if (startIndex >= exStart) { + startIndex = exEnd + 1; + } else if (endIndex <= exEnd) { + endIndex = exStart - 1; + } else { + return biIterator(orderedRows, startIndex, exStart - 1, exEnd + 1, endIndex, reverse); + } + } + } + return plainIterator(orderedRows, startIndex, endIndex, reverse); + } + + /** + * Append SQL representation to the specified builder. + * + * @param builder + * string builder + * @param formattingFlags + * quote all identifiers + * @return the specified string builder + * @see org.h2.expression.Expression#getSQL(StringBuilder, int, int) + */ + public StringBuilder getSQL(StringBuilder builder, int formattingFlags) { + builder.append(units.getSQL()); + if (following == null) { + builder.append(' '); + starting.getSQL(builder, false, formattingFlags); + } else { + builder.append(" BETWEEN "); + starting.getSQL(builder, false, formattingFlags).append(" AND "); + following.getSQL(builder, true, formattingFlags); + } + if (exclusion != WindowFrameExclusion.EXCLUDE_NO_OTHERS) { + builder.append(' ').append(exclusion.getSQL()); + } + return builder; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java b/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java new file mode 100644 index 0000000000..ca520458d3 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameBound.java @@ -0,0 +1,164 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.table.ColumnResolver; + +/** + * Window frame bound. 
+ */ +public class WindowFrameBound { + + private final WindowFrameBoundType type; + + private Expression value; + + private boolean isVariable; + + private int expressionIndex = -1; + + /** + * Creates new instance of window frame bound. + * + * @param type + * bound type + * @param value + * bound value, if any + */ + public WindowFrameBound(WindowFrameBoundType type, Expression value) { + this.type = type; + if (type == WindowFrameBoundType.PRECEDING || type == WindowFrameBoundType.FOLLOWING) { + this.value = value; + } else { + this.value = null; + } + } + + /** + * Returns the type + * + * @return the type + */ + public WindowFrameBoundType getType() { + return type; + } + + /** + * Returns the value. + * + * @return the value + */ + public Expression getValue() { + return value; + } + + /** + * Returns whether bound is defined as n PRECEDING or n FOLLOWING. + * + * @return whether bound is defined as n PRECEDING or n FOLLOWING + */ + public boolean isParameterized() { + return type == WindowFrameBoundType.PRECEDING || type == WindowFrameBoundType.FOLLOWING; + } + + /** + * Returns whether bound is defined with a variable. This method may be used + * only after {@link #optimize(SessionLocal)} invocation. + * + * @return whether bound is defined with a variable + */ + public boolean isVariable() { + return isVariable; + } + + /** + * Returns the index of preserved expression. + * + * @return the index of preserved expression, or -1 + */ + public int getExpressionIndex() { + return expressionIndex; + } + + /** + * Sets the index of preserved expression. + * + * @param expressionIndex + * the index to set + */ + void setExpressionIndex(int expressionIndex) { + this.expressionIndex = expressionIndex; + } + + /** + * Map the columns of the resolver to expression columns. 
+ * + * @param resolver + * the column resolver + * @param level + * the subquery nesting level + * @param state + * current state for nesting checks + */ + void mapColumns(ColumnResolver resolver, int level, int state) { + if (value != null) { + value.mapColumns(resolver, level, state); + } + } + + /** + * Try to optimize bound expression. + * + * @param session + * the session + */ + void optimize(SessionLocal session) { + if (value != null) { + value = value.optimize(session); + if (!value.isConstant()) { + isVariable = true; + } + } + } + + /** + * Update an aggregate value. + * + * @param session + * the session + * @param stage + * select stage + * @see Expression#updateAggregate(SessionLocal, int) + */ + void updateAggregate(SessionLocal session, int stage) { + if (value != null) { + value.updateAggregate(session, stage); + } + } + + /** + * Appends SQL representation to the specified builder. + * + * @param builder + * string builder + * @param following + * if false return SQL for starting clause, if true return SQL + * for following clause + * @param sqlFlags + * formatting flags + * @return the specified string builder + * @see Expression#getSQL(StringBuilder, int, int) + */ + public StringBuilder getSQL(StringBuilder builder, boolean following, int sqlFlags) { + if (type == WindowFrameBoundType.PRECEDING || type == WindowFrameBoundType.FOLLOWING) { + value.getUnenclosedSQL(builder, sqlFlags).append(' '); + } + return builder.append(type.getSQL()); + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java b/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java new file mode 100644 index 0000000000..27b2e3a274 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameBoundType.java @@ -0,0 +1,54 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +/** + * Window frame bound type. + */ +public enum WindowFrameBoundType { + + /** + * UNBOUNDED PRECEDING clause. + */ + UNBOUNDED_PRECEDING("UNBOUNDED PRECEDING"), + + /** + * PRECEDING clause. + */ + PRECEDING("PRECEDING"), + + /** + * CURRENT_ROW clause. + */ + CURRENT_ROW("CURRENT ROW"), + + /** + * FOLLOWING clause. + */ + FOLLOWING("FOLLOWING"), + + /** + * UNBOUNDED FOLLOWING clause. + */ + UNBOUNDED_FOLLOWING("UNBOUNDED FOLLOWING"); + + private final String sql; + + private WindowFrameBoundType(String sql) { + this.sql = sql; + } + + /** + * Returns SQL representation. + * + * @return SQL representation. + * @see org.h2.expression.Expression#getSQL(int) + */ + public String getSQL() { + return sql; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java b/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java new file mode 100644 index 0000000000..e587732c50 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameExclusion.java @@ -0,0 +1,62 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +/** + * Window frame exclusion clause. + */ +public enum WindowFrameExclusion { + + /** + * EXCLUDE CURRENT ROW exclusion clause. + */ + EXCLUDE_CURRENT_ROW("EXCLUDE CURRENT ROW"), + + /** + * EXCLUDE GROUP exclusion clause. + */ + EXCLUDE_GROUP("EXCLUDE GROUP"), + + /** + * EXCLUDE TIES exclusion clause. + */ + EXCLUDE_TIES("EXCLUDE TIES"), + + /** + * EXCLUDE NO OTHERS exclusion clause. + */ + EXCLUDE_NO_OTHERS("EXCLUDE NO OTHERS"), + + ; + + private final String sql; + + private WindowFrameExclusion(String sql) { + this.sql = sql; + } + + /** + * Returns true if this exclusion clause excludes or includes the whole + * group. 
+ * + * @return true if this exclusion clause is {@link #EXCLUDE_GROUP} or + * {@link #EXCLUDE_NO_OTHERS} + */ + public boolean isGroupOrNoOthers() { + return this == WindowFrameExclusion.EXCLUDE_GROUP || this == EXCLUDE_NO_OTHERS; + } + + /** + * Returns SQL representation. + * + * @return SQL representation. + * @see org.h2.expression.Expression#getSQL(int) + */ + public String getSQL() { + return sql; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java b/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java new file mode 100644 index 0000000000..081438ea90 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFrameUnits.java @@ -0,0 +1,40 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +/** + * Window frame units. + */ +public enum WindowFrameUnits { + + /** + * ROWS unit. + */ + ROWS, + + /** + * RANGE unit. + */ + RANGE, + + /** + * GROUPS unit. + */ + GROUPS, + + ; + + /** + * Returns SQL representation. + * + * @return SQL representation. + * @see org.h2.expression.Expression#getSQL(int) + */ + public String getSQL() { + return name(); + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFunction.java b/h2/src/main/org/h2/expression/analysis/WindowFunction.java new file mode 100644 index 0000000000..c3ddc40e63 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFunction.java @@ -0,0 +1,544 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; + +import org.h2.command.query.Select; +import org.h2.command.query.SelectGroups; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * A window function. + */ +public class WindowFunction extends DataAnalysisOperation { + + private final WindowFunctionType type; + + private final Expression[] args; + + private boolean fromLast; + + private boolean ignoreNulls; + + /** + * Returns minimal number of arguments for the specified type. + * + * @param type + * the type of a window function + * @return minimal number of arguments + */ + public static int getMinArgumentCount(WindowFunctionType type) { + switch (type) { + case NTILE: + case LEAD: + case LAG: + case FIRST_VALUE: + case LAST_VALUE: + case RATIO_TO_REPORT: + return 1; + case NTH_VALUE: + return 2; + default: + return 0; + } + } + + /** + * Returns maximal number of arguments for the specified type. 
+ * + * @param type + * the type of a window function + * @return maximal number of arguments + */ + public static int getMaxArgumentCount(WindowFunctionType type) { + switch (type) { + case NTILE: + case FIRST_VALUE: + case LAST_VALUE: + case RATIO_TO_REPORT: + return 1; + case LEAD: + case LAG: + return 3; + case NTH_VALUE: + return 2; + default: + return 0; + } + } + + private static Value getNthValue(Iterator iterator, int number, boolean ignoreNulls) { + Value v = ValueNull.INSTANCE; + int cnt = 0; + while (iterator.hasNext()) { + Value t = iterator.next()[0]; + if (!ignoreNulls || t != ValueNull.INSTANCE) { + if (cnt++ == number) { + v = t; + break; + } + } + } + return v; + } + + /** + * Creates new instance of a window function. + * + * @param type + * the type + * @param select + * the select statement + * @param args + * arguments, or null + */ + public WindowFunction(WindowFunctionType type, Select select, Expression[] args) { + super(select); + this.type = type; + this.args = args; + } + + /** + * Returns the type of this function. + * + * @return the type of this function + */ + public WindowFunctionType getFunctionType() { + return type; + } + + /** + * Sets FROM FIRST or FROM LAST clause value. + * + * @param fromLast + * whether FROM LAST clause was specified. + */ + public void setFromLast(boolean fromLast) { + this.fromLast = fromLast; + } + + /** + * Sets RESPECT NULLS or IGNORE NULLS clause value. 
+ * + * @param ignoreNulls + * whether IGNORE NULLS clause was specified + */ + public void setIgnoreNulls(boolean ignoreNulls) { + this.ignoreNulls = ignoreNulls; + } + + @Override + public boolean isAggregate() { + return false; + } + + @Override + protected void updateAggregate(SessionLocal session, SelectGroups groupData, int groupRowId) { + updateOrderedAggregate(session, groupData, groupRowId, over.getOrderBy()); + } + + @Override + protected void updateGroupAggregates(SessionLocal session, int stage) { + super.updateGroupAggregates(session, stage); + if (args != null) { + for (Expression expr : args) { + expr.updateAggregate(session, stage); + } + } + } + + @Override + protected int getNumExpressions() { + return args != null ? args.length : 0; + } + + @Override + protected void rememberExpressions(SessionLocal session, Value[] array) { + if (args != null) { + for (int i = 0, cnt = args.length; i < cnt; i++) { + array[i] = args[i].getValue(session); + } + } + } + + @Override + protected Object createAggregateData() { + throw DbException.getUnsupportedException("Window function"); + } + + @Override + protected void getOrderedResultLoop(SessionLocal session, HashMap result, + ArrayList ordered, int rowIdColumn) { + switch (type) { + case ROW_NUMBER: + for (int i = 0, size = ordered.size(); i < size;) { + result.put(ordered.get(i)[rowIdColumn].getInt(), ValueBigint.get(++i)); + } + break; + case RANK: + case DENSE_RANK: + case PERCENT_RANK: + getRank(result, ordered, rowIdColumn); + break; + case CUME_DIST: + getCumeDist(result, ordered, rowIdColumn); + break; + case NTILE: + getNtile(result, ordered, rowIdColumn); + break; + case LEAD: + case LAG: + getLeadLag(result, ordered, rowIdColumn, session); + break; + case FIRST_VALUE: + case LAST_VALUE: + case NTH_VALUE: + getNth(session, result, ordered, rowIdColumn); + break; + case RATIO_TO_REPORT: + getRatioToReport(result, ordered, rowIdColumn); + break; + default: + throw DbException.getInternalError("type=" + 
type); + } + } + + private void getRank(HashMap result, ArrayList ordered, int rowIdColumn) { + int size = ordered.size(); + int number = 0; + for (int i = 0; i < size; i++) { + Value[] row = ordered.get(i); + if (i == 0) { + number = 1; + } else if (getOverOrderBySort().compare(ordered.get(i - 1), row) != 0) { + if (type == WindowFunctionType.DENSE_RANK) { + number++; + } else { + number = i + 1; + } + } + Value v; + if (type == WindowFunctionType.PERCENT_RANK) { + int nm = number - 1; + v = nm == 0 ? ValueDouble.ZERO : ValueDouble.get((double) nm / (size - 1)); + } else { + v = ValueBigint.get(number); + } + result.put(row[rowIdColumn].getInt(), v); + } + } + + private void getCumeDist(HashMap result, ArrayList orderedData, int rowIdColumn) { + int size = orderedData.size(); + for (int start = 0; start < size;) { + Value[] array = orderedData.get(start); + int end = start + 1; + while (end < size && overOrderBySort.compare(array, orderedData.get(end)) == 0) { + end++; + } + ValueDouble v = ValueDouble.get((double) end / size); + for (int i = start; i < end; i++) { + int rowId = orderedData.get(i)[rowIdColumn].getInt(); + result.put(rowId, v); + } + start = end; + } + } + + private static void getNtile(HashMap result, ArrayList orderedData, int rowIdColumn) { + int size = orderedData.size(); + for (int i = 0; i < size; i++) { + Value[] array = orderedData.get(i); + long buckets = array[0].getLong(); + if (buckets <= 0) { + throw DbException.getInvalidValueException("number of tiles", buckets); + } + long perTile = size / buckets; + long numLarger = size - perTile * buckets; + long largerGroup = numLarger * (perTile + 1); + long v; + if (i >= largerGroup) { + v = (i - largerGroup) / perTile + numLarger + 1; + } else { + v = i / (perTile + 1) + 1; + } + result.put(orderedData.get(i)[rowIdColumn].getInt(), ValueBigint.get(v)); + } + } + + private void getLeadLag(HashMap result, ArrayList ordered, int rowIdColumn, + SessionLocal session) { + int size = ordered.size(); 
+ int numExpressions = getNumExpressions(); + TypeInfo dataType = args[0].getType(); + for (int i = 0; i < size; i++) { + Value[] row = ordered.get(i); + int rowId = row[rowIdColumn].getInt(); + int n; + if (numExpressions >= 2) { + n = row[1].getInt(); + // 0 is valid here + if (n < 0) { + throw DbException.getInvalidValueException("nth row", n); + } + } else { + n = 1; + } + Value v = null; + if (n == 0) { + v = ordered.get(i)[0]; + } else if (type == WindowFunctionType.LEAD) { + if (ignoreNulls) { + for (int j = i + 1; n > 0 && j < size; j++) { + v = ordered.get(j)[0]; + if (v != ValueNull.INSTANCE) { + n--; + } + } + if (n > 0) { + v = null; + } + } else { + if (n <= size - i - 1) { + v = ordered.get(i + n)[0]; + } + } + } else /* LAG */ { + if (ignoreNulls) { + for (int j = i - 1; n > 0 && j >= 0; j--) { + v = ordered.get(j)[0]; + if (v != ValueNull.INSTANCE) { + n--; + } + } + if (n > 0) { + v = null; + } + } else { + if (n <= i) { + v = ordered.get(i - n)[0]; + } + } + } + if (v == null) { + if (numExpressions >= 3) { + v = row[2].convertTo(dataType, session); + } else { + v = ValueNull.INSTANCE; + } + } + result.put(rowId, v); + } + } + + private void getNth(SessionLocal session, HashMap result, ArrayList ordered, + int rowIdColumn) { + int size = ordered.size(); + for (int i = 0; i < size; i++) { + Value[] row = ordered.get(i); + int rowId = row[rowIdColumn].getInt(); + Value v; + switch (type) { + case FIRST_VALUE: + v = getNthValue(WindowFrame.iterator(over, session, ordered, getOverOrderBySort(), i, false), 0, + ignoreNulls); + break; + case LAST_VALUE: + v = getNthValue(WindowFrame.iterator(over, session, ordered, getOverOrderBySort(), i, true), 0, + ignoreNulls); + break; + case NTH_VALUE: { + int n = row[1].getInt(); + if (n <= 0) { + throw DbException.getInvalidValueException("nth row", n); + } + n--; + Iterator iter = WindowFrame.iterator(over, session, ordered, getOverOrderBySort(), i, + fromLast); + v = getNthValue(iter, n, ignoreNulls); + break; 
+ } + default: + throw DbException.getInternalError("type=" + type); + } + result.put(rowId, v); + } + } + + private static void getRatioToReport(HashMap result, ArrayList ordered, int rowIdColumn) { + int size = ordered.size(); + Value value = null; + for (int i = 0; i < size; i++) { + Value v = ordered.get(i)[0]; + if (v != ValueNull.INSTANCE) { + if (value == null) { + value = v.convertToDouble(); + } else { + value = value.add(v.convertToDouble()); + } + } + } + if (value != null && value.getSignum() == 0) { + value = null; + } + for (int i = 0; i < size; i++) { + Value[] row = ordered.get(i); + Value v; + if (value == null) { + v = ValueNull.INSTANCE; + } else { + v = row[0]; + if (v != ValueNull.INSTANCE) { + v = v.convertToDouble().divide(value, TypeInfo.TYPE_DOUBLE); + } + } + result.put(row[rowIdColumn].getInt(), v); + } + } + + @Override + protected Value getAggregatedValue(SessionLocal session, Object aggregateData) { + throw DbException.getUnsupportedException("Window function"); + } + + @Override + public void mapColumnsAnalysis(ColumnResolver resolver, int level, int innerState) { + if (args != null) { + for (Expression arg : args) { + arg.mapColumns(resolver, level, innerState); + } + } + super.mapColumnsAnalysis(resolver, level, innerState); + } + + @Override + public Expression optimize(SessionLocal session) { + if (over.getWindowFrame() != null) { + switch (type) { + case FIRST_VALUE: + case LAST_VALUE: + case NTH_VALUE: + break; + default: + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1); + } + } + if (over.getOrderBy() == null) { + if (type.requiresWindowOrdering()) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1, "ORDER BY"); + } + } else if (type == WindowFunctionType.RATIO_TO_REPORT) { + String sql = getTraceSQL(); + throw DbException.getSyntaxError(sql, sql.length() - 1); + } + super.optimize(session); + // Need to re-test, because optimization may remove the 
window ordering + // clause. + if (over.getOrderBy() == null) { + switch (type) { + case RANK: + case DENSE_RANK: + return ValueExpression.get(ValueBigint.get(1L)); + case PERCENT_RANK: + return ValueExpression.get(ValueDouble.ZERO); + case CUME_DIST: + return ValueExpression.get(ValueDouble.ONE); + default: + } + } + if (args != null) { + for (int i = 0; i < args.length; i++) { + args[i] = args[i].optimize(session); + } + } + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + if (args != null) { + for (Expression e : args) { + e.setEvaluatable(tableFilter, b); + } + } + super.setEvaluatable(tableFilter, b); + } + + @Override + public TypeInfo getType() { + switch (type) { + case ROW_NUMBER: + case RANK: + case DENSE_RANK: + case NTILE: + return TypeInfo.TYPE_BIGINT; + case PERCENT_RANK: + case CUME_DIST: + case RATIO_TO_REPORT: + return TypeInfo.TYPE_DOUBLE; + case LEAD: + case LAG: + case FIRST_VALUE: + case LAST_VALUE: + case NTH_VALUE: + return args[0].getType(); + default: + throw DbException.getInternalError("type=" + type); + } + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(type.getSQL()).append('('); + if (args != null) { + writeExpressions(builder, args, sqlFlags); + } + builder.append(')'); + if (fromLast && type == WindowFunctionType.NTH_VALUE) { + builder.append(" FROM LAST"); + } + if (ignoreNulls) { + switch (type) { + case LEAD: + case LAG: + case FIRST_VALUE: + case LAST_VALUE: + case NTH_VALUE: + builder.append(" IGNORE NULLS"); + //$FALL-THROUGH$ + default: + } + } + return appendTailConditions(builder, sqlFlags, type.requiresWindowOrdering()); + } + + @Override + public int getCost() { + int cost = 1; + if (args != null) { + for (Expression expr : args) { + cost += expr.getCost(); + } + } + return cost; + } + +} diff --git a/h2/src/main/org/h2/expression/analysis/WindowFunctionType.java 
b/h2/src/main/org/h2/expression/analysis/WindowFunctionType.java new file mode 100644 index 0000000000..cc468157b5 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/WindowFunctionType.java @@ -0,0 +1,142 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.analysis; + +/** + * A type of a window function. + */ +public enum WindowFunctionType { + + /** + * The type for ROW_NUMBER() window function. + */ + ROW_NUMBER, + + /** + * The type for RANK() window function. + */ + RANK, + + /** + * The type for DENSE_RANK() window function. + */ + DENSE_RANK, + + /** + * The type for PERCENT_RANK() window function. + */ + PERCENT_RANK, + + /** + * The type for CUME_DIST() window function. + */ + CUME_DIST, + + /** + * The type for NTILE() window function. + */ + NTILE, + + /** + * The type for LEAD() window function. + */ + LEAD, + + /** + * The type for LAG() window function. + */ + LAG, + + /** + * The type for FIRST_VALUE() window function. + */ + FIRST_VALUE, + + /** + * The type for LAST_VALUE() window function. + */ + LAST_VALUE, + + /** + * The type for NTH_VALUE() window function. + */ + NTH_VALUE, + + /** + * The type for RATIO_TO_REPORT() window function. + */ + RATIO_TO_REPORT, + + ; + + /** + * Returns the type of window function with the specified name, or null. + * + * @param name + * name of a window function + * @return the type of window function, or null. 
+ */ + public static WindowFunctionType get(String name) { + switch (name) { + case "ROW_NUMBER": + return ROW_NUMBER; + case "RANK": + return RANK; + case "DENSE_RANK": + return DENSE_RANK; + case "PERCENT_RANK": + return PERCENT_RANK; + case "CUME_DIST": + return CUME_DIST; + case "NTILE": + return NTILE; + case "LEAD": + return LEAD; + case "LAG": + return LAG; + case "FIRST_VALUE": + return FIRST_VALUE; + case "LAST_VALUE": + return LAST_VALUE; + case "NTH_VALUE": + return NTH_VALUE; + case "RATIO_TO_REPORT": + return RATIO_TO_REPORT; + default: + return null; + } + } + + /** + * Returns SQL representation. + * + * @return SQL representation. + * @see org.h2.expression.Expression#getSQL(int) + */ + public String getSQL() { + return name(); + } + + /** + * Returns whether window function of this type requires window ordering + * clause. + * + * @return {@code true} if it does, {@code false} if it may be omitted + */ + public boolean requiresWindowOrdering() { + switch (this) { + case RANK: + case DENSE_RANK: + case NTILE: + case LEAD: + case LAG: + return true; + default: + return false; + } + } + +} \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/analysis/package.html b/h2/src/main/org/h2/expression/analysis/package.html new file mode 100644 index 0000000000..5cc4ba03b0 --- /dev/null +++ b/h2/src/main/org/h2/expression/analysis/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Base classes for data analysis operations and implementations of window functions. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/condition/BetweenPredicate.java b/h2/src/main/org/h2/expression/condition/BetweenPredicate.java new file mode 100644 index 0000000000..b5b7b11f4d --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/BetweenPredicate.java @@ -0,0 +1,207 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * BETWEEN predicate. + */ +public final class BetweenPredicate extends Condition { + + private Expression left; + + private final boolean not; + + private final boolean whenOperand; + + private boolean symmetric; + + private Expression a, b; + + public BetweenPredicate(Expression left, boolean not, boolean whenOperand, boolean symmetric, Expression a, + Expression b) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.symmetric = symmetric; + this.a = a; + this.b = b; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + builder.append(" BETWEEN "); + if (symmetric) { + builder.append("SYMMETRIC "); + } + a.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(" AND "); + return 
b.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + a = a.optimize(session); + b = b.optimize(session); + TypeInfo leftType = left.getType(); + TypeInfo.checkComparable(leftType, a.getType()); + TypeInfo.checkComparable(leftType, b.getType()); + if (whenOperand) { + return this; + } + Value value = left.isConstant() ? left.getValue(session) : null, + aValue = a.isConstant() ? a.getValue(session) : null, + bValue = b.isConstant() ? b.getValue(session) : null; + if (value != null) { + if (value == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + if (aValue != null && bValue != null) { + return ValueExpression.getBoolean(getValue(session, value, aValue, bValue)); + } + } + if (symmetric) { + if (aValue == ValueNull.INSTANCE || bValue == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + } else if (aValue == ValueNull.INSTANCE && bValue == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + if (aValue != null && bValue != null && session.compareWithNull(aValue, bValue, false) == 0) { + return new Comparison(not ? 
Comparison.NOT_EQUAL : Comparison.EQUAL, left, a, false).optimize(session); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value value = left.getValue(session); + if (value == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, value, a.getValue(session), b.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return getValue(session, left, a.getValue(session), b.getValue(session)).isTrue(); + } + + private Value getValue(SessionLocal session, Value value, Value aValue, Value bValue) { + int cmp1 = session.compareWithNull(aValue, value, false); + int cmp2 = session.compareWithNull(value, bValue, false); + if (cmp1 == Integer.MIN_VALUE) { + return symmetric || cmp2 <= 0 ? ValueNull.INSTANCE : ValueBoolean.get(not); + } else if (cmp2 == Integer.MIN_VALUE) { + return symmetric || cmp1 <= 0 ? ValueNull.INSTANCE : ValueBoolean.get(not); + } else { + return ValueBoolean.get(not ^ // + (symmetric ? 
cmp1 <= 0 && cmp2 <= 0 || cmp1 >= 0 && cmp2 >= 0 : cmp1 <= 0 && cmp2 <= 0)); + } + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new BetweenPredicate(left, !not, false, symmetric, a, b); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!not && !whenOperand && !symmetric) { + Comparison.createIndexConditions(filter, a, left, Comparison.SMALLER_EQUAL); + Comparison.createIndexConditions(filter, left, b, Comparison.SMALLER_EQUAL); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + left.setEvaluatable(tableFilter, value); + a.setEvaluatable(tableFilter, value); + b.setEvaluatable(tableFilter, value); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + a.updateAggregate(session, stage); + b.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + a.mapColumns(resolver, level, state); + b.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && a.isEverything(visitor) && b.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + a.getCost() + b.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 3; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return a; + case 2: + return b; + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/BooleanTest.java b/h2/src/main/org/h2/expression/condition/BooleanTest.java new file mode 100644 index 
0000000000..47a07743f0 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/BooleanTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Boolean test (IS [NOT] { TRUE | FALSE | UNKNOWN }). + */ +public final class BooleanTest extends SimplePredicate { + + private final Boolean right; + + public BooleanTest(Expression left, boolean not, boolean whenOperand, Boolean right) { + super(left, not, whenOperand); + this.right = right; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return builder.append(not ? " IS NOT " : " IS ").append(right == null ? "UNKNOWN" : right ? "TRUE" : "FALSE"); + } + + @Override + public Value getValue(SessionLocal session) { + return ValueBoolean.get(getValue(left.getValue(session))); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(left); + } + + private boolean getValue(Value left) { + return (left == ValueNull.INSTANCE ? 
right == null : right != null && right == left.getBoolean()) ^ not; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new BooleanTest(left, !not, false, right); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (whenOperand || !filter.getTable().isQueryComparable()) { + return; + } + if (left instanceof ExpressionColumn) { + ExpressionColumn c = (ExpressionColumn) left; + if (c.getType().getValueType() == Value.BOOLEAN && filter == c.getTableFilter()) { + if (not) { + if (right == null && c.getColumn().isNullable()) { + ArrayList list = new ArrayList<>(2); + list.add(ValueExpression.FALSE); + list.add(ValueExpression.TRUE); + filter.addIndexCondition(IndexCondition.getInList(c, list)); + } + } else { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL_NULL_SAFE, c, + right == null ? TypedValueExpression.UNKNOWN : ValueExpression.getBoolean(right))); + } + } + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/CompareLike.java b/h2/src/main/org/h2/expression/condition/CompareLike.java new file mode 100644 index 0000000000..e62dbaaa24 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/CompareLike.java @@ -0,0 +1,634 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.SearchedCase; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; + +/** + * Pattern matching comparison expression: WHERE NAME LIKE ? + */ +public final class CompareLike extends Condition { + + /** + * The type of comparison. + */ + public enum LikeType { + /** + * LIKE. + */ + LIKE, + + /** + * ILIKE (case-insensitive LIKE). 
+ */ + ILIKE, + + /** + * REGEXP + */ + REGEXP + } + + private static final int MATCH = 0, ONE = 1, ANY = 2; + + private final CompareMode compareMode; + private final String defaultEscape; + + private final LikeType likeType; + private Expression left; + + private final boolean not; + + private final boolean whenOperand; + + private Expression right; + private Expression escape; + + private boolean isInit; + + private char[] patternChars; + private String patternString; + /** one of MATCH / ONE / ANY */ + private int[] patternTypes; + private int patternLength; + + private Pattern patternRegexp; + + private boolean ignoreCase; + private boolean fastCompare; + private boolean invalidPattern; + /** indicates that we can shortcut the comparison and use startsWith */ + private boolean shortcutToStartsWith; + /** indicates that we can shortcut the comparison and use endsWith */ + private boolean shortcutToEndsWith; + /** indicates that we can shortcut the comparison and use contains */ + private boolean shortcutToContains; + + public CompareLike(Database db, Expression left, boolean not, boolean whenOperand, Expression right, + Expression escape, LikeType likeType) { + this(db.getCompareMode(), db.getSettings().defaultEscape, left, not, whenOperand, right, escape, likeType); + } + + public CompareLike(CompareMode compareMode, String defaultEscape, Expression left, boolean not, + boolean whenOperand, Expression right, Expression escape, LikeType likeType) { + this.compareMode = compareMode; + this.defaultEscape = defaultEscape; + this.likeType = likeType; + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.right = right; + this.escape = escape; + } + + private static Character getEscapeChar(String s) { + return s == null || s.isEmpty() ? 
null : s.charAt(0); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + switch (likeType) { + case LIKE: + case ILIKE: + builder.append(likeType == LikeType.LIKE ? " LIKE " : " ILIKE "); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + if (escape != null) { + escape.getSQL(builder.append(" ESCAPE "), sqlFlags, AUTO_PARENTHESES); + } + break; + case REGEXP: + builder.append(" REGEXP "); + right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + break; + default: + throw DbException.getUnsupportedException(likeType.name()); + } + return builder; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + if (likeType == LikeType.ILIKE || left.getType().getValueType() == Value.VARCHAR_IGNORECASE) { + ignoreCase = true; + } + if (escape != null) { + escape = escape.optimize(session); + } + if (whenOperand) { + return this; + } + if (left.isValueSet()) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + // NULL LIKE something > NULL + return TypedValueExpression.UNKNOWN; + } + } + if (right.isValueSet() && (escape == null || escape.isValueSet())) { + if (left.isValueSet()) { + return ValueExpression.getBoolean(getValue(session)); + } + Value r = right.getValue(session); + if (r == ValueNull.INSTANCE) { + // something LIKE NULL > NULL + return TypedValueExpression.UNKNOWN; + } + Value e = escape == null ? 
null : escape.getValue(session); + if (e == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + String p = r.getString(); + initPattern(p, getEscapeChar(e)); + if (invalidPattern) { + return TypedValueExpression.UNKNOWN; + } + if (likeType != LikeType.REGEXP && "%".equals(p)) { + // optimization for X LIKE '%' + return new SearchedCase(new Expression[] { new NullPredicate(left, true, false), + ValueExpression.getBoolean(!not), TypedValueExpression.UNKNOWN }).optimize(session); + } + if (isFullMatch()) { + // optimization for X LIKE 'Hello': convert to X = 'Hello' + Value value = ignoreCase ? ValueVarcharIgnoreCase.get(patternString) : ValueVarchar.get(patternString); + Expression expr = ValueExpression.get(value); + return new Comparison(not ? Comparison.NOT_EQUAL : Comparison.EQUAL, left, expr, false) + .optimize(session); + } + isInit = true; + } + return this; + } + + private Character getEscapeChar(Value e) { + if (e == null) { + return getEscapeChar(defaultEscape); + } + String es = e.getString(); + Character esc; + if (es == null) { + esc = getEscapeChar(defaultEscape); + } else if (es.length() == 0) { + esc = null; + } else if (es.length() > 1) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, es); + } else { + esc = es.charAt(0); + } + return esc; + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || likeType == LikeType.REGEXP || !(left instanceof ExpressionColumn)) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter() || !TypeInfo.haveSameOrdering(l.getType(), + ignoreCase ? 
TypeInfo.TYPE_VARCHAR_IGNORECASE : TypeInfo.TYPE_VARCHAR)) { + return; + } + // parameters are always evaluatable, but + // we need to check if the value is set + // (at prepare time) + // otherwise we would need to prepare at execute time, + // which may be slower (possibly not in this case) + if (!right.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { + return; + } + if (escape != null && !escape.isEverything(ExpressionVisitor.INDEPENDENT_VISITOR)) { + return; + } + String p = right.getValue(session).getString(); + if (!isInit) { + Value e = escape == null ? null : escape.getValue(session); + if (e == ValueNull.INSTANCE) { + // should already be optimized + throw DbException.getInternalError(); + } + initPattern(p, getEscapeChar(e)); + } + if (invalidPattern) { + return; + } + if (patternLength <= 0 || patternTypes[0] != MATCH) { + // can't use an index + return; + } + if (!DataType.isStringType(l.getColumn().getType().getValueType())) { + // column is not a varchar - can't use the index + return; + } + // Get the MATCH prefix and see if we can create an index condition from + // that. 
+ int maxMatch = 0; + StringBuilder buff = new StringBuilder(); + while (maxMatch < patternLength && patternTypes[maxMatch] == MATCH) { + buff.append(patternChars[maxMatch++]); + } + String begin = buff.toString(); + if (maxMatch == patternLength) { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL, l, + ValueExpression.get(ValueVarchar.get(begin)))); + } else { + // TODO check if this is correct according to Unicode rules + // (code points) + String end; + if (begin.length() > 0) { + filter.addIndexCondition(IndexCondition.get( + Comparison.BIGGER_EQUAL, l, + ValueExpression.get(ValueVarchar.get(begin)))); + char next = begin.charAt(begin.length() - 1); + // search the 'next' unicode character (or at least a character + // that is higher) + for (int i = 1; i < 2000; i++) { + end = begin.substring(0, begin.length() - 1) + (char) (next + i); + if (compareMode.compareString(begin, end, ignoreCase) < 0) { + filter.addIndexCondition(IndexCondition.get( + Comparison.SMALLER, l, + ValueExpression.get(ValueVarchar.get(end)))); + break; + } + } + } + } + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(session, left).isTrue(); + } + + private Value getValue(SessionLocal session, Value left) { + if (left == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + if (!isInit) { + Value r = right.getValue(session); + if (r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + String p = r.getString(); + Value e = escape == null ? 
null : escape.getValue(session); + if (e == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + initPattern(p, getEscapeChar(e)); + } + if (invalidPattern) { + return ValueNull.INSTANCE; + } + String value = left.getString(); + boolean result; + if (likeType == LikeType.REGEXP) { + result = patternRegexp.matcher(value).find(); + } else if (shortcutToStartsWith) { + result = value.regionMatches(ignoreCase, 0, patternString, 0, patternLength - 1); + } else if (shortcutToEndsWith) { + result = value.regionMatches(ignoreCase, value.length() - + patternLength + 1, patternString, 1, patternLength - 1); + } else if (shortcutToContains) { + String p = patternString.substring(1, patternString.length() - 1); + if (ignoreCase) { + result = containsIgnoreCase(value, p); + } else { + result = value.contains(p); + } + } else { + result = compareAt(value, 0, 0, value.length(), patternChars, patternTypes); + } + return ValueBoolean.get(not ^ result); + } + + private static boolean containsIgnoreCase(String src, String what) { + final int length = what.length(); + if (length == 0) { + // Empty string is contained + return true; + } + + final char firstLo = Character.toLowerCase(what.charAt(0)); + final char firstUp = Character.toUpperCase(what.charAt(0)); + + for (int i = src.length() - length; i >= 0; i--) { + // Quick check before calling the more expensive regionMatches() + final char ch = src.charAt(i); + if (ch != firstLo && ch != firstUp) { + continue; + } + if (src.regionMatches(true, i, what, 0, length)) { + return true; + } + } + + return false; + } + + private boolean compareAt(String s, int pi, int si, int sLen, + char[] pattern, int[] types) { + for (; pi < patternLength; pi++) { + switch (types[pi]) { + case MATCH: + if ((si >= sLen) || !compare(pattern, s, pi, si++)) { + return false; + } + break; + case ONE: + if (si++ >= sLen) { + return false; + } + break; + case ANY: + if (++pi >= patternLength) { + return true; + } + while (si < sLen) { + if 
(compare(pattern, s, pi, si) && + compareAt(s, pi, si, sLen, pattern, types)) { + return true; + } + si++; + } + return false; + default: + throw DbException.getInternalError(Integer.toString(types[pi])); + } + } + return si == sLen; + } + + private boolean compare(char[] pattern, String s, int pi, int si) { + return pattern[pi] == s.charAt(si) || + (!fastCompare && compareMode.equalsChars(patternString, pi, s, + si, ignoreCase)); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + /** + * Test if the value matches the pattern. + * + * @param testPattern the pattern + * @param value the value + * @param escapeChar the escape character + * @return true if the value matches + */ + public boolean test(String testPattern, String value, char escapeChar) { + initPattern(testPattern, escapeChar); + return test(value); + } + + /** + * Test if the value matches the initialized pattern. + * + * @param value the value + * @return true if the value matches + */ + public boolean test(String value) { + if (invalidPattern) { + return false; + } + return compareAt(value, 0, 0, value.length(), patternChars, patternTypes); + } + + /** + * Initializes the pattern. 
+ * + * @param p the pattern + * @param escapeChar the escape character + */ + public void initPattern(String p, Character escapeChar) { + if (compareMode.getName().equals(CompareMode.OFF) && !ignoreCase) { + fastCompare = true; + } + if (likeType == LikeType.REGEXP) { + patternString = p; + try { + if (ignoreCase) { + patternRegexp = Pattern.compile(p, Pattern.CASE_INSENSITIVE); + } else { + patternRegexp = Pattern.compile(p); + } + } catch (PatternSyntaxException e) { + throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, p); + } + return; + } + patternLength = 0; + if (p == null) { + patternTypes = null; + patternChars = null; + return; + } + int len = p.length(); + patternChars = new char[len]; + patternTypes = new int[len]; + boolean lastAny = false; + for (int i = 0; i < len; i++) { + char c = p.charAt(i); + int type; + if (escapeChar != null && escapeChar == c) { + if (i >= len - 1) { + invalidPattern = true; + return; + } + c = p.charAt(++i); + type = MATCH; + lastAny = false; + } else if (c == '%') { + if (lastAny) { + continue; + } + type = ANY; + lastAny = true; + } else if (c == '_') { + type = ONE; + } else { + type = MATCH; + lastAny = false; + } + patternTypes[patternLength] = type; + patternChars[patternLength++] = c; + } + for (int i = 0; i < patternLength - 1; i++) { + if ((patternTypes[i] == ANY) && (patternTypes[i + 1] == ONE)) { + patternTypes[i] = ONE; + patternTypes[i + 1] = ANY; + } + } + patternString = new String(patternChars, 0, patternLength); + + // Clear optimizations + shortcutToStartsWith = false; + shortcutToEndsWith = false; + shortcutToContains = false; + + // optimizes the common case of LIKE 'foo%' + if (compareMode.getName().equals(CompareMode.OFF) && patternLength > 1) { + int maxMatch = 0; + while (maxMatch < patternLength && patternTypes[maxMatch] == MATCH) { + maxMatch++; + } + if (maxMatch == patternLength - 1 && patternTypes[patternLength - 1] == ANY) { + shortcutToStartsWith = true; + return; + } + } + // optimizes 
the common case of LIKE '%foo' + if (compareMode.getName().equals(CompareMode.OFF) && patternLength > 1) { + if (patternTypes[0] == ANY) { + int maxMatch = 1; + while (maxMatch < patternLength && patternTypes[maxMatch] == MATCH) { + maxMatch++; + } + if (maxMatch == patternLength) { + shortcutToEndsWith = true; + return; + } + } + } + // optimizes the common case of LIKE '%foo%' + if (compareMode.getName().equals(CompareMode.OFF) && patternLength > 2) { + if (patternTypes[0] == ANY) { + int maxMatch = 1; + while (maxMatch < patternLength && patternTypes[maxMatch] == MATCH) { + maxMatch++; + } + if (maxMatch == patternLength - 1 && patternTypes[patternLength - 1] == ANY) { + shortcutToContains = true; + } + } + } + } + + private boolean isFullMatch() { + if (patternTypes == null) { + return false; + } + for (int type : patternTypes) { + if (type != MATCH) { + return false; + } + } + return true; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new CompareLike(compareMode, defaultEscape, left, !not, false, right, escape, likeType); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + right.mapColumns(resolver, level, state); + if (escape != null) { + escape.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + right.setEvaluatable(tableFilter, b); + if (escape != null) { + escape.setEvaluatable(tableFilter, b); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + right.updateAggregate(session, stage); + if (escape != null) { + escape.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor) + && 
(escape == null || escape.isEverything(visitor)); + } + + @Override + public int getCost() { + return left.getCost() + right.getCost() + 3; + } + + @Override + public int getSubexpressionCount() { + return escape == null ? 2 : 3; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return right; + case 2: + if (escape != null) { + return escape; + } + //$FALL-THROUGH$ + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/Comparison.java b/h2/src/main/org/h2/expression/condition/Comparison.java new file mode 100644 index 0000000000..666f4063d7 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/Comparison.java @@ -0,0 +1,599 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.expression.aggregate.Aggregate; +import org.h2.expression.aggregate.AggregateType; +import org.h2.index.IndexCondition; +import org.h2.message.DbException; +import org.h2.table.Column; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Example comparison expressions are ID=1, NAME=NAME, NAME IS NULL. 
+ * + * @author Thomas Mueller + * @author Noel Grandin + * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 + */ +public final class Comparison extends Condition { + + /** + * The comparison type meaning = as in ID=1. + */ + public static final int EQUAL = 0; + + /** + * The comparison type meaning <> as in ID<>1. + */ + public static final int NOT_EQUAL = 1; + + /** + * The comparison type meaning < as in ID<1. + */ + public static final int SMALLER = 2; + + /** + * The comparison type meaning > as in ID>1. + */ + public static final int BIGGER = 3; + + /** + * The comparison type meaning <= as in ID<=1. + */ + public static final int SMALLER_EQUAL = 4; + + /** + * The comparison type meaning >= as in ID>=1. + */ + public static final int BIGGER_EQUAL = 5; + + /** + * The comparison type meaning ID IS NOT DISTINCT FROM 1. + */ + public static final int EQUAL_NULL_SAFE = 6; + + /** + * The comparison type meaning ID IS DISTINCT FROM 1. + */ + public static final int NOT_EQUAL_NULL_SAFE = 7; + + /** + * This is a comparison type that is only used for spatial index + * conditions (operator "&&"). + */ + public static final int SPATIAL_INTERSECTS = 8; + + static final String[] COMPARE_TYPES = { "=", "<>", "<", ">", "<=", ">=", // + "IS NOT DISTINCT FROM", "IS DISTINCT FROM", // + "&&" }; + + /** + * This is a pseudo comparison type that is only used for index conditions. + * It means the comparison will always yield FALSE. Example: 1=0. + */ + public static final int FALSE = 9; + + /** + * This is a pseudo comparison type that is only used for index conditions. + * It means equals any value of a list. Example: IN(1, 2, 3). + */ + public static final int IN_LIST = 10; + + /** + * This is a pseudo comparison type that is only used for index conditions. + * It means equals any value of a list. Example: IN(SELECT ...). 
+ */ + public static final int IN_QUERY = 11; + + private int compareType; + private Expression left; + private Expression right; + private final boolean whenOperand; + + public Comparison(int compareType, Expression left, Expression right, boolean whenOperand) { + this.left = left; + this.right = right; + this.compareType = compareType; + this.whenOperand = whenOperand; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(' ').append(COMPARE_TYPES[compareType]).append(' '); + return right.getSQL(builder, sqlFlags, + right instanceof Aggregate && ((Aggregate) right).getAggregateType() == AggregateType.ANY + ? WITH_PARENTHESES + : AUTO_PARENTHESES); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + check: { + TypeInfo leftType = left.getType(), rightType = right.getType(); + if (session.getMode().numericWithBooleanComparison) { + switch (compareType) { + case EQUAL: + case NOT_EQUAL: + case EQUAL_NULL_SAFE: + case NOT_EQUAL_NULL_SAFE: + int lValueType = leftType.getValueType(); + if (lValueType == Value.BOOLEAN) { + if (DataType.isNumericType(rightType.getValueType())) { + break check; + } + } else if (DataType.isNumericType(lValueType) && rightType.getValueType() == Value.BOOLEAN) { + break check; + } + } + } + TypeInfo.checkComparable(leftType, rightType); + } + if (whenOperand) { + return this; + } + if (right instanceof ExpressionColumn) { + if (left.isConstant() || left instanceof Parameter) { + Expression temp = left; + left = right; + right = temp; + compareType = getReversedCompareType(compareType); + } + } + if (left instanceof ExpressionColumn) { + if 
(right.isConstant()) { + Value r = right.getValue(session); + if (r == ValueNull.INSTANCE) { + if ((compareType & ~1) != EQUAL_NULL_SAFE) { + return TypedValueExpression.UNKNOWN; + } + } + TypeInfo colType = left.getType(), constType = r.getType(); + int constValueType = constType.getValueType(); + if (constValueType != colType.getValueType() || constValueType >= Value.ARRAY) { + TypeInfo resType = TypeInfo.getHigherType(colType, constType); + // If not, the column values will need to be promoted + // to constant type, but vise versa, then let's do this here + // once. + if (constValueType != resType.getValueType() || constValueType >= Value.ARRAY) { + Column column = ((ExpressionColumn) left).getColumn(); + right = ValueExpression.get(r.convertTo(resType, session, column)); + } + } + } else if (right instanceof Parameter) { + ((Parameter) right).setColumn(((ExpressionColumn) left).getColumn()); + } + } + if (left.isConstant() && right.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + if (left.isNullConstant() || right.isNullConstant()) { + // TODO NULL handling: maybe issue a warning when comparing with + // a NULL constants + if ((compareType & ~1) != EQUAL_NULL_SAFE) { + return TypedValueExpression.UNKNOWN; + } else { + Expression e = left.isNullConstant() ? 
right : left; + int type = e.getType().getValueType(); + if (type != Value.UNKNOWN && type != Value.ROW) { + return new NullPredicate(e, compareType == NOT_EQUAL_NULL_SAFE, false); + } + } + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + // Optimization: do not evaluate right if not necessary + if (l == ValueNull.INSTANCE && (compareType & ~1) != EQUAL_NULL_SAFE) { + return ValueNull.INSTANCE; + } + return compare(session, l, right.getValue(session), compareType); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + // Optimization: do not evaluate right if not necessary + if (left == ValueNull.INSTANCE && (compareType & ~1) != EQUAL_NULL_SAFE) { + return false; + } + return compare(session, left, right.getValue(session), compareType).isTrue(); + } + + /** + * Compare two values. + * + * @param session the session + * @param l the first value + * @param r the second value + * @param compareType the compare type + * @return result of comparison, either TRUE, FALSE, or NULL + */ + static Value compare(SessionLocal session, Value l, Value r, int compareType) { + Value result; + switch (compareType) { + case EQUAL: { + int cmp = session.compareWithNull(l, r, true); + if (cmp == 0) { + result = ValueBoolean.TRUE; + } else if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.FALSE; + } + break; + } + case EQUAL_NULL_SAFE: + result = ValueBoolean.get(session.areEqual(l, r)); + break; + case NOT_EQUAL: { + int cmp = session.compareWithNull(l, r, true); + if (cmp == 0) { + result = ValueBoolean.FALSE; + } else if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.TRUE; + } + break; + } + case NOT_EQUAL_NULL_SAFE: + result = ValueBoolean.get(!session.areEqual(l, r)); + break; + case BIGGER_EQUAL: { + int cmp = 
session.compareWithNull(l, r, false); + if (cmp >= 0) { + result = ValueBoolean.TRUE; + } else if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.FALSE; + } + break; + } + case BIGGER: { + int cmp = session.compareWithNull(l, r, false); + if (cmp > 0) { + result = ValueBoolean.TRUE; + } else if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.FALSE; + } + break; + } + case SMALLER_EQUAL: { + int cmp = session.compareWithNull(l, r, false); + if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.get(cmp <= 0); + } + break; + } + case SMALLER: { + int cmp = session.compareWithNull(l, r, false); + if (cmp == Integer.MIN_VALUE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.get(cmp < 0); + } + break; + } + case SPATIAL_INTERSECTS: { + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + result = ValueNull.INSTANCE; + } else { + result = ValueBoolean.get(l.convertToGeometry(null).intersectsBoundingBox(r.convertToGeometry(null))); + } + break; + } + default: + throw DbException.getInternalError("type=" + compareType); + } + return result; + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + private static int getReversedCompareType(int type) { + switch (type) { + case EQUAL: + case EQUAL_NULL_SAFE: + case NOT_EQUAL: + case NOT_EQUAL_NULL_SAFE: + case SPATIAL_INTERSECTS: + return type; + case BIGGER_EQUAL: + return SMALLER_EQUAL; + case BIGGER: + return SMALLER; + case SMALLER_EQUAL: + return BIGGER_EQUAL; + case SMALLER: + return BIGGER; + default: + throw DbException.getInternalError("type=" + type); + } + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (compareType == SPATIAL_INTERSECTS || whenOperand) { + return null; + } + int type = getNotCompareType(); + return new Comparison(type, left, right, false); + } + + private int 
getNotCompareType() { + switch (compareType) { + case EQUAL: + return NOT_EQUAL; + case EQUAL_NULL_SAFE: + return NOT_EQUAL_NULL_SAFE; + case NOT_EQUAL: + return EQUAL; + case NOT_EQUAL_NULL_SAFE: + return EQUAL_NULL_SAFE; + case BIGGER_EQUAL: + return SMALLER; + case BIGGER: + return SMALLER_EQUAL; + case SMALLER_EQUAL: + return BIGGER; + case SMALLER: + return BIGGER_EQUAL; + default: + throw DbException.getInternalError("type=" + compareType); + } + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!whenOperand) { + createIndexConditions(filter, left, right, compareType); + } + } + + static void createIndexConditions(TableFilter filter, Expression left, Expression right, int compareType) { + if (!filter.getTable().isQueryComparable()) { + return; + } + ExpressionColumn l = null; + if (left instanceof ExpressionColumn) { + l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + l = null; + } + } + ExpressionColumn r = null; + if (right instanceof ExpressionColumn) { + r = (ExpressionColumn) right; + if (filter != r.getTableFilter()) { + r = null; + } + } + // one side must be from the current filter + if ((l == null) == (r == null)) { + return; + } + if (l == null) { + if (!left.isEverything(ExpressionVisitor.getNotFromResolverVisitor(filter))) { + return; + } + } else { // r == null + if (!right.isEverything(ExpressionVisitor.getNotFromResolverVisitor(filter))) { + return; + } + } + switch (compareType) { + case NOT_EQUAL: + case NOT_EQUAL_NULL_SAFE: + break; + case EQUAL: + case EQUAL_NULL_SAFE: + case BIGGER: + case BIGGER_EQUAL: + case SMALLER_EQUAL: + case SMALLER: + case SPATIAL_INTERSECTS: + if (l != null) { + TypeInfo colType = l.getType(); + if (TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, right.getType()))) { + filter.addIndexCondition(IndexCondition.get(compareType, l, right)); + } + } else { + @SuppressWarnings("null") + TypeInfo colType = r.getType(); + if 
(TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, left.getType()))) { + filter.addIndexCondition(IndexCondition.get(getReversedCompareType(compareType), r, left)); + } + } + break; + default: + throw DbException.getInternalError("type=" + compareType); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + if (right != null) { + right.setEvaluatable(tableFilter, b); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + if (right != null) { + right.updateAggregate(session, stage); + } + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + right.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + right.getCost() + 1; + } + + /** + * Get the other expression if this is an equals comparison and the other + * expression matches. + * + * @param match the expression that should match + * @return null if no match, the other expression if there is a match + */ + Expression getIfEquals(Expression match) { + if (compareType == EQUAL) { + String sql = match.getSQL(DEFAULT_SQL_FLAGS); + if (left.getSQL(DEFAULT_SQL_FLAGS).equals(sql)) { + return right; + } else if (right.getSQL(DEFAULT_SQL_FLAGS).equals(sql)) { + return left; + } + } + return null; + } + + /** + * Get an additional condition if possible. Example: given two conditions + * A=B AND B=C, the new condition A=C is returned. 
+ * + * @param session the session + * @param other the second condition + * @return null or the third condition for indexes + */ + Expression getAdditionalAnd(SessionLocal session, Comparison other) { + if (compareType == EQUAL && other.compareType == EQUAL && !whenOperand) { + boolean lc = left.isConstant(); + boolean rc = right.isConstant(); + boolean l2c = other.left.isConstant(); + boolean r2c = other.right.isConstant(); + String l = left.getSQL(DEFAULT_SQL_FLAGS); + String l2 = other.left.getSQL(DEFAULT_SQL_FLAGS); + String r = right.getSQL(DEFAULT_SQL_FLAGS); + String r2 = other.right.getSQL(DEFAULT_SQL_FLAGS); + // a=b AND a=c + // must not compare constants. example: NOT(B=2 AND B=3) + if (!(rc && r2c) && l.equals(l2)) { + return new Comparison(EQUAL, right, other.right, false); + } else if (!(rc && l2c) && l.equals(r2)) { + return new Comparison(EQUAL, right, other.left, false); + } else if (!(lc && r2c) && r.equals(l2)) { + return new Comparison(EQUAL, left, other.right, false); + } else if (!(lc && l2c) && r.equals(r2)) { + return new Comparison(EQUAL, left, other.left, false); + } + } + return null; + } + + /** + * Replace the OR condition with IN condition if possible. Example: given + * the two conditions A=1 OR A=2, the new condition A IN(1, 2) is returned. 
+ * + * @param session the session + * @param other the second condition + * @return null or the joined IN condition + */ + Expression optimizeOr(SessionLocal session, Comparison other) { + if (compareType == EQUAL && other.compareType == EQUAL) { + Expression left2 = other.left; + Expression right2 = other.right; + String l2 = left2.getSQL(DEFAULT_SQL_FLAGS); + String r2 = right2.getSQL(DEFAULT_SQL_FLAGS); + if (left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String l = left.getSQL(DEFAULT_SQL_FLAGS); + if (l.equals(l2)) { + return getConditionIn(left, right, right2); + } else if (l.equals(r2)) { + return getConditionIn(left, right, left2); + } + } + if (right.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String r = right.getSQL(DEFAULT_SQL_FLAGS); + if (r.equals(l2)) { + return getConditionIn(right, left, right2); + } else if (r.equals(r2)) { + return getConditionIn(right, left, left2); + } + } + } + return null; + } + + private static ConditionIn getConditionIn(Expression left, Expression value1, + Expression value2) { + ArrayList right = new ArrayList<>(2); + right.add(value1); + right.add(value2); + return new ConditionIn(left, false, false, right); + } + + @Override + public int getSubexpressionCount() { + return 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return right; + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/Condition.java b/h2/src/main/org/h2/expression/condition/Condition.java new file mode 100644 index 0000000000..ba3d50991a --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/Condition.java @@ -0,0 +1,38 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.function.CastSpecification; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Represents a condition returning a boolean value, or NULL. + */ +abstract class Condition extends Expression { + + /** + * Add a cast around the expression (if necessary) so that the type is boolean. + * + * @param session the session + * @param expression the expression + * @return the new expression + */ + static Expression castToBoolean(SessionLocal session, Expression expression) { + if (expression.getType().getValueType() == Value.BOOLEAN) { + return expression; + } + return new CastSpecification(expression, TypeInfo.TYPE_BOOLEAN); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_BOOLEAN; + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionAndOr.java b/h2/src/main/org/h2/expression/condition/ConditionAndOr.java new file mode 100644 index 0000000000..82dc4fbcb3 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionAndOr.java @@ -0,0 +1,367 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * An 'and' or 'or' condition as in WHERE ID=1 AND NAME=? 
+ */ +public class ConditionAndOr extends Condition { + + /** + * The AND condition type as in ID=1 AND NAME='Hello'. + */ + public static final int AND = 0; + + /** + * The OR condition type as in ID=1 OR NAME='Hello'. + */ + public static final int OR = 1; + + private final int andOrType; + private Expression left, right; + + /** + * Additional condition for index only. + */ + private Expression added; + + public ConditionAndOr(int andOrType, Expression left, Expression right) { + if (left == null || right == null) { + throw DbException.getInternalError(left + " " + right); + } + this.andOrType = andOrType; + this.left = left; + this.right = right; + } + + int getAndOrType() { + return this.andOrType; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + switch (andOrType) { + case AND: + builder.append("\n AND "); + break; + case OR: + builder.append("\n OR "); + break; + default: + throw DbException.getInternalError("andOrType=" + andOrType); + } + return right.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (andOrType == AND) { + left.createIndexConditions(session, filter); + right.createIndexConditions(session, filter); + if (added != null) { + added.createIndexConditions(session, filter); + } + } + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + // (NOT (A OR B)): (NOT(A) AND NOT(B)) + // (NOT (A AND B)): (NOT(A) OR NOT(B)) + Expression l = left.getNotIfPossible(session); + if (l == null) { + l = new ConditionNot(left); + } + Expression r = right.getNotIfPossible(session); + if (r == null) { + r = new ConditionNot(right); + } + int reversed = andOrType == AND ? 
OR : AND; + return new ConditionAndOr(reversed, l, r); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + Value r; + switch (andOrType) { + case AND: { + if (l.isFalse() || (r = right.getValue(session)).isFalse()) { + return ValueBoolean.FALSE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.TRUE; + } + case OR: { + if (l.isTrue() || (r = right.getValue(session)).isTrue()) { + return ValueBoolean.TRUE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.FALSE; + } + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + + @Override + public Expression optimize(SessionLocal session) { + // NULL handling: see wikipedia, + // http://www-cs-students.stanford.edu/~wlam/compsci/sqlnulls + left = left.optimize(session); + right = right.optimize(session); + int lc = left.getCost(), rc = right.getCost(); + if (rc < lc) { + Expression t = left; + left = right; + right = t; + } + switch (andOrType) { + case AND: + if (!session.getDatabase().getSettings().optimizeTwoEquals) { + break; + } + // this optimization does not work in the following case, + // but NOT is optimized before: + // CREATE TABLE TEST(A INT, B INT); + // INSERT INTO TEST VALUES(1, NULL); + // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows + // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, NULL + // try to add conditions (A=B AND B=1: add A=1) + if (left instanceof Comparison && right instanceof Comparison) { + // try to add conditions (A=B AND B=1: add A=1) + Expression added = ((Comparison) left).getAdditionalAnd(session, (Comparison) right); + if (added != null) { + this.added = added.optimize(session); + } + } + break; + case OR: + if (!session.getDatabase().getSettings().optimizeOr) { + break; + } + Expression reduced; + if (left instanceof Comparison && right instanceof 
Comparison) { + reduced = ((Comparison) left).optimizeOr(session, (Comparison) right); + } else if (left instanceof ConditionIn && right instanceof Comparison) { + reduced = ((ConditionIn) left).getAdditional((Comparison) right); + } else if (right instanceof ConditionIn && left instanceof Comparison) { + reduced = ((ConditionIn) right).getAdditional((Comparison) left); + } else if (left instanceof ConditionInConstantSet && right instanceof Comparison) { + reduced = ((ConditionInConstantSet) left).getAdditional(session, (Comparison) right); + } else if (right instanceof ConditionInConstantSet && left instanceof Comparison) { + reduced = ((ConditionInConstantSet) right).getAdditional(session, (Comparison) left); + } else if (left instanceof ConditionAndOr && right instanceof ConditionAndOr) { + reduced = optimizeConditionAndOr((ConditionAndOr)left, (ConditionAndOr)right); + } else { + // TODO optimization: convert .. OR .. to UNION if the cost is lower + break; + } + if (reduced != null) { + return reduced.optimize(session); + } + } + Expression e = optimizeIfConstant(session, andOrType, left, right); + if (e == null) { + return optimizeN(this); + } + if (e instanceof ConditionAndOr) { + return optimizeN((ConditionAndOr) e); + } + return e; + } + + private static Expression optimizeN(ConditionAndOr condition) { + if (condition.right instanceof ConditionAndOr) { + ConditionAndOr rightCondition = (ConditionAndOr) condition.right; + if (rightCondition.andOrType == condition.andOrType) { + return new ConditionAndOrN(condition.andOrType, condition.left, rightCondition.left, + rightCondition.right); + } + } + if (condition.right instanceof ConditionAndOrN) { + ConditionAndOrN rightCondition = (ConditionAndOrN) condition.right; + if (rightCondition.getAndOrType() == condition.andOrType) { + rightCondition.addFirst(condition.left); + return rightCondition; + } + } + return condition; + } + + /** + * Optimize the condition if at least one part is constant. 
+ * + * @param session the session + * @param andOrType the type + * @param left the left part of the condition + * @param right the right part of the condition + * @return the optimized condition, or {@code null} if condition cannot be optimized + */ + static Expression optimizeIfConstant(SessionLocal session, int andOrType, Expression left, Expression right) { + if (!left.isConstant()) { + if (!right.isConstant()) { + return null; + } else { + return optimizeConstant(session, andOrType, right.getValue(session), left); + } + } + Value l = left.getValue(session); + if (!right.isConstant()) { + return optimizeConstant(session, andOrType, l, right); + } + Value r = right.getValue(session); + switch (andOrType) { + case AND: { + if (l.isFalse() || r.isFalse()) { + return ValueExpression.FALSE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return ValueExpression.TRUE; + } + case OR: { + if (l.isTrue() || r.isTrue()) { + return ValueExpression.TRUE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return ValueExpression.FALSE; + } + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + + private static Expression optimizeConstant(SessionLocal session, int andOrType, Value l, Expression right) { + if (l != ValueNull.INSTANCE) { + switch (andOrType) { + case AND: + return l.getBoolean() ? castToBoolean(session, right) : ValueExpression.FALSE; + case OR: + return l.getBoolean() ? 
ValueExpression.TRUE : castToBoolean(session, right); + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + return null; + } + + @Override + public void addFilterConditions(TableFilter filter) { + if (andOrType == AND) { + left.addFilterConditions(filter); + right.addFilterConditions(filter); + } else { + super.addFilterConditions(filter); + } + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + right.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + right.setEvaluatable(tableFilter, b); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + right.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && right.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + right.getCost(); + } + + @Override + public int getSubexpressionCount() { + return 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return left; + case 1: + return right; + default: + throw new IndexOutOfBoundsException(); + } + } + + /** + * Optimize query according to the given condition. 
Example: + * (A AND B) OR (C AND B), the new condition B AND (A OR C) is returned + * + * @param left the session + * @param right the second condition + * @return null or the third condition + */ + static Expression optimizeConditionAndOr(ConditionAndOr left, ConditionAndOr right) { + if (left.andOrType != AND || right.andOrType != AND) { + return null; + } + Expression leftLeft = left.getSubexpression(0), leftRight = left.getSubexpression(1); + Expression rightLeft = right.getSubexpression(0), rightRight = right.getSubexpression(1); + String rightLeftSQL = rightLeft.getSQL(DEFAULT_SQL_FLAGS); + String rightRightSQL = rightRight.getSQL(DEFAULT_SQL_FLAGS); + if (leftLeft.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String leftLeftSQL = leftLeft.getSQL(DEFAULT_SQL_FLAGS); + if (leftLeftSQL.equals(rightLeftSQL)) { + return new ConditionAndOr(AND, leftLeft, new ConditionAndOr(OR, leftRight, rightRight)); + } + if (leftLeftSQL.equals(rightRightSQL)) { + return new ConditionAndOr(AND, leftLeft, new ConditionAndOr(OR, leftRight, rightLeft)); + } + } + if (leftRight.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + String leftRightSQL = leftRight.getSQL(DEFAULT_SQL_FLAGS); + if (leftRightSQL.equals(rightLeftSQL)) { + return new ConditionAndOr(AND, leftRight, new ConditionAndOr(OR, leftLeft, rightRight)); + } else if (leftRightSQL.equals(rightRightSQL)) { + return new ConditionAndOr(AND, leftRight, new ConditionAndOr(OR, leftLeft, rightLeft)); + } + } + return null; + } +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java b/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java new file mode 100644 index 0000000000..51ed2b1216 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionAndOrN.java @@ -0,0 +1,341 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
Initial Developer: H2 + * Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * An 'and' or 'or' condition as in WHERE ID=1 AND NAME=? with N operands. + * Mostly useful for optimisation and preventing stack overflow where generated + * SQL has tons of conditions. + */ +public class ConditionAndOrN extends Condition { + + private final int andOrType; + /** + * Use an ArrayDeque because we primarily insert at the front. + */ + private final List expressions; + + /** + * Additional conditions for index only. + */ + private List added; + + public ConditionAndOrN(int andOrType, Expression expr1, Expression expr2, Expression expr3) { + this.andOrType = andOrType; + this.expressions = new ArrayList<>(3); + expressions.add(expr1); + expressions.add(expr2); + expressions.add(expr3); + } + + public ConditionAndOrN(int andOrType, List expressions) { + this.andOrType = andOrType; + this.expressions = expressions; + } + + int getAndOrType() { + return andOrType; + } + + /** + * Add the expression at the beginning of the list. 
+ * + * @param e the expression + */ + void addFirst(Expression e) { + expressions.add(0, e); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + Iterator it = expressions.iterator(); + it.next().getSQL(builder, sqlFlags, AUTO_PARENTHESES); + while (it.hasNext()) { + switch (andOrType) { + case ConditionAndOr.AND: + builder.append("\n AND "); + break; + case ConditionAndOr.OR: + builder.append("\n OR "); + break; + default: + throw DbException.getInternalError("andOrType=" + andOrType); + } + it.next().getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + return builder; + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (andOrType == ConditionAndOr.AND) { + for (Expression e : expressions) { + e.createIndexConditions(session, filter); + } + if (added != null) { + for (Expression e : added) { + e.createIndexConditions(session, filter); + } + } + } + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + // (NOT (A OR B)): (NOT(A) AND NOT(B)) + // (NOT (A AND B)): (NOT(A) OR NOT(B)) + final ArrayList newList = new ArrayList<>(expressions.size()); + for (Expression e : expressions) { + Expression l = e.getNotIfPossible(session); + if (l == null) { + l = new ConditionNot(e); + } + newList.add(l); + } + int reversed = andOrType == ConditionAndOr.AND ? ConditionAndOr.OR : ConditionAndOr.AND; + return new ConditionAndOrN(reversed, newList); + } + + @Override + public Value getValue(SessionLocal session) { + boolean hasNull = false; + switch (andOrType) { + case ConditionAndOr.AND: { + for (Expression e : expressions) { + Value v = e.getValue(session); + if (v == ValueNull.INSTANCE) { + hasNull = true; + } else if (!v.getBoolean()) { + return ValueBoolean.FALSE; + } + } + return hasNull ? 
ValueNull.INSTANCE : ValueBoolean.TRUE; + } + case ConditionAndOr.OR: { + for (Expression e : expressions) { + Value v = e.getValue(session); + if (v == ValueNull.INSTANCE) { + hasNull = true; + } else if (v.getBoolean()) { + return ValueBoolean.TRUE; + } + } + return hasNull ? ValueNull.INSTANCE : ValueBoolean.FALSE; + } + default: + throw DbException.getInternalError("type=" + andOrType); + } + } + + private static final Comparator COMPARE_BY_COST = new Comparator() { + @Override + public int compare(Expression lhs, Expression rhs) { + return lhs.getCost() - rhs.getCost(); + } + + }; + + @Override + public Expression optimize(SessionLocal session) { + // NULL handling: see wikipedia, + // http://www-cs-students.stanford.edu/~wlam/compsci/sqlnulls + + // first pass, optimize individual sub-expressions + for (int i = 0; i < expressions.size(); i++ ) { + expressions.set(i, expressions.get(i).optimize(session)); + } + + Collections.sort(expressions, COMPARE_BY_COST); + + // TODO we're only matching pairs so that are next to each other, so in complex expressions + // we will miss opportunities + + // second pass, optimize combinations + optimizeMerge(0); + for (int i = 1; i < expressions.size(); ) { + Expression left = expressions.get(i-1); + Expression right = expressions.get(i); + switch (andOrType) { + case ConditionAndOr.AND: + if (!session.getDatabase().getSettings().optimizeTwoEquals) { + break; + } + // this optimization does not work in the following case, + // but NOT is optimized before: + // CREATE TABLE TEST(A INT, B INT); + // INSERT INTO TEST VALUES(1, NULL); + // SELECT * FROM TEST WHERE NOT (B=A AND B=0); // no rows + // SELECT * FROM TEST WHERE NOT (B=A AND B=0 AND A=0); // 1, + // NULL + // try to add conditions (A=B AND B=1: add A=1) + if (left instanceof Comparison && right instanceof Comparison) { + // try to add conditions (A=B AND B=1: add A=1) + Expression added = ((Comparison) left).getAdditionalAnd(session, (Comparison) right); + if (added != 
null) { + if (this.added == null) { + this.added = new ArrayList<>(); + } + this.added.add(added.optimize(session)); + } + } + break; + case ConditionAndOr.OR: + if (!session.getDatabase().getSettings().optimizeOr) { + break; + } + Expression reduced; + if (left instanceof Comparison && right instanceof Comparison) { + reduced = ((Comparison) left).optimizeOr(session, (Comparison) right); + } else if (left instanceof ConditionIn && right instanceof Comparison) { + reduced = ((ConditionIn) left).getAdditional((Comparison) right); + } else if (right instanceof ConditionIn && left instanceof Comparison) { + reduced = ((ConditionIn) right).getAdditional((Comparison) left); + } else if (left instanceof ConditionInConstantSet && right instanceof Comparison) { + reduced = ((ConditionInConstantSet) left).getAdditional(session, (Comparison) right); + } else if (right instanceof ConditionInConstantSet && left instanceof Comparison) { + reduced = ((ConditionInConstantSet) right).getAdditional(session, (Comparison) left); + } else if (left instanceof ConditionAndOr && right instanceof ConditionAndOr) { + reduced = ConditionAndOr.optimizeConditionAndOr((ConditionAndOr) left, (ConditionAndOr) right); + } else { + // TODO optimization: convert .. OR .. 
to UNION if the cost + // is lower + break; + } + if (reduced != null) { + expressions.remove(i); + expressions.set(i - 1, reduced.optimize(session)); + continue; // because we don't want to increment, we want to compare the new pair exposed + } + } + + Expression e = ConditionAndOr.optimizeIfConstant(session, andOrType, left, right); + if (e != null) { + expressions.remove(i); + expressions.set(i-1, e); + continue; // because we don't want to increment, we want to compare the new pair exposed + } + + if (optimizeMerge(i)) { + continue; + } + + i++; + } + + Collections.sort(expressions, COMPARE_BY_COST); + + if (expressions.size() == 1) { + return Condition.castToBoolean(session, expressions.get(0)); + } + return this; + } + + + private boolean optimizeMerge(int i) { + Expression e = expressions.get(i); + // If we have a ConditionAndOrN as a sub-expression, see if we can merge it + // into this one. + if (e instanceof ConditionAndOrN) { + ConditionAndOrN rightCondition = (ConditionAndOrN) e; + if (this.andOrType == rightCondition.andOrType) { + expressions.remove(i); + expressions.addAll(i, rightCondition.expressions); + return true; + } + } + else if (e instanceof ConditionAndOr) { + ConditionAndOr rightCondition = (ConditionAndOr) e; + if (this.andOrType == rightCondition.getAndOrType()) { + expressions.set(i, rightCondition.getSubexpression(0)); + expressions.add(i+1, rightCondition.getSubexpression(1)); + return true; + } + } + return false; + } + + @Override + public void addFilterConditions(TableFilter filter) { + if (andOrType == ConditionAndOr.AND) { + for (Expression e : expressions) { + e.addFilterConditions(filter); + } + } else { + super.addFilterConditions(filter); + } + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + for (Expression e : expressions) { + e.mapColumns(resolver, level, state); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + for (Expression e : 
expressions) { + e.setEvaluatable(tableFilter, b); + } + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + for (Expression e : expressions) { + e.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + for (Expression e : expressions) { + if (!e.isEverything(visitor)) { + return false; + } + } + return true; + } + + @Override + public int getCost() { + int cost = 0; + for (Expression e : expressions) { + cost += e.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return expressions.size(); + } + + @Override + public Expression getSubexpression(int index) { + return expressions.get(index); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionIn.java b/h2/src/main/org/h2/expression/condition/ConditionIn.java new file mode 100644 index 0000000000..663f6fc24a --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionIn.java @@ -0,0 +1,270 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * An 'in' condition with a list of values, as in WHERE NAME IN(...) 
+ */ +public final class ConditionIn extends Condition { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final ArrayList valueList; + + /** + * Create a new IN(..) condition. + * + * @param left the expression before IN + * @param not whether the result should be negated + * @param whenOperand whether this is a when operand + * @param values the value list (at least one element) + */ + public ConditionIn(Expression left, boolean not, boolean whenOperand, ArrayList values) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.valueList = values; + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(session, left).isTrue(); + } + + private Value getValue(SessionLocal session, Value left) { + if (left.containsNull()) { + return ValueNull.INSTANCE; + } + boolean hasNull = false; + for (Expression e : valueList) { + Value r = e.getValue(session); + Value cmp = Comparison.compare(session, left, r, Comparison.EQUAL); + if (cmp == ValueNull.INSTANCE) { + hasNull = true; + } else if (cmp == ValueBoolean.TRUE) { + return ValueBoolean.get(!not); + } + } + if (hasNull) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + for (Expression e : valueList) { + e.mapColumns(resolver, level, state); + } + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + boolean constant = !whenOperand && left.isConstant(); + if (constant && left.isNullConstant()) { + return 
TypedValueExpression.UNKNOWN; + } + boolean allValuesConstant = true; + boolean allValuesNull = true; + TypeInfo leftType = left.getType(); + for (int i = 0, l = valueList.size(); i < l; i++) { + Expression e = valueList.get(i); + e = e.optimize(session); + TypeInfo.checkComparable(leftType, e.getType()); + if (e.isConstant() && !e.getValue(session).containsNull()) { + allValuesNull = false; + } + if (allValuesConstant && !e.isConstant()) { + allValuesConstant = false; + } + if (left instanceof ExpressionColumn && e instanceof Parameter) { + ((Parameter) e).setColumn(((ExpressionColumn) left).getColumn()); + } + valueList.set(i, e); + } + return optimize2(session, constant, allValuesConstant, allValuesNull, valueList); + } + + private Expression optimize2(SessionLocal session, boolean constant, boolean allValuesConstant, + boolean allValuesNull, ArrayList values) { + if (constant && allValuesConstant) { + return ValueExpression.getBoolean(getValue(session)); + } + if (values.size() == 1) { + return new Comparison(not ? 
Comparison.NOT_EQUAL : Comparison.EQUAL, left, values.get(0), whenOperand) + .optimize(session); + } + if (allValuesConstant && !allValuesNull) { + int leftType = left.getType().getValueType(); + if (leftType == Value.UNKNOWN) { + return this; + } + if (leftType == Value.ENUM && !(left instanceof ExpressionColumn)) { + return this; + } + return new ConditionInConstantSet(session, left, not, whenOperand, values).optimize(session); + } + return this; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionIn(left, !not, false, valueList); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + return; + } + if (session.getDatabase().getSettings().optimizeInList) { + ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); + TypeInfo colType = l.getType(); + for (Expression e : valueList) { + if (!e.isEverything(visitor) + || !TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, e.getType()))) { + return; + } + } + filter.addIndexCondition(IndexCondition.getInList(l, valueList)); + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + for (Expression e : valueList) { + e.setEvaluatable(tableFilter, b); + } + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + return writeExpressions(builder.append(" IN("), valueList, sqlFlags).append(')'); + } 
+ + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + for (Expression e : valueList) { + e.updateAggregate(session, stage); + } + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!left.isEverything(visitor)) { + return false; + } + return areAllValues(visitor); + } + + private boolean areAllValues(ExpressionVisitor visitor) { + for (Expression e : valueList) { + if (!e.isEverything(visitor)) { + return false; + } + } + return true; + } + + @Override + public int getCost() { + int cost = left.getCost(); + for (Expression e : valueList) { + cost += e.getCost(); + } + return cost; + } + + /** + * Add an additional element if possible. Example: given two conditions + * A IN(1, 2) OR A=3, the constant 3 is added: A IN(1, 2, 3). + * + * @param other the second condition + * @return null if the condition was not added, or the new condition + */ + Expression getAdditional(Comparison other) { + if (!not && !whenOperand && left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + Expression add = other.getIfEquals(left); + if (add != null) { + ArrayList list = new ArrayList<>(valueList.size() + 1); + list.addAll(valueList); + list.add(add); + return new ConditionIn(left, false, false, list); + } + } + return null; + } + + @Override + public int getSubexpressionCount() { + return 1 + valueList.size(); + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } else if (index > 0 && index <= valueList.size()) { + return valueList.get(index - 1); + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java b/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java new file mode 100644 index 0000000000..4174e8bd15 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionInConstantSet.java @@ -0,0 +1,219 @@ +/* + * Copyright 2004-2022 
H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; +import java.util.TreeSet; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.IndexCondition; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Used for optimised IN(...) queries where the contents of the IN list are all + * constant and of the same type. + */ +public final class ConditionInConstantSet extends Condition { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final ArrayList valueList; + // HashSet cannot be used here, because we need to compare values of + // different type or scale properly. + private final TreeSet valueSet; + private boolean hasNull; + private final TypeInfo type; + + /** + * Create a new IN(..) condition. + * + * @param session the session + * @param left + * the expression before IN. Cannot have {@link Value#UNKNOWN} + * data type and {@link Value#ENUM} type is also supported only + * for {@link ExpressionColumn}. 
+ * @param not whether the result should be negated + * @param whenOperand whether this is a when operand + * @param valueList + * the value list (at least two elements); all values must be + * comparable with left value + */ + ConditionInConstantSet(SessionLocal session, Expression left, boolean not, boolean whenOperand, + ArrayList valueList) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.valueList = valueList; + this.valueSet = new TreeSet<>(session.getDatabase().getCompareMode()); + TypeInfo type = left.getType(); + for (Expression expression : valueList) { + type = TypeInfo.getHigherType(type, expression.getType()); + } + this.type = type; + for (Expression expression : valueList) { + add(expression.getValue(session), session); + } + } + + private void add(Value v, SessionLocal session) { + if ((v = v.convertTo(type, session)).containsNull()) { + hasNull = true; + } else { + valueSet.add(v); + } + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(left.getValue(session), session); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(left, session).isTrue(); + } + + private Value getValue(Value left, SessionLocal session) { + if ((left = left.convertTo(type, session)).containsNull()) { + return ValueNull.INSTANCE; + } + boolean result = valueSet.contains(left); + if (!result && hasNull) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not ^ result); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + return this; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if 
(whenOperand) { + return null; + } + return new ConditionInConstantSet(session, left, !not, false, valueList); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + return; + } + if (session.getDatabase().getSettings().optimizeInList) { + TypeInfo colType = l.getType(); + if (TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, type))) { + filter.addIndexCondition(IndexCondition.getInList(l, valueList)); + } + } + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT"); + } + return writeExpressions(builder.append(" IN("), valueList, sqlFlags).append(')'); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost(); + } + + /** + * Add an additional element if possible. Example: given two conditions + * A IN(1, 2) OR A=3, the constant 3 is added: A IN(1, 2, 3). 
+ * + * @param session the session + * @param other the second condition + * @return null if the condition was not added, or the new condition + */ + Expression getAdditional(SessionLocal session, Comparison other) { + if (!not && !whenOperand && left.isEverything(ExpressionVisitor.DETERMINISTIC_VISITOR)) { + Expression add = other.getIfEquals(left); + if (add != null) { + if (add.isConstant()) { + ArrayList list = new ArrayList<>(valueList.size() + 1); + list.addAll(valueList); + list.add(add); + return new ConditionInConstantSet(session, left, false, false, list); + } + } + } + return null; + } + + @Override + public int getSubexpressionCount() { + return 1 + valueList.size(); + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } else if (index > 0 && index <= valueList.size()) { + return valueList.get(index - 1); + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionInParameter.java b/h2/src/main/org/h2/expression/condition/ConditionInParameter.java new file mode 100644 index 0000000000..6bbf2f82be --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionInParameter.java @@ -0,0 +1,224 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.AbstractList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Parameter; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * A condition with parameter as {@code = ANY(?)}. + */ +public final class ConditionInParameter extends Condition { + private static final class ParameterList extends AbstractList { + private final Parameter parameter; + + ParameterList(Parameter parameter) { + this.parameter = parameter; + } + + @Override + public Expression get(int index) { + Value value = parameter.getParamValue(); + if (value instanceof ValueArray) { + return ValueExpression.get(((ValueArray) value).getList()[index]); + } + if (index != 0) { + throw new IndexOutOfBoundsException(); + } + return ValueExpression.get(value); + } + + @Override + public int size() { + if (!parameter.isValueSet()) { + return 0; + } + Value value = parameter.getParamValue(); + if (value instanceof ValueArray) { + return ((ValueArray) value).getList().length; + } + return 1; + } + } + + private Expression left; + + private boolean not; + + private boolean whenOperand; + + private final Parameter parameter; + + /** + * Gets evaluated condition value. + * + * @param session the session + * @param l left value. + * @param not whether the result should be negated + * @param value parameter value. + * @return Evaluated condition value. 
+ */ + static Value getValue(SessionLocal session, Value l, boolean not, Value value) { + boolean hasNull = false; + if (value.containsNull()) { + hasNull = true; + } else { + for (Value r : value.convertToAnyArray(session).getList()) { + Value cmp = Comparison.compare(session, l, r, Comparison.EQUAL); + if (cmp == ValueNull.INSTANCE) { + hasNull = true; + } else if (cmp == ValueBoolean.TRUE) { + return ValueBoolean.get(!not); + } + } + } + if (hasNull) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not); + } + + /** + * Create a new {@code = ANY(?)} condition. + * + * @param left + * the expression before {@code = ANY(?)} + * @param not whether the result should be negated + * @param whenOperand whether this is a when operand + * @param parameter + * parameter + */ + public ConditionInParameter(Expression left, boolean not, boolean whenOperand, Parameter parameter) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + this.parameter = parameter; + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return getValue(session, l, not, parameter.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return getValue(session, left, not, parameter.getValue(session)).isTrue(); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (!whenOperand && left.isNullConstant()) { + return TypedValueExpression.UNKNOWN; + } + return this; + } + + @Override + public Expression 
getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionInParameter(left, !not, false, parameter); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !(left instanceof ExpressionColumn)) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + return; + } + filter.addIndexCondition(IndexCondition.getInList(l, new ParameterList(parameter))); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append("NOT ("); + } + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + parameter.getSQL(builder.append(" = ANY("), sqlFlags, AUTO_PARENTHESES).append(')'); + if (not) { + builder.append(')'); + } + return builder; + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (not) { + builder.append(" NOT IN(UNNEST("); + parameter.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append("))"); + } else { + builder.append(" = ANY("); + parameter.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append(')'); + } + return builder; + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && parameter.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionInQuery.java b/h2/src/main/org/h2/expression/condition/ConditionInQuery.java new file mode 100644 index 0000000000..700aea1917 --- /dev/null +++ 
b/h2/src/main/org/h2/expression/condition/ConditionInQuery.java @@ -0,0 +1,256 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.IndexCondition; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * An IN() condition with a subquery, as in WHERE ID IN(SELECT ...) + */ +public final class ConditionInQuery extends PredicateWithSubquery { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final boolean all; + private final int compareType; + + public ConditionInQuery(Expression left, boolean not, boolean whenOperand, Query query, boolean all, + int compareType) { + super(query); + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + /* + * Need to do it now because other methods may be invoked in different + * order. 
+ */ + query.setRandomAccessResult(true); + query.setNeverLazy(true); + query.setDistinctIfPossible(); + this.all = all; + this.compareType = compareType; + } + + @Override + public Value getValue(SessionLocal session) { + return getValue(session, left.getValue(session)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(session, left).isTrue(); + } + + private Value getValue(SessionLocal session, Value left) { + query.setSession(session); + LocalResult rows = (LocalResult) query.query(0); + if (!rows.hasNext()) { + return ValueBoolean.get(not ^ all); + } + if ((compareType & ~1) == Comparison.EQUAL_NULL_SAFE) { + return getNullSafeValueSlow(session, rows, left); + } + if (left.containsNull()) { + return ValueNull.INSTANCE; + } + if (all || compareType != Comparison.EQUAL || !session.getDatabase().getSettings().optimizeInSelect) { + return getValueSlow(session, rows, left); + } + int columnCount = query.getColumnCount(); + if (columnCount != 1) { + Value[] leftValue = left.convertToAnyRow().getList(); + if (columnCount == leftValue.length && rows.containsDistinct(leftValue)) { + return ValueBoolean.get(!not); + } + } else { + TypeInfo colType = rows.getColumnType(0); + if (colType.getValueType() == Value.NULL) { + return ValueNull.INSTANCE; + } + if (left.getValueType() == Value.ROW) { + left = ((ValueRow) left).getList()[0]; + } + if (rows.containsDistinct(new Value[] { left })) { + return ValueBoolean.get(!not); + } + } + if (rows.containsNull()) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not); + } + + private Value getValueSlow(SessionLocal session, ResultInterface rows, Value l) { + // this only returns the correct result if the result has at least one + // row, and if l is not null + boolean simple = l.getValueType() != Value.ROW && query.getColumnCount() == 1; + boolean hasNull = false; + ValueBoolean searched = 
ValueBoolean.get(!all); + while (rows.next()) { + Value[] currentRow = rows.currentRow(); + Value cmp = Comparison.compare(session, l, simple ? currentRow[0] : ValueRow.get(currentRow), + compareType); + if (cmp == ValueNull.INSTANCE) { + hasNull = true; + } else if (cmp == searched) { + return ValueBoolean.get(not == all); + } + } + if (hasNull) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(not ^ all); + } + + private Value getNullSafeValueSlow(SessionLocal session, ResultInterface rows, Value l) { + boolean simple = l.getValueType() != Value.ROW && query.getColumnCount() == 1; + boolean searched = all == (compareType == Comparison.NOT_EQUAL_NULL_SAFE); + while (rows.next()) { + Value[] currentRow = rows.currentRow(); + if (session.areEqual(l, simple ? currentRow[0] : ValueRow.get(currentRow)) == searched) { + return ValueBoolean.get(not == all); + } + } + return ValueBoolean.get(not ^ all); + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new ConditionInQuery(left, !not, false, query, all, compareType); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + super.mapColumns(resolver, level, state); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + left = left.optimize(session); + TypeInfo.checkComparable(left.getType(), query.getRowDataType()); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + super.setEvaluatable(tableFilter, b); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + boolean outerNot = not && (all || compareType != Comparison.EQUAL); + if 
(outerNot) { + builder.append("NOT ("); + } + left.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + getWhenSQL(builder, sqlFlags); + if (outerNot) { + builder.append(')'); + } + return builder; + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + if (all) { + builder.append(Comparison.COMPARE_TYPES[compareType]).append(" ALL"); + } else if (compareType == Comparison.EQUAL) { + if (not) { + builder.append(" NOT"); + } + builder.append(" IN"); + } else { + builder.append(' ').append(Comparison.COMPARE_TYPES[compareType]).append(" ANY"); + } + return super.getUnenclosedSQL(builder, sqlFlags); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + super.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor) && super.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + super.getCost(); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (!session.getDatabase().getSettings().optimizeInList) { + return; + } + if (not || compareType != Comparison.EQUAL) { + return; + } + if (query.getColumnCount() != 1) { + return; + } + if (!(left instanceof ExpressionColumn)) { + return; + } + TypeInfo colType = left.getType(); + TypeInfo queryType = query.getExpressions().get(0).getType(); + if (!TypeInfo.haveSameOrdering(colType, TypeInfo.getHigherType(colType, queryType))) { + return; + } + int leftType = colType.getValueType(); + if (!DataType.hasTotalOrdering(leftType) && leftType != queryType.getValueType()) { + return; + } + ExpressionColumn l = (ExpressionColumn) left; + if (filter != l.getTableFilter()) { + return; + } + ExpressionVisitor visitor = ExpressionVisitor.getNotFromResolverVisitor(filter); + if (!query.isEverything(visitor)) { + return; + } + 
filter.addIndexCondition(IndexCondition.getInQuery(l, query)); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java b/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java new file mode 100644 index 0000000000..032604b6bb --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionLocalAndGlobal.java @@ -0,0 +1,152 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * A global condition or combination of local and global conditions. May be used + * only as a top-level expression in a WHERE, HAVING, or QUALIFY clause of a + * SELECT. 
+ */ +public class ConditionLocalAndGlobal extends Condition { + + private Expression local, global; + + public ConditionLocalAndGlobal(Expression local, Expression global) { + if (global == null) { + throw DbException.getInternalError(); + } + this.local = local; + this.global = global; + } + + @Override + public boolean needParentheses() { + return local != null || global.needParentheses(); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + if (local == null) { + return global.getUnenclosedSQL(builder, sqlFlags); + } + local.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + builder.append("\n _LOCAL_AND_GLOBAL_ "); + return global.getSQL(builder, sqlFlags, AUTO_PARENTHESES); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (local != null) { + local.createIndexConditions(session, filter); + } + global.createIndexConditions(session, filter); + } + + @Override + public Value getValue(SessionLocal session) { + if (local == null) { + return global.getValue(session); + } + Value l = local.getValue(session), r; + if (l.isFalse() || (r = global.getValue(session)).isFalse()) { + return ValueBoolean.FALSE; + } + if (l == ValueNull.INSTANCE || r == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.TRUE; + } + + @Override + public Expression optimize(SessionLocal session) { + global = global.optimize(session); + if (local != null) { + local = local.optimize(session); + Expression e = ConditionAndOr.optimizeIfConstant(session, ConditionAndOr.AND, local, global); + if (e != null) { + return e; + } + } + return this; + } + + @Override + public void addFilterConditions(TableFilter filter) { + if (local != null) { + local.addFilterConditions(filter); + } + global.addFilterConditions(filter); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + if (local != null) { + local.mapColumns(resolver, level, state); + } + 
global.mapColumns(resolver, level, state); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + if (local != null) { + local.setEvaluatable(tableFilter, b); + } + global.setEvaluatable(tableFilter, b); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + if (local != null) { + local.updateAggregate(session, stage); + } + global.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return (local == null || local.isEverything(visitor)) && global.isEverything(visitor); + } + + @Override + public int getCost() { + int cost = global.getCost(); + if (local != null) { + cost += local.getCost(); + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return local == null ? 1 : 2; + } + + @Override + public Expression getSubexpression(int index) { + switch (index) { + case 0: + return local != null ? local : global; + case 1: + if (local != null) { + return global; + } + //$FALL-THROUGH$ + default: + throw new IndexOutOfBoundsException(); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ConditionNot.java b/h2/src/main/org/h2/expression/condition/ConditionNot.java new file mode 100644 index 0000000000..215926c059 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ConditionNot.java @@ -0,0 +1,109 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A NOT condition. 
+ */ +public class ConditionNot extends Condition { + + private Expression condition; + + public ConditionNot(Expression condition) { + this.condition = condition; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + return castToBoolean(session, condition.optimize(session)); + } + + @Override + public Value getValue(SessionLocal session) { + Value v = condition.getValue(session); + if (v == ValueNull.INSTANCE) { + return v; + } + return v.convertToBoolean().negate(); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + condition.mapColumns(resolver, level, state); + } + + @Override + public Expression optimize(SessionLocal session) { + Expression e2 = condition.getNotIfPossible(session); + if (e2 != null) { + return e2.optimize(session); + } + Expression expr = condition.optimize(session); + if (expr.isConstant()) { + Value v = expr.getValue(session); + if (v == ValueNull.INSTANCE) { + return TypedValueExpression.UNKNOWN; + } + return ValueExpression.getBoolean(!v.getBoolean()); + } + condition = expr; + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + condition.setEvaluatable(tableFilter, b); + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return condition.getSQL(builder.append("NOT "), sqlFlags, AUTO_PARENTHESES); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + condition.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return condition.isEverything(visitor); + } + + @Override + public int getCost() { + return condition.getCost(); + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return condition; + } + throw new 
IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/ExistsPredicate.java b/h2/src/main/org/h2/expression/condition/ExistsPredicate.java new file mode 100644 index 0000000000..be487b4342 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/ExistsPredicate.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; + +/** + * Exists predicate as in EXISTS(SELECT ...) + */ +public class ExistsPredicate extends PredicateWithSubquery { + + public ExistsPredicate(Query query) { + super(query); + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + return ValueBoolean.get(query.exists()); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return super.getUnenclosedSQL(builder.append("EXISTS"), sqlFlags); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java b/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java new file mode 100644 index 0000000000..67b56ea0a3 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/IsJsonPredicate.java @@ -0,0 +1,217 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JSONStringSource; +import org.h2.util.json.JSONValidationTarget; +import org.h2.util.json.JSONValidationTargetWithUniqueKeys; +import org.h2.util.json.JSONValidationTargetWithoutUniqueKeys; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueJson; +import org.h2.value.ValueNull; + +/** + * IS JSON predicate. + */ +public final class IsJsonPredicate extends Condition { + + private Expression left; + private final boolean not; + private final boolean whenOperand; + private final boolean withUniqueKeys; + private final JSONItemType itemType; + + public IsJsonPredicate(Expression left, boolean not, boolean whenOperand, boolean withUniqueKeys, + JSONItemType itemType) { + this.left = left; + this.whenOperand = whenOperand; + this.not = not; + this.withUniqueKeys = withUniqueKeys; + this.itemType = itemType; + } + + @Override + public boolean needParentheses() { + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(" IS"); + if (not) { + builder.append(" NOT"); + } + builder.append(" JSON"); + switch (itemType) { + case VALUE: + break; + case ARRAY: + builder.append(" ARRAY"); + break; + case OBJECT: + builder.append(" OBJECT"); + break; + case SCALAR: + builder.append(" SCALAR"); + break; + default: + throw DbException.getInternalError("itemType=" 
+ itemType); + } + if (withUniqueKeys) { + builder.append(" WITH UNIQUE KEYS"); + } + return builder; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (!whenOperand && left.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(getValue(l)); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return getValue(left); + } + + private boolean getValue(Value left) { + boolean result; + switch (left.getValueType()) { + case Value.VARBINARY: + case Value.BINARY: + case Value.BLOB: { + byte[] bytes = left.getBytesNoCopy(); + JSONValidationTarget target = withUniqueKeys ? new JSONValidationTargetWithUniqueKeys() + : new JSONValidationTargetWithoutUniqueKeys(); + try { + result = itemType.includes(JSONBytesSource.parse(bytes, target)) ^ not; + } catch (RuntimeException ex) { + result = not; + } + break; + } + case Value.JSON: { + JSONItemType valueItemType = ((ValueJson) left).getItemType(); + if (!itemType.includes(valueItemType)) { + result = not; + break; + } else if (!withUniqueKeys || valueItemType == JSONItemType.SCALAR) { + result = !not; + break; + } + } + //$FALL-THROUGH$ + case Value.VARCHAR: + case Value.VARCHAR_IGNORECASE: + case Value.CHAR: + case Value.CLOB: { + String string = left.getString(); + JSONValidationTarget target = withUniqueKeys ? 
new JSONValidationTargetWithUniqueKeys() + : new JSONValidationTargetWithoutUniqueKeys(); + try { + result = itemType.includes(JSONStringSource.parse(string, target)) ^ not; + } catch (RuntimeException ex) { + result = not; + } + break; + } + default: + result = not; + } + return result; + } + + @Override + public boolean isWhenConditionOperand() { + return whenOperand; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new IsJsonPredicate(left, !not, false, withUniqueKeys, itemType); + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor); + } + + @Override + public int getCost() { + int cost = left.getCost(); + if (left.getType().getValueType() == Value.JSON && (!withUniqueKeys || itemType == JSONItemType.SCALAR)) { + cost++; + } else { + cost += 10; + } + return cost; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + throw new IndexOutOfBoundsException(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/NullPredicate.java b/h2/src/main/org/h2/expression/condition/NullPredicate.java new file mode 100644 index 0000000000..46ae3bfcd8 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/NullPredicate.java @@ -0,0 +1,153 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.ArrayList; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.expression.ExpressionList; +import org.h2.expression.ValueExpression; +import org.h2.index.IndexCondition; +import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Null predicate (IS [NOT] NULL). + */ +public final class NullPredicate extends SimplePredicate { + + private boolean optimized; + + public NullPredicate(Expression left, boolean not, boolean whenOperand) { + super(left, not, whenOperand); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + return builder.append(not ? " IS NOT NULL" : " IS NULL"); + } + + @Override + public Expression optimize(SessionLocal session) { + if (optimized) { + return this; + } + Expression o = super.optimize(session); + if (o != this) { + return o; + } + optimized = true; + if (!whenOperand && left instanceof ExpressionList) { + ExpressionList list = (ExpressionList) left; + if (!list.isArray()) { + for (int i = 0, count = list.getSubexpressionCount(); i < count; i++) { + if (list.getSubexpression(i).isNullConstant()) { + if (not) { + return ValueExpression.FALSE; + } + ArrayList newList = new ArrayList<>(count - 1); + for (int j = 0; j < i; j++) { + newList.add(list.getSubexpression(j)); + } + for (int j = i + 1; j < count; j++) { + Expression e = list.getSubexpression(j); + if (!e.isNullConstant()) { + newList.add(e); + } + } + left = newList.size() == 1 ? 
newList.get(0) // + : new ExpressionList(newList.toArray(new Expression[0]), false); + break; + } + } + } + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + return ValueBoolean.get(getValue(left.getValue(session))); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + return getValue(left); + } + + private boolean getValue(Value left) { + if (left.getType().getValueType() == Value.ROW) { + for (Value v : ((ValueRow) left).getList()) { + if (v != ValueNull.INSTANCE ^ not) { + return false; + } + } + return true; + } + return left == ValueNull.INSTANCE ^ not; + } + + @Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + Expression o = optimize(session); + if (o != this) { + return o.getNotIfPossible(session); + } + switch (left.getType().getValueType()) { + case Value.UNKNOWN: + case Value.ROW: + return null; + } + return new NullPredicate(left, !not, false); + } + + @Override + public void createIndexConditions(SessionLocal session, TableFilter filter) { + if (not || whenOperand || !filter.getTable().isQueryComparable()) { + return; + } + if (left instanceof ExpressionColumn) { + createNullIndexCondition(filter, (ExpressionColumn) left); + } else if (left instanceof ExpressionList) { + ExpressionList list = (ExpressionList) left; + if (!list.isArray()) { + for (int i = 0, count = list.getSubexpressionCount(); i < count; i++) { + Expression e = list.getSubexpression(i); + if (e instanceof ExpressionColumn) { + createNullIndexCondition(filter, (ExpressionColumn) e); + } + } + } + } + } + + private static void createNullIndexCondition(TableFilter filter, ExpressionColumn c) { + /* + * Columns with row value data type aren't valid, but perform such check + * to be sure. 
+ */ + if (filter == c.getTableFilter() && c.getType().getValueType() != Value.ROW) { + filter.addIndexCondition(IndexCondition.get(Comparison.EQUAL_NULL_SAFE, c, ValueExpression.NULL)); + } + } + +} diff --git a/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java b/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java new file mode 100644 index 0000000000..8065315a72 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/PredicateWithSubquery.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; +import org.h2.util.StringUtils; + +/** + * Base class for predicates with a subquery. + */ +abstract class PredicateWithSubquery extends Condition { + + /** + * The subquery. 
+ */ + final Query query; + + PredicateWithSubquery(Query query) { + this.query = query; + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + query.mapColumns(resolver, level + 1); + } + + @Override + public Expression optimize(SessionLocal session) { + query.prepare(); + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean value) { + query.setEvaluatable(tableFilter, value); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return StringUtils.indent(builder.append('('), query.getPlanSQL(sqlFlags), 4, false).append(')'); + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + query.updateAggregate(session, stage); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return query.isEverything(visitor); + } + + @Override + public int getCost() { + return query.getCostAsExpression(); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/SimplePredicate.java b/h2/src/main/org/h2/expression/condition/SimplePredicate.java new file mode 100644 index 0000000000..6a23513a85 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/SimplePredicate.java @@ -0,0 +1,98 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.ValueExpression; +import org.h2.table.ColumnResolver; +import org.h2.table.TableFilter; + +/** + * Base class for simple predicates. + */ +public abstract class SimplePredicate extends Condition { + + /** + * The left hand side of the expression. + */ + Expression left; + + /** + * Whether it is a "not" condition (e.g. "is not null"). 
+ */ + final boolean not; + + /** + * Where this is the when operand of the simple case. + */ + final boolean whenOperand; + + SimplePredicate(Expression left, boolean not, boolean whenOperand) { + this.left = left; + this.not = not; + this.whenOperand = whenOperand; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (!whenOperand && left.isConstant()) { + return ValueExpression.getBoolean(getValue(session)); + } + return this; + } + + @Override + public void setEvaluatable(TableFilter tableFilter, boolean b) { + left.setEvaluatable(tableFilter, b); + } + + @Override + public final boolean needParentheses() { + return true; + } + + @Override + public void updateAggregate(SessionLocal session, int stage) { + left.updateAggregate(session, stage); + } + + @Override + public void mapColumns(ColumnResolver resolver, int level, int state) { + left.mapColumns(resolver, level, state); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return left.isEverything(visitor); + } + + @Override + public int getCost() { + return left.getCost() + 1; + } + + @Override + public int getSubexpressionCount() { + return 1; + } + + @Override + public Expression getSubexpression(int index) { + if (index == 0) { + return left; + } + throw new IndexOutOfBoundsException(); + } + + @Override + public final boolean isWhenConditionOperand() { + return whenOperand; + } + +} diff --git a/h2/src/main/org/h2/expression/condition/TypePredicate.java b/h2/src/main/org/h2/expression/condition/TypePredicate.java new file mode 100644 index 0000000000..74ce12ee23 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/TypePredicate.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Type predicate (IS [NOT] OF). + */ +public final class TypePredicate extends SimplePredicate { + + private final TypeInfo[] typeList; + private int[] valueTypes; + + public TypePredicate(Expression left, boolean not, boolean whenOperand, TypeInfo[] typeList) { + super(left, not, whenOperand); + this.typeList = typeList; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return getWhenSQL(left.getSQL(builder, sqlFlags, AUTO_PARENTHESES), sqlFlags); + } + + @Override + public StringBuilder getWhenSQL(StringBuilder builder, int sqlFlags) { + builder.append(" IS"); + if (not) { + builder.append(" NOT"); + } + builder.append(" OF ("); + for (int i = 0; i < typeList.length; i++) { + if (i > 0) { + builder.append(", "); + } + typeList[i].getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + @Override + public Expression optimize(SessionLocal session) { + int count = typeList.length; + valueTypes = new int[count]; + for (int i = 0; i < count; i++) { + valueTypes[i] = typeList[i].getValueType(); + } + Arrays.sort(valueTypes); + return super.optimize(session); + } + + @Override + public Value getValue(SessionLocal session) { + Value l = left.getValue(session); + if (l == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + return ValueBoolean.get(Arrays.binarySearch(valueTypes, l.getValueType()) >= 0 ^ not); + } + + @Override + public boolean getWhenValue(SessionLocal session, Value left) { + if (!whenOperand) { + return super.getWhenValue(session, left); + } + if (left == ValueNull.INSTANCE) { + return false; + } + return Arrays.binarySearch(valueTypes, left.getValueType()) >= 0 ^ not; + } + + 
@Override + public Expression getNotIfPossible(SessionLocal session) { + if (whenOperand) { + return null; + } + return new TypePredicate(left, !not, false, typeList); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/UniquePredicate.java b/h2/src/main/org/h2/expression/condition/UniquePredicate.java new file mode 100644 index 0000000000..745e242fe9 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/UniquePredicate.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.condition; + +import java.util.Arrays; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.result.LocalResult; +import org.h2.result.ResultTarget; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; + +/** + * Unique predicate as in UNIQUE(SELECT ...) + */ +public class UniquePredicate extends PredicateWithSubquery { + + private static final class Target implements ResultTarget { + + private final int columnCount; + + private final LocalResult result; + + boolean hasDuplicates; + + Target(int columnCount, LocalResult result) { + this.columnCount = columnCount; + this.result = result; + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + @Override + public long getRowCount() { + // Not required + return 0L; + } + + @Override + public void addRow(Value... 
values) { + if (hasDuplicates) { + return; + } + for (int i = 0; i < columnCount; i++) { + if (values[i] == ValueNull.INSTANCE) { + return; + } + } + if (values.length != columnCount) { + values = Arrays.copyOf(values, columnCount); + } + long expected = result.getRowCount() + 1; + result.addRow(values); + if (expected != result.getRowCount()) { + hasDuplicates = true; + result.close(); + } + } + } + + public UniquePredicate(Query query) { + super(query); + } + + @Override + public Expression optimize(SessionLocal session) { + super.optimize(session); + if (query.isStandardDistinct()) { + return ValueExpression.TRUE; + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + query.setSession(session); + int columnCount = query.getColumnCount(); + LocalResult result = new LocalResult(session, + query.getExpressions().toArray(new Expression[0]), columnCount, columnCount); + result.setDistinct(); + Target target = new Target(columnCount, result); + query.query(Integer.MAX_VALUE, target); + result.close(); + return ValueBoolean.get(!target.hasDuplicates); + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return super.getUnenclosedSQL(builder.append("UNIQUE"), sqlFlags); + } + +} diff --git a/h2/src/main/org/h2/expression/condition/package.html b/h2/src/main/org/h2/expression/condition/package.html new file mode 100644 index 0000000000..b8c56e2158 --- /dev/null +++ b/h2/src/main/org/h2/expression/condition/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Condition expressions. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/function/ArrayFunction.java b/h2/src/main/org/h2/expression/function/ArrayFunction.java new file mode 100644 index 0000000000..ff9798d0a4 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/ArrayFunction.java @@ -0,0 +1,176 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.engine.Mode.ModeEnum; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueCollectionBase; +import org.h2.value.ValueNull; + +/** + * An array function. + */ +public final class ArrayFunction extends FunctionN { + + /** + * TRIM_ARRAY(). + */ + public static final int TRIM_ARRAY = 0; + + /** + * ARRAY_CONTAINS() (non-standard). + */ + public static final int ARRAY_CONTAINS = TRIM_ARRAY + 1; + + /** + * ARRAY_SLICE() (non-standard). + */ + public static final int ARRAY_SLICE = ARRAY_CONTAINS + 1; + + private static final String[] NAMES = { // + "TRIM_ARRAY", "ARRAY_CONTAINS", "ARRAY_SLICE" // + }; + + private final int function; + + public ArrayFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(arg3 == null ? 
new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v1 = args[0].getValue(session), v2 = args[1].getValue(session); + switch (function) { + case TRIM_ARRAY: { + if (v2 == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + break; + } + int trim = v2.getInt(); + if (trim < 0) { + // This exception should be thrown even when array is null + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(trim), // + "0..CARDINALITY(array)"); + } + if (v1 == ValueNull.INSTANCE) { + break; + } + final ValueArray array = v1.convertToAnyArray(session); + Value[] elements = array.getList(); + int length = elements.length; + if (trim > length) { + throw DbException.get(ErrorCode.ARRAY_ELEMENT_ERROR_2, Integer.toString(trim), "0.." + length); + } else if (trim == 0) { + v1 = array; + } else { + v1 = ValueArray.get(array.getComponentType(), Arrays.copyOf(elements, length - trim), session); + } + break; + } + case ARRAY_CONTAINS: { + int t = v1.getValueType(); + if (t == Value.ARRAY || t == Value.ROW) { + Value[] list = ((ValueCollectionBase) v1).getList(); + v1 = ValueBoolean.FALSE; + for (Value v : list) { + if (session.areEqual(v, v2)) { + v1 = ValueBoolean.TRUE; + break; + } + } + } else { + v1 = ValueNull.INSTANCE; + } + break; + } + case ARRAY_SLICE: { + Value v3; + if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE + || (v3 = args[2].getValue(session)) == ValueNull.INSTANCE) { + v1 = ValueNull.INSTANCE; + break; + } + ValueArray array = v1.convertToAnyArray(session); + // SQL is 1-based + int index1 = v2.getInt() - 1; + // 1-based and inclusive as postgreSQL (-1+1) + int index2 = v3.getInt(); + // https://www.postgresql.org/docs/current/arrays.html#ARRAYS-ACCESSING + // For historical reasons postgreSQL ignore invalid indexes + final boolean isPG = session.getMode().getEnum() == ModeEnum.PostgreSQL; + if (index1 > index2) { + v1 = isPG 
? ValueArray.get(array.getComponentType(), Value.EMPTY_VALUES, session) : ValueNull.INSTANCE; + break; + } + if (index1 < 0) { + if (isPG) { + index1 = 0; + } else { + v1 = ValueNull.INSTANCE; + break; + } + } + if (index2 > array.getList().length) { + if (isPG) { + index2 = array.getList().length; + } else { + v1 = ValueNull.INSTANCE; + break; + } + } + v1 = ValueArray.get(array.getComponentType(), Arrays.copyOfRange(array.getList(), index1, index2), // + session); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case TRIM_ARRAY: + case ARRAY_SLICE: { + Expression arg = args[0]; + type = arg.getType(); + int t = type.getValueType(); + if (t != Value.ARRAY && t != Value.NULL) { + throw DbException.getInvalidExpressionTypeException(getName() + " array argument", arg); + } + break; + } + case ARRAY_CONTAINS: + type = TypeInfo.TYPE_BOOLEAN; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/BitFunction.java b/h2/src/main/org/h2/expression/function/BitFunction.java new file mode 100644 index 0000000000..7172ff8b66 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/BitFunction.java @@ -0,0 +1,724 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.aggregate.Aggregate; +import org.h2.expression.aggregate.AggregateType; +import org.h2.message.DbException; +import org.h2.util.Bits; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueVarbinary; + +/** + * A bitwise function. + */ +public final class BitFunction extends Function1_2 { + + /** + * BITAND() (non-standard). + */ + public static final int BITAND = 0; + + /** + * BITOR() (non-standard). + */ + public static final int BITOR = BITAND + 1; + + /** + * BITXOR() (non-standard). + */ + public static final int BITXOR = BITOR + 1; + + /** + * BITNOT() (non-standard). + */ + public static final int BITNOT = BITXOR + 1; + + /** + * BITNAND() (non-standard). + */ + public static final int BITNAND = BITNOT + 1; + + /** + * BITNOR() (non-standard). + */ + public static final int BITNOR = BITNAND + 1; + + /** + * BITXNOR() (non-standard). + */ + public static final int BITXNOR = BITNOR + 1; + + /** + * BITGET() (non-standard). + */ + public static final int BITGET = BITXNOR + 1; + + /** + * BITCOUNT() (non-standard). + */ + public static final int BITCOUNT = BITGET + 1; + + /** + * LSHIFT() (non-standard). + */ + public static final int LSHIFT = BITCOUNT + 1; + + /** + * RSHIFT() (non-standard). + */ + public static final int RSHIFT = LSHIFT + 1; + + /** + * ULSHIFT() (non-standard). + */ + public static final int ULSHIFT = RSHIFT + 1; + + /** + * URSHIFT() (non-standard). 
+ */ + public static final int URSHIFT = ULSHIFT + 1; + + /** + * ROTATELEFT() (non-standard). + */ + public static final int ROTATELEFT = URSHIFT + 1; + + /** + * ROTATERIGHT() (non-standard). + */ + public static final int ROTATERIGHT = ROTATELEFT + 1; + + private static final String[] NAMES = { // + "BITAND", "BITOR", "BITXOR", "BITNOT", "BITNAND", "BITNOR", "BITXNOR", "BITGET", "BITCOUNT", "LSHIFT", + "RSHIFT", "ULSHIFT", "URSHIFT", "ROTATELEFT", "ROTATERIGHT" // + }; + + private final int function; + + public BitFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case BITGET: + return bitGet(v1, v2); + case BITCOUNT: + return bitCount(v1); + case LSHIFT: + return shift(v1, v2.getLong(), false); + case RSHIFT: { + long offset = v2.getLong(); + return shift(v1, offset != Long.MIN_VALUE ? -offset : Long.MAX_VALUE, false); + } + case ULSHIFT: + return shift(v1, v2.getLong(), true); + case URSHIFT: + return shift(v1, -v2.getLong(), true); + case ROTATELEFT: + return rotate(v1, v2.getLong(), false); + case ROTATERIGHT: + return rotate(v1, v2.getLong(), true); + } + return getBitwise(function, type, v1, v2); + } + + private static ValueBoolean bitGet(Value v1, Value v2) { + long offset = v2.getLong(); + boolean b; + if (offset >= 0L) { + switch (v1.getValueType()) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int bit = (int) (offset & 0x7); + offset >>>= 3; + b = offset < bytes.length && (bytes[(int) offset] & (1 << bit)) != 0; + break; + } + case Value.TINYINT: + b = offset < 8 && (v1.getByte() & (1 << offset)) != 0; + break; + case Value.SMALLINT: + b = offset < 16 && (v1.getShort() & (1 << offset)) != 0; + break; + case Value.INTEGER: + b = offset < 32 && (v1.getInt() & (1 << offset)) != 0; + break; + case Value.BIGINT: + b = (v1.getLong() & (1L << offset)) 
!= 0; + break; + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + } else { + b = false; + } + return ValueBoolean.get(b); + } + + private static ValueBigint bitCount(Value v1) { + long c; + switch (v1.getValueType()) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int l = bytes.length; + c = 0L; + int blocks = l >>> 3; + for (int i = 0; i < blocks; i++) { + c += Long.bitCount(Bits.readLong(bytes, i)); + } + for (int i = blocks << 3; i < l; i++) { + c += Integer.bitCount(bytes[i] & 0xff); + } + break; + } + case Value.TINYINT: + c = Integer.bitCount(v1.getByte() & 0xff); + break; + case Value.SMALLINT: + c = Integer.bitCount(v1.getShort() & 0xffff); + break; + case Value.INTEGER: + c = Integer.bitCount(v1.getInt()); + break; + case Value.BIGINT: + c = Long.bitCount(v1.getLong()); + break; + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + return ValueBigint.get(c); + } + + private static Value shift(Value v1, long offset, boolean unsigned) { + if (offset == 0L) { + return v1; + } + int vt = v1.getValueType(); + switch (vt) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int length = bytes.length; + if (length == 0) { + return v1; + } + byte[] newBytes = new byte[length]; + if (offset > -8L * length && offset < 8L * length) { + if (offset > 0) { + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 0x7; + if (nBits == 0) { + System.arraycopy(bytes, nBytes, newBytes, 0, length - nBytes); + } else { + int nBits2 = 8 - nBits; + int dstIndex = 0, srcIndex = nBytes; + length--; + while (srcIndex < length) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex++] << nBits + | (bytes[srcIndex] & 0xff) >>> nBits2); + } + newBytes[dstIndex] = (byte) (bytes[srcIndex] << nBits); + } + } else { + offset = -offset; + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 
0x7; + if (nBits == 0) { + System.arraycopy(bytes, 0, newBytes, nBytes, length - nBytes); + } else { + int nBits2 = 8 - nBits; + int dstIndex = nBytes, srcIndex = 0; + newBytes[dstIndex++] = (byte) ((bytes[srcIndex] & 0xff) >>> nBits); + while (dstIndex < length) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex++] << nBits2 + | (bytes[srcIndex] & 0xff) >>> nBits); + } + } + } + } + return vt == Value.BINARY ? ValueBinary.getNoCopy(newBytes) : ValueVarbinary.getNoCopy(newBytes); + } + case Value.TINYINT: { + byte v; + if (offset < 8) { + v = v1.getByte(); + if (offset > -8) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v = (byte) ((v & 0xFF) >>> (int) -offset); + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 7; + } + } else { + v = 0; + } + return ValueTinyint.get(v); + } + case Value.SMALLINT: { + short v; + if (offset < 16) { + v = v1.getShort(); + if (offset > -16) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v = (short) ((v & 0xFFFF) >>> (int) -offset); + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 15; + } + } else { + v = 0; + } + return ValueSmallint.get(v); + } + case Value.INTEGER: { + int v; + if (offset < 32) { + v = v1.getInt(); + if (offset > -32) { + if (offset > 0) { + v <<= (int) offset; + } else if (unsigned) { + v >>>= (int) -offset; + } else { + v >>= (int) -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 31; + } + } else { + v = 0; + } + return ValueInteger.get(v); + } + case Value.BIGINT: { + long v; + if (offset < 64) { + v = v1.getLong(); + if (offset > -64) { + if (offset > 0) { + v <<= offset; + } else if (unsigned) { + v >>>= -offset; + } else { + v >>= -offset; + } + } else if (unsigned) { + v = 0; + } else { + v >>= 63; + } + } else { + v = 0; + } + return ValueBigint.get(v); + } + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + 
} + + private static Value rotate(Value v1, long offset, boolean right) { + int vt = v1.getValueType(); + switch (vt) { + case Value.BINARY: + case Value.VARBINARY: { + byte[] bytes = v1.getBytesNoCopy(); + int length = bytes.length; + if (length == 0) { + return v1; + } + long bitLength = length << 3L; + offset %= bitLength; + if (right) { + offset = -offset; + } + if (offset == 0L) { + return v1; + } else if (offset < 0) { + offset += bitLength; + } + byte[] newBytes = new byte[length]; + int nBytes = (int) (offset >> 3); + int nBits = ((int) offset) & 0x7; + if (nBits == 0) { + System.arraycopy(bytes, nBytes, newBytes, 0, length - nBytes); + System.arraycopy(bytes, 0, newBytes, length - nBytes, nBytes); + } else { + int nBits2 = 8 - nBits; + for (int dstIndex = 0, srcIndex = nBytes; dstIndex < length;) { + newBytes[dstIndex++] = (byte) (bytes[srcIndex] << nBits + | (bytes[srcIndex = (srcIndex + 1) % length] & 0xFF) >>> nBits2); + } + } + return vt == Value.BINARY ? ValueBinary.getNoCopy(newBytes) : ValueVarbinary.getNoCopy(newBytes); + } + case Value.TINYINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x7) == 0) { + return v1; + } + int v = v1.getByte() & 0xFF; + return ValueTinyint.get((byte) ((v << o) | (v >>> 8 - o))); + } + case Value.SMALLINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0xF) == 0) { + return v1; + } + int v = v1.getShort() & 0xFFFF; + return ValueSmallint.get((short) ((v << o) | (v >>> 16 - o))); + } + case Value.INTEGER: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x1F) == 0) { + return v1; + } + return ValueInteger.get(Integer.rotateLeft(v1.getInt(), o)); + } + case Value.BIGINT: { + int o = (int) offset; + if (right) { + o = -o; + } + if ((o &= 0x3F) == 0) { + return v1; + } + return ValueBigint.get(Long.rotateLeft(v1.getLong(), o)); + } + default: + throw DbException.getInvalidValueException("bit function parameter", v1.getTraceSQL()); + } + } + + /** + * Computes the 
value of bitwise function. + * + * @param function + * one of {@link #BITAND}, {@link #BITOR}, {@link #BITXOR}, + * {@link #BITNOT}, {@link #BITNAND}, {@link #BITNOR}, + * {@link #BITXNOR} + * @param type + * the type of result + * @param v1 + * the value of first argument + * @param v2 + * the value of second argument, or {@code null} + * @return the resulting value + */ + public static Value getBitwise(int function, TypeInfo type, Value v1, Value v2) { + return type.getValueType() < Value.TINYINT ? getBinaryString(function, type, v1, v2) + : getNumeric(function, type, v1, v2); + } + + private static Value getBinaryString(int function, TypeInfo type, Value v1, Value v2) { + byte[] bytes; + if (function == BITNOT) { + bytes = v1.getBytes(); + for (int i = 0, l = bytes.length; i < l; i++) { + bytes[i] = (byte) ~bytes[i]; + } + } else { + byte[] bytes1 = v1.getBytesNoCopy(), bytes2 = v2.getBytesNoCopy(); + int length1 = bytes1.length, length2 = bytes2.length; + int min, max; + if (length1 <= length2) { + min = length1; + max = length2; + } else { + min = length2; + max = length1; + byte[] t = bytes1; + bytes1 = bytes2; + bytes2 = t; + } + int limit = (int) type.getPrecision(); + if (min > limit) { + max = min = limit; + } else if (max > limit) { + max = limit; + } + bytes = new byte[max]; + int i = 0; + switch (function) { + case BITAND: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] & bytes2[i]); + } + break; + case BITOR: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] | bytes2[i]); + } + System.arraycopy(bytes2, i, bytes, i, max - i); + break; + case BITXOR: + for (; i < min; i++) { + bytes[i] = (byte) (bytes1[i] ^ bytes2[i]); + } + System.arraycopy(bytes2, i, bytes, i, max - i); + break; + case BITNAND: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] & bytes2[i]); + } + Arrays.fill(bytes, i, max, (byte) -1); + break; + case BITNOR: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] | bytes2[i]); + } + for (; i < max; i++) { + 
bytes[i] = (byte) ~bytes2[i]; + } + break; + case BITXNOR: + for (; i < min; i++) { + bytes[i] = (byte) ~(bytes1[i] ^ bytes2[i]); + } + for (; i < max; i++) { + bytes[i] = (byte) ~bytes2[i]; + } + break; + default: + throw DbException.getInternalError("function=" + function); + } + } + return type.getValueType() == Value.BINARY ? ValueBinary.getNoCopy(bytes) : ValueVarbinary.getNoCopy(bytes); + } + + private static Value getNumeric(int function, TypeInfo type, Value v1, Value v2) { + long l1 = v1.getLong(); + switch (function) { + case BITAND: + l1 &= v2.getLong(); + break; + case BITOR: + l1 |= v2.getLong(); + break; + case BITXOR: + l1 ^= v2.getLong(); + break; + case BITNOT: + l1 = ~l1; + break; + case BITNAND: + l1 = ~(l1 & v2.getLong()); + break; + case BITNOR: + l1 = ~(l1 | v2.getLong()); + break; + case BITXNOR: + l1 = ~(l1 ^ v2.getLong()); + break; + default: + throw DbException.getInternalError("function=" + function); + } + switch (type.getValueType()) { + case Value.TINYINT: + return ValueTinyint.get((byte) l1); + case Value.SMALLINT: + return ValueSmallint.get((short) l1); + case Value.INTEGER: + return ValueInteger.get((int) l1); + case Value.BIGINT: + return ValueBigint.get(l1); + default: + throw DbException.getInternalError(); + } + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case BITNOT: + return optimizeNot(session); + case BITGET: + type = TypeInfo.TYPE_BOOLEAN; + break; + case BITCOUNT: + type = TypeInfo.TYPE_BIGINT; + break; + case LSHIFT: + case RSHIFT: + case ULSHIFT: + case URSHIFT: + case ROTATELEFT: + case ROTATERIGHT: + type = checkArgType(left); + break; + default: + type = getCommonType(left, right); + break; + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + private Expression 
optimizeNot(SessionLocal session) { + type = checkArgType(left); + if (left.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } else if (left instanceof BitFunction) { + BitFunction l = (BitFunction) left; + int f = l.function; + switch (f) { + case BITAND: + case BITOR: + case BITXOR: + f += BITNAND - BITAND; + break; + case BITNOT: + return l.left; + case BITNAND: + case BITNOR: + case BITXNOR: + f -= BITNAND - BITAND; + break; + default: + return this; + } + return new BitFunction(l.left, l.right, f).optimize(session); + } else if (left instanceof Aggregate) { + Aggregate l = (Aggregate) left; + AggregateType t; + switch (l.getAggregateType()) { + case BIT_AND_AGG: + t = AggregateType.BIT_NAND_AGG; + break; + case BIT_OR_AGG: + t = AggregateType.BIT_NOR_AGG; + break; + case BIT_XOR_AGG: + t = AggregateType.BIT_XNOR_AGG; + break; + case BIT_NAND_AGG: + t = AggregateType.BIT_AND_AGG; + break; + case BIT_NOR_AGG: + t = AggregateType.BIT_OR_AGG; + break; + case BIT_XNOR_AGG: + t = AggregateType.BIT_XOR_AGG; + break; + default: + return this; + } + return new Aggregate(t, new Expression[] { l.getSubexpression(0) }, l.getSelect(), l.isDistinct()) + .optimize(session); + } + return this; + } + + private static TypeInfo getCommonType(Expression arg1, Expression arg2) { + TypeInfo t1 = checkArgType(arg1), t2 = checkArgType(arg2); + int vt1 = t1.getValueType(), vt2 = t2.getValueType(); + boolean bs = DataType.isBinaryStringType(vt1); + if (bs != DataType.isBinaryStringType(vt2)) { + throw DbException.getInvalidValueException("bit function parameters", + t2.getSQL(t1.getSQL(new StringBuilder(), TRACE_SQL_FLAGS).append(" vs "), TRACE_SQL_FLAGS) + .toString()); + } + if (bs) { + long precision; + if (vt1 == Value.BINARY) { + precision = t1.getDeclaredPrecision(); + if (vt2 == Value.BINARY) { + precision = Math.max(precision, t2.getDeclaredPrecision()); + } + } else { + if (vt2 == Value.BINARY) { + vt1 = Value.BINARY; + precision = 
t2.getDeclaredPrecision(); + } else { + long precision1 = t1.getDeclaredPrecision(), precision2 = t2.getDeclaredPrecision(); + precision = precision1 <= 0L || precision2 <= 0L ? -1L : Math.max(precision1, precision2); + } + } + return TypeInfo.getTypeInfo(vt1, precision, 0, null); + } + return TypeInfo.getTypeInfo(Math.max(vt1, vt2)); + } + + /** + * Checks the type of an argument of bitwise function (one of + * {@link #BITAND}, {@link #BITOR}, {@link #BITXOR}, {@link #BITNOT}, + * {@link #BITNAND}, {@link #BITNOR}, {@link #BITXNOR}). + * + * @param arg + * the argument + * @return the type of the specified argument + * @throws DbException + * if argument type is not supported by bitwise functions + */ + public static TypeInfo checkArgType(Expression arg) { + TypeInfo t = arg.getType(); + switch (t.getValueType()) { + case Value.NULL: + case Value.BINARY: + case Value.VARBINARY: + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + return t; + } + throw DbException.getInvalidExpressionTypeException("bit function argument", arg); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/BuiltinFunctions.java b/h2/src/main/org/h2/expression/function/BuiltinFunctions.java new file mode 100644 index 0000000000..efb1187842 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/BuiltinFunctions.java @@ -0,0 +1,136 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.HashSet; + +import org.h2.engine.Database; +import org.h2.mode.ModeFunction; + +/** + * Maintains the list of built-in functions. 
+ */ +public final class BuiltinFunctions { + + private static final HashSet FUNCTIONS; + + static { + String[] names = { // + // MathFunction + "ABS", "MOD", "FLOOR", "CEIL", "ROUND", "ROUNDMAGIC", "SIGN", "TRUNC", "TRUNCATE", + // MathFunction1 + "SIN", "COS", "TAN", "COT", "SINH", "COSH", "TANH", "ASIN", "ACOS", "ATAN", // + "LOG10", "LN", "EXP", "SQRT", "DEGREES", "RADIANS", + // MathFunction2 + "ATAN2", "LOG", "POWER", + // BitFunction + "BITAND", "BITOR", "BITXOR", "BITNOT", "BITNAND", "BITNOR", "BITXNOR", "BITGET", "BITCOUNT", "LSHIFT", + "RSHIFT", "ULSHIFT", "URSHIFT", "ROTATELEFT", "ROTATERIGHT", + // DateTimeFunction + "EXTRACT", "DATE_TRUNC", "DATEADD", "DATEDIFF", // + "TIMESTAMPADD", "TIMESTAMPDIFF", + // DateTimeFormatFunction + "FORMATDATETIME", "PARSEDATETIME", + // DayMonthNameFunction + "DAYNAME", "MONTHNAME", + // CardinalityExpression + "CARDINALITY", "ARRAY_MAX_CARDINALITY", + // StringFunction + "LOCATE", "INSERT", "REPLACE", "LPAD", "RPAD", "TRANSLATE", + // StringFunction1 + "UPPER", "LOWER", "ASCII", "CHAR", "CHR", "STRINGENCODE", "STRINGDECODE", "STRINGTOUTF8", + "UTF8TOSTRING", "HEXTORAW", "RAWTOHEX", "SPACE", "QUOTE_IDENT", + // StringFunction2 + /* LEFT and RIGHT are keywords */ "REPEAT", + // SubstringFunction + "SUBSTRING", + // ToCharFunction + "TO_CHAR", + // LengthFunction + "CHAR_LENGTH", "CHARACTER_LENGTH", "LENGTH", "OCTET_LENGTH", "BIT_LENGTH", + // TrimFunction + "TRIM", + // RegexpFunction + "REGEXP_LIKE", "REGEXP_REPLACE", "REGEXP_SUBSTR", + // XMLFunction + "XMLATTR", "XMLCDATA", "XMLCOMMENT", "XMLNODE", "XMLSTARTDOC", "XMLTEXT", + // ArrayFunction + "TRIM_ARRAY", "ARRAY_CONTAINS", "ARRAY_SLICE", + // CompressFunction + "COMPRESS", "EXPAND", + // SoundexFunction + "SOUNDEX", "DIFFERENCE", + // JsonConstructorFunction + "JSON_OBJECT", "JSON_ARRAY", + // CryptFunction + "ENCRYPT", "DECRYPT", + // CoalesceFunction + "COALESCE", "GREATEST", "LEAST", + // NullIfFunction + "NULLIF", + // ConcatFunction + "CONCAT", "CONCAT_WS", + 
// HashFunction + "HASH", "ORA_HASH", + // RandFunction + "RAND", "RANDOM", "SECURE_RAND", "RANDOM_UUID", "UUID", + // SessionControlFunction + "ABORT_SESSION", "CANCEL_SESSION", + // SysInfoFunction + "AUTOCOMMIT", "DATABASE_PATH", "H2VERSION", "LOCK_MODE", "LOCK_TIMEOUT", "MEMORY_FREE", "MEMORY_USED", + "READONLY", "SESSION_ID", "TRANSACTION_ID", + // TableInfoFunction + "DISK_SPACE_USED", "ESTIMATED_ENVELOPE", + // FileFunction + "FILE_READ", "FILE_WRITE", + // DataTypeSQLFunction + "DATA_TYPE_SQL", + // DBObjectFunction + "DB_OBJECT_ID", "DB_OBJECT_SQL", + // CSVWriteFunction + "CSVWRITE", + // SetFunction + /* SET is keyword */ + // SignalFunction + "SIGNAL", + // TruncateValueFunction + "TRUNCATE_VALUE", + // CompatibilitySequenceValueFunction + "CURRVAL", "NEXTVAL", + // Constants + "ZERO", "PI", + // ArrayTableFunction + "UNNEST", /* TABLE is a keyword */ "TABLE_DISTINCT", + // CSVReadFunction + "CSVREAD", + // LinkSchemaFunction + "LINK_SCHEMA", + // + }; + HashSet set = new HashSet<>(128); + for (String n : names) { + set.add(n); + } + FUNCTIONS = set; + } + + /** + * Returns whether specified function is a non-keyword built-in function. + * + * @param database + * the database + * @param upperName + * the name of the function in upper case + * @return {@code true} if it is + */ + public static boolean isBuiltinFunction(Database database, String upperName) { + return FUNCTIONS.contains(upperName) || ModeFunction.getFunction(database, upperName) != null; + } + + private BuiltinFunctions() { + } + +} diff --git a/h2/src/main/org/h2/expression/function/CSVWriteFunction.java b/h2/src/main/org/h2/expression/function/CSVWriteFunction.java new file mode 100644 index 0000000000..ce1e379559 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CSVWriteFunction.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.sql.Connection;
import java.sql.SQLException;

import org.h2.api.ErrorCode;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.message.DbException;
import org.h2.tools.Csv;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueInteger;

/**
 * A CSVWRITE function.
 *
 * <p>
 * Arguments (positional): 0 = file name, 1 = query, 2 = options string or
 * charset, 3 = field separator, 4 = field delimiter, 5 = escape character,
 * 6 = null string, 7 = line separator. Arguments 2..7 are optional.
 * </p>
 */
public final class CSVWriteFunction extends FunctionN {

    public CSVWriteFunction() {
        // Initial capacity only; optimize() accepts 2 to 8 arguments.
        super(new Expression[4]);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // Writing files is restricted to administrators.
        session.getUser().checkAdmin();
        Connection conn = session.createConnection(false);
        Csv csv = new Csv();
        String options = getValue(session, 2);
        String charset = null;
        if (options != null && options.indexOf('=') >= 0) {
            // Argument 2 contains "key=value" pairs: parse the whole option
            // string; setOptions() returns the charset, if one was given.
            charset = csv.setOptions(options);
        } else {
            // Legacy positional form: argument 2 is the charset itself and
            // the remaining options are separate arguments 3..7.
            charset = options;
            String fieldSeparatorWrite = getValue(session, 3);
            String fieldDelimiter = getValue(session, 4);
            String escapeCharacter = getValue(session, 5);
            String nullString = getValue(session, 6);
            String lineSeparator = getValue(session, 7);
            setCsvDelimiterEscape(csv, fieldSeparatorWrite, fieldDelimiter, escapeCharacter);
            csv.setNullString(nullString);
            if (lineSeparator != null) {
                csv.setLineSeparator(lineSeparator);
            }
        }
        try {
            // Returns the number of rows written.
            return ValueInteger.get(csv.write(conn, args[0].getValue(session).getString(),
                    args[1].getValue(session).getString(), charset));
        } catch (SQLException e) {
            throw DbException.convert(e);
        }
    }

    /**
     * Returns the string value of the optional argument at the given index,
     * or {@code null} when the argument was not specified.
     */
    private String getValue(SessionLocal session, int index) {
        return index < args.length ? args[index].getValue(session).getString() : null;
    }

    /**
     * Sets delimiter options.
     *
     * @param csv
     *            the CSV utility instance
     * @param fieldSeparator
     *            the field separator
     * @param fieldDelimiter
     *            the field delimiter
     * @param escapeCharacter
     *            the escape character
     */
    public static void setCsvDelimiterEscape(Csv csv, String fieldSeparator, String fieldDelimiter,
            String escapeCharacter) {
        if (fieldSeparator != null) {
            // The write separator may be multi-character; reading uses only
            // the first character.
            csv.setFieldSeparatorWrite(fieldSeparator);
            if (!fieldSeparator.isEmpty()) {
                char fs = fieldSeparator.charAt(0);
                csv.setFieldSeparatorRead(fs);
            }
        }
        if (fieldDelimiter != null) {
            // An empty string disables the delimiter (character 0).
            char fd = fieldDelimiter.isEmpty() ? 0 : fieldDelimiter.charAt(0);
            csv.setFieldDelimiter(fd);
        }
        if (escapeCharacter != null) {
            // An empty string disables escaping (character 0).
            char ec = escapeCharacter.isEmpty() ? 0 : escapeCharacter.charAt(0);
            csv.setEscapeCharacter(ec);
        }
    }

    @Override
    public Expression optimize(SessionLocal session) {
        optimizeArguments(session, false);
        int len = args.length;
        if (len < 2 || len > 8) {
            throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "2..8");
        }
        type = TypeInfo.TYPE_INTEGER;
        return this;
    }

    @Override
    public String getName() {
        return "CSVWRITE";
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        if (!super.isEverything(visitor)) {
            return false;
        }
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
        case ExpressionVisitor.QUERY_COMPARABLE:
        case ExpressionVisitor.READONLY:
            // Writes a file: has side effects and is not deterministic.
            return false;
        default:
            return true;
        }
    }

}
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * Cardinality expression. + */ +public final class CardinalityExpression extends Function1 { + + private final boolean max; + + /** + * Creates new instance of cardinality expression. + * + * @param arg + * argument + * @param max + * {@code false} for {@code CARDINALITY}, {@code true} for + * {@code ARRAY_MAX_CARDINALITY} + */ + public CardinalityExpression(Expression arg, boolean max) { + super(arg); + this.max = max; + } + + @Override + public Value getValue(SessionLocal session) { + int result; + if (max) { + TypeInfo t = arg.getType(); + if (t.getValueType() == Value.ARRAY) { + result = MathUtils.convertLongToInt(t.getPrecision()); + } else { + throw DbException.getInvalidValueException("array", arg.getValue(session).getTraceSQL()); + } + } else { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + if (v.getValueType() != Value.ARRAY) { + throw DbException.getInvalidValueException("array", v.getTraceSQL()); + } + result = ((ValueArray) v).getList().length; + } + return ValueInteger.get(result); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_INTEGER; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return max ? 
"ARRAY_MAX_CARDINALITY" : "CARDINALITY"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CastSpecification.java b/h2/src/main/org/h2/expression/function/CastSpecification.java new file mode 100644 index 0000000000..d0a54bfc0e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CastSpecification.java @@ -0,0 +1,115 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.ValueExpression; +import org.h2.schema.Domain; +import org.h2.table.Column; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A cast specification. + */ +public final class CastSpecification extends Function1 { + + private Domain domain; + + public CastSpecification(Expression arg, Column column) { + super(arg); + type = column.getType(); + domain = column.getDomain(); + } + + public CastSpecification(Expression arg, TypeInfo type) { + super(arg); + this.type = type; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session).castTo(type, session); + if (domain != null) { + domain.checkConstraints(session, v); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + Value v = getValue(session); + if (v == ValueNull.INSTANCE || canOptimizeCast(arg.getType().getValueType(), type.getValueType())) { + return TypedValueExpression.get(v, type); + } + } + return this; + } + + @Override + public boolean isConstant() { + return arg instanceof ValueExpression && canOptimizeCast(arg.getType().getValueType(), type.getValueType()); + } + + private static boolean canOptimizeCast(int src, int dst) { + switch (src) 
{ + case Value.TIME: + switch (dst) { + case Value.TIME_TZ: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.TIME_TZ: + switch (dst) { + case Value.TIME: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.DATE: + if (dst == Value.TIMESTAMP_TZ) { + return false; + } + break; + case Value.TIMESTAMP: + switch (dst) { + case Value.TIME_TZ: + case Value.TIMESTAMP_TZ: + return false; + } + break; + case Value.TIMESTAMP_TZ: + switch (dst) { + case Value.TIME: + case Value.DATE: + case Value.TIMESTAMP: + return false; + } + } + return true; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append("CAST("); + arg.getUnenclosedSQL(builder, arg instanceof ValueExpression ? sqlFlags | NO_CASTS : sqlFlags).append(" AS "); + return (domain != null ? domain : type).getSQL(builder, sqlFlags).append(')'); + } + + @Override + public String getName() { + return "CAST"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CoalesceFunction.java b/h2/src/main/org/h2/expression/function/CoalesceFunction.java new file mode 100644 index 0000000000..3d5377feb1 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CoalesceFunction.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * A COALESCE, GREATEST, or LEAST function. + */ +public final class CoalesceFunction extends FunctionN { + + /** + * COALESCE(). + */ + public static final int COALESCE = 0; + + /** + * GREATEST() (non-standard). 
+ */ + public static final int GREATEST = COALESCE + 1; + + /** + * LEAST() (non-standard). + */ + public static final int LEAST = GREATEST + 1; + + private static final String[] NAMES = { // + "COALESCE", "GREATEST", "LEAST" // + }; + + private final int function; + + public CoalesceFunction(int function) { + this(function, new Expression[4]); + } + + public CoalesceFunction(int function, Expression... args) { + super(args); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = ValueNull.INSTANCE; + switch (function) { + case COALESCE: { + for (int i = 0, l = args.length; i < l; i++) { + Value v2 = args[i].getValue(session); + if (v2 != ValueNull.INSTANCE) { + v = v2.convertTo(type, session); + break; + } + } + break; + } + case GREATEST: + case LEAST: { + for (int i = 0, l = args.length; i < l; i++) { + Value v2 = args[i].getValue(session); + if (v2 != ValueNull.INSTANCE) { + v2 = v2.convertTo(type, session); + if (v == ValueNull.INSTANCE) { + v = v2; + } else { + int comp = session.compareTypeSafe(v, v2); + if (function == GREATEST) { + if (comp < 0) { + v = v2; + } + } else if (comp > 0) { + v = v2; + } + } + } + } + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = TypeInfo.getHigherType(args); + if (type.getValueType() <= Value.NULL) { + type = TypeInfo.TYPE_VARCHAR; + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CompatibilitySequenceValueFunction.java b/h2/src/main/org/h2/expression/function/CompatibilitySequenceValueFunction.java new file mode 100644 index 0000000000..2d9fd62f69 --- /dev/null +++ 
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.command.Parser;
import org.h2.engine.Database;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionColumn;
import org.h2.expression.ExpressionVisitor;
import org.h2.message.DbException;
import org.h2.schema.Schema;
import org.h2.schema.Sequence;
import org.h2.util.StringUtils;
import org.h2.value.TypeInfo;
import org.h2.value.Value;

/**
 * NEXTVAL() and CURRVAL() compatibility functions.
 *
 * <p>
 * Single-argument form: the argument is parsed as an SQL expression, so it
 * may be either a plain sequence name or a {@code schema.sequence} reference.
 * Two-argument form: first argument is the schema name, second the sequence
 * name.
 * </p>
 */
public final class CompatibilitySequenceValueFunction extends Function1_2 {

    // true for CURRVAL(), false for NEXTVAL()
    private final boolean current;

    public CompatibilitySequenceValueFunction(Expression left, Expression right, boolean current) {
        super(left, right);
        this.current = current;
    }

    @Override
    public Value getValue(SessionLocal session, Value v1, Value v2) {
        String schemaName, sequenceName;
        if (v2 == null) {
            // Single-argument form: re-parse the string to support both
            // 'seq' and 'schema.seq' (possibly quoted) spellings.
            Parser p = new Parser(session);
            String sql = v1.getString();
            Expression expr = p.parseExpression(sql);
            if (expr instanceof ExpressionColumn) {
                ExpressionColumn seq = (ExpressionColumn) expr;
                // For 'schema.seq' the parser sees the schema as a table
                // alias; no alias means the current schema is used.
                schemaName = seq.getOriginalTableAliasName();
                if (schemaName == null) {
                    schemaName = session.getCurrentSchemaName();
                    sequenceName = sql;
                } else {
                    sequenceName = seq.getColumnName(session, -1);
                }
            } else {
                throw DbException.getSyntaxError(sql, 1);
            }
        } else {
            schemaName = v1.getString();
            sequenceName = v2.getString();
        }
        Database database = session.getDatabase();
        // Lookups try the name as given first, then its upper-case form, for
        // compatibility with unquoted (case-insensitive) identifiers; the
        // get* variants throw when the object does not exist.
        Schema s = database.findSchema(schemaName);
        if (s == null) {
            schemaName = StringUtils.toUpperEnglish(schemaName);
            s = database.getSchema(schemaName);
        }
        Sequence seq = s.findSequence(sequenceName);
        if (seq == null) {
            sequenceName = StringUtils.toUpperEnglish(sequenceName);
            seq = s.getSequence(sequenceName);
        }
        return (current ? session.getCurrentValueFor(seq) : session.getNextValueFor(seq, null)).convertTo(type);
    }

    @Override
    public Expression optimize(SessionLocal session) {
        left = left.optimize(session);
        if (right != null) {
            right = right.optimize(session);
        }
        // Some compatibility modes expose sequence values as NUMERIC.
        type = session.getMode().decimalSequences ? TypeInfo.TYPE_NUMERIC_BIGINT : TypeInfo.TYPE_BIGINT;
        return this;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.INDEPENDENT:
        case ExpressionVisitor.DETERMINISTIC:
        case ExpressionVisitor.QUERY_COMPARABLE:
            return false;
        case ExpressionVisitor.READONLY:
            // NEXTVAL advances the sequence and is therefore not read-only;
            // CURRVAL falls through to the default handling.
            if (!current) {
                return false;
            }
        }
        return super.isEverything(visitor);
    }

    @Override
    public String getName() {
        return current ? "CURRVAL" : "NEXTVAL";
    }

}
+ */ + public static final int EXPAND = COMPRESS + 1; + + private static final String[] NAMES = { // + "COMPRESS", "EXPAND" // + }; + + private final int function; + + public CompressFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case COMPRESS: + v1 = ValueVarbinary.getNoCopy( + CompressTool.getInstance().compress(v1.getBytesNoCopy(), v2 != null ? v2.getString() : null)); + break; + case EXPAND: + v1 = ValueVarbinary.getNoCopy(CompressTool.getInstance().expand(v1.getBytesNoCopy())); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + type = TypeInfo.TYPE_VARBINARY; + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/ConcatFunction.java b/h2/src/main/org/h2/expression/function/ConcatFunction.java new file mode 100644 index 0000000000..14f5646c97 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/ConcatFunction.java @@ -0,0 +1,118 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * A CONCAT or CONCAT_WS function. + */ +public final class ConcatFunction extends FunctionN { + + /** + * CONCAT() (non-standard). + */ + public static final int CONCAT = 0; + + /** + * CONCAT_WS() (non-standard). + */ + public static final int CONCAT_WS = CONCAT + 1; + + private static final String[] NAMES = { // + "CONCAT", "CONCAT_WS" // + }; + + private final int function; + + public ConcatFunction(int function) { + this(function, new Expression[4]); + } + + public ConcatFunction(int function, Expression... args) { + super(args); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + int i = 0; + String separator = null; + if (function == CONCAT_WS) { + i = 1; + separator = args[0].getValue(session).getString(); + } + StringBuilder builder = new StringBuilder(); + boolean f = false; + for (int l = args.length; i < l; i++) { + Value v = args[i].getValue(session); + if (v != ValueNull.INSTANCE) { + if (separator != null) { + if (f) { + builder.append(separator); + } + f = true; + } + builder.append(v.getString()); + } + } + return ValueVarchar.get(builder.toString(), session); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int i = 0; + long extra = 0L; + if (function == CONCAT_WS) { + i = 1; + extra = getPrecision(0); + } + long precision = 0L; + int l = args.length; + boolean f = false; + for (; i < l; i++) { + if (args[i].isNullConstant()) { + continue; + } + precision = DataType.addPrecision(precision, getPrecision(i)); + if (extra != 0L && f) { + precision = 
DataType.addPrecision(precision, extra); + } + f = true; + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, precision, 0, null); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + private long getPrecision(int i) { + TypeInfo t = args[i].getType(); + int valueType = t.getValueType(); + if (valueType == Value.NULL) { + return 0L; + } else if (DataType.isCharacterStringType(valueType)) { + return t.getPrecision(); + } else { + return Long.MAX_VALUE; + } + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CryptFunction.java b/h2/src/main/org/h2/expression/function/CryptFunction.java new file mode 100644 index 0000000000..47fbb966b6 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CryptFunction.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.security.BlockCipher; +import org.h2.security.CipherFactory; +import org.h2.util.MathUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarbinary; + +/** + * An ENCRYPT or DECRYPT function. + */ +public final class CryptFunction extends FunctionN { + + /** + * ENCRYPT() (non-standard). + */ + public static final int ENCRYPT = 0; + + /** + * DECRYPT() (non-standard). 
+ */ + public static final int DECRYPT = ENCRYPT + 1; + + private static final String[] NAMES = { // + "ENCRYPT", "DECRYPT" // + }; + + private final int function; + + public CryptFunction(Expression arg1, Expression arg2, Expression arg3, int function) { + super(new Expression[] { arg1, arg2, arg3 }); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + BlockCipher cipher = CipherFactory.getBlockCipher(v1.getString()); + cipher.setKey(getPaddedArrayCopy(v2.getBytesNoCopy(), cipher.getKeyLength())); + byte[] newData = getPaddedArrayCopy(v3.getBytesNoCopy(), BlockCipher.ALIGN); + switch (function) { + case ENCRYPT: + cipher.encrypt(newData, 0, newData.length); + break; + case DECRYPT: + cipher.decrypt(newData, 0, newData.length); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueVarbinary.getNoCopy(newData); + } + + private static byte[] getPaddedArrayCopy(byte[] data, int blockSize) { + return Utils.copyBytes(data, MathUtils.roundUpInt(data.length, blockSize)); + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + TypeInfo t = args[2].getType(); + type = DataType.isBinaryStringType(t.getValueType()) + ? TypeInfo.getTypeInfo(Value.VARBINARY, t.getPrecision(), 0, null) + : TypeInfo.TYPE_VARBINARY; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java b/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java new file mode 100644 index 0000000000..de11882bc9 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/CurrentDateTimeValueFunction.java @@ -0,0 +1,112 @@ +/* + * Copyright 2004-2022 H2 Group. 
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.ExpressionVisitor;
import org.h2.expression.Operation0;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;

/**
 * Current datetime value function.
 */
public final class CurrentDateTimeValueFunction extends Operation0 implements NamedExpression {

    /**
     * The function "CURRENT_DATE"
     */
    public static final int CURRENT_DATE = 0;

    /**
     * The function "CURRENT_TIME"
     */
    public static final int CURRENT_TIME = 1;

    /**
     * The function "LOCALTIME"
     */
    public static final int LOCALTIME = 2;

    /**
     * The function "CURRENT_TIMESTAMP"
     */
    public static final int CURRENT_TIMESTAMP = 3;

    /**
     * The function "LOCALTIMESTAMP"
     */
    public static final int LOCALTIMESTAMP = 4;

    // Result value type per function id, indexed by the constants above:
    // CURRENT_TIME / CURRENT_TIMESTAMP carry a time zone, the LOCAL*
    // variants do not.
    private static final int[] TYPES = { Value.DATE, Value.TIME_TZ, Value.TIME, Value.TIMESTAMP_TZ, Value.TIMESTAMP };

    private static final String[] NAMES = { "CURRENT_DATE", "CURRENT_TIME", "LOCALTIME", "CURRENT_TIMESTAMP",
            "LOCALTIMESTAMP" };

    /**
     * Get the name for this function id.
     *
     * @param function the function id
     * @return the name
     */
    public static String getName(int function) {
        return NAMES[function];
    }

    // scale keeps its constructor value: a negative value means the scale
    // was not specified explicitly and must not be rendered in the SQL.
    private final int function, scale;

    private final TypeInfo type;

    public CurrentDateTimeValueFunction(int function, int scale) {
        this.function = function;
        // Store the raw (possibly negative) scale first; the local variable
        // is then normalized to the type's default scale for type lookup.
        this.scale = scale;
        if (scale < 0) {
            scale = function >= CURRENT_TIMESTAMP ? ValueTimestamp.DEFAULT_SCALE : ValueTime.DEFAULT_SCALE;
        }
        type = TypeInfo.getTypeInfo(TYPES[function], 0L, scale, null);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // Derive the result from the session's transaction timestamp, cast
        // to this function's type (date/time/timestamp, requested scale).
        return session.currentTimestamp().castTo(type, session);
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        builder.append(getName());
        // Only an explicitly specified scale is part of the SQL.
        if (scale >= 0) {
            builder.append('(').append(scale).append(')');
        }
        return builder;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
            // Depends on the current time.
            return false;
        }
        return true;
    }

    @Override
    public TypeInfo getType() {
        return type;
    }

    @Override
    public int getCost() {
        return 1;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ +public final class CurrentGeneralValueSpecification extends Operation0 implements NamedExpression { + + /** + * The "CURRENT_CATALOG" general value specification. + */ + public static final int CURRENT_CATALOG = 0; + + /** + * The "CURRENT_PATH" general value specification. + */ + public static final int CURRENT_PATH = CURRENT_CATALOG + 1; + + /** + * The function "CURRENT_ROLE" general value specification. + */ + public static final int CURRENT_ROLE = CURRENT_PATH + 1; + + /** + * The function "CURRENT_SCHEMA" general value specification. + */ + public static final int CURRENT_SCHEMA = CURRENT_ROLE + 1; + + /** + * The function "CURRENT_USER" general value specification. + */ + public static final int CURRENT_USER = CURRENT_SCHEMA + 1; + + /** + * The function "SESSION_USER" general value specification. + */ + public static final int SESSION_USER = CURRENT_USER + 1; + + /** + * The function "SYSTEM_USER" general value specification. + */ + public static final int SYSTEM_USER = SESSION_USER + 1; + + private static final String[] NAMES = { "CURRENT_CATALOG", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", + "CURRENT_USER", "SESSION_USER", "SYSTEM_USER" }; + + private final int specification; + + public CurrentGeneralValueSpecification(int specification) { + this.specification = specification; + } + + @Override + public Value getValue(SessionLocal session) { + String s; + switch (specification) { + case CURRENT_CATALOG: + s = session.getDatabase().getShortName(); + break; + case CURRENT_PATH: { + String[] searchPath = session.getSchemaSearchPath(); + if (searchPath != null) { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < searchPath.length; i++) { + if (i > 0) { + builder.append(','); + } + ParserUtil.quoteIdentifier(builder, searchPath[i], HasSQL.DEFAULT_SQL_FLAGS); + } + s = builder.toString(); + } else { + s = ""; + } + break; + } + case CURRENT_ROLE: { + Database db = session.getDatabase(); + s = db.getPublicRole().getName(); + if 
(db.getSettings().databaseToLower) { + s = StringUtils.toLowerEnglish(s); + } + break; + } + case CURRENT_SCHEMA: + s = session.getCurrentSchemaName(); + break; + case CURRENT_USER: + case SESSION_USER: + case SYSTEM_USER: + s = session.getUser().getName(); + if (session.getDatabase().getSettings().databaseToLower) { + s = StringUtils.toLowerEnglish(s); + } + break; + default: + throw DbException.getInternalError("specification=" + specification); + } + return s != null ? ValueVarchar.get(s, session) : ValueNull.INSTANCE; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append(getName()); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_VARCHAR; + } + + @Override + public int getCost() { + return 1; + } + + @Override + public String getName() { + return NAMES[specification]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/DBObjectFunction.java b/h2/src/main/org/h2/expression/function/DBObjectFunction.java new file mode 100644 index 0000000000..55441dc51e --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DBObjectFunction.java @@ -0,0 +1,144 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.Database;
import org.h2.engine.DbObject;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.message.DbException;
import org.h2.schema.Schema;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueInteger;
import org.h2.value.ValueNull;
import org.h2.value.ValueVarchar;

/**
 * DB_OBJECT_ID() and DB_OBJECT_SQL() functions.
 *
 * <p>
 * Two-argument form looks up database-scoped objects (role, setting, schema,
 * user); three-argument form looks up schema-scoped objects by object type,
 * schema name and object name. Unknown types and missing objects yield NULL.
 * </p>
 */
public final class DBObjectFunction extends FunctionN {

    /**
     * DB_OBJECT_ID() (non-standard).
     */
    public static final int DB_OBJECT_ID = 0;

    /**
     * DB_OBJECT_SQL() (non-standard).
     */
    public static final int DB_OBJECT_SQL = DB_OBJECT_ID + 1;

    private static final String[] NAMES = { //
            "DB_OBJECT_ID", "DB_OBJECT_SQL" //
    };

    private final int function;

    public DBObjectFunction(Expression objectType, Expression arg1, Expression arg2, int function) {
        // arg2 is optional: two arguments address database-scoped objects,
        // three arguments address schema-scoped objects.
        super(arg2 == null ? new Expression[] { objectType, arg1, } : new Expression[] { objectType, arg1, arg2 });
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) {
        // Metadata access is restricted to administrators.
        session.getUser().checkAdmin();
        String objectType = v1.getString();
        DbObject object;
        if (v3 != null) {
            // Schema-scoped lookup: v2 = schema name, v3 = object name.
            Schema schema = session.getDatabase().findSchema(v2.getString());
            if (schema == null) {
                return ValueNull.INSTANCE;
            }
            String objectName = v3.getString();
            switch (objectType) {
            case "CONSTANT":
                object = schema.findConstant(objectName);
                break;
            case "CONSTRAINT":
                object = schema.findConstraint(session, objectName);
                break;
            case "DOMAIN":
                object = schema.findDomain(objectName);
                break;
            case "INDEX":
                object = schema.findIndex(session, objectName);
                break;
            case "ROUTINE":
                object = schema.findFunctionOrAggregate(objectName);
                break;
            case "SEQUENCE":
                object = schema.findSequence(objectName);
                break;
            case "SYNONYM":
                object = schema.getSynonym(objectName);
                break;
            case "TABLE":
                object = schema.findTableOrView(session, objectName);
                break;
            case "TRIGGER":
                object = schema.findTrigger(objectName);
                break;
            default:
                // Unknown schema-scoped object type.
                return ValueNull.INSTANCE;
            }
        } else {
            // Database-scoped lookup: v2 = object name.
            String objectName = v2.getString();
            Database database = session.getDatabase();
            switch (objectType) {
            case "ROLE":
                object = database.findRole(objectName);
                break;
            case "SETTING":
                object = database.findSetting(objectName);
                break;
            case "SCHEMA":
                object = database.findSchema(objectName);
                break;
            case "USER":
                object = database.findUser(objectName);
                break;
            default:
                // Unknown database-scoped object type.
                return ValueNull.INSTANCE;
            }
        }
        if (object == null) {
            return ValueNull.INSTANCE;
        }
        switch (function) {
        case DB_OBJECT_ID:
            return ValueInteger.get(object.getId());
        case DB_OBJECT_SQL:
            // Some objects (e.g. predefined ones) have no creation SQL.
            String sql = object.getCreateSQLForMeta();
            return sql != null ? ValueVarchar.get(sql, session) : ValueNull.INSTANCE;
        default:
            throw DbException.getInternalError("function=" + function);
        }
    }

    @Override
    public Expression optimize(SessionLocal session) {
        optimizeArguments(session, false);
        type = function == DB_OBJECT_ID ? TypeInfo.TYPE_INTEGER : TypeInfo.TYPE_VARCHAR;
        return this;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
            // Result depends on the current database contents.
            return false;
        }
        return super.isEverything(visitor);
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ +public final class DataTypeSQLFunction extends FunctionN { + + public DataTypeSQLFunction(Expression objectSchema, Expression objectName, Expression objectType, + Expression typeIdentifier) { + super(new Expression[] { objectSchema, objectName, objectType, typeIdentifier }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + Schema schema = session.getDatabase().findSchema(v1.getString()); + if (schema == null) { + return ValueNull.INSTANCE; + } + String objectName = v2.getString(); + String objectType = v3.getString(); + String typeIdentifier = args[3].getValue(session).getString(); + if (typeIdentifier == null) { + return ValueNull.INSTANCE; + } + TypeInfo t; + switch (objectType) { + case "CONSTANT": { + Constant constant = schema.findConstant(objectName); + if (constant == null || !typeIdentifier.equals("TYPE")) { + return ValueNull.INSTANCE; + } + t = constant.getValue().getType(); + break; + } + case "DOMAIN": { + Domain domain = schema.findDomain(objectName); + if (domain == null || !typeIdentifier.equals("TYPE")) { + return ValueNull.INSTANCE; + } + t = domain.getDataType(); + break; + } + case "ROUTINE": { + int idx = objectName.lastIndexOf('_'); + if (idx < 0) { + return ValueNull.INSTANCE; + } + FunctionAlias function = schema.findFunction(objectName.substring(0, idx)); + if (function == null) { + return ValueNull.INSTANCE; + } + int ordinal; + try { + ordinal = Integer.parseInt(objectName.substring(idx + 1)); + } catch (NumberFormatException e) { + return ValueNull.INSTANCE; + } + JavaMethod[] methods; + try { + methods = function.getJavaMethods(); + } catch (DbException e) { + return ValueNull.INSTANCE; + } + if (ordinal < 1 || ordinal > methods.length) { + return ValueNull.INSTANCE; + } + FunctionAlias.JavaMethod method = methods[ordinal - 1]; + if (typeIdentifier.equals("RESULT")) { + t = method.getDataType(); + } else { + try { + ordinal = Integer.parseInt(typeIdentifier); + } catch 
(NumberFormatException e) { + return ValueNull.INSTANCE; + } + if (ordinal < 1) { + return ValueNull.INSTANCE; + } + if (!method.hasConnectionParam()) { + ordinal--; + } + Class[] columnList = method.getColumnClasses(); + if (ordinal >= columnList.length) { + return ValueNull.INSTANCE; + } + t = ValueToObjectConverter2.classToType(columnList[ordinal]); + } + break; + } + case "TABLE": { + Table table = schema.findTableOrView(session, objectName); + if (table == null) { + return ValueNull.INSTANCE; + } + int ordinal; + try { + ordinal = Integer.parseInt(typeIdentifier); + } catch (NumberFormatException e) { + return ValueNull.INSTANCE; + } + Column[] columns = table.getColumns(); + if (ordinal < 1 || ordinal > columns.length) { + return ValueNull.INSTANCE; + } + t = columns[ordinal - 1].getType(); + break; + } + default: + return ValueNull.INSTANCE; + } + return ValueVarchar.get(t.getSQL(DEFAULT_SQL_FLAGS)); + } + + @Override + public Expression optimize(SessionLocal session) { + optimizeArguments(session, false); + type = TypeInfo.TYPE_VARCHAR; + return this; + } + + @Override + public String getName() { + return "DATA_TYPE_SQL"; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + +} diff --git a/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java b/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java new file mode 100644 index 0000000000..e426807e91 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DateTimeFormatFunction.java @@ -0,0 +1,313 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQueries; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Objects; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.JSR310Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; + +/** + * A date-time format function. + */ +public final class DateTimeFormatFunction extends FunctionN { + + private static final class CacheKey { + + private final String format; + + private final String locale; + + private final String timeZone; + + CacheKey(String format, String locale, String timeZone) { + this.format = format; + this.locale = locale; + this.timeZone = timeZone; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + format.hashCode(); + result = prime * result + ((locale == null) ? 0 : locale.hashCode()); + result = prime * result + ((timeZone == null) ? 
0 : timeZone.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!(obj instanceof CacheKey)) { + return false; + } + CacheKey other = (CacheKey) obj; + return format.equals(other.format) && Objects.equals(locale, other.locale) + && Objects.equals(timeZone, other.timeZone); + } + + } + + private static final class CacheValue { + + final DateTimeFormatter formatter; + + final ZoneId zoneId; + + CacheValue(DateTimeFormatter formatter, ZoneId zoneId) { + this.formatter = formatter; + this.zoneId = zoneId; + } + + } + + /** + * FORMATDATETIME() (non-standard). + */ + public static final int FORMATDATETIME = 0; + + /** + * PARSEDATETIME() (non-standard). + */ + public static final int PARSEDATETIME = FORMATDATETIME + 1; + + private static final String[] NAMES = { // + "FORMATDATETIME", "PARSEDATETIME" // + }; + + private static final LinkedHashMap CACHE = new LinkedHashMap() { + + private static final long serialVersionUID = 1L; + + @Override + protected boolean removeEldestEntry(java.util.Map.Entry eldest) { + return size() > 100; + } + + }; + + private final int function; + + public DateTimeFormatFunction(int function) { + super(new Expression[4]); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + String format = v2.getString(), locale, tz; + if (v3 != null) { + locale = v3.getString(); + tz = args.length > 3 ? args[3].getValue(session).getString() : null; + } else { + tz = locale = null; + } + switch (function) { + case FORMATDATETIME: + v1 = ValueVarchar.get(formatDateTime(session, v1, format, locale, tz)); + break; + case PARSEDATETIME: + v1 = parseDateTime(session, v1.getString(), format, locale, tz); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + /** + * Formats a date using a format string. 
+ * + * @param session + * the session + * @param date + * the date to format + * @param format + * the format string + * @param locale + * the locale + * @param timeZone + * the time zone + * @return the formatted date + */ + public static String formatDateTime(SessionLocal session, Value date, String format, String locale, + String timeZone) { + CacheValue formatAndZone = getDateFormat(format, locale, timeZone); + ZoneId zoneId = formatAndZone.zoneId; + TemporalAccessor value; + if (date instanceof ValueTimestampTimeZone) { + OffsetDateTime dateTime = JSR310Utils.valueToOffsetDateTime(date, session); + ZoneId zoneToSet; + if (zoneId != null) { + zoneToSet = zoneId; + } else { + ZoneOffset offset = dateTime.getOffset(); + zoneToSet = ZoneId.ofOffset(offset.getTotalSeconds() == 0 ? "UTC" : "GMT", offset); + } + value = dateTime.atZoneSameInstant(zoneToSet); + } else { + LocalDateTime dateTime = JSR310Utils.valueToLocalDateTime(date, session); + value = dateTime.atZone(zoneId != null ? zoneId : ZoneId.of(session.currentTimeZone().getId())); + } + return formatAndZone.formatter.format(value); + } + + /** + * Parses a date using a format string. 
+ * + * @param session + * the session + * @param date + * the date to parse + * @param format + * the parsing format + * @param locale + * the locale + * @param timeZone + * the time zone + * @return the parsed date + */ + public static ValueTimestampTimeZone parseDateTime(SessionLocal session, String date, String format, String locale, + String timeZone) { + CacheValue formatAndZone = getDateFormat(format, locale, timeZone); + try { + ValueTimestampTimeZone result; + TemporalAccessor parsed = formatAndZone.formatter.parse(date); + ZoneId parsedZoneId = parsed.query(TemporalQueries.zoneId()); + if (parsed.isSupported(ChronoField.OFFSET_SECONDS)) { + result = JSR310Utils.offsetDateTimeToValue(OffsetDateTime.from(parsed)); + } else { + if (parsed.isSupported(ChronoField.INSTANT_SECONDS)) { + Instant instant = Instant.from(parsed); + if (parsedZoneId == null) { + parsedZoneId = formatAndZone.zoneId; + } + if (parsedZoneId != null) { + result = JSR310Utils.zonedDateTimeToValue(instant.atZone(parsedZoneId)); + } else { + result = JSR310Utils.offsetDateTimeToValue(instant.atOffset(ZoneOffset.ofTotalSeconds( // + session.currentTimeZone().getTimeZoneOffsetUTC(instant.getEpochSecond())))); + } + } else { + LocalDate localDate = parsed.query(TemporalQueries.localDate()); + LocalTime localTime = parsed.query(TemporalQueries.localTime()); + if (parsedZoneId == null) { + parsedZoneId = formatAndZone.zoneId; + } + if (localDate != null) { + LocalDateTime localDateTime = localTime != null ? LocalDateTime.of(localDate, localTime) + : localDate.atStartOfDay(); + result = parsedZoneId != null + ? JSR310Utils.zonedDateTimeToValue(localDateTime.atZone(parsedZoneId)) + : (ValueTimestampTimeZone) JSR310Utils.localDateTimeToValue(localDateTime) + .convertTo(Value.TIMESTAMP_TZ, session); + } else { + result = parsedZoneId != null + ? 
JSR310Utils.zonedDateTimeToValue( + JSR310Utils.valueToInstant(session.currentTimestamp(), session) + .atZone(parsedZoneId).with(localTime)) + : (ValueTimestampTimeZone) ValueTime.fromNanos(localTime.toNanoOfDay()) + .convertTo(Value.TIMESTAMP_TZ, session); + } + } + } + return result; + } catch (RuntimeException e) { + throw DbException.get(ErrorCode.PARSE_ERROR_1, e, date); + } + } + + private static CacheValue getDateFormat(String format, String locale, String timeZone) { + Exception ex = null; + if (format.length() <= 100) { + try { + CacheValue value; + CacheKey key = new CacheKey(format, locale, timeZone); + synchronized (CACHE) { + value = CACHE.get(key); + if (value == null) { + DateTimeFormatter df; + if (locale == null) { + df = DateTimeFormatter.ofPattern(format); + } else { + df = DateTimeFormatter.ofPattern(format, new Locale(locale)); + } + ZoneId zoneId; + if (timeZone != null) { + zoneId = getZoneId(timeZone); + df.withZone(zoneId); + } else { + zoneId = null; + } + value = new CacheValue(df, zoneId); + CACHE.put(key, value); + } + } + return value; + } catch (Exception e) { + ex = e; + } + } + throw DbException.get(ErrorCode.PARSE_ERROR_1, ex, format + '/' + locale); + } + + private static ZoneId getZoneId(String timeZone) { + try { + return ZoneId.of(timeZone, ZoneId.SHORT_IDS); + } catch (RuntimeException e) { + throw DbException.getInvalidValueException("TIME ZONE", timeZone); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + switch (function) { + case FORMATDATETIME: + type = TypeInfo.TYPE_VARCHAR; + break; + case PARSEDATETIME: + type = TypeInfo.TYPE_TIMESTAMP_TZ; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/DateTimeFunction.java b/h2/src/main/org/h2/expression/function/DateTimeFunction.java new file mode 100644 index 0000000000..9f9c2add21 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/DateTimeFunction.java @@ -0,0 +1,1037 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import static org.h2.util.DateTimeUtils.MILLIS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_DAY; +import static org.h2.util.DateTimeUtils.NANOS_PER_HOUR; +import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE; +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.time.temporal.WeekFields; +import java.util.Locale; + +import org.h2.api.IntervalQualifier; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.IntervalUtils; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDate; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * A date-time function. + */ +public final class DateTimeFunction extends Function1_2 { + + /** + * EXTRACT(). + */ + public static final int EXTRACT = 0; + + /** + * DATE_TRUNC() (non-standard). + */ + public static final int DATE_TRUNC = EXTRACT + 1; + + /** + * DATEADD() (non-standard). 
+ */ + public static final int DATEADD = DATE_TRUNC + 1; + + /** + * DATEDIFF() (non-standard). + */ + public static final int DATEDIFF = DATEADD + 1; + + private static final String[] NAMES = { // + "EXTRACT", "DATE_TRUNC", "DATEADD", "DATEDIFF" // + }; + + // Standard fields + + /** + * Year. + */ + public static final int YEAR = 0; + + /** + * Month. + */ + public static final int MONTH = YEAR + 1; + + /** + * Day of month. + */ + public static final int DAY = MONTH + 1; + + /** + * Hour. + */ + public static final int HOUR = DAY + 1; + + /** + * Minute. + */ + public static final int MINUTE = HOUR + 1; + + /** + * Second. + */ + public static final int SECOND = MINUTE + 1; + + /** + * Time zone hour. + */ + public static final int TIMEZONE_HOUR = SECOND + 1; + + /** + * Time zone minute. + */ + public static final int TIMEZONE_MINUTE = TIMEZONE_HOUR + 1; + + // Additional fields + + /** + * Time zone second. + */ + public static final int TIMEZONE_SECOND = TIMEZONE_MINUTE + 1; + + /** + * Millennium. + */ + public static final int MILLENNIUM = TIMEZONE_SECOND + 1; + + /** + * Century. + */ + public static final int CENTURY = MILLENNIUM + 1; + + /** + * Decade. + */ + public static final int DECADE = CENTURY + 1; + + /** + * Quarter. + */ + public static final int QUARTER = DECADE + 1; + + /** + * Millisecond. + */ + public static final int MILLISECOND = QUARTER + 1; + + /** + * Microsecond. + */ + public static final int MICROSECOND = MILLISECOND + 1; + + /** + * Nanosecond. + */ + public static final int NANOSECOND = MICROSECOND + 1; + + /** + * Day of year. + */ + public static final int DAY_OF_YEAR = NANOSECOND + 1; + + /** + * ISO day of week. + */ + public static final int ISO_DAY_OF_WEEK = DAY_OF_YEAR + 1; + + /** + * ISO week. + */ + public static final int ISO_WEEK = ISO_DAY_OF_WEEK + 1; + + /** + * ISO week-based year. + */ + public static final int ISO_WEEK_YEAR = ISO_WEEK + 1; + + /** + * Day of week (locale-specific). 
+ */ + public static final int DAY_OF_WEEK = ISO_WEEK_YEAR + 1; + + /** + * Week (locale-specific). + */ + public static final int WEEK = DAY_OF_WEEK + 1; + + /** + * Week-based year (locale-specific). + */ + public static final int WEEK_YEAR = WEEK + 1; + + /** + * Epoch. + */ + public static final int EPOCH = WEEK_YEAR + 1; + + /** + * Day of week (locale-specific) for PostgreSQL compatibility. + */ + public static final int DOW = EPOCH + 1; + + private static final int FIELDS_COUNT = DOW + 1; + + private static final String[] FIELD_NAMES = { // + "YEAR", "MONTH", "DAY", // + "HOUR", "MINUTE", "SECOND", // + "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_SECOND", // + "MILLENNIUM", "CENTURY", "DECADE", // + "QUARTER", // + "MILLISECOND", "MICROSECOND", "NANOSECOND", // + "DAY_OF_YEAR", // + "ISO_DAY_OF_WEEK", "ISO_WEEK", "ISO_WEEK_YEAR", // + "DAY_OF_WEEK", "WEEK", "WEEK_YEAR", // + "EPOCH", "DOW", // + }; + + private static final BigDecimal BD_SECONDS_PER_DAY = new BigDecimal(DateTimeUtils.SECONDS_PER_DAY); + + private static final BigInteger BI_SECONDS_PER_DAY = BigInteger.valueOf(DateTimeUtils.SECONDS_PER_DAY); + + private static final BigDecimal BD_NANOS_PER_SECOND = new BigDecimal(NANOS_PER_SECOND); + + /** + * Local definitions of day-of-week, week-of-month, and week-of-year. + */ + private static volatile WeekFields WEEK_FIELDS; + + /** + * Get date-time field for the specified name. 
+ * + * @param name + * the name + * @return the date-time field + * @throws DbException + * on unknown field name + */ + public static int getField(String name) { + switch (StringUtils.toUpperEnglish(name)) { + case "YEAR": + case "YY": + case "YYYY": + case "SQL_TSI_YEAR": + return YEAR; + case "MONTH": + case "M": + case "MM": + case "SQL_TSI_MONTH": + return MONTH; + case "DAY": + case "D": + case "DD": + case "SQL_TSI_DAY": + return DAY; + case "HOUR": + case "HH": + case "SQL_TSI_HOUR": + return HOUR; + case "MINUTE": + case "MI": + case "N": + case "SQL_TSI_MINUTE": + return MINUTE; + case "SECOND": + case "S": + case "SS": + case "SQL_TSI_SECOND": + return SECOND; + case "TIMEZONE_HOUR": + return TIMEZONE_HOUR; + case "TIMEZONE_MINUTE": + return TIMEZONE_MINUTE; + case "TIMEZONE_SECOND": + return TIMEZONE_SECOND; + case "MILLENNIUM": + return MILLENNIUM; + case "CENTURY": + return CENTURY; + case "DECADE": + return DECADE; + case "QUARTER": + return QUARTER; + case "MILLISECOND": + case "MILLISECONDS": + case "MS": + return MILLISECOND; + case "MICROSECOND": + case "MICROSECONDS": + case "MCS": + return MICROSECOND; + case "NANOSECOND": + case "NS": + return NANOSECOND; + case "DAY_OF_YEAR": + case "DAYOFYEAR": + case "DY": + case "DOY": + return DAY_OF_YEAR; + case "ISO_DAY_OF_WEEK": + case "ISODOW": + return ISO_DAY_OF_WEEK; + case "ISO_WEEK": + return ISO_WEEK; + case "ISO_WEEK_YEAR": + case "ISO_YEAR": + case "ISOYEAR": + return ISO_WEEK_YEAR; + case "DAY_OF_WEEK": + case "DAYOFWEEK": + return DAY_OF_WEEK; + case "WEEK": + case "WK": + case "WW": + case "SQL_TSI_WEEK": + return WEEK; + case "WEEK_YEAR": + return WEEK_YEAR; + case "EPOCH": + return EPOCH; + case "DOW": + return DOW; + default: + throw DbException.getInvalidValueException("date-time field", name); + } + } + + /** + * Get the name of the specified date-time field. 
+ * + * @param field + * the date-time field + * @return the name of the specified field + */ + public static String getFieldName(int field) { + if (field < 0 || field >= FIELDS_COUNT) { + throw DbException.getUnsupportedException("datetime field " + field); + } + return FIELD_NAMES[field]; + } + + private final int function, field; + + public DateTimeFunction(int function, int field, Expression arg1, Expression arg2) { + super(arg1, arg2); + this.function = function; + this.field = field; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case EXTRACT: + v1 = field == EPOCH ? extractEpoch(session, v1) : ValueInteger.get(extractInteger(session, v1, field)); + break; + case DATE_TRUNC: + v1 = truncateDate(session, field, v1); + break; + case DATEADD: + v1 = dateadd(session, field, v1.getLong(), v2); + break; + case DATEDIFF: + v1 = ValueBigint.get(datediff(session, field, v1, v2)); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + /** + * Get the specified field of a date, however with years normalized to + * positive or negative, and month starting with 1. + * + * @param session + * the session + * @param date + * the date value + * @param field + * the field type + * @return the value + */ + private static int extractInteger(SessionLocal session, Value date, int field) { + return date instanceof ValueInterval ? 
extractInterval(date, field) : extractDateTime(session, date, field); + } + + private static int extractInterval(Value date, int field) { + ValueInterval interval = (ValueInterval) date; + IntervalQualifier qualifier = interval.getQualifier(); + boolean negative = interval.isNegative(); + long leading = interval.getLeading(), remaining = interval.getRemaining(); + long v; + switch (field) { + case YEAR: + v = IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining); + break; + case MONTH: + v = IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining); + break; + case DAY: + case DAY_OF_YEAR: + v = IntervalUtils.daysFromInterval(qualifier, negative, leading, remaining); + break; + case HOUR: + v = IntervalUtils.hoursFromInterval(qualifier, negative, leading, remaining); + break; + case MINUTE: + v = IntervalUtils.minutesFromInterval(qualifier, negative, leading, remaining); + break; + case SECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / NANOS_PER_SECOND; + break; + case MILLISECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000_000 % 1_000; + break; + case MICROSECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) / 1_000 % 1_000_000; + break; + case NANOSECOND: + v = IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining) % NANOS_PER_SECOND; + break; + default: + throw DbException.getUnsupportedException("getDatePart(" + date + ", " + field + ')'); + } + return (int) v; + } + + static int extractDateTime(SessionLocal session, Value date, int field) { + long[] a = DateTimeUtils.dateAndTimeFromValue(date, session); + long dateValue = a[0]; + long timeNanos = a[1]; + switch (field) { + case YEAR: + return DateTimeUtils.yearFromDateValue(dateValue); + case MONTH: + return DateTimeUtils.monthFromDateValue(dateValue); + case DAY: + return DateTimeUtils.dayFromDateValue(dateValue); + case HOUR: + return (int) 
(timeNanos / NANOS_PER_HOUR % 24); + case MINUTE: + return (int) (timeNanos / NANOS_PER_MINUTE % 60); + case SECOND: + return (int) (timeNanos / NANOS_PER_SECOND % 60); + case MILLISECOND: + return (int) (timeNanos / 1_000_000 % 1_000); + case MICROSECOND: + return (int) (timeNanos / 1_000 % 1_000_000); + case NANOSECOND: + return (int) (timeNanos % NANOS_PER_SECOND); + case MILLENNIUM: + return millennium(DateTimeUtils.yearFromDateValue(dateValue)); + case CENTURY: + return century(DateTimeUtils.yearFromDateValue(dateValue)); + case DECADE: + return decade(DateTimeUtils.yearFromDateValue(dateValue)); + case DAY_OF_YEAR: + return DateTimeUtils.getDayOfYear(dateValue); + case DOW: + if (session.getMode().getEnum() == ModeEnum.PostgreSQL) { + return DateTimeUtils.getSundayDayOfWeek(dateValue) - 1; + } + //$FALL-THROUGH$ + case DAY_OF_WEEK: + return getLocalDayOfWeek(dateValue); + case WEEK: + return getLocalWeekOfYear(dateValue); + case WEEK_YEAR: { + WeekFields wf = getWeekFields(); + return DateTimeUtils.getWeekYear(dateValue, wf.getFirstDayOfWeek().getValue(), + wf.getMinimalDaysInFirstWeek()); + } + case QUARTER: + return (DateTimeUtils.monthFromDateValue(dateValue) - 1) / 3 + 1; + case ISO_WEEK_YEAR: + return DateTimeUtils.getIsoWeekYear(dateValue); + case ISO_WEEK: + return DateTimeUtils.getIsoWeekOfYear(dateValue); + case ISO_DAY_OF_WEEK: + return DateTimeUtils.getIsoDayOfWeek(dateValue); + case TIMEZONE_HOUR: + case TIMEZONE_MINUTE: + case TIMEZONE_SECOND: { + int offsetSeconds; + if (date instanceof ValueTimestampTimeZone) { + offsetSeconds = ((ValueTimestampTimeZone) date).getTimeZoneOffsetSeconds(); + } else if (date instanceof ValueTimeTimeZone) { + offsetSeconds = ((ValueTimeTimeZone) date).getTimeZoneOffsetSeconds(); + } else { + offsetSeconds = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue, timeNanos); + } + if (field == TIMEZONE_HOUR) { + return offsetSeconds / 3_600; + } else if (field == TIMEZONE_MINUTE) { + return offsetSeconds % 3_600 
/ 60; + } else { + return offsetSeconds % 60; + } + } + default: + throw DbException.getUnsupportedException("EXTRACT(" + getFieldName(field) + " FROM " + date + ')'); + } + } + + /** + * Truncate the given date-time value to the specified field. + * + * @param session + * the session + * @param field + * the date-time field + * @param value + * the date-time value + * @return date the truncated value + */ + private static Value truncateDate(SessionLocal session, int field, Value value) { + long[] fieldDateAndTime = DateTimeUtils.dateAndTimeFromValue(value, session); + long dateValue = fieldDateAndTime[0]; + long timeNanos = fieldDateAndTime[1]; + switch (field) { + case MICROSECOND: + timeNanos = timeNanos / 1_000L * 1_000L; + break; + case MILLISECOND: + timeNanos = timeNanos / 1_000_000L * 1_000_000L; + break; + case SECOND: + timeNanos = timeNanos / NANOS_PER_SECOND * NANOS_PER_SECOND; + break; + case MINUTE: + timeNanos = timeNanos / NANOS_PER_MINUTE * NANOS_PER_MINUTE; + break; + case HOUR: + timeNanos = timeNanos / NANOS_PER_HOUR * NANOS_PER_HOUR; + break; + case DAY: + timeNanos = 0L; + break; + case ISO_WEEK: + dateValue = truncateToWeek(dateValue, 1); + timeNanos = 0L; + break; + case WEEK: + dateValue = truncateToWeek(dateValue, getWeekFields().getFirstDayOfWeek().getValue()); + timeNanos = 0L; + break; + case ISO_WEEK_YEAR: + dateValue = truncateToWeekYear(dateValue, 1, 4); + timeNanos = 0L; + break; + case WEEK_YEAR: { + WeekFields weekFields = getWeekFields(); + dateValue = truncateToWeekYear(dateValue, weekFields.getFirstDayOfWeek().getValue(), + weekFields.getMinimalDaysInFirstWeek()); + break; + } + case MONTH: + dateValue = dateValue & (-1L << DateTimeUtils.SHIFT_MONTH) | 1L; + timeNanos = 0L; + break; + case QUARTER: + dateValue = DateTimeUtils.dateValue(DateTimeUtils.yearFromDateValue(dateValue), + ((DateTimeUtils.monthFromDateValue(dateValue) - 1) / 3) * 3 + 1, 1); + timeNanos = 0L; + break; + case YEAR: + dateValue = dateValue & (-1L << 
DateTimeUtils.SHIFT_YEAR) | (1L << DateTimeUtils.SHIFT_MONTH | 1L); + timeNanos = 0L; + break; + case DECADE: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year >= 0) { + year = year / 10 * 10; + } else { + year = (year - 9) / 10 * 10; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + case CENTURY: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year > 0) { + year = (year - 1) / 100 * 100 + 1; + } else { + year = year / 100 * 100 - 99; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + case MILLENNIUM: { + int year = DateTimeUtils.yearFromDateValue(dateValue); + if (year > 0) { + year = (year - 1) / 1000 * 1000 + 1; + } else { + year = year / 1000 * 1000 - 999; + } + dateValue = DateTimeUtils.dateValue(year, 1, 1); + timeNanos = 0L; + break; + } + default: + throw DbException.getUnsupportedException("DATE_TRUNC " + getFieldName(field)); + } + Value result = DateTimeUtils.dateTimeToValue(value, dateValue, timeNanos); + if (session.getMode().getEnum() == ModeEnum.PostgreSQL && result.getValueType() == Value.DATE) { + result = result.convertTo(Value.TIMESTAMP_TZ, session); + } + return result; + } + + private static long truncateToWeek(long dateValue, int firstDayOfWeek) { + long absoluteDay = DateTimeUtils.absoluteDayFromDateValue(dateValue); + int dayOfWeek = DateTimeUtils.getDayOfWeekFromAbsolute(absoluteDay, firstDayOfWeek); + if (dayOfWeek != 1) { + dateValue = DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay - dayOfWeek + 1); + } + return dateValue; + } + + private static long truncateToWeekYear(long dateValue, int firstDayOfWeek, int minimalDaysInFirstWeek) { + long abs = DateTimeUtils.absoluteDayFromDateValue(dateValue); + int year = DateTimeUtils.yearFromDateValue(dateValue); + long base = DateTimeUtils.getWeekYearAbsoluteStart(year, firstDayOfWeek, minimalDaysInFirstWeek); + if (abs < base) { + base = DateTimeUtils.getWeekYearAbsoluteStart(year - 
1, firstDayOfWeek, minimalDaysInFirstWeek); + } else if (DateTimeUtils.monthFromDateValue(dateValue) == 12 + && 24 + minimalDaysInFirstWeek < DateTimeUtils.dayFromDateValue(dateValue)) { + long next = DateTimeUtils.getWeekYearAbsoluteStart(year + 1, firstDayOfWeek, minimalDaysInFirstWeek); + if (abs >= next) { + base = next; + } + } + return DateTimeUtils.dateValueFromAbsoluteDay(base); + } + + /** + * DATEADD function. + * + * @param session + * the session + * @param field + * the date-time field + * @param count + * count to add + * @param v + * value to add to + * @return result + */ + public static Value dateadd(SessionLocal session, int field, long count, Value v) { + if (field != MILLISECOND && field != MICROSECOND && field != NANOSECOND + && (count > Integer.MAX_VALUE || count < Integer.MIN_VALUE)) { + throw DbException.getInvalidValueException("DATEADD count", count); + } + long[] a = DateTimeUtils.dateAndTimeFromValue(v, session); + long dateValue = a[0]; + long timeNanos = a[1]; + int type = v.getValueType(); + switch (field) { + case MILLENNIUM: + return addYearsMonths(field, true, count * 1_000, v, type, dateValue, timeNanos); + case CENTURY: + return addYearsMonths(field, true, count * 100, v, type, dateValue, timeNanos); + case DECADE: + return addYearsMonths(field, true, count * 10, v, type, dateValue, timeNanos); + case YEAR: + return addYearsMonths(field, true, count, v, type, dateValue, timeNanos); + case QUARTER: + return addYearsMonths(field, false, count *= 3, v, type, dateValue, timeNanos); + case MONTH: + return addYearsMonths(field, false, count, v, type, dateValue, timeNanos); + case WEEK: + case ISO_WEEK: + count *= 7; + //$FALL-THROUGH$ + case DAY_OF_WEEK: + case DOW: + case ISO_DAY_OF_WEEK: + case DAY: + case DAY_OF_YEAR: + if (type == Value.TIME || type == Value.TIME_TZ) { + throw DbException.getInvalidValueException("DATEADD time part", getFieldName(field)); + } + dateValue = DateTimeUtils + 
.dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + count); + return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos); + case HOUR: + count *= NANOS_PER_HOUR; + break; + case MINUTE: + count *= NANOS_PER_MINUTE; + break; + case SECOND: + case EPOCH: + count *= NANOS_PER_SECOND; + break; + case MILLISECOND: + count *= 1_000_000; + break; + case MICROSECOND: + count *= 1_000; + break; + case NANOSECOND: + break; + case TIMEZONE_HOUR: + return addToTimeZone(field, count * 3_600, v, type, dateValue, timeNanos); + case TIMEZONE_MINUTE: + return addToTimeZone(field, count * 60, v, type, dateValue, timeNanos); + case TIMEZONE_SECOND: + return addToTimeZone(field, count, v, type, dateValue, timeNanos); + default: + throw DbException.getUnsupportedException("DATEADD " + getFieldName(field)); + } + timeNanos += count; + if (timeNanos >= NANOS_PER_DAY || timeNanos < 0) { + long d; + if (timeNanos >= NANOS_PER_DAY) { + d = timeNanos / NANOS_PER_DAY; + } else { + d = (timeNanos - NANOS_PER_DAY + 1) / NANOS_PER_DAY; + } + dateValue = DateTimeUtils.dateValueFromAbsoluteDay(DateTimeUtils.absoluteDayFromDateValue(dateValue) + d); + timeNanos -= d * NANOS_PER_DAY; + } + if (type == Value.DATE) { + return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); + } + return DateTimeUtils.dateTimeToValue(v, dateValue, timeNanos); + } + + private static Value addYearsMonths(int field, boolean years, long count, Value v, int type, long dateValue, + long timeNanos) { + if (type == Value.TIME || type == Value.TIME_TZ) { + throw DbException.getInvalidValueException("DATEADD time part", getFieldName(field)); + } + long year = DateTimeUtils.yearFromDateValue(dateValue); + long month = DateTimeUtils.monthFromDateValue(dateValue); + if (years) { + year += count; + } else { + month += count; + } + return DateTimeUtils.dateTimeToValue(v, + DateTimeUtils.dateValueFromDenormalizedDate(year, month, DateTimeUtils.dayFromDateValue(dateValue)), + timeNanos); + } + + 
    /**
     * Applies a DATEADD on a TIMEZONE_* field by adjusting the time zone
     * offset; only values that carry a time zone are supported.
     *
     * @param field the TIMEZONE_HOUR/MINUTE/SECOND field (used in error messages)
     * @param count offset adjustment in seconds
     * @param v the original value
     * @param type the value type of {@code v}
     * @param dateValue the date value
     * @param timeNanos nanoseconds since midnight
     * @return the value with the adjusted offset
     */
    private static Value addToTimeZone(int field, long count, Value v, int type, long dateValue, long timeNanos) {
        if (type == Value.TIMESTAMP_TZ) {
            return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, timeNanos,
                    MathUtils.convertLongToInt(count + ((ValueTimestampTimeZone) v).getTimeZoneOffsetSeconds()));
        } else if (type == Value.TIME_TZ) {
            return ValueTimeTimeZone.fromNanos(timeNanos,
                    MathUtils.convertLongToInt(count + ((ValueTimeTimeZone) v).getTimeZoneOffsetSeconds()));
        } else {
            throw DbException.getUnsupportedException("DATEADD " + getFieldName(field));
        }
    }

    /**
     * Calculate the number of crossed unit boundaries between two timestamps.
     * This method is supported for MS SQL Server compatibility.
     *
     * <pre>
     * DATEDIFF(YEAR, '2004-12-31', '2005-01-01') = 1
     * </pre>
     *
     * @param session
     *            the session
     * @param field
     *            the date-time field
     * @param v1
     *            the first date-time value
     * @param v2
     *            the second date-time value
     * @return the number of crossed boundaries
     */
    private static long datediff(SessionLocal session, int field, Value v1, Value v2) {
        long[] a1 = DateTimeUtils.dateAndTimeFromValue(v1, session);
        long dateValue1 = a1[0];
        long absolute1 = DateTimeUtils.absoluteDayFromDateValue(dateValue1);
        long[] a2 = DateTimeUtils.dateAndTimeFromValue(v2, session);
        long dateValue2 = a2[0];
        long absolute2 = DateTimeUtils.absoluteDayFromDateValue(dateValue2);
        switch (field) {
        case NANOSECOND:
        case MICROSECOND:
        case MILLISECOND:
        case SECOND:
        case EPOCH:
        case MINUTE:
        case HOUR:
            // Sub-day fields: combine the whole-day difference with the
            // truncated difference of the time-of-day parts.
            long timeNanos1 = a1[1];
            long timeNanos2 = a2[1];
            switch (field) {
            case NANOSECOND:
                return (absolute2 - absolute1) * NANOS_PER_DAY + (timeNanos2 - timeNanos1);
            case MICROSECOND:
                return (absolute2 - absolute1) * (MILLIS_PER_DAY * 1_000) + (timeNanos2 / 1_000 - timeNanos1 / 1_000);
            case MILLISECOND:
                return (absolute2 - absolute1) * MILLIS_PER_DAY + (timeNanos2 / 1_000_000 - timeNanos1 / 1_000_000);
            case SECOND:
            case EPOCH:
                return (absolute2 - absolute1) * 86_400
                        + (timeNanos2 / NANOS_PER_SECOND - timeNanos1 / NANOS_PER_SECOND);
            case MINUTE:
                return (absolute2 - absolute1) * 1_440
                        + (timeNanos2 / NANOS_PER_MINUTE - timeNanos1 / NANOS_PER_MINUTE);
            case HOUR:
                return (absolute2 - absolute1) * 24 + (timeNanos2 / NANOS_PER_HOUR - timeNanos1 / NANOS_PER_HOUR);
            }
            // Fake fall-through: the inner switch above covers every value of
            // the outer case labels, so this point is unreachable.
            // $FALL-THROUGH$
        case DAY:
        case DAY_OF_YEAR:
        case DAY_OF_WEEK:
        case DOW:
        case ISO_DAY_OF_WEEK:
            return absolute2 - absolute1;
        case WEEK:
            return weekdiff(absolute1, absolute2, getWeekFields().getFirstDayOfWeek().getValue());
        case ISO_WEEK:
            // ISO weeks always start on Monday (1).
            return weekdiff(absolute1, absolute2, 1);
        case MONTH:
            return (DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1)) * 12
                    + DateTimeUtils.monthFromDateValue(dateValue2) - DateTimeUtils.monthFromDateValue(dateValue1);
        case QUARTER:
            return (DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1)) * 4
                    + (DateTimeUtils.monthFromDateValue(dateValue2) - 1) / 3
                    - (DateTimeUtils.monthFromDateValue(dateValue1) - 1) / 3;
        case MILLENNIUM:
            return millennium(DateTimeUtils.yearFromDateValue(dateValue2))
                    - millennium(DateTimeUtils.yearFromDateValue(dateValue1));
        case CENTURY:
            return century(DateTimeUtils.yearFromDateValue(dateValue2))
                    - century(DateTimeUtils.yearFromDateValue(dateValue1));
        case DECADE:
            return decade(DateTimeUtils.yearFromDateValue(dateValue2))
                    - decade(DateTimeUtils.yearFromDateValue(dateValue1));
        case YEAR:
            return DateTimeUtils.yearFromDateValue(dateValue2) - DateTimeUtils.yearFromDateValue(dateValue1);
        case TIMEZONE_HOUR:
        case TIMEZONE_MINUTE:
        case TIMEZONE_SECOND: {
            // For values without an explicit offset, use the session time zone.
            int offsetSeconds1;
            if (v1 instanceof ValueTimestampTimeZone) {
                offsetSeconds1 = ((ValueTimestampTimeZone) v1).getTimeZoneOffsetSeconds();
            } else if (v1 instanceof ValueTimeTimeZone) {
                offsetSeconds1 = ((ValueTimeTimeZone) v1).getTimeZoneOffsetSeconds();
            } else {
                offsetSeconds1 = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue1, a1[1]);
            }
            int offsetSeconds2;
            if (v2 instanceof ValueTimestampTimeZone) {
                offsetSeconds2 = ((ValueTimestampTimeZone) v2).getTimeZoneOffsetSeconds();
            } else if (v2 instanceof ValueTimeTimeZone) {
                offsetSeconds2 = ((ValueTimeTimeZone) v2).getTimeZoneOffsetSeconds();
            } else {
                offsetSeconds2 = session.currentTimeZone().getTimeZoneOffsetLocal(dateValue2, a2[1]);
            }
            if (field == TIMEZONE_HOUR) {
                return (offsetSeconds2 / 3_600) - (offsetSeconds1 / 3_600);
            } else if (field == TIMEZONE_MINUTE) {
                return (offsetSeconds2 / 60) - (offsetSeconds1 / 60);
            } else {
                return offsetSeconds2 - offsetSeconds1;
            }
        }
        default:
            throw DbException.getUnsupportedException("DATEDIFF " + getFieldName(field));
        }
    }

    /**
     * Returns the number of week boundaries crossed between two absolute days.
     * Each day is shifted so that week starts align to multiples of 7, then a
     * floor division by 7 is applied (the manual corrections handle negative
     * absolute day numbers, where Java's division truncates toward zero).
     *
     * @param absolute1 absolute day of the first date
     * @param absolute2 absolute day of the second date
     * @param firstDayOfWeek the first day of week, 1 = Monday .. 7 = Sunday
     * @return difference of week numbers
     */
    private static long weekdiff(long absolute1, long absolute2, int firstDayOfWeek) {
        absolute1 += 4 - firstDayOfWeek;
        long r1 = absolute1 / 7;
        if (absolute1 < 0 && (r1 * 7 != absolute1)) {
            r1--;
        }
        absolute2 += 4 - firstDayOfWeek;
        long r2 = absolute2 / 7;
        if (absolute2 < 0 && (r2 * 7 != absolute2)) {
            r2--;
        }
        return r2 - r1;
    }

    // Millennia and centuries start at year 1 (e.g. 1001-2000 is one
    // millennium), decades are plain multiples of 10.
    private static int millennium(int year) {
        return year > 0 ? (year + 999) / 1_000 : year / 1_000;
    }

    private static int century(int year) {
        return year > 0 ? (year + 99) / 100 : year / 100;
    }

    private static int decade(int year) {
        return year >= 0 ? year / 10 : (year - 9) / 10;
    }

    // Day of week according to the default locale's week definition.
    private static int getLocalDayOfWeek(long dateValue) {
        return DateTimeUtils.getDayOfWeek(dateValue, getWeekFields().getFirstDayOfWeek().getValue());
    }

    // Week of year according to the default locale's week definition.
    private static int getLocalWeekOfYear(long dateValue) {
        WeekFields weekFields = getWeekFields();
        return DateTimeUtils.getWeekOfYear(dateValue, weekFields.getFirstDayOfWeek().getValue(),
                weekFields.getMinimalDaysInFirstWeek());
    }

    private static WeekFields getWeekFields() {
        // Lazy initialization from the default locale; a concurrent
        // initialization would compute the same value, so the race is benign
        // (NOTE(review): assumes WEEK_FIELDS, declared above this chunk, is
        // volatile or that stale reads are acceptable — confirm).
        WeekFields weekFields = WEEK_FIELDS;
        if (weekFields == null) {
            WEEK_FIELDS = weekFields = WeekFields.of(Locale.getDefault());
        }
        return weekFields;
    }

    /**
     * EXTRACT(EPOCH FROM ...): seconds since midnight for times, seconds since
     * the epoch for dates/timestamps, or total seconds for intervals.
     *
     * @param session the session
     * @param value the date-time or interval value
     * @return the number of seconds as NUMERIC (with fractional part)
     */
    private static ValueNumeric extractEpoch(SessionLocal session, Value value) {
        ValueNumeric result;
        if (value instanceof ValueInterval) {
            ValueInterval interval = (ValueInterval) value;
            if (interval.getQualifier().isYearMonth()) {
                interval = (ValueInterval) interval.convertTo(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH);
                long leading = interval.getLeading();
                long remaining = interval.getRemaining();
                // 31557600 = 365.25 days per year, 2592000 = 30 days per month (in seconds).
                BigInteger bi = BigInteger.valueOf(leading).multiply(BigInteger.valueOf(31557600))
                        .add(BigInteger.valueOf(remaining * 2592000));
                if (interval.isNegative()) {
                    bi = bi.negate();
                }
                return ValueNumeric.get(bi);
            } else {
                return ValueNumeric
                        .get(new BigDecimal(IntervalUtils.intervalToAbsolute(interval)).divide(BD_NANOS_PER_SECOND));
            }
        }
        long[] a = DateTimeUtils.dateAndTimeFromValue(value, session);
        long dateValue = a[0];
        long timeNanos = a[1];
        if (value instanceof ValueTime) {
            result = ValueNumeric.get(BigDecimal.valueOf(timeNanos).divide(BD_NANOS_PER_SECOND));
        } else if (value instanceof ValueDate) {
            result = ValueNumeric.get(BigInteger.valueOf(DateTimeUtils.absoluteDayFromDateValue(dateValue)) //
                    .multiply(BI_SECONDS_PER_DAY));
        } else {
            BigDecimal bd = BigDecimal.valueOf(timeNanos).divide(BD_NANOS_PER_SECOND)
                    .add(BigDecimal.valueOf(DateTimeUtils.absoluteDayFromDateValue(dateValue)) //
                            .multiply(BD_SECONDS_PER_DAY));
            // Values with a time zone are normalized to UTC by subtracting the offset.
            if (value instanceof ValueTimestampTimeZone) {
                result = ValueNumeric.get(
                        bd.subtract(BigDecimal.valueOf(((ValueTimestampTimeZone) value).getTimeZoneOffsetSeconds())));
            } else if (value instanceof ValueTimeTimeZone) {
                result = ValueNumeric
                        .get(bd.subtract(BigDecimal.valueOf(((ValueTimeTimeZone) value).getTimeZoneOffsetSeconds())));
            } else {
                result = ValueNumeric.get(bd);
            }
        }
        return result;
    }

    @Override
    public Expression optimize(SessionLocal session) {
        left = left.optimize(session);
        if (right != null) {
            right = right.optimize(session);
        }
        // Determine the result type per function, then fold constants.
        switch (function) {
        case EXTRACT:
            // EPOCH yields a NUMERIC with fractional seconds; all other fields
            // are plain integers.
            type = field == EPOCH ? TypeInfo.getTypeInfo(Value.NUMERIC,
                    ValueBigint.DECIMAL_PRECISION + ValueTimestamp.MAXIMUM_SCALE, ValueTimestamp.MAXIMUM_SCALE, null)
                    : TypeInfo.TYPE_INTEGER;
            break;
        case DATE_TRUNC: {
            type = left.getType();
            int valueType = type.getValueType();
            // TODO set scale when possible
            if (!DataType.isDateTimeType(valueType)) {
                throw DbException.getInvalidExpressionTypeException("DATE_TRUNC datetime argument", left);
            } else if (session.getMode().getEnum() == ModeEnum.PostgreSQL && valueType == Value.DATE) {
                type = TypeInfo.TYPE_TIMESTAMP_TZ;
            }
            break;
        }
        case DATEADD: {
            int valueType = right.getType().getValueType();
            if (valueType == Value.DATE) {
                // Adding a sub-day field to a DATE produces a TIMESTAMP.
                switch (field) {
                case HOUR:
                case MINUTE:
                case SECOND:
                case MILLISECOND:
                case MICROSECOND:
                case NANOSECOND:
                case EPOCH:
                    valueType = Value.TIMESTAMP;
                }
            }
            type = TypeInfo.getTypeInfo(valueType);
            break;
        }
        case DATEDIFF:
            type = TypeInfo.TYPE_BIGINT;
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        if (left.isConstant() && (right == null || right.isConstant())) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        builder.append(getName()).append('(').append(getFieldName(field));
        switch (function) {
        case EXTRACT:
            // EXTRACT uses keyword syntax: EXTRACT(FIELD FROM expr).
            left.getUnenclosedSQL(builder.append(" FROM "), sqlFlags);
            break;
        case DATE_TRUNC:
            left.getUnenclosedSQL(builder.append(", "), sqlFlags);
            break;
        case DATEADD:
        case DATEDIFF:
            left.getUnenclosedSQL(builder.append(", "), sqlFlags).append(", ");
            right.getUnenclosedSQL(builder, sqlFlags);
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return builder.append(')');
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.text.DateFormatSymbols;
import java.util.Locale;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.TypedValueExpression;
import org.h2.message.DbException;
import org.h2.util.DateTimeUtils;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueNull;
import org.h2.value.ValueVarchar;

/**
 * A DAYNAME() or MONTHNAME() function.
 */
public final class DayMonthNameFunction extends Function1 {

    /**
     * DAYNAME() (non-standard).
     */
    public static final int DAYNAME = 0;

    /**
     * MONTHNAME() (non-standard).
     */
    public static final int MONTHNAME = DAYNAME + 1;

    private static final String[] NAMES = { //
            "DAYNAME", "MONTHNAME" //
    };

    /**
     * English names of months and week days, initialized lazily; index 0
     * holds month names and index 1 holds weekday names.
     */
    private static volatile String[][] MONTHS_AND_WEEKS;

    private final int function;

    public DayMonthNameFunction(Expression arg, int function) {
        super(arg);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session) {
        Value value = arg.getValue(session);
        if (value == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        long dateValue = DateTimeUtils.dateAndTimeFromValue(value, session)[0];
        String name;
        if (function == DAYNAME) {
            name = getMonthsAndWeeks(1)[DateTimeUtils.getDayOfWeek(dateValue, 0)];
        } else if (function == MONTHNAME) {
            name = getMonthsAndWeeks(0)[DateTimeUtils.monthFromDateValue(dateValue) - 1];
        } else {
            throw DbException.getInternalError("function=" + function);
        }
        return ValueVarchar.get(name, session);
    }

    /**
     * Return names of month or weeks.
     *
     * @param field
     *            0 for months, 1 for weekdays
     * @return names of month or weeks
     */
    private static String[] getMonthsAndWeeks(int field) {
        // Benign race: concurrent initialization produces the same content.
        String[][] names = MONTHS_AND_WEEKS;
        if (names == null) {
            DateFormatSymbols symbols = DateFormatSymbols.getInstance(Locale.ENGLISH);
            names = new String[2][];
            names[0] = symbols.getMonths();
            names[1] = symbols.getWeekdays();
            MONTHS_AND_WEEKS = names;
        }
        return names[field];
    }

    @Override
    public Expression optimize(SessionLocal session) {
        arg = arg.optimize(session);
        type = TypeInfo.getTypeInfo(Value.VARCHAR, 20, 0, null);
        if (!arg.isConstant()) {
            return this;
        }
        return TypedValueExpression.getTypedIfNull(getValue(session), type);
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.h2.engine.Database;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.message.DbException;
import org.h2.store.fs.FileUtils;
import org.h2.util.IOUtils;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueBigint;
import org.h2.value.ValueLob;
import org.h2.value.ValueNull;

/**
 * A FILE_READ or FILE_WRITE function.
 */
public final class FileFunction extends Function1_2 {

    /**
     * FILE_READ() (non-standard).
     */
    public static final int FILE_READ = 0;

    /**
     * FILE_WRITE() (non-standard).
     */
    public static final int FILE_WRITE = FILE_READ + 1;

    private static final String[] NAMES = { //
            "FILE_READ", "FILE_WRITE" //
    };

    private final int function;

    public FileFunction(Expression arg1, Expression arg2, int function) {
        super(arg1, arg2);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session) {
        // Both functions touch the local file system: admin rights required.
        session.getUser().checkAdmin();
        Value v1 = left.getValue(session);
        if (v1 == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        switch (function) {
        case FILE_READ: {
            String fileName = v1.getString();
            Database database = session.getDatabase();
            try {
                // NOTE(review): size is read before opening the stream; a file
                // changing concurrently may yield an inconsistent length.
                long fileLength = FileUtils.size(fileName);
                ValueLob lob;
                try (InputStream in = FileUtils.newInputStream(fileName)) {
                    if (right == null) {
                        // One argument: read the file as a BLOB.
                        lob = database.getLobStorage().createBlob(in, fileLength);
                    } else {
                        // Two arguments: read as a CLOB; a NULL second argument
                        // selects the default charset.
                        Value v2 = right.getValue(session);
                        Reader reader = v2 == ValueNull.INSTANCE ? new InputStreamReader(in)
                                : new InputStreamReader(in, v2.getString());
                        lob = database.getLobStorage().createClob(reader, fileLength);
                    }
                }
                // Register the LOB with the session so it is released afterwards.
                v1 = session.addTemporaryLob(lob);
            } catch (IOException e) {
                throw DbException.convertIOException(e, fileName);
            }
            break;
        }
        case FILE_WRITE: {
            Value v2 = right.getValue(session);
            if (v2 == ValueNull.INSTANCE) {
                // NULL file name: result is NULL, nothing is written.
                v1 = ValueNull.INSTANCE;
            } else {
                String fileName = v2.getString();
                try (OutputStream fileOutputStream = Files.newOutputStream(Paths.get(fileName));
                        InputStream in = v1.getInputStream()) {
                    // Result is the number of bytes written.
                    v1 = ValueBigint.get(IOUtils.copy(in, fileOutputStream));
                } catch (IOException e) {
                    throw DbException.convertIOException(e, fileName);
                }
            }
            break;
        }
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v1;
    }

    @Override
    public Expression optimize(SessionLocal session) {
        left = left.optimize(session);
        if (right != null) {
            right = right.optimize(session);
        }
        switch (function) {
        case FILE_READ:
            // BLOB without charset argument, CLOB with one.
            type = right == null ? TypeInfo.getTypeInfo(Value.BLOB, Integer.MAX_VALUE, 0, null)
                    : TypeInfo.getTypeInfo(Value.CLOB, Integer.MAX_VALUE, 0, null);
            break;
        case FILE_WRITE:
            type = TypeInfo.TYPE_BIGINT;
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return this;
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
        case ExpressionVisitor.QUERY_COMPARABLE:
            // File contents can change between calls.
            return false;
        case ExpressionVisitor.READONLY:
            if (function == FILE_WRITE) {
                return false;
            }
        }
        return super.isEverything(visitor);
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}

/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.table.ColumnResolver;
import org.h2.table.TableFilter;
import org.h2.value.TypeInfo;

/**
 * Function with one optional argument.
 */
public abstract class Function0_1 extends Expression implements NamedExpression {

    /**
     * The argument of the operation, may be {@code null}.
     */
    protected Expression arg;

    /**
     * The type of the result.
     */
    protected TypeInfo type;

    protected Function0_1(Expression arg) {
        this.arg = arg;
    }

    @Override
    public TypeInfo getType() {
        return type;
    }

    @Override
    public void mapColumns(ColumnResolver resolver, int level, int state) {
        if (arg != null) {
            arg.mapColumns(resolver, level, state);
        }
    }

    @Override
    public void setEvaluatable(TableFilter tableFilter, boolean value) {
        if (arg != null) {
            arg.setEvaluatable(tableFilter, value);
        }
    }

    @Override
    public void updateAggregate(SessionLocal session, int stage) {
        if (arg != null) {
            arg.updateAggregate(session, stage);
        }
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        // With no argument there is nothing that can violate the visitor's property.
        return arg == null || arg.isEverything(visitor);
    }

    @Override
    public int getCost() {
        int cost = 1;
        if (arg != null) {
            cost += arg.getCost();
        }
        return cost;
    }

    @Override
    public int getSubexpressionCount() {
        return arg != null ? 1 : 0;
    }

    @Override
    public Expression getSubexpression(int index) {
        if (index == 0 && arg != null) {
            return arg;
        }
        throw new IndexOutOfBoundsException();
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        builder.append(getName()).append('(');
        if (arg != null) {
            arg.getUnenclosedSQL(builder, sqlFlags);
        }
        return builder.append(')');
    }

}
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.expression.Expression;
import org.h2.expression.Operation1;

/**
 * Function with one argument.
 */
public abstract class Function1 extends Operation1 implements NamedExpression {

    protected Function1(Expression arg) {
        super(arg);
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        // Renders as NAME(argument).
        return arg.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags).append(')');
    }

}

/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.Operation1_2;
import org.h2.message.DbException;
import org.h2.value.Value;
import org.h2.value.ValueNull;

/**
 * Function with two arguments, the second one is optional.
 */
public abstract class Function1_2 extends Operation1_2 implements NamedExpression {

    protected Function1_2(Expression left, Expression right) {
        super(left, right);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // NULL propagation: a NULL argument makes the result NULL without
        // invoking the function's own logic.
        Value v1 = left.getValue(session);
        if (v1 == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        Value v2;
        if (right != null) {
            v2 = right.getValue(session);
            if (v2 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
        } else {
            v2 = null;
        }
        return getValue(session, v1, v2);
    }

    /**
     * Returns the value of this function. Subclasses that do not override
     * {@link #getValue(SessionLocal)} must override this method.
     *
     * @param session
     *            the session
     * @param v1
     *            the value of first argument
     * @param v2
     *            the value of second argument, or {@code null}
     * @return the resulting value
     */
    protected Value getValue(SessionLocal session, Value v1, Value v2) {
        throw DbException.getInternalError();
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        left.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags);
        if (right != null) {
            right.getUnenclosedSQL(builder.append(", "), sqlFlags);
        }
        return builder.append(')');
    }

}

/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.Operation2;
import org.h2.message.DbException;
import org.h2.value.Value;
import org.h2.value.ValueNull;

/**
 * Function with two arguments.
 */
public abstract class Function2 extends Operation2 implements NamedExpression {

    protected Function2(Expression left, Expression right) {
        super(left, right);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // NULL propagation: any NULL argument yields NULL.
        Value v1 = left.getValue(session);
        if (v1 == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        Value v2 = right.getValue(session);
        if (v2 == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        return getValue(session, v1, v2);
    }

    /**
     * Returns the value of this function. Subclasses that do not override
     * {@link #getValue(SessionLocal)} must override this method.
     *
     * @param session
     *            the session
     * @param v1
     *            the value of first argument
     * @param v2
     *            the value of second argument
     * @return the resulting value
     */
    protected Value getValue(SessionLocal session, Value v1, Value v2) {
        throw DbException.getInternalError();
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        left.getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags).append(", ");
        return right.getUnenclosedSQL(builder, sqlFlags).append(')');
    }

}

/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.OperationN;
import org.h2.message.DbException;
import org.h2.value.Value;
import org.h2.value.ValueNull;

/**
 * Function with many arguments.
 */
public abstract class FunctionN extends OperationN implements NamedExpression {

    protected FunctionN(Expression[] args) {
        super(args);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // Evaluates up to the first three arguments with NULL propagation;
        // missing arguments are passed to the subclass as null.
        Value v1, v2, v3;
        int count = args.length;
        if (count >= 1) {
            v1 = args[0].getValue(session);
            if (v1 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            if (count >= 2) {
                v2 = args[1].getValue(session);
                if (v2 == ValueNull.INSTANCE) {
                    return ValueNull.INSTANCE;
                }
                if (count >= 3) {
                    v3 = args[2].getValue(session);
                    if (v3 == ValueNull.INSTANCE) {
                        return ValueNull.INSTANCE;
                    }
                } else {
                    v3 = null;
                }
            } else {
                v3 = v2 = null;
            }
        } else {
            v3 = v2 = v1 = null;
        }
        return getValue(session, v1, v2, v3);
    }

    /**
     * Returns the value of this function. Subclasses that do not override
     * {@link #getValue(SessionLocal)} must override this method.
     *
     * @param session
     *            the session
     * @param v1
     *            the value of first argument, or {@code null}
     * @param v2
     *            the value of second argument, or {@code null}
     * @param v3
     *            the value of third argument, or {@code null}
     * @return the resulting value
     */
    protected Value getValue(SessionLocal session, Value v1, Value v2, Value v3) {
        throw DbException.getInternalError();
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        return writeExpressions(builder.append(getName()).append('('), args, sqlFlags).append(')');
    }

}
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.TypedValueExpression;
import org.h2.message.DbException;
import org.h2.security.SHA3;
import org.h2.util.Bits;
import org.h2.util.StringUtils;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueBigint;
import org.h2.value.ValueNull;
import org.h2.value.ValueVarbinary;

/**
 * A HASH or ORA_HASH function.
 */
public final class HashFunction extends FunctionN {

    /**
     * HASH() (non-standard).
     */
    public static final int HASH = 0;

    /**
     * ORA_HASH() (non-standard).
     */
    public static final int ORA_HASH = HASH + 1;

    private static final String[] NAMES = { //
            "HASH", "ORA_HASH" //
    };

    // One of the function codes above; selects the behavior of getValue().
    private final int function;

    public HashFunction(Expression arg, int function) {
        super(new Expression[] { arg });
        this.function = function;
    }

    public HashFunction(Expression arg1, Expression arg2, Expression arg3, int function) {
        // The third argument is optional for both functions
        super(arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 });
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) {
        switch (function) {
        case HASH:
            // HASH(algorithm, value [, iterations]); iterations defaults to 1
            v1 = getHash(v1.getString(), v2, v3 == null ? 1 : v3.getInt());
            break;
        case ORA_HASH:
            // ORA_HASH(value [, max_bucket [, seed]]); defaults follow Oracle
            v1 = oraHash(v1, v2 == null ? 0xffff_ffffL : v2.getLong(), v3 == null ? 0L : v3.getLong());
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v1;
    }

    /**
     * Computes the digest of the given value, re-hashing the digest
     * {@code iterations - 1} additional times.
     *
     * @param algorithm
     *            the algorithm name (case-insensitive)
     * @param value
     *            the value to hash
     * @param iterations
     *            the number of iterations, must be positive
     * @return the digest as VARBINARY
     */
    private static Value getHash(String algorithm, Value value, int iterations) {
        if (iterations <= 0) {
            throw DbException.getInvalidValueException("iterations", iterations);
        }
        MessageDigest md;
        switch (StringUtils.toUpperEnglish(algorithm)) {
        case "MD5":
        case "SHA-1":
        case "SHA-224":
        case "SHA-256":
        case "SHA-384":
        case "SHA-512":
            // Standard JDK algorithm names are passed through unchanged
            md = hashImpl(value, algorithm);
            break;
        case "SHA256":
            // Legacy alias without the dash
            md = hashImpl(value, "SHA-256");
            break;
        case "SHA3-224":
            md = hashImpl(value, SHA3.getSha3_224());
            break;
        case "SHA3-256":
            md = hashImpl(value, SHA3.getSha3_256());
            break;
        case "SHA3-384":
            md = hashImpl(value, SHA3.getSha3_384());
            break;
        case "SHA3-512":
            // SHA-3 uses H2's own implementation for JDK 8 compatibility
            md = hashImpl(value, SHA3.getSha3_512());
            break;
        default:
            throw DbException.getInvalidValueException("algorithm", algorithm);
        }
        byte[] b = md.digest();
        // Each extra iteration hashes the previous digest
        for (int i = 1; i < iterations; i++) {
            b = md.digest(b);
        }
        return ValueVarbinary.getNoCopy(b);
    }

    /**
     * Emulation of Oracle's ORA_HASH: maps the SHA-1 digest of the value into
     * the range [0, bucket].
     *
     * @param value
     *            the value to hash
     * @param bucket
     *            the maximum bucket number, an unsigned 32-bit value
     * @param seed
     *            the seed, an unsigned 32-bit value; 0 means no seed
     * @return the bucket number as BIGINT
     */
    private static Value oraHash(Value value, long bucket, long seed) {
        // Both bucket and seed must fit in an unsigned 32-bit integer
        if ((bucket & 0xffff_ffff_0000_0000L) != 0L) {
            throw DbException.getInvalidValueException("bucket", bucket);
        }
        if ((seed & 0xffff_ffff_0000_0000L) != 0L) {
            throw DbException.getInvalidValueException("seed", seed);
        }
        MessageDigest md = hashImpl(value, "SHA-1");
        // Defensive: hashImpl currently throws instead of returning null
        if (md == null) {
            return ValueNull.INSTANCE;
        }
        if (seed != 0L) {
            byte[] b = new byte[4];
            Bits.writeInt(b, 0, (int) seed);
            md.update(b);
        }
        long hc = Bits.readLong(md.digest(), 0);
        // Strip sign and use modulo operation to get value from 0 to bucket
        // inclusive
        return ValueBigint.get((hc & Long.MAX_VALUE) % (bucket + 1));
    }

    /**
     * Creates a digest instance for the named algorithm and feeds the value
     * into it.
     */
    private static MessageDigest hashImpl(Value value, String algorithm) {
        MessageDigest md;
        try {
            md = MessageDigest.getInstance(algorithm);
        } catch (Exception ex) {
            throw DbException.convert(ex);
        }
        return hashImpl(value, md);
    }

    /**
     * Feeds the value into the given digest. Character strings are hashed as
     * their UTF-8 bytes, LOBs are streamed in 4 KiB chunks, everything else
     * uses the value's binary representation.
     */
    private static MessageDigest hashImpl(Value value, MessageDigest md) {
        try {
            switch (value.getValueType()) {
            case Value.VARCHAR:
            case Value.CHAR:
            case Value.VARCHAR_IGNORECASE:
                md.update(value.getString().getBytes(StandardCharsets.UTF_8));
                break;
            case Value.BLOB:
            case Value.CLOB: {
                // Stream large objects instead of materializing them
                byte[] buf = new byte[4096];
                try (InputStream is = value.getInputStream()) {
                    for (int r; (r = is.read(buf)) > 0;) {
                        md.update(buf, 0, r);
                    }
                }
                break;
            }
            default:
                md.update(value.getBytesNoCopy());
            }
            return md;
        } catch (Exception ex) {
            throw DbException.convert(ex);
        }
    }

    @Override
    public Expression optimize(SessionLocal session) {
        boolean allConst = optimizeArguments(session, true);
        switch (function) {
        case HASH:
            type = TypeInfo.TYPE_VARBINARY;
            break;
        case ORA_HASH:
            type = TypeInfo.TYPE_BIGINT;
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        // Constant arguments allow evaluation at optimization time
        if (allConst) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.api.ErrorCode;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.expression.ValueExpression;
import org.h2.message.DbException;
import org.h2.schema.FunctionAlias;
import org.h2.table.ColumnResolver;
import org.h2.table.TableFilter;
import org.h2.value.TypeInfo;
import org.h2.value.Value;

/**
 * This class wraps a user-defined function.
 */
public final class JavaFunction extends Expression implements NamedExpression {

    // The alias (schema object) this call resolves to
    private final FunctionAlias functionAlias;
    // The concrete Java method selected for the given argument list
    private final FunctionAlias.JavaMethod javaMethod;
    private final Expression[] args;

    /**
     * Creates a call to a user-defined function.
     *
     * @param functionAlias
     *            the function alias to call
     * @param args
     *            the argument expressions
     */
    public JavaFunction(FunctionAlias functionAlias, Expression[] args) {
        this.functionAlias = functionAlias;
        this.javaMethod = functionAlias.findJavaMethod(args);
        if (javaMethod.getDataType() == null) {
            // A void method cannot be used as an expression
            throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, getName());
        }
        this.args = args;
    }

    @Override
    public Value getValue(SessionLocal session) {
        return javaMethod.getValue(session, args, false);
    }

    @Override
    public TypeInfo getType() {
        return javaMethod.getDataType();
    }

    @Override
    public void mapColumns(ColumnResolver resolver, int level, int state) {
        for (Expression e : args) {
            e.mapColumns(resolver, level, state);
        }
    }

    @Override
    public Expression optimize(SessionLocal session) {
        // A deterministic function with all-constant arguments can be
        // evaluated once at optimization time
        boolean allConst = functionAlias.isDeterministic();
        for (int i = 0, len = args.length; i < len; i++) {
            Expression e = args[i].optimize(session);
            args[i] = e;
            allConst &= e.isConstant();
        }
        if (allConst) {
            return ValueExpression.get(getValue(session));
        }
        return this;
    }

    @Override
    public void setEvaluatable(TableFilter tableFilter, boolean b) {
        for (Expression e : args) {
            if (e != null) {
                e.setEvaluatable(tableFilter, b);
            }
        }
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        return writeExpressions(functionAlias.getSQL(builder, sqlFlags).append('('), args, sqlFlags).append(')');
    }

    @Override
    public void updateAggregate(SessionLocal session, int stage) {
        for (Expression e : args) {
            if (e != null) {
                e.updateAggregate(session, stage);
            }
        }
    }

    @Override
    public String getName() {
        return functionAlias.getName();
    }

    @Override
    public boolean isEverything(ExpressionVisitor visitor) {
        switch (visitor.getType()) {
        case ExpressionVisitor.DETERMINISTIC:
        case ExpressionVisitor.READONLY:
        case ExpressionVisitor.QUERY_COMPARABLE:
            if (!functionAlias.isDeterministic()) {
                return false;
            }
            // only if all parameters are deterministic as well
            break;
        case ExpressionVisitor.GET_DEPENDENCIES:
            visitor.addDependency(functionAlias);
            break;
        default:
        }
        // All arguments must also satisfy the visitor
        for (Expression e : args) {
            if (e != null && !e.isEverything(visitor)) {
                return false;
            }
        }
        return true;
    }

    @Override
    public int getCost() {
        // Methods that receive a Connection are assumed to be more expensive
        int cost = javaMethod.hasConnectionParam() ? 25 : 5;
        for (Expression e : args) {
            cost += e.getCost();
        }
        return cost;
    }

    @Override
    public int getSubexpressionCount() {
        return args.length;
    }

    @Override
    public Expression getSubexpression(int index) {
        return args[index];
    }

}
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.io.ByteArrayOutputStream;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionWithFlags;
import org.h2.expression.Format;
import org.h2.expression.OperationN;
import org.h2.expression.Subquery;
import org.h2.expression.TypedValueExpression;
import org.h2.message.DbException;
import org.h2.util.json.JsonConstructorUtils;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueJson;
import org.h2.value.ValueNull;

/**
 * JSON constructor function.
 */
public final class JsonConstructorFunction extends OperationN implements ExpressionWithFlags, NamedExpression {

    // true for JSON_ARRAY, false for JSON_OBJECT
    private final boolean array;

    // JSON_* flags, see JsonConstructorUtils (e.g. ABSENT ON NULL)
    private int flags;

    /**
     * Creates a new instance of JSON constructor function.
     *
     * @param array
     *            {@code false} for {@code JSON_OBJECT}, {@code true} for
     *            {@code JSON_ARRAY}.
     */
    public JsonConstructorFunction(boolean array) {
        super(new Expression[4]);
        this.array = array;
    }

    @Override
    public void setFlags(int flags) {
        this.flags = flags;
    }

    @Override
    public int getFlags() {
        return flags;
    }

    @Override
    public Value getValue(SessionLocal session) {
        return array ? jsonArray(session, args) : jsonObject(session, args);
    }

    /**
     * Builds a JSON object from alternating key/value argument pairs.
     * A NULL key is rejected; NULL values are skipped or emitted as JSON null
     * depending on the ABSENT ON NULL flag.
     */
    private Value jsonObject(SessionLocal session, Expression[] args) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        baos.write('{');
        for (int i = 0, l = args.length; i < l;) {
            String name = args[i++].getValue(session).getString();
            if (name == null) {
                throw DbException.getInvalidValueException("JSON_OBJECT key", "NULL");
            }
            Value value = args[i++].getValue(session);
            if (value == ValueNull.INSTANCE) {
                if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) {
                    // ABSENT ON NULL: drop this key/value pair entirely
                    continue;
                } else {
                    value = ValueJson.NULL;
                }
            }
            JsonConstructorUtils.jsonObjectAppend(baos, name, value);
        }
        // jsonObjectFinish also enforces WITH UNIQUE KEYS when requested
        return JsonConstructorUtils.jsonObjectFinish(baos, flags);
    }

    /**
     * Builds a JSON array from the arguments. A single subquery argument
     * (optionally wrapped in FORMAT) is expanded row by row instead of being
     * treated as one scalar value.
     */
    private Value jsonArray(SessionLocal session, Expression[] args) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        baos.write('[');
        int l = args.length;
        evaluate: {
            if (l == 1) {
                Expression arg0 = args[0];
                if (arg0 instanceof Subquery) {
                    // JSON_ARRAY(query): one element per result row
                    Subquery q = (Subquery) arg0;
                    for (Value value : q.getAllRows(session)) {
                        JsonConstructorUtils.jsonArrayAppend(baos, value, flags);
                    }
                    break evaluate;
                } else if (arg0 instanceof Format) {
                    // JSON_ARRAY(query FORMAT JSON): same, applying the format
                    Format format = (Format) arg0;
                    arg0 = format.getSubexpression(0);
                    if (arg0 instanceof Subquery) {
                        Subquery q = (Subquery) arg0;
                        for (Value value : q.getAllRows(session)) {
                            JsonConstructorUtils.jsonArrayAppend(baos, format.getValue(value), flags);
                        }
                        break evaluate;
                    }
                }
            }
            // Default: each argument becomes one array element
            for (int i = 0; i < l;) {
                JsonConstructorUtils.jsonArrayAppend(baos, args[i++].getValue(session), flags);
            }
        }
        baos.write(']');
        return ValueJson.getInternal(baos.toByteArray());
    }

    @Override
    public Expression optimize(SessionLocal session) {
        boolean allConst = optimizeArguments(session, true);
        type = TypeInfo.TYPE_JSON;
        if (allConst) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) {
        builder.append(getName()).append('(');
        if (array) {
            writeExpressions(builder, args, sqlFlags);
        } else {
            // JSON_OBJECT arguments are written as "key: value" pairs
            for (int i = 0, l = args.length; i < l;) {
                if (i > 0) {
                    builder.append(", ");
                }
                args[i++].getUnenclosedSQL(builder, sqlFlags).append(": ");
                args[i++].getUnenclosedSQL(builder, sqlFlags);
            }
        }
        return getJsonFunctionFlagsSQL(builder, flags, array).append(')');
    }

    /**
     * Appends flags of a JSON function to the specified string builder.
     *
     * @param builder
     *            string builder to append to
     * @param flags
     *            flags to append
     * @param forArray
     *            whether the function is an array function
     * @return the specified string builder
     */
    public static StringBuilder getJsonFunctionFlagsSQL(StringBuilder builder, int flags, boolean forArray) {
        // NULL ON NULL is the default for objects, ABSENT ON NULL for arrays,
        // so only the non-default clause is written out
        if ((flags & JsonConstructorUtils.JSON_ABSENT_ON_NULL) != 0) {
            if (!forArray) {
                builder.append(" ABSENT ON NULL");
            }
        } else if (forArray) {
            builder.append(" NULL ON NULL");
        }
        if (!forArray && (flags & JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS) != 0) {
            builder.append(" WITH UNIQUE KEYS");
        }
        return builder;
    }

    @Override
    public String getName() {
        return array ? "JSON_ARRAY" : "JSON_OBJECT";
    }

}
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * CHAR_LENGTH(), or OCTET_LENGTH() function. + */ +public final class LengthFunction extends Function1 { + + /** + * CHAR_LENGTH(). + */ + public static final int CHAR_LENGTH = 0; + + /** + * OCTET_LENGTH(). + */ + public static final int OCTET_LENGTH = CHAR_LENGTH + 1; + + /** + * BIT_LENGTH() (non-standard). + */ + public static final int BIT_LENGTH = OCTET_LENGTH + 1; + + private static final String[] NAMES = { // + "CHAR_LENGTH", "OCTET_LENGTH", "BIT_LENGTH" // + }; + + private final int function; + + public LengthFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + long l; + switch (function) { + case CHAR_LENGTH: + l = v.charLength(); + break; + case OCTET_LENGTH: + l = v.octetLength(); + break; + case BIT_LENGTH: + l = v.octetLength() * 8; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueBigint.get(l); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_BIGINT; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/MathFunction.java b/h2/src/main/org/h2/expression/function/MathFunction.java new file mode 100644 index 0000000000..cfae2b4a9e --- /dev/null +++ 
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import java.math.BigDecimal;
import java.math.RoundingMode;

import org.h2.api.ErrorCode;
import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.TypedValueExpression;
import org.h2.message.DbException;
import org.h2.value.DataType;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueBigint;
import org.h2.value.ValueDecfloat;
import org.h2.value.ValueDouble;
import org.h2.value.ValueInteger;
import org.h2.value.ValueNull;
import org.h2.value.ValueNumeric;
import org.h2.value.ValueReal;

/**
 * A math function.
 */
public final class MathFunction extends Function1_2 {

    /**
     * ABS().
     */
    public static final int ABS = 0;

    /**
     * MOD().
     */
    public static final int MOD = ABS + 1;

    /**
     * FLOOR().
     */
    public static final int FLOOR = MOD + 1;

    /**
     * CEIL() or CEILING().
     */
    public static final int CEIL = FLOOR + 1;

    /**
     * ROUND() (non-standard)
     */
    public static final int ROUND = CEIL + 1;

    /**
     * ROUNDMAGIC() (non-standard)
     */
    public static final int ROUNDMAGIC = ROUND + 1;

    /**
     * SIGN() (non-standard)
     */
    public static final int SIGN = ROUNDMAGIC + 1;

    /**
     * TRUNC() (non-standard)
     */
    public static final int TRUNC = SIGN + 1;

    private static final String[] NAMES = { //
            "ABS", "MOD", "FLOOR", "CEIL", "ROUND", "ROUNDMAGIC", "SIGN", "TRUNC" //
    };

    // One of the function codes above
    private final int function;

    // For MOD only: the type both operands are converted to before the
    // modulus operation; computed in optimize()
    private TypeInfo commonType;

    /**
     * Creates a new math function.
     *
     * @param arg1
     *            the first argument
     * @param arg2
     *            the optional second argument, may be {@code null}
     * @param function
     *            one of the function codes declared in this class
     */
    public MathFunction(Expression arg1, Expression arg2, int function) {
        super(arg1, arg2);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session, Value v1, Value v2) {
        switch (function) {
        case ABS:
            if (v1.getSignum() < 0) {
                v1 = v1.negate();
            }
            break;
        case MOD:
            v1 = v1.convertTo(commonType, session).modulus(v2.convertTo(commonType, session)).convertTo(type, session);
            break;
        case FLOOR:
            v1 = round(v1, v2, RoundingMode.FLOOR);
            break;
        case CEIL:
            v1 = round(v1, v2, RoundingMode.CEILING);
            break;
        case ROUND:
            v1 = round(v1, v2, RoundingMode.HALF_UP);
            break;
        case ROUNDMAGIC:
            v1 = ValueDouble.get(roundMagic(v1.getDouble()));
            break;
        case SIGN:
            v1 = ValueInteger.get(v1.getSignum());
            break;
        case TRUNC:
            // TRUNC is rounding towards zero
            v1 = round(v1, v2, RoundingMode.DOWN);
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v1;
    }

    /**
     * Rounds the value to the scale in {@code v2} (0 if absent) with the given
     * rounding mode, producing a result of this expression's result type.
     *
     * @param v1
     *            the value to round
     * @param v2
     *            the scale argument, or {@code null} for scale 0
     * @param roundingMode
     *            the rounding mode to apply
     * @return the rounded value
     */
    @SuppressWarnings("incomplete-switch")
    private Value round(Value v1, Value v2, RoundingMode roundingMode) {
        int scale = v2 != null ? v2.getInt() : 0;
        int t = type.getValueType();
        c: switch (t) {
        case Value.TINYINT:
        case Value.SMALLINT:
        case Value.INTEGER:
        case Value.BIGINT: {
            // Integer types only change for a negative scale (e.g.
            // ROUND(1234, -2)); positive scales leave them untouched
            if (scale < 0) {
                long original = v1.getLong();
                long scaled = BigDecimal.valueOf(original).setScale(scale, roundingMode).longValue();
                if (original != scaled) {
                    v1 = ValueBigint.get(scaled).convertTo(type);
                }
            }
            break;
        }
        case Value.NUMERIC: {
            int targetScale = type.getScale();
            BigDecimal bd = v1.getBigDecimal();
            if (scale < targetScale) {
                bd = bd.setScale(scale, roundingMode);
            }
            // Always normalize to the scale of the result type
            v1 = ValueNumeric.get(bd.setScale(targetScale, roundingMode));
            break;
        }
        case Value.REAL:
        case Value.DOUBLE: {
            // Fast path for scale 0 with directed rounding modes, using
            // Math.floor/ceil instead of BigDecimal
            l: if (scale == 0) {
                double d;
                switch (roundingMode) {
                case DOWN:
                    d = v1.getDouble();
                    d = d < 0 ? Math.ceil(d) : Math.floor(d);
                    break;
                case CEILING:
                    d = Math.ceil(v1.getDouble());
                    break;
                case FLOOR:
                    d = Math.floor(v1.getDouble());
                    break;
                default:
                    // HALF_UP etc. fall back to the BigDecimal path below
                    break l;
                }
                v1 = t == Value.REAL ? ValueReal.get((float) d) : ValueDouble.get(d);
                break c;
            }
            BigDecimal bd = v1.getBigDecimal().setScale(scale, roundingMode);
            v1 = t == Value.REAL ? ValueReal.get(bd.floatValue()) : ValueDouble.get(bd.doubleValue());
            break;
        }
        case Value.DECFLOAT:
            v1 = ValueDecfloat.get(v1.getBigDecimal().setScale(scale, roundingMode));
        }
        return v1;
    }

    /**
     * Heuristic cleanup of binary floating point noise, based on the decimal
     * string representation of the value: trailing ...000x / ...999x digit
     * patterns are snapped to the nearby "clean" value. Values outside
     * [-1e12, 1e12], very small values, and values in scientific notation are
     * returned unchanged.
     */
    private static double roundMagic(double d) {
        if ((d < 0.000_000_000_000_1) && (d > -0.000_000_000_000_1)) {
            return 0.0;
        }
        if ((d > 1_000_000_000_000d) || (d < -1_000_000_000_000d)) {
            return d;
        }
        StringBuilder s = new StringBuilder();
        s.append(d);
        if (s.toString().indexOf('E') >= 0) {
            return d;
        }
        int len = s.length();
        if (len < 16) {
            return d;
        }
        if (s.toString().indexOf('.') > len - 3) {
            return d;
        }
        // Drop the last two digits, then inspect the new trailing digits
        s.delete(len - 2, len);
        len -= 2;
        char c1 = s.charAt(len - 2);
        char c2 = s.charAt(len - 3);
        char c3 = s.charAt(len - 4);
        if ((c1 == '0') && (c2 == '0') && (c3 == '0')) {
            s.setCharAt(len - 1, '0');
        } else if ((c1 == '9') && (c2 == '9') && (c3 == '9')) {
            s.setCharAt(len - 1, '9');
            s.append('9');
            s.append('9');
            s.append('9');
        }
        return Double.parseDouble(s.toString());
    }

    @Override
    public Expression optimize(SessionLocal session) {
        left = left.optimize(session);
        if (right != null) {
            right = right.optimize(session);
        }
        switch (function) {
        case ABS:
            type = left.getType();
            if (type.getValueType() == Value.NULL) {
                type = TypeInfo.TYPE_NUMERIC_FLOATING_POINT;
            }
            break;
        case FLOOR:
        case CEIL: {
            Expression e = optimizeRound(0, true, false, true);
            if (e != null) {
                return e;
            }
            break;
        }
        case MOD:
            TypeInfo divisorType = right.getType();
            commonType = TypeInfo.getHigherType(left.getType(), divisorType);
            int valueType = commonType.getValueType();
            if (valueType == Value.NULL) {
                commonType = TypeInfo.TYPE_BIGINT;
            } else if (!DataType.isNumericType(valueType)) {
                // Report the first non-numeric operand
                throw DbException.getInvalidExpressionTypeException("MOD argument",
                        DataType.isNumericType(left.getType().getValueType()) ? right : left);
            }
            // The result type follows the divisor when it is numeric
            type = DataType.isNumericType(divisorType.getValueType()) ? divisorType : commonType;
            break;
        case ROUND: {
            Expression e = optimizeRoundWithScale(session, true);
            if (e != null) {
                return e;
            }
            break;
        }
        case ROUNDMAGIC:
            type = TypeInfo.TYPE_DOUBLE;
            break;
        case SIGN:
            type = TypeInfo.TYPE_INTEGER;
            break;
        case TRUNC:
            // TRUNC on date-time values is rewritten to other expressions
            switch (left.getType().getValueType()) {
            case Value.VARCHAR:
                left = new CastSpecification(left, TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, 0, null))
                        .optimize(session);
                //$FALL-THROUGH$
            case Value.TIMESTAMP:
            case Value.TIMESTAMP_TZ:
                if (right != null) {
                    throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, "TRUNC", "1");
                }
                return new DateTimeFunction(DateTimeFunction.DATE_TRUNC, DateTimeFunction.DAY, left, null)
                        .optimize(session);
            case Value.DATE:
                if (right != null) {
                    throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, "TRUNC", "1");
                }
                return new CastSpecification(left, TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, 0, null))
                        .optimize(session);
            default: {
                Expression e = optimizeRoundWithScale(session, false);
                if (e != null) {
                    return e;
                }
            }
            }
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        if (left.isConstant() && (right == null || right.isConstant())) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    /**
     * Determines the scale argument (if it is known at optimization time) and
     * delegates to {@link #optimizeRound}.
     *
     * @param session
     *            the session
     * @param possibleRoundUp
     *            {@code true} if rounding may increase the precision
     * @return the optimized expression or {@code null} if this function should
     *         be used
     */
    private Expression optimizeRoundWithScale(SessionLocal session, boolean possibleRoundUp) {
        int scale;
        boolean scaleIsKnown = false, scaleIsNull = false;
        if (right != null) {
            if (right.isConstant()) {
                Value scaleValue = right.getValue(session);
                scaleIsKnown = true;
                if (scaleValue != ValueNull.INSTANCE) {
                    scale = scaleValue.getInt();
                } else {
                    scale = -1;
                    scaleIsNull = true;
                }
            } else {
                // Non-constant scale: value unknown until evaluation
                scale = -1;
            }
        } else {
            // No scale argument means scale 0
            scale = 0;
            scaleIsKnown = true;
        }
        return optimizeRound(scale, scaleIsKnown, scaleIsNull, possibleRoundUp);
    }

    /**
     * Optimizes rounding and truncation functions.
     *
     * @param scale
     *            the scale, if known
     * @param scaleIsKnown
     *            whether scale is known
     * @param scaleIsNull
     *            whether scale is {@code NULL}
     * @param possibleRoundUp
     *            {@code true} if result of rounding can have larger precision
     *            than precision of argument, {@code false} otherwise
     * @return the optimized expression or {@code null} if this function should
     *         be used
     */
    private Expression optimizeRound(int scale, boolean scaleIsKnown, boolean scaleIsNull, boolean possibleRoundUp) {
        TypeInfo leftType = left.getType();
        switch (leftType.getValueType()) {
        case Value.NULL:
            type = TypeInfo.TYPE_NUMERIC_SCALE_0;
            break;
        case Value.TINYINT:
        case Value.SMALLINT:
        case Value.INTEGER:
        case Value.BIGINT:
            // Rounding an integer to a non-negative scale is a no-op
            if (scaleIsKnown && scale >= 0) {
                return left;
            }
            type = leftType;
            break;
        case Value.REAL:
        case Value.DOUBLE:
        case Value.DECFLOAT:
            type = leftType;
            break;
        case Value.NUMERIC: {
            long precision;
            int originalScale = leftType.getScale();
            if (scaleIsKnown) {
                if (originalScale <= scale) {
                    // Requested scale does not change the value
                    return left;
                } else {
                    if (scale < 0) {
                        scale = 0;
                    } else if (scale > ValueNumeric.MAXIMUM_SCALE) {
                        scale = ValueNumeric.MAXIMUM_SCALE;
                    }
                    precision = leftType.getPrecision() - originalScale + scale;
                    if (possibleRoundUp) {
                        // e.g. ROUND(9.9, 0) = 10 needs one more digit
                        precision++;
                    }
                }
            } else {
                precision = leftType.getPrecision();
                if (possibleRoundUp) {
                    precision++;
                }
                scale = originalScale;
            }
            type = TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, null);
            break;
        }
        default:
            throw DbException.getInvalidExpressionTypeException(getName() + " argument", left);
        }
        if (scaleIsNull) {
            // NULL scale makes the whole result NULL
            return TypedValueExpression.get(ValueNull.INSTANCE, type);
        }
        return null;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
/dev/null +++ b/h2/src/main/org/h2/expression/function/MathFunction1.java @@ -0,0 +1,212 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; + +/** + * A math function with one argument and DOUBLE PRECISION result. + */ +public final class MathFunction1 extends Function1 { + + // Trigonometric functions + + /** + * SIN(). + */ + public static final int SIN = 0; + + /** + * COS(). + */ + public static final int COS = SIN + 1; + + /** + * TAN(). + */ + public static final int TAN = COS + 1; + + /** + * COT() (non-standard). + */ + public static final int COT = TAN + 1; + + /** + * SINH(). + */ + public static final int SINH = COT + 1; + + /** + * COSH(). + */ + public static final int COSH = SINH + 1; + + /** + * TANH(). + */ + public static final int TANH = COSH + 1; + + /** + * ASIN(). + */ + public static final int ASIN = TANH + 1; + + /** + * ACOS(). + */ + public static final int ACOS = ASIN + 1; + + /** + * ATAN(). + */ + public static final int ATAN = ACOS + 1; + + // Logarithm functions + + /** + * LOG10(). + */ + public static final int LOG10 = ATAN + 1; + + /** + * LN(). + */ + public static final int LN = LOG10 + 1; + + // Exponential function + + /** + * EXP(). + */ + public static final int EXP = LN + 1; + + // Square root + + /** + * SQRT(). + */ + public static final int SQRT = EXP + 1; + + // Other non-standard + + /** + * DEGREES() (non-standard). + */ + public static final int DEGREES = SQRT + 1; + + /** + * RADIANS() (non-standard). 
+ */ + public static final int RADIANS = DEGREES + 1; + + private static final String[] NAMES = { // + "SIN", "COS", "TAN", "COT", "SINH", "COSH", "TANH", "ASIN", "ACOS", "ATAN", // + "LOG10", "LN", "EXP", "SQRT", "DEGREES", "RADIANS" // + }; + + private final int function; + + public MathFunction1(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + double d = v.getDouble(); + switch (function) { + case SIN: + d = Math.sin(d); + break; + case COS: + d = Math.cos(d); + break; + case TAN: + d = Math.tan(d); + break; + case COT: + d = Math.tan(d); + if (d == 0.0) { + throw DbException.get(ErrorCode.DIVISION_BY_ZERO_1, getTraceSQL()); + } + d = 1d / d; + break; + case SINH: + d = Math.sinh(d); + break; + case COSH: + d = Math.cosh(d); + break; + case TANH: + d = Math.tanh(d); + break; + case ASIN: + d = Math.asin(d); + break; + case ACOS: + d = Math.acos(d); + break; + case ATAN: + d = Math.atan(d); + break; + case LOG10: + if (d <= 0) { + throw DbException.getInvalidValueException("LOG10() argument", d); + } + d = Math.log10(d); + break; + case LN: + if (d <= 0) { + throw DbException.getInvalidValueException("LN() argument", d); + } + d = Math.log(d); + break; + case EXP: + d = Math.exp(d); + break; + case SQRT: + d = Math.sqrt(d); + break; + case DEGREES: + d = Math.toDegrees(d); + break; + case RADIANS: + d = Math.toRadians(d); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueDouble.get(d); + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_DOUBLE; + if (arg.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/MathFunction2.java b/h2/src/main/org/h2/expression/function/MathFunction2.java new file mode 100644 index 0000000000..52dff56652 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/MathFunction2.java @@ -0,0 +1,100 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; + +/** + * A math function with two arguments and DOUBLE PRECISION result. + */ +public final class MathFunction2 extends Function2 { + + /** + * ATAN2() (non-standard). + */ + public static final int ATAN2 = 0; + + /** + * LOG(). + */ + public static final int LOG = ATAN2 + 1; + + /** + * POWER(). 
+ */ + public static final int POWER = LOG + 1; + + private static final String[] NAMES = { // + "ATAN2", "LOG", "POWER" // + }; + + private final int function; + + public MathFunction2(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + double d1 = v1.getDouble(), d2 = v2.getDouble(); + switch (function) { + case ATAN2: + d1 = Math.atan2(d1, d2); + break; + case LOG: { + if (session.getMode().swapLogFunctionParameters) { + double t = d2; + d2 = d1; + d1 = t; + } + if (d2 <= 0) { + throw DbException.getInvalidValueException("LOG() argument", d2); + } + if (d1 <= 0 || d1 == 1) { + throw DbException.getInvalidValueException("LOG() base", d1); + } + if (d1 == Math.E) { + d1 = Math.log(d2); + } else if (d1 == 10d) { + d1 = Math.log10(d2); + } else { + d1 = Math.log(d2) / Math.log(d1); + } + break; + } + case POWER: + d1 = Math.pow(d1, d2); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return ValueDouble.get(d1); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = TypeInfo.TYPE_DOUBLE; + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/NamedExpression.java b/h2/src/main/org/h2/expression/function/NamedExpression.java new file mode 100644 index 0000000000..021c87ec13 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/NamedExpression.java @@ -0,0 +1,20 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

/**
 * A function-like expression with a name.
 */
public interface NamedExpression {

    /**
     * Get the name.
     *
     * @return the name in uppercase
     */
    String getName();

}

/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.expression.function;

import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.TypedValueExpression;
import org.h2.value.TypeInfo;
import org.h2.value.Value;
import org.h2.value.ValueNull;

/**
 * A NULLIF function.
 */
public final class NullIfFunction extends Function2 {

    public NullIfFunction(Expression arg1, Expression arg2) {
        super(arg1, arg2);
    }

    @Override
    public Value getValue(SessionLocal session) {
        // NULLIF(a, b) is NULL when a = b, otherwise a
        Value v = left.getValue(session);
        if (session.compareWithNull(v, right.getValue(session), true) == 0) {
            v = ValueNull.INSTANCE;
        }
        return v;
    }

    @Override
    public Expression optimize(SessionLocal session) {
        left = left.optimize(session);
        right = right.optimize(session);
        // The result type is the type of the first argument
        type = left.getType();
        // Both arguments must be mutually comparable
        TypeInfo.checkComparable(type, right.getType());
        if (left.isConstant() && right.isConstant()) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public String getName() {
        return "NULLIF";
    }

}
b/h2/src/main/org/h2/expression/function/RandFunction.java @@ -0,0 +1,124 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Random; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDouble; +import org.h2.value.ValueNull; +import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; + +/** + * A RAND, SECURE_RAND, or RANDOM_UUID function. + */ +public final class RandFunction extends Function0_1 { + + /** + * RAND() (non-standard). + */ + public static final int RAND = 0; + + /** + * SECURE_RAND() (non-standard). + */ + public static final int SECURE_RAND = RAND + 1; + + /** + * RANDOM_UUID() (non-standard). 
+ */ + public static final int RANDOM_UUID = SECURE_RAND + 1; + + private static final String[] NAMES = { // + "RAND", "SECURE_RAND", "RANDOM_UUID" // + }; + + private final int function; + + public RandFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v; + if (arg != null) { + v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + } else { + v = null; + } + switch (function) { + case RAND: { + Random random = session.getRandom(); + if (v != null) { + random.setSeed(v.getInt()); + } + v = ValueDouble.get(random.nextDouble()); + break; + } + case SECURE_RAND: + v = ValueVarbinary.getNoCopy(MathUtils.secureRandomBytes(v.getInt())); + break; + case RANDOM_UUID: + v = ValueUuid.getNewRandom(); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + if (arg != null) { + arg = arg.optimize(session); + } + switch (function) { + case RAND: + type = TypeInfo.TYPE_DOUBLE; + break; + case SECURE_RAND: { + Value v; + type = arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE + ? 
TypeInfo.getTypeInfo(Value.VARBINARY, Math.max(v.getInt(), 1), 0, null) + : TypeInfo.TYPE_VARBINARY; + break; + } + case RANDOM_UUID: + type = TypeInfo.TYPE_UUID; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/RegexpFunction.java b/h2/src/main/org/h2/expression/function/RegexpFunction.java new file mode 100644 index 0000000000..a3c1928ab0 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/RegexpFunction.java @@ -0,0 +1,270 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import org.h2.api.ErrorCode; +import org.h2.engine.Mode; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * A regular expression function. + */ +public final class RegexpFunction extends FunctionN { + + /** + * REGEXP_LIKE() (non-standard). + */ + public static final int REGEXP_LIKE = 0; + + /** + * REGEXP_REPLACE() (non-standard). + */ + public static final int REGEXP_REPLACE = REGEXP_LIKE + 1; + + /** + * REGEXP_SUBSTR() (non-standard). 
     */
    public static final int REGEXP_SUBSTR = REGEXP_REPLACE + 1;

    private static final String[] NAMES = { //
            "REGEXP_LIKE", "REGEXP_REPLACE", "REGEXP_SUBSTR" //
    };

    /** Which of the functions above this instance implements. */
    private final int function;

    public RegexpFunction(int function) {
        // Args array is sized for the maximum argument count of the function;
        // trailing slots stay null until filled in — presumably by the parser
        // (TODO confirm against caller).
        super(new Expression[function == REGEXP_LIKE ? 3 : 6]);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session) {
        Value v1 = args[0].getValue(session);
        Value v2 = args[1].getValue(session);
        int length = args.length;
        switch (function) {
        case REGEXP_LIKE: {
            // Any NULL argument (input, pattern, or flags) yields NULL
            Value v3 = length >= 3 ? args[2].getValue(session) : null;
            if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE || v3 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            String regexp = v2.getString();
            String regexpMode = v3 != null ? v3.getString() : null;
            int flags = makeRegexpFlags(regexpMode, false);
            try {
                // find(): a partial match anywhere in the input is enough
                v1 = ValueBoolean.get(Pattern.compile(regexp, flags).matcher(v1.getString()).find());
            } catch (PatternSyntaxException e) {
                throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp);
            }
            break;
        }
        case REGEXP_REPLACE: {
            String input = v1.getString();
            if (session.getMode().getEnum() == ModeEnum.Oracle) {
                // Oracle form: up to 6 arguments (position, occurrence, mode);
                // NULL pattern/replacement are treated as empty strings
                String replacement = args[2].getValue(session).getString();
                int position = length >= 4 ? args[3].getValue(session).getInt() : 1;
                int occurrence = length >= 5 ? args[4].getValue(session).getInt() : 0;
                String regexpMode = length >= 6 ? args[5].getValue(session).getString() : null;
                if (input == null) {
                    v1 = ValueNull.INSTANCE;
                } else {
                    String regexp = v2.getString();
                    v1 = regexpReplace(session, input, regexp != null ? regexp : "",
                            replacement != null ? replacement : "", position, occurrence, regexpMode);
                }
            } else {
                // Non-Oracle form: 3 or 4 arguments only, NULL propagates
                if (length > 4) {
                    throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "3..4");
                }
                Value v3 = args[2].getValue(session);
                Value v4 = length == 4 ? args[3].getValue(session) : null;
                if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE || v3 == ValueNull.INSTANCE
                        || v4 == ValueNull.INSTANCE) {
                    v1 = ValueNull.INSTANCE;
                } else {
                    v1 = regexpReplace(session, input, v2.getString(), v3.getString(), 1, 0,
                            v4 != null ? v4.getString() : null);
                }
            }
            break;
        }
        case REGEXP_SUBSTR: {
            Value v3 = length >= 3 ? args[2].getValue(session) : null;
            Value v4 = length >= 4 ? args[3].getValue(session) : null;
            Value v5 = length >= 5 ? args[4].getValue(session) : null;
            Value v6 = length >= 6 ? args[5].getValue(session) : null;
            v1 = regexpSubstr(v1, v2, v3, v4, v5, v6, session);
            break;
        }
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v1;
    }

    /**
     * Replaces occurrences of a regular expression in the input.
     *
     * @param session the session (used for mode and value creation)
     * @param input the string to search in
     * @param regexp the regular expression
     * @param replacement the replacement text
     * @param position 1-based position to start searching from
     * @param occurrence which match to replace; 0 replaces all matches
     * @param regexpMode flag characters, see {@code makeRegexpFlags}
     * @return the resulting string value
     */
    private static Value regexpReplace(SessionLocal session, String input, String regexp, String replacement,
            int position, int occurrence, String regexpMode) {
        Mode mode = session.getMode();
        if (mode.regexpReplaceBackslashReferences) {
            // Rewrite backslash group references (\1) into Java's $1 form and
            // escape literal '$' characters so they are not misread as groups.
            if ((replacement.indexOf('\\') >= 0) || (replacement.indexOf('$') >= 0)) {
                StringBuilder sb = new StringBuilder();
                for (int i = 0; i < replacement.length(); i++) {
                    char c = replacement.charAt(i);
                    if (c == '$') {
                        sb.append('\\');
                    } else if (c == '\\' && ++i < replacement.length()) {
                        c = replacement.charAt(i);
                        sb.append(c >= '0' && c <= '9' ? '$' : '\\');
                    }
                    sb.append(c);
                }
                replacement = sb.toString();
            }
        }
        boolean isInPostgreSqlMode = mode.getEnum() == ModeEnum.PostgreSQL;
        int flags = makeRegexpFlags(regexpMode, isInPostgreSqlMode);
        // In PostgreSQL mode only the first match is replaced unless the
        // 'g' (global) flag is given.
        if (isInPostgreSqlMode && (regexpMode == null || regexpMode.isEmpty() || !regexpMode.contains("g"))) {
            occurrence = 1;
        }
        try {
            Matcher matcher = Pattern.compile(regexp, flags).matcher(input).region(position - 1, input.length());
            if (occurrence == 0) {
                return ValueVarchar.get(matcher.replaceAll(replacement), session);
            } else {
                // Replace only the requested occurrence, keep everything else
                StringBuffer sb = new StringBuffer();
                int index = 1;
                while (matcher.find()) {
                    if (index == occurrence) {
                        matcher.appendReplacement(sb, replacement);
                        break;
                    }
                    index++;
                }
                matcher.appendTail(sb);
                return ValueVarchar.get(sb.toString(), session);
            }
        } catch (PatternSyntaxException e) {
            throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp);
        } catch (StringIndexOutOfBoundsException | IllegalArgumentException e) {
            // Thrown by appendReplacement for invalid group references
            throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, replacement);
        }
    }

    /**
     * Extracts a substring matching a regular expression.
     *
     * @param inputString the string to search in
     * @param regexpArg the regular expression
     * @param positionArg 1-based start position, may be null (default 1)
     * @param occurrenceArg which match to return, may be null (default 1)
     * @param regexpModeArg flag characters, may be null
     * @param subexpressionArg capture group number, may be null (default 0,
     *            i.e. the whole match)
     * @param session the session
     * @return the matched substring, or NULL if there is no match
     */
    private static Value regexpSubstr(Value inputString, Value regexpArg, Value positionArg, Value occurrenceArg,
            Value regexpModeArg, Value subexpressionArg, SessionLocal session) {
        // NOTE: a NULL regexpMode argument is intentionally absent from this
        // check; all other NULL arguments yield NULL
        if (inputString == ValueNull.INSTANCE || regexpArg == ValueNull.INSTANCE || positionArg == ValueNull.INSTANCE
                || occurrenceArg == ValueNull.INSTANCE || subexpressionArg == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        String regexp = regexpArg.getString();

        int position = positionArg != null ? positionArg.getInt() - 1 : 0;
        int requestedOccurrence = occurrenceArg != null ? occurrenceArg.getInt() : 1;
        String regexpMode = regexpModeArg != null ? regexpModeArg.getString() : null;
        int subexpression = subexpressionArg != null ? subexpressionArg.getInt() : 0;
        int flags = makeRegexpFlags(regexpMode, false);
        try {
            Matcher m = Pattern.compile(regexp, flags).matcher(inputString.getString());

            // Skip forward to the requested occurrence
            boolean found = m.find(position);
            for (int occurrence = 1; occurrence < requestedOccurrence && found; occurrence++) {
                found = m.find();
            }

            if (!found) {
                return ValueNull.INSTANCE;
            } else {
                return ValueVarchar.get(m.group(subexpression), session);
            }
        } catch (PatternSyntaxException e) {
            throw DbException.get(ErrorCode.LIKE_ESCAPE_ERROR_1, e, regexp);
        } catch (IndexOutOfBoundsException e) {
            // Requested capture group does not exist
            return ValueNull.INSTANCE;
        }
    }

    /**
     * Converts flag characters into {@link Pattern} flags: 'i' case
     * insensitive, 'c' case sensitive, 'n' dot matches newlines, 'm'
     * multiline; 'g' is accepted only when {@code ignoreGlobalFlag} is set.
     *
     * @param stringFlags the flag characters, may be null
     * @param ignoreGlobalFlag whether a 'g' flag is silently accepted
     * @return the combined Pattern flags (Unicode case always enabled)
     */
    private static int makeRegexpFlags(String stringFlags, boolean ignoreGlobalFlag) {
        int flags = Pattern.UNICODE_CASE;
        if (stringFlags != null) {
            for (int i = 0; i < stringFlags.length(); ++i) {
                switch (stringFlags.charAt(i)) {
                case 'i':
                    flags |= Pattern.CASE_INSENSITIVE;
                    break;
                case 'c':
                    flags &= ~Pattern.CASE_INSENSITIVE;
                    break;
                case 'n':
                    flags |= Pattern.DOTALL;
                    break;
                case 'm':
                    flags |= Pattern.MULTILINE;
                    break;
                case 'g':
                    if (ignoreGlobalFlag) {
                        break;
                    }
                    //$FALL-THROUGH$
                default:
                    throw DbException.get(ErrorCode.INVALID_VALUE_2, stringFlags);
                }
            }
        }
        return flags;
    }

    @Override
    public Expression optimize(SessionLocal session) {
        boolean allConst = optimizeArguments(session, true);
        int min, max;
        switch (function) {
        case REGEXP_LIKE:
            min = 2;
            max = 3;
            type = TypeInfo.TYPE_BOOLEAN;
            break;
        case REGEXP_REPLACE:
            min = 3;
            max = 6;
            type = TypeInfo.TYPE_VARCHAR;
            break;
        case REGEXP_SUBSTR:
            min = 2;
            max = 6;
            type = TypeInfo.TYPE_VARCHAR;
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        int len = args.length;
        if (len < min || len > max) {
            throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), min + ".." + max);
        }
        if (allConst) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ + public static final int CANCEL_SESSION = ABORT_SESSION + 1; + + private static final String[] NAMES = { // + "ABORT_SESSION", "CANCEL_SESSION" // + }; + + private final int function; + + public SessionControlFunction(Expression arg, int function) { + super(arg); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = arg.getValue(session); + if (v == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int targetSessionId = v.getInt(); + session.getUser().checkAdmin(); + loop: for (SessionLocal s : session.getDatabase().getSessions(false)) { + if (s.getId() == targetSessionId) { + Command c = s.getCurrentCommand(); + switch (function) { + case ABORT_SESSION: + if (c != null) { + c.cancel(); + } + s.close(); + return ValueBoolean.TRUE; + case CANCEL_SESSION: + if (c != null) { + c.cancel(); + return ValueBoolean.TRUE; + } + break loop; + default: + throw DbException.getInternalError("function=" + function); + } + } + } + return ValueBoolean.FALSE; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + type = TypeInfo.TYPE_BOOLEAN; + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.READONLY: + case ExpressionVisitor.QUERY_COMPARABLE: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SetFunction.java b/h2/src/main/org/h2/expression/function/SetFunction.java new file mode 100644 index 0000000000..6b85efccee --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SetFunction.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Variable; +import org.h2.message.DbException; +import org.h2.value.Value; + +/** + * A SET function. + */ +public final class SetFunction extends Function2 { + + public SetFunction(Expression arg1, Expression arg2) { + super(arg1, arg2); + } + + @Override + public Value getValue(SessionLocal session) { + Variable var = (Variable) left; + Value v = right.getValue(session); + session.setVariable(var.getName(), v); + return v; + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = right.getType(); + if (!(left instanceof Variable)) { + throw DbException.get(ErrorCode.CAN_ONLY_ASSIGN_TO_VARIABLE_1, left.getTraceSQL()); + } + return this; + } + + @Override + public String getName() { + return "SET"; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.READONLY: + return false; + default: + return true; + } + } + +} diff --git a/h2/src/main/org/h2/expression/function/SignalFunction.java b/h2/src/main/org/h2/expression/function/SignalFunction.java new file mode 100644 index 0000000000..b8f42d2563 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SignalFunction.java @@ -0,0 +1,49 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.regex.Pattern; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * A SIGNAL function. + */ +public final class SignalFunction extends Function2 { + + private static final Pattern SIGNAL_PATTERN = Pattern.compile("[0-9A-Z]{5}"); + + public SignalFunction(Expression arg1, Expression arg2) { + super(arg1, arg2); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + String sqlState = v1.getString(); + if (sqlState.startsWith("00") || !SIGNAL_PATTERN.matcher(sqlState).matches()) { + throw DbException.getInvalidValueException("SQLSTATE", sqlState); + } + throw DbException.fromUser(sqlState, v2.getString()); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + type = TypeInfo.TYPE_NULL; + return this; + } + + @Override + public String getName() { + return "SIGNAL"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SoundexFunction.java b/h2/src/main/org/h2/expression/function/SoundexFunction.java new file mode 100644 index 0000000000..b7165c341f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SoundexFunction.java @@ -0,0 +1,128 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.nio.charset.StandardCharsets; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; + +/** + * A SOUNDEX or DIFFERENCE function. 
+ */ +public final class SoundexFunction extends Function1_2 { + + /** + * SOUNDEX() (non-standard). + */ + public static final int SOUNDEX = 0; + + /** + * DIFFERENCE() (non-standard). + */ + public static final int DIFFERENCE = SOUNDEX + 1; + + private static final String[] NAMES = { // + "SOUNDEX", "DIFFERENCE" // + }; + + private static final byte[] SOUNDEX_INDEX = // + "71237128722455712623718272\000\000\000\000\000\00071237128722455712623718272" + .getBytes(StandardCharsets.ISO_8859_1); + + private final int function; + + public SoundexFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + switch (function) { + case SOUNDEX: + v1 = ValueVarchar.get(new String(getSoundex(v1.getString()), StandardCharsets.ISO_8859_1), session); + break; + case DIFFERENCE: { + v1 = ValueInteger.get(getDifference(v1.getString(), v2.getString())); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + private static int getDifference(String s1, String s2) { + // TODO function difference: compatibility with SQL Server and HSQLDB + byte[] b1 = getSoundex(s1), b2 = getSoundex(s2); + int e = 0; + for (int i = 0; i < 4; i++) { + if (b1[i] == b2[i]) { + e++; + } + } + return e; + } + + private static byte[] getSoundex(String s) { + byte[] chars = { '0', '0', '0', '0' }; + byte lastDigit = '0'; + for (int i = 0, j = 0, l = s.length(); i < l && j < 4; i++) { + char c = s.charAt(i); + if (c >= 'A' && c <= 'z') { + byte newDigit = SOUNDEX_INDEX[c - 'A']; + if (newDigit != 0) { + if (j == 0) { + chars[j++] = (byte) c; + lastDigit = newDigit; + } else if (newDigit <= '6') { + if (newDigit != lastDigit) { + chars[j++] = lastDigit = newDigit; + } + } else if (newDigit == '7') { + lastDigit = newDigit; + } + } + } + } + return chars; + } + + @Override + public Expression optimize(SessionLocal session) 
{ + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case SOUNDEX: + type = TypeInfo.getTypeInfo(Value.VARCHAR, 4, 0, null); + break; + case DIFFERENCE: + type = TypeInfo.TYPE_INTEGER; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/StringFunction.java b/h2/src/main/org/h2/expression/function/StringFunction.java new file mode 100644 index 0000000000..d34cfada92 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/StringFunction.java @@ -0,0 +1,244 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * An string function with multiple arguments. + */ +public final class StringFunction extends FunctionN { + + /** + * LOCATE() (non-standard). + */ + public static final int LOCATE = 0; + + /** + * INSERT() (non-standard). + */ + public static final int INSERT = LOCATE + 1; + + /** + * REPLACE() (non-standard). + */ + public static final int REPLACE = INSERT + 1; + + /** + * LPAD() (non-standard). + */ + public static final int LPAD = REPLACE + 1; + + /** + * RPAD() (non-standard). 
     */
    public static final int RPAD = LPAD + 1;

    /**
     * TRANSLATE() (non-standard).
     */
    public static final int TRANSLATE = RPAD + 1;

    private static final String[] NAMES = { //
            "LOCATE", "INSERT", "REPLACE", "LPAD", "RPAD", "TRANSLATE" //
    };

    /** Which of the functions above this instance implements. */
    private final int function;

    public StringFunction(Expression arg1, Expression arg2, Expression arg3, int function) {
        super(arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 });
        this.function = function;
    }

    public StringFunction(Expression arg1, Expression arg2, Expression arg3, Expression arg4, int function) {
        super(new Expression[] { arg1, arg2, arg3, arg4 });
        this.function = function;
    }

    public StringFunction(Expression[] args, int function) {
        super(args);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session) {
        Value v1 = args[0].getValue(session), v2 = args[1].getValue(session);
        switch (function) {
        case LOCATE: {
            // NULL in any argument yields NULL
            if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            Value v3 = args.length >= 3 ? args[2].getValue(session) : null;
            if (v3 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            v1 = ValueInteger.get(locate(v1.getString(), v2.getString(), v3 == null ? 1 : v3.getInt()));
            break;
        }
        case INSERT: {
            // NULL start or length leaves v1 unchanged (returned as-is)
            Value v3 = args[2].getValue(session), v4 = args[3].getValue(session);
            if (v2 != ValueNull.INSTANCE && v3 != ValueNull.INSTANCE) {
                String s = insert(v1.getString(), v2.getInt(), v3.getInt(), v4.getString());
                v1 = s != null ? ValueVarchar.get(s, session) : ValueNull.INSTANCE;
            }
            break;
        }
        case REPLACE: {
            if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            String after;
            if (args.length >= 3) {
                Value v3 = args[2].getValue(session);
                // In Oracle mode a NULL replacement means "delete matches"
                // instead of propagating NULL
                if (v3 == ValueNull.INSTANCE && session.getMode().getEnum() != ModeEnum.Oracle) {
                    return ValueNull.INSTANCE;
                }
                after = v3.getString();
                if (after == null) {
                    after = "";
                }
            } else {
                after = "";
            }
            v1 = ValueVarchar.get(StringUtils.replaceAll(v1.getString(), v2.getString(), after), session);
            break;
        }
        case LPAD:
        case RPAD:
            if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            String padding;
            if (args.length >= 3) {
                Value v3 = args[2].getValue(session);
                if (v3 == ValueNull.INSTANCE) {
                    return ValueNull.INSTANCE;
                }
                padding = v3.getString();
            } else {
                // null padding: StringUtils.pad uses its default
                padding = null;
            }
            v1 = ValueVarchar.get(StringUtils.pad(v1.getString(), v2.getInt(), padding, function == RPAD), session);
            break;
        case TRANSLATE: {
            if (v1 == ValueNull.INSTANCE || v2 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            Value v3 = args[2].getValue(session);
            if (v3 == ValueNull.INSTANCE) {
                return ValueNull.INSTANCE;
            }
            String matching = v2.getString();
            String replacement = v3.getString();
            // DB2 mode uses the opposite argument order (replacement first)
            if (session.getMode().getEnum() == ModeEnum.DB2) {
                String t = matching;
                matching = replacement;
                replacement = t;
            }
            v1 = ValueVarchar.get(translate(v1.getString(), matching, replacement), session);
            break;
        }
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v1;
    }

    /**
     * Finds the search string within s, returning a 1-based index or 0 when
     * not found. A negative start searches backwards from the end; 0 and 1
     * both mean "from the beginning".
     */
    private static int locate(String search, String s, int start) {
        if (start < 0) {
            return s.lastIndexOf(search, s.length() + start) + 1;
        }
        return s.indexOf(search, start == 0 ? 0 : start - 1) + 1;
    }

    /**
     * Replaces length characters of s1 starting at the 1-based position start
     * with s2. Out-of-range positions or non-positive lengths return s1
     * unchanged; a null operand returns the other operand.
     */
    private static String insert(String s1, int start, int length, String s2) {
        if (s1 == null) {
            return s2;
        }
        if (s2 == null) {
            return s1;
        }
        int len1 = s1.length();
        int len2 = s2.length();
        start--;
        if (start < 0 || length <= 0 || len2 == 0 || start > len1) {
            return s1;
        }
        if (start + length > len1) {
            length = len1 - start;
        }
        return s1.substring(0, start) + s2 + s1.substring(start + length);
    }

    /**
     * Replaces each character of original found in findChars with the
     * character at the same index in replaceChars; characters beyond the end
     * of replaceChars are removed.
     */
    private static String translate(String original, String findChars, String replaceChars) {
        if (StringUtils.isNullOrEmpty(original) || StringUtils.isNullOrEmpty(findChars)) {
            return original;
        }
        // if it stays null, then no replacements have been made
        StringBuilder builder = null;
        // if shorter than findChars, then characters are removed
        // (if null, we don't access replaceChars at all)
        int replaceSize = replaceChars == null ? 0 : replaceChars.length();
        for (int i = 0, size = original.length(); i < size; i++) {
            char ch = original.charAt(i);
            int index = findChars.indexOf(ch);
            if (index >= 0) {
                if (builder == null) {
                    // Lazily created on the first replaced character
                    builder = new StringBuilder(size);
                    if (i > 0) {
                        builder.append(original, 0, i);
                    }
                }
                if (index < replaceSize) {
                    ch = replaceChars.charAt(index);
                }
            }
            if (builder != null) {
                builder.append(ch);
            }
        }
        return builder == null ? original : builder.toString();
    }

    @Override
    public Expression optimize(SessionLocal session) {
        boolean allConst = optimizeArguments(session, true);
        switch (function) {
        case LOCATE:
            type = TypeInfo.TYPE_INTEGER;
            break;
        case INSERT:
        case REPLACE:
        case LPAD:
        case RPAD:
        case TRANSLATE:
            type = TypeInfo.TYPE_VARCHAR;
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        if (allConst) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
     */
    public static final int LOWER = UPPER + 1;

    // Various non-standard functions

    /**
     * ASCII() (non-standard).
     */
    public static final int ASCII = LOWER + 1;

    /**
     * CHAR() (non-standard).
     */
    public static final int CHAR = ASCII + 1;

    /**
     * STRINGENCODE() (non-standard).
     */
    public static final int STRINGENCODE = CHAR + 1;

    /**
     * STRINGDECODE() (non-standard).
     */
    public static final int STRINGDECODE = STRINGENCODE + 1;

    /**
     * STRINGTOUTF8() (non-standard).
     */
    public static final int STRINGTOUTF8 = STRINGDECODE + 1;

    /**
     * UTF8TOSTRING() (non-standard).
     */
    public static final int UTF8TOSTRING = STRINGTOUTF8 + 1;

    /**
     * HEXTORAW() (non-standard).
     */
    public static final int HEXTORAW = UTF8TOSTRING + 1;

    /**
     * RAWTOHEX() (non-standard).
     */
    public static final int RAWTOHEX = HEXTORAW + 1;

    /**
     * SPACE() (non-standard).
     */
    public static final int SPACE = RAWTOHEX + 1;

    /**
     * QUOTE_IDENT() (non-standard).
     */
    public static final int QUOTE_IDENT = SPACE + 1;

    private static final String[] NAMES = { //
            "UPPER", "LOWER", "ASCII", "CHAR", "STRINGENCODE", "STRINGDECODE", "STRINGTOUTF8", "UTF8TOSTRING",
            "HEXTORAW", "RAWTOHEX", "SPACE", "QUOTE_IDENT" //
    };

    /** Which of the functions above this instance implements. */
    private final int function;

    public StringFunction1(Expression arg, int function) {
        super(arg);
        this.function = function;
    }

    @Override
    public Value getValue(SessionLocal session) {
        Value v = arg.getValue(session);
        // NULL argument always yields NULL
        if (v == ValueNull.INSTANCE) {
            return ValueNull.INSTANCE;
        }
        switch (function) {
        case UPPER:
            // TODO this is locale specific, need to document or provide a way
            // to set the locale
            v = ValueVarchar.get(v.getString().toUpperCase(), session);
            break;
        case LOWER:
            // TODO this is locale specific, need to document or provide a way
            // to set the locale
            v = ValueVarchar.get(v.getString().toLowerCase(), session);
            break;
        case ASCII: {
            // Code point of the first character; empty string yields NULL
            String s = v.getString();
            v = s.isEmpty() ? ValueNull.INSTANCE : ValueInteger.get(s.charAt(0));
            break;
        }
        case CHAR:
            v = ValueVarchar.get(String.valueOf((char) v.getInt()), session);
            break;
        case STRINGENCODE:
            v = ValueVarchar.get(StringUtils.javaEncode(v.getString()), session);
            break;
        case STRINGDECODE:
            v = ValueVarchar.get(StringUtils.javaDecode(v.getString()), session);
            break;
        case STRINGTOUTF8:
            v = ValueVarbinary.getNoCopy(v.getString().getBytes(StandardCharsets.UTF_8));
            break;
        case UTF8TOSTRING:
            v = ValueVarchar.get(new String(v.getBytesNoCopy(), StandardCharsets.UTF_8), session);
            break;
        case HEXTORAW:
            v = hexToRaw(v.getString(), session);
            break;
        case RAWTOHEX:
            v = ValueVarchar.get(rawToHex(v, session.getMode()), session);
            break;
        case SPACE: {
            // Negative counts produce an empty string
            byte[] chars = new byte[Math.max(0, v.getInt())];
            Arrays.fill(chars, (byte) ' ');
            v = ValueVarchar.get(new String(chars, StandardCharsets.ISO_8859_1), session);
            break;
        }
        case QUOTE_IDENT:
            v = ValueVarchar.get(StringUtils.quoteIdentifier(v.getString()), session);
            break;
        default:
            throw DbException.getInternalError("function=" + function);
        }
        return v;
    }

    /**
     * Decodes a hex string. In Oracle mode each pair of hex digits is one
     * byte of a VARBINARY result; otherwise each group of four hex digits is
     * one UTF-16 character of a VARCHAR result.
     */
    private static Value hexToRaw(String s, SessionLocal session) {
        if (session.getMode().getEnum() == ModeEnum.Oracle) {
            return ValueVarbinary.get(StringUtils.convertHexToBytes(s));
        }
        int len = s.length();
        if (len % 4 != 0) {
            throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s);
        }
        StringBuilder builder = new StringBuilder(len / 4);
        for (int i = 0; i < len; i += 4) {
            try {
                builder.append((char) Integer.parseInt(s.substring(i, i + 4), 16));
            } catch (NumberFormatException e) {
                throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, s);
            }
        }
        return ValueVarchar.get(builder.toString(), session);
    }

    /**
     * Encodes a value as hex. Binary values use two hex digits per byte;
     * strings use the UTF-8 bytes in Oracle mode, or four hex digits per
     * UTF-16 character otherwise.
     */
    private static String rawToHex(Value v, Mode mode) {
        if (DataType.isBinaryStringOrSpecialBinaryType(v.getValueType())) {
            return StringUtils.convertBytesToHex(v.getBytesNoCopy());
        }
        String s = v.getString();
        if (mode.getEnum() == ModeEnum.Oracle) {
            return StringUtils.convertBytesToHex(s.getBytes(StandardCharsets.UTF_8));
        }
        int length = s.length();
        StringBuilder buff = new StringBuilder(4 * length);
        for (int i = 0; i < length; i++) {
            String hex = Integer.toHexString(s.charAt(i) & 0xffff);
            // Zero-pad each character to exactly four hex digits
            for (int j = hex.length(); j < 4; j++) {
                buff.append('0');
            }
            buff.append(hex);
        }
        return buff.toString();
    }

    @Override
    public Expression optimize(SessionLocal session) {
        arg = arg.optimize(session);
        switch (function) {
        /*
         * UPPER and LOWER may return string of different length for some
         * characters.
         */
        case UPPER:
        case LOWER:
        case STRINGENCODE:
        case SPACE:
        case QUOTE_IDENT:
            type = TypeInfo.TYPE_VARCHAR;
            break;
        case ASCII:
            type = TypeInfo.TYPE_INTEGER;
            break;
        case CHAR:
            type = TypeInfo.getTypeInfo(Value.VARCHAR, 1L, 0, null);
            break;
        case STRINGDECODE: {
            // Decoding never makes the string longer
            TypeInfo t = arg.getType();
            type = DataType.isCharacterStringType(t.getValueType())
                    ? TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision(), 0, null)
                    : TypeInfo.TYPE_VARCHAR;
            break;
        }
        case STRINGTOUTF8:
            type = TypeInfo.TYPE_VARBINARY;
            break;
        case UTF8TOSTRING: {
            TypeInfo t = arg.getType();
            type = DataType.isBinaryStringType(t.getValueType())
                    ? TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision(), 0, null)
                    : TypeInfo.TYPE_VARCHAR;
            break;
        }
        case HEXTORAW: {
            // Result precision mirrors the hex groupings in hexToRaw():
            // 2 digits per byte (Oracle), 4 digits per character otherwise
            TypeInfo t = arg.getType();
            if (session.getMode().getEnum() == ModeEnum.Oracle) {
                if (DataType.isCharacterStringType(t.getValueType())) {
                    type = TypeInfo.getTypeInfo(Value.VARBINARY, t.getPrecision() / 2, 0, null);
                } else {
                    type = TypeInfo.TYPE_VARBINARY;
                }
            } else {
                if (DataType.isCharacterStringType(t.getValueType())) {
                    type = TypeInfo.getTypeInfo(Value.VARCHAR, t.getPrecision() / 4, 0, null);
                } else {
                    type = TypeInfo.TYPE_VARCHAR;
                }
            }
            break;
        }
        case RAWTOHEX: {
            // Expansion factor: 2 for binary, 6 for Oracle-mode strings
            // (worst-case UTF-8 bytes), 4 otherwise; clamp at Long.MAX_VALUE
            TypeInfo t = arg.getType();
            long precision = t.getPrecision();
            int mul = DataType.isBinaryStringOrSpecialBinaryType(t.getValueType()) ? 2
                    : session.getMode().getEnum() == ModeEnum.Oracle ? 6 : 4;
            type = TypeInfo.getTypeInfo(Value.VARCHAR,
                    precision <= Long.MAX_VALUE / mul ? precision * mul : Long.MAX_VALUE, 0, null);
            break;
        }
        default:
            throw DbException.getInternalError("function=" + function);
        }
        if (arg.isConstant()) {
            return TypedValueExpression.getTypedIfNull(getValue(session), type);
        }
        return this;
    }

    @Override
    public String getName() {
        return NAMES[function];
    }

}
+ */ + public static final int REPEAT = RIGHT + 1; + + private static final String[] NAMES = { // + "LEFT", "RIGHT", "REPEAT" // + }; + + private final int function; + + public StringFunction2(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + String s = v1.getString(); + int count = v2.getInt(); + if (count <= 0) { + return ValueVarchar.get("", session); + } + int length = s.length(); + switch (function) { + case LEFT: + if (count > length) { + count = length; + } + s = s.substring(0, count); + break; + case RIGHT: + if (count > length) { + count = length; + } + s = s.substring(length - count); + break; + case REPEAT: { + StringBuilder builder = new StringBuilder(length * count); + while (count-- > 0) { + builder.append(s); + } + s = builder.toString(); + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return ValueVarchar.get(s, session); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + right = right.optimize(session); + switch (function) { + case LEFT: + case RIGHT: + type = TypeInfo.getTypeInfo(Value.VARCHAR, left.getType().getPrecision(), 0, null); + break; + case REPEAT: + type = TypeInfo.TYPE_VARCHAR; + break; + default: + throw DbException.getInternalError("function=" + function); + } + if (left.isConstant() && right.isConstant()) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SubstringFunction.java b/h2/src/main/org/h2/expression/function/SubstringFunction.java new file mode 100644 index 0000000000..b93e464e54 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SubstringFunction.java @@ -0,0 +1,126 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; + +/** + * A SUBSTRING function. + */ +public final class SubstringFunction extends FunctionN { + + public SubstringFunction() { + super(new Expression[3]); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + if (type.getValueType() == Value.VARBINARY) { + byte[] s = v1.getBytesNoCopy(); + int sl = s.length; + int start = v2.getInt(); + // These compatibility conditions violate the Standard + if (start == 0) { + start = 1; + } else if (start < 0) { + start = sl + start + 1; + } + int end = v3 == null ? Math.max(sl + 1, start) : start + v3.getInt(); + // SQL Standard requires "data exception - substring error" when + // end < start but H2 does not throw it for compatibility + start = Math.max(start, 1); + end = Math.min(end, sl + 1); + if (start > sl || end <= start) { + return ValueVarbinary.EMPTY; + } + start--; + end--; + if (start == 0 && end == s.length) { + return v1.convertTo(TypeInfo.TYPE_VARBINARY); + } + return ValueVarbinary.getNoCopy(Arrays.copyOfRange(s, start, end)); + } else { + String s = v1.getString(); + int sl = s.length(); + int start = v2.getInt(); + // These compatibility conditions violate the Standard + if (start == 0) { + start = 1; + } else if (start < 0) { + start = sl + start + 1; + } + int end = v3 == null ? 
Math.max(sl + 1, start) : start + v3.getInt(); + // SQL Standard requires "data exception - substring error" when + // end < start but H2 does not throw it for compatibility + start = Math.max(start, 1); + end = Math.min(end, sl + 1); + if (start > sl || end <= start) { + return session.getMode().treatEmptyStringsAsNull ? ValueNull.INSTANCE : ValueVarchar.EMPTY; + } + return ValueVarchar.get(s.substring(start - 1, end - 1), null); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int len = args.length; + if (len < 2 || len > 3) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "2..3"); + } + TypeInfo argType = args[0].getType(); + long p = argType.getPrecision(); + Expression arg = args[1]; + Value v; + if (arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE) { + // if only two arguments are used, + // subtract offset from first argument length + p -= v.getLong() - 1; + } + if (args.length == 3) { + arg = args[2]; + if (arg.isConstant() && (v = arg.getValue(session)) != ValueNull.INSTANCE) { + // if the third argument is constant it is at most this value + p = Math.min(p, v.getLong()); + } + } + p = Math.max(0, p); + type = TypeInfo.getTypeInfo( + DataType.isBinaryStringType(argType.getValueType()) ? 
Value.VARBINARY : Value.VARCHAR, p, 0, null); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + args[0].getUnenclosedSQL(builder.append(getName()).append('('), sqlFlags); + args[1].getUnenclosedSQL(builder.append(" FROM "), sqlFlags); + if (args.length > 2) { + args[2].getUnenclosedSQL(builder.append(" FOR "), sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getName() { + return "SUBSTRING"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/SysInfoFunction.java b/h2/src/main/org/h2/expression/function/SysInfoFunction.java new file mode 100644 index 0000000000..dd02010060 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/SysInfoFunction.java @@ -0,0 +1,176 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Database or session information function. + */ +public final class SysInfoFunction extends Operation0 implements NamedExpression { + + /** + * AUTOCOMMIT(). + */ + public static final int AUTOCOMMIT = 0; + + /** + * DATABASE_PATH(). + */ + public static final int DATABASE_PATH = AUTOCOMMIT + 1; + + /** + * H2VERSION(). + */ + public static final int H2VERSION = DATABASE_PATH + 1; + + /** + * LOCK_MODE(). 
+ */ + public static final int LOCK_MODE = H2VERSION + 1; + + /** + * LOCK_TIMEOUT(). + */ + public static final int LOCK_TIMEOUT = LOCK_MODE + 1; + + /** + * MEMORY_FREE(). + */ + public static final int MEMORY_FREE = LOCK_TIMEOUT + 1; + + /** + * MEMORY_USED(). + */ + public static final int MEMORY_USED = MEMORY_FREE + 1; + + /** + * READONLY(). + */ + public static final int READONLY = MEMORY_USED + 1; + + /** + * SESSION_ID(). + */ + public static final int SESSION_ID = READONLY + 1; + + /** + * TRANSACTION_ID(). + */ + public static final int TRANSACTION_ID = SESSION_ID + 1; + + private static final int[] TYPES = { Value.BOOLEAN, Value.VARCHAR, Value.VARCHAR, Value.INTEGER, Value.INTEGER, + Value.BIGINT, Value.BIGINT, Value.BOOLEAN, Value.INTEGER, Value.VARCHAR }; + + private static final String[] NAMES = { "AUTOCOMMIT", "DATABASE_PATH", "H2VERSION", "LOCK_MODE", "LOCK_TIMEOUT", + "MEMORY_FREE", "MEMORY_USED", "READONLY", "SESSION_ID", "TRANSACTION_ID" }; + + /** + * Get the name for this function id. + * + * @param function + * the function id + * @return the name + */ + public static String getName(int function) { + return NAMES[function]; + } + + private final int function; + + private final TypeInfo type; + + public SysInfoFunction(int function) { + this.function = function; + type = TypeInfo.getTypeInfo(TYPES[function]); + } + + @Override + public Value getValue(SessionLocal session) { + Value result; + switch (function) { + case AUTOCOMMIT: + result = ValueBoolean.get(session.getAutoCommit()); + break; + case DATABASE_PATH: { + String path = session.getDatabase().getDatabasePath(); + result = path != null ? 
ValueVarchar.get(path, session) : ValueNull.INSTANCE; + break; + } + case H2VERSION: + result = ValueVarchar.get(Constants.VERSION, session); + break; + case LOCK_MODE: + result = ValueInteger.get(session.getDatabase().getLockMode()); + break; + case LOCK_TIMEOUT: + result = ValueInteger.get(session.getLockTimeout()); + break; + case MEMORY_FREE: + session.getUser().checkAdmin(); + result = ValueBigint.get(Utils.getMemoryFree()); + break; + case MEMORY_USED: + session.getUser().checkAdmin(); + result = ValueBigint.get(Utils.getMemoryUsed()); + break; + case READONLY: + result = ValueBoolean.get(session.getDatabase().isReadOnly()); + break; + case SESSION_ID: + result = ValueInteger.get(session.getId()); + break; + case TRANSACTION_ID: + result = session.getTransactionId(); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return result; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return builder.append(getName()).append("()"); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public int getCost() { + return 1; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/TableInfoFunction.java b/h2/src/main/org/h2/expression/function/TableInfoFunction.java new file mode 100644 index 0000000000..c447033f88 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TableInfoFunction.java @@ -0,0 +1,111 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.util.ArrayList; + +import org.h2.command.Parser; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.mvstore.db.MVSpatialIndex; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * A table information function. + */ +public final class TableInfoFunction extends Function1_2 { + + /** + * DISK_SPACE_USED() (non-standard). + */ + public static final int DISK_SPACE_USED = 0; + + /** + * ESTIMATED_ENVELOPE(). + */ + public static final int ESTIMATED_ENVELOPE = DISK_SPACE_USED + 1; + + private static final String[] NAMES = { // + "DISK_SPACE_USED", "ESTIMATED_ENVELOPE" // + }; + + private final int function; + + public TableInfoFunction(Expression arg1, Expression arg2, int function) { + super(arg1, arg2); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + Table table = new Parser(session).parseTableName(v1.getString()); + l: switch (function) { + case DISK_SPACE_USED: + v1 = ValueBigint.get(table.getDiskSpaceUsed()); + break; + case ESTIMATED_ENVELOPE: { + Column column = table.getColumn(v2.getString()); + ArrayList indexes = table.getIndexes(); + if (indexes != null) { + for (int i = 1, size = indexes.size(); i < size; i++) { + Index index = indexes.get(i); + if (index instanceof MVSpatialIndex && index.isFirstColumn(column)) { + v1 = ((MVSpatialIndex) index).getEstimatedBounds(session); + break l; + } + } + } + v1 = ValueNull.INSTANCE; + break; + } + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + left = 
left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + switch (function) { + case DISK_SPACE_USED: + type = TypeInfo.TYPE_BIGINT; + break; + case ESTIMATED_ENVELOPE: + type = TypeInfo.TYPE_GEOMETRY; + break; + default: + throw DbException.getInternalError("function=" + function); + } + return this; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return super.isEverything(visitor); + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/ToCharFunction.java b/h2/src/main/org/h2/expression/function/ToCharFunction.java new file mode 100644 index 0000000000..9eb178060c --- /dev/null +++ b/h2/src/main/org/h2/expression/function/ToCharFunction.java @@ -0,0 +1,1127 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Daniel Gredler + */ +package org.h2.expression.function; + +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.text.DateFormatSymbols; +import java.text.DecimalFormat; +import java.text.DecimalFormatSymbols; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Currency; +import java.util.Locale; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; + +/** + * Emulates Oracle's TO_CHAR function. 
+ */ +public final class ToCharFunction extends FunctionN { + + /** + * The beginning of the Julian calendar. + */ + public static final int JULIAN_EPOCH = -2_440_588; + + private static final int[] ROMAN_VALUES = { 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, + 5, 4, 1 }; + + private static final String[] ROMAN_NUMERALS = { "M", "CM", "D", "CD", "C", "XC", + "L", "XL", "X", "IX", "V", "IV", "I" }; + + /** + * The month field. + */ + public static final int MONTHS = 0; + + /** + * The month field (short form). + */ + public static final int SHORT_MONTHS = 1; + + /** + * The weekday field. + */ + public static final int WEEKDAYS = 2; + + /** + * The weekday field (short form). + */ + public static final int SHORT_WEEKDAYS = 3; + + /** + * The AM / PM field. + */ + static final int AM_PM = 4; + + private static volatile String[][] NAMES; + + /** + * Emulates Oracle's TO_CHAR(number) function. + * + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    TO_CHAR(number) function
    InputOutputClosest {@link DecimalFormat} Equivalent
    ,Grouping separator.,
    .Decimal separator..
    $Leading dollar sign.$
    0Leading or trailing zeroes.0
    9Digit.#
    BBlanks integer part of a fixed point number less than 1.#
    CISO currency symbol.\u00A4
    DLocal decimal separator..
    EEEEReturns a value in scientific notation.E
    FMReturns values with no leading or trailing spaces.None.
    GLocal grouping separator.,
    LLocal currency symbol.\u00A4
    MINegative values get trailing minus sign, + * positive get trailing space.-
    PRNegative values get enclosing angle brackets, + * positive get spaces.None.
    RNReturns values in Roman numerals.None.
    SReturns values with leading/trailing +/- signs.None.
    TMReturns smallest number of characters possible.None.
    UReturns the dual currency symbol.None.
    VReturns a value multiplied by 10^n.None.
    XHex value.None.
    + * See also TO_CHAR(number) and number format models + * in the Oracle documentation. + * + * @param number the number to format + * @param format the format pattern to use (if any) + * @param nlsParam the NLS parameter (if any) + * @return the formatted number + */ + public static String toChar(BigDecimal number, String format, + @SuppressWarnings("unused") String nlsParam) { + + // short-circuit logic for formats that don't follow common logic below + String formatUp = format != null ? StringUtils.toUpperEnglish(format) : null; + if (formatUp == null || formatUp.equals("TM") || formatUp.equals("TM9")) { + String s = number.toPlainString(); + return s.startsWith("0.") ? s.substring(1) : s; + } else if (formatUp.equals("TME")) { + int pow = number.precision() - number.scale() - 1; + number = number.movePointLeft(pow); + return number.toPlainString() + "E" + + (pow < 0 ? '-' : '+') + (Math.abs(pow) < 10 ? "0" : "") + Math.abs(pow); + } else if (formatUp.equals("RN")) { + boolean lowercase = format.startsWith("r"); + String rn = StringUtils.pad(toRomanNumeral(number.intValue()), 15, " ", false); + return lowercase ? rn.toLowerCase() : rn; + } else if (formatUp.equals("FMRN")) { + boolean lowercase = format.charAt(2) == 'r'; + String rn = toRomanNumeral(number.intValue()); + return lowercase ? 
rn.toLowerCase() : rn; + } else if (formatUp.endsWith("X")) { + return toHex(number, format); + } + + String originalFormat = format; + DecimalFormatSymbols symbols = DecimalFormatSymbols.getInstance(); + char localGrouping = symbols.getGroupingSeparator(); + char localDecimal = symbols.getDecimalSeparator(); + + boolean leadingSign = formatUp.startsWith("S"); + if (leadingSign) { + format = format.substring(1); + } + + boolean trailingSign = formatUp.endsWith("S"); + if (trailingSign) { + format = format.substring(0, format.length() - 1); + } + + boolean trailingMinus = formatUp.endsWith("MI"); + if (trailingMinus) { + format = format.substring(0, format.length() - 2); + } + + boolean angleBrackets = formatUp.endsWith("PR"); + if (angleBrackets) { + format = format.substring(0, format.length() - 2); + } + + int v = formatUp.indexOf('V'); + if (v >= 0) { + int digits = 0; + for (int i = v + 1; i < format.length(); i++) { + char c = format.charAt(i); + if (c == '0' || c == '9') { + digits++; + } + } + number = number.movePointRight(digits); + format = format.substring(0, v) + format.substring(v + 1); + } + + Integer power; + if (format.endsWith("EEEE")) { + power = number.precision() - number.scale() - 1; + number = number.movePointLeft(power); + format = format.substring(0, format.length() - 4); + } else { + power = null; + } + + int maxLength = 1; + boolean fillMode = !formatUp.startsWith("FM"); + if (!fillMode) { + format = format.substring(2); + } + + // blanks flag doesn't seem to actually do anything + format = format.replaceAll("[Bb]", ""); + + // if we need to round the number to fit into the format specified, + // go ahead and do that first + int separator = findDecimalSeparator(format); + int formatScale = calculateScale(format, separator); + int numberScale = number.scale(); + if (formatScale < numberScale) { + number = number.setScale(formatScale, RoundingMode.HALF_UP); + } else if (numberScale < 0) { + number = number.setScale(0); + } + + // any 9s to 
the left of the decimal separator but to the right of a + // 0 behave the same as a 0, e.g. "09999.99" -> "00000.99" + for (int i = format.indexOf('0'); i >= 0 && i < separator; i++) { + if (format.charAt(i) == '9') { + format = format.substring(0, i) + "0" + format.substring(i + 1); + } + } + + StringBuilder output = new StringBuilder(); + String unscaled = (number.abs().compareTo(BigDecimal.ONE) < 0 ? + zeroesAfterDecimalSeparator(number) : "") + + number.unscaledValue().abs().toString(); + + // start at the decimal point and fill in the numbers to the left, + // working our way from right to left + int i = separator - 1; + int j = unscaled.length() - number.scale() - 1; + for (; i >= 0; i--) { + char c = format.charAt(i); + maxLength++; + if (c == '9' || c == '0') { + if (j >= 0) { + char digit = unscaled.charAt(j); + output.insert(0, digit); + j--; + } else if (c == '0' && power == null) { + output.insert(0, '0'); + } + } else if (c == ',') { + // only add the grouping separator if we have more numbers + if (j >= 0 || (i > 0 && format.charAt(i - 1) == '0')) { + output.insert(0, c); + } + } else if (c == 'G' || c == 'g') { + // only add the grouping separator if we have more numbers + if (j >= 0 || (i > 0 && format.charAt(i - 1) == '0')) { + output.insert(0, localGrouping); + } + } else if (c == 'C' || c == 'c') { + Currency currency = getCurrency(); + output.insert(0, currency.getCurrencyCode()); + maxLength += 6; + } else if (c == 'L' || c == 'l' || c == 'U' || c == 'u') { + Currency currency = getCurrency(); + output.insert(0, currency.getSymbol()); + maxLength += 9; + } else if (c == '$') { + Currency currency = getCurrency(); + String cs = currency.getSymbol(); + output.insert(0, cs); + } else { + throw DbException.get( + ErrorCode.INVALID_TO_CHAR_FORMAT, originalFormat); + } + } + + // if the format (to the left of the decimal point) was too small + // to hold the number, return a big "######" string + if (j >= 0) { + return StringUtils.pad("", 
format.length() + 1, "#", true); + } + + if (separator < format.length()) { + + // add the decimal point + maxLength++; + char pt = format.charAt(separator); + if (pt == 'd' || pt == 'D') { + output.append(localDecimal); + } else { + output.append(pt); + } + + // start at the decimal point and fill in the numbers to the right, + // working our way from left to right + i = separator + 1; + j = unscaled.length() - number.scale(); + for (; i < format.length(); i++) { + char c = format.charAt(i); + maxLength++; + if (c == '9' || c == '0') { + if (j < unscaled.length()) { + char digit = unscaled.charAt(j); + output.append(digit); + j++; + } else { + if (c == '0' || fillMode) { + output.append('0'); + } + } + } else { + throw DbException.get( + ErrorCode.INVALID_TO_CHAR_FORMAT, originalFormat); + } + } + } + + addSign(output, number.signum(), leadingSign, trailingSign, + trailingMinus, angleBrackets, fillMode); + + if (power != null) { + output.append('E'); + output.append(power < 0 ? '-' : '+'); + output.append(Math.abs(power) < 10 ? "0" : ""); + output.append(Math.abs(power)); + } + + if (fillMode) { + if (power != null) { + output.insert(0, ' '); + } else { + while (output.length() < maxLength) { + output.insert(0, ' '); + } + } + } + + return output.toString(); + } + + private static Currency getCurrency() { + Locale locale = Locale.getDefault(); + return Currency.getInstance(locale.getCountry().length() == 2 ? locale : Locale.US); + } + + private static String zeroesAfterDecimalSeparator(BigDecimal number) { + final String numberStr = number.toPlainString(); + final int idx = numberStr.indexOf('.'); + if (idx < 0) { + return ""; + } + int i = idx + 1; + boolean allZeroes = true; + int length = numberStr.length(); + for (; i < length; i++) { + if (numberStr.charAt(i) != '0') { + allZeroes = false; + break; + } + } + final char[] zeroes = new char[allZeroes ? 
length - idx - 1: i - 1 - idx]; + Arrays.fill(zeroes, '0'); + return String.valueOf(zeroes); + } + + private static void addSign(StringBuilder output, int signum, + boolean leadingSign, boolean trailingSign, boolean trailingMinus, + boolean angleBrackets, boolean fillMode) { + if (angleBrackets) { + if (signum < 0) { + output.insert(0, '<'); + output.append('>'); + } else if (fillMode) { + output.insert(0, ' '); + output.append(' '); + } + } else { + String sign; + if (signum == 0) { + sign = ""; + } else if (signum < 0) { + sign = "-"; + } else { + if (leadingSign || trailingSign) { + sign = "+"; + } else if (fillMode) { + sign = " "; + } else { + sign = ""; + } + } + if (trailingMinus || trailingSign) { + output.append(sign); + } else { + output.insert(0, sign); + } + } + } + + private static int findDecimalSeparator(String format) { + int index = format.indexOf('.'); + if (index == -1) { + index = format.indexOf('D'); + if (index == -1) { + index = format.indexOf('d'); + if (index == -1) { + index = format.length(); + } + } + } + return index; + } + + private static int calculateScale(String format, int separator) { + int scale = 0; + for (int i = separator; i < format.length(); i++) { + char c = format.charAt(i); + if (c == '0' || c == '9') { + scale++; + } + } + return scale; + } + + private static String toRomanNumeral(int number) { + StringBuilder result = new StringBuilder(); + for (int i = 0; i < ROMAN_VALUES.length; i++) { + int value = ROMAN_VALUES[i]; + String numeral = ROMAN_NUMERALS[i]; + while (number >= value) { + result.append(numeral); + number -= value; + } + } + return result.toString(); + } + + private static String toHex(BigDecimal number, String format) { + + boolean fillMode = !StringUtils.toUpperEnglish(format).startsWith("FM"); + boolean uppercase = !format.contains("x"); + boolean zeroPadded = format.startsWith("0"); + int digits = 0; + for (int i = 0; i < format.length(); i++) { + char c = format.charAt(i); + if (c == '0' || c == 'X' || 
c == 'x') { + digits++; + } + } + + int i = number.setScale(0, RoundingMode.HALF_UP).intValue(); + String hex = Integer.toHexString(i); + if (digits < hex.length()) { + hex = StringUtils.pad("", digits + 1, "#", true); + } else { + if (uppercase) { + hex = StringUtils.toUpperEnglish(hex); + } + if (zeroPadded) { + hex = StringUtils.pad(hex, digits, "0", false); + } + if (fillMode) { + hex = StringUtils.pad(hex, format.length() + 1, " ", false); + } + } + + return hex; + } + + /** + * Get the date (month / weekday / ...) names. + * + * @param names the field + * @return the names + */ + public static String[] getDateNames(int names) { + String[][] result = NAMES; + if (result == null) { + result = new String[5][]; + DateFormatSymbols dfs = DateFormatSymbols.getInstance(); + result[MONTHS] = dfs.getMonths(); + String[] months = dfs.getShortMonths(); + for (int i = 0; i < 12; i++) { + String month = months[i]; + if (month.endsWith(".")) { + months[i] = month.substring(0, month.length() - 1); + } + } + result[SHORT_MONTHS] = months; + result[WEEKDAYS] = dfs.getWeekdays(); + result[SHORT_WEEKDAYS] = dfs.getShortWeekdays(); + result[AM_PM] = dfs.getAmPmStrings(); + NAMES = result; + } + return result[names]; + } + + /** + * Used for testing. + */ + public static void clearNames() { + NAMES = null; + } + + /** + * Returns time zone display name or ID for the specified date-time value. 
+ * + * @param session + * the session + * @param value + * value + * @param tzd + * if {@code true} return TZD (time zone region with Daylight Saving + * Time information included), if {@code false} return TZR (time zone + * region) + * @return time zone display name or ID + */ + private static String getTimeZone(SessionLocal session, Value value, boolean tzd) { + if (value instanceof ValueTimestampTimeZone) { + return DateTimeUtils.timeZoneNameFromOffsetSeconds(((ValueTimestampTimeZone) value) + .getTimeZoneOffsetSeconds()); + } else if (value instanceof ValueTimeTimeZone) { + return DateTimeUtils.timeZoneNameFromOffsetSeconds(((ValueTimeTimeZone) value) + .getTimeZoneOffsetSeconds()); + } else { + TimeZoneProvider tz = session.currentTimeZone(); + if (tzd) { + ValueTimestamp v = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, session); + return tz.getShortId(tz.getEpochSecondsFromLocal(v.getDateValue(), v.getTimeNanos())); + } + return tz.getId(); + } + } + + /** + * Emulates Oracle's TO_CHAR(datetime) function. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <table border="1">
+ * <caption>TO_CHAR(datetime) function</caption>
+ * <tr><th>Input</th><th>Output</th><th>Closest {@link SimpleDateFormat} Equivalent</th></tr>
+ * <tr><td>- / , . ; : "text"</td><td>Reproduced verbatim.</td><td>'text'</td></tr>
+ * <tr><td>A.D. AD B.C. BC</td><td>Era designator, with or without periods.</td><td>G</td></tr>
+ * <tr><td>A.M. AM P.M. PM</td><td>AM/PM marker.</td><td>a</td></tr>
+ * <tr><td>CC SCC</td><td>Century.</td><td>None.</td></tr>
+ * <tr><td>D</td><td>Day of week.</td><td>u</td></tr>
+ * <tr><td>DAY</td><td>Name of day.</td><td>EEEE</td></tr>
+ * <tr><td>DY</td><td>Abbreviated day name.</td><td>EEE</td></tr>
+ * <tr><td>DD</td><td>Day of month.</td><td>d</td></tr>
+ * <tr><td>DDD</td><td>Day of year.</td><td>D</td></tr>
+ * <tr><td>DL</td><td>Long date format.</td><td>EEEE, MMMM d, yyyy</td></tr>
+ * <tr><td>DS</td><td>Short date format.</td><td>MM/dd/yyyy</td></tr>
+ * <tr><td>E</td><td>Abbreviated era name (Japanese, Chinese, Thai)</td><td>None.</td></tr>
+ * <tr><td>EE</td><td>Full era name (Japanese, Chinese, Thai)</td><td>None.</td></tr>
+ * <tr><td>FF[1-9]</td><td>Fractional seconds.</td><td>S</td></tr>
+ * <tr><td>FM</td><td>Returns values with no leading or trailing spaces.</td><td>None.</td></tr>
+ * <tr><td>FX</td><td>Requires exact matches between character data and format model.</td><td>None.</td></tr>
+ * <tr><td>HH HH12</td><td>Hour in AM/PM (1-12).</td><td>hh</td></tr>
+ * <tr><td>HH24</td><td>Hour in day (0-23).</td><td>HH</td></tr>
+ * <tr><td>IW</td><td>Week in year.</td><td>w</td></tr>
+ * <tr><td>WW</td><td>Week in year.</td><td>w</td></tr>
+ * <tr><td>W</td><td>Week in month.</td><td>W</td></tr>
+ * <tr><td>IYYY IYY IY I</td><td>Last 4/3/2/1 digit(s) of ISO year.</td><td>yyyy yyy yy y</td></tr>
+ * <tr><td>RRRR RR</td><td>Last 4/2 digits of year.</td><td>yyyy yy</td></tr>
+ * <tr><td>Y,YYY</td><td>Year with comma.</td><td>None.</td></tr>
+ * <tr><td>YEAR SYEAR</td><td>Year spelled out (S prefixes BC years with minus sign).</td><td>None.</td></tr>
+ * <tr><td>YYYY SYYYY</td><td>4-digit year (S prefixes BC years with minus sign).</td><td>yyyy</td></tr>
+ * <tr><td>YYY YY Y</td><td>Last 3/2/1 digit(s) of year.</td><td>yyy yy y</td></tr>
+ * <tr><td>J</td><td>Julian day (number of days since January 1, 4712 BC).</td><td>None.</td></tr>
+ * <tr><td>MI</td><td>Minute in hour.</td><td>mm</td></tr>
+ * <tr><td>MM</td><td>Month in year.</td><td>MM</td></tr>
+ * <tr><td>MON</td><td>Abbreviated name of month.</td><td>MMM</td></tr>
+ * <tr><td>MONTH</td><td>Name of month, padded with spaces.</td><td>MMMM</td></tr>
+ * <tr><td>RM</td><td>Roman numeral month.</td><td>None.</td></tr>
+ * <tr><td>Q</td><td>Quarter of year.</td><td>None.</td></tr>
+ * <tr><td>SS</td><td>Seconds in minute.</td><td>ss</td></tr>
+ * <tr><td>SSSSS</td><td>Seconds in day.</td><td>None.</td></tr>
+ * <tr><td>TS</td><td>Short time format.</td><td>h:mm:ss aa</td></tr>
+ * <tr><td>TZD</td><td>Daylight savings time zone abbreviation.</td><td>z</td></tr>
+ * <tr><td>TZR</td><td>Time zone region information.</td><td>zzzz</td></tr>
+ * <tr><td>X</td><td>Local radix character.</td><td>None.</td></tr>
+ * </table>
    + *

    + * See also TO_CHAR(datetime) and datetime format models + * in the Oracle documentation. + * + * @param session the session + * @param value the date-time value to format + * @param format the format pattern to use (if any) + * @param nlsParam the NLS parameter (if any) + * + * @return the formatted timestamp + */ + public static String toCharDateTime(SessionLocal session, Value value, String format, + @SuppressWarnings("unused") String nlsParam) { + long[] a = DateTimeUtils.dateAndTimeFromValue(value, session); + long dateValue = a[0]; + long timeNanos = a[1]; + int year = DateTimeUtils.yearFromDateValue(dateValue); + int monthOfYear = DateTimeUtils.monthFromDateValue(dateValue); + int dayOfMonth = DateTimeUtils.dayFromDateValue(dateValue); + int posYear = Math.abs(year); + int second = (int) (timeNanos / 1_000_000_000); + int nanos = (int) (timeNanos - second * 1_000_000_000); + int minute = second / 60; + second -= minute * 60; + int hour = minute / 60; + minute -= hour * 60; + int h12 = (hour + 11) % 12 + 1; + boolean isAM = hour < 12; + if (format == null) { + format = "DD-MON-YY HH.MI.SS.FF PM"; + } + + StringBuilder output = new StringBuilder(); + boolean fillMode = true; + + for (int i = 0, length = format.length(); i < length;) { + + Capitalization cap; + + // AD / BC + + if ((cap = containsAt(format, i, "A.D.", "B.C.")) != null) { + String era = year > 0 ? "A.D." : "B.C."; + output.append(cap.apply(era)); + i += 4; + } else if ((cap = containsAt(format, i, "AD", "BC")) != null) { + String era = year > 0 ? "AD" : "BC"; + output.append(cap.apply(era)); + i += 2; + + // AM / PM + + } else if ((cap = containsAt(format, i, "A.M.", "P.M.")) != null) { + String am = isAM ? "A.M." : "P.M."; + output.append(cap.apply(am)); + i += 4; + } else if ((cap = containsAt(format, i, "AM", "PM")) != null) { + String am = isAM ? 
"AM" : "PM"; + output.append(cap.apply(am)); + i += 2; + + // Long/short date/time format + + } else if (containsAt(format, i, "DL") != null) { + String day = getDateNames(WEEKDAYS)[DateTimeUtils.getSundayDayOfWeek(dateValue)]; + String month = getDateNames(MONTHS)[monthOfYear - 1]; + output.append(day).append(", ").append(month).append(' ').append(dayOfMonth).append(", "); + StringUtils.appendZeroPadded(output, 4, posYear); + i += 2; + } else if (containsAt(format, i, "DS") != null) { + StringUtils.appendTwoDigits(output, monthOfYear).append('/'); + StringUtils.appendTwoDigits(output, dayOfMonth).append('/'); + StringUtils.appendZeroPadded(output, 4, posYear); + i += 2; + } else if (containsAt(format, i, "TS") != null) { + output.append(h12).append(':'); + StringUtils.appendTwoDigits(output, minute).append(':'); + StringUtils.appendTwoDigits(output, second).append(' ').append(getDateNames(AM_PM)[isAM ? 0 : 1]); + i += 2; + + // Day + + } else if (containsAt(format, i, "DDD") != null) { + output.append(DateTimeUtils.getDayOfYear(dateValue)); + i += 3; + } else if (containsAt(format, i, "DD") != null) { + StringUtils.appendTwoDigits(output, dayOfMonth); + i += 2; + } else if ((cap = containsAt(format, i, "DY")) != null) { + String day = getDateNames(SHORT_WEEKDAYS)[DateTimeUtils.getSundayDayOfWeek(dateValue)]; + output.append(cap.apply(day)); + i += 2; + } else if ((cap = containsAt(format, i, "DAY")) != null) { + String day = getDateNames(WEEKDAYS)[DateTimeUtils.getSundayDayOfWeek(dateValue)]; + if (fillMode) { + day = StringUtils.pad(day, "Wednesday".length(), " ", true); + } + output.append(cap.apply(day)); + i += 3; + } else if (containsAt(format, i, "D") != null) { + output.append(DateTimeUtils.getSundayDayOfWeek(dateValue)); + i += 1; + } else if (containsAt(format, i, "J") != null) { + output.append(DateTimeUtils.absoluteDayFromDateValue(dateValue) - JULIAN_EPOCH); + i += 1; + + // Hours + + } else if (containsAt(format, i, "HH24") != null) { + 
StringUtils.appendTwoDigits(output, hour); + i += 4; + } else if (containsAt(format, i, "HH12") != null) { + StringUtils.appendTwoDigits(output, h12); + i += 4; + } else if (containsAt(format, i, "HH") != null) { + StringUtils.appendTwoDigits(output, h12); + i += 2; + + // Minutes + + } else if (containsAt(format, i, "MI") != null) { + StringUtils.appendTwoDigits(output, minute); + i += 2; + + // Seconds + + } else if (containsAt(format, i, "SSSSS") != null) { + int seconds = (int) (timeNanos / 1_000_000_000); + output.append(seconds); + i += 5; + } else if (containsAt(format, i, "SS") != null) { + StringUtils.appendTwoDigits(output, second); + i += 2; + + // Fractional seconds + + } else if (containsAt(format, i, "FF1", "FF2", + "FF3", "FF4", "FF5", "FF6", "FF7", "FF8", "FF9") != null) { + int x = format.charAt(i + 2) - '0'; + int ff = (int) (nanos * Math.pow(10, x - 9)); + StringUtils.appendZeroPadded(output, x, ff); + i += 3; + } else if (containsAt(format, i, "FF") != null) { + StringUtils.appendZeroPadded(output, 9, nanos); + i += 2; + + // Time zone + + } else if (containsAt(format, i, "TZR") != null) { + output.append(getTimeZone(session, value, false)); + i += 3; + } else if (containsAt(format, i, "TZD") != null) { + output.append(getTimeZone(session, value, true)); + i += 3; + } else if (containsAt(format, i, "TZH") != null) { + int hours = DateTimeFunction.extractDateTime(session, value, DateTimeFunction.TIMEZONE_HOUR); + output.append( hours < 0 ? 
'-' : '+'); + StringUtils.appendTwoDigits(output, Math.abs(hours)); + i += 3; + + } else if (containsAt(format, i, "TZM") != null) { + StringUtils.appendTwoDigits(output, + Math.abs(DateTimeFunction.extractDateTime(session, value, DateTimeFunction.TIMEZONE_MINUTE))); + i += 3; + + // Week + } else if (containsAt(format, i, "WW") != null) { + StringUtils.appendTwoDigits(output, (DateTimeUtils.getDayOfYear(dateValue) - 1) / 7 + 1); + i += 2; + } else if (containsAt(format, i, "IW") != null) { + StringUtils.appendTwoDigits(output, DateTimeUtils.getIsoWeekOfYear(dateValue)); + i += 2; + } else if (containsAt(format, i, "W") != null) { + output.append((dayOfMonth - 1) / 7 + 1); + i += 1; + + // Year + + } else if (containsAt(format, i, "Y,YYY") != null) { + output.append(new DecimalFormat("#,###").format(posYear)); + i += 5; + } else if (containsAt(format, i, "SYYYY") != null) { + // Should be <= 0, but Oracle prints negative years with off-by-one difference + if (year < 0) { + output.append('-'); + } + StringUtils.appendZeroPadded(output, 4, posYear); + i += 5; + } else if (containsAt(format, i, "YYYY", "RRRR") != null) { + StringUtils.appendZeroPadded(output, 4, posYear); + i += 4; + } else if (containsAt(format, i, "IYYY") != null) { + StringUtils.appendZeroPadded(output, 4, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue))); + i += 4; + } else if (containsAt(format, i, "YYY") != null) { + StringUtils.appendZeroPadded(output, 3, posYear % 1000); + i += 3; + } else if (containsAt(format, i, "IYY") != null) { + StringUtils.appendZeroPadded(output, 3, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 1000); + i += 3; + } else if (containsAt(format, i, "YY", "RR") != null) { + StringUtils.appendTwoDigits(output, posYear % 100); + i += 2; + } else if (containsAt(format, i, "IY") != null) { + StringUtils.appendTwoDigits(output, Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 100); + i += 2; + } else if (containsAt(format, i, "Y") != null) { + output.append(posYear % 
10); + i += 1; + } else if (containsAt(format, i, "I") != null) { + output.append(Math.abs(DateTimeUtils.getIsoWeekYear(dateValue)) % 10); + i += 1; + + // Month / quarter + + } else if ((cap = containsAt(format, i, "MONTH")) != null) { + String month = getDateNames(MONTHS)[monthOfYear - 1]; + if (fillMode) { + month = StringUtils.pad(month, "September".length(), " ", true); + } + output.append(cap.apply(month)); + i += 5; + } else if ((cap = containsAt(format, i, "MON")) != null) { + String month = getDateNames(SHORT_MONTHS)[monthOfYear - 1]; + output.append(cap.apply(month)); + i += 3; + } else if (containsAt(format, i, "MM") != null) { + StringUtils.appendTwoDigits(output, monthOfYear); + i += 2; + } else if ((cap = containsAt(format, i, "RM")) != null) { + output.append(cap.apply(toRomanNumeral(monthOfYear))); + i += 2; + } else if (containsAt(format, i, "Q") != null) { + int q = 1 + ((monthOfYear - 1) / 3); + output.append(q); + i += 1; + + // Local radix character + + } else if (containsAt(format, i, "X") != null) { + char c = DecimalFormatSymbols.getInstance().getDecimalSeparator(); + output.append(c); + i += 1; + + // Format modifiers + + } else if (containsAt(format, i, "FM") != null) { + fillMode = !fillMode; + i += 2; + } else if (containsAt(format, i, "FX") != null) { + i += 2; + + // Literal text + + } else if (containsAt(format, i, "\"") != null) { + for (i = i + 1; i < format.length(); i++) { + char c = format.charAt(i); + if (c != '"') { + output.append(c); + } else { + i++; + break; + } + } + } else if (format.charAt(i) == '-' + || format.charAt(i) == '/' + || format.charAt(i) == ',' + || format.charAt(i) == '.' 
+ || format.charAt(i) == ';' + || format.charAt(i) == ':' + || format.charAt(i) == ' ') { + output.append(format.charAt(i)); + i += 1; + + // Anything else + + } else { + throw DbException.get(ErrorCode.INVALID_TO_CHAR_FORMAT, format); + } + } + + return output.toString(); + } + + /** + * Returns a capitalization strategy if the specified string contains any of + * the specified substrings at the specified index. The capitalization + * strategy indicates the casing of the substring that was found. If none of + * the specified substrings are found, this method returns null + * . + * + * @param s the string to check + * @param index the index to check at + * @param substrings the substrings to check for within the string + * @return a capitalization strategy if the specified string contains any of + * the specified substrings at the specified index, + * null otherwise + */ + private static Capitalization containsAt(String s, int index, + String... substrings) { + for (String substring : substrings) { + if (index + substring.length() <= s.length()) { + boolean found = true; + Boolean up1 = null; + Boolean up2 = null; + for (int i = 0; i < substring.length(); i++) { + char c1 = s.charAt(index + i); + char c2 = substring.charAt(i); + if (c1 != c2 && Character.toUpperCase(c1) != Character.toUpperCase(c2)) { + found = false; + break; + } else if (Character.isLetter(c1)) { + if (up1 == null) { + up1 = Character.isUpperCase(c1); + } else if (up2 == null) { + up2 = Character.isUpperCase(c1); + } + } + } + if (found) { + return Capitalization.toCapitalization(up1, up2); + } + } + } + return null; + } + + /** Represents a capitalization / casing strategy. */ + public enum Capitalization { + + /** + * All letters are uppercased. + */ + UPPERCASE, + + /** + * All letters are lowercased. + */ + LOWERCASE, + + /** + * The string is capitalized (first letter uppercased, subsequent + * letters lowercased). 
+ */ + CAPITALIZE; + + /** + * Returns the capitalization / casing strategy which should be used + * when the first and second letters have the specified casing. + * + * @param up1 whether or not the first letter is uppercased + * @param up2 whether or not the second letter is uppercased + * @return the capitalization / casing strategy which should be used + * when the first and second letters have the specified casing + */ + static Capitalization toCapitalization(Boolean up1, Boolean up2) { + if (up1 == null) { + return Capitalization.CAPITALIZE; + } else if (up2 == null) { + return up1 ? Capitalization.UPPERCASE : Capitalization.LOWERCASE; + } else if (up1) { + return up2 ? Capitalization.UPPERCASE : Capitalization.CAPITALIZE; + } else { + return Capitalization.LOWERCASE; + } + } + + /** + * Applies this capitalization strategy to the specified string. + * + * @param s the string to apply this strategy to + * @return the resultant string + */ + public String apply(String s) { + if (s == null || s.isEmpty()) { + return s; + } + switch (this) { + case UPPERCASE: + return StringUtils.toUpperEnglish(s); + case LOWERCASE: + return StringUtils.toLowerEnglish(s); + case CAPITALIZE: + return Character.toUpperCase(s.charAt(0)) + + (s.length() > 1 ? StringUtils.toLowerEnglish(s).substring(1) : ""); + default: + throw new IllegalArgumentException( + "Unknown capitalization strategy: " + this); + } + } + } + + public ToCharFunction(Expression arg1, Expression arg2, Expression arg3) { + super(arg2 == null ? new Expression[] { arg1 } + : arg3 == null ? new Expression[] { arg1, arg2 } : new Expression[] { arg1, arg2, arg3 }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + switch (v1.getValueType()) { + case Value.TIME: + case Value.DATE: + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + v1 = ValueVarchar.get(toCharDateTime(session, v1, v2 == null ? null : v2.getString(), + v3 == null ? 
null : v3.getString()), session); + break; + case Value.SMALLINT: + case Value.INTEGER: + case Value.BIGINT: + case Value.NUMERIC: + case Value.DOUBLE: + case Value.REAL: + v1 = ValueVarchar.get(toChar(v1.getBigDecimal(), v2 == null ? null : v2.getString(), + v3 == null ? null : v3.getString()), session); + break; + default: + v1 = ValueVarchar.get(v1.getString(), session); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = TypeInfo.TYPE_VARCHAR; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return "TO_CHAR"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/TrimFunction.java b/h2/src/main/org/h2/expression/function/TrimFunction.java new file mode 100644 index 0000000000..21f56a6d31 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TrimFunction.java @@ -0,0 +1,86 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarchar; + +/** + * A TRIM function. + */ +public final class TrimFunction extends Function1_2 { + + /** + * The LEADING flag. + */ + public static final int LEADING = 1; + + /** + * The TRAILING flag. 
+ */ + public static final int TRAILING = 2; + + private int flags; + + public TrimFunction(Expression from, Expression space, int flags) { + super(from, space); + this.flags = flags; + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2) { + return ValueVarchar.get(StringUtils.trim(v1.getString(), (flags & LEADING) != 0, (flags & TRAILING) != 0, + v2 != null ? v2.getString() : " "), session); + } + + @Override + public Expression optimize(SessionLocal session) { + left = left.optimize(session); + if (right != null) { + right = right.optimize(session); + } + type = TypeInfo.getTypeInfo(Value.VARCHAR, left.getType().getPrecision(), 0, null); + if (left.isConstant() && (right == null || right.isConstant())) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + builder.append(getName()).append('('); + boolean needFrom = false; + switch (flags) { + case LEADING: + builder.append("LEADING "); + needFrom = true; + break; + case TRAILING: + builder.append("TRAILING "); + needFrom = true; + break; + } + if (right != null) { + right.getUnenclosedSQL(builder, sqlFlags); + needFrom = true; + } + if (needFrom) { + builder.append(" FROM "); + } + return left.getUnenclosedSQL(builder, sqlFlags).append(')'); + } + + @Override + public String getName() { + return "TRIM"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/TruncateValueFunction.java b/h2/src/main/org/h2/expression/function/TruncateValueFunction.java new file mode 100644 index 0000000000..4bbedf930d --- /dev/null +++ b/h2/src/main/org/h2/expression/function/TruncateValueFunction.java @@ -0,0 +1,105 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import java.math.BigDecimal; +import java.math.MathContext; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.MathUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDecfloat; +import org.h2.value.ValueNumeric; + +/** + * A TRUNCATE_VALUE function. + */ +public final class TruncateValueFunction extends FunctionN { + + public TruncateValueFunction(Expression arg1, Expression arg2, Expression arg3) { + super(new Expression[] { arg1, arg2, arg3 }); + } + + @Override + public Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + long precision = v2.getLong(); + boolean force = v3.getBoolean(); + if (precision <= 0) { + throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), "1", + "" + Integer.MAX_VALUE); + } + TypeInfo t = v1.getType(); + int valueType = t.getValueType(); + if (DataType.getDataType(valueType).supportsPrecision) { + if (precision < t.getPrecision()) { + switch (valueType) { + case Value.NUMERIC: { + BigDecimal bd = v1.getBigDecimal().round(new MathContext(MathUtils.convertLongToInt(precision))); + if (bd.scale() < 0) { + bd = bd.setScale(0); + } + return ValueNumeric.get(bd); + } + case Value.DECFLOAT: + return ValueDecfloat + .get(v1.getBigDecimal().round(new MathContext(MathUtils.convertLongToInt(precision)))); + default: + return v1.castTo(TypeInfo.getTypeInfo(valueType, precision, t.getScale(), t.getExtTypeInfo()), + session); + } + } + } else if (force) { + BigDecimal bd; + switch (valueType) { + case Value.TINYINT: + case Value.SMALLINT: + case Value.INTEGER: + bd = BigDecimal.valueOf(v1.getInt()); + break; + case Value.BIGINT: + bd = BigDecimal.valueOf(v1.getLong()); + break; + case Value.REAL: + 
case Value.DOUBLE: + bd = v1.getBigDecimal(); + break; + default: + return v1; + } + bd = bd.round(new MathContext(MathUtils.convertLongToInt(precision))); + if (valueType == Value.DECFLOAT) { + return ValueDecfloat.get(bd); + } + if (bd.scale() < 0) { + bd = bd.setScale(0); + } + return ValueNumeric.get(bd).convertTo(valueType); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + type = args[0].getType(); + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return "TRUNCATE_VALUE"; + } + +} diff --git a/h2/src/main/org/h2/expression/function/XMLFunction.java b/h2/src/main/org/h2/expression/function/XMLFunction.java new file mode 100644 index 0000000000..fb4491b40f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/XMLFunction.java @@ -0,0 +1,161 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.message.DbException; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * An XML function. + */ +public final class XMLFunction extends FunctionN { + + /** + * XMLATTR() (non-standard). + */ + public static final int XMLATTR = 0; + + /** + * XMLCDATA() (non-standard). + */ + public static final int XMLCDATA = XMLATTR + 1; + + /** + * XMLCOMMENT() (non-standard). + */ + public static final int XMLCOMMENT = XMLCDATA + 1; + + /** + * XMLNODE() (non-standard). 
+ */ + public static final int XMLNODE = XMLCOMMENT + 1; + + /** + * XMLSTARTDOC() (non-standard). + */ + public static final int XMLSTARTDOC = XMLNODE + 1; + + /** + * XMLTEXT() (non-standard). + */ + public static final int XMLTEXT = XMLSTARTDOC + 1; + + private static final String[] NAMES = { // + "XMLATTR", "XMLCDATA", "XMLCOMMENT", "XMLNODE", "XMLSTARTDOC", "XMLTEXT" // + }; + + private final int function; + + public XMLFunction(int function) { + super(new Expression[4]); + this.function = function; + } + + @Override + public Value getValue(SessionLocal session) { + switch (function) { + case XMLNODE: + return xmlNode(session); + case XMLSTARTDOC: + return ValueVarchar.get(StringUtils.xmlStartDoc(), session); + default: + return super.getValue(session); + } + } + + private Value xmlNode(SessionLocal session) { + Value v1 = args[0].getValue(session); + if (v1 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int length = args.length; + String attr = length >= 2 ? args[1].getValue(session).getString() : null; + String content = length >= 3 ? 
args[2].getValue(session).getString() : null; + boolean indent; + if (length >= 4) { + Value v4 = args[3].getValue(session); + if (v4 == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + indent = v4.getBoolean(); + } else { + indent = true; + } + return ValueVarchar.get(StringUtils.xmlNode(v1.getString(), attr, content, indent), session); + } + + @Override + protected Value getValue(SessionLocal session, Value v1, Value v2, Value v3) { + switch (function) { + case XMLATTR: + v1 = ValueVarchar.get(StringUtils.xmlAttr(v1.getString(), v2.getString()), session); + break; + case XMLCDATA: + v1 = ValueVarchar.get(StringUtils.xmlCData(v1.getString()), session); + break; + case XMLCOMMENT: + v1 = ValueVarchar.get(StringUtils.xmlComment(v1.getString()), session); + break; + case XMLTEXT: + v1 = ValueVarchar.get(StringUtils.xmlText(v1.getString(), v2 != null && v2.getBoolean()), session); + break; + default: + throw DbException.getInternalError("function=" + function); + } + return v1; + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session, true); + int min, max; + switch (function) { + case XMLATTR: + max = min = 2; + break; + case XMLNODE: + min = 1; + max = 4; + break; + case XMLCDATA: + case XMLCOMMENT: + max = min = 1; + break; + case XMLSTARTDOC: + max = min = 0; + break; + case XMLTEXT: + min = 1; + max = 2; + break; + default: + throw DbException.getInternalError("function=" + function); + } + int len = args.length; + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), min + ".." 
+ max); + } + type = TypeInfo.TYPE_VARCHAR; + if (allConst) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + return this; + } + + @Override + public String getName() { + return NAMES[function]; + } + +} diff --git a/h2/src/main/org/h2/expression/function/package.html b/h2/src/main/org/h2/expression/function/package.html new file mode 100644 index 0000000000..934f342526 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Functions. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java b/h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java new file mode 100644 index 0000000000..eb5b5c7fa4 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/ArrayTableFunction.java @@ -0,0 +1,178 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.message.DbException; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.Column; +import org.h2.value.Value; +import org.h2.value.ValueCollectionBase; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * A table value function. + */ +public final class ArrayTableFunction extends TableFunction { + + /** + * UNNEST(). + */ + public static final int UNNEST = 0; + + /** + * TABLE() (non-standard). + */ + public static final int TABLE = UNNEST + 1; + + /** + * TABLE_DISTINCT() (non-standard). 
+ */ + public static final int TABLE_DISTINCT = TABLE + 1; + + private Column[] columns; + + private static final String[] NAMES = { // + "UNNEST", "TABLE", "TABLE_DISTINCT" // + }; + + private final int function; + + public ArrayTableFunction(int function) { + super(new Expression[1]); + this.function = function; + } + + @Override + public ResultInterface getValue(SessionLocal session) { + return getTable(session, false); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + if (args.length < 1) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), ">0"); + } + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + if (function == UNNEST) { + super.getSQL(builder, sqlFlags); + if (args.length < columns.length) { + builder.append(" WITH ORDINALITY"); + } + } else { + builder.append(getName()).append('('); + for (int i = 0; i < args.length; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(columns[i].getCreateSQL()).append('='); + args[i].getUnenclosedSQL(builder, sqlFlags); + } + builder.append(')'); + } + return builder; + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + return getTable(session, true); + } + + public void setColumns(ArrayList columns) { + this.columns = columns.toArray(new Column[0]); + } + + private ResultInterface getTable(SessionLocal session, boolean onlyColumnList) { + int totalColumns = columns.length; + Expression[] header = new Expression[totalColumns]; + Database db = session.getDatabase(); + for (int i = 0; i < totalColumns; i++) { + Column c = columns[i]; + ExpressionColumn col = new ExpressionColumn(db, c); + header[i] = col; + } + LocalResult result = new LocalResult(session, header, totalColumns, totalColumns); + if (!onlyColumnList && function == TABLE_DISTINCT) { + result.setDistinct(); + } + if (!onlyColumnList) { + int len = totalColumns; + boolean unnest = function == UNNEST, addNumber = 
false; + if (unnest) { + len = args.length; + if (len < totalColumns) { + addNumber = true; + } + } + Value[][] list = new Value[len][]; + int rows = 0; + for (int i = 0; i < len; i++) { + Value v = args[i].getValue(session); + if (v == ValueNull.INSTANCE) { + list[i] = Value.EMPTY_VALUES; + } else { + int type = v.getValueType(); + if (type != Value.ARRAY && type != Value.ROW) { + v = v.convertToAnyArray(session); + } + Value[] l = ((ValueCollectionBase) v).getList(); + list[i] = l; + rows = Math.max(rows, l.length); + } + } + for (int row = 0; row < rows; row++) { + Value[] r = new Value[totalColumns]; + for (int j = 0; j < len; j++) { + Value[] l = list[j]; + Value v; + if (l.length <= row) { + v = ValueNull.INSTANCE; + } else { + Column c = columns[j]; + v = l[row]; + if (!unnest) { + v = v.convertForAssignTo(c.getType(), session, c); + } + } + r[j] = v; + } + if (addNumber) { + r[len] = ValueInteger.get(row + 1); + } + result.addRow(r); + } + } + result.done(); + return result; + } + + @Override + public String getName() { + return NAMES[function]; + } + + @Override + public boolean isDeterministic() { + return true; + } + + public int getFunctionType() { + return function; + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java b/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java new file mode 100644 index 0000000000..f03ad1c8b2 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/CSVReadFunction.java @@ -0,0 +1,119 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.function.CSVWriteFunction; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.tools.Csv; +import org.h2.util.StringUtils; + +/** + * A CSVREAD function. + */ +public final class CSVReadFunction extends TableFunction { + + public CSVReadFunction() { + super(new Expression[4]); + } + + @Override + public ResultInterface getValue(SessionLocal session) { + session.getUser().checkAdmin(); + String fileName = getValue(session, 0); + String columnList = getValue(session, 1); + Csv csv = new Csv(); + String options = getValue(session, 2); + String charset = null; + if (options != null && options.indexOf('=') >= 0) { + charset = csv.setOptions(options); + } else { + charset = options; + String fieldSeparatorRead = getValue(session, 3); + String fieldDelimiter = getValue(session, 4); + String escapeCharacter = getValue(session, 5); + String nullString = getValue(session, 6); + CSVWriteFunction.setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, escapeCharacter); + csv.setNullString(nullString); + } + char fieldSeparator = csv.getFieldSeparatorRead(); + String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); + try { + // TODO create result directly + return JavaMethod.resultSetToResult(session, csv.read(fileName, columns, charset), Integer.MAX_VALUE); + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private String getValue(SessionLocal session, int index) { + return getValue(session, args, index); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + int len = args.length; + if (len < 1 || len > 7) { + throw 
DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "1..7"); + } + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + session.getUser().checkAdmin(); + String fileName = getValue(session, args, 0); + if (fileName == null) { + throw DbException.get(ErrorCode.PARAMETER_NOT_SET_1, "fileName"); + } + String columnList = getValue(session, args, 1); + Csv csv = new Csv(); + String options = getValue(session, args, 2); + String charset = null; + if (options != null && options.indexOf('=') >= 0) { + charset = csv.setOptions(options); + } else { + charset = options; + String fieldSeparatorRead = getValue(session, args, 3); + String fieldDelimiter = getValue(session, args, 4); + String escapeCharacter = getValue(session, args, 5); + CSVWriteFunction.setCsvDelimiterEscape(csv, fieldSeparatorRead, fieldDelimiter, escapeCharacter); + } + char fieldSeparator = csv.getFieldSeparatorRead(); + String[] columns = StringUtils.arraySplit(columnList, fieldSeparator, true); + ResultInterface result; + try (ResultSet rs = csv.read(fileName, columns, charset)) { + result = JavaMethod.resultSetToResult(session, rs, 0); + } catch (SQLException e) { + throw DbException.convert(e); + } finally { + csv.close(); + } + return result; + } + + private static String getValue(SessionLocal session, Expression[] args, int index) { + return index < args.length ? args[index].getValue(session).getString() : null; + } + + @Override + public String getName() { + return "CSVREAD"; + } + + @Override + public boolean isDeterministic() { + return false; + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java b/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java new file mode 100644 index 0000000000..dc74497c2f --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/JavaTableFunction.java @@ -0,0 +1,63 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.schema.FunctionAlias; + +/** + * This class wraps a user-defined function. + */ +public final class JavaTableFunction extends TableFunction { + + private final FunctionAlias functionAlias; + private final FunctionAlias.JavaMethod javaMethod; + + public JavaTableFunction(FunctionAlias functionAlias, Expression[] args) { + super(args); + this.functionAlias = functionAlias; + this.javaMethod = functionAlias.findJavaMethod(args); + if (javaMethod.getDataType() != null) { + throw DbException.get(ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, getName()); + } + } + + @Override + public ResultInterface getValue(SessionLocal session) { + return javaMethod.getTableValue(session, args, false); + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + return javaMethod.getTableValue(session, args, true); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return Expression.writeExpressions(functionAlias.getSQL(builder, sqlFlags).append('('), args, sqlFlags) + .append(')'); + } + + @Override + public String getName() { + return functionAlias.getName(); + } + + @Override + public boolean isDeterministic() { + return functionAlias.isDeterministic(); + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java b/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java new file mode 100644 index 0000000000..2a17b973ef --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/LinkSchemaFunction.java @@ -0,0 
+1,125 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.util.JdbcUtils; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.ValueVarchar; + +/** + * A LINK_SCHEMA function. + */ +public final class LinkSchemaFunction extends TableFunction { + + public LinkSchemaFunction() { + super(new Expression[6]); + } + + @Override + public ResultInterface getValue(SessionLocal session) { + session.getUser().checkAdmin(); + String targetSchema = getValue(session, 0); + String driver = getValue(session, 1); + String url = getValue(session, 2); + String user = getValue(session, 3); + String password = getValue(session, 4); + String sourceSchema = getValue(session, 5); + if (targetSchema == null || driver == null || url == null || user == null || password == null + || sourceSchema == null) { + return getValueTemplate(session); + } + Connection conn = session.createConnection(false); + Connection c2 = null; + Statement stat = null; + ResultSet rs = null; + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + try { + c2 = JdbcUtils.getConnection(driver, url, user, password); + stat = conn.createStatement(); + stat.execute(StringUtils.quoteIdentifier(new StringBuilder("CREATE SCHEMA IF NOT EXISTS "), targetSchema) + .toString()); + // Workaround for PostgreSQL to avoid index names + if (url.startsWith("jdbc:postgresql:")) { + rs = c2.getMetaData().getTables(null, sourceSchema, null, + new 
String[] { "TABLE", "LINKED TABLE", "VIEW", "EXTERNAL" }); + } else { + rs = c2.getMetaData().getTables(null, sourceSchema, null, null); + } + while (rs.next()) { + String table = rs.getString("TABLE_NAME"); + StringBuilder buff = new StringBuilder(); + buff.append("DROP TABLE IF EXISTS "); + StringUtils.quoteIdentifier(buff, targetSchema).append('.'); + StringUtils.quoteIdentifier(buff, table); + stat.execute(buff.toString()); + buff.setLength(0); + buff.append("CREATE LINKED TABLE "); + StringUtils.quoteIdentifier(buff, targetSchema).append('.'); + StringUtils.quoteIdentifier(buff, table).append('('); + StringUtils.quoteStringSQL(buff, driver).append(", "); + StringUtils.quoteStringSQL(buff, url).append(", "); + StringUtils.quoteStringSQL(buff, user).append(", "); + StringUtils.quoteStringSQL(buff, password).append(", "); + StringUtils.quoteStringSQL(buff, sourceSchema).append(", "); + StringUtils.quoteStringSQL(buff, table).append(')'); + stat.execute(buff.toString()); + result.addRow(ValueVarchar.get(table, session)); + } + } catch (SQLException e) { + result.close(); + throw DbException.convert(e); + } finally { + JdbcUtils.closeSilently(rs); + JdbcUtils.closeSilently(c2); + JdbcUtils.closeSilently(stat); + } + return result; + } + + private String getValue(SessionLocal session, int index) { + return args[index].getValue(session).getString(); + } + + @Override + public void optimize(SessionLocal session) { + super.optimize(session); + int len = args.length; + if (len != 6) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, getName(), "6"); + } + } + + @Override + public ResultInterface getValueTemplate(SessionLocal session) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + return result; + } + + @Override + public String getName() { + return "LINK_SCHEMA"; + } + + @Override + public boolean isDeterministic() { + return false; + } + +} diff --git 
a/h2/src/main/org/h2/expression/function/table/TableFunction.java b/h2/src/main/org/h2/expression/function/table/TableFunction.java new file mode 100644 index 0000000000..729421f883 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/TableFunction.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.expression.function.table; + +import java.util.Arrays; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionWithVariableParameters; +import org.h2.expression.function.NamedExpression; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.util.HasSQL; + +/** + * A table value function. + */ +public abstract class TableFunction implements HasSQL, NamedExpression, ExpressionWithVariableParameters { + + protected Expression[] args; + + private int argsCount; + + protected TableFunction(Expression[] args) { + this.args = args; + } + + @Override + public void addParameter(Expression param) { + int capacity = args.length; + if (argsCount >= capacity) { + args = Arrays.copyOf(args, capacity * 2); + } + args[argsCount++] = param; + } + + @Override + public void doneWithParameters() throws DbException { + if (args.length != argsCount) { + args = Arrays.copyOf(args, argsCount); + } + } + + /** + * Get a result with. + * + * @param session + * the session + * @return the result + */ + public abstract ResultInterface getValue(SessionLocal session); + + /** + * Get an empty result with the column names set. 
+ * + * @param session + * the session + * @return the empty result + */ + public abstract ResultInterface getValueTemplate(SessionLocal session); + + /** + * Try to optimize this table function + * + * @param session + * the session + */ + public void optimize(SessionLocal session) { + for (int i = 0, l = args.length; i < l; i++) { + args[i] = args[i].optimize(session); + } + } + + /** + * Whether the function always returns the same result for the same + * parameters. + * + * @return true if it does + */ + public abstract boolean isDeterministic(); + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + return Expression.writeExpressions(builder.append(getName()).append('('), args, sqlFlags).append(')'); + } + +} diff --git a/h2/src/main/org/h2/expression/function/table/package.html b/h2/src/main/org/h2/expression/function/table/package.html new file mode 100644 index 0000000000..8dd9d74c78 --- /dev/null +++ b/h2/src/main/org/h2/expression/function/table/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Table value functions. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/expression/package.html b/h2/src/main/org/h2/expression/package.html index b619c969e8..7bf9c9620d 100644 --- a/h2/src/main/org/h2/expression/package.html +++ b/h2/src/main/org/h2/expression/package.html @@ -1,7 +1,7 @@ @@ -9,6 +9,6 @@ Javadoc package documentation

    -Expressions include mathematical operations, conditions, simple values, and functions. +Expressions include mathematical operations, simple values, and others.

    \ No newline at end of file diff --git a/h2/src/main/org/h2/fulltext/FullText.java b/h2/src/main/org/h2/fulltext/FullText.java index 58a747007e..8d7dd71b34 100644 --- a/h2/src/main/org/h2/fulltext/FullText.java +++ b/h2/src/main/org/h2/fulltext/FullText.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; @@ -18,26 +18,26 @@ import java.sql.Types; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; +import java.util.Set; import java.util.StringTokenizer; import java.util.UUID; + import org.h2.api.Trigger; import org.h2.command.Parser; -import org.h2.engine.Session; -import org.h2.expression.Comparison; -import org.h2.expression.ConditionAndOr; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ValueExpression; +import org.h2.expression.condition.Comparison; +import org.h2.expression.condition.ConditionAndOr; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; -import org.h2.util.New; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * This class implements the native full text search. 
@@ -75,7 +75,7 @@ public class FullText { private static final String SELECT_MAP_BY_WORD_ID = "SELECT ROWID FROM " + SCHEMA + ".MAP WHERE WORDID=?"; private static final String SELECT_ROW_BY_ID = - "SELECT KEY, INDEXID FROM " + SCHEMA + ".ROWS WHERE ID=?"; + "SELECT `KEY`, INDEXID FROM " + SCHEMA + ".ROWS WHERE ID=?"; /** * The column name of the result set returned by the search method. @@ -103,38 +103,34 @@ public class FullText { *
    * * @param conn the connection + * @throws SQLException on failure */ public static void init(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE SCHEMA IF NOT EXISTS " + SCHEMA); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".INDEXES(ID INT AUTO_INCREMENT PRIMARY KEY, " + - "SCHEMA VARCHAR, TABLE VARCHAR, COLUMNS VARCHAR, " + - "UNIQUE(SCHEMA, TABLE))"); + ".INDEXES(ID INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + + "SCHEMA VARCHAR, `TABLE` VARCHAR, COLUMNS VARCHAR, " + + "UNIQUE(SCHEMA, `TABLE`))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".WORDS(ID INT AUTO_INCREMENT PRIMARY KEY, " + + ".WORDS(ID INT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + "NAME VARCHAR, UNIQUE(NAME))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".ROWS(ID IDENTITY, HASH INT, INDEXID INT, " + - "KEY VARCHAR, UNIQUE(HASH, INDEXID, KEY))"); + ".ROWS(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, HASH INT, INDEXID INT, " + + "`KEY` VARCHAR, UNIQUE(HASH, INDEXID, `KEY`))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + ".MAP(ROWID INT, WORDID INT, PRIMARY KEY(WORDID, ROWID))"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + ".IGNORELIST(LIST VARCHAR)"); stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".SETTINGS(KEY VARCHAR PRIMARY KEY, VALUE VARCHAR)"); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_CREATE_INDEX FOR \"" + - FullText.class.getName() + ".createIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_INDEX FOR \"" + - FullText.class.getName() + ".dropIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH FOR \"" + - FullText.class.getName() + ".search\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH_DATA FOR \"" + - FullText.class.getName() + ".searchData\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_REINDEX FOR \"" + - FullText.class.getName() + ".reindex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_ALL FOR 
\"" + - FullText.class.getName() + ".dropAll\""); + ".SETTINGS(`KEY` VARCHAR PRIMARY KEY, `VALUE` VARCHAR)"); + String className = FullText.class.getName(); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_CREATE_INDEX FOR '" + className + ".createIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_INDEX FOR '" + className + ".dropIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH FOR '" + className + ".search'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_SEARCH_DATA FOR '" + className + ".searchData'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_REINDEX FOR '" + className + ".reindex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_DROP_ALL FOR '" + className + ".dropAll'"); FullTextSettings setting = FullTextSettings.getInstance(conn); ResultSet rs = stat.executeQuery("SELECT * FROM " + SCHEMA + ".IGNORELIST"); @@ -151,13 +147,12 @@ public static void init(Connection conn) throws SQLException { } } rs = stat.executeQuery("SELECT * FROM " + SCHEMA + ".WORDS"); - HashMap map = setting.getWordList(); while (rs.next()) { String word = rs.getString("NAME"); int id = rs.getInt("ID"); word = setting.convertWord(word); if (word != null) { - map.put(word, id); + setting.addWord(word, id); } } setting.setInitialized(true); @@ -171,12 +166,13 @@ public static void init(Connection conn) throws SQLException { * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) * @param columnList the column list (null for all columns) + * @throws SQLException on failure */ public static void createIndex(Connection conn, String schema, String table, String columnList) throws SQLException { init(conn); PreparedStatement prep = conn.prepareStatement("INSERT INTO " + SCHEMA - + ".INDEXES(SCHEMA, TABLE, COLUMNS) VALUES(?, ?, ?)"); + + ".INDEXES(SCHEMA, `TABLE`, COLUMNS) VALUES(?, ?, ?)"); prep.setString(1, schema); prep.setString(2, table); prep.setString(3, columnList); @@ -190,12 +186,13 @@ public static void 
createIndex(Connection conn, String schema, * usually not needed, as the index is kept up-to-date automatically. * * @param conn the connection + * @throws SQLException on failure */ public static void reindex(Connection conn) throws SQLException { init(conn); removeAllTriggers(conn, TRIGGER_PREFIX); FullTextSettings setting = FullTextSettings.getInstance(conn); - setting.getWordList().clear(); + setting.clearWordList(); Statement stat = conn.createStatement(); stat.execute("TRUNCATE TABLE " + SCHEMA + ".WORDS"); stat.execute("TRUNCATE TABLE " + SCHEMA + ".ROWS"); @@ -216,12 +213,13 @@ public static void reindex(Connection conn) throws SQLException { * @param conn the connection * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) + * @throws SQLException on failure */ public static void dropIndex(Connection conn, String schema, String table) throws SQLException { init(conn); PreparedStatement prep = conn.prepareStatement("SELECT ID FROM " + SCHEMA - + ".INDEXES WHERE SCHEMA=? AND TABLE=?"); + + ".INDEXES WHERE SCHEMA=? AND `TABLE`=?"); prep.setString(1, schema); prep.setString(2, table); ResultSet rs = prep.executeQuery(); @@ -243,9 +241,9 @@ public static void dropIndex(Connection conn, String schema, String table) break; } } - prep = conn.prepareStatement("DELETE FROM " + SCHEMA + ".MAP M " + + prep = conn.prepareStatement("DELETE FROM " + SCHEMA + ".MAP " + "WHERE NOT EXISTS (SELECT * FROM " + SCHEMA + - ".ROWS R WHERE R.ID=M.ROWID) AND ROWID<10000"); + ".ROWS R WHERE R.ID=ROWID) AND ROWID<10000"); while (true) { int deleted = prep.executeUpdate(); if (deleted == 0) { @@ -258,16 +256,17 @@ public static void dropIndex(Connection conn, String schema, String table) * Drops all full text indexes from the database. 
* * @param conn the connection + * @throws SQLException on failure */ public static void dropAll(Connection conn) throws SQLException { init(conn); Statement stat = conn.createStatement(); - stat.execute("DROP SCHEMA IF EXISTS " + SCHEMA); + stat.execute("DROP SCHEMA IF EXISTS " + SCHEMA + " CASCADE"); removeAllTriggers(conn, TRIGGER_PREFIX); FullTextSettings setting = FullTextSettings.getInstance(conn); setting.removeAllIndexes(); - setting.getIgnoreList().clear(); - setting.getWordList().clear(); + setting.clearIgnored(); + setting.clearWordList(); } /** @@ -285,6 +284,7 @@ public static void dropAll(Connection conn) throws SQLException { * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet search(Connection conn, String text, int limit, int offset) throws SQLException { @@ -316,6 +316,7 @@ public static ResultSet search(Connection conn, String text, int limit, * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet searchData(Connection conn, String text, int limit, int offset) throws SQLException { @@ -334,6 +335,7 @@ public static ResultSet searchData(Connection conn, String text, int limit, * * @param conn the connection * @param commaSeparatedList the list + * @throws SQLException on failure */ public static void setIgnoreList(Connection conn, String commaSeparatedList) throws SQLException { @@ -359,6 +361,7 @@ public static void setIgnoreList(Connection conn, String commaSeparatedList) * * @param conn the connection * @param whitespaceChars the list of characters + * @throws SQLException on failure */ public static void setWhitespaceChars(Connection conn, String whitespaceChars) throws SQLException { @@ -383,6 +386,7 @@ public static void setWhitespaceChars(Connection conn, * @param 
data the object * @param type the SQL type * @return the string + * @throws SQLException on failure */ protected static String asString(Object data, int type) throws SQLException { if (data == null) { @@ -446,8 +450,8 @@ protected static SimpleResultSet createResultSet(boolean data) { if (data) { result.addColumn(FullText.FIELD_SCHEMA, Types.VARCHAR, 0, 0); result.addColumn(FullText.FIELD_TABLE, Types.VARCHAR, 0, 0); - result.addColumn(FullText.FIELD_COLUMNS, Types.ARRAY, 0, 0); - result.addColumn(FullText.FIELD_KEYS, Types.ARRAY, 0, 0); + result.addColumn(FullText.FIELD_COLUMNS, Types.ARRAY, "VARCHAR ARRAY", 0, 0); + result.addColumn(FullText.FIELD_KEYS, Types.ARRAY, "VARCHAR ARRAY", 0, 0); } else { result.addColumn(FullText.FIELD_QUERY, Types.VARCHAR, 0, 0); } @@ -462,19 +466,17 @@ protected static SimpleResultSet createResultSet(boolean data) { * @param key the primary key condition as a string * @return an array containing the column name list and the data list */ - protected static Object[][] parseKey(Connection conn, String key) { - ArrayList columns = New.arrayList(); - ArrayList data = New.arrayList(); + protected static String[][] parseKey(Connection conn, String key) { + ArrayList columns = Utils.newSmallArrayList(); + ArrayList data = Utils.newSmallArrayList(); JdbcConnection c = (JdbcConnection) conn; - Session session = (Session) c.getSession(); + SessionLocal session = (SessionLocal) c.getSession(); Parser p = new Parser(session); Expression expr = p.parseExpression(key); - addColumnData(columns, data, expr); - Object[] col = new Object[columns.size()]; - columns.toArray(col); - Object[] dat = new Object[columns.size()]; - data.toArray(dat); - Object[][] columnData = { col, dat }; + addColumnData(session, columns, data, expr); + String[] col = columns.toArray(new String[0]); + String[] dat = data.toArray(new String[0]); + String[][] columnData = { col, dat }; return columnData; } @@ -485,6 +487,7 @@ protected static Object[][] parseKey(Connection 
conn, String key) { * @param data the object * @param type the SQL type * @return the SQL String + * @throws SQLException on failure */ protected static String quoteSQL(Object data, int type) throws SQLException { if (data == null) { @@ -514,9 +517,12 @@ protected static String quoteSQL(Object data, int type) throws SQLException { case Types.LONGVARBINARY: case Types.BINARY: if (data instanceof UUID) { - return "'" + data.toString() + "'"; + return "'" + data + "'"; } - return "'" + StringUtils.convertBytesToHex((byte[]) data) + "'"; + byte[] bytes = (byte[]) data; + StringBuilder builder = new StringBuilder(bytes.length * 2 + 2).append('\''); + StringUtils.convertBytesToHex(builder, bytes).append('\''); + return builder.toString(); case Types.CLOB: case Types.JAVA_OBJECT: case Types.OTHER: @@ -538,11 +544,13 @@ protected static String quoteSQL(Object data, int type) throws SQLException { * * @param conn the database connection * @param prefix the prefix + * @throws SQLException on failure */ protected static void removeAllTriggers(Connection conn, String prefix) throws SQLException { Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.TRIGGERS"); + ResultSet rs = stat.executeQuery( + "SELECT DISTINCT TRIGGER_SCHEMA, TRIGGER_NAME FROM INFORMATION_SCHEMA.TRIGGERS"); Statement stat2 = conn.createStatement(); while (rs.next()) { String schema = rs.getString("TRIGGER_SCHEMA"); @@ -561,6 +569,7 @@ protected static void removeAllTriggers(Connection conn, String prefix) * @param index the column indices (will be modified) * @param keys the key list * @param columns the column list + * @throws SQLException on failure */ protected static void setColumns(int[] index, ArrayList keys, ArrayList columns) throws SQLException { @@ -590,6 +599,7 @@ protected static void setColumns(int[] index, ArrayList keys, * @param offset the offset * @param data whether the raw data should be returned * @return the result set + * 
@throws SQLException on failure */ protected static ResultSet search(Connection conn, String text, int limit, int offset, boolean data) throws SQLException { @@ -598,28 +608,27 @@ protected static ResultSet search(Connection conn, String text, int limit, // this is just to query the result set columns return result; } - if (text == null || text.trim().length() == 0) { + if (text == null || StringUtils.isWhitespaceOrEmpty(text)) { return result; } FullTextSettings setting = FullTextSettings.getInstance(conn); if (!setting.isInitialized()) { init(conn); } - HashSet words = New.hashSet(); + Set words = new HashSet<>(); addWords(setting, words, text); - HashSet rIds = null, lastRowIds = null; - HashMap allWords = setting.getWordList(); + Set rIds = null, lastRowIds; PreparedStatement prepSelectMapByWordId = setting.prepare(conn, SELECT_MAP_BY_WORD_ID); for (String word : words) { lastRowIds = rIds; - rIds = New.hashSet(); - Integer wId = allWords.get(word); + rIds = new HashSet<>(); + Integer wId = setting.getWordId(word); if (wId == null) { continue; } - prepSelectMapByWordId.setInt(1, wId.intValue()); + prepSelectMapByWordId.setInt(1, wId); ResultSet rs = prepSelectMapByWordId.executeQuery(); while (rs.next()) { Integer rId = rs.getInt(1); @@ -628,7 +637,7 @@ protected static ResultSet search(Connection conn, String text, int limit, } } } - if (rIds == null || rIds.size() == 0) { + if (rIds == null || rIds.isEmpty()) { return result; } PreparedStatement prepSelectRowById = setting.prepare(conn, SELECT_ROW_BY_ID); @@ -646,7 +655,7 @@ protected static ResultSet search(Connection conn, String text, int limit, int indexId = rs.getInt(2); IndexInfo index = setting.getIndexInfo(indexId); if (data) { - Object[][] columnData = parseKey(conn, key); + String[][] columnData = parseKey(conn, key); result.addRow( index.schema, index.table, @@ -668,23 +677,21 @@ protected static ResultSet search(Connection conn, String text, int limit, return result; } - private static void 
addColumnData(ArrayList columns, - ArrayList data, Expression expr) { + private static void addColumnData(SessionLocal session, ArrayList columns, ArrayList data, + Expression expr) { if (expr instanceof ConditionAndOr) { ConditionAndOr and = (ConditionAndOr) expr; - Expression left = and.getExpression(true); - Expression right = and.getExpression(false); - addColumnData(columns, data, left); - addColumnData(columns, data, right); + addColumnData(session, columns, data, and.getSubexpression(0)); + addColumnData(session, columns, data, and.getSubexpression(1)); } else { Comparison comp = (Comparison) expr; - ExpressionColumn ec = (ExpressionColumn) comp.getExpression(true); - ValueExpression ev = (ValueExpression) comp.getExpression(false); - String columnName = ec.getColumnName(); + ExpressionColumn ec = (ExpressionColumn) comp.getSubexpression(0); + String columnName = ec.getColumnName(session, -1); columns.add(columnName); - if (ev == null) { + if (expr.getSubexpressionCount() == 1) { data.add(null); } else { + ValueExpression ev = (ValueExpression) comp.getSubexpression(1); data.add(ev.getValue(null).getString()); } } @@ -698,7 +705,7 @@ private static void addColumnData(ArrayList columns, * @param reader the reader */ protected static void addWords(FullTextSettings setting, - HashSet set, Reader reader) { + Set set, Reader reader) { StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); tokenizer.wordChars(' ' + 1, 255); @@ -732,7 +739,7 @@ protected static void addWords(FullTextSettings setting, * @param text the text */ protected static void addWords(FullTextSettings setting, - HashSet set, String text) { + Set set, String text) { String whitespaceChars = setting.getWhitespaceChars(); StringTokenizer tokenizer = new StringTokenizer(text, whitespaceChars); while (tokenizer.hasMoreTokens()) { @@ -750,31 +757,36 @@ protected static void addWords(FullTextSettings setting, * @param conn the database connection * @param schema the schema 
name * @param table the table name + * @throws SQLException on failure */ - protected static void createTrigger(Connection conn, String schema, + private static void createTrigger(Connection conn, String schema, String table) throws SQLException { createOrDropTrigger(conn, schema, table, true); } private static void createOrDropTrigger(Connection conn, String schema, String table, boolean create) throws SQLException { - Statement stat = conn.createStatement(); - String trigger = StringUtils.quoteIdentifier(schema) + "." - + StringUtils.quoteIdentifier(TRIGGER_PREFIX + table); - stat.execute("DROP TRIGGER IF EXISTS " + trigger); - if (create) { - StringBuilder buff = new StringBuilder("CREATE TRIGGER IF NOT EXISTS "); - // needs to be called on rollback as well, because we use the init - // connection do to changes in the index (not the user connection) - buff.append(trigger). - append(" AFTER INSERT, UPDATE, DELETE, ROLLBACK ON "). - append(StringUtils.quoteIdentifier(schema)). - append('.'). - append(StringUtils.quoteIdentifier(table)). - append(" FOR EACH ROW CALL \""). - append(FullText.FullTextTrigger.class.getName()). - append('\"'); - stat.execute(buff.toString()); + try (Statement stat = conn.createStatement()) { + String trigger = StringUtils.quoteIdentifier(schema) + "." + + StringUtils.quoteIdentifier(TRIGGER_PREFIX + table); + stat.execute("DROP TRIGGER IF EXISTS " + trigger); + if (create) { + StringBuilder buff = new StringBuilder( + "CREATE TRIGGER IF NOT EXISTS "); + // unless multithread, trigger needs to be called on rollback as well, + // because we use the init connection do to changes in the index + // (not the user connection) + buff.append(trigger). + append(" AFTER INSERT, UPDATE, DELETE"); + buff.append(" ON "); + StringUtils.quoteIdentifier(buff, schema). + append('.'); + StringUtils.quoteIdentifier(buff, table). + append(" FOR EACH ROW CALL \""). + append(FullText.FullTextTrigger.class.getName()). 
+ append('"'); + stat.execute(buff.toString()); + } } } @@ -784,13 +796,14 @@ private static void createOrDropTrigger(Connection conn, * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ - protected static void indexExistingRows(Connection conn, String schema, + private static void indexExistingRows(Connection conn, String schema, String table) throws SQLException { FullText.FullTextTrigger existing = new FullText.FullTextTrigger(); existing.init(conn, schema, null, table, false, Trigger.INSERT); - String sql = "SELECT * FROM " + StringUtils.quoteIdentifier(schema) + - "." + StringUtils.quoteIdentifier(table); + String sql = "SELECT * FROM " + StringUtils.quoteIdentifier(schema) + + "." + StringUtils.quoteIdentifier(table); ResultSet rs = conn.createStatement().executeQuery(sql); int columnCount = rs.getMetaData().getColumnCount(); while (rs.next()) { @@ -823,13 +836,7 @@ private static String quoteString(String data) { private static void setIgnoreList(FullTextSettings setting, String commaSeparatedList) { String[] list = StringUtils.arraySplit(commaSeparatedList, ',', true); - HashSet set = setting.getIgnoreList(); - for (String word : list) { - String converted = setting.convertWord(word); - if (converted != null) { - set.add(converted); - } - } + setting.addIgnored(Arrays.asList(list)); } /** @@ -860,17 +867,30 @@ protected static boolean hasChanged(Object[] oldRow, Object[] newRow, /** * Trigger updates the index when a inserting, updating, or deleting a row. 
*/ - public static class FullTextTrigger implements Trigger { + public static final class FullTextTrigger implements Trigger { + private FullTextSettings setting; + private IndexInfo index; + private int[] columnTypes; + + private static final int INSERT_WORD = 0; + private static final int INSERT_ROW = 1; + private static final int INSERT_MAP = 2; + private static final int DELETE_ROW = 3; + private static final int DELETE_MAP = 4; + private static final int SELECT_ROW = 5; - protected FullTextSettings setting; - protected IndexInfo index; - protected int[] columnTypes; - protected PreparedStatement prepInsertWord, prepInsertRow, prepInsertMap; - protected PreparedStatement prepDeleteRow, prepDeleteMap; - protected PreparedStatement prepSelectRow; + private static final String[] SQL = { + "MERGE INTO " + SCHEMA + ".WORDS(NAME) KEY(NAME) VALUES(?)", + "INSERT INTO " + SCHEMA + ".ROWS(HASH, INDEXID, `KEY`) VALUES(?, ?, ?)", + "INSERT INTO " + SCHEMA + ".MAP(ROWID, WORDID) VALUES(?, ?)", + "DELETE FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND `KEY`=?", + "DELETE FROM " + SCHEMA + ".MAP WHERE ROWID=? AND WORDID=?", + "SELECT ID FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND `KEY`=?" 
+ }; /** * INTERNAL + * @see Trigger#init(Connection, String, String, String, boolean, int) */ @Override public void init(Connection conn, String schemaName, String triggerName, @@ -879,13 +899,13 @@ public void init(Connection conn, String schemaName, String triggerName, if (!setting.isInitialized()) { FullText.init(conn); } - ArrayList keyList = New.arrayList(); + ArrayList keyList = Utils.newSmallArrayList(); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getColumns(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), null); - ArrayList columnList = New.arrayList(); + ArrayList columnList = Utils.newSmallArrayList(); while (rs.next()) { columnList.add(rs.getString("COLUMN_NAME")); } @@ -893,8 +913,7 @@ public void init(Connection conn, String schemaName, String triggerName, index = new IndexInfo(); index.schema = schemaName; index.table = tableName; - index.columns = new String[columnList.size()]; - columnList.toArray(index.columns); + index.columns = columnList.toArray(new String[0]); rs = meta.getColumns(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), @@ -902,7 +921,7 @@ public void init(Connection conn, String schemaName, String triggerName, for (int i = 0; rs.next(); i++) { columnTypes[i] = rs.getInt("DATA_TYPE"); } - if (keyList.size() == 0) { + if (keyList.isEmpty()) { rs = meta.getPrimaryKeys(null, StringUtils.escapeMetaDataPattern(schemaName), tableName); @@ -910,12 +929,13 @@ public void init(Connection conn, String schemaName, String triggerName, keyList.add(rs.getString("COLUMN_NAME")); } } - if (keyList.size() == 0) { + if (keyList.isEmpty()) { throw throwException("No primary key for table " + tableName); } - ArrayList indexList = New.arrayList(); + ArrayList indexList = Utils.newSmallArrayList(); PreparedStatement prep = conn.prepareStatement( - "SELECT ID, COLUMNS FROM " + SCHEMA + ".INDEXES WHERE SCHEMA=? 
AND TABLE=?"); + "SELECT ID, COLUMNS FROM " + SCHEMA + ".INDEXES" + + " WHERE SCHEMA=? AND `TABLE`=?"); prep.setString(1, schemaName); prep.setString(2, tableName); rs = prep.executeQuery(); @@ -923,12 +943,10 @@ public void init(Connection conn, String schemaName, String triggerName, index.id = rs.getInt(1); String columns = rs.getString(2); if (columns != null) { - for (String s : StringUtils.arraySplit(columns, ',', true)) { - indexList.add(s); - } + Collections.addAll(indexList, StringUtils.arraySplit(columns, ',', true)); } } - if (indexList.size() == 0) { + if (indexList.isEmpty()) { indexList.addAll(columnList); } index.keys = new int[keyList.size()]; @@ -936,22 +954,11 @@ public void init(Connection conn, String schemaName, String triggerName, index.indexColumns = new int[indexList.size()]; setColumns(index.indexColumns, indexList, columnList); setting.addIndexInfo(index); - prepInsertWord = conn.prepareStatement( - "INSERT INTO " + SCHEMA + ".WORDS(NAME) VALUES(?)"); - prepInsertRow = conn.prepareStatement( - "INSERT INTO " + SCHEMA + ".ROWS(HASH, INDEXID, KEY) VALUES(?, ?, ?)"); - prepInsertMap = conn.prepareStatement( - "INSERT INTO " + SCHEMA + ".MAP(ROWID, WORDID) VALUES(?, ?)"); - prepDeleteRow = conn.prepareStatement( - "DELETE FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? AND KEY=?"); - prepDeleteMap = conn.prepareStatement( - "DELETE FROM " + SCHEMA + ".MAP WHERE ROWID=? AND WORDID=?"); - prepSelectRow = conn.prepareStatement( - "SELECT ID FROM " + SCHEMA + ".ROWS WHERE HASH=? AND INDEXID=? 
AND KEY=?"); } /** * INTERNAL + * @see Trigger#fire(Connection, Object[], Object[]) */ @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) @@ -960,16 +967,16 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) if (newRow != null) { // update if (hasChanged(oldRow, newRow, index.indexColumns)) { - delete(oldRow); - insert(newRow); + delete(conn, oldRow); + insert(conn, newRow); } } else { // delete - delete(oldRow); + delete(conn, oldRow); } } else if (newRow != null) { // insert - insert(newRow); + insert(conn, newRow); } } @@ -992,55 +999,81 @@ public void remove() { /** * Add a row to the index. * + * @param conn to use * @param row the row + * @throws SQLException on failure */ - protected void insert(Object[] row) throws SQLException { - String key = getKey(row); - int hash = key.hashCode(); - prepInsertRow.setInt(1, hash); - prepInsertRow.setInt(2, index.id); - prepInsertRow.setString(3, key); - prepInsertRow.execute(); - ResultSet rs = prepInsertRow.getGeneratedKeys(); - rs.next(); - int rowId = rs.getInt(1); - prepInsertMap.setInt(1, rowId); - int[] wordIds = getWordIds(row); - for (int id : wordIds) { - prepInsertMap.setInt(2, id); - prepInsertMap.execute(); + private void insert(Connection conn, Object[] row) throws SQLException { + PreparedStatement prepInsertRow = null; + PreparedStatement prepInsertMap = null; + try { + String key = getKey(row); + int hash = key.hashCode(); + prepInsertRow = getStatement(conn, INSERT_ROW); + prepInsertRow.setInt(1, hash); + prepInsertRow.setInt(2, index.id); + prepInsertRow.setString(3, key); + prepInsertRow.execute(); + ResultSet rs = prepInsertRow.getGeneratedKeys(); + rs.next(); + int rowId = rs.getInt(1); + + prepInsertMap = getStatement(conn, INSERT_MAP); + prepInsertMap.setInt(1, rowId); + int[] wordIds = getWordIds(conn, row); + for (int id : wordIds) { + prepInsertMap.setInt(2, id); + prepInsertMap.execute(); + } + } finally { + IOUtils.closeSilently(prepInsertRow); + 
IOUtils.closeSilently(prepInsertMap); } } /** * Delete a row from the index. * + * @param conn to use * @param row the row + * @throws SQLException on failure */ - protected void delete(Object[] row) throws SQLException { - String key = getKey(row); - int hash = key.hashCode(); - prepSelectRow.setInt(1, hash); - prepSelectRow.setInt(2, index.id); - prepSelectRow.setString(3, key); - ResultSet rs = prepSelectRow.executeQuery(); - if (rs.next()) { - int rowId = rs.getInt(1); - prepDeleteMap.setInt(1, rowId); - int[] wordIds = getWordIds(row); - for (int id : wordIds) { - prepDeleteMap.setInt(2, id); - prepDeleteMap.executeUpdate(); + private void delete(Connection conn, Object[] row) throws SQLException { + PreparedStatement prepSelectRow = null; + PreparedStatement prepDeleteMap = null; + PreparedStatement prepDeleteRow = null; + try { + String key = getKey(row); + int hash = key.hashCode(); + prepSelectRow = getStatement(conn, SELECT_ROW); + prepSelectRow.setInt(1, hash); + prepSelectRow.setInt(2, index.id); + prepSelectRow.setString(3, key); + ResultSet rs = prepSelectRow.executeQuery(); + prepDeleteMap = getStatement(conn, DELETE_MAP); + prepDeleteRow = getStatement(conn, DELETE_ROW); + if (rs.next()) { + int rowId = rs.getInt(1); + prepDeleteMap.setInt(1, rowId); + int[] wordIds = getWordIds(conn, row); + for (int id : wordIds) { + prepDeleteMap.setInt(2, id); + prepDeleteMap.executeUpdate(); + } + prepDeleteRow.setInt(1, hash); + prepDeleteRow.setInt(2, index.id); + prepDeleteRow.setString(3, key); + prepDeleteRow.executeUpdate(); } - prepDeleteRow.setInt(1, hash); - prepDeleteRow.setInt(2, index.id); - prepDeleteRow.setString(3, key); - prepDeleteRow.executeUpdate(); + } finally { + IOUtils.closeSilently(prepSelectRow); + IOUtils.closeSilently(prepDeleteMap); + IOUtils.closeSilently(prepDeleteRow); } } - private int[] getWordIds(Object[] row) throws SQLException { - HashSet words = New.hashSet(); + private int[] getWordIds(Connection conn, Object[] row) throws 
SQLException { + HashSet words = new HashSet<>(); for (int idx : index.indexColumns) { int type = columnTypes[idx]; Object data = row[idx]; @@ -1057,42 +1090,57 @@ private int[] getWordIds(Object[] row) throws SQLException { addWords(setting, words, string); } } - HashMap allWords = setting.getWordList(); - int[] wordIds = new int[words.size()]; - Iterator it = words.iterator(); - for (int i = 0; it.hasNext(); i++) { - String word = it.next(); - Integer wId = allWords.get(word); - int wordId; - if (wId == null) { - prepInsertWord.setString(1, word); - prepInsertWord.execute(); - ResultSet rs = prepInsertWord.getGeneratedKeys(); - rs.next(); - wordId = rs.getInt(1); - allWords.put(word, wordId); - } else { - wordId = wId.intValue(); + PreparedStatement prepInsertWord = null; + try { + prepInsertWord = getStatement(conn, INSERT_WORD); + int[] wordIds = new int[words.size()]; + int i = 0; + for (String word : words) { + int wordId; + Integer wId; + while((wId = setting.getWordId(word)) == null) { + prepInsertWord.setString(1, word); + prepInsertWord.execute(); + ResultSet rs = prepInsertWord.getGeneratedKeys(); + if (rs.next()) { + wordId = rs.getInt(1); + if (wordId != 0) { + setting.addWord(word, wordId); + wId = wordId; + break; + } + } + } + wordIds[i++] = wId; } - wordIds[i] = wordId; + Arrays.sort(wordIds); + return wordIds; + } finally { + IOUtils.closeSilently(prepInsertWord); } - Arrays.sort(wordIds); - return wordIds; } private String getKey(Object[] row) throws SQLException { - StatementBuilder buff = new StatementBuilder(); - for (int columnIndex : index.keys) { - buff.appendExceptFirst(" AND "); - buff.append(StringUtils.quoteIdentifier(index.columns[columnIndex])); + StringBuilder builder = new StringBuilder(); + int[] keys = index.keys; + for (int i = 0, l = keys.length; i < l; i++) { + if (i > 0) { + builder.append(" AND "); + } + int columnIndex = keys[i]; + StringUtils.quoteIdentifier(builder, index.columns[columnIndex]); Object o = row[columnIndex]; 
if (o == null) { - buff.append(" IS NULL"); + builder.append(" IS NULL"); } else { - buff.append('=').append(quoteSQL(o, columnTypes[columnIndex])); + builder.append('=').append(quoteSQL(o, columnTypes[columnIndex])); } } - return buff.toString(); + return builder.toString(); + } + + private static PreparedStatement getStatement(Connection conn, int index) throws SQLException { + return conn.prepareStatement(SQL[index], Statement.RETURN_GENERATED_KEYS); } } @@ -1116,5 +1164,4 @@ protected static SQLException throwException(String message) throws SQLException { throw new SQLException(message, "FULLTEXT"); } - } diff --git a/h2/src/main/org/h2/fulltext/FullTextLucene.java b/h2/src/main/org/h2/fulltext/FullTextLucene.java index 8864b624f6..802563cff3 100644 --- a/h2/src/main/org/h2/fulltext/FullTextLucene.java +++ b/h2/src/main/org/h2/fulltext/FullTextLucene.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.fulltext; import java.io.IOException; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; @@ -13,37 +14,39 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.queryParser.QueryParser; +import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Searcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; import org.h2.api.Trigger; import org.h2.command.Parser; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ExpressionColumn; import org.h2.jdbc.JdbcConnection; import org.h2.store.fs.FileUtils; import org.h2.tools.SimpleResultSet; -import org.h2.util.New; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; import org.h2.util.Utils; -import java.io.File; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; -import 
org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.Version; -import org.apache.lucene.index.IndexWriter; /** * This class implements the full text search based on Apache Lucene. @@ -57,7 +60,7 @@ public class FullTextLucene extends FullText { protected static final boolean STORE_DOCUMENT_TEXT_IN_INDEX = Utils.getProperty("h2.storeDocumentTextInIndex", false); - private static final HashMap INDEX_ACCESS = New.hashMap(); + private static final HashMap INDEX_ACCESS = new HashMap<>(); private static final String TRIGGER_PREFIX = "FTL_"; private static final String SCHEMA = "FTL"; private static final String LUCENE_FIELD_DATA = "_DATA"; @@ -92,29 +95,21 @@ public class FullTextLucene extends FullText { * * * @param conn the connection + * @throws SQLException on failure */ public static void init(Connection conn) throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("CREATE SCHEMA IF NOT EXISTS " + SCHEMA); - stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + - ".INDEXES(SCHEMA VARCHAR, TABLE VARCHAR, " + - "COLUMNS VARCHAR, PRIMARY KEY(SCHEMA, TABLE))"); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_CREATE_INDEX FOR \"" + - FullTextLucene.class.getName() + ".createIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_INDEX FOR \"" + - FullTextLucene.class.getName() + ".dropIndex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH FOR \"" + - FullTextLucene.class.getName() + ".search\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH_DATA FOR \"" + - FullTextLucene.class.getName() + ".searchData\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_REINDEX FOR \"" + - FullTextLucene.class.getName() + ".reindex\""); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_ALL FOR \"" + - FullTextLucene.class.getName() + ".dropAll\""); - try { - getIndexAccess(conn); - } catch (SQLException e) { - throw 
convertException(e); + try (Statement stat = conn.createStatement()) { + stat.execute("CREATE SCHEMA IF NOT EXISTS " + SCHEMA); + stat.execute("CREATE TABLE IF NOT EXISTS " + SCHEMA + + ".INDEXES(SCHEMA VARCHAR, `TABLE` VARCHAR, " + + "COLUMNS VARCHAR, PRIMARY KEY(SCHEMA, `TABLE`))"); + String className = FullTextLucene.class.getName(); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_CREATE_INDEX FOR '" + className + ".createIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_INDEX FOR '" + className + ".dropIndex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH FOR '" + className + ".search'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_SEARCH_DATA FOR '" + className + ".searchData'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_REINDEX FOR '" + className + ".reindex'"); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_DROP_ALL FOR '" + className + ".dropAll'"); } } @@ -126,12 +121,13 @@ public static void init(Connection conn) throws SQLException { * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) * @param columnList the column list (null for all columns) + * @throws SQLException on failure */ public static void createIndex(Connection conn, String schema, String table, String columnList) throws SQLException { init(conn); PreparedStatement prep = conn.prepareStatement("INSERT INTO " + SCHEMA - + ".INDEXES(SCHEMA, TABLE, COLUMNS) VALUES(?, ?, ?)"); + + ".INDEXES(SCHEMA, `TABLE`, COLUMNS) VALUES(?, ?, ?)"); prep.setString(1, schema); prep.setString(2, table); prep.setString(3, columnList); @@ -147,21 +143,20 @@ public static void createIndex(Connection conn, String schema, * @param conn the connection * @param schema the schema name of the table (case sensitive) * @param table the table name (case sensitive) + * @throws SQLException on failure */ public static void dropIndex(Connection conn, String schema, String table) throws SQLException { init(conn); PreparedStatement prep = 
conn.prepareStatement("DELETE FROM " + SCHEMA - + ".INDEXES WHERE SCHEMA=? AND TABLE=?"); + + ".INDEXES WHERE SCHEMA=? AND `TABLE`=?"); prep.setString(1, schema); prep.setString(2, table); int rowCount = prep.executeUpdate(); - if (rowCount == 0) { - return; + if (rowCount != 0) { + reindex(conn); } - - reindex(conn); } /** @@ -169,6 +164,7 @@ public static void dropIndex(Connection conn, String schema, String table) * usually not needed, as the index is kept up-to-date automatically. * * @param conn the connection + * @throws SQLException on failure */ public static void reindex(Connection conn) throws SQLException { init(conn); @@ -188,10 +184,11 @@ public static void reindex(Connection conn) throws SQLException { * Drops all full text indexes from the database. * * @param conn the connection + * @throws SQLException on failure */ public static void dropAll(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - stat.execute("DROP SCHEMA IF EXISTS " + SCHEMA); + stat.execute("DROP SCHEMA IF EXISTS " + SCHEMA + " CASCADE"); removeAllTriggers(conn, TRIGGER_PREFIX); removeIndexFiles(conn); } @@ -210,6 +207,7 @@ public static void dropAll(Connection conn) throws SQLException { * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet search(Connection conn, String text, int limit, int offset) throws SQLException { @@ -235,6 +233,7 @@ public static ResultSet search(Connection conn, String text, int limit, * @param limit the maximum number of rows or 0 for no limit * @param offset the offset or 0 for no offset * @return the result set + * @throws SQLException on failure */ public static ResultSet searchData(Connection conn, String text, int limit, int offset) throws SQLException { @@ -248,10 +247,7 @@ public static ResultSet searchData(Connection conn, String text, int limit, * @return the converted SQL 
exception */ protected static SQLException convertException(Exception e) { - SQLException e2 = new SQLException( - "Error while indexing document", "FULLTEXT"); - e2.initCause(e); - return e2; + return new SQLException("Error while indexing document", "FULLTEXT", e); } /** @@ -260,8 +256,9 @@ protected static SQLException convertException(Exception e) { * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ - protected static void createTrigger(Connection conn, String schema, + private static void createTrigger(Connection conn, String schema, String table) throws SQLException { createOrDropTrigger(conn, schema, table, true); } @@ -273,19 +270,19 @@ private static void createOrDropTrigger(Connection conn, StringUtils.quoteIdentifier(TRIGGER_PREFIX + table); stat.execute("DROP TRIGGER IF EXISTS " + trigger); if (create) { - StringBuilder buff = new StringBuilder( + StringBuilder builder = new StringBuilder( "CREATE TRIGGER IF NOT EXISTS "); // the trigger is also called on rollback because transaction // rollback will not undo the changes in the Lucene index - buff.append(trigger). - append(" AFTER INSERT, UPDATE, DELETE, ROLLBACK ON "). - append(StringUtils.quoteIdentifier(schema)). - append('.'). - append(StringUtils.quoteIdentifier(table)). + builder.append(trigger). + append(" AFTER INSERT, UPDATE, DELETE, ROLLBACK ON "); + StringUtils.quoteIdentifier(builder, schema). + append('.'); + StringUtils.quoteIdentifier(builder, table). append(" FOR EACH ROW CALL \""). append(FullTextLucene.FullTextTrigger.class.getName()). 
append('\"'); - stat.execute(buff.toString()); + stat.execute(builder.toString()); } } @@ -294,30 +291,31 @@ private static void createOrDropTrigger(Connection conn, * * @param conn the connection * @return the index access wrapper + * @throws SQLException on failure */ protected static IndexAccess getIndexAccess(Connection conn) throws SQLException { String path = getIndexPath(conn); synchronized (INDEX_ACCESS) { IndexAccess access = INDEX_ACCESS.get(path); - if (access == null) { + while (access == null) { try { Directory indexDir = path.startsWith(IN_MEMORY_PREFIX) ? - new RAMDirectory() : FSDirectory.open(new File(path)); - boolean recreate = !IndexReader.indexExists(indexDir); - Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30); - IndexWriter writer = new IndexWriter(indexDir, analyzer, - recreate, IndexWriter.MaxFieldLength.UNLIMITED); - //see http://wiki.apache.org/lucene-java/NearRealtimeSearch - IndexReader reader = writer.getReader(); - access = new IndexAccess(); - access.writer = writer; - access.reader = reader; - access.searcher = new IndexSearcher(reader); + new ByteBuffersDirectory() : FSDirectory.open(Paths.get(path)); + Analyzer analyzer = new StandardAnalyzer(); + IndexWriterConfig conf = new IndexWriterConfig(analyzer); + conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND); + IndexWriter writer = new IndexWriter(indexDir, conf); + //see https://cwiki.apache.org/confluence/display/lucene/NearRealtimeSearch + access = new IndexAccess(writer); + } catch (IndexFormatTooOldException e) { + reindex(conn); + continue; } catch (IOException e) { throw convertException(e); } INDEX_ACCESS.put(path, access); + break; } return access; } @@ -328,6 +326,7 @@ protected static IndexAccess getIndexAccess(Connection conn) * * @param conn the database connection * @return the path + * @throws SQLException on failure */ protected static String getIndexPath(Connection conn) throws SQLException { Statement stat = conn.createStatement(); @@ -352,13 
+351,14 @@ protected static String getIndexPath(Connection conn) throws SQLException { * @param conn the database connection * @param schema the schema name * @param table the table name + * @throws SQLException on failure */ - protected static void indexExistingRows(Connection conn, String schema, + private static void indexExistingRows(Connection conn, String schema, String table) throws SQLException { FullTextLucene.FullTextTrigger existing = new FullTextLucene.FullTextTrigger(); existing.init(conn, schema, null, table, false, Trigger.INSERT); - String sql = "SELECT * FROM " + StringUtils.quoteIdentifier(schema) + - "." + StringUtils.quoteIdentifier(table); + String sql = "SELECT * FROM " + StringUtils.quoteIdentifier(schema) + + "." + StringUtils.quoteIdentifier(table); ResultSet rs = conn.createStatement().executeQuery(sql); int columnCount = rs.getMetaData().getColumnCount(); while (rs.next()) { @@ -373,10 +373,7 @@ protected static void indexExistingRows(Connection conn, String schema, private static void removeIndexFiles(Connection conn) throws SQLException { String path = getIndexPath(conn); - IndexAccess access = INDEX_ACCESS.get(path); - if (access != null) { - removeIndexAccess(access, path); - } + removeIndexAccess(path); if (!path.startsWith(IN_MEMORY_PREFIX)) { FileUtils.deleteRecursive(path, false); } @@ -386,17 +383,17 @@ private static void removeIndexFiles(Connection conn) throws SQLException { * Close the index writer and searcher and remove them from the index access * set. 
* - * @param access the index writer/searcher wrapper * @param indexPath the index path + * @throws SQLException on failure */ - protected static void removeIndexAccess(IndexAccess access, String indexPath) + protected static void removeIndexAccess(String indexPath) throws SQLException { synchronized (INDEX_ACCESS) { try { - INDEX_ACCESS.remove(indexPath); - access.searcher.close(); - access.reader.close(); - access.writer.close(); + IndexAccess access = INDEX_ACCESS.remove(indexPath); + if(access != null) { + access.close(); + } } catch (Exception e) { throw convertException(e); } @@ -412,6 +409,7 @@ protected static void removeIndexAccess(IndexAccess access, String indexPath) * @param offset the offset * @param data whether the raw data should be returned * @return the result set + * @throws SQLException on failure */ protected static ResultSet search(Connection conn, String text, int limit, int offset, boolean data) throws SQLException { @@ -420,55 +418,57 @@ protected static ResultSet search(Connection conn, String text, // this is just to query the result set columns return result; } - if (text == null || text.trim().length() == 0) { + if (text == null || StringUtils.isWhitespaceOrEmpty(text)) { return result; } try { IndexAccess access = getIndexAccess(conn); // take a reference as the searcher may change - Searcher searcher = access.searcher; - // reuse the same analyzer; it's thread-safe; - // also allows subclasses to control the analyzer used. - Analyzer analyzer = access.writer.getAnalyzer(); - QueryParser parser = new QueryParser(Version.LUCENE_30, - LUCENE_FIELD_DATA, analyzer); - Query query = parser.parse(text); - // Lucene 3 insists on a hard limit and will not provide - // a total hits value. Take at least 100 which is - // an optimal limit for Lucene as any more - // will trigger writing results to disk. - int maxResults = (limit == 0 ? 
100 : limit) + offset; - TopDocs docs = searcher.search(query, maxResults); - if (limit == 0) { - limit = docs.totalHits; - } - for (int i = 0, len = docs.scoreDocs.length; - i < limit && i + offset < docs.totalHits - && i + offset < len; i++) { - ScoreDoc sd = docs.scoreDocs[i + offset]; - Document doc = searcher.doc(sd.doc); - float score = sd.score; - String q = doc.get(LUCENE_FIELD_QUERY); - if (data) { - int idx = q.indexOf(" WHERE "); - JdbcConnection c = (JdbcConnection) conn; - Session session = (Session) c.getSession(); - Parser p = new Parser(session); - String tab = q.substring(0, idx); - ExpressionColumn expr = (ExpressionColumn) p.parseExpression(tab); - String schemaName = expr.getOriginalTableAliasName(); - String tableName = expr.getColumnName(); - q = q.substring(idx + " WHERE ".length()); - Object[][] columnData = parseKey(conn, q); - result.addRow( - schemaName, - tableName, - columnData[0], - columnData[1], - score); - } else { - result.addRow(q, score); + IndexSearcher searcher = access.getSearcher(); + try { + // reuse the same analyzer; it's thread-safe; + // also allows subclasses to control the analyzer used. + Analyzer analyzer = access.writer.getAnalyzer(); + StandardQueryParser parser = new StandardQueryParser(analyzer); + Query query = parser.parse(text, LUCENE_FIELD_DATA); + // Lucene insists on a hard limit and will not provide + // a total hits value. Take at least 100 which is + // an optimal limit for Lucene as any more + // will trigger writing results to disk. + int maxResults = (limit == 0 ? 
100 : limit) + offset; + TopDocs docs = searcher.search(query, maxResults); + long totalHits = docs.totalHits.value; + if (limit == 0) { + // in this context it's safe to cast + limit = (int) totalHits; + } + for (int i = 0, len = docs.scoreDocs.length; i < limit + && i + offset < totalHits + && i + offset < len; i++) { + ScoreDoc sd = docs.scoreDocs[i + offset]; + Document doc = searcher.doc(sd.doc); + float score = sd.score; + String q = doc.get(LUCENE_FIELD_QUERY); + if (data) { + int idx = q.indexOf(" WHERE "); + JdbcConnection c = (JdbcConnection) conn; + SessionLocal session = (SessionLocal) c.getSession(); + Parser p = new Parser(session); + String tab = q.substring(0, idx); + ExpressionColumn expr = (ExpressionColumn) p + .parseExpression(tab); + String schemaName = expr.getOriginalTableAliasName(); + String tableName = expr.getColumnName(session, -1); + q = q.substring(idx + " WHERE ".length()); + String[][] columnData = parseKey(conn, q); + result.addRow(schemaName, tableName, columnData[0], + columnData[1], score); + } else { + result.addRow(q, score); + } } + } finally { + access.returnSearcher(searcher); } } catch (Exception e) { throw convertException(e); @@ -479,19 +479,28 @@ protected static ResultSet search(Connection conn, String text, /** * Trigger updates the index when a inserting, updating, or deleting a row. 
*/ - public static class FullTextTrigger implements Trigger { - - protected String schema; - protected String table; - protected int[] keys; - protected int[] indexColumns; - protected String[] columns; - protected int[] columnTypes; - protected String indexPath; - protected IndexAccess indexAccess; + public static final class FullTextTrigger implements Trigger { + + private String schema; + private String table; + private int[] keys; + private int[] indexColumns; + private String[] columns; + private int[] columnTypes; + private String indexPath; + private IndexAccess indexAccess; + + private final FieldType DOC_ID_FIELD_TYPE; + + public FullTextTrigger() { + DOC_ID_FIELD_TYPE = new FieldType(TextField.TYPE_STORED); + DOC_ID_FIELD_TYPE.setTokenized(false); + DOC_ID_FIELD_TYPE.freeze(); + } /** * INTERNAL + * @see Trigger#init(Connection, String, String, String, boolean, int) */ @Override public void init(Connection conn, String schemaName, String triggerName, @@ -500,19 +509,18 @@ public void init(Connection conn, String schemaName, String triggerName, this.table = tableName; this.indexPath = getIndexPath(conn); this.indexAccess = getIndexAccess(conn); - ArrayList keyList = New.arrayList(); + ArrayList keyList = Utils.newSmallArrayList(); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getColumns(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), null); - ArrayList columnList = New.arrayList(); + ArrayList columnList = Utils.newSmallArrayList(); while (rs.next()) { columnList.add(rs.getString("COLUMN_NAME")); } columnTypes = new int[columnList.size()]; - columns = new String[columnList.size()]; - columnList.toArray(columns); + columns = columnList.toArray(new String[0]); rs = meta.getColumns(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), @@ -520,7 +528,7 @@ public void init(Connection conn, String schemaName, String triggerName, for (int i = 0; 
rs.next(); i++) { columnTypes[i] = rs.getInt("DATA_TYPE"); } - if (keyList.size() == 0) { + if (keyList.isEmpty()) { rs = meta.getPrimaryKeys(null, StringUtils.escapeMetaDataPattern(schemaName), tableName); @@ -528,25 +536,24 @@ public void init(Connection conn, String schemaName, String triggerName, keyList.add(rs.getString("COLUMN_NAME")); } } - if (keyList.size() == 0) { + if (keyList.isEmpty()) { throw throwException("No primary key for table " + tableName); } - ArrayList indexList = New.arrayList(); + ArrayList indexList = Utils.newSmallArrayList(); PreparedStatement prep = conn.prepareStatement( "SELECT COLUMNS FROM " + SCHEMA - + ".INDEXES WHERE SCHEMA=? AND TABLE=?"); + + ".INDEXES WHERE SCHEMA=? AND `TABLE`=?"); prep.setString(1, schemaName); prep.setString(2, tableName); rs = prep.executeQuery(); if (rs.next()) { String cols = rs.getString(1); if (cols != null) { - for (String s : StringUtils.arraySplit(cols, ',', true)) { - indexList.add(s); - } + Collections.addAll(indexList, + StringUtils.arraySplit(cols, ',', true)); } } - if (indexList.size() == 0) { + if (indexList.isEmpty()) { indexList.addAll(columnList); } keys = new int[keyList.size()]; @@ -557,6 +564,7 @@ public void init(Connection conn, String schemaName, String triggerName, /** * INTERNAL + * @see Trigger#fire(Connection, Object[], Object[]) */ @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) @@ -583,32 +591,16 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) */ @Override public void close() throws SQLException { - if (indexAccess != null) { - removeIndexAccess(indexAccess, indexPath); - indexAccess = null; - } - } - - /** - * INTERNAL - */ - @Override - public void remove() { - // ignore + removeIndexAccess(indexPath); } /** * Commit all changes to the Lucene index. + * @throws SQLException on failure */ void commitIndex() throws SQLException { try { - indexAccess.writer.commit(); - // recreate Searcher with the IndexWriter's reader. 
- indexAccess.searcher.close(); - indexAccess.reader.close(); - IndexReader reader = indexAccess.writer.getReader(); - indexAccess.reader = reader; - indexAccess.searcher = new IndexSearcher(reader); + indexAccess.commit(); } catch (IOException e) { throw convertException(e); } @@ -619,18 +611,19 @@ void commitIndex() throws SQLException { * * @param row the row * @param commitIndex whether to commit the changes to the Lucene index + * @throws SQLException on failure */ - protected void insert(Object[] row, boolean commitIndex) throws SQLException { + void insert(Object[] row, boolean commitIndex) throws SQLException { String query = getQuery(row); Document doc = new Document(); - doc.add(new Field(LUCENE_FIELD_QUERY, query, - Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(new Field(LUCENE_FIELD_QUERY, query, DOC_ID_FIELD_TYPE)); long time = System.currentTimeMillis(); doc.add(new Field(LUCENE_FIELD_MODIFIED, DateTools.timeToString(time, DateTools.Resolution.SECOND), - Field.Store.YES, Field.Index.NOT_ANALYZED)); - StatementBuilder buff = new StatementBuilder(); - for (int index : indexColumns) { + TextField.TYPE_STORED)); + StringBuilder builder = new StringBuilder(); + for (int i = 0, length = indexColumns.length; i < length; i++) { + int index = indexColumns[i]; String columnName = columns[index]; String data = asString(row[index], columnTypes[index]); // column names that start with _ @@ -639,15 +632,15 @@ protected void insert(Object[] row, boolean commitIndex) throws SQLException { if (columnName.startsWith(LUCENE_FIELD_COLUMN_PREFIX)) { columnName = LUCENE_FIELD_COLUMN_PREFIX + columnName; } - doc.add(new Field(columnName, data, - Field.Store.NO, Field.Index.ANALYZED)); - buff.appendExceptFirst(" "); - buff.append(data); + doc.add(new Field(columnName, data, TextField.TYPE_NOT_STORED)); + if (i > 0) { + builder.append(' '); + } + builder.append(data); } - Field.Store storeText = STORE_DOCUMENT_TEXT_IN_INDEX ? 
- Field.Store.YES : Field.Store.NO; - doc.add(new Field(LUCENE_FIELD_DATA, buff.toString(), storeText, - Field.Index.ANALYZED)); + FieldType dataFieldType = STORE_DOCUMENT_TEXT_IN_INDEX ? + TextField.TYPE_STORED : TextField.TYPE_NOT_STORED; + doc.add(new Field(LUCENE_FIELD_DATA, builder.toString(), dataFieldType)); try { indexAccess.writer.addDocument(doc); if (commitIndex) { @@ -663,8 +656,9 @@ protected void insert(Object[] row, boolean commitIndex) throws SQLException { * * @param row the row * @param commitIndex whether to commit the changes to the Lucene index + * @throws SQLException on failure */ - protected void delete(Object[] row, boolean commitIndex) throws SQLException { + private void delete(Object[] row, boolean commitIndex) throws SQLException { String query = getQuery(row); try { Term term = new Term(LUCENE_FIELD_QUERY, query); @@ -678,44 +672,93 @@ protected void delete(Object[] row, boolean commitIndex) throws SQLException { } private String getQuery(Object[] row) throws SQLException { - StatementBuilder buff = new StatementBuilder(); + StringBuilder builder = new StringBuilder(); if (schema != null) { - buff.append(StringUtils.quoteIdentifier(schema)).append('.'); + StringUtils.quoteIdentifier(builder, schema).append('.'); } - buff.append(StringUtils.quoteIdentifier(table)).append(" WHERE "); - for (int columnIndex : keys) { - buff.appendExceptFirst(" AND "); - buff.append(StringUtils.quoteIdentifier(columns[columnIndex])); + StringUtils.quoteIdentifier(builder, table).append(" WHERE "); + for (int i = 0, length = keys.length; i < length; i++) { + if (i > 0) { + builder.append(" AND "); + } + int columnIndex = keys[i]; + StringUtils.quoteIdentifier(builder, columns[columnIndex]); Object o = row[columnIndex]; if (o == null) { - buff.append(" IS NULL"); + builder.append(" IS NULL"); } else { - buff.append('=').append(FullText.quoteSQL(o, columnTypes[columnIndex])); + builder.append('=').append(FullText.quoteSQL(o, columnTypes[columnIndex])); } } - 
return buff.toString(); + return builder.toString(); } } /** * A wrapper for the Lucene writer and searcher. */ - static class IndexAccess { + private static final class IndexAccess { /** * The index writer. */ - IndexWriter writer; + final IndexWriter writer; /** - * The index reader. + * The index searcher. */ - IndexReader reader; + private IndexSearcher searcher; + + IndexAccess(IndexWriter writer) throws IOException { + this.writer = writer; + initializeSearcher(); + } /** - * The index searcher. + * Start using the searcher. + * + * @return the searcher + * @throws IOException on failure */ - Searcher searcher; - } + synchronized IndexSearcher getSearcher() throws IOException { + if (!searcher.getIndexReader().tryIncRef()) { + initializeSearcher(); + } + return searcher; + } + + private void initializeSearcher() throws IOException { + IndexReader reader = DirectoryReader.open(writer); + searcher = new IndexSearcher(reader); + } + + /** + * Stop using the searcher. + * + * @param searcher the searcher + * @throws IOException on failure + */ + synchronized void returnSearcher(IndexSearcher searcher) throws IOException { + searcher.getIndexReader().decRef(); + } + + /** + * Commit the changes. + * @throws IOException on failure + */ + public synchronized void commit() throws IOException { + writer.commit(); + returnSearcher(searcher); + searcher = new IndexSearcher(DirectoryReader.open(writer)); + } + /** + * Close the index. + * @throws IOException on failure + */ + public synchronized void close() throws IOException { + searcher = null; + writer.close(); + } + } } diff --git a/h2/src/main/org/h2/fulltext/FullTextSettings.java b/h2/src/main/org/h2/fulltext/FullTextSettings.java index 991cb1654e..7cdfc2841c 100644 --- a/h2/src/main/org/h2/fulltext/FullTextSettings.java +++ b/h2/src/main/org/h2/fulltext/FullTextSettings.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; @@ -12,18 +12,20 @@ import java.sql.Statement; import java.util.HashMap; import java.util.HashSet; -import org.h2.util.New; -import org.h2.util.SoftHashMap; +import java.util.WeakHashMap; +import java.util.concurrent.ConcurrentHashMap; + +import org.h2.util.SoftValuesHashMap; /** * The global settings of a full text search. */ -class FullTextSettings { +final class FullTextSettings { /** * The settings of open indexes. */ - private static final HashMap SETTINGS = New.hashMap(); + private static final HashMap SETTINGS = new HashMap<>(); /** * Whether this instance has been initialized. @@ -33,24 +35,22 @@ class FullTextSettings { /** * The set of words not to index (stop words). */ - private final HashSet ignoreList = New.hashSet(); + private final HashSet ignoreList = new HashSet<>(); /** * The set of words / terms. */ - private final HashMap words = New.hashMap(); + private final HashMap words = new HashMap<>(); /** * The set of indexes in this database. */ - private final HashMap indexes = New.hashMap(); + private final ConcurrentHashMap indexes = new ConcurrentHashMap<>(); /** * The prepared statement cache. */ - private final SoftHashMap> cache = - new SoftHashMap>(); + private final WeakHashMap> cache = new WeakHashMap<>(); /** * The whitespace characters. @@ -60,26 +60,61 @@ class FullTextSettings { /** * Create a new instance. */ - protected FullTextSettings() { + private FullTextSettings() { // don't allow construction } /** - * Get the ignore list. 
- * - * @return the ignore list + * Clear set of ignored words */ - protected HashSet getIgnoreList() { - return ignoreList; + public void clearIgnored() { + synchronized (ignoreList) { + ignoreList.clear(); + } } /** - * Get the word list. - * - * @return the word list + * Amend set of ignored words + * @param words to add + */ + public void addIgnored(Iterable words) { + synchronized (ignoreList) { + for (String word : words) { + word = normalizeWord(word); + ignoreList.add(word); + } + } + } + + /** + * Clear set of searchable words + */ + public void clearWordList() { + synchronized (words) { + words.clear(); + } + } + + /** + * Get id for a searchable word + * @param word to find id for + * @return Integer id or null if word is not found + */ + public Integer getWordId(String word) { + synchronized (words) { + return words.get(word); + } + } + + /** + * Register searchable word + * @param word to register + * @param id to register with */ - protected HashMap getWordList() { - return words; + public void addWord(String word, Integer id) { + synchronized (words) { + words.putIfAbsent(word, id); + } } /** @@ -88,7 +123,7 @@ protected HashMap getWordList() { * @param indexId the index id * @return the index info */ - protected IndexInfo getIndexInfo(int indexId) { + IndexInfo getIndexInfo(int indexId) { return indexes.get(indexId); } @@ -97,7 +132,7 @@ protected IndexInfo getIndexInfo(int indexId) { * * @param index the index */ - protected void addIndexInfo(IndexInfo index) { + void addIndexInfo(IndexInfo index) { indexes.put(index.id, index); } @@ -108,11 +143,12 @@ protected void addIndexInfo(IndexInfo index) { * @param word the word to convert and check * @return the uppercase version of the word or null */ - protected String convertWord(String word) { - // TODO this is locale specific, document - word = word.toUpperCase(); - if (ignoreList.contains(word)) { - return null; + String convertWord(String word) { + word = normalizeWord(word); + synchronized 
(ignoreList) { + if (ignoreList.contains(word)) { + return null; + } } return word; } @@ -122,14 +158,18 @@ protected String convertWord(String word) { * * @param conn the connection * @return the settings + * @throws SQLException on failure */ - protected static FullTextSettings getInstance(Connection conn) + static FullTextSettings getInstance(Connection conn) throws SQLException { String path = getIndexPath(conn); - FullTextSettings setting = SETTINGS.get(path); - if (setting == null) { - setting = new FullTextSettings(); - SETTINGS.put(path, setting); + FullTextSettings setting; + synchronized (SETTINGS) { + setting = SETTINGS.get(path); + if (setting == null) { + setting = new FullTextSettings(); + SETTINGS.put(path, setting); + } } return setting; } @@ -140,10 +180,10 @@ protected static FullTextSettings getInstance(Connection conn) * @param conn the connection * @return the file system path */ - protected static String getIndexPath(Connection conn) throws SQLException { + private static String getIndexPath(Connection conn) throws SQLException { Statement stat = conn.createStatement(); ResultSet rs = stat.executeQuery( - "CALL IFNULL(DATABASE_PATH(), 'MEM:' || DATABASE())"); + "CALL COALESCE(DATABASE_PATH(), 'MEM:' || DATABASE())"); rs.next(); String path = rs.getString(1); if ("MEM:UNNAMED".equals(path)) { @@ -161,12 +201,13 @@ protected static String getIndexPath(Connection conn) throws SQLException { * @param conn the connection * @param sql the statement * @return the prepared statement + * @throws SQLException on failure */ - protected synchronized PreparedStatement prepare(Connection conn, String sql) + synchronized PreparedStatement prepare(Connection conn, String sql) throws SQLException { - SoftHashMap c = cache.get(conn); + SoftValuesHashMap c = cache.get(conn); if (c == null) { - c = new SoftHashMap(); + c = new SoftValuesHashMap<>(); cache.put(conn, c); } PreparedStatement prep = c.get(sql); @@ -218,7 +259,9 @@ protected boolean isInitialized() { 
* Close all fulltext settings, freeing up memory. */ protected static void closeAll() { - SETTINGS.clear(); + synchronized (SETTINGS) { + SETTINGS.clear(); + } } protected void setWhitespaceChars(String whitespaceChars) { @@ -229,4 +272,8 @@ protected String getWhitespaceChars() { return whitespaceChars; } + private static String normalizeWord(String word) { + // TODO this is locale specific, document + return word.toUpperCase(); + } } diff --git a/h2/src/main/org/h2/fulltext/IndexInfo.java b/h2/src/main/org/h2/fulltext/IndexInfo.java index 3522d3b4f1..22c5498afc 100644 --- a/h2/src/main/org/h2/fulltext/IndexInfo.java +++ b/h2/src/main/org/h2/fulltext/IndexInfo.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.fulltext; diff --git a/h2/src/main/org/h2/fulltext/package.html b/h2/src/main/org/h2/fulltext/package.html index 149fb35323..d3c046257f 100644 --- a/h2/src/main/org/h2/fulltext/package.html +++ b/h2/src/main/org/h2/fulltext/package.html @@ -1,7 +1,7 @@ @@ -9,6 +9,6 @@ Javadoc package documentation

    -The native full text search implementation, and the wrapper for the the Lucene full text search implementation. +The native full text search implementation, and the wrapper for the Lucene full text search implementation.

    \ No newline at end of file diff --git a/h2/src/main/org/h2/index/BaseIndex.java b/h2/src/main/org/h2/index/BaseIndex.java deleted file mode 100644 index 182970ebcb..0000000000 --- a/h2/src/main/org/h2/index/BaseIndex.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.DbObject; -import org.h2.engine.Mode; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.message.Trace; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.schema.SchemaObjectBase; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Most index implementations extend the base index. - */ -public abstract class BaseIndex extends SchemaObjectBase implements Index { - - protected IndexColumn[] indexColumns; - protected Column[] columns; - protected int[] columnIds; - protected Table table; - protected IndexType indexType; - protected boolean isMultiVersion; - - /** - * Initialize the base index. 
- * - * @param newTable the table - * @param id the object id - * @param name the index name - * @param newIndexColumns the columns that are indexed or null if this is - * not yet known - * @param newIndexType the index type - */ - protected void initBaseIndex(Table newTable, int id, String name, - IndexColumn[] newIndexColumns, IndexType newIndexType) { - initSchemaObjectBase(newTable.getSchema(), id, name, Trace.INDEX); - this.indexType = newIndexType; - this.table = newTable; - if (newIndexColumns != null) { - this.indexColumns = newIndexColumns; - columns = new Column[newIndexColumns.length]; - int len = columns.length; - columnIds = new int[len]; - for (int i = 0; i < len; i++) { - Column col = newIndexColumns[i].column; - columns[i] = col; - columnIds[i] = col.getColumnId(); - } - } - } - - /** - * Check that the index columns are not CLOB or BLOB. - * - * @param columns the columns - */ - protected static void checkIndexColumnTypes(IndexColumn[] columns) { - for (IndexColumn c : columns) { - int type = c.column.getType(); - if (type == Value.CLOB || type == Value.BLOB) { - throw DbException.getUnsupportedException( - "Index on BLOB or CLOB column: " + c.column.getCreateSQL()); - } - } - } - - @Override - public String getDropSQL() { - return null; - } - - /** - * Create a duplicate key exception with a message that contains the index - * name. 
- * - * @param key the key values - * @return the exception - */ - protected DbException getDuplicateKeyException(String key) { - String sql = getName() + " ON " + table.getSQL() + - "(" + getColumnListSQL() + ")"; - if (key != null) { - sql += " VALUES " + key; - } - DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql); - e.setSource(this); - return e; - } - - @Override - public String getPlanSQL() { - return getSQL(); - } - - @Override - public void removeChildrenAndResources(Session session) { - table.removeIndex(this); - remove(session); - database.removeMeta(session, getId()); - } - - @Override - public boolean canFindNext() { - return false; - } - - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession(), first, last); - } - - /** - * Find a row or a list of rows that is larger and create a cursor to - * iterate over the result. The base implementation doesn't support this - * feature. - * - * @param session the session - * @param higherThan the lower limit (excluding) - * @param last the last row, or null for no limit - * @return the cursor - * @throws DbException always - */ - @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { - throw DbException.throwInternalError(); - } - - /** - * Calculate the cost for the given mask as if this index was a typical - * b-tree range index. This is the estimated cost required to search one - * row, and then iterate over the given number of rows. 
- * - * @param masks the search mask - * @param rowCount the number of rows in the index - * @param filter the table filter - * @param sortOrder the sort order - * @return the estimated cost - */ - protected long getCostRangeIndex(int[] masks, long rowCount, - TableFilter filter, SortOrder sortOrder) { - rowCount += Constants.COST_ROW_OFFSET; - long cost = rowCount; - long rows = rowCount; - int totalSelectivity = 0; - if (masks == null) { - return cost; - } - for (int i = 0, len = columns.length; i < len; i++) { - Column column = columns[i]; - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) == IndexCondition.EQUALITY) { - if (i == columns.length - 1 && getIndexType().isUnique()) { - cost = 3; - break; - } - totalSelectivity = 100 - ((100 - totalSelectivity) * - (100 - column.getSelectivity()) / 100); - long distinctRows = rowCount * totalSelectivity / 100; - if (distinctRows <= 0) { - distinctRows = 1; - } - rows = Math.max(rowCount / distinctRows, 1); - cost = 2 + rows; - } else if ((mask & IndexCondition.RANGE) == IndexCondition.RANGE) { - cost = 2 + rows / 4; - break; - } else if ((mask & IndexCondition.START) == IndexCondition.START) { - cost = 2 + rows / 3; - break; - } else if ((mask & IndexCondition.END) == IndexCondition.END) { - cost = rows / 3; - break; - } else { - break; - } - } - // if the ORDER BY clause matches the ordering of this index, - // it will be cheaper than another index, so adjust the cost accordingly - if (sortOrder != null) { - boolean sortOrderMatches = true; - int coveringCount = 0; - int[] sortTypes = sortOrder.getSortTypes(); - for (int i = 0, len = sortTypes.length; i < len; i++) { - if (i >= indexColumns.length) { - // we can still use this index if we are sorting by more - // than it's columns, it's just that the coveringCount - // is lower than with an index that contains - // more of the order by columns - break; - } - Column col = sortOrder.getColumn(i, filter); - if (col == 
null) { - sortOrderMatches = false; - break; - } - IndexColumn indexCol = indexColumns[i]; - if (col != indexCol.column) { - sortOrderMatches = false; - break; - } - int sortType = sortTypes[i]; - if (sortType != indexCol.sortType) { - sortOrderMatches = false; - break; - } - coveringCount++; - } - if (sortOrderMatches) { - // "coveringCount" makes sure that when we have two - // or more covering indexes, we choose the one - // that covers more - cost -= coveringCount; - } - } - return cost; - } - - @Override - public int compareRows(SearchRow rowData, SearchRow compare) { - if (rowData == compare) { - return 0; - } - for (int i = 0, len = indexColumns.length; i < len; i++) { - int index = columnIds[i]; - Value v = compare.getValue(index); - if (v == null) { - // can't compare further - return 0; - } - int c = compareValues(rowData.getValue(index), v, indexColumns[i].sortType); - if (c != 0) { - return c; - } - } - return 0; - } - - /** - * Check if one of the columns is NULL and multiple rows with NULL are - * allowed using the current compatibility mode for unique indexes. Note: - * NULL behavior is complicated in SQL. - * - * @param newRow the row to check - * @return true if one of the columns is null and multiple nulls in unique - * indexes are allowed - */ - protected boolean containsNullAndAllowMultipleNull(SearchRow newRow) { - Mode mode = database.getMode(); - if (mode.uniqueIndexSingleNull) { - return false; - } else if (mode.uniqueIndexSingleNullExceptAllColumnsAreNull) { - for (int index : columnIds) { - Value v = newRow.getValue(index); - if (v != ValueNull.INSTANCE) { - return false; - } - } - return true; - } - for (int index : columnIds) { - Value v = newRow.getValue(index); - if (v == ValueNull.INSTANCE) { - return true; - } - } - return false; - } - - /** - * Compare the positions of two rows. 
- * - * @param rowData the first row - * @param compare the second row - * @return 0 if both rows are equal, -1 if the first row is smaller, - * otherwise 1 - */ - int compareKeys(SearchRow rowData, SearchRow compare) { - long k1 = rowData.getKey(); - long k2 = compare.getKey(); - if (k1 == k2) { - if (isMultiVersion) { - int v1 = rowData.getVersion(); - int v2 = compare.getVersion(); - return MathUtils.compareInt(v2, v1); - } - return 0; - } - return k1 > k2 ? 1 : -1; - } - - private int compareValues(Value a, Value b, int sortType) { - if (a == b) { - return 0; - } - boolean aNull = a == null, bNull = b == null; - if (aNull || bNull) { - return SortOrder.compareNull(aNull, sortType); - } - int comp = table.compareTypeSave(a, b); - if ((sortType & SortOrder.DESCENDING) != 0) { - comp = -comp; - } - return comp; - } - - @Override - public int getColumnIndex(Column col) { - for (int i = 0, len = columns.length; i < len; i++) { - if (columns[i].equals(col)) { - return i; - } - } - return -1; - } - - /** - * Get the list of columns as a string. 
- * - * @return the list of columns - */ - private String getColumnListSQL() { - StatementBuilder buff = new StatementBuilder(); - for (IndexColumn c : indexColumns) { - buff.appendExceptFirst(", "); - buff.append(c.getSQL()); - } - return buff.toString(); - } - - @Override - public String getCreateSQLForCopy(Table targetTable, String quotedName) { - StringBuilder buff = new StringBuilder("CREATE "); - buff.append(indexType.getSQL()); - buff.append(' '); - if (table.isHidden()) { - buff.append("IF NOT EXISTS "); - } - buff.append(quotedName); - buff.append(" ON ").append(targetTable.getSQL()); - if (comment != null) { - buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment)); - } - buff.append('(').append(getColumnListSQL()).append(')'); - return buff.toString(); - } - - @Override - public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL()); - } - - @Override - public IndexColumn[] getIndexColumns() { - return indexColumns; - } - - @Override - public Column[] getColumns() { - return columns; - } - - @Override - public IndexType getIndexType() { - return indexType; - } - - @Override - public int getType() { - return DbObject.INDEX; - } - - @Override - public Table getTable() { - return table; - } - - @Override - public void commit(int operation, Row row) { - // nothing to do - } - - void setMultiVersion(boolean multiVersion) { - this.isMultiVersion = multiVersion; - } - - @Override - public Row getRow(Session session, long key) { - throw DbException.getUnsupportedException(toString()); - } - - @Override - public boolean isHidden() { - return table.isHidden(); - } - - @Override - public boolean isRowIdIndex() { - return false; - } - - @Override - public boolean canScan() { - return true; - } - - @Override - public void setSortedInsertMode(boolean sortedInsertMode) { - // ignore - } - -} diff --git a/h2/src/main/org/h2/index/Cursor.java b/h2/src/main/org/h2/index/Cursor.java index 199731c249..a8e768ae2c 100644 --- 
a/h2/src/main/org/h2/index/Cursor.java +++ b/h2/src/main/org/h2/index/Cursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; diff --git a/h2/src/main/org/h2/index/DualCursor.java b/h2/src/main/org/h2/index/DualCursor.java new file mode 100644 index 0000000000..e49a8bc1fe --- /dev/null +++ b/h2/src/main/org/h2/index/DualCursor.java @@ -0,0 +1,48 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.value.Value; + +/** + * The cursor implementation for the DUAL index. + */ +class DualCursor implements Cursor { + + private Row currentRow; + + DualCursor() { + } + + @Override + public Row get() { + return currentRow; + } + + @Override + public SearchRow getSearchRow() { + return currentRow; + } + + @Override + public boolean next() { + if (currentRow == null) { + currentRow = Row.get(Value.EMPTY_VALUES, 1); + return true; + } else { + return false; + } + } + + @Override + public boolean previous() { + throw DbException.getInternalError(toString()); + } + +} diff --git a/h2/src/main/org/h2/index/DualIndex.java b/h2/src/main/org/h2/index/DualIndex.java new file mode 100644 index 0000000000..74539c41b5 --- /dev/null +++ b/h2/src/main/org/h2/index/DualIndex.java @@ -0,0 +1,58 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.DualTable; +import org.h2.table.IndexColumn; +import org.h2.table.TableFilter; +import org.h2.value.Value; + +/** + * An index for the DUAL table. + */ +public class DualIndex extends VirtualTableIndex { + + public DualIndex(DualTable table) { + super(table, "DUAL_INDEX", new IndexColumn[0]); + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return new DualCursor(); + } + + @Override + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return 1d; + } + + @Override + public String getCreateSQL() { + return null; + } + + @Override + public boolean canGetFirstOrLast() { + return true; + } + + @Override + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + return new SingleRowCursor(Row.get(Value.EMPTY_VALUES, 1)); + } + + @Override + public String getPlanSQL() { + return "dual index"; + } + +} diff --git a/h2/src/main/org/h2/index/FunctionCursor.java b/h2/src/main/org/h2/index/FunctionCursor.java deleted file mode 100644 index 3c9e876aa2..0000000000 --- a/h2/src/main/org/h2/index/FunctionCursor.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.message.DbException; -import org.h2.result.ResultInterface; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.value.Value; - -/** - * A cursor for a function that returns a result. 
- */ -public class FunctionCursor implements Cursor { - - private final ResultInterface result; - private Value[] values; - private Row row; - - FunctionCursor(ResultInterface result) { - this.result = result; - } - - @Override - public Row get() { - if (values == null) { - return null; - } - if (row == null) { - row = new Row(values, 1); - } - return row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - row = null; - if (result != null && result.next()) { - values = result.currentRow(); - } else { - values = null; - } - return values != null; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(); - } - -} diff --git a/h2/src/main/org/h2/index/FunctionCursorResultSet.java b/h2/src/main/org/h2/index/FunctionCursorResultSet.java deleted file mode 100644 index 3727d092fe..0000000000 --- a/h2/src/main/org/h2/index/FunctionCursorResultSet.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.value.DataType; -import org.h2.value.Value; - -/** - * A cursor for a function that returns a JDBC result set. 
- */ -public class FunctionCursorResultSet implements Cursor { - - private final Session session; - private final ResultSet result; - private final ResultSetMetaData meta; - private Value[] values; - private Row row; - - FunctionCursorResultSet(Session session, ResultSet result) { - this.session = session; - this.result = result; - try { - this.meta = result.getMetaData(); - } catch (SQLException e) { - throw DbException.convert(e); - } - } - - @Override - public Row get() { - if (values == null) { - return null; - } - if (row == null) { - row = new Row(values, 1); - } - return row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - row = null; - try { - if (result != null && result.next()) { - int columnCount = meta.getColumnCount(); - values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - int type = DataType.getValueTypeFromResultSet(meta, i + 1); - values[i] = DataType.readValue(session, result, i + 1, type); - } - } else { - values = null; - } - } catch (SQLException e) { - throw DbException.convert(e); - } - return values != null; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(); - } - -} \ No newline at end of file diff --git a/h2/src/main/org/h2/index/FunctionIndex.java b/h2/src/main/org/h2/index/FunctionIndex.java deleted file mode 100644 index 593f0f4219..0000000000 --- a/h2/src/main/org/h2/index/FunctionIndex.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.FunctionTable; -import org.h2.table.IndexColumn; -import org.h2.table.TableFilter; - -/** - * An index for a function that returns a result set. This index can only scan - * through all rows, search is not supported. - */ -public class FunctionIndex extends BaseIndex { - - private final FunctionTable functionTable; - - public FunctionIndex(FunctionTable functionTable, IndexColumn[] columns) { - initBaseIndex(functionTable, 0, null, columns, IndexType.createNonUnique(true)); - this.functionTable = functionTable; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void add(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void remove(Session session, Row row) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (functionTable.isBufferResultSetToLocalTemp()) { - return new FunctionCursor(functionTable.getResult(session)); - } - return new FunctionCursorResultSet(session, - functionTable.getResultSet(session)); - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - if (masks != null) { - throw DbException.getUnsupportedException("ALIAS"); - } - long expectedRows; - if (functionTable.canGetRowCount()) { - expectedRows = functionTable.getRowCountApproximation(); - } else { - expectedRows = database.getSettings().estimatedFunctionTableRows; - } - return expectedRows * 10; - } - - @Override - public void remove(Session session) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public void truncate(Session session) { - throw 
DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("ALIAS"); - } - - @Override - public long getRowCount(Session session) { - return functionTable.getRowCount(session); - } - - @Override - public long getRowCountApproximation() { - return functionTable.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public String getPlanSQL() { - return "function"; - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/HashIndex.java b/h2/src/main/org/h2/index/HashIndex.java deleted file mode 100644 index 24fd0db79a..0000000000 --- a/h2/src/main/org/h2/index/HashIndex.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.util.ValueHashMap; -import org.h2.value.Value; - -/** - * An unique index based on an in-memory hash map. - */ -public class HashIndex extends BaseIndex { - - /** - * The index of the indexed column. 
- */ - private final int indexColumn; - - private final RegularTable tableData; - private ValueHashMap rows; - - public HashIndex(RegularTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - initBaseIndex(table, id, indexName, columns, indexType); - this.indexColumn = columns[0].column.getColumnId(); - this.tableData = table; - reset(); - } - - private void reset() { - rows = ValueHashMap.newInstance(); - } - - @Override - public void truncate(Session session) { - reset(); - } - - @Override - public void add(Session session, Row row) { - Value key = row.getValue(indexColumn); - Object old = rows.get(key); - if (old != null) { - // TODO index duplicate key for hash indexes: is this allowed? - throw getDuplicateKeyException(key.toString()); - } - rows.put(key, row.getKey()); - } - - @Override - public void remove(Session session, Row row) { - rows.remove(row.getValue(indexColumn)); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (first == null || last == null) { - // TODO hash index: should additionally check if values are the same - throw DbException.throwInternalError(); - } - Value v = first.getValue(indexColumn); - /* - * Sometimes the incoming search is a similar, but not the same type - * e.g. the search value is INT, but the index column is LONG. In which - * case we need to convert, otherwise the ValueHashMap will not find the - * result. 
- */ - v = v.convertTo(tableData.getColumn(indexColumn).getType()); - Row result; - Long pos = rows.get(v); - if (pos == null) { - result = null; - } else { - result = tableData.getRow(session, pos.intValue()); - } - return new SingleRowCursor(result); - } - - @Override - public long getRowCount(Session session) { - return getRowCountApproximation(); - } - - @Override - public long getRowCountApproximation() { - return rows.size(); - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void remove(Session session) { - // nothing to do - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - for (Column column : columns) { - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) != IndexCondition.EQUALITY) { - return Long.MAX_VALUE; - } - } - return 2; - } - - @Override - public void checkRename() { - // ok - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("HASH"); - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/Index.java b/h2/src/main/org/h2/index/Index.java index 0eb240d443..b0104db1b3 100644 --- a/h2/src/main/org/h2/index/Index.java +++ b/h2/src/main/org/h2/index/Index.java @@ -1,12 +1,22 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; -import org.h2.engine.Session; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.message.Trace; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.schema.SchemaObject; @@ -14,25 +24,185 @@ import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableFilter; +import org.h2.util.StringUtils; +import org.h2.value.CompareMode; +import org.h2.value.DataType; +import org.h2.value.Value; +import org.h2.value.ValueNull; /** * An index. Indexes are used to speed up searching data. */ -public interface Index extends SchemaObject { +public abstract class Index extends SchemaObject { + + /** + * Check that the index columns are not CLOB or BLOB. + * + * @param columns the columns + */ + protected static void checkIndexColumnTypes(IndexColumn[] columns) { + for (IndexColumn c : columns) { + if (!DataType.isIndexable(c.column.getType())) { + throw DbException.getUnsupportedException("Index on column: " + c.column.getCreateSQL()); + } + } + } + + /** + * Columns of this index. + */ + protected IndexColumn[] indexColumns; + + /** + * Table columns used in this index. + */ + protected Column[] columns; + + /** + * Identities of table columns. + */ + protected int[] columnIds; + + /** + * Count of unique columns. Unique columns, if any, are always first columns + * in the lists. + */ + protected final int uniqueColumnColumn; + + /** + * The table. + */ + protected final Table table; + + /** + * The index type. + */ + protected final IndexType indexType; + + private final RowFactory rowFactory; + + private final RowFactory uniqueRowFactory; + + /** + * Initialize the index. 
+ * + * @param newTable the table + * @param id the object id + * @param name the index name + * @param newIndexColumns the columns that are indexed or null if this is + * not yet known + * @param uniqueColumnCount count of unique columns + * @param newIndexType the index type + */ + protected Index(Table newTable, int id, String name, IndexColumn[] newIndexColumns, int uniqueColumnCount, + IndexType newIndexType) { + super(newTable.getSchema(), id, name, Trace.INDEX); + this.uniqueColumnColumn = uniqueColumnCount; + this.indexType = newIndexType; + this.table = newTable; + if (newIndexColumns != null) { + this.indexColumns = newIndexColumns; + columns = new Column[newIndexColumns.length]; + int len = columns.length; + columnIds = new int[len]; + for (int i = 0; i < len; i++) { + Column col = newIndexColumns[i].column; + columns[i] = col; + columnIds[i] = col.getColumnId(); + } + } + RowFactory databaseRowFactory = database.getRowFactory(); + CompareMode compareMode = database.getCompareMode(); + Column[] tableColumns = table.getColumns(); + rowFactory = databaseRowFactory.createRowFactory(database, compareMode, database, tableColumns, + newIndexType.isScan() ? 
null : newIndexColumns, true); + RowFactory uniqueRowFactory; + if (uniqueColumnCount > 0) { + if (newIndexColumns == null || uniqueColumnCount == newIndexColumns.length) { + uniqueRowFactory = rowFactory; + } else { + uniqueRowFactory = databaseRowFactory.createRowFactory(database, compareMode, database, tableColumns, + Arrays.copyOf(newIndexColumns, uniqueColumnCount), true); + } + } else { + uniqueRowFactory = null; + } + this.uniqueRowFactory = uniqueRowFactory; + } + + @Override + public final int getType() { + return DbObject.INDEX; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + table.removeIndex(this); + remove(session); + database.removeMeta(session, getId()); + } + + @Override + public final boolean isHidden() { + return table.isHidden(); + } + + @Override + public String getCreateSQLForCopy(Table targetTable, String quotedName) { + StringBuilder builder = new StringBuilder("CREATE "); + builder.append(indexType.getSQL()); + builder.append(' '); + if (table.isHidden()) { + builder.append("IF NOT EXISTS "); + } + builder.append(quotedName); + builder.append(" ON "); + targetTable.getSQL(builder, DEFAULT_SQL_FLAGS); + if (comment != null) { + builder.append(" COMMENT "); + StringUtils.quoteStringSQL(builder, comment); + } + return getColumnListSQL(builder, DEFAULT_SQL_FLAGS).toString(); + } + + /** + * Get the list of columns as a string. 
+ * + * @param sqlFlags formatting flags + * @return the list of columns + */ + private StringBuilder getColumnListSQL(StringBuilder builder, int sqlFlags) { + builder.append('('); + int length = indexColumns.length; + if (uniqueColumnColumn > 0 && uniqueColumnColumn < length) { + IndexColumn.writeColumns(builder, indexColumns, 0, uniqueColumnColumn, sqlFlags).append(") INCLUDE("); + IndexColumn.writeColumns(builder, indexColumns, uniqueColumnColumn, length, sqlFlags); + } else { + IndexColumn.writeColumns(builder, indexColumns, 0, length, sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getCreateSQL() { + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); + } /** * Get the message to show in a EXPLAIN statement. * * @return the plan */ - String getPlanSQL(); + public String getPlanSQL() { + return getSQL(TRACE_SQL_FLAGS | ADD_PLAN_INFORMATION); + } /** * Close this index. * * @param session the session used to write data */ - void close(Session session); + public abstract void close(SessionLocal session); /** * Add a row to the index. @@ -40,7 +210,7 @@ public interface Index extends SchemaObject { * @param session the session to use * @param row the row to add */ - void add(Session session, Row row); + public abstract void add(SessionLocal session, Row row); /** * Remove a row from the index. @@ -48,30 +218,41 @@ public interface Index extends SchemaObject { * @param session the session * @param row the row */ - void remove(Session session, Row row); + public abstract void remove(SessionLocal session, Row row); /** - * Find a row or a list of rows and create a cursor to iterate over the - * result. + * Update index after row change. 
* * @param session the session - * @param first the first row, or null for no limit - * @param last the last row, or null for no limit - * @return the cursor to iterate over the results + * @param oldRow row before the update + * @param newRow row after the update + */ + public void update(SessionLocal session, Row oldRow, Row newRow) { + remove(session, oldRow); + add(session, newRow); + } + + /** + * Returns {@code true} if {@code find()} implementation performs scan over all + * index, {@code false} if {@code find()} performs the fast lookup. + * + * @return {@code true} if {@code find()} implementation performs scan over all + * index, {@code false} if {@code find()} performs the fast lookup */ - Cursor find(Session session, SearchRow first, SearchRow last); + public boolean isFindUsingFullTableScan() { + return false; + } /** * Find a row or a list of rows and create a cursor to iterate over the * result. * - * @param filter the table filter (which possibly knows about additional - * conditions) + * @param session the session * @param first the first row, or null for no limit * @param last the last row, or null for no limit * @return the cursor to iterate over the results */ - Cursor find(TableFilter filter, SearchRow first, SearchRow last); + public abstract Cursor find(SessionLocal session, SearchRow first, SearchRow last); /** * Estimate the cost to search for rows given the search mask. 
@@ -81,26 +262,28 @@ public interface Index extends SchemaObject { * @param session the session * @param masks per-column comparison bit masks, null means 'always false', * see constants in IndexCondition - * @param filter the table filter + * @param filters all joined table filters + * @param filter the current table filter index * @param sortOrder the sort order + * @param allColumnsSet the set of all columns * @return the estimated cost */ - double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder); + public abstract double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, + SortOrder sortOrder, AllColumnsForPlan allColumnsSet); /** * Remove the index. * * @param session the session */ - void remove(Session session); + public abstract void remove(SessionLocal session); /** * Remove all rows from the index. * * @param session the session */ - void truncate(Session session); + public abstract void truncate(SessionLocal session); /** * Check if the index can directly look up the lowest or highest value of a @@ -108,14 +291,18 @@ public interface Index extends SchemaObject { * * @return true if it can */ - boolean canGetFirstOrLast(); + public boolean canGetFirstOrLast() { + return false; + } /** * Check if the index can get the next higher value. * * @return true if it can */ - boolean canFindNext(); + public boolean canFindNext() { + return false; + } /** * Find a row or a list of rows that is larger and create a cursor to @@ -126,7 +313,9 @@ public interface Index extends SchemaObject { * @param last the last row, or null for no limit * @return the cursor */ - Cursor findNext(Session session, SearchRow higherThan, SearchRow last); + public Cursor findNext(SessionLocal session, SearchRow higherThan, SearchRow last) { + throw DbException.getInternalError(toString()); + } /** * Find the first (or last) value of this index. 
The cursor returned is @@ -137,7 +326,9 @@ public interface Index extends SchemaObject { * value should be returned * @return a cursor (never null) */ - Cursor findFirstOrLast(Session session, boolean first); + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + throw DbException.getInternalError(toString()); + } /** * Check if the index needs to be rebuilt. @@ -145,7 +336,7 @@ public interface Index extends SchemaObject { * * @return true if a rebuild is required. */ - boolean needRebuild(); + public abstract boolean needRebuild(); /** * Get the row count of this table, for the given session. @@ -153,21 +344,24 @@ public interface Index extends SchemaObject { * @param session the session * @return the row count */ - long getRowCount(Session session); + public abstract long getRowCount(SessionLocal session); /** * Get the approximated row count for this table. * + * @param session the session * @return the approximated row count */ - long getRowCountApproximation(); + public abstract long getRowCountApproximation(SessionLocal session); /** * Get the used disk space for this index. * * @return the estimated number of bytes */ - long getDiskSpaceUsed(); + public long getDiskSpaceUsed() { + return 0L; + } /** * Compare two rows. 
@@ -177,7 +371,40 @@ public interface Index extends SchemaObject { * @return 0 if both rows are equal, -1 if the first row is smaller, * otherwise 1 */ - int compareRows(SearchRow rowData, SearchRow compare); + public final int compareRows(SearchRow rowData, SearchRow compare) { + if (rowData == compare) { + return 0; + } + for (int i = 0, len = indexColumns.length; i < len; i++) { + int index = columnIds[i]; + Value v1 = rowData.getValue(index); + Value v2 = compare.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + return 0; + } + int c = compareValues(v1, v2, indexColumns[i].sortType); + if (c != 0) { + return c; + } + } + return 0; + } + + private int compareValues(Value a, Value b, int sortType) { + if (a == b) { + return 0; + } + boolean aNull = a == ValueNull.INSTANCE; + if (aNull || b == ValueNull.INSTANCE) { + return table.getDatabase().getDefaultNullOrdering().compareNull(aNull, sortType); + } + int comp = table.compareValues(database, a, b); + if ((sortType & SortOrder.DESCENDING) != 0) { + comp = -comp; + } + return comp; + } /** * Get the index of a column in the list of index columns @@ -185,44 +412,71 @@ public interface Index extends SchemaObject { * @param col the column * @return the index (0 meaning first column) */ - int getColumnIndex(Column col); + public int getColumnIndex(Column col) { + for (int i = 0, len = columns.length; i < len; i++) { + if (columns[i].equals(col)) { + return i; + } + } + return -1; + } + + /** + * Check if the given column is the first for this index + * + * @param column the column + * @return true if the given columns is the first + */ + public boolean isFirstColumn(Column column) { + return column.equals(columns[0]); + } /** * Get the indexed columns as index columns (with ordering information). * * @return the index columns */ - IndexColumn[] getIndexColumns(); + public final IndexColumn[] getIndexColumns() { + return indexColumns; + } /** * Get the indexed columns. 
* * @return the columns */ - Column[] getColumns(); + public final Column[] getColumns() { + return columns; + } /** - * Get the index type. + * Returns count of unique columns. Unique columns, if any, are always first + * columns in the lists. Unique indexes may have additional indexed + * non-unique columns. * - * @return the index type + * @return count of unique columns, or 0 if index isn't unique */ - IndexType getIndexType(); + public final int getUniqueColumnCount() { + return uniqueColumnColumn; + } /** - * Get the table on which this index is based. + * Get the index type. * - * @return the table + * @return the index type */ - Table getTable(); + public final IndexType getIndexType() { + return indexType; + } /** - * Commit the operation for a row. This is only important for multi-version - * indexes. The method is only called if multi-version is enabled. + * Get the table on which this index is based. * - * @param operation the operation type - * @param row the row + * @return the table */ - void commit(int operation, Row row); + public Table getTable() { + return table; + } /** * Get the row with the given key. @@ -231,29 +485,259 @@ public interface Index extends SchemaObject { * @param key the unique key * @return the row */ - Row getRow(Session session, long key); + public Row getRow(SessionLocal session, long key) { + throw DbException.getUnsupportedException(toString()); + } /** * Does this index support lookup by row id? * * @return true if it does */ - boolean isRowIdIndex(); + public boolean isRowIdIndex() { + return false; + } /** * Can this index iterate over all rows? * * @return true if it can */ - boolean canScan(); + public boolean canScan() { + return true; + } + + /** + * Create a duplicate key exception with a message that contains the index + * name. 
+ * + * @param key the key values + * @return the exception + */ + public DbException getDuplicateKeyException(String key) { + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(" ON "); + table.getSQL(builder, TRACE_SQL_FLAGS); + getColumnListSQL(builder, TRACE_SQL_FLAGS); + if (key != null) { + builder.append(" VALUES ").append(key); + } + DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, builder.toString()); + e.setSource(this); + return e; + } + + /** + * Get "PRIMARY KEY ON <table> [(column)]". + * + * @param mainIndexColumn the column index + * @return the message + */ + protected StringBuilder getDuplicatePrimaryKeyMessage(int mainIndexColumn) { + StringBuilder builder = new StringBuilder("PRIMARY KEY ON "); + table.getSQL(builder, TRACE_SQL_FLAGS); + if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { + builder.append('('); + indexColumns[mainIndexColumn].getSQL(builder, TRACE_SQL_FLAGS).append(')'); + } + return builder; + } + + /** + * Calculate the cost for the given mask as if this index was a typical + * b-tree range index. This is the estimated cost required to search one + * row, and then iterate over the given number of rows. 
+ * + * @param masks the IndexCondition search masks, one for each column in the + * table + * @param rowCount the number of rows in the index + * @param filters all joined table filters + * @param filter the current table filter index + * @param sortOrder the sort order + * @param isScanIndex whether this is a "table scan" index + * @param allColumnsSet the set of all columns + * @return the estimated cost + */ + protected final long getCostRangeIndex(int[] masks, long rowCount, TableFilter[] filters, int filter, + SortOrder sortOrder, boolean isScanIndex, AllColumnsForPlan allColumnsSet) { + rowCount += Constants.COST_ROW_OFFSET; + int totalSelectivity = 0; + long rowsCost = rowCount; + if (masks != null) { + int i = 0, len = columns.length; + boolean tryAdditional = false; + while (i < len) { + Column column = columns[i++]; + int index = column.getColumnId(); + int mask = masks[index]; + if ((mask & IndexCondition.EQUALITY) == IndexCondition.EQUALITY) { + if (i > 0 && i == uniqueColumnColumn) { + rowsCost = 3; + break; + } + totalSelectivity = 100 - ((100 - totalSelectivity) * + (100 - column.getSelectivity()) / 100); + long distinctRows = rowCount * totalSelectivity / 100; + if (distinctRows <= 0) { + distinctRows = 1; + } + rowsCost = 2 + Math.max(rowCount / distinctRows, 1); + } else if ((mask & IndexCondition.RANGE) == IndexCondition.RANGE) { + rowsCost = 2 + rowsCost / 4; + tryAdditional = true; + break; + } else if ((mask & IndexCondition.START) == IndexCondition.START) { + rowsCost = 2 + rowsCost / 3; + tryAdditional = true; + break; + } else if ((mask & IndexCondition.END) == IndexCondition.END) { + rowsCost = rowsCost / 3; + tryAdditional = true; + break; + } else { + if (mask == 0) { + // Adjust counter of used columns (i) + i--; + } + break; + } + } + // Some additional columns can still be used + if (tryAdditional) { + while (i < len && masks[columns[i].getColumnId()] != 0) { + i++; + rowsCost--; + } + } + // Increase cost of indexes with additional 
unused columns + rowsCost += len - i; + } + // If the ORDER BY clause matches the ordering of this index, + // it will be cheaper than another index, so adjust the cost + // accordingly. + long sortingCost = 0; + if (sortOrder != null) { + sortingCost = 100 + rowCount / 10; + } + if (sortOrder != null && !isScanIndex) { + boolean sortOrderMatches = true; + int coveringCount = 0; + int[] sortTypes = sortOrder.getSortTypesWithNullOrdering(); + TableFilter tableFilter = filters == null ? null : filters[filter]; + for (int i = 0, len = sortTypes.length; i < len; i++) { + if (i >= indexColumns.length) { + // We can still use this index if we are sorting by more + // than it's columns, it's just that the coveringCount + // is lower than with an index that contains + // more of the order by columns. + break; + } + Column col = sortOrder.getColumn(i, tableFilter); + if (col == null) { + sortOrderMatches = false; + break; + } + IndexColumn indexCol = indexColumns[i]; + if (!col.equals(indexCol.column)) { + sortOrderMatches = false; + break; + } + int sortType = sortTypes[i]; + if (sortType != indexCol.sortType) { + sortOrderMatches = false; + break; + } + coveringCount++; + } + if (sortOrderMatches) { + // "coveringCount" makes sure that when we have two + // or more covering indexes, we choose the one + // that covers more. + sortingCost = 100 - coveringCount; + } + } + // If we have two indexes with the same cost, and one of the indexes can + // satisfy the query without needing to read from the primary table + // (scan index), make that one slightly lower cost. 
+ boolean needsToReadFromScanIndex; + if (!isScanIndex && allColumnsSet != null) { + needsToReadFromScanIndex = false; + ArrayList foundCols = allColumnsSet.get(getTable()); + if (foundCols != null) { + int main = table.getMainIndexColumn(); + loop: for (Column c : foundCols) { + int id = c.getColumnId(); + if (id == SearchRow.ROWID_INDEX || id == main) { + continue; + } + for (Column c2 : columns) { + if (c == c2) { + continue loop; + } + } + needsToReadFromScanIndex = true; + break; + } + } + } else { + needsToReadFromScanIndex = true; + } + long rc; + if (isScanIndex) { + rc = rowsCost + sortingCost + 20; + } else if (needsToReadFromScanIndex) { + rc = rowsCost + rowsCost + sortingCost + 20; + } else { + // The (20-x) calculation makes sure that when we pick a covering + // index, we pick the covering index that has the smallest number of + // columns (the more columns we have in index - the higher cost). + // This is faster because a smaller index will fit into fewer data + // blocks. + rc = rowsCost + sortingCost + columns.length; + } + return rc; + } + /** - * Enable or disable the 'sorted insert' optimizations (rows are inserted in - * ascending or descending order) if applicable for this index - * implementation. + * Check if this row may have duplicates with the same indexed values in the + * current compatibility mode. Duplicates with {@code NULL} values are + * allowed in some modes. 
* - * @param sortedInsertMode the new value + * @param searchRow + * the row to check + * @return {@code true} if specified row may have duplicates, + * {@code false otherwise} */ - void setSortedInsertMode(boolean sortedInsertMode); + public final boolean mayHaveNullDuplicates(SearchRow searchRow) { + switch (database.getMode().uniqueIndexNullsHandling) { + case ALLOW_DUPLICATES_WITH_ANY_NULL: + for (int i = 0; i < uniqueColumnColumn; i++) { + int index = columnIds[i]; + if (searchRow.getValue(index) == ValueNull.INSTANCE) { + return true; + } + } + return false; + case ALLOW_DUPLICATES_WITH_ALL_NULLS: + for (int i = 0; i < uniqueColumnColumn; i++) { + int index = columnIds[i]; + if (searchRow.getValue(index) != ValueNull.INSTANCE) { + return false; + } + } + return true; + default: + return false; + } + } + + public RowFactory getRowFactory() { + return rowFactory; + } + + public RowFactory getUniqueRowFactory() { + return uniqueRowFactory; + } } diff --git a/h2/src/main/org/h2/index/IndexCondition.java b/h2/src/main/org/h2/index/IndexCondition.java index 3f9fbdfa17..d4b32d0590 100644 --- a/h2/src/main/org/h2/index/IndexCondition.java +++ b/h2/src/main/org/h2/index/IndexCondition.java @@ -1,27 +1,25 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; import java.util.Arrays; -import java.util.Comparator; -import java.util.HashSet; import java.util.List; -import org.h2.command.dml.Query; -import org.h2.engine.Session; -import org.h2.expression.Comparison; +import java.util.TreeSet; + +import org.h2.command.query.Query; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; import org.h2.expression.ExpressionVisitor; +import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.table.Column; -import org.h2.table.Table; -import org.h2.util.StatementBuilder; -import org.h2.value.CompareMode; +import org.h2.table.TableType; import org.h2.value.Value; /** @@ -125,8 +123,8 @@ public static IndexCondition getInList(ExpressionColumn column, * @return the index condition */ public static IndexCondition getInQuery(ExpressionColumn column, Query query) { - IndexCondition cond = new IndexCondition(Comparison.IN_QUERY, column, - null); + assert query.isRandomAccessResult(); + IndexCondition cond = new IndexCondition(Comparison.IN_QUERY, column, null); cond.expressionQuery = query; return cond; } @@ -137,7 +135,7 @@ public static IndexCondition getInQuery(ExpressionColumn column, Query query) { * @param session the session * @return the value */ - public Value getCurrentValue(Session session) { + public Value getCurrentValue(SessionLocal session) { return expression.getValue(session); } @@ -148,22 +146,15 @@ public Value getCurrentValue(Session session) { * @param session the session * @return the value list */ - public Value[] getCurrentValueList(Session session) { - HashSet valueSet = new HashSet(); + public Value[] getCurrentValueList(SessionLocal session) { + TreeSet valueSet = new TreeSet<>(session.getDatabase().getCompareMode()); for (Expression e : expressionList) { Value v = e.getValue(session); - v = 
column.convert(v); + v = column.convert(session, v); valueSet.add(v); } - Value[] array = new Value[valueSet.size()]; - valueSet.toArray(array); - final CompareMode mode = session.getDatabase().getCompareMode(); - Arrays.sort(array, new Comparator() { - @Override - public int compare(Value o1, Value o2) { - return o1.compareTo(o2, mode); - } - }); + Value[] array = valueSet.toArray(new Value[valueSet.size()]); + Arrays.sort(array, session.getDatabase().getCompareMode()); return array; } @@ -180,56 +171,55 @@ public ResultInterface getCurrentResult() { /** * Get the SQL snippet of this comparison. * + * @param sqlFlags formatting flags * @return the SQL snippet */ - public String getSQL() { + public String getSQL(int sqlFlags) { if (compareType == Comparison.FALSE) { return "FALSE"; } - StatementBuilder buff = new StatementBuilder(); - buff.append(column.getSQL()); + StringBuilder builder = new StringBuilder(); + column.getSQL(builder, sqlFlags); switch (compareType) { case Comparison.EQUAL: - buff.append(" = "); + builder.append(" = "); break; case Comparison.EQUAL_NULL_SAFE: - buff.append(" IS "); + builder.append(expression.isNullConstant() + || column.getType().getValueType() == Value.BOOLEAN && expression.isConstant() // + ? 
" IS " + : " IS NOT DISTINCT FROM "); break; case Comparison.BIGGER_EQUAL: - buff.append(" >= "); + builder.append(" >= "); break; case Comparison.BIGGER: - buff.append(" > "); + builder.append(" > "); break; case Comparison.SMALLER_EQUAL: - buff.append(" <= "); + builder.append(" <= "); break; case Comparison.SMALLER: - buff.append(" < "); + builder.append(" < "); break; case Comparison.IN_LIST: - buff.append(" IN("); - for (Expression e : expressionList) { - buff.appendExceptFirst(", "); - buff.append(e.getSQL()); - } - buff.append(')'); + Expression.writeExpressions(builder.append(" IN("), expressionList, sqlFlags).append(')'); break; case Comparison.IN_QUERY: - buff.append(" IN("); - buff.append(expressionQuery.getPlanSQL()); - buff.append(')'); + builder.append(" IN("); + builder.append(expressionQuery.getPlanSQL(sqlFlags)); + builder.append(')'); break; case Comparison.SPATIAL_INTERSECTS: - buff.append(" && "); + builder.append(" && "); break; default: - DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } if (expression != null) { - buff.append(expression.getSQL()); + expression.getSQL(builder, sqlFlags, Expression.AUTO_PARENTHESES); } - return buff.toString(); + return builder.toString(); } /** @@ -248,7 +238,7 @@ public int getMask(ArrayList indexConditions) { case Comparison.IN_LIST: case Comparison.IN_QUERY: if (indexConditions.size() > 1) { - if (!Table.TABLE.equals(column.getTable().getTableType())) { + if (TableType.TABLE != column.getTable().getTableType()) { // if combined with other conditions, // IN(..) 
can only be used for regular tables // test case: @@ -270,7 +260,7 @@ public int getMask(ArrayList indexConditions) { case Comparison.SPATIAL_INTERSECTS: return SPATIAL_INTERSECTS; default: - throw DbException.throwInternalError("type=" + compareType); + throw DbException.getInternalError("type=" + compareType); } } @@ -334,22 +324,6 @@ public boolean isSpatialIntersects() { } } - /** - * Check if this index condition is of the type equality. - * - * @param constantExpression if the inner node is a constant expression - * @return true if this is a equality condition - */ - public boolean isEquality(boolean constantExpression) { - switch (compareType) { - case Comparison.EQUAL: - case Comparison.EQUAL_NULL_SAFE: - return !constantExpression || expression.isConstant(); - default: - return false; - } - } - public int getCompareType() { return compareType; } @@ -363,6 +337,33 @@ public Column getColumn() { return column; } + /** + * Get expression. + * + * @return Expression. + */ + public Expression getExpression() { + return expression; + } + + /** + * Get expression list. + * + * @return Expression list. + */ + public List getExpressionList() { + return expressionList; + } + + /** + * Get expression query. + * + * @return Expression query. + */ + public Query getExpressionQuery() { + return expressionQuery; + } + /** * Check if the expression can be evaluated. 
* @@ -387,36 +388,47 @@ public boolean isEvaluatable() { @Override public String toString() { - return "column=" + column + - ", compareType=" + compareTypeToString(compareType) + - ", expression=" + expression + - ", expressionList=" + expressionList.toString() + - ", expressionQuery=" + expressionQuery; + StringBuilder builder = new StringBuilder("column=").append(column).append(", compareType="); + return compareTypeToString(builder, compareType) + .append(", expression=").append(expression) + .append(", expressionList=").append(expressionList) + .append(", expressionQuery=").append(expressionQuery).toString(); } - private static String compareTypeToString(int i) { - StatementBuilder s = new StatementBuilder(); + private static StringBuilder compareTypeToString(StringBuilder builder, int i) { + boolean f = false; if ((i & EQUALITY) == EQUALITY) { - s.appendExceptFirst("&"); - s.append("EQUALITY"); + f = true; + builder.append("EQUALITY"); } if ((i & START) == START) { - s.appendExceptFirst("&"); - s.append("START"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("START"); } if ((i & END) == END) { - s.appendExceptFirst("&"); - s.append("END"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("END"); } if ((i & ALWAYS_FALSE) == ALWAYS_FALSE) { - s.appendExceptFirst("&"); - s.append("ALWAYS_FALSE"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("ALWAYS_FALSE"); } if ((i & SPATIAL_INTERSECTS) == SPATIAL_INTERSECTS) { - s.appendExceptFirst("&"); - s.append("SPATIAL_INTERSECTS"); + if (f) { + builder.append(", "); + } + builder.append("SPATIAL_INTERSECTS"); } - return s.toString(); + return builder; } } diff --git a/h2/src/main/org/h2/index/IndexCursor.java b/h2/src/main/org/h2/index/IndexCursor.java index 32783bf48b..2fe8d6fd73 100644 --- a/h2/src/main/org/h2/index/IndexCursor.java +++ b/h2/src/main/org/h2/index/IndexCursor.java @@ -1,14 +1,14 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; -import java.util.HashSet; -import org.h2.engine.Session; -import org.h2.expression.Comparison; + +import org.h2.engine.SessionLocal; +import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.result.ResultInterface; import org.h2.result.Row; @@ -17,7 +17,6 @@ import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.Table; -import org.h2.table.TableFilter; import org.h2.value.Value; import org.h2.value.ValueGeometry; import org.h2.value.ValueNull; @@ -32,8 +31,7 @@ */ public class IndexCursor implements Cursor { - private Session session; - private final TableFilter tableFilter; + private SessionLocal session; private Index index; private Table table; private IndexColumn[] indexColumns; @@ -45,10 +43,8 @@ public class IndexCursor implements Cursor { private int inListIndex; private Value[] inList; private ResultInterface inResult; - private HashSet inResultTested; - public IndexCursor(TableFilter filter) { - this.tableFilter = filter; + public IndexCursor() { } public void setIndex(Index index) { @@ -68,27 +64,29 @@ public void setIndex(Index index) { } /** - * Re-evaluate the start and end values of the index search for rows. + * Prepare this index cursor to make a lookup in index. * - * @param s the session - * @param indexConditions the index conditions + * @param s Session. + * @param indexConditions Index conditions. 
*/ - public void find(Session s, ArrayList indexConditions) { - this.session = s; + public void prepare(SessionLocal s, ArrayList indexConditions) { + session = s; alwaysFalse = false; start = end = null; inList = null; inColumn = null; inResult = null; - inResultTested = null; intersects = null; - // don't use enhanced for loop to avoid creating objects - for (int i = 0, size = indexConditions.size(); i < size; i++) { - IndexCondition condition = indexConditions.get(i); + for (IndexCondition condition : indexConditions) { if (condition.isAlwaysFalse()) { alwaysFalse = true; break; } + // If index can perform only full table scan do not try to use it for regular + // lookups, each such lookup will perform an own table scan. + if (index.isFindUsingFullTableScan()) { + continue; + } Column column = condition.getColumn(); if (condition.getCompareType() == Comparison.IN_LIST) { if (start == null && end == null) { @@ -111,7 +109,7 @@ public void find(Session s, ArrayList indexConditions) { boolean isEnd = condition.isEnd(); boolean isIntersects = condition.isSpatialIntersects(); int columnId = column.getColumnId(); - if (columnId >= 0) { + if (columnId != SearchRow.ROWID_INDEX) { IndexColumn idxCol = indexColumns[columnId]; if (idxCol != null && (idxCol.sortType & SortOrder.DESCENDING) != 0) { // if the index column is sorted the other way, we swap @@ -131,32 +129,36 @@ public void find(Session s, ArrayList indexConditions) { if (isIntersects) { intersects = getSpatialSearchRow(intersects, columnId, v); } - if (isStart || isEnd) { - // an X=? condition will produce less rows than - // an X IN(..) condition + // An X=? condition will produce less rows than + // an X IN(..) condition, unless the X IN condition can use the index. 
+ if ((isStart || isEnd) && !canUseIndexFor(inColumn)) { inColumn = null; inList = null; inResult = null; } - if (!session.getDatabase().getSettings().optimizeIsNull) { - if (isStart && isEnd) { - if (v == ValueNull.INSTANCE) { - // join on a column=NULL is always false - alwaysFalse = true; - } - } - } } } + if (inColumn != null) { + start = table.getTemplateRow(); + } + } + + /** + * Re-evaluate the start and end values of the index search for rows. + * + * @param s the session + * @param indexConditions the index conditions + */ + public void find(SessionLocal s, ArrayList indexConditions) { + prepare(s, indexConditions); if (inColumn != null) { return; } if (!alwaysFalse) { if (intersects != null && index instanceof SpatialIndex) { - cursor = ((SpatialIndex) index).findByGeometry(tableFilter, - intersects); - } else { - cursor = index.find(tableFilter, start, end); + cursor = ((SpatialIndex) index).findByGeometry(session, start, end, intersects); + } else if (index != null) { + cursor = index.find(session, start, end); } } } @@ -166,6 +168,10 @@ private boolean canUseIndexForIn(Column column) { // only one IN(..) condition can be used at the same time return false; } + return canUseIndexFor(column); + } + + private boolean canUseIndexFor(Column column) { // The first column of the index must match this column, // or it must be a VIEW index (where the column is null). // Multiple IN conditions with views are not supported, see @@ -183,30 +189,27 @@ private SearchRow getSpatialSearchRow(SearchRow row, int columnId, Value v) { row = table.getTemplateRow(); } else if (row.getValue(columnId) != null) { // if an object needs to overlap with both a and b, - // then it needs to overlap with the the union of a and b + // then it needs to overlap with the union of a and b // (not the intersection) - ValueGeometry vg = (ValueGeometry) row.getValue(columnId). - convertTo(Value.GEOMETRY); - v = ((ValueGeometry) v.convertTo(Value.GEOMETRY)). 
- getEnvelopeUnion(vg); + ValueGeometry vg = row.getValue(columnId).convertToGeometry(null); + v = v.convertToGeometry(null).getEnvelopeUnion(vg); } - if (columnId < 0) { - row.setKey(v.getLong()); + if (columnId == SearchRow.ROWID_INDEX) { + row.setKey(v == ValueNull.INSTANCE ? Long.MIN_VALUE : v.getLong()); } else { row.setValue(columnId, v); } return row; } - private SearchRow getSearchRow(SearchRow row, int columnId, Value v, - boolean max) { + private SearchRow getSearchRow(SearchRow row, int columnId, Value v, boolean max) { if (row == null) { row = table.getTemplateRow(); } else { v = getMax(row.getValue(columnId), v, max); } - if (columnId < 0) { - row.setKey(v.getLong()); + if (columnId == SearchRow.ROWID_INDEX) { + row.setKey(v == ValueNull.INSTANCE ? Long.MIN_VALUE : v.getLong()); } else { row.setValue(columnId, v); } @@ -219,28 +222,17 @@ private Value getMax(Value a, Value b, boolean bigger) { } else if (b == null) { return a; } - if (session.getDatabase().getSettings().optimizeIsNull) { - // IS NULL must be checked later - if (a == ValueNull.INSTANCE) { - return b; - } else if (b == ValueNull.INSTANCE) { - return a; - } + // IS NULL must be checked later + if (a == ValueNull.INSTANCE) { + return b; + } else if (b == ValueNull.INSTANCE) { + return a; } - int comp = a.compareTo(b, table.getDatabase().getCompareMode()); + int comp = session.compare(a, b); if (comp == 0) { return a; } - if (a == ValueNull.INSTANCE || b == ValueNull.INSTANCE) { - if (session.getDatabase().getSettings().optimizeIsNull) { - // column IS NULL AND column is always false - return null; - } - } - if (!bigger) { - comp = -comp; - } - return comp > 0 ? a : b; + return (comp > 0) == bigger ? a : b; } /** @@ -252,6 +244,24 @@ public boolean isAlwaysFalse() { return alwaysFalse; } + /** + * Get start search row. + * + * @return search row + */ + public SearchRow getStart() { + return start; + } + + /** + * Get end search row. 
+ * + * @return search row + */ + public SearchRow getEnd() { + return end; + } + @Override public Row get() { if (cursor == null) { @@ -294,32 +304,23 @@ private void nextCursor() { while (inResult.next()) { Value v = inResult.currentRow()[0]; if (v != ValueNull.INSTANCE) { - v = inColumn.convert(v); - if (inResultTested == null) { - inResultTested = new HashSet(); - } - if (inResultTested.add(v)) { - find(v); - break; - } + find(v); + break; } } } } private void find(Value v) { - v = inColumn.convert(v); + v = inColumn.convert(session, v); int id = inColumn.getColumnId(); - if (start == null) { - start = table.getTemplateRow(); - } start.setValue(id, v); - cursor = index.find(tableFilter, start, start); + cursor = index.find(session, start, start); } @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/IndexType.java b/h2/src/main/org/h2/index/IndexType.java index 896b2ceb43..6949b61585 100644 --- a/h2/src/main/org/h2/index/IndexType.java +++ b/h2/src/main/org/h2/index/IndexType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; diff --git a/h2/src/main/org/h2/index/LinkedCursor.java b/h2/src/main/org/h2/index/LinkedCursor.java index bdb4c87638..75fb1e3b82 100644 --- a/h2/src/main/org/h2/index/LinkedCursor.java +++ b/h2/src/main/org/h2/index/LinkedCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -8,15 +8,12 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; - -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; -import org.h2.table.Column; import org.h2.table.TableLink; -import org.h2.value.DataType; -import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter2; /** * The cursor implementation for the linked index. @@ -26,11 +23,11 @@ public class LinkedCursor implements Cursor { private final TableLink tableLink; private final PreparedStatement prep; private final String sql; - private final Session session; + private final SessionLocal session; private final ResultSet rs; private Row current; - LinkedCursor(TableLink tableLink, ResultSet rs, Session session, + LinkedCursor(TableLink tableLink, ResultSet rs, SessionLocal session, String sql, PreparedStatement prep) { this.session = session; this.tableLink = tableLink; @@ -64,16 +61,15 @@ public boolean next() { } current = tableLink.getTemplateRow(); for (int i = 0; i < current.getColumnCount(); i++) { - Column col = tableLink.getColumn(i); - Value v = DataType.readValue(session, rs, i + 1, col.getType()); - current.setValue(i, v); + current.setValue(i, ValueToObjectConverter2.readValue(session, rs, i + 1, + tableLink.getColumn(i).getType().getValueType())); } return true; } @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/LinkedIndex.java b/h2/src/main/org/h2/index/LinkedIndex.java index ca2dc1f90f..b5b9a00914 100644 --- a/h2/src/main/org/h2/index/LinkedIndex.java +++ b/h2/src/main/org/h2/index/LinkedIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -8,8 +8,10 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.util.ArrayList; + +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Constants; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -18,8 +20,8 @@ import org.h2.table.IndexColumn; import org.h2.table.TableFilter; import org.h2.table.TableLink; -import org.h2.util.New; -import org.h2.util.StatementBuilder; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueNull; @@ -27,15 +29,16 @@ * A linked index is a index for a linked (remote) table. * It is backed by an index on the remote table which is accessed over JDBC. */ -public class LinkedIndex extends BaseIndex { +public class LinkedIndex extends Index { private final TableLink link; private final String targetTableName; private long rowCount; - public LinkedIndex(TableLink table, int id, IndexColumn[] columns, - IndexType indexType) { - initBaseIndex(table, id, null, columns, indexType); + private final int sqlFlags = QUOTE_ONLY_WHEN_REQUIRED; + + public LinkedIndex(TableLink table, int id, IndexColumn[] columns, int uniqueColumnCount, IndexType indexType) { + super(table, id, null, columns, uniqueColumnCount, indexType); link = table; targetTableName = link.getQualifiedTable(); } @@ -46,7 +49,7 @@ public String getCreateSQL() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @@ -55,13 +58,15 @@ private static boolean isNull(Value v) { } @Override - public void add(Session session, Row row) { - ArrayList params = New.arrayList(); - StatementBuilder buff = new StatementBuilder("INSERT INTO "); 
+ public void add(SessionLocal session, Row row) { + ArrayList params = Utils.newSmallArrayList(); + StringBuilder buff = new StringBuilder("INSERT INTO "); buff.append(targetTableName).append(" VALUES("); for (int i = 0; i < row.getColumnCount(); i++) { Value v = row.getValue(i); - buff.appendExceptFirst(", "); + if (i > 0) { + buff.append(", "); + } if (v == null) { buff.append("DEFAULT"); } else if (isNull(v)) { @@ -74,7 +79,7 @@ public void add(Session session, Row row) { buff.append(')'); String sql = buff.toString(); try { - link.execute(sql, params, true); + link.execute(sql, params, true, session); rowCount++; } catch (Exception e) { throw TableLink.wrapException(sql, e); @@ -82,22 +87,22 @@ public void add(Session session, Row row) { } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - ArrayList params = New.arrayList(); - StatementBuilder buff = new StatementBuilder("SELECT * FROM "); - buff.append(targetTableName).append(" T"); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + ArrayList params = Utils.newSmallArrayList(); + StringBuilder builder = new StringBuilder("SELECT * FROM ").append(targetTableName).append(" T"); + boolean f = false; for (int i = 0; first != null && i < first.getColumnCount(); i++) { Value v = first.getValue(i); if (v != null) { - buff.appendOnlyFirst(" WHERE "); - buff.appendExceptFirst(" AND "); + builder.append(f ? 
" AND " : " WHERE "); + f = true; Column col = table.getColumn(i); - buff.append(col.getSQL()); + col.getSQL(builder, sqlFlags); if (v == ValueNull.INSTANCE) { - buff.append(" IS NULL"); + builder.append(" IS NULL"); } else { - buff.append(">="); - addParameter(buff, col); + builder.append(">="); + addParameter(builder, col); params.add(v); } } @@ -105,22 +110,22 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { for (int i = 0; last != null && i < last.getColumnCount(); i++) { Value v = last.getValue(i); if (v != null) { - buff.appendOnlyFirst(" WHERE "); - buff.appendExceptFirst(" AND "); + builder.append(f ? " AND " : " WHERE "); + f = true; Column col = table.getColumn(i); - buff.append(col.getSQL()); + col.getSQL(builder, sqlFlags); if (v == ValueNull.INSTANCE) { - buff.append(" IS NULL"); + builder.append(" IS NULL"); } else { - buff.append("<="); - addParameter(buff, col); + builder.append("<="); + addParameter(builder, col); params.add(v); } } } - String sql = buff.toString(); + String sql = builder.toString(); try { - PreparedStatement prep = link.execute(sql, params, false); + PreparedStatement prep = link.execute(sql, params, false, session); ResultSet rs = prep.getResultSet(); return new LinkedCursor(link, rs, session, sql, prep); } catch (Exception e) { @@ -128,32 +133,34 @@ public Cursor find(Session session, SearchRow first, SearchRow last) { } } - private void addParameter(StatementBuilder buff, Column col) { - if (col.getType() == Value.STRING_FIXED && link.isOracle()) { + private void addParameter(StringBuilder builder, Column col) { + TypeInfo type = col.getType(); + if (type.getValueType() == Value.CHAR && link.isOracle()) { // workaround for Oracle // create table test(id int primary key, name char(15)); // insert into test values(1, 'Hello') // select * from test where name = ? -- where ? = "Hello" > no rows - buff.append("CAST(? AS CHAR(").append(col.getPrecision()).append("))"); + builder.append("CAST(? 
AS CHAR(").append(type.getPrecision()).append("))"); } else { - buff.append('?'); + builder.append('?'); } } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { return 100 + getCostRangeIndex(masks, rowCount + - Constants.COST_ROW_OFFSET, filter, sortOrder); + Constants.COST_ROW_OFFSET, filters, filter, sortOrder, false, allColumnsSet); } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { // nothing to do } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { // nothing to do } @@ -168,39 +175,28 @@ public boolean needRebuild() { } @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - // TODO optimization: could get the first or last value (in any case; - // maybe not optimized) - throw DbException.getUnsupportedException("LINKED"); - } - - @Override - public void remove(Session session, Row row) { - ArrayList params = New.arrayList(); - StatementBuilder buff = new StatementBuilder("DELETE FROM "); - buff.append(targetTableName).append(" WHERE "); + public void remove(SessionLocal session, Row row) { + ArrayList params = Utils.newSmallArrayList(); + StringBuilder builder = new StringBuilder("DELETE FROM ").append(targetTableName).append(" WHERE "); for (int i = 0; i < row.getColumnCount(); i++) { - buff.appendExceptFirst("AND "); + if (i > 0) { + builder.append("AND "); + } Column col = table.getColumn(i); - buff.append(col.getSQL()); + col.getSQL(builder, sqlFlags); Value v = row.getValue(i); if (isNull(v)) { - buff.append(" IS NULL "); + builder.append(" IS NULL "); } else { - buff.append('='); - addParameter(buff, col); + builder.append('='); + addParameter(builder, col); 
params.add(v); - buff.append(' '); + builder.append(' '); } } - String sql = buff.toString(); + String sql = builder.toString(); try { - PreparedStatement prep = link.execute(sql, params, false); + PreparedStatement prep = link.execute(sql, params, false, session); int count = prep.executeUpdate(); link.reusePreparedStatement(prep, sql); rowCount -= count; @@ -215,57 +211,56 @@ public void remove(Session session, Row row) { * * @param oldRow the old data * @param newRow the new data + * @param session the session */ - public void update(Row oldRow, Row newRow) { - ArrayList params = New.arrayList(); - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(targetTableName).append(" SET "); + public void update(Row oldRow, Row newRow, SessionLocal session) { + ArrayList params = Utils.newSmallArrayList(); + StringBuilder builder = new StringBuilder("UPDATE ").append(targetTableName).append(" SET "); for (int i = 0; i < newRow.getColumnCount(); i++) { - buff.appendExceptFirst(", "); - buff.append(table.getColumn(i).getSQL()).append('='); + if (i > 0) { + builder.append(", "); + } + table.getColumn(i).getSQL(builder, sqlFlags).append('='); Value v = newRow.getValue(i); if (v == null) { - buff.append("DEFAULT"); + builder.append("DEFAULT"); } else { - buff.append('?'); + builder.append('?'); params.add(v); } } - buff.append(" WHERE "); - buff.resetCount(); + builder.append(" WHERE "); for (int i = 0; i < oldRow.getColumnCount(); i++) { Column col = table.getColumn(i); - buff.appendExceptFirst(" AND "); - buff.append(col.getSQL()); + if (i > 0) { + builder.append(" AND "); + } + col.getSQL(builder, sqlFlags); Value v = oldRow.getValue(i); if (isNull(v)) { - buff.append(" IS NULL"); + builder.append(" IS NULL"); } else { - buff.append('='); + builder.append('='); params.add(v); - addParameter(buff, col); + addParameter(builder, col); } } - String sql = buff.toString(); + String sql = builder.toString(); try { - link.execute(sql, params, true); + 
link.execute(sql, params, true, session); } catch (Exception e) { throw TableLink.wrapException(sql, e); } } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return rowCount; } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return rowCount; } - @Override - public long getDiskSpaceUsed() { - return 0; - } } diff --git a/h2/src/main/org/h2/index/MetaCursor.java b/h2/src/main/org/h2/index/MetaCursor.java index bcc3efe35b..8932d016ca 100644 --- a/h2/src/main/org/h2/index/MetaCursor.java +++ b/h2/src/main/org/h2/index/MetaCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -42,7 +42,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/MetaIndex.java b/h2/src/main/org/h2/index/MetaIndex.java index f67cf4d0bf..86ee869899 100644 --- a/h2/src/main/org/h2/index/MetaIndex.java +++ b/h2/src/main/org/h2/index/MetaIndex.java @@ -1,12 +1,14 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; -import org.h2.engine.Session; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -19,55 +21,56 @@ /** * The index implementation for meta data tables. */ -public class MetaIndex extends BaseIndex { +public class MetaIndex extends Index { private final MetaTable meta; private final boolean scan; public MetaIndex(MetaTable meta, IndexColumn[] columns, boolean scan) { - initBaseIndex(meta, 0, null, columns, IndexType.createNonUnique(true)); + super(meta, 0, null, columns, 0, IndexType.createNonUnique(true)); this.meta = meta; this.scan = scan; } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { throw DbException.getUnsupportedException("META"); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { ArrayList rows = meta.generateRows(session, first, last); return new MetaCursor(rows); } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { if (scan) { return 10 * MetaTable.ROW_COUNT_APPROXIMATION; } return getCostRangeIndex(masks, MetaTable.ROW_COUNT_APPROXIMATION, - filter, sortOrder); + filters, filter, sortOrder, false, allColumnsSet); } @Override - public void truncate(Session session) { + public void 
truncate(SessionLocal session) { throw DbException.getUnsupportedException("META"); } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { throw DbException.getUnsupportedException("META"); } @@ -80,6 +83,14 @@ public int getColumnIndex(Column col) { return super.getColumnIndex(col); } + @Override + public boolean isFirstColumn(Column column) { + if (scan) { + return false; + } + return super.isFirstColumn(column); + } + @Override public void checkRename() { throw DbException.getUnsupportedException("META"); @@ -96,22 +107,12 @@ public String getCreateSQL() { } @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("META"); - } - - @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return MetaTable.ROW_COUNT_APPROXIMATION; } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return MetaTable.ROW_COUNT_APPROXIMATION; } diff --git a/h2/src/main/org/h2/index/MultiVersionCursor.java b/h2/src/main/org/h2/index/MultiVersionCursor.java deleted file mode 100644 index 32afb5e23a..0000000000 --- a/h2/src/main/org/h2/index/MultiVersionCursor.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.util.MathUtils; - -/** - * The cursor implementation for the multi-version index. 
- */ -public class MultiVersionCursor implements Cursor { - - private final MultiVersionIndex index; - private final Session session; - private final Cursor baseCursor, deltaCursor; - private final Object sync; - private SearchRow baseRow; - private Row deltaRow; - private boolean onBase; - private boolean end; - private boolean needNewDelta, needNewBase; - private boolean reverse; - - MultiVersionCursor(Session session, MultiVersionIndex index, Cursor base, - Cursor delta, Object sync) { - this.session = session; - this.index = index; - this.baseCursor = base; - this.deltaCursor = delta; - this.sync = sync; - needNewDelta = true; - needNewBase = true; - } - - /** - * Load the current row. - */ - void loadCurrent() { - synchronized (sync) { - baseRow = baseCursor.getSearchRow(); - deltaRow = deltaCursor.get(); - needNewDelta = false; - needNewBase = false; - } - } - - private void loadNext(boolean base) { - synchronized (sync) { - if (base) { - if (step(baseCursor)) { - baseRow = baseCursor.getSearchRow(); - } else { - baseRow = null; - } - } else { - if (step(deltaCursor)) { - deltaRow = deltaCursor.get(); - } else { - deltaRow = null; - } - } - } - } - - private boolean step(Cursor cursor) { - return reverse ? cursor.previous() : cursor.next(); - } - - @Override - public Row get() { - synchronized (sync) { - if (end) { - return null; - } - return onBase ? baseCursor.get() : deltaCursor.get(); - } - } - - @Override - public SearchRow getSearchRow() { - synchronized (sync) { - if (end) { - return null; - } - return onBase ? 
baseCursor.getSearchRow() : deltaCursor.getSearchRow(); - } - } - - @Override - public boolean next() { - synchronized (sync) { - if (SysProperties.CHECK && end) { - DbException.throwInternalError(); - } - while (true) { - if (needNewDelta) { - loadNext(false); - needNewDelta = false; - } - if (needNewBase) { - loadNext(true); - needNewBase = false; - } - if (deltaRow == null) { - if (baseRow == null) { - end = true; - return false; - } - onBase = true; - needNewBase = true; - return true; - } - int sessionId = deltaRow.getSessionId(); - boolean isThisSession = sessionId == session.getId(); - boolean isDeleted = deltaRow.isDeleted(); - if (isThisSession && isDeleted) { - needNewDelta = true; - continue; - } - if (baseRow == null) { - if (isDeleted) { - if (isThisSession) { - end = true; - return false; - } - // the row was deleted by another session: return it - onBase = false; - needNewDelta = true; - return true; - } - DbException.throwInternalError(); - } - int compare = index.compareRows(deltaRow, baseRow); - if (compare == 0) { - // can't use compareKeys because the - // version would be compared as well - long k1 = deltaRow.getKey(); - long k2 = baseRow.getKey(); - compare = MathUtils.compareLong(k1, k2); - } - if (compare == 0) { - if (isDeleted) { - if (isThisSession) { - DbException.throwInternalError(); - } - // another session updated the row - } else { - if (isThisSession) { - onBase = false; - needNewBase = true; - needNewDelta = true; - return true; - } - // another session inserted the row: ignore - needNewBase = true; - needNewDelta = true; - continue; - } - } - if (compare > 0) { - onBase = true; - needNewBase = true; - return true; - } - onBase = false; - needNewDelta = true; - return true; - } - } - } - - @Override - public boolean previous() { - reverse = true; - try { - return next(); - } finally { - reverse = false; - } - } - -} diff --git a/h2/src/main/org/h2/index/MultiVersionIndex.java b/h2/src/main/org/h2/index/MultiVersionIndex.java 
deleted file mode 100644 index b14658c004..0000000000 --- a/h2/src/main/org/h2/index/MultiVersionIndex.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; - -import org.h2.api.ErrorCode; -import org.h2.engine.Database; -import org.h2.engine.DbObject; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.schema.Schema; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * A multi-version index is a combination of a regular index, - * and a in-memory tree index that contains uncommitted changes. - * Uncommitted changes can include new rows, and deleted rows. 
- */ -public class MultiVersionIndex implements Index { - - private final Index base; - private final TreeIndex delta; - private final RegularTable table; - private final Object sync; - private final Column firstColumn; - - public MultiVersionIndex(Index base, RegularTable table) { - this.base = base; - this.table = table; - IndexType deltaIndexType = IndexType.createNonUnique(false); - if (base instanceof SpatialIndex) { - throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1, - "MVCC & spatial index"); - } - this.delta = new TreeIndex(table, -1, "DELTA", base.getIndexColumns(), - deltaIndexType); - delta.setMultiVersion(true); - this.sync = base.getDatabase(); - this.firstColumn = base.getColumns()[0]; - } - - @Override - public void add(Session session, Row row) { - synchronized (sync) { - base.add(session, row); - if (removeIfExists(session, row)) { - // for example rolling back an delete operation - } else if (row.getSessionId() != 0) { - // don't insert rows that are added when creating an index - delta.add(session, row); - } - } - } - - @Override - public void close(Session session) { - synchronized (sync) { - base.close(session); - } - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - synchronized (sync) { - Cursor baseCursor = base.find(filter, first, last); - Cursor deltaCursor = delta.find(filter, first, last); - return new MultiVersionCursor(filter.getSession(), this, - baseCursor, deltaCursor, sync); - } - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - synchronized (sync) { - Cursor baseCursor = base.find(session, first, last); - Cursor deltaCursor = delta.find(session, first, last); - return new MultiVersionCursor(session, this, baseCursor, deltaCursor, sync); - } - } - - @Override - public Cursor findNext(Session session, SearchRow first, SearchRow last) { - throw DbException.throwInternalError(); - } - - @Override - public boolean canFindNext() { - // TODO 
possible, but more complicated - return false; - } - - @Override - public boolean canGetFirstOrLast() { - return base.canGetFirstOrLast() && delta.canGetFirstOrLast(); - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (first) { - // TODO optimization: this loops through NULL elements - Cursor cursor = find(session, null, null); - while (cursor.next()) { - SearchRow row = cursor.getSearchRow(); - Value v = row.getValue(firstColumn.getColumnId()); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - Cursor baseCursor = base.findFirstOrLast(session, false); - Cursor deltaCursor = delta.findFirstOrLast(session, false); - MultiVersionCursor cursor = new MultiVersionCursor(session, this, - baseCursor, deltaCursor, sync); - cursor.loadCurrent(); - // TODO optimization: this loops through NULL elements - while (cursor.previous()) { - SearchRow row = cursor.getSearchRow(); - if (row == null) { - break; - } - Value v = row.getValue(firstColumn.getColumnId()); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return base.getCost(session, masks, filter, sortOrder); - } - - @Override - public boolean needRebuild() { - return base.needRebuild(); - } - - /** - * Check if there is an uncommitted row with the given key - * within a different session. 
- * - * @param session the original session - * @param row the row (only the key is checked) - * @return true if there is an uncommitted row - */ - public boolean isUncommittedFromOtherSession(Session session, Row row) { - Cursor c = delta.find(session, row, row); - while (c.next()) { - Row r = c.get(); - return r.getSessionId() != session.getId(); - } - return false; - } - - private boolean removeIfExists(Session session, Row row) { - // maybe it was inserted by the same session just before - Cursor c = delta.find(session, row, row); - while (c.next()) { - Row r = c.get(); - if (r.getKey() == row.getKey() && r.getVersion() == row.getVersion()) { - if (r != row && table.getScanIndex(session).compareRows(r, row) != 0) { - row.setVersion(r.getVersion() + 1); - } else { - delta.remove(session, r); - return true; - } - } - } - return false; - } - - @Override - public void remove(Session session, Row row) { - synchronized (sync) { - base.remove(session, row); - if (removeIfExists(session, row)) { - // added and deleted in the same transaction: no change - } else { - delta.add(session, row); - } - } - } - - @Override - public void remove(Session session) { - synchronized (sync) { - base.remove(session); - } - } - - @Override - public void truncate(Session session) { - synchronized (sync) { - delta.truncate(session); - base.truncate(session); - } - } - - @Override - public void commit(int operation, Row row) { - synchronized (sync) { - removeIfExists(null, row); - } - } - - @Override - public int compareRows(SearchRow rowData, SearchRow compare) { - return base.compareRows(rowData, compare); - } - - @Override - public int getColumnIndex(Column col) { - return base.getColumnIndex(col); - } - - @Override - public Column[] getColumns() { - return base.getColumns(); - } - - @Override - public IndexColumn[] getIndexColumns() { - return base.getIndexColumns(); - } - - @Override - public String getCreateSQL() { - return base.getCreateSQL(); - } - - @Override - public String 
getCreateSQLForCopy(Table forTable, String quotedName) { - return base.getCreateSQLForCopy(forTable, quotedName); - } - - @Override - public String getDropSQL() { - return base.getDropSQL(); - } - - @Override - public IndexType getIndexType() { - return base.getIndexType(); - } - - @Override - public String getPlanSQL() { - return base.getPlanSQL(); - } - - @Override - public long getRowCount(Session session) { - return base.getRowCount(session); - } - - @Override - public Table getTable() { - return base.getTable(); - } - - @Override - public int getType() { - return base.getType(); - } - - @Override - public void removeChildrenAndResources(Session session) { - synchronized (sync) { - table.removeIndex(this); - remove(session); - } - } - - @Override - public String getSQL() { - return base.getSQL(); - } - - @Override - public Schema getSchema() { - return base.getSchema(); - } - - @Override - public void checkRename() { - base.checkRename(); - } - - @Override - public ArrayList getChildren() { - return base.getChildren(); - } - - @Override - public String getComment() { - return base.getComment(); - } - - @Override - public Database getDatabase() { - return base.getDatabase(); - } - - @Override - public int getId() { - return base.getId(); - } - - @Override - public String getName() { - return base.getName(); - } - - @Override - public boolean isTemporary() { - return base.isTemporary(); - } - - @Override - public void rename(String newName) { - base.rename(newName); - } - - @Override - public void setComment(String comment) { - base.setComment(comment); - } - - @Override - public void setTemporary(boolean temporary) { - base.setTemporary(temporary); - } - - @Override - public long getRowCountApproximation() { - return base.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return base.getDiskSpaceUsed(); - } - - public Index getBaseIndex() { - return base; - } - - @Override - public Row getRow(Session session, long key) { - return 
base.getRow(session, key); - } - - @Override - public boolean isHidden() { - return base.isHidden(); - } - - @Override - public boolean isRowIdIndex() { - return base.isRowIdIndex() && delta.isRowIdIndex(); - } - - @Override - public boolean canScan() { - return base.canScan(); - } - - @Override - public void setSortedInsertMode(boolean sortedInsertMode) { - base.setSortedInsertMode(sortedInsertMode); - delta.setSortedInsertMode(sortedInsertMode); - } - -} diff --git a/h2/src/main/org/h2/index/NonUniqueHashCursor.java b/h2/src/main/org/h2/index/NonUniqueHashCursor.java deleted file mode 100644 index e88b25045e..0000000000 --- a/h2/src/main/org/h2/index/NonUniqueHashCursor.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.table.RegularTable; - -/** - * Cursor implementation for non-unique hash index - * - * @author Sergi Vladykin - */ -public class NonUniqueHashCursor implements Cursor { - - private final Session session; - private final ArrayList positions; - private final RegularTable tableData; - - private int index = -1; - - public NonUniqueHashCursor(Session session, RegularTable tableData, - ArrayList positions) { - this.session = session; - this.tableData = tableData; - this.positions = positions; - } - - @Override - public Row get() { - if (index < 0 || index >= positions.size()) { - return null; - } - return tableData.getRow(session, positions.get(index)); - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - return positions != null && ++index < positions.size(); - } - - @Override - public boolean previous() { - return positions != null && --index >= 0; - } - -} diff --git 
a/h2/src/main/org/h2/index/NonUniqueHashIndex.java b/h2/src/main/org/h2/index/NonUniqueHashIndex.java deleted file mode 100644 index fa00a68dcd..0000000000 --- a/h2/src/main/org/h2/index/NonUniqueHashIndex.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.util.ValueHashMap; -import org.h2.value.Value; - -/** - * A non-unique index based on an in-memory hash map. - * - * @author Sergi Vladykin - */ -public class NonUniqueHashIndex extends BaseIndex { - - /** - * The index of the indexed column. 
- */ - private final int indexColumn; - private ValueHashMap> rows; - private final RegularTable tableData; - private long rowCount; - - public NonUniqueHashIndex(RegularTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - initBaseIndex(table, id, indexName, columns, indexType); - this.indexColumn = columns[0].column.getColumnId(); - this.tableData = table; - reset(); - } - - private void reset() { - rows = ValueHashMap.newInstance(); - rowCount = 0; - } - - @Override - public void truncate(Session session) { - reset(); - } - - @Override - public void add(Session session, Row row) { - Value key = row.getValue(indexColumn); - ArrayList positions = rows.get(key); - if (positions == null) { - positions = New.arrayList(); - rows.put(key, positions); - } - positions.add(row.getKey()); - rowCount++; - } - - @Override - public void remove(Session session, Row row) { - if (rowCount == 1) { - // last row in table - reset(); - } else { - Value key = row.getValue(indexColumn); - ArrayList positions = rows.get(key); - if (positions.size() == 1) { - // last row with such key - rows.remove(key); - } else { - positions.remove(row.getKey()); - } - rowCount--; - } - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - if (first == null || last == null) { - throw DbException.throwInternalError(); - } - if (first != last) { - if (compareKeys(first, last) != 0) { - throw DbException.throwInternalError(); - } - } - Value v = first.getValue(indexColumn); - /* - * Sometimes the incoming search is a similar, but not the same type - * e.g. the search value is INT, but the index column is LONG. In which - * case we need to convert, otherwise the ValueHashMap will not find the - * result. 
- */ - v = v.convertTo(tableData.getColumn(indexColumn).getType()); - ArrayList positions = rows.get(v); - return new NonUniqueHashCursor(session, tableData, positions); - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void remove(Session session) { - // nothing to do - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - for (Column column : columns) { - int index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.EQUALITY) != IndexCondition.EQUALITY) { - return Long.MAX_VALUE; - } - } - return 2; - } - - @Override - public void checkRename() { - // ok - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("HASH"); - } - - @Override - public boolean canScan() { - return false; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtree.java b/h2/src/main/org/h2/index/PageBtree.java deleted file mode 100644 index 0148e05c95..0000000000 --- a/h2/src/main/org/h2/index/PageBtree.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; - -/** - * A page that contains index data. - */ -public abstract class PageBtree extends Page { - - /** - * This is a root page. 
- */ - static final int ROOT = 0; - - /** - * Indicator that the row count is not known. - */ - static final int UNKNOWN_ROWCOUNT = -1; - - /** - * The index. - */ - protected final PageBtreeIndex index; - - /** - * The page number of the parent. - */ - protected int parentPageId; - - /** - * The data page. - */ - protected final Data data; - - /** - * The row offsets. - */ - protected int[] offsets; - - /** - * The number of entries. - */ - protected int entryCount; - - /** - * The index data - */ - protected SearchRow[] rows; - - /** - * The start of the data area. - */ - protected int start; - - /** - * If only the position of the row is stored in the page - */ - protected boolean onlyPosition; - - /** - * Whether the data page is up-to-date. - */ - protected boolean written; - - /** - * The estimated memory used by this object. - */ - private final int memoryEstimated; - - PageBtree(PageBtreeIndex index, int pageId, Data data) { - this.index = index; - this.data = data; - setPos(pageId); - memoryEstimated = index.getMemoryPerPage(); - } - - /** - * Get the real row count. If required, this will read all child pages. - * - * @return the row count - */ - abstract int getRowCount(); - - /** - * Set the stored row count. This will write the page. - * - * @param rowCount the stored row count - */ - abstract void setRowCountStored(int rowCount); - - /** - * Find an entry. 
- * - * @param compare the row - * @param bigger if looking for a larger row - * @param add if the row should be added (check for duplicate keys) - * @param compareKeys compare the row keys as well - * @return the index of the found row - */ - int find(SearchRow compare, boolean bigger, boolean add, boolean compareKeys) { - if (compare == null) { - return 0; - } - int l = 0, r = entryCount; - int comp = 1; - while (l < r) { - int i = (l + r) >>> 1; - SearchRow row = getRow(i); - comp = index.compareRows(row, compare); - if (comp == 0) { - if (add && index.indexType.isUnique()) { - if (!index.containsNullAndAllowMultipleNull(compare)) { - throw index.getDuplicateKeyException(compare.toString()); - } - } - if (compareKeys) { - comp = index.compareKeys(row, compare); - if (comp == 0) { - return i; - } - } - } - if (comp > 0 || (!bigger && comp == 0)) { - r = i; - } else { - l = i + 1; - } - } - return l; - } - - /** - * Add a row if possible. If it is possible this method returns -1, - * otherwise the split point. It is always possible to add one row. - * - * @param row the row to add - * @return the split point of this page, or -1 if no split is required - */ - abstract int addRowTry(SearchRow row); - - /** - * Find the first row. - * - * @param cursor the cursor - * @param first the row to find - * @param bigger if the row should be bigger - */ - abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger); - - /** - * Find the last row. - * - * @param cursor the cursor - */ - abstract void last(PageBtreeCursor cursor); - - /** - * Get the row at this position. 
- * - * @param at the index - * @return the row - */ - SearchRow getRow(int at) { - SearchRow row = rows[at]; - if (row == null) { - row = index.readRow(data, offsets[at], onlyPosition, true); - memoryChange(); - rows[at] = row; - } else if (!index.hasData(row)) { - row = index.readRow(row.getKey()); - memoryChange(); - rows[at] = row; - } - return row; - } - - /** - * The memory usage of this page was changed. Propagate the change if - * needed. - */ - protected void memoryChange() { - // nothing to do - } - - /** - * Split the index page at the given point. - * - * @param splitPoint the index where to split - * @return the new page that contains about half the entries - */ - abstract PageBtree split(int splitPoint); - - /** - * Change the page id. - * - * @param id the new page id - */ - void setPageId(int id) { - changeCount = index.getPageStore().getChangeCount(); - written = false; - index.getPageStore().removeFromCache(getPos()); - setPos(id); - index.getPageStore().logUndo(this, null); - remapChildren(); - } - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageBtreeLeaf getFirstLeaf(); - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageBtreeLeaf getLastLeaf(); - - /** - * Change the parent page id. - * - * @param id the new parent page id - */ - void setParentPageId(int id) { - index.getPageStore().logUndo(this, data); - changeCount = index.getPageStore().getChangeCount(); - written = false; - parentPageId = id; - } - - /** - * Update the parent id of all children. - */ - abstract void remapChildren(); - - /** - * Remove a row. - * - * @param row the row to remove - * @return null if the last row didn't change, - * the deleted row if the page is now empty, - * otherwise the new last row of this page - */ - abstract SearchRow remove(SearchRow row); - - /** - * Free this page and all child pages. 
- */ - abstract void freeRecursive(); - - /** - * Ensure all rows are read in memory. - */ - protected void readAllRows() { - for (int i = 0; i < entryCount; i++) { - SearchRow row = rows[i]; - if (row == null) { - row = index.readRow(data, offsets[i], onlyPosition, false); - rows[i] = row; - } - } - } - - /** - * Get the estimated memory size. - * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - // need to always return the same value for the same object (otherwise - // the cache size would change after adding and then removing the same - // page from the cache) but index.getMemoryPerPage() can adopt according - // to how much memory a row needs on average - return memoryEstimated; - } - - @Override - public boolean canRemove() { - if (changeCount >= index.getPageStore().getChangeCount()) { - return false; - } - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeCursor.java b/h2/src/main/org/h2/index/PageBtreeCursor.java deleted file mode 100644 index c479106392..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeCursor.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the page b-tree index. - */ -public class PageBtreeCursor implements Cursor { - - private final Session session; - private final PageBtreeIndex index; - private final SearchRow last; - private PageBtreeLeaf current; - private int i; - private SearchRow currentSearchRow; - private Row currentRow; - - PageBtreeCursor(Session session, PageBtreeIndex index, SearchRow last) { - this.session = session; - this.index = index; - this.last = last; - } - - /** - * Set the position of the current row. 
- * - * @param current the leaf page - * @param i the index within the page - */ - void setCurrent(PageBtreeLeaf current, int i) { - this.current = current; - this.i = i; - } - - @Override - public Row get() { - if (currentRow == null && currentSearchRow != null) { - currentRow = index.getRow(session, currentSearchRow.getKey()); - } - return currentRow; - } - - @Override - public SearchRow getSearchRow() { - return currentSearchRow; - } - - @Override - public boolean next() { - if (current == null) { - return false; - } - if (i >= current.getEntryCount()) { - current.nextPage(this); - if (current == null) { - return false; - } - } - currentSearchRow = current.getRow(i); - currentRow = null; - if (last != null && index.compareRows(currentSearchRow, last) > 0) { - currentSearchRow = null; - return false; - } - i++; - return true; - } - - @Override - public boolean previous() { - if (current == null) { - return false; - } - if (i < 0) { - current.previousPage(this); - if (current == null) { - return false; - } - } - currentSearchRow = current.getRow(i); - currentRow = null; - i--; - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeIndex.java b/h2/src/main/org/h2/index/PageBtreeIndex.java deleted file mode 100644 index b86be49aa4..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeIndex.java +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * This is the most common type of index, a b tree index. - * Only the data of the indexed columns are stored in the index. - */ -public class PageBtreeIndex extends PageIndex { - - private static int memoryChangeRequired; - - private final PageStore store; - private final RegularTable tableData; - private final boolean needRebuild; - private long rowCount; - private int memoryPerPage; - private int memoryCount; - - public PageBtreeIndex(RegularTable table, int id, String indexName, - IndexColumn[] columns, - IndexType indexType, boolean create, Session session) { - initBaseIndex(table, id, indexName, columns, indexType); - if (!database.isStarting() && create) { - checkIndexColumnTypes(columns); - } - // int test; - // trace.setLevel(TraceSystem.DEBUG); - tableData = table; - if (!database.isPersistent() || id < 0) { - throw DbException.throwInternalError("" + indexName); - } - this.store = database.getPageStore(); - store.addIndex(this); - if (create) { - // new index - rootPageId = store.allocatePage(); - // TODO currently the head position is stored in the log - // it should not for new tables, otherwise redo of other operations - // must ensure this page is not used for other things - store.addMeta(this, session); - PageBtreeLeaf root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT); - store.logUndo(root, null); - 
store.update(root); - } else { - rootPageId = store.getRootPageId(id); - PageBtree root = getPage(rootPageId); - rowCount = root.getRowCount(); - } - this.needRebuild = create || (rowCount == 0 && store.isRecoveryRunning()); - if (trace.isDebugEnabled()) { - trace.debug("opened {0} rows: {1}", getName() , rowCount); - } - memoryPerPage = (Constants.MEMORY_PAGE_BTREE + store.getPageSize()) >> 2; - } - - @Override - public void add(Session session, Row row) { - if (trace.isDebugEnabled()) { - trace.debug("{0} add {1}", getName(), row); - } - // safe memory - SearchRow newRow = getSearchRow(row); - try { - addRow(newRow); - } finally { - store.incrementChangeCount(); - } - } - - private void addRow(SearchRow newRow) { - while (true) { - PageBtree root = getPage(rootPageId); - int splitPoint = root.addRowTry(newRow); - if (splitPoint == -1) { - break; - } - if (trace.isDebugEnabled()) { - trace.debug("split {0}", splitPoint); - } - SearchRow pivot = root.getRow(splitPoint - 1); - store.logUndo(root, root.data); - PageBtree page1 = root; - PageBtree page2 = root.split(splitPoint); - store.logUndo(page2, null); - int id = store.allocatePage(); - page1.setPageId(id); - page1.setParentPageId(rootPageId); - page2.setParentPageId(rootPageId); - PageBtreeNode newRoot = PageBtreeNode.create( - this, rootPageId, PageBtree.ROOT); - store.logUndo(newRoot, null); - newRoot.init(page1, pivot, page2); - store.update(page1); - store.update(page2); - store.update(newRoot); - root = newRoot; - } - invalidateRowCount(); - rowCount++; - } - - /** - * Create a search row for this row. - * - * @param row the row - * @return the search row - */ - private SearchRow getSearchRow(Row row) { - SearchRow r = table.getTemplateSimpleRow(columns.length == 1); - r.setKeyAndVersion(row); - for (Column c : columns) { - int idx = c.getColumnId(); - r.setValue(idx, row.getValue(idx)); - } - return r; - } - - /** - * Read the given page. 
- * - * @param id the page id - * @return the page - */ - PageBtree getPage(int id) { - Page p = store.getPage(id); - if (p == null) { - PageBtreeLeaf empty = PageBtreeLeaf.create(this, id, PageBtree.ROOT); - // could have been created before, but never committed - store.logUndo(empty, null); - store.update(empty); - return empty; - } else if (!(p instanceof PageBtree)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "" + p); - } - return (PageBtree) p; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findNext(Session session, SearchRow first, SearchRow last) { - return find(session, first, true, last); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session, first, false, last); - } - - private Cursor find(Session session, SearchRow first, boolean bigger, - SearchRow last) { - if (SysProperties.CHECK && store == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); - } - PageBtree root = getPage(rootPageId); - PageBtreeCursor cursor = new PageBtreeCursor(session, this, last); - root.find(cursor, first, bigger); - return cursor; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (first) { - // TODO optimization: this loops through NULL elements - Cursor cursor = find(session, null, false, null); - while (cursor.next()) { - SearchRow row = cursor.getSearchRow(); - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - PageBtree root = getPage(rootPageId); - PageBtreeCursor cursor = new PageBtreeCursor(session, this, null); - root.last(cursor); - cursor.previous(); - // TODO optimization: this loops through NULL elements - do { - SearchRow row = cursor.getSearchRow(); - if (row == null) { - break; - } - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } while (cursor.previous()); - return cursor; - } 
- - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return 10 * getCostRangeIndex(masks, tableData.getRowCount(session), - filter, sortOrder); - } - - @Override - public boolean needRebuild() { - return needRebuild; - } - - @Override - public void remove(Session session, Row row) { - if (trace.isDebugEnabled()) { - trace.debug("{0} remove {1}", getName(), row); - } - // TODO invalidate row count - // setChanged(session); - if (rowCount == 1) { - removeAllRows(); - } else { - try { - PageBtree root = getPage(rootPageId); - root.remove(row); - invalidateRowCount(); - rowCount--; - } finally { - store.incrementChangeCount(); - } - } - } - - @Override - public void remove(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("remove"); - } - removeAllRows(); - store.free(rootPageId); - store.removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("truncate"); - } - removeAllRows(); - if (tableData.getContainsLargeObject()) { - database.getLobStorage().removeAllForTable(table.getId()); - } - tableData.setRowCount(0); - } - - private void removeAllRows() { - try { - PageBtree root = getPage(rootPageId); - root.freeRecursive(); - root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT); - store.removeFromCache(rootPageId); - store.update(root); - rowCount = 0; - } finally { - store.incrementChangeCount(); - } - } - - @Override - public void checkRename() { - // ok - } - - /** - * Get a row from the main index. 
- * - * @param session the session - * @param key the row key - * @return the row - */ - @Override - public Row getRow(Session session, long key) { - return tableData.getRow(session, key); - } - - PageStore getPageStore() { - return store; - } - - @Override - public long getRowCountApproximation() { - return tableData.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return tableData.getDiskSpaceUsed(); - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public void close(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("close"); - } - // can not close the index because it might get used afterwards, - // for example after running recovery - try { - writeRowCount(); - } finally { - store.incrementChangeCount(); - } - } - - /** - * Read a row from the data page at the given offset. - * - * @param data the data - * @param offset the offset - * @param onlyPosition whether only the position of the row is stored - * @param needData whether the row data is required - * @return the row - */ - SearchRow readRow(Data data, int offset, boolean onlyPosition, - boolean needData) { - synchronized (data) { - data.setPos(offset); - long key = data.readVarLong(); - if (onlyPosition) { - if (needData) { - return tableData.getRow(null, key); - } - SearchRow row = table.getTemplateSimpleRow(true); - row.setKey(key); - return row; - } - SearchRow row = table.getTemplateSimpleRow(columns.length == 1); - row.setKey(key); - for (Column col : columns) { - int idx = col.getColumnId(); - row.setValue(idx, data.readValue()); - } - return row; - } - } - - /** - * Get the complete row from the data index. - * - * @param key the key - * @return the row - */ - SearchRow readRow(long key) { - return tableData.getRow(null, key); - } - - /** - * Write a row to the data page at the given offset. 
- * - * @param data the data - * @param offset the offset - * @param onlyPosition whether only the position of the row is stored - * @param row the row to write - */ - void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) { - data.setPos(offset); - data.writeVarLong(row.getKey()); - if (!onlyPosition) { - for (Column col : columns) { - int idx = col.getColumnId(); - data.writeValue(row.getValue(idx)); - } - } - } - - /** - * Get the size of a row (only the part that is stored in the index). - * - * @param dummy a dummy data page to calculate the size - * @param row the row - * @param onlyPosition whether only the position of the row is stored - * @return the number of bytes - */ - int getRowSize(Data dummy, SearchRow row, boolean onlyPosition) { - int rowsize = Data.getVarLongLen(row.getKey()); - if (!onlyPosition) { - for (Column col : columns) { - Value v = row.getValue(col.getColumnId()); - rowsize += dummy.getValueLen(v); - } - } - return rowsize; - } - - @Override - public boolean canFindNext() { - return true; - } - - /** - * The root page has changed. - * - * @param session the session - * @param newPos the new position - */ - void setRootPageId(Session session, int newPos) { - store.removeMeta(this, session); - this.rootPageId = newPos; - store.addMeta(this, session); - store.addIndex(this); - } - - private void invalidateRowCount() { - PageBtree root = getPage(rootPageId); - root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT); - } - - @Override - public void writeRowCount() { - if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) { - // currently creating the index - return; - } - PageBtree root = getPage(rootPageId); - root.setRowCountStored(MathUtils.convertLongToInt(rowCount)); - } - - /** - * Check whether the given row contains data. 
- * - * @param row the row - * @return true if it contains data - */ - boolean hasData(SearchRow row) { - return row.getValue(columns[0].getColumnId()) != null; - } - - int getMemoryPerPage() { - return memoryPerPage; - } - - /** - * The memory usage of a page was changed. The new value is used to adopt - * the average estimated memory size of a page. - * - * @param x the new memory size - */ - void memoryChange(int x) { - if (memoryCount < Constants.MEMORY_FACTOR) { - memoryPerPage += (x - memoryPerPage) / ++memoryCount; - } else { - memoryPerPage += (x > memoryPerPage ? 1 : -1) + - ((x - memoryPerPage) / Constants.MEMORY_FACTOR); - } - } - - /** - * Check if calculating the memory is required. - * - * @return true if it is - */ - static boolean isMemoryChangeRequired() { - if (memoryChangeRequired-- <= 0) { - memoryChangeRequired = 10; - return true; - } - return false; - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeLeaf.java b/h2/src/main/org/h2/index/PageBtreeLeaf.java deleted file mode 100644 index 5b414ff409..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeLeaf.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Arrays; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; - -/** - * A b-tree leaf page that contains index data. Format: - *
      - *
    • page type: byte
    • - *
    • checksum: short
    • - *
    • parent page id (0 for root): int
    • - *
    • index id: varInt
    • - *
    • entry count: short
    • - *
    • list of offsets: short
    • - *
    • data (key: varLong, value,...)
    • - *
    - */ -public class PageBtreeLeaf extends PageBtree { - - private static final int OFFSET_LENGTH = 2; - - private final boolean optimizeUpdate; - private boolean writtenData; - - private PageBtreeLeaf(PageBtreeIndex index, int pageId, Data data) { - super(index, pageId, data); - this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate; - } - - /** - * Read a b-tree leaf page. - * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageBtreeIndex index, Data data, int pageId) { - PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, data); - p.read(); - return p; - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageBtreeLeaf create(PageBtreeIndex index, int pageId, - int parentPageId) { - PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.rows = SearchRow.EMPTY_ARRAY; - p.parentPageId = parentPageId; - p.writeHead(); - p.start = p.data.length(); - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - onlyPosition = (type & Page.FLAG_LAST) == 0; - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - entryCount = data.readShortInt(); - offsets = new int[entryCount]; - rows = new SearchRow[entryCount]; - for (int i = 0; i < entryCount; i++) { - offsets[i] = data.readShortInt(); - } - start = data.length(); - written = true; - writtenData = true; - } - - @Override - int addRowTry(SearchRow row) { - int x = addRow(row, true); - memoryChange(); - return x; - } - - private int addRow(SearchRow row, boolean tryOnly) { - int rowLength = 
index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - if (last - rowLength < start + OFFSET_LENGTH) { - if (tryOnly && entryCount > 1) { - int x = find(row, false, true, true); - if (entryCount < 5) { - // required, otherwise the index doesn't work correctly - return entryCount / 2; - } - // split near the insertion point to better fill pages - // split in half would be: - // return entryCount / 2; - int third = entryCount / 3; - return x < third ? third : x >= 2 * third ? 2 * third : x; - } - readAllRows(); - writtenData = false; - onlyPosition = true; - // change the offsets (now storing only positions) - int o = pageSize; - for (int i = 0; i < entryCount; i++) { - o -= index.getRowSize(data, getRow(i), true); - offsets[i] = o; - } - last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - rowLength = index.getRowSize(data, row, true); - if (SysProperties.CHECK && last - rowLength < start + OFFSET_LENGTH) { - throw DbException.throwInternalError(); - } - } - index.getPageStore().logUndo(this, data); - if (!optimizeUpdate) { - readAllRows(); - } - changeCount = index.getPageStore().getChangeCount(); - written = false; - int x; - if (entryCount == 0) { - x = 0; - } else { - x = find(row, false, true, true); - } - start += OFFSET_LENGTH; - int offset = (x == 0 ? 
pageSize : offsets[x - 1]) - rowLength; - if (optimizeUpdate && writtenData) { - if (entryCount > 0) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount - 1]; - int dataEnd = offset; - System.arraycopy(d, dataStart, d, dataStart - rowLength, - dataEnd - dataStart + rowLength); - } - index.writeRow(data, offset, row, onlyPosition); - } - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - rows = insert(rows, entryCount, x, row); - entryCount++; - index.getPageStore().update(this); - return -1; - } - - private void removeRow(int at) { - if (!optimizeUpdate) { - readAllRows(); - } - index.getPageStore().logUndo(this, data); - entryCount--; - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (entryCount <= 0) { - DbException.throwInternalError(); - } - int startNext = at > 0 ? offsets[at - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[at]; - start -= OFFSET_LENGTH; - - if (optimizeUpdate) { - if (writtenData) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount]; - System.arraycopy(d, dataStart, d, - dataStart + rowLength, offsets[at] - dataStart); - Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0); - } - } - - offsets = remove(offsets, entryCount + 1, at); - add(offsets, at, entryCount, rowLength); - rows = remove(rows, entryCount + 1, at); - } - - int getEntryCount() { - return entryCount; - } - - @Override - PageBtree split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPageId, parentPageId); - for (int i = splitPoint; i < entryCount;) { - p2.addRow(getRow(splitPoint), false); - removeRow(splitPoint); - } - memoryChange(); - p2.memoryChange(); - return p2; - } - - @Override - PageBtreeLeaf getFirstLeaf() { - return this; - } - - @Override - PageBtreeLeaf getLastLeaf() { - return this; - } - - @Override - SearchRow remove(SearchRow row) { 
- int at = find(row, false, false, true); - SearchRow delete = getRow(at); - if (index.compareRows(row, delete) != 0 || delete.getKey() != row.getKey()) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - index.getSQL() + ": " + row); - } - index.getPageStore().logUndo(this, data); - if (entryCount == 1) { - // the page is now empty - return row; - } - removeRow(at); - memoryChange(); - index.getPageStore().update(this); - if (at == entryCount) { - // the last row changed - return getRow(at - 1); - } - // the last row didn't change - return null; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - } - - @Override - int getRowCount() { - return entryCount; - } - - @Override - void setRowCountStored(int rowCount) { - // ignore - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) (Page.TYPE_BTREE_LEAF | - (onlyPosition ? 
0 : Page.FLAG_LAST))); - data.writeShortInt(0); - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - if (!optimizeUpdate) { - readAllRows(); - } - writeHead(); - for (int i = 0; i < entryCount; i++) { - data.writeShortInt(offsets[i]); - } - if (!writtenData || !optimizeUpdate) { - for (int i = 0; i < entryCount; i++) { - index.writeRow(data, offsets[i], rows[i], onlyPosition); - } - writtenData = true; - } - written = true; - memoryChange(); - } - - @Override - void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) { - int i = find(first, bigger, false, false); - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.find(cursor, first, bigger); - return; - } - cursor.setCurrent(this, i); - } - - @Override - void last(PageBtreeCursor cursor) { - cursor.setCurrent(this, entryCount - 1); - } - - @Override - void remapChildren() { - // nothing to do - } - - /** - * Set the cursor to the first row of the next page. - * - * @param cursor the cursor - */ - void nextPage(PageBtreeCursor cursor) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.nextPage(cursor, getPos()); - } - - /** - * Set the cursor to the last row of the previous page. 
- * - * @param cursor the cursor - */ - void previousPage(PageBtreeCursor cursor) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.previousPage(cursor, getPos()); - } - - @Override - public String toString() { - return "page[" + getPos() + "] b-tree leaf table:" + - index.getId() + " entries:" + entryCount; - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - readAllRows(); - PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPos, parentPageId); - store.logUndo(this, data); - store.logUndo(p2, null); - p2.rows = rows; - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.onlyPosition = onlyPosition; - p2.parentPageId = parentPageId; - p2.start = start; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageBtreeNode p = (PageBtreeNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - store.free(getPos()); - } - - @Override - protected void memoryChange() { - if (!PageBtreeIndex.isMemoryChangeRequired()) { - return; - } - int memory = Constants.MEMORY_PAGE_BTREE + index.getPageStore().getPageSize(); - if (rows != null) { - memory += getEntryCount() * (4 + Constants.MEMORY_POINTER); - for (int i = 0; i < entryCount; i++) { - SearchRow r = rows[i]; - if (r != null) { - memory += r.getMemory(); - } - } - } - index.memoryChange(memory >> 2); - } - -} diff --git a/h2/src/main/org/h2/index/PageBtreeNode.java b/h2/src/main/org/h2/index/PageBtreeNode.java deleted file mode 100644 index c24e987203..0000000000 --- a/h2/src/main/org/h2/index/PageBtreeNode.java +++ /dev/null @@ -1,610 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.util.Utils; - -/** - * A b-tree node page that contains index data. Format: - *
      - *
    • page type: byte
    • - *
    • checksum: short
    • - *
    • parent page id (0 for root): int
    • - *
    • index id: varInt
    • - *
    • count of all children (-1 if not known): int
    • - *
    • entry count: short
    • - *
    • rightmost child page id: int
    • - *
    • entries (child page id: int, offset: short)
    • - *
    - * The row contains the largest key of the respective child, - * meaning row[0] contains the largest key of child[0]. - */ -public class PageBtreeNode extends PageBtree { - - private static final int CHILD_OFFSET_PAIR_LENGTH = 6; - private static final int MAX_KEY_LENGTH = 10; - - private final boolean pageStoreInternalCount; - - /** - * The page ids of the children. - */ - private int[] childPageIds; - - private int rowCountStored = UNKNOWN_ROWCOUNT; - - private int rowCount = UNKNOWN_ROWCOUNT; - - private PageBtreeNode(PageBtreeIndex index, int pageId, Data data) { - super(index, pageId, data); - this.pageStoreInternalCount = index.getDatabase(). - getSettings().pageStoreInternalCount; - } - - /** - * Read a b-tree node page. - * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageBtreeIndex index, Data data, int pageId) { - PageBtreeNode p = new PageBtreeNode(index, pageId, data); - p.read(); - return p; - } - - /** - * Create a new b-tree node page. 
- * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent page id - * @return the page - */ - static PageBtreeNode create(PageBtreeIndex index, int pageId, - int parentPageId) { - PageBtreeNode p = new PageBtreeNode(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.parentPageId = parentPageId; - p.writeHead(); - // 4 bytes for the rightmost child page id - p.start = p.data.length() + 4; - p.rows = SearchRow.EMPTY_ARRAY; - if (p.pageStoreInternalCount) { - p.rowCount = 0; - } - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - onlyPosition = (type & Page.FLAG_LAST) == 0; - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - rowCount = rowCountStored = data.readInt(); - entryCount = data.readShortInt(); - childPageIds = new int[entryCount + 1]; - childPageIds[entryCount] = data.readInt(); - rows = entryCount == 0 ? SearchRow.EMPTY_ARRAY : new SearchRow[entryCount]; - offsets = Utils.newIntArray(entryCount); - for (int i = 0; i < entryCount; i++) { - childPageIds[i] = data.readInt(); - offsets[i] = data.readShortInt(); - } - check(); - start = data.length(); - written = true; - } - - /** - * Add a row. If it is possible this method returns -1, otherwise - * the split point. It is always possible to add two rows. 
- * - * @param row the now to add - * @return the split point of this page, or -1 if no split is required - */ - private int addChildTry(SearchRow row) { - if (entryCount < 4) { - return -1; - } - int startData; - if (onlyPosition) { - // if we only store the position, we may at most store as many - // entries as there is space for keys, because the current data area - // might get larger when _removing_ a child (if the new key needs - // more space) - and removing a child can't split this page - startData = entryCount + 1 * MAX_KEY_LENGTH; - } else { - int rowLength = index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - startData = last - rowLength; - } - if (startData < start + CHILD_OFFSET_PAIR_LENGTH) { - return entryCount / 2; - } - return -1; - } - - /** - * Add a child at the given position. - * - * @param x the position - * @param childPageId the child - * @param row the row smaller than the first row of the child and its - * children - */ - private void addChild(int x, int childPageId, SearchRow row) { - int rowLength = index.getRowSize(data, row, onlyPosition); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) { - readAllRows(); - onlyPosition = true; - // change the offsets (now storing only positions) - int o = pageSize; - for (int i = 0; i < entryCount; i++) { - o -= index.getRowSize(data, getRow(i), true); - offsets[i] = o; - } - last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; - rowLength = index.getRowSize(data, row, true); - if (SysProperties.CHECK && last - rowLength < - start + CHILD_OFFSET_PAIR_LENGTH) { - throw DbException.throwInternalError(); - } - } - int offset = last - rowLength; - if (entryCount > 0) { - if (x < entryCount) { - offset = (x == 0 ? 
pageSize : offsets[x - 1]) - rowLength; - } - } - rows = insert(rows, entryCount, x, row); - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId); - start += CHILD_OFFSET_PAIR_LENGTH; - if (pageStoreInternalCount) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - } - entryCount++; - written = false; - changeCount = index.getPageStore().getChangeCount(); - } - - @Override - int addRowTry(SearchRow row) { - while (true) { - int x = find(row, false, true, true); - PageBtree page = index.getPage(childPageIds[x]); - int splitPoint = page.addRowTry(row); - if (splitPoint == -1) { - break; - } - SearchRow pivot = page.getRow(splitPoint - 1); - index.getPageStore().logUndo(this, data); - int splitPoint2 = addChildTry(pivot); - if (splitPoint2 != -1) { - return splitPoint2; - } - PageBtree page2 = page.split(splitPoint); - readAllRows(); - addChild(x, page2.getPos(), pivot); - index.getPageStore().update(page); - index.getPageStore().update(page2); - index.getPageStore().update(this); - } - updateRowCount(1); - written = false; - changeCount = index.getPageStore().getChangeCount(); - return -1; - } - - private void updateRowCount(int offset) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - if (rowCountStored != UNKNOWN_ROWCOUNT) { - rowCountStored = UNKNOWN_ROWCOUNT; - index.getPageStore().logUndo(this, data); - if (written) { - writeHead(); - } - index.getPageStore().update(this); - } - } - - @Override - PageBtree split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageBtreeNode p2 = PageBtreeNode.create(index, newPageId, parentPageId); - index.getPageStore().logUndo(this, data); - if (onlyPosition) { - // TODO optimize: maybe not required - p2.onlyPosition = true; - } - int firstChild = childPageIds[splitPoint]; - readAllRows(); - for (int i = splitPoint; i < entryCount;) { - 
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], getRow(splitPoint)); - removeChild(splitPoint); - } - int lastChild = childPageIds[splitPoint - 1]; - removeChild(splitPoint - 1); - childPageIds[splitPoint - 1] = lastChild; - if (p2.childPageIds == null) { - p2.childPageIds = new int[1]; - } - p2.childPageIds[0] = firstChild; - p2.remapChildren(); - return p2; - } - - @Override - protected void remapChildren() { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree p = index.getPage(child); - p.setParentPageId(getPos()); - index.getPageStore().update(p); - } - } - - /** - * Initialize the page. - * - * @param page1 the first child page - * @param pivot the pivot key - * @param page2 the last child page - */ - void init(PageBtree page1, SearchRow pivot, PageBtree page2) { - entryCount = 0; - childPageIds = new int[] { page1.getPos() }; - rows = SearchRow.EMPTY_ARRAY; - offsets = Utils.EMPTY_INT_ARRAY; - addChild(0, page2.getPos(), pivot); - if (pageStoreInternalCount) { - rowCount = page1.getRowCount() + page2.getRowCount(); - } - check(); - } - - @Override - void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) { - int i = find(first, bigger, false, false); - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.find(cursor, first, bigger); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - page.find(cursor, first, bigger); - } - - @Override - void last(PageBtreeCursor cursor) { - int child = childPageIds[entryCount]; - index.getPage(child).last(cursor); - } - - @Override - PageBtreeLeaf getFirstLeaf() { - int child = childPageIds[0]; - return index.getPage(child).getFirstLeaf(); - } - - @Override - PageBtreeLeaf getLastLeaf() { - int child = childPageIds[entryCount]; - return index.getPage(child).getLastLeaf(); - } - - @Override - SearchRow remove(SearchRow row) { - int at = find(row, false, false, 
true); - // merge is not implemented to allow concurrent usage - // TODO maybe implement merge - PageBtree page = index.getPage(childPageIds[at]); - SearchRow last = page.remove(row); - index.getPageStore().logUndo(this, data); - updateRowCount(-1); - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (last == null) { - // the last row didn't change - nothing to do - return null; - } else if (last == row) { - // this child is now empty - index.getPageStore().free(page.getPos()); - if (entryCount < 1) { - // no more children - this page is empty as well - return row; - } - if (at == entryCount) { - // removing the last child - last = getRow(at - 1); - } else { - last = null; - } - removeChild(at); - index.getPageStore().update(this); - return last; - } - // the last row is in the last child - if (at == entryCount) { - return last; - } - int child = childPageIds[at]; - removeChild(at); - // TODO this can mean only the position is now stored - // should split at the next possible moment - addChild(at, child, last); - // remove and add swapped two children, fix that - int temp = childPageIds[at]; - childPageIds[at] = childPageIds[at + 1]; - childPageIds[at + 1] = temp; - index.getPageStore().update(this); - return null; - } - - @Override - int getRowCount() { - if (rowCount == UNKNOWN_ROWCOUNT) { - int count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree page = index.getPage(child); - count += page.getRowCount(); - index.getDatabase().setProgress( - DatabaseEventListener.STATE_SCAN_FILE, - index.getName(), count, Integer.MAX_VALUE); - } - rowCount = count; - } - return rowCount; - } - - @Override - void setRowCountStored(int rowCount) { - if (rowCount < 0 && pageStoreInternalCount) { - return; - } - this.rowCount = rowCount; - if (rowCountStored != rowCount) { - rowCountStored = rowCount; - index.getPageStore().logUndo(this, data); - if (written) { - changeCount = 
index.getPageStore().getChangeCount(); - writeHead(); - } - index.getPageStore().update(this); - } - } - - private void check() { - if (SysProperties.CHECK) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - if (child == 0) { - DbException.throwInternalError(); - } - } - } - } - - @Override - public void write() { - check(); - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) (Page.TYPE_BTREE_NODE | - (onlyPosition ? 0 : Page.FLAG_LAST))); - data.writeShortInt(0); - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeInt(rowCountStored); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - readAllRows(); - writeHead(); - data.writeInt(childPageIds[entryCount]); - for (int i = 0; i < entryCount; i++) { - data.writeInt(childPageIds[i]); - data.writeShortInt(offsets[i]); - } - for (int i = 0; i < entryCount; i++) { - index.writeRow(data, offsets[i], rows[i], onlyPosition); - } - written = true; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - index.getPage(child).freeRecursive(); - } - } - - private void removeChild(int i) { - readAllRows(); - entryCount--; - if (pageStoreInternalCount) { - updateRowCount(-index.getPage(childPageIds[i]).getRowCount()); - } - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (entryCount < 0) { - DbException.throwInternalError(); - } - if (entryCount > i) { - int startNext = i > 0 ? 
offsets[i - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[i]; - add(offsets, i, entryCount + 1, rowLength); - } - rows = remove(rows, entryCount + 1, i); - offsets = remove(offsets, entryCount + 1, i); - childPageIds = remove(childPageIds, entryCount + 2, i); - start -= CHILD_OFFSET_PAIR_LENGTH; - } - - /** - * Set the cursor to the first row of the next page. - * - * @param cursor the cursor - * @param pageId id of the next page - */ - void nextPage(PageBtreeCursor cursor, int pageId) { - int i; - // TODO maybe keep the index in the child page (transiently) - for (i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == pageId) { - i++; - break; - } - } - if (i > entryCount) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId); - next.nextPage(cursor, getPos()); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - PageBtreeLeaf leaf = page.getFirstLeaf(); - cursor.setCurrent(leaf, 0); - } - - /** - * Set the cursor to the last row of the previous page. 
- * - * @param cursor the cursor - * @param pageId id of the previous page - */ - void previousPage(PageBtreeCursor cursor, int pageId) { - int i; - // TODO maybe keep the index in the child page (transiently) - for (i = entryCount; i >= 0; i--) { - if (childPageIds[i] == pageId) { - i--; - break; - } - } - if (i < 0) { - if (parentPageId == PageBtree.ROOT) { - cursor.setCurrent(null, 0); - return; - } - PageBtreeNode previous = (PageBtreeNode) index.getPage(parentPageId); - previous.previousPage(cursor, getPos()); - return; - } - PageBtree page = index.getPage(childPageIds[i]); - PageBtreeLeaf leaf = page.getLastLeaf(); - cursor.setCurrent(leaf, leaf.entryCount - 1); - } - - - @Override - public String toString() { - return "page[" + getPos() + "] b-tree node table:" + - index.getId() + " entries:" + entryCount; - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - store.logUndo(this, data); - PageBtreeNode p2 = PageBtreeNode.create(index, newPos, parentPageId); - readAllRows(); - p2.rowCountStored = rowCountStored; - p2.rowCount = rowCount; - p2.childPageIds = childPageIds; - p2.rows = rows; - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.onlyPosition = onlyPosition; - p2.parentPageId = parentPageId; - p2.start = start; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - Page p = store.getPage(parentPageId); - if (!(p instanceof PageBtreeNode)) { - throw DbException.throwInternalError(); - } - PageBtreeNode n = (PageBtreeNode) p; - n.moveChild(getPos(), newPos); - } - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageBtree p = index.getPage(child); - p.setParentPageId(newPos); - store.update(p); - } - store.free(getPos()); - } - - /** - * One of the children has moved to a new page. 
- * - * @param oldPos the old position - * @param newPos the new position - */ - void moveChild(int oldPos, int newPos) { - for (int i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == oldPos) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds[i] = newPos; - index.getPageStore().update(this); - return; - } - } - throw DbException.throwInternalError(); - } - -} \ No newline at end of file diff --git a/h2/src/main/org/h2/index/PageData.java b/h2/src/main/org/h2/index/PageData.java deleted file mode 100644 index 4a74e397cf..0000000000 --- a/h2/src/main/org/h2/index/PageData.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; - -/** - * A page that contains data rows. - */ -abstract class PageData extends Page { - - /** - * The position of the parent page id. - */ - static final int START_PARENT = 3; - - /** - * This is a root page. - */ - static final int ROOT = 0; - - /** - * Indicator that the row count is not known. - */ - static final int UNKNOWN_ROWCOUNT = -1; - - /** - * The index. - */ - protected final PageDataIndex index; - - /** - * The page number of the parent. - */ - protected int parentPageId; - - /** - * The data page. - */ - protected final Data data; - - /** - * The number of entries. - */ - protected int entryCount; - - /** - * The row keys. - */ - protected long[] keys; - - /** - * Whether the data page is up-to-date. - */ - protected boolean written; - - /** - * The estimated heap memory used by this object, in number of double words - * (4 bytes each). 
- */ - private final int memoryEstimated; - - PageData(PageDataIndex index, int pageId, Data data) { - this.index = index; - this.data = data; - setPos(pageId); - memoryEstimated = index.getMemoryPerPage(); - } - - /** - * Get the real row count. If required, this will read all child pages. - * - * @return the row count - */ - abstract int getRowCount(); - - /** - * Set the stored row count. This will write the page. - * - * @param rowCount the stored row count - */ - abstract void setRowCountStored(int rowCount); - - /** - * Get the used disk space for this index. - * - * @return the estimated number of bytes - */ - abstract long getDiskSpaceUsed(); - - /** - * Find an entry by key. - * - * @param key the key (may not exist) - * @return the matching or next index - */ - int find(long key) { - int l = 0, r = entryCount; - while (l < r) { - int i = (l + r) >>> 1; - long k = keys[i]; - if (k == key) { - return i; - } else if (k > key) { - r = i; - } else { - l = i + 1; - } - } - return l; - } - - /** - * Add a row if possible. If it is possible this method returns -1, - * otherwise the split point. It is always possible to add one row. - * - * @param row the now to add - * @return the split point of this page, or -1 if no split is required - */ - abstract int addRowTry(Row row); - - /** - * Get a cursor. - * - * @param session the session - * @param minKey the smallest key - * @param maxKey the largest key - * @param multiVersion if the delta should be used - * @return the cursor - */ - abstract Cursor find(Session session, long minKey, long maxKey, - boolean multiVersion); - - /** - * Get the key at this position. - * - * @param at the index - * @return the key - */ - long getKey(int at) { - return keys[at]; - } - - /** - * Split the index page at the given point. - * - * @param splitPoint the index where to split - * @return the new page that contains about half the entries - */ - abstract PageData split(int splitPoint); - - /** - * Change the page id. 
- * - * @param id the new page id - */ - void setPageId(int id) { - int old = getPos(); - index.getPageStore().removeFromCache(getPos()); - setPos(id); - index.getPageStore().logUndo(this, null); - remapChildren(old); - } - - /** - * Get the last key of a page. - * - * @return the last key - */ - abstract long getLastKey(); - - /** - * Get the first child leaf page of a page. - * - * @return the page - */ - abstract PageDataLeaf getFirstLeaf(); - - /** - * Change the parent page id. - * - * @param id the new parent page id - */ - void setParentPageId(int id) { - index.getPageStore().logUndo(this, data); - parentPageId = id; - if (written) { - changeCount = index.getPageStore().getChangeCount(); - data.setInt(START_PARENT, parentPageId); - } - } - - /** - * Update the parent id of all children. - * - * @param old the previous position - */ - abstract void remapChildren(int old); - - /** - * Remove a row. - * - * @param key the key of the row to remove - * @return true if this page is now empty - */ - abstract boolean remove(long key); - - /** - * Free this page and all child pages. - */ - abstract void freeRecursive(); - - /** - * Get the row for the given key. - * - * @param key the key - * @return the row - */ - abstract Row getRowWithKey(long key); - - /** - * Get the estimated heap memory size. 
- * - * @return number of double words (4 bytes each) - */ - @Override - public int getMemory() { - // need to always return the same value for the same object (otherwise - // the cache size would change after adding and then removing the same - // page from the cache) but index.getMemoryPerPage() can adopt according - // to how much memory a row needs on average - return memoryEstimated; - } - - int getParentPageId() { - return parentPageId; - } - - @Override - public boolean canRemove() { - if (changeCount >= index.getPageStore().getChangeCount()) { - return false; - } - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageDataCursor.java b/h2/src/main/org/h2/index/PageDataCursor.java deleted file mode 100644 index ba483d2f59..0000000000 --- a/h2/src/main/org/h2/index/PageDataCursor.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Iterator; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the page scan index. 
- */ -class PageDataCursor implements Cursor { - - private PageDataLeaf current; - private int idx; - private final long maxKey; - private Row row; - private final boolean multiVersion; - private final Session session; - private Iterator delta; - - PageDataCursor(Session session, PageDataLeaf current, int idx, long maxKey, - boolean multiVersion) { - this.current = current; - this.idx = idx; - this.maxKey = maxKey; - this.multiVersion = multiVersion; - this.session = session; - if (multiVersion) { - delta = current.index.getDelta(); - } - } - - @Override - public Row get() { - return row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - if (!multiVersion) { - nextRow(); - return checkMax(); - } - while (true) { - if (delta != null) { - if (!delta.hasNext()) { - delta = null; - row = null; - continue; - } - row = delta.next(); - if (!row.isDeleted() || row.getSessionId() == session.getId()) { - continue; - } - } else { - nextRow(); - if (row != null && row.getSessionId() != 0 && - row.getSessionId() != session.getId()) { - continue; - } - } - break; - } - return checkMax(); - } - - private boolean checkMax() { - if (row != null) { - if (maxKey != Long.MAX_VALUE) { - long x = current.index.getKey(row, Long.MAX_VALUE, Long.MAX_VALUE); - if (x > maxKey) { - row = null; - return false; - } - } - return true; - } - return false; - } - - private void nextRow() { - if (idx >= current.getEntryCount()) { - current = current.getNextPage(); - idx = 0; - if (current == null) { - row = null; - return; - } - } - row = current.getRowAt(idx); - idx++; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataIndex.java b/h2/src/main/org/h2/index/PageDataIndex.java deleted file mode 100644 index 3777e42092..0000000000 --- a/h2/src/main/org/h2/index/PageDataIndex.java +++ /dev/null @@ -1,587 +0,0 @@ -/* - * Copyright 2004-2014 H2 
Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.engine.UndoLogRecord; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.util.MathUtils; -import org.h2.util.New; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * The scan index allows to access a row by key. It can be used to iterate over - * all rows of a table. Each regular table has one such object, even if no - * primary key or indexes are defined. - */ -public class PageDataIndex extends PageIndex { - - private final PageStore store; - private final RegularTable tableData; - private long lastKey; - private long rowCount; - private HashSet delta; - private int rowCountDiff; - private final HashMap sessionRowCount; - private int mainIndexColumn = -1; - private DbException fastDuplicateKeyException; - - /** - * The estimated heap memory per page, in number of double words (4 bytes - * each). 
- */ - private int memoryPerPage; - private int memoryCount; - - private final boolean multiVersion; - - public PageDataIndex(RegularTable table, int id, IndexColumn[] columns, - IndexType indexType, boolean create, Session session) { - initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType); - this.multiVersion = database.isMultiVersion(); - - // trace = database.getTrace(Trace.PAGE_STORE + "_di"); - // trace.setLevel(TraceSystem.DEBUG); - if (multiVersion) { - sessionRowCount = New.hashMap(); - isMultiVersion = true; - } else { - sessionRowCount = null; - } - tableData = table; - this.store = database.getPageStore(); - store.addIndex(this); - if (!database.isPersistent()) { - throw DbException.throwInternalError(table.getName()); - } - if (create) { - rootPageId = store.allocatePage(); - store.addMeta(this, session); - PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT); - store.update(root); - } else { - rootPageId = store.getRootPageId(id); - PageData root = getPage(rootPageId, 0); - lastKey = root.getLastKey(); - rowCount = root.getRowCount(); - } - if (trace.isDebugEnabled()) { - trace.debug("{0} opened rows: {1}", this, rowCount); - } - table.setRowCount(rowCount); - memoryPerPage = (Constants.MEMORY_PAGE_DATA + store.getPageSize()) >> 2; - } - - @Override - public DbException getDuplicateKeyException(String key) { - if (fastDuplicateKeyException == null) { - fastDuplicateKeyException = super.getDuplicateKeyException(null); - } - return fastDuplicateKeyException; - } - - @Override - public void add(Session session, Row row) { - boolean retry = false; - if (mainIndexColumn != -1) { - row.setKey(row.getValue(mainIndexColumn).getLong()); - } else { - if (row.getKey() == 0) { - row.setKey((int) ++lastKey); - retry = true; - } - } - if (tableData.getContainsLargeObject()) { - for (int i = 0, len = row.getColumnCount(); i < len; i++) { - Value v = row.getValue(i); - Value v2 = v.link(database, getId()); - if (v2.isLinked()) { - 
session.unlinkAtCommitStop(v2); - } - if (v != v2) { - row.setValue(i, v2); - } - } - } - // when using auto-generated values, it's possible that multiple - // tries are required (specially if there was originally a primary key) - if (trace.isDebugEnabled()) { - trace.debug("{0} add {1}", getName(), row); - } - long add = 0; - while (true) { - try { - addTry(session, row); - break; - } catch (DbException e) { - if (e != fastDuplicateKeyException) { - throw e; - } - if (!retry) { - throw getNewDuplicateKeyException(); - } - if (add == 0) { - // in the first re-try add a small random number, - // to avoid collisions after a re-start - row.setKey((long) (row.getKey() + Math.random() * 10000)); - } else { - row.setKey(row.getKey() + add); - } - add++; - } finally { - store.incrementChangeCount(); - } - } - lastKey = Math.max(lastKey, row.getKey()); - } - - public DbException getNewDuplicateKeyException() { - String sql = "PRIMARY KEY ON " + table.getSQL(); - if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { - sql += "(" + indexColumns[mainIndexColumn].getSQL() + ")"; - } - DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql); - e.setSource(this); - return e; - } - - private void addTry(Session session, Row row) { - while (true) { - PageData root = getPage(rootPageId, 0); - int splitPoint = root.addRowTry(row); - if (splitPoint == -1) { - break; - } - if (trace.isDebugEnabled()) { - trace.debug("{0} split", this); - } - long pivot = splitPoint == 0 ? 
row.getKey() : root.getKey(splitPoint - 1); - PageData page1 = root; - PageData page2 = root.split(splitPoint); - int id = store.allocatePage(); - page1.setPageId(id); - page1.setParentPageId(rootPageId); - page2.setParentPageId(rootPageId); - PageDataNode newRoot = PageDataNode.create(this, rootPageId, PageData.ROOT); - newRoot.init(page1, pivot, page2); - store.update(page1); - store.update(page2); - store.update(newRoot); - root = newRoot; - } - row.setDeleted(false); - if (multiVersion) { - if (delta == null) { - delta = New.hashSet(); - } - boolean wasDeleted = delta.remove(row); - if (!wasDeleted) { - delta.add(row); - } - incrementRowCount(session.getId(), 1); - } - invalidateRowCount(); - rowCount++; - store.logAddOrRemoveRow(session, tableData.getId(), row, true); - } - - /** - * Read an overflow page page. - * - * @param id the page id - * @return the page - */ - PageDataOverflow getPageOverflow(int id) { - Page p = store.getPage(id); - if (p instanceof PageDataOverflow) { - return (PageDataOverflow) p; - } - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - p == null ? "null" : p.toString()); - } - - /** - * Read the given page. - * - * @param id the page id - * @param parent the parent, or -1 if unknown - * @return the page - */ - PageData getPage(int id, int parent) { - Page pd = store.getPage(id); - if (pd == null) { - PageDataLeaf empty = PageDataLeaf.create(this, id, parent); - // could have been created before, but never committed - store.logUndo(empty, null); - store.update(empty); - return empty; - } else if (!(pd instanceof PageData)) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "" + pd); - } - PageData p = (PageData) pd; - if (parent != -1) { - if (p.getParentPageId() != parent) { - throw DbException.throwInternalError(p + - " parent " + p.getParentPageId() + " expected " + parent); - } - } - return p; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - /** - * Get the key from the row. 
- * - * @param row the row - * @param ifEmpty the value to use if the row is empty - * @param ifNull the value to use if the column is NULL - * @return the key - */ - long getKey(SearchRow row, long ifEmpty, long ifNull) { - if (row == null) { - return ifEmpty; - } - Value v = row.getValue(mainIndexColumn); - if (v == null) { - throw DbException.throwInternalError(row.toString()); - } else if (v == ValueNull.INSTANCE) { - return ifNull; - } - return v.getLong(); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - long from = first == null ? Long.MIN_VALUE : first.getKey(); - long to = last == null ? Long.MAX_VALUE : last.getKey(); - PageData root = getPage(rootPageId, 0); - return root.find(session, from, to, isMultiVersion); - - } - - /** - * Search for a specific row or a set of rows. - * - * @param session the session - * @param first the key of the first row - * @param last the key of the last row - * @param multiVersion if mvcc should be used - * @return the cursor - */ - Cursor find(Session session, long first, long last, boolean multiVersion) { - PageData root = getPage(rootPageId, 0); - return root.find(session, first, last, multiVersion); - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.throwInternalError(); - } - - long getLastKey() { - PageData root = getPage(rootPageId, 0); - return root.getLastKey(); - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - long cost = 10 * (tableData.getRowCountApproximation() + - Constants.COST_ROW_OFFSET); - return cost; - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void remove(Session session, Row row) { - if (tableData.getContainsLargeObject()) { - for (int i = 0, len = row.getColumnCount(); i < len; i++) { - Value v = row.getValue(i); - if (v.isLinked()) { - session.unlinkAtCommit(v); - } - } - } - if 
(trace.isDebugEnabled()) { - trace.debug("{0} remove {1}", getName(), row); - } - if (rowCount == 1) { - removeAllRows(); - } else { - try { - long key = row.getKey(); - PageData root = getPage(rootPageId, 0); - root.remove(key); - invalidateRowCount(); - rowCount--; - } finally { - store.incrementChangeCount(); - } - } - if (multiVersion) { - // if storage is null, the delete flag is not yet set - row.setDeleted(true); - if (delta == null) { - delta = New.hashSet(); - } - boolean wasAdded = delta.remove(row); - if (!wasAdded) { - delta.add(row); - } - incrementRowCount(session.getId(), -1); - } - store.logAddOrRemoveRow(session, tableData.getId(), row, false); - } - - @Override - public void remove(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} remove", this); - } - removeAllRows(); - store.free(rootPageId); - store.removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} truncate", this); - } - store.logTruncate(session, tableData.getId()); - removeAllRows(); - if (tableData.getContainsLargeObject() && tableData.isPersistData()) { - // unfortunately, the data is gone on rollback - session.commit(false); - database.getLobStorage().removeAllForTable(table.getId()); - } - if (multiVersion) { - sessionRowCount.clear(); - } - tableData.setRowCount(0); - } - - private void removeAllRows() { - try { - PageData root = getPage(rootPageId, 0); - root.freeRecursive(); - root = PageDataLeaf.create(this, rootPageId, PageData.ROOT); - store.removeFromCache(rootPageId); - store.update(root); - rowCount = 0; - lastKey = 0; - } finally { - store.incrementChangeCount(); - } - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("PAGE"); - } - - @Override - public Row getRow(Session session, long key) { - return getRowWithKey(key); - } - - /** - * Get the row with the given key. 
- * - * @param key the key - * @return the row - */ - public Row getRowWithKey(long key) { - PageData root = getPage(rootPageId, 0); - return root.getRowWithKey(key); - } - - PageStore getPageStore() { - return store; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getRowCount(Session session) { - if (multiVersion) { - Integer i = sessionRowCount.get(session.getId()); - long count = i == null ? 0 : i.intValue(); - count += rowCount; - count -= rowCountDiff; - return count; - } - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - PageData root = getPage(rootPageId, 0); - return root.getDiskSpaceUsed(); - } - - @Override - public String getCreateSQL() { - return null; - } - - @Override - public int getColumnIndex(Column col) { - // can not use this index - use the PageDelegateIndex instead - return -1; - } - - @Override - public void close(Session session) { - if (trace.isDebugEnabled()) { - trace.debug("{0} close", this); - } - if (delta != null) { - delta.clear(); - } - rowCountDiff = 0; - if (sessionRowCount != null) { - sessionRowCount.clear(); - } - // can not close the index because it might get used afterwards, - // for example after running recovery - writeRowCount(); - } - - Iterator getDelta() { - if (delta == null) { - List e = Collections.emptyList(); - return e.iterator(); - } - return delta.iterator(); - } - - private void incrementRowCount(int sessionId, int count) { - if (multiVersion) { - Integer id = sessionId; - Integer c = sessionRowCount.get(id); - int current = c == null ? 0 : c.intValue(); - sessionRowCount.put(id, current + count); - rowCountDiff += count; - } - } - - @Override - public void commit(int operation, Row row) { - if (multiVersion) { - if (delta != null) { - delta.remove(row); - } - incrementRowCount(row.getSessionId(), - operation == UndoLogRecord.DELETE ? 1 : -1); - } - } - - /** - * The root page has changed. 
- * - * @param session the session - * @param newPos the new position - */ - void setRootPageId(Session session, int newPos) { - store.removeMeta(this, session); - this.rootPageId = newPos; - store.addMeta(this, session); - store.addIndex(this); - } - - public void setMainIndexColumn(int mainIndexColumn) { - this.mainIndexColumn = mainIndexColumn; - } - - public int getMainIndexColumn() { - return mainIndexColumn; - } - - @Override - public String toString() { - return getName(); - } - - private void invalidateRowCount() { - PageData root = getPage(rootPageId, 0); - root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT); - } - - @Override - public void writeRowCount() { - if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) { - // currently creating the index - return; - } - try { - PageData root = getPage(rootPageId, 0); - root.setRowCountStored(MathUtils.convertLongToInt(rowCount)); - } finally { - store.incrementChangeCount(); - } - } - - @Override - public String getPlanSQL() { - return table.getSQL() + ".tableScan"; - } - - int getMemoryPerPage() { - return memoryPerPage; - } - - /** - * The memory usage of a page was changed. The new value is used to adopt - * the average estimated memory size of a page. - * - * @param x the new memory size - */ - void memoryChange(int x) { - if (memoryCount < Constants.MEMORY_FACTOR) { - memoryPerPage += (x - memoryPerPage) / ++memoryCount; - } else { - memoryPerPage += (x > memoryPerPage ? 1 : -1) + - ((x - memoryPerPage) / Constants.MEMORY_FACTOR); - } - } - - @Override - public boolean isRowIdIndex() { - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageDataLeaf.java b/h2/src/main/org/h2/index/PageDataLeaf.java deleted file mode 100644 index 92a49ef1ef..0000000000 --- a/h2/src/main/org/h2/index/PageDataLeaf.java +++ /dev/null @@ -1,631 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.lang.ref.SoftReference; -import java.util.Arrays; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.table.RegularTable; -import org.h2.value.Value; - -/** - * A leaf page that contains data of one or multiple rows. Format: - *
      - *
    • page type: byte (0)
    • - *
    • checksum: short (1-2)
    • - *
    • parent page id (0 for root): int (3-6)
    • - *
    • table id: varInt
    • - *
    • column count: varInt
    • - *
    • entry count: short
    • - *
    • with overflow: the first overflow page id: int
    • - *
    • list of key / offset pairs (key: varLong, offset: shortInt)
    • - *
    • data
    • - *
    - */ -public class PageDataLeaf extends PageData { - - private final boolean optimizeUpdate; - - /** - * The row offsets. - */ - private int[] offsets; - - /** - * The rows. - */ - private Row[] rows; - - /** - * For pages with overflow: the soft reference to the row - */ - private SoftReference rowRef; - - /** - * The page id of the first overflow page (0 if no overflow). - */ - private int firstOverflowPageId; - - /** - * The start of the data area. - */ - private int start; - - /** - * The size of the row in bytes for large rows. - */ - private int overflowRowSize; - - private int columnCount; - - private int memoryData; - - private boolean writtenData; - - private PageDataLeaf(PageDataIndex index, int pageId, Data data) { - super(index, pageId, data); - this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate; - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageDataLeaf create(PageDataIndex index, int pageId, int parentPageId) { - PageDataLeaf p = new PageDataLeaf(index, pageId, index.getPageStore() - .createData()); - index.getPageStore().logUndo(p, null); - p.rows = Row.EMPTY_ARRAY; - p.parentPageId = parentPageId; - p.columnCount = index.getTable().getColumns().length; - p.writeHead(); - p.start = p.data.length(); - return p; - } - - /** - * Read a data leaf page. 
- * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageDataIndex index, Data data, int pageId) { - PageDataLeaf p = new PageDataLeaf(index, pageId, data); - p.read(); - return p; - } - - private void read() { - data.reset(); - int type = data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - int tableId = data.readVarInt(); - if (tableId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected table:" + index.getId() + - " got:" + tableId + " type:" + type); - } - columnCount = data.readVarInt(); - entryCount = data.readShortInt(); - offsets = new int[entryCount]; - keys = new long[entryCount]; - rows = new Row[entryCount]; - if (type == Page.TYPE_DATA_LEAF) { - if (entryCount != 1) { - DbException.throwInternalError("entries: " + entryCount); - } - firstOverflowPageId = data.readInt(); - } - for (int i = 0; i < entryCount; i++) { - keys[i] = data.readVarLong(); - offsets[i] = data.readShortInt(); - } - start = data.length(); - written = true; - writtenData = true; - } - - private int getRowLength(Row row) { - int size = 0; - for (int i = 0; i < columnCount; i++) { - size += data.getValueLen(row.getValue(i)); - } - return size; - } - - private int findInsertionPoint(long key) { - int x = find(key); - if (x < entryCount && keys[x] == key) { - throw index.getDuplicateKeyException(""+key); - } - return x; - } - - @Override - int addRowTry(Row row) { - index.getPageStore().logUndo(this, data); - int rowLength = getRowLength(row); - int pageSize = index.getPageStore().getPageSize(); - int last = entryCount == 0 ? 
pageSize : offsets[entryCount - 1]; - int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey()); - if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) { - int x = findInsertionPoint(row.getKey()); - if (entryCount > 1) { - if (entryCount < 5) { - // required, otherwise the index doesn't work correctly - return entryCount / 2; - } - if (index.isSortedInsertMode()) { - return x < 2 ? 1 : x > entryCount - 1 ? entryCount - 1 : x; - } - // split near the insertion point to better fill pages - // split in half would be: - // return entryCount / 2; - int third = entryCount / 3; - return x < third ? third : x >= 2 * third ? 2 * third : x; - } - return x; - } - index.getPageStore().logUndo(this, data); - int x; - if (entryCount == 0) { - x = 0; - } else { - if (!optimizeUpdate) { - readAllRows(); - } - x = findInsertionPoint(row.getKey()); - } - written = false; - changeCount = index.getPageStore().getChangeCount(); - last = x == 0 ? pageSize : offsets[x - 1]; - int offset = last - rowLength; - start += keyOffsetPairLen; - offsets = insert(offsets, entryCount, x, offset); - add(offsets, x + 1, entryCount + 1, -rowLength); - keys = insert(keys, entryCount, x, row.getKey()); - rows = insert(rows, entryCount, x, row); - entryCount++; - index.getPageStore().update(this); - if (optimizeUpdate) { - if (writtenData && offset >= start) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount - 1] + rowLength; - int dataEnd = offsets[x]; - System.arraycopy(d, dataStart, d, dataStart - rowLength, - dataEnd - dataStart + rowLength); - data.setPos(dataEnd); - for (int j = 0; j < columnCount; j++) { - data.writeValue(row.getValue(j)); - } - } - } - if (offset < start) { - writtenData = false; - if (entryCount > 1) { - DbException.throwInternalError(); - } - // need to write the overflow page id - start += 4; - int remaining = rowLength - (pageSize - start); - // fix offset - offset = start; - offsets[x] = offset; - int previous = getPos(); - int dataOffset = 
pageSize; - int page = index.getPageStore().allocatePage(); - firstOverflowPageId = page; - this.overflowRowSize = pageSize + rowLength; - writeData(); - // free up the space used by the row - Row r = rows[0]; - rowRef = new SoftReference(r); - rows[0] = null; - Data all = index.getPageStore().createData(); - all.checkCapacity(data.length()); - all.write(data.getBytes(), 0, data.length()); - data.truncate(index.getPageStore().getPageSize()); - do { - int type, size, next; - if (remaining <= pageSize - PageDataOverflow.START_LAST) { - type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST; - size = remaining; - next = 0; - } else { - type = Page.TYPE_DATA_OVERFLOW; - size = pageSize - PageDataOverflow.START_MORE; - next = index.getPageStore().allocatePage(); - } - PageDataOverflow overflow = PageDataOverflow.create(index.getPageStore(), - page, type, previous, next, all, dataOffset, size); - index.getPageStore().update(overflow); - dataOffset += size; - remaining -= size; - previous = page; - page = next; - } while (remaining > 0); - } - if (rowRef == null) { - memoryChange(true, row); - } else { - memoryChange(true, null); - } - return -1; - } - - private void removeRow(int i) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - if (!optimizeUpdate) { - readAllRows(); - } - Row r = getRowAt(i); - if (r != null) { - memoryChange(false, r); - } - entryCount--; - if (entryCount < 0) { - DbException.throwInternalError(); - } - if (firstOverflowPageId != 0) { - start -= 4; - freeOverflow(); - firstOverflowPageId = 0; - overflowRowSize = 0; - rowRef = null; - } - int keyOffsetPairLen = 2 + Data.getVarLongLen(keys[i]); - int startNext = i > 0 ? 
offsets[i - 1] : index.getPageStore().getPageSize(); - int rowLength = startNext - offsets[i]; - if (optimizeUpdate) { - if (writtenData) { - byte[] d = data.getBytes(); - int dataStart = offsets[entryCount]; - System.arraycopy(d, dataStart, d, dataStart + rowLength, - offsets[i] - dataStart); - Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0); - } - } else { - int clearStart = offsets[entryCount]; - Arrays.fill(data.getBytes(), clearStart, clearStart + rowLength, (byte) 0); - } - start -= keyOffsetPairLen; - offsets = remove(offsets, entryCount + 1, i); - add(offsets, i, entryCount, rowLength); - keys = remove(keys, entryCount + 1, i); - rows = remove(rows, entryCount + 1, i); - } - - @Override - Cursor find(Session session, long minKey, long maxKey, boolean multiVersion) { - int x = find(minKey); - return new PageDataCursor(session, this, x, maxKey, multiVersion); - } - - /** - * Get the row at the given index. - * - * @param at the index - * @return the row - */ - Row getRowAt(int at) { - Row r = rows[at]; - if (r == null) { - if (firstOverflowPageId == 0) { - r = readRow(data, offsets[at], columnCount); - } else { - if (rowRef != null) { - r = rowRef.get(); - if (r != null) { - return r; - } - } - PageStore store = index.getPageStore(); - Data buff = store.createData(); - int pageSize = store.getPageSize(); - int offset = offsets[at]; - buff.write(data.getBytes(), offset, pageSize - offset); - int next = firstOverflowPageId; - do { - PageDataOverflow page = index.getPageOverflow(next); - next = page.readInto(buff); - } while (next != 0); - overflowRowSize = pageSize + buff.length(); - r = readRow(buff, 0, columnCount); - } - r.setKey(keys[at]); - if (firstOverflowPageId != 0) { - rowRef = new SoftReference(r); - } else { - rows[at] = r; - memoryChange(true, r); - } - } - return r; - } - - int getEntryCount() { - return entryCount; - } - - @Override - PageData split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageDataLeaf 
p2 = PageDataLeaf.create(index, newPageId, parentPageId); - for (int i = splitPoint; i < entryCount;) { - int split = p2.addRowTry(getRowAt(splitPoint)); - if (split != -1) { - DbException.throwInternalError("split " + split); - } - removeRow(splitPoint); - } - return p2; - } - - @Override - long getLastKey() { - // TODO re-use keys, but remove this mechanism - if (entryCount == 0) { - return 0; - } - return getRowAt(entryCount - 1).getKey(); - } - - PageDataLeaf getNextPage() { - if (parentPageId == PageData.ROOT) { - return null; - } - PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1); - return next.getNextPage(keys[entryCount - 1]); - } - - @Override - PageDataLeaf getFirstLeaf() { - return this; - } - - @Override - protected void remapChildren(int old) { - if (firstOverflowPageId == 0) { - return; - } - PageDataOverflow overflow = index.getPageOverflow(firstOverflowPageId); - overflow.setParentPageId(getPos()); - index.getPageStore().update(overflow); - } - - @Override - boolean remove(long key) { - int i = find(key); - if (keys == null || keys[i] != key) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - index.getSQL() + ": " + key + " " + (keys == null ? 
-1 : keys[i])); - } - index.getPageStore().logUndo(this, data); - if (entryCount == 1) { - freeRecursive(); - return true; - } - removeRow(i); - index.getPageStore().update(this); - return false; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - freeOverflow(); - } - - private void freeOverflow() { - if (firstOverflowPageId != 0) { - int next = firstOverflowPageId; - do { - PageDataOverflow page = index.getPageOverflow(next); - page.free(); - next = page.getNextOverflow(); - } while (next != 0); - } - } - - @Override - Row getRowWithKey(long key) { - int at = find(key); - return getRowAt(at); - } - - @Override - int getRowCount() { - return entryCount; - } - - @Override - void setRowCountStored(int rowCount) { - // ignore - } - - @Override - long getDiskSpaceUsed() { - return index.getPageStore().getPageSize(); - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - data.truncate(index.getPageStore().getPageSize()); - } - - private void readAllRows() { - for (int i = 0; i < entryCount; i++) { - getRowAt(i); - } - } - - private void writeHead() { - data.reset(); - int type; - if (firstOverflowPageId == 0) { - type = Page.TYPE_DATA_LEAF | Page.FLAG_LAST; - } else { - type = Page.TYPE_DATA_LEAF; - } - data.writeByte((byte) type); - data.writeShortInt(0); - if (SysProperties.CHECK2) { - if (data.length() != START_PARENT) { - DbException.throwInternalError(); - } - } - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeVarInt(columnCount); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - if (!optimizeUpdate) { - readAllRows(); - } - writeHead(); - if (firstOverflowPageId != 0) { - data.writeInt(firstOverflowPageId); - data.checkCapacity(overflowRowSize); - } - for (int i = 0; i < entryCount; i++) { - data.writeVarLong(keys[i]); - data.writeShortInt(offsets[i]); - } 
- if (!writtenData || !optimizeUpdate) { - for (int i = 0; i < entryCount; i++) { - data.setPos(offsets[i]); - Row r = getRowAt(i); - for (int j = 0; j < columnCount; j++) { - data.writeValue(r.getValue(j)); - } - } - writtenData = true; - } - written = true; - } - - @Override - public String toString() { - return "page[" + getPos() + "] data leaf table:" + - index.getId() + " " + index.getTable().getName() + - " entries:" + entryCount + " parent:" + parentPageId + - (firstOverflowPageId == 0 ? "" : " overflow:" + firstOverflowPageId) + - " keys:" + Arrays.toString(keys) + " offsets:" + Arrays.toString(offsets); - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - // load the pages into the cache, to ensure old pages - // are written - if (parentPageId != ROOT) { - store.getPage(parentPageId); - } - store.logUndo(this, data); - PageDataLeaf p2 = PageDataLeaf.create(index, newPos, parentPageId); - readAllRows(); - p2.keys = keys; - p2.overflowRowSize = overflowRowSize; - p2.firstOverflowPageId = firstOverflowPageId; - p2.rowRef = rowRef; - p2.rows = rows; - if (firstOverflowPageId != 0) { - p2.rows[0] = getRowAt(0); - } - p2.entryCount = entryCount; - p2.offsets = offsets; - p2.start = start; - p2.remapChildren(getPos()); - p2.writeData(); - p2.data.truncate(index.getPageStore().getPageSize()); - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageDataNode p = (PageDataNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - store.free(getPos()); - } - - /** - * Set the overflow page id. 
- * - * @param old the old overflow page id - * @param overflow the new overflow page id - */ - void setOverflow(int old, int overflow) { - if (SysProperties.CHECK && old != firstOverflowPageId) { - DbException.throwInternalError("move " + this + " " + firstOverflowPageId); - } - index.getPageStore().logUndo(this, data); - firstOverflowPageId = overflow; - if (written) { - changeCount = index.getPageStore().getChangeCount(); - writeHead(); - data.writeInt(firstOverflowPageId); - } - index.getPageStore().update(this); - } - - private void memoryChange(boolean add, Row r) { - int diff = r == null ? 0 : 4 + 8 + Constants.MEMORY_POINTER + r.getMemory(); - memoryData += add ? diff : -diff; - index.memoryChange((Constants.MEMORY_PAGE_DATA + - memoryData + index.getPageStore().getPageSize()) >> 2); - } - - @Override - public boolean isStream() { - return firstOverflowPageId > 0; - } - - /** - * Read a row from the data page at the given position. - * - * @param data the data page - * @param pos the position to read from - * @param columnCount the number of columns - * @return the row - */ - private static Row readRow(Data data, int pos, int columnCount) { - Value[] values = new Value[columnCount]; - synchronized (data) { - data.setPos(pos); - for (int i = 0; i < columnCount; i++) { - values[i] = data.readValue(); - } - } - return RegularTable.createRow(values); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataNode.java b/h2/src/main/org/h2/index/PageDataNode.java deleted file mode 100644 index d621a96736..0000000000 --- a/h2/src/main/org/h2/index/PageDataNode.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Arrays; -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; -import org.h2.util.Utils; - -/** - * A leaf page that contains data of one or multiple rows. Format: - *
      - *
    • page type: byte (0)
    • - *
    • checksum: short (1-2)
    • - *
    • parent page id (0 for root): int (3-6)
    • - *
    • table id: varInt
    • - *
    • count of all children (-1 if not known): int
    • - *
    • entry count: short
    • - *
    • rightmost child page id: int
    • - *
    • entries (child page id: int, key: varLong)
    • - *
    - * The key is the largest key of the respective child, meaning key[0] is the - * largest key of child[0]. - */ -public class PageDataNode extends PageData { - - /** - * The page ids of the children. - */ - private int[] childPageIds; - - private int rowCountStored = UNKNOWN_ROWCOUNT; - - private int rowCount = UNKNOWN_ROWCOUNT; - - /** - * The number of bytes used in the page - */ - private int length; - - private PageDataNode(PageDataIndex index, int pageId, Data data) { - super(index, pageId, data); - } - - /** - * Create a new page. - * - * @param index the index - * @param pageId the page id - * @param parentPageId the parent - * @return the page - */ - static PageDataNode create(PageDataIndex index, int pageId, int parentPageId) { - PageDataNode p = new PageDataNode(index, pageId, - index.getPageStore().createData()); - index.getPageStore().logUndo(p, null); - p.parentPageId = parentPageId; - p.writeHead(); - // 4 bytes for the rightmost child page id - p.length = p.data.length() + 4; - return p; - } - - /** - * Read a data node page. 
- * - * @param index the index - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageDataIndex index, Data data, int pageId) { - PageDataNode p = new PageDataNode(index, pageId, data); - p.read(); - return p; - } - - private void read() { - data.reset(); - data.readByte(); - data.readShortInt(); - this.parentPageId = data.readInt(); - int indexId = data.readVarInt(); - if (indexId != index.getId()) { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "page:" + getPos() + " expected index:" + index.getId() + - "got:" + indexId); - } - rowCount = rowCountStored = data.readInt(); - entryCount = data.readShortInt(); - childPageIds = new int[entryCount + 1]; - childPageIds[entryCount] = data.readInt(); - keys = Utils.newLongArray(entryCount); - for (int i = 0; i < entryCount; i++) { - childPageIds[i] = data.readInt(); - keys[i] = data.readVarLong(); - } - length = data.length(); - check(); - written = true; - } - - private void addChild(int x, int childPageId, long key) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId); - keys = insert(keys, entryCount, x, key); - entryCount++; - length += 4 + Data.getVarLongLen(key); - } - - @Override - int addRowTry(Row row) { - index.getPageStore().logUndo(this, data); - int keyOffsetPairLen = 4 + Data.getVarLongLen(row.getKey()); - while (true) { - int x = find(row.getKey()); - PageData page = index.getPage(childPageIds[x], getPos()); - int splitPoint = page.addRowTry(row); - if (splitPoint == -1) { - break; - } - if (length + keyOffsetPairLen > index.getPageStore().getPageSize()) { - return entryCount / 2; - } - long pivot = splitPoint == 0 ? 
row.getKey() : page.getKey(splitPoint - 1); - PageData page2 = page.split(splitPoint); - index.getPageStore().update(page); - index.getPageStore().update(page2); - addChild(x, page2.getPos(), pivot); - index.getPageStore().update(this); - } - updateRowCount(1); - return -1; - } - - private void updateRowCount(int offset) { - if (rowCount != UNKNOWN_ROWCOUNT) { - rowCount += offset; - } - if (rowCountStored != UNKNOWN_ROWCOUNT) { - rowCountStored = UNKNOWN_ROWCOUNT; - index.getPageStore().logUndo(this, data); - if (written) { - writeHead(); - } - index.getPageStore().update(this); - } - } - - @Override - Cursor find(Session session, long minKey, long maxKey, boolean multiVersion) { - int x = find(minKey); - int child = childPageIds[x]; - return index.getPage(child, getPos()).find(session, minKey, maxKey, - multiVersion); - } - - @Override - PageData split(int splitPoint) { - int newPageId = index.getPageStore().allocatePage(); - PageDataNode p2 = PageDataNode.create(index, newPageId, parentPageId); - int firstChild = childPageIds[splitPoint]; - for (int i = splitPoint; i < entryCount;) { - p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]); - removeChild(splitPoint); - } - int lastChild = childPageIds[splitPoint - 1]; - removeChild(splitPoint - 1); - childPageIds[splitPoint - 1] = lastChild; - p2.childPageIds[0] = firstChild; - p2.remapChildren(getPos()); - return p2; - } - - @Override - protected void remapChildren(int old) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData p = index.getPage(child, old); - p.setParentPageId(getPos()); - index.getPageStore().update(p); - } - } - - /** - * Initialize the page. 
- * - * @param page1 the first child page - * @param pivot the pivot key - * @param page2 the last child page - */ - void init(PageData page1, long pivot, PageData page2) { - entryCount = 1; - childPageIds = new int[] { page1.getPos(), page2.getPos() }; - keys = new long[] { pivot }; - length += 4 + Data.getVarLongLen(pivot); - check(); - } - - @Override - long getLastKey() { - return index.getPage(childPageIds[entryCount], getPos()).getLastKey(); - } - - /** - * Get the next leaf page. - * - * @param key the last key of the current page - * @return the next leaf page - */ - PageDataLeaf getNextPage(long key) { - int i = find(key) + 1; - if (i > entryCount) { - if (parentPageId == PageData.ROOT) { - return null; - } - PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1); - return next.getNextPage(key); - } - PageData page = index.getPage(childPageIds[i], getPos()); - return page.getFirstLeaf(); - } - - @Override - PageDataLeaf getFirstLeaf() { - int child = childPageIds[0]; - return index.getPage(child, getPos()).getFirstLeaf(); - } - - @Override - boolean remove(long key) { - int at = find(key); - // merge is not implemented to allow concurrent usage - // TODO maybe implement merge - PageData page = index.getPage(childPageIds[at], getPos()); - boolean empty = page.remove(key); - index.getPageStore().logUndo(this, data); - updateRowCount(-1); - if (!empty) { - // the first row didn't change - nothing to do - return false; - } - // this child is now empty - index.getPageStore().free(page.getPos()); - if (entryCount < 1) { - // no more children - this page is empty as well - return true; - } - removeChild(at); - index.getPageStore().update(this); - return false; - } - - @Override - void freeRecursive() { - index.getPageStore().logUndo(this, data); - index.getPageStore().free(getPos()); - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - index.getPage(child, getPos()).freeRecursive(); - } - } - - @Override - Row 
getRowWithKey(long key) { - int at = find(key); - PageData page = index.getPage(childPageIds[at], getPos()); - return page.getRowWithKey(key); - } - - @Override - int getRowCount() { - if (rowCount == UNKNOWN_ROWCOUNT) { - int count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData page = index.getPage(child, getPos()); - if (getPos() == page.getPos()) { - throw DbException.throwInternalError("Page is its own child: " + getPos()); - } - count += page.getRowCount(); - index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE, - index.getTable() + "." + index.getName(), count, Integer.MAX_VALUE); - } - rowCount = count; - } - return rowCount; - } - - @Override - long getDiskSpaceUsed() { - long count = 0; - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData page = index.getPage(child, getPos()); - if (getPos() == page.getPos()) { - throw DbException.throwInternalError("Page is its own child: " + getPos()); - } - count += page.getDiskSpaceUsed(); - index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE, - index.getTable() + "." 
+ index.getName(), - (int) (count >> 16), Integer.MAX_VALUE); - } - return count; - } - - @Override - void setRowCountStored(int rowCount) { - this.rowCount = rowCount; - if (rowCountStored != rowCount) { - rowCountStored = rowCount; - index.getPageStore().logUndo(this, data); - if (written) { - changeCount = index.getPageStore().getChangeCount(); - writeHead(); - } - index.getPageStore().update(this); - } - } - - private void check() { - if (SysProperties.CHECK) { - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - if (child == 0) { - DbException.throwInternalError(); - } - } - } - } - - @Override - public void write() { - writeData(); - index.getPageStore().writePage(getPos(), data); - } - - private void writeHead() { - data.reset(); - data.writeByte((byte) Page.TYPE_DATA_NODE); - data.writeShortInt(0); - if (SysProperties.CHECK2) { - if (data.length() != START_PARENT) { - DbException.throwInternalError(); - } - } - data.writeInt(parentPageId); - data.writeVarInt(index.getId()); - data.writeInt(rowCountStored); - data.writeShortInt(entryCount); - } - - private void writeData() { - if (written) { - return; - } - check(); - writeHead(); - data.writeInt(childPageIds[entryCount]); - for (int i = 0; i < entryCount; i++) { - data.writeInt(childPageIds[i]); - data.writeVarLong(keys[i]); - } - if (length != data.length()) { - DbException.throwInternalError("expected pos: " + length + - " got: " + data.length()); - } - written = true; - } - - private void removeChild(int i) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - int removedKeyIndex = i < entryCount ? 
i : i - 1; - entryCount--; - length -= 4 + Data.getVarLongLen(keys[removedKeyIndex]); - if (entryCount < 0) { - DbException.throwInternalError(); - } - keys = remove(keys, entryCount + 1, removedKeyIndex); - childPageIds = remove(childPageIds, entryCount + 2, i); - } - - @Override - public String toString() { - return "page[" + getPos() + "] data node table:" + index.getId() + - " entries:" + entryCount + " " + Arrays.toString(childPageIds); - } - - @Override - public void moveTo(Session session, int newPos) { - PageStore store = index.getPageStore(); - // load the pages into the cache, to ensure old pages - // are written - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - store.getPage(child); - } - if (parentPageId != ROOT) { - store.getPage(parentPageId); - } - store.logUndo(this, data); - PageDataNode p2 = PageDataNode.create(index, newPos, parentPageId); - p2.rowCountStored = rowCountStored; - p2.rowCount = rowCount; - p2.childPageIds = childPageIds; - p2.keys = keys; - p2.entryCount = entryCount; - p2.length = length; - store.update(p2); - if (parentPageId == ROOT) { - index.setRootPageId(session, newPos); - } else { - PageDataNode p = (PageDataNode) store.getPage(parentPageId); - p.moveChild(getPos(), newPos); - } - for (int i = 0; i < entryCount + 1; i++) { - int child = childPageIds[i]; - PageData p = (PageData) store.getPage(child); - p.setParentPageId(newPos); - store.update(p); - } - store.free(getPos()); - } - - /** - * One of the children has moved to another page. 
- * - * @param oldPos the old position - * @param newPos the new position - */ - void moveChild(int oldPos, int newPos) { - for (int i = 0; i < entryCount + 1; i++) { - if (childPageIds[i] == oldPos) { - index.getPageStore().logUndo(this, data); - written = false; - changeCount = index.getPageStore().getChangeCount(); - childPageIds[i] = newPos; - index.getPageStore().update(this); - return; - } - } - throw DbException.throwInternalError(); - } - -} diff --git a/h2/src/main/org/h2/index/PageDataOverflow.java b/h2/src/main/org/h2/index/PageDataOverflow.java deleted file mode 100644 index c0a9d4d0a6..0000000000 --- a/h2/src/main/org/h2/index/PageDataOverflow.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.store.Data; -import org.h2.store.Page; -import org.h2.store.PageStore; - -/** - * Overflow data for a leaf page. Format: - *
      - *
    • page type: byte (0)
    • - *
    • checksum: short (1-2)
    • - *
    • parent page id (0 for root): int (3-6)
    • - *
    • more data: next overflow page id: int (7-10)
    • - *
    • last remaining size: short (7-8)
    • - *
    • data (11-/9-)
    • - *
    - */ -public class PageDataOverflow extends Page { - - /** - * The start of the data in the last overflow page. - */ - static final int START_LAST = 9; - - /** - * The start of the data in a overflow page that is not the last one. - */ - static final int START_MORE = 11; - - private static final int START_NEXT_OVERFLOW = 7; - - /** - * The page store. - */ - private final PageStore store; - - /** - * The page type. - */ - private int type; - - /** - * The parent page (overflow or leaf). - */ - private int parentPageId; - - /** - * The next overflow page, or 0. - */ - private int nextPage; - - private final Data data; - - private int start; - private int size; - - /** - * Create an object from the given data page. - * - * @param store the page store - * @param pageId the page id - * @param data the data page - */ - private PageDataOverflow(PageStore store, int pageId, Data data) { - this.store = store; - setPos(pageId); - this.data = data; - } - - /** - * Read an overflow page. - * - * @param store the page store - * @param data the data - * @param pageId the page id - * @return the page - */ - public static Page read(PageStore store, Data data, int pageId) { - PageDataOverflow p = new PageDataOverflow(store, pageId, data); - p.read(); - return p; - } - - /** - * Create a new overflow page. 
- * - * @param store the page store - * @param page the page id - * @param type the page type - * @param parentPageId the parent page id - * @param next the next page or 0 - * @param all the data - * @param offset the offset within the data - * @param size the number of bytes - * @return the page - */ - static PageDataOverflow create(PageStore store, int page, - int type, int parentPageId, int next, - Data all, int offset, int size) { - Data data = store.createData(); - PageDataOverflow p = new PageDataOverflow(store, page, data); - store.logUndo(p, null); - data.writeByte((byte) type); - data.writeShortInt(0); - data.writeInt(parentPageId); - if (type == Page.TYPE_DATA_OVERFLOW) { - data.writeInt(next); - } else { - data.writeShortInt(size); - } - p.start = data.length(); - data.write(all.getBytes(), offset, size); - p.type = type; - p.parentPageId = parentPageId; - p.nextPage = next; - p.size = size; - return p; - } - - /** - * Read the page. - */ - private void read() { - data.reset(); - type = data.readByte(); - data.readShortInt(); - parentPageId = data.readInt(); - if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { - size = data.readShortInt(); - nextPage = 0; - } else if (type == Page.TYPE_DATA_OVERFLOW) { - nextPage = data.readInt(); - size = store.getPageSize() - data.length(); - } else { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "page:" + - getPos() + " type:" + type); - } - start = data.length(); - } - - /** - * Read the data into a target buffer. 
- * - * @param target the target data page - * @return the next page, or 0 if no next page - */ - int readInto(Data target) { - target.checkCapacity(size); - if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { - target.write(data.getBytes(), START_LAST, size); - return 0; - } - target.write(data.getBytes(), START_MORE, size); - return nextPage; - } - - int getNextOverflow() { - return nextPage; - } - - private void writeHead() { - data.writeByte((byte) type); - data.writeShortInt(0); - data.writeInt(parentPageId); - } - - @Override - public void write() { - writeData(); - store.writePage(getPos(), data); - } - - - private void writeData() { - data.reset(); - writeHead(); - if (type == Page.TYPE_DATA_OVERFLOW) { - data.writeInt(nextPage); - } else { - data.writeShortInt(size); - } - } - - - @Override - public String toString() { - return "page[" + getPos() + "] data leaf overflow parent:" + - parentPageId + " next:" + nextPage; - } - - /** - * Get the estimated memory size. - * - * @return number of double words (4 bytes) - */ - @Override - public int getMemory() { - return (Constants.MEMORY_PAGE_DATA_OVERFLOW + store.getPageSize()) >> 2; - } - - void setParentPageId(int parent) { - store.logUndo(this, data); - this.parentPageId = parent; - } - - @Override - public void moveTo(Session session, int newPos) { - // load the pages into the cache, to ensure old pages - // are written - Page parent = store.getPage(parentPageId); - if (parent == null) { - throw DbException.throwInternalError(); - } - PageDataOverflow next = null; - if (nextPage != 0) { - next = (PageDataOverflow) store.getPage(nextPage); - } - store.logUndo(this, data); - PageDataOverflow p2 = PageDataOverflow.create(store, newPos, type, - parentPageId, nextPage, data, start, size); - store.update(p2); - if (next != null) { - next.setParentPageId(newPos); - store.update(next); - } - if (parent instanceof PageDataOverflow) { - PageDataOverflow p1 = (PageDataOverflow) parent; - p1.setNext(getPos(), 
newPos); - } else { - PageDataLeaf p1 = (PageDataLeaf) parent; - p1.setOverflow(getPos(), newPos); - } - store.update(parent); - store.free(getPos()); - } - - private void setNext(int old, int nextPage) { - if (SysProperties.CHECK && old != this.nextPage) { - DbException.throwInternalError("move " + this + " " + nextPage); - } - store.logUndo(this, data); - this.nextPage = nextPage; - data.setInt(START_NEXT_OVERFLOW, nextPage); - } - - /** - * Free this page. - */ - void free() { - store.logUndo(this, data); - store.free(getPos()); - } - - @Override - public boolean canRemove() { - return true; - } - - @Override - public boolean isStream() { - return true; - } - -} diff --git a/h2/src/main/org/h2/index/PageDelegateIndex.java b/h2/src/main/org/h2/index/PageDelegateIndex.java deleted file mode 100644 index 7e4f824175..0000000000 --- a/h2/src/main/org/h2/index/PageDelegateIndex.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.store.PageStore; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; - -/** - * An index that delegates indexing to the page data index. 
- */ -public class PageDelegateIndex extends PageIndex { - - private final PageDataIndex mainIndex; - - public PageDelegateIndex(RegularTable table, int id, String name, - IndexType indexType, PageDataIndex mainIndex, boolean create, - Session session) { - IndexColumn[] cols = IndexColumn.wrap( - new Column[] { table.getColumn(mainIndex.getMainIndexColumn())}); - this.initBaseIndex(table, id, name, cols, indexType); - this.mainIndex = mainIndex; - if (!database.isPersistent() || id < 0) { - throw DbException.throwInternalError("" + name); - } - PageStore store = database.getPageStore(); - store.addIndex(this); - if (create) { - store.addMeta(this, session); - } - } - - @Override - public void add(Session session, Row row) { - // nothing to do - } - - @Override - public boolean canFindNext() { - return false; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - long min = mainIndex.getKey(first, Long.MIN_VALUE, Long.MIN_VALUE); - // ifNull is MIN_VALUE as well, because the column is never NULL - // so avoid returning all rows (returning one row is OK) - long max = mainIndex.getKey(last, Long.MAX_VALUE, Long.MIN_VALUE); - return mainIndex.find(session, min, max, false); - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - Cursor cursor; - if (first) { - cursor = mainIndex.find(session, Long.MIN_VALUE, Long.MAX_VALUE, false); - } else { - long x = mainIndex.getLastKey(); - cursor = mainIndex.find(session, x, x, false); - } - cursor.next(); - return cursor; - } - - @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { - throw DbException.throwInternalError(); - } - - @Override - public int getColumnIndex(Column col) { - if (col.getColumnId() == mainIndex.getMainIndexColumn()) { - return 0; - } - return -1; - } - - 
@Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return 10 * getCostRangeIndex(masks, mainIndex.getRowCount(session), - filter, sortOrder); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void remove(Session session, Row row) { - // nothing to do - } - - @Override - public void remove(Session session) { - mainIndex.setMainIndexColumn(-1); - session.getDatabase().getPageStore().removeMeta(this, session); - } - - @Override - public void truncate(Session session) { - // nothing to do - } - - @Override - public void checkRename() { - // ok - } - - @Override - public long getRowCount(Session session) { - return mainIndex.getRowCount(session); - } - - @Override - public long getRowCountApproximation() { - return mainIndex.getRowCountApproximation(); - } - - @Override - public long getDiskSpaceUsed() { - return mainIndex.getDiskSpaceUsed(); - } - - @Override - public void writeRowCount() { - // ignore - } - -} diff --git a/h2/src/main/org/h2/index/PageIndex.java b/h2/src/main/org/h2/index/PageIndex.java deleted file mode 100644 index c964913623..0000000000 --- a/h2/src/main/org/h2/index/PageIndex.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - - -/** - * A page store index. - */ -public abstract class PageIndex extends BaseIndex { - - /** - * The root page of this index. - */ - protected int rootPageId; - - private boolean sortedInsertMode; - - /** - * Get the root page of this index. - * - * @return the root page id - */ - public int getRootPageId() { - return rootPageId; - } - - /** - * Write back the row count if it has changed. 
- */ - public abstract void writeRowCount(); - - @Override - public void setSortedInsertMode(boolean sortedInsertMode) { - this.sortedInsertMode = sortedInsertMode; - } - - boolean isSortedInsertMode() { - return sortedInsertMode; - } - -} diff --git a/h2/src/main/org/h2/index/RangeCursor.java b/h2/src/main/org/h2/index/RangeCursor.java index e32685afa5..e51e1d0406 100644 --- a/h2/src/main/org/h2/index/RangeCursor.java +++ b/h2/src/main/org/h2/index/RangeCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; @@ -9,7 +9,7 @@ import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.value.Value; -import org.h2.value.ValueLong; +import org.h2.value.ValueBigint; /** * The cursor implementation for the range index. @@ -21,10 +21,6 @@ class RangeCursor implements Cursor { private Row currentRow; private final long start, end, step; - RangeCursor(long start, long end) { - this(start, end, 1); - } - RangeCursor(long start, long end, long step) { this.start = start; this.end = end; @@ -50,13 +46,13 @@ public boolean next() { } else { current += step; } - currentRow = new Row(new Value[]{ValueLong.get(current)}, 1); + currentRow = Row.get(new Value[]{ValueBigint.get(current)}, 1); return step > 0 ? current <= end : current >= end; } @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/RangeIndex.java b/h2/src/main/org/h2/index/RangeIndex.java index 153e27e418..30f3bab70b 100644 --- a/h2/src/main/org/h2/index/RangeIndex.java +++ b/h2/src/main/org/h2/index/RangeIndex.java @@ -1,11 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; -import org.h2.engine.Session; +import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; @@ -13,58 +15,66 @@ import org.h2.table.IndexColumn; import org.h2.table.RangeTable; import org.h2.table.TableFilter; +import org.h2.value.Value; +import org.h2.value.ValueBigint; /** * An index for the SYSTEM_RANGE table. * This index can only scan through all rows, search is not supported. */ -public class RangeIndex extends BaseIndex { +public class RangeIndex extends VirtualTableIndex { private final RangeTable rangeTable; public RangeIndex(RangeTable table, IndexColumn[] columns) { - initBaseIndex(table, 0, "RANGE_INDEX", columns, - IndexType.createNonUnique(true)); + super(table, "RANGE_INDEX", columns); this.rangeTable = table; } @Override - public void close(Session session) { - // nothing to do - } - - @Override - public void add(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void remove(Session session, Row row) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - long min = rangeTable.getMin(session), start = min; - long max = rangeTable.getMax(session), end = max; + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + long min = rangeTable.getMin(session); + long max = rangeTable.getMax(session); long step = rangeTable.getStep(session); - try { - start = Math.max(min, first == null ? 
min : first.getValue(0).getLong()); - } catch (Exception e) { - // error when converting the value - ignore + if (step == 0L) { + throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); + } + if (first != null) { + try { + long v = first.getValue(0).getLong(); + if (step > 0) { + if (v > min) { + min += (v - min + step - 1) / step * step; + } + } else if (v > max) { + max = v; + } + } catch (DbException e) { + // error when converting the value - ignore + } } - try { - end = Math.min(max, last == null ? max : last.getValue(0).getLong()); - } catch (Exception e) { - // error when converting the value - ignore + if (last != null) { + try { + long v = last.getValue(0).getLong(); + if (step > 0) { + if (v < max) { + max = v; + } + } else if (v < min) { + min -= (min - v - step - 1) / step * step; + } + } catch (DbException e) { + // error when converting the value - ignore + } } - return new RangeCursor(start, end, step); + return new RangeCursor(min, max, step); } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return 1; + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return 1d; } @Override @@ -72,49 +82,26 @@ public String getCreateSQL() { return null; } - @Override - public void remove(Session session) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public void truncate(Session session) { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("SYSTEM_RANGE"); - } - @Override public boolean canGetFirstOrLast() { return true; } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - long pos = first ? 
rangeTable.getMin(session) : rangeTable.getMax(session); - return new RangeCursor(pos, pos); - } - - @Override - public long getRowCount(Session session) { - return rangeTable.getRowCountApproximation(); + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + long min = rangeTable.getMin(session); + long max = rangeTable.getMax(session); + long step = rangeTable.getStep(session); + if (step == 0L) { + throw DbException.get(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO); + } + return new SingleRowCursor((step > 0 ? min <= max : min >= max) + ? Row.get(new Value[]{ ValueBigint.get(first ^ min >= max ? min : max) }, 1) : null); } @Override - public long getRowCountApproximation() { - return rangeTable.getRowCountApproximation(); + public String getPlanSQL() { + return "range index"; } - @Override - public long getDiskSpaceUsed() { - return 0; - } } diff --git a/h2/src/main/org/h2/index/ScanCursor.java b/h2/src/main/org/h2/index/ScanCursor.java deleted file mode 100644 index 3240298c0e..0000000000 --- a/h2/src/main/org/h2/index/ScanCursor.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Iterator; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for the scan index. 
- */ -public class ScanCursor implements Cursor { - private final ScanIndex scan; - private Row row; - private final Session session; - private final boolean multiVersion; - private Iterator delta; - - ScanCursor(Session session, ScanIndex scan, boolean multiVersion) { - this.session = session; - this.scan = scan; - this.multiVersion = multiVersion; - if (multiVersion) { - delta = scan.getDelta(); - } - row = null; - } - - @Override - public Row get() { - return row; - } - - @Override - public SearchRow getSearchRow() { - return row; - } - - @Override - public boolean next() { - if (multiVersion) { - while (true) { - if (delta != null) { - if (!delta.hasNext()) { - delta = null; - row = null; - continue; - } - row = delta.next(); - if (!row.isDeleted() || row.getSessionId() == session.getId()) { - continue; - } - } else { - row = scan.getNextRow(row); - if (row != null && row.getSessionId() != 0 && - row.getSessionId() != session.getId()) { - continue; - } - } - break; - } - return row != null; - } - row = scan.getNextRow(row); - return row != null; - } - - @Override - public boolean previous() { - throw DbException.throwInternalError(); - } - -} diff --git a/h2/src/main/org/h2/index/ScanIndex.java b/h2/src/main/org/h2/index/ScanIndex.java deleted file mode 100644 index 4e058c81c0..0000000000 --- a/h2/src/main/org/h2/index/ScanIndex.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.engine.UndoLogRecord; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.util.New; - -/** - * The scan index is not really an 'index' in the strict sense, because it can - * not be used for direct lookup. It can only be used to iterate over all rows - * of a table. Each regular table has one such object, even if no primary key or - * indexes are defined. - */ -public class ScanIndex extends BaseIndex { - private long firstFree = -1; - private ArrayList rows = New.arrayList(); - private final RegularTable tableData; - private int rowCountDiff; - private final HashMap sessionRowCount; - private HashSet delta; - private long rowCount; - - public ScanIndex(RegularTable table, int id, IndexColumn[] columns, - IndexType indexType) { - initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType); - if (database.isMultiVersion()) { - sessionRowCount = New.hashMap(); - } else { - sessionRowCount = null; - } - tableData = table; - } - - @Override - public void remove(Session session) { - truncate(session); - } - - @Override - public void truncate(Session session) { - rows = New.arrayList(); - firstFree = -1; - if (tableData.getContainsLargeObject() && tableData.isPersistData()) { - database.getLobStorage().removeAllForTable(table.getId()); - } - tableData.setRowCount(0); - rowCount = 0; - rowCountDiff = 0; - if (database.isMultiVersion()) { - sessionRowCount.clear(); - } - } - - @Override - public String 
getCreateSQL() { - return null; - } - - @Override - public void close(Session session) { - // nothing to do - } - - @Override - public Row getRow(Session session, long key) { - return rows.get((int) key); - } - - @Override - public void add(Session session, Row row) { - // in-memory - if (firstFree == -1) { - int key = rows.size(); - row.setKey(key); - rows.add(row); - } else { - long key = firstFree; - Row free = rows.get((int) key); - firstFree = free.getKey(); - row.setKey(key); - rows.set((int) key, row); - } - row.setDeleted(false); - if (database.isMultiVersion()) { - if (delta == null) { - delta = New.hashSet(); - } - boolean wasDeleted = delta.remove(row); - if (!wasDeleted) { - delta.add(row); - } - incrementRowCount(session.getId(), 1); - } - rowCount++; - } - - @Override - public void commit(int operation, Row row) { - if (database.isMultiVersion()) { - if (delta != null) { - delta.remove(row); - } - incrementRowCount(row.getSessionId(), - operation == UndoLogRecord.DELETE ? 1 : -1); - } - } - - private void incrementRowCount(int sessionId, int count) { - if (database.isMultiVersion()) { - Integer id = sessionId; - Integer c = sessionRowCount.get(id); - int current = c == null ? 
0 : c.intValue(); - sessionRowCount.put(id, current + count); - rowCountDiff += count; - } - } - - @Override - public void remove(Session session, Row row) { - // in-memory - if (!database.isMultiVersion() && rowCount == 1) { - rows = New.arrayList(); - firstFree = -1; - } else { - Row free = new Row(null, 1); - free.setKey(firstFree); - long key = row.getKey(); - if (rows.size() <= key) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - rows.size() + ": " + key); - } - rows.set((int) key, free); - firstFree = key; - } - if (database.isMultiVersion()) { - // if storage is null, the delete flag is not yet set - row.setDeleted(true); - if (delta == null) { - delta = New.hashSet(); - } - boolean wasAdded = delta.remove(row); - if (!wasAdded) { - delta.add(row); - } - incrementRowCount(session.getId(), -1); - } - rowCount--; - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return new ScanCursor(session, this, database.isMultiVersion()); - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET; - } - - @Override - public long getRowCount(Session session) { - if (database.isMultiVersion()) { - Integer i = sessionRowCount.get(session.getId()); - long count = i == null ? 0 : i.intValue(); - count += rowCount; - count -= rowCountDiff; - return count; - } - return rowCount; - } - - /** - * Get the next row that is stored after this row. 
- * - * @param row the current row or null to start the scan - * @return the next row or null if there are no more rows - */ - Row getNextRow(Row row) { - long key; - if (row == null) { - key = -1; - } else { - key = row.getKey(); - } - while (true) { - key++; - if (key >= rows.size()) { - return null; - } - row = rows.get((int) key); - if (!row.isEmpty()) { - return row; - } - } - } - - @Override - public int getColumnIndex(Column col) { - // the scan index cannot use any columns - return -1; - } - - @Override - public void checkRename() { - throw DbException.getUnsupportedException("SCAN"); - } - - @Override - public boolean needRebuild() { - return false; - } - - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("SCAN"); - } - - Iterator getDelta() { - if (delta == null) { - List e = Collections.emptyList(); - return e.iterator(); - } - return delta.iterator(); - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public String getPlanSQL() { - return table.getSQL() + ".tableScan"; - } - -} diff --git a/h2/src/main/org/h2/index/SingleRowCursor.java b/h2/src/main/org/h2/index/SingleRowCursor.java index ab51b7e3ce..1ef602b207 100644 --- a/h2/src/main/org/h2/index/SingleRowCursor.java +++ b/h2/src/main/org/h2/index/SingleRowCursor.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.index; @@ -47,7 +47,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/SpatialIndex.java b/h2/src/main/org/h2/index/SpatialIndex.java index f66a51ba48..1494d36cbe 100644 --- a/h2/src/main/org/h2/index/SpatialIndex.java +++ b/h2/src/main/org/h2/index/SpatialIndex.java @@ -1,29 +1,30 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; +import org.h2.engine.SessionLocal; import org.h2.result.SearchRow; -import org.h2.table.TableFilter; /** * A spatial index. Spatial indexes are used to speed up searching * spatial/geometric data. */ -public interface SpatialIndex extends Index { +public interface SpatialIndex { /** * Find a row or a list of rows and create a cursor to iterate over the * result. * - * @param filter the table filter (which possibly knows about additional - * conditions) + * @param session the session + * @param first the lower bound + * @param last the upper bound * @param intersection the geometry which values should intersect with, or * null for anything * @return the cursor to iterate over the results */ - Cursor findByGeometry(TableFilter filter, SearchRow intersection); + Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection); } diff --git a/h2/src/main/org/h2/index/SpatialTreeIndex.java b/h2/src/main/org/h2/index/SpatialTreeIndex.java deleted file mode 100644 index 00351f6a40..0000000000 --- a/h2/src/main/org/h2/index/SpatialTreeIndex.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import java.util.Iterator; - -import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueGeometry; -import org.h2.value.ValueNull; - -import com.vividsolutions.jts.geom.Envelope; -import com.vividsolutions.jts.geom.Geometry; - -/** - * This is an index based on a MVR-TreeMap. - * - * @author Thomas Mueller - * @author Noel Grandin - * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 - */ -public class SpatialTreeIndex extends BaseIndex implements SpatialIndex { - - private static final String MAP_PREFIX = "RTREE_"; - - private final MVRTreeMap treeMap; - private final MVStore store; - - private boolean closed; - private boolean needRebuild; - - /** - * Constructor. - * - * @param table the table instance - * @param id the index id - * @param indexName the index name - * @param columns the indexed columns (only one geometry column allowed) - * @param persistent whether the index should be persisted - * @param indexType the index type (only spatial index) - * @param create whether to create a new index - * @param session the session. 
- */ - public SpatialTreeIndex(Table table, int id, String indexName, - IndexColumn[] columns, IndexType indexType, boolean persistent, - boolean create, Session session) { - if (indexType.isUnique()) { - throw DbException.getUnsupportedException("not unique"); - } - if (!persistent && !create) { - throw DbException.getUnsupportedException( - "Non persistent index called with create==false"); - } - if (columns.length > 1) { - throw DbException.getUnsupportedException( - "can only do one column"); - } - if ((columns[0].sortType & SortOrder.DESCENDING) != 0) { - throw DbException.getUnsupportedException( - "cannot do descending"); - } - if ((columns[0].sortType & SortOrder.NULLS_FIRST) != 0) { - throw DbException.getUnsupportedException( - "cannot do nulls first"); - } - if ((columns[0].sortType & SortOrder.NULLS_LAST) != 0) { - throw DbException.getUnsupportedException( - "cannot do nulls last"); - } - initBaseIndex(table, id, indexName, columns, indexType); - this.needRebuild = create; - this.table = table; - if (!database.isStarting()) { - if (columns[0].column.getType() != Value.GEOMETRY) { - throw DbException.getUnsupportedException( - "spatial index on non-geometry column, " + - columns[0].column.getCreateSQL()); - } - } - if (!persistent) { - // Index in memory - store = MVStore.open(null); - treeMap = store.openMap("spatialIndex", - new MVRTreeMap.Builder()); - } else { - if (id < 0) { - throw DbException.getUnsupportedException( - "Persistent index with id<0"); - } - MVTableEngine.init(session.getDatabase()); - store = session.getDatabase().getMvStore().getStore(); - // Called after CREATE SPATIAL INDEX or - // by PageStore.addMeta - treeMap = store.openMap(MAP_PREFIX + getId(), - new MVRTreeMap.Builder()); - if (treeMap.isEmpty()) { - needRebuild = true; - } - } - } - - @Override - public void close(Session session) { - store.close(); - closed = true; - } - - @Override - public void add(Session session, Row row) { - if (closed) { - throw 
DbException.throwInternalError(); - } - treeMap.add(getKey(row), row.getKey()); - } - - private SpatialKey getKey(SearchRow row) { - if (row == null) { - return null; - } - Value v = row.getValue(columnIds[0]); - if (v == ValueNull.INSTANCE) { - return null; - } - Geometry g = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getGeometryNoCopy(); - Envelope env = g.getEnvelopeInternal(); - return new SpatialKey(row.getKey(), - (float) env.getMinX(), (float) env.getMaxX(), - (float) env.getMinY(), (float) env.getMaxY()); - } - - @Override - public void remove(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - if (!treeMap.remove(getKey(row), row.getKey())) { - throw DbException.throwInternalError("row not found"); - } - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session); - } - - private Cursor find(Session session) { - return new SpatialCursor(treeMap.keySet().iterator(), table, session); - } - - @Override - public Cursor findByGeometry(TableFilter filter, SearchRow intersection) { - if (intersection == null) { - return find(filter.getSession()); - } - return new SpatialCursor( - treeMap.findIntersectingKeys(getKey(intersection)), table, - filter.getSession()); - } - - @Override - protected long getCostRangeIndex(int[] masks, long rowCount, - TableFilter filter, SortOrder sortOrder) { - return getCostRangeIndex(masks, rowCount, columns); - } - - /** - * Compute spatial index cost - * @param masks Search mask - * @param rowCount Table row count - * @param columns Table columns - * @return Index cost hint - */ - public static long getCostRangeIndex(int[] masks, long rowCount, Column[] columns) { - rowCount += Constants.COST_ROW_OFFSET; - long cost = rowCount; - if (masks == null) { - return cost; - } - for (Column column : columns) { - int 
index = column.getColumnId(); - int mask = masks[index]; - if ((mask & IndexCondition.SPATIAL_INTERSECTS) != 0) { - cost = 3 + rowCount / 4; - } - } - return 10 * cost; - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return getCostRangeIndex(masks, table.getRowCountApproximation(), - filter, sortOrder); - } - - @Override - public void remove(Session session) { - if (!treeMap.isClosed()) { - store.removeMap(treeMap); - } - } - - @Override - public void truncate(Session session) { - treeMap.clear(); - } - - @Override - public void checkRename() { - // nothing to do - } - - @Override - public boolean needRebuild() { - return needRebuild; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (closed) { - throw DbException.throwInternalError(); - } - if (!first) { - throw DbException.throwInternalError( - "Spatial Index can only be fetch by ascending order"); - } - return find(session); - } - - @Override - public long getRowCount(Session session) { - return treeMap.sizeAsLong(); - } - - @Override - public long getRowCountApproximation() { - return treeMap.sizeAsLong(); - } - - @Override - public long getDiskSpaceUsed() { - // TODO estimate disk space usage - return 0; - } - - /** - * A cursor to iterate over spatial keys. 
- */ - private static final class SpatialCursor implements Cursor { - - private final Iterator it; - private SpatialKey current; - private final Table table; - private Session session; - - public SpatialCursor(Iterator it, Table table, Session session) { - this.it = it; - this.table = table; - this.session = session; - } - - @Override - public Row get() { - return table.getRow(session, current.getId()); - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - if (!it.hasNext()) { - return false; - } - current = it.next(); - return true; - } - - @Override - public boolean previous() { - return false; - } - - } - -} - diff --git a/h2/src/main/org/h2/index/TreeCursor.java b/h2/src/main/org/h2/index/TreeCursor.java deleted file mode 100644 index a77288382e..0000000000 --- a/h2/src/main/org/h2/index/TreeCursor.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.Row; -import org.h2.result.SearchRow; - -/** - * The cursor implementation for a tree index. - */ -public class TreeCursor implements Cursor { - private final TreeIndex tree; - private TreeNode node; - private boolean beforeFirst; - private final SearchRow first, last; - - TreeCursor(TreeIndex tree, TreeNode node, SearchRow first, SearchRow last) { - this.tree = tree; - this.node = node; - this.first = first; - this.last = last; - beforeFirst = true; - } - - @Override - public Row get() { - return node == null ? 
null : node.row; - } - - @Override - public SearchRow getSearchRow() { - return get(); - } - - @Override - public boolean next() { - if (beforeFirst) { - beforeFirst = false; - if (node == null) { - return false; - } - if (first != null && tree.compareRows(node.row, first) < 0) { - node = next(node); - } - } else { - node = next(node); - } - if (node != null && last != null) { - if (tree.compareRows(node.row, last) > 0) { - node = null; - } - } - return node != null; - } - - @Override - public boolean previous() { - node = previous(node); - return node != null; - } - - /** - * Get the next node if there is one. - * - * @param x the node - * @return the next node or null - */ - private static TreeNode next(TreeNode x) { - if (x == null) { - return null; - } - TreeNode r = x.right; - if (r != null) { - x = r; - TreeNode l = x.left; - while (l != null) { - x = l; - l = x.left; - } - return x; - } - TreeNode ch = x; - x = x.parent; - while (x != null && ch == x.right) { - ch = x; - x = x.parent; - } - return x; - } - - - /** - * Get the previous node if there is one. - * - * @param x the node - * @return the previous node or null - */ - private static TreeNode previous(TreeNode x) { - if (x == null) { - return null; - } - TreeNode l = x.left; - if (l != null) { - x = l; - TreeNode r = x.right; - while (r != null) { - x = r; - r = x.right; - } - return x; - } - TreeNode ch = x; - x = x.parent; - while (x != null && ch == x.left) { - ch = x; - x = x.parent; - } - return x; - } - -} diff --git a/h2/src/main/org/h2/index/TreeIndex.java b/h2/src/main/org/h2/index/TreeIndex.java deleted file mode 100644 index 689615d2b5..0000000000 --- a/h2/src/main/org/h2/index/TreeIndex.java +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.engine.Session; -import org.h2.engine.SysProperties; -import org.h2.message.DbException; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.result.SortOrder; -import org.h2.table.IndexColumn; -import org.h2.table.RegularTable; -import org.h2.table.TableFilter; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * The tree index is an in-memory index based on a binary AVL trees. - */ -public class TreeIndex extends BaseIndex { - - private TreeNode root; - private final RegularTable tableData; - private long rowCount; - private boolean closed; - - public TreeIndex(RegularTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { - initBaseIndex(table, id, indexName, columns, indexType); - tableData = table; - if (!database.isStarting()) { - checkIndexColumnTypes(columns); - } - } - - @Override - public void close(Session session) { - root = null; - closed = true; - } - - @Override - public void add(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - TreeNode i = new TreeNode(row); - TreeNode n = root, x = n; - boolean isLeft = true; - while (true) { - if (n == null) { - if (x == null) { - root = i; - rowCount++; - return; - } - set(x, isLeft, i); - break; - } - Row r = n.row; - int compare = compareRows(row, r); - if (compare == 0) { - if (indexType.isUnique()) { - if (!containsNullAndAllowMultipleNull(row)) { - throw getDuplicateKeyException(row.toString()); - } - } - compare = compareKeys(row, r); - } - isLeft = compare < 0; - x = n; - n = child(x, isLeft); - } - balance(x, isLeft); - rowCount++; - } - - private void balance(TreeNode x, boolean isLeft) { - while (true) { - int sign = isLeft ? 
1 : -1; - switch (x.balance * sign) { - case 1: - x.balance = 0; - return; - case 0: - x.balance = -sign; - break; - case -1: - TreeNode l = child(x, isLeft); - if (l.balance == -sign) { - replace(x, l); - set(x, isLeft, child(l, !isLeft)); - set(l, !isLeft, x); - x.balance = 0; - l.balance = 0; - } else { - TreeNode r = child(l, !isLeft); - replace(x, r); - set(l, !isLeft, child(r, isLeft)); - set(r, isLeft, l); - set(x, isLeft, child(r, !isLeft)); - set(r, !isLeft, x); - int rb = r.balance; - x.balance = (rb == -sign) ? sign : 0; - l.balance = (rb == sign) ? -sign : 0; - r.balance = 0; - } - return; - default: - DbException.throwInternalError("b:" + x.balance * sign); - } - if (x == root) { - return; - } - isLeft = x.isFromLeft(); - x = x.parent; - } - } - - private static TreeNode child(TreeNode x, boolean isLeft) { - return isLeft ? x.left : x.right; - } - - private void replace(TreeNode x, TreeNode n) { - if (x == root) { - root = n; - if (n != null) { - n.parent = null; - } - } else { - set(x.parent, x.isFromLeft(), n); - } - } - - private static void set(TreeNode parent, boolean left, TreeNode n) { - if (left) { - parent.left = n; - } else { - parent.right = n; - } - if (n != null) { - n.parent = parent; - } - } - - @Override - public void remove(Session session, Row row) { - if (closed) { - throw DbException.throwInternalError(); - } - TreeNode x = findFirstNode(row, true); - if (x == null) { - throw DbException.throwInternalError("not found!"); - } - TreeNode n; - if (x.left == null) { - n = x.right; - } else if (x.right == null) { - n = x.left; - } else { - TreeNode d = x; - x = x.left; - for (TreeNode temp = x; (temp = temp.right) != null;) { - x = temp; - } - // x will be replaced with n later - n = x.left; - // swap d and x - int b = x.balance; - x.balance = d.balance; - d.balance = b; - - // set x.parent - TreeNode xp = x.parent; - TreeNode dp = d.parent; - if (d == root) { - root = x; - } - x.parent = dp; - if (dp != null) { - if (dp.right == d) { - 
dp.right = x; - } else { - dp.left = x; - } - } - // TODO index / tree: link d.r = x(p?).r directly - if (xp == d) { - d.parent = x; - if (d.left == x) { - x.left = d; - x.right = d.right; - } else { - x.right = d; - x.left = d.left; - } - } else { - d.parent = xp; - xp.right = d; - x.right = d.right; - x.left = d.left; - } - - if (SysProperties.CHECK && x.right == null) { - DbException.throwInternalError("tree corrupted"); - } - x.right.parent = x; - x.left.parent = x; - // set d.left, d.right - d.left = n; - if (n != null) { - n.parent = d; - } - d.right = null; - x = d; - } - rowCount--; - - boolean isLeft = x.isFromLeft(); - replace(x, n); - n = x.parent; - while (n != null) { - x = n; - int sign = isLeft ? 1 : -1; - switch (x.balance * sign) { - case -1: - x.balance = 0; - break; - case 0: - x.balance = sign; - return; - case 1: - TreeNode r = child(x, !isLeft); - int b = r.balance; - if (b * sign >= 0) { - replace(x, r); - set(x, !isLeft, child(r, isLeft)); - set(r, isLeft, x); - if (b == 0) { - x.balance = sign; - r.balance = -sign; - return; - } - x.balance = 0; - r.balance = 0; - x = r; - } else { - TreeNode l = child(r, isLeft); - replace(x, l); - b = l.balance; - set(r, isLeft, child(l, !isLeft)); - set(l, !isLeft, r); - set(x, !isLeft, child(l, isLeft)); - set(l, isLeft, x); - x.balance = (b == sign) ? -sign : 0; - r.balance = (b == -sign) ? 
sign : 0; - l.balance = 0; - x = l; - } - break; - default: - DbException.throwInternalError("b: " + x.balance * sign); - } - isLeft = x.isFromLeft(); - n = x.parent; - } - } - - private TreeNode findFirstNode(SearchRow row, boolean withKey) { - TreeNode x = root, result = x; - while (x != null) { - result = x; - int compare = compareRows(x.row, row); - if (compare == 0 && withKey) { - compare = compareKeys(x.row, row); - } - if (compare == 0) { - if (withKey) { - return x; - } - x = x.left; - } else if (compare > 0) { - x = x.left; - } else { - x = x.right; - } - } - return result; - } - - @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(first, last); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(first, last); - } - - private Cursor find(SearchRow first, SearchRow last) { - if (first == null) { - TreeNode x = root, n; - while (x != null) { - n = x.left; - if (n == null) { - break; - } - x = n; - } - return new TreeCursor(this, x, null, last); - } - TreeNode x = findFirstNode(first, false); - return new TreeCursor(this, x, first, last); - } - - @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return getCostRangeIndex(masks, tableData.getRowCountApproximation(), - filter, sortOrder); - } - - @Override - public void remove(Session session) { - truncate(session); - } - - @Override - public void truncate(Session session) { - root = null; - rowCount = 0; - } - - @Override - public void checkRename() { - // nothing to do - } - - @Override - public boolean needRebuild() { - return true; - } - - @Override - public boolean canGetFirstOrLast() { - return true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (closed) { - throw DbException.throwInternalError(); - } - if (first) { - // TODO optimization: this loops through NULL - Cursor cursor = find(session, null, 
null); - while (cursor.next()) { - SearchRow row = cursor.getSearchRow(); - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } - return cursor; - } - TreeNode x = root, n; - while (x != null) { - n = x.right; - if (n == null) { - break; - } - x = n; - } - TreeCursor cursor = new TreeCursor(this, x, null, null); - if (x == null) { - return cursor; - } - // TODO optimization: this loops through NULL elements - do { - SearchRow row = cursor.getSearchRow(); - if (row == null) { - break; - } - Value v = row.getValue(columnIds[0]); - if (v != ValueNull.INSTANCE) { - return cursor; - } - } while (cursor.previous()); - return cursor; - } - - @Override - public long getRowCount(Session session) { - return rowCount; - } - - @Override - public long getRowCountApproximation() { - return rowCount; - } - - @Override - public long getDiskSpaceUsed() { - return 0; - } - -} diff --git a/h2/src/main/org/h2/index/TreeNode.java b/h2/src/main/org/h2/index/TreeNode.java deleted file mode 100644 index 681bbb7edf..0000000000 --- a/h2/src/main/org/h2/index/TreeNode.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.index; - -import org.h2.result.Row; - -/** - * Represents a index node of a tree index. - */ -class TreeNode { - - /** - * The balance. For more information, see the AVL tree documentation. - */ - int balance; - - /** - * The left child node or null. - */ - TreeNode left; - - /** - * The right child node or null. - */ - TreeNode right; - - /** - * The parent node or null if this is the root node. - */ - TreeNode parent; - - /** - * The row. - */ - final Row row; - - TreeNode(Row row) { - this.row = row; - } - - /** - * Check if this node is the left child of its parent. This method returns - * true if this is the root node. 
- * - * @return true if this node is the root or a left child - */ - boolean isFromLeft() { - return parent == null || parent.left == this; - } - -} diff --git a/h2/src/main/org/h2/index/ViewCursor.java b/h2/src/main/org/h2/index/ViewCursor.java index 85d8d9dfd1..53ac2a72ab 100644 --- a/h2/src/main/org/h2/index/ViewCursor.java +++ b/h2/src/main/org/h2/index/ViewCursor.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; import org.h2.message.DbException; -import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.table.Table; @@ -20,11 +20,11 @@ public class ViewCursor implements Cursor { private final Table table; private final ViewIndex index; - private final LocalResult result; + private final ResultInterface result; private final SearchRow first, last; private Row current; - ViewCursor(ViewIndex index, LocalResult result, SearchRow first, + public ViewCursor(ViewIndex index, ResultInterface result, SearchRow first, SearchRow last) { this.table = index.getTable(); this.index = index; @@ -81,7 +81,7 @@ public boolean next() { @Override public boolean previous() { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } } diff --git a/h2/src/main/org/h2/index/ViewIndex.java b/h2/src/main/org/h2/index/ViewIndex.java index e79bf2451d..173fe9a9b8 100644 --- a/h2/src/main/org/h2/index/ViewIndex.java +++ b/h2/src/main/org/h2/index/ViewIndex.java @@ -1,21 +1,25 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.index; import java.util.ArrayList; +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; -import org.h2.command.dml.Query; -import org.h2.command.dml.SelectUnion; +import org.h2.command.Parser; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.command.query.Query; +import org.h2.command.query.SelectUnion; import org.h2.engine.Constants; -import org.h2.engine.Session; -import org.h2.expression.Comparison; +import org.h2.engine.SessionLocal; import org.h2.expression.Parameter; +import org.h2.expression.condition.Comparison; import org.h2.message.DbException; import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; @@ -24,31 +28,41 @@ import org.h2.table.TableFilter; import org.h2.table.TableView; import org.h2.util.IntArray; -import org.h2.util.New; -import org.h2.util.SmallLRUCache; -import org.h2.util.SynchronizedVerifier; -import org.h2.util.Utils; import org.h2.value.Value; /** * This object represents a virtual index for a query. * Actually it only represents a prepared SELECT statement. */ -public class ViewIndex extends BaseIndex implements SpatialIndex { +public class ViewIndex extends Index implements SpatialIndex { + + private static final long MAX_AGE_NANOS = + TimeUnit.MILLISECONDS.toNanos(Constants.VIEW_COST_CACHE_MAX_AGE); private final TableView view; private final String querySQL; private final ArrayList originalParameters; - private final SmallLRUCache costCache = - SmallLRUCache.newInstance(Constants.VIEW_INDEX_CACHE_SIZE); private boolean recursive; private final int[] indexMasks; private Query query; - private final Session createSession; + private final SessionLocal createSession; + /** + * The time in nanoseconds when this index (and its cost) was calculated. 
+ */ + private final long evaluatedAt; + + /** + * Constructor for the original index in {@link TableView}. + * + * @param view the table view + * @param querySQL the query SQL + * @param originalParameters the original parameters + * @param recursive if the view is recursive + */ public ViewIndex(TableView view, String querySQL, ArrayList originalParameters, boolean recursive) { - initBaseIndex(view, 0, null, null, IndexType.createNonUnique(false)); + super(view, 0, null, null, 0, IndexType.createNonUnique(false)); this.view = view; this.querySQL = querySQL; this.originalParameters = originalParameters; @@ -56,11 +70,26 @@ public ViewIndex(TableView view, String querySQL, columns = new Column[0]; this.createSession = null; this.indexMasks = null; + // this is a main index of TableView, it does not need eviction time + // stamp + evaluatedAt = Long.MIN_VALUE; } - public ViewIndex(TableView view, ViewIndex index, Session session, - int[] masks) { - initBaseIndex(view, 0, null, null, IndexType.createNonUnique(false)); + /** + * Constructor for plan item generation. Over this index the query will be + * executed. 
+ * + * @param view the table view + * @param index the view index + * @param session the session + * @param masks the masks + * @param filters table filters + * @param filter current filter + * @param sortOrder sort order + */ + public ViewIndex(TableView view, ViewIndex index, SessionLocal session, + int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder) { + super(view, 0, null, null, 0, IndexType.createNonUnique(false)); this.view = view; this.querySQL = index.querySQL; this.originalParameters = index.originalParameters; @@ -71,175 +100,137 @@ public ViewIndex(TableView view, ViewIndex index, Session session, if (!recursive) { query = getQuery(session, masks); } + if (recursive || view.getTopQuery() != null) { + evaluatedAt = Long.MAX_VALUE; + } else { + long time = System.nanoTime(); + if (time == Long.MAX_VALUE) { + time++; + } + evaluatedAt = time; + } } - public Session getSession() { + public SessionLocal getSession() { return createSession; } + public boolean isExpired() { + assert evaluatedAt != Long.MIN_VALUE : "must not be called for main index of TableView"; + return !recursive && view.getTopQuery() == null && + System.nanoTime() - evaluatedAt > MAX_AGE_NANOS; + } + @Override public String getPlanSQL() { - return query == null ? null : query.getPlanSQL(); + return query == null ? null : query.getPlanSQL(TRACE_SQL_FLAGS | ADD_PLAN_INFORMATION); } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { throw DbException.getUnsupportedException("VIEW"); } - /** - * A calculated cost value. - */ - static class CostElement { - - /** - * The time in milliseconds when this cost was calculated. 
- */ - long evaluatedAt; - - /** - * The cost. - */ - double cost; - } - @Override - public synchronized double getCost(Session session, int[] masks, - TableFilter filter, SortOrder sortOrder) { - if (recursive) { - return 1000; - } - IntArray masksArray = new IntArray(masks == null ? - Utils.EMPTY_INT_ARRAY : masks); - SynchronizedVerifier.check(costCache); - CostElement cachedCost = costCache.get(masksArray); - if (cachedCost != null) { - long time = System.currentTimeMillis(); - if (time < cachedCost.evaluatedAt + Constants.VIEW_COST_CACHE_MAX_AGE) { - return cachedCost.cost; - } - } - Query q = (Query) session.prepare(querySQL, true); - if (masks != null) { - IntArray paramIndex = new IntArray(); - for (int i = 0; i < masks.length; i++) { - int mask = masks[i]; - if (mask == 0) { - continue; - } - paramIndex.add(i); - } - int len = paramIndex.size(); - for (int i = 0; i < len; i++) { - int idx = paramIndex.get(i); - int mask = masks[idx]; - int nextParamIndex = q.getParameters().size() + view.getParameterOffset(); - if ((mask & IndexCondition.EQUALITY) != 0) { - Parameter param = new Parameter(nextParamIndex); - q.addGlobalCondition(param, idx, Comparison.EQUAL_NULL_SAFE); - } else if ((mask & IndexCondition.SPATIAL_INTERSECTS) != 0) { - Parameter param = new Parameter(nextParamIndex); - q.addGlobalCondition(param, idx, Comparison.SPATIAL_INTERSECTS); - } else { - if ((mask & IndexCondition.START) != 0) { - Parameter param = new Parameter(nextParamIndex); - q.addGlobalCondition(param, idx, Comparison.BIGGER_EQUAL); - } - if ((mask & IndexCondition.END) != 0) { - Parameter param = new Parameter(nextParamIndex); - q.addGlobalCondition(param, idx, Comparison.SMALLER_EQUAL); - } - } - } - String sql = q.getPlanSQL(); - q = (Query) session.prepare(sql, true); - } - double cost = q.getCost(); - cachedCost = new CostElement(); - cachedCost.evaluatedAt = System.currentTimeMillis(); - cachedCost.cost = cost; - costCache.put(masksArray, cachedCost); - return cost; + 
public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return recursive ? 1000 : query.getCost(); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return find(session, first, last, null); } @Override - public Cursor findByGeometry(TableFilter filter, SearchRow intersection) { - return find(filter.getSession(), null, null, intersection); + public Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { + return find(session, first, last, intersection); } - private Cursor find(Session session, SearchRow first, SearchRow last, - SearchRow intersection) { - if (recursive) { - LocalResult recResult = view.getRecursiveResult(); - if (recResult != null) { - recResult.reset(); - return new ViewCursor(this, recResult, first, last); - } - if (query == null) { - query = (Query) createSession.prepare(querySQL, true); - } - if (!(query instanceof SelectUnion)) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_2, - "recursive queries without UNION ALL"); - } - SelectUnion union = (SelectUnion) query; - if (union.getUnionType() != SelectUnion.UNION_ALL) { - throw DbException.get(ErrorCode.SYNTAX_ERROR_2, - "recursive queries without UNION ALL"); - } - Query left = union.getLeft(); - // to ensure the last result is not closed - left.disableCache(); - LocalResult r = left.query(0); - LocalResult result = union.getEmptyResult(); - // ensure it is not written to disk, - // because it is not closed normally - result.setMaxMemoryRows(Integer.MAX_VALUE); - while (r.next()) { - result.addRow(r.currentRow()); + private Cursor findRecursive(SearchRow first, SearchRow last) { + assert recursive; + ResultInterface recursiveResult = view.getRecursiveResult(); + if (recursiveResult != null) { + recursiveResult.reset(); + return new 
ViewCursor(this, recursiveResult, first, last); + } + if (query == null) { + Parser parser = new Parser(createSession); + parser.setRightsChecked(true); + parser.setSuppliedParameters(originalParameters); + query = (Query) parser.prepare(querySQL); + query.setNeverLazy(true); + } + if (!query.isUnion()) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_2, + "recursive queries without UNION"); + } + SelectUnion union = (SelectUnion) query; + Query left = union.getLeft(); + left.setNeverLazy(true); + // to ensure the last result is not closed + left.disableCache(); + ResultInterface resultInterface = left.query(0); + LocalResult localResult = union.getEmptyResult(); + // ensure it is not written to disk, + // because it is not closed normally + localResult.setMaxMemoryRows(Integer.MAX_VALUE); + while (resultInterface.next()) { + Value[] cr = resultInterface.currentRow(); + localResult.addRow(cr); + } + Query right = union.getRight(); + right.setNeverLazy(true); + resultInterface.reset(); + view.setRecursiveResult(resultInterface); + // to ensure the last result is not closed + right.disableCache(); + while (true) { + resultInterface = right.query(0); + if (!resultInterface.hasNext()) { + break; } - Query right = union.getRight(); - r.reset(); - view.setRecursiveResult(r); - // to ensure the last result is not closed - right.disableCache(); - while (true) { - r = right.query(0); - if (r.getRowCount() == 0) { - break; - } - while (r.next()) { - result.addRow(r.currentRow()); - } - r.reset(); - view.setRecursiveResult(r); + while (resultInterface.next()) { + Value[] cr = resultInterface.currentRow(); + localResult.addRow(cr); } - view.setRecursiveResult(null); - result.done(); - return new ViewCursor(this, result, first, last); + resultInterface.reset(); + view.setRecursiveResult(resultInterface); } + view.setRecursiveResult(null); + localResult.done(); + return new ViewCursor(this, localResult, first, last); + } + + /** + * Set the query parameters. 
+ * + * @param session the session + * @param first the lower bound + * @param last the upper bound + * @param intersection the intersection + */ + public void setupQueryParameters(SessionLocal session, SearchRow first, SearchRow last, + SearchRow intersection) { ArrayList paramList = query.getParameters(); if (originalParameters != null) { - for (int i = 0, size = originalParameters.size(); i < size; i++) { - Parameter orig = originalParameters.get(i); - int idx = orig.getIndex(); - Value value = orig.getValue(session); - setParameter(paramList, idx, value); + for (Parameter orig : originalParameters) { + if (orig != null) { + int idx = orig.getIndex(); + Value value = orig.getValue(session); + setParameter(paramList, idx, value); + } } } int len; @@ -252,8 +243,7 @@ private Cursor find(Session session, SearchRow first, SearchRow last, } else { len = 0; } - int idx = originalParameters == null ? 0 : originalParameters.size(); - idx += view.getParameterOffset(); + int idx = view.getParameterOffset(originalParameters); for (int i = 0; i < len; i++) { int mask = indexMasks[i]; if ((mask & IndexCondition.EQUALITY) != 0) { @@ -269,7 +259,15 @@ private Cursor find(Session session, SearchRow first, SearchRow last, setParameter(paramList, idx++, intersection.getValue(i)); } } - LocalResult result = query.query(0); + } + + private Cursor find(SessionLocal session, SearchRow first, SearchRow last, + SearchRow intersection) { + if (recursive) { + return findRecursive(first, last); + } + setupQueryParameters(session, first, last, intersection); + ResultInterface result = query.query(0); return new ViewCursor(this, result, first, last); } @@ -284,18 +282,23 @@ private static void setParameter(ArrayList paramList, int x, param.setValue(v); } - private Query getQuery(Session session, int[] masks) { - Query q = (Query) session.prepare(querySQL, true); + public Query getQuery() { + return query; + } + + private Query getQuery(SessionLocal session, int[] masks) { + Query q = 
(Query) session.prepare(querySQL, true, true); if (masks == null) { return q; } if (!q.allowGlobalConditions()) { return q; } - int firstIndexParam = originalParameters == null ? - 0 : originalParameters.size(); - firstIndexParam += view.getParameterOffset(); - IntArray paramIndex = new IntArray(); + int firstIndexParam = view.getParameterOffset(originalParameters); + // the column index of each parameter + // (for example: paramColumnIndex {0, 0} mean + // param[0] is column 0, and param[1] is also column 0) + IntArray paramColumnIndex = new IntArray(); int indexColumnCount = 0; for (int i = 0; i < masks.length; i++) { int mask = masks[i]; @@ -303,16 +306,18 @@ private Query getQuery(Session session, int[] masks) { continue; } indexColumnCount++; - paramIndex.add(i); - if (Integer.bitCount(mask) > 1) { - // two parameters for range queries: >= x AND <= y - paramIndex.add(i); + // the number of parameters depends on the mask; + // for range queries it is 2: >= x AND <= y + // but bitMask could also be 7 (=, and <=, and >=) + int bitCount = Integer.bitCount(mask); + for (int j = 0; j < bitCount; j++) { + paramColumnIndex.add(i); } } - int len = paramIndex.size(); - ArrayList columnList = New.arrayList(); + int len = paramColumnIndex.size(); + ArrayList columnList = new ArrayList<>(len); for (int i = 0; i < len;) { - int idx = paramIndex.get(i); + int idx = paramColumnIndex.get(i); columnList.add(table.getColumn(idx)); int mask = masks[idx]; if ((mask & IndexCondition.EQUALITY) != 0) { @@ -336,8 +341,7 @@ private Query getQuery(Session session, int[] masks) { i++; } } - columns = new Column[columnList.size()]; - columnList.toArray(columns); + columns = columnList.toArray(new Column[0]); // reconstruct the index columns from the masks this.indexColumns = new IndexColumn[indexColumnCount]; @@ -359,26 +363,25 @@ private Query getQuery(Session session, int[] masks) { continue; } } - IndexColumn c = new IndexColumn(); - c.column = table.getColumn(i); - 
indexColumns[indexColumnId] = c; - columnIds[indexColumnId] = c.column.getColumnId(); + Column column = table.getColumn(i); + indexColumns[indexColumnId] = new IndexColumn(column); + columnIds[indexColumnId] = column.getColumnId(); indexColumnId++; } } - String sql = q.getPlanSQL(); - q = (Query) session.prepare(sql, true); + String sql = q.getPlanSQL(DEFAULT_SQL_FLAGS); + q = (Query) session.prepare(sql, true, true); return q; } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { throw DbException.getUnsupportedException("VIEW"); } @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { throw DbException.getUnsupportedException("VIEW"); } @@ -392,37 +395,21 @@ public boolean needRebuild() { return false; } - @Override - public boolean canGetFirstOrLast() { - return false; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - throw DbException.getUnsupportedException("VIEW"); - } - public void setRecursive(boolean value) { this.recursive = value; } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return 0; } @Override - public long getRowCountApproximation() { - return 0; - } - - @Override - public long getDiskSpaceUsed() { + public long getRowCountApproximation(SessionLocal session) { return 0; } public boolean isRecursive() { return recursive; } - } diff --git a/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java b/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java new file mode 100644 index 0000000000..bde72c8df3 --- /dev/null +++ b/h2/src/main/org/h2/index/VirtualConstructedTableIndex.java @@ -0,0 +1,66 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.SearchRow; +import org.h2.result.SortOrder; +import org.h2.table.FunctionTable; +import org.h2.table.IndexColumn; +import org.h2.table.TableFilter; +import org.h2.table.VirtualConstructedTable; + +/** + * An index for a virtual table that returns a result set. Search in this index + * performs scan over all rows and should be avoided. + */ +public class VirtualConstructedTableIndex extends VirtualTableIndex { + + private final VirtualConstructedTable table; + + public VirtualConstructedTableIndex(VirtualConstructedTable table, IndexColumn[] columns) { + super(table, null, columns); + this.table = table; + } + + @Override + public boolean isFindUsingFullTableScan() { + return true; + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return new VirtualTableCursor(this, first, last, table.getResult(session)); + } + + @Override + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + if (masks != null) { + throw DbException.getUnsupportedException("Virtual table"); + } + long expectedRows; + if (table.canGetRowCount(session)) { + expectedRows = table.getRowCountApproximation(session); + } else { + expectedRows = database.getSettings().estimatedFunctionTableRows; + } + return expectedRows * 10; + } + + @Override + public String getPlanSQL() { + return table instanceof FunctionTable ? 
"function" : "table scan"; + } + + @Override + public boolean canScan() { + return false; + } + +} diff --git a/h2/src/main/org/h2/index/VirtualTableCursor.java b/h2/src/main/org/h2/index/VirtualTableCursor.java new file mode 100644 index 0000000000..0831454efb --- /dev/null +++ b/h2/src/main/org/h2/index/VirtualTableCursor.java @@ -0,0 +1,112 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.value.Value; + +/** + * A cursor for a virtual table. This implementation filters the rows (only + * returns entries that are larger or equal to "first", and smaller than last or + * equal to "last"). + */ +class VirtualTableCursor implements Cursor { + + private final VirtualTableIndex index; + + private final SearchRow first; + + private final SearchRow last; + + private final ResultInterface result; + + Value[] values; + + Row row; + + /** + * @param index + * index + * @param first + * first row + * @param last + * last row + * @param result + * the result + */ + VirtualTableCursor(VirtualTableIndex index, SearchRow first, SearchRow last, + ResultInterface result) { + this.index = index; + this.first = first; + this.last = last; + this.result = result; + } + + @Override + public Row get() { + if (values == null) { + return null; + } + if (row == null) { + row = Row.get(values, 1); + } + return row; + } + + @Override + public SearchRow getSearchRow() { + return get(); + } + + @Override + public boolean next() { + final SearchRow first = this.first, last = this.last; + if (first == null && last == null) { + return nextImpl(); + } + while (nextImpl()) { + Row current = get(); + if (first != null) { + int comp = index.compareRows(current, first); + if (comp < 0) { + continue; + 
} + } + if (last != null) { + int comp = index.compareRows(current, last); + if (comp > 0) { + continue; + } + } + return true; + } + return false; + } + + /** + * Skip to the next row if one is available. This method does not filter. + * + * @return true if another row is available + */ + private boolean nextImpl() { + row = null; + if (result != null && result.next()) { + values = result.currentRow(); + } else { + values = null; + } + return values != null; + } + + @Override + public boolean previous() { + throw DbException.getInternalError(toString()); + } + +} diff --git a/h2/src/main/org/h2/index/VirtualTableIndex.java b/h2/src/main/org/h2/index/VirtualTableIndex.java new file mode 100644 index 0000000000..eee94df727 --- /dev/null +++ b/h2/src/main/org/h2/index/VirtualTableIndex.java @@ -0,0 +1,68 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.index; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.table.IndexColumn; +import org.h2.table.VirtualTable; + +/** + * An base class for indexes of virtual tables. 
+ */ +public abstract class VirtualTableIndex extends Index { + + protected VirtualTableIndex(VirtualTable table, String name, IndexColumn[] columns) { + super(table, 0, name, columns, 0, IndexType.createNonUnique(true)); + } + + @Override + public void close(SessionLocal session) { + // nothing to do + } + + @Override + public void add(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void remove(SessionLocal session, Row row) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void remove(SessionLocal session) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public void truncate(SessionLocal session) { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public boolean needRebuild() { + return false; + } + + @Override + public void checkRename() { + throw DbException.getUnsupportedException("Virtual table"); + } + + @Override + public long getRowCount(SessionLocal session) { + return table.getRowCount(session); + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return table.getRowCountApproximation(session); + } + +} diff --git a/h2/src/main/org/h2/index/package.html b/h2/src/main/org/h2/index/package.html index d61b187072..40a17031a5 100644 --- a/h2/src/main/org/h2/index/package.html +++ b/h2/src/main/org/h2/index/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jdbc/JdbcArray.java b/h2/src/main/org/h2/jdbc/JdbcArray.java index 3df593e8ad..90c745d051 100644 --- a/h2/src/main/org/h2/jdbc/JdbcArray.java +++ b/h2/src/main/org/h2/jdbc/JdbcArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -8,30 +8,37 @@ import java.sql.Array; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Types; import java.util.Map; import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.message.TraceObject; -import org.h2.tools.SimpleResultSet; +import org.h2.result.SimpleResult; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueToObjectConverter; /** * Represents an ARRAY value. */ -public class JdbcArray extends TraceObject implements Array { +public final class JdbcArray extends TraceObject implements Array { - private Value value; + private ValueArray value; private final JdbcConnection conn; /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param id of the trace object */ - JdbcArray(JdbcConnection conn, Value value, int id) { + public JdbcArray(JdbcConnection conn, Value value, int id) { setTrace(conn.getSession().getTrace(), TraceObject.ARRAY, id); this.conn = conn; - this.value = value; + this.value = value.convertToAnyArray(conn); } /** @@ -61,7 +68,9 @@ public Object getArray() throws SQLException { @Override public Object getArray(Map> map) throws SQLException { try { - debugCode("getArray("+quoteMap(map)+");"); + if (isDebugEnabled()) { + debugCode("getArray(" + quoteMap(map) + ')'); + } JdbcConnection.checkMap(map); checkClosed(); return get(); @@ -82,7 +91,9 @@ public Object getArray(Map> map) throws SQLException { @Override public Object getArray(long index, int count) throws SQLException { try { - debugCode("getArray(" + index + ", " + count + ");"); + if (isDebugEnabled()) { + debugCode("getArray(" + index + ", " + count + ')'); + } checkClosed(); return get(index, count); } catch (Exception e) { @@ -104,7 +115,9 @@ public Object getArray(long index, int count) throws SQLException { public Object getArray(long 
index, int count, Map> map) throws SQLException { try { - debugCode("getArray(" + index + ", " + count + ", " + quoteMap(map)+");"); + if (isDebugEnabled()) { + debugCode("getArray(" + index + ", " + count + ", " + quoteMap(map) + ')'); + } checkClosed(); JdbcConnection.checkMap(map); return get(index, count); @@ -114,17 +127,16 @@ public Object getArray(long index, int count, Map> map) } /** - * Returns the base type of the array. This database does support mixed type - * arrays and therefore there is no base type. + * Returns the base type of the array. * - * @return Types.NULL + * @return the base type or Types.NULL */ @Override public int getBaseType() throws SQLException { try { debugCodeCall("getBaseType"); checkClosed(); - return Types.NULL; + return DataType.convertTypeToSQLType(value.getComponentType()); } catch (Exception e) { throw logAndConvert(e); } @@ -134,14 +146,14 @@ public int getBaseType() throws SQLException { * Returns the base type name of the array. This database does support mixed * type arrays and therefore there is no base type. 
* - * @return "NULL" + * @return the base type name or "NULL" */ @Override public String getBaseTypeName() throws SQLException { try { debugCodeCall("getBaseTypeName"); checkClosed(); - return "NULL"; + return value.getComponentType().getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } @@ -159,7 +171,7 @@ public ResultSet getResultSet() throws SQLException { try { debugCodeCall("getResultSet"); checkClosed(); - return getResultSet(get(), 0); + return getResultSetImpl(1L, Integer.MAX_VALUE); } catch (Exception e) { throw logAndConvert(e); } @@ -175,10 +187,12 @@ public ResultSet getResultSet() throws SQLException { @Override public ResultSet getResultSet(Map> map) throws SQLException { try { - debugCode("getResultSet("+quoteMap(map)+");"); + if (isDebugEnabled()) { + debugCode("getResultSet(" + quoteMap(map) + ')'); + } checkClosed(); JdbcConnection.checkMap(map); - return getResultSet(get(), 0); + return getResultSetImpl(1L, Integer.MAX_VALUE); } catch (Exception e) { throw logAndConvert(e); } @@ -197,9 +211,11 @@ public ResultSet getResultSet(Map> map) throws SQLException { @Override public ResultSet getResultSet(long index, int count) throws SQLException { try { - debugCode("getResultSet("+index+", " + count+");"); + if (isDebugEnabled()) { + debugCode("getResultSet(" + index + ", " + count + ')'); + } checkClosed(); - return getResultSet(get(index, count), index - 1); + return getResultSetImpl(index, count); } catch (Exception e) { throw logAndConvert(e); } @@ -221,10 +237,12 @@ public ResultSet getResultSet(long index, int count) throws SQLException { public ResultSet getResultSet(long index, int count, Map> map) throws SQLException { try { - debugCode("getResultSet("+index+", " + count+", " + quoteMap(map)+");"); + if (isDebugEnabled()) { + debugCode("getResultSet(" + index + ", " + count + ", " + quoteMap(map) + ')'); + } checkClosed(); JdbcConnection.checkMap(map); - return getResultSet(get(index, count), index - 1); + return 
getResultSetImpl(index, count); } catch (Exception e) { throw logAndConvert(e); } @@ -239,15 +257,17 @@ public void free() { value = null; } - private static ResultSet getResultSet(Object[] array, long offset) { - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("INDEX", Types.BIGINT, 0, 0); - // TODO array result set: there are multiple data types possible - rs.addColumn("VALUE", Types.NULL, 0, 0); - for (int i = 0; i < array.length; i++) { - rs.addRow(Long.valueOf(offset + i + 1), array[i]); + private ResultSet getResultSetImpl(long index, int count) { + int id = getNextId(TraceObject.RESULT_SET); + SimpleResult rs = new SimpleResult(); + rs.addColumn("INDEX", TypeInfo.TYPE_BIGINT); + rs.addColumn("VALUE", value.getComponentType()); + Value[] values = value.getList(); + count = checkRange(index, count, values.length); + for (int i = (int) index; i < index + count; i++) { + rs.addRow(ValueBigint.get(i), values[i - 1]); } - return rs; + return new JdbcResultSet(conn, null, null, rs, id, true, false, false); } private void checkClosed() { @@ -257,23 +277,29 @@ private void checkClosed() { } } - private Object[] get() { - return (Object[]) value.convertTo(Value.ARRAY).getObject(); + private Object get() { + return ValueToObjectConverter.valueToDefaultArray(value, conn, true); } - private Object[] get(long index, int count) { - Object[] array = get(); - if (count < 0 || count > array.length) { - throw DbException.getInvalidValueException("count (1.." - + array.length + ")", count); + private Object get(long index, int count) { + Value[] values = value.getList(); + count = checkRange(index, count, values.length); + Object[] a = new Object[count]; + for (int i = 0, j = (int) index - 1; i < count; i++, j++) { + a[i] = ValueToObjectConverter.valueToDefaultObject(values[j], conn, true); } - if (index < 1 || index > array.length) { - throw DbException.getInvalidValueException("index (1.." 
- + array.length + ")", index); + return a; + } + + private static int checkRange(long index, int count, int len) { + if (index < 1 || (index != 1 && index > len)) { + throw DbException.getInvalidValueException("index (1.." + len + ')', index); + } + int rem = len - (int) index + 1; + if (count < 0) { + throw DbException.getInvalidValueException("count (0.." + rem + ')', count); } - Object[] subset = new Object[count]; - System.arraycopy(array, (int) (index - 1), subset, 0, count); - return subset; + return Math.min(rem, count); } /** diff --git a/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java b/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java index 9484215811..e8040c8a82 100644 --- a/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java +++ b/h2/src/main/org/h2/jdbc/JdbcBatchUpdateException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,18 +13,30 @@ /** * Represents a batch update database exception. 
*/ -public class JdbcBatchUpdateException extends BatchUpdateException { +public final class JdbcBatchUpdateException extends BatchUpdateException { private static final long serialVersionUID = 1L; /** * INTERNAL + * @param next exception + * @param updateCounts affected record counts */ JdbcBatchUpdateException(SQLException next, int[] updateCounts) { super(next.getMessage(), next.getSQLState(), next.getErrorCode(), updateCounts); setNextException(next); } + /** + * INTERNAL + * @param next exception + * @param updateCounts affected record counts + */ + JdbcBatchUpdateException(SQLException next, long[] updateCounts) { + super(next.getMessage(), next.getSQLState(), next.getErrorCode(), updateCounts, null); + setNextException(next); + } + /** * INTERNAL */ diff --git a/h2/src/main/org/h2/jdbc/JdbcBlob.java b/h2/src/main/org/h2/jdbc/JdbcBlob.java index 6d7cf1014e..b6a49b1e38 100644 --- a/h2/src/main/org/h2/jdbc/JdbcBlob.java +++ b/h2/src/main/org/h2/jdbc/JdbcBlob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -9,37 +9,33 @@ import java.io.BufferedOutputStream; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PipedInputStream; -import java.io.PipedOutputStream; import java.sql.Blob; import java.sql.SQLException; -import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.message.DbException; import org.h2.message.TraceObject; -import org.h2.util.Task; import org.h2.util.IOUtils; +import org.h2.util.Task; import org.h2.value.Value; /** * Represents a BLOB value. 
*/ -public class JdbcBlob extends TraceObject implements Blob { - - Value value; - private final JdbcConnection conn; +public final class JdbcBlob extends JdbcLob implements Blob { /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param state of the LOB + * @param id of the trace object */ - public JdbcBlob(JdbcConnection conn, Value value, int id) { - setTrace(conn.getSession().getTrace(), TraceObject.BLOB, id); - this.conn = conn; - this.value = value; + public JdbcBlob(JdbcConnection conn, Value value, State state, int id) { + super(conn, value, state, TraceObject.BLOB, id); } /** @@ -51,9 +47,9 @@ public JdbcBlob(JdbcConnection conn, Value value, int id) { public long length() throws SQLException { try { debugCodeCall("length"); - checkClosed(); - if (value.getType() == Value.BLOB) { - long precision = value.getPrecision(); + checkReadable(); + if (value.getValueType() == Value.BLOB) { + long precision = value.getType().getPrecision(); if (precision > 0) { return precision; } @@ -85,16 +81,13 @@ public void truncate(long len) throws SQLException { public byte[] getBytes(long pos, int length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBytes("+pos+", "+length+");"); + debugCode("getBytes(" + pos + ", " + length + ')'); } - checkClosed(); + checkReadable(); ByteArrayOutputStream out = new ByteArrayOutputStream(); - InputStream in = value.getInputStream(); - try { + try (InputStream in = value.getInputStream()) { IOUtils.skipFully(in, pos - 1); IOUtils.copy(in, out, length); - } finally { - in.close(); } return out.toByteArray(); } catch (Exception e) { @@ -113,15 +106,18 @@ public byte[] getBytes(long pos, int length) throws SQLException { */ @Override public int setBytes(long pos, byte[] bytes) throws SQLException { + if (bytes == null) { + throw new NullPointerException(); + } try { if (isDebugEnabled()) { - debugCode("setBytes("+pos+", "+quoteBytes(bytes)+");"); + debugCode("setBytes(" + pos + ", " + 
quoteBytes(bytes) + ')'); } - checkClosed(); + checkEditable(); if (pos != 1) { throw DbException.getInvalidValueException("pos", pos); } - value = conn.createBlob(new ByteArrayInputStream(bytes), -1); + completeWrite(conn.createBlob(new ByteArrayInputStream(bytes), -1)); return bytes.length; } catch (Exception e) { throw logAndConvert(e); @@ -129,7 +125,7 @@ public int setBytes(long pos, byte[] bytes) throws SQLException { } /** - * [Not supported] Sets some bytes of the object. + * Sets some bytes of the object. * * @param pos the write position * @param bytes the bytes to set @@ -140,25 +136,29 @@ public int setBytes(long pos, byte[] bytes) throws SQLException { @Override public int setBytes(long pos, byte[] bytes, int offset, int len) throws SQLException { - throw unsupported("LOB update"); - } - - /** - * Returns the input stream. - * - * @return the input stream - */ - @Override - public InputStream getBinaryStream() throws SQLException { + if (bytes == null) { + throw new NullPointerException(); + } try { - debugCodeCall("getBinaryStream"); - checkClosed(); - return value.getInputStream(); + if (isDebugEnabled()) { + debugCode("setBytes(" + pos + ", " + quoteBytes(bytes) + ", " + offset + ", " + len + ')'); + } + checkEditable(); + if (pos != 1) { + throw DbException.getInvalidValueException("pos", pos); + } + completeWrite(conn.createBlob(new ByteArrayInputStream(bytes, offset, len), -1)); + return (int) value.getType().getPrecision(); } catch (Exception e) { throw logAndConvert(e); } } + @Override + public InputStream getBinaryStream() throws SQLException { + return super.getBinaryStream(); + } + /** * Get a writer to update the Blob. This is only supported for new, empty * Blob objects that were created with Connection.createBlob(). 
The Blob is @@ -173,35 +173,22 @@ public InputStream getBinaryStream() throws SQLException { public OutputStream setBinaryStream(long pos) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBinaryStream("+pos+");"); + debugCodeCall("setBinaryStream", pos); } - checkClosed(); + checkEditable(); if (pos != 1) { throw DbException.getInvalidValueException("pos", pos); } - if (value.getPrecision() != 0) { - throw DbException.getInvalidValueException("length", value.getPrecision()); - } - final JdbcConnection c = conn; final PipedInputStream in = new PipedInputStream(); final Task task = new Task() { @Override public void call() { - value = c.createBlob(in, -1); - } - }; - PipedOutputStream out = new PipedOutputStream(in) { - @Override - public void close() throws IOException { - super.close(); - try { - task.get(); - } catch (Exception e) { - throw DbException.convertToIOException(e); - } + completeWrite(conn.createBlob(in, -1)); } }; + LobPipedOutputStream out = new LobPipedOutputStream(in, task); task.execute(); + state = State.SET_CALLED; return new BufferedOutputStream(out); } catch (Exception e) { throw logAndConvert(e); @@ -218,11 +205,11 @@ public void close() throws IOException { @Override public long position(byte[] pattern, long start) throws SQLException { if (isDebugEnabled()) { - debugCode("position("+quoteBytes(pattern)+", "+start+");"); + debugCode("position(" + quoteBytes(pattern) + ", " + start + ')'); } if (Constants.BLOB_SEARCH) { try { - checkClosed(); + checkReadable(); if (pattern == null) { return -1; } @@ -273,11 +260,11 @@ public long position(byte[] pattern, long start) throws SQLException { @Override public long position(Blob blobPattern, long start) throws SQLException { if (isDebugEnabled()) { - debugCode("position(blobPattern, "+start+");"); + debugCode("position(blobPattern, " + start + ')'); } if (Constants.BLOB_SEARCH) { try { - checkClosed(); + checkReadable(); if (blobPattern == null) { return -1; } @@ -299,16 +286,7 
@@ public long position(Blob blobPattern, long start) throws SQLException { } /** - * Release all resources of this object. - */ - @Override - public void free() { - debugCodeCall("free"); - value = null; - } - - /** - * [Not supported] Returns the input stream, starting from an offset. + * Returns the input stream, starting from an offset. * * @param pos where to start reading * @param length the number of bytes that will be read @@ -316,23 +294,23 @@ public void free() { */ @Override public InputStream getBinaryStream(long pos, long length) throws SQLException { - throw unsupported("LOB update"); - } - - private void checkClosed() { - conn.checkClosed(); - if (value == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); + try { + if (isDebugEnabled()) { + debugCode("getBinaryStream(" + pos + ", " + length + ')'); + } + checkReadable(); + if (state == State.NEW) { + if (pos != 1) { + throw DbException.getInvalidValueException("pos", pos); + } + if (length != 0) { + throw DbException.getInvalidValueException("length", pos); + } + } + return value.getInputStream(pos, length); + } catch (Exception e) { + throw logAndConvert(e); } } - /** - * INTERNAL - */ - @Override - public String toString() { - return getTraceObjectName() + ": " + - (value == null ? "null" : value.getTraceSQL()); - } - } diff --git a/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java b/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java index aa17680109..6541722bbb 100644 --- a/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcCallableStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,37 +19,53 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLXML; import java.sql.Time; import java.sql.Timestamp; +import java.util.BitSet; import java.util.Calendar; import java.util.HashMap; import java.util.Map; - import org.h2.api.ErrorCode; import org.h2.expression.ParameterInterface; import org.h2.message.DbException; import org.h2.message.TraceObject; -import org.h2.util.BitField; -import org.h2.util.New; import org.h2.value.ValueNull; /** * Represents a callable statement. - * + *

    + * Thread safety: the callable statement is not thread-safe. If the same + * callable statement is used by multiple threads access to it must be + * synchronized. The single synchronized block must include assignment of + * parameters, execution of the command and all operations with its result. + *

    + *
    + * synchronized (call) {
    + *     call.setInt(1, 10);
    + *     try (ResultSet rs = call.executeQuery()) {
    + *         while (rs.next()) {
    + *             // Do something
    + *         }
    + *     }
    + * }
    + * synchronized (call) {
    + *     call.setInt(1, 15);
    + *     updateCount = call.executeUpdate();
    + * }
    + * 
    * @author Sergi Vladykin * @author Thomas Mueller */ -public class JdbcCallableStatement extends JdbcPreparedStatement implements - CallableStatement { +public final class JdbcCallableStatement extends JdbcPreparedStatement implements CallableStatement { - private BitField outParameters; + private BitSet outParameters; private int maxOutParameters; private HashMap namedParameters; - JdbcCallableStatement(JdbcConnection conn, String sql, int id, - int resultSetType, int resultSetConcurrency) { - super(conn, sql, id, resultSetType, resultSetConcurrency, false); + JdbcCallableStatement(JdbcConnection conn, String sql, int id, int resultSetType, int resultSetConcurrency) { + super(conn, sql, id, resultSetType, resultSetConcurrency, null); setTrace(session.getTrace(), TraceObject.CALLABLE_STATEMENT, id); } @@ -83,6 +99,36 @@ public int executeUpdate() throws SQLException { } } + /** + * Executes a statement (insert, update, delete, create, drop) + * and returns the update count. + * If another result set exists for this statement, this will be closed + * (even if this statement fails). + * + * If auto commit is on, this statement will be committed. + * If the statement is a DDL statement (create, drop, alter) and does not + * throw an exception, the current transaction (if any) is committed after + * executing the statement. + * + * @return the update count (number of row affected by an insert, update or + * delete, or 0 if no rows or the statement was a create, drop, + * commit or rollback) + * @throws SQLException if this object is closed or invalid + */ + @Override + public long executeLargeUpdate() throws SQLException { + try { + checkClosed(); + if (command.isQuery()) { + super.executeQuery(); + return 0; + } + return super.executeLargeUpdate(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + /** * Registers the given OUT parameter. 
* @@ -300,6 +346,7 @@ public double getDouble(int parameterIndex) throws SQLException { * @throws SQLException if the column is not found or if this object is * closed */ + @Deprecated @Override public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { @@ -323,11 +370,16 @@ public byte[] getBytes(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int parameterIndex) throws SQLException { @@ -337,11 +389,16 @@ public Date getDate(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int parameterIndex) throws SQLException { @@ -351,11 +408,16 @@ public Time getTime(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Timestamp getTimestamp(int parameterIndex) throws SQLException { @@ -455,12 +517,17 @@ public Array getArray(int parameterIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int parameterIndex, Calendar cal) throws SQLException { @@ -471,12 +538,17 @@ public Date getDate(int parameterIndex, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int parameterIndex, Calendar cal) throws SQLException { @@ -487,16 +559,20 @@ public Time getTime(int parameterIndex, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(int, Class) */ @Override - public Timestamp getTimestamp(int parameterIndex, Calendar cal) - throws SQLException { + public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { checkRegistered(parameterIndex); return getOpenResultSet().getTimestamp(parameterIndex, cal); } @@ -512,28 +588,37 @@ public URL getURL(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDateTime.class)} instead. + *

    * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override - public Timestamp getTimestamp(String parameterName, Calendar cal) - throws SQLException { + public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { return getTimestamp(getIndexForName(parameterName), cal); } /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalTime.class)} instead. + *

    * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String parameterName, Calendar cal) throws SQLException { @@ -543,12 +628,17 @@ public Time getTime(String parameterName, Calendar cal) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDate.class)} instead. + *

    * * @param parameterName the parameter name * @param cal the calendar * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String parameterName, Calendar cal) throws SQLException { @@ -641,11 +731,16 @@ public Object getObject(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDateTime.class)} instead. + *

    * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Timestamp getTimestamp(String parameterName) throws SQLException { @@ -654,11 +749,16 @@ public Timestamp getTimestamp(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalTime.class)} instead. + *

    * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String parameterName) throws SQLException { @@ -667,11 +767,16 @@ public Time getTime(String parameterName) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(parameterName, LocalDate.class)} instead. + *

    * * @param parameterName the parameter name * @return the value * @throws SQLException if the column is not found or if this object is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String parameterName) throws SQLException { @@ -843,21 +948,30 @@ public NClob getNClob(String parameterName) throws SQLException { } /** - * [Not supported] Returns the value of the specified column as a SQLXML - * object. + * Returns the value of the specified column as a SQLXML object. + * + * @param parameterIndex the parameter index (1, 2, ...) + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ @Override public SQLXML getSQLXML(int parameterIndex) throws SQLException { - throw unsupported("SQLXML"); + checkRegistered(parameterIndex); + return getOpenResultSet().getSQLXML(parameterIndex); } /** - * [Not supported] Returns the value of the specified column as a SQLXML - * object. + * Returns the value of the specified column as a SQLXML object. + * + * @param parameterName the parameter name + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ @Override public SQLXML getSQLXML(String parameterName) throws SQLException { - throw unsupported("SQLXML"); + return getSQLXML(getIndexForName(parameterName)); } /** @@ -976,45 +1090,60 @@ public void setNull(String parameterName, int sqlType) throws SQLException { /** * Sets the timestamp using a specified time zone. The value will be * converted to the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTimestamp(String parameterName, Timestamp x, Calendar cal) - throws SQLException { + public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { setTimestamp(getIndexForName(parameterName), x, cal); } /** * Sets the time using a specified time zone. The value will be converted to * the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTime(String parameterName, Time x, Calendar cal) - throws SQLException { + public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { setTime(getIndexForName(parameterName), x, cal); } /** * Sets the date using a specified time zone. The value will be converted to * the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @param cal the calendar * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setDate(String parameterName, Date x, Calendar cal) - throws SQLException { + public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { setDate(getIndexForName(parameterName), x, cal); } @@ -1080,6 +1209,38 @@ public void setObject(String parameterName, Object x, int targetSqlType, setObject(getIndexForName(parameterName), x, targetSqlType, scale); } + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterName the parameter name + * @param x the value, null is allowed + * @param targetSqlType the type + * @throws SQLException if this object is closed + */ + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType) throws SQLException { + setObject(getIndexForName(parameterName), x, targetSqlType); + } + + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterName the parameter name + * @param x the value, null is allowed + * @param targetSqlType the type + * @param scaleOrLength is ignored + * @throws SQLException if this object is closed + */ + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType, int scaleOrLength) + throws SQLException { + setObject(getIndexForName(parameterName), x, targetSqlType, scaleOrLength); + } + /** * Sets the value of a parameter as an input stream. * This method does not close the stream. 
@@ -1114,23 +1275,34 @@ public void setAsciiStream(String parameterName, /** * Sets the value of a parameter. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override - public void setTimestamp(String parameterName, Timestamp x) - throws SQLException { + public void setTimestamp(String parameterName, Timestamp x) throws SQLException { setTimestamp(getIndexForName(parameterName), x); } /** * Sets the time using a specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override public void setTime(String parameterName, Time x) throws SQLException { @@ -1139,10 +1311,16 @@ public void setTime(String parameterName, Time x) throws SQLException { /** * Sets the value of a parameter. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterName, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param parameterName the parameter name * @param x the value * @throws SQLException if this object is closed + * @see #setObject(String, Object) */ @Override public void setDate(String parameterName, Date x) throws SQLException { @@ -1555,39 +1733,47 @@ public void setNClob(String parameterName, Reader x) } /** - * [Not supported] Sets the value of a parameter as a SQLXML object. + * Sets the value of a parameter as a SQLXML object. + * + * @param parameterName the parameter name + * @param x the value + * @throws SQLException if this object is closed */ @Override public void setSQLXML(String parameterName, SQLXML x) throws SQLException { - throw unsupported("SQLXML"); + setSQLXML(getIndexForName(parameterName), x); } /** - * [Not supported] + * Returns the value of the specified column as a Java object of the + * specified type. * * @param parameterIndex the parameter index (1, 2, ...) * @param type the class of the returned value + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ -/*## Java 1.7 ## @Override - public T getObject(int parameterIndex, Class type) { - return null; + public T getObject(int parameterIndex, Class type) throws SQLException { + return getOpenResultSet().getObject(parameterIndex, type); } -//*/ /** - * [Not supported] + * Returns the value of the specified column as a Java object of the + * specified type. 
* * @param parameterName the parameter name * @param type the class of the returned value + * @return the value + * @throws SQLException if the column is not found or if this object is + * closed */ -/*## Java 1.7 ## @Override - public T getObject(String parameterName, Class type) { - return null; + public T getObject(String parameterName, Class type) throws SQLException { + return getObject(getIndexForName(parameterName), type); } -//*/ private ResultSetMetaData getCheckedMetaData() throws SQLException { ResultSetMetaData meta = getMetaData(); @@ -1612,7 +1798,7 @@ private void registerOutParameter(int parameterIndex) throws SQLException { maxOutParameters = Math.min( getParameterMetaData().getParameterCount(), getCheckedMetaData().getColumnCount()); - outParameters = new BitField(); + outParameters = new BitSet(); } checkIndexBounds(parameterIndex); ParameterInterface param = command.getParameters().get(--parameterIndex); @@ -1642,7 +1828,7 @@ private int getIndexForName(String parameterName) throws SQLException { if (namedParameters == null) { ResultSetMetaData meta = getCheckedMetaData(); int columnCount = meta.getColumnCount(); - HashMap map = New.hashMap(columnCount); + HashMap map = new HashMap<>(); for (int i = 1; i <= columnCount; i++) { map.put(meta.getColumnLabel(i), i); } diff --git a/h2/src/main/org/h2/jdbc/JdbcClob.java b/h2/src/main/org/h2/jdbc/JdbcClob.java index 86f1ba2dbf..d23dbfafc7 100644 --- a/h2/src/main/org/h2/jdbc/JdbcClob.java +++ b/h2/src/main/org/h2/jdbc/JdbcClob.java @@ -1,15 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; -import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.PipedInputStream; -import java.io.PipedOutputStream; import java.io.Reader; import java.io.StringReader; import java.io.StringWriter; @@ -18,30 +15,27 @@ import java.sql.NClob; import java.sql.SQLException; -import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.message.DbException; import org.h2.message.TraceObject; +import org.h2.store.RangeReader; import org.h2.util.IOUtils; -import org.h2.util.Task; import org.h2.value.Value; /** * Represents a CLOB value. */ -public class JdbcClob extends TraceObject implements NClob -{ - - Value value; - private final JdbcConnection conn; +public final class JdbcClob extends JdbcLob implements NClob { /** * INTERNAL + * @param conn it belongs to + * @param value of + * @param state of the LOB + * @param id of the trace object */ - public JdbcClob(JdbcConnection conn, Value value, int id) { - setTrace(conn.getSession().getTrace(), TraceObject.CLOB, id); - this.conn = conn; - this.value = value; + public JdbcClob(JdbcConnection conn, Value value, State state, int id) { + super(conn, value, state, TraceObject.CLOB, id); } /** @@ -53,9 +47,9 @@ public JdbcClob(JdbcConnection conn, Value value, int id) { public long length() throws SQLException { try { debugCodeCall("length"); - checkClosed(); - if (value.getType() == Value.CLOB) { - long precision = value.getPrecision(); + checkReadable(); + if (value.getValueType() == Value.CLOB) { + long precision = value.getType().getPrecision(); if (precision > 0) { return precision; } @@ -83,7 +77,7 @@ public void truncate(long len) throws SQLException { public InputStream getAsciiStream() throws SQLException { try { debugCodeCall("getAsciiStream"); - checkClosed(); + checkReadable(); String s = value.getString(); return IOUtils.getInputStreamFromString(s); } catch (Exception e) { @@ -99,20 +93,9 @@ public OutputStream 
setAsciiStream(long pos) throws SQLException { throw unsupported("LOB update"); } - /** - * Returns the reader. - * - * @return the reader - */ @Override public Reader getCharacterStream() throws SQLException { - try { - debugCodeCall("getCharacterStream"); - checkClosed(); - return value.getReader(); - } catch (Exception e) { - throw logAndConvert(e); - } + return super.getCharacterStream(); } /** @@ -129,39 +112,14 @@ public Reader getCharacterStream() throws SQLException { public Writer setCharacterStream(long pos) throws SQLException { try { if (isDebugEnabled()) { - debugCodeCall("setCharacterStream(" + pos + ");"); + debugCodeCall("setCharacterStream", pos); } - checkClosed(); + checkEditable(); if (pos != 1) { throw DbException.getInvalidValueException("pos", pos); } - if (value.getPrecision() != 0) { - throw DbException.getInvalidValueException("length", value.getPrecision()); - } - final JdbcConnection c = conn; - // PipedReader / PipedWriter are a lot slower - // than PipedInputStream / PipedOutputStream - // (Sun/Oracle Java 1.6.0_20) - final PipedInputStream in = new PipedInputStream(); - final Task task = new Task() { - @Override - public void call() { - value = c.createClob(IOUtils.getReader(in), -1); - } - }; - PipedOutputStream out = new PipedOutputStream(in) { - @Override - public void close() throws IOException { - super.close(); - try { - task.get(); - } catch (Exception e) { - throw DbException.convertToIOException(e); - } - } - }; - task.execute(); - return IOUtils.getBufferedWriter(out); + state = State.SET_CALLED; + return setCharacterStreamImpl(); } catch (Exception e) { throw logAndConvert(e); } @@ -178,9 +136,9 @@ public void close() throws IOException { public String getSubString(long pos, int length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getSubString(" + pos + ", " + length + ");"); + debugCode("getSubString(" + pos + ", " + length + ')'); } - checkClosed(); + checkReadable(); if (pos < 1) { throw 
DbException.getInvalidValueException("pos", pos); } @@ -189,12 +147,9 @@ public String getSubString(long pos, int length) throws SQLException { } StringWriter writer = new StringWriter( Math.min(Constants.IO_BUFFER_SIZE, length)); - Reader reader = value.getReader(); - try { + try (Reader reader = value.getReader()) { IOUtils.skipFully(reader, pos - 1); IOUtils.copyAndCloseInput(reader, writer, length); - } finally { - reader.close(); } return writer.toString(); } catch (Exception e) { @@ -210,20 +165,21 @@ public String getSubString(long pos, int length) throws SQLException { * @param pos where to start writing (the first character is at position 1) * @param str the string to add * @return the length of the added text + * @throws SQLException on failure */ @Override public int setString(long pos, String str) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setString(" + pos + ", " + quote(str) + ");"); + debugCode("setString(" + pos + ", " + quote(str) + ')'); } - checkClosed(); + checkEditable(); if (pos != 1) { throw DbException.getInvalidValueException("pos", pos); } else if (str == null) { throw DbException.getInvalidValueException("str", str); } - value = conn.createClob(new StringReader(str), -1); + completeWrite(conn.createClob(new StringReader(str), -1)); return str.length(); } catch (Exception e) { throw logAndConvert(e); @@ -231,12 +187,34 @@ public int setString(long pos, String str) throws SQLException { } /** - * [Not supported] Sets a substring. + * Fills the Clob. This is only supported for new, empty Clob objects that + * were created with Connection.createClob() or createNClob(). The position + * must be 1, meaning the whole Clob data is set. 
+ * + * @param pos where to start writing (the first character is at position 1) + * @param str the string to add + * @param offset the string offset + * @param len the number of characters to read + * @return the length of the added text */ @Override public int setString(long pos, String str, int offset, int len) throws SQLException { - throw unsupported("LOB update"); + try { + if (isDebugEnabled()) { + debugCode("setString(" + pos + ", " + quote(str) + ", " + offset + ", " + len + ')'); + } + checkEditable(); + if (pos != 1) { + throw DbException.getInvalidValueException("pos", pos); + } else if (str == null) { + throw DbException.getInvalidValueException("str", str); + } + completeWrite(conn.createClob(new RangeReader(new StringReader(str), offset, len), -1)); + return (int) value.getType().getPrecision(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -256,36 +234,31 @@ public long position(Clob clobPattern, long start) throws SQLException { } /** - * Release all resources of this object. - */ - @Override - public void free() { - debugCodeCall("free"); - value = null; - } - - /** - * [Not supported] Returns the reader, starting from an offset. + * Returns the reader, starting from an offset. 
+ * + * @param pos 1-based offset + * @param length length of requested area + * @return the reader */ @Override public Reader getCharacterStream(long pos, long length) throws SQLException { - throw unsupported("LOB subset"); - } - - private void checkClosed() { - conn.checkClosed(); - if (value == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); + try { + if (isDebugEnabled()) { + debugCode("getCharacterStream(" + pos + ", " + length + ')'); + } + checkReadable(); + if (state == State.NEW) { + if (pos != 1) { + throw DbException.getInvalidValueException("pos", pos); + } + if (length != 0) { + throw DbException.getInvalidValueException("length", pos); + } + } + return value.getReader(pos, length); + } catch (Exception e) { + throw logAndConvert(e); } } - /** - * INTERNAL - */ - @Override - public String toString() { - return getTraceObjectName() + ": " + (value == null ? - "null" : value.getTraceSQL()); - } - } diff --git a/h2/src/main/org/h2/jdbc/JdbcConnection.java b/h2/src/main/org/h2/jdbc/JdbcConnection.java index c2570f40be..9834e7a03f 100644 --- a/h2/src/main/org/h2/jdbc/JdbcConnection.java +++ b/h2/src/main/org/h2/jdbc/JdbcConnection.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). 
Initial Developer: H2 + * Group */ package org.h2.jdbc; -import java.io.ByteArrayInputStream; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.Reader; import java.sql.Array; import java.sql.Blob; @@ -26,43 +24,55 @@ import java.sql.Statement; import java.sql.Struct; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.Properties; +import java.util.concurrent.Executor; +import java.util.regex.Pattern; import org.h2.api.ErrorCode; +import org.h2.api.JavaObjectSerializer; import org.h2.command.CommandInterface; +import org.h2.engine.CastDataProvider; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; -import org.h2.engine.SessionInterface; +import org.h2.engine.IsolationLevel; +import org.h2.engine.Mode; +import org.h2.engine.Session; +import org.h2.engine.Session.StaticSettings; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.ResultInterface; import org.h2.util.CloseWatcher; -import org.h2.util.JdbcUtils; -import org.h2.util.Utils; +import org.h2.util.TimeZoneProvider; import org.h2.value.CompareMode; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueString; - -/*## Java 1.7 ## -import java.util.concurrent.Executor; -//*/ +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** - *

    * Represents a connection (session) to a database. - *

    *

    - * Thread safety: the connection is thread-safe, because access - * is synchronized. However, for compatibility with other databases, a - * connection should only be used in one thread at any time. + * Thread safety: the connection is thread-safe, because access is synchronized. + * Different statements from the same connection may try to execute their + * commands in parallel, but they will be executed sequentially. If real + * concurrent execution of these commands is needed, different connections + * should be used. *

    */ -public class JdbcConnection extends TraceObject implements Connection { +public class JdbcConnection extends TraceObject implements Connection, JdbcConnectionBackwardsCompat, + CastDataProvider { + + private static final String NUM_SERVERS = "numServers"; + private static final String PREFIX_SERVER = "server"; private static boolean keepOpenStackTrace; @@ -72,48 +82,51 @@ public class JdbcConnection extends TraceObject implements Connection { // ResultSet.HOLD_CURSORS_OVER_COMMIT private int holdability = 1; - private SessionInterface session; + private Session session; private CommandInterface commit, rollback; private CommandInterface getReadOnly, getGeneratedKeys; - private CommandInterface setLockMode, getLockMode; private CommandInterface setQueryTimeout, getQueryTimeout; private int savepointId; private String catalog; private Statement executingStatement; - private final CompareMode compareMode = CompareMode.getInstance(null, 0); private final CloseWatcher watcher; private int queryTimeoutCache = -1; - /** - * INTERNAL - */ - public JdbcConnection(String url, Properties info) throws SQLException { - this(new ConnectionInfo(url, info), true); - } + private Map clientInfo; /** * INTERNAL - */ - public JdbcConnection(ConnectionInfo ci, boolean useBaseDir) + * the session closable object does not leak as Eclipse warns - due to the + * CloseWatcher. 
+ * @param url of this connection + * @param info of this connection + * @param user of this connection + * @param password for the user + * @param forbidCreation whether database creation is forbidden + * @throws SQLException on failure + */ + @SuppressWarnings("resource") + public JdbcConnection(String url, Properties info, String user, Object password, boolean forbidCreation) throws SQLException { try { - if (useBaseDir) { - String baseDir = SysProperties.getBaseDir(); - if (baseDir != null) { - ci.setBaseDir(baseDir); - } + ConnectionInfo ci = new ConnectionInfo(url, info, user, password); + if (forbidCreation) { + ci.setProperty("FORBID_CREATION", "TRUE"); + } + String baseDir = SysProperties.getBaseDir(); + if (baseDir != null) { + ci.setBaseDir(baseDir); } // this will return an embedded or server connection session = new SessionRemote(ci).connectEmbeddedOrServer(false); - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = ci.getUserName(); if (isInfoEnabled()) { trace.infoCode("Connection " + getTraceObjectName() - + " = DriverManager.getConnection(" + quote(ci.getOriginalURL()) - + ", " + quote(user) + ", \"\");"); + + " = DriverManager.getConnection(" + + quote(ci.getOriginalURL()) + ", " + quote(this.user) + + ", \"\");"); } this.url = ci.getURL(); closeOld(); @@ -125,32 +138,34 @@ public JdbcConnection(ConnectionInfo ci, boolean useBaseDir) /** * INTERNAL + * @param clone connection to clone */ public JdbcConnection(JdbcConnection clone) { this.session = clone.session; - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = clone.user; this.url = clone.url; this.catalog = clone.catalog; this.commit = clone.commit; 
this.getGeneratedKeys = clone.getGeneratedKeys; - this.getLockMode = clone.getLockMode; this.getQueryTimeout = clone.getQueryTimeout; this.getReadOnly = clone.getReadOnly; this.rollback = clone.rollback; this.watcher = null; + if (clone.clientInfo != null) { + this.clientInfo = new HashMap<>(clone.clientInfo); + } } /** * INTERNAL + * @param session of this connection + * @param user of this connection + * @param url of this connection */ - public JdbcConnection(SessionInterface session, String user, String url) { + public JdbcConnection(Session session, String user, String url) { this.session = session; - trace = session.getTrace(); - int id = getNextId(TraceObject.CONNECTION); - setTrace(trace, TraceObject.CONNECTION, id); + setTrace(session.getTrace(), TraceObject.CONNECTION, getNextId(TraceObject.CONNECTION)); this.user = user; this.url = url; this.watcher = null; @@ -171,7 +186,8 @@ private void closeOld() { // keep the stack trace from now on keepOpenStackTrace = true; String s = w.getOpenStackTrace(); - Exception ex = DbException.get(ErrorCode.TRACE_CONNECTION_NOT_CLOSED); + Exception ex = DbException + .get(ErrorCode.TRACE_CONNECTION_NOT_CLOSED); trace.error(ex, s); } } @@ -186,13 +202,9 @@ private void closeOld() { public Statement createStatement() throws SQLException { try { int id = getNextId(TraceObject.STATEMENT); - if (isDebugEnabled()) { - debugCodeAssign("Statement", TraceObject.STATEMENT, id, "createStatement()"); - } + debugCodeAssign("Statement", TraceObject.STATEMENT, id, "createStatement()"); checkClosed(); - return new JdbcStatement(this, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false); + return new JdbcStatement(this, id, ResultSet.TYPE_FORWARD_ONLY, Constants.DEFAULT_RESULT_SET_CONCURRENCY); } catch (Exception e) { throw logAndConvert(e); } @@ -204,22 +216,21 @@ public Statement createStatement() throws SQLException { * @param resultSetType the result set type (ResultSet.TYPE_*) * @param 
resultSetConcurrency the concurrency (ResultSet.CONCUR_*) * @return the statement - * @throws SQLException - * if the connection is closed or the result set type or - * concurrency are not supported + * @throws SQLException if the connection is closed or the result set type + * or concurrency are not supported */ @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { + public Statement createStatement(int resultSetType, + int resultSetConcurrency) throws SQLException { try { int id = getNextId(TraceObject.STATEMENT); if (isDebugEnabled()) { debugCodeAssign("Statement", TraceObject.STATEMENT, id, - "createStatement(" + resultSetType + ", " + resultSetConcurrency + ")"); + "createStatement(" + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); - return new JdbcStatement(this, id, resultSetType, resultSetConcurrency, false); + return new JdbcStatement(this, id, resultSetType, resultSetConcurrency); } catch (Exception e) { throw logAndConvert(e); } @@ -244,13 +255,14 @@ public Statement createStatement(int resultSetType, int id = getNextId(TraceObject.STATEMENT); if (isDebugEnabled()) { debugCodeAssign("Statement", TraceObject.STATEMENT, id, - "createStatement(" + resultSetType + ", " + - resultSetConcurrency + ", " + resultSetHoldability + ")"); + "createStatement(" + resultSetType + ", " + + resultSetConcurrency + ", " + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); checkClosed(); - return new JdbcStatement(this, id, resultSetType, resultSetConcurrency, false); + return new JdbcStatement(this, id, resultSetType, resultSetConcurrency); } catch (Exception e) { throw logAndConvert(e); } @@ -268,40 +280,13 @@ public PreparedStatement prepareStatement(String sql) throws SQLException { try { int id = getNextId(TraceObject.PREPARED_STATEMENT); if 
(isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ")"); - } - checkClosed(); - sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, false); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Prepare a statement that will automatically close when the result set is - * closed. This method is used to retrieve database meta data. - * - * @param sql the SQL statement - * @return the prepared statement - */ - PreparedStatement prepareAutoCloseStatement(String sql) throws SQLException { - try { - int id = getNextId(TraceObject.PREPARED_STATEMENT); - if (isDebugEnabled()) { - debugCodeAssign("PreparedStatement", - TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ")"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ')'); } checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - ResultSet.TYPE_FORWARD_ONLY, - Constants.DEFAULT_RESULT_SET_CONCURRENCY, true); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, null); } catch (Exception e) { throw logAndConvert(e); } @@ -317,10 +302,7 @@ PreparedStatement prepareAutoCloseStatement(String sql) throws SQLException { public DatabaseMetaData getMetaData() throws SQLException { try { int id = getNextId(TraceObject.DATABASE_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("DatabaseMetaData", - TraceObject.DATABASE_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("DatabaseMetaData", TraceObject.DATABASE_META_DATA, id, "getMetaData()"); checkClosed(); return new JdbcDatabaseMetaData(this, trace, id); } catch (Exception e) { @@ -330,8 +312,9 @@ public DatabaseMetaData getMetaData() throws SQLException { /** * INTERNAL + * @return 
session */ - public SessionInterface getSession() { + public Session getSession() { return session; } @@ -350,34 +333,27 @@ public synchronized void close() throws SQLException { } CloseWatcher.unregister(watcher); session.cancel(); - if (executingStatement != null) { - try { - executingStatement.cancel(); - } catch (NullPointerException e) { - // ignore - } - } synchronized (session) { + if (executingStatement != null) { + try { + executingStatement.cancel(); + } catch (NullPointerException | SQLException e) { + // ignore + } + } try { if (!session.isClosed()) { try { if (session.hasPendingTransaction()) { - // roll back unless that would require to - // re-connect (the transaction can't be rolled - // back after re-connecting) - if (!session.isReconnectNeeded(true)) { - try { - rollbackInternal(); - } catch (DbException e) { - // ignore if the connection is broken - // right now - if (e.getErrorCode() != - ErrorCode.CONNECTION_BROKEN_1) { - throw e; - } + try { + rollbackInternal(); + } catch (DbException e) { + // ignore if the connection is broken or database shut down + if (e.getErrorCode() != ErrorCode.CONNECTION_BROKEN_1 && + e.getErrorCode() != ErrorCode.DATABASE_IS_CLOSED) { + throw e; } } - session.afterWriting(); } closePreparedCommands(); } finally { @@ -388,7 +364,7 @@ public synchronized void close() throws SQLException { session = null; } } - } catch (Exception e) { + } catch (Throwable e) { throw logAndConvert(e); } } @@ -398,8 +374,6 @@ private void closePreparedCommands() { rollback = closeAndSetNull(rollback); getReadOnly = closeAndSetNull(getReadOnly); getGeneratedKeys = closeAndSetNull(getGeneratedKeys); - getLockMode = closeAndSetNull(getLockMode); - setLockMode = closeAndSetNull(setLockMode); getQueryTimeout = closeAndSetNull(getQueryTimeout); setQueryTimeout = closeAndSetNull(setQueryTimeout); } @@ -423,13 +397,15 @@ public synchronized void setAutoCommit(boolean autoCommit) throws SQLException { try { if (isDebugEnabled()) { - 
debugCode("setAutoCommit(" + autoCommit + ");"); + debugCode("setAutoCommit(" + autoCommit + ')'); } checkClosed(); - if (autoCommit && !session.getAutoCommit()) { - commit(); + synchronized (session) { + if (autoCommit && !session.getAutoCommit()) { + commit(); + } + session.setAutoCommit(autoCommit); } - session.setAutoCommit(autoCommit); } catch (Exception e) { throw logAndConvert(e); } @@ -462,13 +438,13 @@ public synchronized boolean getAutoCommit() throws SQLException { public synchronized void commit() throws SQLException { try { debugCodeCall("commit"); - checkClosedForWrite(); - try { - commit = prepareCommand("COMMIT", commit); - commit.executeUpdate(); - } finally { - afterWriting(); + checkClosed(); + if (SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT + && getAutoCommit()) { + throw DbException.get(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, "commit()"); } + commit = prepareCommand("COMMIT", commit); + commit.executeUpdate(null); } catch (Exception e) { throw logAndConvert(e); } @@ -484,12 +460,12 @@ public synchronized void commit() throws SQLException { public synchronized void rollback() throws SQLException { try { debugCodeCall("rollback"); - checkClosedForWrite(); - try { - rollbackInternal(); - } finally { - afterWriting(); + checkClosed(); + if (SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT + && getAutoCommit()) { + throw DbException.get(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, "rollback()"); } + rollbackInternal(); } catch (Exception e) { throw logAndConvert(e); } @@ -539,7 +515,7 @@ public String nativeSQL(String sql) throws SQLException { public void setReadOnly(boolean readOnly) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setReadOnly(" + readOnly + ");"); + debugCode("setReadOnly(" + readOnly + ')'); } checkClosed(); } catch (Exception e) { @@ -561,8 +537,7 @@ public boolean isReadOnly() throws SQLException { getReadOnly = prepareCommand("CALL READONLY()", getReadOnly); ResultInterface result = 
getReadOnly.executeQuery(0, false); result.next(); - boolean readOnly = result.currentRow()[0].getBoolean().booleanValue(); - return readOnly; + return result.currentRow()[0].getBoolean(); } catch (Exception e) { throw logAndConvert(e); } @@ -596,7 +571,8 @@ public String getCatalog() throws SQLException { debugCodeCall("getCatalog"); checkClosed(); if (catalog == null) { - CommandInterface cat = prepareCommand("CALL DATABASE()", Integer.MAX_VALUE); + CommandInterface cat = prepareCommand("CALL DATABASE()", + Integer.MAX_VALUE); ResultInterface result = cat.executeQuery(0, false); result.next(); catalog = result.currentRow()[0].getString(); @@ -645,9 +621,8 @@ public void clearWarnings() throws SQLException { * @param resultSetType the result set type (ResultSet.TYPE_*) * @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*) * @return the prepared statement - * @throws SQLException - * if the connection is closed or the result set type or - * concurrency are not supported + * @throws SQLException if the connection is closed or the result set type + * or concurrency are not supported */ @Override public PreparedStatement prepareStatement(String sql, int resultSetType, @@ -656,15 +631,12 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " + - resultSetType + ", " + resultSetConcurrency + - ")"); + "prepareStatement(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, resultSetType, - resultSetConcurrency, false); + return new JdbcPreparedStatement(this, sql, id, resultSetType, resultSetConcurrency, null); } catch (Exception e) { throw logAndConvert(e); } @@ -673,52 +645,26 
@@ public PreparedStatement prepareStatement(String sql, int resultSetType, /** * Changes the current transaction isolation level. Calling this method will * commit an open transaction, even if the new level is the same as the old - * one, except if the level is not supported. Internally, this method calls - * SET LOCK_MODE, which affects all connections. - * The following isolation levels are supported: - *
      - *
    • Connection.TRANSACTION_READ_UNCOMMITTED = SET LOCK_MODE 0: no - * locking (should only be used for testing).
    • - *
    • Connection.TRANSACTION_SERIALIZABLE = SET LOCK_MODE 1: table level - * locking.
    • - *
    • Connection.TRANSACTION_READ_COMMITTED = SET LOCK_MODE 3: table - * level locking, but read locks are released immediately (default).
    • - *
    - * This setting is not persistent. Please note that using - * TRANSACTION_READ_UNCOMMITTED while at the same time using multiple - * connections may result in inconsistent transactions. + * one. * * @param level the new transaction isolation level: * Connection.TRANSACTION_READ_UNCOMMITTED, - * Connection.TRANSACTION_READ_COMMITTED, or + * Connection.TRANSACTION_READ_COMMITTED, + * Connection.TRANSACTION_REPEATABLE_READ, + * 6 (SNAPSHOT), or * Connection.TRANSACTION_SERIALIZABLE * @throws SQLException if the connection is closed or the isolation level - * is not supported + * is not valid */ @Override public void setTransactionIsolation(int level) throws SQLException { try { debugCodeCall("setTransactionIsolation", level); checkClosed(); - int lockMode; - switch(level) { - case Connection.TRANSACTION_READ_UNCOMMITTED: - lockMode = Constants.LOCK_MODE_OFF; - break; - case Connection.TRANSACTION_READ_COMMITTED: - lockMode = Constants.LOCK_MODE_READ_COMMITTED; - break; - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - lockMode = Constants.LOCK_MODE_TABLE; - break; - default: - throw DbException.getInvalidValueException("level", level); + if (!getAutoCommit()) { + commit(); } - commit(); - setLockMode = prepareCommand("SET LOCK_MODE ?", setLockMode); - setLockMode.getParameters().get(0).setValue(ValueInt.get(lockMode), false); - setLockMode.executeUpdate(); + session.setIsolationLevel(IsolationLevel.fromJdbc(level)); } catch (Exception e) { throw logAndConvert(e); } @@ -727,14 +673,15 @@ public void setTransactionIsolation(int level) throws SQLException { /** * INTERNAL */ - public void setQueryTimeout(int seconds) throws SQLException { + void setQueryTimeout(int seconds) throws SQLException { try { debugCodeCall("setQueryTimeout", seconds); checkClosed(); - setQueryTimeout = prepareCommand("SET QUERY_TIMEOUT ?", setQueryTimeout); - setQueryTimeout.getParameters().get(0). 
- setValue(ValueInt.get(seconds * 1000), false); - setQueryTimeout.executeUpdate(); + setQueryTimeout = prepareCommand("SET QUERY_TIMEOUT ?", + setQueryTimeout); + setQueryTimeout.getParameters().get(0) + .setValue(ValueInteger.get(seconds * 1000), false); + setQueryTimeout.executeUpdate(null); queryTimeoutCache = seconds; } catch (Exception e) { throw logAndConvert(e); @@ -748,11 +695,11 @@ int getQueryTimeout() throws SQLException { try { if (queryTimeoutCache == -1) { checkClosed(); - getQueryTimeout = prepareCommand( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS " + - "WHERE NAME=?", getQueryTimeout); - getQueryTimeout.getParameters().get(0). - setValue(ValueString.get("QUERY_TIMEOUT"), false); + getQueryTimeout = prepareCommand(!session.isOldInformationSchema() + ? "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME=?" + : "SELECT `VALUE` FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?", getQueryTimeout); + getQueryTimeout.getParameters().get(0) + .setValue(ValueVarchar.get("QUERY_TIMEOUT"), false); ResultInterface result = getQueryTimeout.executeQuery(0, false); result.next(); int queryTimeout = result.currentRow()[0].getInt(); @@ -773,7 +720,7 @@ int getQueryTimeout() throws SQLException { /** * Returns the current transaction isolation level. * - * @return the isolation level. 
+ * @return the isolation level * @throws SQLException if the connection is closed */ @Override @@ -781,27 +728,7 @@ public int getTransactionIsolation() throws SQLException { try { debugCodeCall("getTransactionIsolation"); checkClosed(); - getLockMode = prepareCommand("CALL LOCK_MODE()", getLockMode); - ResultInterface result = getLockMode.executeQuery(0, false); - result.next(); - int lockMode = result.currentRow()[0].getInt(); - result.close(); - int transactionIsolationLevel; - switch(lockMode) { - case Constants.LOCK_MODE_OFF: - transactionIsolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED; - break; - case Constants.LOCK_MODE_READ_COMMITTED: - transactionIsolationLevel = Connection.TRANSACTION_READ_COMMITTED; - break; - case Constants.LOCK_MODE_TABLE: - case Constants.LOCK_MODE_TABLE_GC: - transactionIsolationLevel = Connection.TRANSACTION_SERIALIZABLE; - break; - default: - throw DbException.throwInternalError("lockMode:" + lockMode); - } - return transactionIsolationLevel; + return session.getIsolationLevel().getJdbc(); } catch (Exception e) { throw logAndConvert(e); } @@ -810,12 +737,10 @@ public int getTransactionIsolation() throws SQLException { /** * Changes the current result set holdability. 
* - * @param holdability - * ResultSet.HOLD_CURSORS_OVER_COMMIT or + * @param holdability ResultSet.HOLD_CURSORS_OVER_COMMIT or * ResultSet.CLOSE_CURSORS_AT_COMMIT; - * @throws SQLException - * if the connection is closed or the holdability is not - * supported + * @throws SQLException if the connection is closed or the holdability is + * not supported */ @Override public void setHoldability(int holdability) throws SQLException { @@ -870,7 +795,9 @@ public Map> getTypeMap() throws SQLException { @Override public void setTypeMap(Map> map) throws SQLException { try { - debugCode("setTypeMap(" + quoteMap(map) + ");"); + if (isDebugEnabled()) { + debugCode("setTypeMap(" + quoteMap(map) + ')'); + } checkMap(map); } catch (Exception e) { throw logAndConvert(e); @@ -882,17 +809,16 @@ public void setTypeMap(Map> map) throws SQLException { * * @param sql the SQL statement * @return the callable statement - * @throws SQLException - * if the connection is closed or the statement is not valid + * @throws SQLException if the connection is closed or the statement is not + * valid */ @Override public CallableStatement prepareCall(String sql) throws SQLException { try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, "prepareCall(" + - quote(sql) + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ')'); } checkClosed(); sql = translateSQL(sql); @@ -912,9 +838,8 @@ public CallableStatement prepareCall(String sql) throws SQLException { * @param resultSetType the result set type (ResultSet.TYPE_*) * @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*) * @return the callable statement - * @throws SQLException - * if the connection is closed or the result set type or - * concurrency are not supported + * @throws SQLException if the connection is closed or the result set type + * or concurrency are not 
supported */ @Override public CallableStatement prepareCall(String sql, int resultSetType, @@ -922,10 +847,8 @@ public CallableStatement prepareCall(String sql, int resultSetType, try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, "prepareCall(" + - quote(sql) + ", " + resultSetType + ", " + - resultSetConcurrency + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkClosed(); @@ -946,21 +869,19 @@ public CallableStatement prepareCall(String sql, int resultSetType, * @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*) * @param resultSetHoldability the holdability (ResultSet.HOLD* / CLOSE*) * @return the callable statement - * @throws SQLException - * if the connection is closed or the result set type, + * @throws SQLException if the connection is closed or the result set type, * concurrency, or holdability are not supported */ @Override public CallableStatement prepareCall(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { + int resultSetConcurrency, int resultSetHoldability) + throws SQLException { try { int id = getNextId(TraceObject.CALLABLE_STATEMENT); if (isDebugEnabled()) { - debugCodeAssign("CallableStatement", - TraceObject.CALLABLE_STATEMENT, id, "prepareCall(" + - quote(sql) + ", " + resultSetType + ", " + - resultSetConcurrency + ", " + - resultSetHoldability + ")"); + debugCodeAssign("CallableStatement", TraceObject.CALLABLE_STATEMENT, id, + "prepareCall(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ", " + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); @@ -982,15 +903,14 @@ public 
CallableStatement prepareCall(String sql, int resultSetType, public Savepoint setSavepoint() throws SQLException { try { int id = getNextId(TraceObject.SAVEPOINT); - if (isDebugEnabled()) { - debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, "setSavepoint()"); - } + debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, "setSavepoint()"); checkClosed(); CommandInterface set = prepareCommand( "SAVEPOINT " + JdbcSavepoint.getName(null, savepointId), Integer.MAX_VALUE); - set.executeUpdate(); - JdbcSavepoint savepoint = new JdbcSavepoint(this, savepointId, null, trace, id); + set.executeUpdate(null); + JdbcSavepoint savepoint = new JdbcSavepoint(this, savepointId, null, + trace, id); savepointId++; return savepoint; } catch (Exception e) { @@ -1009,15 +929,14 @@ public Savepoint setSavepoint(String name) throws SQLException { try { int id = getNextId(TraceObject.SAVEPOINT); if (isDebugEnabled()) { - debugCodeAssign("Savepoint", - TraceObject.SAVEPOINT, id, "setSavepoint(" + quote(name) + ")"); + debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id, "setSavepoint(" + quote(name) + ')'); } checkClosed(); CommandInterface set = prepareCommand( - "SAVEPOINT " + JdbcSavepoint.getName(name, 0), Integer.MAX_VALUE); - set.executeUpdate(); - JdbcSavepoint savepoint = new JdbcSavepoint(this, 0, name, trace, id); - return savepoint; + "SAVEPOINT " + JdbcSavepoint.getName(name, 0), + Integer.MAX_VALUE); + set.executeUpdate(null); + return new JdbcSavepoint(this, 0, name, trace, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1032,13 +951,11 @@ public Savepoint setSavepoint(String name) throws SQLException { public void rollback(Savepoint savepoint) throws SQLException { try { JdbcSavepoint sp = convertSavepoint(savepoint); - debugCode("rollback(" + sp.getTraceObjectName() + ");"); - checkClosedForWrite(); - try { - sp.rollback(); - } finally { - afterWriting(); + if (isDebugEnabled()) { + debugCode("rollback(" + sp.getTraceObjectName() + ')'); } + 
checkClosed(); + sp.rollback(); } catch (Exception e) { throw logAndConvert(e); } @@ -1052,7 +969,7 @@ public void rollback(Savepoint savepoint) throws SQLException { @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { try { - debugCode("releaseSavepoint(savepoint);"); + debugCode("releaseSavepoint(savepoint)"); checkClosed(); convertSavepoint(savepoint).release(); } catch (Exception e) { @@ -1062,7 +979,8 @@ public void releaseSavepoint(Savepoint savepoint) throws SQLException { private static JdbcSavepoint convertSavepoint(Savepoint savepoint) { if (!(savepoint instanceof JdbcSavepoint)) { - throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, "" + savepoint); + throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1, + String.valueOf(savepoint)); } return (JdbcSavepoint) savepoint; } @@ -1087,16 +1005,14 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, - "prepareStatement(" + quote(sql) + ", " + - resultSetType + ", " + resultSetConcurrency + ", " + - resultSetHoldability + ")"); + "prepareStatement(" + quote(sql) + ", " + resultSetType + ", " + resultSetConcurrency + ", " + + resultSetHoldability + ')'); } checkTypeConcurrency(resultSetType, resultSetConcurrency); checkHoldability(resultSetHoldability); checkClosed(); sql = translateSQL(sql); - return new JdbcPreparedStatement(this, sql, id, - resultSetType, resultSetConcurrency, false); + return new JdbcPreparedStatement(this, sql, id, resultSetType, resultSetConcurrency, null); } catch (Exception e) { throw logAndConvert(e); } @@ -1104,23 +1020,28 @@ public PreparedStatement prepareStatement(String sql, int resultSetType, /** * Creates a new prepared statement. - * This method just calls prepareStatement(String sql) internally. - * The method getGeneratedKeys only supports one column. 
* * @param sql the SQL statement - * @param autoGeneratedKeys ignored + * @param autoGeneratedKeys + * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should + * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if + * generated keys should not be available * @return the prepared statement - * @throws SQLException - * if the connection is closed + * @throws SQLException if the connection is closed */ @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { try { + int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCode("prepareStatement(" + quote(sql) + ", " + autoGeneratedKeys + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } - return prepareStatement(sql); + checkClosed(); + sql = translateSQL(sql); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS); } catch (Exception e) { throw logAndConvert(e); } @@ -1128,24 +1049,27 @@ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) /** * Creates a new prepared statement. - * This method just calls prepareStatement(String sql) internally. - * The method getGeneratedKeys only supports one column. 
* * @param sql the SQL statement - * @param columnIndexes ignored + * @param columnIndexes + * an array of column indexes indicating the columns with generated + * keys that should be returned from the inserted row * @return the prepared statement - * @throws SQLException - * if the connection is closed + * @throws SQLException if the connection is closed */ @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { try { + int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCode("prepareStatement(" + quote(sql) + ", " + - quoteIntArray(columnIndexes) + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } - return prepareStatement(sql); + checkClosed(); + sql = translateSQL(sql); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, columnIndexes); } catch (Exception e) { throw logAndConvert(e); } @@ -1153,24 +1077,27 @@ public PreparedStatement prepareStatement(String sql, int[] columnIndexes) /** * Creates a new prepared statement. - * This method just calls prepareStatement(String sql) internally. - * The method getGeneratedKeys only supports one column. 
* * @param sql the SQL statement - * @param columnNames ignored + * @param columnNames + * an array of column names indicating the columns with generated + * keys that should be returned from the inserted row * @return the prepared statement - * @throws SQLException - * if the connection is closed + * @throws SQLException if the connection is closed */ @Override public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { try { + int id = getNextId(TraceObject.PREPARED_STATEMENT); if (isDebugEnabled()) { - debugCode("prepareStatement(" + quote(sql) + ", " + - quoteArray(columnNames) + ");"); + debugCodeAssign("PreparedStatement", TraceObject.PREPARED_STATEMENT, id, + "prepareStatement(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } - return prepareStatement(sql); + checkClosed(); + sql = translateSQL(sql); + return new JdbcPreparedStatement(this, sql, id, ResultSet.TYPE_FORWARD_ONLY, + Constants.DEFAULT_RESULT_SET_CONCURRENCY, columnNames); } catch (Exception e) { throw logAndConvert(e); } @@ -1190,15 +1117,16 @@ CommandInterface prepareCommand(String sql, int fetchSize) { } private CommandInterface prepareCommand(String sql, CommandInterface old) { - return old == null ? session.prepareCommand(sql, Integer.MAX_VALUE) : old; + return old == null ? 
session.prepareCommand(sql, Integer.MAX_VALUE) + : old; } private static int translateGetEnd(String sql, int i, char c) { int len = sql.length(); - switch(c) { + switch (c) { case '$': { - if (i < len - 1 && sql.charAt(i + 1) == '$' && - (i == 0 || sql.charAt(i - 1) <= ' ')) { + if (i < len - 1 && sql.charAt(i + 1) == '$' + && (i == 0 || sql.charAt(i - 1) <= ' ')) { int j = sql.indexOf("$$", i + 2); if (j < 0) { throw DbException.getSyntaxError(sql, i); @@ -1222,7 +1150,7 @@ private static int translateGetEnd(String sql, int i, char c) { return j; } case '/': { - checkRunOver(i+1, len, sql); + checkRunOver(i + 1, len, sql); if (sql.charAt(i + 1) == '*') { // block comment int j = sql.indexOf("*/", i + 2); @@ -1240,7 +1168,7 @@ private static int translateGetEnd(String sql, int i, char c) { return i; } case '-': { - checkRunOver(i+1, len, sql); + checkRunOver(i + 1, len, sql); if (sql.charAt(i + 1) == '-') { // single line comment i += 2; @@ -1251,13 +1179,13 @@ private static int translateGetEnd(String sql, int i, char c) { return i; } default: - throw DbException.throwInternalError("c=" + c); + throw DbException.getInternalError("c=" + c); } } /** - * Convert JDBC escape sequences in the SQL statement. This - * method throws an exception if the SQL statement is null. + * Convert JDBC escape sequences in the SQL statement. This method throws an + * exception if the SQL statement is null. 
* * @param sql the SQL statement with or without JDBC escape sequences * @return the SQL statement without JDBC escape sequences @@ -1278,12 +1206,13 @@ static String translateSQL(String sql, boolean escapeProcessing) { if (sql == null) { throw DbException.getInvalidValueException("SQL", null); } - if (!escapeProcessing) { - return sql; - } - if (sql.indexOf('{') < 0) { + if (!escapeProcessing || sql.indexOf('{') < 0) { return sql; } + return translateSQLImpl(sql); + } + + private static String translateSQLImpl(String sql) { int len = sql.length(); char[] chars = null; int level = 0; @@ -1434,56 +1363,17 @@ private static void checkHoldability(int resultSetHoldability) { } /** - * INTERNAL. - * Check if this connection is closed. - * The next operation is a read request. + * INTERNAL. Check if this connection is closed. * * @throws DbException if the connection or session is closed */ protected void checkClosed() { - checkClosed(false); - } - - /** - * Check if this connection is closed. - * The next operation may be a write request. - * - * @throws DbException if the connection or session is closed - */ - private void checkClosedForWrite() { - checkClosed(true); - } - - /** - * INTERNAL. - * Check if this connection is closed. - * - * @param write if the next operation is possibly writing - * @throws DbException if the connection or session is closed - */ - protected void checkClosed(boolean write) { if (session == null) { throw DbException.get(ErrorCode.OBJECT_CLOSED); } if (session.isClosed()) { throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); } - if (session.isReconnectNeeded(write)) { - trace.debug("reconnect"); - closePreparedCommands(); - session = session.reconnect(write); - trace = session.getTrace(); - } - } - - /** - * INTERNAL. - * Called after executing a command that could have written something. 
- */ - protected void afterWriting() { - if (session != null) { - session.afterWriting(); - } } String getURL() { @@ -1498,45 +1388,16 @@ String getUser() { private void rollbackInternal() { rollback = prepareCommand("ROLLBACK", rollback); - rollback.executeUpdate(); - } - - /** - * INTERNAL - */ - public int getPowerOffCount() { - return (session == null || session.isClosed()) ? - 0 : session.getPowerOffCount(); + rollback.executeUpdate(null); } /** * INTERNAL */ - public void setPowerOffCount(int count) { - if (session != null) { - session.setPowerOffCount(count); - } - } - - /** - * INTERNAL - */ - public void setExecutingStatement(Statement stat) { + void setExecutingStatement(Statement stat) { executingStatement = stat; } - /** - * INTERNAL - */ - ResultSet getGeneratedKeys(JdbcStatement stat, int id) { - getGeneratedKeys = prepareCommand( - "SELECT SCOPE_IDENTITY() " + - "WHERE SCOPE_IDENTITY() IS NOT NULL", getGeneratedKeys); - ResultInterface result = getGeneratedKeys.executeQuery(0, false); - ResultSet rs = new JdbcResultSet(this, stat, result, id, false, true, false); - return rs; - } - /** * Create a new empty Clob object. 
* @@ -1547,16 +1408,8 @@ public Clob createClob() throws SQLException { try { int id = getNextId(TraceObject.CLOB); debugCodeAssign("Clob", TraceObject.CLOB, id, "createClob()"); - checkClosedForWrite(); - try { - Value v = session.getDataHandler().getLobStorage().createClob( - new InputStreamReader( - new ByteArrayInputStream(Utils.EMPTY_BYTES)), 0); - session.addTemporaryLob(v); - return new JdbcClob(this, v, id); - } finally { - afterWriting(); - } + checkClosed(); + return new JdbcClob(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1572,15 +1425,8 @@ public Blob createBlob() throws SQLException { try { int id = getNextId(TraceObject.BLOB); debugCodeAssign("Blob", TraceObject.BLOB, id, "createClob()"); - checkClosedForWrite(); - try { - Value v = session.getDataHandler().getLobStorage().createBlob( - new ByteArrayInputStream(Utils.EMPTY_BYTES), 0); - session.addTemporaryLob(v); - return new JdbcBlob(this, v, id); - } finally { - afterWriting(); - } + checkClosed(); + return new JdbcBlob(this, ValueVarbinary.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1596,36 +1442,49 @@ public NClob createNClob() throws SQLException { try { int id = getNextId(TraceObject.CLOB); debugCodeAssign("NClob", TraceObject.CLOB, id, "createNClob()"); - checkClosedForWrite(); - try { - Value v = session.getDataHandler().getLobStorage().createClob( - new InputStreamReader( - new ByteArrayInputStream(Utils.EMPTY_BYTES)), 0); - session.addTemporaryLob(v); - return new JdbcClob(this, v, id); - } finally { - afterWriting(); - } + checkClosed(); + return new JdbcClob(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] Create a new empty SQLXML object. + * Create a new SQLXML object with no data. 
+ * + * @return the object */ @Override public SQLXML createSQLXML() throws SQLException { - throw unsupported("SQLXML"); + try { + int id = getNextId(TraceObject.SQLXML); + debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "createSQLXML()"); + checkClosed(); + return new JdbcSQLXML(this, ValueVarchar.EMPTY, JdbcLob.State.NEW, id); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] Create a new empty Array object. + * Create a new Array object. + * + * @param typeName the type name + * @param elements the values + * @return the array */ @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw unsupported("createArray"); + try { + int id = getNextId(TraceObject.ARRAY); + debugCodeAssign("Array", TraceObject.ARRAY, id, "createArrayOf()"); + checkClosed(); + Value value = ValueToObjectConverter.objectToValue(session, elements, Value.ARRAY); + return new JdbcArray(this, value, id); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1662,53 +1521,100 @@ public synchronized boolean isValid(int timeout) { } /** - * Set a client property. - * This method always throws a SQLClientInfoException. + * Set a client property. This method always throws a SQLClientInfoException + * in standard mode. In compatibility mode the following properties are + * supported: + *
      + *
    • DB2: The properties: ApplicationName, ClientAccountingInformation, + * ClientUser and ClientCorrelationToken are supported.
    • + *
    • MySQL: All property names are supported.
    • + *
    • Oracle: All properties in the form <namespace>.<key name> + * are supported.
    • + *
    • PostgreSQL: The ApplicationName property is supported.
    • + *
    + * For unsupported properties a SQLClientInfoException is thrown. * - * @param name the name of the property (ignored) - * @param value the value (ignored) + * @param name the name of the property + * @param value the value */ @Override public void setClientInfo(String name, String value) throws SQLClientInfoException { try { if (isDebugEnabled()) { - debugCode("setClientInfo(" - +quote(name)+", " - +quote(value)+");"); + debugCode("setClientInfo(" + quote(name) + ", " + quote(value) + ')'); } checkClosed(); - // we don't have any client properties, so just throw - throw new SQLClientInfoException(); + + // no change to property: Ignore call. This early exit fixes a + // problem with websphere liberty resetting the client info of a + // pooled connection to its initial values. + if (Objects.equals(value, getClientInfo(name))) { + return; + } + + if (isInternalProperty(name)) { + throw new SQLClientInfoException( + "Property name '" + name + " is used internally by H2.", + Collections.emptyMap()); + } + + Pattern clientInfoNameRegEx = getMode().supportedClientInfoPropertiesRegEx; + + if (clientInfoNameRegEx != null + && clientInfoNameRegEx.matcher(name).matches()) { + if (clientInfo == null) { + clientInfo = new HashMap<>(); + } + clientInfo.put(name, value); + } else { + throw new SQLClientInfoException( + "Client info name '" + name + "' not supported.", + Collections.emptyMap()); + } } catch (Exception e) { throw convertToClientInfoException(logAndConvert(e)); } } + private static boolean isInternalProperty(String name) { + return NUM_SERVERS.equals(name) || name.startsWith(PREFIX_SERVER); + } + private static SQLClientInfoException convertToClientInfoException( SQLException x) { if (x instanceof SQLClientInfoException) { return (SQLClientInfoException) x; } - return new SQLClientInfoException( - x.getMessage(), x.getSQLState(), x.getErrorCode(), null, null); + return new SQLClientInfoException(x.getMessage(), x.getSQLState(), + x.getErrorCode(), null, 
null); } /** - * Set the client properties. - * This method always throws a SQLClientInfoException. + * Set the client properties. This replaces all existing properties. This + * method always throws a SQLClientInfoException in standard mode. In + * compatibility mode some properties may be supported (see + * setProperty(String, String) for details). * * @param properties the properties (ignored) */ @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { + public void setClientInfo(Properties properties) + throws SQLClientInfoException { try { if (isDebugEnabled()) { - debugCode("setClientInfo(properties);"); + debugCode("setClientInfo(properties)"); } checkClosed(); - // we don't have any client properties, so just throw - throw new SQLClientInfoException(); + if (clientInfo == null) { + clientInfo = new HashMap<>(); + } else { + clientInfo.clear(); + } + for (Map.Entry entry : properties.entrySet()) { + setClientInfo((String) entry.getKey(), + (String) entry.getValue()); + } } catch (Exception e) { throw convertToClientInfoException(logAndConvert(e)); } @@ -1722,17 +1628,22 @@ public void setClientInfo(Properties properties) throws SQLClientInfoException { @Override public Properties getClientInfo() throws SQLException { try { - if (isDebugEnabled()) { - debugCode("getClientInfo();"); - } + debugCodeCall("getClientInfo"); checkClosed(); ArrayList serverList = session.getClusterServers(); Properties p = new Properties(); - p.setProperty("numServers", String.valueOf(serverList.size())); + if (clientInfo != null) { + for (Map.Entry entry : clientInfo.entrySet()) { + p.setProperty(entry.getKey(), entry.getValue()); + } + } + + p.setProperty(NUM_SERVERS, Integer.toString(serverList.size())); for (int i = 0; i < serverList.size(); i++) { - p.setProperty("server" + String.valueOf(i), serverList.get(i)); + p.setProperty(PREFIX_SERVER + i, serverList.get(i)); } + return p; } catch (Exception e) { throw logAndConvert(e); @@ -1742,8 
+1653,9 @@ public Properties getClientInfo() throws SQLException { /** * Get a client property. * - * @param name the client info name (ignored) - * @return the property value + * @param name the client info name + * @return the property value or null if the property is not found or not + * supported. */ @Override public String getClientInfo(String name) throws SQLException { @@ -1752,12 +1664,10 @@ public String getClientInfo(String name) throws SQLException { debugCodeCall("getClientInfo", name); } checkClosed(); - Properties p = getClientInfo(); - String s = p.getProperty(name); - if (s == null) { - throw new SQLClientInfoException(); + if (name == null) { + throw DbException.getInvalidValueException("name", null); } - return s; + return getClientInfo().getProperty(name); } catch (Exception e) { throw logAndConvert(e); } @@ -1772,10 +1682,14 @@ public String getClientInfo(String name) throws SQLException { @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** @@ -1797,16 +1711,14 @@ public boolean isWrapperFor(Class iface) throws SQLException { * end of file is read) * @return the value */ - public Value createClob(Reader x, long length) { + Value createClob(Reader x, long length) { if (x == null) { return ValueNull.INSTANCE; } if (length <= 0) { length = -1; } - Value v = session.getDataHandler().getLobStorage().createClob(x, length); - session.addTemporaryLob(v); - return v; + return session.addTemporaryLob(session.getDataHandler().getLobStorage().createClob(x, length)); } /** @@ -1817,51 +1729,60 @@ public Value createClob(Reader x, long length) { * end of file is read) * @return the value */ - public Value createBlob(InputStream x, 
long length) { + Value createBlob(InputStream x, long length) { if (x == null) { return ValueNull.INSTANCE; } if (length <= 0) { length = -1; } - Value v = session.getDataHandler().getLobStorage().createBlob(x, length); - session.addTemporaryLob(v); - return v; + return session.addTemporaryLob(session.getDataHandler().getLobStorage().createBlob(x, length)); } /** - * [Not supported] + * Sets the given schema name to access. Current implementation is case + * sensitive, i.e. requires schema name to be passed in correct case. * - * @param schema the schema + * @param schema the schema name */ -/*## Java 1.7 ## @Override - public void setSchema(String schema) { - // not supported + public void setSchema(String schema) throws SQLException { + try { + if (isDebugEnabled()) { + debugCodeCall("setSchema", schema); + } + checkClosed(); + session.setCurrentSchemaName(schema); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ /** - * [Not supported] + * Retrieves this current schema name for this connection. + * + * @return current schema name */ -/*## Java 1.7 ## @Override - public String getSchema() { - return null; + public String getSchema() throws SQLException { + try { + debugCodeCall("getSchema"); + checkClosed(); + return session.getCurrentSchemaName(); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ /** * [Not supported] * * @param executor the executor used by this method */ -/*## Java 1.7 ## @Override public void abort(Executor executor) { // not supported } -//*/ /** * [Not supported] @@ -1869,22 +1790,18 @@ public void abort(Executor executor) { * @param executor the executor used by this method * @param milliseconds the TCP connection timeout */ -/*## Java 1.7 ## @Override public void setNetworkTimeout(Executor executor, int milliseconds) { // not supported } -//*/ /** * [Not supported] */ -/*## Java 1.7 ## @Override public int getNetworkTimeout() { return 0; } -//*/ /** * Check that the given type map is either null or empty. 
@@ -1906,46 +1823,58 @@ public String toString() { return getTraceObjectName() + ": url=" + url + " user=" + user; } + CompareMode getCompareMode() { + return session.getDataHandler().getCompareMode(); + } + + @Override + public Mode getMode() { + return session.getMode(); + } + /** - * Convert an object to the default Java object for the given SQL type. For - * example, LOB objects are converted to java.sql.Clob / java.sql.Blob. - * - * @param v the value - * @return the object + * INTERNAL + * @return StaticSettings */ - Object convertToDefaultObject(Value v) { - Object o; - switch (v.getType()) { - case Value.CLOB: { - int id = getNextId(TraceObject.CLOB); - o = new JdbcClob(this, v, id); - break; - } - case Value.BLOB: { - int id = getNextId(TraceObject.BLOB); - o = new JdbcBlob(this, v, id); - break; + public StaticSettings getStaticSettings() { + checkClosed(); + return session.getStaticSettings(); + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); } - case Value.JAVA_OBJECT: - if (SysProperties.serializeJavaObject) { - o = JdbcUtils.deserialize(v.getBytesNoCopy(), session.getDataHandler()); - break; - } - default: - o = v.getObject(); + return session.currentTimestamp(); + } + + @Override + public TimeZoneProvider currentTimeZone() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); } - return o; + return session.currentTimeZone(); } - CompareMode getCompareMode() { - return compareMode; + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + Session session = this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + return session.getJavaObjectSerializer(); } - /** - * INTERNAL - */ - public void setTraceLevel(int level) { - trace.setLevel(level); + @Override + public boolean zeroBasedEnums() { + Session session = 
this.session; + if (session == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + return session.zeroBasedEnums(); } } diff --git a/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java new file mode 100644 index 0000000000..ba85d7d0f6 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcConnectionBackwardsCompat.java @@ -0,0 +1,16 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. + */ +public interface JdbcConnectionBackwardsCompat { + + // compatibility interface + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java index 86d459edae..842f3aeff1 100644 --- a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaData.java @@ -1,37 +1,48 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; import java.sql.Connection; import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; +import java.util.Map.Entry; +import java.util.Properties; + import org.h2.engine.Constants; -import org.h2.engine.SysProperties; +import org.h2.engine.Session; +import org.h2.jdbc.meta.DatabaseMeta; +import org.h2.jdbc.meta.DatabaseMetaLegacy; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.message.TraceObject; -import org.h2.tools.SimpleResultSet; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.value.TypeInfo; +import org.h2.value.ValueInteger; +import org.h2.value.ValueVarchar; /** * Represents the meta data for a database. */ -public class JdbcDatabaseMetaData extends TraceObject implements - DatabaseMetaData { +public final class JdbcDatabaseMetaData extends TraceObject + implements DatabaseMetaData, JdbcDatabaseMetaDataBackwardsCompat { private final JdbcConnection conn; - private String mode; + + private final DatabaseMeta meta; JdbcDatabaseMetaData(JdbcConnection conn, Trace trace, int id) { setTrace(trace, TraceObject.DATABASE_META_DATA, id); this.conn = conn; + Session session = conn.getSession(); + meta = session.isOldInformationSchema() ? 
new DatabaseMetaLegacy(session) + : conn.getSession().getDatabaseMeta(); } /** @@ -65,7 +76,7 @@ public int getDriverMinorVersion() { public String getDatabaseProductName() { debugCodeCall("getDatabaseProductName"); // This value must stay like that, see - // http://opensource.atlassian.com/projects/hibernate/browse/HHH-2682 + // https://hibernate.atlassian.net/browse/HHH-2682 return "H2"; } @@ -75,9 +86,13 @@ public String getDatabaseProductName() { * @return the product version */ @Override - public String getDatabaseProductVersion() { - debugCodeCall("getDatabaseProductVersion"); - return Constants.getFullVersion(); + public String getDatabaseProductVersion() throws SQLException { + try { + debugCodeCall("getDatabaseProductVersion"); + return meta.getDatabaseProductVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -100,29 +115,28 @@ public String getDriverName() { @Override public String getDriverVersion() { debugCodeCall("getDriverVersion"); - return Constants.getFullVersion(); + return Constants.FULL_VERSION; } /** * Gets the list of tables in the database. The result set is sorted by * TABLE_TYPE, TABLE_SCHEM, and TABLE_NAME. * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 TABLE_TYPE (String) table type
    • - *
    • 5 REMARKS (String) comment
    • - *
    • 6 TYPE_CAT (String) always null
    • - *
    • 7 TYPE_SCHEM (String) always null
    • - *
    • 8 TYPE_NAME (String) always null
    • - *
    • 9 SELF_REFERENCING_COL_NAME (String) always null
    • - *
    • 10 REF_GENERATION (String) always null
    • - *
    • 11 SQL (String) the create table statement or NULL for systems tables - *
    • - *
    + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. TABLE_TYPE (String) table type
    8. + *
    9. REMARKS (String) comment
    10. + *
    11. TYPE_CAT (String) always null
    12. + *
    13. TYPE_SCHEM (String) always null
    14. + *
    15. TYPE_NAME (String) always null
    16. + *
    17. SELF_REFERENCING_COL_NAME (String) always null
    18. + *
    19. REF_GENERATION (String) always null
    20. + *
    21. SQL (String) the create table statement or NULL for systems tables.
    22. + *
    * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -132,54 +146,14 @@ public String getDriverVersion() { * @throws SQLException if the connection is closed */ @Override - public ResultSet getTables(String catalogPattern, String schemaPattern, - String tableNamePattern, String[] types) throws SQLException { + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTables(" + quote(catalogPattern) + ", " + - quote(schemaPattern) + ", " + quote(tableNamePattern) + - ", " + quoteArray(types) + ");"); - } - checkClosed(); - String tableType; - if (types != null && types.length > 0) { - StatementBuilder buff = new StatementBuilder("TABLE_TYPE IN("); - for (int i = 0; i < types.length; i++) { - buff.appendExceptFirst(", "); - buff.append('?'); - } - tableType = buff.append(')').toString(); - } else { - tableType = "TRUE"; - } - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "TABLE_TYPE, " - + "REMARKS, " - + "TYPE_NAME TYPE_CAT, " - + "TYPE_NAME TYPE_SCHEM, " - + "TYPE_NAME, " - + "TYPE_NAME SELF_REFERENCING_COL_NAME, " - + "TYPE_NAME REF_GENERATION, " - + "SQL " - + "FROM INFORMATION_SCHEMA.TABLES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME LIKE ? ESCAPE ? 
" - + "AND (" + tableType + ") " - + "ORDER BY TABLE_TYPE, TABLE_SCHEMA, TABLE_NAME"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, getPattern(tableNamePattern)); - prep.setString(6, "\\"); - for (int i = 0; types != null && i < types.length; i++) { - prep.setString(7 + i, types[i]); + debugCode("getTables(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + quote(tableNamePattern) + + ", " + quoteArray(types) + ')'); } - return prep.executeQuery(); + return getResultSet(meta.getTables(catalog, schemaPattern, tableNamePattern, types)); } catch (Exception e) { throw logAndConvert(e); } @@ -189,36 +163,35 @@ public ResultSet getTables(String catalogPattern, String schemaPattern, * Gets the list of columns. The result set is sorted by TABLE_SCHEM, * TABLE_NAME, and ORDINAL_POSITION. * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 COLUMN_NAME (String) column name
    • - *
    • 5 DATA_TYPE (short) data type (see java.sql.Types)
    • - *
    • 6 TYPE_NAME (String) data type name ("INTEGER", "VARCHAR",...)
    • - *
    • 7 COLUMN_SIZE (int) precision + *
        + *
      1. TABLE_CAT (String) table catalog
      2. + *
      3. TABLE_SCHEM (String) table schema
      4. + *
      5. TABLE_NAME (String) table name
      6. + *
      7. COLUMN_NAME (String) column name
      8. + *
      9. DATA_TYPE (int) data type (see java.sql.Types)
      10. + *
      11. TYPE_NAME (String) data type name ("INTEGER", "VARCHAR",...)
      12. + *
      13. COLUMN_SIZE (int) precision * (values larger than 2 GB are returned as 2 GB)
      14. - *
      15. 8 BUFFER_LENGTH (int) unused
      16. - *
      17. 9 DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR)
      18. - *
      19. 10 NUM_PREC_RADIX (int) radix (always 10)
      20. - *
      21. 11 NULLABLE (int) columnNoNulls or columnNullable
      22. - *
      23. 12 REMARKS (String) comment (always empty)
      24. - *
      25. 13 COLUMN_DEF (String) default value
      26. - *
      27. 14 SQL_DATA_TYPE (int) unused
      28. - *
      29. 15 SQL_DATETIME_SUB (int) unused
      30. - *
      31. 16 CHAR_OCTET_LENGTH (int) unused
      32. - *
      33. 17 ORDINAL_POSITION (int) the column index (1,2,...)
      34. - *
      35. 18 IS_NULLABLE (String) "NO" or "YES"
      36. - *
      37. 19 SCOPE_CATALOG (String) always null
      38. - *
      39. 20 SCOPE_SCHEMA (String) always null
      40. - *
      41. 21 SCOPE_TABLE (String) always null
      42. - *
      43. 22 SOURCE_DATA_TYPE (short) null
      44. - *
      45. 23 IS_AUTOINCREMENT (String) "NO" or "YES"
      46. - *
      47. 24 SCOPE_CATLOG (String) always null (the typo is on purpose, - * for compatibility with the JDBC specification prior to 4.1)
      48. - *
    + *
  • BUFFER_LENGTH (int) unused
  • + *
  • DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR)
  • + *
  • NUM_PREC_RADIX (int) radix
  • + *
  • NULLABLE (int) columnNoNulls or columnNullable
  • + *
  • REMARKS (String) comment
  • + *
  • COLUMN_DEF (String) default value
  • + *
  • SQL_DATA_TYPE (int) unused
  • + *
  • SQL_DATETIME_SUB (int) unused
  • + *
  • CHAR_OCTET_LENGTH (int) unused
  • + *
  • ORDINAL_POSITION (int) the column index (1,2,...)
  • + *
  • IS_NULLABLE (String) "NO" or "YES"
  • + *
  • SCOPE_CATALOG (String) always null
  • + *
  • SCOPE_SCHEMA (String) always null
  • + *
  • SCOPE_TABLE (String) always null
  • + *
  • SOURCE_DATA_TYPE (short) null
  • + *
  • IS_AUTOINCREMENT (String) "NO" or "YES"
  • + *
  • IS_GENERATEDCOLUMN (String) "NO" or "YES"
  • + * * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -229,60 +202,16 @@ public ResultSet getTables(String catalogPattern, String schemaPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getColumns(String catalogPattern, String schemaPattern, - String tableNamePattern, String columnNamePattern) - throws SQLException { + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getColumns(" + quote(catalogPattern)+", " + debugCode("getColumns(" + quote(catalog)+", " +quote(schemaPattern)+", " +quote(tableNamePattern)+", " - +quote(columnNamePattern)+");"); + +quote(columnNamePattern)+')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "DATA_TYPE, " - + "TYPE_NAME, " - + "CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " - + "CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " - + "NUMERIC_SCALE DECIMAL_DIGITS, " - + "NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " - + "NULLABLE, " - + "REMARKS, " - + "COLUMN_DEFAULT COLUMN_DEF, " - + "DATA_TYPE SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " - + "ORDINAL_POSITION, " - + "IS_NULLABLE IS_NULLABLE, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " - + "SOURCE_DATA_TYPE, " - + "CASE WHEN SEQUENCE_NAME IS NULL THEN " - + "CAST(? AS VARCHAR) ELSE CAST(? 
AS VARCHAR) END IS_AUTOINCREMENT, " - + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATLOG " - + "FROM INFORMATION_SCHEMA.COLUMNS " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME LIKE ? ESCAPE ? " - + "AND COLUMN_NAME LIKE ? ESCAPE ? " - + "ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION"); - prep.setString(1, "NO"); - prep.setString(2, "YES"); - prep.setString(3, getCatalogPattern(catalogPattern)); - prep.setString(4, "\\"); - prep.setString(5, getSchemaPattern(schemaPattern)); - prep.setString(6, "\\"); - prep.setString(7, getPattern(tableNamePattern)); - prep.setString(8, "\\"); - prep.setString(9, getPattern(columnNamePattern)); - prep.setString(10, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -294,78 +223,43 @@ public ResultSet getColumns(String catalogPattern, String schemaPattern, * is sorted by NON_UNIQUE ('false' first), TYPE, TABLE_SCHEM, INDEX_NAME, * and ORDINAL_POSITION. * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 NON_UNIQUE (boolean) 'true' if non-unique
    • - *
    • 5 INDEX_QUALIFIER (String) index catalog
    • - *
    • 6 INDEX_NAME (String) index name
    • - *
    • 7 TYPE (short) the index type (always tableIndexOther)
    • - *
    • 8 ORDINAL_POSITION (short) column index (1, 2, ...)
    • - *
    • 9 COLUMN_NAME (String) column name
    • - *
    • 10 ASC_OR_DESC (String) ascending or descending (always 'A')
    • - *
    • 11 CARDINALITY (int) numbers of unique values
    • - *
    • 12 PAGES (int) number of pages use (always 0)
    • - *
    • 13 FILTER_CONDITION (String) filter condition (always empty)
    • - *
    • 14 SORT_TYPE (int) the sort type bit map: 1=DESCENDING, - * 2=NULLS_FIRST, 4=NULLS_LAST
    • - *
    - * - * @param catalogPattern null or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. NON_UNIQUE (boolean) 'true' if non-unique
    8. + *
    9. INDEX_QUALIFIER (String) index catalog
    10. + *
    11. INDEX_NAME (String) index name
    12. + *
    13. TYPE (short) the index type (tableIndexOther or tableIndexHash for + * unique indexes on non-nullable columns, tableIndexStatistics for other + * indexes)
    14. + *
    15. ORDINAL_POSITION (short) column index (1, 2, ...)
    16. + *
    17. COLUMN_NAME (String) column name
    18. + *
    19. ASC_OR_DESC (String) ascending or descending (always 'A')
    20. + *
    21. CARDINALITY (long) number of rows or number of unique values for + * unique indexes on non-nullable columns
    22. + *
    23. PAGES (long) number of pages used
    24. + *
    25. FILTER_CONDITION (String) filter condition (always empty)
    26. + *
    + * + * @param catalog null or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @param unique only unique indexes - * @param approximate is ignored + * @param approximate if true, return fast, but approximate CARDINALITY * @return the list of indexes and columns * @throws SQLException if the connection is closed */ @Override - public ResultSet getIndexInfo(String catalogPattern, String schemaPattern, - String tableName, boolean unique, boolean approximate) + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getIndexInfo(" + quote(catalogPattern) + ", " + - quote(schemaPattern) + ", " + quote(tableName) + ", " + - unique + ", " + approximate + ");"); + debugCode("getIndexInfo(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + unique + + ", " + approximate + ')'); } - String uniqueCondition; - if (unique) { - uniqueCondition = "NON_UNIQUE=FALSE"; - } else { - uniqueCondition = "TRUE"; - } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "NON_UNIQUE, " - + "TABLE_CATALOG INDEX_QUALIFIER, " - + "INDEX_NAME, " - + "INDEX_TYPE TYPE, " - + "ORDINAL_POSITION, " - + "COLUMN_NAME, " - + "ASC_OR_DESC, " - // TODO meta data for number of unique values in an index - + "CARDINALITY, " - + "PAGES, " - + "FILTER_CONDITION, " - + "SORT_TYPE " - + "FROM INFORMATION_SCHEMA.INDEXES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND (" + uniqueCondition + ") " - + "AND TABLE_NAME = ? 
" - + "ORDER BY NON_UNIQUE, TYPE, TABLE_SCHEM, INDEX_NAME, ORDINAL_POSITION"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getIndexInfo(catalog, schema, table, unique, approximate)); } catch (Exception e) { throw logAndConvert(e); } @@ -375,52 +269,29 @@ public ResultSet getIndexInfo(String catalogPattern, String schemaPattern, * Gets the primary key columns for a table. The result set is sorted by * TABLE_SCHEM, and COLUMN_NAME (and not by KEY_SEQ). * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 COLUMN_NAME (String) column name
    • - *
    • 5 KEY_SEQ (short) the column index of this column (1,2,...)
    • - *
    • 6 PK_NAME (String) the name of the primary key index
    • - *
    + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. COLUMN_NAME (String) column name
    8. + *
    9. KEY_SEQ (short) the column index of this column (1,2,...)
    10. + *
    11. PK_NAME (String) the name of the primary key index
    12. + *
    * - * @param catalogPattern null or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @return the list of primary key columns * @throws SQLException if the connection is closed */ @Override - public ResultSet getPrimaryKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getPrimaryKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getPrimaryKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "IFNULL(CONSTRAINT_NAME, INDEX_NAME) PK_NAME " - + "FROM INFORMATION_SCHEMA.INDEXES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME = ? " - + "AND PRIMARY_KEY = TRUE " - + "ORDER BY COLUMN_NAME"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getPrimaryKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -495,50 +366,67 @@ public boolean isReadOnly() throws SQLException { } /** - * Checks if NULL is sorted high (bigger than anything that is not null). 
+ * Checks if NULL values are sorted high (bigger than anything that is not + * null). * - * @return false by default; true if the system property h2.sortNullsHigh is - * set to true + * @return if NULL values are sorted high */ @Override - public boolean nullsAreSortedHigh() { - debugCodeCall("nullsAreSortedHigh"); - return SysProperties.SORT_NULLS_HIGH; + public boolean nullsAreSortedHigh() throws SQLException { + try { + debugCodeCall("nullsAreSortedHigh"); + return meta.defaultNullOrdering() == DefaultNullOrdering.HIGH; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted low (smaller than anything that is not null). + * Checks if NULL values are sorted low (smaller than anything that is not + * null). * - * @return true by default; false if the system property h2.sortNullsHigh is - * set to true + * @return if NULL values are sorted low */ @Override - public boolean nullsAreSortedLow() { - debugCodeCall("nullsAreSortedLow"); - return !SysProperties.SORT_NULLS_HIGH; + public boolean nullsAreSortedLow() throws SQLException { + try { + debugCodeCall("nullsAreSortedLow"); + return meta.defaultNullOrdering() == DefaultNullOrdering.LOW; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted at the beginning (no matter if ASC or DESC is - * used). + * Checks if NULL values are sorted at the beginning (no matter if ASC or + * DESC is used). * - * @return false + * @return if NULL values are sorted at the beginning */ @Override - public boolean nullsAreSortedAtStart() { - debugCodeCall("nullsAreSortedAtStart"); - return false; + public boolean nullsAreSortedAtStart() throws SQLException { + try { + debugCodeCall("nullsAreSortedAtStart"); + return meta.defaultNullOrdering() == DefaultNullOrdering.FIRST; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Checks if NULL is sorted at the end (no matter if ASC or DESC is used). 
+ * Checks if NULL values are sorted at the end (no matter if ASC or DESC is + * used). * - * @return false + * @return if NULL values are sorted at the end */ @Override - public boolean nullsAreSortedAtEnd() { - debugCodeCall("nullsAreSortedAtEnd"); - return false; + public boolean nullsAreSortedAtEnd() throws SQLException { + try { + debugCodeCall("nullsAreSortedAtEnd"); + return meta.defaultNullOrdering() == DefaultNullOrdering.LAST; + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -558,20 +446,21 @@ public Connection getConnection() { * procedures with the same name, each with a different number of input * parameters. * - *
      - *
    • 1 PROCEDURE_CAT (String) catalog
    • - *
    • 2 PROCEDURE_SCHEM (String) schema
    • - *
    • 3 PROCEDURE_NAME (String) name
    • - *
    • 4 NUM_INPUT_PARAMS (int) the number of arguments
    • - *
    • 5 NUM_OUTPUT_PARAMS (int) for future use, always 0
    • - *
    • 6 NUM_RESULT_SETS (int) for future use, always 0
    • - *
    • 7 REMARKS (String) description
    • - *
    • 8 PROCEDURE_TYPE (short) if this procedure returns a result - * (procedureNoResult or procedureReturnsResult)
    • - *
    • 9 SPECIFIC_NAME (String) name
    • - *
    - * - * @param catalogPattern null or the catalog name + *
      + *
    1. PROCEDURE_CAT (String) catalog
    2. + *
    3. PROCEDURE_SCHEM (String) schema
    4. + *
    5. PROCEDURE_NAME (String) name
    6. + *
    7. reserved
    8. + *
    9. reserved
    10. + *
    11. reserved
    12. + *
    13. REMARKS (String) description
    14. + *
    15. PROCEDURE_TYPE (short) if this procedure returns a result + * (procedureNoResult or procedureReturnsResult)
    16. + *
    17. SPECIFIC_NAME (String) non-ambiguous name to distinguish + * overloads
    18. + *
    + * + * @param catalog null or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param procedureNamePattern the procedure name pattern @@ -579,38 +468,16 @@ public Connection getConnection() { * @throws SQLException if the connection is closed */ @Override - public ResultSet getProcedures(String catalogPattern, String schemaPattern, + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { try { if (isDebugEnabled()) { debugCode("getProcedures(" - +quote(catalogPattern)+", " + +quote(catalog)+", " +quote(schemaPattern)+", " - +quote(procedureNamePattern)+");"); + +quote(procedureNamePattern)+')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ALIAS_CATALOG PROCEDURE_CAT, " - + "ALIAS_SCHEMA PROCEDURE_SCHEM, " - + "ALIAS_NAME PROCEDURE_NAME, " - + "COLUMN_COUNT NUM_INPUT_PARAMS, " - + "ZERO() NUM_OUTPUT_PARAMS, " - + "ZERO() NUM_RESULT_SETS, " - + "REMARKS, " - + "RETURNS_RESULT PROCEDURE_TYPE, " - + "ALIAS_NAME SPECIFIC_NAME " - + "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES " - + "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? " - + "AND ALIAS_SCHEMA LIKE ? ESCAPE ? " - + "AND ALIAS_NAME LIKE ? ESCAPE ? " - + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, getPattern(procedureNamePattern)); - prep.setString(6, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getProcedures(catalog, schemaPattern, procedureNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -622,34 +489,35 @@ public ResultSet getProcedures(String catalogPattern, String schemaPattern, * There are potentially multiple procedures with the same name, each with a * different number of input parameters. * - *
      - *
    • 1 PROCEDURE_CAT (String) catalog
    • - *
    • 2 PROCEDURE_SCHEM (String) schema
    • - *
    • 3 PROCEDURE_NAME (String) name
    • - *
    • 4 COLUMN_NAME (String) column name
    • - *
    • 5 COLUMN_TYPE (short) column type + *
        + *
      1. PROCEDURE_CAT (String) catalog
      2. + *
      3. PROCEDURE_SCHEM (String) schema
      4. + *
      5. PROCEDURE_NAME (String) name
      6. + *
      7. COLUMN_NAME (String) column name
      8. + *
      9. COLUMN_TYPE (short) column type * (always DatabaseMetaData.procedureColumnIn)
      10. - *
      11. 6 DATA_TYPE (short) sql type
      12. - *
      13. 7 TYPE_NAME (String) type name
      14. - *
      15. 8 PRECISION (int) precision
      16. - *
      17. 9 LENGTH (int) length
      18. - *
      19. 10 SCALE (short) scale
      20. - *
      21. 11 RADIX (int) always 10
      22. - *
      23. 12 NULLABLE (short) nullable + *
      24. DATA_TYPE (short) sql type
      25. + *
      26. TYPE_NAME (String) type name
      27. + *
      28. PRECISION (int) precision
      29. + *
      30. LENGTH (int) length
      31. + *
      32. SCALE (short) scale
      33. + *
      34. RADIX (int)
      35. + *
      36. NULLABLE (short) nullable * (DatabaseMetaData.columnNoNulls for primitive data types, * DatabaseMetaData.columnNullable otherwise)
      37. - *
      38. 13 REMARKS (String) description
      39. - *
      40. 14 COLUMN_DEF (String) always null
      41. - *
      42. 15 SQL_DATA_TYPE (int) for future use, always 0
      43. - *
      44. 16 SQL_DATETIME_SUB (int) for future use, always 0
      45. - *
      46. 17 CHAR_OCTET_LENGTH (int) always null
      47. - *
      48. 18 ORDINAL_POSITION (int) the parameter index - * starting from 1 (0 is the return value)
      49. - *
      50. 19 IS_NULLABLE (String) always "YES"
      51. - *
      52. 20 SPECIFIC_NAME (String) name
      53. - *
    - * - * @param catalogPattern null or the catalog name + *
  • REMARKS (String) description
  • + *
  • COLUMN_DEF (String) always null
  • + *
  • SQL_DATA_TYPE (int) for future use
  • + *
  • SQL_DATETIME_SUB (int) for future use
  • + *
  • CHAR_OCTET_LENGTH (int)
  • + *
  • ORDINAL_POSITION (int) the parameter index + * starting from 1 (0 is the return value)
  • + *
  • IS_NULLABLE (String) always "YES"
  • + *
  • SPECIFIC_NAME (String) non-ambiguous procedure name to distinguish + * overloads
  • + * + * + * @param catalog null or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param procedureNamePattern the procedure name pattern @@ -658,55 +526,16 @@ public ResultSet getProcedures(String catalogPattern, String schemaPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getProcedureColumns(String catalogPattern, - String schemaPattern, String procedureNamePattern, + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getProcedureColumns(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(procedureNamePattern)+", " - +quote(columnNamePattern)+");"); + debugCode("getProcedureColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(procedureNamePattern) + ", " + quote(columnNamePattern) + ')'); } checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ALIAS_CATALOG PROCEDURE_CAT, " - + "ALIAS_SCHEMA PROCEDURE_SCHEM, " - + "ALIAS_NAME PROCEDURE_NAME, " - + "COLUMN_NAME, " - + "COLUMN_TYPE, " - + "DATA_TYPE, " - + "TYPE_NAME, " - + "PRECISION, " - + "PRECISION LENGTH, " - + "SCALE, " - + "RADIX, " - + "NULLABLE, " - + "REMARKS, " - + "COLUMN_DEFAULT COLUMN_DEF, " - + "ZERO() SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "ZERO() CHAR_OCTET_LENGTH, " - + "POS ORDINAL_POSITION, " - + "? IS_NULLABLE, " - + "ALIAS_NAME SPECIFIC_NAME " - + "FROM INFORMATION_SCHEMA.FUNCTION_COLUMNS " - + "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? " - + "AND ALIAS_SCHEMA LIKE ? ESCAPE ? " - + "AND ALIAS_NAME LIKE ? ESCAPE ? " - + "AND COLUMN_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION"); - prep.setString(1, "YES"); - prep.setString(2, getCatalogPattern(catalogPattern)); - prep.setString(3, "\\"); - prep.setString(4, getSchemaPattern(schemaPattern)); - prep.setString(5, "\\"); - prep.setString(6, getPattern(procedureNamePattern)); - prep.setString(7, "\\"); - prep.setString(8, getPattern(columnNamePattern)); - prep.setString(9, "\\"); - return prep.executeQuery(); + return getResultSet( + meta.getProcedureColumns(catalog, schemaPattern, procedureNamePattern, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -716,11 +545,10 @@ public ResultSet getProcedureColumns(String catalogPattern, * Gets the list of schemas. * The result set is sorted by TABLE_SCHEM. * - *
      - *
    • 1 TABLE_SCHEM (String) schema name - *
    • 2 TABLE_CATALOG (String) catalog name - *
    • 3 IS_DEFAULT (boolean) if this is the default schema - *
    + *
      + *
    1. TABLE_SCHEM (String) schema name
    2. + *
    3. TABLE_CATALOG (String) catalog name
    4. + *
    * * @return the schema list * @throws SQLException if the connection is closed @@ -729,15 +557,7 @@ public ResultSet getProcedureColumns(String catalogPattern, public ResultSet getSchemas() throws SQLException { try { debugCodeCall("getSchemas"); - checkClosed(); - PreparedStatement prep = conn - .prepareAutoCloseStatement("SELECT " - + "SCHEMA_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_CATALOG, " - +" IS_DEFAULT " - + "FROM INFORMATION_SCHEMA.SCHEMATA " - + "ORDER BY SCHEMA_NAME"); - return prep.executeQuery(); + return getResultSet(meta.getSchemas()); } catch (Exception e) { throw logAndConvert(e); } @@ -747,9 +567,9 @@ public ResultSet getSchemas() throws SQLException { * Gets the list of catalogs. * The result set is sorted by TABLE_CAT. * - *
      - *
    • 1 TABLE_CAT (String) catalog name - *
    + *
      + *
    1. TABLE_CAT (String) catalog name
    2. + *
    * * @return the catalog list * @throws SQLException if the connection is closed @@ -758,24 +578,18 @@ public ResultSet getSchemas() throws SQLException { public ResultSet getCatalogs() throws SQLException { try { debugCodeCall("getCatalogs"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement( - "SELECT CATALOG_NAME TABLE_CAT " - + "FROM INFORMATION_SCHEMA.CATALOGS"); - return prep.executeQuery(); + return getResultSet(meta.getCatalogs()); } catch (Exception e) { throw logAndConvert(e); } } /** - * Gets the list of table types. This call returns a result set with three - * records: "SYSTEM TABLE", "TABLE", "and "VIEW". - * The result set is sorted by TABLE_TYPE. - * - *
      - *
    • 1 TABLE_TYPE (String) table type - *
    + * Gets the list of table types. This call returns a result set with five + * records: "SYSTEM TABLE", "TABLE", "VIEW", "TABLE LINK" and "EXTERNAL". + *
      + *
    1. TABLE_TYPE (String) table type
    2. + *
    * * @return the table types * @throws SQLException if the connection is closed @@ -784,12 +598,7 @@ public ResultSet getCatalogs() throws SQLException { public ResultSet getTableTypes() throws SQLException { try { debugCodeCall("getTableTypes"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TYPE TABLE_TYPE " - + "FROM INFORMATION_SCHEMA.TABLE_TYPES " - + "ORDER BY TABLE_TYPE"); - return prep.executeQuery(); + return getResultSet(meta.getTableTypes()); } catch (Exception e) { throw logAndConvert(e); } @@ -799,21 +608,21 @@ public ResultSet getTableTypes() throws SQLException { * Gets the list of column privileges. The result set is sorted by * COLUMN_NAME and PRIVILEGE * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 COLUMN_NAME (String) column name
    • - *
    • 5 GRANTOR (String) grantor of access
    • - *
    • 6 GRANTEE (String) grantee of access
    • - *
    • 7 PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES + *
        + *
      1. TABLE_CAT (String) table catalog
      2. + *
      3. TABLE_SCHEM (String) table schema
      4. + *
      5. TABLE_NAME (String) table name
      6. + *
      7. COLUMN_NAME (String) column name
      8. + *
      9. GRANTOR (String) grantor of access
      10. + *
      11. GRANTEE (String) grantee of access
      12. + *
      13. PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES * (only one per row)
      14. - *
      15. 8 IS_GRANTABLE (String) YES means the grantee can grant access to + *
      16. IS_GRANTABLE (String) YES means the grantee can grant access to * others
      17. - *
    + * * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null (to get all objects) or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) * @param table a table name (uppercase for unquoted names) * @param columnNamePattern null (to get all objects) or a column name @@ -822,41 +631,14 @@ public ResultSet getTableTypes() throws SQLException { * @throws SQLException if the connection is closed */ @Override - public ResultSet getColumnPrivileges(String catalogPattern, - String schemaPattern, String table, String columnNamePattern) + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getColumnPrivileges(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(table)+", " - +quote(columnNamePattern)+");"); + debugCode("getColumnPrivileges(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + + quote(columnNamePattern) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "COLUMN_NAME, " - + "GRANTOR, " - + "GRANTEE, " - + "PRIVILEGE_TYPE PRIVILEGE, " - + "IS_GRANTABLE " - + "FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME = ? " - + "AND COLUMN_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY COLUMN_NAME, PRIVILEGE"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, table); - prep.setString(6, getPattern(columnNamePattern)); - prep.setString(7, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getColumnPrivileges(catalog, schema, table, columnNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -866,19 +648,19 @@ public ResultSet getColumnPrivileges(String catalogPattern, * Gets the list of table privileges. The result set is sorted by * TABLE_SCHEM, TABLE_NAME, and PRIVILEGE. * - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 GRANTOR (String) grantor of access
    • - *
    • 5 GRANTEE (String) grantee of access
    • - *
    • 6 PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES - * (only one per row)
    • - *
    • 7 IS_GRANTABLE (String) YES means the grantee can grant access to - * others
    • - *
    + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. GRANTOR (String) grantor of access
    8. + *
    9. GRANTEE (String) grantee of access
    10. + *
    11. PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES + * (only one per row)
    12. + *
    13. IS_GRANTABLE (String) YES means the grantee can grant access to + * others
    14. + *
    * - * @param catalogPattern null (to get all objects) or the catalog name + * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name * (uppercase for unquoted names) * @param tableNamePattern null (to get all objects) or a table name @@ -887,36 +669,15 @@ public ResultSet getColumnPrivileges(String catalogPattern, * @throws SQLException if the connection is closed */ @Override - public ResultSet getTablePrivileges(String catalogPattern, - String schemaPattern, String tableNamePattern) throws SQLException { + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTablePrivileges(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableNamePattern)+");"); + debugCode("getTablePrivileges(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ')'); } checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TABLE_CATALOG TABLE_CAT, " - + "TABLE_SCHEMA TABLE_SCHEM, " - + "TABLE_NAME, " - + "GRANTOR, " - + "GRANTEE, " - + "PRIVILEGE_TYPE PRIVILEGE, " - + "IS_GRANTABLE " - + "FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES " - + "WHERE TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND TABLE_NAME LIKE ? ESCAPE ? 
" - + "ORDER BY TABLE_SCHEM, TABLE_NAME, PRIVILEGE"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, getPattern(tableNamePattern)); - prep.setString(6, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getTablePrivileges(catalog, schemaPattern, tableNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -926,68 +687,36 @@ public ResultSet getTablePrivileges(String catalogPattern, * Gets the list of columns that best identifier a row in a table. * The list is ordered by SCOPE. * - *
      - *
    • 1 SCOPE (short) scope of result (always bestRowSession) - *
    • 2 COLUMN_NAME (String) column name - *
    • 3 DATA_TYPE (short) SQL data type, see also java.sql.Types - *
    • 4 TYPE_NAME (String) type name - *
    • 5 COLUMN_SIZE (int) precision - * (values larger than 2 GB are returned as 2 GB) - *
    • 6 BUFFER_LENGTH (int) unused - *
    • 7 DECIMAL_DIGITS (short) scale - *
    • 8 PSEUDO_COLUMN (short) (always bestRowNotPseudo) - *
    + *
      + *
    1. SCOPE (short) scope of result (always bestRowSession)
    2. + *
    3. COLUMN_NAME (String) column name
    4. + *
    5. DATA_TYPE (short) SQL data type, see also java.sql.Types
    6. + *
    7. TYPE_NAME (String) type name
    8. + *
    9. COLUMN_SIZE (int) precision + * (values larger than 2 GB are returned as 2 GB)
    10. + *
    11. BUFFER_LENGTH (int) unused
    12. + *
    13. DECIMAL_DIGITS (short) scale
    14. + *
    15. PSEUDO_COLUMN (short) (always bestRowNotPseudo)
    16. + *
    * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern null (to get all objects) or a schema name + * @param catalog null (to get all objects) or the catalog name + * @param schema null (to get all objects) or a schema name * (uppercase for unquoted names) - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @param scope ignored * @param nullable ignored * @return the primary key index * @throws SQLException if the connection is closed */ @Override - public ResultSet getBestRowIdentifier(String catalogPattern, - String schemaPattern, String tableName, int scope, boolean nullable) + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBestRowIdentifier(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+", " - +scope+", "+nullable+");"); + debugCode("getBestRowIdentifier(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ", " + + scope + ", " + nullable + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CAST(? AS SMALLINT) SCOPE, " - + "C.COLUMN_NAME, " - + "C.DATA_TYPE, " - + "C.TYPE_NAME, " - + "C.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " - + "C.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " - + "CAST(C.NUMERIC_SCALE AS SMALLINT) DECIMAL_DIGITS, " - + "CAST(? AS SMALLINT) PSEUDO_COLUMN " - + "FROM INFORMATION_SCHEMA.INDEXES I, " - +" INFORMATION_SCHEMA.COLUMNS C " - + "WHERE C.TABLE_NAME = I.TABLE_NAME " - + "AND C.COLUMN_NAME = I.COLUMN_NAME " - + "AND C.TABLE_CATALOG LIKE ? ESCAPE ? " - + "AND C.TABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND C.TABLE_NAME = ? 
" - + "AND I.PRIMARY_KEY = TRUE " - + "ORDER BY SCOPE"); - // SCOPE - prep.setInt(1, DatabaseMetaData.bestRowSession); - // PSEUDO_COLUMN - prep.setInt(2, DatabaseMetaData.bestRowNotPseudo); - prep.setString(3, getCatalogPattern(catalogPattern)); - prep.setString(4, "\\"); - prep.setString(5, getSchemaPattern(schemaPattern)); - prep.setString(6, "\\"); - prep.setString(7, tableName); - return prep.executeQuery(); + return getResultSet(meta.getBestRowIdentifier(catalog, schema, table, scope, nullable)); } catch (Exception e) { throw logAndConvert(e); } @@ -997,47 +726,31 @@ public ResultSet getBestRowIdentifier(String catalogPattern, * Get the list of columns that are update when any value is updated. * The result set is always empty. * - *
      - *
    • 1 SCOPE (int) not used - *
    • 2 COLUMN_NAME (String) column name - *
    • 3 DATA_TYPE (int) SQL data type - see also java.sql.Types - *
    • 4 TYPE_NAME (String) data type name - *
    • 5 COLUMN_SIZE (int) precision - * (values larger than 2 GB are returned as 2 GB) - *
    • 6 BUFFER_LENGTH (int) length (bytes) - *
    • 7 DECIMAL_DIGITS (int) scale - *
    • 8 PSEUDO_COLUMN (int) is this column a pseudo column - *
    + *
      + *
    1. 1 SCOPE (int) not used
    2. + *
    3. 2 COLUMN_NAME (String) column name
    4. + *
    5. 3 DATA_TYPE (int) SQL data type - see also java.sql.Types
    6. + *
    7. 4 TYPE_NAME (String) data type name
    8. + *
    9. 5 COLUMN_SIZE (int) precision + * (values larger than 2 GB are returned as 2 GB)
    10. + *
    11. 6 BUFFER_LENGTH (int) length (bytes)
    12. + *
    13. 7 DECIMAL_DIGITS (int) scale
    14. + *
    15. 8 PSEUDO_COLUMN (int) is this column a pseudo column
    16. + *
    * * @param catalog null (to get all objects) or the catalog name * @param schema null (to get all objects) or a schema name - * @param tableName table name (must be specified) + * @param table table name (must be specified) * @return an empty result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getVersionColumns(String catalog, String schema, - String tableName) throws SQLException { + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getVersionColumns(" - +quote(catalog)+", " - +quote(schema)+", " - +quote(tableName)+");"); + debugCode("getVersionColumns(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "ZERO() SCOPE, " - + "COLUMN_NAME, " - + "CAST(DATA_TYPE AS INT) DATA_TYPE, " - + "TYPE_NAME, " - + "NUMERIC_PRECISION COLUMN_SIZE, " - + "NUMERIC_PRECISION BUFFER_LENGTH, " - + "NUMERIC_PRECISION DECIMAL_DIGITS, " - + "ZERO() PSEUDO_COLUMN " - + "FROM INFORMATION_SCHEMA.COLUMNS " - + "WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getVersionColumns(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1048,69 +761,39 @@ public ResultSet getVersionColumns(String catalog, String schema, * result set is sorted by PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, * FK_NAME, KEY_SEQ. * - *
      - *
    • 1 PKTABLE_CAT (String) primary catalog
    • - *
    • 2 PKTABLE_SCHEM (String) primary schema
    • - *
    • 3 PKTABLE_NAME (String) primary table
    • - *
    • 4 PKCOLUMN_NAME (String) primary column
    • - *
    • 5 FKTABLE_CAT (String) foreign catalog
    • - *
    • 6 FKTABLE_SCHEM (String) foreign schema
    • - *
    • 7 FKTABLE_NAME (String) foreign table
    • - *
    • 8 FKCOLUMN_NAME (String) foreign column
    • - *
    • 9 KEY_SEQ (short) sequence number (1, 2, ...)
    • - *
    • 10 UPDATE_RULE (short) action on update (see - * DatabaseMetaData.importedKey...)
    • - *
    • 11 DELETE_RULE (short) action on delete (see - * DatabaseMetaData.importedKey...)
    • - *
    • 12 FK_NAME (String) foreign key name
    • - *
    • 13 PK_NAME (String) primary key name
    • - *
    • 14 DEFERRABILITY (short) deferrable or not (always - * importedKeyNotDeferrable)
    • - *
    + *
      + *
    1. PKTABLE_CAT (String) primary catalog
    2. + *
    3. PKTABLE_SCHEM (String) primary schema
    4. + *
    5. PKTABLE_NAME (String) primary table
    6. + *
    7. PKCOLUMN_NAME (String) primary column
    8. + *
    9. FKTABLE_CAT (String) foreign catalog
    10. + *
    11. FKTABLE_SCHEM (String) foreign schema
    12. + *
    13. FKTABLE_NAME (String) foreign table
    14. + *
    15. FKCOLUMN_NAME (String) foreign column
    16. + *
    17. KEY_SEQ (short) sequence number (1, 2, ...)
    18. + *
    19. UPDATE_RULE (short) action on update (see + * DatabaseMetaData.importedKey...)
    20. + *
    21. DELETE_RULE (short) action on delete (see + * DatabaseMetaData.importedKey...)
    22. + *
    23. FK_NAME (String) foreign key name
    24. + *
    25. PK_NAME (String) primary key name
    26. + *
    27. DEFERRABILITY (short) deferrable or not (always + * importedKeyNotDeferrable)
    28. + *
    * - * @param catalogPattern null (to get all objects) or the catalog name - * @param schemaPattern the schema name of the foreign table - * @param tableName the name of the foreign table + * @param catalog null (to get all objects) or the catalog name + * @param schema the schema name of the foreign table + * @param table the name of the foreign table * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getImportedKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getImportedKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getImportedKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE FKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND FKTABLE_NAME = ? 
" - + "ORDER BY PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getImportedKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1121,69 +804,39 @@ public ResultSet getImportedKeys(String catalogPattern, * set is sorted by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, * KEY_SEQ. * - *
      - *
    • 1 PKTABLE_CAT (String) primary catalog
    • - *
    • 2 PKTABLE_SCHEM (String) primary schema
    • - *
    • 3 PKTABLE_NAME (String) primary table
    • - *
    • 4 PKCOLUMN_NAME (String) primary column
    • - *
    • 5 FKTABLE_CAT (String) foreign catalog
    • - *
    • 6 FKTABLE_SCHEM (String) foreign schema
    • - *
    • 7 FKTABLE_NAME (String) foreign table
    • - *
    • 8 FKCOLUMN_NAME (String) foreign column
    • - *
    • 9 KEY_SEQ (short) sequence number (1,2,...)
    • - *
    • 10 UPDATE_RULE (short) action on update (see - * DatabaseMetaData.importedKey...)
    • - *
    • 11 DELETE_RULE (short) action on delete (see - * DatabaseMetaData.importedKey...)
    • - *
    • 12 FK_NAME (String) foreign key name
    • - *
    • 13 PK_NAME (String) primary key name
    • - *
    • 14 DEFERRABILITY (short) deferrable or not (always - * importedKeyNotDeferrable)
    • - *
    - * - * @param catalogPattern null or the catalog name - * @param schemaPattern the schema name of the primary table - * @param tableName the name of the primary table + *
      + *
    1. PKTABLE_CAT (String) primary catalog
    2. + *
    3. PKTABLE_SCHEM (String) primary schema
    4. + *
    5. PKTABLE_NAME (String) primary table
    6. + *
    7. PKCOLUMN_NAME (String) primary column
    8. + *
    9. FKTABLE_CAT (String) foreign catalog
    10. + *
    11. FKTABLE_SCHEM (String) foreign schema
    12. + *
    13. FKTABLE_NAME (String) foreign table
    14. + *
    15. FKCOLUMN_NAME (String) foreign column
    16. + *
    17. KEY_SEQ (short) sequence number (1,2,...)
    18. + *
    19. UPDATE_RULE (short) action on update (see + * DatabaseMetaData.importedKey...)
    20. + *
    21. DELETE_RULE (short) action on delete (see + * DatabaseMetaData.importedKey...)
    22. + *
    23. FK_NAME (String) foreign key name
    24. + *
    25. PK_NAME (String) primary key name
    26. + *
    27. DEFERRABILITY (short) deferrable or not (always + * importedKeyNotDeferrable)
    28. + *
    + * + * @param catalog null or the catalog name + * @param schema the schema name of the primary table + * @param table the name of the primary table * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getExportedKeys(String catalogPattern, - String schemaPattern, String tableName) throws SQLException { + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getExportedKeys(" - +quote(catalogPattern)+", " - +quote(schemaPattern)+", " - +quote(tableName)+");"); + debugCode("getExportedKeys(" + quote(catalog) + ", " + quote(schema) + ", " + quote(table) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND PKTABLE_NAME = ? " - + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, tableName); - return prep.executeQuery(); + return getResultSet(meta.getExportedKeys(catalog, schema, table)); } catch (Exception e) { throw logAndConvert(e); } @@ -1195,86 +848,48 @@ public ResultSet getExportedKeys(String catalogPattern, * result set is sorted by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, * FK_NAME, KEY_SEQ. * - *
      - *
    • 1 PKTABLE_CAT (String) primary catalog
    • - *
    • 2 PKTABLE_SCHEM (String) primary schema
    • - *
    • 3 PKTABLE_NAME (String) primary table
    • - *
    • 4 PKCOLUMN_NAME (String) primary column
    • - *
    • 5 FKTABLE_CAT (String) foreign catalog
    • - *
    • 6 FKTABLE_SCHEM (String) foreign schema
    • - *
    • 7 FKTABLE_NAME (String) foreign table
    • - *
    • 8 FKCOLUMN_NAME (String) foreign column
    • - *
    • 9 KEY_SEQ (short) sequence number (1,2,...)
    • - *
    • 10 UPDATE_RULE (short) action on update (see - * DatabaseMetaData.importedKey...)
    • - *
    • 11 DELETE_RULE (short) action on delete (see - * DatabaseMetaData.importedKey...)
    • - *
    • 12 FK_NAME (String) foreign key name
    • - *
    • 13 PK_NAME (String) primary key name
    • - *
    • 14 DEFERRABILITY (short) deferrable or not (always - * importedKeyNotDeferrable)
    • - *
    - * - * @param primaryCatalogPattern null or the catalog name - * @param primarySchemaPattern the schema name of the primary table + *
      + *
    1. PKTABLE_CAT (String) primary catalog
    2. + *
    3. PKTABLE_SCHEM (String) primary schema
    4. + *
    5. PKTABLE_NAME (String) primary table
    6. + *
    7. PKCOLUMN_NAME (String) primary column
    8. + *
    9. FKTABLE_CAT (String) foreign catalog
    10. + *
    11. FKTABLE_SCHEM (String) foreign schema
    12. + *
    13. FKTABLE_NAME (String) foreign table
    14. + *
    15. FKCOLUMN_NAME (String) foreign column
    16. + *
    17. KEY_SEQ (short) sequence number (1,2,...)
    18. + *
    19. UPDATE_RULE (short) action on update (see + * DatabaseMetaData.importedKey...)
    20. + *
    21. DELETE_RULE (short) action on delete (see + * DatabaseMetaData.importedKey...)
    22. + *
    23. FK_NAME (String) foreign key name
    24. + *
    25. PK_NAME (String) primary key name
    26. + *
    27. DEFERRABILITY (short) deferrable or not (always + * importedKeyNotDeferrable)
    28. + *
    + * + * @param primaryCatalog null or the catalog name + * @param primarySchema the schema name of the primary table * (optional) * @param primaryTable the name of the primary table (must be specified) - * @param foreignCatalogPattern null or the catalog name - * @param foreignSchemaPattern the schema name of the foreign table + * @param foreignCatalog null or the catalog name + * @param foreignSchema the schema name of the foreign table * (optional) * @param foreignTable the name of the foreign table (must be specified) * @return the result set * @throws SQLException if the connection is closed */ @Override - public ResultSet getCrossReference(String primaryCatalogPattern, - String primarySchemaPattern, String primaryTable, String foreignCatalogPattern, - String foreignSchemaPattern, String foreignTable) throws SQLException { + public ResultSet getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getCrossReference(" - +quote(primaryCatalogPattern)+", " - +quote(primarySchemaPattern)+", " - +quote(primaryTable)+", " - +quote(foreignCatalogPattern)+", " - +quote(foreignSchemaPattern)+", " - +quote(foreignTable)+");"); + debugCode("getCrossReference(" + quote(primaryCatalog) + ", " + quote(primarySchema) + ", " + + quote(primaryTable) + ", " + quote(foreignCatalog) + ", " + quote(foreignSchema) + ", " + + quote(foreignTable) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "PKTABLE_CATALOG PKTABLE_CAT, " - + "PKTABLE_SCHEMA PKTABLE_SCHEM, " - + "PKTABLE_NAME PKTABLE_NAME, " - + "PKCOLUMN_NAME, " - + "FKTABLE_CATALOG FKTABLE_CAT, " - + "FKTABLE_SCHEMA FKTABLE_SCHEM, " - + "FKTABLE_NAME, " - + "FKCOLUMN_NAME, " - + "ORDINAL_POSITION KEY_SEQ, " - + "UPDATE_RULE, " - + "DELETE_RULE, " - + "FK_NAME, " - + "PK_NAME, " - + "DEFERRABILITY " - + "FROM 
INFORMATION_SCHEMA.CROSS_REFERENCES " - + "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND PKTABLE_NAME = ? " - + "AND FKTABLE_CATALOG LIKE ? ESCAPE ? " - + "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? " - + "AND FKTABLE_NAME = ? " - + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ"); - prep.setString(1, getCatalogPattern(primaryCatalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(primarySchemaPattern)); - prep.setString(4, "\\"); - prep.setString(5, primaryTable); - prep.setString(6, getCatalogPattern(foreignCatalogPattern)); - prep.setString(7, "\\"); - prep.setString(8, getSchemaPattern(foreignSchemaPattern)); - prep.setString(9, "\\"); - prep.setString(10, foreignTable); - return prep.executeQuery(); + return getResultSet(meta.getCrossReference(primaryCatalog, primarySchema, primaryTable, foreignCatalog, + foreignSchema, foreignTable)); } catch (Exception e) { throw logAndConvert(e); } @@ -1284,15 +899,15 @@ public ResultSet getCrossReference(String primaryCatalogPattern, * Gets the list of user-defined data types. * This call returns an empty result set. * - *
      - *
    • 1 TYPE_CAT (String) catalog - *
    • 2 TYPE_SCHEM (String) schema - *
    • 3 TYPE_NAME (String) type name - *
    • 4 CLASS_NAME (String) Java class - *
    • 5 DATA_TYPE (short) SQL Type - see also java.sql.Types - *
    • 6 REMARKS (String) description - *
    • 7 BASE_TYPE (short) base type - see also java.sql.Types - *
    + *
      + *
    1. TYPE_CAT (String) catalog
    2. + *
    3. TYPE_SCHEM (String) schema
    4. + *
    5. TYPE_NAME (String) type name
    6. + *
    7. CLASS_NAME (String) Java class
    8. + *
    9. DATA_TYPE (short) SQL Type - see also java.sql.Types
    10. + *
    11. REMARKS (String) description
    12. + *
    13. BASE_TYPE (short) base type - see also java.sql.Types
    14. + *
    * * @param catalog ignored * @param schemaPattern ignored @@ -1310,19 +925,9 @@ public ResultSet getUDTs(String catalog, String schemaPattern, +quote(catalog)+", " +quote(schemaPattern)+", " +quote(typeNamePattern)+", " - +quoteIntArray(types)+");"); + +quoteIntArray(types)+')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CAST(NULL AS VARCHAR) TYPE_CAT, " - + "CAST(NULL AS VARCHAR) TYPE_SCHEM, " - + "CAST(NULL AS VARCHAR) TYPE_NAME, " - + "CAST(NULL AS VARCHAR) CLASS_NAME, " - + "CAST(NULL AS SMALLINT) DATA_TYPE, " - + "CAST(NULL AS VARCHAR) REMARKS, " - + "CAST(NULL AS SMALLINT) BASE_TYPE " - + "FROM DUAL WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getUDTs(catalog, schemaPattern, typeNamePattern, types)); } catch (Exception e) { throw logAndConvert(e); } @@ -1333,27 +938,26 @@ public ResultSet getUDTs(String catalog, String schemaPattern, * afterwards by how closely the data type maps to the corresponding JDBC * SQL type (best match first). * - *
      - *
    • 1 TYPE_NAME (String) type name
    • - *
    • 2 DATA_TYPE (short) SQL data type - see also java.sql.Types
    • - *
    • 3 PRECISION (int) maximum precision
    • - *
    • 4 LITERAL_PREFIX (String) prefix used to quote a literal
    • - *
    • 5 LITERAL_SUFFIX (String) suffix used to quote a literal
    • - *
    • 6 CREATE_PARAMS (String) parameters used (may be null)
    • - *
    • 7 NULLABLE (short) typeNoNulls (NULL not allowed) or typeNullable - *
    • - *
    • 8 CASE_SENSITIVE (boolean) case sensitive
    • - *
    • 9 SEARCHABLE (short) typeSearchable
    • - *
    • 10 UNSIGNED_ATTRIBUTE (boolean) unsigned
    • - *
    • 11 FIXED_PREC_SCALE (boolean) fixed precision
    • - *
    • 12 AUTO_INCREMENT (boolean) auto increment
    • - *
    • 13 LOCAL_TYPE_NAME (String) localized version of the data type
    • - *
    • 14 MINIMUM_SCALE (short) minimum scale
    • - *
    • 15 MAXIMUM_SCALE (short) maximum scale
    • - *
    • 16 SQL_DATA_TYPE (int) unused
    • - *
    • 17 SQL_DATETIME_SUB (int) unused
    • - *
    • 18 NUM_PREC_RADIX (int) 2 for binary, 10 for decimal
    • - *
    + *
      + *
    1. TYPE_NAME (String) type name
    2. + *
    3. DATA_TYPE (short) SQL data type - see also java.sql.Types
    4. + *
    5. PRECISION (int) maximum precision
    6. + *
    7. LITERAL_PREFIX (String) prefix used to quote a literal
    8. + *
    9. LITERAL_SUFFIX (String) suffix used to quote a literal
    10. + *
    11. CREATE_PARAMS (String) parameters used (may be null)
    12. + *
    13. NULLABLE (short) typeNoNulls (NULL not allowed) or typeNullable
    14. + *
    15. CASE_SENSITIVE (boolean) case sensitive
    16. + *
    17. SEARCHABLE (short) typeSearchable
    18. + *
    19. UNSIGNED_ATTRIBUTE (boolean) unsigned
    20. + *
    21. FIXED_PREC_SCALE (boolean) fixed precision
    22. + *
    23. AUTO_INCREMENT (boolean) auto increment
    24. + *
    25. LOCAL_TYPE_NAME (String) localized version of the data type
    26. + *
    27. MINIMUM_SCALE (short) minimum scale
    28. + *
    29. MAXIMUM_SCALE (short) maximum scale
    30. + *
    31. SQL_DATA_TYPE (int) unused
    32. + *
    33. SQL_DATETIME_SUB (int) unused
    34. + *
    35. NUM_PREC_RADIX (int) 2 for binary, 10 for decimal
    36. + *
    * * @return the list of data types * @throws SQLException if the connection is closed @@ -1362,30 +966,7 @@ public ResultSet getUDTs(String catalog, String schemaPattern, public ResultSet getTypeInfo() throws SQLException { try { debugCodeCall("getTypeInfo"); - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "TYPE_NAME, " - + "DATA_TYPE, " - + "PRECISION, " - + "PREFIX LITERAL_PREFIX, " - + "SUFFIX LITERAL_SUFFIX, " - + "PARAMS CREATE_PARAMS, " - + "NULLABLE, " - + "CASE_SENSITIVE, " - + "SEARCHABLE, " - + "FALSE UNSIGNED_ATTRIBUTE, " - + "FALSE FIXED_PREC_SCALE, " - + "AUTO_INCREMENT, " - + "TYPE_NAME LOCAL_TYPE_NAME, " - + "MINIMUM_SCALE, " - + "MAXIMUM_SCALE, " - + "DATA_TYPE SQL_DATA_TYPE, " - + "ZERO() SQL_DATETIME_SUB, " - + "RADIX NUM_PREC_RADIX " - + "FROM INFORMATION_SCHEMA.TYPE_INFO " - + "ORDER BY DATA_TYPE, POS"); - ResultSet rs = prep.executeQuery(); - return rs; + return getResultSet(meta.getTypeInfo()); } catch (Exception e) { throw logAndConvert(e); } @@ -1426,26 +1007,22 @@ public String getIdentifierQuoteString() { /** * Gets the comma-separated list of all SQL keywords that are not supported - * as table/column/index name, in addition to the SQL-92 keywords. The list - * returned is: - *
    -     * LIMIT,MINUS,ROWNUM,SYSDATE,SYSTIME,SYSTIMESTAMP,TODAY
    -     * 
    - * The complete list of keywords (including SQL-92 keywords) is: - *
    -     * CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT,
    -     * EXCEPT, EXISTS, FALSE, FOR, FROM, FULL, GROUP, HAVING, INNER,
    -     * INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, ON,
    -     * ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP,
    -     * TODAY, TRUE, UNION, UNIQUE, WHERE
    -     * 
    + * as unquoted identifiers, in addition to the SQL:2003 reserved words. + *

    + * List of keywords in H2 may depend on compatibility mode and other + * settings. + *

    * - * @return a list of additional the keywords + * @return a list of additional keywords */ @Override - public String getSQLKeywords() { - debugCodeCall("getSQLKeywords"); - return "LIMIT,MINUS,ROWNUM,SYSDATE,SYSTIME,SYSTIMESTAMP,TODAY"; + public String getSQLKeywords() throws SQLException { + try { + debugCodeCall("getSQLKeywords"); + return meta.getSQLKeywords(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1455,8 +1032,12 @@ public String getSQLKeywords() { */ @Override public String getNumericFunctions() throws SQLException { - debugCodeCall("getNumericFunctions"); - return getFunctions("Functions (Numeric)"); + try { + debugCodeCall("getNumericFunctions"); + return meta.getNumericFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1466,8 +1047,12 @@ public String getNumericFunctions() throws SQLException { */ @Override public String getStringFunctions() throws SQLException { - debugCodeCall("getStringFunctions"); - return getFunctions("Functions (String)"); + try { + debugCodeCall("getStringFunctions"); + return meta.getStringFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1477,8 +1062,12 @@ public String getStringFunctions() throws SQLException { */ @Override public String getSystemFunctions() throws SQLException { - debugCodeCall("getSystemFunctions"); - return getFunctions("Functions (System)"); + try { + debugCodeCall("getSystemFunctions"); + return meta.getSystemFunctions(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1488,34 +1077,9 @@ public String getSystemFunctions() throws SQLException { */ @Override public String getTimeDateFunctions() throws SQLException { - debugCodeCall("getTimeDateFunctions"); - return getFunctions("Functions (Time and Date)"); - } - - private String getFunctions(String section) throws SQLException { try { - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT TOPIC " - + "FROM 
INFORMATION_SCHEMA.HELP WHERE SECTION = ?"); - prep.setString(1, section); - ResultSet rs = prep.executeQuery(); - StatementBuilder buff = new StatementBuilder(); - while (rs.next()) { - String s = rs.getString(1).trim(); - String[] array = StringUtils.arraySplit(s, ',', true); - for (String a : array) { - buff.appendExceptFirst(","); - String f = a.trim(); - if (f.indexOf(' ') >= 0) { - // remove 'Function' from 'INSERT Function' - f = f.substring(0, f.indexOf(' ')).trim(); - } - buff.append(f); - } - } - rs.close(); - prep.close(); - return buff.toString(); + debugCodeCall("getTimeDateFunctions"); + return meta.getTimeDateFunctions(); } catch (Exception e) { throw logAndConvert(e); } @@ -1529,9 +1093,13 @@ private String getFunctions(String section) throws SQLException { * mode) */ @Override - public String getSearchStringEscape() { - debugCodeCall("getSearchStringEscape"); - return "\\"; + public String getSearchStringEscape() throws SQLException { + try { + debugCodeCall("getSearchStringEscape"); + return meta.getSearchStringEscape(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1548,6 +1116,7 @@ public String getExtraNameCharacters() { /** * Returns whether alter table with add column is supported. + * * @return true */ @Override @@ -1610,7 +1179,7 @@ public boolean supportsConvert() { @Override public boolean supportsConvert(int fromType, int toType) { if (isDebugEnabled()) { - debugCode("supportsConvert("+fromType+", "+fromType+");"); + debugCode("supportsConvert(" + fromType + ", " + toType + ')'); } return true; } @@ -2020,23 +1589,23 @@ public boolean supportsCatalogsInPrivilegeDefinitions() { /** * Returns whether positioned deletes are supported. * - * @return true + * @return false */ @Override public boolean supportsPositionedDelete() { debugCodeCall("supportsPositionedDelete"); - return true; + return false; } /** * Returns whether positioned updates are supported. 
* - * @return true + * @return false */ @Override public boolean supportsPositionedUpdate() { debugCodeCall("supportsPositionedUpdate"); - return true; + return false; } /** @@ -2202,18 +1771,16 @@ public boolean supportsTransactions() { @Override public boolean supportsTransactionIsolationLevel(int level) throws SQLException { debugCodeCall("supportsTransactionIsolationLevel"); - if (level == Connection.TRANSACTION_READ_UNCOMMITTED) { - // currently the combination of LOCK_MODE=0 and MULTI_THREADED - // is not supported, also see code in Database#setLockMode(int) - PreparedStatement prep = conn.prepareAutoCloseStatement( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?"); - prep.setString(1, "MULTI_THREADED"); - ResultSet rs = prep.executeQuery(); - if (rs.next() && rs.getString(1).equals("1")) { - return false; - } + switch (level) { + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_REPEATABLE_READ: + case Constants.TRANSACTION_SNAPSHOT: + case Connection.TRANSACTION_SERIALIZABLE: + return true; + default: + return false; } - return true; } /** @@ -2266,7 +1833,7 @@ public boolean dataDefinitionIgnoredInTransactions() { * ResultSet.TYPE_SCROLL_SENSITIVE is not supported. 
* * @param type the result set type - * @return true for all types except ResultSet.TYPE_FORWARD_ONLY + * @return true for all types except ResultSet.TYPE_SCROLL_SENSITIVE */ @Override public boolean supportsResultSetType(int type) { @@ -2285,7 +1852,7 @@ public boolean supportsResultSetType(int type) { @Override public boolean supportsResultSetConcurrency(int type, int concurrency) { if (isDebugEnabled()) { - debugCode("supportsResultSetConcurrency("+type+", "+concurrency+");"); + debugCode("supportsResultSetConcurrency(" + type + ", " + concurrency + ')'); } return type != ResultSet.TYPE_SCROLL_SENSITIVE; } @@ -2433,89 +2000,75 @@ public int getDefaultTransactionIsolation() { /** * Checks if for CREATE TABLE Test(ID INT), getTables returns Test as the - * table name. + * table name and identifiers are case sensitive. * - * @return false + * @return true is so, false otherwise */ @Override - public boolean supportsMixedCaseIdentifiers() { + public boolean supportsMixedCaseIdentifiers() throws SQLException { debugCodeCall("supportsMixedCaseIdentifiers"); - return false; - } - - /** - * Checks if a table created with CREATE TABLE "Test"(ID INT) is a different - * table than a table created with CREATE TABLE TEST(ID INT). - * - * @return true usually, and false in MySQL mode - */ - @Override - public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { - debugCodeCall("supportsMixedCaseQuotedIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return false; - } - return true; + Session.StaticSettings settings = conn.getStaticSettings(); + return !settings.databaseToUpper && !settings.databaseToLower && !settings.caseInsensitiveIdentifiers; } /** * Checks if for CREATE TABLE Test(ID INT), getTables returns TEST as the * table name. 
* - * @return true usually, and false in MySQL mode + * @return true is so, false otherwise */ @Override public boolean storesUpperCaseIdentifiers() throws SQLException { debugCodeCall("storesUpperCaseIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return false; - } - return true; + return conn.getStaticSettings().databaseToUpper; } /** * Checks if for CREATE TABLE Test(ID INT), getTables returns test as the * table name. * - * @return false usually, and true in MySQL mode + * @return true is so, false otherwise */ @Override public boolean storesLowerCaseIdentifiers() throws SQLException { debugCodeCall("storesLowerCaseIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return true; - } - return false; + return conn.getStaticSettings().databaseToLower; } /** * Checks if for CREATE TABLE Test(ID INT), getTables returns Test as the - * table name. + * table name and identifiers are not case sensitive. * - * @return false + * @return true is so, false otherwise */ @Override - public boolean storesMixedCaseIdentifiers() { + public boolean storesMixedCaseIdentifiers() throws SQLException { debugCodeCall("storesMixedCaseIdentifiers"); - return false; + Session.StaticSettings settings = conn.getStaticSettings(); + return !settings.databaseToUpper && !settings.databaseToLower && settings.caseInsensitiveIdentifiers; + } + + /** + * Checks if a table created with CREATE TABLE "Test"(ID INT) is a different + * table than a table created with CREATE TABLE "TEST"(ID INT). + * + * @return true is so, false otherwise + */ + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + debugCodeCall("supportsMixedCaseQuotedIdentifiers"); + return !conn.getStaticSettings().caseInsensitiveIdentifiers; } /** * Checks if for CREATE TABLE "Test"(ID INT), getTables returns TEST as the * table name. 
* - * @return false usually, and true in MySQL mode + * @return false */ @Override public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { debugCodeCall("storesUpperCaseQuotedIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return true; - } return false; } @@ -2523,32 +2076,24 @@ public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { * Checks if for CREATE TABLE "Test"(ID INT), getTables returns test as the * table name. * - * @return false usually, and true in MySQL mode + * @return false */ @Override public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { debugCodeCall("storesLowerCaseQuotedIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return true; - } return false; } /** * Checks if for CREATE TABLE "Test"(ID INT), getTables returns Test as the - * table name. + * table name and identifiers are case insensitive. * - * @return true usually, and false in MySQL mode + * @return true is so, false otherwise */ @Override public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { debugCodeCall("storesMixedCaseQuotedIdentifiers"); - String m = getMode(); - if (m.equals("MySQL")) { - return false; - } - return true; + return conn.getStaticSettings().caseInsensitiveIdentifiers; } /** @@ -2794,14 +2339,15 @@ public boolean supportsNamedParameters() { } /** - * Does the database support multiple open result sets. + * Does the database support multiple open result sets returned from a + * CallableStatement. 
* - * @return true + * @return false */ @Override public boolean supportsMultipleOpenResults() { debugCodeCall("supportsMultipleOpenResults"); - return true; + return false; } /** @@ -2819,20 +2365,27 @@ public boolean supportsGetGeneratedKeys() { * [Not supported] */ @Override - public ResultSet getSuperTypes(String catalog, String schemaPattern, - String typeNamePattern) throws SQLException { - throw unsupported("superTypes"); + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getSuperTypes(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(typeNamePattern) + ')'); + } + return getResultSet(meta.getSuperTypes(catalog, schemaPattern, typeNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** * Get the list of super tables of a table. This method currently returns an * empty result set. - *
      - *
    • 1 TABLE_CAT (String) table catalog
    • - *
    • 2 TABLE_SCHEM (String) table schema
    • - *
    • 3 TABLE_NAME (String) table name
    • - *
    • 4 SUPERTABLE_NAME (String) the name of the super table
    • - *
    + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. SUPERTABLE_NAME (String) the name of the super table
    8. + *
    * * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name @@ -2842,24 +2395,14 @@ public ResultSet getSuperTypes(String catalog, String schemaPattern, * @return an empty result set */ @Override - public ResultSet getSuperTables(String catalog, String schemaPattern, - String tableNamePattern) throws SQLException { + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) // + throws SQLException { try { if (isDebugEnabled()) { - debugCode("getSuperTables(" - +quote(catalog)+", " - +quote(schemaPattern)+", " - +quote(tableNamePattern)+");"); + debugCode("getSuperTables(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ')'); } - checkClosed(); - PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT " - + "CATALOG_NAME TABLE_CAT, " - + "CATALOG_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_NAME, " - + "CATALOG_NAME SUPERTABLE_NAME " - + "FROM INFORMATION_SCHEMA.CATALOGS " - + "WHERE FALSE"); - return prep.executeQuery(); + return getResultSet(meta.getSuperTables(catalog, schemaPattern, tableNamePattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -2869,10 +2412,17 @@ public ResultSet getSuperTables(String catalog, String schemaPattern, * [Not supported] */ @Override - public ResultSet getAttributes(String catalog, String schemaPattern, - String typeNamePattern, String attributeNamePattern) - throws SQLException { - throw unsupported("attributes"); + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getAttributes(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(typeNamePattern) + ", " + quote(attributeNamePattern) + ')'); + } + return getResultSet(meta.getAttributes(catalog, schemaPattern, typeNamePattern, attributeNamePattern)); + } catch 
(Exception e) { + throw logAndConvert(e); + } } /** @@ -2905,9 +2455,13 @@ public int getResultSetHoldability() { * @return the major version */ @Override - public int getDatabaseMajorVersion() { - debugCodeCall("getDatabaseMajorVersion"); - return Constants.VERSION_MAJOR; + public int getDatabaseMajorVersion() throws SQLException { + try { + debugCodeCall("getDatabaseMajorVersion"); + return meta.getDatabaseMajorVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2916,9 +2470,13 @@ public int getDatabaseMajorVersion() { * @return the minor version */ @Override - public int getDatabaseMinorVersion() { - debugCodeCall("getDatabaseMinorVersion"); - return Constants.VERSION_MINOR; + public int getDatabaseMinorVersion() throws SQLException { + try { + debugCodeCall("getDatabaseMinorVersion"); + return meta.getDatabaseMinorVersion(); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2935,23 +2493,23 @@ public int getJDBCMajorVersion() { /** * Gets the minor version of the supported JDBC API. * - * @return the minor version (0) + * @return the minor version (2) */ @Override public int getJDBCMinorVersion() { debugCodeCall("getJDBCMinorVersion"); - return 0; + return 2; } /** * Gets the SQL State type. * - * @return DatabaseMetaData.sqlStateSQL99 + * @return {@link DatabaseMetaData#sqlStateSQL} */ @Override public int getSQLStateType() { debugCodeCall("getSQLStateType"); - return DatabaseMetaData.sqlStateSQL99; + return DatabaseMetaData.sqlStateSQL; } /** @@ -2982,22 +2540,6 @@ private void checkClosed() { conn.checkClosed(); } - private static String getPattern(String pattern) { - return pattern == null ? "%" : pattern; - } - - private static String getSchemaPattern(String pattern) { - return pattern == null ? "%" : pattern.length() == 0 ? 
- Constants.SCHEMA_MAIN : pattern; - } - - private static String getCatalogPattern(String catalogPattern) { - // Workaround for OpenOffice: getColumns is called with "" as the - // catalog - return catalogPattern == null || catalogPattern.length() == 0 ? - "%" : catalogPattern; - } - /** * Get the lifetime of a rowid. * @@ -3013,11 +2555,10 @@ public RowIdLifetime getRowIdLifetime() { * Gets the list of schemas in the database. * The result set is sorted by TABLE_SCHEM. * - *
      - *
    • 1 TABLE_SCHEM (String) schema name - *
    • 2 TABLE_CATALOG (String) catalog name - *
    • 3 IS_DEFAULT (boolean) if this is the default schema - *
    + *
      + *
    1. TABLE_SCHEM (String) schema name
    2. + *
    3. TABLE_CATALOG (String) catalog name
    4. + *
    * * @param catalogPattern null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name @@ -3030,21 +2571,7 @@ public ResultSet getSchemas(String catalogPattern, String schemaPattern) throws SQLException { try { debugCodeCall("getSchemas(String,String)"); - checkClosed(); - PreparedStatement prep = conn - .prepareAutoCloseStatement("SELECT " - + "SCHEMA_NAME TABLE_SCHEM, " - + "CATALOG_NAME TABLE_CATALOG, " - +" IS_DEFAULT " - + "FROM INFORMATION_SCHEMA.SCHEMATA " - + "WHERE CATALOG_NAME LIKE ? ESCAPE ? " - + "AND SCHEMA_NAME LIKE ? ESCAPE ? " - + "ORDER BY SCHEMA_NAME"); - prep.setString(1, getCatalogPattern(catalogPattern)); - prep.setString(2, "\\"); - prep.setString(3, getSchemaPattern(schemaPattern)); - prep.setString(4, "\\"); - return prep.executeQuery(); + return getResultSet(meta.getSchemas(catalogPattern, schemaPattern)); } catch (Exception e) { throw logAndConvert(e); } @@ -3074,13 +2601,23 @@ public boolean autoCommitFailureClosesAllResultSets() { return false; } - /** - * [Not supported] Returns the client info properties. 
- */ @Override public ResultSet getClientInfoProperties() throws SQLException { - // we don't have any client properties, so return an empty result set - return new SimpleResultSet(); + Properties clientInfo = conn.getClientInfo(); + SimpleResult result = new SimpleResult(); + result.addColumn("NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("MAX_LEN", TypeInfo.TYPE_INTEGER); + result.addColumn("DEFAULT_VALUE", TypeInfo.TYPE_VARCHAR); + result.addColumn("DESCRIPTION", TypeInfo.TYPE_VARCHAR); + // Non-standard column + result.addColumn("VALUE", TypeInfo.TYPE_VARCHAR); + for (Entry entry : clientInfo.entrySet()) { + result.addRow(ValueVarchar.get((String) entry.getKey()), ValueInteger.get(Integer.MAX_VALUE), + ValueVarchar.EMPTY, ValueVarchar.EMPTY, ValueVarchar.get((String) entry.getValue())); + } + int id = getNextId(TraceObject.RESULT_SET); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getClientInfoProperties()"); + return new JdbcResultSet(conn, null, null, result, id, true, false, false); } /** @@ -3092,10 +2629,14 @@ public ResultSet getClientInfoProperties() throws SQLException { @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** @@ -3113,33 +2654,69 @@ public boolean isWrapperFor(Class iface) throws SQLException { * [Not supported] Gets the list of function columns. 
*/ @Override - public ResultSet getFunctionColumns(String catalog, String schemaPattern, - String functionNamePattern, String columnNamePattern) - throws SQLException { - throw unsupported("getFunctionColumns"); + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getFunctionColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(functionNamePattern) + ", " + quote(columnNamePattern) + ')'); + } + return getResultSet( + meta.getFunctionColumns(catalog, schemaPattern, functionNamePattern, columnNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** * [Not supported] Gets the list of functions. */ @Override - public ResultSet getFunctions(String catalog, String schemaPattern, - String functionNamePattern) throws SQLException { - throw unsupported("getFunctions"); + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getFunctions(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(functionNamePattern) + ')'); + } + return getResultSet(meta.getFunctions(catalog, schemaPattern, functionNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] + * Returns whether database always returns generated keys if valid names or + * indexes of columns were specified and command was completed successfully. + * + * @return true */ - /*## Java 1.7 ## @Override public boolean generatedKeyAlwaysReturned() { return true; } - //*/ /** - * [Not supported] + * Gets the list of pseudo and invisible columns. The result set is sorted + * by TABLE_SCHEM, TABLE_NAME, and COLUMN_NAME. + * + *
      + *
    1. TABLE_CAT (String) table catalog
    2. + *
    3. TABLE_SCHEM (String) table schema
    4. + *
    5. TABLE_NAME (String) table name
    6. + *
    7. COLUMN_NAME (String) column name
    8. + *
    9. DATA_TYPE (int) data type (see java.sql.Types)
    10. + *
    11. COLUMN_SIZE (int) precision + * (values larger than 2 GB are returned as 2 GB)
    12. + *
    13. DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR)
    14. + *
    15. NUM_PREC_RADIX (int) radix
    16. + *
    17. COLUMN_USAGE (String) he allowed usage for the column, + * see {@link java.sql.PseudoColumnUsage}
    18. + *
    19. REMARKS (String) comment
    20. + *
    21. CHAR_OCTET_LENGTH (int) for char types the + * maximum number of bytes in the column
    22. + *
    23. IS_NULLABLE (String) "NO" or "YES"
    24. + *
    * * @param catalog null (to get all objects) or the catalog name * @param schemaPattern null (to get all objects) or a schema name @@ -3148,14 +2725,21 @@ public boolean generatedKeyAlwaysReturned() { * (uppercase for unquoted names) * @param columnNamePattern null (to get all objects) or a column name * (uppercase for unquoted names) + * @return the list of pseudo and invisible columns */ - /*## Java 1.7 ## @Override - public ResultSet getPseudoColumns(String catalog, String schemaPattern, - String tableNamePattern, String columnNamePattern) { - return null; + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("getPseudoColumns(" + quote(catalog) + ", " + quote(schemaPattern) + ", " + + quote(tableNamePattern) + ", " + quote(columnNamePattern) + ')'); + } + return getResultSet(meta.getPseudoColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern)); + } catch (Exception e) { + throw logAndConvert(e); + } } - //*/ /** * INTERNAL @@ -3165,17 +2749,8 @@ public String toString() { return getTraceObjectName() + ": " + conn; } - private String getMode() throws SQLException { - if (mode == null) { - PreparedStatement prep = conn.prepareStatement( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?"); - prep.setString(1, "MODE"); - ResultSet rs = prep.executeQuery(); - rs.next(); - mode = rs.getString(1); - prep.close(); - } - return mode; + private JdbcResultSet getResultSet(ResultInterface result) { + return new JdbcResultSet(conn, null, null, result, getNextId(TraceObject.RESULT_SET), true, false, false); } } diff --git a/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java new file mode 100644 index 0000000000..9dafb7ab58 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java @@ -0,0 +1,16 @@ +/* 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, and the + * EPL 1.0 (https://h2database.com/html/license.html). Initial Developer: H2 + * Group + */ +package org.h2.jdbc; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. + */ +public interface JdbcDatabaseMetaDataBackwardsCompat { + + // compatibility interface + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcException.java b/h2/src/main/org/h2/jdbc/JdbcException.java new file mode 100644 index 0000000000..4578f57454 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcException.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +/** + * This interface contains additional methods for database exceptions. + */ +public interface JdbcException { + + /** + * Returns the H2-specific error code. + * + * @return the H2-specific error code + */ + public int getErrorCode(); + + /** + * INTERNAL + * @return original message + */ + String getOriginalMessage(); + + /** + * Returns the SQL statement. + *

    + * SQL statements that contain '--hide--' are not listed. + *

    + * + * @return the SQL statement + */ + String getSQL(); + + /** + * INTERNAL + * @param sql to set + */ + void setSQL(String sql); + + /** + * Returns the class name, the message, and in the server mode, the stack + * trace of the server + * + * @return the string representation + */ + @Override + String toString(); + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcLob.java b/h2/src/main/org/h2/jdbc/JdbcLob.java new file mode 100644 index 0000000000..6862c1b984 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcLob.java @@ -0,0 +1,230 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.IOException; +import java.io.InputStream; +import java.io.PipedInputStream; +import java.io.PipedOutputStream; +import java.io.Reader; +import java.io.Writer; +import java.sql.SQLException; + +import org.h2.api.ErrorCode; +import org.h2.message.DbException; +import org.h2.message.TraceObject; +import org.h2.mvstore.DataUtils; +import org.h2.util.IOUtils; +import org.h2.util.Task; +import org.h2.value.Value; + +/** + * Represents a large object value. + */ +public abstract class JdbcLob extends TraceObject { + + static final class LobPipedOutputStream extends PipedOutputStream { + private final Task task; + + LobPipedOutputStream(PipedInputStream snk, Task task) throws IOException { + super(snk); + this.task = task; + } + + @Override + public void close() throws IOException { + super.close(); + try { + task.get(); + } catch (Exception e) { + throw DataUtils.convertToIOException(e); + } + } + } + + /** + * State of the object. + */ + public enum State { + /** + * New object without a value. + */ + NEW, + + /** + * One of setter methods is invoked, but stream is not closed yet. + */ + SET_CALLED, + + /** + * A value is set. + */ + WITH_VALUE, + + /** + * Object is closed. + */ + CLOSED; + } + + /** + * JDBC connection. 
+ */ + final JdbcConnection conn; + + /** + * Value. + */ + Value value; + + /** + * State. + */ + State state; + + JdbcLob(JdbcConnection conn, Value value, State state, int type, int id) { + setTrace(conn.getSession().getTrace(), type, id); + this.conn = conn; + this.value = value; + this.state = state; + } + + /** + * Check that connection and LOB is not closed, otherwise throws exception with + * error code {@link org.h2.api.ErrorCode#OBJECT_CLOSED}. + */ + void checkClosed() { + conn.checkClosed(); + if (state == State.CLOSED) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); + } + } + + /** + * Check the state of the LOB and throws the exception when check failed + * (set is supported only for a new LOB). + */ + void checkEditable() { + checkClosed(); + if (state != State.NEW) { + throw DbException.getUnsupportedException("Allocate a new object to set its value."); + } + } + + /** + * Check the state of the LOB and throws the exception when check failed + * (the LOB must be set completely before read). + * + * @throws SQLException on SQL exception + * @throws IOException on I/O exception + */ + void checkReadable() throws SQLException, IOException { + checkClosed(); + if (state == State.SET_CALLED) { + throw DbException.getUnsupportedException("Stream setter is not yet closed."); + } + } + + /** + * Change the state LOB state (LOB value is set completely and available to read). + * @param blob LOB value. + */ + void completeWrite(Value blob) { + checkClosed(); + state = State.WITH_VALUE; + value = blob; + } + + /** + * Release all resources of this object. + */ + public void free() { + debugCodeCall("free"); + state = State.CLOSED; + value = null; + } + + /** + * Returns the input stream. 
+ * + * @return the input stream + * @throws SQLException on failure + */ + InputStream getBinaryStream() throws SQLException { + try { + debugCodeCall("getBinaryStream"); + checkReadable(); + return value.getInputStream(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Returns the reader. + * + * @return the reader + * @throws SQLException on failure + */ + Reader getCharacterStream() throws SQLException { + try { + debugCodeCall("getCharacterStream"); + checkReadable(); + return value.getReader(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Returns the writer. + * + * @return Writer. + * @throws IOException If an I/O error occurs. + */ + Writer setCharacterStreamImpl() throws IOException { + return IOUtils.getBufferedWriter(setClobOutputStreamImpl()); + } + + /** + * Returns the writer stream. + * + * @return Output stream.. + * @throws IOException If an I/O error occurs. + */ + LobPipedOutputStream setClobOutputStreamImpl() throws IOException { + // PipedReader / PipedWriter are a lot slower + // than PipedInputStream / PipedOutputStream + // (Sun/Oracle Java 1.6.0_20) + final PipedInputStream in = new PipedInputStream(); + final Task task = new Task() { + @Override + public void call() { + completeWrite(conn.createClob(IOUtils.getReader(in), -1)); + } + }; + LobPipedOutputStream out = new LobPipedOutputStream(in, task); + task.execute(); + return out; + } + + /** + * INTERNAL + */ + @Override + public String toString() { + StringBuilder builder = new StringBuilder().append(getTraceObjectName()).append(": "); + if (state == State.SET_CALLED) { + builder.append(""); + } else if (state == State.CLOSED) { + builder.append(""); + } else { + builder.append(value.getTraceSQL()); + } + return builder.toString(); + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java b/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java index 80c24274be..febbe79dcf 100644 --- 
a/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcParameterMetaData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -15,13 +15,14 @@ import org.h2.message.TraceObject; import org.h2.util.MathUtils; import org.h2.value.DataType; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter; /** * Information about the parameters of a prepared statement. */ -public class JdbcParameterMetaData extends TraceObject implements - ParameterMetaData { +public final class JdbcParameterMetaData extends TraceObject implements ParameterMetaData { private final JdbcPreparedStatement prep; private final int paramCount; @@ -80,12 +81,11 @@ public int getParameterMode(int param) throws SQLException { public int getParameterType(int param) throws SQLException { try { debugCodeCall("getParameterType", param); - ParameterInterface p = getParameter(param); - int type = p.getType(); - if (type == Value.UNKNOWN) { - type = Value.STRING; + TypeInfo type = getParameter(param).getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; } - return DataType.getDataType(type).sqlType; + return DataType.convertTypeToSQLType(type); } catch (Exception e) { throw logAndConvert(e); } @@ -102,8 +102,8 @@ public int getParameterType(int param) throws SQLException { public int getPrecision(int param) throws SQLException { try { debugCodeCall("getPrecision", param); - ParameterInterface p = getParameter(param); - return MathUtils.convertLongToInt(p.getPrecision()); + TypeInfo type = getParameter(param).getType(); + return type.getValueType() == Value.UNKNOWN ? 
0 : MathUtils.convertLongToInt(type.getPrecision()); } catch (Exception e) { throw logAndConvert(e); } @@ -120,8 +120,8 @@ public int getPrecision(int param) throws SQLException { public int getScale(int param) throws SQLException { try { debugCodeCall("getScale", param); - ParameterInterface p = getParameter(param); - return p.getScale(); + TypeInfo type = getParameter(param).getType(); + return type.getValueType() == Value.UNKNOWN ? 0 : type.getScale(); } catch (Exception e) { throw logAndConvert(e); } @@ -173,12 +173,11 @@ public boolean isSigned(int param) throws SQLException { public String getParameterClassName(int param) throws SQLException { try { debugCodeCall("getParameterClassName", param); - ParameterInterface p = getParameter(param); - int type = p.getType(); + int type = getParameter(param).getType().getValueType(); if (type == Value.UNKNOWN) { - type = Value.STRING; + type = Value.VARCHAR; } - return DataType.getTypeClassName(type); + return ValueToObjectConverter.getDefaultClass(type, true).getName(); } catch (Exception e) { throw logAndConvert(e); } @@ -195,12 +194,11 @@ public String getParameterClassName(int param) throws SQLException { public String getParameterTypeName(int param) throws SQLException { try { debugCodeCall("getParameterTypeName", param); - ParameterInterface p = getParameter(param); - int type = p.getType(); - if (type == Value.UNKNOWN) { - type = Value.STRING; + TypeInfo type = getParameter(param).getType(); + if (type.getValueType() == Value.UNKNOWN) { + type = TypeInfo.TYPE_VARCHAR; } - return DataType.getDataType(type).name; + return type.getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } @@ -227,10 +225,14 @@ private void checkClosed() { @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } 
catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** diff --git a/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java b/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java index 9fe13b5735..9533d97c3b 100644 --- a/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcPreparedStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -20,6 +20,7 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLXML; import java.sql.Statement; import java.util.ArrayList; @@ -31,44 +32,63 @@ import org.h2.expression.ParameterInterface; import org.h2.message.DbException; import org.h2.message.TraceObject; +import org.h2.result.MergedResult; import org.h2.result.ResultInterface; -import org.h2.util.DateTimeUtils; +import org.h2.result.ResultWithGeneratedKeys; import org.h2.util.IOUtils; -import org.h2.util.New; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.Utils; import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; +import org.h2.value.ValueNumeric; +import 
org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * Represents a prepared statement. + *

    + * Thread safety: the prepared statement is not thread-safe. If the same + * prepared statement is used by multiple threads access to it must be + * synchronized. The single synchronized block must include assignment of + * parameters, execution of the command and all operations with its result. + *

    + *
    + * synchronized (prep) {
    + *     prep.setInt(1, 10);
    + *     try (ResultSet rs = prep.executeQuery()) {
    + *         while (rs.next) {
    + *             // Do something
    + *         }
    + *     }
    + * }
    + * synchronized (prep) {
    + *     prep.setInt(1, 15);
    + *     updateCount = prep.executeUpdate();
    + * }
    + * 
    */ -public class JdbcPreparedStatement extends JdbcStatement implements - PreparedStatement { +public class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { protected CommandInterface command; - private final String sqlStatement; private ArrayList batchParameters; + private MergedResult batchIdentities; private HashMap cachedColumnLabelMap; + private final Object generatedKeysRequest; - JdbcPreparedStatement(JdbcConnection conn, String sql, int id, - int resultSetType, int resultSetConcurrency, - boolean closeWithResultSet) { - super(conn, id, resultSetType, resultSetConcurrency, closeWithResultSet); + JdbcPreparedStatement(JdbcConnection conn, String sql, int id, int resultSetType, int resultSetConcurrency, + Object generatedKeysRequest) { + super(conn, id, resultSetType, resultSetConcurrency); + this.generatedKeysRequest = generatedKeysRequest; setTrace(session.getTrace(), TraceObject.PREPARED_STATEMENT, id); - this.sqlStatement = sql; command = conn.prepareCommand(sql, fetchSize); } @@ -94,23 +114,26 @@ void setCachedColumnLabelMap(HashMap cachedColumnLabelMap) { public ResultSet executeQuery() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery()"); - } + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery()"); + batchIdentities = null; synchronized (session) { checkClosed(); closeOldResultSet(); ResultInterface result; + boolean lazy = false; boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; try { setExecutingStatement(command); result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); } finally { - setExecutingStatement(null); + if (!lazy) { + setExecutingStatement(null); + } } - resultSet = new JdbcResultSet(conn, this, result, id, - closedByResultSet, scrollable, updatable, 
cachedColumnLabelMap); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, + cachedColumnLabelMap); } return resultSet; } catch (Exception e) { @@ -129,32 +152,67 @@ public ResultSet executeQuery() throws SQLException { * throw an exception, the current transaction (if any) is committed after * executing the statement. * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returns nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for + * {@code int} data type) * @throws SQLException if this object is closed or invalid + * @see #executeLargeUpdate() */ @Override public int executeUpdate() throws SQLException { try { debugCodeCall("executeUpdate"); - checkClosedForWrite(); - try { - return executeUpdateInternal(); - } finally { - afterWriting(); - } + checkClosed(); + batchIdentities = null; + long updateCount = executeUpdateInternal(); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } - private int executeUpdateInternal() throws SQLException { + /** + * Executes a statement (insert, update, delete, create, drop) + * and returns the update count. + * If another result set exists for this statement, this will be closed + * (even if this statement fails). + * + * If auto commit is on, this statement will be committed. + * If the statement is a DDL statement (create, drop, alter) and does not + * throw an exception, the current transaction (if any) is committed after + * executing the statement. 
+ * + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returns nothing) + * @throws SQLException if this object is closed or invalid + */ + @Override + public long executeLargeUpdate() throws SQLException { + try { + debugCodeCall("executeLargeUpdate"); + checkClosed(); + batchIdentities = null; + return executeUpdateInternal(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private long executeUpdateInternal() { closeOldResultSet(); synchronized (session) { try { setExecutingStatement(command); - updateCount = command.executeUpdate(); + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + int id = getNextId(TraceObject.RESULT_SET); + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); + } } finally { setExecutingStatement(null); } @@ -175,37 +233,39 @@ private int executeUpdateInternal() throws SQLException { public boolean execute() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); - if (isDebugEnabled()) { - debugCodeCall("execute"); - } - checkClosedForWrite(); - try { - boolean returnsResultSet; - synchronized (conn.getSession()) { - closeOldResultSet(); - try { - setExecutingStatement(command); - if (command.isQuery()) { - returnsResultSet = true; - boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; - boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; - ResultInterface result = command.executeQuery(maxRows, scrollable); - resultSet = new JdbcResultSet(conn, this, result, - id, closedByResultSet, scrollable, - updatable); - } else { - returnsResultSet = false; - updateCount = command.executeUpdate(); + debugCodeCall("execute"); + checkClosed(); + boolean returnsResultSet; + synchronized 
(session) { + closeOldResultSet(); + boolean lazy = false; + try { + setExecutingStatement(command); + if (command.isQuery()) { + returnsResultSet = true; + boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; + boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; + ResultInterface result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, + cachedColumnLabelMap); + } else { + returnsResultSet = false; + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } - } finally { + } + } finally { + if (!lazy) { setExecutingStatement(null); } } - return returnsResultSet; - } finally { - afterWriting(); } - } catch (Exception e) { + return returnsResultSet; + } catch (Throwable e) { throw logAndConvert(e); } } @@ -221,8 +281,7 @@ public void clearParameters() throws SQLException { debugCodeCall("clearParameters"); checkClosed(); ArrayList parameters = command.getParameters(); - for (int i = 0, size = parameters.size(); i < size; i++) { - ParameterInterface param = parameters.get(i); + for (ParameterInterface param : parameters) { // can only delete old temp files if they are not in the batch param.setValue(null, batchParameters == null); } @@ -263,38 +322,6 @@ public void addBatch(String sql) throws SQLException { } } - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql) throws SQLException { - try { - debugCodeCall("executeUpdate", sql); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @throws SQLException Unsupported Feature - */ - @Override - public boolean execute(String sql) throws SQLException { - try { - debugCodeCall("execute", sql); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); - } - } - // ============================================================= /** @@ -308,7 +335,7 @@ public boolean execute(String sql) throws SQLException { public void setNull(int parameterIndex, int sqlType) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNull("+parameterIndex+", "+sqlType+");"); + debugCode("setNull(" + parameterIndex + ", " + sqlType + ')'); } setParameter(parameterIndex, ValueNull.INSTANCE); } catch (Exception e) { @@ -327,9 +354,9 @@ public void setNull(int parameterIndex, int sqlType) throws SQLException { public void setInt(int parameterIndex, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setInt("+parameterIndex+", "+x+");"); + debugCode("setInt(" + parameterIndex + ", " + x + ')'); } - setParameter(parameterIndex, ValueInt.get(x)); + setParameter(parameterIndex, ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -346,10 +373,9 @@ public void setInt(int parameterIndex, int x) throws SQLException { public void setString(int parameterIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setString("+parameterIndex+", "+quote(x)+");"); + debugCode("setString(" + parameterIndex + ", " + quote(x) + ')'); } - Value v = x == null ? 
(Value) ValueNull.INSTANCE : ValueString.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -363,14 +389,12 @@ public void setString(int parameterIndex, String x) throws SQLException { * @throws SQLException if this object is closed */ @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) - throws SQLException { + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBigDecimal("+parameterIndex+", " + quoteBigDecimal(x) + ");"); + debugCode("setBigDecimal(" + parameterIndex + ", " + quoteBigDecimal(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDecimal.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -378,20 +402,24 @@ public void setBigDecimal(int parameterIndex, BigDecimal x) /** * Sets the value of a parameter. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setDate(int parameterIndex, java.sql.Date x) - throws SQLException { + public void setDate(int parameterIndex, java.sql.Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ");"); + debugCode("setDate(" + parameterIndex + ", " + quoteDate(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -399,20 +427,24 @@ public void setDate(int parameterIndex, java.sql.Date x) /** * Sets the value of a parameter. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTime(int parameterIndex, java.sql.Time x) - throws SQLException { + public void setTime(int parameterIndex, java.sql.Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ");"); + debugCode("setTime(" + parameterIndex + ", " + quoteTime(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -420,20 +452,25 @@ public void setTime(int parameterIndex, java.sql.Time x) /** * Sets the value of a parameter. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTimestamp(int parameterIndex, java.sql.Timestamp x) - throws SQLException { + public void setTimestamp(int parameterIndex, java.sql.Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTimestamp("+parameterIndex+", " + quoteTimestamp(x) + ");"); + debugCode("setTimestamp(" + parameterIndex + ", " + quoteTimestamp(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTimestamp.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -451,14 +488,12 @@ public void setTimestamp(int parameterIndex, java.sql.Timestamp x) public void setObject(int parameterIndex, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x);"); + debugCode("setObject(" + parameterIndex + ", x)"); } if (x == null) { - // throw Errors.getInvalidValueException("null", "x"); setParameter(parameterIndex, ValueNull.INSTANCE); } else { - setParameter(parameterIndex, - DataType.convertToValue(session, x, Value.UNKNOWN)); + setParameter(parameterIndex, ValueToObjectConverter.objectToValue(session, x, Value.UNKNOWN)); } } catch (Exception e) { throw logAndConvert(e); @@ -480,15 +515,9 @@ public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x, "+targetSqlType+");"); - } - int type = DataType.convertSQLTypeToValueType(targetSqlType); - if (x == null) { - setParameter(parameterIndex, ValueNull.INSTANCE); - } else { - Value v = DataType.convertToValue(conn.getSession(), x, type); - setParameter(parameterIndex, v.convertTo(type)); + debugCode("setObject(" + 
parameterIndex + ", x, " + targetSqlType + ')'); } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } @@ -510,14 +539,72 @@ public void setObject(int parameterIndex, Object x, int targetSqlType, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setObject("+parameterIndex+", x, "+targetSqlType+", "+scale+");"); + debugCode("setObject(" + parameterIndex + ", x, " + targetSqlType + ", " + scale + ')'); + } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterIndex the parameter index (1, 2, ...) + * @param x the value, null is allowed + * @param targetSqlType the SQL type + * @throws SQLException if this object is closed + */ + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("setObject(" + parameterIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ')'); } - setObject(parameterIndex, x, targetSqlType); + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } } + /** + * Sets the value of a parameter. The object is converted, if required, to + * the specified data type before sending to the database. + * Objects of unknown classes are serialized (on the client side). + * + * @param parameterIndex the parameter index (1, 2, ...) 
+ * @param x the value, null is allowed + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored + * @throws SQLException if this object is closed + */ + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("setObject(" + parameterIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ", " + + scaleOrLength + ')'); + } + setObjectWithType(parameterIndex, x, DataType.convertSQLTypeToValueType(targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void setObjectWithType(int parameterIndex, Object x, int type) { + if (x == null) { + setParameter(parameterIndex, ValueNull.INSTANCE); + } else { + Value v = ValueToObjectConverter.objectToValue(conn.getSession(), x, type); + if (type != Value.UNKNOWN) { + v = v.convertTo(type, conn); + } + setParameter(parameterIndex, v); + } + } + /** * Sets the value of a parameter. * @@ -529,7 +616,7 @@ public void setObject(int parameterIndex, Object x, int targetSqlType, public void setBoolean(int parameterIndex, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBoolean("+parameterIndex+", "+x+");"); + debugCode("setBoolean(" + parameterIndex + ", " + x + ')'); } setParameter(parameterIndex, ValueBoolean.get(x)); } catch (Exception e) { @@ -548,9 +635,9 @@ public void setBoolean(int parameterIndex, boolean x) throws SQLException { public void setByte(int parameterIndex, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setByte("+parameterIndex+", "+x+");"); + debugCode("setByte(" + parameterIndex + ", " + x + ')'); } - setParameter(parameterIndex, ValueByte.get(x)); + setParameter(parameterIndex, ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -567,9 +654,9 @@ public void setByte(int parameterIndex, byte x) throws SQLException { public void setShort(int parameterIndex, short x) 
throws SQLException { try { if (isDebugEnabled()) { - debugCode("setShort("+parameterIndex+", (short) "+x+");"); + debugCode("setShort(" + parameterIndex + ", (short) " + x + ')'); } - setParameter(parameterIndex, ValueShort.get(x)); + setParameter(parameterIndex, ValueSmallint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -586,9 +673,9 @@ public void setShort(int parameterIndex, short x) throws SQLException { public void setLong(int parameterIndex, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setLong("+parameterIndex+", "+x+"L);"); + debugCode("setLong(" + parameterIndex + ", " + x + "L)"); } - setParameter(parameterIndex, ValueLong.get(x)); + setParameter(parameterIndex, ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -605,9 +692,9 @@ public void setLong(int parameterIndex, long x) throws SQLException { public void setFloat(int parameterIndex, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setFloat("+parameterIndex+", "+x+"f);"); + debugCode("setFloat(" + parameterIndex + ", " + x + "f)"); } - setParameter(parameterIndex, ValueFloat.get(x)); + setParameter(parameterIndex, ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -624,7 +711,7 @@ public void setFloat(int parameterIndex, float x) throws SQLException { public void setDouble(int parameterIndex, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDouble("+parameterIndex+", "+x+"d);"); + debugCode("setDouble(" + parameterIndex + ", " + x + "d)"); } setParameter(parameterIndex, ValueDouble.get(x)); } catch (Exception e) { @@ -643,23 +730,29 @@ public void setRef(int parameterIndex, Ref x) throws SQLException { /** * Sets the date using a specified time zone. The value will be converted to * the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) - throws SQLException { + public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ", calendar);"); + debugCode("setDate(" + parameterIndex + ", " + quoteDate(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { - setParameter(parameterIndex, DateTimeUtils.convertDate(x, calendar)); + setParameter(parameterIndex, + LegacyDateTimeUtils.fromDate(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -669,23 +762,29 @@ public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar) /** * Sets the time using a specified time zone. The value will be converted to * the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) - throws SQLException { + public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ", calendar);"); + debugCode("setTime(" + parameterIndex + ", " + quoteTime(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { - setParameter(parameterIndex, DateTimeUtils.convertTime(x, calendar)); + setParameter(parameterIndex, + LegacyDateTimeUtils.fromTime(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -695,24 +794,29 @@ public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar) /** * Sets the timestamp using a specified time zone. The value will be * converted to the local time zone. + *

    + * Usage of this method is discouraged. Use + * {@code setObject(parameterIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param parameterIndex the parameter index (1, 2, ...) * @param x the value * @param calendar the calendar * @throws SQLException if this object is closed + * @see #setObject(int, Object) */ @Override - public void setTimestamp(int parameterIndex, java.sql.Timestamp x, - Calendar calendar) throws SQLException { + public void setTimestamp(int parameterIndex, java.sql.Timestamp x, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setTimestamp(" + parameterIndex + ", " + - quoteTimestamp(x) + ", calendar);"); + debugCode("setTimestamp(" + parameterIndex + ", " + quoteTimestamp(x) + ", calendar)"); } if (x == null) { setParameter(parameterIndex, ValueNull.INSTANCE); } else { - setParameter(parameterIndex, DateTimeUtils.convertTimestamp(x, calendar)); + setParameter(parameterIndex, + LegacyDateTimeUtils.fromTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, x)); } } catch (Exception e) { throw logAndConvert(e); @@ -724,6 +828,7 @@ public void setTimestamp(int parameterIndex, java.sql.Timestamp x, * * @deprecated since JDBC 2.0, use setCharacterStream */ + @Deprecated @Override public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { @@ -743,7 +848,7 @@ public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNull("+parameterIndex+", "+sqlType+", "+quote(typeName)+");"); + debugCode("setNull(" + parameterIndex + ", " + sqlType + ", " + quote(typeName) + ')'); } setNull(parameterIndex, sqlType); } catch (Exception e) { @@ -762,20 +867,16 @@ public void setNull(int parameterIndex, int sqlType, String typeName) public void setBlob(int parameterIndex, Blob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x);"); + debugCode("setBlob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } 
else { - v = conn.createBlob(x.getBinaryStream(), -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = conn.createBlob(x.getBinaryStream(), -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -794,15 +895,11 @@ public void setBlob(int parameterIndex, Blob x) throws SQLException { public void setBlob(int parameterIndex, InputStream x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, -1); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBlob(" + parameterIndex + ", x)"); } + checkClosed(); + Value v = conn.createBlob(x, -1); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -819,20 +916,16 @@ public void setBlob(int parameterIndex, InputStream x) throws SQLException { public void setClob(int parameterIndex, Clob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x);"); + debugCode("setClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = conn.createClob(x.getCharacterStream(), -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -851,31 +944,45 @@ public void setClob(int parameterIndex, Clob x) throws SQLException { public void setClob(int parameterIndex, Reader x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x);"); + debugCode("setClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); - try { - 
Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x, -1); - } - setParameter(parameterIndex, v); - } finally { - afterWriting(); + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = conn.createClob(x, -1); } + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] Sets the value of a parameter as a Array. + * Sets the value of a parameter as an Array. + * + * @param parameterIndex the parameter index (1, 2, ...) + * @param x the value + * @throws SQLException if this object is closed */ @Override public void setArray(int parameterIndex, Array x) throws SQLException { - throw unsupported("setArray"); + try { + if (isDebugEnabled()) { + debugCode("setArray(" + parameterIndex + ", x)"); + } + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = ValueToObjectConverter.objectToValue(session, x.getArray(), Value.ARRAY); + } + setParameter(parameterIndex, v); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -889,10 +996,9 @@ public void setArray(int parameterIndex, Array x) throws SQLException { public void setBytes(int parameterIndex, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBytes("+parameterIndex+", "+quoteBytes(x)+");"); + debugCode("setBytes(" + parameterIndex + ", " + quoteBytes(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? 
ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -913,15 +1019,11 @@ public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBinaryStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBinaryStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createBlob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -989,15 +1091,11 @@ public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setAsciiStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setAsciiStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(IOUtils.getAsciiReader(x), length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1064,15 +1162,11 @@ public void setCharacterStream(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setCharacterStream("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setCharacterStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1103,14 +1197,9 @@ public ResultSetMetaData getMetaData() throws SQLException { return null; } int 
id = getNextId(TraceObject.RESULT_SET_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ResultSetMetaData", - TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("ResultSetMetaData", TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); String catalog = conn.getCatalog(); - JdbcResultSetMetaData meta = new JdbcResultSetMetaData( - null, this, result, catalog, session.getTrace(), id); - return meta; + return new JdbcResultSetMetaData(null, this, result, catalog, session.getTrace(), id); } catch (Exception e) { throw logAndConvert(e); } @@ -1140,6 +1229,7 @@ public void close() throws SQLException { try { super.close(); batchParameters = null; + batchIdentities = null; if (command != null) { command.close(); command = null; @@ -1154,197 +1244,125 @@ public void close() throws SQLException { * If one of the batched statements fails, this database will continue. * * @return the array of update counts + * @see #executeLargeBatch() */ @Override public int[] executeBatch() throws SQLException { try { debugCodeCall("executeBatch"); if (batchParameters == null) { - // TODO batch: check what other database do if no parameters are - // set - batchParameters = New.arrayList(); + // Empty batch is allowed, see JDK-4639504 and other issues + batchParameters = new ArrayList<>(); } + batchIdentities = new MergedResult(); int size = batchParameters.size(); int[] result = new int[size]; - boolean error = false; - SQLException next = null; - checkClosedForWrite(); - try { - for (int i = 0; i < size; i++) { - Value[] set = batchParameters.get(i); - ArrayList parameters = - command.getParameters(); - for (int j = 0; j < set.length; j++) { - Value value = set[j]; - ParameterInterface param = parameters.get(j); - param.setValue(value, false); - } - try { - result[i] = executeUpdateInternal(); - } catch (Exception re) { - SQLException e = logAndConvert(re); - if (next == null) { - next = e; - } else { - e.setNextException(next); - next = e; - } - 
result[i] = Statement.EXECUTE_FAILED; - error = true; - } - } - batchParameters = null; - if (error) { - JdbcBatchUpdateException e = new JdbcBatchUpdateException(next, result); - throw e; - } - return result; - } finally { - afterWriting(); + SQLException exception = new SQLException(); + checkClosed(); + for (int i = 0; i < size; i++) { + long updateCount = executeBatchElement(batchParameters.get(i), exception); + result[i] = updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } - } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Adds the current settings to the batch. - */ - @Override - public void addBatch() throws SQLException { - try { - debugCodeCall("addBatch"); - checkClosedForWrite(); - try { - ArrayList parameters = - command.getParameters(); - int size = parameters.size(); - Value[] set = new Value[size]; - for (int i = 0; i < size; i++) { - ParameterInterface param = parameters.get(i); - Value value = param.getParamValue(); - set[i] = value; - } - if (batchParameters == null) { - batchParameters = New.arrayList(); - } - batchParameters.add(set); - } finally { - afterWriting(); + batchParameters = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); } + return result; } catch (Exception e) { throw logAndConvert(e); } } /** - * Calling this method is not legal on a PreparedStatement. + * Executes the batch. + * If one of the batched statements fails, this database will continue. 
* - * @param sql ignored - * @param autoGeneratedKeys ignored - * @throws SQLException Unsupported Feature + * @return the array of update counts */ @Override - public int executeUpdate(String sql, int autoGeneratedKeys) - throws SQLException { + public long[] executeLargeBatch() throws SQLException { try { - debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + debugCodeCall("executeLargeBatch"); + if (batchParameters == null) { + // Empty batch is allowed, see JDK-4639504 and other issues + batchParameters = new ArrayList<>(); + } + batchIdentities = new MergedResult(); + int size = batchParameters.size(); + long[] result = new long[size]; + SQLException exception = new SQLException(); + checkClosed(); + for (int i = 0; i < size; i++) { + result[i] = executeBatchElement(batchParameters.get(i), exception); + } + batchParameters = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); + } + return result; } catch (Exception e) { throw logAndConvert(e); } } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param columnIndexes ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql, int[] columnIndexes) - throws SQLException { - try { - debugCode("executeUpdate(" + quote(sql) + ", " + - quoteIntArray(columnIndexes) + ");"); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); + private long executeBatchElement(Value[] set, SQLException exception) { + ArrayList parameters = command.getParameters(); + for (int i = 0, l = set.length; i < l; i++) { + parameters.get(i).setValue(set[i], false); } - } - - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param columnNames ignored - * @throws SQLException Unsupported Feature - */ - @Override - public int executeUpdate(String sql, String[] columnNames) - throws SQLException { + long updateCount; try { - debugCode("executeUpdate(" + quote(sql) + ", " + - quoteArray(columnNames) + ");"); - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + updateCount = executeUpdateInternal(); + // Cannot use own implementation, it returns batch identities + ResultSet rs = super.getGeneratedKeys(); + batchIdentities.add(((JdbcResultSet) rs).result); } catch (Exception e) { - throw logAndConvert(e); - } - } - - /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param autoGeneratedKeys ignored - * @throws SQLException Unsupported Feature - */ - @Override - public boolean execute(String sql, int autoGeneratedKeys) - throws SQLException { - try { - debugCode("execute(" + quote(sql) + ", " + autoGeneratedKeys + ");"); - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); + exception.setNextException(logAndConvert(e)); + updateCount = Statement.EXECUTE_FAILED; } + return updateCount; } - /** - * Calling this method is not legal on a PreparedStatement. 
- * - * @param sql ignored - * @param columnIndexes ignored - * @throws SQLException Unsupported Feature - */ @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - try { - debugCode("execute(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ");"); - throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); - } catch (Exception e) { - throw logAndConvert(e); + public ResultSet getGeneratedKeys() throws SQLException { + if (batchIdentities != null) { + try { + int id = getNextId(TraceObject.RESULT_SET); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getGeneratedKeys()"); + checkClosed(); + generatedKeys = new JdbcResultSet(conn, this, null, batchIdentities.getResult(), id, true, false, + false); + } catch (Exception e) { + throw logAndConvert(e); + } } + return super.getGeneratedKeys(); } /** - * Calling this method is not legal on a PreparedStatement. - * - * @param sql ignored - * @param columnNames ignored - * @throws SQLException Unsupported Feature + * Adds the current settings to the batch. 
*/ @Override - public boolean execute(String sql, String[] columnNames) - throws SQLException { + public void addBatch() throws SQLException { try { - debugCode("execute(" + quote(sql) + ", " + quoteArray(columnNames) + ");"); - throw DbException.get( - ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + debugCodeCall("addBatch"); + checkClosed(); + ArrayList parameters = + command.getParameters(); + int size = parameters.size(); + Value[] set = new Value[size]; + for (int i = 0; i < size; i++) { + ParameterInterface param = parameters.get(i); + param.checkSet(); + Value value = param.getParamValue(); + set[i] = value; + } + if (batchParameters == null) { + batchParameters = Utils.newSmallArrayList(); + } + batchParameters.add(set); } catch (Exception e) { throw logAndConvert(e); } @@ -1359,14 +1377,9 @@ public boolean execute(String sql, String[] columnNames) public ParameterMetaData getParameterMetaData() throws SQLException { try { int id = getNextId(TraceObject.PARAMETER_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ParameterMetaData", - TraceObject.PARAMETER_META_DATA, id, "getParameterMetaData()"); - } + debugCodeAssign("ParameterMetaData", TraceObject.PARAMETER_META_DATA, id, "getParameterMetaData()"); checkClosed(); - JdbcParameterMetaData meta = new JdbcParameterMetaData( - session.getTrace(), this, command, id); - return meta; + return new JdbcParameterMetaData(session.getTrace(), this, command, id); } catch (Exception e) { throw logAndConvert(e); } @@ -1406,10 +1419,9 @@ public void setRowId(int parameterIndex, RowId x) throws SQLException { public void setNString(int parameterIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNString("+parameterIndex+", "+quote(x)+");"); + debugCode("setNString(" + parameterIndex + ", " + quote(x) + ')'); } - Value v = x == null ? (Value) ValueNull.INSTANCE : ValueString.get(x); - setParameter(parameterIndex, v); + setParameter(parameterIndex, x == null ? 
ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1430,16 +1442,11 @@ public void setNCharacterStream(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNCharacterStream("+ - parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNCharacterStream(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1471,9 +1478,9 @@ public void setNCharacterStream(int parameterIndex, Reader x) public void setNClob(int parameterIndex, NClob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x);"); + debugCode("setNClob(" + parameterIndex + ", x)"); } - checkClosedForWrite(); + checkClosed(); Value v; if (x == null) { v = ValueNull.INSTANCE; @@ -1499,15 +1506,11 @@ public void setNClob(int parameterIndex, NClob x) throws SQLException { public void setNClob(int parameterIndex, Reader x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, -1); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNClob(" + parameterIndex + ", x)"); } + checkClosed(); + Value v = conn.createClob(x, -1); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1527,15 +1530,11 @@ public void setClob(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setClob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); 
+ debugCode("setClob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1556,15 +1555,11 @@ public void setBlob(int parameterIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setBlob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createBlob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setBlob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createBlob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } @@ -1585,26 +1580,40 @@ public void setNClob(int parameterIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setNClob("+parameterIndex+", x, "+length+"L);"); - } - checkClosedForWrite(); - try { - Value v = conn.createClob(x, length); - setParameter(parameterIndex, v); - } finally { - afterWriting(); + debugCode("setNClob(" + parameterIndex + ", x, " + length + "L)"); } + checkClosed(); + Value v = conn.createClob(x, length); + setParameter(parameterIndex, v); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] Sets the value of a parameter as a SQLXML object. + * Sets the value of a parameter as a SQLXML. + * + * @param parameterIndex the parameter index (1, 2, ...) 
+ * @param x the value + * @throws SQLException if this object is closed */ @Override public void setSQLXML(int parameterIndex, SQLXML x) throws SQLException { - throw unsupported("SQLXML"); + try { + if (isDebugEnabled()) { + debugCode("setSQLXML(" + parameterIndex + ", x)"); + } + checkClosed(); + Value v; + if (x == null) { + v = ValueNull.INSTANCE; + } else { + v = conn.createClob(x.getCharacterStream(), -1); + } + setParameter(parameterIndex, v); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1615,24 +1624,4 @@ public String toString() { return getTraceObjectName() + ": " + command; } - @Override - protected boolean checkClosed(boolean write) { - if (super.checkClosed(write)) { - // if the session was re-connected, re-prepare the statement - ArrayList oldParams = command.getParameters(); - command = conn.prepareCommand(sqlStatement, fetchSize); - ArrayList newParams = command.getParameters(); - for (int i = 0, size = oldParams.size(); i < size; i++) { - ParameterInterface old = oldParams.get(i); - Value value = old.getParamValue(); - if (value != null) { - ParameterInterface n = newParams.get(i); - n.setValue(value, false); - } - } - return true; - } - return false; - } - } diff --git a/h2/src/main/org/h2/jdbc/JdbcResultSet.java b/h2/src/main/org/h2/jdbc/JdbcResultSet.java index 3cb304ffc8..5984628827 100644 --- a/h2/src/main/org/h2/jdbc/JdbcResultSet.java +++ b/h2/src/main/org/h2/jdbc/JdbcResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,6 +19,7 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; +import java.sql.SQLType; import java.sql.SQLWarning; import java.sql.SQLXML; import java.sql.Statement; @@ -27,43 +28,45 @@ import java.util.Calendar; import java.util.HashMap; import java.util.Map; + import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Session; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.ResultInterface; import org.h2.result.UpdatableRow; -import org.h2.util.DateTimeUtils; import org.h2.util.IOUtils; -import org.h2.util.New; +import org.h2.util.LegacyDateTimeUtils; import org.h2.util.StringUtils; import org.h2.value.CompareMode; import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueBigint; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTinyint; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** - *

    * Represents a result set. - *

    *

    * Column labels are case-insensitive, quotes are not supported. The first * column has the column index 1. *

    *

    + * Thread safety: the result set is not thread-safe and must not be used by + * multiple threads concurrently. + *

    + *

    * Updatable result sets: Result sets are updatable when the result only * contains columns from one table, and if it contains all columns of a unique * index (primary key or other) of this table. Key columns may not contain NULL @@ -71,11 +74,12 @@ * changes are visible, but not own inserts and deletes. *

    */ -public class JdbcResultSet extends TraceObject implements ResultSet { - private final boolean closeStatement; +public final class JdbcResultSet extends TraceObject implements ResultSet { + private final boolean scrollable; private final boolean updatable; - private ResultInterface result; + private final boolean triggerUpdatable; + ResultInterface result; private JdbcConnection conn; private JdbcStatement stat; private int columnCount; @@ -83,28 +87,27 @@ public class JdbcResultSet extends TraceObject implements ResultSet { private Value[] insertRow; private Value[] updateRow; private HashMap columnLabelMap; - private HashMap patchedRows; + private HashMap patchedRows; private JdbcPreparedStatement preparedStatement; + private final CommandInterface command; - JdbcResultSet(JdbcConnection conn, JdbcStatement stat, - ResultInterface result, int id, boolean closeStatement, - boolean scrollable, boolean updatable) { + public JdbcResultSet(JdbcConnection conn, JdbcStatement stat, CommandInterface command, ResultInterface result, + int id, boolean scrollable, boolean updatable, boolean triggerUpdatable) { setTrace(conn.getSession().getTrace(), TraceObject.RESULT_SET, id); this.conn = conn; this.stat = stat; + this.command = command; this.result = result; - columnCount = result.getVisibleColumnCount(); - this.closeStatement = closeStatement; + this.columnCount = result.getVisibleColumnCount(); this.scrollable = scrollable; this.updatable = updatable; + this.triggerUpdatable = triggerUpdatable; } - JdbcResultSet(JdbcConnection conn, JdbcPreparedStatement preparedStatement, - ResultInterface result, int id, boolean closeStatement, - boolean scrollable, boolean updatable, + JdbcResultSet(JdbcConnection conn, JdbcPreparedStatement preparedStatement, CommandInterface command, + ResultInterface result, int id, boolean scrollable, boolean updatable, HashMap columnLabelMap) { - this(conn, preparedStatement, result, id, closeStatement, scrollable, - updatable); + 
this(conn, preparedStatement, command, result, id, scrollable, updatable, false); this.columnLabelMap = columnLabelMap; this.preparedStatement = preparedStatement; } @@ -134,15 +137,10 @@ public boolean next() throws SQLException { public ResultSetMetaData getMetaData() throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET_META_DATA); - if (isDebugEnabled()) { - debugCodeAssign("ResultSetMetaData", - TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); - } + debugCodeAssign("ResultSetMetaData", TraceObject.RESULT_SET_META_DATA, id, "getMetaData()"); checkClosed(); String catalog = conn.getCatalog(); - JdbcResultSetMetaData meta = new JdbcResultSetMetaData( - this, null, result, catalog, conn.getSession().getTrace(), id); - return meta; + return new JdbcResultSetMetaData(this, null, result, catalog, conn.getSession().getTrace(), id); } catch (Exception e) { throw logAndConvert(e); } @@ -190,7 +188,7 @@ public int findColumn(String columnLabel) throws SQLException { public void close() throws SQLException { try { debugCodeCall("close"); - closeInternal(); + closeInternal(false); } catch (Exception e) { throw logAndConvert(e); } @@ -198,21 +196,26 @@ public void close() throws SQLException { /** * Close the result set. This method also closes the statement if required. 
+ * @param fromStatement if true - close statement in the end */ - void closeInternal() throws SQLException { + void closeInternal(boolean fromStatement) { if (result != null) { try { - result.close(); - if (closeStatement && stat != null) { - stat.close(); + if (result.isLazy()) { + stat.onLazyResultSetClose(command, preparedStatement == null); } + result.close(); } finally { + JdbcStatement s = stat; columnCount = 0; result = null; stat = null; conn = null; insertRow = null; updateRow = null; + if (!fromStatement && s != null) { + s.closeIfCloseOnCompletion(); + } } } } @@ -228,10 +231,6 @@ public Statement getStatement() throws SQLException { try { debugCodeCall("getStatement"); checkClosed(); - if (closeStatement) { - // if the result set was opened by a DatabaseMetaData call - return null; - } return stat; } catch (Exception e) { throw logAndConvert(e); @@ -281,7 +280,7 @@ public void clearWarnings() throws SQLException { public String getString(int columnIndex) throws SQLException { try { debugCodeCall("getString", columnIndex); - return get(columnIndex).getString(); + return get(checkColumnIndex(columnIndex)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -299,7 +298,7 @@ public String getString(int columnIndex) throws SQLException { public String getString(String columnLabel) throws SQLException { try { debugCodeCall("getString", columnLabel); - return get(columnLabel).getString(); + return get(getColumnIndex(columnLabel)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -317,7 +316,7 @@ public String getString(String columnLabel) throws SQLException { public int getInt(int columnIndex) throws SQLException { try { debugCodeCall("getInt", columnIndex); - return get(columnIndex).getInt(); + return getIntInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -335,12 +334,25 @@ public int getInt(int columnIndex) throws SQLException { public int getInt(String columnLabel) throws 
SQLException { try { debugCodeCall("getInt", columnLabel); - return get(columnLabel).getInt(); + return getIntInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private int getIntInternal(int columnIndex) { + Value v = getInternal(columnIndex); + int result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getInt(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a BigDecimal. * @@ -353,7 +365,7 @@ public int getInt(String columnLabel) throws SQLException { public BigDecimal getBigDecimal(int columnIndex) throws SQLException { try { debugCodeCall("getBigDecimal", columnIndex); - return get(columnIndex).getBigDecimal(); + return get(checkColumnIndex(columnIndex)).getBigDecimal(); } catch (Exception e) { throw logAndConvert(e); } @@ -361,17 +373,22 @@ public BigDecimal getBigDecimal(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

    * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int columnIndex) throws SQLException { try { debugCodeCall("getDate", columnIndex); - return get(columnIndex).getDate(); + return LegacyDateTimeUtils.toDate(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -379,17 +396,22 @@ public Date getDate(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

    * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int columnIndex) throws SQLException { try { debugCodeCall("getTime", columnIndex); - return get(columnIndex).getTime(); + return LegacyDateTimeUtils.toTime(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -397,17 +419,22 @@ public Time getTime(int columnIndex) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

    * * @param columnIndex (1,2,...) * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { try { debugCodeCall("getTimestamp", columnIndex); - return get(columnIndex).getTimestamp(); + return LegacyDateTimeUtils.toTimestamp(conn, null, get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -425,7 +452,7 @@ public Timestamp getTimestamp(int columnIndex) throws SQLException { public BigDecimal getBigDecimal(String columnLabel) throws SQLException { try { debugCodeCall("getBigDecimal", columnLabel); - return get(columnLabel).getBigDecimal(); + return get(getColumnIndex(columnLabel)).getBigDecimal(); } catch (Exception e) { throw logAndConvert(e); } @@ -433,17 +460,22 @@ public BigDecimal getBigDecimal(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDate.class)} instead. + *

    * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Date getDate(String columnLabel) throws SQLException { try { debugCodeCall("getDate", columnLabel); - return get(columnLabel).getDate(); + return LegacyDateTimeUtils.toDate(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -451,17 +483,22 @@ public Date getDate(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalTime.class)} instead. + *

    * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Time getTime(String columnLabel) throws SQLException { try { debugCodeCall("getTime", columnLabel); - return get(columnLabel).getTime(); + return LegacyDateTimeUtils.toTime(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -469,17 +506,22 @@ public Time getTime(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Timestamp. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDateTime.class)} instead. + *

    * * @param columnLabel the column label * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override public Timestamp getTimestamp(String columnLabel) throws SQLException { try { debugCodeCall("getTimestamp", columnLabel); - return get(columnLabel).getTimestamp(); + return LegacyDateTimeUtils.toTimestamp(conn, null, get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -498,8 +540,7 @@ public Timestamp getTimestamp(String columnLabel) throws SQLException { public Object getObject(int columnIndex) throws SQLException { try { debugCodeCall("getObject", columnIndex); - Value v = get(columnIndex); - return conn.convertToDefaultObject(v); + return ValueToObjectConverter.valueToDefaultObject(get(checkColumnIndex(columnIndex)), conn, true); } catch (Exception e) { throw logAndConvert(e); } @@ -518,8 +559,7 @@ public Object getObject(int columnIndex) throws SQLException { public Object getObject(String columnLabel) throws SQLException { try { debugCodeCall("getObject", columnLabel); - Value v = get(columnLabel); - return conn.convertToDefaultObject(v); + return ValueToObjectConverter.valueToDefaultObject(get(getColumnIndex(columnLabel)), conn, true); } catch (Exception e) { throw logAndConvert(e); } @@ -537,8 +577,7 @@ public Object getObject(String columnLabel) throws SQLException { public boolean getBoolean(int columnIndex) throws SQLException { try { debugCodeCall("getBoolean", columnIndex); - Boolean v = get(columnIndex).getBoolean(); - return v == null ? 
false : v.booleanValue(); + return getBooleanInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -556,13 +595,25 @@ public boolean getBoolean(int columnIndex) throws SQLException { public boolean getBoolean(String columnLabel) throws SQLException { try { debugCodeCall("getBoolean", columnLabel); - Boolean v = get(columnLabel).getBoolean(); - return v == null ? false : v.booleanValue(); + return getBooleanInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private boolean getBooleanInternal(int columnIndex) { + Value v = getInternal(columnIndex); + boolean result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getBoolean(); + } else { + wasNull = true; + result = false; + } + return result; + } + /** * Returns the value of the specified column as a byte. * @@ -575,7 +626,7 @@ public boolean getBoolean(String columnLabel) throws SQLException { public byte getByte(int columnIndex) throws SQLException { try { debugCodeCall("getByte", columnIndex); - return get(columnIndex).getByte(); + return getByteInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -593,12 +644,25 @@ public byte getByte(int columnIndex) throws SQLException { public byte getByte(String columnLabel) throws SQLException { try { debugCodeCall("getByte", columnLabel); - return get(columnLabel).getByte(); + return getByteInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private byte getByteInternal(int columnIndex) { + Value v = getInternal(columnIndex); + byte result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getByte(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a short. 
* @@ -611,7 +675,7 @@ public byte getByte(String columnLabel) throws SQLException { public short getShort(int columnIndex) throws SQLException { try { debugCodeCall("getShort", columnIndex); - return get(columnIndex).getShort(); + return getShortInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -629,12 +693,25 @@ public short getShort(int columnIndex) throws SQLException { public short getShort(String columnLabel) throws SQLException { try { debugCodeCall("getShort", columnLabel); - return get(columnLabel).getShort(); + return getShortInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private short getShortInternal(int columnIndex) { + Value v = getInternal(columnIndex); + short result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getShort(); + } else { + wasNull = true; + result = 0; + } + return result; + } + /** * Returns the value of the specified column as a long. * @@ -647,7 +724,7 @@ public short getShort(String columnLabel) throws SQLException { public long getLong(int columnIndex) throws SQLException { try { debugCodeCall("getLong", columnIndex); - return get(columnIndex).getLong(); + return getLongInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -665,12 +742,25 @@ public long getLong(int columnIndex) throws SQLException { public long getLong(String columnLabel) throws SQLException { try { debugCodeCall("getLong", columnLabel); - return get(columnLabel).getLong(); + return getLongInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private long getLongInternal(int columnIndex) { + Value v = getInternal(columnIndex); + long result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getLong(); + } else { + wasNull = true; + result = 0L; + } + return result; + } + /** * Returns the value of the specified column as a float. 
* @@ -683,7 +773,7 @@ public long getLong(String columnLabel) throws SQLException { public float getFloat(int columnIndex) throws SQLException { try { debugCodeCall("getFloat", columnIndex); - return get(columnIndex).getFloat(); + return getFloatInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -701,12 +791,25 @@ public float getFloat(int columnIndex) throws SQLException { public float getFloat(String columnLabel) throws SQLException { try { debugCodeCall("getFloat", columnLabel); - return get(columnLabel).getFloat(); + return getFloatInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private float getFloatInternal(int columnIndex) { + Value v = getInternal(columnIndex); + float result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getFloat(); + } else { + wasNull = true; + result = 0f; + } + return result; + } + /** * Returns the value of the specified column as a double. * @@ -719,7 +822,7 @@ public float getFloat(String columnLabel) throws SQLException { public double getDouble(int columnIndex) throws SQLException { try { debugCodeCall("getDouble", columnIndex); - return get(columnIndex).getDouble(); + return getDoubleInternal(checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -737,12 +840,25 @@ public double getDouble(int columnIndex) throws SQLException { public double getDouble(String columnLabel) throws SQLException { try { debugCodeCall("getDouble", columnLabel); - return get(columnLabel).getDouble(); + return getDoubleInternal(getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private double getDoubleInternal(int columnIndex) { + Value v = getInternal(columnIndex); + double result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = v.getDouble(); + } else { + wasNull = true; + result = 0d; + } + return result; + } + /** * Returns the value of the specified column as a 
BigDecimal. * @@ -754,19 +870,18 @@ public double getDouble(String columnLabel) throws SQLException { * @throws SQLException if the column is not found or if the result set is * closed */ + @Deprecated @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) - throws SQLException { + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBigDecimal(" + - StringUtils.quoteJavaString(columnLabel)+", "+scale+");"); + debugCode("getBigDecimal(" + quote(columnLabel) + ", " + scale + ')'); } if (scale < 0) { throw DbException.getInvalidValueException("scale", scale); } - BigDecimal bd = get(columnLabel).getBigDecimal(); - return bd == null ? null : ValueDecimal.setScale(bd, scale); + BigDecimal bd = get(getColumnIndex(columnLabel)).getBigDecimal(); + return bd == null ? null : ValueNumeric.setScale(bd, scale); } catch (Exception e) { throw logAndConvert(e); } @@ -783,18 +898,18 @@ public BigDecimal getBigDecimal(String columnLabel, int scale) * @throws SQLException if the column is not found or if the result set is * closed */ + @Deprecated @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) - throws SQLException { + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getBigDecimal(" + columnIndex + ", " + scale + ");"); + debugCode("getBigDecimal(" + columnIndex + ", " + scale + ')'); } if (scale < 0) { throw DbException.getInvalidValueException("scale", scale); } - BigDecimal bd = get(columnIndex).getBigDecimal(); - return bd == null ? null : ValueDecimal.setScale(bd, scale); + BigDecimal bd = get(checkColumnIndex(columnIndex)).getBigDecimal(); + return bd == null ? 
null : ValueNumeric.setScale(bd, scale); } catch (Exception e) { throw logAndConvert(e); } @@ -804,6 +919,7 @@ public BigDecimal getBigDecimal(int columnIndex, int scale) * [Not supported] * @deprecated since JDBC 2.0, use getCharacterStream */ + @Deprecated @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { throw unsupported("unicodeStream"); @@ -813,6 +929,7 @@ public InputStream getUnicodeStream(int columnIndex) throws SQLException { * [Not supported] * @deprecated since JDBC 2.0, use setCharacterStream */ + @Deprecated @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { throw unsupported("unicodeStream"); @@ -857,12 +974,17 @@ public Ref getRef(String columnLabel) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDate.class)} instead. + *

    * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Date getDate(int columnIndex, Calendar calendar) throws SQLException { @@ -870,7 +992,8 @@ public Date getDate(int columnIndex, Calendar calendar) throws SQLException { if (isDebugEnabled()) { debugCode("getDate(" + columnIndex + ", calendar)"); } - return DateTimeUtils.convertDate(get(columnIndex), calendar); + return LegacyDateTimeUtils.toDate(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -879,23 +1002,26 @@ public Date getDate(int columnIndex, Calendar calendar) throws SQLException { /** * Returns the value of the specified column as a java.sql.Date using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDate.class)} instead. + *

    * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Date getDate(String columnLabel, Calendar calendar) - throws SQLException { + public Date getDate(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getDate(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getDate(" + quote(columnLabel) + ", calendar)"); } - return DateTimeUtils.convertDate(get(columnLabel), calendar); + return LegacyDateTimeUtils.toDate(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -904,12 +1030,17 @@ public Date getDate(String columnLabel, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalTime.class)} instead. + *

    * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override public Time getTime(int columnIndex, Calendar calendar) throws SQLException { @@ -917,7 +1048,8 @@ public Time getTime(int columnIndex, Calendar calendar) throws SQLException { if (isDebugEnabled()) { debugCode("getTime(" + columnIndex + ", calendar)"); } - return DateTimeUtils.convertTime(get(columnIndex), calendar); + return LegacyDateTimeUtils.toTime(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -926,23 +1058,26 @@ public Time getTime(int columnIndex, Calendar calendar) throws SQLException { /** * Returns the value of the specified column as a java.sql.Time using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalTime.class)} instead. + *

    * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Time getTime(String columnLabel, Calendar calendar) - throws SQLException { + public Time getTime(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTime(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getTime(" + quote(columnLabel) + ", calendar)"); } - return DateTimeUtils.convertTime(get(columnLabel), calendar); + return LegacyDateTimeUtils.toTime(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -951,22 +1086,26 @@ public Time getTime(String columnLabel, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Timestamp using a * specified time zone. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnIndex, LocalDateTime.class)} instead. + *

    * * @param columnIndex (1,2,...) * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(int, Class) */ @Override - public Timestamp getTimestamp(int columnIndex, Calendar calendar) - throws SQLException { + public Timestamp getTimestamp(int columnIndex, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { debugCode("getTimestamp(" + columnIndex + ", calendar)"); } - Value value = get(columnIndex); - return DateTimeUtils.convertTimestamp(value, calendar); + return LegacyDateTimeUtils.toTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, + get(checkColumnIndex(columnIndex))); } catch (Exception e) { throw logAndConvert(e); } @@ -974,24 +1113,26 @@ public Timestamp getTimestamp(int columnIndex, Calendar calendar) /** * Returns the value of the specified column as a java.sql.Timestamp. + *

    + * Usage of this method is discouraged. Use + * {@code getObject(columnLabel, LocalDateTime.class)} instead. + *

    * * @param columnLabel the column label * @param calendar the calendar * @return the value * @throws SQLException if the column is not found or if the result set is * closed + * @see #getObject(String, Class) */ @Override - public Timestamp getTimestamp(String columnLabel, Calendar calendar) - throws SQLException { + public Timestamp getTimestamp(String columnLabel, Calendar calendar) throws SQLException { try { if (isDebugEnabled()) { - debugCode("getTimestamp(" + - StringUtils.quoteJavaString(columnLabel) + - ", calendar)"); + debugCode("getTimestamp(" + quote(columnLabel) + ", calendar)"); } - Value value = get(columnLabel); - return DateTimeUtils.convertTimestamp(value, calendar); + return LegacyDateTimeUtils.toTimestamp(conn, calendar != null ? calendar.getTimeZone() : null, + get(getColumnIndex(columnLabel))); } catch (Exception e) { throw logAndConvert(e); } @@ -1009,10 +1150,10 @@ public Timestamp getTimestamp(String columnLabel, Calendar calendar) public Blob getBlob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.BLOB); - debugCodeAssign("Blob", TraceObject.BLOB, - id, "getBlob(" + columnIndex + ")"); - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? null : new JdbcBlob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Blob", TraceObject.BLOB, id, "getBlob(" + columnIndex + ')'); + } + return getBlob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1030,15 +1171,28 @@ public Blob getBlob(int columnIndex) throws SQLException { public Blob getBlob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.BLOB); - debugCodeAssign("Blob", TraceObject.BLOB, - id, "getBlob(" + quote(columnLabel) + ")"); - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? 
null : new JdbcBlob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Blob", TraceObject.BLOB, id, "getBlob(" + quote(columnLabel) + ')'); + } + return getBlob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private JdbcBlob getBlob(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcBlob result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcBlob(conn, v, JdbcLob.State.WITH_VALUE, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** * Returns the value of the specified column as a byte array. * @@ -1051,7 +1205,7 @@ public Blob getBlob(String columnLabel) throws SQLException { public byte[] getBytes(int columnIndex) throws SQLException { try { debugCodeCall("getBytes", columnIndex); - return get(columnIndex).getBytes(); + return get(checkColumnIndex(columnIndex)).getBytes(); } catch (Exception e) { throw logAndConvert(e); } @@ -1069,7 +1223,7 @@ public byte[] getBytes(int columnIndex) throws SQLException { public byte[] getBytes(String columnLabel) throws SQLException { try { debugCodeCall("getBytes", columnLabel); - return get(columnLabel).getBytes(); + return get(getColumnIndex(columnLabel)).getBytes(); } catch (Exception e) { throw logAndConvert(e); } @@ -1087,7 +1241,7 @@ public byte[] getBytes(String columnLabel) throws SQLException { public InputStream getBinaryStream(int columnIndex) throws SQLException { try { debugCodeCall("getBinaryStream", columnIndex); - return get(columnIndex).getInputStream(); + return get(checkColumnIndex(columnIndex)).getInputStream(); } catch (Exception e) { throw logAndConvert(e); } @@ -1105,7 +1259,7 @@ public InputStream getBinaryStream(int columnIndex) throws SQLException { public InputStream getBinaryStream(String columnLabel) throws SQLException { try { debugCodeCall("getBinaryStream", columnLabel); - return get(columnLabel).getInputStream(); + return get(getColumnIndex(columnLabel)).getInputStream(); } 
catch (Exception e) { throw logAndConvert(e); } @@ -1124,9 +1278,10 @@ public InputStream getBinaryStream(String columnLabel) throws SQLException { public Clob getClob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.CLOB); - debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + columnIndex + ")"); - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + columnIndex + ')'); + } + return getClob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1144,10 +1299,10 @@ public Clob getClob(int columnIndex) throws SQLException { public Clob getClob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.CLOB); - debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + - quote(columnLabel) + ")"); - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + quote(columnLabel) + ')'); + } + return getClob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } @@ -1165,9 +1320,10 @@ public Clob getClob(String columnLabel) throws SQLException { public Array getArray(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.ARRAY); - debugCodeAssign("Clob", TraceObject.ARRAY, id, "getArray(" + columnIndex + ")"); - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? 
null : new JdbcArray(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + columnIndex + ')'); + } + return getArray(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -1185,15 +1341,28 @@ public Array getArray(int columnIndex) throws SQLException { public Array getArray(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.ARRAY); - debugCodeAssign("Clob", TraceObject.ARRAY, id, "getArray(" + - quote(columnLabel) + ")"); - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcArray(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + quote(columnLabel) + ')'); + } + return getArray(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private Array getArray(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcArray result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcArray(conn, v, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** * Returns the value of the specified column as an input stream. * @@ -1206,7 +1375,7 @@ public Array getArray(String columnLabel) throws SQLException { public InputStream getAsciiStream(int columnIndex) throws SQLException { try { debugCodeCall("getAsciiStream", columnIndex); - String s = get(columnIndex).getString(); + String s = get(checkColumnIndex(columnIndex)).getString(); return s == null ? 
null : IOUtils.getInputStreamFromString(s); } catch (Exception e) { throw logAndConvert(e); @@ -1225,7 +1394,7 @@ public InputStream getAsciiStream(int columnIndex) throws SQLException { public InputStream getAsciiStream(String columnLabel) throws SQLException { try { debugCodeCall("getAsciiStream", columnLabel); - String s = get(columnLabel).getString(); + String s = get(getColumnIndex(columnLabel)).getString(); return IOUtils.getInputStreamFromString(s); } catch (Exception e) { throw logAndConvert(e); @@ -1244,7 +1413,7 @@ public InputStream getAsciiStream(String columnLabel) throws SQLException { public Reader getCharacterStream(int columnIndex) throws SQLException { try { debugCodeCall("getCharacterStream", columnIndex); - return get(columnIndex).getReader(); + return get(checkColumnIndex(columnIndex)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -1262,7 +1431,7 @@ public Reader getCharacterStream(int columnIndex) throws SQLException { public Reader getCharacterStream(String columnLabel) throws SQLException { try { debugCodeCall("getCharacterStream", columnLabel); - return get(columnLabel).getReader(); + return get(getColumnIndex(columnLabel)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -1296,7 +1465,7 @@ public URL getURL(String columnLabel) throws SQLException { public void updateNull(int columnIndex) throws SQLException { try { debugCodeCall("updateNull", columnIndex); - update(columnIndex, ValueNull.INSTANCE); + update(checkColumnIndex(columnIndex), ValueNull.INSTANCE); } catch (Exception e) { throw logAndConvert(e); } @@ -1312,7 +1481,7 @@ public void updateNull(int columnIndex) throws SQLException { public void updateNull(String columnLabel) throws SQLException { try { debugCodeCall("updateNull", columnLabel); - update(columnLabel, ValueNull.INSTANCE); + update(getColumnIndex(columnLabel), ValueNull.INSTANCE); } catch (Exception e) { throw logAndConvert(e); } @@ -1329,9 +1498,9 @@ public void updateNull(String 
columnLabel) throws SQLException { public void updateBoolean(int columnIndex, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBoolean("+columnIndex+", "+x+");"); + debugCode("updateBoolean(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueBoolean.get(x)); + update(checkColumnIndex(columnIndex), ValueBoolean.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1345,13 +1514,12 @@ public void updateBoolean(int columnIndex, boolean x) throws SQLException { * @throws SQLException if result set is closed or not updatable */ @Override - public void updateBoolean(String columnLabel, boolean x) - throws SQLException { + public void updateBoolean(String columnLabel, boolean x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBoolean("+quote(columnLabel)+", "+x+");"); + debugCode("updateBoolean(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueBoolean.get(x)); + update(getColumnIndex(columnLabel), ValueBoolean.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1368,9 +1536,9 @@ public void updateBoolean(String columnLabel, boolean x) public void updateByte(int columnIndex, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateByte("+columnIndex+", "+x+");"); + debugCode("updateByte(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueByte.get(x)); + update(checkColumnIndex(columnIndex), ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1387,9 +1555,9 @@ public void updateByte(int columnIndex, byte x) throws SQLException { public void updateByte(String columnLabel, byte x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateByte("+columnLabel+", "+x+");"); + debugCode("updateByte(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueByte.get(x)); + update(getColumnIndex(columnLabel), ValueTinyint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1406,9 
+1574,9 @@ public void updateByte(String columnLabel, byte x) throws SQLException { public void updateBytes(int columnIndex, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBytes("+columnIndex+", x);"); + debugCode("updateBytes(" + columnIndex + ", x)"); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x)); + update(checkColumnIndex(columnIndex), x == null ? ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1425,9 +1593,9 @@ public void updateBytes(int columnIndex, byte[] x) throws SQLException { public void updateBytes(String columnLabel, byte[] x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBytes("+quote(columnLabel)+", x);"); + debugCode("updateBytes(" + quote(columnLabel) + ", x)"); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarbinary.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1444,9 +1612,9 @@ public void updateBytes(String columnLabel, byte[] x) throws SQLException { public void updateShort(int columnIndex, short x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateShort("+columnIndex+", (short) "+x+");"); + debugCode("updateShort(" + columnIndex + ", (short) " + x + ')'); } - update(columnIndex, ValueShort.get(x)); + update(checkColumnIndex(columnIndex), ValueSmallint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1463,9 +1631,9 @@ public void updateShort(int columnIndex, short x) throws SQLException { public void updateShort(String columnLabel, short x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateShort("+quote(columnLabel)+", (short) "+x+");"); + debugCode("updateShort(" + quote(columnLabel) + ", (short) " + x + ')'); } - update(columnLabel, ValueShort.get(x)); + update(getColumnIndex(columnLabel), ValueSmallint.get(x)); 
} catch (Exception e) { throw logAndConvert(e); } @@ -1482,9 +1650,9 @@ public void updateShort(String columnLabel, short x) throws SQLException { public void updateInt(int columnIndex, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateInt("+columnIndex+", "+x+");"); + debugCode("updateInt(" + columnIndex + ", " + x + ')'); } - update(columnIndex, ValueInt.get(x)); + update(checkColumnIndex(columnIndex), ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1501,9 +1669,9 @@ public void updateInt(int columnIndex, int x) throws SQLException { public void updateInt(String columnLabel, int x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateInt("+quote(columnLabel)+", "+x+");"); + debugCode("updateInt(" + quote(columnLabel) + ", " + x + ')'); } - update(columnLabel, ValueInt.get(x)); + update(getColumnIndex(columnLabel), ValueInteger.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1520,9 +1688,9 @@ public void updateInt(String columnLabel, int x) throws SQLException { public void updateLong(int columnIndex, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateLong("+columnIndex+", "+x+"L);"); + debugCode("updateLong(" + columnIndex + ", " + x + "L)"); } - update(columnIndex, ValueLong.get(x)); + update(checkColumnIndex(columnIndex), ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1539,9 +1707,9 @@ public void updateLong(int columnIndex, long x) throws SQLException { public void updateLong(String columnLabel, long x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateLong("+quote(columnLabel)+", "+x+"L);"); + debugCode("updateLong(" + quote(columnLabel) + ", " + x + "L)"); } - update(columnLabel, ValueLong.get(x)); + update(getColumnIndex(columnLabel), ValueBigint.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1558,9 +1726,9 @@ public void updateLong(String columnLabel, long x) throws 
SQLException { public void updateFloat(int columnIndex, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateFloat("+columnIndex+", "+x+"f);"); + debugCode("updateFloat(" + columnIndex + ", " + x + "f)"); } - update(columnIndex, ValueFloat.get(x)); + update(checkColumnIndex(columnIndex), ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1577,9 +1745,9 @@ public void updateFloat(int columnIndex, float x) throws SQLException { public void updateFloat(String columnLabel, float x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateFloat("+quote(columnLabel)+", "+x+"f);"); + debugCode("updateFloat(" + quote(columnLabel) + ", " + x + "f)"); } - update(columnLabel, ValueFloat.get(x)); + update(getColumnIndex(columnLabel), ValueReal.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1596,9 +1764,9 @@ public void updateFloat(String columnLabel, float x) throws SQLException { public void updateDouble(int columnIndex, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDouble("+columnIndex+", "+x+"d);"); + debugCode("updateDouble(" + columnIndex + ", " + x + "d)"); } - update(columnIndex, ValueDouble.get(x)); + update(checkColumnIndex(columnIndex), ValueDouble.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1615,9 +1783,9 @@ public void updateDouble(int columnIndex, double x) throws SQLException { public void updateDouble(String columnLabel, double x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDouble("+quote(columnLabel)+", "+x+"d);"); + debugCode("updateDouble(" + quote(columnLabel) + ", " + x + "d)"); } - update(columnLabel, ValueDouble.get(x)); + update(getColumnIndex(columnLabel), ValueDouble.get(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1631,14 +1799,12 @@ public void updateDouble(String columnLabel, double x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ 
@Override - public void updateBigDecimal(int columnIndex, BigDecimal x) - throws SQLException { + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBigDecimal("+columnIndex+", " + quoteBigDecimal(x) + ");"); + debugCode("updateBigDecimal(" + columnIndex + ", " + quoteBigDecimal(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueDecimal.get(x)); + update(checkColumnIndex(columnIndex), x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1652,15 +1818,12 @@ public void updateBigDecimal(int columnIndex, BigDecimal x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) - throws SQLException { + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBigDecimal(" + quote(columnLabel) + ", " + - quoteBigDecimal(x) + ");"); + debugCode("updateBigDecimal(" + quote(columnLabel) + ", " + quoteBigDecimal(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueDecimal.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueNumeric.getAnyScale(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1677,10 +1840,9 @@ public void updateBigDecimal(String columnLabel, BigDecimal x) public void updateString(int columnIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateString("+columnIndex+", "+quote(x)+");"); + debugCode("updateString(" + columnIndex + ", " + quote(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueString.get(x)); + update(checkColumnIndex(columnIndex), x == null ? 
ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1697,10 +1859,9 @@ public void updateString(int columnIndex, String x) throws SQLException { public void updateString(String columnLabel, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateString("+quote(columnLabel)+", "+quote(x)+");"); + debugCode("updateString(" + quote(columnLabel) + ", " + quote(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueString.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -1708,18 +1869,25 @@ public void updateString(String columnLabel, String x) throws SQLException { /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override public void updateDate(int columnIndex, Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDate("+columnIndex+", x);"); + debugCode("updateDate(" + columnIndex + ", " + quoteDate(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1727,18 +1895,25 @@ public void updateDate(int columnIndex, Date x) throws SQLException { /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with {@link java.time.LocalDate} + * parameter instead. + *

    * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override public void updateDate(String columnLabel, Date x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateDate("+quote(columnLabel)+", x);"); + debugCode("updateDate(" + quote(columnLabel) + ", " + quoteDate(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromDate(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1746,18 +1921,25 @@ public void updateDate(String columnLabel, Date x) throws SQLException { /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override public void updateTime(int columnIndex, Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTime("+columnIndex+", x);"); + debugCode("updateTime(" + columnIndex + ", " + quoteTime(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1765,18 +1947,25 @@ public void updateTime(int columnIndex, Time x) throws SQLException { /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with {@link java.time.LocalTime} + * parameter instead. + *

    * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override public void updateTime(String columnLabel, Time x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTime("+quote(columnLabel)+", x);"); + debugCode("updateTime(" + quote(columnLabel) + ", " + quoteTime(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTime(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1784,20 +1973,25 @@ public void updateTime(String columnLabel, Time x) throws SQLException { /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnIndex, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param columnIndex (1,2,...) * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(int, Object) */ @Override - public void updateTimestamp(int columnIndex, Timestamp x) - throws SQLException { + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTimestamp("+columnIndex+", x);"); + debugCode("updateTimestamp(" + columnIndex + ", " + quoteTimestamp(x) + ')'); } - update(columnIndex, x == null ? (Value) ValueNull.INSTANCE - : ValueTimestamp.get(x)); + update(checkColumnIndex(columnIndex), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1805,20 +1999,25 @@ public void updateTimestamp(int columnIndex, Timestamp x) /** * Updates a column in the current or insert row. + *

    + * Usage of this method is discouraged. Use + * {@code updateObject(columnLabel, value)} with + * {@link java.time.LocalDateTime} parameter instead. + *

    * * @param columnLabel the column label * @param x the value * @throws SQLException if the result set is closed or not updatable + * @see #updateObject(String, Object) */ @Override - public void updateTimestamp(String columnLabel, Timestamp x) - throws SQLException { + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateTimestamp("+quote(columnLabel)+", x);"); + debugCode("updateTimestamp(" + quote(columnLabel) + ", " + quoteTimestamp(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE - : ValueTimestamp.get(x)); + update(getColumnIndex(columnLabel), + x == null ? ValueNull.INSTANCE : LegacyDateTimeUtils.fromTimestamp(conn, null, x)); } catch (Exception e) { throw logAndConvert(e); } @@ -1833,9 +2032,15 @@ public void updateTimestamp(String columnLabel, Timestamp x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) - throws SQLException { - updateAsciiStream(columnIndex, x, (long) length); + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + columnIndex + ", x, " + length + ')'); + } + updateAscii(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1846,9 +2051,15 @@ public void updateAsciiStream(int columnIndex, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x) - throws SQLException { - updateAsciiStream(columnIndex, x, -1); + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + columnIndex + ", x)"); + } + updateAscii(checkColumnIndex(columnIndex), x, -1L); + } catch 
(Exception e) { + throw logAndConvert(e); + } } /** @@ -1860,15 +2071,12 @@ public void updateAsciiStream(int columnIndex, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) - throws SQLException { + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateAsciiStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateAsciiStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - update(columnIndex, v); + updateAscii(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -1883,9 +2091,15 @@ public void updateAsciiStream(int columnIndex, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) - throws SQLException { - updateAsciiStream(columnLabel, x, (long) length); + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateAscii(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1896,9 +2110,15 @@ public void updateAsciiStream(String columnLabel, InputStream x, int length) * @throws SQLException if the result set is closed */ @Override - public void updateAsciiStream(String columnLabel, InputStream x) - throws SQLException { - updateAsciiStream(columnLabel, x, -1); + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x)"); + } + 
updateAscii(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1910,20 +2130,21 @@ public void updateAsciiStream(String columnLabel, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) - throws SQLException { + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateAsciiStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateAsciiStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(IOUtils.getAsciiReader(x), length); - update(columnLabel, v); + updateAscii(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } + private void updateAscii(int columnIndex, InputStream x, long length) { + update(columnIndex, conn.createClob(IOUtils.getAsciiReader(x), length)); + } + /** * Updates a column in the current or insert row. 
* @@ -1933,9 +2154,15 @@ public void updateAsciiStream(String columnLabel, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) - throws SQLException { - updateBinaryStream(columnIndex, x, (long) length); + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + columnIndex + ", x, " + length + ')'); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1946,9 +2173,15 @@ public void updateBinaryStream(int columnIndex, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x) - throws SQLException { - updateBinaryStream(columnIndex, x, -1); + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + columnIndex + ", x)"); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1960,15 +2193,12 @@ public void updateBinaryStream(int columnIndex, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) - throws SQLException { + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBinaryStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateBinaryStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnIndex, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { 
throw logAndConvert(e); } @@ -1982,9 +2212,15 @@ public void updateBinaryStream(int columnIndex, InputStream x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x) - throws SQLException { - updateBinaryStream(columnLabel, x, -1); + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x)"); + } + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -1996,9 +2232,15 @@ public void updateBinaryStream(String columnLabel, InputStream x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) - throws SQLException { - updateBinaryStream(columnLabel, x, (long) length); + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateBlobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2010,15 +2252,12 @@ public void updateBinaryStream(String columnLabel, InputStream x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBinaryStream(String columnLabel, InputStream x, - long length) throws SQLException { + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBinaryStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateBinaryStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnLabel, v); + 
updateBlobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2033,15 +2272,12 @@ public void updateBinaryStream(String columnLabel, InputStream x, * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) - throws SQLException { + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateCharacterStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateCharacterStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2056,9 +2292,15 @@ public void updateCharacterStream(int columnIndex, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) - throws SQLException { - updateCharacterStream(columnIndex, x, (long) length); + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + columnIndex + ", x, " + length + ')'); + } + updateClobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2069,9 +2311,15 @@ public void updateCharacterStream(int columnIndex, Reader x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(int columnIndex, Reader x) - throws SQLException { - updateCharacterStream(columnIndex, x, -1); + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + columnIndex + 
", x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2083,9 +2331,15 @@ public void updateCharacterStream(int columnIndex, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x, int length) - throws SQLException { - updateCharacterStream(columnLabel, x, (long) length); + public void updateCharacterStream(String columnLabel, Reader x, int length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x, " + length + ')'); + } + updateClobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2096,9 +2350,15 @@ public void updateCharacterStream(String columnLabel, Reader x, int length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x) - throws SQLException { - updateCharacterStream(columnLabel, x, -1); + public void updateCharacterStream(String columnLabel, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2110,15 +2370,12 @@ public void updateCharacterStream(String columnLabel, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateCharacterStream(String columnLabel, Reader x, long length) - throws SQLException { + public void updateCharacterStream(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateCharacterStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateCharacterStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - 
checkClosed(); - Value v = conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2133,20 +2390,17 @@ public void updateCharacterStream(String columnLabel, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateObject(int columnIndex, Object x, int scale) - throws SQLException { + public void updateObject(int columnIndex, Object x, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+columnIndex+", x, "+scale+");"); + debugCode("updateObject(" + columnIndex + ", x, " + scale + ')'); } - update(columnIndex, convertToUnknownValue(x)); + update(checkColumnIndex(columnIndex), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } } - - /** * Updates a column in the current or insert row. * @@ -2156,13 +2410,12 @@ public void updateObject(int columnIndex, Object x, int scale) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateObject(String columnLabel, Object x, int scale) - throws SQLException { + public void updateObject(String columnLabel, Object x, int scale) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+quote(columnLabel)+", x, "+scale+");"); + debugCode("updateObject(" + quote(columnLabel) + ", x, " + scale + ')'); } - update(columnLabel, convertToUnknownValue(x)); + update(getColumnIndex(columnLabel), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -2179,9 +2432,9 @@ public void updateObject(String columnLabel, Object x, int scale) public void updateObject(int columnIndex, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+columnIndex+", x);"); + debugCode("updateObject(" + columnIndex + ", x)"); } - update(columnIndex, convertToUnknownValue(x)); + 
update(checkColumnIndex(columnIndex), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } @@ -2198,40 +2451,32 @@ public void updateObject(int columnIndex, Object x) throws SQLException { public void updateObject(String columnLabel, Object x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateObject("+quote(columnLabel)+", x);"); + debugCode("updateObject(" + quote(columnLabel) + ", x)"); } - update(columnLabel, convertToUnknownValue(x)); + update(getColumnIndex(columnLabel), convertToUnknownValue(x)); } catch (Exception e) { throw logAndConvert(e); } } - /** - * [Not supported] - */ - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw unsupported("ref"); - } - - /** - * [Not supported] - */ - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw unsupported("ref"); - } - /** * Updates a column in the current or insert row. * * @param columnIndex (1,2,...) * @param x the value + * @param targetSqlType the SQL type * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(int columnIndex, InputStream x) throws SQLException { - updateBlob(columnIndex, x, -1); + public void updateObject(int columnIndex, Object x, SQLType targetSqlType) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateObject(" + columnIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ')'); + } + update(checkColumnIndex(columnIndex), convertToValue(x, targetSqlType)); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2239,19 +2484,18 @@ public void updateBlob(int columnIndex, InputStream x) throws SQLException { * * @param columnIndex (1,2,...) 
* @param x the value - * @param length the length + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(int columnIndex, InputStream x, long length) - throws SQLException { + public void updateObject(int columnIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+columnIndex+", x, " + length + "L);"); + debugCode("updateObject(" + columnIndex + ", x, " + DataType.sqlTypeToString(targetSqlType) + ", " + + scaleOrLength + ')'); } - checkClosed(); - Value v = conn.createBlob(x, length); - update(columnIndex, v); + update(checkColumnIndex(columnIndex), convertToValue(x, targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } @@ -2260,24 +2504,19 @@ public void updateBlob(int columnIndex, InputStream x, long length) /** * Updates a column in the current or insert row. * - * @param columnIndex (1,2,...) 
+ * @param columnLabel the column label * @param x the value + * @param targetSqlType the SQL type * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { + public void updateObject(String columnLabel, Object x, SQLType targetSqlType) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+columnIndex+", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createBlob(x.getBinaryStream(), -1); + debugCode("updateObject(" + quote(columnLabel) + ", x, " + DataType.sqlTypeToString(targetSqlType) + + ')'); } - update(columnIndex, v); + update(getColumnIndex(columnLabel), convertToValue(x, targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } @@ -2288,57 +2527,54 @@ public void updateBlob(int columnIndex, Blob x) throws SQLException { * * @param columnLabel the column label * @param x the value + * @param targetSqlType the SQL type + * @param scaleOrLength is ignored * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { + public void updateObject(String columnLabel, Object x, SQLType targetSqlType, int scaleOrLength) + throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+quote(columnLabel)+", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createBlob(x.getBinaryStream(), -1); + debugCode("updateObject(" + quote(columnLabel) + ", x, " + DataType.sqlTypeToString(targetSqlType) + + ", " + scaleOrLength + ')'); } - update(columnLabel, v); + update(getColumnIndex(columnLabel), convertToValue(x, targetSqlType)); } catch (Exception e) { throw logAndConvert(e); } } /** - * Updates a column in the current or insert row. 
- * - * @param columnLabel the column label - * @param x the value - * @throws SQLException if the result set is closed or not updatable + * [Not supported] */ @Override - public void updateBlob(String columnLabel, InputStream x) throws SQLException { - updateBlob(columnLabel, x, -1); + public void updateRef(int columnIndex, Ref x) throws SQLException { + throw unsupported("ref"); + } + + /** + * [Not supported] + */ + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + throw unsupported("ref"); } /** * Updates a column in the current or insert row. * - * @param columnLabel the column label + * @param columnIndex (1,2,...) * @param x the value - * @param length the length * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateBlob(String columnLabel, InputStream x, long length) - throws SQLException { + public void updateBlob(int columnIndex, InputStream x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateBlob("+quote(columnLabel)+", x, " + length + "L);"); + debugCode("updateBlob(" + columnIndex + ", (InputStream) x)"); } - checkClosed(); - Value v = conn.createBlob(x, -1); - update(columnLabel, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); } catch (Exception e) { throw logAndConvert(e); } @@ -2349,22 +2585,16 @@ public void updateBlob(String columnLabel, InputStream x, long length) * * @param columnIndex (1,2,...) 
* @param x the value + * @param length the length * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { + public void updateBlob(int columnIndex, InputStream x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+columnIndex+", x);"); - } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); + debugCode("updateBlob(" + columnIndex + ", (InputStream) x, " + length + "L)"); } - update(columnIndex, v); + updateBlobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -2378,33 +2608,40 @@ public void updateClob(int columnIndex, Clob x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(int columnIndex, Reader x) throws SQLException { - updateClob(columnIndex, x, -1); + public void updateBlob(int columnIndex, Blob x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBlob(" + columnIndex + ", (Blob) x)"); + } + updateBlobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** * Updates a column in the current or insert row. * - * @param columnIndex (1,2,...) 
+ * @param columnLabel the column label * @param x the value - * @param length the length * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(int columnIndex, Reader x, long length) - throws SQLException { + public void updateBlob(String columnLabel, Blob x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+columnIndex+", x, " + length + "L);"); + debugCode("updateBlob(" + quote(columnLabel) + ", (Blob) x)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); } catch (Exception e) { throw logAndConvert(e); } } + private void updateBlobImpl(int columnIndex, Blob x, long length) throws SQLException { + update(columnIndex, x == null ? ValueNull.INSTANCE : conn.createBlob(x.getBinaryStream(), length)); + } + /** * Updates a column in the current or insert row. * @@ -2413,19 +2650,113 @@ public void updateClob(int columnIndex, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { + public void updateBlob(String columnLabel, InputStream x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+quote(columnLabel)+", x);"); + debugCode("updateBlob(" + quote(columnLabel) + ", (InputStream) x)"); } - checkClosed(); - Value v; - if (x == null) { - v = ValueNull.INSTANCE; - } else { - v = conn.createClob(x.getCharacterStream(), -1); + updateBlobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. 
+ * + * @param columnLabel the column label + * @param x the value + * @param length the length + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateBlob(String columnLabel, InputStream x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateBlob(" + quote(columnLabel) + ", (InputStream) x, " + length + "L)"); + } + updateBlobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void updateBlobImpl(int columnIndex, InputStream x, long length) { + update(columnIndex, conn.createBlob(x, length)); + } + + /** + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param x the value + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + columnIndex + ", (Clob) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param x the value + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateClob(int columnIndex, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + columnIndex + ", (Reader) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) 
+ * @param x the value + * @param length the length + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateClob(int columnIndex, Reader x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + columnIndex + ", (Reader) x, " + length + "L)"); } - update(columnLabel, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Updates a column in the current or insert row. + * + * @param columnLabel the column label + * @param x the value + * @throws SQLException if the result set is closed or not updatable + */ + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + quote(columnLabel) + ", (Clob) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x); } catch (Exception e) { throw logAndConvert(e); } @@ -2440,7 +2771,14 @@ public void updateClob(String columnLabel, Clob x) throws SQLException { */ @Override public void updateClob(String columnLabel, Reader x) throws SQLException { - updateClob(columnLabel, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateClob(" + quote(columnLabel) + ", (Reader) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -2452,34 +2790,58 @@ public void updateClob(String columnLabel, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateClob(String columnLabel, Reader x, long length) - throws SQLException { + public void updateClob(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateClob("+quote(columnLabel)+", x, " + length + "L);"); + debugCode("updateClob(" + quote(columnLabel) + ", (Reader) x, " + length + "L)"); } - checkClosed(); - Value v = 
conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param x the value + * @throws SQLException if the result set is closed or not updatable */ @Override public void updateArray(int columnIndex, Array x) throws SQLException { - throw unsupported("setArray"); + try { + if (isDebugEnabled()) { + debugCode("updateArray(" + columnIndex + ", x)"); + } + updateArrayImpl(checkColumnIndex(columnIndex), x); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] + * Updates a column in the current or insert row. + * + * @param columnLabel the column label + * @param x the value + * @throws SQLException if the result set is closed or not updatable */ @Override public void updateArray(String columnLabel, Array x) throws SQLException { - throw unsupported("setArray"); + try { + if (isDebugEnabled()) { + debugCode("updateArray(" + quote(columnLabel) + ", x)"); + } + updateArrayImpl(getColumnIndex(columnLabel), x); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void updateArrayImpl(int columnIndex, Array x) throws SQLException { + update(columnIndex, x == null ? ValueNull.INSTANCE + : ValueToObjectConverter.objectToValue(stat.session, x.getArray(), Value.ARRAY)); } /** @@ -2503,11 +2865,11 @@ public int getRow() throws SQLException { try { debugCodeCall("getRow"); checkClosed(); - int rowId = result.getRowId(); - if (rowId >= result.getRowCount()) { + if (result.isAfterLast()) { return 0; } - return rowId + 1; + long rowNumber = result.getRowId() + 1; + return rowNumber <= Integer.MAX_VALUE ? 
(int) rowNumber : Statement.SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } @@ -2613,7 +2975,11 @@ public void setFetchSize(int rows) throws SQLException { */ @Override public void setFetchDirection(int direction) throws SQLException { - throw unsupported("setFetchDirection"); + debugCodeCall("setFetchDirection", direction); + // ignore FETCH_FORWARD, that's the default value, which we do support + if (direction != ResultSet.FETCH_FORWARD) { + throw unsupported("setFetchDirection"); + } } /** @@ -2648,9 +3014,7 @@ public boolean isBeforeFirst() throws SQLException { try { debugCodeCall("isBeforeFirst"); checkClosed(); - int row = result.getRowId(); - int count = result.getRowCount(); - return count > 0 && row < 0; + return result.getRowId() < 0 && result.hasNext(); } catch (Exception e) { throw logAndConvert(e); } @@ -2669,9 +3033,7 @@ public boolean isAfterLast() throws SQLException { try { debugCodeCall("isAfterLast"); checkClosed(); - int row = result.getRowId(); - int count = result.getRowCount(); - return count > 0 && row >= count; + return result.getRowId() > 0 && result.isAfterLast(); } catch (Exception e) { throw logAndConvert(e); } @@ -2689,8 +3051,7 @@ public boolean isFirst() throws SQLException { try { debugCodeCall("isFirst"); checkClosed(); - int row = result.getRowId(); - return row == 0 && row < result.getRowCount(); + return result.getRowId() == 0 && !result.isAfterLast(); } catch (Exception e) { throw logAndConvert(e); } @@ -2708,8 +3069,8 @@ public boolean isLast() throws SQLException { try { debugCodeCall("isLast"); checkClosed(); - int row = result.getRowId(); - return row >= 0 && row == result.getRowCount() - 1; + long rowId = result.getRowId(); + return rowId >= 0 && !result.isAfterLast() && !result.hasNext(); } catch (Exception e) { throw logAndConvert(e); } @@ -2751,7 +3112,7 @@ public void afterLast() throws SQLException { } catch (Exception e) { throw logAndConvert(e); } -} + } /** * Moves the current position to the 
first row. This is the same as calling @@ -2765,10 +3126,9 @@ public boolean first() throws SQLException { try { debugCodeCall("first"); checkClosed(); - if (result.getRowId() < 0) { - return nextRow(); + if (result.getRowId() >= 0) { + resetResult(); } - resetResult(); return nextRow(); } catch (Exception e) { throw logAndConvert(e); @@ -2786,7 +3146,13 @@ public boolean last() throws SQLException { try { debugCodeCall("last"); checkClosed(); - return absolute(-1); + if (result.isAfterLast()) { + resetResult(); + } + while (result.hasNext()) { + nextRow(); + } + return isOnValidRow(); } catch (Exception e) { throw logAndConvert(e); } @@ -2798,7 +3164,7 @@ public boolean last() throws SQLException { * @param rowNumber the row number. 0 is not allowed, 1 means the first row, * 2 the second. -1 means the last row, -2 the row before the * last row. If the value is too large, the position is moved - * after the last row, if if the value is too small it is moved + * after the last row, if the value is too small it is moved * before the first row. * @return true if there is a row available, false if not * @throws SQLException if the result set is closed @@ -2808,19 +3174,16 @@ public boolean absolute(int rowNumber) throws SQLException { try { debugCodeCall("absolute", rowNumber); checkClosed(); - if (rowNumber < 0) { - rowNumber = result.getRowCount() + rowNumber + 1; - } else if (rowNumber > result.getRowCount() + 1) { - rowNumber = result.getRowCount() + 1; - } - if (rowNumber <= result.getRowId()) { + long longRowNumber = rowNumber >= 0 ? 
rowNumber : result.getRowCount() + rowNumber + 1; + if (--longRowNumber < result.getRowId()) { resetResult(); } - while (result.getRowId() + 1 < rowNumber) { - nextRow(); + while (result.getRowId() < longRowNumber) { + if (!nextRow()) { + return false; + } } - int row = result.getRowId(); - return row >= 0 && row < result.getRowCount(); + return isOnValidRow(); } catch (Exception e) { throw logAndConvert(e); } @@ -2831,7 +3194,7 @@ public boolean absolute(int rowNumber) throws SQLException { * * @param rowCount 0 means don't do anything, 1 is the next row, -1 the * previous. If the value is too large, the position is moved - * after the last row, if if the value is too small it is moved + * after the last row, if the value is too small it is moved * before the first row. * @return true if there is a row available, false if not * @throws SQLException if the result set is closed @@ -2841,13 +3204,19 @@ public boolean relative(int rowCount) throws SQLException { try { debugCodeCall("relative", rowCount); checkClosed(); - int row = result.getRowId() + 1 + rowCount; - if (row < 0) { - row = 0; - } else if (row > result.getRowCount()) { - row = result.getRowCount() + 1; + long longRowCount; + if (rowCount < 0) { + longRowCount = result.getRowId() + rowCount + 1; + resetResult(); + } else { + longRowCount = rowCount; + } + while (longRowCount-- > 0) { + if (!nextRow()) { + return false; + } } - return absolute(row); + return isOnValidRow(); } catch (Exception e) { throw logAndConvert(e); } @@ -2990,7 +3359,7 @@ public void updateRow() throws SQLException { UpdatableRow row = getUpdatableRow(); Value[] current = new Value[columnCount]; for (int i = 0; i < updateRow.length; i++) { - current[i] = get(i + 1); + current[i] = getInternal(checkColumnIndex(i + 1)); } row.updateRow(current, updateRow); for (int i = 0; i < updateRow.length; i++) { @@ -3091,21 +3460,24 @@ private int getColumnIndex(String columnLabel) { if (columnCount >= 3) { // use a hash table if more than 2 
columns if (columnLabelMap == null) { - HashMap map = New.hashMap(columnCount); + HashMap map = new HashMap<>(); // column labels have higher priority for (int i = 0; i < columnCount; i++) { String c = StringUtils.toUpperEnglish(result.getAlias(i)); - mapColumn(map, c, i); + // Don't override previous mapping + map.putIfAbsent(c, i); } for (int i = 0; i < columnCount; i++) { String colName = result.getColumnName(i); if (colName != null) { colName = StringUtils.toUpperEnglish(colName); - mapColumn(map, colName, i); + // Don't override previous mapping + map.putIfAbsent(colName, i); String tabName = result.getTableName(i); if (tabName != null) { - colName = StringUtils.toUpperEnglish(tabName) + "." + colName; - mapColumn(map, colName, i); + colName = StringUtils.toUpperEnglish(tabName) + '.' + colName; + // Don't override previous mapping + map.putIfAbsent(colName, i); } } } @@ -3119,7 +3491,7 @@ private int getColumnIndex(String columnLabel) { if (index == null) { throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnLabel); } - return index.intValue() + 1; + return index + 1; } for (int i = 0; i < columnCount; i++) { if (columnLabel.equalsIgnoreCase(result.getAlias(i))) { @@ -3146,22 +3518,12 @@ private int getColumnIndex(String columnLabel) { throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnLabel); } - private static void mapColumn(HashMap map, String label, - int index) { - // put the index (usually that's the only operation) - Integer old = map.put(label, index); - if (old != null) { - // if there was a clash (which is seldom), - // put the old one back - map.put(label, old); - } - } - - private void checkColumnIndex(int columnIndex) { + private int checkColumnIndex(int columnIndex) { checkClosed(); if (columnIndex < 1 || columnIndex > columnCount) { throw DbException.getInvalidValueException("columnIndex", columnIndex); } + return columnIndex; } /** @@ -3181,42 +3543,42 @@ void checkClosed() { } } + private boolean isOnValidRow() { + return 
result.getRowId() >= 0 && !result.isAfterLast(); + } + private void checkOnValidRow() { - if (result.getRowId() < 0 || result.getRowId() >= result.getRowCount()) { + if (!isOnValidRow()) { throw DbException.get(ErrorCode.NO_DATA_AVAILABLE); } } private Value get(int columnIndex) { - checkColumnIndex(columnIndex); - checkOnValidRow(); - Value[] list; - if (patchedRows == null) { - list = result.currentRow(); - } else { - list = patchedRows.get(result.getRowId()); - if (list == null) { - list = result.currentRow(); - } - } - Value value = list[columnIndex - 1]; + Value value = getInternal(columnIndex); wasNull = value == ValueNull.INSTANCE; return value; } - private Value get(String columnLabel) { - int columnIndex = getColumnIndex(columnLabel); - return get(columnIndex); - } - - private void update(String columnLabel, Value v) { - int columnIndex = getColumnIndex(columnLabel); - update(columnIndex, v); + /** + * INTERNAL + * + * @param columnIndex + * index of a column + * @return internal representation of the value in the specified column + */ + public Value getInternal(int columnIndex) { + checkOnValidRow(); + Value[] list; + if (patchedRows == null || (list = patchedRows.get(result.getRowId())) == null) { + list = result.currentRow(); + } + return list[columnIndex - 1]; } private void update(int columnIndex, Value v) { - checkUpdatable(); - checkColumnIndex(columnIndex); + if (!triggerUpdatable) { + checkUpdatable(); + } if (insertRow != null) { insertRow[columnIndex - 1] = v; } else { @@ -3228,13 +3590,28 @@ private void update(int columnIndex, Value v) { } private boolean nextRow() { - boolean next = result.next(); + boolean next = result.isLazy() ? 
nextLazyRow() : result.next(); if (!next && !scrollable) { result.close(); } return next; } + private boolean nextLazyRow() { + Session session; + if (stat.isCancelled() || conn == null || (session = conn.getSession()) == null) { + throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED); + } + Session oldSession = session.setThreadLocalSession(); + boolean next; + try { + next = result.next(); + } finally { + session.resetThreadLocalSession(oldSession); + } + return next; + } + private void resetResult() { if (!scrollable) { throw DbException.get(ErrorCode.RESULT_SET_NOT_SCROLLABLE); @@ -3327,10 +3704,9 @@ public boolean isClosed() throws SQLException { public void updateNString(int columnIndex, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNString("+columnIndex+", "+quote(x)+");"); + debugCode("updateNString(" + columnIndex + ", " + quote(x) + ')'); } - update(columnIndex, x == null ? (Value) - ValueNull.INSTANCE : ValueString.get(x)); + update(checkColumnIndex(columnIndex), x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } @@ -3347,21 +3723,31 @@ public void updateNString(int columnIndex, String x) throws SQLException { public void updateNString(String columnLabel, String x) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNString("+quote(columnLabel)+", "+quote(x)+");"); + debugCode("updateNString(" + quote(columnLabel) + ", " + quote(x) + ')'); } - update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : - ValueString.get(x)); + update(getColumnIndex(columnLabel), x == null ? ValueNull.INSTANCE : ValueVarchar.get(x, conn)); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) 
+ * @param x the value + * @throws SQLException if the result set is closed or not updatable */ @Override public void updateNClob(int columnIndex, NClob x) throws SQLException { - throw unsupported("NClob"); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + columnIndex + ", (NClob) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3373,7 +3759,14 @@ public void updateNClob(int columnIndex, NClob x) throws SQLException { */ @Override public void updateNClob(int columnIndex, Reader x) throws SQLException { - updateClob(columnIndex, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + columnIndex + ", (Reader) x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3385,9 +3778,15 @@ public void updateNClob(int columnIndex, Reader x) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNClob(int columnIndex, Reader x, long length) - throws SQLException { - updateClob(columnIndex, x, length); + public void updateNClob(int columnIndex, Reader x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + columnIndex + ", (Reader) x, " + length + "L)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3399,7 +3798,14 @@ public void updateNClob(int columnIndex, Reader x, long length) */ @Override public void updateNClob(String columnLabel, Reader x) throws SQLException { - updateClob(columnLabel, x, -1); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (Reader) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3411,17 +3817,38 @@ public void updateNClob(String columnLabel, Reader x) 
throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNClob(String columnLabel, Reader x, long length) - throws SQLException { - updateClob(columnLabel, x, length); + public void updateNClob(String columnLabel, Reader x, long length) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (Reader) x, " + length + "L)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, length); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] + * Updates a column in the current or insert row. + * + * @param columnLabel the column label + * @param x the value + * @throws SQLException if the result set is closed or not updatable */ @Override public void updateNClob(String columnLabel, NClob x) throws SQLException { - throw unsupported("NClob"); + try { + if (isDebugEnabled()) { + debugCode("updateNClob(" + quote(columnLabel) + ", (NClob) x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void updateClobImpl(int columnIndex, Clob x) throws SQLException { + update(columnIndex, x == null ? ValueNull.INSTANCE : conn.createClob(x.getCharacterStream(), -1)); } /** @@ -3436,9 +3863,10 @@ public void updateNClob(String columnLabel, NClob x) throws SQLException { public NClob getNClob(int columnIndex) throws SQLException { try { int id = getNextId(TraceObject.CLOB); - debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnIndex + ")"); - Value v = get(columnIndex); - return v == ValueNull.INSTANCE ? 
null : new JdbcClob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnIndex + ')'); + } + return getClob(id, checkColumnIndex(columnIndex)); } catch (Exception e) { throw logAndConvert(e); } @@ -3456,48 +3884,113 @@ public NClob getNClob(int columnIndex) throws SQLException { public NClob getNClob(String columnLabel) throws SQLException { try { int id = getNextId(TraceObject.CLOB); - debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnLabel + ")"); - Value v = get(columnLabel); - return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id); + if (isDebugEnabled()) { + debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + quote(columnLabel) + ')'); + } + return getClob(id, getColumnIndex(columnLabel)); } catch (Exception e) { throw logAndConvert(e); } } + private JdbcClob getClob(int id, int columnIndex) { + Value v = getInternal(columnIndex); + JdbcClob result; + if (v != ValueNull.INSTANCE) { + wasNull = false; + result = new JdbcClob(conn, v, JdbcLob.State.WITH_VALUE, id); + } else { + wasNull = true; + result = null; + } + return result; + } + /** - * [Not supported] Returns the value of the specified column as a SQLXML - * object. + * Returns the value of the specified column as a SQLXML. + * + * @param columnIndex (1,2,...) + * @return the value + * @throws SQLException if the column is not found or if the result set is + * closed */ @Override public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw unsupported("SQLXML"); + try { + int id = getNextId(TraceObject.SQLXML); + if (isDebugEnabled()) { + debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + columnIndex + ')'); + } + Value v = get(checkColumnIndex(columnIndex)); + return v == ValueNull.INSTANCE ? 
null : new JdbcSQLXML(conn, v, JdbcLob.State.WITH_VALUE, id); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] Returns the value of the specified column as a SQLXML - * object. + * Returns the value of the specified column as a SQLXML. + * + * @param columnLabel the column label + * @return the value + * @throws SQLException if the column is not found or if the result set is + * closed */ @Override public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw unsupported("SQLXML"); + try { + int id = getNextId(TraceObject.SQLXML); + if (isDebugEnabled()) { + debugCodeAssign("SQLXML", TraceObject.SQLXML, id, "getSQLXML(" + quote(columnLabel) + ')'); + } + Value v = get(getColumnIndex(columnLabel)); + return v == ValueNull.INSTANCE ? null : new JdbcSQLXML(conn, v, JdbcLob.State.WITH_VALUE, id); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] Updates a column in the current or insert row. + * Updates a column in the current or insert row. + * + * @param columnIndex (1,2,...) + * @param xmlObject the value + * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) - throws SQLException { - throw unsupported("SQLXML"); + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateSQLXML(" + columnIndex + ", x)"); + } + updateSQLXMLImpl(checkColumnIndex(columnIndex), xmlObject); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] Updates a column in the current or insert row. + * Updates a column in the current or insert row. 
+ * + * @param columnLabel the column label + * @param xmlObject the value + * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) - throws SQLException { - throw unsupported("SQLXML"); + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateSQLXML(" + quote(columnLabel) + ", x)"); + } + updateSQLXMLImpl(getColumnIndex(columnLabel), xmlObject); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private void updateSQLXMLImpl(int columnIndex, SQLXML xmlObject) throws SQLException { + update(columnIndex, + xmlObject == null ? ValueNull.INSTANCE : conn.createClob(xmlObject.getCharacterStream(), -1)); } /** @@ -3512,7 +4005,7 @@ public void updateSQLXML(String columnLabel, SQLXML xmlObject) public String getNString(int columnIndex) throws SQLException { try { debugCodeCall("getNString", columnIndex); - return get(columnIndex).getString(); + return get(checkColumnIndex(columnIndex)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -3530,7 +4023,7 @@ public String getNString(int columnIndex) throws SQLException { public String getNString(String columnLabel) throws SQLException { try { debugCodeCall("getNString", columnLabel); - return get(columnLabel).getString(); + return get(getColumnIndex(columnLabel)).getString(); } catch (Exception e) { throw logAndConvert(e); } @@ -3548,7 +4041,7 @@ public String getNString(String columnLabel) throws SQLException { public Reader getNCharacterStream(int columnIndex) throws SQLException { try { debugCodeCall("getNCharacterStream", columnIndex); - return get(columnIndex).getReader(); + return get(checkColumnIndex(columnIndex)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -3566,7 +4059,7 @@ public Reader getNCharacterStream(int columnIndex) throws SQLException { public Reader getNCharacterStream(String 
columnLabel) throws SQLException { try { debugCodeCall("getNCharacterStream", columnLabel); - return get(columnLabel).getReader(); + return get(getColumnIndex(columnLabel)).getReader(); } catch (Exception e) { throw logAndConvert(e); } @@ -3580,9 +4073,15 @@ public Reader getNCharacterStream(String columnLabel) throws SQLException { * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(int columnIndex, Reader x) - throws SQLException { - updateNCharacterStream(columnIndex, x, -1); + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNCharacterStream(" + columnIndex + ", x)"); + } + updateClobImpl(checkColumnIndex(columnIndex), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3594,15 +4093,12 @@ public void updateNCharacterStream(int columnIndex, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) - throws SQLException { + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNCharacterStream("+columnIndex+", x, "+length+"L);"); + debugCode("updateNCharacterStream(" + columnIndex + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnIndex, v); + updateClobImpl(checkColumnIndex(columnIndex), x, length); } catch (Exception e) { throw logAndConvert(e); } @@ -3616,9 +4112,15 @@ public void updateNCharacterStream(int columnIndex, Reader x, long length) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(String columnLabel, Reader x) - throws SQLException { - updateNCharacterStream(columnLabel, x, -1); + public void updateNCharacterStream(String columnLabel, Reader x) throws 
SQLException { + try { + if (isDebugEnabled()) { + debugCode("updateNCharacterStream(" + quote(columnLabel) + ", x)"); + } + updateClobImpl(getColumnIndex(columnLabel), x, -1L); + } catch (Exception e) { + throw logAndConvert(e); + } } /** @@ -3630,20 +4132,21 @@ public void updateNCharacterStream(String columnLabel, Reader x) * @throws SQLException if the result set is closed or not updatable */ @Override - public void updateNCharacterStream(String columnLabel, Reader x, long length) - throws SQLException { + public void updateNCharacterStream(String columnLabel, Reader x, long length) throws SQLException { try { if (isDebugEnabled()) { - debugCode("updateNCharacterStream("+quote(columnLabel)+", x, "+length+"L);"); + debugCode("updateNCharacterStream(" + quote(columnLabel) + ", x, " + length + "L)"); } - checkClosed(); - Value v = conn.createClob(x, length); - update(columnLabel, v); + updateClobImpl(getColumnIndex(columnLabel), x, length); } catch (Exception e) { throw logAndConvert(e); } } + private void updateClobImpl(int columnIndex, Reader x, long length) { + update(columnIndex, conn.createClob(x, length)); + } + /** * Return an object of this class if possible. * @@ -3653,10 +4156,14 @@ public void updateNCharacterStream(String columnLabel, Reader x, long length) @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** @@ -3671,30 +4178,46 @@ public boolean isWrapperFor(Class iface) throws SQLException { } /** - * [Not supported] + * Returns a column value as a Java object of the specified type. * * @param columnIndex the column index (1, 2, ...) 
* @param type the class of the returned value + * @return the value + * @throws SQLException if the column is not found or if the result set is + * closed */ -/*## Java 1.7 ## @Override - public T getObject(int columnIndex, Class type) { - return null; + public T getObject(int columnIndex, Class type) throws SQLException { + try { + if (type == null) { + throw DbException.getInvalidValueException("type", type); + } + debugCodeCall("getObject", columnIndex); + return ValueToObjectConverter.valueToObject(type, get(checkColumnIndex(columnIndex)), conn); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ /** - * [Not supported] + * Returns a column value as a Java object of the specified type. * * @param columnName the column name * @param type the class of the returned value + * @return the value */ -/*## Java 1.7 ## @Override - public T getObject(String columnName, Class type) { - return null; + public T getObject(String columnName, Class type) throws SQLException { + try { + if (type == null) { + throw DbException.getInvalidValueException("type", type); + } + debugCodeCall("getObject", columnName); + return ValueToObjectConverter.valueToObject(type, get(getColumnIndex(columnName)), conn); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ /** * INTERNAL @@ -3707,17 +4230,17 @@ public String toString() { private void patchCurrentRow(Value[] row) { boolean changed = false; Value[] current = result.currentRow(); - CompareMode mode = conn.getCompareMode(); + CompareMode compareMode = conn.getCompareMode(); for (int i = 0; i < row.length; i++) { - if (row[i].compareTo(current[i], mode) != 0) { + if (row[i].compareTo(current[i], conn, compareMode) != 0) { changed = true; break; } } if (patchedRows == null) { - patchedRows = New.hashMap(); + patchedRows = new HashMap<>(); } - Integer rowId = result.getRowId(); + Long rowId = result.getRowId(); if (!changed) { patchedRows.remove(rowId); } else { @@ -3725,9 +4248,18 @@ private void 
patchCurrentRow(Value[] row) { } } + private Value convertToValue(Object x, SQLType targetSqlType) { + if (x == null) { + return ValueNull.INSTANCE; + } else { + int type = DataType.convertSQLTypeToValueType(targetSqlType); + Value v = ValueToObjectConverter.objectToValue(conn.getSession(), x, type); + return v.convertTo(type, conn); + } + } + private Value convertToUnknownValue(Object x) { - checkClosed(); - return DataType.convertToValue(conn.getSession(), x, Value.UNKNOWN); + return ValueToObjectConverter.objectToValue(conn.getSession(), x, Value.UNKNOWN); } private void checkUpdatable() { @@ -3737,4 +4269,22 @@ private void checkUpdatable() { } } + /** + * INTERNAL + * + * @return array of column values for the current row + */ + public Value[] getUpdateRow() { + return updateRow; + } + + /** + * INTERNAL + * + * @return result + */ + public ResultInterface getResult() { + return result; + } + } diff --git a/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java b/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java index 772e7475f2..e3658d6f23 100644 --- a/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java +++ b/h2/src/main/org/h2/jdbc/JdbcResultSetMetaData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,12 +13,12 @@ import org.h2.result.ResultInterface; import org.h2.util.MathUtils; import org.h2.value.DataType; +import org.h2.value.ValueToObjectConverter; /** * Represents the meta data for a ResultSet. 
*/ -public class JdbcResultSetMetaData extends TraceObject implements - ResultSetMetaData { +public final class JdbcResultSetMetaData extends TraceObject implements ResultSetMetaData { private final String catalog; private final JdbcResultSet rs; @@ -63,9 +63,7 @@ public int getColumnCount() throws SQLException { @Override public String getColumnLabel(int column) throws SQLException { try { - debugCodeCall("getColumnLabel", column); - checkColumnIndex(column); - return result.getAlias(--column); + return result.getAlias(getColumn("getColumnLabel", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -81,9 +79,7 @@ public String getColumnLabel(int column) throws SQLException { @Override public String getColumnName(int column) throws SQLException { try { - debugCodeCall("getColumnName", column); - checkColumnIndex(column); - return result.getColumnName(--column); + return result.getColumnName(getColumn("getColumnName", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -100,10 +96,7 @@ public String getColumnName(int column) throws SQLException { @Override public int getColumnType(int column) throws SQLException { try { - debugCodeCall("getColumnType", column); - checkColumnIndex(column); - int type = result.getColumnType(--column); - return DataType.convertTypeToSQLType(type); + return DataType.convertTypeToSQLType(result.getColumnType(getColumn("getColumnType", column))); } catch (Exception e) { throw logAndConvert(e); } @@ -119,10 +112,7 @@ public int getColumnType(int column) throws SQLException { @Override public String getColumnTypeName(int column) throws SQLException { try { - debugCodeCall("getColumnTypeName", column); - checkColumnIndex(column); - int type = result.getColumnType(--column); - return DataType.getDataType(type).name; + return result.getColumnType(getColumn("getColumnTypeName", column)).getDeclaredTypeName(); } catch (Exception e) { throw logAndConvert(e); } @@ -138,9 +128,7 @@ public String getColumnTypeName(int column) 
throws SQLException { @Override public String getSchemaName(int column) throws SQLException { try { - debugCodeCall("getSchemaName", column); - checkColumnIndex(column); - String schema = result.getSchemaName(--column); + String schema = result.getSchemaName(getColumn("getSchemaName", column)); return schema == null ? "" : schema; } catch (Exception e) { throw logAndConvert(e); @@ -157,9 +145,7 @@ public String getSchemaName(int column) throws SQLException { @Override public String getTableName(int column) throws SQLException { try { - debugCodeCall("getTableName", column); - checkColumnIndex(column); - String table = result.getTableName(--column); + String table = result.getTableName(getColumn("getTableName", column)); return table == null ? "" : table; } catch (Exception e) { throw logAndConvert(e); @@ -176,8 +162,7 @@ public String getTableName(int column) throws SQLException { @Override public String getCatalogName(int column) throws SQLException { try { - debugCodeCall("getCatalogName", column); - checkColumnIndex(column); + getColumn("getCatalogName", column); return catalog == null ? 
"" : catalog; } catch (Exception e) { throw logAndConvert(e); @@ -194,9 +179,7 @@ public String getCatalogName(int column) throws SQLException { @Override public boolean isAutoIncrement(int column) throws SQLException { try { - debugCodeCall("isAutoIncrement", column); - checkColumnIndex(column); - return result.isAutoIncrement(--column); + return result.isIdentity(getColumn("isAutoIncrement", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -213,8 +196,7 @@ public boolean isAutoIncrement(int column) throws SQLException { @Override public boolean isCaseSensitive(int column) throws SQLException { try { - debugCodeCall("isCaseSensitive", column); - checkColumnIndex(column); + getColumn("isCaseSensitive", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -232,8 +214,7 @@ public boolean isCaseSensitive(int column) throws SQLException { @Override public boolean isSearchable(int column) throws SQLException { try { - debugCodeCall("isSearchable", column); - checkColumnIndex(column); + getColumn("isSearchable", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -251,8 +232,7 @@ public boolean isSearchable(int column) throws SQLException { @Override public boolean isCurrency(int column) throws SQLException { try { - debugCodeCall("isCurrency", column); - checkColumnIndex(column); + getColumn("isCurrency", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -273,9 +253,7 @@ public boolean isCurrency(int column) throws SQLException { @Override public int isNullable(int column) throws SQLException { try { - debugCodeCall("isNullable", column); - checkColumnIndex(column); - return result.getNullable(--column); + return result.getNullable(getColumn("isNullable", column)); } catch (Exception e) { throw logAndConvert(e); } @@ -283,18 +261,16 @@ public int isNullable(int column) throws SQLException { /** * Checks if this column is signed. - * It always returns true. 
+ * Returns true for numeric columns. * * @param column the column index (1,2,...) - * @return true + * @return true for numeric columns * @throws SQLException if the result set is closed or invalid */ @Override public boolean isSigned(int column) throws SQLException { try { - debugCodeCall("isSigned", column); - checkColumnIndex(column); - return true; + return DataType.isNumericType(result.getColumnType(getColumn("isSigned", column)).getValueType()); } catch (Exception e) { throw logAndConvert(e); } @@ -311,8 +287,7 @@ public boolean isSigned(int column) throws SQLException { @Override public boolean isReadOnly(int column) throws SQLException { try { - debugCodeCall("isReadOnly", column); - checkColumnIndex(column); + getColumn("isReadOnly", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -330,8 +305,7 @@ public boolean isReadOnly(int column) throws SQLException { @Override public boolean isWritable(int column) throws SQLException { try { - debugCodeCall("isWritable", column); - checkColumnIndex(column); + getColumn("isWritable", column); return true; } catch (Exception e) { throw logAndConvert(e); @@ -349,8 +323,7 @@ public boolean isWritable(int column) throws SQLException { @Override public boolean isDefinitelyWritable(int column) throws SQLException { try { - debugCodeCall("isDefinitelyWritable", column); - checkColumnIndex(column); + getColumn("isDefinitelyWritable", column); return false; } catch (Exception e) { throw logAndConvert(e); @@ -368,10 +341,8 @@ public boolean isDefinitelyWritable(int column) throws SQLException { @Override public String getColumnClassName(int column) throws SQLException { try { - debugCodeCall("getColumnClassName", column); - checkColumnIndex(column); - int type = result.getColumnType(--column); - return DataType.getTypeClassName(type); + int type = result.getColumnType(getColumn("getColumnClassName", column)).getValueType(); + return ValueToObjectConverter.getDefaultClass(type, true).getName(); } catch 
(Exception e) { throw logAndConvert(e); } @@ -387,10 +358,7 @@ public String getColumnClassName(int column) throws SQLException { @Override public int getPrecision(int column) throws SQLException { try { - debugCodeCall("getPrecision", column); - checkColumnIndex(column); - long prec = result.getColumnPrecision(--column); - return MathUtils.convertLongToInt(prec); + return MathUtils.convertLongToInt(result.getColumnType(getColumn("getPrecision", column)).getPrecision()); } catch (Exception e) { throw logAndConvert(e); } @@ -406,9 +374,7 @@ public int getPrecision(int column) throws SQLException { @Override public int getScale(int column) throws SQLException { try { - debugCodeCall("getScale", column); - checkColumnIndex(column); - return result.getColumnScale(--column); + return result.getColumnType(getColumn("getScale", column)).getScale(); } catch (Exception e) { throw logAndConvert(e); } @@ -424,9 +390,7 @@ public int getScale(int column) throws SQLException { @Override public int getColumnDisplaySize(int column) throws SQLException { try { - debugCodeCall("getColumnDisplaySize", column); - checkColumnIndex(column); - return result.getDisplaySize(--column); + return result.getColumnType(getColumn("getColumnDisplaySize", column)).getDisplaySize(); } catch (Exception e) { throw logAndConvert(e); } @@ -441,11 +405,23 @@ private void checkClosed() { } } - private void checkColumnIndex(int columnIndex) { + /** + * Writes trace information and checks validity of this object and + * parameter. 
+ * + * @param methodName + * the called method name + * @param columnIndex + * 1-based column index + * @return 0-based column index + */ + private int getColumn(String methodName, int columnIndex) { + debugCodeCall(methodName, columnIndex); checkClosed(); if (columnIndex < 1 || columnIndex > columnCount) { throw DbException.getInvalidValueException("columnIndex", columnIndex); } + return columnIndex - 1; } /** @@ -457,10 +433,14 @@ private void checkColumnIndex(int columnIndex) { @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java b/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java new file mode 100644 index 0000000000..0016f23f3f --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLDataException.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLDataException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLDataException extends SQLDataException implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLDataException. 
+ * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLDataException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLException.java b/h2/src/main/org/h2/jdbc/JdbcSQLException.java index e2b495e90e..de08d17dde 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSQLException.java +++ b/h2/src/main/org/h2/jdbc/JdbcSQLException.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -9,23 +9,16 @@ import java.io.PrintWriter; import java.sql.SQLException; -import org.h2.engine.Constants; +import org.h2.message.DbException; /** * Represents a database exception. */ -public class JdbcSQLException extends SQLException { - - /** - * If the SQL statement contains this text, then it is never added to the - * SQL exception. Hiding the SQL statement may be important if it contains a - * passwords, such as a CREATE LINKED TABLE statement. - */ - public static final String HIDE_SQL = "--hide--"; +public final class JdbcSQLException extends SQLException implements JdbcException { private static final long serialVersionUID = 1L; + private final String originalMessage; - private final Throwable cause; private final String stackTrace; private String message; private String sql; @@ -44,131 +37,45 @@ public JdbcSQLException(String message, String sql, String state, int errorCode, Throwable cause, String stackTrace) { super(message, state, errorCode); this.originalMessage = message; - setSQL(sql); - this.cause = cause; this.stackTrace = stackTrace; - buildMessage(); + // setSQL() also generates message + setSQL(sql); initCause(cause); } - /** - * Get the detail error message. - * - * @return the message - */ @Override public String getMessage() { return message; } - /** - * INTERNAL - */ + @Override public String getOriginalMessage() { return originalMessage; } - /** - * Prints the stack trace to the standard error stream. - */ - @Override - public void printStackTrace() { - // The default implementation already does that, - // but we do it again to avoid problems. - // If it is not implemented, somebody might implement it - // later on which would be a problem if done in the wrong way. - printStackTrace(System.err); - } - - /** - * Prints the stack trace to the specified print writer. 
- * - * @param s the print writer - */ @Override public void printStackTrace(PrintWriter s) { - if (s != null) { - super.printStackTrace(s); - // getNextException().printStackTrace(s) would be very very slow - // if many exceptions are joined - SQLException next = getNextException(); - for (int i = 0; i < 100 && next != null; i++) { - s.println(next.toString()); - next = next.getNextException(); - } - if (next != null) { - s.println("(truncated)"); - } - } + super.printStackTrace(s); + DbException.printNextExceptions(this, s); } - /** - * Prints the stack trace to the specified print stream. - * - * @param s the print stream - */ @Override public void printStackTrace(PrintStream s) { - if (s != null) { - super.printStackTrace(s); - // getNextException().printStackTrace(s) would be very very slow - // if many exceptions are joined - SQLException next = getNextException(); - for (int i = 0; i < 100 && next != null; i++) { - s.println(next.toString()); - next = next.getNextException(); - } - if (next != null) { - s.println("(truncated)"); - } - } + super.printStackTrace(s); + DbException.printNextExceptions(this, s); } - /** - * INTERNAL - */ - public Throwable getOriginalCause() { - return cause; - } - - /** - * Returns the SQL statement. - * SQL statements that contain '--hide--' are not listed. - * - * @return the SQL statement - */ + @Override public String getSQL() { return sql; } - /** - * INTERNAL - */ + @Override public void setSQL(String sql) { - if (sql != null && sql.contains(HIDE_SQL)) { - sql = "-"; - } this.sql = sql; - buildMessage(); - } - - private void buildMessage() { - StringBuilder buff = new StringBuilder(originalMessage == null ? - "- " : originalMessage); - if (sql != null) { - buff.append("; SQL statement:\n").append(sql); - } - buff.append(" [").append(getErrorCode()). 
- append('-').append(Constants.BUILD_ID).append(']'); - message = buff.toString(); + message = DbException.buildMessageForException(this); } - /** - * Returns the class name, the message, and in the server mode, the stack - * trace of the server - * - * @return the string representation - */ @Override public String toString() { if (stackTrace == null) { diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java b/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java new file mode 100644 index 0000000000..bf9416b842 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLFeatureNotSupportedException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLFeatureNotSupportedException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLFeatureNotSupportedException extends SQLFeatureNotSupportedException + implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLFeatureNotSupportedException. 
+ * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLFeatureNotSupportedException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java b/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java new file mode 100644 index 0000000000..6ce24217ae --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLIntegrityConstraintViolationException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLIntegrityConstraintViolationException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLIntegrityConstraintViolationException extends SQLIntegrityConstraintViolationException + implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLIntegrityConstraintViolationException. + * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLIntegrityConstraintViolationException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git 
a/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java b/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java new file mode 100644 index 0000000000..d06886c201 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLInvalidAuthorizationSpecException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLInvalidAuthorizationSpecException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLInvalidAuthorizationSpecException extends SQLInvalidAuthorizationSpecException + implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLInvalidAuthorizationSpecException. 
+ * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLInvalidAuthorizationSpecException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java new file mode 100644 index 0000000000..b76dd0d0c3 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientConnectionException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLNonTransientConnectionException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLNonTransientConnectionException extends SQLNonTransientConnectionException + implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLNonTransientConnectionException. + * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLNonTransientConnectionException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git 
a/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java new file mode 100644 index 0000000000..858a5647af --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLNonTransientException.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLNonTransientException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLNonTransientException extends SQLNonTransientException implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLNonTransientException. + * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLNonTransientException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public 
String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java b/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java new file mode 100644 index 0000000000..97bb472f2a --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLSyntaxErrorException.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLSyntaxErrorException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLSyntaxErrorException extends SQLSyntaxErrorException implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLSyntaxErrorException. 
+ * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLSyntaxErrorException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java new file mode 100644 index 0000000000..7e8ee1a2a9 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTimeoutException.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLTimeoutException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. 
+ */ +public final class JdbcSQLTimeoutException extends SQLTimeoutException implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLTimeoutException. + * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLTimeoutException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java new file mode 100644 index 0000000000..34e54b36b8 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTransactionRollbackException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLTransactionRollbackException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLTransactionRollbackException extends SQLTransactionRollbackException + implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLTransactionRollbackException. + * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLTransactionRollbackException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + 
return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java b/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java new file mode 100644 index 0000000000..6566d1d9a3 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLTransientException.java @@ -0,0 +1,87 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.PrintStream; +import java.io.PrintWriter; +import java.sql.SQLTransientException; + +import org.h2.message.DbException; + +/** + * Represents a database exception. + */ +public final class JdbcSQLTransientException extends SQLTransientException implements JdbcException { + + private static final long serialVersionUID = 1L; + + private final String originalMessage; + private final String stackTrace; + private String message; + private String sql; + + /** + * Creates a SQLTransientException. 
+ * + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + */ + public JdbcSQLTransientException(String message, String sql, String state, + int errorCode, Throwable cause, String stackTrace) { + super(message, state, errorCode); + this.originalMessage = message; + this.stackTrace = stackTrace; + // setSQL() also generates message + setSQL(sql); + initCause(cause); + } + + @Override + public String getMessage() { + return message; + } + + @Override + public String getOriginalMessage() { + return originalMessage; + } + + @Override + public void printStackTrace(PrintWriter s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public void printStackTrace(PrintStream s) { + super.printStackTrace(s); + DbException.printNextExceptions(this, s); + } + + @Override + public String getSQL() { + return sql; + } + + @Override + public void setSQL(String sql) { + this.sql = sql; + message = DbException.buildMessageForException(this); + } + + @Override + public String toString() { + if (stackTrace == null) { + return super.toString(); + } + return stackTrace; + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSQLXML.java b/h2/src/main/org/h2/jdbc/JdbcSQLXML.java new file mode 100644 index 0000000000..83a0a6a6b9 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcSQLXML.java @@ -0,0 +1,270 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.io.BufferedOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.io.Writer; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.util.HashMap; +import java.util.Map; + +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.SAXParserFactory; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.transform.Result; +import javax.xml.transform.Source; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.URIResolver; +import javax.xml.transform.dom.DOMResult; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.sax.SAXTransformerFactory; +import javax.xml.transform.sax.TransformerHandler; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +import org.h2.message.DbException; +import org.h2.message.TraceObject; +import org.h2.value.Value; +import org.w3c.dom.Node; +import org.xml.sax.EntityResolver; +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.DefaultHandler; + +/** + * Represents a SQLXML value. 
+ */ +public final class JdbcSQLXML extends JdbcLob implements SQLXML { + + private static final Map secureFeatureMap = new HashMap<>(); + private static final EntityResolver NOOP_ENTITY_RESOLVER = (pubId, sysId) -> new InputSource(new StringReader("")); + private static final URIResolver NOOP_URI_RESOLVER = (href, base) -> new StreamSource(new StringReader("")); + + static { + secureFeatureMap.put(XMLConstants.FEATURE_SECURE_PROCESSING, true); + secureFeatureMap.put("http://apache.org/xml/features/disallow-doctype-decl", true); + secureFeatureMap.put("http://xml.org/sax/features/external-general-entities", false); + secureFeatureMap.put("http://xml.org/sax/features/external-parameter-entities", false); + secureFeatureMap.put("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + } + + private DOMResult domResult; + + /** + * Underlying stream for SAXResult, StAXResult, and StreamResult. + */ + private Closeable closable; + + /** + * INTERNAL + * @param conn to use + * @param value for this JdbcSQLXML + * @param state of the LOB + * @param id of the trace object + */ + public JdbcSQLXML(JdbcConnection conn, Value value, State state, int id) { + super(conn, value, state, TraceObject.SQLXML, id); + } + + @Override + void checkReadable() throws SQLException, IOException { + checkClosed(); + if (state == State.SET_CALLED) { + if (domResult != null) { + Node node = domResult.getNode(); + domResult = null; + TransformerFactory factory = TransformerFactory.newInstance(); + try { + Transformer transformer = factory.newTransformer(); + DOMSource domSource = new DOMSource(node); + StringWriter stringWriter = new StringWriter(); + StreamResult streamResult = new StreamResult(stringWriter); + transformer.transform(domSource, streamResult); + completeWrite(conn.createClob(new StringReader(stringWriter.toString()), -1)); + } catch (Exception e) { + throw logAndConvert(e); + } + return; + } else if (closable != null) { + closable.close(); + closable = null; + 
return; + } + throw DbException.getUnsupportedException("Stream setter is not yet closed."); + } + } + + @Override + public InputStream getBinaryStream() throws SQLException { + return super.getBinaryStream(); + } + + @Override + public Reader getCharacterStream() throws SQLException { + return super.getCharacterStream(); + } + + @SuppressWarnings("unchecked") + @Override + public T getSource(Class sourceClass) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode( + "getSource(" + (sourceClass != null ? sourceClass.getSimpleName() + ".class" : "null") + ')'); + } + checkReadable(); + // see https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html + if (sourceClass == null || sourceClass == DOMSource.class) { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + for (Map.Entry entry : secureFeatureMap.entrySet()) { + try { + dbf.setFeature(entry.getKey(), entry.getValue()); + } catch (Exception ignore) {/**/} + } + dbf.setXIncludeAware(false); + dbf.setExpandEntityReferences(false); + dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, ""); + DocumentBuilder db = dbf.newDocumentBuilder(); + db.setEntityResolver(NOOP_ENTITY_RESOLVER); + return (T) new DOMSource(db.parse(new InputSource(value.getInputStream()))); + } else if (sourceClass == SAXSource.class) { + SAXParserFactory spf = SAXParserFactory.newInstance(); + for (Map.Entry entry : secureFeatureMap.entrySet()) { + try { + spf.setFeature(entry.getKey(), entry.getValue()); + } catch (Exception ignore) {/**/} + } + XMLReader reader = spf.newSAXParser().getXMLReader(); + reader.setEntityResolver(NOOP_ENTITY_RESOLVER); + return (T) new SAXSource(reader, new InputSource(value.getInputStream())); + } else if (sourceClass == StAXSource.class) { + XMLInputFactory xif = XMLInputFactory.newInstance(); + xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + xif.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + 
xif.setProperty("javax.xml.stream.isSupportingExternalEntities", false); + return (T) new StAXSource(xif.createXMLStreamReader(value.getInputStream())); + } else if (sourceClass == StreamSource.class) { + TransformerFactory tf = TransformerFactory.newInstance(); + tf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + tf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, ""); + tf.setURIResolver(NOOP_URI_RESOLVER); + tf.newTransformer().transform(new StreamSource(value.getInputStream()), + new SAXResult(new DefaultHandler())); + return (T) new StreamSource(value.getInputStream()); + } + throw unsupported(sourceClass.getName()); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + @Override + public String getString() throws SQLException { + try { + debugCodeCall("getString"); + checkReadable(); + return value.getString(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + @Override + public OutputStream setBinaryStream() throws SQLException { + try { + debugCodeCall("setBinaryStream"); + checkEditable(); + state = State.SET_CALLED; + return new BufferedOutputStream(setClobOutputStreamImpl()); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + @Override + public Writer setCharacterStream() throws SQLException { + try { + debugCodeCall("setCharacterStream"); + checkEditable(); + state = State.SET_CALLED; + return setCharacterStreamImpl(); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + @SuppressWarnings("unchecked") + @Override + public T setResult(Class resultClass) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode( + "setResult(" + (resultClass != null ? 
resultClass.getSimpleName() + ".class" : "null") + ')'); + } + checkEditable(); + if (resultClass == null || resultClass == DOMResult.class) { + domResult = new DOMResult(); + state = State.SET_CALLED; + return (T) domResult; + } else if (resultClass == SAXResult.class) { + SAXTransformerFactory transformerFactory = (SAXTransformerFactory) TransformerFactory.newInstance(); + TransformerHandler transformerHandler = transformerFactory.newTransformerHandler(); + Writer writer = setCharacterStreamImpl(); + transformerHandler.setResult(new StreamResult(writer)); + SAXResult saxResult = new SAXResult(transformerHandler); + closable = writer; + state = State.SET_CALLED; + return (T) saxResult; + } else if (resultClass == StAXResult.class) { + XMLOutputFactory xof = XMLOutputFactory.newInstance(); + Writer writer = setCharacterStreamImpl(); + StAXResult staxResult = new StAXResult(xof.createXMLStreamWriter(writer)); + closable = writer; + state = State.SET_CALLED; + return (T) staxResult; + } else if (StreamResult.class.equals(resultClass)) { + Writer writer = setCharacterStreamImpl(); + StreamResult streamResult = new StreamResult(writer); + closable = writer; + state = State.SET_CALLED; + return (T) streamResult; + } + throw unsupported(resultClass.getName()); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + @Override + public void setString(String value) throws SQLException { + try { + if (isDebugEnabled()) { + debugCodeCall("getSource", value); + } + checkEditable(); + completeWrite(conn.createClob(new StringReader(value), -1)); + } catch (Exception e) { + throw logAndConvert(e); + } + } + +} diff --git a/h2/src/main/org/h2/jdbc/JdbcSavepoint.java b/h2/src/main/org/h2/jdbc/JdbcSavepoint.java index a443b41f2e..f08eabdbde 100644 --- a/h2/src/main/org/h2/jdbc/JdbcSavepoint.java +++ b/h2/src/main/org/h2/jdbc/JdbcSavepoint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbc; @@ -19,7 +19,7 @@ * rolled back. The tasks that where done before the savepoint are not rolled * back in this case. */ -public class JdbcSavepoint extends TraceObject implements Savepoint { +public final class JdbcSavepoint extends TraceObject implements Savepoint { private static final String SYSTEM_SAVEPOINT_PREFIX = "SYSTEM_SAVEPOINT_"; @@ -65,7 +65,7 @@ void rollback() { checkValid(); conn.prepareCommand( "ROLLBACK TO SAVEPOINT " + getName(name, savepointId), - Integer.MAX_VALUE).executeUpdate(); + Integer.MAX_VALUE).executeUpdate(null); } private void checkValid() { diff --git a/h2/src/main/org/h2/jdbc/JdbcStatement.java b/h2/src/main/org/h2/jdbc/JdbcStatement.java index 1c20a553a7..80ce508023 100644 --- a/h2/src/main/org/h2/jdbc/JdbcStatement.java +++ b/h2/src/main/org/h2/jdbc/JdbcStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbc; @@ -13,41 +13,61 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; -import org.h2.engine.SessionInterface; +import org.h2.engine.Session; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.result.ResultInterface; -import org.h2.util.New; +import org.h2.result.ResultWithGeneratedKeys; +import org.h2.result.SimpleResult; +import org.h2.util.ParserUtil; +import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * Represents a statement. + *

    + * Thread safety: the statement is not thread-safe. If the same statement is + * used by multiple threads access to it must be synchronized. The single + * synchronized block must include execution of the command and all operations + * with its result. + *

    + *
    + * synchronized (stat) {
    + *     try (ResultSet rs = stat.executeQuery(queryString)) {
    + *         while (rs.next) {
    + *             // Do something
    + *         }
    + *     }
    + * }
    + * synchronized (stat) {
    + *     updateCount = stat.executeUpdate(commandString);
    + * }
    + * 
    */ -public class JdbcStatement extends TraceObject implements Statement { +public class JdbcStatement extends TraceObject implements Statement, JdbcStatementBackwardsCompat { protected JdbcConnection conn; - protected SessionInterface session; + protected Session session; protected JdbcResultSet resultSet; - protected int maxRows; + protected long maxRows; protected int fetchSize = SysProperties.SERVER_RESULT_SET_FETCH_SIZE; - protected int updateCount; + protected long updateCount; + protected JdbcResultSet generatedKeys; protected final int resultSetType; protected final int resultSetConcurrency; - protected final boolean closedByResultSet; - private CommandInterface executingCommand; - private int lastExecutedCommandType; + private volatile CommandInterface executingCommand; private ArrayList batchCommands; private boolean escapeProcessing = true; - private boolean cancelled; + private volatile boolean cancelled; + private boolean closeOnCompletion; - JdbcStatement(JdbcConnection conn, int id, int resultSetType, - int resultSetConcurrency, boolean closeWithResultSet) { + JdbcStatement(JdbcConnection conn, int id, int resultSetType, int resultSetConcurrency) { this.conn = conn; this.session = conn.getSession(); setTrace(session.getTrace(), TraceObject.STATEMENT, id); this.resultSetType = resultSetType; this.resultSetConcurrency = resultSetConcurrency; - this.closedByResultSet = closeWithResultSet; } /** @@ -63,8 +83,7 @@ public ResultSet executeQuery(String sql) throws SQLException { try { int id = getNextId(TraceObject.RESULT_SET); if (isDebugEnabled()) { - debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, - "executeQuery(" + quote(sql) + ")"); + debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery(" + quote(sql) + ')'); } synchronized (session) { checkClosed(); @@ -72,17 +91,22 @@ public ResultSet executeQuery(String sql) throws SQLException { sql = JdbcConnection.translateSQL(sql, escapeProcessing); CommandInterface command = 
conn.prepareCommand(sql, fetchSize); ResultInterface result; + boolean lazy = false; boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; setExecutingStatement(command); try { result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); } finally { - setExecutingStatement(null); + if (!lazy) { + setExecutingStatement(null); + } + } + if (!lazy) { + command.close(); } - command.close(); - resultSet = new JdbcResultSet(conn, this, result, id, - closedByResultSet, scrollable, updatable); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, false); } return resultSet; } catch (Exception e) { @@ -92,7 +116,8 @@ public ResultSet executeQuery(String sql) throws SQLException { /** * Executes a statement (insert, update, delete, create, drop) - * and returns the update count. + * and returns the update count. This method is not + * allowed for prepared statements. * If another result set exists for this statement, this will be closed * (even if this statement fails). * @@ -102,45 +127,85 @@ public ResultSet executeQuery(String sql) throws SQLException { * executing the statement. 
* * @param sql the SQL statement - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String) */ @Override - public int executeUpdate(String sql) throws SQLException { + public final int executeUpdate(String sql) throws SQLException { try { debugCodeCall("executeUpdate", sql); - return executeUpdateInternal(sql); + long updateCount = executeUpdateInternal(sql, null); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } - private int executeUpdateInternal(String sql) throws SQLException { - checkClosedForWrite(); + /** + * Executes a statement (insert, update, delete, create, drop) + * and returns the update count. This method is not + * allowed for prepared statements. + * If another result set exists for this statement, this will be closed + * (even if this statement fails). + * + * If auto commit is on, this statement will be committed. + * If the statement is a DDL statement (create, drop, alter) and does not + * throw an exception, the current transaction (if any) is committed after + * executing the statement. 
+ * + * @param sql the SQL statement + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) + * @throws SQLException if a database error occurred or a + * select statement was executed + */ + @Override + public final long executeLargeUpdate(String sql) throws SQLException { try { - closeOldResultSet(); - sql = JdbcConnection.translateSQL(sql, escapeProcessing); - CommandInterface command = conn.prepareCommand(sql, fetchSize); - synchronized (session) { - setExecutingStatement(command); - try { - updateCount = command.executeUpdate(); - } finally { - setExecutingStatement(null); + debugCodeCall("executeLargeUpdate", sql); + return executeUpdateInternal(sql, null); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private long executeUpdateInternal(String sql, Object generatedKeysRequest) { + if (getClass() != JdbcStatement.class) { + throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + } + checkClosed(); + closeOldResultSet(); + sql = JdbcConnection.translateSQL(sql, escapeProcessing); + CommandInterface command = conn.prepareCommand(sql, fetchSize); + synchronized (session) { + setExecutingStatement(command); + try { + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + int id = getNextId(TraceObject.RESULT_SET); + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } + } finally { + setExecutingStatement(null); } - command.close(); - return updateCount; - } finally { - afterWriting(); } + command.close(); + return updateCount; } /** - * Executes an arbitrary statement. If another result set exists for this + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. 
+ * If another result set exists for this * statement, this will be closed (even if this statement fails). * * If the statement is a create or drop and does not throw an exception, the @@ -149,49 +214,58 @@ private int executeUpdateInternal(String sql) throws SQLException { * will be committed. * * @param sql the SQL statement to execute - * @return true if a result set is available, false if not + * @return true if result is a result set, false otherwise */ @Override - public boolean execute(String sql) throws SQLException { + public final boolean execute(String sql) throws SQLException { try { debugCodeCall("execute", sql); - return executeInternal(sql); + return executeInternal(sql, false); } catch (Exception e) { throw logAndConvert(e); } } - private boolean executeInternal(String sql) throws SQLException { + private boolean executeInternal(String sql, Object generatedKeysRequest) { + if (getClass() != JdbcStatement.class) { + throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT); + } int id = getNextId(TraceObject.RESULT_SET); - checkClosedForWrite(); - try { - closeOldResultSet(); - sql = JdbcConnection.translateSQL(sql, escapeProcessing); - CommandInterface command = conn.prepareCommand(sql, fetchSize); - boolean returnsResultSet; - synchronized (session) { - setExecutingStatement(command); - try { - if (command.isQuery()) { - returnsResultSet = true; - boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; - boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; - ResultInterface result = command.executeQuery(maxRows, scrollable); - resultSet = new JdbcResultSet(conn, this, result, id, - closedByResultSet, scrollable, updatable); - } else { - returnsResultSet = false; - updateCount = command.executeUpdate(); + checkClosed(); + closeOldResultSet(); + sql = JdbcConnection.translateSQL(sql, escapeProcessing); + CommandInterface command = conn.prepareCommand(sql, fetchSize); + boolean lazy = false; + boolean 
returnsResultSet; + synchronized (session) { + setExecutingStatement(command); + try { + if (command.isQuery()) { + returnsResultSet = true; + boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY; + boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE; + ResultInterface result = command.executeQuery(maxRows, scrollable); + lazy = result.isLazy(); + resultSet = new JdbcResultSet(conn, this, command, result, id, scrollable, updatable, false); + } else { + returnsResultSet = false; + ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest); + updateCount = result.getUpdateCount(); + ResultInterface gk = result.getGeneratedKeys(); + if (gk != null) { + generatedKeys = new JdbcResultSet(conn, this, command, gk, id, true, false, false); } - } finally { + } + } finally { + if (!lazy) { setExecutingStatement(null); } } + } + if (!lazy) { command.close(); - return returnsResultSet; - } finally { - afterWriting(); } + return returnsResultSet; } /** @@ -218,16 +292,39 @@ public ResultSet getResultSet() throws SQLException { /** * Returns the last update count of this statement. * - * @return the update count (number of row affected by an insert, update or - * delete, or 0 if no rows or the statement was a create, drop, - * commit or rollback; -1 if the statement was a select). + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or -1 if + * statement was a query, or {@link #SUCCESS_NO_INFO} if number of + * rows is too large for the {@code int} data type) * @throws SQLException if this object is closed or invalid + * @see #getLargeUpdateCount() */ @Override - public int getUpdateCount() throws SQLException { + public final int getUpdateCount() throws SQLException { try { debugCodeCall("getUpdateCount"); checkClosed(); + return updateCount <= Integer.MAX_VALUE ? 
(int) updateCount : SUCCESS_NO_INFO; + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Returns the last update count of this statement. + * + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or -1 if + * statement was a query) + * @throws SQLException if this object is closed or invalid + */ + @Override + public final long getLargeUpdateCount() throws SQLException { + try { + debugCodeCall("getLargeUpdateCount"); + checkClosed(); return updateCount; } catch (Exception e) { throw logAndConvert(e); @@ -243,17 +340,21 @@ public int getUpdateCount() throws SQLException { public void close() throws SQLException { try { debugCodeCall("close"); - synchronized (session) { - closeOldResultSet(); - if (conn != null) { - conn = null; - } - } + closeInternal(); } catch (Exception e) { throw logAndConvert(e); } } + private void closeInternal() { + synchronized (session) { + closeOldResultSet(); + if (conn != null) { + conn = null; + } + } + } + /** * Returns the connection that created this object. * @@ -357,6 +458,23 @@ public int getMaxRows() throws SQLException { try { debugCodeCall("getMaxRows"); checkClosed(); + return maxRows <= Integer.MAX_VALUE ? (int) maxRows : 0; + } catch (Exception e) { + throw logAndConvert(e); + } + } + + /** + * Gets the maximum number of rows for a ResultSet. + * + * @return the number of rows where 0 means no limit + * @throws SQLException if this object is closed + */ + @Override + public long getLargeMaxRows() throws SQLException { + try { + debugCodeCall("getLargeMaxRows"); + checkClosed(); return maxRows; } catch (Exception e) { throw logAndConvert(e); @@ -383,6 +501,26 @@ public void setMaxRows(int maxRows) throws SQLException { } } + /** + * Gets the maximum number of rows for a ResultSet. 
+ * + * @param maxRows the number of rows where 0 means no limit + * @throws SQLException if this object is closed + */ + @Override + public void setLargeMaxRows(long maxRows) throws SQLException { + try { + debugCodeCall("setLargeMaxRows", maxRows); + checkClosed(); + if (maxRows < 0) { + throw DbException.getInvalidValueException("maxRows", maxRows); + } + this.maxRows = maxRows; + } catch (Exception e) { + throw logAndConvert(e); + } + } + /** * Sets the number of rows suggested to read in one step. * This value cannot be higher than the maximum rows (setMaxRows) @@ -506,7 +644,7 @@ public void setMaxFieldSize(int max) throws SQLException { public void setEscapeProcessing(boolean enable) throws SQLException { try { if (isDebugEnabled()) { - debugCode("setEscapeProcessing("+enable+");"); + debugCode("setEscapeProcessing(" + enable + ')'); } checkClosed(); escapeProcessing = enable; @@ -549,7 +687,7 @@ public void cancel() throws SQLException { * * @return true if yes */ - public boolean wasCancelled() { + public boolean isCancelled() { return cancelled; } @@ -612,7 +750,7 @@ public void addBatch(String sql) throws SQLException { checkClosed(); sql = JdbcConnection.translateSQL(sql, escapeProcessing); if (batchCommands == null) { - batchCommands = New.arrayList(); + batchCommands = Utils.newSmallArrayList(); } batchCommands.add(sql); } catch (Exception e) { @@ -639,69 +777,122 @@ public void clearBatch() throws SQLException { * If one of the batched statements fails, this database will continue. 
* * @return the array of update counts + * @see #executeLargeBatch() */ @Override public int[] executeBatch() throws SQLException { try { debugCodeCall("executeBatch"); - checkClosedForWrite(); - try { - if (batchCommands == null) { - // TODO batch: check what other database do if no commands - // are set - batchCommands = New.arrayList(); - } - int size = batchCommands.size(); - int[] result = new int[size]; - boolean error = false; - SQLException next = null; - for (int i = 0; i < size; i++) { - String sql = batchCommands.get(i); - try { - result[i] = executeUpdateInternal(sql); - } catch (Exception re) { - SQLException e = logAndConvert(re); - if (next == null) { - next = e; - } else { - e.setNextException(next); - next = e; - } - result[i] = Statement.EXECUTE_FAILED; - error = true; - } - } - batchCommands = null; - if (error) { - throw new JdbcBatchUpdateException(next, result); - } - return result; - } finally { - afterWriting(); + checkClosed(); + if (batchCommands == null) { + batchCommands = new ArrayList<>(); } + int size = batchCommands.size(); + int[] result = new int[size]; + SQLException exception = new SQLException(); + for (int i = 0; i < size; i++) { + long updateCount = executeBatchElement(batchCommands.get(i), exception); + result[i] = updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; + } + batchCommands = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); + } + return result; } catch (Exception e) { throw logAndConvert(e); } } /** - * Return a result set that contains the last generated auto-increment key - * for this connection, if there was one. If no key was generated by the - * last modification statement, then an empty result set is returned. - * The returned result set only contains the data for the very last row. + * Executes the batch. + * If one of the batched statements fails, this database will continue. 
* - * @return the result set with one row and one column containing the key + * @return the array of update counts + */ + @Override + public long[] executeLargeBatch() throws SQLException { + try { + debugCodeCall("executeLargeBatch"); + checkClosed(); + if (batchCommands == null) { + batchCommands = new ArrayList<>(); + } + int size = batchCommands.size(); + long[] result = new long[size]; + SQLException exception = new SQLException(); + for (int i = 0; i < size; i++) { + result[i] = executeBatchElement(batchCommands.get(i), exception); + } + batchCommands = null; + exception = exception.getNextException(); + if (exception != null) { + throw new JdbcBatchUpdateException(exception, result); + } + return result; + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private long executeBatchElement(String sql, SQLException exception) { + long updateCount; + try { + updateCount = executeUpdateInternal(sql, null); + } catch (Exception e) { + exception.setNextException(logAndConvert(e)); + updateCount = Statement.EXECUTE_FAILED; + } + return updateCount; + } + + /** + * Return a result set with generated keys from the latest executed command + * or an empty result set if keys were not generated or were not requested + * with {@link Statement#RETURN_GENERATED_KEYS}, column indexes, or column + * names. + *

    + * Generated keys are only returned from from {@code INSERT}, + * {@code UPDATE}, {@code MERGE INTO}, and {@code MERGE INTO ... USING} + * commands. + *

    + *

    + * If SQL command inserts or updates multiple rows with generated keys each + * such inserted or updated row is returned. Batch methods are also + * supported. + *

    + *

    + * When {@link Statement#RETURN_GENERATED_KEYS} is used H2 chooses columns + * to return automatically. The following columns are chosen: + *

    + *
      + *
    • Columns with sequences including {@code IDENTITY} columns and columns + * with {@code AUTO_INCREMENT}.
    • + *
    • Columns with other default values that are not evaluated into + * constant expressions (like {@code DEFAULT RANDOM_UUID()}).
    • + *
    • Columns that are included into the PRIMARY KEY constraint.
    • + *
    + *

    + * Exact required columns for the returning result set may be specified on + * execution of command with names or indexes of columns. + *

    + * + * @return the possibly empty result set with generated keys * @throws SQLException if this object is closed */ @Override public ResultSet getGeneratedKeys() throws SQLException { try { - int id = getNextId(TraceObject.RESULT_SET); + int id = generatedKeys != null ? generatedKeys.getTraceId() : getNextId(TraceObject.RESULT_SET); if (isDebugEnabled()) { debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getGeneratedKeys()"); } checkClosed(); - return conn.getGeneratedKeys(this, id); + if (generatedKeys == null) { + generatedKeys = new JdbcResultSet(conn, this, null, new SimpleResult(), id, true, false, false); + } + return generatedKeys; } catch (Exception e) { throw logAndConvert(e); } @@ -760,138 +951,158 @@ public boolean getMoreResults(int current) throws SQLException { } /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param autoGeneratedKeys ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @param autoGeneratedKeys + * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should + * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if + * generated keys should not be available + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, int) */ @Override - public int executeUpdate(String sql, int autoGeneratedKeys) + public final int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");"); + debugCode("executeUpdate(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } - return executeUpdateInternal(sql); + long updateCount = executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param columnIndexes ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @param autoGeneratedKeys + * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should + * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if + * generated keys should not be available + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + public final long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+quoteIntArray(columnIndexes)+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + autoGeneratedKeys + ')'); } - return executeUpdateInternal(sql); + return executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. - * This method just calls executeUpdate(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param columnNames ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @param columnIndexes + * an array of column indexes indicating the columns with generated + * keys that should be returned from the inserted row + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, int[]) */ @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { + public final int executeUpdate(String sql, int[] columnIndexes) throws SQLException { try { if (isDebugEnabled()) { - debugCode("executeUpdate("+quote(sql)+", "+quoteArray(columnNames)+");"); + debugCode("executeUpdate(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } - return executeUpdateInternal(sql); + long updateCount = executeUpdateInternal(sql, columnIndexes); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. - * This method just calls execute(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param autoGeneratedKeys ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @param columnIndexes + * an array of column indexes indicating the columns with generated + * keys that should be returned from the inserted row + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing) * @throws SQLException if a database error occurred or a * select statement was executed */ @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + public final long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+autoGeneratedKeys+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); } - return executeInternal(sql); + return executeUpdateInternal(sql, columnIndexes); } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. - * This method just calls execute(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param columnIndexes ignored - * @return the update count (number of row affected by an insert, - * update or delete, or 0 if no rows or the statement was a - * create, drop, commit or rollback) + * @param columnNames + * an array of column names indicating the columns with generated + * keys that should be returned from the inserted row + * @return the update count (number of affected rows by a DML statement or + * other statement able to return number of rows, or 0 if no rows + * were affected or the statement returned nothing, or + * {@link #SUCCESS_NO_INFO} if number of rows is too large for the + * {@code int} data type) * @throws SQLException if a database error occurred or a * select statement was executed + * @see #executeLargeUpdate(String, String[]) */ @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { + public final int executeUpdate(String sql, String[] columnNames) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+quoteIntArray(columnIndexes)+");"); + debugCode("executeUpdate(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } - return executeInternal(sql); + long updateCount = executeUpdateInternal(sql, columnNames); + return updateCount <= Integer.MAX_VALUE ? (int) updateCount : SUCCESS_NO_INFO; } catch (Exception e) { throw logAndConvert(e); } } /** - * Executes a statement and returns the update count. - * This method just calls execute(String sql) internally. - * The method getGeneratedKeys supports at most one columns and row. + * Executes a statement and returns the update count. This method is not + * allowed for prepared statements. 
* * @param sql the SQL statement - * @param columnNames ignored + * @param columnNames + * an array of column names indicating the columns with generated + * keys that should be returned from the inserted row * @return the update count (number of row affected by an insert, * update or delete, or 0 if no rows or the statement was a * create, drop, commit or rollback) @@ -899,123 +1110,186 @@ public boolean execute(String sql, int[] columnIndexes) throws SQLException { * select statement was executed */ @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { + public final long executeLargeUpdate(String sql, String columnNames[]) throws SQLException { try { if (isDebugEnabled()) { - debugCode("execute("+quote(sql)+", "+quoteArray(columnNames)+");"); + debugCode("executeLargeUpdate(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); } - return executeInternal(sql); + return executeUpdateInternal(sql, columnNames); } catch (Exception e) { throw logAndConvert(e); } } /** - * Gets the result set holdability. + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. 
* - * @return the holdability + * @param sql the SQL statement + * @param autoGeneratedKeys + * {@link Statement#RETURN_GENERATED_KEYS} if generated keys should + * be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if + * generated keys should not be available + * @return true if result is a result set, false otherwise + * @throws SQLException if a database error occurred or a + * select statement was executed */ @Override - public int getResultSetHoldability() throws SQLException { + public final boolean execute(String sql, int autoGeneratedKeys) throws SQLException { try { - debugCodeCall("getResultSetHoldability"); - checkClosed(); - return ResultSet.HOLD_CURSORS_OVER_COMMIT; + if (isDebugEnabled()) { + debugCode("execute(" + quote(sql) + ", " + autoGeneratedKeys + ')'); + } + return executeInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS); } catch (Exception e) { throw logAndConvert(e); } } /** - * [Not supported] + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. + * + * @param sql the SQL statement + * @param columnIndexes + * an array of column indexes indicating the columns with generated + * keys that should be returned from the inserted row + * @return true if result is a result set, false otherwise + * @throws SQLException if a database error occurred or a + * select statement was executed */ -/*## Java 1.7 ## @Override - public void closeOnCompletion() { - // not supported + public final boolean execute(String sql, int[] columnIndexes) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("execute(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ')'); + } + return executeInternal(sql, columnIndexes); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ /** - * [Not supported] + * Executes a statement and returns type of its result. This method is not + * allowed for prepared statements. 
+ * + * @param sql the SQL statement + * @param columnNames + * an array of column names indicating the columns with generated + * keys that should be returned from the inserted row + * @return true if result is a result set, false otherwise + * @throws SQLException if a database error occurred or a + * select statement was executed */ -/*## Java 1.7 ## @Override - public boolean isCloseOnCompletion() { - return true; + public final boolean execute(String sql, String[] columnNames) throws SQLException { + try { + if (isDebugEnabled()) { + debugCode("execute(" + quote(sql) + ", " + quoteArray(columnNames) + ')'); + } + return executeInternal(sql, columnNames); + } catch (Exception e) { + throw logAndConvert(e); + } } -//*/ - - // ============================================================= /** - * Check if this connection is closed. - * The next operation is a read request. + * Gets the result set holdability. * - * @return true if the session was re-connected - * @throws DbException if the connection or session is closed + * @return the holdability */ - boolean checkClosed() { - return checkClosed(false); + @Override + public int getResultSetHoldability() throws SQLException { + try { + debugCodeCall("getResultSetHoldability"); + checkClosed(); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * Check if this connection is closed. - * The next operation may be a write request. + * Specifies that this statement will be closed when its dependent result + * set is closed. 
* - * @return true if the session was re-connected - * @throws DbException if the connection or session is closed + * @throws SQLException + * if this statement is closed */ - boolean checkClosedForWrite() { - return checkClosed(true); + @Override + public void closeOnCompletion() throws SQLException { + try { + debugCodeCall("closeOnCompletion"); + checkClosed(); + closeOnCompletion = true; + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * INTERNAL. - * Check if the statement is closed. + * Returns whether this statement will be closed when its dependent result + * set is closed. * - * @param write if the next operation is possibly writing - * @return true if a reconnect was required - * @throws DbException if it is closed + * @return {@code true} if this statement will be closed when its dependent + * result set is closed + * @throws SQLException + * if this statement is closed */ - protected boolean checkClosed(boolean write) { - if (conn == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); + @Override + public boolean isCloseOnCompletion() throws SQLException { + try { + debugCodeCall("isCloseOnCompletion"); + checkClosed(); + return closeOnCompletion; + } catch (Exception e) { + throw logAndConvert(e); } - conn.checkClosed(write); - SessionInterface s = conn.getSession(); - if (s != session) { - session = s; - trace = session.getTrace(); - return true; + } + + void closeIfCloseOnCompletion() { + if (closeOnCompletion) { + try { + closeInternal(); + } catch (Exception e) { + // Don't re-throw + logAndConvert(e); + } } - return false; } + // ============================================================= + /** - * Called after each write operation. + * Check if this connection is closed. 
+ * + * @throws DbException if the connection or session is closed */ - void afterWriting() { - if (conn != null) { - conn.afterWriting(); + void checkClosed() { + if (conn == null) { + throw DbException.get(ErrorCode.OBJECT_CLOSED); } + conn.checkClosed(); } /** * INTERNAL. * Close and old result set if there is still one open. */ - protected void closeOldResultSet() throws SQLException { + protected void closeOldResultSet() { try { - if (!closedByResultSet) { - if (resultSet != null) { - resultSet.closeInternal(); - } + if (resultSet != null) { + resultSet.closeInternal(true); + } + if (generatedKeys != null) { + generatedKeys.closeInternal(true); } } finally { cancelled = false; resultSet = null; updateCount = -1; + generatedKeys = null; } } @@ -1025,22 +1299,27 @@ protected void closeOldResultSet() throws SQLException { * * @param c the command */ - protected void setExecutingStatement(CommandInterface c) { + void setExecutingStatement(CommandInterface c) { if (c == null) { conn.setExecutingStatement(null); } else { conn.setExecutingStatement(this); - lastExecutedCommandType = c.getCommandType(); } executingCommand = c; } /** - * INTERNAL. - * Get the command type of the last executed command. + * Called when the result set is closed. 
+ * + * @param command the command + * @param closeCommand whether to close the command */ - public int getLastExecutedCommandType() { - return lastExecutedCommandType; + void onLazyResultSetClose(CommandInterface command, boolean closeCommand) { + setExecutingStatement(null); + command.stop(); + if (closeCommand) { + command.close(); + } } /** @@ -1067,10 +1346,14 @@ public boolean isClosed() throws SQLException { @Override @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - if (isWrapperFor(iface)) { - return (T) this; + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); } - throw DbException.getInvalidValueException("iface", iface); } /** @@ -1103,8 +1386,78 @@ public boolean isPoolable() { @Override public void setPoolable(boolean poolable) { if (isDebugEnabled()) { - debugCode("setPoolable("+poolable+");"); + debugCode("setPoolable(" + poolable + ')'); + } + } + + /** + * @param identifier + * identifier to quote if required, may be quoted or unquoted + * @param alwaysQuote + * if {@code true} identifier will be quoted unconditionally + * @return specified identifier quoted if required, explicitly requested, or + * if it was already quoted + * @throws NullPointerException + * if identifier is {@code null} + * @throws SQLException + * if identifier is not a valid identifier + */ + @Override + public String enquoteIdentifier(String identifier, boolean alwaysQuote) throws SQLException { + if (isSimpleIdentifier(identifier)) { + return alwaysQuote ? 
'"' + identifier + '"': identifier; + } + try { + int length = identifier.length(); + if (length > 0) { + if (identifier.charAt(0) == '"') { + checkQuotes(identifier, 1, length); + return identifier; + } else if (identifier.startsWith("U&\"") || identifier.startsWith("u&\"")) { + // Check validity of double quotes + checkQuotes(identifier, 3, length); + // Check validity of escape sequences + StringUtils.decodeUnicodeStringSQL(identifier, '\\'); + return identifier; + } + } + return StringUtils.quoteIdentifier(identifier); + } catch (Exception e) { + throw logAndConvert(e); + } + } + + private static void checkQuotes(String identifier, int offset, int length) { + boolean quoted = true; + for (int i = offset; i < length; i++) { + if (identifier.charAt(i) == '"') { + quoted = !quoted; + } else if (!quoted) { + throw DbException.get(ErrorCode.INVALID_NAME_1, identifier); + } + } + if (quoted) { + throw DbException.get(ErrorCode.INVALID_NAME_1, identifier); + } + } + + /** + * @param identifier + * identifier to check + * @return is specified identifier may be used without quotes + * @throws NullPointerException + * if identifier is {@code null} + */ + @Override + public boolean isSimpleIdentifier(String identifier) throws SQLException { + Session.StaticSettings settings; + try { + checkClosed(); + settings = conn.getStaticSettings(); + } catch (Exception e) { + throw logAndConvert(e); } + return ParserUtil.isSimpleIdentifier(identifier, settings.databaseToUpper, settings.databaseToLower); } /** diff --git a/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java b/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java new file mode 100644 index 0000000000..5406337da0 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/JdbcStatementBackwardsCompat.java @@ -0,0 +1,41 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc; + +import java.sql.SQLException; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. + */ +public interface JdbcStatementBackwardsCompat { + + // compatibility interface + + // JDBC 4.3 (incomplete) + + /** + * Enquotes the specified identifier. + * + * @param identifier + * identifier to quote if required + * @param alwaysQuote + * if {@code true} identifier will be quoted unconditionally + * @return specified identifier quoted if required or explicitly requested + * @throws SQLException on failure + */ + String enquoteIdentifier(String identifier, boolean alwaysQuote) throws SQLException; + + /** + * Checks if specified identifier may be used without quotes. + * + * @param identifier + * identifier to check + * @return is specified identifier may be used without quotes + * @throws SQLException on failure + */ + boolean isSimpleIdentifier(String identifier) throws SQLException; +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java new file mode 100644 index 0000000000..0b7da247eb --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMeta.java @@ -0,0 +1,395 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; + +/** + * Database meta information. 
+ */ +public abstract class DatabaseMeta { + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#nullsAreSortedHigh() + * @see java.sql.DatabaseMetaData#nullsAreSortedLow() + * @see java.sql.DatabaseMetaData#nullsAreSortedAtStart() + * @see java.sql.DatabaseMetaData#nullsAreSortedAtEnd() + * @return DefaultNullOrdering + */ + public abstract DefaultNullOrdering defaultNullOrdering(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getDatabaseProductVersion() + * @return product version as String + */ + public abstract String getDatabaseProductVersion(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSQLKeywords() + * @return list of supported SQL keywords + */ + public abstract String getSQLKeywords(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getNumericFunctions() + * @return list of supported numeric functions + */ + public abstract String getNumericFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getStringFunctions() + * @return list of supported string functions + */ + public abstract String getStringFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSystemFunctions() + * @return list of supported system functions + */ + public abstract String getSystemFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getTimeDateFunctions() + * @return list of supported time/date functions + */ + public abstract String getTimeDateFunctions(); + + /** + * INTERNAL + * + * @see java.sql.DatabaseMetaData#getSearchStringEscape() + * @return search string escape sequence + */ + public abstract String getSearchStringEscape(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param procedureNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getProcedures(String, String, String) + */ + public abstract ResultInterface 
getProcedures(String catalog, String schemaPattern, String procedureNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param procedureNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getProcedureColumns(String, String, + * String, String) + */ + public abstract ResultInterface getProcedureColumns(String catalog, String schemaPattern, + String procedureNamePattern, String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param types String[] + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTables(String, String, String, + * String[]) + */ + public abstract ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, + String[] types); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSchemas() + */ + public abstract ResultInterface getSchemas(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getCatalogs() + */ + public abstract ResultInterface getCatalogs(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTableTypes() + */ + public abstract ResultInterface getTableTypes(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getColumns(String, String, String, String) + */ + public abstract ResultInterface getColumns(String catalog, String schemaPattern, String 
tableNamePattern, + String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getColumnPrivileges(String, String, + * String, String) + */ + public abstract ResultInterface getColumnPrivileges(String catalog, String schema, String table, + String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTablePrivileges(String, String, String) + */ + public abstract ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern); + + /** + * INTERNAL + * @param catalogPattern "LIKE" style pattern to filter result + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableName table of interest + * @param scope of interest + * @param nullable include nullable columns + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getBestRowIdentifier(String, String, + * String, int, boolean) + */ + public abstract ResultInterface getBestRowIdentifier(String catalogPattern, String schemaPattern, String tableName, + int scope, boolean nullable); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getVersionColumns(String, String, String) + */ + public abstract ResultInterface getVersionColumns(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getPrimaryKeys(String, String, String) + */ + public abstract 
ResultInterface getPrimaryKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getImportedKeys(String, String, String) + */ + public abstract ResultInterface getImportedKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getExportedKeys(String, String, String) + */ + public abstract ResultInterface getExportedKeys(String catalog, String schema, String table); + + /** + * INTERNAL + * @param primaryCatalog to inspect + * @param primarySchema to inspect + * @param primaryTable to inspect + * @param foreignCatalog to inspect + * @param foreignSchema to inspect + * @param foreignTable to inspect + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getCrossReference(String, String, String, + * String, String, String) + */ + public abstract ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getTypeInfo() + */ + public abstract ResultInterface getTypeInfo(); + + /** + * INTERNAL + * @param catalog to inspect + * @param schema to inspect + * @param table to inspect + * @param unique only + * @param approximate allowed + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getIndexInfo(String, String, String, + * boolean, boolean) + */ + public abstract ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern 
"LIKE" style pattern to filter result + * @param types int[] + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getUDTs(String, String, String, int[]) + */ + public abstract ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSuperTypes(String, String, String) + */ + public abstract ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSuperTables(String, String, String) + */ + public abstract ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param typeNamePattern "LIKE" style pattern to filter result + * @param attributeNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getAttributes(String, String, String, + * String) + */ + public abstract ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getDatabaseMajorVersion() + */ + public abstract int getDatabaseMajorVersion(); + + /** + * INTERNAL + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getDatabaseMinorVersion() + */ + public abstract int getDatabaseMinorVersion(); + + /** + * INTERNAL + * @param catalog to inspect + * 
@param schemaPattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getSchemas(String, String) + */ + public abstract ResultInterface getSchemas(String catalog, String schemaPattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param functionNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getFunctions(String, String, String) + */ + public abstract ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param functionNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getFunctionColumns(String, String, String, + * String) + */ + public abstract ResultInterface getFunctionColumns(String catalog, String schemaPattern, // + String functionNamePattern, String columnNamePattern); + + /** + * INTERNAL + * @param catalog to inspect + * @param schemaPattern "LIKE" style pattern to filter result + * @param tableNamePattern "LIKE" style pattern to filter result + * @param columnNamePattern "LIKE" style pattern to filter result + * @return ResultInterface + * + * @see java.sql.DatabaseMetaData#getPseudoColumns(String, String, String, + * String) + */ + public abstract ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern); + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java new file mode 100644 index 0000000000..c33ff10c3c --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLegacy.java @@ -0,0 +1,691 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.sql.DatabaseMetaData; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandInterface; +import org.h2.engine.Constants; +import org.h2.engine.Session; +import org.h2.expression.ParameterInterface; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Legacy implementation of database meta information. + */ +public final class DatabaseMetaLegacy extends DatabaseMetaLocalBase { + + private static final Value PERCENT = ValueVarchar.get("%"); + + private static final Value BACKSLASH = ValueVarchar.get("\\"); + + private static final Value YES = ValueVarchar.get("YES"); + + private static final Value NO = ValueVarchar.get("NO"); + + private static final Value SCHEMA_MAIN = ValueVarchar.get(Constants.SCHEMA_MAIN); + + private final Session session; + + public DatabaseMetaLegacy(Session session) { + this.session = session; + } + + @Override + public final DefaultNullOrdering defaultNullOrdering() { + return DefaultNullOrdering.LOW; + } + + @Override + public String getSQLKeywords() { + return "CURRENT_CATALOG," // + + "CURRENT_SCHEMA," // + + "GROUPS," // + + "IF,ILIKE,INTERSECTS," // + + "KEY," // + + "LIMIT," // + + "MINUS," // + + "OFFSET," // + + "QUALIFY," // + + "REGEXP,ROWNUM," // + + "SYSDATE,SYSTIME,SYSTIMESTAMP," // + + "TODAY,TOP,"// + + "_ROWID_"; + } + + @Override + public String getNumericFunctions() { + return getFunctions("Functions (Numeric)"); + } + + @Override + public String getStringFunctions() { + return getFunctions("Functions (String)"); + } + + @Override + public String 
getSystemFunctions() { + return getFunctions("Functions (System)"); + } + + @Override + public String getTimeDateFunctions() { + return getFunctions("Functions (Time and Date)"); + } + + private String getFunctions(String section) { + String sql = "SELECT TOPIC FROM INFORMATION_SCHEMA.HELP WHERE SECTION = ?"; + Value[] args = new Value[] { getString(section) }; + ResultInterface result = executeQuery(sql, args); + StringBuilder builder = new StringBuilder(); + while (result.next()) { + String s = result.currentRow()[0].getString().trim(); + String[] array = StringUtils.arraySplit(s, ',', true); + for (String a : array) { + if (builder.length() != 0) { + builder.append(','); + } + String f = a.trim(); + int spaceIndex = f.indexOf(' '); + if (spaceIndex >= 0) { + // remove 'Function' from 'INSERT Function' + StringUtils.trimSubstring(builder, f, 0, spaceIndex); + } else { + builder.append(f); + } + } + } + return builder.toString(); + } + + @Override + public String getSearchStringEscape() { + return "\\"; + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + return executeQuery("SELECT " // + + "ALIAS_CATALOG PROCEDURE_CAT, " // + + "ALIAS_SCHEMA PROCEDURE_SCHEM, " // + + "ALIAS_NAME PROCEDURE_NAME, " // + + "COLUMN_COUNT NUM_INPUT_PARAMS, " // + + "ZERO() NUM_OUTPUT_PARAMS, " // + + "ZERO() NUM_RESULT_SETS, " // + + "REMARKS, " // + + "RETURNS_RESULT PROCEDURE_TYPE, " // + + "ALIAS_NAME SPECIFIC_NAME " // + + "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES " // + + "WHERE ALIAS_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND ALIAS_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND ALIAS_NAME LIKE ?3 ESCAPE ?4 " // + + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(procedureNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String 
procedureNamePattern, + String columnNamePattern) { + return executeQuery("SELECT " // + + "ALIAS_CATALOG PROCEDURE_CAT, " // + + "ALIAS_SCHEMA PROCEDURE_SCHEM, " // + + "ALIAS_NAME PROCEDURE_NAME, " // + + "COLUMN_NAME, " // + + "COLUMN_TYPE, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "PRECISION, " // + + "PRECISION LENGTH, " // + + "SCALE, " // + + "RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEFAULT COLUMN_DEF, " // + + "ZERO() SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "ZERO() CHAR_OCTET_LENGTH, " // + + "POS ORDINAL_POSITION, " // + + "?1 IS_NULLABLE, " // + + "ALIAS_NAME SPECIFIC_NAME " // + + "FROM INFORMATION_SCHEMA.FUNCTION_COLUMNS " // + + "WHERE ALIAS_CATALOG LIKE ?2 ESCAPE ?6 " // + + "AND ALIAS_SCHEMA LIKE ?3 ESCAPE ?6 " // + + "AND ALIAS_NAME LIKE ?4 ESCAPE ?6 " // + + "AND COLUMN_NAME LIKE ?5 ESCAPE ?6 " // + + "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION", // + YES, // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(procedureNamePattern), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + int typesLength = types != null ? 
types.length : 0; + boolean includeSynonyms = types == null || Arrays.asList(types).contains("SYNONYM"); + // (1024 - 16) is enough for the most cases + StringBuilder select = new StringBuilder(1008); + if (includeSynonyms) { + select.append("SELECT " // + + "TABLE_CAT, " // + + "TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_CAT, " // + + "TYPE_SCHEM, " // + + "TYPE_NAME, " // + + "SELF_REFERENCING_COL_NAME, " // + + "REF_GENERATION, " // + + "SQL " // + + "FROM (" // + + "SELECT " // + + "SYNONYM_CATALOG TABLE_CAT, " // + + "SYNONYM_SCHEMA TABLE_SCHEM, " // + + "SYNONYM_NAME as TABLE_NAME, " // + + "TYPE_NAME AS TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_NAME TYPE_CAT, " // + + "TYPE_NAME TYPE_SCHEM, " // + + "TYPE_NAME AS TYPE_NAME, " // + + "TYPE_NAME SELF_REFERENCING_COL_NAME, " // + + "TYPE_NAME REF_GENERATION, " // + + "NULL AS SQL " // + + "FROM INFORMATION_SCHEMA.SYNONYMS " // + + "WHERE SYNONYM_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND SYNONYM_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND SYNONYM_NAME LIKE ?3 ESCAPE ?4 " // + + "UNION "); + } + select.append("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "TABLE_TYPE, " // + + "REMARKS, " // + + "TYPE_NAME TYPE_CAT, " // + + "TYPE_NAME TYPE_SCHEM, " // + + "TYPE_NAME, " // + + "TYPE_NAME SELF_REFERENCING_COL_NAME, " // + + "TYPE_NAME REF_GENERATION, " // + + "SQL " // + + "FROM INFORMATION_SCHEMA.TABLES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME LIKE ?3 ESCAPE ?4"); + if (typesLength > 0) { + select.append(" AND TABLE_TYPE IN("); + for (int i = 0; i < typesLength; i++) { + if (i > 0) { + select.append(", "); + } + select.append('?').append(i + 5); + } + select.append(')'); + } + if (includeSynonyms) { + select.append(')'); + } + Value[] args = new Value[typesLength + 4]; + args[0] = getCatalogPattern(catalog); + args[1] = 
getSchemaPattern(schemaPattern); + args[2] = getPattern(tableNamePattern); + args[3] = BACKSLASH; + for (int i = 0; i < typesLength; i++) { + args[i + 4] = getString(types[i]); + } + return executeQuery(select.append(" ORDER BY TABLE_TYPE, TABLE_SCHEM, TABLE_NAME").toString(), args); + } + + @Override + public ResultInterface getSchemas() { + return executeQuery("SELECT " // + + "SCHEMA_NAME TABLE_SCHEM, " // + + "CATALOG_NAME TABLE_CATALOG " // + + "FROM INFORMATION_SCHEMA.SCHEMATA " // + + "ORDER BY SCHEMA_NAME"); + } + + @Override + public ResultInterface getCatalogs() { + return executeQuery("SELECT CATALOG_NAME TABLE_CAT " // + + "FROM INFORMATION_SCHEMA.CATALOGS"); + } + + @Override + public ResultInterface getTableTypes() { + return executeQuery("SELECT " // + + "TYPE TABLE_TYPE " // + + "FROM INFORMATION_SCHEMA.TABLE_TYPES " // + + "ORDER BY TABLE_TYPE"); + } + + @Override + public ResultInterface getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CAT, " // + + "TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "COLUMN_SIZE, " // + + "BUFFER_LENGTH, " // + + "DECIMAL_DIGITS, " // + + "NUM_PREC_RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEF, " // + + "SQL_DATA_TYPE, " // + + "SQL_DATETIME_SUB, " // + + "CHAR_OCTET_LENGTH, " // + + "ORDINAL_POSITION, " // + + "IS_NULLABLE, " // + + "SCOPE_CATALOG, " // + + "SCOPE_SCHEMA, " // + + "SCOPE_TABLE, " // + + "SOURCE_DATA_TYPE, " // + + "IS_AUTOINCREMENT, " // + + "IS_GENERATEDCOLUMN " // + + "FROM (" // + + "SELECT " // + + "s.SYNONYM_CATALOG TABLE_CAT, " // + + "s.SYNONYM_SCHEMA TABLE_SCHEM, " // + + "s.SYNONYM_NAME TABLE_NAME, " // + + "c.COLUMN_NAME, " // + + "c.DATA_TYPE, " // + + "c.TYPE_NAME, " // + + "c.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "c.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "c.NUMERIC_SCALE DECIMAL_DIGITS, 
" // + + "c.NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " // + + "c.NULLABLE, " // + + "c.REMARKS, " // + + "c.COLUMN_DEFAULT COLUMN_DEF, " // + + "c.DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "c.CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " // + + "c.ORDINAL_POSITION, " // + + "c.IS_NULLABLE IS_NULLABLE, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " // + + "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " // + + "c.SOURCE_DATA_TYPE, " // + + "CASE WHEN c.SEQUENCE_NAME IS NULL THEN " // + + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END IS_AUTOINCREMENT, " // + + "CASE WHEN c.IS_COMPUTED THEN " // + + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " // + + "FROM INFORMATION_SCHEMA.COLUMNS c JOIN INFORMATION_SCHEMA.SYNONYMS s ON " // + + "s.SYNONYM_FOR = c.TABLE_NAME " // + + "AND s.SYNONYM_FOR_SCHEMA = c.TABLE_SCHEMA " // + + "WHERE s.SYNONYM_CATALOG LIKE ?3 ESCAPE ?7 " // + + "AND s.SYNONYM_SCHEMA LIKE ?4 ESCAPE ?7 " // + + "AND s.SYNONYM_NAME LIKE ?5 ESCAPE ?7 " // + + "AND c.COLUMN_NAME LIKE ?6 ESCAPE ?7 " // + + "UNION SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "DATA_TYPE, " // + + "TYPE_NAME, " // + + "CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "NUMERIC_SCALE DECIMAL_DIGITS, " // + + "NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, " // + + "NULLABLE, " // + + "REMARKS, " // + + "COLUMN_DEFAULT COLUMN_DEF, " // + + "DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, " // + + "ORDINAL_POSITION, " // + + "IS_NULLABLE IS_NULLABLE, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, " // + + "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, " // + + "SOURCE_DATA_TYPE, " // + + "CASE WHEN 
SEQUENCE_NAME IS NULL THEN " // + + "CAST(?1 AS VARCHAR) ELSE CAST(?2 AS VARCHAR) END IS_AUTOINCREMENT, " // + + "CASE WHEN IS_COMPUTED THEN " // + + "CAST(?2 AS VARCHAR) ELSE CAST(?1 AS VARCHAR) END IS_GENERATEDCOLUMN " // + + "FROM INFORMATION_SCHEMA.COLUMNS " // + + "WHERE TABLE_CATALOG LIKE ?3 ESCAPE ?7 " // + + "AND TABLE_SCHEMA LIKE ?4 ESCAPE ?7 " // + + "AND TABLE_NAME LIKE ?5 ESCAPE ?7 " // + + "AND COLUMN_NAME LIKE ?6 ESCAPE ?7) " // + + "ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION", // + NO, // + YES, // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(tableNamePattern), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "GRANTOR, " // + + "GRANTEE, " // + + "PRIVILEGE_TYPE PRIVILEGE, " // + + "IS_GRANTABLE " // + + "FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?5 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?5 " // + + "AND TABLE_NAME = ?3 " // + + "AND COLUMN_NAME LIKE ?4 ESCAPE ?5 " // + + "ORDER BY COLUMN_NAME, PRIVILEGE", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + getPattern(columnNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "GRANTOR, " // + + "GRANTEE, " // + + "PRIVILEGE_TYPE PRIVILEGE, " // + + "IS_GRANTABLE " // + + "FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME LIKE ?3 ESCAPE ?4 
" // + + "ORDER BY TABLE_SCHEM, TABLE_NAME, PRIVILEGE", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + getPattern(tableNamePattern), // + BACKSLASH); + } + + @Override + public ResultInterface getBestRowIdentifier(String catalogPattern, String schemaPattern, String tableName, + int scope, boolean nullable) { + return executeQuery("SELECT " // + + "CAST(?1 AS SMALLINT) SCOPE, " // + + "C.COLUMN_NAME, " // + + "C.DATA_TYPE, " // + + "C.TYPE_NAME, " // + + "C.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, " // + + "C.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, " // + + "CAST(C.NUMERIC_SCALE AS SMALLINT) DECIMAL_DIGITS, " // + + "CAST(?2 AS SMALLINT) PSEUDO_COLUMN " // + + "FROM INFORMATION_SCHEMA.INDEXES I, " // + + "INFORMATION_SCHEMA.COLUMNS C " // + + "WHERE C.TABLE_NAME = I.TABLE_NAME " // + + "AND C.COLUMN_NAME = I.COLUMN_NAME " // + + "AND C.TABLE_CATALOG LIKE ?3 ESCAPE ?6 " // + + "AND C.TABLE_SCHEMA LIKE ?4 ESCAPE ?6 " // + + "AND C.TABLE_NAME = ?5 " // + + "AND I.PRIMARY_KEY = TRUE " // + + "ORDER BY SCOPE", // + // SCOPE + ValueInteger.get(DatabaseMetaData.bestRowSession), // + // PSEUDO_COLUMN + ValueInteger.get(DatabaseMetaData.bestRowNotPseudo), // + getCatalogPattern(catalogPattern), // + getSchemaPattern(schemaPattern), // + getString(tableName), // + BACKSLASH); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "COLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "COALESCE(CONSTRAINT_NAME, INDEX_NAME) PK_NAME " // + + "FROM INFORMATION_SCHEMA.INDEXES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND TABLE_NAME = ?3 " // + + "AND PRIMARY_KEY = TRUE " // + + "ORDER BY COLUMN_NAME", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + 
@Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE FKTABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND FKTABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND FKTABLE_NAME = ?3 " // + + "ORDER BY PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE PKTABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND PKTABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND PKTABLE_NAME = ?3 " // + + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String 
foreignTable) { + return executeQuery("SELECT " // + + "PKTABLE_CATALOG PKTABLE_CAT, " // + + "PKTABLE_SCHEMA PKTABLE_SCHEM, " // + + "PKTABLE_NAME PKTABLE_NAME, " // + + "PKCOLUMN_NAME, " // + + "FKTABLE_CATALOG FKTABLE_CAT, " // + + "FKTABLE_SCHEMA FKTABLE_SCHEM, " // + + "FKTABLE_NAME, " // + + "FKCOLUMN_NAME, " // + + "ORDINAL_POSITION KEY_SEQ, " // + + "UPDATE_RULE, " // + + "DELETE_RULE, " // + + "FK_NAME, " // + + "PK_NAME, " // + + "DEFERRABILITY " // + + "FROM INFORMATION_SCHEMA.CROSS_REFERENCES " // + + "WHERE PKTABLE_CATALOG LIKE ?1 ESCAPE ?7 " // + + "AND PKTABLE_SCHEMA LIKE ?2 ESCAPE ?7 " // + + "AND PKTABLE_NAME = ?3 " // + + "AND FKTABLE_CATALOG LIKE ?4 ESCAPE ?7 " // + + "AND FKTABLE_SCHEMA LIKE ?5 ESCAPE ?7 " // + + "AND FKTABLE_NAME = ?6 " // + + "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ", // + getCatalogPattern(primaryCatalog), // + getSchemaPattern(primarySchema), // + getString(primaryTable), // + getCatalogPattern(foreignCatalog), // + getSchemaPattern(foreignSchema), // + getString(foreignTable), // + BACKSLASH); + } + + @Override + public ResultInterface getTypeInfo() { + return executeQuery("SELECT " // + + "TYPE_NAME, " // + + "DATA_TYPE, " // + + "PRECISION, " // + + "PREFIX LITERAL_PREFIX, " // + + "SUFFIX LITERAL_SUFFIX, " // + + "PARAMS CREATE_PARAMS, " // + + "NULLABLE, " // + + "CASE_SENSITIVE, " // + + "SEARCHABLE, " // + + "FALSE UNSIGNED_ATTRIBUTE, " // + + "FALSE FIXED_PREC_SCALE, " // + + "AUTO_INCREMENT, " // + + "TYPE_NAME LOCAL_TYPE_NAME, " // + + "MINIMUM_SCALE, " // + + "MAXIMUM_SCALE, " // + + "DATA_TYPE SQL_DATA_TYPE, " // + + "ZERO() SQL_DATETIME_SUB, " // + + "RADIX NUM_PREC_RADIX " // + + "FROM INFORMATION_SCHEMA.TYPE_INFO " // + + "ORDER BY DATA_TYPE, POS"); + } + + @Override + public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) { + String uniqueCondition = unique ? 
"NON_UNIQUE=FALSE" : "TRUE"; + return executeQuery("SELECT " // + + "TABLE_CATALOG TABLE_CAT, " // + + "TABLE_SCHEMA TABLE_SCHEM, " // + + "TABLE_NAME, " // + + "NON_UNIQUE, " // + + "TABLE_CATALOG INDEX_QUALIFIER, " // + + "INDEX_NAME, " // + + "INDEX_TYPE TYPE, " // + + "ORDINAL_POSITION, " // + + "COLUMN_NAME, " // + + "ASC_OR_DESC, " // + // TODO meta data for number of unique values in an index + + "CARDINALITY, " // + + "PAGES, " // + + "FILTER_CONDITION, " // + + "SORT_TYPE " // + + "FROM INFORMATION_SCHEMA.INDEXES " // + + "WHERE TABLE_CATALOG LIKE ?1 ESCAPE ?4 " // + + "AND TABLE_SCHEMA LIKE ?2 ESCAPE ?4 " // + + "AND (" + uniqueCondition + ") " // + + "AND TABLE_NAME = ?3 " // + + "ORDER BY NON_UNIQUE, TYPE, TABLE_SCHEM, INDEX_NAME, ORDINAL_POSITION", // + getCatalogPattern(catalog), // + getSchemaPattern(schema), // + getString(table), // + BACKSLASH); + } + + @Override + public ResultInterface getSchemas(String catalog, String schemaPattern) { + return executeQuery("SELECT " // + + "SCHEMA_NAME TABLE_SCHEM, " // + + "CATALOG_NAME TABLE_CATALOG " // + + "FROM INFORMATION_SCHEMA.SCHEMATA " // + + "WHERE CATALOG_NAME LIKE ?1 ESCAPE ?3 " // + + "AND SCHEMA_NAME LIKE ?2 ESCAPE ?3 " // + + "ORDER BY SCHEMA_NAME", // + getCatalogPattern(catalog), // + getSchemaPattern(schemaPattern), // + BACKSLASH); + } + + @Override + public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return getPseudoColumnsResult(); + } + + private ResultInterface executeQuery(String sql, Value... 
args) { + checkClosed(); + synchronized (session) { + CommandInterface command = session.prepareCommand(sql, Integer.MAX_VALUE); + int l = args.length; + if (l > 0) { + ArrayList parameters = command.getParameters(); + for (int i = 0; i < l; i++) { + parameters.get(i).setValue(args[i], true); + } + } + ResultInterface result = command.executeQuery(0, false); + command.close(); + return result; + } + } + + @Override + void checkClosed() { + if (session.isClosed()) { + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); + } + } + + private Value getString(String string) { + return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE; + } + + private Value getPattern(String pattern) { + return pattern == null ? PERCENT : getString(pattern); + } + + private Value getSchemaPattern(String pattern) { + return pattern == null ? PERCENT : pattern.isEmpty() ? SCHEMA_MAIN : getString(pattern); + } + + private Value getCatalogPattern(String catalogPattern) { + return catalogPattern == null || catalogPattern.isEmpty() ? PERCENT : getString(catalogPattern); + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java new file mode 100644 index 0000000000..fa43376376 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocal.java @@ -0,0 +1,1523 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; + +import org.h2.api.ErrorCode; +import org.h2.command.dml.Help; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintActionType; +import org.h2.constraint.ConstraintReferential; +import org.h2.constraint.ConstraintUnique; +import org.h2.engine.Database; +import org.h2.engine.DbObject; +import org.h2.engine.Mode; +import org.h2.engine.Right; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.expression.condition.CompareLike; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.result.SortOrder; +import org.h2.schema.FunctionAlias; +import org.h2.schema.FunctionAlias.JavaMethod; +import org.h2.schema.Schema; +import org.h2.schema.SchemaObject; +import org.h2.schema.UserDefinedFunction; +import org.h2.table.Column; +import org.h2.table.IndexColumn; +import org.h2.table.Table; +import org.h2.table.TableSynonym; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueToObjectConverter2; +import org.h2.value.ValueVarchar; + +/** + * Local implementation of database meta information. 
+ */ +public final class DatabaseMetaLocal extends DatabaseMetaLocalBase { + + private static final Value YES = ValueVarchar.get("YES"); + + private static final Value NO = ValueVarchar.get("NO"); + + private static final ValueSmallint BEST_ROW_SESSION = ValueSmallint.get((short) DatabaseMetaData.bestRowSession); + + private static final ValueSmallint BEST_ROW_NOT_PSEUDO = ValueSmallint + .get((short) DatabaseMetaData.bestRowNotPseudo); + + private static final ValueInteger COLUMN_NO_NULLS = ValueInteger.get(DatabaseMetaData.columnNoNulls); + + private static final ValueSmallint COLUMN_NO_NULLS_SMALL = ValueSmallint + .get((short) DatabaseMetaData.columnNoNulls); + + private static final ValueInteger COLUMN_NULLABLE = ValueInteger.get(DatabaseMetaData.columnNullable); + + private static final ValueSmallint COLUMN_NULLABLE_UNKNOWN_SMALL = ValueSmallint + .get((short) DatabaseMetaData.columnNullableUnknown); + + private static final ValueSmallint IMPORTED_KEY_CASCADE = ValueSmallint + .get((short) DatabaseMetaData.importedKeyCascade); + + private static final ValueSmallint IMPORTED_KEY_RESTRICT = ValueSmallint + .get((short) DatabaseMetaData.importedKeyRestrict); + + private static final ValueSmallint IMPORTED_KEY_DEFAULT = ValueSmallint + .get((short) DatabaseMetaData.importedKeySetDefault); + + private static final ValueSmallint IMPORTED_KEY_SET_NULL = ValueSmallint + .get((short) DatabaseMetaData.importedKeySetNull); + + private static final ValueSmallint IMPORTED_KEY_NOT_DEFERRABLE = ValueSmallint + .get((short) DatabaseMetaData.importedKeyNotDeferrable); + + private static final ValueSmallint PROCEDURE_COLUMN_IN = ValueSmallint + .get((short) DatabaseMetaData.procedureColumnIn); + + private static final ValueSmallint PROCEDURE_COLUMN_RETURN = ValueSmallint + .get((short) DatabaseMetaData.procedureColumnReturn); + + private static final ValueSmallint PROCEDURE_NO_RESULT = ValueSmallint + .get((short) DatabaseMetaData.procedureNoResult); + + private static final 
ValueSmallint PROCEDURE_RETURNS_RESULT = ValueSmallint + .get((short) DatabaseMetaData.procedureReturnsResult); + + private static final ValueSmallint TABLE_INDEX_HASHED = ValueSmallint.get(DatabaseMetaData.tableIndexHashed); + + private static final ValueSmallint TABLE_INDEX_OTHER = ValueSmallint.get(DatabaseMetaData.tableIndexOther); + + // This list must be ordered + private static final String[] TABLE_TYPES = { "BASE TABLE", "GLOBAL TEMPORARY", "LOCAL TEMPORARY", "SYNONYM", + "VIEW" }; + + private static final ValueSmallint TYPE_NULLABLE = ValueSmallint.get((short) DatabaseMetaData.typeNullable); + + private static final ValueSmallint TYPE_SEARCHABLE = ValueSmallint.get((short) DatabaseMetaData.typeSearchable); + + private static final Value NO_USAGE_RESTRICTIONS = ValueVarchar.get("NO_USAGE_RESTRICTIONS"); + + private final SessionLocal session; + + public DatabaseMetaLocal(SessionLocal session) { + this.session = session; + } + + @Override + public final DefaultNullOrdering defaultNullOrdering() { + return session.getDatabase().getDefaultNullOrdering(); + } + + @Override + public String getSQLKeywords() { + StringBuilder builder = new StringBuilder(103).append( // + "CURRENT_CATALOG," // + + "CURRENT_SCHEMA," // + + "GROUPS," // + + "IF,ILIKE," // + + "KEY,"); + Mode mode = session.getMode(); + if (mode.limit) { + builder.append("LIMIT,"); + } + if (mode.minusIsExcept) { + builder.append("MINUS,"); + } + builder.append( // + "OFFSET," // + + "QUALIFY," // + + "REGEXP,ROWNUM,"); + if (mode.topInSelect || mode.topInDML) { + builder.append("TOP,"); + } + return builder.append("_ROWID_") // + .toString(); + } + + @Override + public String getNumericFunctions() { + return getFunctions("Functions (Numeric)"); + } + + @Override + public String getStringFunctions() { + return getFunctions("Functions (String)"); + } + + @Override + public String getSystemFunctions() { + return getFunctions("Functions (System)"); + } + + @Override + public String getTimeDateFunctions() 
{ + return getFunctions("Functions (Time and Date)"); + } + + private String getFunctions(String section) { + checkClosed(); + StringBuilder builder = new StringBuilder(); + try { + ResultSet rs = Help.getTable(); + while (rs.next()) { + if (rs.getString(1).trim().equals(section)) { + if (builder.length() != 0) { + builder.append(','); + } + String topic = rs.getString(2).trim(); + int spaceIndex = topic.indexOf(' '); + if (spaceIndex >= 0) { + // remove 'Function' from 'INSERT Function' + StringUtils.trimSubstring(builder, topic, 0, spaceIndex); + } else { + builder.append(topic); + } + } + } + } catch (Exception e) { + throw DbException.convert(e); + } + return builder.toString(); + } + + @Override + public String getSearchStringEscape() { + return session.getDatabase().getSettings().defaultEscape; + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PROCEDURE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("RESERVED1", TypeInfo.TYPE_NULL); + result.addColumn("RESERVED2", TypeInfo.TYPE_NULL); + result.addColumn("RESERVED3", TypeInfo.TYPE_NULL); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike procedureLike = getLike(procedureNamePattern); + for (Schema s : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(s.getName()); + for (UserDefinedFunction userDefinedFunction : s.getAllFunctionsAndAggregates()) { + String procedureName = userDefinedFunction.getName(); + if (procedureLike != null 
&& !procedureLike.test(procedureName)) { + continue; + } + Value procedureNameValue = getString(procedureName); + if (userDefinedFunction instanceof FunctionAlias) { + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0; i < methods.length; i++) { + JavaMethod method = methods[i]; + TypeInfo typeInfo = method.getDataType(); + getProceduresAdd(result, catalogValue, schemaValue, procedureNameValue, + userDefinedFunction.getComment(), + typeInfo == null || typeInfo.getValueType() != Value.NULL ? PROCEDURE_RETURNS_RESULT + : PROCEDURE_NO_RESULT, + getString(procedureName + '_' + (i + 1))); + } + } else { + getProceduresAdd(result, catalogValue, schemaValue, procedureNameValue, + userDefinedFunction.getComment(), PROCEDURE_RETURNS_RESULT, procedureNameValue); + } + } + } + // PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, SPECIFIC_ NAME + result.sortRows(new SortOrder(session, new int[] { 1, 2, 8 })); + return result; + } + + private void getProceduresAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value procedureNameValue, + String comment, ValueSmallint procedureType, Value specificNameValue) { + result.addRow( + // PROCEDURE_CAT + catalogValue, + // PROCEDURE_SCHEM + schemaValue, + // PROCEDURE_NAME + procedureNameValue, + // RESERVED1 + ValueNull.INSTANCE, + // RESERVED2 + ValueNull.INSTANCE, + // RESERVED3 + ValueNull.INSTANCE, + // REMARKS + getString(comment), + // PROCEDURE_TYPE + procedureType, + // SPECIFIC_NAME + specificNameValue); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PROCEDURE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PROCEDURE_NAME", TypeInfo.TYPE_VARCHAR); + 
result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER); + result.addColumn("LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("SCALE", TypeInfo.TYPE_SMALLINT); + result.addColumn("RADIX", TypeInfo.TYPE_SMALLINT); + result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_DEF", TypeInfo.TYPE_VARCHAR); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike procedureLike = getLike(procedureNamePattern); + for (Schema s : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(s.getName()); + for (UserDefinedFunction userDefinedFunction : s.getAllFunctionsAndAggregates()) { + if (!(userDefinedFunction instanceof FunctionAlias)) { + continue; + } + String procedureName = userDefinedFunction.getName(); + if (procedureLike != null && !procedureLike.test(procedureName)) { + continue; + } + Value procedureNameValue = getString(procedureName); + JavaMethod[] methods; + try { + methods = ((FunctionAlias) userDefinedFunction).getJavaMethods(); + } catch (DbException e) { + continue; + } + for (int i = 0, l = methods.length; i < l; i++) { + JavaMethod method = methods[i]; + Value specificNameValue = getString(procedureName + '_' + (i + 1)); + TypeInfo typeInfo = method.getDataType(); + if 
(typeInfo != null && typeInfo.getValueType() != Value.NULL) { + getProcedureColumnAdd(result, catalogValue, schemaValue, procedureNameValue, specificNameValue, + typeInfo, method.getClass().isPrimitive(), 0); + } + Class[] columnList = method.getColumnClasses(); + for (int o = 1, p = method.hasConnectionParam() ? 1 : 0, n = columnList.length; p < n; o++, p++) { + Class clazz = columnList[p]; + getProcedureColumnAdd(result, catalogValue, schemaValue, procedureNameValue, specificNameValue, + ValueToObjectConverter2.classToType(clazz), clazz.isPrimitive(), o); + } + } + } + } + // PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, SPECIFIC_NAME, return + // value first + result.sortRows(new SortOrder(session, new int[] { 1, 2, 19 })); + return result; + } + + private void getProcedureColumnAdd(SimpleResult result, Value catalogValue, Value schemaValue, + Value procedureNameValue, Value specificNameValue, TypeInfo type, boolean notNull, int ordinal) { + int valueType = type.getValueType(); + DataType dt = DataType.getDataType(valueType); + ValueInteger precisionValue = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())); + result.addRow( + // PROCEDURE_CAT + catalogValue, + // PROCEDURE_SCHEM + schemaValue, + // PROCEDURE_NAME + procedureNameValue, + // COLUMN_NAME + getString(ordinal == 0 ? "RESULT" : "P" + ordinal), + // COLUMN_TYPE + ordinal == 0 ? PROCEDURE_COLUMN_RETURN : PROCEDURE_COLUMN_IN, + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // PRECISION + precisionValue, + // LENGTH + precisionValue, + // SCALE + dt.supportsScale // + ? ValueSmallint.get(MathUtils.convertIntToShort(dt.defaultScale)) + : ValueNull.INSTANCE, + // RADIX + getRadix(valueType, true), + // NULLABLE + notNull ? 
COLUMN_NO_NULLS_SMALL : COLUMN_NULLABLE_UNKNOWN_SMALL, + // REMARKS + ValueNull.INSTANCE, + // COLUMN_DEF + ValueNull.INSTANCE, + // SQL_DATA_TYPE + ValueNull.INSTANCE, + // SQL_DATETIME_SUB + ValueNull.INSTANCE, + // CHAR_OCTET_LENGTH + DataType.isBinaryStringType(valueType) || DataType.isCharacterStringType(valueType) ? precisionValue + : ValueNull.INSTANCE, + // ORDINAL_POSITION + ValueInteger.get(ordinal), + // IS_NULLABLE + ValueVarchar.EMPTY, + // SPECIFIC_NAME + specificNameValue); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_TYPE", TypeInfo.TYPE_VARCHAR); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("SELF_REFERENCING_COL_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("REF_GENERATION", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + HashSet typesSet; + if (types != null) { + typesSet = new HashSet<>(8); + for (String type : types) { + int idx = Arrays.binarySearch(TABLE_TYPES, type); + if (idx >= 0) { + typesSet.add(TABLE_TYPES[idx]); + } else if (type.equals("TABLE")) { + typesSet.add("BASE TABLE"); + } + } + if (typesSet.isEmpty()) { + return result; + } + } else { + typesSet = null; + } + for (Schema schema : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(schema.getName()); + for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) { + Value tableName = 
getString(object.getName()); + if (object instanceof Table) { + Table t = (Table) object; + if (!t.isHidden()) { + getTablesAdd(result, catalogValue, schemaValue, tableName, t, false, typesSet); + } + } else { + getTablesAdd(result, catalogValue, schemaValue, tableName, ((TableSynonym) object).getSynonymFor(), + true, typesSet); + } + } + } + // TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, TABLE_NAME + result.sortRows(new SortOrder(session, new int[] { 3, 1, 2 })); + return result; + } + + private void getTablesAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, Table t, + boolean synonym, HashSet typesSet) { + String type = synonym ? "SYNONYM" : t.getSQLTableType(); + if (typesSet != null && !typesSet.contains(type)) { + return; + } + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableName, + // TABLE_TYPE + getString(type), + // REMARKS + getString(t.getComment()), + // TYPE_CAT + ValueNull.INSTANCE, + // TYPE_SCHEM + ValueNull.INSTANCE, + // TYPE_NAME + ValueNull.INSTANCE, + // SELF_REFERENCING_COL_NAME + ValueNull.INSTANCE, + // REF_GENERATION + ValueNull.INSTANCE); + } + + @Override + public ResultInterface getSchemas() { + return getSchemas(null, null); + } + + @Override + public ResultInterface getCatalogs() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addRow(getString(session.getDatabase().getShortName())); + return result; + } + + @Override + public ResultInterface getTableTypes() { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_TYPE", TypeInfo.TYPE_VARCHAR); + // Order by TABLE_TYPE + result.addRow(getString("BASE TABLE")); + result.addRow(getString("GLOBAL TEMPORARY")); + result.addRow(getString("LOCAL TEMPORARY")); + result.addRow(getString("SYNONYM")); + result.addRow(getString("VIEW")); + return result; + } + + @Override + public ResultInterface getColumns(String catalog, String 
schemaPattern, String tableNamePattern, + String columnNamePattern) { + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER); + result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER); + result.addColumn("NULLABLE", TypeInfo.TYPE_INTEGER); + result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_DEF", TypeInfo.TYPE_VARCHAR); + result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER); + result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER); + result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_CATALOG", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_SCHEMA", TypeInfo.TYPE_VARCHAR); + result.addColumn("SCOPE_TABLE", TypeInfo.TYPE_VARCHAR); + result.addColumn("SOURCE_DATA_TYPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("IS_AUTOINCREMENT", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GENERATEDCOLUMN", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike columnLike = getLike(columnNamePattern); + for (Schema schema : getSchemasForPattern(schemaPattern)) { + Value schemaValue = getString(schema.getName()); + for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) { + Value tableName = getString(object.getName()); + if (object 
instanceof Table) { + Table t = (Table) object; + if (!t.isHidden()) { + getColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } else { + TableSynonym s = (TableSynonym) object; + Table t = s.getSynonymFor(); + getColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike); + } + } + } + // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION + result.sortRows(new SortOrder(session, new int[] { 1, 2, 16 })); + return result; + } + + private void getColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName, Table t, + CompareLike columnLike) { + int ordinal = 0; + for (Column c : t.getColumns()) { + if (!c.getVisible()) { + continue; + } + ordinal++; + String name = c.getName(); + if (columnLike != null && !columnLike.test(name)) { + continue; + } + TypeInfo type = c.getType(); + ValueInteger precision = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())); + boolean nullable = c.isNullable(), isGenerated = c.isGenerated(); + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableName, + // COLUMN_NAME + getString(name), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // COLUMN_SIZE + precision, + // BUFFER_LENGTH + ValueNull.INSTANCE, + // DECIMAL_DIGITS + ValueInteger.get(type.getScale()), + // NUM_PREC_RADIX + getRadix(type.getValueType(), false), + // NULLABLE + nullable ? COLUMN_NULLABLE : COLUMN_NO_NULLS, + // REMARKS + getString(c.getComment()), + // COLUMN_DEF + isGenerated ? ValueNull.INSTANCE : getString(c.getDefaultSQL()), + // SQL_DATA_TYPE (unused) + ValueNull.INSTANCE, + // SQL_DATETIME_SUB (unused) + ValueNull.INSTANCE, + // CHAR_OCTET_LENGTH + precision, + // ORDINAL_POSITION + ValueInteger.get(ordinal), + // IS_NULLABLE + nullable ? 
YES : NO, + // SCOPE_CATALOG + ValueNull.INSTANCE, + // SCOPE_SCHEMA + ValueNull.INSTANCE, + // SCOPE_TABLE + ValueNull.INSTANCE, + // SOURCE_DATA_TYPE + ValueNull.INSTANCE, + // IS_AUTOINCREMENT + c.isIdentity() ? YES : NO, + // IS_GENERATEDCOLUMN + isGenerated ? YES : NO); + } + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTOR", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTEE", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRIVILEGE", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GRANTABLE", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike columnLike = getLike(columnNamePattern); + for (Right r : db.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table t = (Table) object; + if (t.isHidden()) { + continue; + } + String tableName = t.getName(); + if (!db.equalsIdentifiers(table, tableName)) { + continue; + } + Schema s = t.getSchema(); + if (!checkSchema(schema, s)) { + continue; + } + addPrivileges(result, catalogValue, s.getName(), tableName, r.getGrantee(), r.getRightMask(), columnLike, + t.getColumns()); + } + // COLUMN_NAME, PRIVILEGE + result.sortRows(new SortOrder(session, new int[] { 3, 6 })); + return result; + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + checkClosed(); + SimpleResult result 
= new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTOR", TypeInfo.TYPE_VARCHAR); + result.addColumn("GRANTEE", TypeInfo.TYPE_VARCHAR); + result.addColumn("PRIVILEGE", TypeInfo.TYPE_VARCHAR); + result.addColumn("IS_GRANTABLE", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + CompareLike schemaLike = getLike(schemaPattern); + CompareLike tableLike = getLike(tableNamePattern); + for (Right r : db.getAllRights()) { + DbObject object = r.getGrantedObject(); + if (!(object instanceof Table)) { + continue; + } + Table table = (Table) object; + if (table.isHidden()) { + continue; + } + String tableName = table.getName(); + if (tableLike != null && !tableLike.test(tableName)) { + continue; + } + Schema schema = table.getSchema(); + String schemaName = schema.getName(); + if (schemaPattern != null) { + if (schemaPattern.isEmpty()) { + if (schema != db.getMainSchema()) { + continue; + } + } else { + if (!schemaLike.test(schemaName)) { + continue; + } + } + } + addPrivileges(result, catalogValue, schemaName, tableName, r.getGrantee(), r.getRightMask(), null, null); + } + // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, PRIVILEGE + result.sortRows(new SortOrder(session, new int[] { 1, 2, 5 })); + return result; + } + + private void addPrivileges(SimpleResult result, Value catalogValue, String schemaName, String tableName, + DbObject grantee, int rightMask, CompareLike columnLike, Column[] columns) { + Value schemaValue = getString(schemaName); + Value tableValue = getString(tableName); + Value granteeValue = getString(grantee.getName()); + boolean isAdmin = grantee.getType() == DbObject.USER && ((User) grantee).isAdmin(); + if ((rightMask & Right.SELECT) != 0) { + addPrivilege(result, catalogValue, 
schemaValue, tableValue, granteeValue, "SELECT", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.INSERT) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "INSERT", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.UPDATE) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "UPDATE", isAdmin, columnLike, + columns); + } + if ((rightMask & Right.DELETE) != 0) { + addPrivilege(result, catalogValue, schemaValue, tableValue, granteeValue, "DELETE", isAdmin, columnLike, + columns); + } + } + + private void addPrivilege(SimpleResult result, Value catalogValue, Value schemaValue, Value tableValue, + Value granteeValue, String right, boolean isAdmin, CompareLike columnLike, Column[] columns) { + if (columns == null) { + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // GRANTOR + ValueNull.INSTANCE, + // GRANTEE + granteeValue, + // PRIVILEGE + getString(right), + // IS_GRANTABLE + isAdmin ? YES : NO); + } else { + for (Column column : columns) { + String columnName = column.getName(); + if (columnLike != null && !columnLike.test(columnName)) { + continue; + } + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // COLUMN_NAME + getString(columnName), + // GRANTOR + ValueNull.INSTANCE, + // GRANTEE + granteeValue, + // PRIVILEGE + getString(right), + // IS_GRANTABLE + isAdmin ? 
YES : NO); + } + } + } + + @Override + public ResultInterface getBestRowIdentifier(String catalog, String schema, String table, int scope, + boolean nullable) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("SCOPE", TypeInfo.TYPE_SMALLINT); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER); + result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER); + result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER); + result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_SMALLINT); + result.addColumn("PSEUDO_COLUMN", TypeInfo.TYPE_SMALLINT); + if (!checkCatalogName(catalog)) { + return result; + } + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.PRIMARY_KEY) { + continue; + } + IndexColumn[] columns = ((ConstraintUnique) constraint).getColumns(); + for (int i = 0, l = columns.length; i < l; i++) { + IndexColumn ic = columns[i]; + Column c = ic.column; + TypeInfo type = c.getType(); + DataType dt = DataType.getDataType(type.getValueType()); + result.addRow( + // SCOPE + BEST_ROW_SESSION, + // COLUMN_NAME + getString(c.getName()), + // DATA_TYPE + ValueInteger.get(DataType.convertTypeToSQLType(type)), + // TYPE_NAME + getDataTypeName(type), + // COLUMN_SIZE + ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision())), + // BUFFER_LENGTH + ValueNull.INSTANCE, + // DECIMAL_DIGITS + dt.supportsScale ? 
ValueSmallint.get(MathUtils.convertIntToShort(type.getScale())) + : ValueNull.INSTANCE, + // PSEUDO_COLUMN + BEST_ROW_NOT_PSEUDO); + } + } + } + // Order by SCOPE (always the same) + return result; + } + + private Value getDataTypeName(TypeInfo typeInfo) { + return getString(typeInfo.getDeclaredTypeName()); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("KEY_SEQ", TypeInfo.TYPE_SMALLINT); + result.addColumn("PK_NAME", TypeInfo.TYPE_VARCHAR); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.PRIMARY_KEY) { + continue; + } + Value schemaValue = getString(s.getName()); + Value tableValue = getString(t.getName()); + Value pkValue = getString(constraint.getName()); + IndexColumn[] columns = ((ConstraintUnique) constraint).getColumns(); + for (int i = 0, l = columns.length; i < l;) { + result.addRow( + // TABLE_CAT + catalogValue, + // TABLE_SCHEM + schemaValue, + // TABLE_NAME + tableValue, + // COLUMN_NAME + getString(columns[i].column.getName()), + // KEY_SEQ + ValueSmallint.get((short) ++i), + // PK_NAME + pkValue); + } + } + } + // COLUMN_NAME + result.sortRows(new SortOrder(session, new int[] { 3 
})); + return result; + } + + @Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential fk = (ConstraintReferential) constraint; + Table fkTable = fk.getTable(); + if (fkTable != t) { + continue; + } + Table pkTable = fk.getRefTable(); + addCrossReferenceResult(result, catalogValue, pkTable.getSchema().getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 1, 2, 8 })); + return result; + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + if (table == null) { + throw DbException.getInvalidValueException("table", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(catalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(schema)) { + Table t = s.findTableOrView(session, table); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + 
ConstraintReferential fk = (ConstraintReferential) constraint; + Table pkTable = fk.getRefTable(); + if (pkTable != t) { + continue; + } + Table fkTable = fk.getTable(); + addCrossReferenceResult(result, catalogValue, pkTable.getSchema().getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // FKTABLE_CAT FKTABLE_SCHEM, FKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 5, 6, 8 })); + return result; + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) { + if (primaryTable == null) { + throw DbException.getInvalidValueException("primaryTable", null); + } + if (foreignTable == null) { + throw DbException.getInvalidValueException("foreignTable", null); + } + SimpleResult result = initCrossReferenceResult(); + if (!checkCatalogName(primaryCatalog) || !checkCatalogName(foreignCatalog)) { + return result; + } + Database db = session.getDatabase(); + Value catalogValue = getString(db.getShortName()); + for (Schema s : getSchemas(foreignSchema)) { + Table t = s.findTableOrView(session, foreignTable); + if (t == null || t.isHidden()) { + continue; + } + ArrayList constraints = t.getConstraints(); + if (constraints == null) { + continue; + } + for (Constraint constraint : constraints) { + if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential fk = (ConstraintReferential) constraint; + Table fkTable = fk.getTable(); + if (fkTable != t) { + continue; + } + Table pkTable = fk.getRefTable(); + if (!db.equalsIdentifiers(pkTable.getName(), primaryTable)) { + continue; + } + Schema pkSchema = pkTable.getSchema(); + if (!checkSchema(primarySchema, pkSchema)) { + continue; + } + addCrossReferenceResult(result, catalogValue, pkSchema.getName(), pkTable, + fkTable.getSchema().getName(), fkTable, fk); + } + } + // FKTABLE_CAT FKTABLE_SCHEM, 
FKTABLE_NAME, KEY_SEQ + result.sortRows(new SortOrder(session, new int[] { 5, 6, 8 })); + return result; + } + + private SimpleResult initCrossReferenceResult() { + checkClosed(); + SimpleResult result = new SimpleResult(); + result.addColumn("PKTABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKTABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKTABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PKCOLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_CAT", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_SCHEM", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKTABLE_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("FKCOLUMN_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("KEY_SEQ", TypeInfo.TYPE_SMALLINT); + result.addColumn("UPDATE_RULE", TypeInfo.TYPE_SMALLINT); + result.addColumn("DELETE_RULE", TypeInfo.TYPE_SMALLINT); + result.addColumn("FK_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("PK_NAME", TypeInfo.TYPE_VARCHAR); + result.addColumn("DEFERRABILITY", TypeInfo.TYPE_SMALLINT); + return result; + } + + private void addCrossReferenceResult(SimpleResult result, Value catalog, String pkSchema, Table pkTable, + String fkSchema, Table fkTable, ConstraintReferential fk) { + Value pkSchemaValue = getString(pkSchema); + Value pkTableValue = getString(pkTable.getName()); + Value fkSchemaValue = getString(fkSchema); + Value fkTableValue = getString(fkTable.getName()); + IndexColumn[] pkCols = fk.getRefColumns(); + IndexColumn[] fkCols = fk.getColumns(); + Value update = getRefAction(fk.getUpdateAction()); + Value delete = getRefAction(fk.getDeleteAction()); + Value fkNameValue = getString(fk.getName()); + Value pkNameValue = getString(fk.getReferencedConstraint().getName()); + for (int j = 0, len = fkCols.length; j < len; j++) { + result.addRow( + // PKTABLE_CAT + catalog, + // PKTABLE_SCHEM + pkSchemaValue, + // PKTABLE_NAME + pkTableValue, + // PKCOLUMN_NAME + getString(pkCols[j].column.getName()), + // FKTABLE_CAT + catalog, 
                    // FKTABLE_SCHEM
                    fkSchemaValue,
                    // FKTABLE_NAME
                    fkTableValue,
                    // FKCOLUMN_NAME
                    getString(fkCols[j].column.getName()),
                    // KEY_SEQ (1-based)
                    ValueSmallint.get((short) (j + 1)),
                    // UPDATE_RULE
                    update,
                    // DELETE_RULE
                    delete,
                    // FK_NAME
                    fkNameValue,
                    // PK_NAME
                    pkNameValue,
                    // DEFERRABILITY
                    IMPORTED_KEY_NOT_DEFERRABLE);
        }
    }

    /**
     * Maps a referential action to the SMALLINT code used in the
     * UPDATE_RULE and DELETE_RULE columns of cross reference results.
     *
     * @param action the referential action of the constraint
     * @return the corresponding importedKey* code
     * @throws DbException if the action is unknown
     */
    private static ValueSmallint getRefAction(ConstraintActionType action) {
        switch (action) {
        case CASCADE:
            return IMPORTED_KEY_CASCADE;
        case RESTRICT:
            return IMPORTED_KEY_RESTRICT;
        case SET_DEFAULT:
            return IMPORTED_KEY_DEFAULT;
        case SET_NULL:
            return IMPORTED_KEY_SET_NULL;
        default:
            throw DbException.getInternalError("action=" + action);
        }
    }

    /**
     * Returns a description of all data types of this database, as required
     * by JDBC {@code DatabaseMetaData.getTypeInfo()}. One row is added for
     * every value type except UNKNOWN (index 0).
     *
     * @return rows sorted by DATA_TYPE
     */
    @Override
    public ResultInterface getTypeInfo() {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER);
        result.addColumn("LITERAL_PREFIX", TypeInfo.TYPE_VARCHAR);
        result.addColumn("LITERAL_SUFFIX", TypeInfo.TYPE_VARCHAR);
        result.addColumn("CREATE_PARAMS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("CASE_SENSITIVE", TypeInfo.TYPE_BOOLEAN);
        result.addColumn("SEARCHABLE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("UNSIGNED_ATTRIBUTE", TypeInfo.TYPE_BOOLEAN);
        result.addColumn("FIXED_PREC_SCALE", TypeInfo.TYPE_BOOLEAN);
        result.addColumn("AUTO_INCREMENT", TypeInfo.TYPE_BOOLEAN);
        result.addColumn("LOCAL_TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("MINIMUM_SCALE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("MAXIMUM_SCALE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER);
        result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER);
        for (int i = 1, l = Value.TYPE_COUNT; i < l; i++) {
            DataType t = DataType.getDataType(i);
            Value name = getString(Value.getTypeName(t.type));
            result.addRow(
                    // TYPE_NAME
                    name,
                    // DATA_TYPE
                    ValueInteger.get(t.sqlType),
                    // PRECISION
                    ValueInteger.get(MathUtils.convertLongToInt(t.maxPrecision)),
                    // LITERAL_PREFIX
                    getString(t.prefix),
                    // LITERAL_SUFFIX
                    getString(t.suffix),
                    // CREATE_PARAMS
                    getString(t.params),
                    // NULLABLE
                    TYPE_NULLABLE,
                    // CASE_SENSITIVE
                    ValueBoolean.get(t.caseSensitive),
                    // SEARCHABLE
                    TYPE_SEARCHABLE,
                    // UNSIGNED_ATTRIBUTE
                    ValueBoolean.FALSE,
                    // FIXED_PREC_SCALE
                    ValueBoolean.get(t.type == Value.NUMERIC),
                    // AUTO_INCREMENT
                    ValueBoolean.FALSE,
                    // LOCAL_TYPE_NAME
                    name,
                    // MINIMUM_SCALE
                    ValueSmallint.get(MathUtils.convertIntToShort(t.minScale)),
                    // MAXIMUM_SCALE
                    ValueSmallint.get(MathUtils.convertIntToShort(t.maxScale)),
                    // SQL_DATA_TYPE (unused)
                    ValueNull.INSTANCE,
                    // SQL_DATETIME_SUB (unused)
                    ValueNull.INSTANCE,
                    // NUM_PREC_RADIX
                    getRadix(t.type, false));
        }
        // DATA_TYPE, better types first
        result.sortRows(new SortOrder(session, new int[] { 1 }));
        return result;
    }

    /**
     * Returns the numeric radix of the specified value type: 10 for NUMERIC
     * and DECFLOAT, 2 for other numeric types, and NULL for non-numeric
     * types.
     *
     * @param valueType the value type
     * @param small whether to return a SMALLINT instead of an INTEGER value
     * @return the radix, or {@code ValueNull.INSTANCE}
     */
    private static Value getRadix(int valueType, boolean small) {
        if (DataType.isNumericType(valueType)) {
            int radix = valueType == Value.NUMERIC || valueType == Value.DECFLOAT ? 10 : 2;
            return small ? ValueSmallint.get((short) radix) : ValueInteger.get(radix);
        }
        return ValueNull.INSTANCE;
    }

    /**
     * Returns information about the indexes of a table, as required by JDBC
     * {@code DatabaseMetaData.getIndexInfo()}.
     *
     * @param catalog catalog name, or {@code null} to match any catalog
     * @param schema schema name, {@code null} (any) or empty (main schema)
     * @param table table name, required
     * @param unique whether to report only columns of unique indexes
     * @param approximate whether an approximate row count is acceptable
     * @return rows sorted by NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION
     */
    @Override
    public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique,
            boolean approximate) {
        if (table == null) {
            throw DbException.getInvalidValueException("table", null);
        }
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("NON_UNIQUE", TypeInfo.TYPE_BOOLEAN);
        result.addColumn("INDEX_QUALIFIER", TypeInfo.TYPE_VARCHAR);
        result.addColumn("INDEX_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_SMALLINT);
        result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("ASC_OR_DESC", TypeInfo.TYPE_VARCHAR);
        result.addColumn("CARDINALITY", TypeInfo.TYPE_BIGINT);
        result.addColumn("PAGES", TypeInfo.TYPE_BIGINT);
        result.addColumn("FILTER_CONDITION", TypeInfo.TYPE_VARCHAR);
        if (!checkCatalogName(catalog)) {
            return result;
        }
        Database db = session.getDatabase();
        Value catalogValue = getString(db.getShortName());
        for (Schema s : getSchemas(schema)) {
            Table t = s.findTableOrView(session, table);
            if (t == null || t.isHidden()) {
                continue;
            }
            getIndexInfo(catalogValue, getString(s.getName()), t, unique, approximate, result, db);
        }
        // NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION
        result.sortRows(new SortOrder(session, new int[] { 3, 6, 5, 7 }));
        return result;
    }

    /**
     * Adds index rows of a single table. Indexes without CREATE SQL (system
     * generated ones) are skipped; when {@code unique} is set, indexes with
     * no unique columns are skipped and only the unique prefix of the index
     * columns is reported.
     */
    private void getIndexInfo(Value catalogValue, Value schemaValue, Table table, boolean unique,
            boolean approximate, SimpleResult result, Database db) {
        ArrayList<Index> indexes = table.getIndexes();
        if (indexes != null) {
            for (Index index : indexes) {
                if (index.getCreateSQL() == null) {
                    continue;
                }
                int uniqueColumnCount = index.getUniqueColumnCount();
                if (unique && uniqueColumnCount == 0) {
                    continue;
                }
                Value tableValue = getString(table.getName());
                Value indexValue = getString(index.getName());
                IndexColumn[] cols = index.getIndexColumns();
                ValueSmallint type = index.getIndexType().isHash() ? TABLE_INDEX_HASHED : TABLE_INDEX_OTHER;
                for (int i = 0, l = cols.length; i < l; i++) {
                    IndexColumn c = cols[i];
                    // Columns after the unique prefix do not enforce uniqueness
                    boolean nonUnique = i >= uniqueColumnCount;
                    if (unique && nonUnique) {
                        break;
                    }
                    result.addRow(
                            // TABLE_CAT
                            catalogValue,
                            // TABLE_SCHEM
                            schemaValue,
                            // TABLE_NAME
                            tableValue,
                            // NON_UNIQUE
                            ValueBoolean.get(nonUnique),
                            // INDEX_QUALIFIER
                            catalogValue,
                            // INDEX_NAME
                            indexValue,
                            // TYPE
                            type,
                            // ORDINAL_POSITION
                            ValueSmallint.get((short) (i + 1)),
                            // COLUMN_NAME
                            getString(c.column.getName()),
                            // ASC_OR_DESC
                            getString((c.sortType & SortOrder.DESCENDING) != 0 ? "D" : "A"),
                            // CARDINALITY
                            ValueBigint.get(approximate //
                                    ? index.getRowCountApproximation(session)
                                    : index.getRowCount(session)),
                            // PAGES
                            ValueBigint.get(index.getDiskSpaceUsed() / db.getPageSize()),
                            // FILTER_CONDITION
                            ValueNull.INSTANCE);
                }
            }
        }
    }

    /**
     * Returns the schemas of this database, as required by JDBC
     * {@code DatabaseMetaData.getSchemas(String, String)}.
     *
     * @param catalog catalog name, or {@code null} to match any catalog
     * @param schemaPattern LIKE pattern for schema names, or {@code null}
     * @return rows sorted by TABLE_SCHEM
     */
    @Override
    public ResultInterface getSchemas(String catalog, String schemaPattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_CATALOG", TypeInfo.TYPE_VARCHAR);
        if (!checkCatalogName(catalog)) {
            return result;
        }
        CompareLike schemaLike = getLike(schemaPattern);
        Collection<Schema> allSchemas = session.getDatabase().getAllSchemas();
        Value catalogValue = getString(session.getDatabase().getShortName());
        if (schemaLike == null) {
            for (Schema s : allSchemas) {
                result.addRow(getString(s.getName()), catalogValue);
            }
        } else {
            for (Schema s : allSchemas) {
                String name = s.getName();
                if (schemaLike.test(name)) {
                    result.addRow(getString(s.getName()), catalogValue);
                }
            }
        }
        // TABLE_CATALOG, TABLE_SCHEM
        result.sortRows(new SortOrder(session, new int[] { 0 }));
        return result;
    }

    /**
     * Returns the pseudo columns (the row id column and invisible columns)
     * of the matching tables, as required by JDBC
     * {@code DatabaseMetaData.getPseudoColumns()}. Table synonyms are
     * resolved to their target table.
     *
     * @param catalog catalog name, or {@code null} to match any catalog
     * @param schemaPattern LIKE pattern for schema names, or {@code null}
     * @param tableNamePattern LIKE pattern for table names, or {@code null}
     * @param columnNamePattern LIKE pattern for column names, or {@code null}
     * @return rows sorted by TABLE_SCHEM, TABLE_NAME and COLUMN_NAME
     */
    @Override
    public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
            String columnNamePattern) {
        SimpleResult result = getPseudoColumnsResult();
        if (!checkCatalogName(catalog)) {
            return result;
        }
        Database db = session.getDatabase();
        Value catalogValue = getString(db.getShortName());
        CompareLike columnLike = getLike(columnNamePattern);
        for (Schema schema : getSchemasForPattern(schemaPattern)) {
            Value schemaValue = getString(schema.getName());
            for (SchemaObject object : getTablesForPattern(schema, tableNamePattern)) {
                Value tableName = getString(object.getName());
                if (object instanceof Table) {
                    Table t = (Table) object;
                    if (!t.isHidden()) {
                        getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike);
                    }
                } else {
                    // Synonyms are reported under their own name
                    TableSynonym s = (TableSynonym) object;
                    Table t = s.getSynonymFor();
                    getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, t, columnLike);
                }
            }
        }
        // TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME
        result.sortRows(new SortOrder(session, new int[] { 1, 2, 3 }));
        return result;
    }

    /**
     * Adds pseudo column rows of a single table: the row id column (if any)
     * and all invisible columns.
     */
    private void getPseudoColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName,
            Table t, CompareLike columnLike) {
        Column rowId = t.getRowIdColumn();
        if (rowId != null) {
            getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, columnLike, rowId);
        }
        for (Column c : t.getColumns()) {
            if (!c.getVisible()) {
                getPseudoColumnsAdd(result, catalogValue, schemaValue, tableName, columnLike, c);
            }
        }
    }

    /**
     * Adds one pseudo column row, unless the column name does not match the
     * pattern.
     */
    private void getPseudoColumnsAdd(SimpleResult result, Value catalogValue, Value schemaValue, Value tableName,
            CompareLike columnLike, Column c) {
        String name = c.getName();
        if (columnLike != null && !columnLike.test(name)) {
            return;
        }
        TypeInfo type = c.getType();
        ValueInteger precision = ValueInteger.get(MathUtils.convertLongToInt(type.getPrecision()));
        result.addRow(
                // TABLE_CAT
                catalogValue,
                // TABLE_SCHEM
                schemaValue,
                // TABLE_NAME
                tableName,
                // COLUMN_NAME
                getString(name),
                // DATA_TYPE
                ValueInteger.get(DataType.convertTypeToSQLType(type)),
                // COLUMN_SIZE
                precision,
                // DECIMAL_DIGITS
                ValueInteger.get(type.getScale()),
                // NUM_PREC_RADIX
                getRadix(type.getValueType(), false),
                // COLUMN_USAGE
                NO_USAGE_RESTRICTIONS,
                // REMARKS
                getString(c.getComment()),
                // CHAR_OCTET_LENGTH
                precision,
                // IS_NULLABLE
                c.isNullable() ? YES : NO);
    }

    /**
     * Throws an exception if the session was closed.
     */
    @Override
    void checkClosed() {
        if (session.isClosed()) {
            throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN);
        }
    }

    /**
     * Converts the specified string to a VARCHAR value, or to NULL when the
     * string is {@code null}.
     */
    Value getString(String string) {
        return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE;
    }

    /**
     * Returns whether the specified catalog argument matches this database:
     * {@code null} and the empty string match everything, otherwise the name
     * must be equal to the database short name.
     */
    private boolean checkCatalogName(String catalog) {
        if (catalog != null && !catalog.isEmpty()) {
            Database db = session.getDatabase();
            return db.equalsIdentifiers(catalog, db.getShortName());
        }
        return true;
    }

    /**
     * Resolves a plain (non-pattern) JDBC schema argument: {@code null}
     * means all schemas, the empty string means the main schema, any other
     * value selects the schema with that exact name (or nothing).
     */
    private Collection<Schema> getSchemas(String schema) {
        Database db = session.getDatabase();
        if (schema == null) {
            return db.getAllSchemas();
        } else if (schema.isEmpty()) {
            return Collections.singleton(db.getMainSchema());
        } else {
            Schema s = db.findSchema(schema);
            if (s != null) {
                return Collections.singleton(s);
            }
            return Collections.emptySet();
        }
    }

    /**
     * Resolves a JDBC schema pattern argument: {@code null} means all
     * schemas, the empty string means the main schema, any other value is a
     * LIKE pattern.
     */
    private Collection<Schema> getSchemasForPattern(String schemaPattern) {
        Database db = session.getDatabase();
        if (schemaPattern == null) {
            return db.getAllSchemas();
        } else if (schemaPattern.isEmpty()) {
            return Collections.singleton(db.getMainSchema());
        } else {
            ArrayList<Schema> list = Utils.newSmallArrayList();
            CompareLike like = getLike(schemaPattern);
            for (Schema s : db.getAllSchemas()) {
                if (like.test(s.getName())) {
                    list.add(s);
                }
            }
            return list;
        }
    }

    /**
     * Returns the tables, views and table synonyms of the specified schema
     * that match the JDBC table name pattern ({@code null} matches all).
     */
    private Collection<? extends SchemaObject> getTablesForPattern(Schema schema, String tablePattern) {
        Collection<Table> tables = schema.getAllTablesAndViews(session);
        Collection<TableSynonym> synonyms = schema.getAllSynonyms();
        if (tablePattern == null) {
            if (tables.isEmpty()) {
                return synonyms;
            } else if (synonyms.isEmpty()) {
                return tables;
            }
            ArrayList<SchemaObject> list = new ArrayList<>(tables.size() + synonyms.size());
            list.addAll(tables);
            list.addAll(synonyms);
            return list;
        } else if (tables.isEmpty() && synonyms.isEmpty()) {
            return Collections.emptySet();
        } else {
            ArrayList<SchemaObject> list = Utils.newSmallArrayList();
            CompareLike like = getLike(tablePattern);
            for (Table t : tables) {
                if (like.test(t.getName())) {
                    list.add(t);
                }
            }
            for (TableSynonym t : synonyms) {
                if (like.test(t.getName())) {
                    list.add(t);
                }
            }
            return list;
        }
    }

    /**
     * Returns whether the specified schema matches a plain JDBC schema
     * argument: {@code null} matches everything, the empty string matches
     * the main schema only, any other value must be equal to the schema
     * name.
     */
    private boolean checkSchema(String schemaName, Schema schema) {
        if (schemaName == null) {
            return true;
        } else if (schemaName.isEmpty()) {
            return schema == session.getDatabase().getMainSchema();
        } else {
            return session.getDatabase().equalsIdentifiers(schemaName, schema.getName());
        }
    }

    /**
     * Builds a LIKE predicate for a JDBC metadata pattern with '\' as the
     * escape character, or returns {@code null} when the pattern is
     * {@code null} (match everything).
     */
    private CompareLike getLike(String pattern) {
        if (pattern == null) {
            return null;
        }
        CompareLike like = new CompareLike(session.getDatabase().getCompareMode(), "\\", null, false, false, null, //
                null, CompareLike.LikeType.LIKE);
        like.initPattern(pattern, '\\');
        return like;
    }

}
diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java
new file mode 100644
index 0000000000..70a96e669e
--- /dev/null
+++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaLocalBase.java
@@ -0,0 +1,173 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
 * Initial Developer: H2 Group
 */
package org.h2.jdbc.meta;

import org.h2.engine.Constants;
import org.h2.result.ResultInterface;
import org.h2.result.SimpleResult;
import org.h2.value.TypeInfo;

/**
 * Base implementation of database meta information. Provides the version
 * constants and the metadata calls whose results do not depend on the
 * database contents: these return empty results with the column layout
 * required by the JDBC {@code DatabaseMetaData} API.
 */
abstract class DatabaseMetaLocalBase extends DatabaseMeta {

    /**
     * Returns the full H2 version string.
     */
    @Override
    public final String getDatabaseProductVersion() {
        return Constants.FULL_VERSION;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getVersionColumns()}.
     */
    @Override
    public final ResultInterface getVersionColumns(String catalog, String schema, String table) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("SCOPE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER);
        result.addColumn("BUFFER_LENGTH", TypeInfo.TYPE_INTEGER);
        result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_SMALLINT);
        result.addColumn("PSEUDO_COLUMN", TypeInfo.TYPE_SMALLINT);
        return result;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getUDTs()}.
     */
    @Override
    public final ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("CLASS_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("BASE_TYPE", TypeInfo.TYPE_SMALLINT);
        return result;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getSuperTypes()}.
     */
    @Override
    public final ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SUPERTYPE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SUPERTYPE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SUPERTYPE_NAME", TypeInfo.TYPE_VARCHAR);
        return result;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getSuperTables()}.
     */
    @Override
    public final ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SUPERTABLE_NAME", TypeInfo.TYPE_VARCHAR);
        return result;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getAttributes()}.
     */
    @Override
    public final ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern,
            String attributeNamePattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TYPE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("ATTR_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("ATTR_TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("ATTR_SIZE", TypeInfo.TYPE_INTEGER);
        result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER);
        result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER);
        result.addColumn("NULLABLE", TypeInfo.TYPE_INTEGER);
        result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("ATTR_DEF", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SQL_DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("SQL_DATETIME_SUB", TypeInfo.TYPE_INTEGER);
        result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER);
        result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER);
        result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SCOPE_CATALOG", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SCOPE_SCHEMA", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SCOPE_TABLE", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SOURCE_DATA_TYPE", TypeInfo.TYPE_SMALLINT);
        return result;
    }

    /**
     * Returns the major version number of H2.
     */
    @Override
    public final int getDatabaseMajorVersion() {
        return Constants.VERSION_MAJOR;
    }

    /**
     * Returns the minor version number of H2.
     */
    @Override
    public final int getDatabaseMinorVersion() {
        return Constants.VERSION_MINOR;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getFunctions()}.
     */
    @Override
    public final ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("FUNCTION_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("FUNCTION_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("FUNCTION_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("FUNCTION_TYPE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR);
        return result;
    }

    /**
     * Always returns an empty result (no rows are added) with the columns
     * required by JDBC {@code DatabaseMetaData.getFunctionColumns()}.
     */
    @Override
    public final ResultInterface getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
            String columnNamePattern) {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("FUNCTION_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("FUNCTION_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("FUNCTION_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("COLUMN_TYPE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("TYPE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("PRECISION", TypeInfo.TYPE_INTEGER);
        result.addColumn("LENGTH", TypeInfo.TYPE_INTEGER);
        result.addColumn("SCALE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("RADIX", TypeInfo.TYPE_SMALLINT);
        result.addColumn("NULLABLE", TypeInfo.TYPE_SMALLINT);
        result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER);
        result.addColumn("ORDINAL_POSITION", TypeInfo.TYPE_INTEGER);
        result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR);
        result.addColumn("SPECIFIC_NAME", TypeInfo.TYPE_VARCHAR);
        return result;
    }

    /**
     * Creates an empty result with the columns required by JDBC
     * {@code DatabaseMetaData.getPseudoColumns()}; subclasses fill in the
     * rows.
     */
    final SimpleResult getPseudoColumnsResult() {
        checkClosed();
        SimpleResult result = new SimpleResult();
        result.addColumn("TABLE_CAT", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_SCHEM", TypeInfo.TYPE_VARCHAR);
        result.addColumn("TABLE_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("COLUMN_NAME", TypeInfo.TYPE_VARCHAR);
        result.addColumn("DATA_TYPE", TypeInfo.TYPE_INTEGER);
        result.addColumn("COLUMN_SIZE", TypeInfo.TYPE_INTEGER);
        result.addColumn("DECIMAL_DIGITS", TypeInfo.TYPE_INTEGER);
        result.addColumn("NUM_PREC_RADIX", TypeInfo.TYPE_INTEGER);
        result.addColumn("COLUMN_USAGE", TypeInfo.TYPE_VARCHAR);
        result.addColumn("REMARKS", TypeInfo.TYPE_VARCHAR);
        result.addColumn("CHAR_OCTET_LENGTH", TypeInfo.TYPE_INTEGER);
        result.addColumn("IS_NULLABLE", TypeInfo.TYPE_VARCHAR);
        return result;
    }

    /**
     * Throws an exception if the session was closed.
     */
    abstract void checkClosed();

}
diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java
new file mode 100644
index 0000000000..8c099838ae
--- /dev/null
+++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaRemote.java
@@ -0,0 +1,383 @@
/*
 * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
 * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import java.io.IOException; +import java.util.ArrayList; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionRemote; +import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; +import org.h2.result.ResultInterface; +import org.h2.result.ResultRemote; +import org.h2.value.Transfer; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Remote implementation of database meta information. + */ +public class DatabaseMetaRemote extends DatabaseMeta { + + static final int DEFAULT_NULL_ORDERING = 0; + + static final int GET_DATABASE_PRODUCT_VERSION = 1; + + static final int GET_SQL_KEYWORDS = 2; + + static final int GET_NUMERIC_FUNCTIONS = 3; + + static final int GET_STRING_FUNCTIONS = 4; + + static final int GET_SYSTEM_FUNCTIONS = 5; + + static final int GET_TIME_DATE_FUNCTIONS = 6; + + static final int GET_SEARCH_STRING_ESCAPE = 7; + + static final int GET_PROCEDURES_3 = 8; + + static final int GET_PROCEDURE_COLUMNS_4 = 9; + + static final int GET_TABLES_4 = 10; + + static final int GET_SCHEMAS = 11; + + static final int GET_CATALOGS = 12; + + static final int GET_TABLE_TYPES = 13; + + static final int GET_COLUMNS_4 = 14; + + static final int GET_COLUMN_PRIVILEGES_4 = 15; + + static final int GET_TABLE_PRIVILEGES_3 = 16; + + static final int GET_BEST_ROW_IDENTIFIER_5 = 17; + + static final int GET_VERSION_COLUMNS_3 = 18; + + static final int GET_PRIMARY_KEYS_3 = 19; + + static final int GET_IMPORTED_KEYS_3 = 20; + + static final int GET_EXPORTED_KEYS_3 = 21; + + static final int GET_CROSS_REFERENCE_6 = 22; + + static final int GET_TYPE_INFO = 23; + + static final int GET_INDEX_INFO_5 = 24; + + static final int GET_UDTS_4 = 25; + + static final int GET_SUPER_TYPES_3 = 26; + + static final int 
GET_SUPER_TABLES_3 = 27; + + static final int GET_ATTRIBUTES_4 = 28; + + static final int GET_DATABASE_MAJOR_VERSION = 29; + + static final int GET_DATABASE_MINOR_VERSION = 30; + + static final int GET_SCHEMAS_2 = 31; + + static final int GET_FUNCTIONS_3 = 32; + + static final int GET_FUNCTION_COLUMNS_4 = 33; + + static final int GET_PSEUDO_COLUMNS_4 = 34; + + private final SessionRemote session; + + private final ArrayList transferList; + + public DatabaseMetaRemote(SessionRemote session, ArrayList transferList) { + this.session = session; + this.transferList = transferList; + } + + @Override + public DefaultNullOrdering defaultNullOrdering() { + ResultInterface result = executeQuery(DEFAULT_NULL_ORDERING); + result.next(); + return DefaultNullOrdering.valueOf(result.currentRow()[0].getInt()); + } + + @Override + public String getDatabaseProductVersion() { + ResultInterface result = executeQuery(GET_DATABASE_PRODUCT_VERSION); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSQLKeywords() { + ResultInterface result = executeQuery(GET_SQL_KEYWORDS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getNumericFunctions() { + ResultInterface result = executeQuery(GET_NUMERIC_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getStringFunctions() { + ResultInterface result = executeQuery(GET_STRING_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSystemFunctions() { + ResultInterface result = executeQuery(GET_SYSTEM_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getTimeDateFunctions() { + ResultInterface result = executeQuery(GET_TIME_DATE_FUNCTIONS); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public String getSearchStringEscape() { + ResultInterface result = 
executeQuery(GET_SEARCH_STRING_ESCAPE); + result.next(); + return result.currentRow()[0].getString(); + } + + @Override + public ResultInterface getProcedures(String catalog, String schemaPattern, String procedureNamePattern) { + return executeQuery(GET_PROCEDURES_3, getString(catalog), getString(schemaPattern), + getString(procedureNamePattern)); + } + + @Override + public ResultInterface getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) { + return executeQuery(GET_PROCEDURE_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(procedureNamePattern), getString(columnNamePattern)); + } + + @Override + public ResultInterface getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) { + return executeQuery(GET_TABLES_4, getString(catalog), getString(schemaPattern), getString(tableNamePattern), + getStringArray(types)); + } + + @Override + public ResultInterface getSchemas() { + return executeQuery(GET_SCHEMAS); + } + + @Override + public ResultInterface getCatalogs() { + return executeQuery(GET_CATALOGS); + } + + @Override + public ResultInterface getTableTypes() { + return executeQuery(GET_TABLE_TYPES); + } + + @Override + public ResultInterface getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery(GET_COLUMNS_4, getString(catalog), getString(schemaPattern), getString(tableNamePattern), + getString(columnNamePattern)); + } + + @Override + public ResultInterface getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) { + return executeQuery(GET_COLUMN_PRIVILEGES_4, getString(catalog), getString(schema), getString(table), + getString(columnNamePattern)); + } + + @Override + public ResultInterface getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery(GET_TABLE_PRIVILEGES_3, getString(catalog), 
getString(schemaPattern), // + getString(tableNamePattern)); + } + + @Override + public ResultInterface getBestRowIdentifier(String catalog, String schema, String table, int scope, + boolean nullable) { + return executeQuery(GET_BEST_ROW_IDENTIFIER_5, getString(catalog), getString(schema), getString(table), + ValueInteger.get(scope), ValueBoolean.get(nullable)); + } + + @Override + public ResultInterface getVersionColumns(String catalog, String schema, String table) { + return executeQuery(GET_VERSION_COLUMNS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getPrimaryKeys(String catalog, String schema, String table) { + return executeQuery(GET_PRIMARY_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getImportedKeys(String catalog, String schema, String table) { + return executeQuery(GET_IMPORTED_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getExportedKeys(String catalog, String schema, String table) { + return executeQuery(GET_EXPORTED_KEYS_3, getString(catalog), getString(schema), getString(table)); + } + + @Override + public ResultInterface getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) { + return executeQuery(GET_CROSS_REFERENCE_6, getString(primaryCatalog), getString(primarySchema), + getString(primaryTable), getString(foreignCatalog), getString(foreignSchema), getString(foreignTable)); + } + + @Override + public ResultInterface getTypeInfo() { + return executeQuery(GET_TYPE_INFO); + } + + @Override + public ResultInterface getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) { + return executeQuery(GET_INDEX_INFO_5, getString(catalog), getString(schema), // + getString(table), ValueBoolean.get(unique), ValueBoolean.get(approximate)); + } 
+ + @Override + public ResultInterface getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) { + return executeQuery(GET_UDTS_4, getString(catalog), getString(schemaPattern), getString(typeNamePattern), + getIntArray(types)); + } + + @Override + public ResultInterface getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) { + return executeQuery(GET_SUPER_TYPES_3, getString(catalog), getString(schemaPattern), + getString(typeNamePattern)); + } + + @Override + public ResultInterface getSuperTables(String catalog, String schemaPattern, String tableNamePattern) { + return executeQuery(GET_SUPER_TABLES_3, getString(catalog), getString(schemaPattern), + getString(tableNamePattern)); + } + + @Override + public ResultInterface getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) { + return executeQuery(GET_ATTRIBUTES_4, getString(catalog), getString(schemaPattern), getString(typeNamePattern), + getString(attributeNamePattern)); + } + + @Override + public int getDatabaseMajorVersion() { + ResultInterface result = executeQuery(GET_DATABASE_MAJOR_VERSION); + result.next(); + return result.currentRow()[0].getInt(); + } + + @Override + public int getDatabaseMinorVersion() { + ResultInterface result = executeQuery(GET_DATABASE_MINOR_VERSION); + result.next(); + return result.currentRow()[0].getInt(); + } + + @Override + public ResultInterface getSchemas(String catalog, String schemaPattern) { + return executeQuery(GET_SCHEMAS_2, getString(catalog), getString(schemaPattern)); + } + + @Override + public ResultInterface getFunctions(String catalog, String schemaPattern, String functionNamePattern) { + return executeQuery(GET_FUNCTIONS_3, getString(catalog), getString(schemaPattern), + getString(functionNamePattern)); + } + + @Override + public ResultInterface getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) { + 
return executeQuery(GET_FUNCTION_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(functionNamePattern), getString(columnNamePattern)); + } + + @Override + public ResultInterface getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) { + return executeQuery(GET_PSEUDO_COLUMNS_4, getString(catalog), getString(schemaPattern), + getString(tableNamePattern), getString(columnNamePattern)); + } + + private ResultInterface executeQuery(int code, Value... args) { + if (session.isClosed()) { + throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN); + } + synchronized (session) { + int objectId = session.getNextId(); + for (int i = 0, count = 0; i < transferList.size(); i++) { + Transfer transfer = transferList.get(i); + try { + session.traceOperation("GET_META", objectId); + int len = args.length; + transfer.writeInt(SessionRemote.GET_JDBC_META).writeInt(code).writeInt(len); + for (int j = 0; j < len; j++) { + transfer.writeValue(args[j]); + } + session.done(transfer); + int columnCount = transfer.readInt(); + return new ResultRemote(session, transfer, objectId, columnCount, Integer.MAX_VALUE); + } catch (IOException e) { + session.removeServer(e, i--, ++count); + } + } + return null; + } + } + + private Value getIntArray(int[] array) { + if (array == null) { + return ValueNull.INSTANCE; + } + int cardinality = array.length; + Value[] values = new Value[cardinality]; + for (int i = 0; i < cardinality; i++) { + values[i] = ValueInteger.get(array[i]); + } + return ValueArray.get(TypeInfo.TYPE_INTEGER, values, session); + } + + private Value getStringArray(String[] array) { + if (array == null) { + return ValueNull.INSTANCE; + } + int cardinality = array.length; + Value[] values = new Value[cardinality]; + for (int i = 0; i < cardinality; i++) { + values[i] = getString(array[i]); + } + return ValueArray.get(TypeInfo.TYPE_VARCHAR, values, session); + } + + private Value getString(String string) { + 
return string != null ? ValueVarchar.get(string, session) : ValueNull.INSTANCE; + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java new file mode 100644 index 0000000000..9559233526 --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/DatabaseMetaServer.java @@ -0,0 +1,198 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbc.meta; + +import static org.h2.jdbc.meta.DatabaseMetaRemote.DEFAULT_NULL_ORDERING; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_ATTRIBUTES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_BEST_ROW_IDENTIFIER_5; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_CATALOGS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_COLUMN_PRIVILEGES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_CROSS_REFERENCE_6; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_MAJOR_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_MINOR_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_DATABASE_PRODUCT_VERSION; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_EXPORTED_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_FUNCTIONS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_FUNCTION_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_IMPORTED_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_INDEX_INFO_5; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_NUMERIC_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PRIMARY_KEYS_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PROCEDURES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_PROCEDURE_COLUMNS_4; +import static 
org.h2.jdbc.meta.DatabaseMetaRemote.GET_PSEUDO_COLUMNS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SCHEMAS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SCHEMAS_2; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SEARCH_STRING_ESCAPE; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SQL_KEYWORDS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_STRING_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SUPER_TABLES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SUPER_TYPES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_SYSTEM_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLES_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLE_PRIVILEGES_3; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TABLE_TYPES; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TIME_DATE_FUNCTIONS; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_TYPE_INFO; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_UDTS_4; +import static org.h2.jdbc.meta.DatabaseMetaRemote.GET_VERSION_COLUMNS_3; + +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.result.ResultInterface; +import org.h2.result.SimpleResult; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Server side support of database meta information. + */ +public final class DatabaseMetaServer { + + /** + * Process a database meta data request. 
+ * + * @param session the session + * @param code the operation code + * @param args the arguments + * @return the result + */ + public static ResultInterface process(SessionLocal session, int code, Value[] args) { + DatabaseMeta meta = session.getDatabaseMeta(); + switch (code) { + case DEFAULT_NULL_ORDERING: + return result(meta.defaultNullOrdering().ordinal()); + case GET_DATABASE_PRODUCT_VERSION: + return result(session, meta.getDatabaseProductVersion()); + case GET_SQL_KEYWORDS: + return result(session, meta.getSQLKeywords()); + case GET_NUMERIC_FUNCTIONS: + return result(session, meta.getNumericFunctions()); + case GET_STRING_FUNCTIONS: + return result(session, meta.getStringFunctions()); + case GET_SYSTEM_FUNCTIONS: + return result(session, meta.getSystemFunctions()); + case GET_TIME_DATE_FUNCTIONS: + return result(session, meta.getTimeDateFunctions()); + case GET_SEARCH_STRING_ESCAPE: + return result(session, meta.getSearchStringEscape()); + case GET_PROCEDURES_3: + return meta.getProcedures(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_PROCEDURE_COLUMNS_4: + return meta.getProcedureColumns(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_TABLES_4: + return meta.getTables(args[0].getString(), args[1].getString(), args[2].getString(), + toStringArray(args[3])); + case GET_SCHEMAS: + return meta.getSchemas(); + case GET_CATALOGS: + return meta.getCatalogs(); + case GET_TABLE_TYPES: + return meta.getTableTypes(); + case GET_COLUMNS_4: + return meta.getColumns(args[0].getString(), args[1].getString(), args[2].getString(), args[3].getString()); + case GET_COLUMN_PRIVILEGES_4: + return meta.getColumnPrivileges(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_TABLE_PRIVILEGES_3: + return meta.getTablePrivileges(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_BEST_ROW_IDENTIFIER_5: + return 
meta.getBestRowIdentifier(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getInt(), args[4].getBoolean()); + case GET_VERSION_COLUMNS_3: + return meta.getVersionColumns(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_PRIMARY_KEYS_3: + return meta.getPrimaryKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_IMPORTED_KEYS_3: + return meta.getImportedKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_EXPORTED_KEYS_3: + return meta.getExportedKeys(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_CROSS_REFERENCE_6: + return meta.getCrossReference(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString(), args[4].getString(), args[5].getString()); + case GET_TYPE_INFO: + return meta.getTypeInfo(); + case GET_INDEX_INFO_5: + return meta.getIndexInfo(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getBoolean(), args[4].getBoolean()); + case GET_UDTS_4: + return meta.getUDTs(args[0].getString(), args[1].getString(), args[2].getString(), toIntArray(args[3])); + case GET_SUPER_TYPES_3: + return meta.getSuperTypes(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_SUPER_TABLES_3: + return meta.getSuperTables(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_ATTRIBUTES_4: + return meta.getAttributes(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_DATABASE_MAJOR_VERSION: + return result(meta.getDatabaseMajorVersion()); + case GET_DATABASE_MINOR_VERSION: + return result(meta.getDatabaseMinorVersion()); + case GET_SCHEMAS_2: + return meta.getSchemas(args[0].getString(), args[1].getString()); + case GET_FUNCTIONS_3: + return meta.getFunctions(args[0].getString(), args[1].getString(), args[2].getString()); + case GET_FUNCTION_COLUMNS_4: + return meta.getFunctionColumns(args[0].getString(), 
args[1].getString(), args[2].getString(), + args[3].getString()); + case GET_PSEUDO_COLUMNS_4: + return meta.getPseudoColumns(args[0].getString(), args[1].getString(), args[2].getString(), + args[3].getString()); + default: + throw DbException.getUnsupportedException("META " + code); + } + } + + private static String[] toStringArray(Value value) { + if (value == ValueNull.INSTANCE) { + return null; + } + Value[] list = ((ValueArray) value).getList(); + int l = list.length; + String[] result = new String[l]; + for (int i = 0; i < l; i++) { + result[i] = list[i].getString(); + } + return result; + } + + private static int[] toIntArray(Value value) { + if (value == ValueNull.INSTANCE) { + return null; + } + Value[] list = ((ValueArray) value).getList(); + int l = list.length; + int[] result = new int[l]; + for (int i = 0; i < l; i++) { + result[i] = list[i].getInt(); + } + return result; + } + + private static ResultInterface result(int value) { + return result(ValueInteger.get(value)); + } + + private static ResultInterface result(SessionLocal session, String value) { + return result(ValueVarchar.get(value, session)); + } + + private static ResultInterface result(Value v) { + SimpleResult result = new SimpleResult(); + result.addColumn("RESULT", v.getType()); + result.addRow(v); + return result; + } + + private DatabaseMetaServer() { + } + +} diff --git a/h2/src/main/org/h2/jdbc/meta/package.html b/h2/src/main/org/h2/jdbc/meta/package.html new file mode 100644 index 0000000000..68e717102e --- /dev/null +++ b/h2/src/main/org/h2/jdbc/meta/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Implementation of the JDBC database metadata API (package java.sql). + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/jdbc/package.html b/h2/src/main/org/h2/jdbc/package.html index 6b0edcaf71..ffc7f90f3d 100644 --- a/h2/src/main/org/h2/jdbc/package.html +++ b/h2/src/main/org/h2/jdbc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java b/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java index 864605f130..0ff22cd22f 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java +++ b/h2/src/main/org/h2/jdbcx/JdbcConnectionPool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Christian d'Heureuse, www.source-code.biz * * This class is multi-licensed under LGPL, MPL 2.0, and EPL 1.0. @@ -9,7 +9,7 @@ * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation, either * version 3 of the License, or (at your option) any later version. 
- * See http://www.gnu.org/licenses/lgpl.html + * See https://www.gnu.org/licenses/lgpl-3.0.html * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied @@ -22,18 +22,19 @@ import java.io.PrintWriter; import java.sql.Connection; import java.sql.SQLException; -import java.util.ArrayList; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + import javax.sql.ConnectionEvent; import javax.sql.ConnectionEventListener; import javax.sql.ConnectionPoolDataSource; import javax.sql.DataSource; import javax.sql.PooledConnection; -import org.h2.util.New; -import org.h2.message.DbException; -/*## Java 1.7 ## -import java.util.logging.Logger; -//*/ +import org.h2.message.DbException; /** * A simple standalone JDBC connection pool. @@ -62,18 +63,19 @@ * (www.source-code.biz) * @author Thomas Mueller */ -public class JdbcConnectionPool implements DataSource, ConnectionEventListener { +public final class JdbcConnectionPool + implements DataSource, ConnectionEventListener, JdbcConnectionPoolBackwardsCompat { private static final int DEFAULT_TIMEOUT = 30; private static final int DEFAULT_MAX_CONNECTIONS = 10; private final ConnectionPoolDataSource dataSource; - private final ArrayList recycledConnections = New.arrayList(); + private final Queue recycledConnections = new ConcurrentLinkedQueue<>(); private PrintWriter logWriter; - private int maxConnections = DEFAULT_MAX_CONNECTIONS; - private int timeout = DEFAULT_TIMEOUT; - private int activeConnections; - private boolean isDisposed; + private volatile int maxConnections = DEFAULT_MAX_CONNECTIONS; + private volatile int timeout = DEFAULT_TIMEOUT; + private AtomicInteger activeConnections = new AtomicInteger(); + private AtomicBoolean isDisposed = new AtomicBoolean(); protected 
JdbcConnectionPool(ConnectionPoolDataSource dataSource) { this.dataSource = dataSource; @@ -119,13 +121,11 @@ public static JdbcConnectionPool create(String url, String user, * * @param max the maximum number of connections */ - public synchronized void setMaxConnections(int max) { + public void setMaxConnections(int max) { if (max < 1) { throw new IllegalArgumentException("Invalid maxConnections value: " + max); } this.maxConnections = max; - // notify waiting threads if the value was increased - notifyAll(); } /** @@ -133,7 +133,7 @@ public synchronized void setMaxConnections(int max) { * * @return the max the maximum number of connections */ - public synchronized int getMaxConnections() { + public int getMaxConnections() { return maxConnections; } @@ -143,7 +143,7 @@ public synchronized int getMaxConnections() { * @return the timeout in seconds */ @Override - public synchronized int getLoginTimeout() { + public int getLoginTimeout() { return timeout; } @@ -155,7 +155,7 @@ public synchronized int getLoginTimeout() { * @param seconds the timeout, 0 meaning the default */ @Override - public synchronized void setLoginTimeout(int seconds) { + public void setLoginTimeout(int seconds) { if (seconds == 0) { seconds = DEFAULT_TIMEOUT; } @@ -166,14 +166,12 @@ public synchronized void setLoginTimeout(int seconds) { * Closes all unused pooled connections. * Exceptions while closing are written to the log stream (if set). 
*/ - public synchronized void dispose() { - if (isDisposed) { - return; - } - isDisposed = true; - ArrayList list = recycledConnections; - for (int i = 0, size = list.size(); i < size; i++) { - closeConnection(list.get(i)); + public void dispose() { + isDisposed.set(true); + + PooledConnection pc; + while ((pc = recycledConnections.poll()) != null) { + closeConnection(pc); } } @@ -192,19 +190,29 @@ public synchronized void dispose() { */ @Override public Connection getConnection() throws SQLException { - long max = System.currentTimeMillis() + timeout * 1000; + long max = System.nanoTime() + timeout * 1_000_000_000L; + int spin = 0; do { - synchronized (this) { - if (activeConnections < maxConnections) { - return getConnectionNow(); - } + if (activeConnections.incrementAndGet() <= maxConnections) { try { - wait(1000); - } catch (InterruptedException e) { - // ignore + return getConnectionNow(); + } catch (Throwable t) { + activeConnections.decrementAndGet(); + throw t; } + } else { + activeConnections.decrementAndGet(); + } + if (--spin >= 0) { + continue; + } + try { + spin = 3; + Thread.sleep(1); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } - } while (System.currentTimeMillis() <= max); + } while (System.nanoTime() - max <= 0); throw new SQLException("Login timeout", "08001", 8001); } @@ -217,17 +225,14 @@ public Connection getConnection(String user, String password) { } private Connection getConnectionNow() throws SQLException { - if (isDisposed) { + if (isDisposed.get()) { throw new IllegalStateException("Connection pool has been disposed."); } - PooledConnection pc; - if (!recycledConnections.isEmpty()) { - pc = recycledConnections.remove(recycledConnections.size() - 1); - } else { + PooledConnection pc = recycledConnections.poll(); + if (pc == null) { pc = dataSource.getPooledConnection(); } Connection conn = pc.getConnection(); - activeConnections++; pc.addConnectionEventListener(this); return conn; } @@ -239,19 +244,20 @@ 
private Connection getConnectionNow() throws SQLException { * * @param pc the pooled connection */ - synchronized void recycleConnection(PooledConnection pc) { - if (activeConnections <= 0) { + private void recycleConnection(PooledConnection pc) { + int active = activeConnections.decrementAndGet(); + if (active < 0) { + activeConnections.incrementAndGet(); throw new AssertionError(); } - activeConnections--; - if (!isDisposed && activeConnections < maxConnections) { + if (!isDisposed.get() && active < maxConnections) { recycledConnections.add(pc); + if (isDisposed.get()) { + dispose(); + } } else { closeConnection(pc); } - if (activeConnections >= maxConnections - 1) { - notifyAll(); - } } private void closeConnection(PooledConnection pc) { @@ -290,8 +296,8 @@ public void connectionErrorOccurred(ConnectionEvent event) { * * @return the number of active connections. */ - public synchronized int getActiveConnections() { - return activeConnections; + public int getActiveConnections() { + return activeConnections.get(); } /** @@ -311,34 +317,42 @@ public void setLogWriter(PrintWriter logWriter) { } /** - * [Not supported] Return an object of this class if possible. + * Return an object of this class if possible. * * @param iface the class + * @return this */ @Override + @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - throw DbException.getUnsupportedException("unwrap"); + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw DbException.toSQLException(e); + } } /** - * [Not supported] Checks if unwrap can return an object of this class. + * Checks if unwrap can return an object of this class. 
* * @param iface the class + * @return whether or not the interface is assignable from this class */ @Override public boolean isWrapperFor(Class iface) throws SQLException { - throw DbException.getUnsupportedException("isWrapperFor"); + return iface != null && iface.isAssignableFrom(getClass()); } /** * [Not supported] */ -/*## Java 1.7 ## @Override public Logger getParentLogger() { return null; } -//*/ } diff --git a/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java b/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java new file mode 100644 index 0000000000..b901d49301 --- /dev/null +++ b/h2/src/main/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java @@ -0,0 +1,16 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbcx; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. + */ +public interface JdbcConnectionPoolBackwardsCompat { + + // compatibility interface + +} diff --git a/h2/src/main/org/h2/jdbcx/JdbcDataSource.java b/h2/src/main/org/h2/jdbcx/JdbcDataSource.java index 54110e8694..4c0ab0cfad 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcDataSource.java +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSource.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -11,7 +11,7 @@ import java.io.Serializable; import java.sql.Connection; import java.sql.SQLException; -import java.util.Properties; +import java.util.logging.Logger; import javax.naming.Reference; import javax.naming.Referenceable; import javax.naming.StringRefAddr; @@ -20,15 +20,11 @@ import javax.sql.PooledConnection; import javax.sql.XAConnection; import javax.sql.XADataSource; -import org.h2.Driver; import org.h2.jdbc.JdbcConnection; +import org.h2.message.DbException; import org.h2.message.TraceObject; import org.h2.util.StringUtils; -/*## Java 1.7 ## -import java.util.logging.Logger; -//*/ - /** * A data source for H2 database connections. It is a factory for XAConnection * and Connection objects. This class is usually registered in a JNDI naming @@ -62,8 +58,8 @@ * In this example the user name and password are serialized as * well; this may be a security problem in some cases. */ -public class JdbcDataSource extends TraceObject implements XADataSource, - DataSource, ConnectionPoolDataSource, Serializable, Referenceable { +public final class JdbcDataSource extends TraceObject implements XADataSource, DataSource, ConnectionPoolDataSource, + Serializable, Referenceable, JdbcDataSourceBackwardsCompat { private static final long serialVersionUID = 1288136338451857771L; @@ -75,10 +71,6 @@ public class JdbcDataSource extends TraceObject implements XADataSource, private String url = ""; private String description; - static { - org.h2.Driver.load(); - } - /** * The public constructor. */ @@ -92,6 +84,8 @@ public JdbcDataSource() { * Called when de-serializing the object. 
* * @param in the input stream + * @throws IOException on failure + * @throws ClassNotFoundException on failure */ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { @@ -158,8 +152,7 @@ public void setLogWriter(PrintWriter out) { @Override public Connection getConnection() throws SQLException { debugCodeCall("getConnection"); - return getJdbcConnection(userName, - StringUtils.cloneCharArray(passwordChars)); + return new JdbcConnection(url, null, userName, StringUtils.cloneCharArray(passwordChars), false); } /** @@ -174,29 +167,9 @@ public Connection getConnection() throws SQLException { public Connection getConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getConnection("+quote(user)+", \"\");"); - } - return getJdbcConnection(user, convertToCharArray(password)); - } - - private JdbcConnection getJdbcConnection(String user, char[] password) - throws SQLException { - if (isDebugEnabled()) { - debugCode("getJdbcConnection("+quote(user)+", new char[0]);"); - } - Properties info = new Properties(); - info.setProperty("user", user); - info.put("password", password); - Connection conn = Driver.load().connect(url, info); - if (conn == null) { - throw new SQLException("No suitable driver found for " + url, - "08001", 8001); - } else if (!(conn instanceof JdbcConnection)) { - throw new SQLException( - "Connecting with old version is not supported: " + url, - "08001", 8001); + debugCode("getConnection(" + quote(user) + ", \"\")"); } - return (JdbcConnection) conn; + return new JdbcConnection(url, null, user, password, false); } /** @@ -250,7 +223,7 @@ public void setUrl(String url) { */ public void setPassword(String password) { debugCodeCall("setPassword", ""); - this.passwordChars = convertToCharArray(password); + this.passwordChars = password == null ? 
null : password.toCharArray(); } /** @@ -260,15 +233,11 @@ public void setPassword(String password) { */ public void setPasswordChars(char[] password) { if (isDebugEnabled()) { - debugCode("setPasswordChars(new char[0]);"); + debugCode("setPasswordChars(new char[0])"); } this.passwordChars = password; } - private static char[] convertToCharArray(String s) { - return s == null ? null : s.toCharArray(); - } - private static String convertToString(char[] a) { return a == null ? null : new String(a); } @@ -336,7 +305,7 @@ public Reference getReference() { ref.add(new StringRefAddr("url", url)); ref.add(new StringRefAddr("user", userName)); ref.add(new StringRefAddr("password", convertToString(passwordChars))); - ref.add(new StringRefAddr("loginTimeout", String.valueOf(loginTimeout))); + ref.add(new StringRefAddr("loginTimeout", Integer.toString(loginTimeout))); ref.add(new StringRefAddr("description", description)); return ref; } @@ -349,9 +318,8 @@ public Reference getReference() { @Override public XAConnection getXAConnection() throws SQLException { debugCodeCall("getXAConnection"); - int id = getNextId(XA_DATA_SOURCE); - return new JdbcXAConnection(factory, id, getJdbcConnection(userName, - StringUtils.cloneCharArray(passwordChars))); + return new JdbcXAConnection(factory, getNextId(XA_DATA_SOURCE), + new JdbcConnection(url, null, userName, StringUtils.cloneCharArray(passwordChars), false)); } /** @@ -366,11 +334,10 @@ public XAConnection getXAConnection() throws SQLException { public XAConnection getXAConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getXAConnection("+quote(user)+", \"\");"); + debugCode("getXAConnection(" + quote(user) + ", \"\")"); } - int id = getNextId(XA_DATA_SOURCE); - return new JdbcXAConnection(factory, id, getJdbcConnection(user, - convertToCharArray(password))); + return new JdbcXAConnection(factory, getNextId(XA_DATA_SOURCE), + new JdbcConnection(url, null, user, password, false)); } /** 
@@ -397,40 +364,48 @@ public PooledConnection getPooledConnection() throws SQLException { public PooledConnection getPooledConnection(String user, String password) throws SQLException { if (isDebugEnabled()) { - debugCode("getPooledConnection("+quote(user)+", \"\");"); + debugCode("getPooledConnection(" + quote(user) + ", \"\")"); } return getXAConnection(user, password); } /** - * [Not supported] Return an object of this class if possible. + * Return an object of this class if possible. * * @param iface the class + * @return this */ @Override + @SuppressWarnings("unchecked") public T unwrap(Class iface) throws SQLException { - throw unsupported("unwrap"); + try { + if (isWrapperFor(iface)) { + return (T) this; + } + throw DbException.getInvalidValueException("iface", iface); + } catch (Exception e) { + throw logAndConvert(e); + } } /** - * [Not supported] Checks if unwrap can return an object of this class. + * Checks if unwrap can return an object of this class. * * @param iface the class + * @return whether or not the interface is assignable from this class */ @Override public boolean isWrapperFor(Class iface) throws SQLException { - throw unsupported("isWrapperFor"); + return iface != null && iface.isAssignableFrom(getClass()); } /** * [Not supported] */ -/*## Java 1.7 ## @Override public Logger getParentLogger() { return null; } -//*/ /** * INTERNAL diff --git a/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java b/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java new file mode 100644 index 0000000000..cf00ae6b82 --- /dev/null +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java @@ -0,0 +1,16 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.jdbcx; + +/** + * Allows us to compile on older platforms, while still implementing the methods + * from the newer JDBC API. 
+ */ +public interface JdbcDataSourceBackwardsCompat { + + // compatibility interface + +} diff --git a/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java b/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java index 0536858f76..07673fff43 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java +++ b/h2/src/main/org/h2/jdbcx/JdbcDataSourceFactory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -21,20 +21,23 @@ * This class is used to create new DataSource objects. * An application should not use this class directly. */ -public class JdbcDataSourceFactory implements ObjectFactory { +public final class JdbcDataSourceFactory implements ObjectFactory { + + private static final TraceSystem traceSystem; - private static TraceSystem cachedTraceSystem; private final Trace trace; static { - org.h2.Driver.load(); + traceSystem = new TraceSystem(SysProperties.CLIENT_TRACE_DIRECTORY + "h2datasource" + + Constants.SUFFIX_TRACE_FILE); + traceSystem.setLevelFile(SysProperties.DATASOURCE_TRACE_LEVEL); } /** * The public constructor to create new factory objects. 
*/ public JdbcDataSourceFactory() { - trace = getTraceSystem().getTrace("JDBCX"); + trace = traceSystem.getTrace(Trace.JDBCX); } /** @@ -74,17 +77,10 @@ public synchronized Object getObjectInstance(Object obj, Name name, /** * INTERNAL + * @return TraceSystem */ public static TraceSystem getTraceSystem() { - synchronized (JdbcDataSourceFactory.class) { - if (cachedTraceSystem == null) { - cachedTraceSystem = new TraceSystem( - SysProperties.CLIENT_TRACE_DIRECTORY + "h2datasource" + - Constants.SUFFIX_TRACE_FILE); - cachedTraceSystem.setLevelFile(SysProperties.DATASOURCE_TRACE_LEVEL); - } - return cachedTraceSystem; - } + return traceSystem; } Trace getTrace() { diff --git a/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java b/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java index 0e85005fcc..fe7cbe5953 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java +++ b/h2/src/main/org/h2/jdbcx/JdbcXAConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; @@ -10,6 +10,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; + import javax.sql.ConnectionEvent; import javax.sql.ConnectionEventListener; import javax.sql.StatementEventListener; @@ -20,11 +21,9 @@ import org.h2.api.ErrorCode; import org.h2.jdbc.JdbcConnection; -import org.h2.util.JdbcUtils; -import org.h2.util.New; - import org.h2.message.DbException; import org.h2.message.TraceObject; +import org.h2.util.Utils; /** @@ -32,7 +31,7 @@ * An application developer usually does not use this interface. * It is used by the transaction manager internally. 
*/ -public class JdbcXAConnection extends TraceObject implements XAConnection, +public final class JdbcXAConnection extends TraceObject implements XAConnection, XAResource { private final JdbcDataSourceFactory factory; @@ -42,14 +41,10 @@ public class JdbcXAConnection extends TraceObject implements XAConnection, // this connection is replaced whenever getConnection is called private volatile Connection handleConn; - private final ArrayList listeners = New.arrayList(); + private final ArrayList listeners = Utils.newSmallArrayList(); private Xid currentTransaction; private boolean prepared; - static { - org.h2.Driver.load(); - } - JdbcXAConnection(JdbcDataSourceFactory factory, int id, JdbcConnection physicalConn) { this.factory = factory; @@ -116,7 +111,7 @@ public Connection getConnection() throws SQLException { */ @Override public void addConnectionEventListener(ConnectionEventListener listener) { - debugCode("addConnectionEventListener(listener);"); + debugCode("addConnectionEventListener(listener)"); listeners.add(listener); } @@ -127,7 +122,7 @@ public void addConnectionEventListener(ConnectionEventListener listener) { */ @Override public void removeConnectionEventListener(ConnectionEventListener listener) { - debugCode("removeConnectionEventListener(listener);"); + debugCode("removeConnectionEventListener(listener)"); listeners.remove(listener); } @@ -135,7 +130,7 @@ public void removeConnectionEventListener(ConnectionEventListener listener) { * INTERNAL */ void closedHandle() { - debugCode("closedHandle();"); + debugCodeCall("closedHandle"); ConnectionEvent event = new ConnectionEvent(this); // go backward so that a listener can remove itself // (otherwise we need to clone the list) @@ -177,7 +172,7 @@ public boolean setTransactionTimeout(int seconds) { */ @Override public boolean isSameRM(XAResource xares) { - debugCode("isSameRM(xares);"); + debugCode("isSameRM(xares)"); return xares == this; } @@ -193,22 +188,18 @@ public boolean isSameRM(XAResource xares) 
{ public Xid[] recover(int flag) throws XAException { debugCodeCall("recover", quoteFlags(flag)); checkOpen(); - Statement stat = null; - try { - stat = physicalConn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.IN_DOUBT ORDER BY TRANSACTION"); - ArrayList list = New.arrayList(); + try (Statement stat = physicalConn.createStatement()) { + ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT ORDER BY TRANSACTION_NAME"); + ArrayList list = Utils.newSmallArrayList(); while (rs.next()) { - String tid = rs.getString("TRANSACTION"); + String tid = rs.getString("TRANSACTION_NAME"); int id = getNextId(XID); Xid xid = new JdbcXid(factory, id, tid); list.add(xid); } rs.close(); - Xid[] result = new Xid[list.size()]; - list.toArray(result); - if (list.size() > 0) { + Xid[] result = list.toArray(new Xid[0]); + if (!list.isEmpty()) { prepared = true; } return result; @@ -216,8 +207,6 @@ public Xid[] recover(int flag) throws XAException { XAException xa = new XAException(XAException.XAER_RMERR); xa.initCause(e); throw xa; - } finally { - JdbcUtils.closeSilently(stat); } } @@ -230,21 +219,18 @@ public Xid[] recover(int flag) throws XAException { @Override public int prepare(Xid xid) throws XAException { if (isDebugEnabled()) { - debugCode("prepare("+JdbcXid.toString(xid)+");"); + debugCode("prepare(" + quoteXid(xid) + ')'); } checkOpen(); if (!currentTransaction.equals(xid)) { throw new XAException(XAException.XAER_INVAL); } - Statement stat = null; - try { - stat = physicalConn.createStatement(); - stat.execute("PREPARE COMMIT " + JdbcXid.toString(xid)); + + try (Statement stat = physicalConn.createStatement()) { + stat.execute(JdbcXid.toString(new StringBuilder("PREPARE COMMIT \""), xid).append('"').toString()); prepared = true; } catch (SQLException e) { throw convertException(e); - } finally { - JdbcUtils.closeSilently(stat); } return XA_OK; } @@ -258,7 +244,7 @@ public int prepare(Xid xid) throws 
XAException { @Override public void forget(Xid xid) { if (isDebugEnabled()) { - debugCode("forget("+JdbcXid.toString(xid)+");"); + debugCode("forget(" + quoteXid(xid) + ')'); } prepared = false; } @@ -271,21 +257,19 @@ public void forget(Xid xid) { @Override public void rollback(Xid xid) throws XAException { if (isDebugEnabled()) { - debugCode("rollback("+JdbcXid.toString(xid)+");"); + debugCode("rollback(" + quoteXid(xid) + ')'); } try { - physicalConn.rollback(); - physicalConn.setAutoCommit(true); if (prepared) { - Statement stat = null; - try { - stat = physicalConn.createStatement(); - stat.execute("ROLLBACK TRANSACTION " + JdbcXid.toString(xid)); - } finally { - JdbcUtils.closeSilently(stat); + try (Statement stat = physicalConn.createStatement()) { + stat.execute(JdbcXid.toString( // + new StringBuilder("ROLLBACK TRANSACTION \""), xid).append('"').toString()); } prepared = false; + } else { + physicalConn.rollback(); } + physicalConn.setAutoCommit(true); } catch (SQLException e) { throw convertException(e); } @@ -301,7 +285,7 @@ public void rollback(Xid xid) throws XAException { @Override public void end(Xid xid, int flags) throws XAException { if (isDebugEnabled()) { - debugCode("end("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");"); + debugCode("end(" + quoteXid(xid) + ", " + quoteFlags(flags) + ')'); } // TODO transaction end: implement this method if (flags == TMSUSPEND) { @@ -322,7 +306,7 @@ public void end(Xid xid, int flags) throws XAException { @Override public void start(Xid xid, int flags) throws XAException { if (isDebugEnabled()) { - debugCode("start("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");"); + debugCode("start(" + quoteXid(xid) + ", " + quoteFlags(flags) + ')'); } if (flags == TMRESUME) { return; @@ -352,22 +336,22 @@ public void start(Xid xid, int flags) throws XAException { @Override public void commit(Xid xid, boolean onePhase) throws XAException { if (isDebugEnabled()) { - debugCode("commit("+JdbcXid.toString(xid)+", 
"+onePhase+");"); + debugCode("commit(" + quoteXid(xid) + ", " + onePhase + ')'); } - Statement stat = null; + try { if (onePhase) { physicalConn.commit(); } else { - stat = physicalConn.createStatement(); - stat.execute("COMMIT TRANSACTION " + JdbcXid.toString(xid)); - prepared = false; + try (Statement stat = physicalConn.createStatement()) { + stat.execute( + JdbcXid.toString(new StringBuilder("COMMIT TRANSACTION \""), xid).append('"').toString()); + prepared = false; + } } physicalConn.setAutoCommit(true); } catch (SQLException e) { throw convertException(e); - } finally { - JdbcUtils.closeSilently(stat); } currentTransaction = null; } @@ -406,6 +390,10 @@ private static XAException convertException(SQLException e) { return xa; } + private static String quoteXid(Xid xid) { + return JdbcXid.toString(new StringBuilder(), xid).toString().replace('-', '$'); + } + private static String quoteFlags(int flags) { StringBuilder buff = new StringBuilder(); if ((flags & XAResource.TMENDRSCAN) != 0) { @@ -438,7 +426,7 @@ private static String quoteFlags(int flags) { if (buff.length() == 0) { buff.append("|XAResource.TMNOFLAGS"); } - return buff.toString().substring(1); + return buff.substring(1); } private void checkOpen() throws XAException { @@ -450,7 +438,7 @@ private void checkOpen() throws XAException { /** * A pooled connection. 
*/ - class PooledJdbcConnection extends JdbcConnection { + final class PooledJdbcConnection extends JdbcConnection { private boolean isClosed; @@ -478,11 +466,11 @@ public synchronized boolean isClosed() throws SQLException { } @Override - protected synchronized void checkClosed(boolean write) { + protected synchronized void checkClosed() { if (isClosed) { throw DbException.get(ErrorCode.OBJECT_CLOSED); } - super.checkClosed(write); + super.checkClosed(); } } diff --git a/h2/src/main/org/h2/jdbcx/JdbcXid.java b/h2/src/main/org/h2/jdbcx/JdbcXid.java index d9e4919370..c31cc0f4ff 100644 --- a/h2/src/main/org/h2/jdbcx/JdbcXid.java +++ b/h2/src/main/org/h2/jdbcx/JdbcXid.java @@ -1,25 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jdbcx; -import java.util.StringTokenizer; +import java.util.Base64; import javax.transaction.xa.Xid; import org.h2.api.ErrorCode; import org.h2.message.DbException; import org.h2.message.TraceObject; -import org.h2.util.StringUtils; /** * An object of this class represents a transaction id. 
*/ -public class JdbcXid extends TraceObject implements Xid { +public final class JdbcXid extends TraceObject implements Xid { private static final String PREFIX = "XID"; + private static final Base64.Encoder ENCODER = Base64.getUrlEncoder().withoutPadding(); + private final int formatId; private final byte[] branchQualifier; private final byte[] globalTransactionId; @@ -27,31 +28,29 @@ public class JdbcXid extends TraceObject implements Xid { JdbcXid(JdbcDataSourceFactory factory, int id, String tid) { setTrace(factory.getTrace(), TraceObject.XID, id); try { - StringTokenizer tokenizer = new StringTokenizer(tid, "_"); - String prefix = tokenizer.nextToken(); - if (!PREFIX.equals(prefix)) { - throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); + String[] splits = tid.split("\\|"); + if (splits.length == 4 && PREFIX.equals(splits[0])) { + formatId = Integer.parseInt(splits[1]); + Base64.Decoder decoder = Base64.getUrlDecoder(); + branchQualifier = decoder.decode(splits[2]); + globalTransactionId = decoder.decode(splits[3]); + return; } - formatId = Integer.parseInt(tokenizer.nextToken()); - branchQualifier = StringUtils.convertHexToBytes(tokenizer.nextToken()); - globalTransactionId = StringUtils.convertHexToBytes(tokenizer.nextToken()); - } catch (RuntimeException e) { - throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); + } catch (IllegalArgumentException e) { } + throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid); } /** * INTERNAL + * @param builder to put result into + * @param xid to provide string representation for + * @return provided StringBuilder */ - public static String toString(Xid xid) { - StringBuilder buff = new StringBuilder(PREFIX); - buff.append('_'). - append(xid.getFormatId()). - append('_'). - append(StringUtils.convertBytesToHex(xid.getBranchQualifier())). - append('_'). 
- append(StringUtils.convertBytesToHex(xid.getGlobalTransactionId())); - return buff.toString(); + static StringBuilder toString(StringBuilder builder, Xid xid) { + return builder.append(PREFIX).append('|').append(xid.getFormatId()) // + .append('|').append(ENCODER.encodeToString(xid.getBranchQualifier())) // + .append('|').append(ENCODER.encodeToString(xid.getGlobalTransactionId())); } /** diff --git a/h2/src/main/org/h2/jdbcx/package.html b/h2/src/main/org/h2/jdbcx/package.html index 418e83f2d7..aae3de2eb6 100644 --- a/h2/src/main/org/h2/jdbcx/package.html +++ b/h2/src/main/org/h2/jdbcx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/jmx/DatabaseInfo.java b/h2/src/main/org/h2/jmx/DatabaseInfo.java index a7dddac7bb..9e14dfdde4 100644 --- a/h2/src/main/org/h2/jmx/DatabaseInfo.java +++ b/h2/src/main/org/h2/jmx/DatabaseInfo.java @@ -1,16 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; import java.lang.management.ManagementFactory; - -import java.sql.Timestamp; +import java.util.HashMap; import java.util.Hashtable; import java.util.Map; -import java.util.TreeMap; +import java.util.Map.Entry; import javax.management.JMException; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -18,10 +17,9 @@ import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.store.PageStore; +import org.h2.engine.SessionLocal; import org.h2.table.Table; -import org.h2.util.New; +import org.h2.util.NetworkConnectionInfo; /** * The MBean implementation. 
@@ -31,7 +29,7 @@ */ public class DatabaseInfo implements DatabaseInfoMBean { - private static final Map MBEANS = New.hashMap(); + private static final Map MBEANS = new HashMap<>(); /** Database. */ private final Database database; @@ -55,7 +53,7 @@ private static ObjectName getObjectName(String name, String path) throws JMException { name = name.replace(':', '_'); path = path.replace(':', '_'); - Hashtable map = new Hashtable(); + Hashtable map = new Hashtable<>(); map.put("name", name); map.put("path", path); return new ObjectName("org.h2", map); @@ -66,6 +64,7 @@ private static ObjectName getObjectName(String name, String path) * * @param connectionInfo connection info * @param database database + * @throws JMException on failure */ public static void registerMBean(ConnectionInfo connectionInfo, Database database) throws JMException { @@ -85,6 +84,7 @@ public static void registerMBean(ConnectionInfo connectionInfo, * Unregisters the MBean for the database if one is registered. * * @param name database name + * @throws JMException on failure */ public static void unregisterMBean(String name) throws Exception { ObjectName mbeanObjectName = MBEANS.remove(name); @@ -109,26 +109,6 @@ public String getMode() { return database.getMode().getName(); } - @Override - public boolean isMultiThreaded() { - return database.isMultiThreaded(); - } - - @Override - public boolean isMvcc() { - return database.isMultiVersion(); - } - - @Override - public int getLogMode() { - return database.getLogMode(); - } - - @Override - public void setLogMode(int value) { - database.setLogMode(value); - } - @Override public int getTraceLevel() { return database.getTraceSystem().getLevelFile(); @@ -139,66 +119,37 @@ public void setTraceLevel(int level) { database.getTraceSystem().setLevelFile(level); } - @Override - public long getFileWriteCountTotal() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getWriteCountTotal(); - 
} - // TODO remove this method when removing the page store - // (the MVStore doesn't support it) - return 0; - } - @Override public long getFileWriteCount() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getWriteCount(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getFileStore().getWriteCount(); } - return database.getMvStore().getStore().getFileStore().getReadCount(); + return 0; } @Override public long getFileReadCount() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getReadCount(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getFileStore().getReadCount(); } - return database.getMvStore().getStore().getFileStore().getReadCount(); + return 0; } @Override public long getFileSize() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getPageCount() * p.getPageSize() / 1024; + long size = 0; + if (database.isPersistent()) { + size = database.getStore().getMvStore().getFileStore().size(); } - return database.getMvStore().getStore().getFileStore().size(); + return size / 1024; } @Override public int getCacheSizeMax() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getCache().getMaxMemory(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getCacheSize() * 1024; } - return database.getMvStore().getStore().getCacheSize() * 1024; + return 0; } @Override @@ -210,67 +161,64 @@ public void setCacheSizeMax(int kb) { @Override public int getCacheSize() { - if (!database.isPersistent()) { - return 0; - } - PageStore p = database.getPageStore(); - if (p != null) { - return p.getCache().getMemory(); + if (database.isPersistent()) { + return database.getStore().getMvStore().getCacheSizeUsed() * 1024; } 
- return database.getMvStore().getStore().getCacheSizeUsed() * 1024; + return 0; } @Override public String getVersion() { - return Constants.getFullVersion(); + return Constants.FULL_VERSION; } @Override public String listSettings() { - StringBuilder buff = new StringBuilder(); - for (Map.Entry e : - new TreeMap( - database.getSettings().getSettings()).entrySet()) { - buff.append(e.getKey()).append(" = ").append(e.getValue()).append('\n'); + StringBuilder builder = new StringBuilder(); + for (Entry e : database.getSettings().getSortedSettings()) { + builder.append(e.getKey()).append(" = ").append(e.getValue()).append('\n'); } - return buff.toString(); + return builder.toString(); } @Override public String listSessions() { StringBuilder buff = new StringBuilder(); - for (Session session : database.getSessions(false)) { + for (SessionLocal session : database.getSessions(false)) { buff.append("session id: ").append(session.getId()); buff.append(" user: "). append(session.getUser().getName()). append('\n'); + NetworkConnectionInfo networkConnectionInfo = session.getNetworkConnectionInfo(); + if (networkConnectionInfo != null) { + buff.append("server: ").append(networkConnectionInfo.getServer()).append('\n') // + .append("clientAddr: ").append(networkConnectionInfo.getClient()).append('\n'); + String clientInfo = networkConnectionInfo.getClientInfo(); + if (clientInfo != null) { + buff.append("clientInfo: ").append(clientInfo).append('\n'); + } + } buff.append("connected: "). - append(new Timestamp(session.getSessionStart())). + append(session.getSessionStart().getString()). append('\n'); Command command = session.getCurrentCommand(); if (command != null) { - buff.append("statement: "). - append(session.getCurrentCommand()). - append('\n'); - long commandStart = session.getCurrentCommandStart(); - if (commandStart != 0) { - buff.append("started: ").append( - new Timestamp(commandStart)). 
- append('\n'); - } + buff.append("statement: ") + .append(command) + .append('\n') + .append("started: ") + .append(session.getCommandStartOrEnd().getString()) + .append('\n'); } - Table[] t = session.getLocks(); - if (t.length > 0) { - for (Table table : session.getLocks()) { - if (table.isLockedExclusivelyBy(session)) { - buff.append("write lock on "); - } else { - buff.append("read lock on "); - } - buff.append(table.getSchema().getName()). - append('.').append(table.getName()). - append('\n'); + for (Table table : session.getLocks()) { + if (table.isLockedExclusivelyBy(session)) { + buff.append("write lock on "); + } else { + buff.append("read lock on "); } + buff.append(table.getSchema().getName()). + append('.').append(table.getName()). + append('\n'); } buff.append('\n'); } diff --git a/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java b/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java index bbf27ecad4..15f994d296 100644 --- a/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java +++ b/h2/src/main/org/h2/jmx/DatabaseInfoMBean.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; /** * Information and management operations for the given database. - * @h2.resource * * @author Eric Dong * @author Thomas Mueller @@ -16,7 +15,6 @@ public interface DatabaseInfoMBean { /** * Is the database open in exclusive mode? - * @h2.resource * * @return true if the database is open in exclusive mode, false otherwise */ @@ -24,7 +22,6 @@ public interface DatabaseInfoMBean { /** * Is the database read-only? 
- * @h2.resource * * @return true if the database is read-only, false otherwise */ @@ -33,54 +30,13 @@ public interface DatabaseInfoMBean { /** * The database compatibility mode (REGULAR if no compatibility mode is * used). - * @h2.resource * * @return the database mode */ String getMode(); - /** - * Is multi-threading enabled? - * @h2.resource - * - * @return true if multi-threading is enabled, false otherwise - */ - boolean isMultiThreaded(); - - /** - * Is MVCC (multi version concurrency) enabled? - * @h2.resource - * - * @return true if MVCC is enabled, false otherwise - */ - boolean isMvcc(); - - /** - * The transaction log mode (0 disabled, 1 without sync, 2 enabled). - * @h2.resource - * - * @return the transaction log mode - */ - int getLogMode(); - - /** - * Set the transaction log mode. - * - * @param value the new log mode - */ - void setLogMode(int value); - - /** - * The number of write operations since the database was created. - * @h2.resource - * - * @return the total write count - */ - long getFileWriteCountTotal(); - /** * The number of write operations since the database was opened. - * @h2.resource * * @return the write count */ @@ -88,7 +44,6 @@ public interface DatabaseInfoMBean { /** * The file read count since the database was opened. - * @h2.resource * * @return the read count */ @@ -96,7 +51,6 @@ public interface DatabaseInfoMBean { /** * The database file size in KB. - * @h2.resource * * @return the number of pages */ @@ -104,7 +58,6 @@ public interface DatabaseInfoMBean { /** * The maximum cache size in KB. - * @h2.resource * * @return the maximum size */ @@ -119,7 +72,6 @@ public interface DatabaseInfoMBean { /** * The current cache size in KB. - * @h2.resource * * @return the current size */ @@ -127,7 +79,6 @@ public interface DatabaseInfoMBean { /** * The database version. - * @h2.resource * * @return the version */ @@ -135,7 +86,6 @@ public interface DatabaseInfoMBean { /** * The trace level (0 disabled, 1 error, 2 info, 3 debug). 
- * @h2.resource * * @return the level */ @@ -150,7 +100,6 @@ public interface DatabaseInfoMBean { /** * List the database settings. - * @h2.resource * * @return the database settings */ @@ -159,7 +108,6 @@ public interface DatabaseInfoMBean { /** * List sessions, including the queries that are in * progress, and locked tables. - * @h2.resource * * @return information about the sessions */ diff --git a/h2/src/main/org/h2/jmx/DocumentedMBean.java b/h2/src/main/org/h2/jmx/DocumentedMBean.java index bc0993de6e..e36fd104ad 100644 --- a/h2/src/main/org/h2/jmx/DocumentedMBean.java +++ b/h2/src/main/org/h2/jmx/DocumentedMBean.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.jmx; diff --git a/h2/src/main/org/h2/jmx/package.html b/h2/src/main/org/h2/jmx/package.html index a504d208b0..01ab3555ce 100644 --- a/h2/src/main/org/h2/jmx/package.html +++ b/h2/src/main/org/h2/jmx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/message/DbException.java b/h2/src/main/org/h2/message/DbException.java index b62fa4d198..a2549073df 100644 --- a/h2/src/main/org/h2/message/DbException.java +++ b/h2/src/main/org/h2/message/DbException.java @@ -1,14 +1,18 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; +import static org.h2.api.ErrorCode.*; + import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.PrintStream; import java.io.PrintWriter; import java.lang.reflect.InvocationTargetException; +import java.nio.charset.StandardCharsets; import java.sql.DriverManager; import java.sql.SQLException; import java.text.MessageFormat; @@ -16,12 +20,26 @@ import java.util.Map.Entry; import java.util.Properties; -import org.h2.api.ErrorCode; import org.h2.engine.Constants; +import org.h2.jdbc.JdbcException; +import org.h2.jdbc.JdbcSQLDataException; import org.h2.jdbc.JdbcSQLException; +import org.h2.jdbc.JdbcSQLFeatureNotSupportedException; +import org.h2.jdbc.JdbcSQLIntegrityConstraintViolationException; +import org.h2.jdbc.JdbcSQLInvalidAuthorizationSpecException; +import org.h2.jdbc.JdbcSQLNonTransientConnectionException; +import org.h2.jdbc.JdbcSQLNonTransientException; +import org.h2.jdbc.JdbcSQLSyntaxErrorException; +import org.h2.jdbc.JdbcSQLTimeoutException; +import org.h2.jdbc.JdbcSQLTransactionRollbackException; +import org.h2.jdbc.JdbcSQLTransientException; +import org.h2.util.HasSQL; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Typed; +import org.h2.value.Value; /** * This exception wraps a checked exception. @@ -32,14 +50,28 @@ public class DbException extends RuntimeException { private static final long serialVersionUID = 1L; + /** + * If the SQL statement contains this text, then it is never added to the + * SQL exception. Hiding the SQL statement may be important if it contains a + * passwords, such as a CREATE LINKED TABLE statement. + */ + public static final String HIDE_SQL = "--hide--"; + private static final Properties MESSAGES = new Properties(); + /** + * Thrown when OOME exception happens on handle error + * inside {@link #convert(java.lang.Throwable)}. 
+ */ + public static final SQLException SQL_OOME = + new SQLException("OutOfMemoryError", "HY000", OUT_OF_MEMORY, new OutOfMemoryError()); + private static final DbException OOME = new DbException(SQL_OOME); + private Object source; static { try { - byte[] messages = Utils.getResource( - "/org/h2/res/_messages_en.prop"); + byte[] messages = Utils.getResource("/org/h2/res/_messages_en.prop"); if (messages != null) { MESSAGES.load(new ByteArrayInputStream(messages)); } @@ -51,7 +83,7 @@ public class DbException extends RuntimeException { // (otherwise certain applications don't work) if (translations != null) { Properties p = SortedProperties.fromLines( - new String(translations, Constants.UTF8)); + new String(translations, StandardCharsets.UTF_8)); for (Entry e : p.entrySet()) { String key = (String) e.getKey(); String translation = (String) e.getValue(); @@ -63,9 +95,7 @@ public class DbException extends RuntimeException { } } } - } catch (OutOfMemoryError e) { - DbException.traceThrowable(e); - } catch (IOException e) { + } catch (OutOfMemoryError | IOException e) { DbException.traceThrowable(e); } } @@ -75,11 +105,7 @@ private DbException(SQLException e) { } private static String translate(String key, String... params) { - String message = null; - if (MESSAGES != null) { - // Tomcat sets final static fields to null sometimes - message = MESSAGES.getProperty(key); - } + String message = MESSAGES.getProperty(key); if (message == null) { message = "(Message " + key + " not found)"; } @@ -87,7 +113,7 @@ private static String translate(String key, String... params) { for (int i = 0; i < params.length; i++) { String s = params[i]; if (s != null && s.length() > 0) { - params[i] = StringUtils.quoteIdentifier(s); + params[i] = quote(s); } } message = MessageFormat.format(message, (Object[]) params); @@ -95,6 +121,29 @@ private static String translate(String key, String... 
params) { return message; } + private static String quote(String s) { + int l = s.length(); + StringBuilder builder = new StringBuilder(l + 2).append('"'); + for (int i = 0; i < l;) { + int cp = s.codePointAt(i); + i += Character.charCount(cp); + int t = Character.getType(cp); + if (t == 0 || t >= Character.SPACE_SEPARATOR && t <= Character.SURROGATE && cp != ' ') { + if (cp <= 0xffff) { + StringUtils.appendHex(builder.append('\\'), cp, 2); + } else { + StringUtils.appendHex(builder.append("\\+"), cp, 3); + } + } else { + if (cp == '"' || cp == '\\') { + builder.append((char) cp); + } + builder.appendCodePoint(cp); + } + } + return builder.append('"').toString(); + } + /** * Get the SQLException object. * @@ -122,15 +171,14 @@ public int getErrorCode() { */ public DbException addSQL(String sql) { SQLException e = getSQLException(); - if (e instanceof JdbcSQLException) { - JdbcSQLException j = (JdbcSQLException) e; + if (e instanceof JdbcException) { + JdbcException j = (JdbcException) e; if (j.getSQL() == null) { - j.setSQL(sql); + j.setSQL(filterSQL(sql)); } return this; } - e = new JdbcSQLException(e.getMessage(), sql, e.getSQLState(), - e.getErrorCode(), e, null); + e = getJdbcSQLException(e.getMessage(), sql, e.getSQLState(), e.getErrorCode(), e, null); return new DbException(e); } @@ -179,6 +227,18 @@ public static DbException get(int errorCode, String... params) { return new DbException(getJdbcSQLException(errorCode, null, params)); } + /** + * Create a database exception for an arbitrary SQLState. + * + * @param sqlstate the state to use + * @param message the message to use + * @return the exception + */ + public static DbException fromUser(String sqlstate, String message) { + // do not translate as sqlstate is arbitrary : avoid "message not found" + return new DbException(getJdbcSQLException(message, null, sqlstate, 0, null, null)); + } + /** * Create a syntax error exception. * @@ -188,7 +248,7 @@ public static DbException get(int errorCode, String... 
params) { */ public static DbException getSyntaxError(String sql, int index) { sql = StringUtils.addAsterisk(sql, index); - return get(ErrorCode.SYNTAX_ERROR_1, sql); + return get(SYNTAX_ERROR_1, sql); } /** @@ -202,8 +262,23 @@ public static DbException getSyntaxError(String sql, int index) { public static DbException getSyntaxError(String sql, int index, String message) { sql = StringUtils.addAsterisk(sql, index); - return new DbException(getJdbcSQLException(ErrorCode.SYNTAX_ERROR_2, - null, sql, message)); + return new DbException(getJdbcSQLException(SYNTAX_ERROR_2, null, sql, message)); + } + + /** + * Create a syntax error exception for a specific error code. + * + * @param errorCode the error code + * @param sql the SQL statement + * @param index the position of the error in the SQL statement + * @param params the list of parameters of the message + * @return the exception + */ + public static DbException getSyntaxError(int errorCode, String sql, int index, String... params) { + sql = StringUtils.addAsterisk(sql, index); + String sqlstate = getState(errorCode); + String message = translate(sqlstate, params); + return new DbException(getJdbcSQLException(message, sql, sqlstate, errorCode, null, null)); } /** @@ -213,7 +288,7 @@ public static DbException getSyntaxError(String sql, int index, * @return the exception */ public static DbException getUnsupportedException(String message) { - return get(ErrorCode.FEATURE_NOT_SUPPORTED_1, message); + return get(FEATURE_NOT_SUPPORTED_1, message); } /** @@ -221,38 +296,81 @@ public static DbException getUnsupportedException(String message) { * * @param param the name of the parameter * @param value the value passed - * @return the IllegalArgumentException object + * @return the exception + */ + public static DbException getInvalidValueException(String param, Object value) { + return get(INVALID_VALUE_2, value == null ? 
"null" : value.toString(), param); + } + + /** + * Gets a SQL exception meaning the type of expression is invalid or unknown. + * + * @param param the name of the parameter + * @param e the expression + * @return the exception + */ + public static DbException getInvalidExpressionTypeException(String param, Typed e) { + TypeInfo type = e.getType(); + if (type.getValueType() == Value.UNKNOWN) { + return get(UNKNOWN_DATA_TYPE_1, (e instanceof HasSQL ? (HasSQL) e : type).getTraceSQL()); + } + return get(INVALID_VALUE_2, type.getTraceSQL(), param); + } + + /** + * Gets a SQL exception meaning this value is too long. + * + * @param columnOrType + * column with data type or data type name + * @param value + * string representation of value, will be truncated to 80 + * characters + * @param valueLength + * the actual length of value, {@code -1L} if unknown + * @return the exception + */ + public static DbException getValueTooLongException(String columnOrType, String value, long valueLength) { + int length = value.length(); + int m = valueLength >= 0 ? 22 : 0; + StringBuilder builder = length > 80 // + ? new StringBuilder(83 + m).append(value, 0, 80).append("...") + : new StringBuilder(length + m).append(value); + if (valueLength >= 0) { + builder.append(" (").append(valueLength).append(')'); + } + return get(VALUE_TOO_LONG_2, columnOrType, builder.toString()); + } + + /** + * Gets a file version exception. + * + * @param dataFileName the name of the database + * @return the exception */ - public static DbException getInvalidValueException(String param, - Object value) { - return get(ErrorCode.INVALID_VALUE_2, - value == null ? "null" : value.toString(), param); + public static DbException getFileVersionError(String dataFileName) { + return DbException.get(FILE_VERSION_ERROR_1, "Old database: " + dataFileName + + " - please convert the database to a SQL script and re-create it."); } /** - * Throw an internal error. 
This method seems to return an exception object, - * so that it can be used instead of 'return', but in fact it always throws - * the exception. + * Gets an internal error. * * @param s the message * @return the RuntimeException object - * @throws RuntimeException the exception */ - public static RuntimeException throwInternalError(String s) { + public static RuntimeException getInternalError(String s) { RuntimeException e = new RuntimeException(s); DbException.traceThrowable(e); - throw e; + return e; } /** - * Throw an internal error. This method seems to return an exception object, - * so that it can be used instead of 'return', but in fact it always throws - * the exception. + * Gets an internal error. * * @return the RuntimeException object */ - public static RuntimeException throwInternalError() { - return throwInternalError("Unexpected code path"); + public static RuntimeException getInternalError() { + return getInternalError("Unexpected code path"); } /** @@ -261,7 +379,7 @@ public static RuntimeException throwInternalError() { * @param e the root cause * @return the SQL exception object */ - public static SQLException toSQLException(Exception e) { + public static SQLException toSQLException(Throwable e) { if (e instanceof SQLException) { return (SQLException) e; } @@ -277,22 +395,35 @@ public static SQLException toSQLException(Exception e) { * @return the exception object */ public static DbException convert(Throwable e) { - if (e instanceof DbException) { - return (DbException) e; - } else if (e instanceof SQLException) { - return new DbException((SQLException) e); - } else if (e instanceof InvocationTargetException) { - return convertInvocation((InvocationTargetException) e, null); - } else if (e instanceof IOException) { - return get(ErrorCode.IO_EXCEPTION_1, e, e.toString()); - } else if (e instanceof OutOfMemoryError) { - return get(ErrorCode.OUT_OF_MEMORY, e); - } else if (e instanceof StackOverflowError || e instanceof LinkageError) { - return 
get(ErrorCode.GENERAL_ERROR_1, e, e.toString()); - } else if (e instanceof Error) { - throw (Error) e; + try { + if (e instanceof DbException) { + return (DbException) e; + } else if (e instanceof SQLException) { + return new DbException((SQLException) e); + } else if (e instanceof InvocationTargetException) { + return convertInvocation((InvocationTargetException) e, null); + } else if (e instanceof IOException) { + return get(IO_EXCEPTION_1, e, e.toString()); + } else if (e instanceof OutOfMemoryError) { + return get(OUT_OF_MEMORY, e); + } else if (e instanceof StackOverflowError || e instanceof LinkageError) { + return get(GENERAL_ERROR_1, e, e.toString()); + } else if (e instanceof Error) { + throw (Error) e; + } + return get(GENERAL_ERROR_1, e, e.toString()); + } catch (OutOfMemoryError ignore) { + return OOME; + } catch (Throwable ex) { + try { + DbException dbException = new DbException( + new SQLException("GeneralError", "HY000", GENERAL_ERROR_1, e)); + dbException.addSuppressed(ex); + return dbException; + } catch (OutOfMemoryError ignore) { + return OOME; + } } - return get(ErrorCode.GENERAL_ERROR_1, e, e.toString()); } /** @@ -309,7 +440,7 @@ public static DbException convertInvocation(InvocationTargetException te, return convert(t); } message = message == null ? t.getMessage() : message + ": " + t.getMessage(); - return get(ErrorCode.EXCEPTION_IN_FUNCTION_1, t, message); + return get(EXCEPTION_IN_FUNCTION_1, t, message); } /** @@ -325,9 +456,30 @@ public static DbException convertIOException(IOException e, String message) { if (t instanceof DbException) { return (DbException) t; } - return get(ErrorCode.IO_EXCEPTION_1, e, e.toString()); + return get(IO_EXCEPTION_1, e, e.toString()); } - return get(ErrorCode.IO_EXCEPTION_2, e, e.toString(), message); + return get(IO_EXCEPTION_2, e, e.toString(), message); + } + + /** + * Gets the SQL exception object for a specific error code. 
+ * + * @param errorCode the error code + * @return the SQLException object + */ + public static SQLException getJdbcSQLException(int errorCode) { + return getJdbcSQLException(errorCode, (Throwable)null); + } + + /** + * Gets the SQL exception object for a specific error code. + * + * @param errorCode the error code + * @param p1 the first parameter of the message + * @return the SQLException object + */ + public static SQLException getJdbcSQLException(int errorCode, String p1) { + return getJdbcSQLException(errorCode, null, p1); } /** @@ -338,30 +490,277 @@ public static DbException convertIOException(IOException e, String message) { * @param params the list of parameters of the message * @return the SQLException object */ - private static JdbcSQLException getJdbcSQLException(int errorCode, - Throwable cause, String... params) { - String sqlstate = ErrorCode.getState(errorCode); + public static SQLException getJdbcSQLException(int errorCode, Throwable cause, String... params) { + String sqlstate = getState(errorCode); String message = translate(sqlstate, params); - return new JdbcSQLException(message, null, sqlstate, errorCode, cause, null); + return getJdbcSQLException(message, null, sqlstate, errorCode, cause, null); } /** - * Convert an exception to an IO exception. + * Creates a SQLException. 
* - * @param e the root cause - * @return the IO exception + * @param message the reason + * @param sql the SQL statement + * @param state the SQL state + * @param errorCode the error code + * @param cause the exception that was the reason for this exception + * @param stackTrace the stack trace + * @return the SQLException object + */ + public static SQLException getJdbcSQLException(String message, String sql, String state, int errorCode, + Throwable cause, String stackTrace) { + sql = filterSQL(sql); + // Use SQLState class value to detect type + switch (errorCode / 1_000) { + case 2: + return new JdbcSQLNonTransientException(message, sql, state, errorCode, cause, stackTrace); + case 7: + case 21: + case 42: + case 54: + return new JdbcSQLSyntaxErrorException(message, sql, state, errorCode, cause, stackTrace); + case 8: + return new JdbcSQLNonTransientConnectionException(message, sql, state, errorCode, cause, stackTrace); + case 22: + return new JdbcSQLDataException(message, sql, state, errorCode, cause, stackTrace); + case 23: + return new JdbcSQLIntegrityConstraintViolationException(message, sql, state, errorCode, cause, stackTrace); + case 28: + return new JdbcSQLInvalidAuthorizationSpecException(message, sql, state, errorCode, cause, stackTrace); + case 40: + return new JdbcSQLTransactionRollbackException(message, sql, state, errorCode, cause, stackTrace); + } + // Check error code + switch (errorCode){ + case GENERAL_ERROR_1: + case UNKNOWN_DATA_TYPE_1: + case METHOD_NOT_ALLOWED_FOR_QUERY: + case METHOD_ONLY_ALLOWED_FOR_QUERY: + case SEQUENCE_EXHAUSTED: + case OBJECT_CLOSED: + case CANNOT_DROP_CURRENT_USER: + case UNSUPPORTED_SETTING_COMBINATION: + case FILE_RENAME_FAILED_2: + case FILE_DELETE_FAILED_1: + case IO_EXCEPTION_1: + case NOT_ON_UPDATABLE_ROW: + case IO_EXCEPTION_2: + case TRACE_FILE_ERROR_2: + case ADMIN_RIGHTS_REQUIRED: + case ERROR_EXECUTING_TRIGGER_3: + case COMMIT_ROLLBACK_NOT_ALLOWED: + case FILE_CREATION_FAILED_1: + case 
SAVEPOINT_IS_INVALID_1: + case SAVEPOINT_IS_UNNAMED: + case SAVEPOINT_IS_NAMED: + case NOT_ENOUGH_RIGHTS_FOR_1: + case DATABASE_IS_READ_ONLY: + case WRONG_XID_FORMAT_1: + case UNSUPPORTED_COMPRESSION_OPTIONS_1: + case UNSUPPORTED_COMPRESSION_ALGORITHM_1: + case COMPRESSION_ERROR: + case EXCEPTION_IN_FUNCTION_1: + case ERROR_ACCESSING_LINKED_TABLE_2: + case FILE_NOT_FOUND_1: + case INVALID_CLASS_2: + case DATABASE_IS_NOT_PERSISTENT: + case RESULT_SET_NOT_UPDATABLE: + case RESULT_SET_NOT_SCROLLABLE: + case METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT: + case ACCESS_DENIED_TO_CLASS_1: + case RESULT_SET_READONLY: + case CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1: + return new JdbcSQLNonTransientException(message, sql, state, errorCode, cause, stackTrace); + case FEATURE_NOT_SUPPORTED_1: + return new JdbcSQLFeatureNotSupportedException(message, sql, state, errorCode, cause, stackTrace); + case LOCK_TIMEOUT_1: + case STATEMENT_WAS_CANCELED: + case LOB_CLOSED_ON_TIMEOUT_1: + return new JdbcSQLTimeoutException(message, sql, state, errorCode, cause, stackTrace); + case FUNCTION_MUST_RETURN_RESULT_SET_1: + case INVALID_TRIGGER_FLAGS_1: + case SUM_OR_AVG_ON_WRONG_DATATYPE_1: + case MUST_GROUP_BY_COLUMN_1: + case SECOND_PRIMARY_KEY: + case FUNCTION_NOT_FOUND_1: + case COLUMN_MUST_NOT_BE_NULLABLE_1: + case USER_NOT_FOUND_1: + case USER_ALREADY_EXISTS_1: + case SEQUENCE_ALREADY_EXISTS_1: + case SEQUENCE_NOT_FOUND_1: + case VIEW_NOT_FOUND_1: + case VIEW_ALREADY_EXISTS_1: + case TRIGGER_ALREADY_EXISTS_1: + case TRIGGER_NOT_FOUND_1: + case ERROR_CREATING_TRIGGER_OBJECT_3: + case CONSTRAINT_ALREADY_EXISTS_1: + case SUBQUERY_IS_NOT_SINGLE_COLUMN: + case INVALID_USE_OF_AGGREGATE_FUNCTION_1: + case CONSTRAINT_NOT_FOUND_1: + case AMBIGUOUS_COLUMN_NAME_1: + case ORDER_BY_NOT_IN_RESULT: + case ROLE_ALREADY_EXISTS_1: + case ROLE_NOT_FOUND_1: + case USER_OR_ROLE_NOT_FOUND_1: + case ROLES_AND_RIGHT_CANNOT_BE_MIXED: + case METHODS_MUST_HAVE_DIFFERENT_PARAMETER_COUNTS_2: + case 
ROLE_ALREADY_GRANTED_1: + case COLUMN_IS_PART_OF_INDEX_1: + case FUNCTION_ALIAS_ALREADY_EXISTS_1: + case FUNCTION_ALIAS_NOT_FOUND_1: + case SCHEMA_ALREADY_EXISTS_1: + case SCHEMA_NOT_FOUND_1: + case SCHEMA_NAME_MUST_MATCH: + case COLUMN_CONTAINS_NULL_VALUES_1: + case SEQUENCE_BELONGS_TO_A_TABLE_1: + case COLUMN_IS_REFERENCED_1: + case CANNOT_DROP_LAST_COLUMN: + case INDEX_BELONGS_TO_CONSTRAINT_2: + case CLASS_NOT_FOUND_1: + case METHOD_NOT_FOUND_1: + case COLLATION_CHANGE_WITH_DATA_TABLE_1: + case SCHEMA_CAN_NOT_BE_DROPPED_1: + case ROLE_CAN_NOT_BE_DROPPED_1: + case CANNOT_TRUNCATE_1: + case CANNOT_DROP_2: + case VIEW_IS_INVALID_2: + case TYPES_ARE_NOT_COMPARABLE_2: + case CONSTANT_ALREADY_EXISTS_1: + case CONSTANT_NOT_FOUND_1: + case LITERALS_ARE_NOT_ALLOWED: + case CANNOT_DROP_TABLE_1: + case DOMAIN_ALREADY_EXISTS_1: + case DOMAIN_NOT_FOUND_1: + case WITH_TIES_WITHOUT_ORDER_BY: + case CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS: + case TRANSACTION_NOT_FOUND_1: + case AGGREGATE_NOT_FOUND_1: + case WINDOW_NOT_FOUND_1: + case CAN_ONLY_ASSIGN_TO_VARIABLE_1: + case PUBLIC_STATIC_JAVA_METHOD_NOT_FOUND_1: + case JAVA_OBJECT_SERIALIZER_CHANGE_WITH_DATA_TABLE: + case FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT: + case INVALID_VALUE_PRECISION: + case INVALID_VALUE_SCALE: + case CONSTRAINT_IS_USED_BY_CONSTRAINT_2: + case UNCOMPARABLE_REFERENCED_COLUMN_2: + case GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1: + case GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2: + case COLUMN_ALIAS_IS_NOT_SPECIFIED_1: + case GROUP_BY_NOT_IN_THE_RESULT: + return new JdbcSQLSyntaxErrorException(message, sql, state, errorCode, cause, stackTrace); + case HEX_STRING_ODD_1: + case HEX_STRING_WRONG_1: + case INVALID_VALUE_2: + case SEQUENCE_ATTRIBUTES_INVALID_7: + case INVALID_TO_CHAR_FORMAT: + case PARAMETER_NOT_SET_1: + case PARSE_ERROR_1: + case INVALID_TO_DATE_FORMAT: + case STRING_FORMAT_ERROR_1: + case SERIALIZATION_FAILED_1: + case DESERIALIZATION_FAILED_1: + case 
SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW: + case STEP_SIZE_MUST_NOT_BE_ZERO: + return new JdbcSQLDataException(message, sql, state, errorCode, cause, stackTrace); + case URL_RELATIVE_TO_CWD: + case DATABASE_NOT_FOUND_1: + case DATABASE_NOT_FOUND_WITH_IF_EXISTS_1: + case REMOTE_DATABASE_NOT_FOUND_1: + case TRACE_CONNECTION_NOT_CLOSED: + case DATABASE_ALREADY_OPEN_1: + case FILE_CORRUPTED_1: + case URL_FORMAT_ERROR_2: + case DRIVER_VERSION_ERROR_2: + case FILE_VERSION_ERROR_1: + case FILE_ENCRYPTION_ERROR_1: + case WRONG_PASSWORD_FORMAT: + case UNSUPPORTED_CIPHER: + case UNSUPPORTED_LOCK_METHOD_1: + case EXCEPTION_OPENING_PORT_2: + case DUPLICATE_PROPERTY_1: + case CONNECTION_BROKEN_1: + case UNKNOWN_MODE_1: + case CLUSTER_ERROR_DATABASE_RUNS_ALONE: + case CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1: + case DATABASE_IS_CLOSED: + case ERROR_SETTING_DATABASE_EVENT_LISTENER_2: + case OUT_OF_MEMORY: + case UNSUPPORTED_SETTING_1: + case REMOTE_CONNECTION_NOT_ALLOWED: + case DATABASE_CALLED_AT_SHUTDOWN: + case CANNOT_CHANGE_SETTING_WHEN_OPEN_1: + case DATABASE_IS_IN_EXCLUSIVE_MODE: + case INVALID_DATABASE_NAME_1: + case AUTHENTICATOR_NOT_AVAILABLE: + case METHOD_DISABLED_ON_AUTOCOMMIT_TRUE: + return new JdbcSQLNonTransientConnectionException(message, sql, state, errorCode, cause, stackTrace); + case ROW_NOT_FOUND_WHEN_DELETING_1: + case CONCURRENT_UPDATE_1: + case ROW_NOT_FOUND_IN_PRIMARY_INDEX: + return new JdbcSQLTransientException(message, sql, state, errorCode, cause, stackTrace); + } + // Default + return new JdbcSQLException(message, sql, state, errorCode, cause, stackTrace); + } + + private static String filterSQL(String sql) { + return sql == null || !sql.contains(HIDE_SQL) ? sql : "-"; + } + + /** + * Builds message for an exception. 
+ * + * @param e exception + * @return message */ - public static IOException convertToIOException(Throwable e) { - if (e instanceof IOException) { - return (IOException) e; + public static String buildMessageForException(JdbcException e) { + String s = e.getOriginalMessage(); + StringBuilder buff = new StringBuilder(s != null ? s : "- "); + s = e.getSQL(); + if (s != null) { + buff.append("; SQL statement:\n").append(s); } - if (e instanceof JdbcSQLException) { - JdbcSQLException e2 = (JdbcSQLException) e; - if (e2.getOriginalCause() != null) { - e = e2.getOriginalCause(); + buff.append(" [").append(e.getErrorCode()).append('-').append(Constants.BUILD_ID).append(']'); + return buff.toString(); + } + + /** + * Prints up to 100 next exceptions for a specified SQL exception. + * + * @param e SQL exception + * @param s print writer + */ + public static void printNextExceptions(SQLException e, PrintWriter s) { + // getNextException().printStackTrace(s) would be very slow + // if many exceptions are joined + int i = 0; + while ((e = e.getNextException()) != null) { + if (i++ == 100) { + s.println("(truncated)"); + return; + } + s.println(e.toString()); + } + } + + /** + * Prints up to 100 next exceptions for a specified SQL exception. + * + * @param e SQL exception + * @param s print stream + */ + public static void printNextExceptions(SQLException e, PrintStream s) { + // getNextException().printStackTrace(s) would be very slow + // if many exceptions are joined + int i = 0; + while ((e = e.getNextException()) != null) { + if (i++ == 100) { + s.println("(truncated)"); + return; } + s.println(e.toString()); } - return new IOException(e.toString(), e); } public Object getSource() { diff --git a/h2/src/main/org/h2/message/Trace.java b/h2/src/main/org/h2/message/Trace.java index 73659e62af..066d026cdd 100644 --- a/h2/src/main/org/h2/message/Trace.java +++ b/h2/src/main/org/h2/message/Trace.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; @@ -8,101 +8,123 @@ import java.text.MessageFormat; import java.util.ArrayList; -import org.h2.engine.SysProperties; import org.h2.expression.ParameterInterface; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; -import org.h2.value.Value; /** * This class represents a trace module. */ -public class Trace { +public final class Trace { /** - * The trace module name for commands. + * The trace module id for commands. */ - public static final String COMMAND = "command"; + public static final int COMMAND = 0; /** - * The trace module name for constraints. + * The trace module id for constraints. */ - public static final String CONSTRAINT = "constraint"; + public static final int CONSTRAINT = 1; /** - * The trace module name for databases. + * The trace module id for databases. */ - public static final String DATABASE = "database"; + public static final int DATABASE = 2; /** - * The trace module name for functions. + * The trace module id for functions. */ - public static final String FUNCTION = "function"; + public static final int FUNCTION = 3; /** - * The trace module name for file locks. + * The trace module id for file locks. */ - public static final String FILE_LOCK = "fileLock"; + public static final int FILE_LOCK = 4; /** - * The trace module name for indexes. + * The trace module id for indexes. */ - public static final String INDEX = "index"; + public static final int INDEX = 5; /** - * The trace module name for the JDBC API. + * The trace module id for the JDBC API. */ - public static final String JDBC = "jdbc"; + public static final int JDBC = 6; /** - * The trace module name for locks. + * The trace module id for locks. 
*/ - public static final String LOCK = "lock"; + public static final int LOCK = 7; /** - * The trace module name for schemas. + * The trace module id for schemas. */ - public static final String SCHEMA = "schema"; + public static final int SCHEMA = 8; /** - * The trace module name for sequences. + * The trace module id for sequences. */ - public static final String SEQUENCE = "sequence"; + public static final int SEQUENCE = 9; /** - * The trace module name for settings. + * The trace module id for settings. */ - public static final String SETTING = "setting"; + public static final int SETTING = 10; /** - * The trace module name for tables. + * The trace module id for tables. */ - public static final String TABLE = "table"; + public static final int TABLE = 11; /** - * The trace module name for triggers. + * The trace module id for triggers. */ - public static final String TRIGGER = "trigger"; + public static final int TRIGGER = 12; /** - * The trace module name for users. + * The trace module id for users. */ - public static final String USER = "user"; + public static final int USER = 13; /** - * The trace module name for the page store. + * The trace module id for the JDBCX API */ - public static final String PAGE_STORE = "pageStore"; + public static final int JDBCX = 14; + + /** + * Module names by their ids as array indexes. 
+ */ + public static final String[] MODULE_NAMES = { + "command", + "constraint", + "database", + "function", + "fileLock", + "index", + "jdbc", + "lock", + "schema", + "sequence", + "setting", + "table", + "trigger", + "user", + "JDBCX" + }; private final TraceWriter traceWriter; private final String module; private final String lineSeparator; private int traceLevel = TraceSystem.PARENT; + Trace(TraceWriter traceWriter, int moduleId) { + this(traceWriter, MODULE_NAMES[moduleId]); + } + Trace(TraceWriter traceWriter, String module) { this.traceWriter = traceWriter; this.module = module; - this.lineSeparator = SysProperties.LINE_SEPARATOR; + this.lineSeparator = System.lineSeparator(); } /** @@ -208,29 +230,23 @@ void info(Throwable t, String s) { * @param parameters the parameter list * @return the formatted text */ - public static String formatParams( - ArrayList parameters) { - if (parameters.size() == 0) { + public static String formatParams(ArrayList parameters) { + if (parameters.isEmpty()) { return ""; } - StatementBuilder buff = new StatementBuilder(); + StringBuilder builder = new StringBuilder(); int i = 0; - boolean params = false; for (ParameterInterface p : parameters) { if (p.isValueSet()) { - if (!params) { - buff.append(" {"); - params = true; - } - buff.appendExceptFirst(", "); - Value v = p.getParamValue(); - buff.append(++i).append(": ").append(v.getTraceSQL()); + builder.append(i == 0 ? 
" {" : ", ") // + .append(++i).append(": ") // + .append(p.getParamValue().getTraceSQL()); } } - if (params) { - buff.append('}'); + if (i != 0) { + builder.append('}'); } - return buff.toString(); + return builder.toString(); } /** @@ -241,7 +257,7 @@ public static String formatParams( * @param count the update count * @param time the time it took to run the statement in ms */ - public void infoSQL(String sql, String params, int count, long time) { + public void infoSQL(String sql, String params, long count, long time) { if (!isEnabled(TraceSystem.INFO)) { return; } @@ -269,10 +285,10 @@ public void infoSQL(String sql, String params, int count, long time) { if (!space) { buff.append(' '); } - buff.append("*/"). - append(StringUtils.javaEncode(sql)). - append(StringUtils.javaEncode(params)). - append(';'); + buff.append("*/"); + StringUtils.javaEncode(sql, buff, false); + StringUtils.javaEncode(params, buff, false); + buff.append(';'); sql = buff.toString(); traceWriter.write(TraceSystem.INFO, module, sql, null); } diff --git a/h2/src/main/org/h2/message/TraceObject.java b/h2/src/main/org/h2/message/TraceObject.java index 23cc5a759b..58444781ea 100644 --- a/h2/src/main/org/h2/message/TraceObject.java +++ b/h2/src/main/org/h2/message/TraceObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; @@ -8,12 +8,15 @@ import java.math.BigDecimal; import java.sql.SQLException; import java.util.Map; +import java.util.concurrent.atomic.AtomicIntegerArray; + +import org.h2.api.ErrorCode; import org.h2.util.StringUtils; /** * The base class for objects that can print trace information about themselves. 
*/ -public class TraceObject { +public abstract class TraceObject { /** * The trace type id for callable statements. @@ -90,11 +93,19 @@ public class TraceObject { */ protected static final int ARRAY = 16; - private static final int LAST = ARRAY + 1; - private static final int[] ID = new int[LAST]; + /** + * The trace type id for SQLXML objects. + */ + protected static final int SQLXML = 17; + + private static final int LAST = SQLXML + 1; + private static final AtomicIntegerArray ID = new AtomicIntegerArray(LAST); + private static final String[] PREFIX = { "call", "conn", "dbMeta", "prep", "rs", "rsMeta", "sp", "ex", "stat", "blob", "clob", "pMeta", "ds", - "xads", "xares", "xid", "ar" }; + "xads", "xares", "xid", "ar", "sqlxml" }; + + private static final SQLException SQL_OOME = DbException.SQL_OOME; /** * The trace module used by this object. @@ -119,6 +130,7 @@ protected void setTrace(Trace trace, int type, int id) { /** * INTERNAL + * @return id */ public int getTraceId() { return id; @@ -126,6 +138,7 @@ public int getTraceId() { /** * INTERNAL + * @return object name */ public String getTraceObjectName() { return PREFIX[traceType] + id; @@ -138,7 +151,7 @@ public String getTraceObjectName() { * @return the new trace object id */ protected static int getNextId(int type) { - return ID[type]++; + return ID.getAndIncrement(type); } /** @@ -146,7 +159,7 @@ protected static int getNextId(int type) { * * @return true if it is */ - protected boolean isDebugEnabled() { + protected final boolean isDebugEnabled() { return trace.isDebugEnabled(); } @@ -155,7 +168,7 @@ protected boolean isDebugEnabled() { * * @return true if it is */ - protected boolean isInfoEnabled() { + protected final boolean isInfoEnabled() { return trace.isInfoEnabled(); } @@ -168,11 +181,10 @@ protected boolean isInfoEnabled() { * @param newId the trace object id of the created object * @param value the value to assign this new object to */ - protected void debugCodeAssign(String className, int 
newType, int newId, - String value) { + protected final void debugCodeAssign(String className, int newType, int newId, String value) { if (trace.isDebugEnabled()) { - trace.debugCode(className + " " + PREFIX[newType] + - newId + " = " + getTraceObjectName() + "." + value + ";"); + trace.debugCode(className + ' ' + PREFIX[newType] + newId + " = " + getTraceObjectName() + '.' + value + + ';'); } } @@ -182,9 +194,9 @@ protected void debugCodeAssign(String className, int newType, int newId, * * @param methodName the method name */ - protected void debugCodeCall(String methodName) { + protected final void debugCodeCall(String methodName) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + methodName + "();"); + trace.debugCode(getTraceObjectName() + '.' + methodName + "();"); } } @@ -196,10 +208,9 @@ protected void debugCodeCall(String methodName) { * @param methodName the method name * @param param one single long parameter */ - protected void debugCodeCall(String methodName, long param) { + protected final void debugCodeCall(String methodName, long param) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + - methodName + "(" + param + ");"); + trace.debugCode(getTraceObjectName() + '.' + methodName + '(' + param + ");"); } } @@ -211,10 +222,9 @@ protected void debugCodeCall(String methodName, long param) { * @param methodName the method name * @param param one single string parameter */ - protected void debugCodeCall(String methodName, String param) { + protected final void debugCodeCall(String methodName, String param) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + - methodName + "(" + quote(param) + ");"); + trace.debugCode(getTraceObjectName() + '.' 
+ methodName + '(' + quote(param) + ");"); } } @@ -223,9 +233,9 @@ protected void debugCodeCall(String methodName, String param) { * * @param text the trace text */ - protected void debugCode(String text) { + protected final void debugCode(String text) { if (trace.isDebugEnabled()) { - trace.debugCode(getTraceObjectName() + "." + text); + trace.debugCode(getTraceObjectName() + '.' + text + ';'); } } @@ -301,8 +311,9 @@ protected static String quoteBytes(byte[] x) { if (x == null) { return "null"; } - return "org.h2.util.StringUtils.convertHexToBytes(\"" + - StringUtils.convertBytesToHex(x) + "\")"; + StringBuilder builder = new StringBuilder(x.length * 2 + 45) + .append("org.h2.util.StringUtils.convertHexToBytes(\""); + return StringUtils.convertBytesToHex(builder, x).append("\")").toString(); } /** @@ -348,29 +359,40 @@ protected static String quoteMap(Map> map) { * @param ex the exception * @return the SQL exception object */ - protected SQLException logAndConvert(Exception ex) { - SQLException e = DbException.toSQLException(ex); - if (trace == null) { - DbException.traceThrowable(e); - } else { - int errorCode = e.getErrorCode(); - if (errorCode >= 23000 && errorCode < 24000) { - trace.info(e, "exception"); + protected SQLException logAndConvert(Throwable ex) { + SQLException e = null; + try { + e = DbException.toSQLException(ex); + if (trace == null) { + DbException.traceThrowable(e); } else { - trace.error(e, "exception"); + int errorCode = e.getErrorCode(); + if (errorCode >= 23000 && errorCode < 24000) { + trace.info(e, "exception"); + } else { + trace.error(e, "exception"); + } + } + } catch(Throwable another) { + if (e == null) { + try { + e = new SQLException("GeneralError", "HY000", ErrorCode.GENERAL_ERROR_1, ex); + } catch (OutOfMemoryError | NoClassDefFoundError ignored) { + return SQL_OOME; + } } + e.addSuppressed(another); } return e; } /** - * Get and throw a SQL exception meaning this feature is not supported. 
+ * Get a SQL exception meaning this feature is not supported. * * @param message the message - * @return never returns normally - * @throws SQLException the exception + * @return the SQL exception */ - protected SQLException unsupported(String message) throws SQLException { + protected SQLException unsupported(String message) { try { throw DbException.getUnsupportedException(message); } catch (Exception e) { diff --git a/h2/src/main/org/h2/message/TraceSystem.java b/h2/src/main/org/h2/message/TraceSystem.java index 81740cfd18..96743a26c2 100644 --- a/h2/src/main/org/h2/message/TraceSystem.java +++ b/h2/src/main/org/h2/message/TraceSystem.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; @@ -10,14 +10,12 @@ import java.io.PrintWriter; import java.io.Writer; import java.text.SimpleDateFormat; -import java.util.HashMap; - +import java.util.concurrent.atomic.AtomicReferenceArray; import org.h2.api.ErrorCode; import org.h2.engine.Constants; -import org.h2.jdbc.JdbcSQLException; +import org.h2.jdbc.JdbcException; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; -import org.h2.util.New; /** * The trace mechanism is the logging facility of this database. There is @@ -83,11 +81,17 @@ public class TraceSystem implements TraceWriter { private int levelMax; private int maxFileSize = DEFAULT_MAX_FILE_SIZE; private String fileName; - private HashMap traces; + private final AtomicReferenceArray traces = + new AtomicReferenceArray<>(Trace.MODULE_NAMES.length); private SimpleDateFormat dateFormat; private Writer fileWriter; private PrintWriter printWriter; - private int checkSize; + /** + * Starts at -1 so that we check the file size immediately upon open. 
This + * Can be important if we open and close the trace file without managing to + * have written CHECK_SIZE_EACH_WRITES bytes each time. + */ + private int checkSize = -1; private boolean closed; private boolean writingErrorLogged; private TraceWriter writer = this; @@ -117,30 +121,39 @@ public void setSysOut(PrintStream out) { } /** - * Get or create a trace object for this module. Trace modules with names - * such as "JDBC[1]" are not cached (modules where the name ends with "]"). - * All others are cached. + * Get or create a trace object for this module id. Trace modules with id + * are cached. * - * @param module the module name + * @param moduleId module id * @return the trace object */ - public synchronized Trace getTrace(String module) { - if (module.endsWith("]")) { - return new Trace(writer, module); - } - if (traces == null) { - traces = New.hashMap(16); - } - Trace t = traces.get(module); + public Trace getTrace(int moduleId) { + Trace t = traces.get(moduleId); if (t == null) { - t = new Trace(writer, module); - traces.put(module, t); + t = new Trace(writer, moduleId); + if (!traces.compareAndSet(moduleId, null, t)) { + t = traces.get(moduleId); + } } return t; } + /** + * Create a trace object for this module. Trace modules with names are not + * cached. 
+ * + * @param module the module name + * @return the trace object + */ + public Trace getTrace(String module) { + return new Trace(writer, module); + } + @Override public boolean isEnabled(int level) { + if (levelMax == ADAPTER) { + return writer.isEnabled(level); + } return level <= this.levelMax; } @@ -181,7 +194,7 @@ public void setLevelFile(int level) { if (level == ADAPTER) { String adapterClass = "org.h2.message.TraceWriterAdapter"; try { - writer = (TraceWriter) Class.forName(adapterClass).newInstance(); + writer = (TraceWriter) Class.forName(adapterClass).getDeclaredConstructor().newInstance(); } catch (Throwable e) { e = DbException.get(ErrorCode.CLASS_NOT_FOUND_1, e, adapterClass); write(ERROR, Trace.DATABASE, adapterClass, e); @@ -209,11 +222,16 @@ public int getLevelFile() { private synchronized String format(String module, String s) { if (dateFormat == null) { - dateFormat = new SimpleDateFormat("MM-dd HH:mm:ss "); + dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss "); } return dateFormat.format(System.currentTimeMillis()) + module + ": " + s; } + @Override + public void write(int level, int moduleId, String s, Throwable t) { + write(level, Trace.MODULE_NAMES[moduleId], s, t); + } + @Override public void write(int level, String module, String s, Throwable t) { if (level <= levelSystemOut || level > this.levelMax) { @@ -233,8 +251,8 @@ public void write(int level, String module, String s, Throwable t) { private synchronized void writeFile(String s, Throwable t) { try { - if (checkSize++ >= CHECK_SIZE_EACH_WRITES) { - checkSize = 0; + checkSize = (checkSize + 1) % CHECK_SIZE_EACH_WRITES; + if (checkSize == 0) { closeWriter(); if (maxFileSize > 0 && FileUtils.size(fileName) > maxFileSize) { String old = fileName + ".old"; @@ -247,11 +265,11 @@ private synchronized void writeFile(String s, Throwable t) { } printWriter.println(s); if (t != null) { - if (levelFile == ERROR && t instanceof JdbcSQLException) { - JdbcSQLException se = (JdbcSQLException) 
t; + if (levelFile == ERROR && t instanceof JdbcException) { + JdbcException se = (JdbcException) t; int code = se.getErrorCode(); if (ErrorCode.isCommon(code)) { - printWriter.println(t.toString()); + printWriter.println(t); } else { t.printStackTrace(printWriter); } diff --git a/h2/src/main/org/h2/message/TraceWriter.java b/h2/src/main/org/h2/message/TraceWriter.java index 0648ec4b7c..368411e6bc 100644 --- a/h2/src/main/org/h2/message/TraceWriter.java +++ b/h2/src/main/org/h2/message/TraceWriter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.message; @@ -30,6 +30,17 @@ interface TraceWriter { */ void write(int level, String module, String s, Throwable t); + /** + * Write a message. + * + * @param level the trace level + * @param moduleId the id of the module + * @param s the message + * @param t the exception (may be null) + */ + void write(int level, int moduleId, String s, Throwable t); + + /** * Check the given trace / log level is enabled. * diff --git a/h2/src/main/org/h2/message/TraceWriterAdapter.java b/h2/src/main/org/h2/message/TraceWriterAdapter.java index 6ff40ee6f1..2ec4867155 100644 --- a/h2/src/main/org/h2/message/TraceWriterAdapter.java +++ b/h2/src/main/org/h2/message/TraceWriterAdapter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.message; @@ -45,6 +45,11 @@ public boolean isEnabled(int level) { } } + @Override + public void write(int level, int moduleId, String s, Throwable t) { + write(level, Trace.MODULE_NAMES[moduleId], s, t); + } + @Override public void write(int level, String module, String s, Throwable t) { if (isEnabled(level)) { diff --git a/h2/src/main/org/h2/message/package.html b/h2/src/main/org/h2/message/package.html index 15af5c5dc9..ccdcc35a66 100644 --- a/h2/src/main/org/h2/message/package.html +++ b/h2/src/main/org/h2/message/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mode/DefaultNullOrdering.java b/h2/src/main/org/h2/mode/DefaultNullOrdering.java new file mode 100644 index 0000000000..32c4e4a297 --- /dev/null +++ b/h2/src/main/org/h2/mode/DefaultNullOrdering.java @@ -0,0 +1,102 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import static org.h2.result.SortOrder.DESCENDING; +import static org.h2.result.SortOrder.NULLS_FIRST; +import static org.h2.result.SortOrder.NULLS_LAST; + +/** + * Default ordering of NULL values. + */ +public enum DefaultNullOrdering { + + /** + * NULL values are considered as smaller than other values during sorting. + */ + LOW(NULLS_FIRST, NULLS_LAST), + + /** + * NULL values are considered as larger than other values during sorting. + */ + HIGH(NULLS_LAST, NULLS_FIRST), + + /** + * NULL values are sorted before other values, no matter if ascending or + * descending order is used. + */ + FIRST(NULLS_FIRST, NULLS_FIRST), + + /** + * NULL values are sorted after other values, no matter if ascending or + * descending order is used. + */ + LAST(NULLS_LAST, NULLS_LAST); + + private static final DefaultNullOrdering[] VALUES = values(); + + /** + * Returns default ordering of NULL values for the specified ordinal number. 
+ * + * @param ordinal + * ordinal number + * @return default ordering of NULL values for the specified ordinal number + * @see #ordinal() + */ + public static DefaultNullOrdering valueOf(int ordinal) { + return VALUES[ordinal]; + } + + private final int defaultAscNulls, defaultDescNulls; + + private final int nullAsc, nullDesc; + + private DefaultNullOrdering(int defaultAscNulls, int defaultDescNulls) { + this.defaultAscNulls = defaultAscNulls; + this.defaultDescNulls = defaultDescNulls; + nullAsc = defaultAscNulls == NULLS_FIRST ? -1 : 1; + nullDesc = defaultDescNulls == NULLS_FIRST ? -1 : 1; + } + + /** + * Returns a sort type bit mask with {@link org.h2.result.SortOrder#NULLS_FIRST} or + * {@link org.h2.result.SortOrder#NULLS_LAST} explicitly set + * + * @param sortType + * sort type bit mask + * @return bit mask with {@link org.h2.result.SortOrder#NULLS_FIRST} or {@link org.h2.result.SortOrder#NULLS_LAST} + * explicitly set + */ + public int addExplicitNullOrdering(int sortType) { + if ((sortType & (NULLS_FIRST | NULLS_LAST)) == 0) { + sortType |= ((sortType & DESCENDING) == 0 ? defaultAscNulls : defaultDescNulls); + } + return sortType; + } + + /** + * Compare two expressions where one of them is NULL. + * + * @param aNull + * whether the first expression is null + * @param sortType + * the sort bit mask to use + * @return the result of the comparison (-1 meaning the first expression + * should appear before the second, 0 if they are equal) + */ + public int compareNull(boolean aNull, int sortType) { + if ((sortType & NULLS_FIRST) != 0) { + return aNull ? -1 : 1; + } else if ((sortType & NULLS_LAST) != 0) { + return aNull ? 1 : -1; + } else if ((sortType & DESCENDING) == 0) { + return aNull ? nullAsc : -nullAsc; + } else { + return aNull ? 
nullDesc : -nullDesc; + } + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionInfo.java b/h2/src/main/org/h2/mode/FunctionInfo.java new file mode 100644 index 0000000000..ba47964407 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionInfo.java @@ -0,0 +1,89 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +/** + * This class contains information about a built-in function. + */ +public final class FunctionInfo { + + /** + * The name of the function. + */ + public final String name; + + /** + * The function type. + */ + public final int type; + + /** + * The number of parameters. + */ + final int parameterCount; + + /** + * The data type of the return value. + */ + public final int returnDataType; + + /** + * If the result of the function is NULL if any of the parameters is NULL. + */ + public final boolean nullIfParameterIsNull; + + /** + * If this function always returns the same value for the same parameters. + */ + public final boolean deterministic; + + /** + * Creates new instance of built-in function information. 
+ * + * @param name + * the name of the function + * @param type + * the function type + * @param parameterCount + * the number of parameters + * @param returnDataType + * the data type of the return value + * @param nullIfParameterIsNull + * if the result of the function is NULL if any of the parameters + * is NULL + * @param deterministic + * if this function always returns the same value for the same + * parameters + */ + public FunctionInfo(String name, int type, int parameterCount, int returnDataType, boolean nullIfParameterIsNull, + boolean deterministic) { + this.name = name; + this.type = type; + this.parameterCount = parameterCount; + this.returnDataType = returnDataType; + this.nullIfParameterIsNull = nullIfParameterIsNull; + this.deterministic = deterministic; + } + + /** + * Creates a copy of built-in function information with a different name. A + * copy will require parentheses. + * + * @param source + * the source information + * @param name + * the new name + */ + public FunctionInfo(FunctionInfo source, String name) { + this.name = name; + type = source.type; + returnDataType = source.returnDataType; + parameterCount = source.parameterCount; + nullIfParameterIsNull = source.nullIfParameterIsNull; + deterministic = source.deterministic; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsDB2Derby.java b/h2/src/main/org/h2/mode/FunctionsDB2Derby.java new file mode 100644 index 0000000000..bc61364705 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsDB2Derby.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.ExtTypeInfoNumeric; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#DB2} and + * {@link org.h2.engine.Mode.ModeEnum#Derby} compatibility modes. + */ +public final class FunctionsDB2Derby extends ModeFunction { + + private static final int IDENTITY_VAL_LOCAL = 5001; + + private static final HashMap FUNCTIONS = new HashMap<>(); + + private static final TypeInfo IDENTITY_VAL_LOCAL_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, 31, 0, + ExtTypeInfoNumeric.DECIMAL); + + static { + FUNCTIONS.put("IDENTITY_VAL_LOCAL", + new FunctionInfo("IDENTITY_VAL_LOCAL", IDENTITY_VAL_LOCAL, 0, Value.BIGINT, true, false)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsDB2Derby getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + return info != null ? 
new FunctionsDB2Derby(info) : null; + } + + private FunctionsDB2Derby(FunctionInfo info) { + super(info); + } + + @Override + public Value getValue(SessionLocal session) { + switch (info.type) { + case IDENTITY_VAL_LOCAL: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case IDENTITY_VAL_LOCAL: + type = IDENTITY_VAL_LOCAL_TYPE; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return this; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsLegacy.java b/h2/src/main/org/h2/mode/FunctionsLegacy.java new file mode 100644 index 0000000000..64df770078 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsLegacy.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * This class implements some legacy functions not available in Regular mode. + */ +public class FunctionsLegacy extends ModeFunction { + + private static final HashMap FUNCTIONS = new HashMap<>(); + + private static final int IDENTITY = 6001; + + private static final int SCOPE_IDENTITY = IDENTITY + 1; + + static { + FUNCTIONS.put("IDENTITY", new FunctionInfo("IDENTITY", IDENTITY, 0, Value.BIGINT, true, false)); + FUNCTIONS.put("SCOPE_IDENTITY", + new FunctionInfo("SCOPE_IDENTITY", SCOPE_IDENTITY, 0, Value.BIGINT, true, false)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. 
+ * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsLegacy getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + if (info != null) { + return new FunctionsLegacy(info); + } + return null; + } + + private FunctionsLegacy(FunctionInfo info) { + super(info); + } + + @Override + public Value getValue(SessionLocal session) { + switch (info.type) { + case IDENTITY: + case SCOPE_IDENTITY: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + type = TypeInfo.getTypeInfo(info.returnDataType); + return this; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java b/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java new file mode 100644 index 0000000000..92cfca0867 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsMSSQLServer.java @@ -0,0 +1,143 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.TypedValueExpression; +import org.h2.expression.function.CoalesceFunction; +import org.h2.expression.function.CurrentDateTimeValueFunction; +import org.h2.expression.function.RandFunction; +import org.h2.expression.function.StringFunction; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#MSSQLServer} compatibility + * mode. 
+ */ +public final class FunctionsMSSQLServer extends ModeFunction { + + private static final HashMap FUNCTIONS = new HashMap<>(); + + private static final int CHARINDEX = 4001; + + private static final int GETDATE = CHARINDEX + 1; + + private static final int ISNULL = GETDATE + 1; + + private static final int LEN = ISNULL + 1; + + private static final int NEWID = LEN + 1; + + private static final int SCOPE_IDENTITY = NEWID + 1; + + private static final TypeInfo SCOPE_IDENTITY_TYPE = TypeInfo.getTypeInfo(Value.NUMERIC, 38, 0, null); + + static { + FUNCTIONS.put("CHARINDEX", new FunctionInfo("CHARINDEX", CHARINDEX, VAR_ARGS, Value.INTEGER, true, true)); + FUNCTIONS.put("GETDATE", new FunctionInfo("GETDATE", GETDATE, 0, Value.TIMESTAMP, false, true)); + FUNCTIONS.put("LEN", new FunctionInfo("LEN", LEN, 1, Value.INTEGER, true, true)); + FUNCTIONS.put("NEWID", new FunctionInfo("NEWID", NEWID, 0, Value.UUID, true, false)); + FUNCTIONS.put("ISNULL", new FunctionInfo("ISNULL", ISNULL, 2, Value.NULL, false, true)); + FUNCTIONS.put("SCOPE_IDENTITY", + new FunctionInfo("SCOPE_IDENTITY", SCOPE_IDENTITY, 0, Value.NUMERIC, true, false)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsMSSQLServer getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + if (info != null) { + return new FunctionsMSSQLServer(info); + } + return null; + } + + private FunctionsMSSQLServer(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min, max; + switch (info.type) { + case CHARINDEX: + min = 2; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); + } + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + switch (info.type) { + case LEN: { + long len; + if (v0.getValueType() == Value.CHAR) { + String s = v0.getString(); + int l = s.length(); + while (l > 0 && s.charAt(l - 1) == ' ') { + l--; + } + len = l; + } else { + len = v0.charLength(); + } + return ValueBigint.get(len); + } + case SCOPE_IDENTITY: + return session.getLastIdentity().convertTo(type); + default: + throw DbException.getInternalError("type=" + info.type); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case CHARINDEX: + return new StringFunction(args, StringFunction.LOCATE).optimize(session); + case GETDATE: + return new CurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, 3).optimize(session); + case ISNULL: + return new CoalesceFunction(CoalesceFunction.COALESCE, args).optimize(session); + case NEWID: + return new RandFunction(null, RandFunction.RANDOM_UUID).optimize(session); + case SCOPE_IDENTITY: + type = SCOPE_IDENTITY_TYPE; + break; + default: + type = TypeInfo.getTypeInfo(info.returnDataType); + if (optimizeArguments(session)) { + return TypedValueExpression.getTypedIfNull(getValue(session), type); + } + } + return this; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsMySQL.java b/h2/src/main/org/h2/mode/FunctionsMySQL.java new file mode 100644 index 0000000000..480100ee3a --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsMySQL.java @@ -0,0 +1,258 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Jason Brittain (jason.brittain at gmail.com) + */ +package org.h2.mode; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Locale; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.util.DateTimeUtils; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarchar; + +/** + * This class implements some MySQL-specific functions. + * + * @author Jason Brittain + * @author Thomas Mueller + */ +public final class FunctionsMySQL extends ModeFunction { + + private static final int UNIX_TIMESTAMP = 1001, FROM_UNIXTIME = 1002, DATE = 1003, LAST_INSERT_ID = 1004; + + private static final HashMap FUNCTIONS = new HashMap<>(); + + static { + FUNCTIONS.put("UNIX_TIMESTAMP", + new FunctionInfo("UNIX_TIMESTAMP", UNIX_TIMESTAMP, VAR_ARGS, Value.INTEGER, false, false)); + FUNCTIONS.put("FROM_UNIXTIME", + new FunctionInfo("FROM_UNIXTIME", FROM_UNIXTIME, VAR_ARGS, Value.VARCHAR, false, true)); + FUNCTIONS.put("DATE", new FunctionInfo("DATE", DATE, 1, Value.DATE, false, true)); + FUNCTIONS.put("LAST_INSERT_ID", + new FunctionInfo("LAST_INSERT_ID", LAST_INSERT_ID, VAR_ARGS, Value.BIGINT, false, false)); + } + + /** + * The date format of a MySQL formatted date/time. + * Example: 2008-09-25 08:40:59 + */ + private static final String DATE_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; + + /** + * Format replacements for MySQL date formats. 
+ * See + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date-format + */ + private static final String[] FORMAT_REPLACE = { + "%a", "EEE", + "%b", "MMM", + "%c", "MM", + "%d", "dd", + "%e", "d", + "%H", "HH", + "%h", "hh", + "%I", "hh", + "%i", "mm", + "%j", "DDD", + "%k", "H", + "%l", "h", + "%M", "MMMM", + "%m", "MM", + "%p", "a", + "%r", "hh:mm:ss a", + "%S", "ss", + "%s", "ss", + "%T", "HH:mm:ss", + "%W", "EEEE", + "%w", "F", + "%Y", "yyyy", + "%y", "yy", + "%%", "%", + }; + + /** + * Get the seconds since 1970-01-01 00:00:00 UTC of the given timestamp. + * See + * https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_unix-timestamp + * + * @param session the session + * @param value the timestamp + * @return the timestamp in seconds since EPOCH + */ + public static int unixTimestamp(SessionLocal session, Value value) { + long seconds; + if (value instanceof ValueTimestampTimeZone) { + ValueTimestampTimeZone t = (ValueTimestampTimeZone) value; + long timeNanos = t.getTimeNanos(); + seconds = DateTimeUtils.absoluteDayFromDateValue(t.getDateValue()) * DateTimeUtils.SECONDS_PER_DAY + + timeNanos / DateTimeUtils.NANOS_PER_SECOND - t.getTimeZoneOffsetSeconds(); + } else { + ValueTimestamp t = (ValueTimestamp) value.convertTo(TypeInfo.TYPE_TIMESTAMP, session); + long timeNanos = t.getTimeNanos(); + seconds = session.currentTimeZone().getEpochSecondsFromLocal(t.getDateValue(), timeNanos); + } + return (int) seconds; + } + + /** + * See + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime + * + * @param seconds The current timestamp in seconds. + * @return a formatted date/time String in the format "yyyy-MM-dd HH:mm:ss". 
+ */ + public static String fromUnixTime(int seconds) { + SimpleDateFormat formatter = new SimpleDateFormat(DATE_TIME_FORMAT, + Locale.ENGLISH); + return formatter.format(new Date(seconds * 1_000L)); + } + + /** + * See + * https://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime + * + * @param seconds The current timestamp in seconds. + * @param format The format of the date/time String to return. + * @return a formatted date/time String in the given format. + */ + public static String fromUnixTime(int seconds, String format) { + format = convertToSimpleDateFormat(format); + SimpleDateFormat formatter = new SimpleDateFormat(format, Locale.ENGLISH); + return formatter.format(new Date(seconds * 1_000L)); + } + + private static String convertToSimpleDateFormat(String format) { + String[] replace = FORMAT_REPLACE; + for (int i = 0; i < replace.length; i += 2) { + format = StringUtils.replaceAll(format, replace[i], replace[i + 1]); + } + return format; + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsMySQL getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + return info != null ? new FunctionsMySQL(info) : null; + } + + FunctionsMySQL(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min, max; + switch (info.type) { + case UNIX_TIMESTAMP: + min = 0; + max = 1; + break; + case FROM_UNIXTIME: + min = 1; + max = 2; + break; + case DATE: + min = 1; + max = 1; + break; + case LAST_INSERT_ID: + min = 0; + max = 1; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session); + type = TypeInfo.getTypeInfo(info.returnDataType); + if (allConst) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = new Value[args.length]; + Value v0 = getNullOrValue(session, args, values, 0); + Value v1 = getNullOrValue(session, args, values, 1); + Value result; + switch (info.type) { + case UNIX_TIMESTAMP: + result = ValueInteger.get(unixTimestamp(session, v0 == null ? session.currentTimestamp() : v0)); + break; + case FROM_UNIXTIME: + result = ValueVarchar.get( + v1 == null ? fromUnixTime(v0.getInt()) : fromUnixTime(v0.getInt(), v1.getString())); + break; + case DATE: + switch (v0.getValueType()) { + case Value.NULL: + case Value.DATE: + result = v0; + break; + default: + try { + v0 = v0.convertTo(TypeInfo.TYPE_TIMESTAMP, session); + } catch (DbException ex) { + result = ValueNull.INSTANCE; + break; + } + //$FALL-THROUGH$ + case Value.TIMESTAMP: + case Value.TIMESTAMP_TZ: + result = v0.convertToDate(session); + } + break; + case LAST_INSERT_ID: + if (args.length == 0) { + result = session.getLastIdentity(); + if (result == ValueNull.INSTANCE) { + result = ValueBigint.get(0L); + } else { + result = result.convertToBigint(null); + } + } else { + result = v0; + if (result == ValueNull.INSTANCE) { + session.setLastIdentity(ValueNull.INSTANCE); + } else { + session.setLastIdentity(result = result.convertToBigint(null)); + } + } + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return result; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsOracle.java b/h2/src/main/org/h2/mode/FunctionsOracle.java new file mode 100644 index 0000000000..d950752c6b --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsOracle.java @@ -0,0 +1,135 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.expression.function.DateTimeFunction; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueUuid; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#Oracle} compatibility mode. + */ +public final class FunctionsOracle extends ModeFunction { + + private static final int ADD_MONTHS = 2001; + + private static final int SYS_GUID = ADD_MONTHS + 1; + + private static final int TO_DATE = SYS_GUID + 1; + + private static final int TO_TIMESTAMP = TO_DATE + 1; + + private static final int TO_TIMESTAMP_TZ = TO_TIMESTAMP + 1; + + private static final HashMap FUNCTIONS = new HashMap<>(); + + static { + FUNCTIONS.put("ADD_MONTHS", + new FunctionInfo("ADD_MONTHS", ADD_MONTHS, 2, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("SYS_GUID", + new FunctionInfo("SYS_GUID", SYS_GUID, 0, Value.VARBINARY, false, false)); + FUNCTIONS.put("TO_DATE", + new FunctionInfo("TO_DATE", TO_DATE, VAR_ARGS, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("TO_TIMESTAMP", + new FunctionInfo("TO_TIMESTAMP", TO_TIMESTAMP, VAR_ARGS, Value.TIMESTAMP, true, true)); + FUNCTIONS.put("TO_TIMESTAMP_TZ", + new FunctionInfo("TO_TIMESTAMP_TZ", TO_TIMESTAMP_TZ, VAR_ARGS, Value.TIMESTAMP_TZ, true, true)); + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsOracle getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + return info != null ? 
new FunctionsOracle(info) : null; + } + + private FunctionsOracle(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min = 0, max = Integer.MAX_VALUE; + switch (info.type) { + case TO_TIMESTAMP: + case TO_TIMESTAMP_TZ: + min = 1; + max = 2; + break; + case TO_DATE: + min = 1; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." + max); + } + } + + @Override + public Expression optimize(SessionLocal session) { + boolean allConst = optimizeArguments(session); + switch (info.type) { + case SYS_GUID: + type = TypeInfo.getTypeInfo(Value.VARBINARY, 16, 0, null); + break; + default: + type = TypeInfo.getTypeInfo(info.returnDataType); + } + if (allConst) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + Value v1 = getNullOrValue(session, args, values, 1); + Value result; + switch (info.type) { + case ADD_MONTHS: + result = DateTimeFunction.dateadd(session, DateTimeFunction.MONTH, v1.getInt(), v0); + break; + case SYS_GUID: + result = ValueUuid.getNewRandom().convertTo(TypeInfo.TYPE_VARBINARY); + break; + case TO_DATE: + result = ToDateParser.toDate(session, v0.getString(), v1 == null ? null : v1.getString()); + break; + case TO_TIMESTAMP: + result = ToDateParser.toTimestamp(session, v0.getString(), v1 == null ? null : v1.getString()); + break; + case TO_TIMESTAMP_TZ: + result = ToDateParser.toTimestampTz(session, v0.getString(), v1 == null ? 
null : v1.getString()); + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return result; + } + +} diff --git a/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java b/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java new file mode 100644 index 0000000000..ad2be4d957 --- /dev/null +++ b/h2/src/main/org/h2/mode/FunctionsPostgreSQL.java @@ -0,0 +1,377 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; +import java.util.StringJoiner; + +import org.h2.api.ErrorCode; +import org.h2.command.Parser; +import org.h2.engine.Constants; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.expression.function.CurrentGeneralValueSpecification; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.server.pg.PgServer; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueVarchar; + +/** + * Functions for {@link org.h2.engine.Mode.ModeEnum#PostgreSQL} compatibility + * mode. 
+ */ +public final class FunctionsPostgreSQL extends ModeFunction { + + private static final int CURRENT_DATABASE = 3001; + + private static final int CURRTID2 = CURRENT_DATABASE + 1; + + private static final int FORMAT_TYPE = CURRTID2 + 1; + + private static final int HAS_DATABASE_PRIVILEGE = FORMAT_TYPE + 1; + + private static final int HAS_SCHEMA_PRIVILEGE = HAS_DATABASE_PRIVILEGE + 1; + + private static final int HAS_TABLE_PRIVILEGE = HAS_SCHEMA_PRIVILEGE + 1; + + private static final int LASTVAL = HAS_TABLE_PRIVILEGE + 1; + + private static final int VERSION = LASTVAL + 1; + + private static final int OBJ_DESCRIPTION = VERSION + 1; + + private static final int PG_ENCODING_TO_CHAR = OBJ_DESCRIPTION + 1; + + private static final int PG_GET_EXPR = PG_ENCODING_TO_CHAR + 1; + + private static final int PG_GET_INDEXDEF = PG_GET_EXPR + 1; + + private static final int PG_GET_USERBYID = PG_GET_INDEXDEF + 1; + + private static final int PG_POSTMASTER_START_TIME = PG_GET_USERBYID + 1; + + private static final int PG_RELATION_SIZE = PG_POSTMASTER_START_TIME + 1; + + private static final int PG_TABLE_IS_VISIBLE = PG_RELATION_SIZE + 1; + + private static final int SET_CONFIG = PG_TABLE_IS_VISIBLE + 1; + + private static final int ARRAY_TO_STRING = SET_CONFIG + 1; + + private static final int PG_STAT_GET_NUMSCANS = ARRAY_TO_STRING + 1; + + private static final int TO_DATE = PG_STAT_GET_NUMSCANS + 1; + + private static final int TO_TIMESTAMP = TO_DATE + 1; + + private static final HashMap FUNCTIONS = new HashMap<>(32); + + static { + FUNCTIONS.put("CURRENT_DATABASE", + new FunctionInfo("CURRENT_DATABASE", CURRENT_DATABASE, 0, Value.VARCHAR, true, false)); + FUNCTIONS.put("CURRTID2", new FunctionInfo("CURRTID2", CURRTID2, 2, Value.INTEGER, true, false)); + FUNCTIONS.put("FORMAT_TYPE", new FunctionInfo("FORMAT_TYPE", FORMAT_TYPE, 2, Value.VARCHAR, false, true)); + FUNCTIONS.put("HAS_DATABASE_PRIVILEGE", new FunctionInfo("HAS_DATABASE_PRIVILEGE", HAS_DATABASE_PRIVILEGE, + 
VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("HAS_SCHEMA_PRIVILEGE", + new FunctionInfo("HAS_SCHEMA_PRIVILEGE", HAS_SCHEMA_PRIVILEGE, VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("HAS_TABLE_PRIVILEGE", + new FunctionInfo("HAS_TABLE_PRIVILEGE", HAS_TABLE_PRIVILEGE, VAR_ARGS, Value.BOOLEAN, true, false)); + FUNCTIONS.put("LASTVAL", new FunctionInfo("LASTVAL", LASTVAL, 0, Value.BIGINT, true, false)); + FUNCTIONS.put("VERSION", new FunctionInfo("VERSION", VERSION, 0, Value.VARCHAR, true, false)); + FUNCTIONS.put("OBJ_DESCRIPTION", + new FunctionInfo("OBJ_DESCRIPTION", OBJ_DESCRIPTION, VAR_ARGS, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_ENCODING_TO_CHAR", + new FunctionInfo("PG_ENCODING_TO_CHAR", PG_ENCODING_TO_CHAR, 1, Value.VARCHAR, true, true)); + FUNCTIONS.put("PG_GET_EXPR", // + new FunctionInfo("PG_GET_EXPR", PG_GET_EXPR, VAR_ARGS, Value.VARCHAR, true, true)); + FUNCTIONS.put("PG_GET_INDEXDEF", + new FunctionInfo("PG_GET_INDEXDEF", PG_GET_INDEXDEF, VAR_ARGS, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_GET_USERBYID", + new FunctionInfo("PG_GET_USERBYID", PG_GET_USERBYID, 1, Value.VARCHAR, true, false)); + FUNCTIONS.put("PG_POSTMASTER_START_TIME", // + new FunctionInfo("PG_POSTMASTER_START_TIME", PG_POSTMASTER_START_TIME, 0, Value.TIMESTAMP_TZ, true, + false)); + FUNCTIONS.put("PG_RELATION_SIZE", + new FunctionInfo("PG_RELATION_SIZE", PG_RELATION_SIZE, VAR_ARGS, Value.BIGINT, true, false)); + FUNCTIONS.put("PG_TABLE_IS_VISIBLE", + new FunctionInfo("PG_TABLE_IS_VISIBLE", PG_TABLE_IS_VISIBLE, 1, Value.BOOLEAN, true, false)); + FUNCTIONS.put("SET_CONFIG", new FunctionInfo("SET_CONFIG", SET_CONFIG, 3, Value.VARCHAR, true, false)); + FUNCTIONS.put("ARRAY_TO_STRING", + new FunctionInfo("ARRAY_TO_STRING", ARRAY_TO_STRING, VAR_ARGS, Value.VARCHAR, false, true)); + FUNCTIONS.put("PG_STAT_GET_NUMSCANS", + new FunctionInfo("PG_STAT_GET_NUMSCANS", PG_STAT_GET_NUMSCANS, 1, Value.INTEGER, true, true)); + FUNCTIONS.put("TO_DATE", new 
FunctionInfo("TO_DATE", TO_DATE, 2, Value.DATE, true, true)); + FUNCTIONS.put("TO_TIMESTAMP", + new FunctionInfo("TO_TIMESTAMP", TO_TIMESTAMP, 2, Value.TIMESTAMP_TZ, true, true)); + + } + + /** + * Returns mode-specific function for a given name, or {@code null}. + * + * @param upperName + * the upper-case name of a function + * @return the function with specified name or {@code null} + */ + public static FunctionsPostgreSQL getFunction(String upperName) { + FunctionInfo info = FUNCTIONS.get(upperName); + if (info != null) { + return new FunctionsPostgreSQL(info); + } + return null; + } + + private FunctionsPostgreSQL(FunctionInfo info) { + super(info); + } + + @Override + protected void checkParameterCount(int len) { + int min, max; + switch (info.type) { + case HAS_DATABASE_PRIVILEGE: + case HAS_SCHEMA_PRIVILEGE: + case HAS_TABLE_PRIVILEGE: + min = 2; + max = 3; + break; + case OBJ_DESCRIPTION: + case PG_RELATION_SIZE: + min = 1; + max = 2; + break; + case PG_GET_INDEXDEF: + if (len != 1 && len != 3) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, "1, 3"); + } + return; + case PG_GET_EXPR: + case ARRAY_TO_STRING: + min = 2; + max = 3; + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + if (len < min || len > max) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, min + ".." 
+ max); + } + } + + @Override + public Expression optimize(SessionLocal session) { + switch (info.type) { + case CURRENT_DATABASE: + return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG) + .optimize(session); + default: + boolean allConst = optimizeArguments(session); + type = TypeInfo.getTypeInfo(info.returnDataType); + if (allConst) { + return ValueExpression.get(getValue(session)); + } + } + return this; + } + + @Override + public Value getValue(SessionLocal session) { + Value[] values = getArgumentsValues(session, args); + if (values == null) { + return ValueNull.INSTANCE; + } + Value v0 = getNullOrValue(session, args, values, 0); + Value v1 = getNullOrValue(session, args, values, 1); + Value v2 = getNullOrValue(session, args, values, 2); + Value result; + switch (info.type) { + case CURRTID2: + // Not implemented + result = ValueInteger.get(1); + break; + case FORMAT_TYPE: + result = v0 != ValueNull.INSTANCE ? ValueVarchar.get(PgServer.formatType(v0.getInt())) // + : ValueNull.INSTANCE; + break; + case HAS_DATABASE_PRIVILEGE: + case HAS_SCHEMA_PRIVILEGE: + case HAS_TABLE_PRIVILEGE: + case PG_TABLE_IS_VISIBLE: + // Not implemented + result = ValueBoolean.TRUE; + break; + case LASTVAL: + result = session.getLastIdentity(); + if (result == ValueNull.INSTANCE) { + throw DbException.get(ErrorCode.CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1, "lastval()"); + } + result = result.convertToBigint(null); + break; + case VERSION: + result = ValueVarchar + .get("PostgreSQL " + Constants.PG_VERSION + " server protocol using H2 " + Constants.FULL_VERSION); + break; + case OBJ_DESCRIPTION: + // Not implemented + result = ValueNull.INSTANCE; + break; + case PG_ENCODING_TO_CHAR: + result = ValueVarchar.get(encodingToChar(v0.getInt())); + break; + case PG_GET_EXPR: + // Not implemented + result = ValueNull.INSTANCE; + break; + case PG_GET_INDEXDEF: + result = getIndexdef(session, v0.getInt(), v1, v2); + break; + case 
PG_GET_USERBYID: + result = ValueVarchar.get(getUserbyid(session, v0.getInt())); + break; + case PG_POSTMASTER_START_TIME: + result = session.getDatabase().getSystemSession().getSessionStart(); + break; + case PG_RELATION_SIZE: + // Optional second argument is ignored + result = relationSize(session, v0); + break; + case SET_CONFIG: + // Not implemented + result = v1.convertTo(Value.VARCHAR); + break; + case ARRAY_TO_STRING: + if (v0 == ValueNull.INSTANCE || v1 == ValueNull.INSTANCE) { + result = ValueNull.INSTANCE; + break; + } + StringJoiner joiner = new StringJoiner(v1.getString()); + if (v0.getValueType() != Value.ARRAY) { + throw DbException.getInvalidValueException("ARRAY_TO_STRING array", v0); + } + String nullString = null; + if (v2 != null) { + nullString = v2.getString(); + } + for (Value v : ((ValueArray) v0).getList()) { + if (v != ValueNull.INSTANCE) { + joiner.add(v.getString()); + } else if (nullString != null) { + joiner.add(nullString); + } + } + result = ValueVarchar.get(joiner.toString()); + break; + case PG_STAT_GET_NUMSCANS: + // Not implemented + result = ValueInteger.get(0); + break; + case TO_DATE: + result = ToDateParser.toDate(session, v0.getString(), v1.getString()).convertToDate(session); + break; + case TO_TIMESTAMP: + result = ToDateParser.toTimestampTz(session, v0.getString(), v1.getString()); + break; + default: + throw DbException.getInternalError("type=" + info.type); + } + return result; + } + + private static String encodingToChar(int code) { + switch (code) { + case 0: + return "SQL_ASCII"; + case 6: + return "UTF8"; + case 8: + return "LATIN1"; + default: + // This function returns empty string for unknown encodings + return code < 40 ? 
"UTF8" : ""; + } + } + + private static Value getIndexdef(SessionLocal session, int indexId, Value ordinalPosition, Value pretty) { + for (Schema schema : session.getDatabase().getAllSchemasNoMeta()) { + for (Index index : schema.getAllIndexes()) { + if (index.getId() == indexId) { + if (!index.getTable().isHidden()) { + int ordinal; + if (ordinalPosition == null || (ordinal = ordinalPosition.getInt()) == 0) { + return ValueVarchar.get(index.getCreateSQL()); + } + Column[] columns; + if (ordinal >= 1 && ordinal <= (columns = index.getColumns()).length) { + return ValueVarchar.get(columns[ordinal - 1].getName()); + } + } + break; + } + } + } + return ValueNull.INSTANCE; + } + + private static String getUserbyid(SessionLocal session, int uid) { + User u = session.getUser(); + String name; + search: { + if (u.getId() == uid) { + name = u.getName(); + break search; + } else { + if (u.isAdmin()) { + for (RightOwner rightOwner : session.getDatabase().getAllUsersAndRoles()) { + if (rightOwner.getId() == uid) { + name = rightOwner.getName(); + break search; + } + } + } + } + return "unknown (OID=" + uid + ')'; + } + if (session.getDatabase().getSettings().databaseToLower) { + name = StringUtils.toLowerEnglish(name); + } + return name; + } + + private static Value relationSize(SessionLocal session, Value tableOidOrName) { + Table t; + if (tableOidOrName.getValueType() == Value.INTEGER) { + int tid = tableOidOrName.getInt(); + for (Schema schema : session.getDatabase().getAllSchemasNoMeta()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (tid == table.getId()) { + t = table; + break; + } + } + } + return ValueNull.INSTANCE; + } else { + t = new Parser(session).parseTableName(tableOidOrName.getString()); + } + return ValueBigint.get(t.getDiskSpaceUsed()); + } + +} diff --git a/h2/src/main/org/h2/mode/ModeFunction.java b/h2/src/main/org/h2/mode/ModeFunction.java new file mode 100644 index 0000000000..59f212242e --- /dev/null +++ 
b/h2/src/main/org/h2/mode/ModeFunction.java @@ -0,0 +1,184 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.engine.Mode.ModeEnum; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.function.FunctionN; +import org.h2.message.DbException; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Base class for mode-specific functions. + */ +public abstract class ModeFunction extends FunctionN { + + /** + * Constant for variable number of arguments. + */ + protected static final int VAR_ARGS = -1; + + /** + * The information about this function. + */ + protected final FunctionInfo info; + + /** + * Get an instance of the given function for this database. + * If no function with this name is found, null is returned. 
+ * + * @param database the database + * @param name the upper case function name + * @return the function object or null + */ + public static ModeFunction getFunction(Database database, String name) { + ModeEnum modeEnum = database.getMode().getEnum(); + if (modeEnum != ModeEnum.REGULAR) { + return getCompatibilityModeFunction(name, modeEnum); + } + return null; + } + + private static ModeFunction getCompatibilityModeFunction(String name, ModeEnum modeEnum) { + switch (modeEnum) { + case LEGACY: + return FunctionsLegacy.getFunction(name); + case DB2: + case Derby: + return FunctionsDB2Derby.getFunction(name); + case MSSQLServer: + return FunctionsMSSQLServer.getFunction(name); + case MySQL: + return FunctionsMySQL.getFunction(name); + case Oracle: + return FunctionsOracle.getFunction(name); + case PostgreSQL: + return FunctionsPostgreSQL.getFunction(name); + default: + return null; + } + } + + + /** + * Creates a new instance of function. + * + * @param info function information + */ + ModeFunction(FunctionInfo info) { + super(new Expression[info.parameterCount != VAR_ARGS ? info.parameterCount : 4]); + this.info = info; + } + + /** + * Get value transformed by expression, or null if i is out of range or + * the input value is null. + * + * @param session database session + * @param args expressions + * @param values array of input values + * @param i index of value of transform + * @return value or null + */ + static Value getNullOrValue(SessionLocal session, Expression[] args, + Value[] values, int i) { + if (i >= args.length) { + return null; + } + Value v = values[i]; + if (v == null) { + Expression e = args[i]; + if (e == null) { + return null; + } + v = values[i] = e.getValue(session); + } + return v; + } + + /** + * Gets values of arguments and checks them for NULL values if function + * returns NULL on NULL argument. 
+ * + * @param session + * the session + * @param args + * the arguments + * @return the values, or {@code null} if function should return NULL due to + * NULL argument + */ + final Value[] getArgumentsValues(SessionLocal session, Expression[] args) { + Value[] values = new Value[args.length]; + if (info.nullIfParameterIsNull) { + for (int i = 0, l = args.length; i < l; i++) { + Value v = args[i].getValue(session); + if (v == ValueNull.INSTANCE) { + return null; + } + values[i] = v; + } + } + return values; + } + + /** + * Check if the parameter count is correct. + * + * @param len the number of parameters set + * @throws DbException if the parameter count is incorrect + */ + void checkParameterCount(int len) { + throw DbException.getInternalError("type=" + info.type); + } + + @Override + public void doneWithParameters() { + int count = info.parameterCount; + if (count == VAR_ARGS) { + checkParameterCount(argsCount); + super.doneWithParameters(); + } else if (count != argsCount) { + throw DbException.get(ErrorCode.INVALID_PARAMETER_COUNT_2, info.name, Integer.toString(argsCount)); + } + } + + /** + * Optimizes arguments. 
+ * + * @param session + * the session + * @return whether all arguments are constants and function is deterministic + */ + final boolean optimizeArguments(SessionLocal session) { + return optimizeArguments(session, info.deterministic); + } + + @Override + public String getName() { + return info.name; + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + if (!super.isEverything(visitor)) { + return false; + } + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + case ExpressionVisitor.QUERY_COMPARABLE: + case ExpressionVisitor.READONLY: + return info.deterministic; + default: + return true; + } + } + +} diff --git a/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java b/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java new file mode 100644 index 0000000000..44c245682b --- /dev/null +++ b/h2/src/main/org/h2/mode/OnDuplicateKeyValues.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.command.dml.Update; +import org.h2.engine.SessionLocal; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.table.Column; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * VALUES(column) function for ON DUPLICATE KEY UPDATE clause. 
+ */ +public final class OnDuplicateKeyValues extends Operation0 { + + private final Column column; + + private final Update update; + + public OnDuplicateKeyValues(Column column, Update update) { + this.column = column; + this.update = update; + } + + @Override + public Value getValue(SessionLocal session) { + Value v = update.getOnDuplicateKeyInsert().getOnDuplicateKeyValue(column.getColumnId()); + if (v == null) { + throw DbException.getUnsupportedException(getTraceSQL()); + } + return v; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return column.getSQL(builder.append("VALUES("), sqlFlags).append(')'); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + switch (visitor.getType()) { + case ExpressionVisitor.DETERMINISTIC: + return false; + } + return true; + } + + @Override + public TypeInfo getType() { + return column.getType(); + } + + @Override + public int getCost() { + return 1; + } + +} diff --git a/h2/src/main/org/h2/mode/PgCatalogSchema.java b/h2/src/main/org/h2/mode/PgCatalogSchema.java new file mode 100644 index 0000000000..e88f20ac54 --- /dev/null +++ b/h2/src/main/org/h2/mode/PgCatalogSchema.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.schema.MetaSchema; +import org.h2.table.Table; + +/** + * {@code pg_catalog} schema. + */ +public final class PgCatalogSchema extends MetaSchema { + + private volatile HashMap tables; + + /** + * Creates new instance of {@code pg_catalog} schema. 
+ * + * @param database + * the database + * @param owner + * the owner of the schema (system user) + */ + public PgCatalogSchema(Database database, User owner) { + super(database, Constants.PG_CATALOG_SCHEMA_ID, database.sysIdentifier(Constants.SCHEMA_PG_CATALOG), owner); + } + + @Override + protected Map getMap(SessionLocal session) { + HashMap map = tables; + if (map == null) { + map = fillMap(); + } + return map; + } + + private synchronized HashMap fillMap() { + HashMap map = tables; + if (map == null) { + map = database.newStringMap(); + for (int type = 0; type < PgCatalogTable.META_TABLE_TYPE_COUNT; type++) { + PgCatalogTable table = new PgCatalogTable(this, Constants.PG_CATALOG_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + tables = map; + } + return map; + } + +} diff --git a/h2/src/main/org/h2/mode/PgCatalogTable.java b/h2/src/main/org/h2/mode/PgCatalogTable.java new file mode 100644 index 0000000000..161da669a1 --- /dev/null +++ b/h2/src/main/org/h2/mode/PgCatalogTable.java @@ -0,0 +1,721 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mode; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; + +import org.h2.constraint.Constraint; +import org.h2.engine.Constants; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.schema.Schema; +import org.h2.schema.TriggerObject; +import org.h2.server.pg.PgServer; +import org.h2.table.Column; +import org.h2.table.MetaTable; +import org.h2.table.Table; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueSmallint; + +/** + * This class is responsible to build the pg_catalog tables. 
+ */ +public final class PgCatalogTable extends MetaTable { + + private static final int PG_AM = 0; + + private static final int PG_ATTRDEF = PG_AM + 1; + + private static final int PG_ATTRIBUTE = PG_ATTRDEF + 1; + + private static final int PG_AUTHID = PG_ATTRIBUTE + 1; + + private static final int PG_CLASS = PG_AUTHID + 1; + + private static final int PG_CONSTRAINT = PG_CLASS + 1; + + private static final int PG_DATABASE = PG_CONSTRAINT + 1; + + private static final int PG_DESCRIPTION = PG_DATABASE + 1; + + private static final int PG_GROUP = PG_DESCRIPTION + 1; + + private static final int PG_INDEX = PG_GROUP + 1; + + private static final int PG_INHERITS = PG_INDEX + 1; + + private static final int PG_NAMESPACE = PG_INHERITS + 1; + + private static final int PG_PROC = PG_NAMESPACE + 1; + + private static final int PG_ROLES = PG_PROC + 1; + + private static final int PG_SETTINGS = PG_ROLES + 1; + + private static final int PG_TABLESPACE = PG_SETTINGS + 1; + + private static final int PG_TRIGGER = PG_TABLESPACE + 1; + + private static final int PG_TYPE = PG_TRIGGER + 1; + + private static final int PG_USER = PG_TYPE + 1; + + /** + * The number of meta table types. Supported meta table types are + * {@code 0..META_TABLE_TYPE_COUNT - 1}. + */ + public static final int META_TABLE_TYPE_COUNT = PG_USER + 1; + + private static final Object[][] PG_EXTRA_TYPES = { + { 18, "char", 1, 0 }, + { 19, "name", 64, 18 }, + { 22, "int2vector", -1, 21 }, + { 24, "regproc", 4, 0 }, + { PgServer.PG_TYPE_INT2_ARRAY, "_int2", -1, PgServer.PG_TYPE_INT2 }, + { PgServer.PG_TYPE_INT4_ARRAY, "_int4", -1, PgServer.PG_TYPE_INT4 }, + { PgServer.PG_TYPE_VARCHAR_ARRAY, "_varchar", -1, PgServer.PG_TYPE_VARCHAR }, + { 2205, "regclass", 4, 0 }, + }; + + /** + * Create a new metadata table. 
+ * + * @param schema + * the schema + * @param id + * the object id + * @param type + * the meta table type + */ + public PgCatalogTable(Schema schema, int id, int type) { + super(schema, id, type); + Column[] cols; + switch (type) { + case PG_AM: + setMetaTableName("PG_AM"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("AMNAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_ATTRDEF: + setMetaTableName("PG_ATTRDEF"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ADSRC", TypeInfo.TYPE_INTEGER), // + column("ADRELID", TypeInfo.TYPE_INTEGER), // + column("ADNUM", TypeInfo.TYPE_INTEGER), // + column("ADBIN", TypeInfo.TYPE_VARCHAR), // pg_node_tree + }; + break; + case PG_ATTRIBUTE: + setMetaTableName("PG_ATTRIBUTE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ATTRELID", TypeInfo.TYPE_INTEGER), // + column("ATTNAME", TypeInfo.TYPE_VARCHAR), // + column("ATTTYPID", TypeInfo.TYPE_INTEGER), // + column("ATTLEN", TypeInfo.TYPE_INTEGER), // + column("ATTNUM", TypeInfo.TYPE_INTEGER), // + column("ATTTYPMOD", TypeInfo.TYPE_INTEGER), // + column("ATTNOTNULL", TypeInfo.TYPE_BOOLEAN), // + column("ATTISDROPPED", TypeInfo.TYPE_BOOLEAN), // + column("ATTHASDEF", TypeInfo.TYPE_BOOLEAN), // + }; + break; + case PG_AUTHID: + setMetaTableName("PG_AUTHID"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ROLNAME", TypeInfo.TYPE_VARCHAR), // + column("ROLSUPER", TypeInfo.TYPE_BOOLEAN), // + column("ROLINHERIT", TypeInfo.TYPE_BOOLEAN), // + column("ROLCREATEROLE", TypeInfo.TYPE_BOOLEAN), // + column("ROLCREATEDB", TypeInfo.TYPE_BOOLEAN), // + column("ROLCATUPDATE", TypeInfo.TYPE_BOOLEAN), // + column("ROLCANLOGIN", TypeInfo.TYPE_BOOLEAN), // + column("ROLCONNLIMIT", TypeInfo.TYPE_BOOLEAN), // + column("ROLPASSWORD", TypeInfo.TYPE_BOOLEAN), // + column("ROLVALIDUNTIL", TypeInfo.TYPE_TIMESTAMP_TZ), // + column("ROLCONFIG", 
TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // + }; + break; + case PG_CLASS: + setMetaTableName("PG_CLASS"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("RELNAME", TypeInfo.TYPE_VARCHAR), // + column("RELNAMESPACE", TypeInfo.TYPE_INTEGER), // + column("RELKIND", TypeInfo.TYPE_CHAR), // + column("RELAM", TypeInfo.TYPE_INTEGER), // + column("RELTUPLES", TypeInfo.TYPE_DOUBLE), // + column("RELTABLESPACE", TypeInfo.TYPE_INTEGER), // + column("RELPAGES", TypeInfo.TYPE_INTEGER), // + column("RELHASINDEX", TypeInfo.TYPE_BOOLEAN), // + column("RELHASRULES", TypeInfo.TYPE_BOOLEAN), // + column("RELHASOIDS", TypeInfo.TYPE_BOOLEAN), // + column("RELCHECKS", TypeInfo.TYPE_SMALLINT), // + column("RELTRIGGERS", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_CONSTRAINT: + setMetaTableName("PG_CONSTRAINT"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("CONNAME", TypeInfo.TYPE_VARCHAR), // + column("CONTYPE", TypeInfo.TYPE_VARCHAR), // + column("CONRELID", TypeInfo.TYPE_INTEGER), // + column("CONFRELID", TypeInfo.TYPE_INTEGER), // + column("CONKEY", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_SMALLINT)), // + }; + break; + case PG_DATABASE: + setMetaTableName("PG_DATABASE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("DATNAME", TypeInfo.TYPE_VARCHAR), // + column("ENCODING", TypeInfo.TYPE_INTEGER), // + column("DATLASTSYSOID", TypeInfo.TYPE_INTEGER), // + column("DATALLOWCONN", TypeInfo.TYPE_BOOLEAN), // + column("DATCONFIG", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // + column("DATACL", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // aclitem[] + column("DATDBA", TypeInfo.TYPE_INTEGER), // + column("DATTABLESPACE", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_DESCRIPTION: + setMetaTableName("PG_DESCRIPTION"); + cols = new Column[] { // + column("OBJOID", TypeInfo.TYPE_INTEGER), // + 
column("OBJSUBID", TypeInfo.TYPE_INTEGER), // + column("CLASSOID", TypeInfo.TYPE_INTEGER), // + column("DESCRIPTION", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_GROUP: + setMetaTableName("PG_GROUP"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("GRONAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_INDEX: + setMetaTableName("PG_INDEX"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("INDEXRELID", TypeInfo.TYPE_INTEGER), // + column("INDRELID", TypeInfo.TYPE_INTEGER), // + column("INDISCLUSTERED", TypeInfo.TYPE_BOOLEAN), // + column("INDISUNIQUE", TypeInfo.TYPE_BOOLEAN), // + column("INDISPRIMARY", TypeInfo.TYPE_BOOLEAN), // + column("INDEXPRS", TypeInfo.TYPE_VARCHAR), // + column("INDKEY", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_INTEGER)), // + column("INDPRED", TypeInfo.TYPE_VARCHAR), // pg_node_tree + }; + break; + case PG_INHERITS: + setMetaTableName("PG_INHERITS"); + cols = new Column[] { // + column("INHRELID", TypeInfo.TYPE_INTEGER), // + column("INHPARENT", TypeInfo.TYPE_INTEGER), // + column("INHSEQNO", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_NAMESPACE: + setMetaTableName("PG_NAMESPACE"); + cols = new Column[] { // + column("ID", TypeInfo.TYPE_INTEGER), // + column("NSPNAME", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_PROC: + setMetaTableName("PG_PROC"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("PRONAME", TypeInfo.TYPE_VARCHAR), // + column("PRORETTYPE", TypeInfo.TYPE_INTEGER), // + column("PROARGTYPES", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_INTEGER)), // + column("PRONAMESPACE", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_ROLES: + setMetaTableName("PG_ROLES"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("ROLNAME", TypeInfo.TYPE_VARCHAR), // + column("ROLSUPER", TypeInfo.TYPE_CHAR), // + column("ROLCREATEROLE", TypeInfo.TYPE_CHAR), // + 
column("ROLCREATEDB", TypeInfo.TYPE_CHAR), // + }; + break; + case PG_SETTINGS: + setMetaTableName("PG_SETTINGS"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("NAME", TypeInfo.TYPE_VARCHAR), // + column("SETTING", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_TABLESPACE: + setMetaTableName("PG_TABLESPACE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("SPCNAME", TypeInfo.TYPE_VARCHAR), // + column("SPCLOCATION", TypeInfo.TYPE_VARCHAR), // + column("SPCOWNER", TypeInfo.TYPE_INTEGER), // + column("SPCACL", TypeInfo.getTypeInfo(Value.ARRAY, -1L, 0, TypeInfo.TYPE_VARCHAR)), // ACLITEM[] + }; + break; + case PG_TRIGGER: + setMetaTableName("PG_TRIGGER"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("TGCONSTRRELID", TypeInfo.TYPE_INTEGER), // + column("TGFOID", TypeInfo.TYPE_INTEGER), // + column("TGARGS", TypeInfo.TYPE_INTEGER), // + column("TGNARGS", TypeInfo.TYPE_INTEGER), // + column("TGDEFERRABLE", TypeInfo.TYPE_BOOLEAN), // + column("TGINITDEFERRED", TypeInfo.TYPE_BOOLEAN), // + column("TGCONSTRNAME", TypeInfo.TYPE_VARCHAR), // + column("TGRELID", TypeInfo.TYPE_INTEGER), // + }; + break; + case PG_TYPE: + setMetaTableName("PG_TYPE"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + column("TYPNAME", TypeInfo.TYPE_VARCHAR), // + column("TYPNAMESPACE", TypeInfo.TYPE_INTEGER), // + column("TYPLEN", TypeInfo.TYPE_INTEGER), // + column("TYPTYPE", TypeInfo.TYPE_VARCHAR), // + column("TYPDELIM", TypeInfo.TYPE_VARCHAR), // + column("TYPRELID", TypeInfo.TYPE_INTEGER), // + column("TYPELEM", TypeInfo.TYPE_INTEGER), // + column("TYPBASETYPE", TypeInfo.TYPE_INTEGER), // + column("TYPTYPMOD", TypeInfo.TYPE_INTEGER), // + column("TYPNOTNULL", TypeInfo.TYPE_BOOLEAN), // + column("TYPINPUT", TypeInfo.TYPE_VARCHAR), // + }; + break; + case PG_USER: + setMetaTableName("PG_USER"); + cols = new Column[] { // + column("OID", TypeInfo.TYPE_INTEGER), // + 
column("USENAME", TypeInfo.TYPE_VARCHAR), // + column("USECREATEDB", TypeInfo.TYPE_BOOLEAN), // + column("USESUPER", TypeInfo.TYPE_BOOLEAN), // + }; + break; + default: + throw DbException.getInternalError("type=" + type); + } + setColumns(cols); + indexColumn = -1; + metaIndex = null; + } + + @Override + public ArrayList generateRows(SessionLocal session, SearchRow first, SearchRow last) { + ArrayList rows = Utils.newSmallArrayList(); + String catalog = database.getShortName(); + boolean admin = session.getUser().isAdmin(); + switch (type) { + case PG_AM: { + String[] am = { "btree", "hash" }; + for (int i = 0, l = am.length; i < l; i++) { + add(session, rows, + // OID + ValueInteger.get(i), + // AMNAME + am[i]); + } + break; + } + case PG_ATTRDEF: + break; + case PG_ATTRIBUTE: + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (!hideTable(table, session)) { + pgAttribute(session, rows, table); + } + } + } + for (Table table: session.getLocalTempTables()) { + if (!hideTable(table, session)) { + pgAttribute(session, rows, table); + } + } + break; + case PG_AUTHID: + break; + case PG_CLASS: + for (Schema schema : database.getAllSchemas()) { + for (Table table : schema.getAllTablesAndViews(session)) { + if (!hideTable(table, session)) { + pgClass(session, rows, table); + } + } + } + for (Table table: session.getLocalTempTables()) { + if (!hideTable(table, session)) { + pgClass(session, rows, table); + } + } + break; + case PG_CONSTRAINT: + pgConstraint(session, rows); + break; + case PG_DATABASE: { + int uid = Integer.MAX_VALUE; + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User && ((User) rightOwner).isAdmin()) { + int id = rightOwner.getId(); + if (id < uid) { + uid = id; + } + } + } + add(session, rows, + // OID + ValueInteger.get(100_001), + // DATNAME + catalog, + // ENCODING INT, + ValueInteger.get(6), // UTF-8 + // DATLASTSYSOID INT, + 
ValueInteger.get(100_000), + // DATALLOWCONN BOOLEAN, + ValueBoolean.TRUE, + // DATCONFIG ARRAY, -- TEXT[] + null, + // DATACL ARRAY, -- ACLITEM[] + null, + // DATDBA INT, + ValueInteger.get(uid), + // DATTABLESPACE INT + ValueInteger.get(0)); + break; + } + case PG_DESCRIPTION: + add(session, rows, + // OBJOID + ValueInteger.get(0), + // OBJSUBID + ValueInteger.get(0), + // CLASSOID + ValueInteger.get(-1), + // DESCRIPTION + catalog); + break; + case PG_GROUP: + // The next one returns no rows due to MS Access problem opening + // tables with primary key + case PG_INDEX: + case PG_INHERITS: + break; + case PG_NAMESPACE: + for (Schema schema : database.getAllSchemas()) { + add(session, rows, + // ID + ValueInteger.get(schema.getId()), + // NSPNAME + schema.getName()); + } + break; + case PG_PROC: + break; + case PG_ROLES: + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (admin || session.getUser() == rightOwner) { + String r = rightOwner instanceof User && ((User) rightOwner).isAdmin() ? 
"t" : "f"; + add(session, rows, + // OID + ValueInteger.get(rightOwner.getId()), + // ROLNAME + identifier(rightOwner.getName()), + // ROLSUPER + r, + // ROLCREATEROLE + r, + // ROLCREATEDB; + r); + } + } + break; + case PG_SETTINGS: { + String[][] settings = { { "autovacuum", "on" }, { "stats_start_collector", "on" }, + { "stats_row_level", "on" } }; + for (int i = 0, l = settings.length; i < l; i++) { + String[] setting = settings[i]; + add(session, rows, + // OID + ValueInteger.get(i), + // NAME + setting[0], + // SETTING + setting[1]); + } + break; + } + case PG_TABLESPACE: + add(session, rows, + // OID INTEGER + ValueInteger.get(0), + // SPCNAME + "main", + // SPCLOCATION + "?", + // SPCOWNER + ValueInteger.get(0), + // SPCACL + null); + break; + case PG_TRIGGER: + break; + case PG_TYPE: { + HashSet types = new HashSet<>(); + for (int i = 1, l = Value.TYPE_COUNT; i < l; i++) { + DataType t = DataType.getDataType(i); + if (t.type == Value.ARRAY) { + continue; + } + int pgType = PgServer.convertType(TypeInfo.getTypeInfo(t.type)); + if (pgType == PgServer.PG_TYPE_UNKNOWN || !types.add(pgType)) { + continue; + } + add(session, rows, + // OID + ValueInteger.get(pgType), + // TYPNAME + Value.getTypeName(t.type), + // TYPNAMESPACE + ValueInteger.get(Constants.PG_CATALOG_SCHEMA_ID), + // TYPLEN + ValueInteger.get(-1), + // TYPTYPE + "b", + // TYPDELIM + ",", + // TYPRELID + ValueInteger.get(0), + // TYPELEM + ValueInteger.get(0), + // TYPBASETYPE + ValueInteger.get(0), + // TYPTYPMOD + ValueInteger.get(-1), + // TYPNOTNULL + ValueBoolean.FALSE, + // TYPINPUT + null); + } + for (Object[] pgType : PG_EXTRA_TYPES) { + add(session, rows, + // OID + ValueInteger.get((int) pgType[0]), + // TYPNAME + pgType[1], + // TYPNAMESPACE + ValueInteger.get(Constants.PG_CATALOG_SCHEMA_ID), + // TYPLEN + ValueInteger.get((int) pgType[2]), + // TYPTYPE + "b", + // TYPDELIM + ",", + // TYPRELID + ValueInteger.get(0), + // TYPELEM + ValueInteger.get((int) pgType[3]), + // TYPBASETYPE + 
ValueInteger.get(0), + // TYPTYPMOD + ValueInteger.get(-1), + // TYPNOTNULL + ValueBoolean.FALSE, + // TYPINPUT + null); + } + break; + } + case PG_USER: + for (RightOwner rightOwner : database.getAllUsersAndRoles()) { + if (rightOwner instanceof User) { + User u = (User) rightOwner; + if (admin || session.getUser() == u) { + ValueBoolean r = ValueBoolean.get(u.isAdmin()); + add(session, rows, + // OID + ValueInteger.get(u.getId()), + // USENAME + identifier(u.getName()), + // USECREATEDB + r, + // USESUPER; + r); + } + } + } + break; + default: + throw DbException.getInternalError("type=" + type); + } + return rows; + + } + + private void pgAttribute(SessionLocal session, ArrayList rows, Table table) { + Column[] cols = table.getColumns(); + int tableId = table.getId(); + for (int i = 0; i < cols.length;) { + Column column = cols[i++]; + addAttribute(session, rows, tableId * 10_000 + i, tableId, table, column, i); + } + for (Index index : table.getIndexes()) { + if (index.getCreateSQL() == null) { + continue; + } + cols = index.getColumns(); + for (int i = 0; i < cols.length;) { + Column column = cols[i++]; + int indexId = index.getId(); + addAttribute(session, rows, 1_000_000 * indexId + tableId * 10_000 + i, indexId, table, column, + i); + } + } + } + + private void pgClass(SessionLocal session, ArrayList rows, Table table) { + ArrayList triggers = table.getTriggers(); + addClass(session, rows, table.getId(), table.getName(), table.getSchema().getId(), + table.isView() ? "v" : "r", false, triggers != null ? 
triggers.size() : 0); + ArrayList indexes = table.getIndexes(); + if (indexes != null) { + for (Index index : indexes) { + if (index.getCreateSQL() == null) { + continue; + } + addClass(session, rows, index.getId(), index.getName(), index.getSchema().getId(), "i", true, + 0); + } + } + } + + private void pgConstraint(SessionLocal session, ArrayList rows) { + for (Schema schema : database.getAllSchemasNoMeta()) { + for (Constraint constraint : schema.getAllConstraints()) { + Constraint.Type constraintType = constraint.getConstraintType(); + if (constraintType == Constraint.Type.DOMAIN) { + continue; + } + Table table = constraint.getTable(); + if (hideTable(table, session)) { + continue; + } + List conkey = new ArrayList<>(); + for (Column column : constraint.getReferencedColumns(table)) { + conkey.add(ValueSmallint.get((short) (column.getColumnId() + 1))); + } + Table refTable = constraint.getRefTable(); + add(session, + rows, + // OID + ValueInteger.get(constraint.getId()), + // CONNAME + constraint.getName(), + // CONTYPE + StringUtils.toLowerEnglish(constraintType.getSqlName().substring(0, 1)), + // CONRELID + ValueInteger.get(table.getId()), + // CONFRELID + ValueInteger.get(refTable != null && refTable != table + && !hideTable(refTable, session) ? table.getId() : 0), + // CONKEY + ValueArray.get(TypeInfo.TYPE_SMALLINT, conkey.toArray(Value.EMPTY_VALUES), null) + ); + } + } + } + + private void addAttribute(SessionLocal session, ArrayList rows, int id, int relId, Table table, Column column, + int ordinal) { + long precision = column.getType().getPrecision(); + add(session, rows, + // OID + ValueInteger.get(id), + // ATTRELID + ValueInteger.get(relId), + // ATTNAME + column.getName(), + // ATTTYPID + ValueInteger.get(PgServer.convertType(column.getType())), + // ATTLEN + ValueInteger.get(precision > 255 ? 
-1 : (int) precision), + // ATTNUM + ValueInteger.get(ordinal), + // ATTTYPMOD + ValueInteger.get(-1), + // ATTNOTNULL + ValueBoolean.get(!column.isNullable()), + // ATTISDROPPED + ValueBoolean.FALSE, + // ATTHASDEF + ValueBoolean.FALSE); + } + + private void addClass(SessionLocal session, ArrayList rows, int id, String name, int schema, String kind, + boolean index, int triggers) { + add(session, rows, + // OID + ValueInteger.get(id), + // RELNAME + name, + // RELNAMESPACE + ValueInteger.get(schema), + // RELKIND + kind, + // RELAM + ValueInteger.get(0), + // RELTUPLES + ValueDouble.get(0d), + // RELTABLESPACE + ValueInteger.get(0), + // RELPAGES + ValueInteger.get(0), + // RELHASINDEX + ValueBoolean.get(index), + // RELHASRULES + ValueBoolean.FALSE, + // RELHASOIDS + ValueBoolean.FALSE, + // RELCHECKS + ValueSmallint.get((short) 0), + // RELTRIGGERS + ValueInteger.get(triggers)); + } + + @Override + public long getMaxDataModificationId() { + return database.getModificationDataId(); + } + +} diff --git a/h2/src/main/org/h2/mode/Regclass.java b/h2/src/main/org/h2/mode/Regclass.java new file mode 100644 index 0000000000..e3fc92303b --- /dev/null +++ b/h2/src/main/org/h2/mode/Regclass.java @@ -0,0 +1,82 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mode; + +import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.Operation1; +import org.h2.expression.ValueExpression; +import org.h2.index.Index; +import org.h2.message.DbException; +import org.h2.schema.Schema; +import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; + +/** + * A ::regclass expression. 
+ */ +public final class Regclass extends Operation1 { + + public Regclass(Expression arg) { + super(arg); + } + + @Override + public Value getValue(SessionLocal session) { + Value value = arg.getValue(session); + if (value == ValueNull.INSTANCE) { + return ValueNull.INSTANCE; + } + int valueType = value.getValueType(); + if (valueType >= Value.TINYINT && valueType <= Value.INTEGER) { + return value.convertToInt(null); + } + if (valueType == Value.BIGINT) { + return ValueInteger.get((int) value.getLong()); + } + String name = value.getString(); + for (Schema schema : session.getDatabase().getAllSchemas()) { + Table table = schema.findTableOrView(session, name); + if (table != null && !table.isHidden()) { + return ValueInteger.get(table.getId()); + } + Index index = schema.findIndex(session, name); + if (index != null && index.getCreateSQL() != null) { + return ValueInteger.get(index.getId()); + } + } + throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, name); + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_INTEGER; + } + + @Override + public Expression optimize(SessionLocal session) { + arg = arg.optimize(session); + if (arg.isConstant()) { + return ValueExpression.get(getValue(session)); + } + return this; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + return arg.getSQL(builder, sqlFlags, AUTO_PARENTHESES).append("::REGCLASS"); + } + + @Override + public int getCost() { + return arg.getCost() + 100; + } + +} diff --git a/h2/src/main/org/h2/mode/ToDateParser.java b/h2/src/main/org/h2/mode/ToDateParser.java new file mode 100644 index 0000000000..b789555175 --- /dev/null +++ b/h2/src/main/org/h2/mode/ToDateParser.java @@ -0,0 +1,376 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Daniel Gredler + */ +package org.h2.mode; + +import static java.lang.String.format; + +import java.util.List; + +import org.h2.engine.SessionLocal; +import org.h2.util.DateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + * Emulates Oracle's TO_DATE function.
    + * This class holds and handles the input data form the TO_DATE-method + */ +public final class ToDateParser { + + private final SessionLocal session; + + private final String unmodifiedInputStr; + private final String unmodifiedFormatStr; + private final ConfigParam functionName; + private String inputStr; + private String formatStr; + + private boolean doyValid = false, absoluteDayValid = false, + hour12Valid = false, + timeZoneHMValid = false; + + private boolean bc; + + private long absoluteDay; + + private int year, month, day = 1; + + private int dayOfYear; + + private int hour, minute, second, nanos; + + private int hour12; + + private boolean isAM = true; + + private TimeZoneProvider timeZone; + + private int timeZoneHour, timeZoneMinute; + + private int currentYear, currentMonth; + + /** + * @param session the database session + * @param functionName one of [TO_DATE, TO_TIMESTAMP] (both share the same + * code) + * @param input the input date with the date-time info + * @param format the format of date-time info + */ + private ToDateParser(SessionLocal session, ConfigParam functionName, String input, String format) { + this.session = session; + this.functionName = functionName; + inputStr = input.trim(); + // Keep a copy + unmodifiedInputStr = inputStr; + if (format == null || format.isEmpty()) { + // default Oracle format. 
+ formatStr = functionName.getDefaultFormatStr(); + } else { + formatStr = format.trim(); + } + // Keep a copy + unmodifiedFormatStr = formatStr; + } + + private static ToDateParser getTimestampParser(SessionLocal session, ConfigParam param, String input, + String format) { + ToDateParser result = new ToDateParser(session, param, input, format); + parse(result); + return result; + } + + private ValueTimestamp getResultingValue() { + long dateValue; + if (absoluteDayValid) { + dateValue = DateTimeUtils.dateValueFromAbsoluteDay(absoluteDay); + } else { + int year = this.year; + if (year == 0) { + year = getCurrentYear(); + } + if (bc) { + year = 1 - year; + } + if (doyValid) { + dateValue = DateTimeUtils.dateValueFromAbsoluteDay( + DateTimeUtils.absoluteDayFromYear(year) + dayOfYear - 1); + } else { + int month = this.month; + if (month == 0) { + // Oracle uses current month as default + month = getCurrentMonth(); + } + dateValue = DateTimeUtils.dateValue(year, month, day); + } + } + int hour; + if (hour12Valid) { + hour = hour12 % 12; + if (!isAM) { + hour += 12; + } + } else { + hour = this.hour; + } + long timeNanos = ((((hour * 60) + minute) * 60) + second) * 1_000_000_000L + nanos; + return ValueTimestamp.fromDateValueAndNanos(dateValue, timeNanos); + } + + private ValueTimestampTimeZone getResultingValueWithTimeZone() { + ValueTimestamp ts = getResultingValue(); + long dateValue = ts.getDateValue(), timeNanos = ts.getTimeNanos(); + int offset; + if (timeZoneHMValid) { + offset = (timeZoneHour * 60 + ((timeZoneHour >= 0) ? timeZoneMinute : -timeZoneMinute)) * 60; + } else { + offset = (timeZone != null ? 
timeZone : session.currentTimeZone()) + .getTimeZoneOffsetLocal(dateValue, timeNanos); + } + return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, ts.getTimeNanos(), offset); + } + + String getInputStr() { + return inputStr; + } + + String getFormatStr() { + return formatStr; + } + + String getFunctionName() { + return functionName.name(); + } + + private void queryCurrentYearAndMonth() { + long dateValue = session.currentTimestamp().getDateValue(); + currentYear = DateTimeUtils.yearFromDateValue(dateValue); + currentMonth = DateTimeUtils.monthFromDateValue(dateValue); + } + + int getCurrentYear() { + if (currentYear == 0) { + queryCurrentYearAndMonth(); + } + return currentYear; + } + + int getCurrentMonth() { + if (currentMonth == 0) { + queryCurrentYearAndMonth(); + } + return currentMonth; + } + + void setAbsoluteDay(int absoluteDay) { + doyValid = false; + absoluteDayValid = true; + this.absoluteDay = absoluteDay; + } + + void setBC(boolean bc) { + doyValid = false; + absoluteDayValid = false; + this.bc = bc; + } + + void setYear(int year) { + doyValid = false; + absoluteDayValid = false; + this.year = year; + } + + void setMonth(int month) { + doyValid = false; + absoluteDayValid = false; + this.month = month; + if (year == 0) { + year = 1970; + } + } + + void setDay(int day) { + doyValid = false; + absoluteDayValid = false; + this.day = day; + if (year == 0) { + year = 1970; + } + } + + void setDayOfYear(int dayOfYear) { + doyValid = true; + absoluteDayValid = false; + this.dayOfYear = dayOfYear; + } + + void setHour(int hour) { + hour12Valid = false; + this.hour = hour; + } + + void setMinute(int minute) { + this.minute = minute; + } + + void setSecond(int second) { + this.second = second; + } + + void setNanos(int nanos) { + this.nanos = nanos; + } + + void setAmPm(boolean isAM) { + hour12Valid = true; + this.isAM = isAM; + } + + void setHour12(int hour12) { + hour12Valid = true; + this.hour12 = hour12; + } + + void setTimeZone(TimeZoneProvider 
timeZone) { + timeZoneHMValid = false; + this.timeZone = timeZone; + } + + void setTimeZoneHour(int timeZoneHour) { + timeZoneHMValid = true; + this.timeZoneHour = timeZoneHour; + } + + void setTimeZoneMinute(int timeZoneMinute) { + timeZoneHMValid = true; + this.timeZoneMinute = timeZoneMinute; + } + + private boolean hasToParseData() { + return !formatStr.isEmpty(); + } + + private void removeFirstChar() { + if (!formatStr.isEmpty()) { + formatStr = formatStr.substring(1); + } + if (!inputStr.isEmpty()) { + inputStr = inputStr.substring(1); + } + } + + private static ToDateParser parse(ToDateParser p) { + while (p.hasToParseData()) { + List tokenList = + ToDateTokenizer.FormatTokenEnum.getTokensInQuestion(p.getFormatStr()); + if (tokenList == null) { + p.removeFirstChar(); + continue; + } + boolean foundAnToken = false; + for (ToDateTokenizer.FormatTokenEnum token : tokenList) { + if (token.parseFormatStrWithToken(p)) { + foundAnToken = true; + break; + } + } + if (!foundAnToken) { + p.removeFirstChar(); + } + } + return p; + } + + /** + * Remove a token from a string. + * + * @param inputFragmentStr the input fragment + * @param formatFragment the format fragment + */ + void remove(String inputFragmentStr, String formatFragment) { + if (inputFragmentStr != null && inputStr.length() >= inputFragmentStr.length()) { + inputStr = inputStr.substring(inputFragmentStr.length()); + } + if (formatFragment != null && formatStr.length() >= formatFragment.length()) { + formatStr = formatStr.substring(formatFragment.length()); + } + } + + @Override + public String toString() { + int inputStrLen = inputStr.length(); + int orgInputLen = unmodifiedInputStr.length(); + int currentInputPos = orgInputLen - inputStrLen; + int restInputLen = inputStrLen <= 0 ? 
inputStrLen : inputStrLen - 1; + + int orgFormatLen = unmodifiedFormatStr.length(); + int currentFormatPos = orgFormatLen - formatStr.length(); + + return format("\n %s('%s', '%s')", functionName, unmodifiedInputStr, unmodifiedFormatStr) + + format("\n %s^%s , %s^ <-- Parsing failed at this point", + format("%" + (functionName.name().length() + currentInputPos) + "s", ""), + restInputLen <= 0 ? "" : format("%" + restInputLen + "s", ""), + currentFormatPos <= 0 ? "" : format("%" + currentFormatPos + "s", "")); + } + + /** + * Parse a string as a timestamp with the given format. + * + * @param session the database session + * @param input the input + * @param format the format + * @return the timestamp + */ + public static ValueTimestamp toTimestamp(SessionLocal session, String input, String format) { + ToDateParser parser = getTimestampParser(session, ConfigParam.TO_TIMESTAMP, input, format); + return parser.getResultingValue(); + } + + /** + * Parse a string as a timestamp with the given format. + * + * @param session the database session + * @param input the input + * @param format the format + * @return the timestamp + */ + public static ValueTimestampTimeZone toTimestampTz(SessionLocal session, String input, String format) { + ToDateParser parser = getTimestampParser(session, ConfigParam.TO_TIMESTAMP_TZ, input, format); + return parser.getResultingValueWithTimeZone(); + } + + /** + * Parse a string as a date with the given format. + * + * @param session the database session + * @param input the input + * @param format the format + * @return the date as a timestamp + */ + public static ValueTimestamp toDate(SessionLocal session, String input, String format) { + ToDateParser parser = getTimestampParser(session, ConfigParam.TO_DATE, input, format); + return parser.getResultingValue(); + } + + /** + * The configuration of the date parser. 
+ */ + private enum ConfigParam { + TO_DATE("DD MON YYYY"), + TO_TIMESTAMP("DD MON YYYY HH:MI:SS"), + TO_TIMESTAMP_TZ("DD MON YYYY HH:MI:SS TZR"); + + private final String defaultFormatStr; + ConfigParam(String defaultFormatStr) { + this.defaultFormatStr = defaultFormatStr; + } + String getDefaultFormatStr() { + return defaultFormatStr; + } + + } + +} diff --git a/h2/src/main/org/h2/mode/ToDateTokenizer.java b/h2/src/main/org/h2/mode/ToDateTokenizer.java new file mode 100644 index 0000000000..1cf83463e5 --- /dev/null +++ b/h2/src/main/org/h2/mode/ToDateTokenizer.java @@ -0,0 +1,717 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Daniel Gredler + */ +package org.h2.mode; + +import static java.lang.String.format; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.h2.api.ErrorCode; +import org.h2.expression.function.ToCharFunction; +import org.h2.message.DbException; +import org.h2.util.TimeZoneProvider; + +/** + * Emulates Oracle's TO_DATE function. This class knows all about the + * TO_DATE-format conventions and how to parse the corresponding data. + */ +final class ToDateTokenizer { + + /** + * The pattern for a number. + */ + static final Pattern PATTERN_INLINE = Pattern.compile("(\"[^\"]*\")"); + + /** + * The pattern for a number. + */ + static final Pattern PATTERN_NUMBER = Pattern.compile("^([+-]?[0-9]+)"); + + /** + * The pattern for four digits (typically a year). + */ + static final Pattern PATTERN_FOUR_DIGITS = Pattern + .compile("^([+-]?[0-9]{4})"); + + /** + * The pattern 2-4 digits (e.g. for RRRR). + */ + static final Pattern PATTERN_TWO_TO_FOUR_DIGITS = Pattern + .compile("^([+-]?[0-9]{2,4})"); + /** + * The pattern for three digits. 
+ */ + static final Pattern PATTERN_THREE_DIGITS = Pattern + .compile("^([+-]?[0-9]{3})"); + + /** + * The pattern for two digits. + */ + static final Pattern PATTERN_TWO_DIGITS = Pattern + .compile("^([+-]?[0-9]{2})"); + + /** + * The pattern for one or two digits. + */ + static final Pattern PATTERN_TWO_DIGITS_OR_LESS = Pattern + .compile("^([+-]?[0-9][0-9]?)"); + + /** + * The pattern for one digit. + */ + static final Pattern PATTERN_ONE_DIGIT = Pattern.compile("^([+-]?[0-9])"); + + /** + * The pattern for a fraction (of a second for example). + */ + static final Pattern PATTERN_FF = Pattern.compile("^(FF[0-9]?)", + Pattern.CASE_INSENSITIVE); + + /** + * The pattern for "am" or "pm". + */ + static final Pattern PATTERN_AM_PM = Pattern + .compile("^(AM|A\\.M\\.|PM|P\\.M\\.)", Pattern.CASE_INSENSITIVE); + + /** + * The pattern for "bc" or "ad". + */ + static final Pattern PATTERN_BC_AD = Pattern + .compile("^(BC|B\\.C\\.|AD|A\\.D\\.)", Pattern.CASE_INSENSITIVE); + + /** + * The parslet for a year. + */ + static final YearParslet PARSLET_YEAR = new YearParslet(); + + /** + * The parslet for a month. + */ + static final MonthParslet PARSLET_MONTH = new MonthParslet(); + + /** + * The parslet for a day. + */ + static final DayParslet PARSLET_DAY = new DayParslet(); + + /** + * The parslet for time. + */ + static final TimeParslet PARSLET_TIME = new TimeParslet(); + + /** + * The inline parslet. E.g. 'YYYY-MM-DD"T"HH24:MI:SS"Z"' where "T" and "Z" + * are inlined + */ + static final InlineParslet PARSLET_INLINE = new InlineParslet(); + + /** + * Interface of the classes that can parse a specialized small bit of the + * TO_DATE format-string. + */ + interface ToDateParslet { + + /** + * Parse a date part. 
+ * + * @param params the parameters that contains the string + * @param formatTokenEnum the format + * @param formatTokenStr the format string + */ + void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr); + } + + /** + * Parslet responsible for parsing year parameter + */ + static class YearParslet implements ToDateParslet { + + @Override + public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr) { + String inputFragmentStr = null; + int dateNr = 0; + switch (formatTokenEnum) { + case SYYYY: + case YYYY: + inputFragmentStr = matchStringOrThrow(PATTERN_FOUR_DIGITS, + params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust + if (dateNr == 0) { + throwException(params, "Year may not be zero"); + } + params.setYear(dateNr >= 0 ? dateNr : dateNr + 1); + break; + case YYY: + inputFragmentStr = matchStringOrThrow(PATTERN_THREE_DIGITS, + params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + if (dateNr > 999) { + throwException(params, "Year may have only three digits with specified format"); + } + dateNr += (params.getCurrentYear() / 1_000) * 1_000; + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust + params.setYear(dateNr >= 0 ? 
dateNr : dateNr + 1); + break; + case RRRR: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_TO_FOUR_DIGITS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + if (inputFragmentStr.length() < 4) { + if (dateNr < 50) { + dateNr += 2000; + } else if (dateNr < 100) { + dateNr += 1900; + } + } + if (dateNr == 0) { + throwException(params, "Year may not be zero"); + } + params.setYear(dateNr); + break; + case RR: + int cc = params.getCurrentYear() / 100; + inputFragmentStr = matchStringOrThrow(PATTERN_TWO_DIGITS, + params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr) + cc * 100; + params.setYear(dateNr); + break; + case EE /* NOT supported yet */: + throwException(params, format("token '%s' not supported yet.", + formatTokenEnum.name())); + break; + case E /* NOT supported yet */: + throwException(params, format("token '%s' not supported yet.", + formatTokenEnum.name())); + break; + case YY: + inputFragmentStr = matchStringOrThrow(PATTERN_TWO_DIGITS, + params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + if (dateNr > 99) { + throwException(params, "Year may have only two digits with specified format"); + } + dateNr += (params.getCurrentYear() / 100) * 100; + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust + params.setYear(dateNr >= 0 ? dateNr : dateNr + 1); + break; + case SCC: + case CC: + inputFragmentStr = matchStringOrThrow(PATTERN_TWO_DIGITS, + params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr) * 100; + params.setYear(dateNr); + break; + case Y: + inputFragmentStr = matchStringOrThrow(PATTERN_ONE_DIGIT, params, + formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + if (dateNr > 9) { + throwException(params, "Year may have only two digits with specified format"); + } + dateNr += (params.getCurrentYear() / 10) * 10; + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... 
so we adjust + params.setYear(dateNr >= 0 ? dateNr : dateNr + 1); + break; + case BC_AD: + inputFragmentStr = matchStringOrThrow(PATTERN_BC_AD, params, + formatTokenEnum); + params.setBC(inputFragmentStr.toUpperCase().startsWith("B")); + break; + default: + throw new IllegalArgumentException(format( + "%s: Internal Error. Unhandled case: %s", + this.getClass().getSimpleName(), formatTokenEnum)); + } + params.remove(inputFragmentStr, formatTokenStr); + } + } + + /** + * Parslet responsible for parsing month parameter + */ + static class MonthParslet implements ToDateParslet { + private static final String[] ROMAN_MONTH = { "I", "II", "III", "IV", + "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII" }; + + @Override + public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr) { + String s = params.getInputStr(); + String inputFragmentStr = null; + int dateNr = 0; + switch (formatTokenEnum) { + case MONTH: + inputFragmentStr = setByName(params, ToCharFunction.MONTHS); + break; + case Q /* NOT supported yet */: + throwException(params, format("token '%s' not supported yet.", + formatTokenEnum.name())); + break; + case MON: + inputFragmentStr = setByName(params, ToCharFunction.SHORT_MONTHS); + break; + case MM: + // Note: In Calendar Month go from 0 - 11 + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setMonth(dateNr); + break; + case RM: + dateNr = 0; + for (String monthName : ROMAN_MONTH) { + dateNr++; + int len = monthName.length(); + if (s.length() >= len && monthName + .equalsIgnoreCase(s.substring(0, len))) { + params.setMonth(dateNr + 1); + inputFragmentStr = monthName; + break; + } + } + if (inputFragmentStr == null || inputFragmentStr.isEmpty()) { + throwException(params, + format("Issue happened when parsing token '%s'. 
" + + "Expected one of: %s", + formatTokenEnum.name(), + Arrays.toString(ROMAN_MONTH))); + } + break; + default: + throw new IllegalArgumentException(format( + "%s: Internal Error. Unhandled case: %s", + this.getClass().getSimpleName(), formatTokenEnum)); + } + params.remove(inputFragmentStr, formatTokenStr); + } + } + + /** + * Parslet responsible for parsing day parameter + */ + static class DayParslet implements ToDateParslet { + @Override + public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr) { + String inputFragmentStr = null; + int dateNr = 0; + switch (formatTokenEnum) { + case DDD: + inputFragmentStr = matchStringOrThrow(PATTERN_NUMBER, params, + formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setDayOfYear(dateNr); + break; + case DD: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setDay(dateNr); + break; + case D: + inputFragmentStr = matchStringOrThrow(PATTERN_ONE_DIGIT, params, + formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setDay(dateNr); + break; + case DAY: + inputFragmentStr = setByName(params, ToCharFunction.WEEKDAYS); + break; + case DY: + inputFragmentStr = setByName(params, ToCharFunction.SHORT_WEEKDAYS); + break; + case J: + inputFragmentStr = matchStringOrThrow(PATTERN_NUMBER, params, + formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setAbsoluteDay(dateNr + ToCharFunction.JULIAN_EPOCH); + break; + default: + throw new IllegalArgumentException(format( + "%s: Internal Error. 
Unhandled case: %s", + this.getClass().getSimpleName(), formatTokenEnum)); + } + params.remove(inputFragmentStr, formatTokenStr); + } + } + + /** + * Parslet responsible for parsing time parameter + */ + static class TimeParslet implements ToDateParslet { + + @Override + public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr) { + String inputFragmentStr = null; + int dateNr = 0; + switch (formatTokenEnum) { + case HH24: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setHour(dateNr); + break; + case HH12: + case HH: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setHour12(dateNr); + break; + case MI: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setMinute(dateNr); + break; + case SS: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setSecond(dateNr); + break; + case SSSSS: { + inputFragmentStr = matchStringOrThrow(PATTERN_NUMBER, params, + formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + int second = dateNr % 60; + dateNr /= 60; + int minute = dateNr % 60; + dateNr /= 60; + int hour = dateNr % 24; + params.setHour(hour); + params.setMinute(minute); + params.setSecond(second); + break; + } + case FF: + inputFragmentStr = matchStringOrThrow(PATTERN_NUMBER, params, + formatTokenEnum); + String paddedRightNrStr = format("%-9s", inputFragmentStr) + .replace(' ', '0'); + paddedRightNrStr = paddedRightNrStr.substring(0, 9); + double nineDigits = Double.parseDouble(paddedRightNrStr); + params.setNanos((int) nineDigits); + break; + case AM_PM: + inputFragmentStr = matchStringOrThrow(PATTERN_AM_PM, 
 params, + formatTokenEnum); + if (inputFragmentStr.toUpperCase().startsWith("A")) { + params.setAmPm(true); + } else { + params.setAmPm(false); + } + break; + case TZH: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setTimeZoneHour(dateNr); + break; + case TZM: + inputFragmentStr = matchStringOrThrow( + PATTERN_TWO_DIGITS_OR_LESS, params, formatTokenEnum); + dateNr = Integer.parseInt(inputFragmentStr); + params.setTimeZoneMinute(dateNr); + break; + case TZR: + case TZD: + String tzName = params.getInputStr(); + params.setTimeZone(TimeZoneProvider.ofId(tzName)); + inputFragmentStr = tzName; + break; + default: + throw new IllegalArgumentException(format( + "%s: Internal Error. Unhandled case: %s", + this.getClass().getSimpleName(), formatTokenEnum)); + } + params.remove(inputFragmentStr, formatTokenStr); + } + } + + /** + * Parslet responsible for parsing inline text parameter + */ + static class InlineParslet implements ToDateParslet { + @Override + public void parse(ToDateParser params, FormatTokenEnum formatTokenEnum, + String formatTokenStr) { + String inputFragmentStr = null; + switch (formatTokenEnum) { + case INLINE: + inputFragmentStr = formatTokenStr.replace("\"", ""); + break; + default: + throw new IllegalArgumentException(format( + "%s: Internal Error. Unhandled case: %s", + this.getClass().getSimpleName(), formatTokenEnum)); + } + params.remove(inputFragmentStr, formatTokenStr); + } + + } + + /** + * Match the pattern, or if not possible throw an exception. 
+ * + * @param p the pattern + * @param params the parameters with the input string + * @param aEnum the pattern name + * @return the matched value + */ + static String matchStringOrThrow(Pattern p, ToDateParser params, + Enum aEnum) { + String s = params.getInputStr(); + Matcher matcher = p.matcher(s); + if (!matcher.find()) { + throwException(params, format( + "Issue happened when parsing token '%s'", aEnum.name())); + } + return matcher.group(1); + } + + /** + * Set the given field in the calendar. + * + * @param params the parameters with the input string + * @param field the field to set + * @return the matched value + */ + static String setByName(ToDateParser params, int field) { + String inputFragmentStr = null; + String s = params.getInputStr(); + String[] values = ToCharFunction.getDateNames(field); + for (int i = 0; i < values.length; i++) { + String dayName = values[i]; + if (dayName == null) { + continue; + } + int len = dayName.length(); + if (dayName.equalsIgnoreCase(s.substring(0, len))) { + switch (field) { + case ToCharFunction.MONTHS: + case ToCharFunction.SHORT_MONTHS: + params.setMonth(i + 1); + break; + case ToCharFunction.WEEKDAYS: + case ToCharFunction.SHORT_WEEKDAYS: + // TODO + break; + default: + throw new IllegalArgumentException(); + } + inputFragmentStr = dayName; + break; + } + } + if (inputFragmentStr == null || inputFragmentStr.isEmpty()) { + throwException(params, format( + "Tried to parse one of '%s' but failed (may be an internal error?)", + Arrays.toString(values))); + } + return inputFragmentStr; + } + + /** + * Throw a parse exception. + * + * @param params the parameters with the input string + * @param errorStr the error string + */ + static void throwException(ToDateParser params, String errorStr) { + throw DbException.get(ErrorCode.INVALID_TO_DATE_FORMAT, + params.getFunctionName(), + format(" %s. Details: %s", errorStr, params)); + } + + /** + * The format tokens. 
+ */ + public enum FormatTokenEnum { + // 4-digit year + YYYY(PARSLET_YEAR), + // 4-digit year with sign (- = B.C.) + SYYYY(PARSLET_YEAR), + // 3-digit year + YYY(PARSLET_YEAR), + // 2-digit year + YY(PARSLET_YEAR), + // Two-digit century with sign (- = B.C.) + SCC(PARSLET_YEAR), + // Two-digit century. + CC(PARSLET_YEAR), + // 2-digit -> 4-digit year 0-49 -> 20xx , 50-99 -> 19xx + RRRR(PARSLET_YEAR), + // last 2-digit of the year using "current" century value. + RR(PARSLET_YEAR), + // Meridian indicator + BC_AD(PARSLET_YEAR, PATTERN_BC_AD), + // Full Name of month + MONTH(PARSLET_MONTH), + // Abbreviated name of month. + MON(PARSLET_MONTH), + // Month (01-12; JAN = 01). + MM(PARSLET_MONTH), + // Roman numeral month (I-XII; JAN = I). + RM(PARSLET_MONTH), + // Day of year (1-366). + DDD(PARSLET_DAY), + // Name of day. + DAY(PARSLET_DAY), + // Day of month (1-31). + DD(PARSLET_DAY), + // Abbreviated name of day. + DY(PARSLET_DAY), HH24(PARSLET_TIME), HH12(PARSLET_TIME), + // Hour of day (1-12). + HH(PARSLET_TIME), + // Min + MI(PARSLET_TIME), + // Seconds past midnight (0-86399) + SSSSS(PARSLET_TIME), SS(PARSLET_TIME), + // Fractional seconds + FF(PARSLET_TIME, PATTERN_FF), + // Time zone hour. + TZH(PARSLET_TIME), + // Time zone minute. + TZM(PARSLET_TIME), + // Time zone region ID + TZR(PARSLET_TIME), + // Daylight savings information. Example: + // PST (for US/Pacific standard time); + TZD(PARSLET_TIME), + // Meridian indicator + AM_PM(PARSLET_TIME, PATTERN_AM_PM), + // NOT supported yet - + // Full era name (Japanese Imperial, ROC Official, + // and Thai Buddha calendars). + EE(PARSLET_YEAR), + // NOT supported yet - + // Abbreviated era name (Japanese Imperial, + // ROC Official, and Thai Buddha calendars). + E(PARSLET_YEAR), Y(PARSLET_YEAR), + // Quarter of year (1, 2, 3, 4; JAN-MAR = 1). + Q(PARSLET_MONTH), + // Day of week (1-7). + D(PARSLET_DAY), + // NOT supported yet - + // Julian day; the number of days since Jan 1, 4712 BC. 
+ J(PARSLET_DAY), + // Inline text e.g. to_date('2017-04-21T00:00:00Z', + // 'YYYY-MM-DD"T"HH24:MI:SS"Z"') + // where "T" and "Z" are inlined + INLINE(PARSLET_INLINE, PATTERN_INLINE); + + private static final List INLINE_LIST = Collections.singletonList(INLINE); + + private static List[] TOKENS; + private final ToDateParslet toDateParslet; + private final Pattern patternToUse; + + /** + * Construct a format token. + * + * @param toDateParslet the date parslet + * @param patternToUse the pattern + */ + FormatTokenEnum(ToDateParslet toDateParslet, Pattern patternToUse) { + this.toDateParslet = toDateParslet; + this.patternToUse = patternToUse; + } + + /** + * Construct a format token. + * + * @param toDateParslet the date parslet + */ + FormatTokenEnum(ToDateParslet toDateParslet) { + this.toDateParslet = toDateParslet; + patternToUse = Pattern.compile(format("^(%s)", name()), + Pattern.CASE_INSENSITIVE); + } + + /** + * Optimization: Only return a list of {@link FormatTokenEnum} that + * share the same 1st char using the 1st char of the 'to parse' + * formatStr. Or return {@code null} if no match. 
+ * + * @param formatStr the format string + * @return the list of tokens, or {@code null} + */ + static List getTokensInQuestion(String formatStr) { + if (formatStr != null && !formatStr.isEmpty()) { + char key = Character.toUpperCase(formatStr.charAt(0)); + if (key >= 'A' && key <= 'Y') { + List[] tokens = TOKENS; + if (tokens == null) { + tokens = initTokens(); + } + return tokens[key - 'A']; + } else if (key == '"') { + return INLINE_LIST; + } + } + return null; + } + + @SuppressWarnings("unchecked") + private static List[] initTokens() { + List[] tokens = new List[25]; + for (FormatTokenEnum token : FormatTokenEnum.values()) { + String name = token.name(); + if (name.indexOf('_') >= 0) { + for (String tokenLets : name.split("_")) { + putToCache(tokens, token, tokenLets); + } + } else { + putToCache(tokens, token, name); + } + } + return TOKENS = tokens; + } + + private static void putToCache(List[] cache, FormatTokenEnum token, String name) { + int idx = Character.toUpperCase(name.charAt(0)) - 'A'; + List l = cache[idx]; + if (l == null) { + l = new ArrayList<>(1); + cache[idx] = l; + } + l.add(token); + } + + /** + * Parse the format-string with passed token of {@link FormatTokenEnum}. + * If token matches return true, otherwise false. + * + * @param params the parameters + * @return true if it matches + */ + boolean parseFormatStrWithToken(ToDateParser params) { + Matcher matcher = patternToUse.matcher(params.getFormatStr()); + boolean foundToken = matcher.find(); + if (foundToken) { + String formatTokenStr = matcher.group(1); + toDateParslet.parse(params, this, formatTokenStr); + } + return foundToken; + } + } + + private ToDateTokenizer() { + } + +} diff --git a/h2/src/main/org/h2/mode/package.html b/h2/src/main/org/h2/mode/package.html new file mode 100644 index 0000000000..b1194fe11f --- /dev/null +++ b/h2/src/main/org/h2/mode/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Utility classes for compatibility with other database, for example MySQL. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/mvstore/Chunk.java b/h2/src/main/org/h2/mvstore/Chunk.java index 83608aa7f1..c6da22f2c0 100644 --- a/h2/src/main/org/h2/mvstore/Chunk.java +++ b/h2/src/main/org/h2/mvstore/Chunk.java @@ -1,21 +1,29 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; import java.nio.ByteBuffer; -import java.util.HashMap; +import java.nio.charset.StandardCharsets; +import java.util.BitSet; +import java.util.Comparator; +import java.util.Map; + +import org.h2.util.StringUtils; /** * A chunk of data, containing one or multiple pages. *

    - * Chunks are page aligned (each page is usually 4096 bytes). + * Minimum chunk size is usually 4096 bytes, and it grows in those fixed increments (blocks). + * Chunk's length and it's position in the underlying filestore + * are multiples of that increment (block size), + * therefore they both are measured in blocks, instead of bytes. * There are at most 67 million (2^26) chunks, - * each chunk is at most 2 GB large. + * and each chunk is at most 2 GB large. */ -public class Chunk { +public final class Chunk { /** * The maximum chunk id. @@ -34,6 +42,25 @@ public class Chunk { */ static final int FOOTER_LENGTH = 128; + private static final String ATTR_CHUNK = "chunk"; + private static final String ATTR_BLOCK = "block"; + private static final String ATTR_LEN = "len"; + private static final String ATTR_MAP = "map"; + private static final String ATTR_MAX = "max"; + private static final String ATTR_NEXT = "next"; + private static final String ATTR_PAGES = "pages"; + private static final String ATTR_ROOT = "root"; + private static final String ATTR_TIME = "time"; + private static final String ATTR_VERSION = "version"; + private static final String ATTR_LIVE_MAX = "liveMax"; + private static final String ATTR_LIVE_PAGES = "livePages"; + private static final String ATTR_UNUSED = "unused"; + private static final String ATTR_UNUSED_AT_VERSION = "unusedAtVersion"; + private static final String ATTR_PIN_COUNT = "pinCount"; + private static final String ATTR_TOC = "toc"; + private static final String ATTR_OCCUPANCY = "occupancy"; + private static final String ATTR_FLETCHER = "fletcher"; + /** * The chunk id. */ @@ -42,7 +69,7 @@ public class Chunk { /** * The start block number within the file. */ - public long block; + public volatile long block; /** * The length in number of blocks. @@ -52,12 +79,27 @@ public class Chunk { /** * The total number of pages in this chunk. 
*/ - public int pageCount; + int pageCount; + + /** + * The number of pages that are still alive in the latest version of the store. + */ + int pageCountLive; /** - * The number of pages still alive. + * Offset (from the beginning of the chunk) for the table of content. + * Table of content is holding a value of type "long" for each page in the chunk. + * This value consists of map id, page offset, page length and page type. + * Format is the same as page's position id, but with map id replacing chunk id. + * + * @see DataUtils#getTocElement(int, int, int, int) for field format details */ - public int pageCountLive; + int tocPos; + + /** + * Collection of "deleted" flags for all pages in the chunk. + */ + BitSet occupancy; /** * The sum of the max length of all pages. @@ -65,7 +107,7 @@ public class Chunk { public long maxLen; /** - * The sum of the max length of all pages that are in use. + * The sum of the length of all pages that are still alive. */ public long maxLenLive; @@ -73,12 +115,12 @@ public class Chunk { * The garbage collection priority. Priority 0 means it needs to be * collected, a high value means low priority. */ - public int collectPriority; + int collectPriority; /** - * The position of the meta root. + * The position of the root of layout map. */ - public long metaRootPos; + long layoutRootPos; /** * The version stored in this chunk. @@ -97,6 +139,12 @@ public class Chunk { */ public long unused; + /** + * Version of the store at which chunk become unused and therefore can be + * considered "dead" and collected after this version is no longer in use. + */ + long unusedAtVersion; + /** * The last used map id. */ @@ -107,8 +155,58 @@ public class Chunk { */ public long next; + /** + * Number of live pinned pages. 
+ */ + private int pinCount; + + + private Chunk(String s) { + this(DataUtils.parseMap(s), true); + } + + Chunk(Map map) { + this(map, false); + } + + private Chunk(Map map, boolean full) { + this(DataUtils.readHexInt(map, ATTR_CHUNK, 0)); + block = DataUtils.readHexLong(map, ATTR_BLOCK, 0); + version = DataUtils.readHexLong(map, ATTR_VERSION, id); + if (full) { + len = DataUtils.readHexInt(map, ATTR_LEN, 0); + pageCount = DataUtils.readHexInt(map, ATTR_PAGES, 0); + pageCountLive = DataUtils.readHexInt(map, ATTR_LIVE_PAGES, pageCount); + mapId = DataUtils.readHexInt(map, ATTR_MAP, 0); + maxLen = DataUtils.readHexLong(map, ATTR_MAX, 0); + maxLenLive = DataUtils.readHexLong(map, ATTR_LIVE_MAX, maxLen); + layoutRootPos = DataUtils.readHexLong(map, ATTR_ROOT, 0); + time = DataUtils.readHexLong(map, ATTR_TIME, 0); + unused = DataUtils.readHexLong(map, ATTR_UNUSED, 0); + unusedAtVersion = DataUtils.readHexLong(map, ATTR_UNUSED_AT_VERSION, 0); + next = DataUtils.readHexLong(map, ATTR_NEXT, 0); + pinCount = DataUtils.readHexInt(map, ATTR_PIN_COUNT, 0); + tocPos = DataUtils.readHexInt(map, ATTR_TOC, 0); + byte[] bytes = DataUtils.parseHexBytes(map, ATTR_OCCUPANCY); + if (bytes == null) { + occupancy = new BitSet(); + } else { + occupancy = BitSet.valueOf(bytes); + if (pageCount - pageCountLive != occupancy.cardinality()) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Inconsistent occupancy info {0} - {1} != {2} {3}", + pageCount, pageCountLive, occupancy.cardinality(), this); + } + } + } + } + Chunk(int id) { this.id = id; + if (id <= 0) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Invalid chunk id {0}", id); + } } /** @@ -127,17 +225,17 @@ static Chunk readChunkHeader(ByteBuffer buff, long start) { if (data[i] == '\n') { // set the position to the start of the first page buff.position(pos + i + 1); - String s = new String(data, 0, i, DataUtils.LATIN).trim(); + String s = new String(data, 0, i, 
StandardCharsets.ISO_8859_1).trim(); return fromString(s); } } } catch (Exception e) { // there could be various reasons - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "File corrupt reading chunk at position {0}", start, e); } - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "File corrupt reading chunk at position {0}", start); } @@ -149,13 +247,13 @@ static Chunk readChunkHeader(ByteBuffer buff, long start) { * @param minLength the minimum length */ void writeChunkHeader(WriteBuffer buff, int minLength) { - long pos = buff.position(); - buff.put(asString().getBytes(DataUtils.LATIN)); - while (buff.position() - pos < minLength - 1) { + long delimiterPosition = buff.position() + minLength - 1; + buff.put(asString().getBytes(StandardCharsets.ISO_8859_1)); + while (buff.position() < delimiterPosition) { buff.put((byte) ' '); } - if (minLength != 0 && buff.position() > minLength) { - throw DataUtils.newIllegalStateException( + if (minLength != 0 && buff.position() > delimiterPosition) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Chunk metadata too long"); } @@ -169,7 +267,7 @@ void writeChunkHeader(WriteBuffer buff, int minLength) { * @return the metadata key */ static String getMetaKey(int chunkId) { - return "chunk." + Integer.toHexString(chunkId); + return ATTR_CHUNK + "." 
+ Integer.toHexString(chunkId); } /** @@ -179,22 +277,7 @@ static String getMetaKey(int chunkId) { * @return the block */ public static Chunk fromString(String s) { - HashMap map = DataUtils.parseMap(s); - int id = DataUtils.readHexInt(map, "chunk", 0); - Chunk c = new Chunk(id); - c.block = DataUtils.readHexLong(map, "block", 0); - c.len = DataUtils.readHexInt(map, "len", 0); - c.pageCount = DataUtils.readHexInt(map, "pages", 0); - c.pageCountLive = DataUtils.readHexInt(map, "livePages", c.pageCount); - c.mapId = DataUtils.readHexInt(map, "map", 0); - c.maxLen = DataUtils.readHexLong(map, "max", 0); - c.maxLenLive = DataUtils.readHexLong(map, "liveMax", c.maxLen); - c.metaRootPos = DataUtils.readHexLong(map, "root", 0); - c.time = DataUtils.readHexLong(map, "time", 0); - c.unused = DataUtils.readHexLong(map, "unused", 0); - c.version = DataUtils.readHexLong(map, "version", id); - c.next = DataUtils.readHexLong(map, "next", 0); - return c; + return new Chunk(s); } /** @@ -202,7 +285,8 @@ public static Chunk fromString(String s) { * * @return the fill rate */ - public int getFillRate() { + int getFillRate() { + assert maxLenLive <= maxLen : maxLenLive + " > " + maxLen; if (maxLenLive <= 0) { return 0; } else if (maxLenLive == maxLen) { @@ -227,44 +311,221 @@ public boolean equals(Object o) { * @return the string */ public String asString() { - StringBuilder buff = new StringBuilder(); - DataUtils.appendMap(buff, "chunk", id); - DataUtils.appendMap(buff, "block", block); - DataUtils.appendMap(buff, "len", len); + StringBuilder buff = new StringBuilder(240); + DataUtils.appendMap(buff, ATTR_CHUNK, id); + DataUtils.appendMap(buff, ATTR_BLOCK, block); + DataUtils.appendMap(buff, ATTR_LEN, len); if (maxLen != maxLenLive) { - DataUtils.appendMap(buff, "liveMax", maxLenLive); + DataUtils.appendMap(buff, ATTR_LIVE_MAX, maxLenLive); } if (pageCount != pageCountLive) { - DataUtils.appendMap(buff, "livePages", pageCountLive); + DataUtils.appendMap(buff, ATTR_LIVE_PAGES, 
pageCountLive); } - DataUtils.appendMap(buff, "map", mapId); - DataUtils.appendMap(buff, "max", maxLen); + DataUtils.appendMap(buff, ATTR_MAP, mapId); + DataUtils.appendMap(buff, ATTR_MAX, maxLen); if (next != 0) { - DataUtils.appendMap(buff, "next", next); + DataUtils.appendMap(buff, ATTR_NEXT, next); } - DataUtils.appendMap(buff, "pages", pageCount); - DataUtils.appendMap(buff, "root", metaRootPos); - DataUtils.appendMap(buff, "time", time); + DataUtils.appendMap(buff, ATTR_PAGES, pageCount); + DataUtils.appendMap(buff, ATTR_ROOT, layoutRootPos); + DataUtils.appendMap(buff, ATTR_TIME, time); if (unused != 0) { - DataUtils.appendMap(buff, "unused", unused); + DataUtils.appendMap(buff, ATTR_UNUSED, unused); + } + if (unusedAtVersion != 0) { + DataUtils.appendMap(buff, ATTR_UNUSED_AT_VERSION, unusedAtVersion); + } + DataUtils.appendMap(buff, ATTR_VERSION, version); + if (pinCount > 0) { + DataUtils.appendMap(buff, ATTR_PIN_COUNT, pinCount); + } + if (tocPos > 0) { + DataUtils.appendMap(buff, ATTR_TOC, tocPos); + } + if (!occupancy.isEmpty()) { + DataUtils.appendMap(buff, ATTR_OCCUPANCY, + StringUtils.convertBytesToHex(occupancy.toByteArray())); } - DataUtils.appendMap(buff, "version", version); return buff.toString(); } byte[] getFooterBytes() { - StringBuilder buff = new StringBuilder(); - DataUtils.appendMap(buff, "chunk", id); - DataUtils.appendMap(buff, "block", block); - DataUtils.appendMap(buff, "version", version); - byte[] bytes = buff.toString().getBytes(DataUtils.LATIN); - int checksum = DataUtils.getFletcher32(bytes, bytes.length); - DataUtils.appendMap(buff, "fletcher", checksum); - while (buff.length() < Chunk.FOOTER_LENGTH - 1) { + StringBuilder buff = new StringBuilder(FOOTER_LENGTH); + DataUtils.appendMap(buff, ATTR_CHUNK, id); + DataUtils.appendMap(buff, ATTR_BLOCK, block); + DataUtils.appendMap(buff, ATTR_VERSION, version); + byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); + int checksum = DataUtils.getFletcher32(bytes, 0, 
bytes.length); + DataUtils.appendMap(buff, ATTR_FLETCHER, checksum); + while (buff.length() < FOOTER_LENGTH - 1) { buff.append(' '); } - buff.append("\n"); - return buff.toString().getBytes(DataUtils.LATIN); + buff.append('\n'); + return buff.toString().getBytes(StandardCharsets.ISO_8859_1); + } + + boolean isSaved() { + return block != Long.MAX_VALUE; + } + + boolean isLive() { + return pageCountLive > 0; + } + + boolean isRewritable() { + return isSaved() + && isLive() + && pageCountLive < pageCount // not fully occupied + && isEvacuatable(); + } + + private boolean isEvacuatable() { + return pinCount == 0; + } + + /** + * Read a page of data into a ByteBuffer. + * + * @param fileStore to use + * @param offset of the page data + * @param pos page pos + * @return ByteBuffer containing page data. + */ + ByteBuffer readBufferForPage(FileStore fileStore, int offset, long pos) { + assert isSaved() : this; + while (true) { + long originalBlock = block; + try { + long filePos = originalBlock * MVStore.BLOCK_SIZE; + long maxPos = filePos + (long) len * MVStore.BLOCK_SIZE; + filePos += offset; + if (filePos < 0) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "Negative position {0}; p={1}, c={2}", filePos, pos, toString()); + } + + int length = DataUtils.getPageMaxLength(pos); + if (length == DataUtils.PAGE_LARGE) { + // read the first bytes to figure out actual length + length = fileStore.readFully(filePos, 128).getInt(); + // pageNo is deliberately not included into length to preserve compatibility + // TODO: remove this adjustment when page on disk format is re-organized + length += 4; + } + length = (int) Math.min(maxPos - filePos, length); + if (length < 0) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "Illegal page length {0} reading at {1}; max pos {2} ", length, filePos, maxPos); + } + + ByteBuffer buff = fileStore.readFully(filePos, length); + + if (originalBlock == block) { + return buff; + } + } catch 
(MVStoreException ex) { + if (originalBlock == block) { + throw ex; + } + } + } + } + + long[] readToC(FileStore fileStore) { + assert isSaved() : this; + assert tocPos > 0; + while (true) { + long originalBlock = block; + try { + long filePos = originalBlock * MVStore.BLOCK_SIZE + tocPos; + int length = pageCount * 8; + long[] toc = new long[pageCount]; + fileStore.readFully(filePos, length).asLongBuffer().get(toc); + if (originalBlock == block) { + return toc; + } + } catch (MVStoreException ex) { + if (originalBlock == block) { + throw ex; + } + } + } + } + + /** + * Modifies internal state to reflect the fact that one more page is stored + * within this chunk. + * @param pageLengthOnDisk + * size of the page + * @param singleWriter + * indicates whether page belongs to append mode capable map + * (single writer map). Such pages are "pinned" to the chunk, + * they can't be evacuated (moved to a different chunk) while + */ + void accountForWrittenPage(int pageLengthOnDisk, boolean singleWriter) { + maxLen += pageLengthOnDisk; + pageCount++; + maxLenLive += pageLengthOnDisk; + pageCountLive++; + if (singleWriter) { + pinCount++; + } + assert pageCount - pageCountLive == occupancy.cardinality() + : pageCount + " - " + pageCountLive + " <> " + occupancy.cardinality() + " : " + occupancy; + } + + /** + * Modifies internal state to reflect the fact that one the pages within + * this chunk was removed from the map. 
+ * + * @param pageNo + * sequential page number within the chunk + * @param pageLength + * on disk of the removed page + * @param pinned + * whether removed page was pinned + * @param now + * is a moment in time (since creation of the store), when + * removal is recorded, and retention period starts + * @param version + * at which page was removed + * @return true if all of the pages, this chunk contains, were already + * removed, and false otherwise + */ + boolean accountForRemovedPage(int pageNo, int pageLength, boolean pinned, long now, long version) { + assert isSaved() : this; + // legacy chunks do not have a table of content, + // therefore pageNo is not valid, skip + if (tocPos > 0) { + assert pageNo >= 0 && pageNo < pageCount : pageNo + " // " + pageCount; + assert !occupancy.get(pageNo) : pageNo + " " + this + " " + occupancy; + assert pageCount - pageCountLive == occupancy.cardinality() + : pageCount + " - " + pageCountLive + " <> " + occupancy.cardinality() + " : " + occupancy; + occupancy.set(pageNo); + } + + maxLenLive -= pageLength; + pageCountLive--; + if (pinned) { + pinCount--; + } + + if (unusedAtVersion < version) { + unusedAtVersion = version; + } + + assert pinCount >= 0 : this; + assert pageCountLive >= 0 : this; + assert pinCount <= pageCountLive : this; + assert maxLenLive >= 0 : this; + assert (pageCountLive == 0) == (maxLenLive == 0) : this; + + if (!isLive()) { + unused = now; + return true; + } + return false; } @Override @@ -272,5 +533,16 @@ public String toString() { return asString(); } + + public static final class PositionComparator implements Comparator { + public static final Comparator INSTANCE = new PositionComparator(); + + private PositionComparator() {} + + @Override + public int compare(Chunk one, Chunk two) { + return Long.compare(one.block, two.block); + } + } } diff --git a/h2/src/main/org/h2/mvstore/ConcurrentArrayList.java b/h2/src/main/org/h2/mvstore/ConcurrentArrayList.java deleted file mode 100644 index 
7e1f1450fe..0000000000 --- a/h2/src/main/org/h2/mvstore/ConcurrentArrayList.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mvstore; - -import java.util.Arrays; -import java.util.Iterator; - -/** - * A very simple array list that supports concurrent access. - * Internally, it uses immutable objects. - * - * @param the key type - */ -public class ConcurrentArrayList { - - /** - * The array. - */ - @SuppressWarnings("unchecked") - K[] array = (K[]) new Object[0]; - - /** - * Get the first element, or null if none. - * - * @return the first element - */ - public K peekFirst() { - K[] a = array; - return a.length == 0 ? null : a[0]; - } - - /** - * Get the last element, or null if none. - * - * @return the last element - */ - public K peekLast() { - K[] a = array; - int len = a.length; - return len == 0 ? null : a[len - 1]; - } - - /** - * Add an element at the end. - * - * @param obj the element - */ - public synchronized void add(K obj) { - int len = array.length; - array = Arrays.copyOf(array, len + 1); - array[len] = obj; - } - - /** - * Remove the first element, if it matches. - * - * @param obj the element to remove - * @return true if the element matched and was removed - */ - public synchronized boolean removeFirst(K obj) { - if (peekFirst() != obj) { - return false; - } - int len = array.length; - @SuppressWarnings("unchecked") - K[] a = (K[]) new Object[len - 1]; - System.arraycopy(array, 1, a, 0, len - 1); - array = a; - return true; - } - - /** - * Remove the last element, if it matches. 
- * - * @param obj the element to remove - * @return true if the element matched and was removed - */ - public synchronized boolean removeLast(K obj) { - if (peekLast() != obj) { - return false; - } - array = Arrays.copyOf(array, array.length - 1); - return true; - } - - /** - * Get an iterator over all entries. - * - * @return the iterator - */ - public Iterator iterator() { - return new Iterator() { - - K[] a = array; - int index; - - @Override - public boolean hasNext() { - return index < a.length; - } - - @Override - public K next() { - return a[index++]; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - - }; - } - -} diff --git a/h2/src/main/org/h2/mvstore/Cursor.java b/h2/src/main/org/h2/mvstore/Cursor.java index b8b3a4b281..d60ca8c29a 100644 --- a/h2/src/main/org/h2/mvstore/Cursor.java +++ b/h2/src/main/org/h2/mvstore/Cursor.java @@ -1,54 +1,102 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; import java.util.Iterator; +import java.util.NoSuchElementException; /** - * A cursor to iterate over elements in ascending order. + * A cursor to iterate over elements in ascending or descending order. 
* * @param the key type * @param the value type */ -public class Cursor implements Iterator { +public final class Cursor implements Iterator { + private final boolean reverse; + private final K to; + private CursorPos cursorPos; + private CursorPos keeper; + private K current; + private K last; + private V lastValue; + private Page lastPage; - private final MVMap map; - private final K from; - private CursorPos pos; - private K current, last; - private V currentValue, lastValue; - private Page lastPage; - private final Page root; - private boolean initialized; - Cursor(MVMap map, Page root, K from) { - this.map = map; - this.root = root; - this.from = from; + public Cursor(RootReference rootReference, K from, K to) { + this(rootReference, from, to, false); + } + + /** + * @param rootReference of the tree + * @param from starting key (inclusive), if null start from the first / last key + * @param to ending key (inclusive), if null there is no boundary + * @param reverse true if tree should be iterated in key's descending order + */ + public Cursor(RootReference rootReference, K from, K to, boolean reverse) { + this.lastPage = rootReference.root; + this.cursorPos = traverseDown(lastPage, from, reverse); + this.to = to; + this.reverse = reverse; } @Override public boolean hasNext() { - if (!initialized) { - min(root, from); - initialized = true; - fetchNext(); + if (cursorPos != null) { + int increment = reverse ? -1 : 1; + while (current == null) { + Page page = cursorPos.page; + int index = cursorPos.index; + if (reverse ? index < 0 : index >= upperBound(page)) { + // traversal of this page is over, going up a level or stop if at the root already + CursorPos tmp = cursorPos; + cursorPos = cursorPos.parent; + if (cursorPos == null) { + return false; + } + tmp.parent = keeper; + keeper = tmp; + } else { + // traverse down to the leaf taking the leftmost path + while (!page.isLeaf()) { + page = page.getChildPage(index); + index = reverse ? 
upperBound(page) - 1 : 0; + if (keeper == null) { + cursorPos = new CursorPos<>(page, index, cursorPos); + } else { + CursorPos tmp = keeper; + keeper = keeper.parent; + tmp.parent = cursorPos; + tmp.page = page; + tmp.index = index; + cursorPos = tmp; + } + } + if (reverse ? index >= 0 : index < page.getKeyCount()) { + K key = page.getKey(index); + if (to != null && Integer.signum(page.map.getKeyType().compare(key, to)) == increment) { + return false; + } + current = last = key; + lastValue = page.getValue(index); + lastPage = page; + } + } + cursorPos.index += increment; + } } return current != null; } @Override public K next() { - hasNext(); - K c = current; - last = current; - lastValue = currentValue; - lastPage = pos == null ? null : pos.page; - fetchNext(); - return c; + if(!hasNext()) { + throw new NoSuchElementException(); + } + current = null; + return last; } /** @@ -69,7 +117,13 @@ public V getValue() { return lastValue; } - Page getPage() { + /** + * Get the page where last retrieved key is located. + * + * @return the page + */ + @SuppressWarnings("unused") + Page getPage() { return lastPage; } @@ -80,77 +134,52 @@ Page getPage() { * @param n the number of entries to skip */ public void skip(long n) { - if (!hasNext()) { - return; - } if (n < 10) { - while (n-- > 0) { - fetchNext(); + while (n-- > 0 && hasNext()) { + next(); } - return; + } else if(hasNext()) { + assert cursorPos != null; + CursorPos cp = cursorPos; + CursorPos parent; + while ((parent = cp.parent) != null) cp = parent; + Page root = cp.page; + MVMap map = root.map; + long index = map.getKeyIndex(next()); + last = map.getKey(index + (reverse ? 
-n : n)); + this.cursorPos = traverseDown(root, last, reverse); } - long index = map.getKeyIndex(current); - K k = map.getKey(index + n); - pos = null; - min(root, k); - fetchNext(); - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); } /** * Fetch the next entry that is equal or larger than the given key, starting - * from the given page. This method retains the stack. + * from the given page. This method returns the path. * - * @param p the page to start - * @param from the key to search + * @param key type + * @param value type + * + * @param page to start from as a root + * @param key to search for, null means search for the first available key + * @param reverse true if traversal is in reverse direction, false otherwise + * @return CursorPos representing path from the entry found, + * or from insertion point if not, + * all the way up to to the root page provided */ - private void min(Page p, K from) { - while (true) { - if (p.isLeaf()) { - int x = from == null ? 0 : p.binarySearch(from); - if (x < 0) { - x = -x - 1; - } - pos = new CursorPos(p, x, pos); - break; + static CursorPos traverseDown(Page page, K key, boolean reverse) { + CursorPos cursorPos = key != null ? CursorPos.traverseDown(page, key) : + reverse ? page.getAppendCursorPos(null) : page.getPrependCursorPos(null); + int index = cursorPos.index; + if (index < 0) { + index = ~index; + if (reverse) { + --index; } - int x = from == null ? -1 : p.binarySearch(from); - if (x < 0) { - x = -x - 1; - } else { - x++; - } - pos = new CursorPos(p, x + 1, pos); - p = p.getChildPage(x); + cursorPos.index = index; } + return cursorPos; } - /** - * Fetch the next entry if there is one. 
- */ - @SuppressWarnings("unchecked") - private void fetchNext() { - while (pos != null) { - if (pos.index < pos.page.getKeyCount()) { - int index = pos.index++; - current = (K) pos.page.getKey(index); - currentValue = (V) pos.page.getValue(index); - return; - } - pos = pos.parent; - if (pos == null) { - break; - } - if (pos.index < map.getChildPageCount(pos.page)) { - min(pos.page.getChildPage(pos.index++), null); - } - } - current = null; + private static int upperBound(Page page) { + return page.isLeaf() ? page.getKeyCount() : page.map.getChildPageCount(page); } - } diff --git a/h2/src/main/org/h2/mvstore/CursorPos.java b/h2/src/main/org/h2/mvstore/CursorPos.java index 32057c9e06..15334bc9d4 100644 --- a/h2/src/main/org/h2/mvstore/CursorPos.java +++ b/h2/src/main/org/h2/mvstore/CursorPos.java @@ -1,35 +1,89 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; /** - * A position in a cursor + * A position in a cursor. + * Instance represents a node in the linked list, which traces path + * from a specific (target) key within a leaf node all the way up to te root + * (bottom up path). */ -public class CursorPos { +public final class CursorPos { /** - * The current page. + * The page at the current level. */ - public Page page; + public Page page; /** - * The current index. + * Index of the key (within page above) used to go down to a lower level + * in case of intermediate nodes, or index of the target key for leaf a node. + * In a later case, it could be negative, if the key is not present. */ public int index; /** - * The position in the parent page, if any. + * Next node in the linked list, representing the position within parent level, + * or null, if we are at the root level already. 
*/ - public final CursorPos parent; + public CursorPos parent; - public CursorPos(Page page, int index, CursorPos parent) { + + public CursorPos(Page page, int index, CursorPos parent) { this.page = page; this.index = index; this.parent = parent; } + /** + * Searches for a given key and creates a breadcrumb trail through a B-tree + * rooted at a given Page. Resulting path starts at "insertion point" for a + * given key and goes back to the root. + * + * @param key type + * @param value type + * + * @param page root of the tree + * @param key the key to search for + * @return head of the CursorPos chain (insertion point) + */ + static CursorPos traverseDown(Page page, K key) { + CursorPos cursorPos = null; + while (!page.isLeaf()) { + int index = page.binarySearch(key) + 1; + if (index < 0) { + index = -index; + } + cursorPos = new CursorPos<>(page, index, cursorPos); + page = page.getChildPage(index); + } + return new CursorPos<>(page, page.binarySearch(key), cursorPos); + } + + /** + * Calculate the memory used by changes that are not yet stored. + * + * @param version the version + * @return the amount of memory + */ + int processRemovalInfo(long version) { + int unsavedMemory = 0; + for (CursorPos head = this; head != null; head = head.parent) { + unsavedMemory += head.page.removePage(version); + } + return unsavedMemory; + } + + @Override + public String toString() { + return "CursorPos{" + + "page=" + page + + ", index=" + index + + ", parent=" + parent + + '}'; + } } diff --git a/h2/src/main/org/h2/mvstore/DataUtils.java b/h2/src/main/org/h2/mvstore/DataUtils.java index 0afcfadb59..872e7b79e6 100644 --- a/h2/src/main/org/h2/mvstore/DataUtils.java +++ b/h2/src/main/org/h2/mvstore/DataUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -10,21 +10,20 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.ConcurrentModificationException; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; import org.h2.engine.Constants; -import org.h2.util.New; +import org.h2.jdbc.JdbcException; +import org.h2.util.StringUtils; /** * Utility methods */ -public class DataUtils { +public final class DataUtils { /** * An error occurred while reading from the file. @@ -99,6 +98,22 @@ public class DataUtils { */ public static final int ERROR_TRANSACTION_ILLEGAL_STATE = 103; + /** + * The transaction contains too many changes. + */ + public static final int ERROR_TRANSACTION_TOO_BIG = 104; + + /** + * Deadlock discovered and one of transactions involved chosen as victim and rolled back. + */ + public static final int ERROR_TRANSACTIONS_DEADLOCK = 105; + + /** + * The transaction store can not be initialized because data type + * is not found in type registry. + */ + public static final int ERROR_UNKNOWN_DATA_TYPE = 106; + /** * The type for leaf page. */ @@ -119,6 +134,11 @@ public class DataUtils { */ public static final int PAGE_COMPRESSED_HIGH = 2 + 4; + /** + * The bit mask for pages with page sequential number. + */ + public static final int PAGE_HAS_PAGE_NO = 8; + /** * The maximum length of a variable size int. */ @@ -141,40 +161,38 @@ public class DataUtils { */ public static final long COMPRESSED_VAR_LONG_MAX = 0x1ffffffffffffL; - /** - * The estimated number of bytes used per page object. - */ - public static final int PAGE_MEMORY = 128; - - /** - * The estimated number of bytes used per child entry. 
- */ - public static final int PAGE_MEMORY_CHILD = 16; - /** * The marker size of a very large page. */ public static final int PAGE_LARGE = 2 * 1024 * 1024; + // The following are key prefixes used in layout map + /** - * The UTF-8 character encoding format. + * The prefix for chunks ("chunk."). This, plus the chunk id (hex encoded) + * is the key, and the serialized chunk metadata is the value. */ - public static final Charset UTF8 = Charset.forName("UTF-8"); + public static final String META_CHUNK = "chunk."; /** - * The ISO Latin character encoding format. + * The prefix for root positions of maps ("root."). This, plus the map id + * (hex encoded) is the key, and the position (hex encoded) is the value. */ - public static final Charset LATIN = Charset.forName("ISO-8859-1"); + public static final String META_ROOT = "root."; + + // The following are key prefixes used in meta map /** - * An 0-size byte array. + * The prefix for names ("name."). This, plus the name of the map, is the + * key, and the map id (hex encoded) is the value. */ - private static final byte[] EMPTY_BYTES = {}; + public static final String META_NAME = "name."; /** - * The maximum byte to grow a buffer at a time. + * The prefix for maps ("map."). This, plus the map id (hex encoded) is the + * key, and the serialized in the map metadata is the value. */ - private static final int MAX_GROW = 16 * 1024 * 1024; + public static final String META_MAP = "map."; /** * Get the length of the variable size int. 
@@ -274,10 +292,11 @@ public static long readVarLong(ByteBuffer buff) { * * @param out the output stream * @param x the value + * @throws IOException if some data could not be written */ public static void writeVarInt(OutputStream out, int x) throws IOException { while ((x & ~0x7f) != 0) { - out.write((byte) (0x80 | (x & 0x7f))); + out.write((byte) (x | 0x80)); x >>>= 7; } out.write((byte) x); @@ -291,7 +310,7 @@ public static void writeVarInt(OutputStream out, int x) throws IOException { */ public static void writeVarInt(ByteBuffer buff, int x) { while ((x & ~0x7f) != 0) { - buff.put((byte) (0x80 | (x & 0x7f))); + buff.put((byte) (x | 0x80)); x >>>= 7; } buff.put((byte) x); @@ -300,14 +319,12 @@ public static void writeVarInt(ByteBuffer buff, int x) { /** * Write characters from a string (without the length). * - * @param buff the target buffer + * @param buff the target buffer (must be large enough) * @param s the string * @param len the number of characters - * @return the byte buffer */ - public static ByteBuffer writeStringData(ByteBuffer buff, + public static void writeStringData(ByteBuffer buff, String s, int len) { - buff = DataUtils.ensureCapacity(buff, 3 * len); for (int i = 0; i < len; i++) { int c = s.charAt(i); if (c < 0x80) { @@ -321,7 +338,16 @@ public static ByteBuffer writeStringData(ByteBuffer buff, buff.put((byte) (c & 0x3f)); } } - return buff; + } + + /** + * Read a string. 
+ * + * @param buff the source buffer + * @return the value + */ + public static String readString(ByteBuffer buff) { + return readString(buff, readVarInt(buff)); } /** @@ -355,7 +381,7 @@ public static String readString(ByteBuffer buff, int len) { */ public static void writeVarLong(ByteBuffer buff, long x) { while ((x & ~0x7f) != 0) { - buff.put((byte) (0x80 | (x & 0x7f))); + buff.put((byte) (x | 0x80)); x >>>= 7; } buff.put((byte) x); @@ -366,11 +392,12 @@ public static void writeVarLong(ByteBuffer buff, long x) { * * @param out the output stream * @param x the value + * @throws IOException if some data could not be written */ public static void writeVarLong(OutputStream out, long x) throws IOException { while ((x & ~0x7f) != 0) { - out.write((byte) (0x80 | (x & 0x7f))); + out.write((byte) (x | 0x80)); x >>>= 7; } out.write((byte) x); @@ -421,7 +448,7 @@ public static void copyExcept(Object src, Object dst, int oldSize, * @param file the file channel * @param pos the absolute position within the file * @param dst the byte buffer - * @throws IllegalStateException if some data could not be read + * @throws MVStoreException if some data could not be read */ public static void readFully(FileChannel file, long pos, ByteBuffer dst) { try { @@ -440,11 +467,11 @@ public static void readFully(FileChannel file, long pos, ByteBuffer dst) { } catch (IOException e2) { size = -1; } - throw newIllegalStateException( + throw newMVStoreException( ERROR_READING_FAILED, - "Reading from {0} failed; file length {1} " + - "read length {2} at {3}", - file, size, dst.remaining(), pos, e); + "Reading from file {0} failed at {1} (length {2}), " + + "read {3}, remaining {4}", + file, pos, size, dst.position(), dst.remaining(), e); } } @@ -463,7 +490,7 @@ public static void writeFully(FileChannel file, long pos, ByteBuffer src) { off += len; } while (src.remaining() > 0); } catch (IOException e) { - throw newIllegalStateException( + throw newMVStoreException( ERROR_WRITING_FAILED, "Writing 
to {0} failed; length {1} at {2}", file, src.remaining(), pos, e); @@ -514,14 +541,34 @@ public static int getPageChunkId(long pos) { } /** - * Get the maximum length for the given code. - * For the code 31, PAGE_LARGE is returned. + * Get the map id from the chunk's table of content element. + * + * @param tocElement packed table of content element + * @return the map id + */ + public static int getPageMapId(long tocElement) { + return (int) (tocElement >>> 38); + } + + /** + * Get the maximum length for the given page position. * * @param pos the position * @return the maximum length */ public static int getPageMaxLength(long pos) { int code = (int) ((pos >> 1) & 31); + return decodePageLength(code); + } + + /** + * Get the maximum length for the given code. + * For the code 31, PAGE_LARGE is returned. + * + * @param code encoded page length + * @return the maximum length + */ + public static int decodePageLength(int code) { if (code == 31) { return PAGE_LARGE; } @@ -531,11 +578,11 @@ public static int getPageMaxLength(long pos) { /** * Get the offset from the position. * - * @param pos the position + * @param tocElement packed table of content element * @return the offset */ - public static int getPageOffset(long pos) { - return (int) (pos >> 6); + public static int getPageOffset(long tocElement) { + return (int) (tocElement >> 6); } /** @@ -548,9 +595,39 @@ public static int getPageType(long pos) { return ((int) pos) & 1; } + /** + * Determines whether specified file position corresponds to a leaf page + * @param pos the position + * @return true if it is a leaf, false otherwise + */ + public static boolean isLeafPosition(long pos) { + return getPageType(pos) == PAGE_TYPE_LEAF; + } + + /** + * Find out if page was saved. + * + * @param pos the position + * @return true if page has been saved + */ + public static boolean isPageSaved(long pos) { + return (pos & ~1L) != 0; + } + + /** + * Find out if page was removed. 
+ * + * @param pos the position + * @return true if page has been removed (no longer accessible from the + * current root of the tree) + */ + static boolean isPageRemoved(long pos) { + return pos == 1L; + } + /** * Get the position of this page. The following information is encoded in - * the position: the chunk id, the offset, the maximum length, and the type + * the position: the chunk id, the page sequential number, the maximum length, and the type * (node or leaf). * * @param chunkId the chunk id @@ -559,8 +636,7 @@ public static int getPageType(long pos) { * @param type the page type (1 for node, 0 for leaf) * @return the position */ - public static long getPagePos(int chunkId, int offset, - int length, int type) { + public static long getPagePos(int chunkId, int offset, int length, int type) { long pos = (long) chunkId << 38; pos |= (long) offset << 6; pos |= encodeLength(length) << 1; @@ -568,6 +644,36 @@ public static long getPagePos(int chunkId, int offset, return pos; } + /** + * Convert tocElement into pagePos by replacing mapId with chunkId. + * + * @param chunkId the chunk id + * @param tocElement the element + * @return the page position + */ + public static long getPagePos(int chunkId, long tocElement) { + return (tocElement & 0x3FFFFFFFFFL) | ((long) chunkId << 38); + } + + /** + * Create table of content element. The following information is encoded in it: + * the map id, the page offset, the maximum length, and the type + * (node or leaf). + * + * @param mapId the chunk id + * @param offset the offset + * @param length the length + * @param type the page type (1 for node, 0 for leaf) + * @return the position + */ + public static long getTocElement(int mapId, int offset, int length, int type) { + long pos = (long) mapId << 38; + pos |= (long) offset << 6; + pos |= encodeLength(length) << 1; + pos |= type; + return pos; + } + /** * Calculate a check value for the given integer. 
A check value is mean to * verify the data is consistent with a high probability, but not meant to @@ -587,16 +693,30 @@ public static short getCheckValue(int x) { * @param map the map * @return the string builder */ - public static StringBuilder appendMap(StringBuilder buff, - HashMap map) { - ArrayList list = New.arrayList(map.keySet()); - Collections.sort(list); - for (String k : list) { - appendMap(buff, k, map.get(k)); + public static StringBuilder appendMap(StringBuilder buff, HashMap map) { + Object[] keys = map.keySet().toArray(); + Arrays.sort(keys); + for (Object k : keys) { + String key = (String) k; + Object value = map.get(key); + if (value instanceof Long) { + appendMap(buff, key, (long) value); + } else if (value instanceof Integer) { + appendMap(buff, key, (int) value); + } else { + appendMap(buff, key, value.toString()); + } } return buff; } + private static StringBuilder appendMapKey(StringBuilder buff, String key) { + if (buff.length() > 0) { + buff.append(','); + } + return buff.append(key).append(':'); + } + /** * Append a key-value pair to the string builder. Keys may not contain a * colon. 
Values that contain a comma or a double quote are enclosed in @@ -606,25 +726,14 @@ public static StringBuilder appendMap(StringBuilder buff, * @param key the key * @param value the value */ - public static void appendMap(StringBuilder buff, String key, Object value) { - if (buff.length() > 0) { - buff.append(','); - } - buff.append(key).append(':'); - String v; - if (value instanceof Long) { - v = Long.toHexString((Long) value); - } else if (value instanceof Integer) { - v = Integer.toHexString((Integer) value); - } else { - v = value.toString(); - } - if (v.indexOf(',') < 0 && v.indexOf('\"') < 0) { - buff.append(v); + public static void appendMap(StringBuilder buff, String key, String value) { + appendMapKey(buff, key); + if (value.indexOf(',') < 0 && value.indexOf('\"') < 0) { + buff.append(value); } else { buff.append('\"'); - for (int i = 0, size = v.length(); i < size; i++) { - char c = v.charAt(i); + for (int i = 0, size = value.length(); i < size; i++) { + char c = value.charAt(i); if (c == '\"') { buff.append('\\'); } @@ -634,72 +743,204 @@ public static void appendMap(StringBuilder buff, String key, Object value) { } } + /** + * Append a key-value pair to the string builder. Keys may not contain a + * colon. + * + * @param buff the target buffer + * @param key the key + * @param value the value + */ + public static void appendMap(StringBuilder buff, String key, long value) { + appendMapKey(buff, key).append(Long.toHexString(value)); + } + + /** + * Append a key-value pair to the string builder. Keys may not contain a + * colon. 
+ * + * @param buff the target buffer + * @param key the key + * @param value the value + */ + public static void appendMap(StringBuilder buff, String key, int value) { + appendMapKey(buff, key).append(Integer.toHexString(value)); + } + + /** + * @param buff output buffer, should be empty + * @param s parsed string + * @param i offset to parse from + * @param size stop offset (exclusive) + * @return new offset + */ + private static int parseMapValue(StringBuilder buff, String s, int i, int size) { + while (i < size) { + char c = s.charAt(i++); + if (c == ',') { + break; + } else if (c == '\"') { + while (i < size) { + c = s.charAt(i++); + if (c == '\\') { + if (i == size) { + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + } + c = s.charAt(i++); + } else if (c == '\"') { + break; + } + buff.append(c); + } + } else { + buff.append(c); + } + } + return i; + } + /** * Parse a key-value pair list. * * @param s the list * @return the map - * @throws IllegalStateException if parsing failed + * @throws MVStoreException if parsing failed */ public static HashMap parseMap(String s) { - HashMap map = New.hashMap(); + HashMap map = new HashMap<>(); + StringBuilder buff = new StringBuilder(); for (int i = 0, size = s.length(); i < size;) { int startKey = i; i = s.indexOf(':', i); if (i < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, "Not a map: {0}", s); + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); } String key = s.substring(startKey, i++); - StringBuilder buff = new StringBuilder(); - while (i < size) { - char c = s.charAt(i++); - if (c == ',') { - break; - } else if (c == '\"') { - while (i < size) { - c = s.charAt(i++); - if (c == '\\') { - if (i == size) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Not a map: {0}", s); - } + i = parseMapValue(buff, s, i, size); + map.put(key, buff.toString()); + buff.setLength(0); + } + return map; + } + + /** + * Parse a 
key-value pair list and checks its checksum. + * + * @param bytes encoded map + * @return the map without mapping for {@code "fletcher"}, or {@code null} if checksum is wrong + * or parameter do not represent a properly formatted map serialization + */ + static HashMap parseChecksummedMap(byte[] bytes) { + int start = 0, end = bytes.length; + while (start < end && bytes[start] <= ' ') { + start++; + } + while (start < end && bytes[end - 1] <= ' ') { + end--; + } + String s = new String(bytes, start, end - start, StandardCharsets.ISO_8859_1); + HashMap map = new HashMap<>(); + StringBuilder buff = new StringBuilder(); + for (int i = 0, size = s.length(); i < size;) { + int startKey = i; + i = s.indexOf(':', i); + if (i < 0) { + // Corrupted map + return null; + } + if (i - startKey == 8 && s.regionMatches(startKey, "fletcher", 0, 8)) { + parseMapValue(buff, s, i + 1, size); + int check = (int) Long.parseLong(buff.toString(), 16); + if (check == getFletcher32(bytes, start, startKey - 1)) { + return map; + } + // Corrupted map + return null; + } + String key = s.substring(startKey, i++); + i = parseMapValue(buff, s, i, size); + map.put(key, buff.toString()); + buff.setLength(0); + } + // Corrupted map + return null; + } + + /** + * Parse a name from key-value pair list. + * + * @param s the list + * @return value of name item, or {@code null} + * @throws MVStoreException if parsing failed + */ + public static String getMapName(String s) { + return getFromMap(s, "name"); + } + + /** + * Parse a specified pair from key-value pair list. 
+ * + * @param s the list + * @param key the name of the key + * @return value of the specified item, or {@code null} + * @throws MVStoreException if parsing failed + */ + public static String getFromMap(String s, String key) { + int keyLength = key.length(); + for (int i = 0, size = s.length(); i < size;) { + int startKey = i; + i = s.indexOf(':', i); + if (i < 0) { + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + } + if (i++ - startKey == keyLength && s.regionMatches(startKey, key, 0, keyLength)) { + StringBuilder buff = new StringBuilder(); + parseMapValue(buff, s, i, size); + return buff.toString(); + } else { + while (i < size) { + char c = s.charAt(i++); + if (c == ',') { + break; + } else if (c == '\"') { + while (i < size) { c = s.charAt(i++); - } else if (c == '\"') { - break; + if (c == '\\') { + if (i++ == size) { + throw newMVStoreException(ERROR_FILE_CORRUPT, "Not a map: {0}", s); + } + } else if (c == '\"') { + break; + } } - buff.append(c); } - } else { - buff.append(c); } } - map.put(key, buff.toString()); } - return map; + return null; } /** * Calculate the Fletcher32 checksum. 
* * @param bytes the bytes + * @param offset initial offset * @param length the message length (if odd, 0 is appended) * @return the checksum */ - public static int getFletcher32(byte[] bytes, int length) { + public static int getFletcher32(byte[] bytes, int offset, int length) { int s1 = 0xffff, s2 = 0xffff; - int i = 0, evenLength = length / 2 * 2; - while (i < evenLength) { + int i = offset, len = offset + (length & ~1); + while (i < len) { // reduce after 360 words (each word is two bytes) - for (int end = Math.min(i + 720, evenLength); i < end;) { + for (int end = Math.min(i + 720, len); i < end;) { int x = ((bytes[i++] & 0xff) << 8) | (bytes[i++] & 0xff); s2 += s1 += x; } s1 = (s1 & 0xffff) + (s1 >>> 16); s2 = (s2 & 0xffff) + (s2 >>> 16); } - if (i < length) { + if ((length & 1) != 0) { // odd length: append 0 int x = (bytes[i] & 0xff) << 8; s2 += s1 += x; @@ -750,27 +991,16 @@ public static IllegalArgumentException newIllegalArgumentException( } /** - * Create a new ConcurrentModificationException. - * - * @param message the message - * @return the exception - */ - public static ConcurrentModificationException - newConcurrentModificationException(String message) { - return new ConcurrentModificationException(formatMessage(0, message)); - } - - /** - * Create a new IllegalStateException. + * Create a new MVStoreException. * * @param errorCode the error code * @param message the message * @param arguments the arguments * @return the exception */ - public static IllegalStateException newIllegalStateException( + public static MVStoreException newMVStoreException( int errorCode, String message, Object... arguments) { - return initCause(new IllegalStateException( + return initCause(new MVStoreException(errorCode, formatMessage(errorCode, message, arguments)), arguments); } @@ -779,8 +1009,8 @@ private static T initCause(T e, Object... 
arguments) { int size = arguments.length; if (size > 0) { Object o = arguments[size - 1]; - if (o instanceof Exception) { - e.initCause((Exception) o); + if (o instanceof Throwable) { + e.initCause((Throwable) o); } } return e; @@ -797,6 +1027,7 @@ private static T initCause(T e, Object... arguments) { public static String formatMessage(int errorCode, String message, Object... arguments) { // convert arguments to strings, to avoid locale specific formatting + arguments = arguments.clone(); for (int i = 0; i < arguments.length; i++) { Object a = arguments[i]; if (!(a instanceof Exception)) { @@ -813,79 +1044,6 @@ public static String formatMessage(int errorCode, String message, "/" + errorCode + "]"; } - /** - * Get the error code from an exception message. - * - * @param m the message - * @return the error code, or 0 if none - */ - public static int getErrorCode(String m) { - if (m != null && m.endsWith("]")) { - int dash = m.lastIndexOf('/'); - if (dash >= 0) { - String s = m.substring(dash + 1, m.length() - 1); - try { - return Integer.parseInt(s); - } catch (NumberFormatException e) { - // no error code - } - } - } - return 0; - } - - /** - * Create an array of bytes with the given size. If this is not possible - * because not enough memory is available, an OutOfMemoryError with the - * requested size in the message is thrown. - *

    - * This method should be used if the size of the array is user defined, or - * stored in a file, so wrong size data can be distinguished from regular - * out-of-memory. - * - * @param len the number of bytes requested - * @return the byte array - * @throws OutOfMemoryError if the allocation was too large - */ - public static byte[] newBytes(int len) { - if (len == 0) { - return EMPTY_BYTES; - } - try { - return new byte[len]; - } catch (OutOfMemoryError e) { - Error e2 = new OutOfMemoryError("Requested memory: " + len); - e2.initCause(e); - throw e2; - } - } - - /** - * Ensure the byte buffer has the given capacity, plus 1 KB. If not, a new, - * larger byte buffer is created and the data is copied. - * - * @param buff the byte buffer - * @param len the minimum remaining capacity - * @return the byte buffer (possibly a new one) - */ - public static ByteBuffer ensureCapacity(ByteBuffer buff, int len) { - len += 1024; - if (buff.remaining() > len) { - return buff; - } - return grow(buff, len); - } - - private static ByteBuffer grow(ByteBuffer buff, int len) { - len = buff.remaining() + len; - int capacity = buff.capacity(); - len = Math.max(len, Math.min(capacity + MAX_GROW, capacity * 2)); - ByteBuffer temp = ByteBuffer.allocate(len); - buff.flip(); - temp.put(buff); - return temp; - } - /** * Read a hex long value from a map. 
* @@ -893,10 +1051,9 @@ private static ByteBuffer grow(ByteBuffer buff, int len) { * @param key the key * @param defaultValue if the value is null * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ - public static long readHexLong(Map map, - String key, long defaultValue) { + public static long readHexLong(Map map, String key, long defaultValue) { Object v = map.get(key); if (v == null) { return defaultValue; @@ -906,7 +1063,7 @@ public static long readHexLong(Map map, try { return parseHexLong((String) v); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", v, e); } } @@ -916,7 +1073,7 @@ public static long readHexLong(Map map, * * @param x the string * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ public static long parseHexLong(String x) { try { @@ -928,7 +1085,7 @@ public static long parseHexLong(String x) { } return Long.parseLong(x, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", x, e); } } @@ -938,7 +1095,7 @@ public static long parseHexLong(String x) { * * @param x the string * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ public static int parseHexInt(String x) { try { @@ -946,7 +1103,7 @@ public static int parseHexInt(String x) { // in Java 8, we can use Integer.parseLong(x, 16); return (int) Long.parseLong(x, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", x, e); } } @@ -958,10 +1115,9 @@ public static int parseHexInt(String x) { * @param key the key * @param 
defaultValue if the value is null * @return the parsed value - * @throws IllegalStateException if parsing fails + * @throws MVStoreException if parsing fails */ - public static int readHexInt(HashMap map, - String key, int defaultValue) { + static int readHexInt(Map map, String key, int defaultValue) { Object v = map.get(key); if (v == null) { return defaultValue; @@ -972,43 +1128,63 @@ public static int readHexInt(HashMap map, // support unsigned hex value return (int) Long.parseLong((String) v, 16); } catch (NumberFormatException e) { - throw newIllegalStateException(ERROR_FILE_CORRUPT, + throw newMVStoreException(ERROR_FILE_CORRUPT, "Error parsing the value {0}", v, e); } } /** - * An entry of a map. + * Parse the hex-encoded bytes of an entry in the map. * - * @param the key type - * @param the value type + * @param map the map + * @param key the key + * @return the byte array, or null if not in the map */ - public static class MapEntry implements Map.Entry { - - private final K key; - private V value; - - public MapEntry(K key, V value) { - this.key = key; - this.value = value; + static byte[] parseHexBytes(Map map, String key) { + Object v = map.get(key); + if (v == null) { + return null; } + return StringUtils.convertHexToBytes((String)v); + } - @Override - public K getKey() { - return key; + /** + * Get the configuration parameter value, or default. + * + * @param config the configuration + * @param key the key + * @param defaultValue the default + * @return the configured value or default + */ + static int getConfigParam(Map config, String key, int defaultValue) { + Object o = config.get(key); + if (o instanceof Number) { + return ((Number) o).intValue(); + } else if (o != null) { + try { + return Integer.decode(o.toString()); + } catch (NumberFormatException e) { + // ignore + } } + return defaultValue; + } - @Override - public V getValue() { - return value; + /** + * Convert an exception to an IO exception. 
+ * + * @param e the root cause + * @return the IO exception + */ + public static IOException convertToIOException(Throwable e) { + if (e instanceof IOException) { + return (IOException) e; } - - @Override - public V setValue(V value) { - throw DataUtils.newUnsupportedOperationException( - "Updating the value is not supported"); + if (e instanceof JdbcException) { + if (e.getCause() != null) { + e = e.getCause(); + } } - + return new IOException(e.toString(), e); } - } diff --git a/h2/src/main/org/h2/mvstore/FileStore.java b/h2/src/main/org/h2/mvstore/FileStore.java index 29266c0177..dc1142fcac 100644 --- a/h2/src/main/org/h2/mvstore/FileStore.java +++ b/h2/src/main/org/h2/mvstore/FileStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -10,12 +10,11 @@ import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.nio.channels.OverlappingFileLockException; - +import java.util.concurrent.atomic.AtomicLong; import org.h2.mvstore.cache.FilePathCache; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathDisk; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathNio; +import org.h2.store.fs.encrypt.FileEncrypt; +import org.h2.store.fs.encrypt.FilePathEncrypt; /** * The default storage mechanism of the MVStore. This implementation persists @@ -27,22 +26,22 @@ public class FileStore { /** * The number of read operations. */ - protected long readCount; + protected final AtomicLong readCount = new AtomicLong(); /** * The number of read bytes. */ - protected long readBytes; + protected final AtomicLong readBytes = new AtomicLong(); /** * The number of write operations. 
*/ - protected long writeCount; + protected final AtomicLong writeCount = new AtomicLong(); /** * The number of written bytes. */ - protected long writeBytes; + protected final AtomicLong writeBytes = new AtomicLong(); /** * The free spaces between the chunks. The first block to use is block 2 @@ -54,12 +53,12 @@ public class FileStore { /** * The file name. */ - protected String fileName; + private String fileName; /** * Whether this store is read-only. */ - protected boolean readOnly; + private boolean readOnly; /** * The file size (cached). @@ -69,17 +68,17 @@ public class FileStore { /** * The file. */ - protected FileChannel file; + private FileChannel file; /** * The encrypted file (if encryption is used). */ - protected FileChannel encryptedFile; + private FileChannel encryptedFile; /** * The file lock. */ - protected FileLock fileLock; + private FileLock fileLock; @Override public String toString() { @@ -96,8 +95,8 @@ public String toString() { public ByteBuffer readFully(long pos, int len) { ByteBuffer dst = ByteBuffer.allocate(len); DataUtils.readFully(file, pos, dst); - readCount++; - readBytes += len; + readCount.incrementAndGet(); + readBytes.addAndGet(len); return dst; } @@ -111,8 +110,8 @@ public void writeFully(long pos, ByteBuffer src) { int len = src.remaining(); fileSize = Math.max(fileSize, pos + len); DataUtils.writeFully(file, pos, src); - writeCount++; - writeBytes += len; + writeCount.incrementAndGet(); + writeBytes.addAndGet(len); } /** @@ -128,16 +127,8 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { if (file != null) { return; } - if (fileName != null) { - FilePath p = FilePath.get(fileName); - // if no explicit scheme was specified, NIO is used - if (p instanceof FilePathDisk && - !fileName.startsWith(p.getScheme() + ":")) { - // ensure the NIO file system is registered - FilePathNio.class.getName(); - fileName = "nio:" + fileName; - } - } + // ensure the Cache file system is registered + 
FilePathCache.INSTANCE.getScheme(); this.fileName = fileName; FilePath f = FilePath.get(fileName); FilePath parent = f.getParent(); @@ -154,9 +145,8 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { if (encryptionKey != null) { byte[] key = FilePathEncrypt.getPasswordBytes(encryptionKey); encryptedFile = file; - file = new FilePathEncrypt.FileEncrypt(fileName, key, file); + file = new FileEncrypt(fileName, key, file); } - file = FilePathCache.wrap(file); try { if (readOnly) { fileLock = file.tryLock(0, Long.MAX_VALUE, true); @@ -164,18 +154,20 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { fileLock = file.tryLock(); } } catch (OverlappingFileLockException e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_LOCKED, "The file is locked: {0}", fileName, e); } if (fileLock == null) { - throw DataUtils.newIllegalStateException( + try { close(); } catch (Exception ignore) {} + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_LOCKED, "The file is locked: {0}", fileName); } fileSize = file.size(); } catch (IOException e) { - throw DataUtils.newIllegalStateException( + try { close(); } catch (Exception ignore) {} + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not open file {0}", fileName, e); } @@ -186,17 +178,18 @@ public void open(String fileName, boolean readOnly, char[] encryptionKey) { */ public void close() { try { - if (fileLock != null) { - fileLock.release(); - fileLock = null; + if(file != null && file.isOpen()) { + if (fileLock != null) { + fileLock.release(); + } + file.close(); } - file.close(); - freeSpace.clear(); } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_WRITING_FAILED, "Closing failed for file {0}", fileName, e); } finally { + fileLock = null; file = null; } } @@ -205,12 +198,14 @@ public void close() { * Flush all 
changes. */ public void sync() { - try { - file.force(true); - } catch (IOException e) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_WRITING_FAILED, - "Could not sync file {0}", fileName, e); + if (file != null) { + try { + file.force(true); + } catch (IOException e) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_WRITING_FAILED, + "Could not sync file {0}", fileName, e); + } } } @@ -229,15 +224,23 @@ public long size() { * @param size the new file size */ public void truncate(long size) { - try { - writeCount++; - file.truncate(size); - fileSize = Math.min(fileSize, size); - } catch (IOException e) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_WRITING_FAILED, - "Could not truncate file {0} to size {1}", - fileName, size, e); + int attemptCount = 0; + while (true) { + try { + writeCount.incrementAndGet(); + file.truncate(size); + fileSize = Math.min(fileSize, size); + return; + } catch (IOException e) { + if (++attemptCount == 10) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_WRITING_FAILED, + "Could not truncate file {0} to size {1}", + fileName, size, e); + } + System.gc(); + Thread.yield(); + } } } @@ -272,7 +275,7 @@ public FileChannel getEncryptedFile() { * @return the number of write operations */ public long getWriteCount() { - return writeCount; + return writeCount.get(); } /** @@ -281,7 +284,7 @@ public long getWriteCount() { * @return the number of write operations */ public long getWriteBytes() { - return writeBytes; + return writeBytes.get(); } /** @@ -291,7 +294,7 @@ public long getWriteBytes() { * @return the number of read operations */ public long getReadCount() { - return readCount; + return readCount.get(); } /** @@ -300,7 +303,7 @@ public long getReadCount() { * @return the number of write operations */ public long getReadBytes() { - return readBytes; + return readBytes.get(); } public boolean isReadOnly() { @@ -313,7 +316,7 @@ public boolean isReadOnly() { * @return the retention time 
*/ public int getDefaultRetentionTime() { - return 45000; + return 45_000; } /** @@ -330,10 +333,30 @@ public void markUsed(long pos, int length) { * Allocate a number of blocks and mark them as used. * * @param length the number of bytes to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area * @return the start position in bytes */ - public long allocate(int length) { - return freeSpace.allocate(length); + long allocate(int length, long reservedLow, long reservedHigh) { + return freeSpace.allocate(length, reservedLow, reservedHigh); + } + + /** + * Calculate starting position of the prospective allocation. + * + * @param blocks the number of blocks to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area + * @return the starting block index + */ + long predictAllocation(int blocks, long reservedLow, long reservedHigh) { + return freeSpace.predictAllocation(blocks, reservedLow, reservedHigh); + } + + boolean isFragmented() { + return freeSpace.isFragmented(); } /** @@ -350,10 +373,41 @@ public int getFillRate() { return freeSpace.getFillRate(); } + /** + * Calculates a prospective fill rate, which store would have after rewrite + * of sparsely populated chunk(s) and evacuation of still live data into a + * new chunk. + * + * @param vacatedBlocks + * number of blocks vacated + * @return prospective fill rate (0 - 100) + */ + public int getProjectedFillRate(int vacatedBlocks) { + return freeSpace.getProjectedFillRate(vacatedBlocks); + } + long getFirstFree() { return freeSpace.getFirstFree(); } + long getFileLengthInUse() { + return freeSpace.getLastFree(); + } + + /** + * Calculates relative "priority" for chunk to be moved. 
+ * + * @param block where chunk starts + * @return priority, bigger number indicate that chunk need to be moved sooner + */ + int getMovePriority(int block) { + return freeSpace.getMovePriority(block); + } + + long getAfterLastBlock() { + return freeSpace.getAfterLastBlock(); + } + /** * Mark the file as empty. */ diff --git a/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java b/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java index ee9aea7c57..f302283bec 100644 --- a/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java +++ b/h2/src/main/org/h2/mvstore/FreeSpaceBitSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -31,6 +31,16 @@ public class FreeSpaceBitSet { */ private final BitSet set = new BitSet(); + /** + * Left-shifting register, which holds outcomes of recent allocations. Only + * allocations done in "reuseSpace" mode are recorded here. For example, + * rightmost bit set to 1 means that last allocation failed to find a hole + * big enough, and next bit set to 0 means that previous allocation request + * have found one. + */ + private int failureFlags; + + /** * Create a new free space map. * @@ -94,14 +104,68 @@ public boolean isFree(long pos, int length) { * @return the start position in bytes */ public long allocate(int length) { - int blocks = getBlockCount(length); + return allocate(length, 0, 0); + } + + /** + * Allocate a number of blocks and mark them as used. 
+ * + * @param length the number of bytes to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area + * @return the start position in bytes + */ + long allocate(int length, long reservedLow, long reservedHigh) { + return getPos(allocate(getBlockCount(length), (int)reservedLow, (int)reservedHigh, true)); + } + + /** + * Calculate starting position of the prospective allocation. + * + * @param blocks the number of blocks to allocate + * @param reservedLow start block index of the reserved area (inclusive) + * @param reservedHigh end block index of the reserved area (exclusive), + * special value -1 means beginning of the infinite free area + * @return the starting block index + */ + long predictAllocation(int blocks, long reservedLow, long reservedHigh) { + return allocate(blocks, (int)reservedLow, (int)reservedHigh, false); + } + + boolean isFragmented() { + return Integer.bitCount(failureFlags & 0x0F) > 1; + } + + private int allocate(int blocks, int reservedLow, int reservedHigh, boolean allocate) { + int freeBlocksTotal = 0; for (int i = 0;;) { int start = set.nextClearBit(i); int end = set.nextSetBit(start + 1); - if (end < 0 || end - start >= blocks) { - set.set(start, start + blocks); - return getPos(start); + int freeBlocks = end - start; + if (end < 0 || freeBlocks >= blocks) { + if ((reservedHigh < 0 || start < reservedHigh) && start + blocks > reservedLow) { // overlap detected + if (reservedHigh < 0) { + start = getAfterLastBlock(); + end = -1; + } else { + i = reservedHigh; + continue; + } + } + assert set.nextSetBit(start) == -1 || set.nextSetBit(start) >= start + blocks : + "Double alloc: " + Integer.toHexString(start) + "/" + Integer.toHexString(blocks) + " " + this; + if (allocate) { + set.set(start, start + blocks); + } else { + failureFlags <<= 1; + if (end < 0 && freeBlocksTotal > 4 * 
blocks) { + failureFlags |= 1; + } + } + return start; } + freeBlocksTotal += freeBlocks; i = end; } } @@ -115,6 +179,13 @@ public long allocate(int length) { public void markUsed(long pos, int length) { int start = getBlock(pos); int blocks = getBlockCount(length); + // this is not an assert because we get called during file opening + if (set.nextSetBit(start) != -1 && set.nextSetBit(start) < start + blocks ) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "Double mark: " + Integer.toHexString(start) + + "/" + Integer.toHexString(blocks) + " " + this); + } set.set(start, start + blocks); } @@ -127,6 +198,8 @@ public void markUsed(long pos, int length) { public void free(long pos, int length) { int start = getBlock(pos); int blocks = getBlockCount(length); + assert set.nextClearBit(start) >= start + blocks : + "Double free: " + Integer.toHexString(start) + "/" + Integer.toHexString(blocks) + " " + this; set.clear(start, start + blocks); } @@ -148,17 +221,37 @@ private int getBlockCount(int length) { * * @return the fill rate (0 - 100) */ - public int getFillRate() { - int total = set.length(), count = 0; - for (int i = 0; i < total; i++) { - if (set.get(i)) { - count++; + int getFillRate() { + return getProjectedFillRate(0); + } + + /** + * Calculates a prospective fill rate, which store would have after rewrite + * of sparsely populated chunk(s) and evacuation of still live data into a + * new chunk. 
+ * + * @param vacatedBlocks + * number of blocks vacated as a result of live data evacuation less + * number of blocks in prospective chunk with evacuated live data + * @return prospective fill rate (0 - 100) + */ + int getProjectedFillRate(int vacatedBlocks) { + // it's not bullet-proof against race condition but should be good enough + // to get approximation without holding a store lock + int usedBlocks; + int totalBlocks; + // to prevent infinite loop, which I saw once + int cnt = 3; + do { + if (--cnt == 0) { + return 100; } - } - if (count == 0) { - return 0; - } - return Math.max(1, (int) (100L * count / total)); + totalBlocks = set.length(); + usedBlocks = set.cardinality(); + } while (totalBlocks != set.length() || usedBlocks > totalBlocks); + usedBlocks -= firstFreeBlock + vacatedBlocks; + totalBlocks -= firstFreeBlock; + return usedBlocks == 0 ? 0 : (int)((100L * usedBlocks + totalBlocks - 1) / totalBlocks); } /** @@ -166,10 +259,56 @@ public int getFillRate() { * * @return the position. */ - public long getFirstFree() { + long getFirstFree() { return getPos(set.nextClearBit(0)); } + /** + * Get the position of the last (infinite) free space. + * + * @return the position. + */ + long getLastFree() { + return getPos(getAfterLastBlock()); + } + + /** + * Get the index of the first block after last occupied one. + * It marks the beginning of the last (infinite) free space. + * + * @return block index + */ + int getAfterLastBlock() { + return set.previousSetBit(set.size() - 1) + 1; + } + + /** + * Calculates relative "priority" for chunk to be moved. 
+ * + * @param block where chunk starts + * @return priority, bigger number indicate that chunk need to be moved sooner + */ + int getMovePriority(int block) { + // The most desirable chunks to move are the ones sitting within + // a relatively short span of occupied blocks which is surrounded + // from both sides by relatively long free spans + int prevEnd = set.previousClearBit(block); + int freeSize; + if (prevEnd < 0) { + prevEnd = firstFreeBlock; + freeSize = 0; + } else { + freeSize = prevEnd - set.previousSetBit(prevEnd); + } + + int nextStart = set.nextClearBit(block); + int nextEnd = set.nextSetBit(nextStart); + if (nextEnd >= 0) { + freeSize += nextEnd - nextStart; + } + return (nextStart - prevEnd - 1) * 1000 / (freeSize + 1); + } + @Override public String toString() { StringBuilder buff = new StringBuilder(); @@ -188,9 +327,9 @@ public String toString() { on = 0; } } - buff.append("\n"); - buff.append(" on " + onCount + " off " + offCount); - buff.append(" " + 100 * onCount / (onCount+offCount) + "% used "); + buff.append('\n') + .append(" on ").append(onCount).append(" off ").append(offCount) + .append(' ').append(100 * onCount / (onCount+offCount)).append("% used "); } buff.append('['); for (int i = 0;;) { @@ -209,5 +348,4 @@ public String toString() { buff.append(']'); return buff.toString(); } - } \ No newline at end of file diff --git a/h2/src/main/org/h2/mvstore/MVMap.java b/h2/src/main/org/h2/mvstore/MVMap.java index 168a125279..d1de1f181b 100644 --- a/h2/src/main/org/h2/mvstore/MVMap.java +++ b/h2/src/main/org/h2/mvstore/MVMap.java @@ -1,73 +1,126 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; +import static org.h2.engine.Constants.MEMORY_POINTER; + import java.util.AbstractList; import java.util.AbstractMap; import java.util.AbstractSet; -import java.util.HashMap; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import org.h2.mvstore.type.DataType; import org.h2.mvstore.type.ObjectDataType; -import org.h2.util.New; +import org.h2.util.MemoryEstimator; /** * A stored map. *

    - * Read operations can happen concurrently with all other + * All read and write operations can happen concurrently with all other * operations, without risk of corruption. - *

    - * Write operations first read the relevant area from disk to memory - * concurrently, and only then modify the data. The in-memory part of write - * operations is synchronized. For scalable concurrent in-memory write - * operations, the map should be split into multiple smaller sub-maps that are - * then synchronized independently. * * @param the key class * @param the value class */ -public class MVMap extends AbstractMap - implements ConcurrentMap { +public class MVMap extends AbstractMap implements ConcurrentMap { /** * The store. */ - protected MVStore store; + public final MVStore store; /** - * The current root page (may not be null). + * Reference to the current root page. + */ + private final AtomicReference> root; + + private final int id; + private final long createVersion; + private final DataType keyType; + private final DataType valueType; + private final int keysPerPage; + private final boolean singleWriter; + private final K[] keysBuffer; + private final V[] valuesBuffer; + + private final Object lock = new Object(); + private volatile boolean notificationRequested; + + /** + * Whether the map is closed. Volatile so we don't accidentally write to a + * closed map in multithreaded mode. */ - protected volatile Page root; + private volatile boolean closed; + private boolean readOnly; + private boolean isVolatile; + private final AtomicLong avgKeySize; + private final AtomicLong avgValSize; /** - * The version used for writing. + * This designates the "last stored" version for a store which was + * just open for the first time. 
*/ - protected volatile long writeVersion; + static final long INITIAL_VERSION = -1; - private int id; - private long createVersion; - private final DataType keyType; - private final DataType valueType; - private ConcurrentArrayList oldRoots = - new ConcurrentArrayList(); + protected MVMap(Map config, DataType keyType, DataType valueType) { + this((MVStore) config.get("store"), keyType, valueType, + DataUtils.readHexInt(config, "id", 0), + DataUtils.readHexLong(config, "createVersion", 0), + new AtomicReference<>(), + ((MVStore) config.get("store")).getKeysPerPage(), + config.containsKey("singleWriter") && (Boolean) config.get("singleWriter") + ); + setInitialRoot(createEmptyLeaf(), store.getCurrentVersion()); + } - private boolean closed; - private boolean readOnly; - private boolean isVolatile; + // constructor for cloneIt() + @SuppressWarnings("CopyConstructorMissesField") + protected MVMap(MVMap source) { + this(source.store, source.keyType, source.valueType, source.id, source.createVersion, + new AtomicReference<>(source.root.get()), source.keysPerPage, source.singleWriter); + } + + // meta map constructor + MVMap(MVStore store, int id, DataType keyType, DataType valueType) { + this(store, keyType, valueType, id, 0, new AtomicReference<>(), store.getKeysPerPage(), false); + setInitialRoot(createEmptyLeaf(), store.getCurrentVersion()); + } - protected MVMap(DataType keyType, DataType valueType) { + private MVMap(MVStore store, DataType keyType, DataType valueType, int id, long createVersion, + AtomicReference> root, int keysPerPage, boolean singleWriter) { + this.store = store; + this.id = id; + this.createVersion = createVersion; this.keyType = keyType; this.valueType = valueType; - this.root = Page.createEmpty(this, -1); + this.root = root; + this.keysPerPage = keysPerPage; + this.keysBuffer = singleWriter ? keyType.createStorage(keysPerPage) : null; + this.valuesBuffer = singleWriter ? 
valueType.createStorage(keysPerPage) : null; + this.singleWriter = singleWriter; + this.avgKeySize = keyType.isMemoryEstimationAllowed() ? new AtomicLong() : null; + this.avgValSize = valueType.isMemoryEstimationAllowed() ? new AtomicLong() : null; + + } + + /** + * Clone the current map. + * + * @return clone of this. + */ + protected MVMap cloneIt() { + return new MVMap<>(this); } /** @@ -77,7 +130,7 @@ protected MVMap(DataType keyType, DataType valueType) { * @return the metadata key */ static String getMapRootKey(int mapId) { - return "root." + Integer.toHexString(mapId); + return DataUtils.META_ROOT + Integer.toHexString(mapId); } /** @@ -87,20 +140,7 @@ static String getMapRootKey(int mapId) { * @return the metadata key */ static String getMapKey(int mapId) { - return "map." + Integer.toHexString(mapId); - } - - /** - * Open this map with the given store and configuration. - * - * @param store the store - * @param config the configuration - */ - protected void init(MVStore store, HashMap config) { - this.store = store; - this.id = DataUtils.readHexInt(config, "id", 0); - this.createVersion = DataUtils.readHexLong(config, "createVersion", 0); - this.writeVersion = store.getCurrentVersion(); + return DataUtils.META_MAP + Integer.toHexString(mapId); } /** @@ -111,101 +151,9 @@ protected void init(MVStore store, HashMap config) { * @return the old value if the key existed, or null otherwise */ @Override - @SuppressWarnings("unchecked") - public synchronized V put(K key, V value) { - DataUtils.checkArgument(value != null, "The value may not be null"); - beforeWrite(); - long v = writeVersion; - Page p = root.copy(v); - p = splitRootIfNeeded(p, v); - Object result = put(p, v, key, value); - newRoot(p); - return (V) result; - } - - /** - * Add or replace a key-value pair in a branch. 
- * - * @param root the root page - * @param key the key (may not be null) - * @param value the value (may not be null) - * @return the new root page - */ - synchronized Page putBranch(Page root, K key, V value) { + public V put(K key, V value) { DataUtils.checkArgument(value != null, "The value may not be null"); - long v = writeVersion; - Page p = root.copy(v); - p = splitRootIfNeeded(p, v); - put(p, v, key, value); - return p; - } - - /** - * Split the root page if necessary. - * - * @param p the page - * @param writeVersion the write version - * @return the new sibling - */ - protected Page splitRootIfNeeded(Page p, long writeVersion) { - if (p.getMemory() <= store.getPageSplitSize() || p.getKeyCount() <= 1) { - return p; - } - int at = p.getKeyCount() / 2; - long totalCount = p.getTotalCount(); - Object k = p.getKey(at); - Page split = p.split(at); - Object[] keys = { k }; - Page.PageReference[] children = { - new Page.PageReference(p, p.getPos(), p.getTotalCount()), - new Page.PageReference(split, split.getPos(), split.getTotalCount()), - }; - p = Page.create(this, writeVersion, - keys, null, - children, - totalCount, 0); - return p; - } - - /** - * Add or update a key-value pair. 
- * - * @param p the page - * @param writeVersion the write version - * @param key the key (may not be null) - * @param value the value (may not be null) - * @return the old value, or null - */ - protected Object put(Page p, long writeVersion, Object key, Object value) { - int index = p.binarySearch(key); - if (p.isLeaf()) { - if (index < 0) { - index = -index - 1; - p.insertLeaf(index, key, value); - return null; - } - return p.setValue(index, value); - } - // p is a node - if (index < 0) { - index = -index - 1; - } else { - index++; - } - Page c = p.getChildPage(index).copy(writeVersion); - if (c.getMemory() > store.getPageSplitSize() && c.getKeyCount() > 1) { - // split on the way down - int at = c.getKeyCount() / 2; - Object k = c.getKey(at); - Page split = c.split(at); - p.setChild(index, split); - p.insertNode(index, k, c); - // now we are not sure where to add - return put(p, writeVersion, key, value); - } - Object result = put(c, writeVersion, key, value); - p.setChild(index, c); - return result; + return operate(key, value, DecisionMaker.PUT); } /** @@ -213,7 +161,7 @@ protected Object put(Page p, long writeVersion, Object key, Object value) { * * @return the first key, or null */ - public K firstKey() { + public final K firstKey() { return getFirstLast(true); } @@ -222,7 +170,7 @@ public K firstKey() { * * @return the last key, or null */ - public K lastKey() { + public final K lastKey() { return getFirstLast(false); } @@ -234,19 +182,19 @@ public K lastKey() { * @param index the index * @return the key */ - @SuppressWarnings("unchecked") - public K getKey(long index) { - if (index < 0 || index >= size()) { + public final K getKey(long index) { + if (index < 0 || index >= sizeAsLong()) { return null; } - Page p = root; + Page p = getRootPage(); long offset = 0; while (true) { if (p.isLeaf()) { if (index >= offset + p.getKeyCount()) { return null; } - return (K) p.getKey((int) (index - offset)); + K key = p.getKey((int) (index - offset)); + return key; } 
int i = 0, size = getChildPageCount(p); for (; i < size; i++) { @@ -271,7 +219,7 @@ public K getKey(long index) { * * @return the key list */ - public List keyList() { + public final List keyList() { return new AbstractList() { @Override @@ -305,24 +253,22 @@ public int indexOf(Object key) { * @param key the key * @return the index */ - public long getKeyIndex(K key) { - if (size() == 0) { + public final long getKeyIndex(K key) { + Page p = getRootPage(); + if (p.getTotalCount() == 0) { return -1; } - Page p = root; long offset = 0; while (true) { int x = p.binarySearch(key); if (p.isLeaf()) { if (x < 0) { - return -offset + x; + offset = -offset; } return offset + x; } - if (x < 0) { - x = -x - 1; - } else { - x++; + if (x++ < 0) { + x = -x; } for (int i = 0; i < x; i++) { offset += p.getCounts(i); @@ -337,38 +283,53 @@ public long getKeyIndex(K key) { * @param first whether to retrieve the first key * @return the key, or null if the map is empty */ - @SuppressWarnings("unchecked") - protected K getFirstLast(boolean first) { - if (size() == 0) { + private K getFirstLast(boolean first) { + Page p = getRootPage(); + return getFirstLast(p, first); + } + + private K getFirstLast(Page p, boolean first) { + if (p.getTotalCount() == 0) { return null; } - Page p = root; while (true) { if (p.isLeaf()) { - return (K) p.getKey(first ? 0 : p.getKeyCount() - 1); + return p.getKey(first ? 0 : p.getKeyCount() - 1); } p = p.getChildPage(first ? 0 : getChildPageCount(p) - 1); } } /** - * Get the smallest key that is larger than the given key, or null if no - * such key exists. + * Get the smallest key that is larger than the given key (next key in ascending order), + * or null if no such key exists. * * @param key the key * @return the result */ - public K higherKey(K key) { + public final K higherKey(K key) { return getMinMax(key, false, true); } + /** + * Get the smallest key that is larger than the given key, for the given + * root page, or null if no such key exists. 
+ * + * @param rootRef the root reference of the map + * @param key to start from + * @return the result + */ + public final K higherKey(RootReference rootRef, K key) { + return getMinMax(rootRef, key, false, true); + } + /** * Get the smallest key that is larger or equal to this key. * * @param key the key * @return the result */ - public K ceilingKey(K key) { + public final K ceilingKey(K key) { return getMinMax(key, false, false); } @@ -378,7 +339,7 @@ public K ceilingKey(K key) { * @param key the key * @return the result */ - public K floorKey(K key) { + public final K floorKey(K key) { return getMinMax(key, true, false); } @@ -389,10 +350,22 @@ public K floorKey(K key) { * @param key the key * @return the result */ - public K lowerKey(K key) { + public final K lowerKey(K key) { return getMinMax(key, true, true); } + /** + * Get the largest key that is smaller than the given key, for the given + * root page, or null if no such key exists. + * + * @param rootRef the root page + * @param key the key + * @return the result + */ + public final K lowerKey(RootReference rootRef, K key) { + return getMinMax(rootRef, key, true, true); + } + /** * Get the smallest or largest key using the given bounds. 
* @@ -401,14 +374,17 @@ public K lowerKey(K key) { * @param excluding if the given upper/lower bound is exclusive * @return the key, or null if no such key exists */ - protected K getMinMax(K key, boolean min, boolean excluding) { - return getMinMax(root, key, min, excluding); + private K getMinMax(K key, boolean min, boolean excluding) { + return getMinMax(flushAndGetRoot(), key, min, excluding); } - @SuppressWarnings("unchecked") - private K getMinMax(Page p, K key, boolean min, boolean excluding) { + private K getMinMax(RootReference rootRef, K key, boolean min, boolean excluding) { + return getMinMax(rootRef.root, key, min, excluding); + } + + private K getMinMax(Page p, K key, boolean min, boolean excluding) { + int x = p.binarySearch(key); if (p.isLeaf()) { - int x = p.binarySearch(key); if (x < 0) { x = -x - (min ? 2 : 1); } else if (excluding) { @@ -417,13 +393,10 @@ private K getMinMax(Page p, K key, boolean min, boolean excluding) { if (x < 0 || x >= p.getKeyCount()) { return null; } - return (K) p.getKey(x); + return p.getKey(x); } - int x = p.binarySearch(key); - if (x < 0) { - x = -x - 1; - } else { - x++; + if (x++ < 0) { + x = -x; } while (true) { if (x < 0 || x >= getChildPageCount(p)) { @@ -439,89 +412,94 @@ private K getMinMax(Page p, K key, boolean min, boolean excluding) { /** - * Get a value. + * Get the value for the given key, or null if not found. * * @param key the key * @return the value, or null if not found + * @throws ClassCastException if type of the specified key is not compatible with this map */ - @Override @SuppressWarnings("unchecked") - public V get(Object key) { - return (V) binarySearch(root, key); + @Override + public final V get(Object key) { + return get(getRootPage(), (K) key); } /** - * Get the value for the given key, or null if not found. + * Get the value for the given key from a snapshot, or null if not found. 
* - * @param p the page + * @param p the root of a snapshot * @param key the key - * @return the value or null + * @return the value, or null if not found + * @throws ClassCastException if type of the specified key is not compatible with this map */ - protected Object binarySearch(Page p, Object key) { - int x = p.binarySearch(key); - if (!p.isLeaf()) { - if (x < 0) { - x = -x - 1; - } else { - x++; - } - p = p.getChildPage(x); - return binarySearch(p, key); - } - if (x >= 0) { - return p.getValue(x); - } - return null; + public V get(Page p, K key) { + return Page.get(p, key); } @Override - public boolean containsKey(Object key) { + public final boolean containsKey(Object key) { return get(key) != null; } /** - * Get the value for the given key, or null if not found. - * - * @param p the parent page - * @param key the key - * @return the page or null + * Remove all entries. */ - protected Page binarySearchPage(Page p, Object key) { - int x = p.binarySearch(key); - if (!p.isLeaf()) { - if (x < 0) { - x = -x - 1; - } else { - x++; - } - p = p.getChildPage(x); - return binarySearchPage(p, key); - } - if (x >= 0) { - return p; - } - return null; + @Override + public void clear() { + clearIt(); } /** - * Remove all entries. + * Remove all entries and return the root reference. 
+ * + * @return the new root reference */ - @Override - public synchronized void clear() { - beforeWrite(); - root.removeAllRecursive(); - newRoot(Page.createEmpty(this, writeVersion)); + RootReference clearIt() { + Page emptyRootPage = createEmptyLeaf(); + int attempt = 0; + while (true) { + RootReference rootReference = flushAndGetRoot(); + if (rootReference.getTotalCount() == 0) { + return rootReference; + } + boolean locked = rootReference.isLockedByCurrentThread(); + if (!locked) { + if (attempt++ == 0) { + beforeWrite(); + } else if (attempt > 3 || rootReference.isLocked()) { + rootReference = lockRoot(rootReference, attempt); + locked = true; + } + } + Page rootPage = rootReference.root; + long version = rootReference.version; + try { + if (!locked) { + rootReference = rootReference.updateRootPage(emptyRootPage, attempt); + if (rootReference == null) { + continue; + } + } + store.registerUnsavedMemory(rootPage.removeAllRecursive(version)); + rootPage = emptyRootPage; + return rootReference; + } finally { + if(locked) { + unlockRoot(rootPage); + } + } + } } /** * Close the map. Accessing the data is still possible (to allow concurrent * reads), but it is marked as closed. 
*/ - void close() { + final void close() { closed = true; } - public boolean isClosed() { + public final boolean isClosed() { return closed; } @@ -530,26 +508,12 @@ public boolean isClosed() { * * @param key the key (may not be null) * @return the old value if the key existed, or null otherwise + * @throws ClassCastException if type of the specified key is not compatible with this map */ @Override @SuppressWarnings("unchecked") public V remove(Object key) { - beforeWrite(); - V result = get(key); - if (result == null) { - return null; - } - long v = writeVersion; - synchronized (this) { - Page p = root.copy(v); - result = (V) remove(p, v, key); - if (!p.isLeaf() && p.getTotalCount() == 0) { - p.removePage(); - p = Page.createEmpty(this, p.getVersion()); - } - newRoot(p); - } - return result; + return operate((K)key, null, DecisionMaker.REMOVE); } /** @@ -560,12 +524,8 @@ public V remove(Object key) { * @return the old value if the key existed, or null otherwise */ @Override - public synchronized V putIfAbsent(K key, V value) { - V old = get(key); - if (old == null) { - put(key, value); - } - return old; + public final V putIfAbsent(K key, V value) { + return operate(key, value, DecisionMaker.IF_ABSENT); } /** @@ -575,30 +535,27 @@ public synchronized V putIfAbsent(K key, V value) { * @param value the expected value * @return true if the item was removed */ + @SuppressWarnings("unchecked") @Override - public synchronized boolean remove(Object key, Object value) { - V old = get(key); - if (areValuesEqual(old, value)) { - remove(key); - return true; - } - return false; + public boolean remove(Object key, Object value) { + EqualsDecisionMaker decisionMaker = new EqualsDecisionMaker<>(valueType, (V)value); + operate((K)key, null, decisionMaker); + return decisionMaker.getDecision() != Decision.ABORT; } /** * Check whether the two values are equal. 
* + * @param type of values to compare + * * @param a the first value * @param b the second value + * @param datatype to use for comparison * @return true if they are equal */ - public boolean areValuesEqual(Object a, Object b) { - if (a == b) { - return true; - } else if (a == null || b == null) { - return false; - } - return valueType.compare(a, b) == 0; + static boolean areValuesEqual(DataType datatype, X a, X b) { + return a == b + || a != null && b != null && datatype.compare(a, b) == 0; } /** @@ -610,13 +567,12 @@ public boolean areValuesEqual(Object a, Object b) { * @return true if the value was replaced */ @Override - public synchronized boolean replace(K key, V oldValue, V newValue) { - V old = get(key); - if (areValuesEqual(old, oldValue)) { - put(key, newValue); - return true; - } - return false; + public final boolean replace(K key, V oldValue, V newValue) { + EqualsDecisionMaker decisionMaker = new EqualsDecisionMaker<>(valueType, oldValue); + V result = operate(key, newValue, decisionMaker); + boolean res = decisionMaker.getDecision() != Decision.ABORT; + assert !res || areValuesEqual(valueType, oldValue, result) : oldValue + " != " + result; + return res; } /** @@ -627,74 +583,8 @@ public synchronized boolean replace(K key, V oldValue, V newValue) { * @return the old value, if the value was replaced, or null */ @Override - public synchronized V replace(K key, V value) { - V old = get(key); - if (old != null) { - put(key, value); - return old; - } - return null; - } - - /** - * Remove a key-value pair. 
- * - * @param p the page (may not be null) - * @param writeVersion the write version - * @param key the key - * @return the old value, or null if the key did not exist - */ - protected Object remove(Page p, long writeVersion, Object key) { - int index = p.binarySearch(key); - Object result = null; - if (p.isLeaf()) { - if (index >= 0) { - result = p.getValue(index); - p.remove(index); - } - return result; - } - // node - if (index < 0) { - index = -index - 1; - } else { - index++; - } - Page cOld = p.getChildPage(index); - Page c = cOld.copy(writeVersion); - result = remove(c, writeVersion, key); - if (result == null || c.getTotalCount() != 0) { - // no change, or - // there are more nodes - p.setChild(index, c); - } else { - // this child was deleted - if (p.getKeyCount() == 0) { - p.setChild(index, c); - c.removePage(); - } else { - p.remove(index); - } - } - return result; - } - - /** - * Use the new root page from now on. - * - * @param newRoot the new root page - */ - protected void newRoot(Page newRoot) { - if (root != newRoot) { - removeUnusedOldVersions(); - if (root.getVersion() != newRoot.getVersion()) { - Page last = oldRoots.peekLast(); - if (last == null || last.getVersion() != root.getVersion()) { - oldRoots.add(root); - } - } - root = newRoot; - } + public final V replace(K key, V value) { + return operate(key, value, DecisionMaker.IF_PRESENT); } /** @@ -704,7 +594,8 @@ protected void newRoot(Page newRoot) { * @param b the second key * @return -1 if the first key is smaller, 1 if bigger, 0 if equal */ - int compare(Object a, Object b) { + @SuppressWarnings("unused") + final int compare(K a, K b) { return keyType.compare(a, b); } @@ -713,7 +604,7 @@ int compare(Object a, Object b) { * * @return the key type */ - public DataType getKeyType() { + public final DataType getKeyType() { return keyType; } @@ -722,29 +613,48 @@ public DataType getKeyType() { * * @return the value type */ - public DataType getValueType() { + public final DataType 
getValueType() { return valueType; } + boolean isSingleWriter() { + return singleWriter; + } + /** * Read a page. * * @param pos the position of the page * @return the page */ - Page readPage(long pos) { + final Page readPage(long pos) { return store.readPage(this, pos); } /** * Set the position of the root page. - * * @param rootPos the position, 0 for empty - * @param version the version of the root + * @param version to set for this map + * */ - void setRootPos(long rootPos, long version) { - root = rootPos == 0 ? Page.createEmpty(this, -1) : readPage(rootPos); - root.setVersion(version); + final void setRootPos(long rootPos, long version) { + Page root = readOrCreateRootPage(rootPos); + if (root.map != this) { + // this can only happen on concurrent opening of existing map, + // when second thread picks up some cached page already owned by + // the first map's instantiation (both maps share the same id) + assert id == root.map.id; + // since it is unknown which one will win the race, + // let each map instance to have it's own copy + root = root.copy(this, false); + } + setInitialRoot(root, version); + setWriteVersion(store.getCurrentVersion()); + } + + private Page readOrCreateRootPage(long rootPos) { + Page root = rootPos == 0 ? createEmptyLeaf() : readPage(rootPos); + return root; } /** @@ -753,120 +663,80 @@ void setRootPos(long rootPos, long version) { * @param from the first key to return * @return the iterator */ - public Iterator keyIterator(K from) { - return new Cursor(this, root, from); + public final Iterator keyIterator(K from) { + return cursor(from, null, false); } /** - * Re-write any pages that belong to one of the chunks in the given set. 
+ * Iterate over a number of keys in reverse order * - * @param set the set of chunk ids - * @return whether rewriting was successful + * @param from the first key to return + * @return the iterator */ - boolean rewrite(Set set) { - // read from old version, to avoid concurrent reads - long previousVersion = store.getCurrentVersion() - 1; - if (previousVersion < createVersion) { - // a new map - return true; - } - MVMap readMap; - try { - readMap = openVersion(previousVersion); - } catch (IllegalArgumentException e) { - // unknown version: ok - // TODO should not rely on exception handling + public final Iterator keyIteratorReverse(K from) { + return cursor(from, null, true); + } + + final boolean rewritePage(long pagePos) { + Page p = readPage(pagePos); + if (p.getKeyCount()==0) { return true; } - try { - rewrite(readMap.root, set); - return true; - } catch (IllegalStateException e) { - // TODO should not rely on exception handling - if (DataUtils.getErrorCode(e.getMessage()) == DataUtils.ERROR_CHUNK_NOT_FOUND) { - // ignore - return false; - } - throw e; + assert p.isSaved(); + K key = p.getKey(0); + if (!isClosed()) { + RewriteDecisionMaker decisionMaker = new RewriteDecisionMaker<>(p.getPos()); + V result = operate(key, null, decisionMaker); + boolean res = decisionMaker.getDecision() != Decision.ABORT; + assert !res || result != null; + return res; } + return false; } - private int rewrite(Page p, Set set) { - if (p.isLeaf()) { - long pos = p.getPos(); - int chunkId = DataUtils.getPageChunkId(pos); - if (!set.contains(chunkId)) { - return 0; - } - if (p.getKeyCount() > 0) { - @SuppressWarnings("unchecked") - K key = (K) p.getKey(0); - V value = get(key); - if (value != null) { - replace(key, value, value); - } - } - return 1; - } - int writtenPageCount = 0; - for (int i = 0; i < getChildPageCount(p); i++) { - long childPos = p.getChildPagePos(i); - if (childPos != 0 && DataUtils.getPageType(childPos) == DataUtils.PAGE_TYPE_LEAF) { - // we would need to load 
the page, and it's a leaf: - // only do that if it's within the set of chunks we are - // interested in - int chunkId = DataUtils.getPageChunkId(childPos); - if (!set.contains(chunkId)) { - continue; - } - } - writtenPageCount += rewrite(p.getChildPage(i), set); - } - if (writtenPageCount == 0) { - long pos = p.getPos(); - int chunkId = DataUtils.getPageChunkId(pos); - if (set.contains(chunkId)) { - // an inner node page that is in one of the chunks, - // but only points to chunks that are not in the set: - // if no child was changed, we need to do that now - // (this is not needed if anyway one of the children - // was changed, as this would have updated this - // page as well) - Page p2 = p; - while (!p2.isLeaf()) { - p2 = p2.getChildPage(0); - } - @SuppressWarnings("unchecked") - K key = (K) p2.getKey(0); - V value = get(key); - if (value != null) { - replace(key, value, value); - } - writtenPageCount++; - } - } - return writtenPageCount; + /** + * Get a cursor to iterate over a number of keys and values in the latest version of this map. + * + * @param from the first key to return + * @return the cursor + */ + public final Cursor cursor(K from) { + return cursor(from, null, false); + } + + /** + * Get a cursor to iterate over a number of keys and values in the latest version of this map. + * + * @param from the first key to return + * @param to the last key to return + * @param reverse if true, iterate in reverse (descending) order + * @return the cursor + */ + public final Cursor cursor(K from, K to, boolean reverse) { + return cursor(flushAndGetRoot(), from, to, reverse); } /** * Get a cursor to iterate over a number of keys and values. 
* + * @param rootReference of this map's version to iterate over * @param from the first key to return + * @param to the last key to return + * @param reverse if true, iterate in reverse (descending) order * @return the cursor */ - public Cursor cursor(K from) { - return new Cursor(this, root, from); + public Cursor cursor(RootReference rootReference, K from, K to, boolean reverse) { + return new Cursor<>(rootReference, from, to, reverse); } @Override - public Set> entrySet() { - final MVMap map = this; - final Page root = this.root; + public final Set> entrySet() { + final RootReference rootReference = flushAndGetRoot(); return new AbstractSet>() { @Override public Iterator> iterator() { - final Cursor cursor = new Cursor(map, root, null); + final Cursor cursor = cursor(rootReference, null, null, false); return new Iterator>() { @Override @@ -877,13 +747,7 @@ public boolean hasNext() { @Override public Entry next() { K k = cursor.next(); - return new DataUtils.MapEntry(k, cursor.getValue()); - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); + return new SimpleImmutableEntry<>(k, cursor.getValue()); } }; @@ -905,13 +769,12 @@ public boolean contains(Object o) { @Override public Set keySet() { - final MVMap map = this; - final Page root = this.root; + final RootReference rootReference = flushAndGetRoot(); return new AbstractSet() { @Override public Iterator iterator() { - return new Cursor(map, root, null); + return cursor(rootReference, null, null, false); } @Override @@ -927,117 +790,175 @@ public boolean contains(Object o) { }; } - /** - * Get the root page. - * - * @return the root page - */ - public Page getRoot() { - return root; - } - /** * Get the map name. 
* * @return the name */ - public String getName() { + public final String getName() { return store.getMapName(id); } - public MVStore getStore() { + public final MVStore getStore() { return store; } + protected final boolean isPersistent() { + return store.getFileStore() != null && !isVolatile; + } + /** * Get the map id. Please note the map id may be different after compacting * a store. * * @return the map id */ - public int getId() { + public final int getId() { return id; } /** - * Rollback to the given version. + * The current root page (may not be null). * - * @param version the version + * @return the root page */ - void rollbackTo(long version) { - beforeWrite(); - if (version <= createVersion) { - // the map is removed later - } else if (root.getVersion() >= version) { - while (true) { - Page last = oldRoots.peekLast(); - if (last == null) { - break; - } - // slow, but rollback is not a common operation - oldRoots.removeLast(last); - root = last; - if (root.getVersion() < version) { - break; - } - } - } + public final Page getRootPage() { + return flushAndGetRoot().root; + } + + public RootReference getRoot() { + return root.get(); } /** - * Forget those old versions that are no longer needed. + * Get the root reference, flushing any current append buffer. + * + * @return current root reference */ - void removeUnusedOldVersions() { - long oldest = store.getOldestVersionToKeep(); - if (oldest == -1) { - return; - } - Page last = oldRoots.peekLast(); - while (true) { - Page p = oldRoots.peekFirst(); - if (p == null || p.getVersion() >= oldest || p == last) { - break; - } - oldRoots.removeFirst(p); + public RootReference flushAndGetRoot() { + RootReference rootReference = getRoot(); + if (singleWriter && rootReference.getAppendCounter() > 0) { + return flushAppendBuffer(rootReference, true); } + return rootReference; } - public boolean isReadOnly() { - return readOnly; + /** + * Set the initial root. 
+ * + * @param rootPage root page + * @param version initial version + */ + final void setInitialRoot(Page rootPage, long version) { + root.set(new RootReference<>(rootPage, version)); } /** - * Set the volatile flag of the map. + * Compare and set the root reference. * - * @param isVolatile the volatile flag + * @param expectedRootReference the old (expected) + * @param updatedRootReference the new + * @return whether updating worked */ - public void setVolatile(boolean isVolatile) { - this.isVolatile = isVolatile; + final boolean compareAndSetRoot(RootReference expectedRootReference, + RootReference updatedRootReference) { + return root.compareAndSet(expectedRootReference, updatedRootReference); } /** - * Whether this is volatile map, meaning that changes - * are not persisted. By default (even if the store is not persisted), - * maps are not volatile. + * Rollback to the given version. * - * @return whether this map is volatile + * @param version the version */ - public boolean isVolatile() { - return isVolatile; + final void rollbackTo(long version) { + // check if the map was removed and re-created later ? + if (version > createVersion) { + rollbackRoot(version); + } } /** - * This method is called before writing to the map. The default - * implementation checks whether writing is allowed, and tries + * Roll the root back to the specified version. + * + * @param version to rollback to + * @return true if rollback was a success, false if there was not enough in-memory history + */ + boolean rollbackRoot(long version) { + RootReference rootReference = flushAndGetRoot(); + RootReference previous; + while (rootReference.version >= version && (previous = rootReference.previous) != null) { + if (root.compareAndSet(rootReference, previous)) { + rootReference = previous; + closed = false; + } + } + setWriteVersion(version); + return rootReference.version < version; + } + + /** + * Use the new root page from now on. 
+ * + * @param the key class + * @param the value class + * @param expectedRootReference expected current root reference + * @param newRootPage the new root page + * @param attemptUpdateCounter how many attempt (including current) + * were made to update root + * @return new RootReference or null if update failed + */ + protected static boolean updateRoot(RootReference expectedRootReference, Page newRootPage, + int attemptUpdateCounter) { + return expectedRootReference.updateRootPage(newRootPage, attemptUpdateCounter) != null; + } + + /** + * Forget those old versions that are no longer needed. + * @param rootReference to inspect + */ + private void removeUnusedOldVersions(RootReference rootReference) { + rootReference.removeUnusedOldVersions(store.getOldestVersionToKeep()); + } + + public final boolean isReadOnly() { + return readOnly; + } + + /** + * Set the volatile flag of the map. + * + * @param isVolatile the volatile flag + */ + public final void setVolatile(boolean isVolatile) { + this.isVolatile = isVolatile; + } + + /** + * Whether this is volatile map, meaning that changes + * are not persisted. By default (even if the store is not persisted), + * maps are not volatile. + * + * @return whether this map is volatile + */ + public final boolean isVolatile() { + return isVolatile; + } + + /** + * This method is called before writing to the map. The default + * implementation checks whether writing is allowed, and tries * to detect concurrent modification. * * @throws UnsupportedOperationException if the map is read-only, * or if another thread is concurrently writing */ - protected void beforeWrite() { + protected final void beforeWrite() { + assert !getRoot().isLockedByCurrentThread() : getRoot(); if (closed) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_CLOSED, "This map is closed"); + int id = getId(); + String mapName = store.getMapName(id); + throw DataUtils.newMVStoreException( + DataUtils.ERROR_CLOSED, "Map {0}({1}) is closed. 
{2}", mapName, id, store.getPanicException()); } if (readOnly) { throw DataUtils.newUnsupportedOperationException( @@ -1047,23 +968,24 @@ protected void beforeWrite() { } @Override - public int hashCode() { + public final int hashCode() { return id; } @Override - public boolean equals(Object o) { + public final boolean equals(Object o) { return this == o; } /** - * Get the number of entries, as a integer. Integer.MAX_VALUE is returned if - * there are more than this entries. + * Get the number of entries, as a integer. {@link Integer#MAX_VALUE} is + * returned if there are more than this entries. * * @return the number of entries, as an integer + * @see #sizeAsLong() */ @Override - public int size() { + public final int size() { long size = sizeAsLong(); return size > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) size; } @@ -1073,37 +995,29 @@ public int size() { * * @return the number of entries */ - public long sizeAsLong() { - return root.getTotalCount(); + public final long sizeAsLong() { + return getRoot().getTotalCount(); } @Override public boolean isEmpty() { - // could also use (sizeAsLong() == 0) - return root.isLeaf() && root.getKeyCount() == 0; + return sizeAsLong() == 0; } - public long getCreateVersion() { + final long getCreateVersion() { return createVersion; } - /** - * Remove the given page (make the space available). - * - * @param pos the position of the page to remove - * @param memory the number of bytes used for this page - */ - protected void removePage(long pos, int memory) { - store.removePage(this, pos, memory); - } - /** * Open an old version for the given map. + * It will restore map at last known state of the version specified. + * (at the point right before the commit() call, which advanced map to the next version) + * Map is opened in read-only mode. 
* * @param version the version * @return the map */ - public MVMap openVersion(long version) { + public final MVMap openVersion(long version) { if (readOnly) { throw DataUtils.newUnsupportedOperationException( "This map is read-only; need to call " + @@ -1112,54 +1026,57 @@ public MVMap openVersion(long version) { DataUtils.checkArgument(version >= createVersion, "Unknown version {0}; this map was created in version is {1}", version, createVersion); - Page newest = null; - // need to copy because it can change - Page r = root; - if (version >= r.getVersion() && - (version == writeVersion || - r.getVersion() >= 0 || - version <= createVersion || - store.getFileStore() == null)) { - newest = r; - } else { - Page last = oldRoots.peekFirst(); - if (last == null || version < last.getVersion()) { - // smaller than all in-memory versions - return store.openMapVersion(version, id, this); - } - Iterator it = oldRoots.iterator(); - while (it.hasNext()) { - Page p = it.next(); - if (p.getVersion() > version) { - break; - } - last = p; - } - newest = last; + RootReference rootReference = flushAndGetRoot(); + removeUnusedOldVersions(rootReference); + RootReference previous; + while ((previous = rootReference.previous) != null && previous.version >= version) { + rootReference = previous; + } + if (previous == null && version < store.getOldestVersionToKeep()) { + throw DataUtils.newIllegalArgumentException("Unknown version {0}", version); } - MVMap m = openReadOnly(); - m.root = newest; + MVMap m = openReadOnly(rootReference.root, version); + assert m.getVersion() <= version : m.getVersion() + " <= " + version; return m; } /** * Open a copy of the map in read-only mode. 
* + * @param rootPos position of the root page + * @param version to open * @return the opened map */ - MVMap openReadOnly() { - MVMap m = new MVMap(keyType, valueType); + final MVMap openReadOnly(long rootPos, long version) { + Page root = readOrCreateRootPage(rootPos); + return openReadOnly(root, version); + } + + private MVMap openReadOnly(Page root, long version) { + MVMap m = cloneIt(); m.readOnly = true; - HashMap config = New.hashMap(); - config.put("id", id); - config.put("createVersion", createVersion); - m.init(store, config); - m.root = root; + m.setInitialRoot(root, version); return m; } - public long getVersion() { - return root.getVersion(); + /** + * Get version of the map, which is the version of the store, + * at the moment when map was modified last time. + * + * @return version + */ + public final long getVersion() { + return getRoot().getVersion(); + } + + /** + * Does the root have changes since the specified version? + * + * @param version root version + * @return true if has changes + */ + final boolean hasChangesSince(long version) { + return getRoot().hasChangesSince(version, isPersistent()); } /** @@ -1170,7 +1087,7 @@ public long getVersion() { * @param p the page * @return the number of direct children */ - protected int getChildPageCount(Page p) { + protected int getChildPageCount(Page p) { return p.getRawChildPageCount(); } @@ -1189,7 +1106,7 @@ public String getType() { * @param name the map name (or null) * @return the string */ - String asString(String name) { + protected String asString(String name) { StringBuilder buff = new StringBuilder(); if (name != null) { DataUtils.appendMap(buff, "name", name); @@ -1204,8 +1121,58 @@ String asString(String name) { return buff.toString(); } - void setWriteVersion(long writeVersion) { - this.writeVersion = writeVersion; + final RootReference setWriteVersion(long writeVersion) { + int attempt = 0; + while(true) { + RootReference rootReference = flushAndGetRoot(); + if(rootReference.version >= 
writeVersion) { + return rootReference; + } else if (isClosed()) { + // map was closed a while back and can not possibly be in use by now + // it's time to remove it completely from the store (it was anonymous already) + if (rootReference.getVersion() + 1 < store.getOldestVersionToKeep()) { + store.deregisterMapRoot(id); + return null; + } + } + + RootReference lockedRootReference = null; + if (++attempt > 3 || rootReference.isLocked()) { + lockedRootReference = lockRoot(rootReference, attempt); + rootReference = flushAndGetRoot(); + } + + try { + rootReference = rootReference.tryUnlockAndUpdateVersion(writeVersion, attempt); + if (rootReference != null) { + lockedRootReference = null; + removeUnusedOldVersions(rootReference); + return rootReference; + } + } finally { + if (lockedRootReference != null) { + unlockRoot(); + } + } + } + } + + /** + * Create empty leaf node page. + * + * @return new page + */ + protected Page createEmptyLeaf() { + return Page.createEmptyLeaf(this); + } + + /** + * Create empty internal node page. 
+ * + * @return new page + */ + protected Page createEmptyNode() { + return Page.createEmptyNode(this); } /** @@ -1213,48 +1180,262 @@ void setWriteVersion(long writeVersion) { * * @param sourceMap the source map */ - void copyFrom(MVMap sourceMap) { - beforeWrite(); - newRoot(copy(sourceMap.root, null)); - } - - private Page copy(Page source, CursorPos parent) { - Page target = Page.create(this, writeVersion, source); - if (source.isLeaf()) { - Page child = target; - for (CursorPos p = parent; p != null; p = p.parent) { - p.page.setChild(p.index, child); - p.page = p.page.copy(writeVersion); - child = p.page; - if (p.parent == null) { - newRoot(p.page); - beforeWrite(); - } - } + final void copyFrom(MVMap sourceMap) { + MVStore.TxCounter txCounter = store.registerVersionUsage(); + try { + beforeWrite(); + copy(sourceMap.getRootPage(), null, 0); + } finally { + store.deregisterVersionUsage(txCounter); + } + } + + private void copy(Page source, Page parent, int index) { + Page target = source.copy(this, true); + if (parent == null) { + setInitialRoot(target, INITIAL_VERSION); } else { - // temporarily, replace child pages with empty pages, - // to ensure there are no links to the old store - for (int i = 0; i < getChildPageCount(target); i++) { - target.setChild(i, null); - } - CursorPos pos = new CursorPos(target, 0, parent); + parent.setChild(index, target); + } + if (!source.isLeaf()) { for (int i = 0; i < getChildPageCount(target); i++) { - pos.index = i; - long p = source.getChildPagePos(i); - if (p != 0) { - // p == 0 means no child + if (source.getChildPagePos(i) != 0) { + // position 0 means no child // (for example the last entry of an r-tree node) // (the MVMap is also used for r-trees for compacting) - copy(source.getChildPage(i), pos); + copy(source.getChildPage(i), target, i); + } + } + target.setComplete(); + } + store.registerUnsavedMemory(target.getMemory()); + if (store.isSaveNeeded()) { + store.commit(); + } + } + + /** + * If map was used in 
append mode, this method will ensure that append buffer + * is flushed - emptied with all entries inserted into map as a new leaf. + * @param rootReference current RootReference + * @param fullFlush whether buffer should be completely flushed, + * otherwise just a single empty slot is required + * @return potentially updated RootReference + */ + private RootReference flushAppendBuffer(RootReference rootReference, boolean fullFlush) { + boolean preLocked = rootReference.isLockedByCurrentThread(); + boolean locked = preLocked; + int keysPerPage = store.getKeysPerPage(); + try { + IntValueHolder unsavedMemoryHolder = new IntValueHolder(); + int attempt = 0; + int keyCount; + int availabilityThreshold = fullFlush ? 0 : keysPerPage - 1; + while ((keyCount = rootReference.getAppendCounter()) > availabilityThreshold) { + if (!locked) { + // instead of just calling lockRoot() we loop here and check if someone else + // already flushed the buffer, then we don't need a lock + rootReference = tryLock(rootReference, ++attempt); + if (rootReference == null) { + rootReference = getRoot(); + continue; + } + locked = true; + } + + Page rootPage = rootReference.root; + long version = rootReference.version; + CursorPos pos = rootPage.getAppendCursorPos(null); + assert pos != null; + assert pos.index < 0 : pos.index; + int index = -pos.index - 1; + assert index == pos.page.getKeyCount() : index + " != " + pos.page.getKeyCount(); + Page p = pos.page; + CursorPos tip = pos; + pos = pos.parent; + + int remainingBuffer = 0; + Page page = null; + int available = keysPerPage - p.getKeyCount(); + if (available > 0) { + p = p.copy(); + if (keyCount <= available) { + p.expand(keyCount, keysBuffer, valuesBuffer); + } else { + p.expand(available, keysBuffer, valuesBuffer); + keyCount -= available; + if (fullFlush) { + K[] keys = p.createKeyStorage(keyCount); + V[] values = p.createValueStorage(keyCount); + System.arraycopy(keysBuffer, available, keys, 0, keyCount); + if (valuesBuffer != null) { 
+ System.arraycopy(valuesBuffer, available, values, 0, keyCount); + } + page = Page.createLeaf(this, keys, values, 0); + } else { + System.arraycopy(keysBuffer, available, keysBuffer, 0, keyCount); + if (valuesBuffer != null) { + System.arraycopy(valuesBuffer, available, valuesBuffer, 0, keyCount); + } + remainingBuffer = keyCount; + } + } + } else { + tip = tip.parent; + page = Page.createLeaf(this, + Arrays.copyOf(keysBuffer, keyCount), + valuesBuffer == null ? null : Arrays.copyOf(valuesBuffer, keyCount), + 0); } + + unsavedMemoryHolder.value = 0; + if (page != null) { + assert page.map == this; + assert page.getKeyCount() > 0; + K key = page.getKey(0); + unsavedMemoryHolder.value += page.getMemory(); + while (true) { + if (pos == null) { + if (p.getKeyCount() == 0) { + p = page; + } else { + K[] keys = p.createKeyStorage(1); + keys[0] = key; + Page.PageReference[] children = Page.createRefStorage(2); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(page); + unsavedMemoryHolder.value += p.getMemory(); + p = Page.createNode(this, keys, children, p.getTotalCount() + page.getTotalCount(), 0); + } + break; + } + Page c = p; + p = pos.page; + index = pos.index; + pos = pos.parent; + p = p.copy(); + p.setChild(index, page); + p.insertNode(index, key, c); + keyCount = p.getKeyCount(); + int at = keyCount - (p.isLeaf() ? 1 : 2); + if (keyCount <= keysPerPage && + (p.getMemory() < store.getMaxPageSize() || at <= 0)) { + break; + } + key = p.getKey(at); + page = p.split(at); + unsavedMemoryHolder.value += p.getMemory() + page.getMemory(); + } + } + p = replacePage(pos, p, unsavedMemoryHolder); + rootReference = rootReference.updatePageAndLockedStatus(p, preLocked || isPersistent(), + remainingBuffer); + if (rootReference != null) { + // should always be the case, except for spurious failure? 
+ locked = preLocked || isPersistent(); + if (isPersistent() && tip != null) { + store.registerUnsavedMemory(unsavedMemoryHolder.value + tip.processRemovalInfo(version)); + } + assert rootReference.getAppendCounter() <= availabilityThreshold; + break; + } + rootReference = getRoot(); } - target = pos.page; + } finally { + if (locked && !preLocked) { + rootReference = unlockRoot(); + } + } + return rootReference; + } + + private static Page replacePage(CursorPos path, Page replacement, + IntValueHolder unsavedMemoryHolder) { + int unsavedMemory = replacement.isSaved() ? 0 : replacement.getMemory(); + while (path != null) { + Page parent = path.page; + // condition below should always be true, but older versions (up to 1.4.197) + // may create single-childed (with no keys) internal nodes, which we skip here + if (parent.getKeyCount() > 0) { + Page child = replacement; + replacement = parent.copy(); + replacement.setChild(path.index, child); + unsavedMemory += replacement.getMemory(); + } + path = path.parent; + } + unsavedMemoryHolder.value += unsavedMemory; + return replacement; + } + + /** + * Appends entry to this map. this method is NOT thread safe and can not be used + * neither concurrently, nor in combination with any method that updates this map. + * Non-updating method may be used concurrently, but latest appended values + * are not guaranteed to be visible. 
+ * @param key should be higher in map's order than any existing key + * @param value to be appended + */ + public void append(K key, V value) { + if (singleWriter) { + beforeWrite(); + RootReference rootReference = lockRoot(getRoot(), 1); + int appendCounter = rootReference.getAppendCounter(); + try { + if (appendCounter >= keysPerPage) { + rootReference = flushAppendBuffer(rootReference, false); + appendCounter = rootReference.getAppendCounter(); + assert appendCounter < keysPerPage; + } + keysBuffer[appendCounter] = key; + if (valuesBuffer != null) { + valuesBuffer[appendCounter] = value; + } + ++appendCounter; + } finally { + unlockRoot(appendCounter); + } + } else { + put(key, value); + } + } + + /** + * Removes last entry from this map. this method is NOT thread safe and can not be used + * neither concurrently, nor in combination with any method that updates this map. + * Non-updating method may be used concurrently, but latest removal may not be visible. + */ + public void trimLast() { + if (singleWriter) { + RootReference rootReference = getRoot(); + int appendCounter = rootReference.getAppendCounter(); + boolean useRegularRemove = appendCounter == 0; + if (!useRegularRemove) { + rootReference = lockRoot(rootReference, 1); + try { + appendCounter = rootReference.getAppendCounter(); + useRegularRemove = appendCounter == 0; + if (!useRegularRemove) { + --appendCounter; + } + } finally { + unlockRoot(appendCounter); + } + } + if (useRegularRemove) { + Page lastLeaf = rootReference.root.getAppendCursorPos(null).page; + assert lastLeaf.isLeaf(); + assert lastLeaf.getKeyCount() > 0; + Object key = lastLeaf.getKey(lastLeaf.getKeyCount() - 1); + remove(key); + } + } else { + remove(lastKey()); } - return target; } @Override - public String toString() { + public final String toString() { return asString(null); } @@ -1269,10 +1450,20 @@ public interface MapBuilder, K, V> { /** * Create a new map of the given type. 
+ * @param store which will own this map + * @param config configuration * * @return the map */ - M create(); + M create(MVStore store, Map config); + + DataType getKeyType(); + + DataType getValueType(); + + void setKeyType(DataType dataType); + + void setValueType(DataType dataType); } @@ -1282,59 +1473,649 @@ public interface MapBuilder, K, V> { * @param the key type * @param the value type */ - public static class Builder implements MapBuilder, K, V> { + public abstract static class BasicBuilder, K, V> implements MapBuilder { - protected DataType keyType; - protected DataType valueType; + private DataType keyType; + private DataType valueType; /** * Create a new builder with the default key and value data types. */ - public Builder() { + protected BasicBuilder() { // ignore } + @Override + public DataType getKeyType() { + return keyType; + } + + @Override + public DataType getValueType() { + return valueType; + } + + @SuppressWarnings("unchecked") + @Override + public void setKeyType(DataType keyType) { + this.keyType = (DataType)keyType; + } + + @SuppressWarnings("unchecked") + @Override + public void setValueType(DataType valueType) { + this.valueType = (DataType)valueType; + } + /** * Set the key data type. * * @param keyType the key type * @return this */ - public Builder keyType(DataType keyType) { - this.keyType = keyType; + public BasicBuilder keyType(DataType keyType) { + setKeyType(keyType); return this; } - public DataType getKeyType() { - return keyType; - } - - public DataType getValueType() { - return valueType; - } - /** * Set the value data type. 
* * @param valueType the value type * @return this */ - public Builder valueType(DataType valueType) { - this.valueType = valueType; + public BasicBuilder valueType(DataType valueType) { + setValueType(valueType); + return this; + } + + @Override + public M create(MVStore store, Map config) { + if (getKeyType() == null) { + setKeyType(new ObjectDataType()); + } + if (getValueType() == null) { + setValueType(new ObjectDataType()); + } + DataType keyType = getKeyType(); + DataType valueType = getValueType(); + config.put("store", store); + config.put("key", keyType); + config.put("val", valueType); + return create(config); + } + + /** + * Create map from config. + * @param config config map + * @return new map + */ + protected abstract M create(Map config); + + } + + /** + * A builder for this class. + * + * @param the key type + * @param the value type + */ + public static class Builder extends BasicBuilder, K, V> { + private boolean singleWriter; + + public Builder() {} + + @Override + public Builder keyType(DataType dataType) { + setKeyType(dataType); + return this; + } + + @Override + public Builder valueType(DataType dataType) { + setValueType(dataType); + return this; + } + + /** + * Set up this Builder to produce MVMap, which can be used in append mode + * by a single thread. + * @see MVMap#append(Object, Object) + * @return this Builder for chained execution + */ + public Builder singleWriter() { + singleWriter = true; return this; } @Override - public MVMap create() { - if (keyType == null) { - keyType = new ObjectDataType(); + protected MVMap create(Map config) { + config.put("singleWriter", singleWriter); + Object type = config.get("type"); + if(type == null || type.equals("rtree")) { + return new MVMap<>(config, getKeyType(), getValueType()); + } + throw new IllegalArgumentException("Incompatible map type"); + } + } + + /** + * The decision on what to do on an update. 
+ */ + public enum Decision { ABORT, REMOVE, PUT, REPEAT } + + /** + * Class DecisionMaker provides callback interface (and should become a such in Java 8) + * for MVMap.operate method. + * It provides control logic to make a decision about how to proceed with update + * at the point in execution when proper place and possible existing value + * for insert/update/delete key is found. + * Revised value for insert/update is also provided based on original input value + * and value currently existing in the map. + * + * @param value type of the map + */ + public abstract static class DecisionMaker { + /** + * Decision maker for transaction rollback. + */ + public static final DecisionMaker DEFAULT = new DecisionMaker() { + @Override + public Decision decide(Object existingValue, Object providedValue) { + return providedValue == null ? Decision.REMOVE : Decision.PUT; + } + + @Override + public String toString() { + return "default"; + } + }; + + /** + * Decision maker for put(). + */ + public static final DecisionMaker PUT = new DecisionMaker() { + @Override + public Decision decide(Object existingValue, Object providedValue) { + return Decision.PUT; + } + + @Override + public String toString() { + return "put"; + } + }; + + /** + * Decision maker for remove(). + */ + public static final DecisionMaker REMOVE = new DecisionMaker() { + @Override + public Decision decide(Object existingValue, Object providedValue) { + return Decision.REMOVE; + } + + @Override + public String toString() { + return "remove"; + } + }; + + /** + * Decision maker for putIfAbsent() key/value. + */ + static final DecisionMaker IF_ABSENT = new DecisionMaker() { + @Override + public Decision decide(Object existingValue, Object providedValue) { + return existingValue == null ? Decision.PUT : Decision.ABORT; + } + + @Override + public String toString() { + return "if_absent"; + } + }; + + /** + * Decision maker for replace(). 
+ */ + static final DecisionMaker IF_PRESENT= new DecisionMaker() { + @Override + public Decision decide(Object existingValue, Object providedValue) { + return existingValue != null ? Decision.PUT : Decision.ABORT; + } + + @Override + public String toString() { + return "if_present"; + } + }; + + /** + * Makes a decision about how to proceed with the update. + * + * @param existingValue the old value + * @param providedValue the new value + * @param tip the cursor position + * @return the decision + */ + public Decision decide(V existingValue, V providedValue, CursorPos tip) { + return decide(existingValue, providedValue); + } + + /** + * Makes a decision about how to proceed with the update. + * @param existingValue value currently exists in the map + * @param providedValue original input value + * @return PUT if a new value need to replace existing one or + * a new value to be inserted if there is none + * REMOVE if existing value should be deleted + * ABORT if update operation should be aborted or repeated later + * REPEAT if update operation should be repeated immediately + */ + public abstract Decision decide(V existingValue, V providedValue); + + /** + * Provides revised value for insert/update based on original input value + * and value currently existing in the map. + * This method is only invoked after call to decide(), if it returns PUT. + * @param existingValue value currently exists in the map + * @param providedValue original input value + * @param value type + * @return value to be used by insert/update + */ + public T selectValue(T existingValue, T providedValue) { + return providedValue; + } + + /** + * Resets internal state (if any) of a this DecisionMaker to it's initial state. + * This method is invoked whenever concurrent update failure is encountered, + * so we can re-start update process. + */ + public void reset() {} + } + + /** + * Add, replace or remove a key-value pair. 
+ * + * @param key the key (may not be null) + * @param value new value, it may be null when removal is intended + * @param decisionMaker command object to make choices during transaction. + * @return previous value, if mapping for that key existed, or null otherwise + */ + public V operate(K key, V value, DecisionMaker decisionMaker) { + IntValueHolder unsavedMemoryHolder = new IntValueHolder(); + int attempt = 0; + while(true) { + RootReference rootReference = flushAndGetRoot(); + boolean locked = rootReference.isLockedByCurrentThread(); + if (!locked) { + if (attempt++ == 0) { + beforeWrite(); + } + if (attempt > 3 || rootReference.isLocked()) { + rootReference = lockRoot(rootReference, attempt); + locked = true; + } + } + Page rootPage = rootReference.root; + long version = rootReference.version; + CursorPos tip; + V result; + unsavedMemoryHolder.value = 0; + try { + CursorPos pos = CursorPos.traverseDown(rootPage, key); + if(!locked && rootReference != getRoot()) { + continue; + } + Page p = pos.page; + int index = pos.index; + tip = pos; + pos = pos.parent; + result = index < 0 ? 
null : p.getValue(index); + Decision decision = decisionMaker.decide(result, value, tip); + + switch (decision) { + case REPEAT: + decisionMaker.reset(); + continue; + case ABORT: + if(!locked && rootReference != getRoot()) { + decisionMaker.reset(); + continue; + } + return result; + case REMOVE: { + if (index < 0) { + if(!locked && rootReference != getRoot()) { + decisionMaker.reset(); + continue; + } + return null; + } + + if (p.getTotalCount() == 1 && pos != null) { + int keyCount; + do { + p = pos.page; + index = pos.index; + pos = pos.parent; + keyCount = p.getKeyCount(); + // condition below should always be false, but older + // versions (up to 1.4.197) may create + // single-childed (with no keys) internal nodes, + // which we skip here + } while (keyCount == 0 && pos != null); + + if (keyCount <= 1) { + if (keyCount == 1) { + assert index <= 1; + p = p.getChildPage(1 - index); + } else { + // if root happens to be such single-childed + // (with no keys) internal node, then just + // replace it with empty leaf + p = Page.createEmptyLeaf(this); + } + break; + } + } + p = p.copy(); + p.remove(index); + break; + } + case PUT: { + value = decisionMaker.selectValue(result, value); + p = p.copy(); + if (index < 0) { + p.insertLeaf(-index - 1, key, value); + int keyCount; + while ((keyCount = p.getKeyCount()) > store.getKeysPerPage() + || p.getMemory() > store.getMaxPageSize() + && keyCount > (p.isLeaf() ? 
1 : 2)) { + long totalCount = p.getTotalCount(); + int at = keyCount >> 1; + K k = p.getKey(at); + Page split = p.split(at); + unsavedMemoryHolder.value += p.getMemory() + split.getMemory(); + if (pos == null) { + K[] keys = p.createKeyStorage(1); + keys[0] = k; + Page.PageReference[] children = Page.createRefStorage(2); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(split); + p = Page.createNode(this, keys, children, totalCount, 0); + break; + } + Page c = p; + p = pos.page; + index = pos.index; + pos = pos.parent; + p = p.copy(); + p.setChild(index, split); + p.insertNode(index, k, c); + } + } else { + p.setValue(index, value); + } + break; + } + } + rootPage = replacePage(pos, p, unsavedMemoryHolder); + if (!locked) { + rootReference = rootReference.updateRootPage(rootPage, attempt); + if (rootReference == null) { + decisionMaker.reset(); + continue; + } + } + store.registerUnsavedMemory(unsavedMemoryHolder.value + tip.processRemovalInfo(version)); + return result; + } finally { + if(locked) { + unlockRoot(rootPage); + } + } + } + } + + private RootReference lockRoot(RootReference rootReference, int attempt) { + while(true) { + RootReference lockedRootReference = tryLock(rootReference, attempt++); + if (lockedRootReference != null) { + return lockedRootReference; } - if (valueType == null) { - valueType = new ObjectDataType(); + rootReference = getRoot(); + } + } + + /** + * Try to lock the root. 
+ * + * @param rootReference the old root reference + * @param attempt the number of attempts so far + * @return the new root reference + */ + protected RootReference tryLock(RootReference rootReference, int attempt) { + RootReference lockedRootReference = rootReference.tryLock(attempt); + if (lockedRootReference != null) { + return lockedRootReference; + } + assert !rootReference.isLockedByCurrentThread() : rootReference; + RootReference oldRootReference = rootReference.previous; + int contention = 1; + if (oldRootReference != null) { + long updateAttemptCounter = rootReference.updateAttemptCounter - + oldRootReference.updateAttemptCounter; + assert updateAttemptCounter >= 0 : updateAttemptCounter; + long updateCounter = rootReference.updateCounter - oldRootReference.updateCounter; + assert updateCounter >= 0 : updateCounter; + assert updateAttemptCounter >= updateCounter : updateAttemptCounter + " >= " + updateCounter; + contention += (int)((updateAttemptCounter+1) / (updateCounter+1)); + } + + if(attempt > 4) { + if (attempt <= 12) { + Thread.yield(); + } else if (attempt <= 70 - 2 * contention) { + try { + Thread.sleep(contention); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + } else { + synchronized (lock) { + notificationRequested = true; + try { + lock.wait(5); + } catch (InterruptedException ignore) { + } + } } - return new MVMap(keyType, valueType); } + return null; + } + + /** + * Unlock the root page, the new root being null. + * + * @return the new root reference (never null) + */ + private RootReference unlockRoot() { + return unlockRoot(null, -1); + } + /** + * Unlock the root page. 
+ * + * @param newRootPage the new root + * @return the new root reference (never null) + */ + protected RootReference unlockRoot(Page newRootPage) { + return unlockRoot(newRootPage, -1); + } + + private void unlockRoot(int appendCounter) { + unlockRoot(null, appendCounter); + } + + private RootReference unlockRoot(Page newRootPage, int appendCounter) { + RootReference updatedRootReference; + do { + RootReference rootReference = getRoot(); + assert rootReference.isLockedByCurrentThread(); + updatedRootReference = rootReference.updatePageAndLockedStatus( + newRootPage == null ? rootReference.root : newRootPage, + false, + appendCounter == -1 ? rootReference.getAppendCounter() : appendCounter + ); + } while(updatedRootReference == null); + + notifyWaiters(); + return updatedRootReference; + } + + private void notifyWaiters() { + if (notificationRequested) { + synchronized (lock) { + notificationRequested = false; + lock.notify(); + } + } + } + + final boolean isMemoryEstimationAllowed() { + return avgKeySize != null || avgValSize != null; + } + + final int evaluateMemoryForKeys(K[] storage, int count) { + if (avgKeySize == null) { + return calculateMemory(keyType, storage, count); + } + return MemoryEstimator.estimateMemory(avgKeySize, keyType, storage, count); + } + + final int evaluateMemoryForValues(V[] storage, int count) { + if (avgValSize == null) { + return calculateMemory(valueType, storage, count); + } + return MemoryEstimator.estimateMemory(avgValSize, valueType, storage, count); + } + + private static int calculateMemory(DataType keyType, T[] storage, int count) { + int mem = count * MEMORY_POINTER; + for (int i = 0; i < count; i++) { + mem += keyType.getMemory(storage[i]); + } + return mem; + } + + final int evaluateMemoryForKey(K key) { + if (avgKeySize == null) { + return keyType.getMemory(key); + } + return MemoryEstimator.estimateMemory(avgKeySize, keyType, key); } + final int evaluateMemoryForValue(V value) { + if (avgValSize == null) { + return 
valueType.getMemory(value); + } + return MemoryEstimator.estimateMemory(avgValSize, valueType, value); + } + + static int samplingPct(AtomicLong stats) { + return MemoryEstimator.samplingPct(stats); + } + + private static final class EqualsDecisionMaker extends DecisionMaker { + private final DataType dataType; + private final V expectedValue; + private Decision decision; + + EqualsDecisionMaker(DataType dataType, V expectedValue) { + this.dataType = dataType; + this.expectedValue = expectedValue; + } + + @Override + public Decision decide(V existingValue, V providedValue) { + assert decision == null; + decision = !areValuesEqual(dataType, expectedValue, existingValue) ? Decision.ABORT : + providedValue == null ? Decision.REMOVE : Decision.PUT; + return decision; + } + + @Override + public void reset() { + decision = null; + } + + Decision getDecision() { + return decision; + } + + @Override + public String toString() { + return "equals_to "+expectedValue; + } + } + + private static final class RewriteDecisionMaker extends DecisionMaker { + private final long pagePos; + private Decision decision; + + RewriteDecisionMaker(long pagePos) { + this.pagePos = pagePos; + } + + @Override + public Decision decide(V existingValue, V providedValue, CursorPos tip) { + assert decision == null; + decision = Decision.ABORT; + if(!DataUtils.isLeafPosition(pagePos)) { + while ((tip = tip.parent) != null) { + if (tip.page.getPos() == pagePos) { + decision = decide(existingValue, providedValue); + break; + } + } + } else if (tip.page.getPos() == pagePos) { + decision = decide(existingValue, providedValue); + } + return decision; + } + + @Override + public Decision decide(V existingValue, V providedValue) { + decision = existingValue == null ? 
Decision.ABORT : Decision.PUT; + return decision; + } + + @Override + public T selectValue(T existingValue, T providedValue) { + return existingValue; + } + + @Override + public void reset() { + decision = null; + } + + Decision getDecision() { + return decision; + } + + @Override + public String toString() { + return "rewrite"; + } + } + + private static final class IntValueHolder { + int value; + + IntValueHolder() {} + } } diff --git a/h2/src/main/org/h2/mvstore/MVMapConcurrent.java b/h2/src/main/org/h2/mvstore/MVMapConcurrent.java deleted file mode 100644 index 5f6842b6d2..0000000000 --- a/h2/src/main/org/h2/mvstore/MVMapConcurrent.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mvstore; - -import org.h2.mvstore.type.DataType; -import org.h2.mvstore.type.ObjectDataType; - -/** - * A class used for backward compatibility. - * - * @param the key type - * @param the value type - */ -public class MVMapConcurrent extends MVMap { - - public MVMapConcurrent(DataType keyType, DataType valueType) { - super(keyType, valueType); - } - - /** - * A builder for this class. - * - * @param the key type - * @param the value type - */ - public static class Builder implements - MapBuilder, K, V> { - - protected DataType keyType; - protected DataType valueType; - - /** - * Create a new builder with the default key and value data types. - */ - public Builder() { - // ignore - } - - /** - * Set the key data type. - * - * @param keyType the key type - * @return this - */ - public Builder keyType(DataType keyType) { - this.keyType = keyType; - return this; - } - - /** - * Set the key data type. 
- * - * @param valueType the key type - * @return this - */ - public Builder valueType(DataType valueType) { - this.valueType = valueType; - return this; - } - - @Override - public MVMapConcurrent create() { - if (keyType == null) { - keyType = new ObjectDataType(); - } - if (valueType == null) { - valueType = new ObjectDataType(); - } - return new MVMapConcurrent(keyType, valueType); - } - - } - -} diff --git a/h2/src/main/org/h2/mvstore/MVStore.java b/h2/src/main/org/h2/mvstore/MVStore.java index ba7870ee79..46daadd11e 100644 --- a/h2/src/main/org/h2/mvstore/MVStore.java +++ b/h2/src/main/org/h2/mvstore/MVStore.java @@ -1,32 +1,53 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; +import static org.h2.mvstore.MVMap.INITIAL_VERSION; import java.lang.Thread.UncaughtExceptionHandler; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collections; import java.util.Comparator; +import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; import java.util.Map; -import java.util.Map.Entry; +import java.util.PriorityQueue; +import java.util.Queue; import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentHashMap; - +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadPoolExecutor; 
+import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; +import java.util.function.Supplier; import org.h2.compress.CompressDeflate; import org.h2.compress.CompressLZF; import org.h2.compress.Compressor; import org.h2.mvstore.cache.CacheLongKeyLIRS; import org.h2.mvstore.type.StringDataType; -import org.h2.mvstore.Page.PageChildren; import org.h2.util.MathUtils; -import org.h2.util.New; +import org.h2.util.Utils; /* @@ -112,21 +133,31 @@ to a map (possibly the metadata map) - a map lookup when reading old data; also, this old data map needs to be cleaned up somehow; maybe using an additional timeout -- rollback of removeMap should restore the data - - which has big consequences, as the metadata map - would probably need references to the root nodes of all maps - */ /** * A persistent storage for maps. */ -public class MVStore { +public class MVStore implements AutoCloseable { + + // The following are attribute names (keys) in store header map + private static final String HDR_H = "H"; + private static final String HDR_BLOCK_SIZE = "blockSize"; + private static final String HDR_FORMAT = "format"; + private static final String HDR_CREATED = "created"; + private static final String HDR_FORMAT_READ = "formatRead"; + private static final String HDR_CHUNK = "chunk"; + private static final String HDR_BLOCK = "block"; + private static final String HDR_VERSION = "version"; + private static final String HDR_CLEAN = "clean"; + private static final String HDR_FLETCHER = "fletcher"; /** - * Whether assertions are enabled. + * The key for the entry within "layout" map, which contains id of "meta" map. 
+ * Entry value (hex encoded) is usually equal to 1, unless it's a legacy + * (upgraded) database and id 1 has been taken already by another map. */ - public static final boolean ASSERT = false; + public static final String META_ID_KEY = "meta.id"; /** * The block size (physical sector size) of the disk. The store header is @@ -134,77 +165,124 @@ public class MVStore { */ static final int BLOCK_SIZE = 4 * 1024; - private static final int FORMAT_WRITE = 1; - private static final int FORMAT_READ = 1; + private static final int FORMAT_WRITE_MIN = 2; + private static final int FORMAT_WRITE_MAX = 2; + private static final int FORMAT_READ_MIN = 2; + private static final int FORMAT_READ_MAX = 2; + + /** + * Store is open. + */ + private static final int STATE_OPEN = 0; + + /** + * Store is about to close now, but is still operational. + * Outstanding store operation by background writer or other thread may be in progress. + * New updates must not be initiated, unless they are part of a closing procedure itself. + */ + private static final int STATE_STOPPING = 1; + + /** + * Store is closing now, and any operation on it may fail. + */ + private static final int STATE_CLOSING = 2; + + /** + * Store is closed. + */ + private static final int STATE_CLOSED = 3; + + private static final int PIPE_LENGTH = 1; + + + /** + * Lock which governs access to major store operations: store(), close(), ... + * It serves as a replacement for synchronized(this), except it allows for + * non-blocking lock attempts. + */ + private final ReentrantLock storeLock = new ReentrantLock(true); + private final ReentrantLock serializationLock = new ReentrantLock(true); + private final ReentrantLock saveChunkLock = new ReentrantLock(true); /** - * Used to mark a chunk as free, when it was detected that live bookkeeping - * is incorrect. + * Reference to a background thread, which is expected to be running, if any. 
*/ - private static final int MARKED_FREE = 10000000; + private final AtomicReference backgroundWriterThread = new AtomicReference<>(); /** - * The background thread, if any. + * Single-threaded executor for serialization of the store snapshot into ByteBuffer */ - volatile BackgroundWriterThread backgroundWriterThread; + private ThreadPoolExecutor serializationExecutor; + + /** + * Single-threaded executor for saving ByteBuffer as a new Chunk + */ + private ThreadPoolExecutor bufferSaveExecutor; private volatile boolean reuseSpace = true; - private boolean closed; + private volatile int state; + + private final FileStore fileStore; - private FileStore fileStore; - private boolean fileStoreIsProvided; + private final boolean fileStoreIsProvided; private final int pageSplitSize; + private final int keysPerPage; + /** * The page cache. The default size is 16 MB, and the average size is 2 KB. * It is split in 16 segments. The stack move distance is 2% of the expected * number of entries. */ - private CacheLongKeyLIRS cache; + private final CacheLongKeyLIRS> cache; /** - * The page chunk references cache. The default size is 4 MB, and the - * average size is 2 KB. It is split in 16 segments. The stack move distance - * is 2% of the expected number of entries. + * Cache for chunks "Table of Content" used to translate page's + * sequential number within containing chunk into byte position + * within chunk's image. Cache keyed by chunk id. */ - private CacheLongKeyLIRS cacheChunkRef; + private final CacheLongKeyLIRS chunksToC; /** * The newest chunk. If nothing was stored yet, this field is not set. */ - private Chunk lastChunk; + private volatile Chunk lastChunk; /** * The map of chunks. 
*/ - private final ConcurrentHashMap chunks = - new ConcurrentHashMap(); + private final ConcurrentHashMap chunks = new ConcurrentHashMap<>(); + + private final Queue removedPages = new PriorityBlockingQueue<>(); + + private final Deque deadChunks = new ArrayDeque<>(); + + private long updateCounter = 0; + private long updateAttemptCounter = 0; /** - * The map of temporarily freed storage space caused by freed pages. The key - * is the unsaved version, the value is the map of chunks. The maps contains - * the number of freed entries per chunk. Access is synchronized. + * The layout map. Contains chunks metadata and root locations for all maps. + * This is relatively fast changing part of metadata */ - private final ConcurrentHashMap> freedPageSpace = - new ConcurrentHashMap>(); + private final MVMap layout; /** - * The metadata map. Write access to this map needs to be synchronized on - * the store. + * The metadata map. Holds name -> id and id -> name and id -> metadata + * mapping for all maps. This is relatively slow changing part of metadata */ - private MVMap meta; + private final MVMap meta; - private final ConcurrentHashMap> maps = - new ConcurrentHashMap>(); + private final ConcurrentHashMap> maps = new ConcurrentHashMap<>(); - private HashMap storeHeader = New.hashMap(); + private final HashMap storeHeader = new HashMap<>(); - private WriteBuffer writeBuffer; + private final Queue writeBufferPool = new ArrayBlockingQueue<>(PIPE_LENGTH + 1); - private int lastMapId; + private final AtomicInteger lastMapId = new AtomicInteger(); + + private int lastChunkId; private int versionsToKeep = 5; @@ -218,14 +296,27 @@ public class MVStore { private Compressor compressorHigh; - private final UncaughtExceptionHandler backgroundExceptionHandler; + private final boolean recoveryMode; + + public final UncaughtExceptionHandler backgroundExceptionHandler; + + private volatile long currentVersion; + + /** + * Oldest store version in use. 
All version beyond this can be safely dropped + */ + private final AtomicLong oldestVersionToKeep = new AtomicLong(); - private long currentVersion; + /** + * Ordered collection of all version usage counters for all versions starting + * from oldestVersionToKeep and up to current. + */ + private final Deque versions = new LinkedList<>(); /** - * The version of the last stored chunk, or -1 if nothing was stored so far. + * Counter of open transactions for the latest (current) store version */ - private long lastStoredVersion; + private volatile TxCounter currentTxCounter = new TxCounter(currentVersion); /** * The estimated memory used by unsaved pages. This number is not accurate, @@ -233,28 +324,28 @@ public class MVStore { * are counted. */ private int unsavedMemory; - private int autoCommitMemory; - private boolean saveNeeded; + private final int autoCommitMemory; + private volatile boolean saveNeeded; /** * The time the store was created, in milliseconds since 1970. */ private long creationTime; - private int retentionTime; - - private long lastCommitTime; /** - * The earliest chunk to retain, if any. + * How long to retain old, persisted chunks, in milliseconds. For larger or + * equal to zero, a chunk is never directly overwritten if unused, but + * instead, the unused field is set. If smaller zero, chunks are directly + * overwritten if unused. */ - private Chunk retainChunk; + private int retentionTime; + + private long lastCommitTime; /** * The version of the current store operation (if any). 
*/ - private volatile long currentStoreVersion = -1; - - private Thread currentStoreThread; + private volatile long currentStoreVersion = INITIAL_VERSION; private volatile boolean metaChanged; @@ -263,115 +354,244 @@ public class MVStore { */ private int autoCommitDelay; - private int autoCompactFillRate; + private final int autoCompactFillRate; private long autoCompactLastFileOpCount; - private Object compactSync = new Object(); + private volatile MVStoreException panicException; + + private long lastTimeAbsolute; + + private long leafCount; + private long nonLeafCount; - private IllegalStateException panicException; /** * Create and open the store. * * @param config the configuration to use - * @throws IllegalStateException if the file is corrupt, or an exception + * @throws MVStoreException if the file is corrupt, or an exception * occurred while opening * @throws IllegalArgumentException if the directory does not exist */ - MVStore(HashMap config) { - Object o = config.get("compress"); - this.compressionLevel = o == null ? 0 : (Integer) o; + MVStore(Map config) { + recoveryMode = config.containsKey("recoveryMode"); + compressionLevel = DataUtils.getConfigParam(config, "compress", 0); String fileName = (String) config.get("fileName"); - o = config.get("pageSplitSize"); - if (o == null) { - pageSplitSize = fileName == null ? 
4 * 1024 : 16 * 1024; - } else { - pageSplitSize = (Integer) o; - } - o = config.get("backgroundExceptionHandler"); - this.backgroundExceptionHandler = (UncaughtExceptionHandler) o; - meta = new MVMap(StringDataType.INSTANCE, - StringDataType.INSTANCE); - HashMap c = New.hashMap(); - c.put("id", 0); - c.put("createVersion", currentVersion); - meta.init(this, c); - fileStore = (FileStore) config.get("fileStore"); - if (fileName == null && fileStore == null) { - cache = null; - cacheChunkRef = null; - return; - } + FileStore fileStore = (FileStore) config.get("fileStore"); if (fileStore == null) { fileStoreIsProvided = false; - fileStore = new FileStore(); + if (fileName != null) { + fileStore = new FileStore(); + } } else { + if (fileName != null) { + throw new IllegalArgumentException("fileName && fileStore"); + } fileStoreIsProvided = true; } - retentionTime = fileStore.getDefaultRetentionTime(); - boolean readOnly = config.containsKey("readOnly"); - o = config.get("cacheSize"); - int mb = o == null ? 16 : (Integer) o; - if (mb > 0) { - long maxMemoryBytes = mb * 1024L * 1024L; - int segmentCount = 16; - int stackMoveDistance = 8; - cache = new CacheLongKeyLIRS( - maxMemoryBytes, - segmentCount, stackMoveDistance); - cacheChunkRef = new CacheLongKeyLIRS( - maxMemoryBytes / 4, - segmentCount, stackMoveDistance); - } - o = config.get("autoCommitBufferSize"); - int kb = o == null ? 1024 : (Integer) o; - // 19 KB memory is about 1 KB storage - autoCommitMemory = kb * 1024 * 19; - - o = config.get("autoCompactFillRate"); - autoCompactFillRate = o == null ? 
50 : (Integer) o; - - char[] encryptionKey = (char[]) config.get("encryptionKey"); - try { - if (!fileStoreIsProvided) { - fileStore.open(fileName, readOnly, encryptionKey); - } - if (fileStore.size() == 0) { - creationTime = getTime(); - lastCommitTime = creationTime; - storeHeader.put("H", 2); - storeHeader.put("blockSize", BLOCK_SIZE); - storeHeader.put("format", FORMAT_WRITE); - storeHeader.put("created", creationTime); - writeStoreHeader(); - } else { - readStoreHeader(); + this.fileStore = fileStore; + + int pgSplitSize = 48; // for "mem:" case it is # of keys + CacheLongKeyLIRS.Config cc = null; + CacheLongKeyLIRS.Config cc2 = null; + if (this.fileStore != null) { + int mb = DataUtils.getConfigParam(config, "cacheSize", 16); + if (mb > 0) { + cc = new CacheLongKeyLIRS.Config(); + cc.maxMemory = mb * 1024L * 1024L; + Object o = config.get("cacheConcurrency"); + if (o != null) { + cc.segmentCount = (Integer)o; + } } - } catch (IllegalStateException e) { - panic(e); - } finally { - if (encryptionKey != null) { - Arrays.fill(encryptionKey, (char) 0); + cc2 = new CacheLongKeyLIRS.Config(); + cc2.maxMemory = 1024L * 1024L; + pgSplitSize = 16 * 1024; + } + if (cc != null) { + cache = new CacheLongKeyLIRS<>(cc); + } else { + cache = null; + } + chunksToC = cc2 == null ? 
null : new CacheLongKeyLIRS<>(cc2); + + pgSplitSize = DataUtils.getConfigParam(config, "pageSplitSize", pgSplitSize); + // Make sure pages will fit into cache + if (cache != null && pgSplitSize > cache.getMaxItemSize()) { + pgSplitSize = (int)cache.getMaxItemSize(); + } + pageSplitSize = pgSplitSize; + keysPerPage = DataUtils.getConfigParam(config, "keysPerPage", 48); + backgroundExceptionHandler = + (UncaughtExceptionHandler)config.get("backgroundExceptionHandler"); + layout = new MVMap<>(this, 0, StringDataType.INSTANCE, StringDataType.INSTANCE); + if (this.fileStore != null) { + retentionTime = this.fileStore.getDefaultRetentionTime(); + // 19 KB memory is about 1 KB storage + int kb = Math.max(1, Math.min(19, Utils.scaleForAvailableMemory(64))) * 1024; + kb = DataUtils.getConfigParam(config, "autoCommitBufferSize", kb); + autoCommitMemory = kb * 1024; + autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 90); + char[] encryptionKey = (char[]) config.get("encryptionKey"); + // there is no need to lock store here, since it is not opened (or even created) yet, + // just to make some assertions happy, when they ensure single-threaded access + storeLock.lock(); + try { + saveChunkLock.lock(); + try { + if (!fileStoreIsProvided) { + boolean readOnly = config.containsKey("readOnly"); + this.fileStore.open(fileName, readOnly, encryptionKey); + } + if (this.fileStore.size() == 0) { + creationTime = getTimeAbsolute(); + storeHeader.put(HDR_H, 2); + storeHeader.put(HDR_BLOCK_SIZE, BLOCK_SIZE); + storeHeader.put(HDR_FORMAT, FORMAT_WRITE_MAX); + storeHeader.put(HDR_CREATED, creationTime); + setLastChunk(null); + writeStoreHeader(); + } else { + readStoreHeader(); + } + } finally { + saveChunkLock.unlock(); + } + } catch (MVStoreException e) { + panic(e); + } finally { + if (encryptionKey != null) { + Arrays.fill(encryptionKey, (char) 0); + } + unlockAndCheckPanicCondition(); } + lastCommitTime = getTimeSinceCreation(); + + meta = openMetaMap(); + 
scrubLayoutMap(); + scrubMetaMap(); + + // setAutoCommitDelay starts the thread, but only if + // the parameter is different from the old value + int delay = DataUtils.getConfigParam(config, "autoCommitDelay", 1000); + setAutoCommitDelay(delay); + } else { + autoCommitMemory = 0; + autoCompactFillRate = 0; + meta = openMetaMap(); } - lastCommitTime = getTime(); + onVersionChange(currentVersion); + } - // setAutoCommitDelay starts the thread, but only if - // the parameter is different from the old value - o = config.get("autoCommitDelay"); - int delay = o == null ? 1000 : (Integer) o; - setAutoCommitDelay(delay); + private MVMap openMetaMap() { + String metaIdStr = layout.get(META_ID_KEY); + int metaId; + if (metaIdStr == null) { + metaId = lastMapId.incrementAndGet(); + layout.put(META_ID_KEY, Integer.toHexString(metaId)); + } else { + metaId = DataUtils.parseHexInt(metaIdStr); + } + MVMap map = new MVMap<>(this, metaId, StringDataType.INSTANCE, StringDataType.INSTANCE); + map.setRootPos(getRootPos(map.getId()), currentVersion - 1); + return map; } - private void panic(IllegalStateException e) { - if (backgroundExceptionHandler != null) { - backgroundExceptionHandler.uncaughtException(null, e); + private void scrubLayoutMap() { + Set keysToRemove = new HashSet<>(); + + // split meta map off layout map + for (String prefix : new String[]{ DataUtils.META_NAME, DataUtils.META_MAP }) { + for (Iterator it = layout.keyIterator(prefix); it.hasNext(); ) { + String key = it.next(); + if (!key.startsWith(prefix)) { + break; + } + meta.putIfAbsent(key, layout.get(key)); + markMetaChanged(); + keysToRemove.add(key); + } + } + + // remove roots of non-existent maps (leftover after unfinished map removal) + for (Iterator it = layout.keyIterator(DataUtils.META_ROOT); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_ROOT)) { + break; + } + String mapIdStr = key.substring(key.lastIndexOf('.') + 1); + if(!meta.containsKey(DataUtils.META_MAP + 
mapIdStr) && DataUtils.parseHexInt(mapIdStr) != meta.getId()) { + keysToRemove.add(key); + } + } + + for (String key : keysToRemove) { + layout.remove(key); + } + } + + private void scrubMetaMap() { + Set keysToRemove = new HashSet<>(); + + // ensure that there is only one name mapped to each id + // this could be a leftover of an unfinished map rename + for (Iterator it = meta.keyIterator(DataUtils.META_NAME); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_NAME)) { + break; + } + String mapName = key.substring(DataUtils.META_NAME.length()); + int mapId = DataUtils.parseHexInt(meta.get(key)); + String realMapName = getMapName(mapId); + if(!mapName.equals(realMapName)) { + keysToRemove.add(key); + } + } + + for (String key : keysToRemove) { + meta.remove(key); + markMetaChanged(); + } + + for (Iterator it = meta.keyIterator(DataUtils.META_MAP); it.hasNext();) { + String key = it.next(); + if (!key.startsWith(DataUtils.META_MAP)) { + break; + } + String mapName = DataUtils.getMapName(meta.get(key)); + String mapIdStr = key.substring(DataUtils.META_MAP.length()); + // ensure that last map id is not smaller than max of any existing map ids + int mapId = DataUtils.parseHexInt(mapIdStr); + if (mapId > lastMapId.get()) { + lastMapId.set(mapId); + } + // each map should have a proper name + if(!mapIdStr.equals(meta.get(DataUtils.META_NAME + mapName))) { + meta.put(DataUtils.META_NAME + mapName, mapIdStr); + markMetaChanged(); + } + } + } + + private void unlockAndCheckPanicCondition() { + storeLock.unlock(); + if (getPanicException() != null) { + closeImmediately(); + } + } + + private void panic(MVStoreException e) { + if (isOpen()) { + handleException(e); + panicException = e; } - panicException = e; - closeImmediately(); throw e; } + public MVStoreException getPanicException() { + return panicException; + } + /** * Open a store in exclusive mode. For a file-based store, the parent * directory must already exist. 
@@ -380,29 +600,11 @@ private void panic(IllegalStateException e) { * @return the store */ public static MVStore open(String fileName) { - HashMap config = New.hashMap(); + HashMap config = new HashMap<>(); config.put("fileName", fileName); return new MVStore(config); } - /** - * Open an old, stored version of a map. - * - * @param version the version - * @param mapId the map id - * @param template the template map - * @return the read-only map - */ - @SuppressWarnings("unchecked") - > T openMapVersion(long version, int mapId, - MVMap template) { - MVMap oldMeta = getMetaMap(version); - long rootPos = getRootPos(oldMeta, mapId); - MVMap m = template.openReadOnly(); - m.setRootPos(rootPos, version); - return (T) m; - } - /** * Open a map with the default settings. The map is automatically create if * it does not yet exist. If a map with this name is already open, this map @@ -414,7 +616,7 @@ public static MVStore open(String fileName) { * @return the map */ public MVMap openMap(String name) { - return openMap(name, new MVMap.Builder()); + return openMap(name, new MVMap.Builder<>()); } /** @@ -422,49 +624,94 @@ public MVMap openMap(String name) { * does not yet exist. If a map with this name is already open, this map is * returned. * + * @param the map type * @param the key type * @param the value type * @param name the name of the map * @param builder the map builder * @return the map */ - public synchronized , K, V> M openMap( - String name, MVMap.MapBuilder builder) { - checkOpen(); - String x = meta.get("name." 
+ name); - int id; - long root; - HashMap c; - M map; - if (x != null) { - id = DataUtils.parseHexInt(x); + public , K, V> M openMap(String name, MVMap.MapBuilder builder) { + int id = getMapId(name); + if (id >= 0) { @SuppressWarnings("unchecked") - M old = (M) maps.get(id); - if (old != null) { - return old; - } - map = builder.create(); - String config = meta.get(MVMap.getMapKey(id)); - c = New.hashMap(); - c.putAll(DataUtils.parseMap(config)); - c.put("id", id); - map.init(this, c); - root = getRootPos(meta, id); + M map = (M) getMap(id); + if(map == null) { + map = openMap(id, builder); + } + assert builder.getKeyType() == null || map.getKeyType().getClass().equals(builder.getKeyType().getClass()); + assert builder.getValueType() == null + || map.getValueType().getClass().equals(builder.getValueType().getClass()); + return map; } else { - c = New.hashMap(); - id = ++lastMapId; + HashMap c = new HashMap<>(); + id = lastMapId.incrementAndGet(); + assert getMap(id) == null; c.put("id", id); c.put("createVersion", currentVersion); - map = builder.create(); - map.init(this, c); - markMetaChanged(); - x = Integer.toHexString(id); + M map = builder.create(this, c); + String x = Integer.toHexString(id); meta.put(MVMap.getMapKey(id), map.asString(name)); - meta.put("name." + name, x); - root = 0; + String existing = meta.putIfAbsent(DataUtils.META_NAME + name, x); + if (existing != null) { + // looks like map was created concurrently, cleanup and re-start + meta.remove(MVMap.getMapKey(id)); + return openMap(name, builder); + } + long lastStoredVersion = currentVersion - 1; + map.setRootPos(0, lastStoredVersion); + markMetaChanged(); + @SuppressWarnings("unchecked") + M existingMap = (M) maps.putIfAbsent(id, map); + if (existingMap != null) { + map = existingMap; + } + return map; + } + } + + /** + * Open an existing map with the given builder. 
+ * + * @param the map type + * @param the key type + * @param the value type + * @param id the map id + * @param builder the map builder + * @return the map + */ + @SuppressWarnings("unchecked") + public , K, V> M openMap(int id, MVMap.MapBuilder builder) { + M map; + while ((map = (M)getMap(id)) == null) { + String configAsString = meta.get(MVMap.getMapKey(id)); + DataUtils.checkArgument(configAsString != null, "Missing map with id {0}", id); + HashMap config = new HashMap<>(DataUtils.parseMap(configAsString)); + config.put("id", id); + map = builder.create(this, config); + long root = getRootPos(id); + long lastStoredVersion = currentVersion - 1; + map.setRootPos(root, lastStoredVersion); + if (maps.putIfAbsent(id, map) == null) { + break; + } + // looks like map has been concurrently created already, re-start } - map.setRootPos(root, -1); - maps.put(id, map); + return map; + } + + /** + * Get map by id. + * + * @param the key type + * @param the value type + * @param id map id + * @return Map + */ + public MVMap getMap(int id) { + checkOpen(); + @SuppressWarnings("unchecked") + MVMap map = (MVMap) maps.get(id); return map; } @@ -473,33 +720,49 @@ public synchronized , K, V> M openMap( * * @return the set of names */ - public synchronized Set getMapNames() { - HashSet set = New.hashSet(); + public Set getMapNames() { + HashSet set = new HashSet<>(); checkOpen(); - for (Iterator it = meta.keyIterator("name."); it.hasNext();) { + for (Iterator it = meta.keyIterator(DataUtils.META_NAME); it.hasNext();) { String x = it.next(); - if (!x.startsWith("name.")) { + if (!x.startsWith(DataUtils.META_NAME)) { break; } - set.add(x.substring("name.".length())); + String mapName = x.substring(DataUtils.META_NAME.length()); + set.add(mapName); } return set; } + /** + * Get this store's layout map. This data is for informational purposes only. The + * data is subject to change in future versions. + *

    + * The data in this map should not be modified (changing system data may corrupt the store). + *

    + * The layout map contains the following entries: + *

    +     * chunk.{chunkId} = {chunk metadata}
    +     * root.{mapId} = {root position}
    +     * 
    + * + * @return the metadata map + */ + public MVMap getLayoutMap() { + checkOpen(); + return layout; + } + /** * Get the metadata map. This data is for informational purposes only. The * data is subject to change in future versions. *

    - * The data in this map should not be modified (changing system data may - * corrupt the store). If modifications are needed, they need be - * synchronized on the store. + * The data in this map should not be modified (changing system data may corrupt the store). *

    * The metadata map contains the following entries: *

    -     * chunk.{chunkId} = {chunk metadata}
          * name.{name} = {mapId}
          * map.{mapId} = {map metadata}
    -     * root.{mapId} = {root position}
          * setting.storeVersion = {version}
          * 
    * @@ -510,23 +773,25 @@ public MVMap getMetaMap() { return meta; } - private MVMap getMetaMap(long version) { + private MVMap getLayoutMap(long version) { Chunk c = getChunkForVersion(version); DataUtils.checkArgument(c != null, "Unknown version {0}", version); - c = readChunkHeader(c.block); - MVMap oldMeta = meta.openReadOnly(); - oldMeta.setRootPos(c.metaRootPos, version); - return oldMeta; + long block = c.block; + c = readChunkHeader(block); + MVMap oldMap = layout.openReadOnly(c.layoutRootPos, version); + return oldMap; } private Chunk getChunkForVersion(long version) { - Chunk c = lastChunk; - while (true) { - if (c == null || c.version <= version) { - return c; + Chunk newest = null; + for (Chunk c : chunks.values()) { + if (c.version <= version) { + if (newest == null || c.id > newest.id) { + newest = c; + } } - c = chunks.get(c.id - 1); } + return newest; } /** @@ -536,7 +801,17 @@ private Chunk getChunkForVersion(long version) { * @return true if it exists */ public boolean hasMap(String name) { - return meta.containsKey("name." + name); + return meta.containsKey(DataUtils.META_NAME + name); + } + + /** + * Check whether a given map exists and has data. + * + * @param name the map name + * @return true if it exists and has data. 
+ */ + public boolean hasData(String name) { + return hasMap(name) && getRootPos(getMapId(name)) != 0; } private void markMetaChanged() { @@ -545,11 +820,11 @@ private void markMetaChanged() { metaChanged = true; } - private synchronized void readStoreHeader() { - boolean validHeader = false; - // we don't know yet which chunk and version are the newest - long newestVersion = -1; - long chunkBlock = -1; + private void readStoreHeader() { + Chunk newest = null; + boolean assumeCleanShutdown = true; + boolean validStoreHeader = false; + // find out which chunk and version are the newest // read the first two blocks ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE); byte[] buff = new byte[BLOCK_SIZE]; @@ -557,61 +832,70 @@ private synchronized void readStoreHeader() { fileHeaderBlocks.get(buff); // the following can fail for various reasons try { - String s = new String(buff, 0, BLOCK_SIZE, - DataUtils.LATIN).trim(); - HashMap m = DataUtils.parseMap(s); - int blockSize = DataUtils.readHexInt( - m, "blockSize", BLOCK_SIZE); - if (blockSize != BLOCK_SIZE) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_UNSUPPORTED_FORMAT, - "Block size {0} is currently not supported", - blockSize); - } - int check = DataUtils.readHexInt(m, "fletcher", 0); - m.remove("fletcher"); - s = s.substring(0, s.lastIndexOf("fletcher") - 1); - byte[] bytes = s.getBytes(DataUtils.LATIN); - int checksum = DataUtils.getFletcher32(bytes, - bytes.length); - if (check != checksum) { + HashMap m = DataUtils.parseChecksummedMap(buff); + if (m == null) { + assumeCleanShutdown = false; continue; } - long version = DataUtils.readHexLong(m, "version", 0); - if (version > newestVersion) { - newestVersion = version; + long version = DataUtils.readHexLong(m, HDR_VERSION, 0); + // if both header blocks do agree on version + // we'll continue on happy path - assume that previous shutdown was clean + assumeCleanShutdown = assumeCleanShutdown && (newest == null || version == 
newest.version); + if (newest == null || version > newest.version) { + validStoreHeader = true; storeHeader.putAll(m); - chunkBlock = DataUtils.readHexLong(m, "block", 0); - creationTime = DataUtils.readHexLong(m, "created", 0); - validHeader = true; + creationTime = DataUtils.readHexLong(m, HDR_CREATED, 0); + int chunkId = DataUtils.readHexInt(m, HDR_CHUNK, 0); + long block = DataUtils.readHexLong(m, HDR_BLOCK, 2); + Chunk test = readChunkHeaderAndFooter(block, chunkId); + if (test != null) { + newest = test; + } } - } catch (Exception e) { - continue; + } catch (Exception ignore) { + assumeCleanShutdown = false; } } - if (!validHeader) { - throw DataUtils.newIllegalStateException( + + if (!validStoreHeader) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Store header is corrupt: {0}", fileStore); } - long format = DataUtils.readHexLong(storeHeader, "format", 1); - if (format > FORMAT_WRITE && !fileStore.isReadOnly()) { - throw DataUtils.newIllegalStateException( + int blockSize = DataUtils.readHexInt(storeHeader, HDR_BLOCK_SIZE, BLOCK_SIZE); + if (blockSize != BLOCK_SIZE) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_UNSUPPORTED_FORMAT, - "The write format {0} is larger " + - "than the supported format {1}, " + - "and the file was not opened in read-only mode", - format, FORMAT_WRITE); - } - format = DataUtils.readHexLong(storeHeader, "formatRead", format); - if (format > FORMAT_READ) { - throw DataUtils.newIllegalStateException( + "Block size {0} is currently not supported", + blockSize); + } + long format = DataUtils.readHexLong(storeHeader, HDR_FORMAT, 1); + if (!fileStore.isReadOnly()) { + if (format > FORMAT_WRITE_MAX) { + throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MAX, + "The write format {0} is larger than the supported format {1}"); + } else if (format < FORMAT_WRITE_MIN) { + throw getUnsupportedWriteFormatException(format, FORMAT_WRITE_MIN, + "The write format {0} is smaller than the supported format 
{1}"); + } + } + format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format); + if (format > FORMAT_READ_MAX) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_UNSUPPORTED_FORMAT, - "The read format {0} is larger " + - "than the supported format {1}", - format, FORMAT_READ); + "The read format {0} is larger than the supported format {1}", + format, FORMAT_READ_MAX); + } else if (format < FORMAT_READ_MIN) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_UNSUPPORTED_FORMAT, + "The read format {0} is smaller than the supported format {1}", + format, FORMAT_READ_MIN); + } + + assumeCleanShutdown = assumeCleanShutdown && newest != null && !recoveryMode; + if (assumeCleanShutdown) { + assumeCleanShutdown = DataUtils.readHexInt(storeHeader, HDR_CLEAN, 0) != 0; } - lastStoredVersion = -1; chunks.clear(); long now = System.currentTimeMillis(); // calculate the year (doesn't have to be exact; @@ -627,229 +911,460 @@ private synchronized void readStoreHeader() { // the system time was set to the past: // we change the creation time creationTime = now; - storeHeader.put("created", creationTime); + storeHeader.put(HDR_CREATED, creationTime); } - Chunk footer = readChunkFooter(fileStore.size()); - if (footer != null) { - if (footer.version > newestVersion) { - newestVersion = footer.version; - chunkBlock = footer.block; + long fileSize = fileStore.size(); + long blocksInStore = fileSize / BLOCK_SIZE; + + Comparator chunkComparator = (one, two) -> { + int result = Long.compare(two.version, one.version); + if (result == 0) { + // out of two copies of the same chunk we prefer the one + // close to the beginning of file (presumably later version) + result = Long.compare(one.block, two.block); + } + return result; + }; + + Map validChunksByLocation = new HashMap<>(); + if (!assumeCleanShutdown) { + Chunk tailChunk = discoverChunk(blocksInStore); + if (tailChunk != null) { + blocksInStore = tailChunk.block; // for a possible full scan later on + if (newest == 
null || tailChunk.version > newest.version) { + newest = tailChunk; + } + } + + if (newest != null) { + // read the chunk header and footer, + // and follow the chain of next chunks + while (true) { + validChunksByLocation.put(newest.block, newest); + if (newest.next == 0 || newest.next >= blocksInStore) { + // no (valid) next + break; + } + Chunk test = readChunkHeaderAndFooter(newest.next, newest.id + 1); + if (test == null || test.version <= newest.version) { + break; + } + // if shutdown was really clean then chain should be empty + assumeCleanShutdown = false; + newest = test; + } } - } - if (chunkBlock <= 0) { - // no chunk - return; } - // read the chunk header and footer, - // and follow the chain of next chunks - lastChunk = null; - while (true) { - Chunk header; + if (assumeCleanShutdown) { + // quickly check latest 20 chunks referenced in meta table + Queue chunksToVerify = new PriorityQueue<>(20, Collections.reverseOrder(chunkComparator)); try { - header = readChunkHeader(chunkBlock); - } catch (Exception e) { - // invalid chunk header: ignore, but stop - break; - } - if (header.version < newestVersion) { - // we have reached the end - break; - } - footer = readChunkFooter((chunkBlock + header.len) * BLOCK_SIZE); - if (footer == null || footer.id != header.id) { - // invalid chunk footer, or the wrong one - break; - } - lastChunk = header; - newestVersion = header.version; - if (header.next == 0 || - header.next >= fileStore.size() / BLOCK_SIZE) { - // no (valid) next - break; + setLastChunk(newest); + // load the chunk metadata: although meta's root page resides in the lastChunk, + // traversing meta map might recursively load another chunk(s) + Cursor cursor = layout.cursor(DataUtils.META_CHUNK); + while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) { + Chunk c = Chunk.fromString(cursor.getValue()); + assert c.version <= currentVersion; + // might be there already, due to meta traversal + // see readPage() ... 
getChunkIfFound() + chunks.putIfAbsent(c.id, c); + chunksToVerify.offer(c); + if (chunksToVerify.size() == 20) { + chunksToVerify.poll(); + } + } + Chunk c; + while (assumeCleanShutdown && (c = chunksToVerify.poll()) != null) { + Chunk test = readChunkHeaderAndFooter(c.block, c.id); + assumeCleanShutdown = test != null; + if (assumeCleanShutdown) { + validChunksByLocation.put(test.block, test); + } + } + } catch(MVStoreException ignored) { + assumeCleanShutdown = false; } - chunkBlock = header.next; - } - if (lastChunk == null) { - // no valid chunk - return; } - lastMapId = lastChunk.mapId; - currentVersion = lastChunk.version; - setWriteVersion(currentVersion); - chunks.put(lastChunk.id, lastChunk); - meta.setRootPos(lastChunk.metaRootPos, -1); - - // load the chunk metadata: we can load in any order, - // because loading chunk metadata might recursively load another chunk - for (Iterator it = meta.keyIterator("chunk."); it.hasNext();) { - String s = it.next(); - if (!s.startsWith("chunk.")) { - break; - } - s = meta.get(s); - Chunk c = Chunk.fromString(s); - if (!chunks.containsKey(c.id)) { - if (c.block == Long.MAX_VALUE) { - throw DataUtils.newIllegalStateException( + + if (!assumeCleanShutdown) { + boolean quickRecovery = false; + if (!recoveryMode) { + // now we know, that previous shutdown did not go well and file + // is possibly corrupted but there is still hope for a quick + // recovery + + // this collection will hold potential candidates for lastChunk to fall back to, + // in order from the most to least likely + Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]); + Arrays.sort(lastChunkCandidates, chunkComparator); + Map validChunksById = new HashMap<>(); + for (Chunk chunk : lastChunkCandidates) { + validChunksById.put(chunk.id, chunk); + } + quickRecovery = findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, + validChunksById, false); + } + + if (!quickRecovery) { + // scan whole file 
and try to fetch chunk header and/or footer out of every block + // matching pairs with nothing in-between are considered as valid chunk + long block = blocksInStore; + Chunk tailChunk; + while ((tailChunk = discoverChunk(block)) != null) { + block = tailChunk.block; + validChunksByLocation.put(block, tailChunk); + } + + // this collection will hold potential candidates for lastChunk to fall back to, + // in order from the most to least likely + Chunk[] lastChunkCandidates = validChunksByLocation.values().toArray(new Chunk[0]); + Arrays.sort(lastChunkCandidates, chunkComparator); + Map validChunksById = new HashMap<>(); + for (Chunk chunk : lastChunkCandidates) { + validChunksById.put(chunk.id, chunk); + } + if (!findLastChunkWithCompleteValidChunkSet(lastChunkCandidates, validChunksByLocation, + validChunksById, true) && lastChunk != null) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, - "Chunk {0} is invalid", c.id); + "File is corrupted - unable to recover a valid set of chunks"); + } - chunks.put(c.id, c); } } + + fileStore.clear(); // build the free space list for (Chunk c : chunks.values()) { - if (c.pageCountLive == 0) { - // remove this chunk in the next save operation - registerFreePage(currentVersion, c.id, 0, 0); + if (c.isSaved()) { + long start = c.block * BLOCK_SIZE; + int length = c.len * BLOCK_SIZE; + fileStore.markUsed(start, length); + } + if (!c.isLive()) { + deadChunks.offer(c); } - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - fileStore.markUsed(start, length); } + assert validateFileLength("on open"); } - /** - * Try to read a chunk footer. 
- * - * @param end the end of the chunk - * @return the chunk, or null if not successful - */ - private Chunk readChunkFooter(long end) { - // the following can fail for various reasons - try { - // read the chunk footer of the last block of the file - ByteBuffer lastBlock = fileStore.readFully( - end - Chunk.FOOTER_LENGTH, Chunk.FOOTER_LENGTH); - byte[] buff = new byte[Chunk.FOOTER_LENGTH]; - lastBlock.get(buff); - String s = new String(buff, DataUtils.LATIN).trim(); - HashMap m = DataUtils.parseMap(s); - int check = DataUtils.readHexInt(m, "fletcher", 0); - m.remove("fletcher"); - s = s.substring(0, s.lastIndexOf("fletcher") - 1); - byte[] bytes = s.getBytes(DataUtils.LATIN); - int checksum = DataUtils.getFletcher32(bytes, bytes.length); - if (check == checksum) { - int chunk = DataUtils.readHexInt(m, "chunk", 0); - Chunk c = new Chunk(chunk); - c.version = DataUtils.readHexLong(m, "version", 0); - c.block = DataUtils.readHexLong(m, "block", 0); - return c; + private MVStoreException getUnsupportedWriteFormatException(long format, int expectedFormat, String s) { + format = DataUtils.readHexLong(storeHeader, HDR_FORMAT_READ, format); + if (format >= FORMAT_READ_MIN && format <= FORMAT_READ_MAX) { + s += ", and the file was not opened in read-only mode"; + } + return DataUtils.newMVStoreException(DataUtils.ERROR_UNSUPPORTED_FORMAT, s, format, expectedFormat); + } + + private boolean findLastChunkWithCompleteValidChunkSet(Chunk[] lastChunkCandidates, + Map validChunksByLocation, + Map validChunksById, + boolean afterFullScan) { + // Try candidates for "last chunk" in order from newest to oldest + // until suitable is found. Suitable one should have meta map + // where all chunk references point to valid locations. 
+ for (Chunk chunk : lastChunkCandidates) { + boolean verified = true; + try { + setLastChunk(chunk); + // load the chunk metadata: although meta's root page resides in the lastChunk, + // traversing meta map might recursively load another chunk(s) + Cursor cursor = layout.cursor(DataUtils.META_CHUNK); + while (cursor.hasNext() && cursor.next().startsWith(DataUtils.META_CHUNK)) { + Chunk c = Chunk.fromString(cursor.getValue()); + assert c.version <= currentVersion; + // might be there already, due to meta traversal + // see readPage() ... getChunkIfFound() + Chunk test = chunks.putIfAbsent(c.id, c); + if (test != null) { + c = test; + } + assert chunks.get(c.id) == c; + if ((test = validChunksByLocation.get(c.block)) == null || test.id != c.id) { + if ((test = validChunksById.get(c.id)) != null) { + // We do not have a valid chunk at that location, + // but there is a copy of same chunk from original + // location. + // Chunk header at original location does not have + // any dynamic (occupancy) metadata, so it can't be + // used here as is, re-point our chunk to original + // location instead. + c.block = test.block; + } else if (c.isLive() && (afterFullScan || readChunkHeaderAndFooter(c.block, c.id) == null)) { + // chunk reference is invalid + // this "last chunk" candidate is not suitable + verified = false; + break; + } + } + if (!c.isLive()) { + // we can just remove entry from meta, referencing to this chunk, + // but store maybe R/O, and it's not properly started yet, + // so lets make this chunk "dead" and taking no space, + // and it will be automatically removed later. 
+ c.block = Long.MAX_VALUE; + c.len = Integer.MAX_VALUE; + if (c.unused == 0) { + c.unused = creationTime; + } + if (c.unusedAtVersion == 0) { + c.unusedAtVersion = INITIAL_VERSION; + } + } + } + } catch(Exception ignored) { + verified = false; + } + if (verified) { + return true; } - } catch (Exception e) { - // ignore } - return null; + return false; } - private void writeStoreHeader() { - StringBuilder buff = new StringBuilder(); - if (lastChunk != null) { - storeHeader.put("block", lastChunk.block); - storeHeader.put("chunk", lastChunk.id); - storeHeader.put("version", lastChunk.version); + private void setLastChunk(Chunk last) { + chunks.clear(); + lastChunk = last; + lastChunkId = 0; + currentVersion = lastChunkVersion(); + long layoutRootPos = 0; + int mapId = 0; + if (last != null) { // there is a valid chunk + lastChunkId = last.id; + currentVersion = last.version; + layoutRootPos = last.layoutRootPos; + mapId = last.mapId; + chunks.put(last.id, last); } - DataUtils.appendMap(buff, storeHeader); - byte[] bytes = buff.toString().getBytes(DataUtils.LATIN); - int checksum = DataUtils.getFletcher32(bytes, bytes.length); - DataUtils.appendMap(buff, "fletcher", checksum); - buff.append("\n"); - bytes = buff.toString().getBytes(DataUtils.LATIN); - ByteBuffer header = ByteBuffer.allocate(2 * BLOCK_SIZE); - header.put(bytes); - header.position(BLOCK_SIZE); - header.put(bytes); - header.rewind(); - write(0, header); + lastMapId.set(mapId); + layout.setRootPos(layoutRootPos, currentVersion - 1); } - private void write(long pos, ByteBuffer buffer) { - try { + /** + * Discover a valid chunk, searching file backwards from the given block + * + * @param block to start search from (found chunk footer should be no + * further than block-1) + * @return valid chunk or null if none found + */ + private Chunk discoverChunk(long block) { + long candidateLocation = Long.MAX_VALUE; + Chunk candidate = null; + while (true) { + if (block == candidateLocation) { + return candidate; 
+ } + if (block == 2) { // number of blocks occupied by headers + return null; + } + Chunk test = readChunkFooter(block); + if (test != null) { + // if we encounter chunk footer (with or without corresponding header) + // in the middle of prospective chunk, stop considering it + candidateLocation = Long.MAX_VALUE; + test = readChunkHeaderOptionally(test.block, test.id); + if (test != null) { + // if that footer has a corresponding header, + // consider them as a new candidate for a valid chunk + candidate = test; + candidateLocation = test.block; + } + } + + // if we encounter chunk header without corresponding footer + // (due to incomplete write?) in the middle of prospective + // chunk, stop considering it + if (--block > candidateLocation && readChunkHeaderOptionally(block) != null) { + candidateLocation = Long.MAX_VALUE; + } + } + } + + + /** + * Read a chunk header and footer, and verify the stored data is consistent. + * + * @param block the block + * @param expectedId of the chunk + * @return the chunk, or null if the header or footer don't match or are not + * consistent + */ + private Chunk readChunkHeaderAndFooter(long block, int expectedId) { + Chunk header = readChunkHeaderOptionally(block, expectedId); + if (header != null) { + Chunk footer = readChunkFooter(block + header.len); + if (footer == null || footer.id != expectedId || footer.block != header.block) { + return null; + } + } + return header; + } + + /** + * Try to read a chunk footer. 
+ * + * @param block the index of the next block after the chunk + * @return the chunk, or null if not successful + */ + private Chunk readChunkFooter(long block) { + // the following can fail for various reasons + try { + // read the chunk footer of the last block of the file + long pos = block * BLOCK_SIZE - Chunk.FOOTER_LENGTH; + if(pos < 0) { + return null; + } + ByteBuffer lastBlock = fileStore.readFully(pos, Chunk.FOOTER_LENGTH); + byte[] buff = new byte[Chunk.FOOTER_LENGTH]; + lastBlock.get(buff); + HashMap m = DataUtils.parseChecksummedMap(buff); + if (m != null) { + return new Chunk(m); + } + } catch (Exception e) { + // ignore + } + return null; + } + + private void writeStoreHeader() { + Chunk lastChunk = this.lastChunk; + if (lastChunk != null) { + storeHeader.put(HDR_BLOCK, lastChunk.block); + storeHeader.put(HDR_CHUNK, lastChunk.id); + storeHeader.put(HDR_VERSION, lastChunk.version); + } + StringBuilder buff = new StringBuilder(112); + DataUtils.appendMap(buff, storeHeader); + byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); + int checksum = DataUtils.getFletcher32(bytes, 0, bytes.length); + DataUtils.appendMap(buff, HDR_FLETCHER, checksum); + buff.append('\n'); + bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1); + ByteBuffer header = ByteBuffer.allocate(2 * BLOCK_SIZE); + header.put(bytes); + header.position(BLOCK_SIZE); + header.put(bytes); + header.rewind(); + write(0, header); + } + + private void write(long pos, ByteBuffer buffer) { + try { fileStore.writeFully(pos, buffer); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { panic(e); - throw e; } } /** * Close the file and the store. Unsaved changes are written to disk first. 
*/ + @Override public void close() { - if (closed) { - return; - } - if (fileStore != null && !fileStore.isReadOnly()) { - stopBackgroundThread(); - if (hasUnsavedChanges()) { - commitAndSave(); - } - } - closeStore(true); + closeStore(true, 0); + } + + /** + * Close the file and the store. Unsaved changes are written to disk first, + * and compaction (up to a specified number of milliseconds) is attempted. + * + * @param allowedCompactionTime the allowed time for compaction (in + * milliseconds) + */ + public void close(int allowedCompactionTime) { + closeStore(true, allowedCompactionTime); } /** - * Close the file and the store, without writing anything. This will stop - * the background thread. This method ignores all errors. + * Close the file and the store, without writing anything. + * This will try to stop the background thread (without waiting for it). + * This method ignores all errors. */ public void closeImmediately() { try { - closeStore(false); - } catch (Exception e) { - if (backgroundExceptionHandler != null) { - backgroundExceptionHandler.uncaughtException(null, e); - } + closeStore(false, 0); + } catch (Throwable e) { + handleException(e); } } - private void closeStore(boolean shrinkIfPossible) { - if (closed) { - return; - } - // can not synchronize on this yet, because - // the thread also synchronized on this, which - // could result in a deadlock - stopBackgroundThread(); - closed = true; - if (fileStore == null) { - return; - } - synchronized (this) { - if (shrinkIfPossible) { - shrinkFileIfPossible(0); - } - // release memory early - this is important when called - // because of out of memory - cache = null; - cacheChunkRef = null; - for (MVMap m : New.arrayList(maps.values())) { - m.close(); - } - meta = null; - chunks.clear(); - maps.clear(); + private void closeStore(boolean normalShutdown, int allowedCompactionTime) { + // If any other thead have already initiated closure procedure, + // isClosed() would wait until closure is done and 
then we jump out of the loop. + // This is a subtle difference between !isClosed() and isOpen(). + while (!isClosed()) { + stopBackgroundThread(normalShutdown); + storeLock.lock(); try { - if (!fileStoreIsProvided) { - fileStore.close(); + if (state == STATE_OPEN) { + state = STATE_STOPPING; + try { + try { + if (normalShutdown && fileStore != null && !fileStore.isReadOnly()) { + for (MVMap map : maps.values()) { + if (map.isClosed()) { + deregisterMapRoot(map.getId()); + } + } + setRetentionTime(0); + commit(); + if (allowedCompactionTime > 0) { + compactFile(allowedCompactionTime); + } else if (allowedCompactionTime < 0) { + doMaintenance(autoCompactFillRate); + } + + saveChunkLock.lock(); + try { + shrinkFileIfPossible(0); + storeHeader.put(HDR_CLEAN, 1); + writeStoreHeader(); + sync(); + assert validateFileLength("on close"); + } finally { + saveChunkLock.unlock(); + } + } + + state = STATE_CLOSING; + + // release memory early - this is important when called + // because of out of memory + clearCaches(); + for (MVMap m : new ArrayList<>(maps.values())) { + m.close(); + } + chunks.clear(); + maps.clear(); + } finally { + if (fileStore != null && !fileStoreIsProvided) { + fileStore.close(); + } + } + } finally { + state = STATE_CLOSED; + } } } finally { - fileStore = null; + storeLock.unlock(); } } } - /** - * Whether the chunk at the given position is live. 
- * - * @param the chunk id - * @return true if it is live - */ - boolean isChunkLive(int chunkId) { - String s = meta.get(Chunk.getMetaKey(chunkId)); - return s != null; + private static void shutdownExecutor(ThreadPoolExecutor executor) { + if (executor != null) { + executor.shutdown(); + try { + if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; + } + } catch (InterruptedException ignore) {/**/} + executor.shutdownNow(); + } } /** @@ -859,36 +1374,19 @@ boolean isChunkLive(int chunkId) { * @return the chunk */ private Chunk getChunk(long pos) { - Chunk c = getChunkIfFound(pos); - if (c == null) { - int chunkId = DataUtils.getPageChunkId(pos); - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Chunk {0} not found", chunkId); - } - return c; - } - - private Chunk getChunkIfFound(long pos) { int chunkId = DataUtils.getPageChunkId(pos); Chunk c = chunks.get(chunkId); if (c == null) { checkOpen(); - if (!Thread.holdsLock(this)) { - // it could also be unsynchronized metadata - // access (if synchronization on this was forgotten) - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_CHUNK_NOT_FOUND, - "Chunk {0} no longer exists", - chunkId); - } - String s = meta.get(Chunk.getMetaKey(chunkId)); + String s = layout.get(Chunk.getMetaKey(chunkId)); if (s == null) { - return null; + throw DataUtils.newMVStoreException( + DataUtils.ERROR_CHUNK_NOT_FOUND, + "Chunk {0} not found", chunkId); } c = Chunk.fromString(s); - if (c.block == Long.MAX_VALUE) { - throw DataUtils.newIllegalStateException( + if (!c.isSaved()) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Chunk {0} is invalid", chunkId); } @@ -898,172 +1396,315 @@ private Chunk getChunkIfFound(long pos) { } private void setWriteVersion(long version) { - for (MVMap map : maps.values()) { - map.setWriteVersion(version); + for (Iterator> iter = maps.values().iterator(); iter.hasNext(); ) { + MVMap map = iter.next(); + assert map != layout && 
map != meta; + if (map.setWriteVersion(version) == null) { + iter.remove(); + } } meta.setWriteVersion(version); + layout.setWriteVersion(version); + onVersionChange(version); } /** - * Commit the changes. - *

    - * For in-memory stores, this method increments the version. - *

    - * For persistent stores, it also writes changes to disk. It does nothing if - * there are no unsaved changes, and returns the old version. It is not - * necessary to call this method when auto-commit is enabled (the default - * setting), as in this case it is automatically called from time to time or - * when enough changes have accumulated. However, it may still be called to - * flush all changes to disk. + * Unlike regular commit this method returns immediately if there is commit + * in progress on another thread, otherwise it acts as regular commit. + * + * This method may return BEFORE this thread changes are actually persisted! * - * @return the new version + * @return the new version (incremented if there were changes) */ - public long commit() { - if (fileStore != null) { - return commitAndSave(); + public long tryCommit() { + return tryCommit(x -> true); + } + + private long tryCommit(Predicate check) { + // we need to prevent re-entrance, which may be possible, + // because meta map is modified within storeNow() and that + // causes beforeWrite() call with possibility of going back here + if ((!storeLock.isHeldByCurrentThread() || currentStoreVersion < 0) && + storeLock.tryLock()) { + try { + if (check.test(this)) { + store(false); + } + } finally { + unlockAndCheckPanicCondition(); + } } - long v = ++currentVersion; - setWriteVersion(v); - return v; + return currentVersion; } /** - * Commit all changes and persist them to disk. This method does nothing if - * there are no unsaved changes, otherwise it increments the current version + * Commit the changes. + *

    + * This method does nothing if there are no unsaved changes, + * otherwise it increments the current version * and stores the data (for file based stores). *

    + * It is not necessary to call this method when auto-commit is enabled (the default + * setting), as in this case it is automatically called from time to time or + * when enough changes have accumulated. However, it may still be called to + * flush all changes to disk. + *

    * At most one store operation may run at any time. * * @return the new version (incremented if there were changes) */ - private synchronized long commitAndSave() { - if (closed) { - return currentVersion; - } - if (fileStore == null) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_WRITING_FAILED, - "This is an in-memory store"); + public long commit() { + return commit(x -> true); + } + + private long commit(Predicate check) { + // we need to prevent re-entrance, which may be possible, + // because meta map is modified within storeNow() and that + // causes beforeWrite() call with possibility of going back here + if(!storeLock.isHeldByCurrentThread() || currentStoreVersion < 0) { + storeLock.lock(); + try { + if (check.test(this)) { + store(true); + } + } finally { + unlockAndCheckPanicCondition(); + } } - if (currentStoreVersion >= 0) { - // store is possibly called within store, if the meta map changed - return currentVersion; + return currentVersion; + } + + private void store(boolean syncWrite) { + assert storeLock.isHeldByCurrentThread(); + assert !saveChunkLock.isHeldByCurrentThread(); + if (isOpenOrStopping()) { + if (hasUnsavedChanges()) { + dropUnusedChunks(); + try { + currentStoreVersion = currentVersion; + if (fileStore == null) { + //noinspection NonAtomicOperationOnVolatileField + ++currentVersion; + setWriteVersion(currentVersion); + metaChanged = false; + } else { + if (fileStore.isReadOnly()) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_WRITING_FAILED, "This store is read-only"); + } + storeNow(syncWrite, 0, () -> reuseSpace ? 
0 : getAfterLastBlock()); + } + } finally { + // in any case reset the current store version, + // to allow closing the store + currentStoreVersion = -1; + } + } } - if (!hasUnsavedChanges()) { - return currentVersion; + } + + private void storeNow(boolean syncWrite, long reservedLow, Supplier reservedHighSupplier) { + try { + lastCommitTime = getTimeSinceCreation(); + int currentUnsavedPageCount = unsavedMemory; + // it is ok, since that path suppose to be single-threaded under storeLock + //noinspection NonAtomicOperationOnVolatileField + long version = ++currentVersion; + ArrayList> changed = collectChangedMapRoots(version); + + assert storeLock.isHeldByCurrentThread(); + submitOrRun(serializationExecutor, + () -> serializeAndStore(syncWrite, reservedLow, reservedHighSupplier, + changed, lastCommitTime, version), + syncWrite); + + // some pages might have been changed in the meantime (in the newest + // version) + saveNeeded = false; + unsavedMemory = Math.max(0, unsavedMemory - currentUnsavedPageCount); + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), + e)); } - if (fileStore.isReadOnly()) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_WRITING_FAILED, "This store is read-only"); + } + + private static void submitOrRun(ThreadPoolExecutor executor, Runnable action, + boolean syncRun) throws ExecutionException { + if (executor != null) { + try { + Future future = executor.submit(action); + if (syncRun || executor.getQueue().size() > PIPE_LENGTH) { + try { + future.get(); + } catch (InterruptedException ignore) {/**/} + } + return; + } catch (RejectedExecutionException ex) { + assert executor.isShutdown(); + shutdownExecutor(executor); + } + } + action.run(); + } + + private ArrayList> collectChangedMapRoots(long version) { + long lastStoredVersion = version - 2; + ArrayList> changed = new ArrayList<>(); + for (Iterator> iter = 
maps.values().iterator(); iter.hasNext(); ) { + MVMap map = iter.next(); + RootReference rootReference = map.setWriteVersion(version); + if (rootReference == null) { + iter.remove(); + } else if (map.getCreateVersion() < version && // if map was created after storing started, skip it + !map.isVolatile() && + map.hasChangesSince(lastStoredVersion)) { + assert rootReference.version <= version : rootReference.version + " > " + version; + Page rootPage = rootReference.root; + if (!rootPage.isSaved() || + // after deletion previously saved leaf + // may pop up as a root, but we still need + // to save new root pos in meta + rootPage.isLeaf()) { + changed.add(rootPage); + } + } } - try { - currentStoreVersion = currentVersion; - currentStoreThread = Thread.currentThread(); - return storeNow(); - } finally { - // in any case reset the current store version, - // to allow closing the store - currentStoreVersion = -1; - currentStoreThread = null; + RootReference rootReference = meta.setWriteVersion(version); + if (meta.hasChangesSince(lastStoredVersion) || metaChanged) { + assert rootReference != null && rootReference.version <= version + : rootReference == null ? 
"null" : rootReference.version + " > " + version; + Page rootPage = rootReference.root; + if (!rootPage.isSaved() || + // after deletion previously saved leaf + // may pop up as a root, but we still need + // to save new root pos in meta + rootPage.isLeaf()) { + changed.add(rootPage); + } } + return changed; } - private long storeNow() { + private void serializeAndStore(boolean syncRun, long reservedLow, Supplier reservedHighSupplier, + ArrayList> changed, long time, long version) { + serializationLock.lock(); try { - return storeNowTry(); - } catch (IllegalStateException e) { + Chunk c = createChunk(time, version); + chunks.put(c.id, c); + WriteBuffer buff = getWriteBuffer(); + serializeToBuffer(buff, changed, c, reservedLow, reservedHighSupplier); + + submitOrRun(bufferSaveExecutor, () -> storeBuffer(c, buff, changed), syncRun); + + } catch (MVStoreException e) { panic(e); - return -1; - } - } - - private long storeNowTry() { - freeUnusedChunks(); - int currentUnsavedPageCount = unsavedMemory; - long storeVersion = currentStoreVersion; - long version = ++currentVersion; - setWriteVersion(version); - long time = getTime(); - lastCommitTime = time; - retainChunk = null; - - // the metadata of the last chunk was not stored so far, and needs to be - // set now (it's better not to update right after storing, because that - // would modify the meta map again) - int lastChunkId; - if (lastChunk == null) { - lastChunkId = 0; - } else { - lastChunkId = lastChunk.id; - meta.put(Chunk.getMetaKey(lastChunkId), lastChunk.asString()); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); + } finally { + serializationLock.unlock(); + } + } + + private Chunk createChunk(long time, long version) { + int chunkId = lastChunkId; + if (chunkId != 0) { + chunkId &= Chunk.MAX_ID; + Chunk lastChunk = chunks.get(chunkId); + assert lastChunk != null; + assert lastChunk.isSaved(); + assert lastChunk.version + 1 == version : 
lastChunk.version + " " + version; + // the metadata of the last chunk was not stored so far, and needs to be + // set now (it's better not to update right after storing, because that + // would modify the meta map again) + layout.put(Chunk.getMetaKey(chunkId), lastChunk.asString()); // never go backward in time time = Math.max(lastChunk.time, time); } - int newChunkId = lastChunkId; - do { - newChunkId = (newChunkId + 1) % Chunk.MAX_ID; - } while (chunks.containsKey(newChunkId)); + int newChunkId; + while (true) { + newChunkId = ++lastChunkId & Chunk.MAX_ID; + Chunk old = chunks.get(newChunkId); + if (old == null) { + break; + } + if (!old.isSaved()) { + MVStoreException e = DataUtils.newMVStoreException( + DataUtils.ERROR_INTERNAL, + "Last block {0} not stored, possibly due to out-of-memory", old); + panic(e); + } + } Chunk c = new Chunk(newChunkId); - - c.pageCount = Integer.MAX_VALUE; - c.pageCountLive = Integer.MAX_VALUE; - c.maxLen = Long.MAX_VALUE; - c.maxLenLive = Long.MAX_VALUE; - c.metaRootPos = Long.MAX_VALUE; + c.pageCount = 0; + c.pageCountLive = 0; + c.maxLen = 0; + c.maxLenLive = 0; + c.layoutRootPos = Long.MAX_VALUE; c.block = Long.MAX_VALUE; c.len = Integer.MAX_VALUE; c.time = time; c.version = version; - c.mapId = lastMapId; c.next = Long.MAX_VALUE; - chunks.put(c.id, c); - // force a metadata update - meta.put(Chunk.getMetaKey(c.id), c.asString()); - meta.remove(Chunk.getMetaKey(c.id)); - ArrayList> list = New.arrayList(maps.values()); - ArrayList> changed = New.arrayList(); - for (MVMap m : list) { - m.setWriteVersion(version); - long v = m.getVersion(); - if (m.getCreateVersion() > storeVersion) { - // the map was created after storing started - continue; - } - if (m.isVolatile()) { - continue; - } - if (v >= 0 && v >= lastStoredVersion) { - MVMap r = m.openVersion(storeVersion); - if (r.getRoot().getPos() == 0) { - changed.add(r); - } - } - } - applyFreedSpace(storeVersion); - WriteBuffer buff = getWriteBuffer(); + c.occupancy = new BitSet(); 
+ return c; + } + + private void serializeToBuffer(WriteBuffer buff, ArrayList> changed, Chunk c, + long reservedLow, Supplier reservedHighSupplier) { // need to patch the header later c.writeChunkHeader(buff, 0); - int headerLength = buff.position(); - c.pageCount = 0; - c.pageCountLive = 0; - c.maxLen = 0; - c.maxLenLive = 0; - for (MVMap m : changed) { - Page p = m.getRoot(); - String key = MVMap.getMapRootKey(m.getId()); + int headerLength = buff.position() + 44; + buff.position(headerLength); + + long version = c.version; + List toc = new ArrayList<>(); + for (Page p : changed) { + String key = MVMap.getMapRootKey(p.getMapId()); if (p.getTotalCount() == 0) { - meta.put(key, "0"); + layout.remove(key); } else { - p.writeUnsavedRecursive(c, buff); + p.writeUnsavedRecursive(c, buff, toc); long root = p.getPos(); - meta.put(key, Long.toHexString(root)); + layout.put(key, Long.toHexString(root)); } } - meta.setWriteVersion(version); - Page metaRoot = meta.getRoot(); - metaRoot.writeUnsavedRecursive(c, buff); + acceptChunkOccupancyChanges(c.time, version); + RootReference layoutRootReference = layout.setWriteVersion(version); + assert layoutRootReference != null; + assert layoutRootReference.version == version : layoutRootReference.version + " != " + version; + metaChanged = false; + + acceptChunkOccupancyChanges(c.time, version); + + onVersionChange(version); + + Page layoutRoot = layoutRootReference.root; + layoutRoot.writeUnsavedRecursive(c, buff, toc); + c.layoutRootPos = layoutRoot.getPos(); + changed.add(layoutRoot); + + // last allocated map id should be captured after the meta map was saved, because + // this will ensure that concurrently created map, which made it into meta before save, + // will have it's id reflected in mapid field of currently written chunk + c.mapId = lastMapId.get(); + + c.tocPos = buff.position(); + long[] tocArray = new long[toc.size()]; + int index = 0; + for (long tocElement : toc) { + tocArray[index++] = tocElement; + 
buff.putLong(tocElement); + if (DataUtils.isLeafPosition(tocElement)) { + ++leafCount; + } else { + ++nonLeafCount; + } + } + chunksToC.put(c.id, tocArray); int chunkLength = buff.position(); // add the store header and round to the next block @@ -1071,240 +1712,97 @@ private long storeNowTry() { Chunk.FOOTER_LENGTH, BLOCK_SIZE); buff.limit(length); - // the length of the file that is still in use - // (not necessarily the end of the file) - long end = getFileLengthInUse(); - long filePos; - if (reuseSpace) { - filePos = fileStore.allocate(length); - } else { - filePos = end; - } - // end is not necessarily the end of the file - boolean storeAtEndOfFile = filePos + length >= fileStore.size(); - - if (!reuseSpace) { - // we can not mark it earlier, because it - // might have been allocated by one of the - // removed chunks - fileStore.markUsed(end, length); - } - - c.block = filePos / BLOCK_SIZE; - c.len = length / BLOCK_SIZE; - c.metaRootPos = metaRoot.getPos(); - // calculate and set the likely next position - if (reuseSpace) { - int predictBlocks = c.len; - long predictedNextStart = fileStore.allocate( - predictBlocks * BLOCK_SIZE); - fileStore.free(predictedNextStart, predictBlocks * BLOCK_SIZE); - c.next = predictedNextStart / BLOCK_SIZE; - } else { - // just after this chunk - c.next = 0; + saveChunkLock.lock(); + try { + Long reservedHigh = reservedHighSupplier.get(); + long filePos = fileStore.allocate(buff.limit(), reservedLow, reservedHigh); + c.len = buff.limit() / BLOCK_SIZE; + c.block = filePos / BLOCK_SIZE; + assert validateFileLength(c.asString()); + // calculate and set the likely next position + if (reservedLow > 0 || reservedHigh == reservedLow) { + c.next = fileStore.predictAllocation(c.len, 0, 0); + } else { + // just after this chunk + c.next = 0; + } + assert c.pageCountLive == c.pageCount : c; + assert c.occupancy.cardinality() == 0 : c; + + buff.position(0); + assert c.pageCountLive == c.pageCount : c; + assert c.occupancy.cardinality() == 0 
: c; + c.writeChunkHeader(buff, headerLength); + + buff.position(buff.limit() - Chunk.FOOTER_LENGTH); + buff.put(c.getFooterBytes()); + } finally { + saveChunkLock.unlock(); } - buff.position(0); - c.writeChunkHeader(buff, headerLength); - revertTemp(storeVersion); + } + + private void storeBuffer(Chunk c, WriteBuffer buff, ArrayList> changed) { + saveChunkLock.lock(); + try { + buff.position(0); + long filePos = c.block * BLOCK_SIZE; + write(filePos, buff.getBuffer()); + releaseWriteBuffer(buff); - buff.position(buff.limit() - Chunk.FOOTER_LENGTH); - buff.put(c.getFooterBytes()); + // end of the used space is not necessarily the end of the file + boolean storeAtEndOfFile = filePos + buff.limit() >= fileStore.size(); + boolean writeStoreHeader = isWriteStoreHeader(c, storeAtEndOfFile); + lastChunk = c; + if (writeStoreHeader) { + writeStoreHeader(); + } + if (!storeAtEndOfFile) { + // may only shrink after the store header was written + shrinkFileIfPossible(1); + } + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); + } finally { + saveChunkLock.unlock(); + } - buff.position(0); - write(filePos, buff.getBuffer()); - releaseWriteBuffer(buff); + for (Page p : changed) { + p.releaseSavedPages(); + } + } + private boolean isWriteStoreHeader(Chunk c, boolean storeAtEndOfFile) { // whether we need to write the store header boolean writeStoreHeader = false; if (!storeAtEndOfFile) { + Chunk lastChunk = this.lastChunk; if (lastChunk == null) { writeStoreHeader = true; } else if (lastChunk.next != c.block) { // the last prediction did not matched writeStoreHeader = true; } else { - long headerVersion = DataUtils.readHexLong( - storeHeader, "version", 0); + long headerVersion = DataUtils.readHexLong(storeHeader, HDR_VERSION, 0); if (lastChunk.version - headerVersion > 20) { - // we write after at least 20 entries + // we write after at least every 20 versions 
writeStoreHeader = true; } else { - int chunkId = DataUtils.readHexInt(storeHeader, "chunk", 0); - while (true) { - Chunk old = chunks.get(chunkId); - if (old == null) { - // one of the chunks in between - // was removed - writeStoreHeader = true; - break; - } - if (chunkId == lastChunk.id) { - break; - } - chunkId++; + for (int chunkId = DataUtils.readHexInt(storeHeader, HDR_CHUNK, 0); + !writeStoreHeader && chunkId <= lastChunk.id; ++chunkId) { + // one of the chunks in between + // was removed + writeStoreHeader = !chunks.containsKey(chunkId); } } } } - lastChunk = c; - if (writeStoreHeader) { - writeStoreHeader(); - } - if (!storeAtEndOfFile) { - // may only shrink after the store header was written - shrinkFileIfPossible(1); - } - - for (MVMap m : changed) { - Page p = m.getRoot(); - if (p.getTotalCount() > 0) { - p.writeEnd(); - } - } - metaRoot.writeEnd(); - - // some pages might have been changed in the meantime (in the newest - // version) - unsavedMemory = Math.max(0, unsavedMemory - - currentUnsavedPageCount); - - metaChanged = false; - lastStoredVersion = storeVersion; - - return version; - } - - private synchronized void freeUnusedChunks() { - if (lastChunk == null || !reuseSpace) { - return; - } - Set referenced = collectReferencedChunks(); - ArrayList free = New.arrayList(); - long time = getTime(); - for (Chunk c : chunks.values()) { - if (!referenced.contains(c.id)) { - free.add(c); - } - } - for (Chunk c : free) { - if (canOverwriteChunk(c, time)) { - chunks.remove(c.id); - markMetaChanged(); - meta.remove(Chunk.getMetaKey(c.id)); - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - fileStore.free(start, length); - } else { - if (c.unused == 0) { - c.unused = time; - meta.put(Chunk.getMetaKey(c.id), c.asString()); - markMetaChanged(); - } - } - } - } - - private Set collectReferencedChunks() { - long testVersion = lastChunk.version; - DataUtils.checkArgument(testVersion > 0, "Collect references on version 0"); - long readCount 
= getFileStore().readCount; - Set referenced = New.hashSet(); - for (Cursor c = meta.cursor("root."); c.hasNext();) { - String key = c.next(); - if (!key.startsWith("root.")) { - break; - } - long pos = DataUtils.parseHexLong(c.getValue()); - if (pos == 0) { - continue; - } - int mapId = DataUtils.parseHexInt(key.substring("root.".length())); - collectReferencedChunks(referenced, mapId, pos, 0); - } - long pos = lastChunk.metaRootPos; - collectReferencedChunks(referenced, 0, pos, 0); - readCount = fileStore.readCount - readCount; - return referenced; - } - - private void collectReferencedChunks(Set targetChunkSet, - int mapId, long pos, int level) { - int c = DataUtils.getPageChunkId(pos); - targetChunkSet.add(c); - if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { - return; - } - PageChildren refs = readPageChunkReferences(mapId, pos, -1); - if (!refs.chunkList) { - Set target = New.hashSet(); - for (int i = 0; i < refs.children.length; i++) { - long p = refs.children[i]; - collectReferencedChunks(target, mapId, p, level + 1); - } - // we don't need a reference to this chunk - target.remove(c); - long[] children = new long[target.size()]; - int i = 0; - for (Integer p : target) { - children[i++] = DataUtils.getPagePos(p, 0, 0, - DataUtils.PAGE_TYPE_LEAF); - } - refs.children = children; - refs.chunkList = true; - if (cacheChunkRef != null) { - cacheChunkRef.put(refs.pos, refs, refs.getMemory()); - } - } - for (long p : refs.children) { - targetChunkSet.add(DataUtils.getPageChunkId(p)); + if (storeHeader.remove(HDR_CLEAN) != null) { + writeStoreHeader = true; } - } - - private PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) { - if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { - return null; - } - PageChildren r; - if (cacheChunkRef != null) { - r = cacheChunkRef.get(pos); - } else { - r = null; - } - if (r == null) { - // if possible, create it from the cached page - if (cache != null) { - Page p = cache.get(pos); 
- if (p != null) { - r = new PageChildren(p); - } - } - if (r == null) { - // page was not cached: read the data - Chunk c = getChunk(pos); - long filePos = c.block * BLOCK_SIZE; - filePos += DataUtils.getPageOffset(pos); - if (filePos < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString()); - } - long maxPos = (c.block + c.len) * BLOCK_SIZE; - r = PageChildren.read(fileStore, pos, mapId, filePos, maxPos); - } - r.removeDuplicateChunkReferences(); - if (cacheChunkRef != null) { - cacheChunkRef.put(pos, r, r.getMemory()); - } - } - if (r.children.length == 0) { - int chunk = DataUtils.getPageChunkId(pos); - if (chunk == parentChunk) { - return null; - } - } - return r; + return writeStoreHeader; } /** @@ -1314,9 +1812,8 @@ private PageChildren readPageChunkReferences(int mapId, long pos, int parentChun * @return the buffer */ private WriteBuffer getWriteBuffer() { - WriteBuffer buff; - if (writeBuffer != null) { - buff = writeBuffer; + WriteBuffer buff = writeBufferPool.poll(); + if (buff != null) { buff.clear(); } else { buff = new WriteBuffer(); @@ -1332,82 +1829,70 @@ private WriteBuffer getWriteBuffer() { */ private void releaseWriteBuffer(WriteBuffer buff) { if (buff.capacity() <= 4 * 1024 * 1024) { - writeBuffer = buff; + writeBufferPool.offer(buff); } } - private boolean canOverwriteChunk(Chunk c, long time) { - if (c.time + retentionTime > time) { - return false; - } - if (c.unused == 0 || c.unused + retentionTime / 2 > time) { - return false; - } - Chunk r = retainChunk; - if (r != null && c.version > r.version) { - return false; - } - return true; + private static boolean canOverwriteChunk(Chunk c, long oldestVersionToKeep) { + return !c.isLive() && c.unusedAtVersion < oldestVersionToKeep; + } + + private boolean isSeasonedChunk(Chunk chunk, long time) { + return retentionTime < 0 || chunk.time + retentionTime <= time; + } + + private long getTimeSinceCreation() 
{ + return Math.max(0, getTimeAbsolute() - creationTime); } - private long getTime() { - return System.currentTimeMillis() - creationTime; + private long getTimeAbsolute() { + long now = System.currentTimeMillis(); + if (lastTimeAbsolute != 0 && now < lastTimeAbsolute) { + // time seems to have run backwards - this can happen + // when the system time is adjusted, for example + // on a leap second + now = lastTimeAbsolute; + } else { + lastTimeAbsolute = now; + } + return now; } /** * Apply the freed space to the chunk metadata. The metadata is updated, but * completely free chunks are not removed from the set of chunks, and the - * disk space is not yet marked as free. - * - * @param storeVersion apply up to the given version + * disk space is not yet marked as free. They are queued instead and wait until + * their usage is over. */ - private void applyFreedSpace(long storeVersion) { - while (true) { - ArrayList modified = New.arrayList(); - Iterator>> it; - it = freedPageSpace.entrySet().iterator(); - while (it.hasNext()) { - Entry> e = it.next(); - long v = e.getKey(); - if (v > storeVersion) { - continue; - } - HashMap freed = e.getValue(); - for (Chunk f : freed.values()) { - Chunk c = chunks.get(f.id); - if (c == null) { - // already removed - continue; - } - // no need to synchronize, as old entries - // are not concurrently modified - c.maxLenLive += f.maxLenLive; - c.pageCountLive += f.pageCountLive; - if (c.pageCountLive < 0 && c.pageCountLive > -MARKED_FREE) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, - "Corrupt page count {0}", c.pageCountLive); - } - if (c.maxLenLive < 0 && c.maxLenLive > -MARKED_FREE) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, - "Corrupt max length {0}", c.maxLenLive); - } - if (c.pageCountLive <= 0 && c.maxLenLive > 0 || - c.maxLenLive <= 0 && c.pageCountLive > 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, - "Corrupt max length {0}", 
c.maxLenLive); + private void acceptChunkOccupancyChanges(long time, long version) { + assert serializationLock.isHeldByCurrentThread(); + if (lastChunk != null) { + Set modifiedChunks = new HashSet<>(); + while (true) { + RemovedPageInfo rpi; + while ((rpi = removedPages.peek()) != null && rpi.version < version) { + rpi = removedPages.poll(); // could be different from the peeked one + assert rpi != null; // since nobody else retrieves from queue + assert rpi.version < version : rpi + " < " + version; + int chunkId = rpi.getPageChunkId(); + Chunk chunk = chunks.get(chunkId); + assert !isOpen() || chunk != null : chunkId; + if (chunk != null) { + modifiedChunks.add(chunk); + if (chunk.accountForRemovedPage(rpi.getPageNo(), rpi.getPageLength(), + rpi.isPinned(), time, rpi.version)) { + deadChunks.offer(chunk); + } } - modified.add(c); } - it.remove(); - } - for (Chunk c : modified) { - meta.put(Chunk.getMetaKey(c.id), c.asString()); - } - if (modified.size() == 0) { - break; + if (modifiedChunks.isEmpty()) { + return; + } + for (Chunk chunk : modifiedChunks) { + int chunkId = chunk.id; + layout.put(Chunk.getMetaKey(chunkId), chunk.asString()); + } + modifiedChunks.clear(); } } } @@ -1419,6 +1904,10 @@ private void applyFreedSpace(long storeVersion) { * @param minPercent the minimum percentage to save */ private void shrinkFileIfPossible(int minPercent) { + assert saveChunkLock.isHeldByCurrentThread(); + if (fileStore.isReadOnly()) { + return; + } long end = getFileLengthInUse(); long fileSize = fileStore.size(); if (end >= fileSize) { @@ -1431,23 +1920,44 @@ private void shrinkFileIfPossible(int minPercent) { if (savedPercent < minPercent) { return; } + if (isOpenOrStopping()) { + sync(); + } fileStore.truncate(end); } /** - * Get the position of the last used byte. + * Get the position right after the last used byte. 
* * @return the position */ private long getFileLengthInUse() { - long size = 2 * BLOCK_SIZE; + assert saveChunkLock.isHeldByCurrentThread(); + long result = fileStore.getFileLengthInUse(); + assert result == measureFileLengthInUse() : result + " != " + measureFileLengthInUse(); + return result; + } + + /** + * Get the index of the first block after last occupied one. + * It marks the beginning of the last (infinite) free space. + * + * @return block index + */ + private long getAfterLastBlock() { + assert saveChunkLock.isHeldByCurrentThread(); + return fileStore.getAfterLastBlock(); + } + + private long measureFileLengthInUse() { + assert saveChunkLock.isHeldByCurrentThread(); + long size = 2; for (Chunk c : chunks.values()) { - if (c.len != Integer.MAX_VALUE) { - long x = (c.block + c.len) * BLOCK_SIZE; - size = Math.max(size, x); + if (c.isSaved()) { + size = Math.max(size, c.block + c.len); } } - return size; + return size * BLOCK_SIZE; } /** @@ -1456,19 +1966,18 @@ private long getFileLengthInUse() { * @return if there are any changes */ public boolean hasUnsavedChanges() { - checkOpen(); if (metaChanged) { return true; } + long lastStoredVersion = currentVersion - 1; for (MVMap m : maps.values()) { if (!m.isClosed()) { - long v = m.getVersion(); - if (v >= 0 && v > lastStoredVersion) { + if(m.hasChangesSince(lastStoredVersion)) { return true; } } } - return false; + return layout.hasChangesSince(lastStoredVersion) && lastStoredVersion > INITIAL_VERSION; } private Chunk readChunkHeader(long block) { @@ -1477,45 +1986,25 @@ private Chunk readChunkHeader(long block) { return Chunk.readChunkHeader(buff, p); } - /** - * Compact the store by moving all live pages to new chunks. 
- * - * @return if anything was written - */ - public synchronized boolean compactRewriteFully() { - checkOpen(); - if (lastChunk == null) { - // nothing to do - return false; - } - for (MVMap m : maps.values()) { - @SuppressWarnings("unchecked") - MVMap map = (MVMap) m; - Cursor cursor = map.cursor(null); - Page lastPage = null; - while (cursor.hasNext()) { - cursor.next(); - Page p = cursor.getPage(); - if (p == lastPage) { - continue; - } - Object k = p.getKey(0); - Object v = p.getValue(0); - map.put(k, v); - lastPage = p; - } + private Chunk readChunkHeaderOptionally(long block) { + try { + Chunk chunk = readChunkHeader(block); + return chunk.block != block ? null : chunk; + } catch (Exception ignore) { + return null; } - commitAndSave(); - return true; + } + + private Chunk readChunkHeaderOptionally(long block, int expectedId) { + Chunk chunk = readChunkHeaderOptionally(block); + return chunk == null || chunk.id != expectedId ? null : chunk; } /** * Compact by moving all chunks next to each other. 
- * - * @return if anything was written */ - public synchronized boolean compactMoveChunks() { - return compactMoveChunks(100, Long.MAX_VALUE); + public void compactMoveChunks() { + compactMoveChunks(100, Long.MAX_VALUE); } /** @@ -1527,136 +2016,241 @@ public synchronized boolean compactMoveChunks() { * @param targetFillRate do nothing if the file store fill rate is higher * than this * @param moveSize the number of bytes to move - * @return if anything was written + * @return true if any chunks were moved as result of this operation, false otherwise */ - public synchronized boolean compactMoveChunks(int targetFillRate, long moveSize) { - checkOpen(); - if (lastChunk == null || !reuseSpace) { - // nothing to do - return false; - } - int oldRetentionTime = retentionTime; - boolean oldReuse = reuseSpace; + boolean compactMoveChunks(int targetFillRate, long moveSize) { + boolean res = false; + storeLock.lock(); try { - retentionTime = 0; - freeUnusedChunks(); - if (fileStore.getFillRate() > targetFillRate) { - return false; - } - long start = fileStore.getFirstFree() / BLOCK_SIZE; - ArrayList move = compactGetMoveBlocks(start, moveSize); - compactMoveChunks(move); - freeUnusedChunks(); - storeNow(); + checkOpen(); + // because serializationExecutor is a single-threaded one and + // all task submissions to it are done under storeLock, + // it is guaranteed, that upon this dummy task completion + // there are no pending / in-progress task here + submitOrRun(serializationExecutor, () -> {}, true); + serializationLock.lock(); + try { + // similarly, all task submissions to bufferSaveExecutor + // are done under serializationLock, and upon this dummy task completion + // it will be no pending / in-progress task here + submitOrRun(bufferSaveExecutor, () -> {}, true); + saveChunkLock.lock(); + try { + if (lastChunk != null && reuseSpace && getFillRate() <= targetFillRate) { + res = compactMoveChunks(moveSize); + } + } finally { + saveChunkLock.unlock(); + } + } finally { + 
serializationLock.unlock(); + } + } catch (MVStoreException e) { + panic(e); + } catch (Throwable e) { + panic(DataUtils.newMVStoreException( + DataUtils.ERROR_INTERNAL, "{0}", e.toString(), e)); } finally { - reuseSpace = oldReuse; - retentionTime = oldRetentionTime; + unlockAndCheckPanicCondition(); + } + return res; + } + + private boolean compactMoveChunks(long moveSize) { + assert storeLock.isHeldByCurrentThread(); + dropUnusedChunks(); + long start = fileStore.getFirstFree() / BLOCK_SIZE; + Iterable chunksToMove = findChunksToMove(start, moveSize); + if (chunksToMove == null) { + return false; } + compactMoveChunks(chunksToMove); return true; } - private ArrayList compactGetMoveBlocks(long startBlock, long moveSize) { - ArrayList move = New.arrayList(); - for (Chunk c : chunks.values()) { - if (c.block > startBlock) { - move.add(c); - } - } - // sort by block - Collections.sort(move, new Comparator() { - @Override - public int compare(Chunk o1, Chunk o2) { - return Long.signum(o1.block - o2.block); + private Iterable findChunksToMove(long startBlock, long moveSize) { + long maxBlocksToMove = moveSize / BLOCK_SIZE; + Iterable result = null; + if (maxBlocksToMove > 0) { + PriorityQueue queue = new PriorityQueue<>(chunks.size() / 2 + 1, + (o1, o2) -> { + // instead of selection just closest to beginning of the file, + // pick smaller chunk(s) which sit in between bigger holes + int res = Integer.compare(o2.collectPriority, o1.collectPriority); + if (res != 0) { + return res; + } + return Long.signum(o2.block - o1.block); + }); + long size = 0; + for (Chunk chunk : chunks.values()) { + if (chunk.isSaved() && chunk.block > startBlock) { + chunk.collectPriority = getMovePriority(chunk); + queue.offer(chunk); + size += chunk.len; + while (size > maxBlocksToMove) { + Chunk removed = queue.poll(); + if (removed == null) { + break; + } + size -= removed.len; + } + } } - }); - // find which is the last block to keep - int count = 0; - long size = 0; - for (Chunk c : 
move) { - long chunkSize = c.len * (long) BLOCK_SIZE; - if (size + chunkSize > moveSize) { - break; + if (!queue.isEmpty()) { + ArrayList list = new ArrayList<>(queue); + list.sort(Chunk.PositionComparator.INSTANCE); + result = list; } - size += chunkSize; - count++; - } - // move the first block (so the first gap is moved), - // and the one at the end (so the file shrinks) - while (move.size() > count && move.size() > 1) { - move.remove(1); } + return result; + } - return move; + private int getMovePriority(Chunk chunk) { + return fileStore.getMovePriority((int)chunk.block); } - private void compactMoveChunks(ArrayList move) { - for (Chunk c : move) { - WriteBuffer buff = getWriteBuffer(); - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; - buff.limit(length); - ByteBuffer readBuff = fileStore.readFully(start, length); - Chunk.readChunkHeader(readBuff, start); - int chunkHeaderLen = readBuff.position(); - buff.position(chunkHeaderLen); - buff.put(readBuff); - long end = getFileLengthInUse(); - fileStore.markUsed(end, length); - fileStore.free(start, length); - c.block = end / BLOCK_SIZE; - c.next = 0; - buff.position(0); - c.writeChunkHeader(buff, chunkHeaderLen); - buff.position(length - Chunk.FOOTER_LENGTH); - buff.put(lastChunk.getFooterBytes()); - buff.position(0); - write(end, buff.getBuffer()); - releaseWriteBuffer(buff); - markMetaChanged(); - meta.put(Chunk.getMetaKey(c.id), c.asString()); + private void compactMoveChunks(Iterable move) { + assert storeLock.isHeldByCurrentThread(); + assert serializationLock.isHeldByCurrentThread(); + assert saveChunkLock.isHeldByCurrentThread(); + if (move != null) { + // this will ensure better recognition of the last chunk + // in case of power failure, since we are going to move older chunks + // to the end of the file + writeStoreHeader(); + sync(); + + Iterator iterator = move.iterator(); + assert iterator.hasNext(); + long leftmostBlock = iterator.next().block; + long originalBlockCount = 
getAfterLastBlock(); + // we need to ensure that chunks moved within the following loop + // do not overlap with space just released by chunks moved before them, + // hence the need to reserve this area [leftmostBlock, originalBlockCount) + for (Chunk chunk : move) { + moveChunk(chunk, leftmostBlock, originalBlockCount); + } + // update the metadata (hopefully within the file) + store(leftmostBlock, originalBlockCount); + sync(); + + Chunk chunkToMove = lastChunk; + assert chunkToMove != null; + long postEvacuationBlockCount = getAfterLastBlock(); + + boolean chunkToMoveIsAlreadyInside = chunkToMove.block < leftmostBlock; + boolean movedToEOF = !chunkToMoveIsAlreadyInside; + // move all chunks, which previously did not fit before reserved area + // now we can re-use previously reserved area [leftmostBlock, originalBlockCount), + // but need to reserve [originalBlockCount, postEvacuationBlockCount) + for (Chunk c : move) { + if (c.block >= originalBlockCount && + moveChunk(c, originalBlockCount, postEvacuationBlockCount)) { + assert c.block < originalBlockCount; + movedToEOF = true; + } + } + assert postEvacuationBlockCount >= getAfterLastBlock(); + + if (movedToEOF) { + boolean moved = moveChunkInside(chunkToMove, originalBlockCount); + + // store a new chunk with updated metadata (hopefully within a file) + store(originalBlockCount, postEvacuationBlockCount); + sync(); + // if chunkToMove did not fit within originalBlockCount (move is + // false), and since now previously reserved area + // [originalBlockCount, postEvacuationBlockCount) also can be + // used, lets try to move that chunk into this area, closer to + // the beginning of the file + long lastBoundary = moved || chunkToMoveIsAlreadyInside ? 
+ postEvacuationBlockCount : chunkToMove.block; + moved = !moved && moveChunkInside(chunkToMove, lastBoundary); + if (moveChunkInside(lastChunk, lastBoundary) || moved) { + store(lastBoundary, -1); + } + } + + shrinkFileIfPossible(0); + sync(); } + } - // update the metadata (store at the end of the file) - reuseSpace = false; - commitAndSave(); + private void store(long reservedLow, long reservedHigh) { + saveChunkLock.unlock(); + try { + serializationLock.unlock(); + try { + storeNow(true, reservedLow, () -> reservedHigh); + } finally { + serializationLock.lock(); + } + } finally { + saveChunkLock.lock(); + } + } - sync(); + private boolean moveChunkInside(Chunk chunkToMove, long boundary) { + boolean res = chunkToMove.block >= boundary && + fileStore.predictAllocation(chunkToMove.len, boundary, -1) < boundary && + moveChunk(chunkToMove, boundary, -1); + assert !res || chunkToMove.block + chunkToMove.len <= boundary; + return res; + } - // now re-use the empty space - reuseSpace = true; - for (Chunk c : move) { - if (!chunks.containsKey(c.id)) { - // already removed during the - // previous store operation - continue; - } - WriteBuffer buff = getWriteBuffer(); - long start = c.block * BLOCK_SIZE; - int length = c.len * BLOCK_SIZE; + /** + * Move specified chunk into free area of the file. "Reserved" area + * specifies file interval to be avoided, when un-allocated space will be + * chosen for a new chunk's location. 
+ * + * @param chunk to move + * @param reservedAreaLow low boundary of reserved area, inclusive + * @param reservedAreaHigh high boundary of reserved area, exclusive + * @return true if block was moved, false otherwise + */ + private boolean moveChunk(Chunk chunk, long reservedAreaLow, long reservedAreaHigh) { + // ignore if already removed during the previous store operations + // those are possible either as explicit commit calls + // or from meta map updates at the end of this method + if (!chunks.containsKey(chunk.id)) { + return false; + } + long start = chunk.block * BLOCK_SIZE; + int length = chunk.len * BLOCK_SIZE; + long block; + WriteBuffer buff = getWriteBuffer(); + try { buff.limit(length); ByteBuffer readBuff = fileStore.readFully(start, length); - Chunk.readChunkHeader(readBuff, 0); + Chunk chunkFromFile = Chunk.readChunkHeader(readBuff, start); int chunkHeaderLen = readBuff.position(); buff.position(chunkHeaderLen); buff.put(readBuff); - long pos = fileStore.allocate(length); - fileStore.free(start, length); + long pos = fileStore.allocate(length, reservedAreaLow, reservedAreaHigh); + block = pos / BLOCK_SIZE; + // in the absence of a reserved area, + // block should always move closer to the beginning of the file + assert reservedAreaHigh > 0 || block <= chunk.block : block + " " + chunk; buff.position(0); - c.block = pos / BLOCK_SIZE; - c.writeChunkHeader(buff, chunkHeaderLen); + // can not set chunk's new block/len until it's fully written at new location, + // because concurrent reader can pick it up prematurely, + // also occupancy accounting fields should not leak into header + chunkFromFile.block = block; + chunkFromFile.next = 0; + chunkFromFile.writeChunkHeader(buff, chunkHeaderLen); buff.position(length - Chunk.FOOTER_LENGTH); - buff.put(lastChunk.getFooterBytes()); + buff.put(chunkFromFile.getFooterBytes()); buff.position(0); write(pos, buff.getBuffer()); + } finally { releaseWriteBuffer(buff); - markMetaChanged(); - 
meta.put(Chunk.getMetaKey(c.id), c.asString()); } - - // update the metadata (within the file) - commitAndSave(); - sync(); - shrinkFileIfPossible(0); + fileStore.free(start, length); + chunk.block = block; + chunk.next = 0; + layout.put(Chunk.getMetaKey(chunk.id), chunk.asString()); + return true; } /** @@ -1664,7 +2258,31 @@ private void compactMoveChunks(ArrayList move) { * implementation calls FileChannel.force(true). */ public void sync() { - fileStore.sync(); + checkOpen(); + FileStore f = fileStore; + if (f != null) { + f.sync(); + } + } + + /** + * Compact store file, that is, compact blocks that have a low + * fill rate, and move chunks next to each other. This will typically + * shrink the file. Changes are flushed to the file, and old + * chunks are overwritten. + * + * @param maxCompactTime the maximum time in milliseconds to compact + */ + public void compactFile(int maxCompactTime) { + setRetentionTime(0); + long stopAt = System.nanoTime() + maxCompactTime * 1_000_000L; + while (compact(95, 16 * 1024 * 1024)) { + sync(); + compactMoveChunks(95, 16 * 1024 * 1024); + if (System.nanoTime() - stopAt > 0L) { + break; + } + } } /** @@ -1686,233 +2304,344 @@ public void sync() { * @return if a chunk was re-written */ public boolean compact(int targetFillRate, int write) { - if (!reuseSpace) { - return false; - } - synchronized (compactSync) { + if (reuseSpace && lastChunk != null) { checkOpen(); - ArrayList old; - synchronized (this) { - old = compactGetOldChunks(targetFillRate, write); - } - if (old == null || old.size() == 0) { - return false; + if (targetFillRate > 0 && getChunksFillRate() < targetFillRate) { + // We can't wait forever for the lock here, + // because if called from the background thread, + // it might go into deadlock with concurrent database closure + // and attempt to stop this thread. 
+ try { + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + return rewriteChunks(write, 100); + } finally { + storeLock.unlock(); + } + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } } - compactRewrite(old); - return true; } + return false; } - private ArrayList compactGetOldChunks(int targetFillRate, int write) { - if (lastChunk == null) { - // nothing to do - return null; + private boolean rewriteChunks(int writeLimit, int targetFillRate) { + serializationLock.lock(); + try { + TxCounter txCounter = registerVersionUsage(); + try { + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + Iterable old = findOldChunks(writeLimit, targetFillRate); + if (old != null) { + HashSet idSet = createIdSet(old); + return !idSet.isEmpty() && compactRewrite(idSet) > 0; + } + } finally { + deregisterVersionUsage(txCounter); + } + return false; + } finally { + serializationLock.unlock(); } + } - // calculate the fill rate - long maxLengthSum = 0; - long maxLengthLiveSum = 0; + /** + * Get the current fill rate (percentage of used space in the file). Unlike + * the fill rate of the store, here we only account for chunk data; the fill + * rate here is how much of the chunk data is live (still referenced). Young + * chunks are considered live. 
+ * + * @return the fill rate, in percent (100 is completely full) + */ + public int getChunksFillRate() { + return getChunksFillRate(true); + } - long time = getTime(); + public int getRewritableChunksFillRate() { + return getChunksFillRate(false); + } + private int getChunksFillRate(boolean all) { + long maxLengthSum = 1; + long maxLengthLiveSum = 1; + long time = getTimeSinceCreation(); for (Chunk c : chunks.values()) { - // ignore young chunks, because we don't optimize those - if (c.time + retentionTime > time) { - continue; + if (all || isRewritable(c, time)) { + assert c.maxLen >= 0; + maxLengthSum += c.maxLen; + maxLengthLiveSum += c.maxLenLive; } - maxLengthSum += c.maxLen; - maxLengthLiveSum += c.maxLenLive; - } - if (maxLengthLiveSum < 0) { - // no old data - return null; } // the fill rate of all chunks combined - if (maxLengthSum <= 0) { - // avoid division by 0 - maxLengthSum = 1; - } int fillRate = (int) (100 * maxLengthLiveSum / maxLengthSum); - if (fillRate >= targetFillRate) { - return null; + return fillRate; + } + + /** + * Get data chunks count. + * + * @return number of existing chunks in store. + */ + public int getChunkCount() { + return chunks.size(); + } + + /** + * Get data pages count. + * + * @return number of existing pages in store. + */ + public int getPageCount() { + int count = 0; + for (Chunk chunk : chunks.values()) { + count += chunk.pageCount; } + return count; + } - // the 'old' list contains the chunks we want to free up - ArrayList old = New.arrayList(); - Chunk last = chunks.get(lastChunk.id); - for (Chunk c : chunks.values()) { - // only look at chunk older than the retention time - // (it's possible to compact chunks earlier, but right - // now we don't do that) - if (c.time + retentionTime > time) { - continue; + /** + * Get live data pages count. + * + * @return number of existing live pages in store. 
+ */ + public int getLivePageCount() { + int count = 0; + for (Chunk chunk : chunks.values()) { + count += chunk.pageCountLive; + } + return count; + } + + private int getProjectedFillRate(int thresholdChunkFillRate) { + saveChunkLock.lock(); + try { + int vacatedBlocks = 0; + long maxLengthSum = 1; + long maxLengthLiveSum = 1; + long time = getTimeSinceCreation(); + for (Chunk c : chunks.values()) { + assert c.maxLen >= 0; + if (isRewritable(c, time) && c.getFillRate() <= thresholdChunkFillRate) { + assert c.maxLen >= c.maxLenLive; + vacatedBlocks += c.len; + maxLengthSum += c.maxLen; + maxLengthLiveSum += c.maxLenLive; + } } - long age = last.version - c.version + 1; - c.collectPriority = (int) (c.getFillRate() * 1000 / age); - old.add(c); + int additionalBlocks = (int) (vacatedBlocks * maxLengthLiveSum / maxLengthSum); + int fillRate = fileStore.getProjectedFillRate(vacatedBlocks - additionalBlocks); + return fillRate; + } finally { + saveChunkLock.unlock(); } - if (old.size() == 0) { - return null; + } + + public int getFillRate() { + saveChunkLock.lock(); + try { + return fileStore.getFillRate(); + } finally { + saveChunkLock.unlock(); } + } - // sort the list, so the first entry should be collected first - Collections.sort(old, new Comparator() { - @Override - public int compare(Chunk o1, Chunk o2) { - int comp = new Integer(o1.collectPriority). - compareTo(o2.collectPriority); - if (comp == 0) { - comp = new Long(o1.maxLenLive). 
- compareTo(o2.maxLenLive); - } - return comp; - } - }); - // find out up to were in the old list we need to move - long written = 0; - int chunkCount = 0; - Chunk move = null; - for (Chunk c : old) { - if (move != null) { - if (c.collectPriority > 0 && written > write) { - break; + private Iterable findOldChunks(int writeLimit, int targetFillRate) { + assert lastChunk != null; + long time = getTimeSinceCreation(); + + // the queue will contain chunks we want to free up + // the smaller the collectionPriority, the more desirable this chunk's re-write is + // queue will be ordered in descending order of collectionPriority values, + // so most desirable chunks will stay at the tail + PriorityQueue queue = new PriorityQueue<>(this.chunks.size() / 4 + 1, + (o1, o2) -> { + int comp = Integer.compare(o2.collectPriority, o1.collectPriority); + if (comp == 0) { + comp = Long.compare(o2.maxLenLive, o1.maxLenLive); + } + return comp; + }); + + long totalSize = 0; + long latestVersion = lastChunk.version + 1; + for (Chunk chunk : chunks.values()) { + // only look at chunk older than the retention time + // (it's possible to compact chunks earlier, but right + // now we don't do that) + int fillRate = chunk.getFillRate(); + if (isRewritable(chunk, time) && fillRate <= targetFillRate) { + long age = Math.max(1, latestVersion - chunk.version); + chunk.collectPriority = (int) (fillRate * 1000 / age); + totalSize += chunk.maxLenLive; + queue.offer(chunk); + while (totalSize > writeLimit) { + Chunk removed = queue.poll(); + if (removed == null) { + break; + } + totalSize -= removed.maxLenLive; } } - written += c.maxLenLive; - chunkCount++; - move = c; - } - if (chunkCount < 1) { - return null; } - // remove the chunks we want to keep from this list - boolean remove = false; - for (Iterator it = old.iterator(); it.hasNext();) { - Chunk c = it.next(); - if (move == c) { - remove = true; - } else if (remove) { - it.remove(); + + return queue.isEmpty() ? 
null : queue; + } + + private boolean isRewritable(Chunk chunk, long time) { + return chunk.isRewritable() && isSeasonedChunk(chunk, time); + } + + private int compactRewrite(Set set) { + assert storeLock.isHeldByCurrentThread(); + assert currentStoreVersion < 0; // we should be able to do tryCommit() -> store() + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + int rewrittenPageCount = rewriteChunks(set, false); + acceptChunkOccupancyChanges(getTimeSinceCreation(), currentVersion); + rewrittenPageCount += rewriteChunks(set, true); + return rewrittenPageCount; + } + + private int rewriteChunks(Set set, boolean secondPass) { + int rewrittenPageCount = 0; + for (int chunkId : set) { + Chunk chunk = chunks.get(chunkId); + long[] toc = getToC(chunk); + if (toc != null) { + for (int pageNo = 0; (pageNo = chunk.occupancy.nextClearBit(pageNo)) < chunk.pageCount; ++pageNo) { + long tocElement = toc[pageNo]; + int mapId = DataUtils.getPageMapId(tocElement); + MVMap map = mapId == layout.getId() ? layout : mapId == meta.getId() ? 
meta : getMap(mapId); + if (map != null && !map.isClosed()) { + assert !map.isSingleWriter(); + if (secondPass || DataUtils.isLeafPosition(tocElement)) { + long pagePos = DataUtils.getPagePos(chunkId, tocElement); + serializationLock.unlock(); + try { + if (map.rewritePage(pagePos)) { + ++rewrittenPageCount; + if (map == meta) { + markMetaChanged(); + } + } + } finally { + serializationLock.lock(); + } + } + } + } } } - return old; + return rewrittenPageCount; } - private void compactRewrite(ArrayList old) { - HashSet set = New.hashSet(); - for (Chunk c : old) { + private static HashSet createIdSet(Iterable toCompact) { + HashSet set = new HashSet<>(); + for (Chunk c : toCompact) { set.add(c.id); } - for (MVMap m : maps.values()) { - @SuppressWarnings("unchecked") - MVMap map = (MVMap) m; - if (!map.rewrite(set)) { - return; - } - } - if (!meta.rewrite(set)) { - return; - } - freeUnusedChunks(); - commitAndSave(); + return set; } /** * Read a page. * + * @param key type + * @param value type + * * @param map the map * @param pos the page position * @return the page */ - Page readPage(MVMap map, long pos) { - if (pos == 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, "Position 0"); - } - Page p = cache == null ? 
null : cache.get(pos); - if (p == null) { - Chunk c = getChunk(pos); - long filePos = c.block * BLOCK_SIZE; - filePos += DataUtils.getPageOffset(pos); - if (filePos < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Negative position {0}", filePos); + Page readPage(MVMap map, long pos) { + try { + if (!DataUtils.isPageSaved(pos)) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, "Position 0"); + } + Page p = readPageFromCache(pos); + if (p == null) { + Chunk chunk = getChunk(pos); + int pageOffset = DataUtils.getPageOffset(pos); + try { + ByteBuffer buff = chunk.readBufferForPage(fileStore, pageOffset, pos); + p = Page.read(buff, pos, map); + } catch (MVStoreException e) { + throw e; + } catch (Exception e) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "Unable to read the page at position {0}, chunk {1}, offset {2}", + pos, chunk.id, pageOffset, e); + } + cachePage(p); + } + return p; + } catch (MVStoreException e) { + if (recoveryMode) { + return map.createEmptyLeaf(); } - long maxPos = (c.block + c.len) * BLOCK_SIZE; - p = Page.read(fileStore, pos, map, filePos, maxPos); - cachePage(pos, p, p.getMemory()); + throw e; } - return p; } - /** - * Remove a page. 
- * - * @param map the map the page belongs to - * @param pos the position of the page - * @param memory the memory usage - */ - void removePage(MVMap map, long pos, int memory) { - // we need to keep temporary pages, - // to support reading old versions and rollback - if (pos == 0) { - // the page was not yet stored: - // just using "unsavedMemory -= memory" could result in negative - // values, because in some cases a page is allocated, but never - // stored, so we need to use max - unsavedMemory = Math.max(0, unsavedMemory - memory); - return; + private long[] getToC(Chunk chunk) { + if (chunk.tocPos == 0) { + // legacy chunk without table of content + return null; } - - // This could result in a cache miss if the operation is rolled back, - // but we don't optimize for rollback. - // We could also keep the page in the cache, as somebody - // could still read it (reading the old version). - if (cache != null) { - if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { - // keep nodes in the cache, because they are still used for - // garbage collection - cache.remove(pos); - } + long[] toc = chunksToC.get(chunk.id); + if (toc == null) { + toc = chunk.readToC(fileStore); + chunksToC.put(chunk.id, toc, toc.length * 8); } + assert toc.length == chunk.pageCount : toc.length + " != " + chunk.pageCount; + return toc; + } - Chunk c = getChunk(pos); - long version = currentVersion; - if (map == meta && currentStoreVersion >= 0) { - if (Thread.currentThread() == currentStoreThread) { - // if the meta map is modified while storing, - // then this freed page needs to be registered - // with the stored chunk, so that the old chunk - // can be re-used - version = currentStoreVersion; - } - } - registerFreePage(version, c.id, - DataUtils.getPageMaxLength(pos), 1); + @SuppressWarnings("unchecked") + private Page readPageFromCache(long pos) { + return cache == null ? 
null : (Page)cache.get(pos); } - private void registerFreePage(long version, int chunkId, - long maxLengthLive, int pageCount) { - HashMap freed = freedPageSpace.get(version); - if (freed == null) { - freed = New.hashMap(); - HashMap f2 = freedPageSpace.putIfAbsent(version, - freed); - if (f2 != null) { - freed = f2; - } - } - // synchronize, because pages could be freed concurrently - synchronized (freed) { - Chunk f = freed.get(chunkId); - if (f == null) { - f = new Chunk(chunkId); - freed.put(chunkId, f); + /** + * Remove a page. + * @param pos the position of the page + * @param version at which page was removed + * @param pinned whether page is considered pinned + * @param pageNo sequential page number within chunk + */ + void accountForRemovedPage(long pos, long version, boolean pinned, int pageNo) { + assert DataUtils.isPageSaved(pos); + if (pageNo < 0) { + pageNo = calculatePageNo(pos); + } + RemovedPageInfo rpi = new RemovedPageInfo(pos, pinned, version, pageNo); + removedPages.add(rpi); + } + + private int calculatePageNo(long pos) { + int pageNo = -1; + Chunk chunk = getChunk(pos); + long[] toC = getToC(chunk); + if (toC != null) { + int offset = DataUtils.getPageOffset(pos); + int low = 0; + int high = toC.length - 1; + while (low <= high) { + int mid = (low + high) >>> 1; + long midVal = DataUtils.getPageOffset(toC[mid]); + if (midVal < offset) { + low = mid + 1; + } else if (midVal > offset) { + high = mid - 1; + } else { + pageNo = mid; + break; + } } - f.maxLenLive -= maxLengthLive; - f.pageCountLive -= pageCount; } + return pageNo; } Compressor getCompressorFast() { @@ -1937,6 +2666,14 @@ public int getPageSplitSize() { return pageSplitSize; } + public int getKeysPerPage() { + return keysPerPage; + } + + public long getMaxPageSize() { + return cache == null ? 
Long.MAX_VALUE : cache.getMaxItemSize() >> 4; + } + public boolean getReuseSpace() { return reuseSpace; } @@ -2007,23 +2744,43 @@ public long getVersionsToKeep() { } /** - * Get the oldest version to retain in memory, which is the manually set - * retain version, or the current store version (whatever is older). + * Get the oldest version to retain. + * We keep at least number of previous versions specified by "versionsToKeep" + * configuration parameter (default 5). + * Previously it was used only in case of non-persistent MVStore. + * Now it's honored in all cases (although H2 always sets it to zero). + * Oldest version determination also takes into account calls (de)registerVersionUsage(), + * an will not release the version, while version is still in use. * * @return the version */ long getOldestVersionToKeep() { - long v = currentVersion; - if (fileStore == null) { - return v - versionsToKeep; - } - long storeVersion = currentStoreVersion; - if (storeVersion > -1) { - v = Math.min(v, storeVersion); + long v = oldestVersionToKeep.get(); + v = Math.max(v - versionsToKeep, INITIAL_VERSION); + if (fileStore != null) { + long storeVersion = lastChunkVersion() - 1; + if (storeVersion != INITIAL_VERSION && storeVersion < v) { + v = storeVersion; + } } return v; } + private void setOldestVersionToKeep(long oldestVersionToKeep) { + boolean success; + do { + long current = this.oldestVersionToKeep.get(); + // Oldest version may only advance, never goes back + success = oldestVersionToKeep <= current || + this.oldestVersionToKeep.compareAndSet(current, oldestVersionToKeep); + } while (!success); + } + + private long lastChunkVersion() { + Chunk chunk = lastChunk; + return chunk == null ? INITIAL_VERSION + 1 : chunk.version; + } + /** * Check whether all data can be read from this version. 
This requires that * all chunks referenced by this version are still available (not @@ -2036,7 +2793,7 @@ private boolean isKnownVersion(long version) { if (version > currentVersion || version < 0) { return false; } - if (version == currentVersion || chunks.size() == 0) { + if (version == currentVersion || chunks.isEmpty()) { // no stored data return true; } @@ -2047,29 +2804,40 @@ private boolean isKnownVersion(long version) { } // also, all chunks referenced by this version // need to be available in the file - MVMap oldMeta = getMetaMap(version); - if (oldMeta == null) { - return false; - } - for (Iterator it = oldMeta.keyIterator("chunk."); - it.hasNext();) { - String chunkKey = it.next(); - if (!chunkKey.startsWith("chunk.")) { - break; - } - if (!meta.containsKey(chunkKey)) { - return false; + MVMap oldLayoutMap = getLayoutMap(version); + try { + for (Iterator it = oldLayoutMap.keyIterator(DataUtils.META_CHUNK); it.hasNext();) { + String chunkKey = it.next(); + if (!chunkKey.startsWith(DataUtils.META_CHUNK)) { + break; + } + if (!layout.containsKey(chunkKey)) { + String s = oldLayoutMap.get(chunkKey); + Chunk c2 = Chunk.fromString(s); + Chunk test = readChunkHeaderAndFooter(c2.block, c2.id); + if (test == null) { + return false; + } + } } + } catch (MVStoreException e) { + // the chunk missing where the metadata is stored + return false; } return true; } /** - * Increment the number of unsaved pages. + * Adjust amount of "unsaved memory" meaning amount of RAM occupied by pages + * not saved yet to the file. This is the amount which triggers auto-commit. 
* - * @param memory the memory usage of the page + * @param memory adjustment */ - void registerUnsavedPage(int memory) { + public void registerUnsavedMemory(int memory) { + // this counter was intentionally left unprotected against race + // condition for performance reasons + // TODO: evaluate performance impact of atomic implementation, + // since updates to unsavedMemory are largely aggregated now unsavedMemory += memory; int newValue = unsavedMemory; if (newValue > autoCommitMemory && autoCommitMemory > 0) { @@ -2077,28 +2845,47 @@ void registerUnsavedPage(int memory) { } } + boolean isSaveNeeded() { + return saveNeeded; + } + /** * This method is called before writing to a map. * * @param map the map */ void beforeWrite(MVMap map) { - if (saveNeeded) { - if (map == meta) { - // to, don't save while the metadata map is locked - // this is to avoid deadlocks that could occur when we - // synchronize on the store and then on the metadata map - // TODO there should be no deadlocks possible - return; - } + if (saveNeeded && fileStore != null && isOpenOrStopping() && + // condition below is to prevent potential deadlock, + // because we should never seek storeLock while holding + // map root lock + (storeLock.isHeldByCurrentThread() || !map.getRoot().isLockedByCurrentThread()) && + // to avoid infinite recursion via store() -> dropUnusedChunks() -> layout.remove() + map != layout) { + saveNeeded = false; // check again, because it could have been written by now - if (unsavedMemory > autoCommitMemory && autoCommitMemory > 0) { - commitAndSave(); + if (autoCommitMemory > 0 && needStore()) { + // if unsaved memory creation rate is to high, + // some back pressure need to be applied + // to slow things down and avoid OOME + if (requireStore() && !map.isSingleWriter()) { + commit(MVStore::requireStore); + } else { + tryCommit(MVStore::needStore); + } } } } + private boolean requireStore() { + return 3 * unsavedMemory > 4 * autoCommitMemory; + } + + private boolean 
needStore() { + return unsavedMemory > autoCommitMemory; + } + /** * Get the store version. The store version is usually used to upgrade the * structure of the store after upgrading the application. Initially the @@ -2117,10 +2904,15 @@ public int getStoreVersion() { * * @param version the new store version */ - public synchronized void setStoreVersion(int version) { - checkOpen(); - markMetaChanged(); - meta.put("setting.storeVersion", Integer.toHexString(version)); + public void setStoreVersion(int version) { + storeLock.lock(); + try { + checkOpen(); + markMetaChanged(); + meta.put("setting.storeVersion", Integer.toHexString(version)); + } finally { + storeLock.unlock(); + } } /** @@ -2139,122 +2931,110 @@ public void rollback() { * * @param version the version to revert to */ - public synchronized void rollbackTo(long version) { - checkOpen(); - if (version == 0) { - // special case: remove all data - for (MVMap m : maps.values()) { - m.close(); - } - meta.clear(); - chunks.clear(); - if (fileStore != null) { - fileStore.clear(); - } - maps.clear(); - freedPageSpace.clear(); + public void rollbackTo(long version) { + storeLock.lock(); + try { + checkOpen(); currentVersion = version; - setWriteVersion(version); - metaChanged = false; - return; - } - DataUtils.checkArgument( - isKnownVersion(version), - "Unknown version {0}", version); - for (MVMap m : maps.values()) { - m.rollbackTo(version); - } - for (long v = currentVersion; v >= version; v--) { - if (freedPageSpace.size() == 0) { - break; + if (version == 0) { + // special case: remove all data + layout.setInitialRoot(layout.createEmptyLeaf(), INITIAL_VERSION); + meta.setInitialRoot(meta.createEmptyLeaf(), INITIAL_VERSION); + layout.put(META_ID_KEY, Integer.toHexString(meta.getId())); + deadChunks.clear(); + removedPages.clear(); + chunks.clear(); + clearCaches(); + if (fileStore != null) { + saveChunkLock.lock(); + try { + fileStore.clear(); + } finally { + saveChunkLock.unlock(); + } + } + lastChunk = 
null; + versions.clear(); + setWriteVersion(version); + metaChanged = false; + for (MVMap m : maps.values()) { + m.close(); + } + return; } - freedPageSpace.remove(v); - } - meta.rollbackTo(version); - metaChanged = false; - boolean loadFromFile = false; - // get the largest chunk with a version - // higher or equal the requested version - Chunk removeChunksNewerThan = null; - Chunk c = lastChunk; - while (true) { - if (c == null || c.version < version) { - break; + DataUtils.checkArgument( + isKnownVersion(version), + "Unknown version {0}", version); + + TxCounter txCounter; + while ((txCounter = versions.peekLast()) != null && txCounter.version >= version) { + versions.removeLast(); } - removeChunksNewerThan = c; - c = chunks.get(c.id - 1); - } - Chunk last = lastChunk; - if (removeChunksNewerThan != null && - last.version > removeChunksNewerThan.version) { - revertTemp(version); - loadFromFile = true; - while (true) { - last = lastChunk; - if (last == null) { - break; - } else if (last.version <= removeChunksNewerThan.version) { - break; - } - chunks.remove(lastChunk.id); - long start = last.block * BLOCK_SIZE; - int length = last.len * BLOCK_SIZE; - fileStore.free(start, length); - // need to overwrite the chunk, - // so it can not be used - WriteBuffer buff = getWriteBuffer(); - buff.limit(length); - // buff.clear() does not set the data - Arrays.fill(buff.getBuffer().array(), (byte) 0); - write(start, buff.getBuffer()); - releaseWriteBuffer(buff); - lastChunk = chunks.get(lastChunk.id - 1); + currentTxCounter = new TxCounter(version); + + if (!layout.rollbackRoot(version)) { + MVMap layoutMap = getLayoutMap(version); + layout.setInitialRoot(layoutMap.getRootPage(), version); } - writeStoreHeader(); - readStoreHeader(); - } - for (MVMap m : New.arrayList(maps.values())) { - int id = m.getId(); - if (m.getCreateVersion() >= version) { - m.close(); - maps.remove(id); - } else { - if (loadFromFile) { - m.setRootPos(getRootPos(meta, id), -1); + if 
(!meta.rollbackRoot(version)) { + meta.setRootPos(getRootPos(meta.getId()), version - 1); + } + metaChanged = false; + + for (MVMap m : new ArrayList<>(maps.values())) { + int id = m.getId(); + if (m.getCreateVersion() >= version) { + m.close(); + maps.remove(id); + } else { + if (!m.rollbackRoot(version)) { + m.setRootPos(getRootPos(id), version - 1); + } } } - } - // rollback might have rolled back the stored chunk metadata as well - if (lastChunk != null) { - c = chunks.get(lastChunk.id - 1); - if (c != null) { - meta.put(Chunk.getMetaKey(c.id), c.asString()); + deadChunks.clear(); + removedPages.clear(); + clearCaches(); + + serializationLock.lock(); + try { + Chunk keep = getChunkForVersion(version); + if (keep != null) { + saveChunkLock.lock(); + try { + setLastChunk(keep); + storeHeader.put(HDR_CLEAN, 1); + writeStoreHeader(); + readStoreHeader(); + } finally { + saveChunkLock.unlock(); + } + } + } finally { + serializationLock.unlock(); } + onVersionChange(currentVersion); + assert !hasUnsavedChanges(); + } finally { + unlockAndCheckPanicCondition(); } - currentVersion = version; - setWriteVersion(version); - } - - private static long getRootPos(MVMap map, int mapId) { - String root = map.get(MVMap.getMapRootKey(mapId)); - return root == null ? 0 : DataUtils.parseHexLong(root); } - private void revertTemp(long storeVersion) { - for (Iterator it = freedPageSpace.keySet().iterator(); - it.hasNext();) { - long v = it.next(); - if (v > storeVersion) { - continue; - } - it.remove(); + private void clearCaches() { + if (cache != null) { + cache.clear(); } - for (MVMap m : maps.values()) { - m.removeUnusedOldVersions(); + if (chunksToC != null) { + chunksToC.clear(); } } + private long getRootPos(int mapId) { + String root = layout.get(MVMap.getMapRootKey(mapId)); + return root == null ? 0 : DataUtils.parseHexLong(root); + } + /** * Get the current version of the data. When a new store is created, the * version is 0. 
@@ -2286,8 +3066,8 @@ public Map getStoreHeader() { } private void checkOpen() { - if (closed) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED, + if (!isOpenOrStopping()) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_CLOSED, "This store is closed", panicException); } } @@ -2298,43 +3078,84 @@ private void checkOpen() { * @param map the map * @param newName the new name */ - public synchronized void renameMap(MVMap map, String newName) { + public void renameMap(MVMap map, String newName) { checkOpen(); - DataUtils.checkArgument(map != meta, + DataUtils.checkArgument(map != layout && map != meta, "Renaming the meta map is not allowed"); int id = map.getId(); String oldName = getMapName(id); - if (oldName.equals(newName)) { - return; + if (oldName != null && !oldName.equals(newName)) { + String idHexStr = Integer.toHexString(id); + // at first create a new name as an "alias" + String existingIdHexStr = meta.putIfAbsent(DataUtils.META_NAME + newName, idHexStr); + // we need to cope with the case of previously unfinished rename + DataUtils.checkArgument( + existingIdHexStr == null || existingIdHexStr.equals(idHexStr), + "A map named {0} already exists", newName); + // switch roles of a new and old names - old one is an alias now + meta.put(MVMap.getMapKey(id), map.asString(newName)); + // get rid of the old name completely + meta.remove(DataUtils.META_NAME + oldName); + markMetaChanged(); } - DataUtils.checkArgument( - !meta.containsKey("name." + newName), - "A map named {0} already exists", newName); - markMetaChanged(); - String x = Integer.toHexString(id); - meta.remove("name." + oldName); - meta.put(MVMap.getMapKey(id), map.asString(newName)); - meta.put("name." + newName, x); } /** - * Remove a map. Please note rolling back this operation does not restore - * the data; if you need this ability, use Map.clear(). + * Remove a map from the current version of the store. 
* * @param map the map to remove */ - public synchronized void removeMap(MVMap map) { - checkOpen(); - DataUtils.checkArgument(map != meta, - "Removing the meta map is not allowed"); - map.clear(); - int id = map.getId(); - String name = getMapName(id); - markMetaChanged(); - meta.remove(MVMap.getMapKey(id)); - meta.remove("name." + name); - meta.remove(MVMap.getMapRootKey(id)); - maps.remove(id); + public void removeMap(MVMap map) { + storeLock.lock(); + try { + checkOpen(); + DataUtils.checkArgument(layout != meta && map != meta, + "Removing the meta map is not allowed"); + RootReference rootReference = map.clearIt(); + map.close(); + + updateCounter += rootReference.updateCounter; + updateAttemptCounter += rootReference.updateAttemptCounter; + + int id = map.getId(); + String name = getMapName(id); + if (meta.remove(MVMap.getMapKey(id)) != null) { + markMetaChanged(); + } + if (meta.remove(DataUtils.META_NAME + name) != null) { + markMetaChanged(); + } + } finally { + storeLock.unlock(); + } + } + + /** + * Performs final stage of map removal - delete root location info from the layout table. + * Map is supposedly closed and anonymous and has no outstanding usage by now. + * + * @param mapId to deregister + */ + void deregisterMapRoot(int mapId) { + if (layout.remove(MVMap.getMapRootKey(mapId)) != null) { + markMetaChanged(); + } + } + + /** + * Remove map by name. + * + * @param name the map name + */ + public void removeMap(String name) { + int id = getMapId(name); + if(id > 0) { + MVMap map = getMap(id); + if (map == null) { + map = openMap(name, MVStoreTool.getGenericMapBuilder()); + } + removeMap(map); + } } /** @@ -2343,10 +3164,14 @@ public synchronized void removeMap(MVMap map) { * @param id the map id * @return the name, or null if not found */ - public synchronized String getMapName(int id) { - checkOpen(); + public String getMapName(int id) { String m = meta.get(MVMap.getMapKey(id)); - return m == null ? 
null : DataUtils.parseMap(m).get("name"); + return m == null ? null : DataUtils.getMapName(m); + } + + private int getMapId(String name) { + String m = meta.get(DataUtils.META_NAME + name); + return m == null ? -1 : DataUtils.parseHexInt(m); } /** @@ -2354,47 +3179,124 @@ public synchronized String getMapName(int id) { * needed. */ void writeInBackground() { - if (closed) { - return; - } + try { + if (!isOpenOrStopping() || isReadOnly()) { + return; + } - // could also commit when there are many unsaved pages, - // but according to a test it doesn't really help + // could also commit when there are many unsaved pages, + // but according to a test it doesn't really help - long time = getTime(); - if (time <= lastCommitTime + autoCommitDelay) { - return; + long time = getTimeSinceCreation(); + if (time > lastCommitTime + autoCommitDelay) { + tryCommit(); + if (autoCompactFillRate < 0) { + compact(-getTargetFillRate(), autoCommitMemory); + } + } + int fillRate = getFillRate(); + if (fileStore.isFragmented() && fillRate < autoCompactFillRate) { + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + int moveSize = autoCommitMemory; + if (isIdle()) { + moveSize *= 4; + } + compactMoveChunks(101, moveSize); + } finally { + unlockAndCheckPanicCondition(); + } + } + } else if (fillRate >= autoCompactFillRate && lastChunk != null) { + int chunksFillRate = getRewritableChunksFillRate(); + chunksFillRate = isIdle() ? 
100 - (100 - chunksFillRate) / 2 : chunksFillRate; + if (chunksFillRate < getTargetFillRate()) { + if (storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + try { + int writeLimit = autoCommitMemory * fillRate / Math.max(chunksFillRate, 1); + if (!isIdle()) { + writeLimit /= 4; + } + if (rewriteChunks(writeLimit, chunksFillRate)) { + dropUnusedChunks(); + } + } finally { + storeLock.unlock(); + } + } + } + } + autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); + } catch (InterruptedException ignore) { + } catch (Throwable e) { + handleException(e); + if (backgroundExceptionHandler == null) { + throw e; + } } - if (hasUnsavedChanges()) { + } + + private void doMaintenance(int targetFillRate) { + if (autoCompactFillRate > 0 && lastChunk != null && reuseSpace) { try { - commitAndSave(); - } catch (Exception e) { - if (backgroundExceptionHandler != null) { - backgroundExceptionHandler.uncaughtException(null, e); - return; + int lastProjectedFillRate = -1; + for (int cnt = 0; cnt < 5; cnt++) { + int fillRate = getFillRate(); + int projectedFillRate = fillRate; + if (fillRate > targetFillRate) { + projectedFillRate = getProjectedFillRate(100); + if (projectedFillRate > targetFillRate || projectedFillRate <= lastProjectedFillRate) { + break; + } + } + lastProjectedFillRate = projectedFillRate; + // We can't wait forever for the lock here, + // because if called from the background thread, + // it might go into deadlock with concurrent database closure + // and attempt to stop this thread. 
+ if (!storeLock.tryLock(10, TimeUnit.MILLISECONDS)) { + break; + } + try { + int writeLimit = autoCommitMemory * targetFillRate / Math.max(projectedFillRate, 1); + if (projectedFillRate < fillRate) { + if ((!rewriteChunks(writeLimit, targetFillRate) || dropUnusedChunks() == 0) && cnt > 0) { + break; + } + } + if (!compactMoveChunks(101, writeLimit)) { + break; + } + } finally { + unlockAndCheckPanicCondition(); + } } + } catch (InterruptedException e) { + throw new RuntimeException(e); } } - if (autoCompactFillRate > 0) { + } + + private int getTargetFillRate() { + int targetRate = autoCompactFillRate; + // use a lower fill rate if there were any file operations since the last time + if (!isIdle()) { + targetRate /= 2; + } + return targetRate; + } + + private boolean isIdle() { + return autoCompactLastFileOpCount == fileStore.getWriteCount() + fileStore.getReadCount(); + } + + private void handleException(Throwable ex) { + if (backgroundExceptionHandler != null) { try { - // whether there were file read or write operations since - // the last time - boolean fileOps; - long fileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); - if (autoCompactLastFileOpCount != fileOpCount) { - fileOps = true; - } else { - fileOps = false; - } - // use a lower fill rate if there were any file operations - int fillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate; - // TODO how to avoid endless compaction if there is a bug - // in the bookkeeping? - compact(fillRate, autoCommitMemory); - autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); - } catch (Exception e) { - if (backgroundExceptionHandler != null) { - backgroundExceptionHandler.uncaughtException(null, e); + backgroundExceptionHandler.uncaughtException(Thread.currentThread(), ex); + } catch(Throwable e) { + if (ex != e) { // OOME may be the same + ex.addSuppressed(e); } } } @@ -2406,38 +3308,65 @@ void writeInBackground() { * @param mb the cache size in MB. 
*/ public void setCacheSize(int mb) { + final long bytes = (long) mb * 1024 * 1024; if (cache != null) { - cache.setMaxMemory((long) mb * 1024 * 1024); + cache.setMaxMemory(bytes); cache.clear(); } } - public boolean isClosed() { - return closed; + private boolean isOpen() { + return state == STATE_OPEN; } - private void stopBackgroundThread() { - BackgroundWriterThread t = backgroundWriterThread; - if (t == null) { - return; - } - backgroundWriterThread = null; - if (Thread.currentThread() == t) { - // within the thread itself - can not join - return; - } - synchronized (t.sync) { - t.sync.notifyAll(); - } - if (Thread.holdsLock(this)) { - // called from storeNow: can not join, - // because that could result in a deadlock - return; + /** + * Determine that store is open, or wait for it to be closed (by other thread) + * @return true if store is open, false otherwise + */ + public boolean isClosed() { + if (isOpen()) { + return false; } + storeLock.lock(); try { - t.join(); - } catch (Exception e) { - // ignore + assert state == STATE_CLOSED; + return true; + } finally { + storeLock.unlock(); + } + } + + private boolean isOpenOrStopping() { + return state <= STATE_STOPPING; + } + + private void stopBackgroundThread(boolean waitForIt) { + // Loop here is not strictly necessary, except for case of a spurious failure, + // which should not happen with non-weak flavour of CAS operation, + // but I've seen it, so just to be safe... 
+ BackgroundWriterThread t; + while ((t = backgroundWriterThread.get()) != null) { + if (backgroundWriterThread.compareAndSet(t, null)) { + // if called from within the thread itself - can not join + if (t != Thread.currentThread()) { + synchronized (t.sync) { + t.sync.notifyAll(); + } + + if (waitForIt) { + try { + t.join(); + } catch (Exception e) { + // ignore + } + } + } + shutdownExecutor(serializationExecutor); + serializationExecutor = null; + shutdownExecutor(bufferSaveExecutor); + bufferSaveExecutor = null; + break; + } } } @@ -2460,18 +3389,35 @@ public void setAutoCommitDelay(int millis) { if (fileStore == null || fileStore.isReadOnly()) { return; } - stopBackgroundThread(); + stopBackgroundThread(true); // start the background thread if needed - if (millis > 0) { + if (millis > 0 && isOpen()) { int sleep = Math.max(1, millis / 10); BackgroundWriterThread t = new BackgroundWriterThread(this, sleep, fileStore.toString()); - t.start(); - backgroundWriterThread = t; + if (backgroundWriterThread.compareAndSet(null, t)) { + t.start(); + serializationExecutor = createSingleThreadExecutor("H2-serialization"); + bufferSaveExecutor = createSingleThreadExecutor("H2-save"); + } } } + private static ThreadPoolExecutor createSingleThreadExecutor(String threadName) { + return new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + r -> { + Thread thread = new Thread(r, threadName); + thread.setDaemon(true); + return thread; + }); + } + + public boolean isBackgroundThread() { + return Thread.currentThread() == backgroundWriterThread.get(); + } + /** * Get the auto-commit delay. * @@ -2505,19 +3451,18 @@ public int getUnsavedMemory() { /** * Put the page in the cache. 
- * - * @param pos the page position * @param page the page - * @param memory the memory used */ - void cachePage(long pos, Page page, int memory) { + void cachePage(Page page) { if (cache != null) { - cache.put(pos, page, memory); + cache.put(page.getPos(), page, page.getMemory()); } } /** * Get the amount of memory used for caching, in MB. + * Note that this does not include the page chunk references cache, which is + * 25% of the size of the page cache. * * @return the amount of memory used for caching */ @@ -2525,11 +3470,13 @@ public int getCacheSizeUsed() { if (cache == null) { return 0; } - return (int) (cache.getUsedMemory() / 1024 / 1024); + return (int) (cache.getUsedMemory() >> 20); } /** * Get the maximum cache size, in MB. + * Note that this does not include the page chunk references cache, which is + * 25% of the size of the page cache. * * @return the cache size */ @@ -2537,7 +3484,7 @@ public int getCacheSize() { if (cache == null) { return 0; } - return (int) (cache.getMaxMemory() / 1024 / 1024); + return (int) (cache.getMaxMemory() >> 20); } /** @@ -2545,10 +3492,236 @@ public int getCacheSize() { * * @return the cache */ - public CacheLongKeyLIRS getCache() { + public CacheLongKeyLIRS> getCache() { return cache; } + /** + * Whether the store is read-only. 
+ * + * @return true if it is + */ + public boolean isReadOnly() { + return fileStore != null && fileStore.isReadOnly(); + } + + public int getCacheHitRatio() { + return getCacheHitRatio(cache); + } + + public int getTocCacheHitRatio() { + return getCacheHitRatio(chunksToC); + } + + private static int getCacheHitRatio(CacheLongKeyLIRS cache) { + if (cache == null) { + return 0; + } + long hits = cache.getHits(); + return (int) (100 * hits / (hits + cache.getMisses() + 1)); + } + + public int getLeafRatio() { + return (int)(leafCount * 100 / Math.max(1, leafCount + nonLeafCount)); + } + + public double getUpdateFailureRatio() { + long updateCounter = this.updateCounter; + long updateAttemptCounter = this.updateAttemptCounter; + RootReference rootReference = layout.getRoot(); + updateCounter += rootReference.updateCounter; + updateAttemptCounter += rootReference.updateAttemptCounter; + rootReference = meta.getRoot(); + updateCounter += rootReference.updateCounter; + updateAttemptCounter += rootReference.updateAttemptCounter; + for (MVMap map : maps.values()) { + RootReference root = map.getRoot(); + updateCounter += root.updateCounter; + updateAttemptCounter += root.updateAttemptCounter; + } + return updateAttemptCounter == 0 ? 0 : 1 - ((double)updateCounter / updateAttemptCounter); + } + + /** + * Register opened operation (transaction). + * This would increment usage counter for the current version. + * This version (and all after it) should not be dropped until all + * transactions involved are closed and usage counter goes to zero. + * @return TxCounter to be decremented when operation finishes (transaction closed). + */ + public TxCounter registerVersionUsage() { + TxCounter txCounter; + while(true) { + txCounter = currentTxCounter; + if(txCounter.incrementAndGet() > 0) { + return txCounter; + } + // The only way for counter to be negative + // if it was retrieved right before onVersionChange() + // and now onVersionChange() is done. 
+ // This version is eligible for reclamation now + // and should not be used here, so restore count + // not to upset accounting and try again with a new + // version (currentTxCounter should have changed). + assert txCounter != currentTxCounter : txCounter; + txCounter.decrementAndGet(); + } + } + + /** + * De-register (close) completed operation (transaction). + * This will decrement usage counter for the corresponding version. + * If counter reaches zero, that version (and all unused after it) + * can be dropped immediately. + * + * @param txCounter to be decremented, obtained from registerVersionUsage() + */ + public void deregisterVersionUsage(TxCounter txCounter) { + if(txCounter != null) { + if(txCounter.decrementAndGet() <= 0) { + if (storeLock.isHeldByCurrentThread()) { + dropUnusedVersions(); + } else if (storeLock.tryLock()) { + try { + dropUnusedVersions(); + } finally { + storeLock.unlock(); + } + } + } + } + } + + private void onVersionChange(long version) { + TxCounter txCounter = currentTxCounter; + assert txCounter.get() >= 0; + versions.add(txCounter); + currentTxCounter = new TxCounter(version); + txCounter.decrementAndGet(); + dropUnusedVersions(); + } + + private void dropUnusedVersions() { + TxCounter txCounter; + while ((txCounter = versions.peek()) != null + && txCounter.get() < 0) { + versions.poll(); + } + setOldestVersionToKeep((txCounter != null ? 
txCounter : currentTxCounter).version); + } + + private int dropUnusedChunks() { + assert storeLock.isHeldByCurrentThread(); + int count = 0; + if (!deadChunks.isEmpty()) { + long oldestVersionToKeep = getOldestVersionToKeep(); + long time = getTimeSinceCreation(); + saveChunkLock.lock(); + try { + Chunk chunk; + while ((chunk = deadChunks.poll()) != null && + (isSeasonedChunk(chunk, time) && canOverwriteChunk(chunk, oldestVersionToKeep) || + // if chunk is not ready yet, put it back and exit + // since this deque is unbounded, offerFirst() always return true + !deadChunks.offerFirst(chunk))) { + + if (chunks.remove(chunk.id) != null) { + // purge dead pages from cache + long[] toc = chunksToC.remove(chunk.id); + if (toc != null && cache != null) { + for (long tocElement : toc) { + long pagePos = DataUtils.getPagePos(chunk.id, tocElement); + cache.remove(pagePos); + } + } + + if (layout.remove(Chunk.getMetaKey(chunk.id)) != null) { + markMetaChanged(); + } + if (chunk.isSaved()) { + freeChunkSpace(chunk); + } + ++count; + } + } + } finally { + saveChunkLock.unlock(); + } + } + return count; + } + + private void freeChunkSpace(Chunk chunk) { + long start = chunk.block * BLOCK_SIZE; + int length = chunk.len * BLOCK_SIZE; + freeFileSpace(start, length); + } + + private void freeFileSpace(long start, int length) { + fileStore.free(start, length); + assert validateFileLength(start + ":" + length); + } + + private boolean validateFileLength(String msg) { + assert saveChunkLock.isHeldByCurrentThread(); + assert fileStore.getFileLengthInUse() == measureFileLengthInUse() : + fileStore.getFileLengthInUse() + " != " + measureFileLengthInUse() + " " + msg; + return true; + } + + /** + * Class TxCounter is a simple data structure to hold version of the store + * along with the counter of open transactions, + * which are still operating on this version. 
+ */ + public static final class TxCounter { + + /** + * Version of a store, this TxCounter is related to + */ + public final long version; + + /** + * Counter of outstanding operation on this version of a store + */ + private volatile int counter; + + private static final AtomicIntegerFieldUpdater counterUpdater = + AtomicIntegerFieldUpdater.newUpdater(TxCounter.class, "counter"); + + + TxCounter(long version) { + this.version = version; + } + + int get() { + return counter; + } + + /** + * Increment and get the counter value. + * + * @return the new value + */ + int incrementAndGet() { + return counterUpdater.incrementAndGet(this); + } + + /** + * Decrement and get the counter values. + * + * @return the new value + */ + int decrementAndGet() { + return counterUpdater.decrementAndGet(this); + } + + @Override + public String toString() { + return "v=" + version + " / cnt=" + counter; + } + } + /** * A background writer thread to automatically store changes from time to * time. @@ -2568,30 +3741,101 @@ private static class BackgroundWriterThread extends Thread { @Override public void run() { - while (true) { - Thread t = store.backgroundWriterThread; - if (t == null) { - break; - } + while (store.isBackgroundThread()) { synchronized (sync) { try { sync.wait(sleep); - } catch (InterruptedException e) { - continue; + } catch (InterruptedException ignore) { } } + if (!store.isBackgroundThread()) { + break; + } store.writeInBackground(); } } + } + + private static class RemovedPageInfo implements Comparable { + final long version; + final long removedPageInfo; + + RemovedPageInfo(long pagePos, boolean pinned, long version, int pageNo) { + this.removedPageInfo = createRemovedPageInfo(pagePos, pinned, pageNo); + this.version = version; + } + + @Override + public int compareTo(RemovedPageInfo other) { + return Long.compare(version, other.version); + } + + int getPageChunkId() { + return DataUtils.getPageChunkId(removedPageInfo); + } + + int getPageNo() { + return 
DataUtils.getPageOffset(removedPageInfo); + } + + int getPageLength() { + return DataUtils.getPageMaxLength(removedPageInfo); + } + + /** + * Find out if removed page was pinned (can not be evacuated to a new chunk). + * @return true if page has been pinned + */ + boolean isPinned() { + return (removedPageInfo & 1) == 1; + } + + /** + * Transforms saved page position into removed page info by + * replacing "page offset" with "page sequential number" and + * "page type" bit with "pinned page" flag. + * @param pagePos of the saved page + * @param isPinned whether page belong to a "single writer" map + * @param pageNo 0-based sequential page number within containing chunk + * @return removed page info that contains chunk id, page number, page length and pinned flag + */ + private static long createRemovedPageInfo(long pagePos, boolean isPinned, int pageNo) { + long result = (pagePos & ~((0xFFFFFFFFL << 6) | 1)) | ((pageNo << 6) & 0xFFFFFFFFL); + if (isPinned) { + result |= 1; + } + return result; + } + @Override + public String toString() { + return "RemovedPageInfo{" + + "version=" + version + + ", chunk=" + getPageChunkId() + + ", pageNo=" + getPageNo() + + ", len=" + getPageLength() + + (isPinned() ? ", pinned" : "") + + '}'; + } } /** * A builder for an MVStore. */ - public static class Builder { + public static final class Builder { + + private final HashMap config; + + private Builder(HashMap config) { + this.config = config; + } - private final HashMap config = New.hashMap(); + /** + * Creates new instance of MVStore.Builder. 
+ */ + public Builder() { + config = new HashMap<>(); + } private Builder set(String key, Object value) { config.put(key, value); @@ -2609,7 +3853,7 @@ public Builder autoCommitDisabled() { // no thread is started if the write delay is 0 // (if we only had a setter in the MVStore, // the thread would need to be started in any case) - set("autoCommitBufferSize", 0); + //set("autoCommitBufferSize", 0); return set("autoCommitDelay", 0); } @@ -2638,8 +3882,8 @@ public Builder autoCommitBufferSize(int kb) { * this value, then chunks at the end of the file are moved. Compaction * stops if the target fill rate is reached. *

    - * The default value is 50 (50%). The value 0 disables auto-compacting. - *

    + * The default value is 90 (90%). The value 0 disables auto-compacting. + *

    * * @param percent the target fill rate * @return this @@ -2690,6 +3934,25 @@ public Builder readOnly() { return set("readOnly", 1); } + /** + * Set the number of keys per page. + * + * @param keyCount the number of keys + * @return this + */ + public Builder keysPerPage(int keyCount) { + return set("keysPerPage", keyCount); + } + + /** + * Open the file in recovery mode, where some errors may be ignored. + * + * @return this + */ + public Builder recoveryMode() { + return set("recoveryMode", 1); + } + /** * Set the read cache size in MB. The default is 16 MB. * @@ -2700,6 +3963,17 @@ public Builder cacheSize(int mb) { return set("cacheSize", mb); } + /** + * Set the read cache concurrency. The default is 16, meaning 16 + * segments are used. + * + * @param concurrency the cache concurrency + * @return this + */ + public Builder cacheConcurrency(int concurrency) { + return set("cacheConcurrency", concurrency); + } + /** * Compress data before writing using the LZF algorithm. This will save * about 50% of the disk space, but will slow down read and write @@ -2793,13 +4067,10 @@ public String toString() { * @param s the string representation * @return the builder */ + @SuppressWarnings({"unchecked", "rawtypes", "unused"}) public static Builder fromString(String s) { - HashMap config = DataUtils.parseMap(s); - Builder builder = new Builder(); - builder.config.putAll(config); - return builder; + // Cast from HashMap to HashMap is safe + return new Builder((HashMap) DataUtils.parseMap(s)); } - } - } diff --git a/h2/src/main/org/h2/mvstore/MVStoreException.java b/h2/src/main/org/h2/mvstore/MVStoreException.java new file mode 100644 index 0000000000..0cd1b95c7b --- /dev/null +++ b/h2/src/main/org/h2/mvstore/MVStoreException.java @@ -0,0 +1,25 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore; + +/** + * Various kinds of MVStore problems, along with associated error code. + */ +public class MVStoreException extends RuntimeException { + + private static final long serialVersionUID = 2847042930249663807L; + + private final int errorCode; + + public MVStoreException(int errorCode, String message) { + super(message); + this.errorCode = errorCode; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git a/h2/src/main/org/h2/mvstore/MVStoreTool.java b/h2/src/main/org/h2/mvstore/MVStoreTool.java index 9bf20561b9..ae7f5e4f37 100644 --- a/h2/src/main/org/h2/mvstore/MVStoreTool.java +++ b/h2/src/main/org/h2/mvstore/MVStoreTool.java @@ -1,26 +1,33 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; import java.io.IOException; +import java.io.OutputStream; import java.io.PrintWriter; import java.io.Writer; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.sql.Timestamp; import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; +import org.h2.compress.CompressDeflate; +import org.h2.compress.CompressLZF; +import org.h2.compress.Compressor; import org.h2.engine.Constants; import org.h2.message.DbException; -import org.h2.mvstore.type.DataType; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; +import org.h2.util.Utils; /** * Utility methods used in combination with the MVStore. @@ -31,6 +38,7 @@ public class MVStoreTool { * Runs this tool. * Options are case sensitive. 
Supported options are: *

    + * * * * @@ -57,6 +65,13 @@ public static void main(String... args) { } else if ("-compress".equals(args[i])) { String fileName = args[++i]; compact(fileName, true); + } else if ("-rollback".equals(args[i])) { + String fileName = args[++i]; + long targetVersion = Long.decode(args[++i]); + rollback(fileName, targetVersion, new PrintWriter(System.out)); + } else if ("-repair".equals(args[i])) { + String fileName = args[++i]; + repair(fileName); } } } @@ -96,26 +111,34 @@ public static void dump(String fileName, Writer writer, boolean details) { } long size = FileUtils.size(fileName); pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024); - FileChannel file = null; int blockSize = MVStore.BLOCK_SIZE; TreeMap mapSizesTotal = - new TreeMap(); + new TreeMap<>(); long pageSizeTotal = 0; - try { - file = FilePath.get(fileName).open("r"); + try (FileChannel file = FilePath.get(fileName).open("r")) { long fileSize = file.size(); int len = Long.toHexString(fileSize).length(); ByteBuffer block = ByteBuffer.allocate(4096); long pageCount = 0; - for (long pos = 0; pos < fileSize;) { + for (long pos = 0; pos < fileSize; ) { block.rewind(); - DataUtils.readFully(file, pos, block); + // Bugfix - An MVStoreException that wraps EOFException is + // thrown when partial writes happens in the case of power off + // or file system issues. + // So we should skip the broken block at end of the DB file. 
+ try { + DataUtils.readFully(file, pos, block); + } catch (MVStoreException e) { + pos += blockSize; + pw.printf("ERROR illegal position %d%n", pos); + continue; + } block.rewind(); int headerType = block.get(); if (headerType == 'H') { + String header = new String(block.array(), StandardCharsets.ISO_8859_1).trim(); pw.printf("%0" + len + "x fileHeader %s%n", - pos, - new String(block.array(), DataUtils.LATIN).trim()); + pos, header); pos += blockSize; continue; } @@ -124,10 +147,10 @@ public static void dump(String fileName, Writer writer, boolean details) { continue; } block.position(0); - Chunk c = null; + Chunk c; try { c = Chunk.readChunkHeader(block, pos); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { pos += blockSize; continue; } @@ -146,9 +169,10 @@ public static void dump(String fileName, Writer writer, boolean details) { int remaining = c.pageCount; pageCount += c.pageCount; TreeMap mapSizes = - new TreeMap(); + new TreeMap<>(); int pageSizeSum = 0; while (remaining > 0) { + int start = p; try { chunk.position(p); } catch (IllegalArgumentException e) { @@ -159,23 +183,24 @@ public static void dump(String fileName, Writer writer, boolean details) { int pageSize = chunk.getInt(); // check value (ignored) chunk.getShort(); + /*int pageNo =*/ DataUtils.readVarInt(chunk); int mapId = DataUtils.readVarInt(chunk); int entries = DataUtils.readVarInt(chunk); int type = chunk.get(); - boolean compressed = (type & 2) != 0; - boolean node = (type & 1) != 0; + boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0; + boolean node = (type & DataUtils.PAGE_TYPE_NODE) != 0; if (details) { pw.printf( "+%0" + len + - "x %s, map %x, %d entries, %d bytes, maxLen %x%n", + "x %s, map %x, %d entries, %d bytes, maxLen %x%n", p, (node ? "node" : "leaf") + - (compressed ? " compressed" : ""), + (compressed ? " compressed" : ""), mapId, node ? 
entries + 1 : entries, pageSize, DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0)) - ); + ); } p += pageSize; Integer mapSize = mapSizes.get(mapId); @@ -206,19 +231,31 @@ public static void dump(String fileName, Writer writer, boolean details) { } String[] keys = new String[entries]; if (mapId == 0 && details) { - if (!compressed) { - for (int i = 0; i < entries; i++) { - String k = StringDataType.INSTANCE.read(chunk); - keys[i] = k; - } + ByteBuffer data; + if (compressed) { + boolean fast = (type & DataUtils.PAGE_COMPRESSED_HIGH) != DataUtils.PAGE_COMPRESSED_HIGH; + Compressor compressor = getCompressor(fast); + int lenAdd = DataUtils.readVarInt(chunk); + int compLen = pageSize + start - chunk.position(); + byte[] comp = Utils.newBytes(compLen); + chunk.get(comp); + int l = compLen + lenAdd; + data = ByteBuffer.allocate(l); + compressor.expand(comp, 0, compLen, data.array(), 0, l); + } else { + data = chunk; + } + for (int i = 0; i < entries; i++) { + String k = StringDataType.INSTANCE.read(data); + keys[i] = k; } if (node) { // meta map node for (int i = 0; i < entries; i++) { long cp = children[i]; pw.printf(" %d children < %s @ " + - "chunk %x +%0" + - len + "x%n", + "chunk %x +%0" + + len + "x%n", counts[i], keys[i], DataUtils.getPageChunkId(cp), @@ -226,16 +263,16 @@ public static void dump(String fileName, Writer writer, boolean details) { } long cp = children[entries]; pw.printf(" %d children >= %s @ chunk %x +%0" + - len + "x%n", + len + "x%n", counts[entries], keys.length >= entries ? 
null : keys[entries], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp)); - } else if (!compressed) { + } else { // meta map leaf String[] values = new String[entries]; for (int i = 0; i < entries; i++) { - String v = StringDataType.INSTANCE.read(chunk); + String v = StringDataType.INSTANCE.read(data); values[i] = v; } for (int i = 0; i < entries; i++) { @@ -248,7 +285,7 @@ public static void dump(String fileName, Writer writer, boolean details) { for (int i = 0; i <= entries; i++) { long cp = children[i]; pw.printf(" %d children @ chunk %x +%0" + - len + "x%n", + len + "x%n", counts[i], DataUtils.getPageChunkId(cp), DataUtils.getPageOffset(cp)); @@ -268,7 +305,7 @@ public static void dump(String fileName, Writer writer, boolean details) { "+%0" + len + "x chunkFooter %s%n", footerPos, new String(chunk.array(), chunk.position(), - Chunk.FOOTER_LENGTH, DataUtils.LATIN).trim()); + Chunk.FOOTER_LENGTH, StandardCharsets.ISO_8859_1).trim()); } catch (IllegalArgumentException e) { // too far pw.printf("ERROR illegal footer position %d%n", footerPos); @@ -287,46 +324,44 @@ public static void dump(String fileName, Writer writer, boolean details) { } catch (IOException e) { pw.println("ERROR: " + e); e.printStackTrace(pw); - } finally { - if (file != null) { - try { - file.close(); - } catch (IOException e) { - // ignore - } - } } + // ignore pw.flush(); } + private static Compressor getCompressor(boolean fast) { + return fast ? new CompressLZF() : new CompressDeflate(); + } + /** * Read the summary information of the file and write them to system out. 
* * @param fileName the name of the file * @param writer the print writer + * @return null if successful (if there was no error), otherwise the error + * message */ - public static void info(String fileName, Writer writer) { + public static String info(String fileName, Writer writer) { PrintWriter pw = new PrintWriter(writer, true); if (!FilePath.get(fileName).exists()) { pw.println("File not found: " + fileName); - return; + return "File not found: " + fileName; } long fileLength = FileUtils.size(fileName); - MVStore store = new MVStore.Builder(). - fileName(fileName). - readOnly().open(); - try { - MVMap meta = store.getMetaMap(); + try (MVStore store = new MVStore.Builder(). + fileName(fileName).recoveryMode(). + readOnly().open()) { + MVMap layout = store.getLayoutMap(); Map header = store.getStoreHeader(); long fileCreated = DataUtils.readHexLong(header, "created", 0L); - TreeMap chunks = new TreeMap(); + TreeMap chunks = new TreeMap<>(); long chunkLength = 0; long maxLength = 0; long maxLengthLive = 0; long maxLengthNotEmpty = 0; - for (Entry e : meta.entrySet()) { + for (Entry e : layout.entrySet()) { String k = e.getKey(); - if (k.startsWith("chunk.")) { + if (k.startsWith(DataUtils.META_CHUNK)) { Chunk c = Chunk.fromString(e.getValue()); chunks.put(c.id, c); chunkLength += c.len * MVStore.BLOCK_SIZE; @@ -368,10 +403,10 @@ c.id, formatTimestamp(created, fileCreated), } catch (Exception e) { pw.println("ERROR: " + e); e.printStackTrace(pw); - } finally { - store.close(); + return e.getMessage(); } pw.flush(); + return null; } private static String formatTimestamp(long t, long start) { @@ -447,20 +482,23 @@ public static void compactCleanUp(String fileName) { * @param compress whether to compress the data */ public static void compact(String sourceFileName, String targetFileName, boolean compress) { - MVStore source = new MVStore.Builder(). - fileName(sourceFileName). - readOnly(). 
- open(); - FileUtils.delete(targetFileName); - MVStore.Builder b = new MVStore.Builder(). + try (MVStore source = new MVStore.Builder(). + fileName(sourceFileName).readOnly().open()) { + // Bugfix - Add double "try-finally" statements to close source and target stores for + //releasing lock and file resources in these stores even if OOM occurs. + // Fix issues such as "Cannot delete file "/h2/data/test.mv.db.tempFile" [90025-197]" + //when client connects to this server and reopens this store database in this process. + // @since 2018-09-13 little-pan + FileUtils.delete(targetFileName); + MVStore.Builder b = new MVStore.Builder(). fileName(targetFileName); - if (compress) { - b.compress(); + if (compress) { + b.compress(); + } + try (MVStore target = b.open()) { + compact(source, target); + } } - MVStore target = b.open(); - compact(source, target); - target.close(); - source.close(); } /** @@ -470,65 +508,228 @@ public static void compact(String sourceFileName, String targetFileName, boolean * @param target the target store */ public static void compact(MVStore source, MVStore target) { - MVMap sourceMeta = source.getMetaMap(); - MVMap targetMeta = target.getMetaMap(); - for (Entry m : sourceMeta.entrySet()) { - String key = m.getKey(); - if (key.startsWith("chunk.")) { - // ignore - } else if (key.startsWith("map.")) { - // ignore - } else if (key.startsWith("name.")) { - // ignore - } else if (key.startsWith("root.")) { + int autoCommitDelay = target.getAutoCommitDelay(); + boolean reuseSpace = target.getReuseSpace(); + try { + target.setReuseSpace(false); // disable unused chunks collection + target.setAutoCommitDelay(0); // disable autocommit + MVMap sourceMeta = source.getMetaMap(); + MVMap targetMeta = target.getMetaMap(); + for (Entry m : sourceMeta.entrySet()) { + String key = m.getKey(); + if (key.startsWith(DataUtils.META_MAP)) { + // ignore + } else if (key.startsWith(DataUtils.META_NAME)) { + // ignore + } else { + targetMeta.put(key, m.getValue()); + 
} + } + // We are going to cheat a little bit in the copyFrom() by employing "incomplete" pages, + // which would be spared of saving, but save completed pages underneath, + // and those may appear as dead (non-reachable). + // That's why it is important to preserve all chunks + // created in the process, especially if retention time + // is set to a lower value, or even 0. + for (String mapName : source.getMapNames()) { + MVMap.Builder mp = getGenericMapBuilder(); + // This is a hack to preserve chunks occupancy rate accounting. + // It exposes design deficiency flaw in MVStore related to lack of + // map's type metadata. + // TODO: Introduce type metadata which will allow to open any store + // TODO: without prior knowledge of keys / values types and map implementation + // TODO: (MVMap vs MVRTreeMap, regular vs. singleWriter etc.) + if (mapName.startsWith(TransactionStore.UNDO_LOG_NAME_PREFIX)) { + mp.singleWriter(); + } + MVMap sourceMap = source.openMap(mapName, mp); + MVMap targetMap = target.openMap(mapName, mp); + targetMap.copyFrom(sourceMap); + targetMeta.put(MVMap.getMapKey(targetMap.getId()), sourceMeta.get(MVMap.getMapKey(sourceMap.getId()))); + } + // this will end hacky mode of operation with incomplete pages + // end ensure that all pages are saved + target.commit(); + } finally { + target.setAutoCommitDelay(autoCommitDelay); + target.setReuseSpace(reuseSpace); + } + } + + /** + * Repair a store by rolling back to the newest good version. + * + * @param fileName the file name + */ + public static void repair(String fileName) { + PrintWriter pw = new PrintWriter(System.out); + long version = Long.MAX_VALUE; + OutputStream ignore = new OutputStream() { + @Override + public void write(int b) { // ignore - } else { - targetMeta.put(key, m.getValue()); } + }; + while (version >= 0) { + pw.println(version == Long.MAX_VALUE ? 
"Trying latest version" + : ("Trying version " + version)); + pw.flush(); + version = rollback(fileName, version, new PrintWriter(ignore)); + try { + String error = info(fileName + ".temp", new PrintWriter(ignore)); + if (error == null) { + FilePath.get(fileName).moveTo(FilePath.get(fileName + ".back"), true); + FilePath.get(fileName + ".temp").moveTo(FilePath.get(fileName), true); + pw.println("Success"); + break; + } + pw.println(" ... failed: " + error); + } catch (Exception e) { + pw.println("Fail: " + e.getMessage()); + pw.flush(); + } + version--; } - for (String mapName : source.getMapNames()) { - MVMap.Builder mp = - new MVMap.Builder(). - keyType(new GenericDataType()). - valueType(new GenericDataType()); - MVMap sourceMap = source.openMap(mapName, mp); - MVMap targetMap = target.openMap(mapName, mp); - targetMap.copyFrom(sourceMap); + pw.flush(); + } + + /** + * Roll back to a given revision into a file called *.temp. + * + * @param fileName the file name + * @param targetVersion the version to roll back to (Long.MAX_VALUE for the + * latest version) + * @param writer the log writer + * @return the version rolled back to (-1 if no version) + */ + public static long rollback(String fileName, long targetVersion, Writer writer) { + long newestVersion = -1; + PrintWriter pw = new PrintWriter(writer, true); + if (!FilePath.get(fileName).exists()) { + pw.println("File not found: " + fileName); + return newestVersion; } + FileChannel file = null; + FileChannel target = null; + int blockSize = MVStore.BLOCK_SIZE; + try { + file = FilePath.get(fileName).open("r"); + FilePath.get(fileName + ".temp").delete(); + target = FilePath.get(fileName + ".temp").open("rw"); + long fileSize = file.size(); + ByteBuffer block = ByteBuffer.allocate(4096); + Chunk newestChunk = null; + for (long pos = 0; pos < fileSize;) { + block.rewind(); + DataUtils.readFully(file, pos, block); + block.rewind(); + int headerType = block.get(); + if (headerType == 'H') { + block.rewind(); + 
target.write(block, pos); + pos += blockSize; + continue; + } + if (headerType != 'c') { + pos += blockSize; + continue; + } + Chunk c; + try { + c = Chunk.readChunkHeader(block, pos); + } catch (MVStoreException e) { + pos += blockSize; + continue; + } + if (c.len <= 0) { + // not a chunk + pos += blockSize; + continue; + } + int length = c.len * MVStore.BLOCK_SIZE; + ByteBuffer chunk = ByteBuffer.allocate(length); + DataUtils.readFully(file, pos, chunk); + if (c.version > targetVersion) { + // newer than the requested version + pos += length; + continue; + } + chunk.rewind(); + target.write(chunk, pos); + if (newestChunk == null || c.version > newestChunk.version) { + newestChunk = c; + newestVersion = c.version; + } + pos += length; + } + int length = newestChunk.len * MVStore.BLOCK_SIZE; + ByteBuffer chunk = ByteBuffer.allocate(length); + DataUtils.readFully(file, newestChunk.block * MVStore.BLOCK_SIZE, chunk); + chunk.rewind(); + target.write(chunk, fileSize); + } catch (IOException e) { + pw.println("ERROR: " + e); + e.printStackTrace(pw); + } finally { + if (file != null) { + try { + file.close(); + } catch (IOException e) { + // ignore + } + } + if (target != null) { + try { + target.close(); + } catch (IOException e) { + // ignore + } + } + } + pw.flush(); + return newestVersion; + } + + @SuppressWarnings({"rawtypes","unchecked"}) + static MVMap.Builder getGenericMapBuilder() { + return (MVMap.Builder)new MVMap.Builder(). + keyType(GenericDataType.INSTANCE). + valueType(GenericDataType.INSTANCE); } /** * A data type that can read any data that is persisted, and converts it to * a byte array. 
*/ - static class GenericDataType implements DataType { + private static class GenericDataType extends BasicDataType { + static GenericDataType INSTANCE = new GenericDataType(); + + private GenericDataType() {} @Override - public int compare(Object a, Object b) { - throw DataUtils.newUnsupportedOperationException("Can not compare"); + public boolean isMemoryEstimationAllowed() { + return false; } @Override - public int getMemory(Object obj) { - return obj == null ? 0 : ((byte[]) obj).length * 8; + public int getMemory(byte[] obj) { + return obj == null ? 0 : obj.length * 8; } @Override - public void write(WriteBuffer buff, Object obj) { - if (obj != null) { - buff.put((byte[]) obj); - } + public byte[][] createStorage(int size) { + return new byte[size][]; } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (Object o : obj) { - write(buff, o); + public void write(WriteBuffer buff, byte[] obj) { + if (obj != null) { + buff.put(obj); } } @Override - public Object read(ByteBuffer buff) { + public byte[] read(ByteBuffer buff) { int len = buff.remaining(); if (len == 0) { return null; @@ -537,15 +738,5 @@ public Object read(ByteBuffer buff) { buff.get(data); return data; } - - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < obj.length; i++) { - obj[i] = read(buff); - } - } - } - - } diff --git a/h2/src/main/org/h2/mvstore/OffHeapStore.java b/h2/src/main/org/h2/mvstore/OffHeapStore.java index 07ff58895b..6dc9d8764c 100644 --- a/h2/src/main/org/h2/mvstore/OffHeapStore.java +++ b/h2/src/main/org/h2/mvstore/OffHeapStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore; @@ -17,7 +17,7 @@ public class OffHeapStore extends FileStore { private final TreeMap memory = - new TreeMap(); + new TreeMap<>(); @Override public void open(String fileName, boolean readOnly, char[] encryptionKey) { @@ -33,12 +33,12 @@ public String toString() { public ByteBuffer readFully(long pos, int len) { Entry memEntry = memory.floorEntry(pos); if (memEntry == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not read from position {0}", pos); } - readCount++; - readBytes += len; + readCount.incrementAndGet(); + readBytes.addAndGet(len); ByteBuffer buff = memEntry.getValue(); ByteBuffer read = buff.duplicate(); int offset = (int) (pos - memEntry.getKey()); @@ -54,7 +54,7 @@ public void free(long pos, int length) { if (buff == null) { // nothing was written (just allocated) } else if (buff.remaining() != length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Partial remove is not supported at position {0}", pos); } @@ -75,19 +75,19 @@ public void writeFully(long pos, ByteBuffer src) { int length = src.remaining(); if (prevPos == pos) { if (prevLength != length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not write to position {0}; " + "partial overwrite is not supported", pos); } - writeCount++; - writeBytes += length; + writeCount.incrementAndGet(); + writeBytes.addAndGet(length); buff.rewind(); buff.put(src); return; } if (prevPos + prevLength > pos) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not write to position {0}; " + "partial overwrite is not supported", pos); @@ -97,8 +97,8 @@ public void writeFully(long pos, ByteBuffer src) { private void writeNewEntry(long pos, ByteBuffer src) { int 
length = src.remaining(); - writeCount++; - writeBytes += length; + writeCount.incrementAndGet(); + writeBytes.addAndGet(length); ByteBuffer buff = ByteBuffer.allocateDirect(length); buff.put(src); buff.rewind(); @@ -107,7 +107,7 @@ private void writeNewEntry(long pos, ByteBuffer src) { @Override public void truncate(long size) { - writeCount++; + writeCount.incrementAndGet(); if (size == 0) { fileSize = 0; memory.clear(); @@ -121,7 +121,7 @@ public void truncate(long size) { } ByteBuffer buff = memory.get(pos); if (buff.capacity() > size) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_READING_FAILED, "Could not truncate to {0}; " + "partial truncate is not supported", pos); diff --git a/h2/src/main/org/h2/mvstore/Page.java b/h2/src/main/org/h2/mvstore/Page.java index 363f006f14..5ff8b3477b 100644 --- a/h2/src/main/org/h2/mvstore/Page.java +++ b/h2/src/main/org/h2/mvstore/Page.java @@ -1,18 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; +import static org.h2.engine.Constants.MEMORY_ARRAY; +import static org.h2.engine.Constants.MEMORY_OBJECT; +import static org.h2.engine.Constants.MEMORY_POINTER; +import static org.h2.mvstore.DataUtils.PAGE_TYPE_LEAF; import java.nio.ByteBuffer; import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - +import java.util.List; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; import org.h2.compress.Compressor; -import org.h2.mvstore.type.DataType; -import org.h2.util.New; +import org.h2.util.Utils; /** * A page (a node or a leaf). @@ -20,32 +22,45 @@ * For b-tree nodes, the key at a given index is larger than the largest key of * the child at the same index. *

    - * File format: - * page length (including length): int + * Serialized format: + * length of a serialized page in bytes (including this field): int * check value: short + * page number (0-based sequential number within a chunk): varInt * map id: varInt * number of keys: varInt * type: byte (0: leaf, 1: node; +2: compressed) + * children of the non-leaf node (1 more than keys) * compressed: bytes saved (varInt) * keys - * leaf: values (one for each key) - * node: children (1 more than keys) + * values of the leaf node (one for each key) */ -public class Page { +public abstract class Page implements Cloneable { /** - * An empty object array. + * Map this page belongs to */ - public static final Object[] EMPTY_OBJECT_ARRAY = new Object[0]; + public final MVMap map; - private final MVMap map; - private long version; - private long pos; + /** + * Position of this page's saved image within a Chunk + * or 0 if this page has not been saved yet + * or 1 if this page has not been saved yet, but already removed + * This "removed" flag is to keep track of pages that concurrently + * changed while they are being stored, in which case the live bookkeeping + * needs to be aware of such cases. + * Field need to be volatile to avoid races between saving thread setting it + * and other thread reading it to access the page. + * On top of this update atomicity is required so removal mark and saved position + * can be set concurrently. + * + * @see DataUtils#getPagePos(int, int, int, int) for field format details + */ + private volatile long pos; /** - * The total entry count of this page and all children. + * Sequential 0-based number of the page within containing chunk. */ - private long totalCount; + public int pageNo = -1; /** * The last result of a find operation is cached. @@ -53,157 +68,229 @@ public class Page { private int cachedCompare; /** - * The estimated memory used. + * The estimated memory used in persistent case, IN_MEMORY marker value otherwise. 
*/ private int memory; + /** + * Amount of used disk space by this page only in persistent case. + */ + private int diskSpaceUsed; + /** * The keys. - *

    - * The array might be larger than needed, to avoid frequent re-sizing. */ - private Object[] keys; + private K[] keys; /** - * The values. - *

    - * The array might be larger than needed, to avoid frequent re-sizing. + * Updater for pos field, which can be updated when page is saved, + * but can be concurrently marked as removed + */ + @SuppressWarnings("rawtypes") + private static final AtomicLongFieldUpdater posUpdater = + AtomicLongFieldUpdater.newUpdater(Page.class, "pos"); + /** + * The estimated number of bytes used per child entry. */ - private Object[] values; + static final int PAGE_MEMORY_CHILD = MEMORY_POINTER + 16; // 16 = two longs /** - * The child page references. - *

    - * The array might be larger than needed, to avoid frequent re-sizing. + * The estimated number of bytes used per base page. + */ + private static final int PAGE_MEMORY = + MEMORY_OBJECT + // this + 2 * MEMORY_POINTER + // map, keys + MEMORY_ARRAY + // Object[] keys + 17; // pos, cachedCompare, memory, removedInMemory + /** + * The estimated number of bytes used per empty internal page object. */ - private PageReference[] children; + static final int PAGE_NODE_MEMORY = + PAGE_MEMORY + // super + MEMORY_POINTER + // children + MEMORY_ARRAY + // Object[] children + 8; // totalCount /** - * Whether the page is an in-memory (not stored, or not yet stored) page, - * and it is removed. This is to keep track of pages that concurrently - * changed while they are being stored, in which case the live bookkeeping - * needs to be aware of such cases. + * The estimated number of bytes used per empty leaf page. + */ + static final int PAGE_LEAF_MEMORY = + PAGE_MEMORY + // super + MEMORY_POINTER + // values + MEMORY_ARRAY; // Object[] values + + /** + * Marker value for memory field, meaning that memory accounting is replaced by key count. */ - private volatile boolean removedInMemory; + private static final int IN_MEMORY = Integer.MIN_VALUE; + + @SuppressWarnings("rawtypes") + private static final PageReference[] SINGLE_EMPTY = { PageReference.EMPTY }; + + + Page(MVMap map) { + this.map = map; + } + + Page(MVMap map, Page source) { + this(map, source.keys); + memory = source.memory; + } - Page(MVMap map, long version) { + Page(MVMap map, K[] keys) { this.map = map; - this.version = version; + this.keys = keys; } /** - * Create a new, empty page. + * Create a new, empty leaf page. 
+ * + * @param key type + * @param value type * * @param map the map - * @param version the version * @return the new page */ - static Page createEmpty(MVMap map, long version) { - return create(map, version, - EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY, - null, - 0, DataUtils.PAGE_MEMORY); + static Page createEmptyLeaf(MVMap map) { + return createLeaf(map, map.getKeyType().createStorage(0), + map.getValueType().createStorage(0), PAGE_LEAF_MEMORY); } /** - * Create a new page. The arrays are not cloned. + * Create a new, empty internal node page. + * + * @param key type + * @param value type * * @param map the map - * @param version the version + * @return the new page + */ + @SuppressWarnings("unchecked") + static Page createEmptyNode(MVMap map) { + return createNode(map, map.getKeyType().createStorage(0), SINGLE_EMPTY, 0, + PAGE_NODE_MEMORY + MEMORY_POINTER + PAGE_MEMORY_CHILD); // there is always one child + } + + /** + * Create a new non-leaf page. The arrays are not cloned. + * + * @param the key class + * @param the value class + * @param map the map * @param keys the keys - * @param values the values * @param children the child page positions * @param totalCount the total number of keys * @param memory the memory used in bytes * @return the page */ - public static Page create(MVMap map, long version, - Object[] keys, Object[] values, PageReference[] children, - long totalCount, int memory) { - Page p = new Page(map, version); - // the position is 0 - p.keys = keys; - p.values = values; - p.children = children; - p.totalCount = totalCount; - if (memory == 0) { - p.recalculateMemory(); - } else { - p.addMemory(memory); - } - MVStore store = map.store; - if (store != null) { - store.registerUnsavedPage(p.memory); - } - return p; + public static Page createNode(MVMap map, K[] keys, PageReference[] children, + long totalCount, int memory) { + assert keys != null; + Page page = new NonLeaf<>(map, keys, children, totalCount); + page.initMemoryAccount(memory); + return 
page; } /** - * Create a copy of a page. + * Create a new leaf page. The arrays are not cloned. + * + * @param key type + * @param value type * * @param map the map - * @param version the version - * @param source the source page + * @param keys the keys + * @param values the values + * @param memory the memory used in bytes * @return the page */ - public static Page create(MVMap map, long version, Page source) { - Page p = new Page(map, version); - // the position is 0 - p.keys = source.keys; - p.values = source.values; - p.children = source.children; - p.totalCount = source.totalCount; - p.memory = source.memory; - MVStore store = map.store; - if (store != null) { - store.registerUnsavedPage(p.memory); + static Page createLeaf(MVMap map, K[] keys, V[] values, int memory) { + assert keys != null; + Page page = new Leaf<>(map, keys, values); + page.initMemoryAccount(memory); + return page; + } + + private void initMemoryAccount(int memoryCount) { + if(!map.isPersistent()) { + memory = IN_MEMORY; + } else if (memoryCount == 0) { + recalculateMemory(); + } else { + addMemory(memoryCount); + assert memoryCount == getMemory(); + } + } + + /** + * Get the value for the given key, or null if not found. + * Search is done in the tree rooted at given page. + * + * @param key type + * @param value type + * + * @param key the key + * @param p the root page + * @return the value, or null if not found + */ + static V get(Page p, K key) { + while (true) { + int index = p.binarySearch(key); + if (p.isLeaf()) { + return index >= 0 ? p.getValue(index) : null; + } else if (index++ < 0) { + index = -index; + } + p = p.getChildPage(index); } - return p; } /** * Read a page. 
* - * @param fileStore the file store + * @param key type + * @param value type + * + * @param buff ByteBuffer containing serialized page info * @param pos the position * @param map the map - * @param filePos the position in the file - * @param maxPos the maximum position (the end of the chunk) * @return the page */ - static Page read(FileStore fileStore, long pos, MVMap map, - long filePos, long maxPos) { - ByteBuffer buff; - int maxLength = DataUtils.getPageMaxLength(pos); - if (maxLength == DataUtils.PAGE_LARGE) { - buff = fileStore.readFully(filePos, 128); - maxLength = buff.getInt(); - // read the first bytes again - } - maxLength = (int) Math.min(maxPos - filePos, maxLength); - int length = maxLength; - if (length < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Illegal page length {0} reading at {1}; max pos {2} ", - length, filePos, maxPos); - } - buff = fileStore.readFully(filePos, length); - Page p = new Page(map, 0); + static Page read(ByteBuffer buff, long pos, MVMap map) { + boolean leaf = (DataUtils.getPageType(pos) & 1) == PAGE_TYPE_LEAF; + Page p = leaf ? new Leaf<>(map) : new NonLeaf<>(map); p.pos = pos; - int chunkId = DataUtils.getPageChunkId(pos); - int offset = DataUtils.getPageOffset(pos); - p.read(buff, chunkId, offset, maxLength); + p.read(buff); return p; } + /** + * Get the id of the page's owner map + * @return id + */ + public final int getMapId() { + return map.getId(); + } + + /** + * Create a copy of this page with potentially different owning map. + * This is used exclusively during bulk map copying. + * Child page references for nodes are cleared (re-pointed to an empty page) + * to be filled-in later to copying procedure. 
This way it can be saved + * mid-process without tree integrity violation + * + * @param map new map to own resulting page + * @param eraseChildrenRefs whether cloned Page should have no child references or keep originals + * @return the page + */ + abstract Page copy(MVMap map, boolean eraseChildrenRefs); + /** * Get the key at the given index. * * @param index the index * @return the key */ - public Object getKey(int index) { + public K getKey(int index) { return keys[index]; } @@ -213,10 +300,7 @@ public Object getKey(int index) { * @param index the index * @return the child page */ - public Page getChildPage(int index) { - PageReference ref = children[index]; - return ref.page != null ? ref.page : map.readPage(ref.pos); - } + public abstract Page getChildPage(int index); /** * Get the position of the child. @@ -224,9 +308,7 @@ public Page getChildPage(int index) { * @param index the index * @return the position */ - public long getChildPagePos(int index) { - return children[index].pos; - } + public abstract long getChildPagePos(int index); /** * Get the value at the given index. @@ -234,16 +316,14 @@ public long getChildPagePos(int index) { * @param index the index * @return the value */ - public Object getValue(int index) { - return values[index]; - } + public abstract V getValue(int index); /** * Get the number of keys in this page. 
* * @return the number of keys */ - public int getKeyCount() { + public final int getKeyCount() { return keys.length; } @@ -252,64 +332,65 @@ public int getKeyCount() { * * @return true if it is a leaf */ - public boolean isLeaf() { - return children == null; + public final boolean isLeaf() { + return getNodeType() == PAGE_TYPE_LEAF; } + public abstract int getNodeType(); + /** * Get the position of the page * * @return the position */ - public long getPos() { + public final long getPos() { return pos; } @Override public String toString() { StringBuilder buff = new StringBuilder(); + dump(buff); + return buff.toString(); + } + + /** + * Dump debug data for this page. + * + * @param buff append buffer + */ + protected void dump(StringBuilder buff) { buff.append("id: ").append(System.identityHashCode(this)).append('\n'); - buff.append("version: ").append(Long.toHexString(version)).append("\n"); - buff.append("pos: ").append(Long.toHexString(pos)).append("\n"); - if (pos != 0) { + buff.append("pos: ").append(Long.toHexString(pos)).append('\n'); + if (isSaved()) { int chunkId = DataUtils.getPageChunkId(pos); - buff.append("chunk: ").append(Long.toHexString(chunkId)).append("\n"); + buff.append("chunk: ").append(Long.toHexString(chunkId)).append('\n'); } - for (int i = 0; i <= keys.length; i++) { - if (i > 0) { - buff.append(" "); - } - if (children != null) { - buff.append("[" + Long.toHexString(children[i].pos) + "] "); - } - if (i < keys.length) { - buff.append(keys[i]); - if (values != null) { - buff.append(':'); - buff.append(values[i]); - } - } - } - return buff.toString(); } /** * Create a copy of this page. 
* - * @param version the new version - * @return a page with the given version - */ - public Page copy(long version) { - Page newPage = create(map, version, - keys, values, - children, totalCount, - getMemory()); - // mark the old as deleted - removePage(); - newPage.cachedCompare = cachedCompare; + * @return a mutable copy of this page + */ + public final Page copy() { + Page newPage = clone(); + newPage.pos = 0; return newPage; } + @SuppressWarnings("unchecked") + @Override + protected final Page clone() { + Page clone; + try { + clone = (Page) super.clone(); + } catch (CloneNotSupportedException impossible) { + throw new RuntimeException(impossible); + } + return clone; + } + /** * Search the key in this page using a binary search. Instead of always * starting the search in the middle, the last found index is cached. @@ -321,45 +402,10 @@ public Page copy(long version) { * @param key the key * @return the value or null */ - public int binarySearch(Object key) { - int low = 0, high = keys.length - 1; - // the cached index minus one, so that - // for the first time (when cachedCompare is 0), - // the default value is used - int x = cachedCompare - 1; - if (x < 0 || x > high) { - x = high >>> 1; - } - Object[] k = keys; - while (low <= high) { - int compare = map.compare(key, k[x]); - if (compare > 0) { - low = x + 1; - } else if (compare < 0) { - high = x - 1; - } else { - cachedCompare = x + 1; - return x; - } - x = (low + high) >>> 1; - } - cachedCompare = low; - return -(low + 1); - - // regular binary search (without caching) - // int low = 0, high = keys.length - 1; - // while (low <= high) { - // int x = (low + high) >>> 1; - // int compare = map.compare(key, keys[x]); - // if (compare > 0) { - // low = x + 1; - // } else if (compare < 0) { - // high = x - 1; - // } else { - // return x; - // } - // } - // return -(low + 1); + int binarySearch(K key) { + int res = map.getKeyType().binarySearch(key, keys, getKeyCount(), cachedCompare); + cachedCompare = res < 
0 ? ~res : res + 1; + return res; } /** @@ -368,64 +414,47 @@ public int binarySearch(Object key) { * @param at the split index * @return the page with the entries after the split index */ - Page split(int at) { - return isLeaf() ? splitLeaf(at) : splitNode(at); - } + abstract Page split(int at); - private Page splitLeaf(int at) { - int a = at, b = keys.length - a; - Object[] aKeys = new Object[a]; - Object[] bKeys = new Object[b]; - System.arraycopy(keys, 0, aKeys, 0, a); - System.arraycopy(keys, a, bKeys, 0, b); + /** + * Split the current keys array into two arrays. + * + * @param aCount size of the first array. + * @param bCount size of the second array/ + * @return the second array. + */ + final K[] splitKeys(int aCount, int bCount) { + assert aCount + bCount <= getKeyCount(); + K[] aKeys = createKeyStorage(aCount); + K[] bKeys = createKeyStorage(bCount); + System.arraycopy(keys, 0, aKeys, 0, aCount); + System.arraycopy(keys, getKeyCount() - bCount, bKeys, 0, bCount); keys = aKeys; - Object[] aValues = new Object[a]; - Object[] bValues = new Object[b]; - bValues = new Object[b]; - System.arraycopy(values, 0, aValues, 0, a); - System.arraycopy(values, a, bValues, 0, b); - values = aValues; - totalCount = a; - Page newPage = create(map, version, - bKeys, bValues, - null, - bKeys.length, 0); - recalculateMemory(); - newPage.recalculateMemory(); - return newPage; + return bKeys; } - private Page splitNode(int at) { - int a = at, b = keys.length - a; - - Object[] aKeys = new Object[a]; - Object[] bKeys = new Object[b - 1]; - System.arraycopy(keys, 0, aKeys, 0, a); - System.arraycopy(keys, a + 1, bKeys, 0, b - 1); - keys = aKeys; + /** + * Append additional key/value mappings to this Page. + * New mappings suppose to be in correct key order. 
+ * + * @param extraKeyCount number of mappings to be added + * @param extraKeys to be added + * @param extraValues to be added + */ + abstract void expand(int extraKeyCount, K[] extraKeys, V[] extraValues); - PageReference[] aChildren = new PageReference[a + 1]; - PageReference[] bChildren = new PageReference[b]; - System.arraycopy(children, 0, aChildren, 0, a + 1); - System.arraycopy(children, a + 1, bChildren, 0, b); - children = aChildren; - - long t = 0; - for (PageReference x : aChildren) { - t += x.count; - } - totalCount = t; - t = 0; - for (PageReference x : bChildren) { - t += x.count; - } - Page newPage = create(map, version, - bKeys, null, - bChildren, - t, 0); - recalculateMemory(); - newPage.recalculateMemory(); - return newPage; + /** + * Expand the keys array. + * + * @param extraKeyCount number of extra key entries to create + * @param extraKeys extra key values + */ + final void expandKeys(int extraKeyCount, K[] extraKeys) { + int keyCount = getKeyCount(); + K[] newKeys = createKeyStorage(keyCount + extraKeyCount); + System.arraycopy(keys, 0, newKeys, 0, keyCount); + System.arraycopy(extraKeys, 0, newKeys, keyCount, extraKeyCount); + keys = newKeys; } /** @@ -433,34 +462,15 @@ private Page splitNode(int at) { * * @return the number of key-value pairs */ - public long getTotalCount() { - if (MVStore.ASSERT) { - long check = 0; - if (isLeaf()) { - check = keys.length; - } else { - for (PageReference x : children) { - check += x.count; - } - } - if (check != totalCount) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, - "Expected: {0} got: {1}", check, totalCount); - } - } - return totalCount; - } + public abstract long getTotalCount(); /** - * Get the descendant counts for the given child. + * Get the number of key-value pairs for a given child. 
* * @param index the child index * @return the descendant count */ - long getCounts(int index) { - return children[index].count; - } + abstract long getCounts(int index); /** * Replace the child page. @@ -468,22 +478,7 @@ long getCounts(int index) { * @param index the index * @param c the new child page */ - public void setChild(int index, Page c) { - if (c == null) { - long oldCount = children[index].count; - children = Arrays.copyOf(children, children.length); - PageReference ref = new PageReference(null, 0, 0); - children[index] = ref; - totalCount -= oldCount; - } else if (c != children[index].page || - c.getPos() != children[index].pos) { - long oldCount = children[index].count; - children = Arrays.copyOf(children, children.length); - PageReference ref = new PageReference(c, c.pos, c.totalCount); - children[index] = ref; - totalCount += c.totalCount - oldCount; - } - } + public abstract void setChild(int index, Page c); /** * Replace the key at an index in this page. @@ -491,15 +486,18 @@ public void setChild(int index, Page c) { * @param index the index * @param key the new key */ - public void setKey(int index, Object key) { - keys = Arrays.copyOf(keys, keys.length); - Object old = keys[index]; - DataType keyType = map.getKeyType(); - int mem = keyType.getMemory(key); - if (old != null) { - mem -= keyType.getMemory(old); + public final void setKey(int index, K key) { + keys = keys.clone(); + if(isPersistent()) { + K old = keys[index]; + if (!map.isMemoryEstimationAllowed() || old == null) { + int mem = map.evaluateMemoryForKey(key); + if (old != null) { + mem -= map.evaluateMemoryForKey(old); + } + addMemory(mem); + } } - addMemory(mem); keys[index] = key; } @@ -510,39 +508,7 @@ public void setKey(int index, Object key) { * @param value the new value * @return the old value */ - public Object setValue(int index, Object value) { - Object old = values[index]; - values = Arrays.copyOf(values, values.length); - DataType valueType = map.getValueType(); - 
addMemory(valueType.getMemory(value) - - valueType.getMemory(old)); - values[index] = value; - return old; - } - - /** - * Remove this page and all child pages. - */ - void removeAllRecursive() { - if (children != null) { - for (int i = 0, size = map.getChildPageCount(this); i < size; i++) { - PageReference ref = children[i]; - if (ref.page != null) { - ref.page.removeAllRecursive(); - } else { - long c = children[i].pos; - int type = DataUtils.getPageType(c); - if (type == DataUtils.PAGE_TYPE_LEAF) { - int mem = DataUtils.getPageMaxLength(c); - map.removePage(c, mem); - } else { - map.readPage(c).removeAllRecursive(); - } - } - } - } - removePage(); - } + public abstract V setValue(int index, V value); /** * Insert a key-value pair into this leaf. @@ -551,20 +517,7 @@ void removeAllRecursive() { * @param key the key * @param value the value */ - public void insertLeaf(int index, Object key, Object value) { - int len = keys.length + 1; - Object[] newKeys = new Object[len]; - DataUtils.copyWithGap(keys, newKeys, len - 1, index); - keys = newKeys; - Object[] newValues = new Object[len]; - DataUtils.copyWithGap(values, newValues, len - 1, index); - values = newValues; - keys[index] = key; - values[index] = value; - totalCount++; - addMemory(map.getKeyType().getMemory(key) + - map.getValueType().getMemory(value)); - } + public abstract void insertLeaf(int index, K key, V value); /** * Insert a child page into this node. 
@@ -573,23 +526,26 @@ public void insertLeaf(int index, Object key, Object value) { * @param key the key * @param childPage the child page */ - public void insertNode(int index, Object key, Page childPage) { + public abstract void insertNode(int index, K key, Page childPage); - Object[] newKeys = new Object[keys.length + 1]; - DataUtils.copyWithGap(keys, newKeys, keys.length, index); - newKeys[index] = key; + /** + * Insert a key into the key array + * + * @param index index to insert at + * @param key the key value + */ + final void insertKey(int index, K key) { + int keyCount = getKeyCount(); + assert index <= keyCount : index + " > " + keyCount; + K[] newKeys = createKeyStorage(keyCount + 1); + DataUtils.copyWithGap(keys, newKeys, keyCount, index); keys = newKeys; - int childCount = children.length; - PageReference[] newChildren = new PageReference[childCount + 1]; - DataUtils.copyWithGap(children, newChildren, childCount, index); - newChildren[index] = new PageReference( - childPage, childPage.getPos(), childPage.totalCount); - children = newChildren; + keys[index] = key; - totalCount += childPage.totalCount; - addMemory(map.getKeyType().getMemory(key) + - DataUtils.PAGE_MEMORY_CHILD); + if (isPersistent()) { + addMemory(MEMORY_POINTER + map.evaluateMemoryForKey(key)); + } } /** @@ -598,87 +554,75 @@ public void insertNode(int index, Object key, Page childPage) { * @param index the index */ public void remove(int index) { - int keyLength = keys.length; - int keyIndex = index >= keyLength ? 
index - 1 : index; - Object old = keys[keyIndex]; - addMemory(-map.getKeyType().getMemory(old)); - Object[] newKeys = new Object[keyLength - 1]; - DataUtils.copyExcept(keys, newKeys, keyLength, keyIndex); - keys = newKeys; - - if (values != null) { - old = values[index]; - addMemory(-map.getValueType().getMemory(old)); - Object[] newValues = new Object[keyLength - 1]; - DataUtils.copyExcept(values, newValues, keyLength, index); - values = newValues; - totalCount--; + int keyCount = getKeyCount(); + if (index == keyCount) { + --index; } - if (children != null) { - addMemory(-DataUtils.PAGE_MEMORY_CHILD); - long countOffset = children[index].count; - - int childCount = children.length; - PageReference[] newChildren = new PageReference[childCount - 1]; - DataUtils.copyExcept(children, newChildren, childCount, index); - children = newChildren; - - totalCount -= countOffset; + if(isPersistent()) { + if (!map.isMemoryEstimationAllowed()) { + K old = getKey(index); + addMemory(-MEMORY_POINTER - map.evaluateMemoryForKey(old)); + } } + K[] newKeys = createKeyStorage(keyCount - 1); + DataUtils.copyExcept(keys, newKeys, keyCount, index); + keys = newKeys; } /** * Read the page from the buffer. 
* - * @param buff the buffer - * @param chunkId the chunk id - * @param offset the offset within the chunk - * @param maxLength the maximum length + * @param buff the buffer to read from */ - void read(ByteBuffer buff, int chunkId, int offset, int maxLength) { + private void read(ByteBuffer buff) { + int chunkId = DataUtils.getPageChunkId(pos); + int offset = DataUtils.getPageOffset(pos); + int start = buff.position(); - int pageLength = buff.getInt(); - if (pageLength > maxLength) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected page length =< {1}, got {2}", - chunkId, maxLength, pageLength); + int pageLength = buff.getInt(); // does not include optional part (pageNo) + int remaining = buff.remaining() + 4; + if (pageLength > remaining || pageLength < 4) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected page length 4..{1}, got {2}", chunkId, remaining, + pageLength); } - buff.limit(start + pageLength); + short check = buff.getShort(); - int mapId = DataUtils.readVarInt(buff); - if (mapId != map.getId()) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected map id {1}, got {2}", - chunkId, map.getId(), mapId); - } int checkTest = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(offset) ^ DataUtils.getCheckValue(pageLength); if (check != (short) checkTest) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected check value {1}, got {2}", - chunkId, checkTest, check); + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected check value {1}, got {2}", chunkId, checkTest, check); + } + + pageNo = DataUtils.readVarInt(buff); + if (pageNo < 0) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, got negative 
page No {1}", chunkId, pageNo); } - int len = DataUtils.readVarInt(buff); - keys = new Object[len]; + + int mapId = DataUtils.readVarInt(buff); + if (mapId != map.getId()) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected map id {1}, got {2}", chunkId, map.getId(), mapId); + } + + int keyCount = DataUtils.readVarInt(buff); + keys = createKeyStorage(keyCount); int type = buff.get(); - boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE; - if (node) { - children = new PageReference[len + 1]; - long[] p = new long[len + 1]; - for (int i = 0; i <= len; i++) { - p[i] = buff.getLong(); - } - long total = 0; - for (int i = 0; i <= len; i++) { - long s = DataUtils.readVarLong(buff); - total += s; - children[i] = new PageReference(null, p[i], s); - } - totalCount = total; + if(isLeaf() != ((type & 1) == PAGE_TYPE_LEAF)) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_FILE_CORRUPT, + "File corrupted in chunk {0}, expected node type {1}, got {2}", + chunkId, isLeaf() ? 
"0" : "1" , type); + } + + // to restrain hacky GenericDataType, which grabs the whole remainder of the buffer + buff.limit(start + pageLength); + + if (!isLeaf()) { + readPayLoad(buff); } boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0; if (compressed) { @@ -690,52 +634,91 @@ void read(ByteBuffer buff, int chunkId, int offset, int maxLength) { compressor = map.getStore().getCompressorFast(); } int lenAdd = DataUtils.readVarInt(buff); - int compLen = pageLength + start - buff.position(); - byte[] comp = DataUtils.newBytes(compLen); - buff.get(comp); + int compLen = buff.remaining(); + byte[] comp; + int pos = 0; + if (buff.hasArray()) { + comp = buff.array(); + pos = buff.arrayOffset() + buff.position(); + } else { + comp = Utils.newBytes(compLen); + buff.get(comp); + } int l = compLen + lenAdd; buff = ByteBuffer.allocate(l); - compressor.expand(comp, 0, compLen, buff.array(), + compressor.expand(comp, pos, compLen, buff.array(), buff.arrayOffset(), l); } - map.getKeyType().read(buff, keys, len, true); - if (!node) { - values = new Object[len]; - map.getValueType().read(buff, values, len, false); - totalCount = len; + map.getKeyType().read(buff, keys, keyCount); + if (isLeaf()) { + readPayLoad(buff); } + diskSpaceUsed = pageLength; recalculateMemory(); } + /** + * Read the page payload from the buffer. + * + * @param buff the buffer + */ + protected abstract void readPayLoad(ByteBuffer buff); + + public final boolean isSaved() { + return DataUtils.isPageSaved(pos); + } + + public final boolean isRemoved() { + return DataUtils.isPageRemoved(pos); + } + + /** + * Mark this page as removed "in memory". That means that only adjustment of + * "unsaved memory" amount is required. On the other hand, if page was + * persisted, it's removal should be reflected in occupancy of the + * containing chunk. + * + * @return true if it was marked by this call or has been marked already, + * false if page has been saved already. 
+ */ + private boolean markAsRemoved() { + assert getTotalCount() > 0 : this; + long pagePos; + do { + pagePos = pos; + if (DataUtils.isPageSaved(pagePos)) { + return false; + } + assert !DataUtils.isPageRemoved(pagePos); + } while (!posUpdater.compareAndSet(this, 0L, 1L)); + return true; + } + /** * Store the page and update the position. * * @param chunk the chunk * @param buff the target buffer + * @param toc prospective table of content * @return the position of the buffer just after the type */ - private int write(Chunk chunk, WriteBuffer buff) { + protected final int write(Chunk chunk, WriteBuffer buff, List toc) { + pageNo = toc.size(); + int keyCount = getKeyCount(); int start = buff.position(); - int len = keys.length; - int type = children != null ? DataUtils.PAGE_TYPE_NODE - : DataUtils.PAGE_TYPE_LEAF; - buff.putInt(0). - putShort((byte) 0). - putVarInt(map.getId()). - putVarInt(len); + buff.putInt(0) // placeholder for pageLength + .putShort((byte)0) // placeholder for check + .putVarInt(pageNo) + .putVarInt(map.getId()) + .putVarInt(keyCount); int typePos = buff.position(); - buff.put((byte) type); - if (type == DataUtils.PAGE_TYPE_NODE) { - writeChildren(buff); - for (int i = 0; i <= len; i++) { - buff.putVarLong(children[i].count); - } - } + int type = isLeaf() ? 
PAGE_TYPE_LEAF : DataUtils.PAGE_TYPE_NODE; + buff.put((byte)type); + int childrenPos = buff.position(); + writeChildren(buff, true); int compressStart = buff.position(); - map.getKeyType().write(buff, keys, len, true); - if (type == DataUtils.PAGE_TYPE_LEAF) { - map.getValueType().write(buff, values, len, false); - } + map.getKeyType().write(buff, keys, keyCount); + writeValues(buff); MVStore store = map.getStore(); int expLen = buff.position() - compressStart; if (expLen > 16) { @@ -744,382 +727,936 @@ private int write(Chunk chunk, WriteBuffer buff) { Compressor compressor; int compressType; if (compressionLevel == 1) { - compressor = map.getStore().getCompressorFast(); + compressor = store.getCompressorFast(); compressType = DataUtils.PAGE_COMPRESSED; } else { - compressor = map.getStore().getCompressorHigh(); + compressor = store.getCompressorHigh(); compressType = DataUtils.PAGE_COMPRESSED_HIGH; } - byte[] exp = new byte[expLen]; - buff.position(compressStart).get(exp); byte[] comp = new byte[expLen * 2]; - int compLen = compressor.compress(exp, expLen, comp, 0); - int plus = DataUtils.getVarIntLen(compLen - expLen); + ByteBuffer byteBuffer = buff.getBuffer(); + int pos = 0; + byte[] exp; + if (byteBuffer.hasArray()) { + exp = byteBuffer.array(); + pos = byteBuffer.arrayOffset() + compressStart; + } else { + exp = Utils.newBytes(expLen); + buff.position(compressStart).get(exp); + } + int compLen = compressor.compress(exp, pos, expLen, comp, 0); + int plus = DataUtils.getVarIntLen(expLen - compLen); if (compLen + plus < expLen) { - buff.position(typePos). - put((byte) (type + compressType)); - buff.position(compressStart). - putVarInt(expLen - compLen). 
- put(comp, 0, compLen); + buff.position(typePos) + .put((byte) (type | compressType)); + buff.position(compressStart) + .putVarInt(expLen - compLen) + .put(comp, 0, compLen); } } } int pageLength = buff.position() - start; + long tocElement = DataUtils.getTocElement(getMapId(), start, buff.position() - start, type); + toc.add(tocElement); int chunkId = chunk.id; int check = DataUtils.getCheckValue(chunkId) ^ DataUtils.getCheckValue(start) ^ DataUtils.getCheckValue(pageLength); buff.putInt(start, pageLength). putShort(start + 4, (short) check); - if (pos != 0) { - throw DataUtils.newIllegalStateException( + if (isSaved()) { + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Page already stored"); } - pos = DataUtils.getPagePos(chunkId, start, pageLength, type); - store.cachePage(pos, this, getMemory()); + long pagePos = DataUtils.getPagePos(chunkId, tocElement); + boolean isDeleted = isRemoved(); + while (!posUpdater.compareAndSet(this, isDeleted ? 1L : 0L, pagePos)) { + isDeleted = isRemoved(); + } + store.cachePage(this); if (type == DataUtils.PAGE_TYPE_NODE) { // cache again - this will make sure nodes stays in the cache // for a longer time - store.cachePage(pos, this, getMemory()); - } - long max = DataUtils.getPageMaxLength(pos); - chunk.maxLen += max; - chunk.maxLenLive += max; - chunk.pageCount++; - chunk.pageCountLive++; - if (removedInMemory) { - // if the page was removed _before_ the position was assigned, we - // need to mark it removed here, so the fields are updated - // when the next chunk is stored - map.removePage(pos, memory); - } - return typePos + 1; - } - - private void writeChildren(WriteBuffer buff) { - int len = keys.length; - for (int i = 0; i <= len; i++) { - buff.putLong(children[i].pos); + store.cachePage(this); + } + int pageLengthEncoded = DataUtils.getPageMaxLength(pos); + boolean singleWriter = map.isSingleWriter(); + chunk.accountForWrittenPage(pageLengthEncoded, singleWriter); + if (isDeleted) { + 
store.accountForRemovedPage(pagePos, chunk.version + 1, singleWriter, pageNo); } + diskSpaceUsed = pageLengthEncoded != DataUtils.PAGE_LARGE ? pageLengthEncoded : pageLength; + return childrenPos; } + /** + * Write values that the buffer contains to the buff. + * + * @param buff the target buffer + */ + protected abstract void writeValues(WriteBuffer buff); + + /** + * Write page children to the buff. + * + * @param buff the target buffer + * @param withCounts true if the descendant counts should be written + */ + protected abstract void writeChildren(WriteBuffer buff, boolean withCounts); + /** * Store this page and all children that are changed, in reverse order, and * update the position and the children. - * * @param chunk the chunk * @param buff the target buffer + * @param toc prospective table of content */ - void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) { - if (pos != 0) { - // already stored before - return; - } - int patch = write(chunk, buff); - if (!isLeaf()) { - int len = children.length; - for (int i = 0; i < len; i++) { - Page p = children[i].page; - if (p != null) { - p.writeUnsavedRecursive(chunk, buff); - children[i] = new PageReference(p, p.getPos(), p.totalCount); - } - } - int old = buff.position(); - buff.position(patch); - writeChildren(buff); - buff.position(old); + abstract void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc); + + /** + * Unlink the children recursively after all data is written. + */ + abstract void releaseSavedPages(); + + public abstract int getRawChildPageCount(); + + protected final boolean isPersistent() { + return memory != IN_MEMORY; + } + + public final int getMemory() { + if (isPersistent()) { +// assert memory == calculateMemory() : +// "Memory calculation error " + memory + " != " + calculateMemory(); + return memory; } + return 0; } /** - * Unlink the children recursively after all data is written. + * Amount of used disk space in persistent case including child pages. 
+ * + * @return amount of used disk space in persistent case */ - void writeEnd() { - if (isLeaf()) { - return; - } - int len = children.length; - for (int i = 0; i < len; i++) { - PageReference ref = children[i]; - if (ref.page != null) { - if (ref.page.getPos() == 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, "Page not written"); + public long getDiskSpaceUsed() { + long r = 0; + if (isPersistent()) { + r += diskSpaceUsed; + if (!isLeaf()) { + for (int i = 0; i < getRawChildPageCount(); i++) { + long pos = getChildPagePos(i); + if (pos != 0) { + r += getChildPage(i).getDiskSpaceUsed(); + } } - ref.page.writeEnd(); - children[i] = new PageReference(null, ref.pos, ref.count); } } + return r; } - long getVersion() { - return version; + /** + * Increase estimated memory used in persistent case. + * + * @param mem additional memory size. + */ + final void addMemory(int mem) { + memory += mem; + assert memory >= 0; } - public int getRawChildPageCount() { - return children.length; + /** + * Recalculate estimated memory used in persistent case. + */ + final void recalculateMemory() { + assert isPersistent(); + memory = calculateMemory(); } - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } - if (other instanceof Page) { - if (pos != 0 && ((Page) other).pos == pos) { - return true; - } - return this == other; + /** + * Calculate estimated memory used in persistent case. + * + * @return memory in bytes + */ + protected int calculateMemory() { +//* + return map.evaluateMemoryForKeys(keys, getKeyCount()); +/*/ + int keyCount = getKeyCount(); + int mem = keyCount * MEMORY_POINTER; + DataType keyType = map.getKeyType(); + for (int i = 0; i < keyCount; i++) { + mem += getMemory(keyType, keys[i]); } - return false; + return mem; +//*/ } - @Override - public int hashCode() { - return pos != 0 ? 
(int) (pos | (pos >>> 32)) : super.hashCode(); + public boolean isComplete() { + return true; } - public int getMemory() { - if (MVStore.ASSERT) { - int mem = memory; - recalculateMemory(); - if (mem != memory) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_INTERNAL, "Memory calculation error"); + /** + * Called when done with copying page. + */ + public void setComplete() {} + + /** + * Make accounting changes (chunk occupancy or "unsaved" RAM), related to + * this page removal. + * + * @param version at which page was removed + * @return amount (negative), by which "unsaved memory" should be adjusted, + * if page is unsaved one, and 0 for page that was already saved, or + * in case of non-persistent map + */ + public final int removePage(long version) { + if(isPersistent() && getTotalCount() > 0) { + MVStore store = map.store; + if (!markAsRemoved()) { // only if it has been saved already + long pagePos = pos; + store.accountForRemovedPage(pagePos, version, map.isSingleWriter(), pageNo); + } else { + return -memory; } } - return memory; + return 0; } - private void addMemory(int mem) { - memory += mem; - } + /** + * Extend path from a given CursorPos chain to "prepend point" in a B-tree, rooted at this Page. 
+ * + * @param cursorPos presumably pointing to this Page (null if real root), to build upon + * @return new head of the CursorPos chain + */ + public abstract CursorPos getPrependCursorPos(CursorPos cursorPos); - private void recalculateMemory() { - int mem = DataUtils.PAGE_MEMORY; - DataType keyType = map.getKeyType(); - for (int i = 0; i < keys.length; i++) { - mem += keyType.getMemory(keys[i]); - } - if (this.isLeaf()) { - DataType valueType = map.getValueType(); - for (int i = 0; i < keys.length; i++) { - mem += valueType.getMemory(values[i]); - } - } else { - mem += this.getRawChildPageCount() * DataUtils.PAGE_MEMORY_CHILD; - } - addMemory(mem - memory); + /** + * Extend path from a given CursorPos chain to "append point" in a B-tree, rooted at this Page. + * + * @param cursorPos presumably pointing to this Page (null if real root), to build upon + * @return new head of the CursorPos chain + */ + public abstract CursorPos getAppendCursorPos(CursorPos cursorPos); + + /** + * Remove all page data recursively. + * @param version at which page got removed + * @return adjustment for "unsaved memory" amount + */ + public abstract int removeAllRecursive(long version); + + /** + * Create array for keys storage. + * + * @param size number of entries + * @return values array + */ + public final K[] createKeyStorage(int size) { + return map.getKeyType().createStorage(size); } - void setVersion(long version) { - this.version = version; + /** + * Create array for values storage. + * + * @param size number of entries + * @return values array + */ + final V[] createValueStorage(int size) { + return map.getValueType().createStorage(size); } /** - * Remove the page. + * Create an array of page references. 
+ * + * @param the key class + * @param the value class + * @param size the number of entries + * @return the array */ - public void removePage() { - long p = pos; - if (p == 0) { - removedInMemory = true; - } - map.removePage(p, memory); + @SuppressWarnings("unchecked") + public static PageReference[] createRefStorage(int size) { + return new PageReference[size]; } /** * A pointer to a page, either in-memory or using a page position. */ - public static class PageReference { + public static final class PageReference { + + /** + * Singleton object used when arrays of PageReference have not yet been filled. + */ + @SuppressWarnings("rawtypes") + static final PageReference EMPTY = new PageReference<>(null, 0, 0); /** * The position, if known, or 0. */ - final long pos; + private long pos; /** * The page, if in memory, or null. */ - final Page page; + private Page page; /** * The descendant count for this child page. */ final long count; - public PageReference(Page page, long pos, long count) { + /** + * Get an empty page reference. + * + * @param the key class + * @param the value class + * @return the page reference + */ + @SuppressWarnings("unchecked") + public static PageReference empty() { + return EMPTY; + } + + public PageReference(Page page) { + this(page, page.getPos(), page.getTotalCount()); + } + + PageReference(long pos, long count) { + this(null, pos, count); + assert DataUtils.isPageSaved(pos); + } + + private PageReference(Page page, long pos, long count) { this.page = page; this.pos = pos; this.count = count; } - } - - /** - * Contains information about which other pages are referenced (directly or - * indirectly) by the given page. This is a subset of the page data, for - * pages of type node. This information is used for garbage collection (to - * quickly find out which chunks are still in use). - */ - public static class PageChildren { + public Page getPage() { + return page; + } /** - * An empty array of type long. 
+ * Clear if necessary, reference to the actual child Page object, + * so it can be garbage collected if not actively used elsewhere. + * Reference is cleared only if corresponding page was already saved on a disk. */ - public static final long[] EMPTY_ARRAY = new long[0]; + void clearPageReference() { + if (page != null) { + page.releaseSavedPages(); + assert page.isSaved() || !page.isComplete(); + if (page.isSaved()) { + assert pos == page.getPos(); + assert count == page.getTotalCount() : count + " != " + page.getTotalCount(); + page = null; + } + } + } + + long getPos() { + return pos; + } /** - * The position of the page. + * Re-acquire position from in-memory page. */ - final long pos; + void resetPos() { + Page p = page; + if (p != null && p.isSaved()) { + pos = p.getPos(); + assert count == p.getTotalCount(); + } + } + + @Override + public String toString() { + return "Cnt:" + count + ", pos:" + (pos == 0 ? "0" : DataUtils.getPageChunkId(pos) + + (page == null ? "" : "/" + page.pageNo) + + "-" + DataUtils.getPageOffset(pos) + ":" + DataUtils.getPageMaxLength(pos)) + + ((page == null ? DataUtils.getPageType(pos) == 0 : page.isLeaf()) ? " leaf" : " node") + + ", page:{" + page + "}"; + } + } + + private static class NonLeaf extends Page { /** - * The page positions of (direct or indirect) children. Depending on the - * use case, this can be the complete list, or only a subset of all - * children, for example only only one reference to a child in another - * chunk. + * The child page references. */ - long[] children; + private PageReference[] children; /** - * Whether this object only contains the list of chunks. - */ - boolean chunkList; + * The total entry count of this page and all children. 
+ */ + private long totalCount; - private PageChildren(long pos, long[] children) { - this.pos = pos; + NonLeaf(MVMap map) { + super(map); + } + + NonLeaf(MVMap map, NonLeaf source, PageReference[] children, long totalCount) { + super(map, source); + this.children = children; + this.totalCount = totalCount; + } + + NonLeaf(MVMap map, K[] keys, PageReference[] children, long totalCount) { + super(map, keys); this.children = children; + this.totalCount = totalCount; + } + + @Override + public int getNodeType() { + return DataUtils.PAGE_TYPE_NODE; + } + + @Override + public Page copy(MVMap map, boolean eraseChildrenRefs) { + return eraseChildrenRefs ? + new IncompleteNonLeaf<>(map, this) : + new NonLeaf<>(map, this, children, totalCount); } - PageChildren(Page p) { - this.pos = p.getPos(); - int count = p.getRawChildPageCount(); - this.children = new long[count]; - for (int i = 0; i < count; i++) { - children[i] = p.getChildPagePos(i); + @Override + public Page getChildPage(int index) { + PageReference ref = children[index]; + Page page = ref.getPage(); + if(page == null) { + page = map.readPage(ref.getPos()); + assert ref.getPos() == page.getPos(); + assert ref.count == page.getTotalCount(); } + return page; } - int getMemory() { - return 64 + 8 * children.length; + @Override + public long getChildPagePos(int index) { + return children[index].getPos(); } - /** - * Read an inner node page from the buffer, but ignore the keys and - * values. 
- * - * @param fileStore the file store - * @param pos the position - * @param mapId the map id - * @param filePos the position in the file - * @param maxPos the maximum position (the end of the chunk) - * @return the page children object - */ - static PageChildren read(FileStore fileStore, long pos, int mapId, - long filePos, long maxPos) { - ByteBuffer buff; - int maxLength = DataUtils.getPageMaxLength(pos); - if (maxLength == DataUtils.PAGE_LARGE) { - buff = fileStore.readFully(filePos, 128); - maxLength = buff.getInt(); - // read the first bytes again + @Override + public V getValue(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public Page split(int at) { + assert !isSaved(); + int b = getKeyCount() - at; + K[] bKeys = splitKeys(at, b - 1); + PageReference[] aChildren = createRefStorage(at + 1); + PageReference[] bChildren = createRefStorage(b); + System.arraycopy(children, 0, aChildren, 0, at + 1); + System.arraycopy(children, at + 1, bChildren, 0, b); + children = aChildren; + + long t = 0; + for (PageReference x : aChildren) { + t += x.count; } - maxLength = (int) Math.min(maxPos - filePos, maxLength); - int length = maxLength; - if (length < 0) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "Illegal page length {0} reading at {1}; max pos {2} ", - length, filePos, maxPos); + totalCount = t; + t = 0; + for (PageReference x : bChildren) { + t += x.count; } - buff = fileStore.readFully(filePos, length); - int chunkId = DataUtils.getPageChunkId(pos); - int offset = DataUtils.getPageOffset(pos); - int start = buff.position(); - int pageLength = buff.getInt(); - if (pageLength > maxLength) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected page length =< {1}, got {2}", - chunkId, maxLength, pageLength); + Page newPage = createNode(map, bKeys, bChildren, t, 0); + if(isPersistent()) { + recalculateMemory(); } - buff.limit(start + 
pageLength); - short check = buff.getShort(); - int m = DataUtils.readVarInt(buff); - if (m != mapId) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected map id {1}, got {2}", - chunkId, mapId, m); + return newPage; + } + + @Override + public void expand(int keyCount, Object[] extraKeys, Object[] extraValues) { + throw new UnsupportedOperationException(); + } + + @Override + public long getTotalCount() { + assert !isComplete() || totalCount == calculateTotalCount() : + "Total count: " + totalCount + " != " + calculateTotalCount(); + return totalCount; + } + + private long calculateTotalCount() { + long check = 0; + int keyCount = getKeyCount(); + for (int i = 0; i <= keyCount; i++) { + check += children[i].count; } - int checkTest = DataUtils.getCheckValue(chunkId) - ^ DataUtils.getCheckValue(offset) - ^ DataUtils.getCheckValue(pageLength); - if (check != (short) checkTest) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_FILE_CORRUPT, - "File corrupted in chunk {0}, expected check value {1}, got {2}", - chunkId, checkTest, check); + return check; + } + + void recalculateTotalCount() { + totalCount = calculateTotalCount(); + } + + @Override + long getCounts(int index) { + return children[index].count; + } + + @Override + public void setChild(int index, Page c) { + assert c != null; + PageReference child = children[index]; + if (c != child.getPage() || c.getPos() != child.getPos()) { + totalCount += c.getTotalCount() - child.count; + children = children.clone(); + children[index] = new PageReference<>(c); } - int len = DataUtils.readVarInt(buff); - int type = buff.get(); - boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE; - if (!node) { - return null; + } + + @Override + public V setValue(int index, V value) { + throw new UnsupportedOperationException(); + } + + @Override + public void insertLeaf(int index, K key, V value) { + throw new UnsupportedOperationException(); + } + + 
@Override + public void insertNode(int index, K key, Page childPage) { + int childCount = getRawChildPageCount(); + insertKey(index, key); + + PageReference[] newChildren = createRefStorage(childCount + 1); + DataUtils.copyWithGap(children, newChildren, childCount, index); + children = newChildren; + children[index] = new PageReference<>(childPage); + + totalCount += childPage.getTotalCount(); + if (isPersistent()) { + addMemory(MEMORY_POINTER + PAGE_MEMORY_CHILD); } - long[] children = new long[len + 1]; - for (int i = 0; i <= len; i++) { - children[i] = buff.getLong(); + } + + @Override + public void remove(int index) { + int childCount = getRawChildPageCount(); + super.remove(index); + if(isPersistent()) { + if (map.isMemoryEstimationAllowed()) { + addMemory(-getMemory() / childCount); + } else { + addMemory(-MEMORY_POINTER - PAGE_MEMORY_CHILD); + } } - return new PageChildren(pos, children); + totalCount -= children[index].count; + PageReference[] newChildren = createRefStorage(childCount - 1); + DataUtils.copyExcept(children, newChildren, childCount, index); + children = newChildren; } - /** - * Only keep one reference to the same chunk. Only leaf references are - * removed (references to inner nodes are not removed, as they could - * indirectly point to other chunks). 
- */ - void removeDuplicateChunkReferences() { - HashSet chunks = New.hashSet(); - // we don't need references to leaves in the same chunk - chunks.add(DataUtils.getPageChunkId(pos)); - for (int i = 0; i < children.length; i++) { - long p = children[i]; - int chunkId = DataUtils.getPageChunkId(p); - boolean wasNew = chunks.add(chunkId); - if (DataUtils.getPageType(p) == DataUtils.PAGE_TYPE_NODE) { - continue; + @Override + public int removeAllRecursive(long version) { + int unsavedMemory = removePage(version); + if (isPersistent()) { + for (int i = 0, size = map.getChildPageCount(this); i < size; i++) { + PageReference ref = children[i]; + Page page = ref.getPage(); + if (page != null) { + unsavedMemory += page.removeAllRecursive(version); + } else { + long pagePos = ref.getPos(); + assert DataUtils.isPageSaved(pagePos); + if (DataUtils.isLeafPosition(pagePos)) { + map.store.accountForRemovedPage(pagePos, version, map.isSingleWriter(), -1); + } else { + unsavedMemory += map.readPage(pagePos).removeAllRecursive(version); + } + } } - if (wasNew) { - continue; + } + return unsavedMemory; + } + + @Override + public CursorPos getPrependCursorPos(CursorPos cursorPos) { + Page childPage = getChildPage(0); + return childPage.getPrependCursorPos(new CursorPos<>(this, 0, cursorPos)); + } + + @Override + public CursorPos getAppendCursorPos(CursorPos cursorPos) { + int keyCount = getKeyCount(); + Page childPage = getChildPage(keyCount); + return childPage.getAppendCursorPos(new CursorPos<>(this, keyCount, cursorPos)); + } + + @Override + protected void readPayLoad(ByteBuffer buff) { + int keyCount = getKeyCount(); + children = createRefStorage(keyCount + 1); + long[] p = new long[keyCount + 1]; + for (int i = 0; i <= keyCount; i++) { + p[i] = buff.getLong(); + } + long total = 0; + for (int i = 0; i <= keyCount; i++) { + long s = DataUtils.readVarLong(buff); + long position = p[i]; + assert position == 0 ? s == 0 : s >= 0; + total += s; + children[i] = position == 0 ? 
+ PageReference.empty() : + new PageReference<>(position, s); + } + totalCount = total; + } + + @Override + protected void writeValues(WriteBuffer buff) {} + + @Override + protected void writeChildren(WriteBuffer buff, boolean withCounts) { + int keyCount = getKeyCount(); + for (int i = 0; i <= keyCount; i++) { + buff.putLong(children[i].getPos()); + } + if(withCounts) { + for (int i = 0; i <= keyCount; i++) { + buff.putVarLong(children[i].count); + } + } + } + + @Override + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { + if (!isSaved()) { + int patch = write(chunk, buff, toc); + writeChildrenRecursive(chunk, buff, toc); + int old = buff.position(); + buff.position(patch); + writeChildren(buff, false); + buff.position(old); + } + } + + void writeChildrenRecursive(Chunk chunk, WriteBuffer buff, List toc) { + int len = getRawChildPageCount(); + for (int i = 0; i < len; i++) { + PageReference ref = children[i]; + Page p = ref.getPage(); + if (p != null) { + p.writeUnsavedRecursive(chunk, buff, toc); + ref.resetPos(); + } + } + } + + @Override + void releaseSavedPages() { + int len = getRawChildPageCount(); + for (int i = 0; i < len; i++) { + children[i].clearPageReference(); + } + } + + @Override + public int getRawChildPageCount() { + return getKeyCount() + 1; + } + + @Override + protected int calculateMemory() { + return super.calculateMemory() + PAGE_NODE_MEMORY + + getRawChildPageCount() * (MEMORY_POINTER + PAGE_MEMORY_CHILD); + } + + @Override + public void dump(StringBuilder buff) { + super.dump(buff); + int keyCount = getKeyCount(); + for (int i = 0; i <= keyCount; i++) { + if (i > 0) { + buff.append(" "); } - removeChild(i--); + buff.append("[").append(Long.toHexString(children[i].getPos())).append("]"); + if(i < keyCount) { + buff.append(" ").append(getKey(i)); + } + } + } + } + + + private static class IncompleteNonLeaf extends NonLeaf { + + private boolean complete; + + IncompleteNonLeaf(MVMap map, NonLeaf source) { + super(map, 
source, constructEmptyPageRefs(source.getRawChildPageCount()), source.getTotalCount()); + } + + private static PageReference[] constructEmptyPageRefs(int size) { + // replace child pages with empty pages + PageReference[] children = createRefStorage(size); + Arrays.fill(children, PageReference.empty()); + return children; + } + + @Override + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { + if (complete) { + super.writeUnsavedRecursive(chunk, buff, toc); + } else if (!isSaved()) { + writeChildrenRecursive(chunk, buff, toc); } } + @Override + public boolean isComplete() { + return complete; + } + + @Override + public void setComplete() { + recalculateTotalCount(); + complete = true; + } + + @Override + public void dump(StringBuilder buff) { + super.dump(buff); + buff.append(", complete:").append(complete); + } + + } + + + + private static class Leaf extends Page { /** - * Collect the set of chunks referenced directly by this page. - * - * @param target the target set + * The storage for values. */ - void collectReferencedChunks(Set target) { - target.add(DataUtils.getPageChunkId(pos)); - for (long p : children) { - target.add(DataUtils.getPageChunkId(p)); + private V[] values; + + Leaf(MVMap map) { + super(map); + } + + private Leaf(MVMap map, Leaf source) { + super(map, source); + this.values = source.values; + } + + Leaf(MVMap map, K[] keys, V[] values) { + super(map, keys); + this.values = values; + } + + @Override + public int getNodeType() { + return PAGE_TYPE_LEAF; + } + + @Override + public Page copy(MVMap map, boolean eraseChildrenRefs) { + return new Leaf<>(map, this); + } + + @Override + public Page getChildPage(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public long getChildPagePos(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public V getValue(int index) { + return values == null ? 
null : values[index]; + } + + @Override + public Page split(int at) { + assert !isSaved(); + int b = getKeyCount() - at; + K[] bKeys = splitKeys(at, b); + V[] bValues = createValueStorage(b); + if(values != null) { + V[] aValues = createValueStorage(at); + System.arraycopy(values, 0, aValues, 0, at); + System.arraycopy(values, at, bValues, 0, b); + values = aValues; + } + Page newPage = createLeaf(map, bKeys, bValues, 0); + if(isPersistent()) { + recalculateMemory(); } + return newPage; } - private void removeChild(int index) { - if (index == 0 && children.length == 1) { - children = EMPTY_ARRAY; - return; + @Override + public void expand(int extraKeyCount, K[] extraKeys, V[] extraValues) { + int keyCount = getKeyCount(); + expandKeys(extraKeyCount, extraKeys); + if(values != null) { + V[] newValues = createValueStorage(keyCount + extraKeyCount); + System.arraycopy(values, 0, newValues, 0, keyCount); + System.arraycopy(extraValues, 0, newValues, keyCount, extraKeyCount); + values = newValues; + } + if(isPersistent()) { + recalculateMemory(); } - long[] c2 = new long[children.length - 1]; - DataUtils.copyExcept(children, c2, children.length, index); - children = c2; } - } + @Override + public long getTotalCount() { + return getKeyCount(); + } + + @Override + long getCounts(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public void setChild(int index, Page c) { + throw new UnsupportedOperationException(); + } + + @Override + public V setValue(int index, V value) { + values = values.clone(); + V old = setValueInternal(index, value); + if(isPersistent()) { + if (!map.isMemoryEstimationAllowed()) { + addMemory(map.evaluateMemoryForValue(value) - + map.evaluateMemoryForValue(old)); + } + } + return old; + } + + private V setValueInternal(int index, V value) { + V old = values[index]; + values[index] = value; + return old; + } + + @Override + public void insertLeaf(int index, K key, V value) { + int keyCount = getKeyCount(); + 
insertKey(index, key); + + if(values != null) { + V[] newValues = createValueStorage(keyCount + 1); + DataUtils.copyWithGap(values, newValues, keyCount, index); + values = newValues; + setValueInternal(index, value); + if (isPersistent()) { + addMemory(MEMORY_POINTER + map.evaluateMemoryForValue(value)); + } + } + } + + @Override + public void insertNode(int index, K key, Page childPage) { + throw new UnsupportedOperationException(); + } + + @Override + public void remove(int index) { + int keyCount = getKeyCount(); + super.remove(index); + if (values != null) { + if(isPersistent()) { + if (map.isMemoryEstimationAllowed()) { + addMemory(-getMemory() / keyCount); + } else { + V old = getValue(index); + addMemory(-MEMORY_POINTER - map.evaluateMemoryForValue(old)); + } + } + V[] newValues = createValueStorage(keyCount - 1); + DataUtils.copyExcept(values, newValues, keyCount, index); + values = newValues; + } + } + + @Override + public int removeAllRecursive(long version) { + return removePage(version); + } + + @Override + public CursorPos getPrependCursorPos(CursorPos cursorPos) { + return new CursorPos<>(this, -1, cursorPos); + } + @Override + public CursorPos getAppendCursorPos(CursorPos cursorPos) { + int keyCount = getKeyCount(); + return new CursorPos<>(this, ~keyCount, cursorPos); + } + + @Override + protected void readPayLoad(ByteBuffer buff) { + int keyCount = getKeyCount(); + values = createValueStorage(keyCount); + map.getValueType().read(buff, values, getKeyCount()); + } + + @Override + protected void writeValues(WriteBuffer buff) { + map.getValueType().write(buff, values, getKeyCount()); + } + + @Override + protected void writeChildren(WriteBuffer buff, boolean withCounts) {} + + @Override + void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff, List toc) { + if (!isSaved()) { + write(chunk, buff, toc); + } + } + + @Override + void releaseSavedPages() {} + + @Override + public int getRawChildPageCount() { + return 0; + } + + @Override + protected int 
calculateMemory() { +//* + return super.calculateMemory() + PAGE_LEAF_MEMORY + + (values == null ? 0 : map.evaluateMemoryForValues(values, getKeyCount())); +/*/ + int keyCount = getKeyCount(); + int mem = super.calculateMemory() + PAGE_LEAF_MEMORY + keyCount * MEMORY_POINTER; + DataType valueType = map.getValueType(); + for (int i = 0; i < keyCount; i++) { + mem += getMemory(valueType, values[i]); + } + return mem; +//*/ + } + + @Override + public void dump(StringBuilder buff) { + super.dump(buff); + int keyCount = getKeyCount(); + for (int i = 0; i < keyCount; i++) { + if (i > 0) { + buff.append(" "); + } + buff.append(getKey(i)); + if (values != null) { + buff.append(':'); + buff.append(getValue(i)); + } + } + } + } } diff --git a/h2/src/main/org/h2/mvstore/RootReference.java b/h2/src/main/org/h2/mvstore/RootReference.java new file mode 100644 index 0000000000..dff79839c0 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/RootReference.java @@ -0,0 +1,256 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore; + +/** + * Class RootReference is an immutable structure to represent state of the MVMap as a whole + * (not related to a particular B-Tree node). + * Single structure would allow for non-blocking atomic state change. + * The most important part of it is a reference to the root node. + * + * @author Andrei Tokar + */ +public final class RootReference { + + /** + * The root page. + */ + public final Page root; + /** + * The version used for writing. + */ + public final long version; + /** + * Counter of reentrant locks. + */ + private final byte holdCount; + /** + * Lock owner thread id. + */ + private final long ownerId; + /** + * Reference to the previous root in the chain. + * That is the last root of the previous version, which had any data changes. 
+ * Versions without any data changes are dropped from the chain, as it is built. + */ + volatile RootReference previous; + /** + * Counter for successful root updates. + */ + final long updateCounter; + /** + * Counter for attempted root updates. + */ + final long updateAttemptCounter; + /** + * Size of the occupied part of the append buffer. + */ + private final byte appendCounter; + + + // This one is used to set root initially and for r/o snapshots + RootReference(Page root, long version) { + this.root = root; + this.version = version; + this.previous = null; + this.updateCounter = 1; + this.updateAttemptCounter = 1; + this.holdCount = 0; + this.ownerId = 0; + this.appendCounter = 0; + } + + private RootReference(RootReference r, Page root, long updateAttemptCounter) { + this.root = root; + this.version = r.version; + this.previous = r.previous; + this.updateCounter = r.updateCounter + 1; + this.updateAttemptCounter = r.updateAttemptCounter + updateAttemptCounter; + this.holdCount = 0; + this.ownerId = 0; + this.appendCounter = r.appendCounter; + } + + // This one is used for locking + private RootReference(RootReference r, int attempt) { + this.root = r.root; + this.version = r.version; + this.previous = r.previous; + this.updateCounter = r.updateCounter + 1; + this.updateAttemptCounter = r.updateAttemptCounter + attempt; + assert r.holdCount == 0 || r.ownerId == Thread.currentThread().getId() // + : Thread.currentThread().getId() + " " + r; + this.holdCount = (byte)(r.holdCount + 1); + this.ownerId = Thread.currentThread().getId(); + this.appendCounter = r.appendCounter; + } + + // This one is used for unlocking + private RootReference(RootReference r, Page root, boolean keepLocked, int appendCounter) { + this.root = root; + this.version = r.version; + this.previous = r.previous; + this.updateCounter = r.updateCounter; + this.updateAttemptCounter = r.updateAttemptCounter; + assert r.holdCount > 0 && r.ownerId == Thread.currentThread().getId() // + : 
Thread.currentThread().getId() + " " + r; + this.holdCount = (byte)(r.holdCount - (keepLocked ? 0 : 1)); + this.ownerId = this.holdCount == 0 ? 0 : Thread.currentThread().getId(); + this.appendCounter = (byte) appendCounter; + } + + // This one is used for version change + private RootReference(RootReference r, long version, int attempt) { + RootReference previous = r; + RootReference tmp; + while ((tmp = previous.previous) != null && tmp.root == r.root) { + previous = tmp; + } + this.root = r.root; + this.version = version; + this.previous = previous; + this.updateCounter = r.updateCounter + 1; + this.updateAttemptCounter = r.updateAttemptCounter + attempt; + this.holdCount = r.holdCount == 0 ? 0 : (byte)(r.holdCount - 1); + this.ownerId = this.holdCount == 0 ? 0 : r.ownerId; + assert r.appendCounter == 0; + this.appendCounter = 0; + } + + /** + * Try to unlock. + * + * @param newRootPage the new root page + * @param attemptCounter the number of attempts so far + * @return the new, unlocked, root reference, or null if not successful + */ + RootReference updateRootPage(Page newRootPage, long attemptCounter) { + return isFree() ? tryUpdate(new RootReference<>(this, newRootPage, attemptCounter)) : null; + } + + /** + * Try to lock. + * + * @param attemptCounter the number of attempts so far + * @return the new, locked, root reference, or null if not successful + */ + RootReference tryLock(int attemptCounter) { + return canUpdate() ? tryUpdate(new RootReference<>(this, attemptCounter)) : null; + } + + /** + * Try to unlock, and if successful update the version + * + * @param version the version + * @param attempt the number of attempts so far + * @return the new, unlocked and updated, root reference, or null if not successful + */ + RootReference tryUnlockAndUpdateVersion(long version, int attempt) { + return canUpdate() ? tryUpdate(new RootReference<>(this, version, attempt)) : null; + } + + /** + * Update the page, possibly keeping it locked. 
+ * + * @param page the page + * @param keepLocked whether to keep it locked + * @param appendCounter number of items in append buffer + * @return the new root reference, or null if not successful + */ + RootReference updatePageAndLockedStatus(Page page, boolean keepLocked, int appendCounter) { + return canUpdate() ? tryUpdate(new RootReference<>(this, page, keepLocked, appendCounter)) : null; + } + + /** + * Remove old versions that are no longer used. + * + * @param oldestVersionToKeep the oldest version that needs to be retained + */ + void removeUnusedOldVersions(long oldestVersionToKeep) { + // We need to keep at least one previous version (if any) here, + // because in order to retain whole history of some version + // we really need last root of the previous version. + // Root labeled with version "X" is the LAST known root for that version + // and therefore the FIRST known root for the version "X+1" + for(RootReference rootRef = this; rootRef != null; rootRef = rootRef.previous) { + if (rootRef.version < oldestVersionToKeep) { + RootReference previous; + assert (previous = rootRef.previous) == null || previous.getAppendCounter() == 0 // + : oldestVersionToKeep + " " + rootRef.previous; + rootRef.previous = null; + } + } + } + + boolean isLocked() { + return holdCount != 0; + } + + private boolean isFree() { + return holdCount == 0; + } + + + private boolean canUpdate() { + return isFree() || ownerId == Thread.currentThread().getId(); + } + + public boolean isLockedByCurrentThread() { + return holdCount != 0 && ownerId == Thread.currentThread().getId(); + } + + private RootReference tryUpdate(RootReference updatedRootReference) { + assert canUpdate(); + return root.map.compareAndSetRoot(this, updatedRootReference) ? updatedRootReference : null; + } + + long getVersion() { + RootReference prev = previous; + return prev == null || prev.root != root || + prev.appendCounter != appendCounter ? 
+ version : prev.getVersion(); + } + + /** + * Does the root have changes since the specified version? + * + * @param version to check against + * @param persistent whether map is backed by persistent storage + * @return true if this root has unsaved changes + */ + boolean hasChangesSince(long version, boolean persistent) { + return persistent && (root.isSaved() ? getAppendCounter() > 0 : getTotalCount() > 0) + || getVersion() > version; + } + + int getAppendCounter() { + return appendCounter & 0xff; + } + + /** + * Whether flushing is needed. + * + * @return true if yes + */ + public boolean needFlush() { + return appendCounter != 0; + } + + public long getTotalCount() { + return root.getTotalCount() + getAppendCounter(); + } + + @Override + public String toString() { + return "RootReference(" + System.identityHashCode(root) + + ", v=" + version + + ", owner=" + ownerId + (ownerId == Thread.currentThread().getId() ? "(current)" : "") + + ", holdCnt=" + holdCount + + ", keys=" + root.getTotalCount() + + ", append=" + getAppendCounter() + + ")"; + } +} diff --git a/h2/src/main/org/h2/mvstore/StreamStore.java b/h2/src/main/org/h2/mvstore/StreamStore.java index 6d4bc42664..82a3944d83 100644 --- a/h2/src/main/org/h2/mvstore/StreamStore.java +++ b/h2/src/main/org/h2/mvstore/StreamStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -41,7 +41,7 @@ public class StreamStore { private int maxBlockSize = 256 * 1024; private final AtomicLong nextKey = new AtomicLong(); private final AtomicReference nextBuffer = - new AtomicReference(); + new AtomicReference<>(); /** * Create a stream store instance. 
@@ -95,15 +95,14 @@ public long getMaxBlockSize() { * * @param in the stream * @return the id (potentially an empty array) + * @throws IOException If an I/O error occurs */ + @SuppressWarnings("resource") public byte[] put(InputStream in) throws IOException { ByteArrayOutputStream id = new ByteArrayOutputStream(); int level = 0; try { - while (true) { - if (put(id, in, level)) { - break; - } + while (!put(id, in, level)) { if (id.size() > maxBlockSize / 2) { id = putIndirectId(id); level++; @@ -206,6 +205,7 @@ private long writeBlock(byte[] data) { * * @param len the length of the stored block. */ + @SuppressWarnings("unused") protected void onStore(int len) { // do nothing by default } @@ -264,9 +264,13 @@ public long getMaxBlockKey(byte[] id) { // indirect: 2, total len (long), blockId (long) DataUtils.readVarLong(idBuffer); long k2 = DataUtils.readVarLong(idBuffer); - // recurse + maxKey = k2; byte[] r = map.get(k2); - maxKey = Math.max(maxKey, getMaxBlockKey(r)); + // recurse + long m = getMaxBlockKey(r); + if (m >= 0) { + maxKey = Math.max(maxKey, m); + } break; default: throw DataUtils.newIllegalArgumentException( @@ -311,6 +315,50 @@ public void remove(byte[] id) { } } + /** + * Convert the id to a human readable string. 
+ * + * @param id the stream id + * @return the string + */ + public static String toString(byte[] id) { + StringBuilder buff = new StringBuilder(); + ByteBuffer idBuffer = ByteBuffer.wrap(id); + long length = 0; + while (idBuffer.hasRemaining()) { + long block; + int len; + switch (idBuffer.get()) { + case 0: + // in-place: 0, len (int), data + len = DataUtils.readVarInt(idBuffer); + idBuffer.position(idBuffer.position() + len); + buff.append("data len=").append(len); + length += len; + break; + case 1: + // block: 1, len (int), blockId (long) + len = DataUtils.readVarInt(idBuffer); + length += len; + block = DataUtils.readVarLong(idBuffer); + buff.append("block ").append(block).append(" len=").append(len); + break; + case 2: + // indirect: 2, total len (long), blockId (long) + len = DataUtils.readVarInt(idBuffer); + length += DataUtils.readVarLong(idBuffer); + block = DataUtils.readVarLong(idBuffer); + buff.append("indirect block ").append(block).append(" len=").append(len); + break; + default: + buff.append("error"); + } + buff.append(", "); + } + buff.append("length=").append(length); + return buff.toString(); + } + /** * Calculate the number of data bytes for the given id. As the length is * encoded in the id, this operation does not cause any reads in the map. 
@@ -385,7 +433,7 @@ public InputStream get(byte[] id) { byte[] getBlock(long key) { byte[] data = map.get(key); if (data == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_BLOCK_NOT_FOUND, "Block {0} not found", key); } @@ -458,7 +506,7 @@ public int read(byte[] b, int off, int len) throws IOException { if (buffer == null) { try { buffer = nextBuffer(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { String msg = DataUtils.formatMessage( DataUtils.ERROR_BLOCK_NOT_FOUND, "Block not found in id {0}", diff --git a/h2/src/main/org/h2/mvstore/WriteBuffer.java b/h2/src/main/org/h2/mvstore/WriteBuffer.java index 0da1d92741..9dd2be2460 100644 --- a/h2/src/main/org/h2/mvstore/WriteBuffer.java +++ b/h2/src/main/org/h2/mvstore/WriteBuffer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore; @@ -12,6 +12,10 @@ */ public class WriteBuffer { + /** + * The maximum size of the buffer in order to be re-used after a clear + * operation. + */ private static final int MAX_REUSE_CAPACITY = 4 * 1024 * 1024; /** @@ -19,9 +23,24 @@ public class WriteBuffer { */ private static final int MIN_GROW = 1024 * 1024; - private ByteBuffer reuse = ByteBuffer.allocate(MIN_GROW); + /** + * The buffer that is used after a clear operation. + */ + private ByteBuffer reuse; - private ByteBuffer buff = reuse; + /** + * The current buffer (may be replaced if it is too small). + */ + private ByteBuffer buff; + + public WriteBuffer(int initialSize) { + reuse = ByteBuffer.allocate(initialSize); + buff = reuse; + } + + public WriteBuffer() { + this(MIN_GROW); + } /** * Write a variable size integer. 
@@ -54,19 +73,7 @@ public WriteBuffer putVarLong(long x) { */ public WriteBuffer putStringData(String s, int len) { ByteBuffer b = ensureCapacity(3 * len); - for (int i = 0; i < len; i++) { - int c = s.charAt(i); - if (c < 0x80) { - b.put((byte) c); - } else if (c >= 0x800) { - b.put((byte) (0xe0 | (c >> 12))); - b.put((byte) (((c >> 6) & 0x3f))); - b.put((byte) (c & 0x3f)); - } else { - b.put((byte) (0xc0 | (c >> 6))); - b.put((byte) (c & 0x3f)); - } - } + DataUtils.writeStringData(b, s, len); return this; } @@ -178,7 +185,7 @@ public WriteBuffer put(byte[] bytes, int offset, int length) { * @return this */ public WriteBuffer put(ByteBuffer src) { - ensureCapacity(buff.remaining()).put(src); + ensureCapacity(src.remaining()).put(src); return this; } diff --git a/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java b/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java index 447505263c..d75127e3a6 100644 --- a/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java +++ b/h2/src/main/org/h2/mvstore/cache/CacheLongKeyLIRS.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.cache; +import java.lang.ref.WeakReference; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -24,9 +25,9 @@ * at most the specified amount of memory. The memory unit is not relevant, * however it is suggested to use bytes as the unit. *

    - * This class implements an approximation of the the LIRS replacement algorithm + * This class implements an approximation of the LIRS replacement algorithm * invented by Xiaodong Zhang and Song Jiang as described in - * http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few + * https://web.cse.ohio-state.edu/~zhang.574/lirs-sigmetrics-02.html with a few * smaller changes: An additional queue for non-resident entries is used, to * prevent unbound memory usage. The maximum size of this queue is at most the * size of the rest of the stack. About 6.25% of the mapped entries are cold. @@ -55,35 +56,25 @@ public class CacheLongKeyLIRS { private final int segmentShift; private final int segmentMask; private final int stackMoveDistance; - - /** - * Create a new cache with the given number of entries, and the default - * settings (16 segments, and stack move distance of 8. - * - * @param maxMemory the maximum memory to use (1 or larger) - */ - public CacheLongKeyLIRS(long maxMemory) { - this(maxMemory, 16, 8); - } + private final int nonResidentQueueSize; + private final int nonResidentQueueSizeHigh; /** * Create a new cache with the given memory size. 
* - * @param maxMemory the maximum memory to use (1 or larger) - * @param segmentCount the number of cache segments (must be a power of 2) - * @param stackMoveDistance how many other item are to be moved to the top - * of the stack before the current item is moved + * @param config the configuration */ @SuppressWarnings("unchecked") - public CacheLongKeyLIRS(long maxMemory, - int segmentCount, int stackMoveDistance) { - setMaxMemory(maxMemory); + public CacheLongKeyLIRS(Config config) { + setMaxMemory(config.maxMemory); + this.nonResidentQueueSize = config.nonResidentQueueSize; + this.nonResidentQueueSizeHigh = config.nonResidentQueueSizeHigh; DataUtils.checkArgument( - Integer.bitCount(segmentCount) == 1, - "The segment count must be a power of 2, is {0}", segmentCount); - this.segmentCount = segmentCount; + Integer.bitCount(config.segmentCount) == 1, + "The segment count must be a power of 2, is {0}", config.segmentCount); + this.segmentCount = config.segmentCount; this.segmentMask = segmentCount - 1; - this.stackMoveDistance = stackMoveDistance; + this.stackMoveDistance = config.stackMoveDistance; segments = new Segment[segmentCount]; clear(); // use the high bits for the segment @@ -94,13 +85,21 @@ public CacheLongKeyLIRS(long maxMemory, * Remove all entries. 
*/ public void clear() { - long max = Math.max(1, maxMemory / segmentCount); + long max = getMaxItemSize(); for (int i = 0; i < segmentCount; i++) { - segments[i] = new Segment( - max, stackMoveDistance, 8); + segments[i] = new Segment<>(max, stackMoveDistance, 8, nonResidentQueueSize, + nonResidentQueueSizeHigh); } } + /** + * Determines max size of the data item size to fit into cache + * @return data items size limit + */ + public long getMaxItemSize() { + return Math.max(1, maxMemory / segmentCount); + } + private Entry find(long key) { int hash = getHash(key); return getSegment(hash).find(key, hash); @@ -114,8 +113,8 @@ private Entry find(long key) { * @return true if there is a resident entry */ public boolean containsKey(long key) { - int hash = getHash(key); - return getSegment(hash).containsKey(key, hash); + Entry e = find(key); + return e != null && e.value != null; } /** @@ -127,7 +126,7 @@ public boolean containsKey(long key) { */ public V peek(long key) { Entry e = find(key); - return e == null ? null : e.value; + return e == null ? 
null : e.getValue(); } /** @@ -152,6 +151,10 @@ public V put(long key, V value) { * @return the old value, or null if there was no resident entry */ public V put(long key, V value, int memory) { + if (value == null) { + throw DataUtils.newIllegalArgumentException( + "The value may not be null"); + } int hash = getHash(key); int segmentIndex = getSegmentIndex(hash); Segment s = segments[segmentIndex]; @@ -174,7 +177,7 @@ private Segment resizeIfNeeded(Segment s, int segmentIndex) { Segment s2 = segments[segmentIndex]; if (s == s2) { // no other thread resized, so we do - s = new Segment(s, newLen); + s = new Segment<>(s, newLen); segments[segmentIndex] = s; } return s; @@ -186,6 +189,7 @@ private Segment resizeIfNeeded(Segment s, int segmentIndex) { * @param value the value * @return the size */ + @SuppressWarnings("unused") protected int sizeOf(V value) { return 1; } @@ -217,8 +221,8 @@ public V remove(long key) { * @return the memory, or 0 if there is no resident entry */ public int getMemory(long key) { - int hash = getHash(key); - return getSegment(hash).getMemory(key, hash); + Entry e = find(key); + return e == null ? 
0 : e.getMemory(); } /** @@ -231,7 +235,9 @@ public int getMemory(long key) { */ public V get(long key) { int hash = getHash(key); - return getSegment(hash).get(key, hash); + Segment s = getSegment(hash); + Entry e = s.find(key, hash); + return s.get(e); } private Segment getSegment(int hash) { @@ -307,11 +313,7 @@ public long getMaxMemory() { * @return the entry set */ public synchronized Set> entrySet() { - HashMap map = new HashMap(); - for (long k : keySet()) { - map.put(k, find(k).value); - } - return map.entrySet(); + return getMap().entrySet(); } /** @@ -320,7 +322,7 @@ public synchronized Set> entrySet() { * @return the set of keys */ public Set keySet() { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (Segment s : segments) { set.addAll(s.keySet()); } @@ -414,7 +416,7 @@ public int size() { * @return the key list */ public List keys(boolean cold, boolean nonResident) { - ArrayList keys = new ArrayList(); + ArrayList keys = new ArrayList<>(); for (Segment s : segments) { keys.addAll(s.keys(cold, nonResident)); } @@ -427,9 +429,9 @@ public List keys(boolean cold, boolean nonResident) { * @return the entry set */ public List values() { - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList<>(); for (long k : keySet()) { - V value = find(k).value; + V value = peek(k); if (value != null) { list.add(value); } @@ -452,7 +454,7 @@ public boolean isEmpty() { * @param value the value * @return true if it is stored */ - public boolean containsValue(Object value) { + public boolean containsValue(V value) { return getMap().containsValue(value); } @@ -462,9 +464,9 @@ public boolean containsValue(Object value) { * @return the map */ public Map getMap() { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); for (long k : keySet()) { - V x = find(k).value; + V x = peek(k); if (x != null) { map.put(k, x); } @@ -484,6 +486,17 @@ public void putAll(Map m) { } } + /** + * Loop through segments, trimming the non resident queue. 
+ */ + public void trimNonResidentQueue() { + for (Segment s : segments) { + synchronized (s) { + s.trimNonResidentQueue(); + } + } + } + /** * A cache segment * @@ -543,6 +556,18 @@ private static class Segment { */ private final int mask; + /** + * Low watermark for the number of entries in the non-resident queue, + * as a factor of the number of entries in the map. + */ + private final int nonResidentQueueSize; + + /** + * High watermark for the number of entries in the non-resident queue, + * as a factor of the number of entries in the map. + */ + private final int nonResidentQueueSizeHigh; + /** * The stack of recently referenced elements. This includes all hot * entries, and the recently referenced cold entries. Resident cold @@ -579,25 +604,29 @@ private static class Segment { /** * Create a new cache segment. - * - * @param maxMemory the maximum memory to use + * @param maxMemory the maximum memory to use * @param stackMoveDistance the number of other entries to be moved to * the top of the stack before moving an entry to the top * @param len the number of hash table buckets (must be a power of 2) + * @param nonResidentQueueSize the non-resident queue size low watermark factor + * @param nonResidentQueueSizeHigh the non-resident queue size high watermark factor */ - Segment(long maxMemory, int stackMoveDistance, int len) { + Segment(long maxMemory, int stackMoveDistance, int len, + int nonResidentQueueSize, int nonResidentQueueSizeHigh) { setMaxMemory(maxMemory); this.stackMoveDistance = stackMoveDistance; + this.nonResidentQueueSize = nonResidentQueueSize; + this.nonResidentQueueSizeHigh = nonResidentQueueSizeHigh; // the bit mask has all bits set mask = len - 1; // initialize the stack and queue heads - stack = new Entry(); + stack = new Entry<>(); stack.stackPrev = stack.stackNext = stack; - queue = new Entry(); + queue = new Entry<>(); queue.queuePrev = queue.queueNext = queue; - queue2 = new Entry(); + queue2 = new Entry<>(); queue2.queuePrev = 
queue2.queueNext = queue2; @SuppressWarnings("unchecked") @@ -614,12 +643,13 @@ private static class Segment { * @param len the number of hash table buckets (must be a power of 2) */ Segment(Segment old, int len) { - this(old.maxMemory, old.stackMoveDistance, len); + this(old.maxMemory, old.stackMoveDistance, len, + old.nonResidentQueueSize, old.nonResidentQueueSizeHigh); hits = old.hits; misses = old.misses; Entry s = old.stack.stackPrev; while (s != old.stack) { - Entry e = copy(s); + Entry e = new Entry<>(s); addToMap(e); addToStack(e); s = s.stackPrev; @@ -628,7 +658,7 @@ private static class Segment { while (s != old.queue) { Entry e = find(s.key, getHash(s.key)); if (e == null) { - e = copy(s); + e = new Entry<>(s); addToMap(e); } addToQueue(queue, e); @@ -638,7 +668,7 @@ private static class Segment { while (s != old.queue2) { Entry e = find(s.key, getHash(s.key)); if (e == null) { - e = copy(s); + e = new Entry<>(s); addToMap(e); } addToQueue(queue2, e); @@ -668,64 +698,28 @@ private void addToMap(Entry e) { int index = getHash(e.key) & mask; e.mapNext = entries[index]; entries[index] = e; - usedMemory += e.memory; + usedMemory += e.getMemory(); mapSize++; } - private static Entry copy(Entry old) { - Entry e = new Entry(); - e.key = old.key; - e.value = old.value; - e.memory = old.memory; - e.topMove = old.topMove; - return e; - } - /** - * Get the memory used for the given key. + * Get the value from the given entry. + * This method adjusts the internal state of the cache sometimes, + * to ensure commonly used entries stay in the cache. * - * @param key the key (may not be null) - * @param hash the hash - * @return the memory, or 0 if there is no resident entry - */ - int getMemory(long key, int hash) { - Entry e = find(key, hash); - return e == null ? 0 : e.memory; - } - - /** - * Get the value for the given key if the entry is cached. This method - * adjusts the internal state of the cache sometimes, to ensure commonly - * used entries stay in the cache. 
- * - * @param key the key (may not be null) - * @param hash the hash + * @param e the entry * @return the value, or null if there is no resident entry */ - V get(long key, int hash) { - Entry e = find(key, hash); - if (e == null) { - // the entry was not found - misses++; - return null; - } - V value = e.value; + synchronized V get(Entry e) { + V value = e == null ? null : e.getValue(); if (value == null) { - // it was a non-resident entry + // the entry was not found + // or it was a non-resident entry misses++; - return null; - } - if (e.isHot()) { - if (e != stack.stackNext) { - if (stackMoveDistance == 0 || - stackMoveCounter - e.topMove > stackMoveDistance) { - access(key, hash); - } - } } else { - access(key, hash); + access(e); + hits++; } - hits++; return value; } @@ -733,17 +727,12 @@ V get(long key, int hash) { * Access an item, moving the entry to the top of the stack or front of * the queue if found. * - * @param key the key + * @param e entry to record access for */ - private synchronized void access(long key, int hash) { - Entry e = find(key, hash); - if (e == null || e.value == null) { - return; - } + private void access(Entry e) { if (e.isHot()) { - if (e != stack.stackNext) { - if (stackMoveDistance == 0 || - stackMoveCounter - e.topMove > stackMoveDistance) { + if (e != stack.stackNext && e.stackNext != null) { + if (stackMoveCounter - e.topMove > stackMoveDistance) { // move a hot entry to the top of the stack // unless it is already there boolean wasEnd = e == stack.stackPrev; @@ -757,22 +746,33 @@ private synchronized void access(long key, int hash) { } } } else { - removeFromQueue(e); - if (e.stackNext != null) { - // resident cold entries become hot - // if they are on the stack - removeFromStack(e); - // which means a hot entry needs to become cold - // (this entry is cold, that means there is at least one - // more entry in the stack, which must be hot) - convertOldestHotToCold(); - } else { - // cold entries that are not on the stack - // 
move to the front of the queue - addToQueue(queue, e); + V v = e.getValue(); + if (v != null) { + removeFromQueue(e); + if (e.reference != null) { + e.value = v; + e.reference = null; + usedMemory += e.memory; + } + if (e.stackNext != null) { + // resident, or even non-resident (weak value reference), + // cold entries become hot if they are on the stack + removeFromStack(e); + // which means a hot entry needs to become cold + // (this entry is cold, that means there is at least one + // more entry in the stack, which must be hot) + convertOldestHotToCold(); + } else { + // cold entries that are not on the stack + // move to the front of the queue + addToQueue(queue, e); + } + // in any case, the cold entry is moved to the top of the stack + addToStack(e); + // but if newly promoted cold/non-resident is the only entry on a stack now + // that means last one is cold, need to prune + pruneStack(); } - // in any case, the cold entry is moved to the top of the stack - addToStack(e); } } @@ -788,26 +788,18 @@ private synchronized void access(long key, int hash) { * @return the old value, or null if there was no resident entry */ synchronized V put(long key, int hash, V value, int memory) { - if (value == null) { - throw DataUtils.newIllegalArgumentException( - "The value may not be null"); - } - V old; Entry e = find(key, hash); - if (e == null) { - old = null; - } else { - old = e.value; + boolean existed = e != null; + V old = null; + if (existed) { + old = e.getValue(); remove(key, hash); } if (memory > maxMemory) { // the new entry is too big to fit return old; } - e = new Entry(); - e.key = key; - e.value = value; - e.memory = memory; + e = new Entry<>(key, value, memory); int index = hash & mask; e.mapNext = entries[index]; entries[index] = e; @@ -825,6 +817,10 @@ synchronized V put(long key, int hash, V value, int memory) { mapSize++; // added entries are always added to the stack addToStack(e); + if (existed) { + // if it was there before (even non-resident), it 
becomes hot + access(e); + } return old; } @@ -842,9 +838,7 @@ synchronized V remove(long key, int hash) { if (e == null) { return null; } - V old; if (e.key == key) { - old = e.value; entries[index] = e.mapNext; } else { Entry last; @@ -855,11 +849,11 @@ synchronized V remove(long key, int hash) { return null; } } while (e.key != key); - old = e.value; last.mapNext = e.mapNext; } + V old = e.getValue(); mapSize--; - usedMemory -= e.memory; + usedMemory -= e.getMemory(); if (e.stackNext != null) { removeFromStack(e); } @@ -873,10 +867,10 @@ synchronized V remove(long key, int hash) { addToStackBottom(e); } } + pruneStack(); } else { removeFromQueue(e); } - pruneStack(); return old; } @@ -895,7 +889,7 @@ private void evictBlock() { // ensure there are not too many hot entries: right shift of 5 is // division by 32, that means if there are only 1/32 (3.125%) or // less cold entries, a hot entry needs to become cold - while (queueSize <= (mapSize >>> 5) && stackSize > 0) { + while (queueSize <= ((mapSize - queue2Size) >>> 5) && stackSize > 0) { convertOldestHotToCold(); } // the oldest resident cold entries become non-resident @@ -903,15 +897,28 @@ private void evictBlock() { Entry e = queue.queuePrev; usedMemory -= e.memory; removeFromQueue(e); + e.reference = new WeakReference<>(e.value); e.value = null; - e.memory = 0; addToQueue(queue2, e); // the size of the non-resident-cold entries needs to be limited - while (queue2Size + queue2Size > stackSize) { - e = queue2.queuePrev; - int hash = getHash(e.key); - remove(e.key, hash); + trimNonResidentQueue(); + } + } + + void trimNonResidentQueue() { + int residentCount = mapSize - queue2Size; + int maxQueue2SizeHigh = nonResidentQueueSizeHigh * residentCount; + int maxQueue2Size = nonResidentQueueSize * residentCount; + while (queue2Size > maxQueue2Size) { + Entry e = queue2.queuePrev; + if (queue2Size <= maxQueue2SizeHigh) { + WeakReference reference = e.reference; + if (reference != null && reference.get() != null) { + 
break; // stop trimming if entry holds a value + } } + int hash = getHash(e.key); + remove(e.key, hash); } } @@ -1025,7 +1032,7 @@ private void removeFromQueue(Entry e) { * @return the key list */ synchronized List keys(boolean cold, boolean nonResident) { - ArrayList keys = new ArrayList(); + ArrayList keys = new ArrayList<>(); if (cold) { Entry start = nonResident ? queue2 : queue; for (Entry e = start.queueNext; e != start; @@ -1041,26 +1048,13 @@ synchronized List keys(boolean cold, boolean nonResident) { return keys; } - /** - * Check whether there is a resident entry for the given key. This - * method does not adjust the internal state of the cache. - * - * @param key the key (may not be null) - * @param hash the hash - * @return true if there is a resident entry - */ - boolean containsKey(long key, int hash) { - Entry e = find(key, hash); - return e != null && e.value != null; - } - /** * Get the set of keys for resident entries. * * @return the set of keys */ synchronized Set keySet() { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (Entry e = stack.stackNext; e != stack; e = e.stackNext) { set.add(e.key); } @@ -1097,17 +1091,22 @@ static class Entry { /** * The key. */ - long key; + final long key; /** * The value. Set to null for non-resident-cold entries. */ V value; + /** + * Weak reference to the value. Set to null for resident entries. + */ + WeakReference reference; + /** * The estimated memory used. */ - int memory; + final int memory; /** * When the item was last moved to the top of the stack. @@ -1140,6 +1139,23 @@ static class Entry { */ Entry mapNext; + + Entry() { + this(0L, null, 0); + } + + Entry(long key, V value, int memory) { + this.key = key; + this.memory = memory; + this.value = value; + } + + Entry(Entry old) { + this(old.key, old.value, old.memory); + this.reference = old.reference; + this.topMove = old.topMove; + } + /** * Whether this entry is hot. Cold entries are in one of the two queues. 
* @@ -1149,6 +1165,46 @@ boolean isHot() { return queueNext == null; } + V getValue() { + return value == null ? reference.get() : value; + } + + int getMemory() { + return value == null ? 0 : memory; + } } + /** + * The cache configuration. + */ + public static class Config { + + /** + * The maximum memory to use (1 or larger). + */ + public long maxMemory = 1; + + /** + * The number of cache segments (must be a power of 2). + */ + public int segmentCount = 16; + + /** + * How many other item are to be moved to the top of the stack before + * the current item is moved. + */ + public int stackMoveDistance = 32; + + /** + * Low water mark for the number of entries in the non-resident queue, + * as a factor of the number of all other entries in the map. + */ + public final int nonResidentQueueSize = 3; + + /** + * High watermark for the number of entries in the non-resident queue, + * as a factor of the number of all other entries in the map + */ + public final int nonResidentQueueSizeHigh = 12; + } } diff --git a/h2/src/main/org/h2/mvstore/cache/FilePathCache.java b/h2/src/main/org/h2/mvstore/cache/FilePathCache.java index 2a744db789..fc04065198 100644 --- a/h2/src/main/org/h2/mvstore/cache/FilePathCache.java +++ b/h2/src/main/org/h2/mvstore/cache/FilePathCache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.cache; @@ -10,6 +10,7 @@ import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import org.h2.store.fs.FileBase; +import org.h2.store.fs.FilePath; import org.h2.store.fs.FilePathWrapper; /** @@ -17,6 +18,18 @@ */ public class FilePathCache extends FilePathWrapper { + /** + * The instance. 
+ */ + public static final FilePathCache INSTANCE = new FilePathCache(); + + /** + * Register the file system. + */ + static { + FilePath.register(INSTANCE); + } + public static FileChannel wrap(FileChannel f) { return new FileCache(f); } @@ -38,9 +51,15 @@ public static class FileCache extends FileBase { private static final int CACHE_BLOCK_SIZE = 4 * 1024; private final FileChannel base; - // 1 MB cache size - private final CacheLongKeyLIRS cache = - new CacheLongKeyLIRS(1024 * 1024); + + private final CacheLongKeyLIRS cache; + + { + CacheLongKeyLIRS.Config cc = new CacheLongKeyLIRS.Config(); + // 1 MB cache size + cc.maxMemory = 1024 * 1024; + cache = new CacheLongKeyLIRS<>(cc); + } FileCache(FileChannel base) { this.base = base; @@ -68,7 +87,7 @@ public int read(ByteBuffer dst) throws IOException { } @Override - public int read(ByteBuffer dst, long position) throws IOException { + public synchronized int read(ByteBuffer dst, long position) throws IOException { long cachePos = getCachePos(position); int off = (int) (position - cachePos); int len = CACHE_BLOCK_SIZE - off; @@ -111,20 +130,20 @@ public long size() throws IOException { } @Override - public FileChannel truncate(long newSize) throws IOException { + public synchronized FileChannel truncate(long newSize) throws IOException { cache.clear(); base.truncate(newSize); return this; } @Override - public int write(ByteBuffer src, long position) throws IOException { + public synchronized int write(ByteBuffer src, long position) throws IOException { clearCache(src, position); return base.write(src, position); } @Override - public int write(ByteBuffer src) throws IOException { + public synchronized int write(ByteBuffer src) throws IOException { clearCache(src, position()); return base.write(src); } diff --git a/h2/src/main/org/h2/mvstore/cache/package.html b/h2/src/main/org/h2/mvstore/cache/package.html index 30aa0d81f9..0821fb4922 100644 --- a/h2/src/main/org/h2/mvstore/cache/package.html +++ 
b/h2/src/main/org/h2/mvstore/cache/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/db/LobStorageMap.java b/h2/src/main/org/h2/mvstore/db/LobStorageMap.java new file mode 100644 index 0000000000..16d74229ae --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/LobStorageMap.java @@ -0,0 +1,563 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.api.ErrorCode; +import org.h2.engine.Database; +import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.StreamStore; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.BasicDataType; +import org.h2.mvstore.type.ByteArrayDataType; +import org.h2.mvstore.type.LongDataType; +import org.h2.store.CountingReaderInputStream; +import org.h2.store.LobStorageFrontend; +import org.h2.store.LobStorageInterface; +import org.h2.store.RangeInputStream; +import org.h2.util.IOUtils; +import org.h2.util.StringUtils; +import org.h2.value.Value; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; +import org.h2.value.ValueNull; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; + +/** + * This class stores LOB objects in the database, in maps. This is the back-end + * i.e. the server side of the LOB storage. 
+ */ +public final class LobStorageMap implements LobStorageInterface +{ + private static final boolean TRACE = false; + + private final Database database; + final MVStore mvStore; + private final AtomicLong nextLobId = new AtomicLong(0); + + /** + * The lob metadata map. It contains the mapping from the lob id + * (which is a long) to the blob metadata, including stream store id (which is a byte array). + */ + private final MVMap lobMap; + + /** + * The lob metadata map for temporary lobs. It contains the mapping from the lob id + * (which is a long) to the stream store id (which is a byte array). + * + * Key: lobId (long) + * Value: streamStoreId (byte[]) + */ + private final MVMap tempLobMap; + + /** + * The reference map. It is used to remove data from the stream store: if no + * more entries for the given streamStoreId exist, the data is removed from + * the stream store. + */ + private final MVMap refMap; + + private final StreamStore streamStore; + + + /** + * Open map used to store LOB metadata + * @param txStore containing map + * @return MVMap instance + */ + public static MVMap openLobMap(TransactionStore txStore) { + return txStore.openMap("lobMap", LongDataType.INSTANCE, LobStorageMap.BlobMeta.Type.INSTANCE); + } + + /** + * Open map used to store LOB data + * @param txStore containing map + * @return MVMap instance + */ + public static MVMap openLobDataMap(TransactionStore txStore) { + return txStore.openMap("lobData", LongDataType.INSTANCE, ByteArrayDataType.INSTANCE); + } + + public LobStorageMap(Database database) { + this.database = database; + Store s = database.getStore(); + TransactionStore txStore = s.getTransactionStore(); + mvStore = s.getMvStore(); + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + lobMap = openLobMap(txStore); + tempLobMap = txStore.openMap("tempLobMap", LongDataType.INSTANCE, ByteArrayDataType.INSTANCE); + refMap = txStore.openMap("lobRef", BlobReference.Type.INSTANCE, NullValueDataType.INSTANCE); + 
/* The stream store data map. + * + * Key: stream store block id (long). + * Value: data (byte[]). + */ + MVMap dataMap = openLobDataMap(txStore); + streamStore = new StreamStore(dataMap); + // garbage collection of the last blocks + if (!database.isReadOnly()) { + // don't re-use block ids, except at the very end + Long last = dataMap.lastKey(); + if (last != null) { + streamStore.setNextKey(last + 1); + } + // find the latest lob ID + Long id1 = lobMap.lastKey(); + Long id2 = tempLobMap.lastKey(); // just in case we had unclean shutdown + long next = 1; + if (id1 != null) { + next = id1 + 1; + } + if (id2 != null) { + next = Math.max(next, id2 + 1); + } + nextLobId.set( next ); + } + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public ValueBlob createBlob(InputStream in, long maxLength) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + if (maxLength != -1 + && maxLength <= database.getMaxLengthInplaceLob()) { + byte[] small = new byte[(int) maxLength]; + int len = IOUtils.readFully(in, small, (int) maxLength); + if (len > maxLength) { + throw new IllegalStateException( + "len > blobLength, " + len + " > " + maxLength); + } + if (len < small.length) { + small = Arrays.copyOf(small, len); + } + return ValueBlob.createSmall(small); + } + if (maxLength != -1) { + in = new RangeInputStream(in, 0L, maxLength); + } + return createBlob(in); + } catch (IllegalStateException e) { + throw DbException.get(ErrorCode.OBJECT_CLOSED, e); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public ValueClob createClob(Reader reader, long maxLength) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + // we multiple by 3 here to get the worst-case size in bytes + if (maxLength != -1 + && maxLength * 3 <= database.getMaxLengthInplaceLob()) { + char[] small = new char[(int) maxLength]; + int 
len = IOUtils.readFully(reader, small, (int) maxLength); + if (len > maxLength) { + throw new IllegalStateException( + "len > blobLength, " + len + " > " + maxLength); + } + byte[] utf8 = new String(small, 0, len) + .getBytes(StandardCharsets.UTF_8); + if (utf8.length > database.getMaxLengthInplaceLob()) { + throw new IllegalStateException( + "len > maxinplace, " + utf8.length + " > " + + database.getMaxLengthInplaceLob()); + } + return ValueClob.createSmall(utf8, len); + } + if (maxLength < 0) { + maxLength = Long.MAX_VALUE; + } + CountingReaderInputStream in = new CountingReaderInputStream(reader, maxLength); + ValueBlob blob = createBlob(in); + LobData lobData = blob.getLobData(); + return new ValueClob(lobData, blob.octetLength(), in.getLength()); + } catch (IllegalStateException e) { + throw DbException.get(ErrorCode.OBJECT_CLOSED, e); + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private ValueBlob createBlob(InputStream in) throws IOException { + byte[] streamStoreId; + try { + streamStoreId = streamStore.put(in); + } catch (Exception e) { + throw DataUtils.convertToIOException(e); + } + long lobId = generateLobId(); + long length = streamStore.length(streamStoreId); + final int tableId = LobStorageFrontend.TABLE_TEMP; + tempLobMap.put(lobId, streamStoreId); + BlobReference key = new BlobReference(streamStoreId, lobId); + refMap.put(key, ValueNull.INSTANCE); + ValueBlob lob = new ValueBlob(new LobDataDatabase(database, tableId, lobId), length); + if (TRACE) { + trace("create " + tableId + "/" + lobId); + } + return lob; + } + + private long generateLobId() { + return nextLobId.getAndIncrement(); + } + + @Override + public boolean isReadOnly() { + return database.isReadOnly(); + } + + @Override + public ValueLob copyLob(ValueLob old, int tableId) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + final LobDataDatabase lobData = 
(LobDataDatabase) old.getLobData(); + final int type = old.getValueType(); + final long oldLobId = lobData.getLobId(); + long octetLength = old.octetLength(); + // get source lob + final byte[] streamStoreId; + if (isTemporaryLob(lobData.getTableId())) { + streamStoreId = tempLobMap.get(oldLobId); + } else { + BlobMeta value = lobMap.get(oldLobId); + streamStoreId = value.streamStoreId; + } + // create destination lob + final long newLobId = generateLobId(); + if (isTemporaryLob(tableId)) { + tempLobMap.put(newLobId, streamStoreId); + } else { + BlobMeta value = new BlobMeta(streamStoreId, tableId, + type == Value.CLOB ? old.charLength() : octetLength, 0); + lobMap.put(newLobId, value); + } + BlobReference refMapKey = new BlobReference(streamStoreId, newLobId); + refMap.put(refMapKey, ValueNull.INSTANCE); + LobDataDatabase newLobData = new LobDataDatabase(database, tableId, newLobId); + ValueLob lob = type == Value.BLOB ? new ValueBlob(newLobData, octetLength) + : new ValueClob(newLobData, octetLength, old.charLength()); + if (TRACE) { + trace("copy " + lobData.getTableId() + "/" + lobData.getLobId() + + " > " + tableId + "/" + newLobId); + } + return lob; + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public InputStream getInputStream(long lobId, long byteCount) + throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + byte[] streamStoreId = tempLobMap.get(lobId); + if (streamStoreId == null) { + BlobMeta value = lobMap.get(lobId); + streamStoreId = value.streamStoreId; + } + if (streamStoreId == null) { + throw DbException.get(ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, "" + lobId); + } + InputStream inputStream = streamStore.get(streamStoreId); + return new LobInputStream(inputStream); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public InputStream getInputStream(long lobId, int tableId, long byteCount) + throws IOException { + MVStore.TxCounter txCounter = 
mvStore.registerVersionUsage(); + try { + byte[] streamStoreId; + if (isTemporaryLob(tableId)) { + streamStoreId = tempLobMap.get(lobId); + } else { + BlobMeta value = lobMap.get(lobId); + streamStoreId = value.streamStoreId; + } + if (streamStoreId == null) { + throw DbException.get(ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, "" + lobId); + } + InputStream inputStream = streamStore.get(streamStoreId); + return new LobInputStream(inputStream); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private final class LobInputStream extends FilterInputStream { + + public LobInputStream(InputStream in) { + super(in); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + return super.read(b, off, len); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public int read() throws IOException { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + return super.read(); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + } + + @Override + public void removeAllForTable(int tableId) { + if (mvStore.isClosed()) { + return; + } + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + if (isTemporaryLob(tableId)) { + final Iterator iter = tempLobMap.keyIterator(0L); + while (iter.hasNext()) { + long lobId = iter.next(); + removeLob(tableId, lobId); + } + tempLobMap.clear(); + } else { + final ArrayList list = new ArrayList<>(); + // This might not be very efficient, but should only happen + // on DROP TABLE. + // To speed it up, we would need yet another map. 
+ for (Entry e : lobMap.entrySet()) { + BlobMeta value = e.getValue(); + if (value.tableId == tableId) { + list.add(e.getKey()); + } + } + for (long lobId : list) { + removeLob(tableId, lobId); + } + } + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + @Override + public void removeLob(ValueLob lob) { + MVStore.TxCounter txCounter = mvStore.registerVersionUsage(); + try { + LobDataDatabase lobData = (LobDataDatabase) lob.getLobData(); + int tableId = lobData.getTableId(); + long lobId = lobData.getLobId(); + removeLob(tableId, lobId); + } finally { + mvStore.deregisterVersionUsage(txCounter); + } + } + + private void removeLob(int tableId, long lobId) { + if (TRACE) { + trace("remove " + tableId + "/" + lobId); + } + byte[] streamStoreId; + if (isTemporaryLob(tableId)) { + streamStoreId = tempLobMap.remove(lobId); + if (streamStoreId == null) { + // already removed + return; + } + } else { + BlobMeta value = lobMap.remove(lobId); + if (value == null) { + // already removed + return; + } + streamStoreId = value.streamStoreId; + } + BlobReference key = new BlobReference(streamStoreId, lobId); + Value existing = refMap.remove(key); + assert existing != null; + // check if there are more entries for this streamStoreId + key = new BlobReference(streamStoreId, 0L); + BlobReference value = refMap.ceilingKey(key); + boolean hasMoreEntries = false; + if (value != null) { + byte[] s2 = value.streamStoreId; + if (Arrays.equals(streamStoreId, s2)) { + if (TRACE) { + trace(" stream still needed in lob " + value.lobId); + } + hasMoreEntries = true; + } + } + if (!hasMoreEntries) { + if (TRACE) { + trace(" remove stream " + StringUtils.convertBytesToHex(streamStoreId)); + } + streamStore.remove(streamStoreId); + } + } + + private static boolean isTemporaryLob(int tableId) { + return tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE || tableId == LobStorageFrontend.TABLE_TEMP + || tableId == LobStorageFrontend.TABLE_RESULT; + } + + private static void 
trace(String op) { + System.out.println("[" + Thread.currentThread().getName() + "] LOB " + op); + } + + + public static final class BlobReference implements Comparable + { + public final byte[] streamStoreId; + public final long lobId; + + public BlobReference(byte[] streamStoreId, long lobId) { + this.streamStoreId = streamStoreId; + this.lobId = lobId; + } + + @Override + public int compareTo(BlobReference other) { + int res = Integer.compare(streamStoreId.length, other.streamStoreId.length); + if (res == 0) { + for (int i = 0; res == 0 && i < streamStoreId.length; i++) { + res = Byte.compare(streamStoreId[i], other.streamStoreId[i]); + } + if (res == 0) { + res = Long.compare(lobId, other.lobId); + } + } + return res; + } + + public static final class Type extends BasicDataType { + public static final Type INSTANCE = new Type(); + + private Type() {} + + @Override + public int getMemory(BlobReference blobReference) { + return blobReference.streamStoreId.length + 8; + } + + @Override + public int compare(BlobReference one, BlobReference two) { + return one == two ? 0 : one == null ? 1 : two == null ? -1 : one.compareTo(two); + } + + @Override + public void write(WriteBuffer buff, BlobReference blobReference) { + buff.putVarInt(blobReference.streamStoreId.length); + buff.put(blobReference.streamStoreId); + buff.putVarLong(blobReference.lobId); + } + + @Override + public BlobReference read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + byte[] streamStoreId = new byte[len]; + buff.get(streamStoreId); + long blobId = DataUtils.readVarLong(buff); + return new BlobReference(streamStoreId, blobId); + } + + @Override + public BlobReference[] createStorage(int size) { + return new BlobReference[size]; + } + } + } + + public static final class BlobMeta + { + /** + * Stream identifier. It is used as a key in LOB data map. 
+ */ + public final byte[] streamStoreId; + final int tableId; + final long byteCount; + final long hash; + + public BlobMeta(byte[] streamStoreId, int tableId, long byteCount, long hash) { + this.streamStoreId = streamStoreId; + this.tableId = tableId; + this.byteCount = byteCount; + this.hash = hash; + } + + public static final class Type extends BasicDataType { + public static final Type INSTANCE = new Type(); + + private Type() { + } + + @Override + public int getMemory(BlobMeta blobMeta) { + return blobMeta.streamStoreId.length + 20; + } + + @Override + public void write(WriteBuffer buff, BlobMeta blobMeta) { + buff.putVarInt(blobMeta.streamStoreId.length); + buff.put(blobMeta.streamStoreId); + buff.putVarInt(blobMeta.tableId); + buff.putVarLong(blobMeta.byteCount); + buff.putLong(blobMeta.hash); + } + + @Override + public BlobMeta read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + byte[] streamStoreId = new byte[len]; + buff.get(streamStoreId); + int tableId = DataUtils.readVarInt(buff); + long byteCount = DataUtils.readVarLong(buff); + long hash = buff.getLong(); + return new BlobMeta(streamStoreId, tableId, byteCount, hash); + } + + @Override + public BlobMeta[] createStorage(int size) { + return new BlobMeta[size]; + } + } + } +} diff --git a/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java b/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java index 220c5515ad..0cceba0c96 100644 --- a/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVDelegateIndex.java @@ -1,82 +1,95 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; import java.util.List; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; import org.h2.message.DbException; +import org.h2.mvstore.MVMap; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; -import org.h2.value.ValueLong; +import org.h2.value.VersionedValue; /** * An index that delegates indexing to another index. */ -public class MVDelegateIndex extends BaseIndex implements MVIndex { +public class MVDelegateIndex extends MVIndex { private final MVPrimaryIndex mainIndex; - public MVDelegateIndex(MVTable table, int id, String name, - MVPrimaryIndex mainIndex, - IndexType indexType) { - IndexColumn[] cols = IndexColumn.wrap(new Column[] { table - .getColumn(mainIndex.getMainIndexColumn()) }); - this.initBaseIndex(table, id, name, cols, indexType); + public MVDelegateIndex(MVTable table, int id, String name, MVPrimaryIndex mainIndex, IndexType indexType) { + super(table, id, name, IndexColumn.wrap(new Column[] { table.getColumn(mainIndex.getMainIndexColumn()) }), + 1, indexType); this.mainIndex = mainIndex; if (id < 0) { - throw DbException.throwInternalError("" + name); + throw DbException.getInternalError(name); } } + @Override + public RowFactory getRowFactory() { + return mainIndex.getRowFactory(); + } + @Override public void addRowsToBuffer(List rows, String bufferName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override public void addBufferedRows(List bufferNames) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); + } + + @Override + public MVMap> getMVMap() { + return mainIndex.getMVMap(); } @Override - public 
void add(Session session, Row row) { + public void add(SessionLocal session, Row row) { // nothing to do } + @Override + public Row getRow(SessionLocal session, long key) { + return mainIndex.getRow(session, key); + } + + @Override + public boolean isRowIdIndex() { + return true; + } + @Override public boolean canGetFirstOrLast() { return true; } @Override - public void close(Session session) { + public void close(SessionLocal session) { // nothing to do } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - ValueLong min = mainIndex.getKey(first, - MVPrimaryIndex.MIN, MVPrimaryIndex.MIN); - // ifNull is MIN_VALUE as well, because the column is never NULL - // so avoid returning all rows (returning one row is OK) - ValueLong max = mainIndex.getKey(last, - MVPrimaryIndex.MAX, MVPrimaryIndex.MIN); - return mainIndex.find(session, min, max); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + return mainIndex.find(session, first, last); } @Override - public Cursor findFirstOrLast(Session session, boolean first) { + public Cursor findFirstOrLast(SessionLocal session, boolean first) { return mainIndex.findFirstOrLast(session, first); } @@ -89,50 +102,51 @@ public int getColumnIndex(Column col) { } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return 10 * getCostRangeIndex(masks, - mainIndex.getRowCountApproximation(), filter, sortOrder); + public boolean isFirstColumn(Column column) { + return getColumnIndex(column) == 0; } @Override - public boolean needRebuild() { - return false; + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return 10 * getCostRangeIndex(masks, mainIndex.getRowCountApproximation(session), + filters, filter, sortOrder, true, allColumnsSet); } @Override - public void remove(Session session, Row row) { - // nothing to do + 
public boolean needRebuild() { + return false; } @Override - public void remove(Session session) { - mainIndex.setMainIndexColumn(-1); + public void remove(SessionLocal session, Row row) { + // nothing to do } @Override - public void truncate(Session session) { + public void update(SessionLocal session, Row oldRow, Row newRow) { // nothing to do } @Override - public void checkRename() { - // ok + public void remove(SessionLocal session) { + mainIndex.setMainIndexColumn(SearchRow.ROWID_INDEX); } @Override - public long getRowCount(Session session) { - return mainIndex.getRowCount(session); + public void truncate(SessionLocal session) { + // nothing to do } @Override - public long getRowCountApproximation() { - return mainIndex.getRowCountApproximation(); + public long getRowCount(SessionLocal session) { + return mainIndex.getRowCount(session); } @Override - public long getDiskSpaceUsed() { - return 0; + public long getRowCountApproximation(SessionLocal session) { + return mainIndex.getRowCountApproximation(session); } } diff --git a/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java b/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java new file mode 100644 index 0000000000..e8e9c01dae --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/MVInDoubtTransaction.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import org.h2.mvstore.MVStore; +import org.h2.mvstore.tx.Transaction; +import org.h2.store.InDoubtTransaction; + +/** + * An in-doubt transaction. 
+ */ +final class MVInDoubtTransaction implements InDoubtTransaction { + + private final MVStore store; + private final Transaction transaction; + private int state = InDoubtTransaction.IN_DOUBT; + + MVInDoubtTransaction(MVStore store, Transaction transaction) { + this.store = store; + this.transaction = transaction; + } + + @Override + public void setState(int state) { + if (state == InDoubtTransaction.COMMIT) { + transaction.commit(); + } else { + transaction.rollback(); + } + store.commit(); + this.state = state; + } + + @Override + public int getState() { + return state; + } + + @Override + public String getTransactionName() { + return transaction.getName(); + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/MVIndex.java b/h2/src/main/org/h2/mvstore/db/MVIndex.java index 7a6ca20321..a831e6d9c3 100644 --- a/h2/src/main/org/h2/mvstore/db/MVIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; @@ -8,12 +8,22 @@ import java.util.List; import org.h2.index.Index; +import org.h2.index.IndexType; +import org.h2.mvstore.MVMap; import org.h2.result.Row; +import org.h2.table.IndexColumn; +import org.h2.table.Table; +import org.h2.value.VersionedValue; /** * An index that stores the data in an MVStore. */ -public interface MVIndex extends Index { +public abstract class MVIndex extends Index { + + protected MVIndex(Table newTable, int id, String name, IndexColumn[] newIndexColumns, int uniqueColumnCount, + IndexType newIndexType) { + super(newTable, id, name, newIndexColumns, uniqueColumnCount, newIndexType); + } /** * Add the rows to a temporary storage (not to the index yet). 
The rows are @@ -22,7 +32,7 @@ public interface MVIndex extends Index { * @param rows the rows * @param bufferName the name of the temporary storage */ - void addRowsToBuffer(List rows, String bufferName); + public abstract void addRowsToBuffer(List rows, String bufferName); /** * Add all the index data from the buffers to the index. The index will @@ -30,6 +40,8 @@ public interface MVIndex extends Index { * * @param bufferNames the names of the temporary storage */ - void addBufferedRows(List bufferNames); + public abstract void addBufferedRows(List bufferNames); + + public abstract MVMap> getMVMap(); } diff --git a/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java b/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java new file mode 100644 index 0000000000..e00e19e7ce --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/MVPlainTempResult.java @@ -0,0 +1,124 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import org.h2.engine.Database; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.mvstore.Cursor; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVMap.Builder; +import org.h2.mvstore.type.LongDataType; +import org.h2.result.ResultExternal; +import org.h2.result.RowFactory.DefaultRowFactory; +import org.h2.value.Value; +import org.h2.value.ValueRow; + +/** + * Plain temporary result. + */ +class MVPlainTempResult extends MVTempResult { + + /** + * Map with identities of rows as keys rows as values. + */ + private final MVMap map; + + /** + * Counter for the identities of rows. A separate counter is used instead of + * {@link #rowCount} because rows due to presence of {@link #removeRow(Value[])} + * method to ensure that each row will have an own identity. + */ + private long counter; + + /** + * Cursor for the {@link #next()} method. 
+ */ + private Cursor cursor; + + /** + * Creates a shallow copy of the result. + * + * @param parent + * parent result + */ + private MVPlainTempResult(MVPlainTempResult parent) { + super(parent); + this.map = parent.map; + } + + /** + * Creates a new plain temporary result. This result does not sort its rows, + * but it can be used in index-sorted queries and it can preserve additional + * columns for WITH TIES processing. + * + * @param database + * database + * @param expressions + * column expressions + * @param visibleColumnCount + * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY clause + */ + MVPlainTempResult(Database database, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { + super(database, expressions, visibleColumnCount, resultColumnCount); + ValueDataType valueType = new ValueDataType(database, new int[resultColumnCount]); + valueType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, expressions, null, false)); + Builder builder = new MVMap.Builder().keyType(LongDataType.INSTANCE) + .valueType(valueType).singleWriter(); + map = store.openMap("tmp", builder); + } + + @Override + public int addRow(Value[] values) { + assert parent == null; + map.append(counter++, ValueRow.get(values)); + return ++rowCount; + } + + @Override + public boolean contains(Value[] values) { + throw DbException.getUnsupportedException("contains()"); + } + + @Override + public synchronized ResultExternal createShallowCopy() { + if (parent != null) { + return parent.createShallowCopy(); + } + if (closed) { + return null; + } + childCount++; + return new MVPlainTempResult(this); + } + + @Override + public Value[] next() { + if (cursor == null) { + cursor = map.cursor(null); + } + if (!cursor.hasNext()) { + return null; + } + cursor.next(); + return cursor.getValue().getList(); + } + + @Override 
+ public int removeRow(Value[] values) { + throw DbException.getUnsupportedException("removeRow()"); + } + + @Override + public void reset() { + cursor = null; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java b/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java index c95b626f46..bf1a576a7f 100644 --- a/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVPrimaryIndex.java @@ -1,27 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Map.Entry; - +import java.util.concurrent.atomic.AtomicLong; import org.h2.api.ErrorCode; -import org.h2.engine.Constants; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; +import org.h2.index.SingleRowCursor; import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.mvstore.db.TransactionStore.TransactionMap; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.TransactionMap.TMIterator; +import org.h2.mvstore.type.LongDataType; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; @@ -29,56 +29,35 @@ import org.h2.table.IndexColumn; import org.h2.table.TableFilter; import org.h2.value.Value; -import org.h2.value.ValueArray; -import 
org.h2.value.ValueLong; +import org.h2.value.ValueLob; import org.h2.value.ValueNull; +import org.h2.value.VersionedValue; /** * A table stored in a MVStore. */ -public class MVPrimaryIndex extends BaseIndex { - - /** - * The minimum long value. - */ - static final ValueLong MIN = ValueLong.get(Long.MIN_VALUE); - - /** - * The maximum long value. - */ - static final ValueLong MAX = ValueLong.get(Long.MAX_VALUE); - - /** - * The zero long value. - */ - static final ValueLong ZERO = ValueLong.get(0); +public class MVPrimaryIndex extends MVIndex { private final MVTable mvTable; private final String mapName; - private TransactionMap dataMap; - private long lastKey; - private int mainIndexColumn = -1; + private final TransactionMap dataMap; + private final AtomicLong lastKey = new AtomicLong(); + private int mainIndexColumn = SearchRow.ROWID_INDEX; - public MVPrimaryIndex(Database db, MVTable table, int id, - IndexColumn[] columns, IndexType indexType) { + public MVPrimaryIndex(Database db, MVTable table, int id, IndexColumn[] columns, IndexType indexType) { + super(table, id, table.getName() + "_DATA", columns, 0, indexType); this.mvTable = table; - initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType); - int[] sortTypes = new int[columns.length]; - for (int i = 0; i < columns.length; i++) { - sortTypes[i] = SortOrder.ASCENDING; - } - ValueDataType keyType = new ValueDataType(null, null, null); - ValueDataType valueType = new ValueDataType(db.getCompareMode(), db, - sortTypes); + RowDataType valueType = table.getRowFactory().getRowDataType(); mapName = "table." 
+ getId(); - Transaction t = mvTable.getTransaction(null); - dataMap = t.openMap(mapName, keyType, valueType); - t.commit(); - if (!table.isPersistData()) { - dataMap.map.setVolatile(true); + Transaction t = mvTable.getTransactionBegin(); + dataMap = t.openMap(mapName, LongDataType.INSTANCE, valueType); + dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); + if (!db.isStarting()) { + dataMap.clear(); } - Value k = dataMap.lastKey(); - lastKey = k == null ? 0 : k.getLong(); + t.commit(); + Long k = dataMap.map.lastKey(); // include uncommitted keys as well + lastKey.set(k == null ? 0 : k); } @Override @@ -88,7 +67,7 @@ public String getCreateSQL() { @Override public String getPlanSQL() { - return table.getSQL() + ".tableScan"; + return table.getSQL(new StringBuilder(), TRACE_SQL_FLAGS).append(".tableScan").toString(); } public void setMainIndexColumn(int mainIndexColumn) { @@ -100,15 +79,15 @@ public int getMainIndexColumn() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { - if (mainIndexColumn == -1) { + public void add(SessionLocal session, Row row) { + if (mainIndexColumn == SearchRow.ROWID_INDEX) { if (row.getKey() == 0) { - row.setKey(++lastKey); + row.setKey(lastKey.incrementAndGet()); } } else { long c = row.getValue(mainIndexColumn).getLong(); @@ -118,113 +97,184 @@ public void add(Session session, Row row) { if (mvTable.getContainsLargeObject()) { for (int i = 0, len = row.getColumnCount(); i < len; i++) { Value v = row.getValue(i); - Value v2 = v.link(database, getId()); - if (v2.isLinked()) { - session.unlinkAtCommitStop(v2); - } - if (v != v2) { - row.setValue(i, v2); + if (v instanceof ValueLob) { + ValueLob lob = ((ValueLob) v).copy(database, getId()); + session.removeAtCommitStop(lob); + if (v != lob) { + row.setValue(i, lob); + } } } } - TransactionMap map = getMap(session); - Value key = 
ValueLong.get(row.getKey()); - Value old = map.getLatest(key); - if (old != null) { - String sql = "PRIMARY KEY ON " + table.getSQL(); - if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) { - sql += "(" + indexColumns[mainIndexColumn].getSQL() + ")"; + TransactionMap map = getMap(session); + long rowKey = row.getKey(); + try { + Row old = (Row)map.putIfAbsent(rowKey, row); + if (old != null) { + int errorCode = ErrorCode.CONCURRENT_UPDATE_1; + if (map.getImmediate(rowKey) != null || map.getFromSnapshot(rowKey) != null) { + // committed + errorCode = ErrorCode.DUPLICATE_KEY_1; + } + DbException e = DbException.get(errorCode, + getDuplicatePrimaryKeyMessage(mainIndexColumn).append(' ').append(old).toString()); + e.setSource(this); + throw e; } - DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql); - e.setSource(this); - throw e; + } catch (MVStoreException e) { + throw mvTable.convertException(e); } - try { - map.put(key, ValueArray.get(row.getValueList())); - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); + // because it's possible to directly update the key using the _rowid_ + // syntax + long last; + while (rowKey > (last = lastKey.get())) { + if(lastKey.compareAndSet(last, rowKey)) break; } - lastKey = Math.max(lastKey, row.getKey()); } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { if (mvTable.getContainsLargeObject()) { for (int i = 0, len = row.getColumnCount(); i < len; i++) { Value v = row.getValue(i); - if (v.isLinked()) { - session.unlinkAtCommit(v); + if (v instanceof ValueLob) { + session.removeAtCommit((ValueLob) v); } } } - TransactionMap map = getMap(session); + TransactionMap map = getMap(session); try { - Value old = map.remove(ValueLong.get(row.getKey())); - if (old == null) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - getSQL() + ": " + row.getKey()); + Row existing 
= (Row)map.remove(row.getKey()); + if (existing == null) { + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(row.getKey()); + throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); + } catch (MVStoreException e) { + throw mvTable.convertException(e); } } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - ValueLong min, max; - if (first == null) { - min = MIN; - } else if (mainIndexColumn < 0) { - min = ValueLong.get(first.getKey()); - } else { - ValueLong v = (ValueLong) first.getValue(mainIndexColumn); - if (v == null) { - min = ValueLong.get(first.getKey()); - } else { - min = v; + public void update(SessionLocal session, Row oldRow, Row newRow) { + if (mainIndexColumn != SearchRow.ROWID_INDEX) { + long c = newRow.getValue(mainIndexColumn).getLong(); + newRow.setKey(c); + } + long key = oldRow.getKey(); + assert mainIndexColumn != SearchRow.ROWID_INDEX || key != 0; + assert key == newRow.getKey() : key + " != " + newRow.getKey(); + if (mvTable.getContainsLargeObject()) { + for (int i = 0, len = oldRow.getColumnCount(); i < len; i++) { + Value oldValue = oldRow.getValue(i); + Value newValue = newRow.getValue(i); + if (oldValue != newValue) { + if (oldValue instanceof ValueLob) { + session.removeAtCommit((ValueLob) oldValue); + } + if (newValue instanceof ValueLob) { + ValueLob lob = ((ValueLob) newValue).copy(database, getId()); + session.removeAtCommitStop(lob); + if (newValue != lob) { + newRow.setValue(i, lob); + } + } + } + } + } + + TransactionMap map = getMap(session); + try { + Row existing = (Row)map.put(key, newRow); + if (existing == null) { + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(key); + throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, 
builder.toString()); } + } catch (MVStoreException e) { + throw mvTable.convertException(e); + } + + + // because it's possible to directly update the key using the _rowid_ + // syntax + if (newRow.getKey() > lastKey.get()) { + lastKey.set(newRow.getKey()); } - if (last == null) { - max = MAX; - } else if (mainIndexColumn < 0) { - max = ValueLong.get(last.getKey()); + } + + /** + * Lock a single row. + * + * @param session database session + * @param row to lock + * @return row object if it exists + */ + Row lockRow(SessionLocal session, Row row) { + TransactionMap map = getMap(session); + long key = row.getKey(); + return lockRow(map, key); + } + + private Row lockRow(TransactionMap map, long key) { + try { + return setRowKey((Row) map.lock(key), key); + } catch (MVStoreException ex) { + throw mvTable.convertException(ex); + } + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + long min = extractPKFromRow(first, Long.MIN_VALUE); + long max = extractPKFromRow(last, Long.MAX_VALUE); + return find(session, min, max); + } + + private long extractPKFromRow(SearchRow row, long defaultValue) { + long result; + if (row == null) { + result = defaultValue; + } else if (mainIndexColumn == SearchRow.ROWID_INDEX) { + result = row.getKey(); } else { - ValueLong v = (ValueLong) last.getValue(mainIndexColumn); + Value v = row.getValue(mainIndexColumn); if (v == null) { - max = ValueLong.get(last.getKey()); + result = row.getKey(); + } else if (v == ValueNull.INSTANCE) { + result = 0L; } else { - max = v; + result = v.getLong(); } } - TransactionMap map = getMap(session); - return new MVStoreCursor(map.entryIterator(min), max); + return result; } + @Override public MVTable getTable() { return mvTable; } @Override - public Row getRow(Session session, long key) { - TransactionMap map = getMap(session); - Value v = map.get(ValueLong.get(key)); - ValueArray array = (ValueArray) v; - Row row = new Row(array.getList(), 0); - row.setKey(key); 
- return row; + public Row getRow(SessionLocal session, long key) { + TransactionMap map = getMap(session); + Row row = (Row) map.getFromSnapshot(key); + if (row == null) { + throw DbException.get(ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX, getTraceSQL(), String.valueOf(key)); + } + return setRowKey(row, key); } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { try { - long cost = 10 * (dataMap.sizeAsLongMax() + Constants.COST_ROW_OFFSET); - return cost; - } catch (IllegalStateException e) { + return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(), + filters, filter, sortOrder, true, allColumnsSet); + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -232,25 +282,29 @@ public double getCost(Session session, int[] masks, TableFilter filter, @Override public int getColumnIndex(Column col) { // can not use this index - use the delegate index instead - return -1; + return SearchRow.ROWID_INDEX; + } + + @Override + public boolean isFirstColumn(Column column) { + return false; } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { - Transaction t = mvTable.getTransaction(session); + Transaction t = session.getTransaction(); t.removeMap(map); } } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { if (mvTable.getContainsLargeObject()) { database.getLobStorage().removeAllForTable(table.getId()); } - map.clear(); + getMap(session).clear(); } @Override @@ -259,20 +313,10 @@ public boolean canGetFirstOrLast() { } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - 
TransactionMap map = getMap(session); - ValueLong v = (ValueLong) (first ? map.firstKey() : map.lastKey()); - if (v == null) { - return new MVStoreCursor(Collections - .> emptyList().iterator(), null); - } - Value value = map.get(v); - Entry e = new DataUtils.MapEntry(v, value); - @SuppressWarnings("unchecked") - List> list = Arrays.asList(e); - MVStoreCursor c = new MVStoreCursor(list.iterator(), v); - c.next(); - return c; + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + TransactionMap map = getMap(session); + Entry entry = first ? map.firstEntry() : map.lastEntry(); + return new SingleRowCursor(entry != null ? setRowKey((Row) entry.getValue(), entry.getKey()) : null); } @Override @@ -281,9 +325,8 @@ public boolean needRebuild() { } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); - return map.sizeAsLong(); + public long getRowCount(SessionLocal session) { + return getMap(session).sizeAsLong(); } /** @@ -292,22 +335,17 @@ public long getRowCount(Session session) { * @return the maximum number of rows */ public long getRowCountMax() { - try { - return dataMap.sizeAsLongMax(); - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.OBJECT_CLOSED, e); - } + return dataMap.sizeAsLongMax(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return getRowCountMax(); } @Override public long getDiskSpaceUsed() { - // TODO estimate disk space usage - return 0; + return dataMap.map.getRootPage().getDiskSpaceUsed(); } public String getMapName() { @@ -315,42 +353,21 @@ public String getMapName() { } @Override - public void checkRename() { - // ok + public void addRowsToBuffer(List rows, String bufferName) { + throw new UnsupportedOperationException(); } - /** - * Get the key from the row. 
- * - * @param row the row - * @param ifEmpty the value to use if the row is empty - * @param ifNull the value to use if the column is NULL - * @return the key - */ - ValueLong getKey(SearchRow row, ValueLong ifEmpty, ValueLong ifNull) { - if (row == null) { - return ifEmpty; - } - Value v = row.getValue(mainIndexColumn); - if (v == null) { - throw DbException.throwInternalError(row.toString()); - } else if (v == ValueNull.INSTANCE) { - return ifNull; - } - return (ValueLong) v.convertTo(Value.LONG); + @Override + public void addBufferedRows(List bufferNames) { + throw new UnsupportedOperationException(); } - /** - * Search for a specific row or a set of rows. - * - * @param session the session - * @param first the key of the first row - * @param last the key of the last row - * @return the cursor - */ - Cursor find(Session session, ValueLong first, ValueLong last) { - TransactionMap map = getMap(session); - return new MVStoreCursor(map.entryIterator(first), last); + private Cursor find(SessionLocal session, Long first, Long last) { + TransactionMap map = getMap(session); + if (first != null && last != null && first.longValue() == last.longValue()) { + return new SingleRowCursor(setRowKey((Row) map.getFromSnapshot(first), first)); + } + return new MVStoreCursor(map.entryIterator(first, last)); } @Override @@ -364,36 +381,47 @@ public boolean isRowIdIndex() { * @param session the session * @return the map */ - TransactionMap getMap(Session session) { + TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } - Transaction t = mvTable.getTransaction(session); - return dataMap.getInstance(t, Long.MAX_VALUE); + Transaction t = session.getTransaction(); + return dataMap.getInstance(t); + } + + @Override + public MVMap> getMVMap() { + return dataMap.map; + } + + private static Row setRowKey(Row row, long key) { + if (row != null && row.getKey() == 0) { + row.setKey(key); + } + return row; } /** * A cursor. 
*/ - class MVStoreCursor implements Cursor { + static final class MVStoreCursor implements Cursor { - private final Iterator> it; - private final ValueLong last; - private Entry current; + private final TMIterator> it; + private Entry current; private Row row; - public MVStoreCursor(Iterator> it, ValueLong last) { + public MVStoreCursor(TMIterator> it) { this.it = it; - this.last = last; } @Override public Row get() { if (row == null) { if (current != null) { - ValueArray array = (ValueArray) current.getValue(); - row = new Row(array.getList(), 0); - row.setKey(current.getKey().getLong()); + row = (Row)current.getValue(); + if (row.getKey() == 0) { + row.setKey(current.getKey()); + } } } return row; @@ -406,10 +434,7 @@ public SearchRow getSearchRow() { @Override public boolean next() { - current = it.hasNext() ? it.next() : null; - if (current != null && current.getKey().getLong() > last.getLong()) { - current = null; - } + current = it.fetchNext(); row = null; return current != null; } @@ -418,7 +443,5 @@ public boolean next() { public boolean previous() { throw DbException.getUnsupportedException("previous"); } - } - } diff --git a/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java b/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java index e7b3bf41eb..0792c6a17c 100644 --- a/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVSecondaryIndex.java @@ -1,217 +1,224 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.TreeSet; - +import java.util.Objects; +import java.util.PriorityQueue; +import java.util.Queue; import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.IndexType; +import org.h2.index.SingleRowCursor; import org.h2.message.DbException; import org.h2.mvstore.MVMap; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.mvstore.db.TransactionStore.TransactionMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.TransactionMap.TMIterator; +import org.h2.mvstore.type.DataType; import org.h2.result.Row; +import org.h2.result.RowFactory; import org.h2.result.SearchRow; import org.h2.result.SortOrder; -import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; -import org.h2.util.New; -import org.h2.value.CompareMode; import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; +import org.h2.value.VersionedValue; /** - * A table stored in a MVStore. + * An index stored in a MVStore. */ -public class MVSecondaryIndex extends BaseIndex implements MVIndex { +public final class MVSecondaryIndex extends MVIndex { /** * The multi-value table. 
*/ - final MVTable mvTable; - - private final int keyColumns; - private final String mapName; - private TransactionMap dataMap; + private final MVTable mvTable; + private final TransactionMap dataMap; public MVSecondaryIndex(Database db, MVTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { + IndexColumn[] columns, int uniqueColumnCount, IndexType indexType) { + super(table, id, indexName, columns, uniqueColumnCount, indexType); this.mvTable = table; - initBaseIndex(table, id, indexName, columns, indexType); if (!database.isStarting()) { checkIndexColumnTypes(columns); } - // always store the row key in the map key, - // even for unique indexes, as some of the index columns could be null - keyColumns = columns.length + 1; - mapName = "index." + getId(); - int[] sortTypes = new int[keyColumns]; - for (int i = 0; i < columns.length; i++) { - sortTypes[i] = columns[i].sortType; - } - sortTypes[keyColumns - 1] = SortOrder.ASCENDING; - ValueDataType keyType = new ValueDataType( - db.getCompareMode(), db, sortTypes); - ValueDataType valueType = new ValueDataType(null, null, null); - Transaction t = mvTable.getTransaction(null); - dataMap = t.openMap(mapName, keyType, valueType); + String mapName = "index." 
+ getId(); + RowDataType keyType = getRowFactory().getRowDataType(); + Transaction t = mvTable.getTransactionBegin(); + dataMap = t.openMap(mapName, keyType, NullValueDataType.INSTANCE); + dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); + if (!db.isStarting()) { + dataMap.clear(); + } t.commit(); if (!keyType.equals(dataMap.getKeyType())) { - throw DbException.throwInternalError("Incompatible key type"); + throw DbException.getInternalError( + "Incompatible key type, expected " + keyType + " but got " + + dataMap.getKeyType() + " for index " + indexName); } } @Override public void addRowsToBuffer(List rows, String bufferName) { - MVMap map = openMap(bufferName); + MVMap map = openMap(bufferName); for (Row row : rows) { - ValueArray key = convertToKey(row); - map.put(key, ValueNull.INSTANCE); + SearchRow r = getRowFactory().createRow(); + r.copyFrom(row); + map.append(r, ValueNull.INSTANCE); } } - @Override - public void addBufferedRows(List bufferNames) { - ArrayList mapNames = New.arrayList(bufferNames); - final CompareMode compareMode = database.getCompareMode(); - /** - * A source of values. 
- */ - class Source implements Comparable { - Value value; - Iterator next; - int sourceId; + private static final class Source { + + private final Iterator iterator; + + SearchRow currentRowData; + + public Source(Iterator iterator) { + assert iterator.hasNext(); + this.iterator = iterator; + this.currentRowData = iterator.next(); + } + + public boolean hasNext() { + boolean result = iterator.hasNext(); + if(result) { + currentRowData = iterator.next(); + } + return result; + } + + public SearchRow next() { + return currentRowData; + } + + static final class Comparator implements java.util.Comparator { + + private final DataType type; + + public Comparator(DataType type) { + this.type = type; + } + @Override - public int compareTo(Source o) { - int comp = value.compareTo(o.value, compareMode); - if (comp == 0) { - comp = sourceId - o.sourceId; - } - return comp; + public int compare(Source one, Source two) { + return type.compare(one.currentRowData, two.currentRowData); } } - TreeSet sources = new TreeSet(); - for (int i = 0; i < bufferNames.size(); i++) { - MVMap map = openMap(bufferNames.get(i)); - Iterator it = map.keyIterator(null); - if (it.hasNext()) { - Source s = new Source(); - s.value = it.next(); - s.next = it; - s.sourceId = i; - sources.add(s); + } + + @Override + public void addBufferedRows(List bufferNames) { + int buffersCount = bufferNames.size(); + Queue queue = new PriorityQueue<>(buffersCount, + new Source.Comparator(getRowFactory().getRowDataType())); + for (String bufferName : bufferNames) { + Iterator iter = openMap(bufferName).keyIterator(null); + if (iter.hasNext()) { + queue.offer(new Source(iter)); } } + try { - while (true) { - Source s = sources.first(); - Value v = s.value; - - if (indexType.isUnique()) { - Value[] array = ((ValueArray) v).getList(); - // don't change the original value - array = Arrays.copyOf(array, array.length); - array[keyColumns - 1] = ValueLong.get(Long.MIN_VALUE); - ValueArray unique = ValueArray.get(array); - 
SearchRow row = convertToSearchRow((ValueArray) v); - checkUnique(row, dataMap, unique); + while (!queue.isEmpty()) { + Source s = queue.poll(); + SearchRow row = s.next(); + + if (uniqueColumnColumn > 0 && !mayHaveNullDuplicates(row)) { + checkUnique(false, dataMap, row, Long.MIN_VALUE); } - dataMap.putCommitted(v, ValueNull.INSTANCE); - - Iterator it = s.next; - if (!it.hasNext()) { - sources.remove(s); - if (sources.size() == 0) { - break; - } - } else { - Value nextValue = it.next(); - sources.remove(s); - s.value = nextValue; - sources.add(s); + dataMap.putCommitted(row, ValueNull.INSTANCE); + + if (s.hasNext()) { + queue.offer(s); } } } finally { - for (String tempMapName : mapNames) { - MVMap map = openMap(tempMapName); - map.getStore().removeMap(map); + MVStore mvStore = database.getStore().getMvStore(); + for (String tempMapName : bufferNames) { + mvStore.removeMap(tempMapName); } } } - private MVMap openMap(String mapName) { - int[] sortTypes = new int[keyColumns]; - for (int i = 0; i < indexColumns.length; i++) { - sortTypes[i] = indexColumns[i].sortType; - } - sortTypes[keyColumns - 1] = SortOrder.ASCENDING; - ValueDataType keyType = new ValueDataType( - database.getCompareMode(), database, sortTypes); - ValueDataType valueType = new ValueDataType(null, null, null); - MVMap.Builder builder = - new MVMap.Builder().keyType(keyType).valueType(valueType); - MVMap map = database.getMvStore(). 
- getStore().openMap(mapName, builder); + private MVMap openMap(String mapName) { + RowDataType keyType = getRowFactory().getRowDataType(); + MVMap.Builder builder = new MVMap.Builder() + .singleWriter() + .keyType(keyType) + .valueType(NullValueDataType.INSTANCE); + MVMap map = database.getStore().getMvStore() + .openMap(mapName, builder); if (!keyType.equals(map.getKeyType())) { - throw DbException.throwInternalError("Incompatible key type"); + throw DbException.getInternalError( + "Incompatible key type, expected " + keyType + " but got " + + map.getKeyType() + " for map " + mapName); } return map; } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { - TransactionMap map = getMap(session); - ValueArray array = convertToKey(row); - ValueArray unique = null; - if (indexType.isUnique()) { - // this will detect committed entries only - unique = convertToKey(row); - unique.getList()[keyColumns - 1] = ValueLong.get(Long.MIN_VALUE); - checkUnique(row, map, unique); + public void add(SessionLocal session, Row row) { + TransactionMap map = getMap(session); + SearchRow key = convertToKey(row, null); + boolean checkRequired = uniqueColumnColumn > 0 && !mayHaveNullDuplicates(row); + if (checkRequired) { + boolean repeatableRead = !session.getTransaction().allowNonRepeatableRead(); + checkUnique(repeatableRead, map, row, Long.MIN_VALUE); } + try { - map.put(array, ValueNull.INSTANCE); - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); - } - if (indexType.isUnique()) { - Iterator it = map.keyIterator(unique, true); - while (it.hasNext()) { - ValueArray k = (ValueArray) it.next(); - SearchRow r2 = convertToSearchRow(k); - if (compareRows(row, r2) != 0) { - break; - } - if (containsNullAndAllowMultipleNull(r2)) { - // this is allowed - continue; - } - if (map.isSameTransaction(k)) { - continue; + map.put(key, 
ValueNull.INSTANCE); + } catch (MVStoreException e) { + throw mvTable.convertException(e); + } + + if (checkRequired) { + checkUnique(false, map, row, row.getKey()); + } + } + + private void checkUnique(boolean repeatableRead, TransactionMap map, SearchRow row, + long newKey) { + RowFactory uniqueRowFactory = getUniqueRowFactory(); + SearchRow from = uniqueRowFactory.createRow(); + from.copyFrom(row); + from.setKey(Long.MIN_VALUE); + SearchRow to = uniqueRowFactory.createRow(); + to.copyFrom(row); + to.setKey(Long.MAX_VALUE); + if (repeatableRead) { + // In order to guarantee repeatable reads, snapshot taken at the beginning of the statement or transaction + // need to be checked additionally, because existence of the key should be accounted for, + // even if since then, it was already deleted by another (possibly committed) transaction. + TMIterator it = map.keyIterator(from, to); + for (SearchRow k; (k = it.fetchNext()) != null;) { + if (newKey != k.getKey() && !map.isDeletedByCurrentTransaction(k)) { + throw getDuplicateKeyException(k.toString()); } - if (map.get(k) != null) { + } + } + TMIterator it = map.keyIteratorUncommitted(from, to); + for (SearchRow k; (k = it.fetchNext()) != null;) { + if (newKey != k.getKey()) { + if (map.getImmediate(k) != null) { // committed throw getDuplicateKeyException(k.toString()); } @@ -220,134 +227,66 @@ public void add(Session session, Row row) { } } - private void checkUnique(SearchRow row, TransactionMap map, ValueArray unique) { - Iterator it = map.keyIterator(unique, true); - while (it.hasNext()) { - ValueArray k = (ValueArray) it.next(); - SearchRow r2 = convertToSearchRow(k); - if (compareRows(row, r2) != 0) { - break; - } - if (map.get(k) != null) { - if (!containsNullAndAllowMultipleNull(r2)) { - throw getDuplicateKeyException(k.toString()); - } + @Override + public void remove(SessionLocal session, Row row) { + SearchRow searchRow = convertToKey(row, null); + TransactionMap map = getMap(session); + try { + if 
(map.remove(searchRow) == null) { + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(row.getKey()); + throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } + } catch (MVStoreException e) { + throw mvTable.convertException(e); } } - @Override - public void remove(Session session, Row row) { - ValueArray array = convertToKey(row); - TransactionMap map = getMap(session); - try { - Value old = map.remove(array); - if (old == null) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - getSQL() + ": " + row.getKey()); + public void update(SessionLocal session, Row oldRow, Row newRow) { + SearchRow searchRowOld = convertToKey(oldRow, null); + SearchRow searchRowNew = convertToKey(newRow, null); + if (!rowsAreEqual(searchRowOld, searchRowNew)) { + super.update(session, oldRow, newRow); + } + } + + private boolean rowsAreEqual(SearchRow rowOne, SearchRow rowTwo) { + if (rowOne == rowTwo) { + return true; + } + for (int index : columnIds) { + Value v1 = rowOne.getValue(index); + Value v2 = rowTwo.getValue(index); + if (!Objects.equals(v1, v2)) { + return false; } - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); } + return rowOne.getKey() == rowTwo.getKey(); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return find(session, first, false, last); } - private Cursor find(Session session, SearchRow first, boolean bigger, SearchRow last) { - ValueArray min = convertToKey(first); - if (min != null) { - min.getList()[keyColumns - 1] = ValueLong.get(Long.MIN_VALUE); - } - TransactionMap map = getMap(session); - if (bigger && min != null) { - // search for the next: first skip 1, then 2, 4, 8, until - // we have a higher key; then skip 4, 2,... 
- // (binary search), until 1 - int offset = 1; - while (true) { - ValueArray v = (ValueArray) map.relativeKey(min, offset); - if (v != null) { - boolean foundHigher = false; - for (int i = 0; i < keyColumns - 1; i++) { - int idx = columnIds[i]; - Value b = first.getValue(idx); - if (b == null) { - break; - } - Value a = v.getList()[i]; - if (database.compare(a, b) > 0) { - foundHigher = true; - break; - } - } - if (!foundHigher) { - offset += offset; - min = v; - continue; - } - } - if (offset > 1) { - offset /= 2; - continue; - } - if (map.get(v) == null) { - min = (ValueArray) map.higherKey(min); - if (min == null) { - break; - } - continue; - } - min = v; - break; - } - if (min == null) { - return new MVStoreCursor(session, - Collections.emptyList().iterator(), null); - } - } - return new MVStoreCursor(session, map.keyIterator(min), last); + private Cursor find(SessionLocal session, SearchRow first, boolean bigger, SearchRow last) { + SearchRow min = convertToKey(first, bigger); + SearchRow max = convertToKey(last, Boolean.TRUE); + return new MVStoreCursor(session, getMap(session).keyIterator(min, max), mvTable); } - private ValueArray convertToKey(SearchRow r) { + private SearchRow convertToKey(SearchRow r, Boolean minMax) { if (r == null) { return null; } - Value[] array = new Value[keyColumns]; - for (int i = 0; i < columns.length; i++) { - Column c = columns[i]; - int idx = c.getColumnId(); - Value v = r.getValue(idx); - if (v != null) { - array[i] = v.convertTo(c.getType()); - } - } - array[keyColumns - 1] = ValueLong.get(r.getKey()); - return ValueArray.get(array); - } - /** - * Convert array of values to a SearchRow. 
- * - * @param array the index key - * @return the row - */ - SearchRow convertToSearchRow(ValueArray key) { - Value[] array = key.getList(); - SearchRow searchRow = mvTable.getTemplateRow(); - searchRow.setKey((array[array.length - 1]).getLong()); - Column[] cols = getColumns(); - for (int i = 0; i < array.length - 1; i++) { - Column c = cols[i]; - int idx = c.getColumnId(); - Value v = array[i]; - searchRow.setValue(idx, v); - } - return searchRow; + SearchRow row = getRowFactory().createRow(); + row.copyFrom(r); + if (minMax != null) { + row.setKey(minMax ? Long.MAX_VALUE : Long.MIN_VALUE); + } + return row; } @Override @@ -356,28 +295,29 @@ public MVTable getTable() { } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { try { - return 10 * getCostRangeIndex(masks, - dataMap.sizeAsLongMax(), filter, sortOrder); - } catch (IllegalStateException e) { + return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(), + filters, filter, sortOrder, false, allColumnsSet); + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { - Transaction t = mvTable.getTransaction(session); + Transaction t = session.getTransaction(); t.removeMap(map); } } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { + TransactionMap map = getMap(session); map.clear(); } @@ -387,46 +327,36 @@ public boolean canGetFirstOrLast() { } @Override - public Cursor findFirstOrLast(Session session, boolean first) { - TransactionMap map = getMap(session); - Value key = first ? 
map.firstKey() : map.lastKey(); - while (true) { - if (key == null) { - return new MVStoreCursor(session, - Collections.emptyList().iterator(), null); - } - if (((ValueArray) key).getList()[0] != ValueNull.INSTANCE) { - break; + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + TMIterator iter = getMap(session).keyIterator(null, !first); + for (SearchRow key; (key = iter.fetchNext()) != null;) { + if (key.getValue(columnIds[0]) != ValueNull.INSTANCE) { + return new SingleRowCursor(mvTable.getRow(session, key.getKey())); } - key = first ? map.higherKey(key) : map.lowerKey(key); } - ArrayList list = New.arrayList(); - list.add(key); - MVStoreCursor cursor = new MVStoreCursor(session, list.iterator(), null); - cursor.next(); - return cursor; + return new SingleRowCursor(null); } @Override public boolean needRebuild() { try { return dataMap.sizeAsLongMax() == 0; - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); + public long getRowCount(SessionLocal session) { + TransactionMap map = getMap(session); return map.sizeAsLong(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { try { return dataMap.sizeAsLongMax(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -443,45 +373,44 @@ public boolean canFindNext() { } @Override - public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) { + public Cursor findNext(SessionLocal session, SearchRow higherThan, SearchRow last) { return find(session, higherThan, true, last); } - @Override - public void checkRename() { - // ok - } - /** * Get the map to store the data. 
* * @param session the session * @return the map */ - TransactionMap getMap(Session session) { + private TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } - Transaction t = mvTable.getTransaction(session); - return dataMap.getInstance(t, Long.MAX_VALUE); + Transaction t = session.getTransaction(); + return dataMap.getInstance(t); + } + + @Override + public MVMap> getMVMap() { + return dataMap.map; } /** * A cursor. */ - class MVStoreCursor implements Cursor { + static final class MVStoreCursor implements Cursor { - private final Session session; - private final Iterator it; - private final SearchRow last; - private Value current; - private SearchRow searchRow; - private Row row; + private final SessionLocal session; + private final TMIterator it; + private final MVTable mvTable; + private SearchRow current; + private Row row; - public MVStoreCursor(Session session, Iterator it, SearchRow last) { + MVStoreCursor(SessionLocal session, TMIterator it, MVTable mvTable) { this.session = session; this.it = it; - this.last = last; + this.mvTable = mvTable; } @Override @@ -497,24 +426,12 @@ public Row get() { @Override public SearchRow getSearchRow() { - if (searchRow == null) { - if (current != null) { - searchRow = convertToSearchRow((ValueArray) current); - } - } - return searchRow; + return current; } @Override public boolean next() { - current = it.hasNext() ? 
it.next() : null; - searchRow = null; - if (current != null) { - if (last != null && compareRows(getSearchRow(), last) > 0) { - searchRow = null; - current = null; - } - } + current = it.fetchNext(); row = null; return current != null; } @@ -523,7 +440,6 @@ public boolean next() { public boolean previous() { throw DbException.getUnsupportedException("previous"); } - } } diff --git a/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java b/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java new file mode 100644 index 0000000000..17579c9479 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/MVSortedTempResult.java @@ -0,0 +1,389 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.util.Arrays; +import java.util.BitSet; + +import org.h2.engine.Database; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.mvstore.Cursor; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVMap.Builder; +import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.LongDataType; +import org.h2.result.ResultExternal; +import org.h2.result.RowFactory.DefaultRowFactory; +import org.h2.result.SortOrder; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueRow; + +/** + * Sorted temporary result. + * + *

    + * This result is used for distinct and/or sorted results. + *

    + */ +class MVSortedTempResult extends MVTempResult { + + /** + * Whether this result is a standard distinct result. + */ + private final boolean distinct; + + /** + * Distinct indexes for DISTINCT ON results. + */ + private final int[] distinctIndexes; + + /** + * Mapping of indexes of columns to its positions in the store, or {@code null} + * if columns are not reordered. + */ + private final int[] indexes; + + /** + * Map with rows as keys and counts of duplicate rows as values. If this map is + * distinct all values are 1. + */ + private final MVMap map; + + /** + * Optional index. This index is created only if result is distinct and + * {@code columnCount != distinctColumnCount} or if + * {@link #contains(Value[])} method is invoked. Only the root result should + * have an index if required. + */ + private MVMap index; + + /** + * Used for DISTINCT ON in presence of ORDER BY. + */ + private ValueDataType orderedDistinctOnType; + + /** + * Cursor for the {@link #next()} method. + */ + private Cursor cursor; + + /** + * Current value for the {@link #next()} method. Used in non-distinct results + * with duplicate rows. + */ + private Value[] current; + + /** + * Count of remaining duplicate rows for the {@link #next()} method. Used in + * non-distinct results. + */ + private long valueCount; + + /** + * Creates a shallow copy of the result. + * + * @param parent + * parent result + */ + private MVSortedTempResult(MVSortedTempResult parent) { + super(parent); + this.distinct = parent.distinct; + this.distinctIndexes = parent.distinctIndexes; + this.indexes = parent.indexes; + this.map = parent.map; + this.rowCount = parent.rowCount; + } + + /** + * Creates a new sorted temporary result. 
+ * + * @param database + * database + * @param expressions + * column expressions + * @param distinct + * whether this result should be distinct + * @param distinctIndexes + * indexes of distinct columns for DISTINCT ON results + * @param visibleColumnCount + * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses + * @param sort + * sort order, or {@code null} if this result does not need any + * sorting + */ + MVSortedTempResult(Database database, Expression[] expressions, boolean distinct, int[] distinctIndexes, + int visibleColumnCount, int resultColumnCount, SortOrder sort) { + super(database, expressions, visibleColumnCount, resultColumnCount); + this.distinct = distinct; + this.distinctIndexes = distinctIndexes; + int[] sortTypes = new int[resultColumnCount]; + int[] indexes; + if (sort != null) { + /* + * If sorting is specified we need to reorder columns in requested order and set + * sort types (ASC, DESC etc) for them properly. + */ + indexes = new int[resultColumnCount]; + int[] colIndex = sort.getQueryColumnIndexes(); + int len = colIndex.length; + // This set is used to remember columns that are already included + BitSet used = new BitSet(); + for (int i = 0; i < len; i++) { + int idx = colIndex[i]; + assert !used.get(idx); + used.set(idx); + indexes[i] = idx; + sortTypes[i] = sort.getSortTypes()[i]; + } + /* + * Because this result may have more columns than specified in sorting we need + * to add all remaining columns to the mapping of columns. A default sorting + * order (ASC / 0) will be used for them. + */ + int idx = 0; + for (int i = len; i < resultColumnCount; i++) { + idx = used.nextClearBit(idx); + indexes[i] = idx; + idx++; + } + /* + * Sometimes columns may be not reordered. Because reordering of columns + * slightly slows down other methods we check whether columns are really + * reordered or have the same order. 
+ */ + sameOrder: { + for (int i = 0; i < resultColumnCount; i++) { + if (indexes[i] != i) { + // Columns are reordered + break sameOrder; + } + } + /* + * Columns are not reordered, set this field to null to disable reordering in + * other methods. + */ + indexes = null; + } + } else { + // Columns are not reordered if sort order is not specified + indexes = null; + } + this.indexes = indexes; + ValueDataType keyType = new ValueDataType(database, SortOrder.addNullOrdering(database, sortTypes)); + if (indexes != null) { + int l = indexes.length; + TypeInfo[] types = new TypeInfo[l]; + for (int i = 0; i < l; i++) { + types[i] = expressions[indexes[i]].getType(); + } + keyType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, types, null, false)); + } else { + keyType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, expressions, null, false)); + } + Builder builder = new MVMap.Builder().keyType(keyType) + .valueType(LongDataType.INSTANCE); + map = store.openMap("tmp", builder); + if (distinct && resultColumnCount != visibleColumnCount || distinctIndexes != null) { + int count; + TypeInfo[] types; + if (distinctIndexes != null) { + count = distinctIndexes.length; + types = new TypeInfo[count]; + for (int i = 0; i < count; i++) { + types[i] = expressions[distinctIndexes[i]].getType(); + } + } else { + count = visibleColumnCount; + types = new TypeInfo[count]; + for (int i = 0; i < count; i++) { + types[i] = expressions[i].getType(); + } + } + ValueDataType distinctType = new ValueDataType(database, new int[count]); + distinctType.setRowFactory(DefaultRowFactory.INSTANCE.createRowFactory(database, database.getCompareMode(), + database, types, null, false)); + DataType distinctValueType; + if (distinctIndexes != null && sort != null) { + distinctValueType = orderedDistinctOnType = keyType; + } else { + distinctValueType = NullValueDataType.INSTANCE; + } + 
Builder indexBuilder = new MVMap.Builder().keyType(distinctType) + .valueType(distinctValueType); + index = store.openMap("idx", indexBuilder); + } + } + + @Override + public int addRow(Value[] values) { + assert parent == null; + ValueRow key = getKey(values); + if (distinct || distinctIndexes != null) { + if (distinctIndexes != null) { + int cnt = distinctIndexes.length; + Value[] newValues = new Value[cnt]; + for (int i = 0; i < cnt; i++) { + newValues[i] = values[distinctIndexes[i]]; + } + ValueRow distinctRow = ValueRow.get(newValues); + if (orderedDistinctOnType == null) { + if (index.putIfAbsent(distinctRow, ValueNull.INSTANCE) != null) { + return rowCount; + } + } else { + ValueRow previous = (ValueRow) index.get(distinctRow); + if (previous == null) { + index.put(distinctRow, key); + } else if (orderedDistinctOnType.compare(previous, key) > 0) { + map.remove(previous); + rowCount--; + index.put(distinctRow, key); + } else { + return rowCount; + } + } + } else if (visibleColumnCount != resultColumnCount) { + ValueRow distinctRow = ValueRow.get(Arrays.copyOf(values, visibleColumnCount)); + if (index.putIfAbsent(distinctRow, ValueNull.INSTANCE) != null) { + return rowCount; + } + } + // Add a row and increment the counter only if row does not exist + if (map.putIfAbsent(key, 1L) == null) { + rowCount++; + } + } else { + // Try to set counter to 1 first if such row does not exist yet + Long old = map.putIfAbsent(key, 1L); + if (old != null) { + // This rows is already in the map, increment its own counter + map.put(key, old + 1); + } + rowCount++; + } + return rowCount; + } + + @Override + public boolean contains(Value[] values) { + // Only parent result maintains the index + if (parent != null) { + return parent.contains(values); + } + assert distinct; + if (visibleColumnCount != resultColumnCount) { + return index.containsKey(ValueRow.get(values)); + } + return map.containsKey(getKey(values)); + } + + @Override + public synchronized ResultExternal 
createShallowCopy() { + if (parent != null) { + return parent.createShallowCopy(); + } + if (closed) { + return null; + } + childCount++; + return new MVSortedTempResult(this); + } + + /** + * Reorder values if required and convert them into {@link ValueRow}. + * + * @param values + * values + * @return ValueRow for maps + */ + private ValueRow getKey(Value[] values) { + if (indexes != null) { + Value[] r = new Value[indexes.length]; + for (int i = 0; i < indexes.length; i++) { + r[i] = values[indexes[i]]; + } + values = r; + } + return ValueRow.get(values); + } + + /** + * Reorder values back if required. + * + * @param key + * reordered values + * @return original values + */ + private Value[] getValue(Value[] key) { + if (indexes != null) { + Value[] r = new Value[indexes.length]; + for (int i = 0; i < indexes.length; i++) { + r[indexes[i]] = key[i]; + } + key = r; + } + return key; + } + + @Override + public Value[] next() { + if (cursor == null) { + cursor = map.cursor(null); + current = null; + valueCount = 0L; + } + // If we have multiple rows with the same values return them all + if (--valueCount > 0) { + /* + * Underflow in valueCount is hypothetically possible after a lot of invocations + * (not really possible in practice), but current will be null anyway. + */ + return current; + } + if (!cursor.hasNext()) { + // Set current to null to be sure + current = null; + return null; + } + // Read the next row + current = getValue(cursor.next().getList()); + /* + * If valueCount is greater than 1 that is possible for non-distinct results the + * following invocations of next() will use this.current and this.valueCount. 
+ */ + valueCount = cursor.getValue(); + return current; + } + + @Override + public int removeRow(Value[] values) { + assert parent == null && distinct; + if (visibleColumnCount != resultColumnCount) { + throw DbException.getUnsupportedException("removeRow()"); + } + // If an entry was removed decrement the counter + if (map.remove(getKey(values)) != null) { + rowCount--; + } + return rowCount; + } + + @Override + public void reset() { + cursor = null; + current = null; + valueCount = 0L; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java b/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java index 7be701dd0b..5d07ec7607 100644 --- a/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java +++ b/h2/src/main/org/h2/mvstore/db/MVSpatialIndex.java @@ -1,41 +1,45 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.db; +import org.h2.mvstore.rtree.Spatial; +import static org.h2.util.geometry.GeometryUtils.MAX_X; +import static org.h2.util.geometry.GeometryUtils.MAX_Y; +import static org.h2.util.geometry.GeometryUtils.MIN_X; +import static org.h2.util.geometry.GeometryUtils.MIN_Y; + import java.util.Iterator; import java.util.List; - import org.h2.api.ErrorCode; +import org.h2.command.query.AllColumnsForPlan; import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.index.BaseIndex; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; +import org.h2.index.IndexCondition; import org.h2.index.IndexType; import org.h2.index.SpatialIndex; -import org.h2.index.SpatialTreeIndex; import org.h2.message.DbException; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.mvstore.db.TransactionStore.TransactionMap; -import org.h2.mvstore.db.TransactionStore.VersionedValue; -import org.h2.mvstore.db.TransactionStore.VersionedValueType; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.Page; import org.h2.mvstore.rtree.MVRTreeMap; import org.h2.mvstore.rtree.MVRTreeMap.RTreeCursor; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.VersionedValueType; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; +import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.TableFilter; import org.h2.value.Value; import org.h2.value.ValueGeometry; -import org.h2.value.ValueLong; import org.h2.value.ValueNull; - -import com.vividsolutions.jts.geom.Envelope; -import com.vividsolutions.jts.geom.Geometry; +import org.h2.value.VersionedValue; /** * This is an index based on a MVRTreeMap. 
@@ -44,16 +48,15 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public class MVSpatialIndex extends BaseIndex implements SpatialIndex, MVIndex { +public class MVSpatialIndex extends MVIndex implements SpatialIndex { /** * The multi-value table. */ final MVTable mvTable; - private final String mapName; - private TransactionMap dataMap; - private MVRTreeMap spatialMap; + private final TransactionMap dataMap; + private final MVRTreeMap> spatialMap; /** * Constructor. @@ -63,11 +66,12 @@ public class MVSpatialIndex extends BaseIndex implements SpatialIndex, MVIndex { * @param id the index id * @param indexName the index name * @param columns the indexed columns (only one geometry column allowed) + * @param uniqueColumnCount count of unique columns (0 or 1) * @param indexType the index type (only spatial index) */ - public MVSpatialIndex( - Database db, MVTable table, int id, String indexName, - IndexColumn[] columns, IndexType indexType) { + public MVSpatialIndex(Database db, MVTable table, int id, String indexName, IndexColumn[] columns, + int uniqueColumnCount, IndexType indexType) { + super(table, id, indexName, columns, uniqueColumnCount, indexType); if (columns.length != 1) { throw DbException.getUnsupportedException( "Can only index one column"); @@ -85,76 +89,79 @@ public MVSpatialIndex( throw DbException.getUnsupportedException( "Nulls last is not supported"); } - if (col.column.getType() != Value.GEOMETRY) { + if (col.column.getType().getValueType() != Value.GEOMETRY) { throw DbException.getUnsupportedException( "Spatial index on non-geometry column, " + col.column.getCreateSQL()); } this.mvTable = table; - initBaseIndex(table, id, indexName, columns, indexType); if (!database.isStarting()) { checkIndexColumnTypes(columns); } - mapName = "index." 
+ getId(); - ValueDataType vt = new ValueDataType(null, null, null); - VersionedValueType valueType = new VersionedValueType(vt); - MVRTreeMap.Builder mapBuilder = - new MVRTreeMap.Builder(). + String mapName = "index." + getId(); + VersionedValueType valueType = new VersionedValueType<>(NullValueDataType.INSTANCE); + MVRTreeMap.Builder> mapBuilder = + new MVRTreeMap.Builder>(). valueType(valueType); - spatialMap = db.getMvStore().getStore().openMap(mapName, mapBuilder); - Transaction t = mvTable.getTransaction(null); - dataMap = t.openMap(spatialMap); + spatialMap = db.getStore().getMvStore().openMap(mapName, mapBuilder); + Transaction t = mvTable.getTransactionBegin(); + dataMap = t.openMapX(spatialMap); + dataMap.map.setVolatile(!table.isPersistData() || !indexType.isPersistent()); t.commit(); } @Override public void addRowsToBuffer(List rows, String bufferName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override public void addBufferedRows(List bufferNames) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(); } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ok } @Override - public void add(Session session, Row row) { - TransactionMap map = getMap(session); + public void add(SessionLocal session, Row row) { + TransactionMap map = getMap(session); SpatialKey key = getKey(row); - if (indexType.isUnique()) { + + if (key.isNull()) { + return; + } + + if (uniqueColumnColumn > 0) { // this will detect committed entries only - RTreeCursor cursor = spatialMap.findContainedKeys(key); - Iterator it = map.wrapIterator(cursor, false); + RTreeCursor> cursor = spatialMap.findContainedKeys(key); + Iterator it = new SpatialKeyIterator(map, cursor, false); while (it.hasNext()) { - SpatialKey k = it.next(); + Spatial k = it.next(); if (k.equalsIgnoringId(key)) { throw getDuplicateKeyException(key.toString()); } } } try { - map.put(key, ValueLong.get(0)); - } 
catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); + map.put(key, ValueNull.INSTANCE); + } catch (MVStoreException e) { + throw mvTable.convertException(e); } - if (indexType.isUnique()) { + if (uniqueColumnColumn > 0) { // check if there is another (uncommitted) entry - RTreeCursor cursor = spatialMap.findContainedKeys(key); - Iterator it = map.wrapIterator(cursor, true); + RTreeCursor> cursor = spatialMap.findContainedKeys(key); + Iterator it = new SpatialKeyIterator(map, cursor, true); while (it.hasNext()) { - SpatialKey k = it.next(); + Spatial k = it.next(); if (k.equalsIgnoringId(key)) { if (map.isSameTransaction(k)) { continue; } map.remove(key); - if (map.get(k) != null) { + if (map.getImmediate(k) != null) { // committed throw getDuplicateKeyException(k.toString()); } @@ -165,76 +172,105 @@ public void add(Session session, Row row) { } @Override - public void remove(Session session, Row row) { + public void remove(SessionLocal session, Row row) { SpatialKey key = getKey(row); - TransactionMap map = getMap(session); + + if (key.isNull()) { + return; + } + + TransactionMap map = getMap(session); try { Value old = map.remove(key); if (old == null) { - throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, - getSQL() + ": " + row.getKey()); + StringBuilder builder = new StringBuilder(); + getSQL(builder, TRACE_SQL_FLAGS).append(": ").append(row.getKey()); + throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, builder.toString()); } - } catch (IllegalStateException e) { - throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, - e, table.getName()); + } catch (MVStoreException e) { + throw mvTable.convertException(e); } } @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getSession()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(session); - } - - private Cursor 
find(Session session) { - Iterator cursor = spatialMap.keyIterator(null); - TransactionMap map = getMap(session); - Iterator it = map.wrapIterator(cursor, false); - return new MVStoreCursor(session, it); + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + Iterator cursor = spatialMap.keyIterator(null); + TransactionMap map = getMap(session); + Iterator it = new SpatialKeyIterator(map, cursor, false); + return new MVStoreCursor(session, it, mvTable); } @Override - public Cursor findByGeometry(TableFilter filter, SearchRow intersection) { - Session session = filter.getSession(); + public Cursor findByGeometry(SessionLocal session, SearchRow first, SearchRow last, SearchRow intersection) { if (intersection == null) { - return find(session); + return find(session, first, last); } - Iterator cursor = + Iterator cursor = spatialMap.findIntersectingKeys(getKey(intersection)); - TransactionMap map = getMap(session); - Iterator it = map.wrapIterator(cursor, false); - return new MVStoreCursor(session, it); + TransactionMap map = getMap(session); + Iterator it = new SpatialKeyIterator(map, cursor, false); + return new MVStoreCursor(session, it, mvTable); } - private SpatialKey getKey(SearchRow row) { - if (row == null) { - return null; - } - Value v = row.getValue(columnIds[0]); - if (v == ValueNull.INSTANCE) { - return null; + /** + * Returns the minimum bounding box that encloses all keys. 
+ * + * @param session the session + * @return the minimum bounding box that encloses all keys, or null + */ + public Value getBounds(SessionLocal session) { + FindBoundsCursor cursor = new FindBoundsCursor(spatialMap.getRootPage(), new SpatialKey(0), session, + getMap(session), columnIds[0]); + while (cursor.hasNext()) { + cursor.next(); } - Geometry g = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getGeometryNoCopy(); - Envelope env = g.getEnvelopeInternal(); - return new SpatialKey(row.getKey(), - (float) env.getMinX(), (float) env.getMaxX(), - (float) env.getMinY(), (float) env.getMaxY()); + return cursor.getBounds(); } /** - * Get the row with the given index key. + * Returns the estimated minimum bounding box that encloses all keys. + * + * The returned value may be incorrect. * - * @param key the index key - * @return the row + * @param session the session + * @return the estimated minimum bounding box that encloses all keys, or null */ - SearchRow getRow(SpatialKey key) { - SearchRow searchRow = mvTable.getTemplateRow(); - searchRow.setKey(key.getId()); - return searchRow; + public Value getEstimatedBounds(SessionLocal session) { + Page> p = spatialMap.getRootPage(); + int count = p.getKeyCount(); + if (count > 0) { + Spatial key = p.getKey(0); + float bminxf = key.min(0), bmaxxf = key.max(0), bminyf = key.min(1), bmaxyf = key.max(1); + for (int i = 1; i < count; i++) { + key = p.getKey(i); + float minxf = key.min(0), maxxf = key.max(0), minyf = key.min(1), maxyf = key.max(1); + if (minxf < bminxf) { + bminxf = minxf; + } + if (maxxf > bmaxxf) { + bmaxxf = maxxf; + } + if (minyf < bminyf) { + bminyf = minyf; + } + if (maxyf > bmaxyf) { + bmaxyf = maxyf; + } + } + return ValueGeometry.fromEnvelope(new double[] {bminxf, bmaxxf, bminyf, bmaxyf}); + } + return ValueNull.INSTANCE; + } + + private SpatialKey getKey(SearchRow row) { + Value v = row.getValue(columnIds[0]); + double[] env; + if (v == ValueNull.INSTANCE || (env = 
v.convertToGeometry(null).getEnvelopeNoCopy()) == null) { + return new SpatialKey(row.getKey()); + } + return new SpatialKey(row.getKey(), + (float) env[MIN_X], (float) env[MAX_X], + (float) env[MIN_Y], (float) env[MAX_Y]); } @Override @@ -243,67 +279,68 @@ public MVTable getTable() { } @Override - public double getCost(Session session, int[] masks, TableFilter filter, - SortOrder sortOrder) { - return getCostRangeIndex(masks, table.getRowCountApproximation(), - filter, sortOrder); + public double getCost(SessionLocal session, int[] masks, TableFilter[] filters, + int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return getCostRangeIndex(masks, columns); } - @Override - protected long getCostRangeIndex(int[] masks, long rowCount, - TableFilter filter, SortOrder sortOrder) { - return SpatialTreeIndex.getCostRangeIndex(masks, rowCount, columns); + /** + * Compute spatial index cost + * @param masks Search mask + * @param columns Table columns + * @return Index cost hint + */ + public static long getCostRangeIndex(int[] masks, Column[] columns) { + // Never use spatial tree index without spatial filter + if (columns.length == 0) { + return Long.MAX_VALUE; + } + for (Column column : columns) { + int index = column.getColumnId(); + int mask = masks[index]; + if ((mask & IndexCondition.SPATIAL_INTERSECTS) != IndexCondition.SPATIAL_INTERSECTS) { + return Long.MAX_VALUE; + } + } + return 2; } @Override - public void remove(Session session) { - TransactionMap map = getMap(session); + public void remove(SessionLocal session) { + TransactionMap map = getMap(session); if (!map.isClosed()) { - Transaction t = mvTable.getTransaction(session); + Transaction t = session.getTransaction(); t.removeMap(map); } } @Override - public void truncate(Session session) { - TransactionMap map = getMap(session); + public void truncate(SessionLocal session) { + TransactionMap map = getMap(session); map.clear(); } - @Override - public boolean canGetFirstOrLast() { - return 
true; - } - - @Override - public Cursor findFirstOrLast(Session session, boolean first) { - if (!first) { - throw DbException.throwInternalError( - "Spatial Index can only be fetch in ascending order"); - } - return find(session); - } - @Override public boolean needRebuild() { try { return dataMap.sizeAsLongMax() == 0; - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @Override - public long getRowCount(Session session) { - TransactionMap map = getMap(session); + public long getRowCount(SessionLocal session) { + TransactionMap map = getMap(session); return map.sizeAsLong(); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { try { return dataMap.sizeAsLongMax(); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { throw DbException.get(ErrorCode.OBJECT_CLOSED, e); } } @@ -314,39 +351,42 @@ public long getDiskSpaceUsed() { return 0; } - @Override - public void checkRename() { - // ok - } - /** * Get the map to store the data. * * @param session the session * @return the map */ - TransactionMap getMap(Session session) { + private TransactionMap getMap(SessionLocal session) { if (session == null) { return dataMap; } - Transaction t = mvTable.getTransaction(session); - return dataMap.getInstance(t, Long.MAX_VALUE); + Transaction t = session.getTransaction(); + return dataMap.getInstance(t); + } + + @Override + public MVMap> getMVMap() { + return dataMap.map; } + /** * A cursor. 
*/ - class MVStoreCursor implements Cursor { + private static class MVStoreCursor implements Cursor { - private final Session session; - private final Iterator it; - private SpatialKey current; + private final SessionLocal session; + private final Iterator it; + private final MVTable mvTable; + private Spatial current; private SearchRow searchRow; private Row row; - public MVStoreCursor(Session session, Iterator it) { + MVStoreCursor(SessionLocal session, Iterator it, MVTable mvTable) { this.session = session; this.it = it; + this.mvTable = mvTable; } @Override @@ -364,7 +404,8 @@ public Row get() { public SearchRow getSearchRow() { if (searchRow == null) { if (current != null) { - searchRow = getRow(current); + searchRow = mvTable.getTemplateRow(); + searchRow.setKey(current.getId()); } } return searchRow; @@ -372,7 +413,7 @@ public SearchRow getSearchRow() { @Override public boolean next() { - current = it.next(); + current = it.hasNext() ? it.next() : null; searchRow = null; row = null; return current != null; @@ -385,5 +426,125 @@ public boolean previous() { } + private static class SpatialKeyIterator implements Iterator { + + private final TransactionMap map; + private final Iterator iterator; + private final boolean includeUncommitted; + private Spatial current; + + SpatialKeyIterator(TransactionMap map, + Iterator iterator, boolean includeUncommitted) { + this.map = map; + this.iterator = iterator; + this.includeUncommitted = includeUncommitted; + fetchNext(); + } + + private void fetchNext() { + while (iterator.hasNext()) { + current = iterator.next(); + if (includeUncommitted || map.containsKey(current)) { + return; + } + } + current = null; + } + + @Override + public boolean hasNext() { + return current != null; + } + + @Override + public Spatial next() { + Spatial result = current; + fetchNext(); + return result; + } + } + + /** + * A cursor for getBounds() method. 
+ */ + private final class FindBoundsCursor extends RTreeCursor> { + + private final SessionLocal session; + + private final TransactionMap map; + + private final int columnId; + + private boolean hasBounds; + + private float bminxf, bmaxxf, bminyf, bmaxyf; + + private double bminxd, bmaxxd, bminyd, bmaxyd; + + FindBoundsCursor(Page> root, Spatial filter, SessionLocal session, + TransactionMap map, int columnId) { + super(root, filter); + this.session = session; + this.map = map; + this.columnId = columnId; + } + + @Override + protected boolean check(boolean leaf, Spatial key, Spatial test) { + float minxf = key.min(0), maxxf = key.max(0), minyf = key.min(1), maxyf = key.max(1); + if (leaf) { + if (hasBounds) { + if ((minxf <= bminxf || maxxf >= bmaxxf || minyf <= bminyf || maxyf >= bmaxyf) + && map.containsKey(key)) { + double[] env = ((ValueGeometry) mvTable.getRow(session, key.getId()).getValue(columnId)) + .getEnvelopeNoCopy(); + double minxd = env[MIN_X], maxxd = env[MAX_X], minyd = env[MIN_Y], maxyd = env[MAX_Y]; + if (minxd < bminxd) { + bminxf = minxf; + bminxd = minxd; + } + if (maxxd > bmaxxd) { + bmaxxf = maxxf; + bmaxxd = maxxd; + } + if (minyd < bminyd) { + bminyf = minyf; + bminyd = minyd; + } + if (maxyd > bmaxyd) { + bmaxyf = maxyf; + bmaxyd = maxyd; + } + } + } else if (map.containsKey(key)) { + hasBounds = true; + double[] env = ((ValueGeometry) mvTable.getRow(session, key.getId()).getValue(columnId)) + .getEnvelopeNoCopy(); + bminxf = minxf; + bminxd = env[MIN_X]; + bmaxxf = maxxf; + bmaxxd = env[MAX_X]; + bminyf = minyf; + bminyd = env[MIN_Y]; + bmaxyf = maxyf; + bmaxyd = env[MAX_Y]; + } + } else if (hasBounds) { + if (minxf <= bminxf || maxxf >= bmaxxf || minyf <= bminyf || maxyf >= bmaxyf) { + return true; + } + } else { + return true; + } + return false; + } + + Value getBounds() { + return hasBounds ? 
ValueGeometry.fromEnvelope(new double[] {bminxd, bmaxxd, bminyd, bmaxyd}) + : ValueNull.INSTANCE; + } + + } + } diff --git a/h2/src/main/org/h2/mvstore/db/MVTable.java b/h2/src/main/org/h2/mvstore/db/MVTable.java index c5733dd2c5..65c611845e 100644 --- a/h2/src/main/org/h2/mvstore/db/MVTable.java +++ b/h2/src/main/org/h2/mvstore/db/MVTable.java @@ -1,93 +1,158 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; +import java.util.HashSet; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; -import org.h2.command.ddl.Analyze; import org.h2.command.ddl.CreateTableData; import org.h2.constraint.Constraint; import org.h2.constraint.ConstraintReferential; import org.h2.engine.Constants; -import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.index.IndexType; -import org.h2.index.MultiVersionIndex; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.mvstore.db.MVTableEngine.Store; -import org.h2.mvstore.db.TransactionStore.Transaction; +import org.h2.mode.DefaultNullOrdering; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionStore; import org.h2.result.Row; +import org.h2.result.SearchRow; 
import org.h2.result.SortOrder; -import org.h2.schema.SchemaObject; import org.h2.table.Column; import org.h2.table.IndexColumn; import org.h2.table.Table; import org.h2.table.TableBase; -import org.h2.util.MathUtils; -import org.h2.util.New; +import org.h2.table.TableType; +import org.h2.util.DebuggingThreadLocal; +import org.h2.util.Utils; import org.h2.value.DataType; -import org.h2.value.Value; +import org.h2.value.TypeInfo; /** * A table stored in a MVStore. */ public class MVTable extends TableBase { + /** + * The table name this thread is waiting to lock. + */ + public static final DebuggingThreadLocal WAITING_FOR_LOCK; - private MVPrimaryIndex primaryIndex; - private final ArrayList indexes = New.arrayList(); - private long lastModificationId; - private volatile Session lockExclusiveSession; + /** + * The table names this thread has exclusively locked. + */ + public static final DebuggingThreadLocal> EXCLUSIVE_LOCKS; - // using a ConcurrentHashMap as a set - private final ConcurrentHashMap lockSharedSessions = - new ConcurrentHashMap(); + /** + * The tables names this thread has a shared lock on. 
+ */ + public static final DebuggingThreadLocal> SHARED_LOCKS; + + /** + * The type of trace lock events + */ + private enum TraceLockEvent{ + + TRACE_LOCK_OK("ok"), + TRACE_LOCK_WAITING_FOR("waiting for"), + TRACE_LOCK_REQUESTING_FOR("requesting for"), + TRACE_LOCK_TIMEOUT_AFTER("timeout after "), + TRACE_LOCK_UNLOCK("unlock"), + TRACE_LOCK_ADDED_FOR("added for"), + TRACE_LOCK_ADD_UPGRADED_FOR("add (upgraded) for "); + + private final String eventText; + + TraceLockEvent(String eventText) { + this.eventText = eventText; + } + + public String getEventText() { + return eventText; + } + } + private static final String NO_EXTRA_INFO = ""; + + static { + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + WAITING_FOR_LOCK = new DebuggingThreadLocal<>(); + EXCLUSIVE_LOCKS = new DebuggingThreadLocal<>(); + SHARED_LOCKS = new DebuggingThreadLocal<>(); + } else { + WAITING_FOR_LOCK = null; + EXCLUSIVE_LOCKS = null; + SHARED_LOCKS = null; + } + } + + /** + * Whether the table contains a CLOB or BLOB. + */ + private final boolean containsLargeObject; + + /** + * The session (if any) that has exclusively locked this table. + */ + private volatile SessionLocal lockExclusiveSession; + + /** + * The set of sessions (if any) that have a shared lock on the table. Here + * we are using using a ConcurrentHashMap as a set, as there is no + * ConcurrentHashSet. + */ + private final ConcurrentHashMap lockSharedSessions = new ConcurrentHashMap<>(); + + private Column rowIdColumn; + + private final MVPrimaryIndex primaryIndex; + private final ArrayList indexes = Utils.newSmallArrayList(); + private final AtomicLong lastModificationId = new AtomicLong(); /** * The queue of sessions waiting to lock the table. It is a FIFO queue to * prevent starvation, since Java's synchronized locking is biased. 
*/ - private final ArrayDeque waitingSessions = new ArrayDeque(); + private final ArrayDeque waitingSessions = new ArrayDeque<>(); private final Trace traceLock; - private int changesSinceAnalyze; + private final AtomicInteger changesUntilAnalyze; private int nextAnalyze; - private boolean containsLargeObject; - private Column rowIdColumn; - private final TransactionStore store; + private final Store store; + private final TransactionStore transactionStore; - public MVTable(CreateTableData data, MVTableEngine.Store store) { + public MVTable(CreateTableData data, Store store) { super(data); - nextAnalyze = database.getSettings().analyzeAuto; - this.store = store.getTransactionStore(); this.isHidden = data.isHidden; + boolean b = false; for (Column col : getColumns()) { - if (DataType.isLargeObject(col.getType())) { - containsLargeObject = true; + if (DataType.isLargeObject(col.getType().getValueType())) { + b = true; + break; } } + containsLargeObject = b; + nextAnalyze = database.getSettings().analyzeAuto; + changesUntilAnalyze = nextAnalyze <= 0 ? null : new AtomicInteger(nextAnalyze); + this.store = store; + this.transactionStore = store.getTransactionStore(); traceLock = database.getTrace(Trace.LOCK); - } - /** - * Initialize the table. 
- * - * @param session the session - */ - void init(Session session) { - primaryIndex = new MVPrimaryIndex(session.getDatabase(), this, getId(), + primaryIndex = new MVPrimaryIndex(database, this, getId(), IndexColumn.wrap(getColumns()), IndexType.createScan(true)); indexes.add(primaryIndex); } @@ -97,345 +162,206 @@ public String getMapName() { } @Override - public boolean lock(Session session, boolean exclusive, - boolean forceLockEvenInMvcc) { - int lockMode = database.getLockMode(); - if (lockMode == Constants.LOCK_MODE_OFF) { + public boolean lock(SessionLocal session, int lockType) { + if (database.getLockMode() == Constants.LOCK_MODE_OFF) { + session.registerTableAsUpdated(this); return false; } - if (!forceLockEvenInMvcc && database.isMultiVersion()) { - // MVCC: update, delete, and insert use a shared lock. - // Select doesn't lock except when using FOR UPDATE and - // the system property h2.selectForUpdateMvcc - // is not enabled - if (exclusive) { - exclusive = false; - } else { - if (lockExclusiveSession == null) { - return false; - } - } + if (lockType == Table.READ_LOCK && lockExclusiveSession == null) { + return false; } if (lockExclusiveSession == session) { return true; } - if (!exclusive && lockSharedSessions.containsKey(session)) { + if (lockType != Table.EXCLUSIVE_LOCK && lockSharedSessions.containsKey(session)) { return true; } - synchronized (getLockSyncObject()) { - if (!exclusive && lockSharedSessions.containsKey(session)) { + synchronized (this) { + if (lockType != Table.EXCLUSIVE_LOCK && lockSharedSessions.containsKey(session)) { return true; } session.setWaitForLock(this, Thread.currentThread()); + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + WAITING_FOR_LOCK.set(getName()); + } waitingSessions.addLast(session); try { - doLock1(session, lockMode, exclusive); + doLock1(session, lockType); } finally { session.setWaitForLock(null, null); + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + WAITING_FOR_LOCK.remove(); + } 
waitingSessions.remove(session); } } return false; } - /** - * The the object on which to synchronize and wait on. For the - * multi-threaded mode, this is this object, but for non-multi-threaded, it - * is the database, as in this case all operations are synchronized on the - * database object. - * - * @return the lock sync object - */ - private Object getLockSyncObject() { - if (database.isMultiThreaded()) { - return this; - } - return database; - } - - private void doLock1(Session session, int lockMode, boolean exclusive) { - traceLock(session, exclusive, "requesting for"); + private void doLock1(SessionLocal session, int lockType) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_REQUESTING_FOR, NO_EXTRA_INFO); // don't get the current time unless necessary - long max = 0; + long max = 0L; boolean checkDeadlock = false; while (true) { // if I'm the next one in the queue - if (waitingSessions.getFirst() == session) { - if (doLock2(session, lockMode, exclusive)) { + if (waitingSessions.getFirst() == session && lockExclusiveSession == null) { + if (doLock2(session, lockType)) { return; } } if (checkDeadlock) { - ArrayList sessions = checkDeadlock(session, null, null); + ArrayList sessions = checkDeadlock(session, null, null); if (sessions != null) { throw DbException.get(ErrorCode.DEADLOCK_1, - getDeadlockDetails(sessions, exclusive)); + getDeadlockDetails(sessions, lockType)); } } else { // check for deadlocks from now on checkDeadlock = true; } - long now = System.currentTimeMillis(); - if (max == 0) { + long now = System.nanoTime(); + if (max == 0L) { // try at least one more time - max = now + session.getLockTimeout(); - } else if (now >= max) { - traceLock(session, exclusive, - "timeout after " + session.getLockTimeout()); + max = Utils.nanoTimePlusMillis(now, session.getLockTimeout()); + } else if (now - max >= 0L) { + traceLock(session, lockType, + TraceLockEvent.TRACE_LOCK_TIMEOUT_AFTER, Integer.toString(session.getLockTimeout())); throw 
DbException.get(ErrorCode.LOCK_TIMEOUT_1, getName()); } try { - traceLock(session, exclusive, "waiting for"); - if (database.getLockMode() == Constants.LOCK_MODE_TABLE_GC) { - for (int i = 0; i < 20; i++) { - long free = Runtime.getRuntime().freeMemory(); - System.gc(); - long free2 = Runtime.getRuntime().freeMemory(); - if (free == free2) { - break; - } - } - } + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_WAITING_FOR, NO_EXTRA_INFO); // don't wait too long so that deadlocks are detected early - long sleep = Math.min(Constants.DEADLOCK_CHECK, max - now); + long sleep = Math.min(Constants.DEADLOCK_CHECK, (max - now) / 1_000_000L); if (sleep == 0) { sleep = 1; } - getLockSyncObject().wait(sleep); + wait(sleep); } catch (InterruptedException e) { // ignore } } } - private boolean doLock2(Session session, int lockMode, boolean exclusive) { - if (exclusive) { - if (lockExclusiveSession == null) { - if (lockSharedSessions.isEmpty()) { - traceLock(session, exclusive, "added for"); - session.addLock(this); - lockExclusiveSession = session; - return true; - } else if (lockSharedSessions.size() == 1 && - lockSharedSessions.containsKey(session)) { - traceLock(session, exclusive, "add (upgraded) for "); - lockExclusiveSession = session; - return true; - } + private boolean doLock2(SessionLocal session, int lockType) { + switch (lockType) { + case Table.EXCLUSIVE_LOCK: + int size = lockSharedSessions.size(); + if (size == 0) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_ADDED_FOR, NO_EXTRA_INFO); + session.registerTableAsLocked(this); + } else if (size == 1 && lockSharedSessions.containsKey(session)) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_ADD_UPGRADED_FOR, NO_EXTRA_INFO); + } else { + return false; } - } else { - if (lockExclusiveSession == null) { - if (lockMode == Constants.LOCK_MODE_READ_COMMITTED) { - if (!database.isMultiThreaded() && - !database.isMultiVersion()) { - // READ_COMMITTED: a read lock is acquired, - // but released 
immediately after the operation - // is complete. - // When allowing only one thread, no lock is - // required. - // Row level locks work like read committed. - return true; - } - } - if (!lockSharedSessions.containsKey(session)) { - traceLock(session, exclusive, "ok"); - session.addLock(this); - lockSharedSessions.put(session, session); - } - return true; + lockExclusiveSession = session; + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + addLockToDebugList(EXCLUSIVE_LOCKS); } - } - return false; - } - - private static String getDeadlockDetails(ArrayList sessions, boolean exclusive) { - // We add the thread details here to make it easier for customers to - // match up these error messages with their own logs. - StringBuilder buff = new StringBuilder(); - for (Session s : sessions) { - Table lock = s.getWaitForLock(); - Thread thread = s.getWaitForLockThread(); - buff.append("\nSession ").append(s.toString()) - .append(" on thread ").append(thread.getName()) - .append(" is waiting to lock ").append(lock.toString()) - .append(exclusive ? 
" (exclusive)" : " (shared)") - .append(" while locking "); - int i = 0; - for (Table t : s.getLocks()) { - if (i++ > 0) { - buff.append(", "); - } - buff.append(t.toString()); - if (t instanceof MVTable) { - if (((MVTable) t).lockExclusiveSession == s) { - buff.append(" (exclusive)"); - } else { - buff.append(" (shared)"); - } + break; + case Table.WRITE_LOCK: + if (lockSharedSessions.putIfAbsent(session, session) == null) { + traceLock(session, lockType, TraceLockEvent.TRACE_LOCK_OK, NO_EXTRA_INFO); + session.registerTableAsLocked(this); + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + addLockToDebugList(SHARED_LOCKS); } } - buff.append('.'); } - return buff.toString(); + return true; } - @Override - public ArrayList checkDeadlock(Session session, Session clash, - Set visited) { - // only one deadlock check at any given time - synchronized (MVTable.class) { - if (clash == null) { - // verification is started - clash = session; - visited = New.hashSet(); - } else if (clash == session) { - // we found a circle where this session is involved - return New.arrayList(); - } else if (visited.contains(session)) { - // we have already checked this session. 
- // there is a circle, but the sessions in the circle need to - // find it out themselves - return null; - } - visited.add(session); - ArrayList error = null; - for (Session s : lockSharedSessions.keySet()) { - if (s == session) { - // it doesn't matter if we have locked the object already - continue; - } - Table t = s.getWaitForLock(); - if (t != null) { - error = t.checkDeadlock(s, clash, visited); - if (error != null) { - error.add(session); - break; - } - } - } - // take a local copy so we don't see inconsistent data, since we are - // not locked while checking the lockExclusiveSession value - Session copyOfLockExclusiveSession = lockExclusiveSession; - if (error == null && copyOfLockExclusiveSession != null) { - Table t = copyOfLockExclusiveSession.getWaitForLock(); - if (t != null) { - error = t.checkDeadlock(copyOfLockExclusiveSession, clash, - visited); - if (error != null) { - error.add(session); - } - } - } - return error; + private void addLockToDebugList(DebuggingThreadLocal> locks) { + ArrayList list = locks.get(); + if (list == null) { + list = new ArrayList<>(); + locks.set(list); } + list.add(getName()); } - private void traceLock(Session session, boolean exclusive, String s) { + private void traceLock(SessionLocal session, int lockType, TraceLockEvent eventEnum, String extraInfo) { if (traceLock.isDebugEnabled()) { - traceLock.debug("{0} {1} {2} {3}", session.getId(), - exclusive ? 
"exclusive write lock" : "shared read lock", s, - getName()); + traceLock.debug("{0} {1} {2} {3} {4}", session.getId(), + lockTypeToString(lockType), eventEnum.getEventText(), + getName(), extraInfo); } } @Override - public boolean isLockedExclusively() { - return lockExclusiveSession != null; - } - - @Override - public boolean isLockedExclusivelyBy(Session session) { - return lockExclusiveSession == session; - } - - @Override - public void unlock(Session s) { + public void unlock(SessionLocal s) { if (database != null) { - traceLock(s, lockExclusiveSession == s, "unlock"); + int lockType; if (lockExclusiveSession == s) { + lockType = Table.EXCLUSIVE_LOCK; + lockSharedSessions.remove(s); lockExclusiveSession = null; - } - synchronized (getLockSyncObject()) { - if (lockSharedSessions.size() > 0) { - lockSharedSessions.remove(s); + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + ArrayList exclusiveLocks = EXCLUSIVE_LOCKS.get(); + if (exclusiveLocks != null) { + exclusiveLocks.remove(getName()); + } } - if (!waitingSessions.isEmpty()) { - getLockSyncObject().notifyAll(); + } else { + lockType = lockSharedSessions.remove(s) != null ? 
Table.WRITE_LOCK : Table.READ_LOCK; + if (SysProperties.THREAD_DEADLOCK_DETECTOR) { + ArrayList sharedLocks = SHARED_LOCKS.get(); + if (sharedLocks != null) { + sharedLocks.remove(getName()); + } } } - } - } - - @Override - public boolean canTruncate() { - if (getCheckForeignKeyConstraints() && - database.getReferentialIntegrity()) { - ArrayList constraints = getConstraints(); - if (constraints != null) { - for (int i = 0, size = constraints.size(); i < size; i++) { - Constraint c = constraints.get(i); - if (!(c.getConstraintType().equals(Constraint.REFERENTIAL))) { - continue; - } - ConstraintReferential ref = (ConstraintReferential) c; - if (ref.getRefTable() == this) { - return false; - } + traceLock(s, lockType, TraceLockEvent.TRACE_LOCK_UNLOCK, NO_EXTRA_INFO); + if (lockType != Table.READ_LOCK && !waitingSessions.isEmpty()) { + synchronized (this) { + notifyAll(); } } } - return true; } @Override - public void close(Session session) { + public void close(SessionLocal session) { // ignore } @Override - public Row getRow(Session session, long key) { + public Row getRow(SessionLocal session, long key) { return primaryIndex.getRow(session, key); } @Override - public Index addIndex(Session session, String indexName, int indexId, - IndexColumn[] cols, IndexType indexType, boolean create, - String indexComment) { - if (indexType.isPrimaryKey()) { - for (IndexColumn c : cols) { - Column column = c.column; - if (column.isNullable()) { - throw DbException.get( - ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, - column.getName()); - } - column.setPrimaryKey(true); - } - } + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { + cols = prepareColumns(database, cols, indexType); boolean isSessionTemporary = isTemporary() && !isGlobalTemporary(); if (!isSessionTemporary) { database.lockMeta(session); } - MVIndex index; - int mainIndexColumn; - 
mainIndexColumn = getMainIndexColumn(indexType, cols); + MVIndex index; + int mainIndexColumn = primaryIndex.getMainIndexColumn() != SearchRow.ROWID_INDEX + ? SearchRow.ROWID_INDEX : getMainIndexColumn(indexType, cols); if (database.isStarting()) { - if (store.store.hasMap("index." + indexId)) { - mainIndexColumn = -1; + // if index does exists as a separate map it can't be a delegate + if (transactionStore.hasMap("index." + indexId)) { + // we can not reuse primary index + mainIndexColumn = SearchRow.ROWID_INDEX; } } else if (primaryIndex.getRowCountMax() != 0) { - mainIndexColumn = -1; + mainIndexColumn = SearchRow.ROWID_INDEX; } - if (mainIndexColumn != -1) { + + if (mainIndexColumn != SearchRow.ROWID_INDEX) { primaryIndex.setMainIndexColumn(mainIndexColumn); index = new MVDelegateIndex(this, indexId, indexName, primaryIndex, indexType); } else if (indexType.isSpatial()) { index = new MVSpatialIndex(session.getDatabase(), this, indexId, - indexName, cols, indexType); + indexName, cols, uniqueColumnCount, indexType); } else { index = new MVSecondaryIndex(session.getDatabase(), this, indexId, - indexName, cols, indexType); + indexName, cols, uniqueColumnCount, indexType); } if (index.needRebuild()) { rebuildIndex(session, index, indexName); @@ -454,10 +380,9 @@ public Index addIndex(Session session, String indexName, int indexId, return index; } - private void rebuildIndex(Session session, MVIndex index, String indexName) { + private void rebuildIndex(SessionLocal session, MVIndex index, String indexName) { try { - if (session.getDatabase().getMvStore() == null || - index instanceof MVSpatialIndex) { + if (!session.getDatabase().isPersistent() || index instanceof MVSpatialIndex) { // in-memory rebuildIndexBuffered(session, index); } else { @@ -478,11 +403,7 @@ private void rebuildIndex(Session session, MVIndex index, String indexName) { } } - private void rebuildIndexBlockMerge(Session session, MVIndex index) { - if (index instanceof MVSpatialIndex) { - // the 
spatial index doesn't support multi-way merge sort - rebuildIndexBuffered(session, index); - } + private void rebuildIndexBlockMerge(SessionLocal session, MVIndex index) { // Read entries in memory, sort them, write to a new map (in sorted // order); repeat (using a new map for every block of 1 MB) until all // record are read. Merge all maps to the target (using merge sort; @@ -495,18 +416,16 @@ private void rebuildIndexBlockMerge(Session session, MVIndex index) { long total = remaining; Cursor cursor = scan.find(session, null, null); long i = 0; - Store store = session.getDatabase().getMvStore(); + Store store = session.getDatabase().getStore(); int bufferSize = database.getMaxMemoryRows() / 2; - ArrayList buffer = New.arrayList(bufferSize); - String n = getName() + ":" + index.getName(); - int t = MathUtils.convertLongToInt(total); - ArrayList bufferNames = New.arrayList(); + ArrayList buffer = new ArrayList<>(bufferSize); + String n = getName() + ':' + index.getName(); + ArrayList bufferNames = Utils.newSmallArrayList(); while (cursor.next()) { Row row = cursor.get(); buffer.add(row); - database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, - MathUtils.convertLongToInt(i++), t); + database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, i++, total); if (buffer.size() >= bufferSize) { sortRows(buffer, index); String mapName = store.nextTemporaryMapName(); @@ -517,7 +436,7 @@ private void rebuildIndexBlockMerge(Session session, MVIndex index) { remaining--; } sortRows(buffer, index); - if (bufferNames.size() > 0) { + if (!bufferNames.isEmpty()) { String mapName = store.nextTemporaryMapName(); index.addRowsToBuffer(buffer, mapName); bufferNames.add(mapName); @@ -526,84 +445,39 @@ private void rebuildIndexBlockMerge(Session session, MVIndex index) { } else { addRowsToIndex(session, buffer, index); } - if (SysProperties.CHECK && remaining != 0) { - DbException.throwInternalError("rowcount remaining=" + remaining + - " " + getName()); + if 
(remaining != 0) { + throw DbException.getInternalError("rowcount remaining=" + remaining + ' ' + getName()); } } - private void rebuildIndexBuffered(Session session, Index index) { + private void rebuildIndexBuffered(SessionLocal session, Index index) { Index scan = getScanIndex(session); long remaining = scan.getRowCount(session); long total = remaining; Cursor cursor = scan.find(session, null, null); long i = 0; int bufferSize = (int) Math.min(total, database.getMaxMemoryRows()); - ArrayList buffer = New.arrayList(bufferSize); - String n = getName() + ":" + index.getName(); - int t = MathUtils.convertLongToInt(total); + ArrayList buffer = new ArrayList<>(bufferSize); + String n = getName() + ':' + index.getName(); while (cursor.next()) { Row row = cursor.get(); buffer.add(row); - database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, - MathUtils.convertLongToInt(i++), t); + database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n, i++, total); if (buffer.size() >= bufferSize) { addRowsToIndex(session, buffer, index); } remaining--; } addRowsToIndex(session, buffer, index); - if (SysProperties.CHECK && remaining != 0) { - DbException.throwInternalError("rowcount remaining=" + remaining + - " " + getName()); - } - } - - private int getMainIndexColumn(IndexType indexType, IndexColumn[] cols) { - if (primaryIndex.getMainIndexColumn() != -1) { - return -1; - } - if (!indexType.isPrimaryKey() || cols.length != 1) { - return -1; - } - IndexColumn first = cols[0]; - if (first.sortType != SortOrder.ASCENDING) { - return -1; - } - switch (first.column.getType()) { - case Value.BYTE: - case Value.SHORT: - case Value.INT: - case Value.LONG: - break; - default: - return -1; - } - return first.column.getColumnId(); - } - - private static void addRowsToIndex(Session session, ArrayList list, - Index index) { - sortRows(list, index); - for (Row row : list) { - index.add(session, row); + if (remaining != 0) { + throw DbException.getInternalError("rowcount 
remaining=" + remaining + ' ' + getName()); } - list.clear(); - } - - private static void sortRows(ArrayList list, final Index index) { - Collections.sort(list, new Comparator() { - @Override - public int compare(Row r1, Row r2) { - return index.compareRows(r1, r2); - } - }); } @Override - public void removeRow(Session session, Row row) { - lastModificationId = database.getNextModificationDataId(); - Transaction t = getTransaction(session); + public void removeRow(SessionLocal session, Row row) { + syncLastModificationIdWithDatabase(); + Transaction t = session.getTransaction(); long savepoint = t.setSavepoint(); try { for (int i = indexes.size() - 1; i >= 0; i--) { @@ -611,84 +485,94 @@ public void removeRow(Session session, Row row) { index.remove(session, row); } } catch (Throwable e) { - t.rollbackToSavepoint(savepoint); + try { + t.rollbackToSavepoint(savepoint); + } catch (Throwable nested) { + e.addSuppressed(nested); + } throw DbException.convert(e); } analyzeIfRequired(session); } @Override - public void truncate(Session session) { - lastModificationId = database.getNextModificationDataId(); + public long truncate(SessionLocal session) { + syncLastModificationIdWithDatabase(); + long result = getRowCountApproximation(session); for (int i = indexes.size() - 1; i >= 0; i--) { Index index = indexes.get(i); index.truncate(session); } - changesSinceAnalyze = 0; + if (changesUntilAnalyze != null) { + changesUntilAnalyze.set(nextAnalyze); + } + return result; } @Override - public void addRow(Session session, Row row) { - lastModificationId = database.getNextModificationDataId(); - Transaction t = getTransaction(session); + public void addRow(SessionLocal session, Row row) { + syncLastModificationIdWithDatabase(); + Transaction t = session.getTransaction(); long savepoint = t.setSavepoint(); try { - for (int i = 0, size = indexes.size(); i < size; i++) { - Index index = indexes.get(i); + for (Index index : indexes) { index.add(session, row); } } catch (Throwable 
e) { - t.rollbackToSavepoint(savepoint); - DbException de = DbException.convert(e); - if (de.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) { - for (int j = 0; j < indexes.size(); j++) { - Index index = indexes.get(j); - if (index.getIndexType().isUnique() && - index instanceof MultiVersionIndex) { - MultiVersionIndex mv = (MultiVersionIndex) index; - if (mv.isUncommittedFromOtherSession(session, row)) { - throw DbException.get( - ErrorCode.CONCURRENT_UPDATE_1, - index.getName()); - } - } - } + try { + t.rollbackToSavepoint(savepoint); + } catch (Throwable nested) { + e.addSuppressed(nested); } - throw de; + throw DbException.convert(e); } analyzeIfRequired(session); } - private void analyzeIfRequired(Session session) { - if (nextAnalyze == 0 || nextAnalyze > changesSinceAnalyze++) { - return; - } - changesSinceAnalyze = 0; - int n = 2 * nextAnalyze; - if (n > 0) { - nextAnalyze = n; - } - int rows = session.getDatabase().getSettings().analyzeSample / 10; - Analyze.analyzeTable(session, this, rows, false); - } - @Override - public void checkSupportAlter() { - // ok + public void updateRow(SessionLocal session, Row oldRow, Row newRow) { + newRow.setKey(oldRow.getKey()); + syncLastModificationIdWithDatabase(); + Transaction t = session.getTransaction(); + long savepoint = t.setSavepoint(); + try { + for (Index index : indexes) { + index.update(session, oldRow, newRow); + } + } catch (Throwable e) { + try { + t.rollbackToSavepoint(savepoint); + } catch (Throwable nested) { + e.addSuppressed(nested); + } + throw DbException.convert(e); + } + analyzeIfRequired(session); } @Override - public String getTableType() { - return Table.TABLE; + public Row lockRow(SessionLocal session, Row row) { + Row lockedRow = primaryIndex.lockRow(session, row); + if (lockedRow == null || !row.hasSharedData(lockedRow)) { + syncLastModificationIdWithDatabase(); + } + return lockedRow; } - @Override - public Index getScanIndex(Session session) { - return primaryIndex; + private void 
analyzeIfRequired(SessionLocal session) { + if (changesUntilAnalyze != null) { + if (changesUntilAnalyze.decrementAndGet() == 0) { + if (nextAnalyze <= Integer.MAX_VALUE / 2) { + nextAnalyze *= 2; + } + changesUntilAnalyze.set(nextAnalyze); + session.markTableForAnalyze(this); + } + } } @Override - public Index getUniqueIndex() { + public Index getScanIndex(SessionLocal session) { return primaryIndex; } @@ -699,72 +583,43 @@ public ArrayList getIndexes() { @Override public long getMaxDataModificationId() { - return lastModificationId; - } - - public boolean getContainsLargeObject() { - return containsLargeObject; + return lastModificationId.get(); } @Override - public boolean isDeterministic() { - return true; - } - - @Override - public boolean canGetRowCount() { - return true; - } - - @Override - public boolean canDrop() { - return true; - } - - @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { if (containsLargeObject) { // unfortunately, the data is gone on rollback truncate(session); database.getLobStorage().removeAllForTable(getId()); database.lockMeta(session); } - database.getMvStore().removeTable(this); + database.getStore().removeTable(this); super.removeChildrenAndResources(session); - // go backwards because database.removeIndex will - // call table.removeIndex + // remove scan index (at position 0 on the list) last while (indexes.size() > 1) { Index index = indexes.get(1); + index.remove(session); if (index.getName() != null) { database.removeSchemaObject(session, index); } // needed for session temporary indexes indexes.remove(index); } - if (SysProperties.CHECK) { - for (SchemaObject obj : database - .getAllSchemaObjects(DbObject.INDEX)) { - Index index = (Index) obj; - if (index.getTable() == this) { - DbException.throwInternalError("index not dropped: " + - index.getName()); - } - } - } primaryIndex.remove(session); - database.removeMeta(session, getId()); + 
indexes.clear(); close(session); invalidate(); } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return primaryIndex.getRowCount(session); } @Override - public long getRowCountApproximation() { - return primaryIndex.getRowCountApproximation(); + public long getRowCountApproximation(SessionLocal session) { + return primaryIndex.getRowCountApproximation(session); } @Override @@ -772,52 +627,320 @@ public long getDiskSpaceUsed() { return primaryIndex.getDiskSpaceUsed(); } + /** + * Get a new transaction. + * + * @return the transaction + */ + Transaction getTransactionBegin() { + // TODO need to commit/rollback the transaction + return transactionStore.begin(); + } + @Override - public void checkRename() { - // ok + public boolean isRowLockable() { + return true; + } + + /** + * Mark the transaction as committed, so that the modification counter of + * the database is incremented. + */ + public void commit() { + if (database != null) { + syncLastModificationIdWithDatabase(); + } + } + + // Field lastModificationId can not be just a volatile, because window of opportunity + // between reading database's modification id and storing this value in the field + // could be exploited by another thread. + // Second thread may do the same with possibly bigger (already advanced) + // modification id, and when first thread finally updates the field, it will + // result in lastModificationId jumping back. + // This is, of course, unacceptable. + private void syncLastModificationIdWithDatabase() { + long nextModificationDataId = database.getNextModificationDataId(); + long currentId; + do { + currentId = lastModificationId.get(); + } while (nextModificationDataId > currentId && + !lastModificationId.compareAndSet(currentId, nextModificationDataId)); } /** - * Get the transaction to use for this session. + * Convert the MVStoreException to a database exception. 
* - * @param session the session - * @return the transaction + * @param e the illegal state exception + * @return the database exception */ - Transaction getTransaction(Session session) { - if (session == null) { - // TODO need to commit/rollback the transaction - return store.begin(); + DbException convertException(MVStoreException e) { + int errorCode = e.getErrorCode(); + if (errorCode == DataUtils.ERROR_TRANSACTION_LOCKED) { + throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, + e, getName()); } - return session.getTransaction(); + if (errorCode == DataUtils.ERROR_TRANSACTIONS_DEADLOCK) { + throw DbException.get(ErrorCode.DEADLOCK_1, + e, getName()); + } + return store.convertMVStoreException(e); + } + + @Override + public int getMainIndexColumn() { + return primaryIndex.getMainIndexColumn(); + } + + + /** + * Appends the specified rows to the specified index. + * + * @param session + * the session + * @param list + * the rows, list is cleared on completion + * @param index + * the index to append to + */ + private static void addRowsToIndex(SessionLocal session, ArrayList list, Index index) { + sortRows(list, index); + for (Row row : list) { + index.add(session, row); + } + list.clear(); + } + + /** + * Formats details of a deadlock. + * + * @param sessions + * the list of sessions + * @param lockType + * the type of lock + * @return formatted details of a deadlock + */ + private static String getDeadlockDetails(ArrayList sessions, int lockType) { + // We add the thread details here to make it easier for customers to + // match up these error messages with their own logs. 
+ StringBuilder builder = new StringBuilder(); + for (SessionLocal s : sessions) { + Table lock = s.getWaitForLock(); + Thread thread = s.getWaitForLockThread(); + builder.append("\nSession ").append(s).append(" on thread ").append(thread.getName()) + .append(" is waiting to lock ").append(lock.toString()) + .append(" (").append(lockTypeToString(lockType)).append(") while locking "); + boolean addComma = false; + for (Table t : s.getLocks()) { + if (addComma) { + builder.append(", "); + } + addComma = true; + builder.append(t.toString()); + if (t instanceof MVTable) { + if (((MVTable) t).lockExclusiveSession == s) { + builder.append(" (exclusive)"); + } else { + builder.append(" (shared)"); + } + } + } + builder.append('.'); + } + return builder.toString(); + } + + private static String lockTypeToString(int lockType) { + return lockType == Table.READ_LOCK ? "shared read" + : lockType == Table.WRITE_LOCK ? "shared write" : "exclusive"; + } + + /** + * Sorts the specified list of rows for a specified index. 
+ * + * @param list + * the list of rows + * @param index + * the index to sort for + */ + private static void sortRows(ArrayList list, final Index index) { + list.sort(index::compareRows); + } + + @Override + public boolean canDrop() { + return true; + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return true; + } + + @Override + public boolean canTruncate() { + if (getCheckForeignKeyConstraints() && database.getReferentialIntegrity()) { + ArrayList constraints = getConstraints(); + if (constraints != null) { + for (Constraint c : constraints) { + if (c.getConstraintType() != Constraint.Type.REFERENTIAL) { + continue; + } + ConstraintReferential ref = (ConstraintReferential) c; + if (ref.getRefTable() == this) { + return false; + } + } + } + } + return true; + } + + @Override + public ArrayList checkDeadlock(SessionLocal session, SessionLocal clash, Set visited) { + // only one deadlock check at any given time + synchronized (getClass()) { + if (clash == null) { + // verification is started + clash = session; + visited = new HashSet<>(); + } else if (clash == session) { + // we found a cycle where this session is involved + return new ArrayList<>(0); + } else if (visited.contains(session)) { + // we have already checked this session. 
+ // there is a cycle, but the sessions in the cycle need to + // find it out themselves + return null; + } + visited.add(session); + ArrayList error = null; + for (SessionLocal s : lockSharedSessions.keySet()) { + if (s == session) { + // it doesn't matter if we have locked the object already + continue; + } + Table t = s.getWaitForLock(); + if (t != null) { + error = t.checkDeadlock(s, clash, visited); + if (error != null) { + error.add(session); + break; + } + } + } + // take a local copy so we don't see inconsistent data, since we are + // not locked while checking the lockExclusiveSession value + SessionLocal copyOfLockExclusiveSession = lockExclusiveSession; + if (error == null && copyOfLockExclusiveSession != null) { + Table t = copyOfLockExclusiveSession.getWaitForLock(); + if (t != null) { + error = t.checkDeadlock(copyOfLockExclusiveSession, clash, visited); + if (error != null) { + error.add(session); + } + } + } + return error; + } + } + + @Override + public void checkSupportAlter() { + // ok + } + + public boolean getContainsLargeObject() { + return containsLargeObject; } @Override public Column getRowIdColumn() { if (rowIdColumn == null) { - rowIdColumn = new Column(Column.ROWID, Value.LONG); - rowIdColumn.setTable(this, -1); + rowIdColumn = new Column(Column.ROWID, TypeInfo.TYPE_BIGINT, this, SearchRow.ROWID_INDEX); + rowIdColumn.setRowId(true); + rowIdColumn.setNullable(false); } return rowIdColumn; } @Override - public String toString() { - return getSQL(); + public TableType getTableType() { + return TableType.TABLE; } @Override - public boolean isMVStore() { + public boolean isDeterministic() { return true; } + @Override + public boolean isLockedExclusively() { + return lockExclusiveSession != null; + } + + @Override + public boolean isLockedExclusivelyBy(SessionLocal session) { + return lockExclusiveSession == session; + } + + @Override + protected void invalidate() { + super.invalidate(); + /* + * Query cache of a some sleeping session can have 
references to + * invalidated tables. When this table was dropped by another session, + * the field below still points to it and prevents its garbage + * collection, so this field needs to be cleared to prevent a memory + * leak. + */ + lockExclusiveSession = null; + } + + @Override + public String toString() { + return getTraceSQL(); + } + /** - * Mark the transaction as committed, so that the modification counter of - * the database is incremented. + * Prepares columns of an index. + * + * @param database the database + * @param cols the index columns + * @param indexType the type of an index + * @return the prepared columns with flags set */ - public void commit() { - if (database != null) { - lastModificationId = database.getNextModificationDataId(); + private static IndexColumn[] prepareColumns(Database database, IndexColumn[] cols, IndexType indexType) { + if (indexType.isPrimaryKey()) { + for (IndexColumn c : cols) { + Column column = c.column; + if (column.isNullable()) { + throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName()); + } + } + for (IndexColumn c : cols) { + c.column.setPrimaryKey(true); + } + } else if (!indexType.isSpatial()) { + int i = 0, l = cols.length; + while (i < l && (cols[i].sortType & (SortOrder.NULLS_FIRST | SortOrder.NULLS_LAST)) != 0) { + i++; + } + if (i != l) { + cols = cols.clone(); + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); + for (; i < l; i++) { + IndexColumn oldColumn = cols[i]; + int sortTypeOld = oldColumn.sortType; + int sortTypeNew = defaultNullOrdering.addExplicitNullOrdering(sortTypeOld); + if (sortTypeNew != sortTypeOld) { + IndexColumn newColumn = new IndexColumn(oldColumn.columnName, sortTypeNew); + newColumn.column = oldColumn.column; + cols[i] = newColumn; + } + } + } } + return cols; } - } diff --git a/h2/src/main/org/h2/mvstore/db/MVTableEngine.java b/h2/src/main/org/h2/mvstore/db/MVTableEngine.java deleted file mode 100644 index 9e483e323c..0000000000 
--- a/h2/src/main/org/h2/mvstore/db/MVTableEngine.java +++ /dev/null @@ -1,440 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mvstore.db; - -import java.io.InputStream; -import java.lang.Thread.UncaughtExceptionHandler; -import java.nio.channels.FileChannel; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.h2.api.ErrorCode; -import org.h2.api.TableEngine; -import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.FileStore; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.MVStoreTool; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.mvstore.db.TransactionStore.TransactionMap; -import org.h2.store.InDoubtTransaction; -import org.h2.store.fs.FileChannelInputStream; -import org.h2.store.fs.FileUtils; -import org.h2.table.TableBase; -import org.h2.util.BitField; -import org.h2.util.New; - -/** - * A table engine that internally uses the MVStore. - */ -public class MVTableEngine implements TableEngine { - - /** - * Initialize the MVStore. 
- * - * @param db the database - * @return the store - */ - public static Store init(final Database db) { - Store store = db.getMvStore(); - if (store != null) { - return store; - } - byte[] key = db.getFileEncryptionKey(); - String dbPath = db.getDatabasePath(); - MVStore.Builder builder = new MVStore.Builder(); - if (dbPath == null) { - store = new Store(db, builder); - } else { - String fileName = dbPath + Constants.SUFFIX_MV_FILE; - MVStoreTool.compactCleanUp(fileName); - builder.fileName(fileName); - builder.pageSplitSize(db.getPageSize()); - if (db.isReadOnly()) { - builder.readOnly(); - } else { - // possibly create the directory - boolean exists = FileUtils.exists(fileName); - if (exists && !FileUtils.canWrite(fileName)) { - // read only - } else { - String dir = FileUtils.getParent(fileName); - FileUtils.createDirectories(dir); - } - } - if (key != null) { - char[] password = new char[key.length / 2]; - for (int i = 0; i < password.length; i++) { - password[i] = (char) (((key[i + i] & 255) << 16) | - ((key[i + i + 1]) & 255)); - } - builder.encryptionKey(password); - } - if (db.getSettings().compressData) { - builder.compress(); - // use a larger page split size to improve the compression ratio - builder.pageSplitSize(64 * 1024); - } - builder.backgroundExceptionHandler(new UncaughtExceptionHandler() { - - @Override - public void uncaughtException(Thread t, Throwable e) { - db.setBackgroundException(DbException.convert(e)); - } - - }); - try { - store = new Store(db, builder); - } catch (IllegalStateException e) { - int errorCode = DataUtils.getErrorCode(e.getMessage()); - if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { - if (key != null) { - throw DbException.get( - ErrorCode.FILE_ENCRYPTION_ERROR_1, - e, fileName); - } - } else if (errorCode == DataUtils.ERROR_FILE_LOCKED) { - throw DbException.get( - ErrorCode.DATABASE_ALREADY_OPEN_1, - e, fileName); - } else if (errorCode == DataUtils.ERROR_READING_FAILED) { - throw DbException.get( - 
ErrorCode.IO_EXCEPTION_1, - e, fileName); - } - throw DbException.get( - ErrorCode.FILE_CORRUPTED_1, - e, fileName); - } - } - db.setMvStore(store); - return store; - } - - @Override - public TableBase createTable(CreateTableData data) { - Database db = data.session.getDatabase(); - Store store = init(db); - MVTable table = new MVTable(data, store); - table.init(data.session); - store.tableMap.put(table.getMapName(), table); - return table; - } - - /** - * A store with open tables. - */ - public static class Store { - - /** - * The map of open tables. - * Key: the map name, value: the table. - */ - final ConcurrentHashMap tableMap = - new ConcurrentHashMap(); - - /** - * The store. - */ - private final MVStore store; - - /** - * The transaction store. - */ - private final TransactionStore transactionStore; - - private long statisticsStart; - - private int temporaryMapId; - - public Store(Database db, MVStore.Builder builder) { - this.store = builder.open(); - if (!db.getSettings().reuseSpace) { - store.setReuseSpace(false); - } - this.transactionStore = new TransactionStore( - store, - new ValueDataType(null, db, null)); - transactionStore.init(); - } - - public MVStore getStore() { - return store; - } - - public TransactionStore getTransactionStore() { - return transactionStore; - } - - public HashMap getTables() { - return new HashMap(tableMap); - } - - /** - * Remove a table. - * - * @param table the table - */ - public void removeTable(MVTable table) { - tableMap.remove(table.getMapName()); - } - - /** - * Store all pending changes. - */ - public void flush() { - FileStore s = store.getFileStore(); - if (s == null || s.isReadOnly()) { - return; - } - if (!store.compact(50, 4 * 1024 * 1024)) { - store.commit(); - } - } - - /** - * Close the store, without persisting changes. 
- */ - public void closeImmediately() { - if (store.isClosed()) { - return; - } - store.closeImmediately(); - } - - /** - * Commit all transactions that are in the committing state, and - * rollback all open transactions. - */ - public void initTransactions() { - List list = transactionStore.getOpenTransactions(); - for (Transaction t : list) { - if (t.getStatus() == Transaction.STATUS_COMMITTING) { - t.commit(); - } else if (t.getStatus() != Transaction.STATUS_PREPARED) { - t.rollback(); - } - } - } - - /** - * Remove all temporary maps. - * - * @param objectIds the ids of the objects to keep - */ - public void removeTemporaryMaps(BitField objectIds) { - for (String mapName : store.getMapNames()) { - if (mapName.startsWith("temp.")) { - MVMap map = store.openMap(mapName); - store.removeMap(map); - } else if (mapName.startsWith("table.") || mapName.startsWith("index.")) { - int id = Integer.parseInt(mapName.substring(1 + mapName.indexOf("."))); - if (!objectIds.get(id)) { - ValueDataType keyType = new ValueDataType(null, null, null); - ValueDataType valueType = new ValueDataType(null, null, null); - Transaction t = transactionStore.begin(); - TransactionMap m = t.openMap(mapName, keyType, valueType); - transactionStore.removeMap(m); - t.commit(); - } - } - } - } - - /** - * Get the name of the next available temporary map. - * - * @return the map name - */ - public synchronized String nextTemporaryMapName() { - return "temp." + temporaryMapId++; - } - - /** - * Prepare a transaction. 
- * - * @param session the session - * @param transactionName the transaction name (may be null) - */ - public void prepareCommit(Session session, String transactionName) { - Transaction t = session.getTransaction(); - t.setName(transactionName); - t.prepare(); - store.commit(); - } - - public ArrayList getInDoubtTransactions() { - List list = transactionStore.getOpenTransactions(); - ArrayList result = New.arrayList(); - for (Transaction t : list) { - if (t.getStatus() == Transaction.STATUS_PREPARED) { - result.add(new MVInDoubtTransaction(store, t)); - } - } - return result; - } - - /** - * Set the maximum memory to be used by the cache. - * - * @param kb the maximum size in KB - */ - public void setCacheSize(int kb) { - store.setCacheSize(Math.max(1, kb / 1024)); - } - - public InputStream getInputStream() { - FileChannel fc = store.getFileStore().getEncryptedFile(); - if (fc == null) { - fc = store.getFileStore().getFile(); - } - return new FileChannelInputStream(fc, false); - } - - /** - * Force the changes to disk. - */ - public void sync() { - flush(); - store.sync(); - } - - /** - * Compact the database file, that is, compact blocks that have a low - * fill rate, and move chunks next to each other. This will typically - * shrink the database file. Changes are flushed to the file, and old - * chunks are overwritten. - * - * @param maxCompactTime the maximum time in milliseconds to compact - */ - public void compactFile(long maxCompactTime) { - store.setRetentionTime(0); - long start = System.currentTimeMillis(); - while (store.compact(95, 16 * 1024 * 1024)) { - store.sync(); - store.compactMoveChunks(95, 16 * 1024 * 1024); - long time = System.currentTimeMillis() - start; - if (time > maxCompactTime) { - break; - } - } - } - - /** - * Close the store. Pending changes are persisted. Chunks with a low - * fill rate are compacted, but old chunks are kept for some time, so - * most likely the database file will not shrink. 
- * - * @param maxCompactTime the maximum time in milliseconds to compact - */ - public void close(long maxCompactTime) { - try { - if (!store.isClosed() && store.getFileStore() != null) { - boolean compactFully = false; - if (!store.getFileStore().isReadOnly()) { - transactionStore.close(); - if (maxCompactTime == Long.MAX_VALUE) { - compactFully = true; - } - } - String fileName = store.getFileStore().getFileName(); - store.close(); - if (compactFully && FileUtils.exists(fileName)) { - // the file could have been deleted concurrently, - // so only compact if the file still exists - MVStoreTool.compact(fileName, true); - } - } - } catch (IllegalStateException e) { - int errorCode = DataUtils.getErrorCode(e.getMessage()); - if (errorCode == DataUtils.ERROR_WRITING_FAILED) { - // disk full - ok - } else if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { - // wrong encryption key - ok - } - store.closeImmediately(); - throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, "Closing"); - } - } - - /** - * Start collecting statistics. - */ - public void statisticsStart() { - FileStore fs = store.getFileStore(); - statisticsStart = fs == null ? 0 : fs.getReadCount(); - } - - /** - * Stop collecting statistics. - * - * @return the statistics - */ - public Map statisticsEnd() { - HashMap map = New.hashMap(); - FileStore fs = store.getFileStore(); - int reads = fs == null ? 0 : (int) (fs.getReadCount() - statisticsStart); - map.put("reads", reads); - return map; - } - - } - - /** - * An in-doubt transaction. 
- */ - private static class MVInDoubtTransaction implements InDoubtTransaction { - - private final MVStore store; - private final Transaction transaction; - private int state = InDoubtTransaction.IN_DOUBT; - - MVInDoubtTransaction(MVStore store, Transaction transaction) { - this.store = store; - this.transaction = transaction; - } - - @Override - public void setState(int state) { - if (state == InDoubtTransaction.COMMIT) { - transaction.commit(); - } else { - transaction.rollback(); - } - store.commit(); - this.state = state; - } - - @Override - public String getState() { - switch(state) { - case IN_DOUBT: - return "IN_DOUBT"; - case COMMIT: - return "COMMIT"; - case ROLLBACK: - return "ROLLBACK"; - default: - throw DbException.throwInternalError("state="+state); - } - } - - @Override - public String getTransactionName() { - return transaction.getName(); - } - - } - -} diff --git a/h2/src/main/org/h2/mvstore/db/MVTempResult.java b/h2/src/main/org/h2/mvstore/db/MVTempResult.java new file mode 100644 index 0000000000..97779cba55 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/MVTempResult.java @@ -0,0 +1,230 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.io.IOException; +import java.lang.ref.Reference; +import java.util.Collection; + +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStore.Builder; +import org.h2.result.ResultExternal; +import org.h2.result.SortOrder; +import org.h2.store.fs.FileUtils; +import org.h2.util.TempFileDeleter; +import org.h2.value.Value; + +/** + * Temporary result. + * + *

    + * A separate MVStore in a temporary file is used for each result. The file is + * removed when this result and all its copies are closed. + * {@link TempFileDeleter} is also used to delete this file if results are not + * closed properly. + *

    + */ +public abstract class MVTempResult implements ResultExternal { + + private static final class CloseImpl implements AutoCloseable { + /** + * MVStore. + */ + private final MVStore store; + + /** + * File name. + */ + private final String fileName; + + CloseImpl(MVStore store, String fileName) { + this.store = store; + this.fileName = fileName; + } + + @Override + public void close() throws Exception { + store.closeImmediately(); + FileUtils.tryDelete(fileName); + } + + } + + /** + * Creates MVStore-based temporary result. + * + * @param database + * database + * @param expressions + * expressions + * @param distinct + * is output distinct + * @param distinctIndexes + * indexes of distinct columns for DISTINCT ON results + * @param visibleColumnCount + * count of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses + * @param sort + * sort order, or {@code null} + * @return temporary result + */ + public static ResultExternal of(Database database, Expression[] expressions, boolean distinct, + int[] distinctIndexes, int visibleColumnCount, int resultColumnCount, SortOrder sort) { + return distinct || distinctIndexes != null || sort != null + ? new MVSortedTempResult(database, expressions, distinct, distinctIndexes, visibleColumnCount, + resultColumnCount, sort) + : new MVPlainTempResult(database, expressions, visibleColumnCount, resultColumnCount); + } + + private final Database database; + + /** + * MVStore. + */ + final MVStore store; + + /** + * Column expressions. + */ + final Expression[] expressions; + + /** + * Count of visible columns. + */ + final int visibleColumnCount; + + /** + * Total count of columns. + */ + final int resultColumnCount; + + /** + * Count of rows. Used only in a root results, copies always have 0 value. + */ + int rowCount; + + /** + * Parent store for copies. If {@code null} this result is a root result. 
+ */ + final MVTempResult parent; + + /** + * Count of child results. + */ + int childCount; + + /** + * Whether this result is closed. + */ + boolean closed; + + /** + * Temporary file deleter. + */ + private final TempFileDeleter tempFileDeleter; + + /** + * Closeable to close the storage. + */ + private final CloseImpl closeable; + + /** + * Reference to the record in the temporary file deleter. + */ + private final Reference fileRef; + + /** + * Creates a shallow copy of the result. + * + * @param parent + * parent result + */ + MVTempResult(MVTempResult parent) { + this.parent = parent; + this.database = parent.database; + this.store = parent.store; + this.expressions = parent.expressions; + this.visibleColumnCount = parent.visibleColumnCount; + this.resultColumnCount = parent.resultColumnCount; + this.tempFileDeleter = null; + this.closeable = null; + this.fileRef = null; + } + + /** + * Creates a new temporary result. + * + * @param database + * database + * @param expressions + * column expressions + * @param visibleColumnCount + * count of visible columns + * @param resultColumnCount + * total count of columns + */ + MVTempResult(Database database, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { + this.database = database; + try { + String fileName = FileUtils.createTempFile("h2tmp", Constants.SUFFIX_TEMP_FILE, true); + Builder builder = new MVStore.Builder().fileName(fileName).cacheSize(0).autoCommitDisabled(); + byte[] key = database.getFileEncryptionKey(); + if (key != null) { + builder.encryptionKey(Store.decodePassword(key)); + } + store = builder.open(); + this.expressions = expressions; + this.visibleColumnCount = visibleColumnCount; + this.resultColumnCount = resultColumnCount; + tempFileDeleter = database.getTempFileDeleter(); + closeable = new CloseImpl(store, fileName); + fileRef = tempFileDeleter.addFile(closeable, this); + } catch (IOException e) { + throw DbException.convert(e); + } + parent = null; + } + + 
@Override + public int addRows(Collection rows) { + for (Value[] row : rows) { + addRow(row); + } + return rowCount; + } + + @Override + public synchronized void close() { + if (closed) { + return; + } + closed = true; + if (parent != null) { + parent.closeChild(); + } else { + if (childCount == 0) { + delete(); + } + } + } + + private synchronized void closeChild() { + if (--childCount == 0 && closed) { + delete(); + } + } + + private void delete() { + tempFileDeleter.deleteFile(fileRef, closeable); + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/NullValueDataType.java b/h2/src/main/org/h2/mvstore/db/NullValueDataType.java new file mode 100644 index 0000000000..c9b4ff3035 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/NullValueDataType.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.DataType; +import org.h2.value.Value; +import org.h2.value.ValueNull; + +/** + * Dummy data type used when no value is required. This data type doesn't use + * any disk space and always returns SQL NULL value. + */ +public final class NullValueDataType implements DataType { + + /** + * Dummy data type instance. 
+ */ + public static final NullValueDataType INSTANCE = new NullValueDataType(); + + private NullValueDataType() { + } + + @Override + public int compare(Value a, Value b) { + return 0; + } + + @Override + public int binarySearch(Value key, Object storage, int size, int initialGuess) { + return 0; + } + + @Override + public int getMemory(Value obj) { + return 0; + } + + @Override + public boolean isMemoryEstimationAllowed() { + return true; + } + + @Override + public void write(WriteBuffer buff, Value obj) { + } + + @Override + public void write(WriteBuffer buff, Object storage, int len) { + } + + @Override + public Value read(ByteBuffer buff) { + return ValueNull.INSTANCE; + } + + @Override + public void read(ByteBuffer buff, Object storage, int len) { + Arrays.fill((Value[]) storage, 0, len, ValueNull.INSTANCE); + } + + @Override + public Value[] createStorage(int size) { + return new Value[size]; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/RowDataType.java b/h2/src/main/org/h2/mvstore/db/RowDataType.java new file mode 100644 index 0000000000..3486203410 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/RowDataType.java @@ -0,0 +1,262 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Database; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; +import org.h2.result.RowFactory; +import org.h2.result.SearchRow; +import org.h2.store.DataHandler; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * The data type for rows. 
+ * + * @author Andrei Tokar + */ +public final class RowDataType extends BasicDataType implements StatefulDataType { + + private final ValueDataType valueDataType; + private final int[] sortTypes; + private final int[] indexes; + private final int columnCount; + private final boolean storeKeys; + + public RowDataType(CastDataProvider provider, CompareMode compareMode, DataHandler handler, int[] sortTypes, + int[] indexes, int columnCount, boolean storeKeys) { + this.valueDataType = new ValueDataType(provider, compareMode, handler, sortTypes); + this.sortTypes = sortTypes; + this.indexes = indexes; + this.columnCount = columnCount; + this.storeKeys = storeKeys; + assert indexes == null || sortTypes.length == indexes.length; + } + + public int[] getIndexes() { + return indexes; + } + + public RowFactory getRowFactory() { + return valueDataType.getRowFactory(); + } + + public void setRowFactory(RowFactory rowFactory) { + valueDataType.setRowFactory(rowFactory); + } + + public int getColumnCount() { + return columnCount; + } + + public boolean isStoreKeys() { + return storeKeys; + } + + @Override + public SearchRow[] createStorage(int capacity) { + return new SearchRow[capacity]; + } + + @Override + public int compare(SearchRow a, SearchRow b) { + if (a == b) { + return 0; + } + if (indexes == null) { + int len = a.getColumnCount(); + assert len == b.getColumnCount() : len + " != " + b.getColumnCount(); + for (int i = 0; i < len; i++) { + int comp = valueDataType.compareValues(a.getValue(i), b.getValue(i), sortTypes[i]); + if (comp != 0) { + return comp; + } + } + return 0; + } else { + return compareSearchRows(a, b); + } + } + + private int compareSearchRows(SearchRow a, SearchRow b) { + for (int i = 0; i < indexes.length; i++) { + int index = indexes[i]; + Value v1 = a.getValue(index); + Value v2 = b.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + break; + } + int comp = valueDataType.compareValues(v1, v2, sortTypes[i]); + if (comp != 
0) { + return comp; + } + } + long aKey = a.getKey(); + long bKey = b.getKey(); + return aKey == SearchRow.MATCH_ALL_ROW_KEY || bKey == SearchRow.MATCH_ALL_ROW_KEY ? + 0 : Long.compare(aKey, bKey); + } + + @Override + public int binarySearch(SearchRow key, Object storage, int size, int initialGuess) { + return binarySearch(key, (SearchRow[])storage, size, initialGuess); + } + + public int binarySearch(SearchRow key, SearchRow[] keys, int size, int initialGuess) { + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = compareSearchRows(key, keys[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return -(low + 1); + } + + @Override + public int getMemory(SearchRow row) { + return row.getMemory(); + } + + @Override + public SearchRow read(ByteBuffer buff) { + RowFactory rowFactory = valueDataType.getRowFactory(); + SearchRow row = rowFactory.createRow(); + if (storeKeys) { + row.setKey(DataUtils.readVarLong(buff)); + } + TypeInfo[] columnTypes = rowFactory.getColumnTypes(); + if (indexes == null) { + int columnCount = row.getColumnCount(); + for (int i = 0; i < columnCount; i++) { + row.setValue(i, valueDataType.readValue(buff, columnTypes != null ? columnTypes[i] : null)); + } + } else { + for (int i : indexes) { + row.setValue(i, valueDataType.readValue(buff, columnTypes != null ? 
columnTypes[i] : null)); + } + } + return row; + } + + @Override + public void write(WriteBuffer buff, SearchRow row) { + if (storeKeys) { + buff.putVarLong(row.getKey()); + } + if (indexes == null) { + int columnCount = row.getColumnCount(); + for (int i = 0; i < columnCount; i++) { + valueDataType.write(buff, row.getValue(i)); + } + } else { + for (int i : indexes) { + valueDataType.write(buff, row.getValue(i)); + } + } + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (obj == null || obj.getClass() != RowDataType.class) { + return false; + } + RowDataType other = (RowDataType) obj; + return columnCount == other.columnCount + && Arrays.equals(indexes, other.indexes) + && Arrays.equals(sortTypes, other.sortTypes) + && valueDataType.equals(other.valueDataType); + } + + @Override + public int hashCode() { + int res = super.hashCode(); + res = res * 31 + columnCount; + res = res * 31 + Arrays.hashCode(indexes); + res = res * 31 + Arrays.hashCode(sortTypes); + res = res * 31 + valueDataType.hashCode(); + return res; + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + buff.putVarInt(columnCount); + writeIntArray(buff, sortTypes); + writeIntArray(buff, indexes); + buff.put(storeKeys ? 
(byte) 1 : (byte) 0); + } + + private static void writeIntArray(WriteBuffer buff, int[] array) { + if(array == null) { + buff.putVarInt(0); + } else { + buff.putVarInt(array.length + 1); + for (int i : array) { + buff.putVarInt(i); + } + } + } + + @Override + public Factory getFactory() { + return FACTORY; + } + + + + private static final Factory FACTORY = new Factory(); + + public static final class Factory implements StatefulDataType.Factory { + + @Override + public RowDataType create(ByteBuffer buff, MetaType metaDataType, Database database) { + int columnCount = DataUtils.readVarInt(buff); + int[] sortTypes = readIntArray(buff); + int[] indexes = readIntArray(buff); + boolean storeKeys = buff.get() != 0; + CompareMode compareMode = database == null ? CompareMode.getInstance(null, 0) : database.getCompareMode(); + RowFactory rowFactory = RowFactory.getDefaultRowFactory().createRowFactory(database, compareMode, database, + sortTypes, indexes, null, columnCount, storeKeys); + return rowFactory.getRowDataType(); + } + + private static int[] readIntArray(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff) - 1; + if(len < 0) { + return null; + } + int[] res = new int[len]; + for (int i = 0; i < res.length; i++) { + res[i] = DataUtils.readVarInt(buff); + } + return res; + } + } +} diff --git a/h2/src/main/org/h2/mvstore/db/SpatialKey.java b/h2/src/main/org/h2/mvstore/db/SpatialKey.java new file mode 100644 index 0000000000..2a9438eb15 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/SpatialKey.java @@ -0,0 +1,143 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.util.Arrays; +import org.h2.engine.CastDataProvider; +import org.h2.mvstore.rtree.Spatial; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * A unique spatial key. 
+ */ +public class SpatialKey extends Value implements Spatial { + + private final long id; + private final float[] minMax; + + /** + * Create a new key. + * + * @param id the id + * @param minMax min x, max x, min y, max y, and so on + */ + public SpatialKey(long id, float... minMax) { + this.id = id; + this.minMax = minMax; + } + + public SpatialKey(long id, SpatialKey other) { + this.id = id; + this.minMax = other.minMax.clone(); + } + + @Override + public float min(int dim) { + return minMax[dim + dim]; + } + + @Override + public void setMin(int dim, float x) { + minMax[dim + dim] = x; + } + + @Override + public float max(int dim) { + return minMax[dim + dim + 1]; + } + + @Override + public void setMax(int dim, float x) { + minMax[dim + dim + 1] = x; + } + + @Override + public Spatial clone(long id) { + return new SpatialKey(id, this); + } + + @Override + public long getId() { + return id; + } + + @Override + public boolean isNull() { + return minMax.length == 0; + } + + @Override + public String toString() { + return getString(); + } + + @Override + public int hashCode() { + return (int) ((id >>> 32) ^ id); + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (!(other instanceof SpatialKey)) { + return false; + } + SpatialKey o = (SpatialKey) other; + if (id != o.id) { + return false; + } + return equalsIgnoringId(o); + } + + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + throw new UnsupportedOperationException(); +// return 0; + } + + /** + * Check whether two objects are equals, but do not compare the id fields. 
+ * + * @param o the other key + * @return true if the contents are the same + */ + @Override + public boolean equalsIgnoringId(Spatial o) { + return Arrays.equals(minMax, ((SpatialKey)o).minMax); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append(id).append(": ("); + for (int i = 0; i < minMax.length; i += 2) { + if (i > 0) { + builder.append(", "); + } + builder.append(minMax[i]).append('/').append(minMax[i + 1]); + } + builder.append(")"); + return builder; + } + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_GEOMETRY; + } + + @Override + public int getValueType() { + return Value.GEOMETRY; + } + + @Override + public String getString() { + return getTraceSQL(); + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/Store.java b/h2/src/main/org/h2/mvstore/db/Store.java new file mode 100644 index 0000000000..6f5b5befcf --- /dev/null +++ b/h2/src/main/org/h2/mvstore/db/Store.java @@ -0,0 +1,396 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.db; + +import java.io.InputStream; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.h2.api.ErrorCode; +import org.h2.command.ddl.CreateTableData; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.FileStore; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.MVStoreTool; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.type.MetaType; +import org.h2.store.InDoubtTransaction; +import org.h2.store.fs.FileChannelInputStream; +import org.h2.store.fs.FileUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * A store with open tables. + */ +public final class Store { + + /** + * Convert password from byte[] to char[]. + * + * @param key password as byte[] + * @return password as char[]. + */ + static char[] decodePassword(byte[] key) { + char[] password = new char[key.length / 2]; + for (int i = 0; i < password.length; i++) { + password[i] = (char) (((key[i + i] & 255) << 16) | ((key[i + i + 1]) & 255)); + } + return password; + } + + /** + * The map of open tables. + * Key: the map name, value: the table. + */ + private final ConcurrentHashMap tableMap = new ConcurrentHashMap<>(); + + /** + * The store. + */ + private final MVStore mvStore; + + /** + * The transaction store. + */ + private final TransactionStore transactionStore; + + private long statisticsStart; + + private int temporaryMapId; + + private final boolean encrypted; + + private final String fileName; + + /** + * Creates the store. 
+ * + * @param db the database + */ + public Store(Database db) { + byte[] key = db.getFileEncryptionKey(); + String dbPath = db.getDatabasePath(); + MVStore.Builder builder = new MVStore.Builder(); + boolean encrypted = false; + if (dbPath != null) { + String fileName = dbPath + Constants.SUFFIX_MV_FILE; + MVStoreTool.compactCleanUp(fileName); + builder.fileName(fileName); + builder.pageSplitSize(db.getPageSize()); + if (db.isReadOnly()) { + builder.readOnly(); + } else { + // possibly create the directory + boolean exists = FileUtils.exists(fileName); + if (exists && !FileUtils.canWrite(fileName)) { + // read only + } else { + String dir = FileUtils.getParent(fileName); + FileUtils.createDirectories(dir); + } + int autoCompactFillRate = db.getSettings().autoCompactFillRate; + if (autoCompactFillRate <= 100) { + builder.autoCompactFillRate(autoCompactFillRate); + } + } + if (key != null) { + encrypted = true; + builder.encryptionKey(decodePassword(key)); + } + if (db.getSettings().compressData) { + builder.compress(); + // use a larger page split size to improve the compression ratio + builder.pageSplitSize(64 * 1024); + } + builder.backgroundExceptionHandler((t, e) -> db.setBackgroundException(DbException.convert(e))); + // always start without background thread first, and if necessary, + // it will be set up later, after db has been fully started, + // otherwise background thread would compete for store lock + // with maps opening procedure + builder.autoCommitDisabled(); + } + this.encrypted = encrypted; + try { + this.mvStore = builder.open(); + FileStore fs = mvStore.getFileStore(); + fileName = fs != null ? 
fs.getFileName() : null; + if (!db.getSettings().reuseSpace) { + mvStore.setReuseSpace(false); + } + mvStore.setVersionsToKeep(0); + this.transactionStore = new TransactionStore(mvStore, + new MetaType<>(db, mvStore.backgroundExceptionHandler), new ValueDataType(db, null), + db.getLockTimeout()); + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Convert a MVStoreException to the similar exception used + * for the table/sql layers. + * + * @param e the illegal state exception + * @return the database exception + */ + DbException convertMVStoreException(MVStoreException e) { + switch (e.getErrorCode()) { + case DataUtils.ERROR_CLOSED: + throw DbException.get(ErrorCode.DATABASE_IS_CLOSED, e, fileName); + case DataUtils.ERROR_FILE_CORRUPT: + if (encrypted) { + throw DbException.get(ErrorCode.FILE_ENCRYPTION_ERROR_1, e, fileName); + } + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, e, fileName); + case DataUtils.ERROR_FILE_LOCKED: + throw DbException.get(ErrorCode.DATABASE_ALREADY_OPEN_1, e, fileName); + case DataUtils.ERROR_READING_FAILED: + case DataUtils.ERROR_WRITING_FAILED: + throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, fileName); + default: + throw DbException.get(ErrorCode.GENERAL_ERROR_1, e, e.getMessage()); + } + } + + public MVStore getMvStore() { + return mvStore; + } + + public TransactionStore getTransactionStore() { + return transactionStore; + } + + /** + * Get MVTable by table name. + * + * @param tableName table name + * @return MVTable + */ + public MVTable getTable(String tableName) { + return tableMap.get(tableName); + } + + /** + * Create a table. + * + * @param data CreateTableData + * @return table created + */ + public MVTable createTable(CreateTableData data) { + try { + MVTable table = new MVTable(data, this); + tableMap.put(table.getMapName(), table); + return table; + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Remove a table. 
+ * + * @param table the table + */ + public void removeTable(MVTable table) { + try { + tableMap.remove(table.getMapName()); + } catch (MVStoreException e) { + throw convertMVStoreException(e); + } + } + + /** + * Store all pending changes. + */ + public void flush() { + FileStore s = mvStore.getFileStore(); + if (s == null || s.isReadOnly()) { + return; + } + if (!mvStore.compact(50, 4 * 1024 * 1024)) { + mvStore.commit(); + } + } + + /** + * Close the store, without persisting changes. + */ + public void closeImmediately() { + if (!mvStore.isClosed()) { + mvStore.closeImmediately(); + } + } + + /** + * Remove all temporary maps. + * + * @param objectIds the ids of the objects to keep + */ + public void removeTemporaryMaps(BitSet objectIds) { + for (String mapName : mvStore.getMapNames()) { + if (mapName.startsWith("temp.")) { + mvStore.removeMap(mapName); + } else if (mapName.startsWith("table.") || mapName.startsWith("index.")) { + int id = StringUtils.parseUInt31(mapName, mapName.indexOf('.') + 1, mapName.length()); + if (!objectIds.get(id)) { + mvStore.removeMap(mapName); + } + } + } + } + + /** + * Get the name of the next available temporary map. + * + * @return the map name + */ + public synchronized String nextTemporaryMapName() { + return "temp." + temporaryMapId++; + } + + /** + * Prepare a transaction. 
+ * + * @param session the session + * @param transactionName the transaction name (may be null) + */ + public void prepareCommit(SessionLocal session, String transactionName) { + Transaction t = session.getTransaction(); + t.setName(transactionName); + t.prepare(); + mvStore.commit(); + } + + public ArrayList getInDoubtTransactions() { + List list = transactionStore.getOpenTransactions(); + ArrayList result = Utils.newSmallArrayList(); + for (Transaction t : list) { + if (t.getStatus() == Transaction.STATUS_PREPARED) { + result.add(new MVInDoubtTransaction(mvStore, t)); + } + } + return result; + } + + /** + * Set the maximum memory to be used by the cache. + * + * @param kb the maximum size in KB + */ + public void setCacheSize(int kb) { + mvStore.setCacheSize(Math.max(1, kb / 1024)); + } + + public InputStream getInputStream() { + FileChannel fc = mvStore.getFileStore().getEncryptedFile(); + if (fc == null) { + fc = mvStore.getFileStore().getFile(); + } + return new FileChannelInputStream(fc, false); + } + + /** + * Force the changes to disk. + */ + public void sync() { + flush(); + mvStore.sync(); + } + + /** + * Compact the database file, that is, compact blocks that have a low + * fill rate, and move chunks next to each other. This will typically + * shrink the database file. Changes are flushed to the file, and old + * chunks are overwritten. + * + * @param maxCompactTime the maximum time in milliseconds to compact + */ + @SuppressWarnings("unused") + public void compactFile(int maxCompactTime) { + mvStore.compactFile(maxCompactTime); + } + + /** + * Close the store. Pending changes are persisted. + * If time is allocated for housekeeping, chunks with a low + * fill rate are compacted, and some chunks are put next to each other. + * If time is unlimited then full compaction is performed, which uses + * different algorithm - opens alternative temp store and writes all live + * data there, then replaces this store with a new one. 
+ * + * @param allowedCompactionTime time (in milliseconds) alloted for file + * compaction activity, 0 means no compaction, + * -1 means unlimited time (full compaction) + */ + public void close(int allowedCompactionTime) { + try { + FileStore fileStore = mvStore.getFileStore(); + if (!mvStore.isClosed() && fileStore != null) { + boolean compactFully = allowedCompactionTime == -1; + if (fileStore.isReadOnly()) { + compactFully = false; + } else { + transactionStore.close(); + } + if (compactFully) { + allowedCompactionTime = 0; + } + + mvStore.close(allowedCompactionTime); + + String fileName = fileStore.getFileName(); + if (compactFully && FileUtils.exists(fileName)) { + // the file could have been deleted concurrently, + // so only compact if the file still exists + MVStoreTool.compact(fileName, true); + } + } + } catch (MVStoreException e) { + int errorCode = e.getErrorCode(); + if (errorCode == DataUtils.ERROR_WRITING_FAILED) { + // disk full - ok + } else if (errorCode == DataUtils.ERROR_FILE_CORRUPT) { + // wrong encryption key - ok + } + mvStore.closeImmediately(); + throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, "Closing"); + } + } + + /** + * Start collecting statistics. + */ + public void statisticsStart() { + FileStore fs = mvStore.getFileStore(); + statisticsStart = fs == null ? 0 : fs.getReadCount(); + } + + /** + * Stop collecting statistics. + * + * @return the statistics + */ + public Map statisticsEnd() { + HashMap map = new HashMap<>(); + FileStore fs = mvStore.getFileStore(); + int reads = fs == null ? 0 : (int) (fs.getReadCount() - statisticsStart); + map.put("reads", reads); + return map; + } + +} diff --git a/h2/src/main/org/h2/mvstore/db/TransactionStore.java b/h2/src/main/org/h2/mvstore/db/TransactionStore.java deleted file mode 100644 index de9f25aadf..0000000000 --- a/h2/src/main/org/h2/mvstore/db/TransactionStore.java +++ /dev/null @@ -1,1775 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mvstore.db; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; - -import org.h2.mvstore.Cursor; -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.type.DataType; -import org.h2.mvstore.type.ObjectDataType; -import org.h2.util.New; - -/** - * A store that supports concurrent MVCC read-committed transactions. - */ -public class TransactionStore { - - /** - * The store. - */ - final MVStore store; - - /** - * The persisted map of prepared transactions. - * Key: transactionId, value: [ status, name ]. - */ - final MVMap preparedTransactions; - - /** - * The undo log. - *

    - * If the first entry for a transaction doesn't have a logId - * of 0, then the transaction is partially committed (which means rollback - * is not possible). Log entries are written before the data is changed - * (write-ahead). - *

    - * Key: opId, value: [ mapId, key, oldValue ]. - */ - final MVMap undoLog; - - /** - * The map of maps. - */ - private HashMap> maps = - New.hashMap(); - - private final DataType dataType; - - private final BitSet openTransactions = new BitSet(); - - private boolean init; - - private int maxTransactionId = 0xffff; - - /** - * The next id of a temporary map. - */ - private int nextTempMapId; - - /** - * Create a new transaction store. - * - * @param store the store - */ - public TransactionStore(MVStore store) { - this(store, new ObjectDataType()); - } - - /** - * Create a new transaction store. - * - * @param store the store - * @param dataType the data type for map keys and values - */ - public TransactionStore(MVStore store, DataType dataType) { - this.store = store; - this.dataType = dataType; - preparedTransactions = store.openMap("openTransactions", - new MVMap.Builder()); - VersionedValueType oldValueType = new VersionedValueType(dataType); - ArrayType undoLogValueType = new ArrayType(new DataType[]{ - new ObjectDataType(), dataType, oldValueType - }); - MVMap.Builder builder = - new MVMap.Builder(). - valueType(undoLogValueType); - undoLog = store.openMap("undoLog", builder); - if (undoLog.getValueType() != undoLogValueType) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TRANSACTION_CORRUPT, - "Undo map open with a different value type"); - } - } - - /** - * Initialize the store. This is needed before a transaction can be opened. - * If the transaction store is corrupt, this method can throw an exception, - * in which case the store can only be used for reading. 
- */ - public synchronized void init() { - init = true; - // remove all temporary maps - for (String mapName : store.getMapNames()) { - if (mapName.startsWith("temp.")) { - MVMap temp = openTempMap(mapName); - store.removeMap(temp); - } - } - synchronized (undoLog) { - if (undoLog.size() > 0) { - for (Long key : undoLog.keySet()) { - int transactionId = getTransactionId(key); - openTransactions.set(transactionId); - } - } - } - } - - /** - * Set the maximum transaction id, after which ids are re-used. If the old - * transaction is still in use when re-using an old id, the new transaction - * fails. - * - * @param max the maximum id - */ - public void setMaxTransactionId(int max) { - this.maxTransactionId = max; - } - - /** - * Combine the transaction id and the log id to an operation id. - * - * @param transactionId the transaction id - * @param logId the log id - * @return the operation id - */ - static long getOperationId(int transactionId, long logId) { - DataUtils.checkArgument(transactionId >= 0 && transactionId < (1 << 24), - "Transaction id out of range: {0}", transactionId); - DataUtils.checkArgument(logId >= 0 && logId < (1L << 40), - "Transaction log id out of range: {0}", logId); - return ((long) transactionId << 40) | logId; - } - - /** - * Get the transaction id for the given operation id. - * - * @param operationId the operation id - * @return the transaction id - */ - static int getTransactionId(long operationId) { - return (int) (operationId >>> 40); - } - - /** - * Get the log id for the given operation id. - * - * @param operationId the operation id - * @return the log id - */ - static long getLogId(long operationId) { - return operationId & ((1L << 40) - 1); - } - - /** - * Get the list of unclosed transactions that have pending writes. 
- * - * @return the list of transactions (sorted by id) - */ - public List getOpenTransactions() { - synchronized (undoLog) { - ArrayList list = New.arrayList(); - Long key = undoLog.firstKey(); - while (key != null) { - int transactionId = getTransactionId(key); - key = undoLog.lowerKey(getOperationId(transactionId + 1, 0)); - long logId = getLogId(key) + 1; - Object[] data = preparedTransactions.get(transactionId); - int status; - String name; - if (data == null) { - if (undoLog.containsKey(getOperationId(transactionId, 0))) { - status = Transaction.STATUS_OPEN; - } else { - status = Transaction.STATUS_COMMITTING; - } - name = null; - } else { - status = (Integer) data[0]; - name = (String) data[1]; - } - Transaction t = new Transaction(this, transactionId, status, - name, logId); - list.add(t); - key = undoLog.ceilingKey(getOperationId(transactionId + 1, 0)); - } - return list; - } - } - - /** - * Close the transaction store. - */ - public synchronized void close() { - store.commit(); - } - - /** - * Begin a new transaction. - * - * @return the transaction - */ - public synchronized Transaction begin() { - - int transactionId; - int status; - if (!init) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, - "Not initialized"); - } - transactionId = openTransactions.nextClearBit(1); - if (transactionId > maxTransactionId) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, - "There are {0} open transactions", - transactionId - 1); - } - openTransactions.set(transactionId); - status = Transaction.STATUS_OPEN; - return new Transaction(this, transactionId, status, null, 0); - } - - /** - * Store a transaction. 
- * - * @param t the transaction - */ - synchronized void storeTransaction(Transaction t) { - if (t.getStatus() == Transaction.STATUS_PREPARED || - t.getName() != null) { - Object[] v = { t.getStatus(), t.getName() }; - preparedTransactions.put(t.getId(), v); - } - } - - /** - * Log an entry. - * - * @param t the transaction - * @param logId the log id - * @param mapId the map id - * @param key the key - * @param oldValue the old value - */ - void log(Transaction t, long logId, int mapId, - Object key, Object oldValue) { - Long undoKey = getOperationId(t.getId(), logId); - Object[] log = new Object[] { mapId, key, oldValue }; - synchronized (undoLog) { - if (logId == 0) { - if (undoLog.containsKey(undoKey)) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, - "An old transaction with the same id " + - "is still open: {0}", - t.getId()); - } - } - undoLog.put(undoKey, log); - } - } - - /** - * Remove a log entry. - * - * @param t the transaction - * @param logId the log id - */ - public void logUndo(Transaction t, long logId) { - Long undoKey = getOperationId(t.getId(), logId); - synchronized (undoLog) { - Object[] old = undoLog.remove(undoKey); - if (old == null) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, - "Transaction {0} was concurrently rolled back", - t.getId()); - } - } - } - - /** - * Remove the given map. - * - * @param the key type - * @param the value type - * @param map the map - */ - synchronized void removeMap(TransactionMap map) { - maps.remove(map.mapId); - store.removeMap(map.map); - } - - /** - * Commit a transaction. 
- * - * @param t the transaction - * @param maxLogId the last log id - */ - void commit(Transaction t, long maxLogId) { - if (store.isClosed()) { - return; - } - // TODO could synchronize on blocks (100 at a time or so) - synchronized (undoLog) { - t.setStatus(Transaction.STATUS_COMMITTING); - for (long logId = 0; logId < maxLogId; logId++) { - Long undoKey = getOperationId(t.getId(), logId); - Object[] op = undoLog.get(undoKey); - if (op == null) { - // partially committed: load next - undoKey = undoLog.ceilingKey(undoKey); - if (undoKey == null || - getTransactionId(undoKey) != t.getId()) { - break; - } - logId = getLogId(undoKey) - 1; - continue; - } - int mapId = (Integer) op[0]; - MVMap map = openMap(mapId); - if (map == null) { - // map was later removed - } else { - Object key = op[1]; - VersionedValue value = map.get(key); - if (value == null) { - // nothing to do - } else if (value.value == null) { - // remove the value - map.remove(key); - } else { - VersionedValue v2 = new VersionedValue(); - v2.value = value.value; - map.put(key, v2); - } - } - undoLog.remove(undoKey); - } - } - endTransaction(t); - } - - /** - * Open the map with the given name. - * - * @param the key type - * @param name the map name - * @param keyType the key type - * @param valueType the value type - * @return the map - */ - synchronized MVMap openMap(String name, - DataType keyType, DataType valueType) { - if (keyType == null) { - keyType = new ObjectDataType(); - } - if (valueType == null) { - valueType = new ObjectDataType(); - } - VersionedValueType vt = new VersionedValueType(valueType); - MVMap map; - MVMap.Builder builder = - new MVMap.Builder(). - keyType(keyType).valueType(vt); - map = store.openMap(name, builder); - @SuppressWarnings("unchecked") - MVMap m = (MVMap) map; - maps.put(map.getId(), m); - return map; - } - - /** - * Open the map with the given id. 
- * - * @param mapId the id - * @return the map - */ - synchronized MVMap openMap(int mapId) { - MVMap map = maps.get(mapId); - if (map != null) { - return map; - } - String mapName = store.getMapName(mapId); - if (mapName == null) { - // the map was removed later on - return null; - } - VersionedValueType vt = new VersionedValueType(dataType); - MVMap.Builder mapBuilder = - new MVMap.Builder(). - keyType(dataType).valueType(vt); - map = store.openMap(mapName, mapBuilder); - maps.put(mapId, map); - return map; - } - - /** - * Create a temporary map. Such maps are removed when opening the store. - * - * @return the map - */ - synchronized MVMap createTempMap() { - String mapName = "temp." + nextTempMapId++; - return openTempMap(mapName); - } - - /** - * Open a temporary map. - * - * @param mapName the map name - * @return the map - */ - MVMap openTempMap(String mapName) { - MVMap.Builder mapBuilder = - new MVMap.Builder(). - keyType(dataType); - return store.openMap(mapName, mapBuilder); - } - - /** - * End this transaction - * - * @param t the transaction - */ - synchronized void endTransaction(Transaction t) { - if (t.getStatus() == Transaction.STATUS_PREPARED) { - preparedTransactions.remove(t.getId()); - } - t.setStatus(Transaction.STATUS_CLOSED); - openTransactions.clear(t.transactionId); - if (store.getAutoCommitDelay() == 0) { - store.commit(); - return; - } - // to avoid having to store the transaction log, - // if there is no open transaction, - // and if there have been many changes, store them now - if (undoLog.isEmpty()) { - int unsaved = store.getUnsavedMemory(); - int max = store.getAutoCommitMemory(); - // save at 3/4 capacity - if (unsaved * 4 > max * 3) { - store.commit(); - } - } - } - - /** - * Rollback to an old savepoint. 
- * - * @param t the transaction - * @param maxLogId the last log id - * @param toLogId the log id to roll back to - */ - void rollbackTo(Transaction t, long maxLogId, long toLogId) { - // TODO could synchronize on blocks (100 at a time or so) - synchronized (undoLog) { - for (long logId = maxLogId - 1; logId >= toLogId; logId--) { - Long undoKey = getOperationId(t.getId(), logId); - Object[] op = undoLog.get(undoKey); - if (op == null) { - // partially rolled back: load previous - undoKey = undoLog.floorKey(undoKey); - if (undoKey == null || - getTransactionId(undoKey) != t.getId()) { - break; - } - logId = getLogId(undoKey) + 1; - continue; - } - int mapId = ((Integer) op[0]).intValue(); - MVMap map = openMap(mapId); - if (map != null) { - Object key = op[1]; - VersionedValue oldValue = (VersionedValue) op[2]; - if (oldValue == null) { - // this transaction added the value - map.remove(key); - } else { - // this transaction updated the value - map.put(key, oldValue); - } - } - undoLog.remove(undoKey); - } - } - } - - /** - * Get the changes of the given transaction, starting from the latest log id - * back to the given log id. 
- * - * @param t the transaction - * @param maxLogId the maximum log id - * @param toLogId the minimum log id - * @return the changes - */ - Iterator getChanges(final Transaction t, final long maxLogId, - final long toLogId) { - return new Iterator() { - - private long logId = maxLogId - 1; - private Change current; - - { - fetchNext(); - } - - private void fetchNext() { - synchronized (undoLog) { - while (logId >= toLogId) { - Long undoKey = getOperationId(t.getId(), logId); - Object[] op = undoLog.get(undoKey); - logId--; - if (op == null) { - // partially rolled back: load previous - undoKey = undoLog.floorKey(undoKey); - if (undoKey == null || - getTransactionId(undoKey) != t.getId()) { - break; - } - logId = getLogId(undoKey); - continue; - } - int mapId = ((Integer) op[0]).intValue(); - MVMap m = openMap(mapId); - if (m == null) { - // map was removed later on - } else { - current = new Change(); - current.mapName = m.getName(); - current.key = op[1]; - VersionedValue oldValue = (VersionedValue) op[2]; - current.value = oldValue == null ? - null : oldValue.value; - return; - } - } - } - current = null; - } - - @Override - public boolean hasNext() { - return current != null; - } - - @Override - public Change next() { - if (current == null) { - throw DataUtils.newUnsupportedOperationException("no data"); - } - Change result = current; - fetchNext(); - return result; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - - }; - } - - /** - * A change in a map. - */ - public static class Change { - - /** - * The name of the map where the change occurred. - */ - public String mapName; - - /** - * The key. - */ - public Object key; - - /** - * The value. - */ - public Object value; - } - - /** - * A transaction. - */ - public static class Transaction { - - /** - * The status of a closed transaction (committed or rolled back). 
- */ - public static final int STATUS_CLOSED = 0; - - /** - * The status of an open transaction. - */ - public static final int STATUS_OPEN = 1; - - /** - * The status of a prepared transaction. - */ - public static final int STATUS_PREPARED = 2; - - /** - * The status of a transaction that is being committed, but possibly not - * yet finished. A transactions can go into this state when the store is - * closed while the transaction is committing. When opening a store, - * such transactions should be committed. - */ - public static final int STATUS_COMMITTING = 3; - - /** - * The transaction store. - */ - final TransactionStore store; - - /** - * The transaction id. - */ - final int transactionId; - - /** - * The log id of the last entry in the undo log map. - */ - long logId; - - private int status; - - private String name; - - Transaction(TransactionStore store, int transactionId, int status, - String name, long logId) { - this.store = store; - this.transactionId = transactionId; - this.status = status; - this.name = name; - this.logId = logId; - } - - public int getId() { - return transactionId; - } - - public int getStatus() { - return status; - } - - void setStatus(int status) { - this.status = status; - } - - public void setName(String name) { - checkNotClosed(); - this.name = name; - store.storeTransaction(this); - } - - public String getName() { - return name; - } - - /** - * Create a new savepoint. - * - * @return the savepoint id - */ - public long setSavepoint() { - return logId; - } - - /** - * Add a log entry. - * - * @param mapId the map id - * @param key the key - * @param oldValue the old value - */ - void log(int mapId, Object key, Object oldValue) { - store.log(this, logId, mapId, key, oldValue); - // only increment the log id if logging was successful - logId++; - } - - /** - * Remove the last log entry. - */ - void logUndo() { - store.logUndo(this, --logId); - } - - /** - * Open a data map. 
- * - * @param the key type - * @param the value type - * @param name the name of the map - * @return the transaction map - */ - public TransactionMap openMap(String name) { - return openMap(name, null, null); - } - - /** - * Open the map to store the data. - * - * @param the key type - * @param the value type - * @param name the name of the map - * @param keyType the key data type - * @param valueType the value data type - * @return the transaction map - */ - public TransactionMap openMap(String name, - DataType keyType, DataType valueType) { - checkNotClosed(); - MVMap map = store.openMap(name, keyType, - valueType); - int mapId = map.getId(); - return new TransactionMap(this, map, mapId); - } - - /** - * Open the transactional version of the given map. - * - * @param the key type - * @param the value type - * @param map the base map - * @return the transactional map - */ - public TransactionMap openMap( - MVMap map) { - checkNotClosed(); - int mapId = map.getId(); - return new TransactionMap(this, map, mapId); - } - - /** - * Prepare the transaction. Afterwards, the transaction can only be - * committed or rolled back. - */ - public void prepare() { - checkNotClosed(); - status = STATUS_PREPARED; - store.storeTransaction(this); - } - - /** - * Commit the transaction. Afterwards, this transaction is closed. - */ - public void commit() { - checkNotClosed(); - store.commit(this, logId); - } - - /** - * Roll back to the given savepoint. This is only allowed if the - * transaction is open. - * - * @param savepointId the savepoint id - */ - public void rollbackToSavepoint(long savepointId) { - checkNotClosed(); - store.rollbackTo(this, logId, savepointId); - logId = savepointId; - } - - /** - * Roll the transaction back. Afterwards, this transaction is closed. 
- */ - public void rollback() { - checkNotClosed(); - store.rollbackTo(this, logId, 0); - store.endTransaction(this); - } - - /** - * Get the list of changes, starting with the latest change, up to the - * given savepoint (in reverse order than they occurred). The value of - * the change is the value before the change was applied. - * - * @param savepointId the savepoint id, 0 meaning the beginning of the - * transaction - * @return the changes - */ - public Iterator getChanges(long savepointId) { - return store.getChanges(this, logId, savepointId); - } - - /** - * Check whether this transaction is open or prepared. - */ - void checkNotClosed() { - if (status == STATUS_CLOSED) { - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_CLOSED, "Transaction is closed"); - } - } - - /** - * Remove the map. - * - * @param map the map - */ - public void removeMap(TransactionMap map) { - store.removeMap(map); - } - - @Override - public String toString() { - return "" + transactionId; - } - - } - - /** - * A map that supports transactions. - * - * @param the key type - * @param the value type - */ - public static class TransactionMap { - - /** - * The map id. - */ - final int mapId; - - /** - * If a record was read that was updated by this transaction, and the - * update occurred before this log id, the older version is read. This - * is so that changes are not immediately visible, to support statement - * processing (for example "update test set id = id + 1"). - */ - long readLogId = Long.MAX_VALUE; - - /** - * The map used for writing (the latest version). - *

    - * Key: key the key of the data. - * Value: { transactionId, oldVersion, value } - */ - final MVMap map; - - private Transaction transaction; - - TransactionMap(Transaction transaction, MVMap map, - int mapId) { - this.transaction = transaction; - this.map = map; - this.mapId = mapId; - } - - /** - * Set the savepoint. Afterwards, reads are based on the specified - * savepoint. - * - * @param savepoint the savepoint - */ - public void setSavepoint(long savepoint) { - this.readLogId = savepoint; - } - - /** - * Get a clone of this map for the given transaction. - * - * @param transaction the transaction - * @param savepoint the savepoint - * @return the map - */ - public TransactionMap getInstance(Transaction transaction, - long savepoint) { - TransactionMap m = - new TransactionMap(transaction, map, mapId); - m.setSavepoint(savepoint); - return m; - } - - /** - * Get the size of the raw map. This includes uncommitted entries, and - * transiently removed entries, so it is the maximum number of entries. - * - * @return the maximum size - */ - public long sizeAsLongMax() { - return map.sizeAsLong(); - } - - /** - * Get the size of the map as seen by this transaction. 
- * - * @return the size - */ - public long sizeAsLong() { - long sizeRaw = map.sizeAsLong(); - MVMap undo = transaction.store.undoLog; - long undoLogSize; - synchronized (undo) { - undoLogSize = undo.sizeAsLong(); - } - if (undoLogSize == 0) { - return sizeRaw; - } - if (undoLogSize > sizeRaw) { - // the undo log is larger than the map - - // count the entries of the map - long size = 0; - Cursor cursor = map.cursor(null); - while (cursor.hasNext()) { - K key = cursor.next(); - VersionedValue data = cursor.getValue(); - data = getValue(key, readLogId, data); - if (data != null && data.value != null) { - size++; - } - } - return size; - } - // the undo log is smaller than the map - - // scan the undo log and subtract invisible entries - synchronized (undo) { - // re-fetch in case any transaction was committed now - long size = map.sizeAsLong(); - MVMap temp = transaction.store.createTempMap(); - try { - for (Entry e : undo.entrySet()) { - Object[] op = e.getValue(); - int m = (Integer) op[0]; - if (m != mapId) { - // a different map - ignore - continue; - } - @SuppressWarnings("unchecked") - K key = (K) op[1]; - if (get(key) == null) { - Integer old = temp.put(key, 1); - // count each key only once (there might be multiple - // changes for the same key) - if (old == null) { - size--; - } - } - } - } finally { - transaction.store.store.removeMap(temp); - } - return size; - } - } - - /** - * Remove an entry. - *

    - * If the row is locked, this method will retry until the row could be - * updated or until a lock timeout. - * - * @param key the key - * @throws IllegalStateException if a lock timeout occurs - */ - public V remove(K key) { - return set(key, null); - } - - /** - * Update the value for the given key. - *

    - * If the row is locked, this method will retry until the row could be - * updated or until a lock timeout. - * - * @param key the key - * @param value the new value (not null) - * @return the old value - * @throws IllegalStateException if a lock timeout occurs - */ - public V put(K key, V value) { - DataUtils.checkArgument(value != null, "The value may not be null"); - return set(key, value); - } - - /** - * Update the value for the given key, without adding an undo log entry. - * - * @param key the key - * @param value the value - * @return the old value - */ - @SuppressWarnings("unchecked") - public V putCommitted(K key, V value) { - DataUtils.checkArgument(value != null, "The value may not be null"); - VersionedValue newValue = new VersionedValue(); - newValue.value = value; - VersionedValue oldValue = map.put(key, newValue); - return (V) (oldValue == null ? null : oldValue.value); - } - - private V set(K key, V value) { - transaction.checkNotClosed(); - V old = get(key); - boolean ok = trySet(key, value, false); - if (ok) { - return old; - } - throw DataUtils.newIllegalStateException( - DataUtils.ERROR_TRANSACTION_LOCKED, "Entry is locked"); - } - - /** - * Try to remove the value for the given key. - *

    - * This will fail if the row is locked by another transaction (that - * means, if another open transaction changed the row). - * - * @param key the key - * @return whether the entry could be removed - */ - public boolean tryRemove(K key) { - return trySet(key, null, false); - } - - /** - * Try to update the value for the given key. - *

    - * This will fail if the row is locked by another transaction (that - * means, if another open transaction changed the row). - * - * @param key the key - * @param value the new value - * @return whether the entry could be updated - */ - public boolean tryPut(K key, V value) { - DataUtils.checkArgument(value != null, "The value may not be null"); - return trySet(key, value, false); - } - - /** - * Try to set or remove the value. When updating only unchanged entries, - * then the value is only changed if it was not changed after opening - * the map. - * - * @param key the key - * @param value the new value (null to remove the value) - * @param onlyIfUnchanged only set the value if it was not changed (by - * this or another transaction) since the map was opened - * @return true if the value was set, false if there was a concurrent - * update - */ - public boolean trySet(K key, V value, boolean onlyIfUnchanged) { - VersionedValue current = map.get(key); - if (onlyIfUnchanged) { - VersionedValue old = getValue(key, readLogId); - if (!map.areValuesEqual(old, current)) { - long tx = getTransactionId(current.operationId); - if (tx == transaction.transactionId) { - if (value == null) { - // ignore removing an entry - // if it was added or changed - // in the same statement - return true; - } else if (current.value == null) { - // add an entry that was removed - // in the same statement - } else { - return false; - } - } else { - return false; - } - } - } - VersionedValue newValue = new VersionedValue(); - newValue.operationId = getOperationId( - transaction.transactionId, transaction.logId); - newValue.value = value; - if (current == null) { - // a new value - transaction.log(mapId, key, current); - VersionedValue old = map.putIfAbsent(key, newValue); - if (old != null) { - transaction.logUndo(); - return false; - } - return true; - } - long id = current.operationId; - if (id == 0) { - // committed - transaction.log(mapId, key, current); - // the transaction is 
committed: - // overwrite the value - if (!map.replace(key, current, newValue)) { - // somebody else was faster - transaction.logUndo(); - return false; - } - return true; - } - int tx = getTransactionId(current.operationId); - if (tx == transaction.transactionId) { - // added or updated by this transaction - transaction.log(mapId, key, current); - if (!map.replace(key, current, newValue)) { - // strange, somebody overwrote the value - // even though the change was not committed - transaction.logUndo(); - return false; - } - return true; - } - // the transaction is not yet committed - return false; - } - - /** - * Get the value for the given key at the time when this map was opened. - * - * @param key the key - * @return the value or null - */ - public V get(K key) { - return get(key, readLogId); - } - - /** - * Get the most recent value for the given key. - * - * @param key the key - * @return the value or null - */ - public V getLatest(K key) { - return get(key, Long.MAX_VALUE); - } - - /** - * Whether the map contains the key. - * - * @param key the key - * @return true if the map contains an entry for this key - */ - public boolean containsKey(K key) { - return get(key) != null; - } - - /** - * Get the value for the given key. - * - * @param key the key - * @param maxLogId the maximum log id - * @return the value or null - */ - @SuppressWarnings("unchecked") - public V get(K key, long maxLogId) { - VersionedValue data = getValue(key, maxLogId); - return data == null ? null : (V) data.value; - } - - /** - * Whether the entry for this key was added or removed from this - * session. 
- * - * @param key the key - * @return true if yes - */ - public boolean isSameTransaction(K key) { - VersionedValue data = map.get(key); - if (data == null) { - // doesn't exist or deleted by a committed transaction - return false; - } - int tx = getTransactionId(data.operationId); - return tx == transaction.transactionId; - } - - private VersionedValue getValue(K key, long maxLog) { - VersionedValue data = map.get(key); - return getValue(key, maxLog, data); - } - - /** - * Get the versioned value for the given key. - * - * @param key the key - * @param maxLog the maximum log id of the entry - * @param data the value stored in the main map - * @return the value - */ - VersionedValue getValue(K key, long maxLog, VersionedValue data) { - while (true) { - if (data == null) { - // doesn't exist or deleted by a committed transaction - return null; - } - long id = data.operationId; - if (id == 0) { - // it is committed - return data; - } - int tx = getTransactionId(id); - if (tx == transaction.transactionId) { - // added by this transaction - if (getLogId(id) < maxLog) { - return data; - } - } - // get the value before the uncommitted transaction - Object[] d; - synchronized (transaction.store.undoLog) { - d = transaction.store.undoLog.get(id); - } - if (d == null) { - // this entry should be committed or rolled back - // in the meantime (the transaction might still be open) - // or it might be changed again in a different - // transaction (possibly one with the same id) - data = map.get(key); - } else { - data = (VersionedValue) d[2]; - } - } - } - - /** - * Check whether this map is closed. - * - * @return true if closed - */ - public boolean isClosed() { - return map.isClosed(); - } - - /** - * Clear the map. - */ - public void clear() { - // TODO truncate transactionally? - map.clear(); - } - - /** - * Get the first key. - * - * @return the first key, or null if empty - */ - public K firstKey() { - Iterator it = keyIterator(null); - return it.hasNext() ? 
it.next() : null; - } - - /** - * Get the last key. - * - * @return the last key, or null if empty - */ - public K lastKey() { - K k = map.lastKey(); - while (true) { - if (k == null) { - return null; - } - if (get(k) != null) { - return k; - } - k = map.lowerKey(k); - } - } - - /** - * Get the smallest key that is larger than the given key, or null if no - * such key exists. - * - * @param key the key (may not be null) - * @return the result - */ - public K higherKey(K key) { - while (true) { - K k = map.higherKey(key); - if (k == null || get(k) != null) { - return k; - } - key = k; - } - } - - /** - * Get one of the previous or next keys. There might be no value - * available for the returned key. - * - * @param key the key (may not be null) - * @param offset how many keys to skip (-1 for previous, 1 for next) - * @return the key - */ - public K relativeKey(K key, long offset) { - K k = offset > 0 ? map.ceilingKey(key) : map.floorKey(key); - if (k == null) { - return k; - } - long index = map.getKeyIndex(k); - return map.getKey(index + offset); - } - - /** - * Get the largest key that is smaller than the given key, or null if no - * such key exists. - * - * @param key the key (may not be null) - * @return the result - */ - public K lowerKey(K key) { - while (true) { - K k = map.lowerKey(key); - if (k == null || get(k) != null) { - return k; - } - key = k; - } - } - - /** - * Iterate over keys. - * - * @param from the first key to return - * @return the iterator - */ - public Iterator keyIterator(K from) { - return keyIterator(from, false); - } - - /** - * Iterate over keys. 
- * - * @param from the first key to return - * @param includeUncommitted whether uncommitted entries should be - * included - * @return the iterator - */ - public Iterator keyIterator(final K from, final boolean includeUncommitted) { - return new Iterator() { - private K currentKey = from; - private Cursor cursor = map.cursor(currentKey); - - { - fetchNext(); - } - - private void fetchNext() { - while (cursor.hasNext()) { - K k; - try { - k = cursor.next(); - } catch (IllegalStateException e) { - // TODO this is a bit ugly - if (DataUtils.getErrorCode(e.getMessage()) == - DataUtils.ERROR_CHUNK_NOT_FOUND) { - cursor = map.cursor(currentKey); - // we (should) get the current key again, - // we need to ignore that one - if (!cursor.hasNext()) { - break; - } - cursor.next(); - if (!cursor.hasNext()) { - break; - } - k = cursor.next(); - } else { - throw e; - } - } - currentKey = k; - if (includeUncommitted) { - return; - } - if (containsKey(k)) { - return; - } - } - currentKey = null; - } - - @Override - public boolean hasNext() { - return currentKey != null; - } - - @Override - public K next() { - K result = currentKey; - fetchNext(); - return result; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } - }; - } - - /** - * Iterate over entries. 
- * - * @param from the first key to return - * @return the iterator - */ - public Iterator> entryIterator(final K from) { - return new Iterator>() { - private Entry current; - private K currentKey = from; - private Cursor cursor = map.cursor(currentKey); - - { - fetchNext(); - } - - private void fetchNext() { - while (cursor.hasNext()) { - K k; - try { - k = cursor.next(); - } catch (IllegalStateException e) { - // TODO this is a bit ugly - if (DataUtils.getErrorCode(e.getMessage()) == - DataUtils.ERROR_CHUNK_NOT_FOUND) { - cursor = map.cursor(currentKey); - // we (should) get the current key again, - // we need to ignore that one - if (!cursor.hasNext()) { - break; - } - cursor.next(); - if (!cursor.hasNext()) { - break; - } - k = cursor.next(); - } else { - throw e; - } - } - final K key = k; - VersionedValue data = cursor.getValue(); - data = getValue(key, readLogId, data); - if (data != null && data.value != null) { - @SuppressWarnings("unchecked") - final V value = (V) data.value; - current = new DataUtils.MapEntry(key, value); - currentKey = key; - return; - } - } - current = null; - currentKey = null; - } - - @Override - public boolean hasNext() { - return current != null; - } - - @Override - public Entry next() { - Entry result = current; - fetchNext(); - return result; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } - }; - - } - - /** - * Iterate over keys. 
- * - * @param iterator the iterator to wrap - * @param includeUncommitted whether uncommitted entries should be - * included - * @return the iterator - */ - public Iterator wrapIterator(final Iterator iterator, - final boolean includeUncommitted) { - // TODO duplicate code for wrapIterator and entryIterator - return new Iterator() { - private K current; - - { - fetchNext(); - } - - private void fetchNext() { - while (iterator.hasNext()) { - current = iterator.next(); - if (includeUncommitted) { - return; - } - if (containsKey(current)) { - return; - } - } - current = null; - } - - @Override - public boolean hasNext() { - return current != null; - } - - @Override - public K next() { - K result = current; - fetchNext(); - return result; - } - - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } - }; - } - - public Transaction getTransaction() { - return transaction; - } - - public DataType getKeyType() { - return map.getKeyType(); - } - - } - - /** - * A versioned value (possibly null). It contains a pointer to the old - * value, and the value itself. - */ - static class VersionedValue { - - /** - * The operation id. - */ - public long operationId; - - /** - * The value. - */ - public Object value; - - @Override - public String toString() { - return value + (operationId == 0 ? "" : ( - " " + - getTransactionId(operationId) + "/" + - getLogId(operationId))); - } - - } - - /** - * The value type for a versioned value. 
- */ - public static class VersionedValueType implements DataType { - - private final DataType valueType; - - VersionedValueType(DataType valueType) { - this.valueType = valueType; - } - - @Override - public int getMemory(Object obj) { - VersionedValue v = (VersionedValue) obj; - return valueType.getMemory(v.value) + 8; - } - - @Override - public int compare(Object aObj, Object bObj) { - if (aObj == bObj) { - return 0; - } - VersionedValue a = (VersionedValue) aObj; - VersionedValue b = (VersionedValue) bObj; - long comp = a.operationId - b.operationId; - if (comp == 0) { - return valueType.compare(a.value, b.value); - } - return Long.signum(comp); - } - - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - if (buff.get() == 0) { - // fast path (no op ids or null entries) - for (int i = 0; i < len; i++) { - VersionedValue v = new VersionedValue(); - v.value = valueType.read(buff); - obj[i] = v; - } - } else { - // slow path (some entries may be null) - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - } - - @Override - public Object read(ByteBuffer buff) { - VersionedValue v = new VersionedValue(); - v.operationId = DataUtils.readVarLong(buff); - if (buff.get() == 1) { - v.value = valueType.read(buff); - } - return v; - } - - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - boolean fastPath = true; - for (int i = 0; i < len; i++) { - VersionedValue v = (VersionedValue) obj[i]; - if (v.operationId != 0 || v.value == null) { - fastPath = false; - } - } - if (fastPath) { - buff.put((byte) 0); - for (int i = 0; i < len; i++) { - VersionedValue v = (VersionedValue) obj[i]; - valueType.write(buff, v.value); - } - } else { - // slow path: - // store op ids, and some entries may be null - buff.put((byte) 1); - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - } - - @Override - public void write(WriteBuffer buff, Object obj) { - VersionedValue v = (VersionedValue) obj; - 
buff.putVarLong(v.operationId); - if (v.value == null) { - buff.put((byte) 0); - } else { - buff.put((byte) 1); - valueType.write(buff, v.value); - } - } - - } - - /** - * A data type that contains an array of objects with the specified data - * types. - */ - public static class ArrayType implements DataType { - - private final int arrayLength; - private final DataType[] elementTypes; - - ArrayType(DataType[] elementTypes) { - this.arrayLength = elementTypes.length; - this.elementTypes = elementTypes; - } - - @Override - public int getMemory(Object obj) { - Object[] array = (Object[]) obj; - int size = 0; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - Object o = array[i]; - if (o != null) { - size += t.getMemory(o); - } - } - return size; - } - - @Override - public int compare(Object aObj, Object bObj) { - if (aObj == bObj) { - return 0; - } - Object[] a = (Object[]) aObj; - Object[] b = (Object[]) bObj; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - int comp = t.compare(a[i], b[i]); - if (comp != 0) { - return comp; - } - } - return 0; - } - - @Override - public void read(ByteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public void write(WriteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public void write(WriteBuffer buff, Object obj) { - Object[] array = (Object[]) obj; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - Object o = array[i]; - if (o == null) { - buff.put((byte) 0); - } else { - buff.put((byte) 1); - t.write(buff, o); - } - } - } - - @Override - public Object read(ByteBuffer buff) { - Object[] array = new Object[arrayLength]; - for (int i = 0; i < arrayLength; i++) { - DataType t = elementTypes[i]; - if (buff.get() == 1) { - array[i] = t.read(buff); - } - } - return array; - } - - } - -} diff 
--git a/h2/src/main/org/h2/mvstore/db/ValueDataType.java b/h2/src/main/org/h2/mvstore/db/ValueDataType.java index 76bc4ce59a..36d4ccbe0f 100644 --- a/h2/src/main/org/h2/mvstore/db/ValueDataType.java +++ b/h2/src/main/org/h2/mvstore/db/ValueDataType.java @@ -1,106 +1,184 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.db; +import static org.h2.mvstore.DataUtils.readString; +import static org.h2.mvstore.DataUtils.readVarInt; +import static org.h2.mvstore.DataUtils.readVarLong; + import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; import java.util.Arrays; - +import java.util.Iterator; +import java.util.Map.Entry; import org.h2.api.ErrorCode; +import org.h2.api.IntervalQualifier; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Database; import org.h2.message.DbException; +import org.h2.mode.DefaultNullOrdering; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.rtree.SpatialDataType; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; +import org.h2.result.RowFactory; +import org.h2.result.SearchRow; import org.h2.result.SortOrder; import org.h2.store.DataHandler; -import org.h2.tools.SimpleResultSet; +import org.h2.util.DateTimeUtils; +import org.h2.util.Utils; import org.h2.value.CompareMode; +import org.h2.value.ExtTypeInfoEnum; +import org.h2.value.ExtTypeInfoRow; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; 
+import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBlob; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; +import org.h2.value.ValueChar; +import org.h2.value.ValueClob; +import org.h2.value.ValueCollectionBase; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueDecfloat; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueLong; +import org.h2.value.ValueJson; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueRow; +import org.h2.value.ValueSmallint; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueTinyint; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; +import org.h2.value.lob.LobData; +import org.h2.value.lob.LobDataDatabase; +import org.h2.value.lob.LobDataInMemory; /** * A row type. 
*/ -public class ValueDataType implements DataType { - - private static final int INT_0_15 = 32; - private static final int LONG_0_7 = 48; - private static final int DECIMAL_0_1 = 56; - private static final int DECIMAL_SMALL_0 = 58; - private static final int DECIMAL_SMALL = 59; - private static final int DOUBLE_0_1 = 60; - private static final int FLOAT_0_1 = 62; - private static final int BOOLEAN_FALSE = 64; - private static final int BOOLEAN_TRUE = 65; - private static final int INT_NEG = 66; - private static final int LONG_NEG = 67; - private static final int STRING_0_31 = 68; - private static final int BYTES_0_31 = 100; - private static final int SPATIAL_KEY_2D = 132; +public final class ValueDataType extends BasicDataType implements StatefulDataType { + + private static final byte NULL = 0; + private static final byte TINYINT = 2; + private static final byte SMALLINT = 3; + private static final byte INTEGER = 4; + private static final byte BIGINT = 5; + private static final byte NUMERIC = 6; + private static final byte DOUBLE = 7; + private static final byte REAL = 8; + private static final byte TIME = 9; + private static final byte DATE = 10; + private static final byte TIMESTAMP = 11; + private static final byte VARBINARY = 12; + private static final byte VARCHAR = 13; + private static final byte VARCHAR_IGNORECASE = 14; + private static final byte BLOB = 15; + private static final byte CLOB = 16; + private static final byte ARRAY = 17; + private static final byte JAVA_OBJECT = 19; + private static final byte UUID = 20; + private static final byte CHAR = 21; + private static final byte GEOMETRY = 22; + private static final byte TIMESTAMP_TZ_OLD = 24; + private static final byte ENUM = 25; + private static final byte INTERVAL = 26; + private static final byte ROW = 27; + private static final byte INT_0_15 = 32; + private static final byte BIGINT_0_7 = 48; + private static final byte NUMERIC_0_1 = 56; + private static final byte NUMERIC_SMALL_0 = 58; + 
private static final byte NUMERIC_SMALL = 59; + private static final byte DOUBLE_0_1 = 60; + private static final byte REAL_0_1 = 62; + private static final byte BOOLEAN_FALSE = 64; + private static final byte BOOLEAN_TRUE = 65; + private static final byte INT_NEG = 66; + private static final byte BIGINT_NEG = 67; + private static final byte VARCHAR_0_31 = 68; + private static final int VARBINARY_0_31 = 100; + // 132 was used for SPATIAL_KEY_2D + // 133 was used for CUSTOM_DATA_TYPE + private static final int JSON = 134; + private static final int TIMESTAMP_TZ = 135; + private static final int TIME_TZ = 136; + private static final int BINARY = 137; + private static final int DECFLOAT = 138; final DataHandler handler; + final CastDataProvider provider; final CompareMode compareMode; final int[] sortTypes; - SpatialDataType spatialType; + private RowFactory rowFactory; - public ValueDataType(CompareMode compareMode, DataHandler handler, - int[] sortTypes) { + public ValueDataType() { + this(null, CompareMode.getInstance(null, 0), null, null); + } + + public ValueDataType(Database database, int[] sortTypes) { + this(database, database.getCompareMode(), database, sortTypes); + } + + public ValueDataType(CastDataProvider provider, CompareMode compareMode, DataHandler handler, int[] sortTypes) { + this.provider = provider; this.compareMode = compareMode; this.handler = handler; this.sortTypes = sortTypes; } - private SpatialDataType getSpatialDataType() { - if (spatialType == null) { - spatialType = new SpatialDataType(2); - } - return spatialType; + public RowFactory getRowFactory() { + return rowFactory; + } + + public void setRowFactory(RowFactory rowFactory) { + this.rowFactory = rowFactory; } @Override - public int compare(Object a, Object b) { + public Value[] createStorage(int size) { + return new Value[size]; + } + + @Override + public int compare(Value a, Value b) { if (a == b) { return 0; } - if (a instanceof ValueArray && b instanceof ValueArray) { - Value[] 
ax = ((ValueArray) a).getList(); - Value[] bx = ((ValueArray) b).getList(); + if (a instanceof SearchRow && b instanceof SearchRow) { + return compare((SearchRow)a, (SearchRow)b); + } else if (a instanceof ValueCollectionBase && b instanceof ValueCollectionBase) { + Value[] ax = ((ValueCollectionBase) a).getList(); + Value[] bx = ((ValueCollectionBase) b).getList(); int al = ax.length; int bl = bx.length; int len = Math.min(al, bl); for (int i = 0; i < len; i++) { - int sortType = sortTypes[i]; - int comp = compareValues(ax[i], bx[i], sortType); + int sortType = sortTypes == null ? SortOrder.ASCENDING : sortTypes[i]; + Value one = ax[i]; + Value two = bx[i]; + if (one == null || two == null) { + return compareValues(ax[len - 1], bx[len - 1], SortOrder.ASCENDING); + } + + int comp = compareValues(one, two, sortType); if (comp != 0) { return comp; } @@ -112,144 +190,140 @@ public int compare(Object a, Object b) { } return 0; } - return compareValues((Value) a, (Value) b, SortOrder.ASCENDING); + return compareValues(a, b, SortOrder.ASCENDING); } - private int compareValues(Value a, Value b, int sortType) { + private int compare(SearchRow a, SearchRow b) { if (a == b) { return 0; } - // null is never stored; - // comparison with null is used to retrieve all entries - // in which case null is always lower than all entries - // (even for descending ordered indexes) - if (a == null) { - return -1; - } else if (b == null) { - return 1; - } - boolean aNull = a == ValueNull.INSTANCE; - boolean bNull = b == ValueNull.INSTANCE; - if (aNull || bNull) { - return SortOrder.compareNull(aNull, sortType); - } - int comp = compareTypeSave(a, b); - if ((sortType & SortOrder.DESCENDING) != 0) { - comp = -comp; + int[] indexes = rowFactory.getIndexes(); + if (indexes == null) { + int len = a.getColumnCount(); + assert len == b.getColumnCount() : len + " != " + b.getColumnCount(); + for (int i = 0; i < len; i++) { + int comp = compareValues(a.getValue(i), b.getValue(i), sortTypes[i]); + 
if (comp != 0) { + return comp; + } + } + return 0; + } else { + assert sortTypes.length == indexes.length; + for (int i = 0; i < indexes.length; i++) { + int index = indexes[i]; + Value v1 = a.getValue(index); + Value v2 = b.getValue(index); + if (v1 == null || v2 == null) { + // can't compare further + break; + } + int comp = compareValues(a.getValue(index), b.getValue(index), sortTypes[i]); + if (comp != 0) { + return comp; + } + } + long aKey = a.getKey(); + long bKey = b.getKey(); + return aKey == SearchRow.MATCH_ALL_ROW_KEY || bKey == SearchRow.MATCH_ALL_ROW_KEY ? + 0 : Long.compare(aKey, bKey); } - return comp; } - private int compareTypeSave(Value a, Value b) { + /** + * Compares the specified values. + * + * @param a the first value + * @param b the second value + * @param sortType the sorting type + * @return 0 if equal, -1 if first value is smaller for ascending or larger + * for descending sort type, 1 otherwise + */ + public int compareValues(Value a, Value b, int sortType) { if (a == b) { return 0; } - return a.compareTypeSave(b, compareMode); - } - - @Override - public int getMemory(Object obj) { - if (obj instanceof SpatialKey) { - return getSpatialDataType().getMemory(obj); + boolean aNull = a == ValueNull.INSTANCE; + if (aNull || b == ValueNull.INSTANCE) { + /* + * Indexes with nullable values should have explicit null ordering, + * so default should not matter. + */ + return DefaultNullOrdering.LOW.compareNull(aNull, sortType); } - return getMemory((Value) obj); - } - private static int getMemory(Value v) { - return v == null ? 
0 : v.getMemory(); - } + int comp = a.compareTo(b, provider, compareMode); - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + if ((sortType & SortOrder.DESCENDING) != 0) { + comp = -comp; } + return comp; } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } + public int getMemory(Value v) { + return v == null ? 0 : v.getMemory(); } @Override - public Object read(ByteBuffer buff) { - return readValue(buff); + public Value read(ByteBuffer buff) { + return readValue(buff, null); } @Override - public void write(WriteBuffer buff, Object obj) { - if (obj instanceof SpatialKey) { - buff.put((byte) SPATIAL_KEY_2D); - getSpatialDataType().write(buff, obj); - return; - } - Value x = (Value) obj; - writeValue(buff, x); - } - - private void writeValue(WriteBuffer buff, Value v) { + public void write(WriteBuffer buff, Value v) { if (v == ValueNull.INSTANCE) { buff.put((byte) 0); return; } - int type = v.getType(); + int type = v.getValueType(); switch (type) { case Value.BOOLEAN: - buff.put((byte) (v.getBoolean().booleanValue() ? - BOOLEAN_TRUE : BOOLEAN_FALSE)); + buff.put(v.getBoolean() ? BOOLEAN_TRUE : BOOLEAN_FALSE); break; - case Value.BYTE: - buff.put((byte) type).put(v.getByte()); + case Value.TINYINT: + buff.put(TINYINT).put(v.getByte()); break; - case Value.SHORT: - buff.put((byte) type).putShort(v.getShort()); + case Value.SMALLINT: + buff.put(SMALLINT).putShort(v.getShort()); break; - case Value.INT: { + case Value.ENUM: + case Value.INTEGER: { int x = v.getInt(); if (x < 0) { - buff.put((byte) INT_NEG).putVarInt(-x); + buff.put(INT_NEG).putVarInt(-x); } else if (x < 16) { buff.put((byte) (INT_0_15 + x)); } else { - buff.put((byte) type).putVarInt(x); + buff.put(type == Value.INTEGER ? 
INTEGER : ENUM).putVarInt(x); } break; } - case Value.LONG: { - long x = v.getLong(); - if (x < 0) { - buff.put((byte) LONG_NEG).putVarLong(-x); - } else if (x < 8) { - buff.put((byte) (LONG_0_7 + x)); - } else { - buff.put((byte) type).putVarLong(x); - } + case Value.BIGINT: + writeLong(buff, v.getLong()); break; - } - case Value.DECIMAL: { + case Value.NUMERIC: { BigDecimal x = v.getBigDecimal(); if (BigDecimal.ZERO.equals(x)) { - buff.put((byte) DECIMAL_0_1); + buff.put(NUMERIC_0_1); } else if (BigDecimal.ONE.equals(x)) { - buff.put((byte) (DECIMAL_0_1 + 1)); + buff.put((byte) (NUMERIC_0_1 + 1)); } else { int scale = x.scale(); BigInteger b = x.unscaledValue(); int bits = b.bitLength(); if (bits <= 63) { if (scale == 0) { - buff.put((byte) DECIMAL_SMALL_0). + buff.put(NUMERIC_SMALL_0). putVarLong(b.longValue()); } else { - buff.put((byte) DECIMAL_SMALL). + buff.put(NUMERIC_SMALL). putVarInt(scale). putVarLong(b.longValue()); } } else { byte[] bytes = b.toByteArray(); - buff.put((byte) type). + buff.put(NUMERIC). putVarInt(scale). putVarInt(bytes.length). put(bytes); @@ -257,76 +331,94 @@ private void writeValue(WriteBuffer buff, Value v) { } break; } - case Value.TIME: { - ValueTime t = (ValueTime) v; - long nanos = t.getNanos(); - long millis = nanos / 1000000; - nanos -= millis * 1000000; - buff.put((byte) type). - putVarLong(millis). - putVarLong(nanos); + case Value.DECFLOAT: { + ValueDecfloat d = (ValueDecfloat) v; + buff.put((byte) DECFLOAT); + if (d.isFinite()) { + BigDecimal x = d.getBigDecimal(); + byte[] bytes = x.unscaledValue().toByteArray(); + buff.putVarInt(x.scale()). + putVarInt(bytes.length). 
+ put(bytes); + } else { + int c; + if (d == ValueDecfloat.NEGATIVE_INFINITY) { + c = -3; + } else if (d == ValueDecfloat.POSITIVE_INFINITY) { + c = -2; + } else { + c = -1; + } + buff.putVarInt(0).putVarInt(c); + } break; } - case Value.DATE: { - long x = ((ValueDate) v).getDateValue(); - buff.put((byte) type).putVarLong(x); + case Value.TIME: + writeTimestampTime(buff.put(TIME), ((ValueTime) v).getNanos()); + break; + case Value.TIME_TZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) v; + long nanosOfDay = t.getNanos(); + buff.put((byte) TIME_TZ). + putVarInt((int) (nanosOfDay / DateTimeUtils.NANOS_PER_SECOND)). + putVarInt((int) (nanosOfDay % DateTimeUtils.NANOS_PER_SECOND)); + writeTimeZone(buff, t.getTimeZoneOffsetSeconds()); break; } + case Value.DATE: + buff.put(DATE).putVarLong(((ValueDate) v).getDateValue()); + break; case Value.TIMESTAMP: { ValueTimestamp ts = (ValueTimestamp) v; - long dateValue = ts.getDateValue(); - long nanos = ts.getTimeNanos(); - long millis = nanos / 1000000; - nanos -= millis * 1000000; - buff.put((byte) type). - putVarLong(dateValue). - putVarLong(millis). - putVarLong(nanos); + buff.put(TIMESTAMP).putVarLong(ts.getDateValue()); + writeTimestampTime(buff, ts.getTimeNanos()); break; } - case Value.JAVA_OBJECT: { - byte[] b = v.getBytesNoCopy(); - buff.put((byte) type). - putVarInt(b.length). - put(b); + case Value.TIMESTAMP_TZ: { + ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v; + buff.put((byte) TIMESTAMP_TZ).putVarLong(ts.getDateValue()); + writeTimestampTime(buff, ts.getTimeNanos()); + writeTimeZone(buff, ts.getTimeZoneOffsetSeconds()); break; } - case Value.BYTES: { + case Value.JAVA_OBJECT: + writeBinary(JAVA_OBJECT, buff, v); + break; + case Value.VARBINARY: { byte[] b = v.getBytesNoCopy(); int len = b.length; if (len < 32) { - buff.put((byte) (BYTES_0_31 + len)). - put(b); + buff.put((byte) (VARBINARY_0_31 + len)).put(b); } else { - buff.put((byte) type). - putVarInt(b.length). 
- put(b); + buff.put(VARBINARY).putVarInt(len).put(b); } break; } + case Value.BINARY: + writeBinary((byte) BINARY, buff, v); + break; case Value.UUID: { ValueUuid uuid = (ValueUuid) v; - buff.put((byte) type). + buff.put(UUID). putLong(uuid.getHigh()). putLong(uuid.getLow()); break; } - case Value.STRING: { + case Value.VARCHAR: { String s = v.getString(); int len = s.length(); if (len < 32) { - buff.put((byte) (STRING_0_31 + len)). - putStringData(s, len); + buff.put((byte) (VARCHAR_0_31 + len)).putStringData(s, len); } else { - buff.put((byte) type); - writeString(buff, s); + writeString(buff.put(VARCHAR), s); } break; } - case Value.STRING_IGNORECASE: - case Value.STRING_FIXED: - buff.put((byte) type); - writeString(buff, v.getString()); + case Value.VARCHAR_IGNORECASE: + writeString(buff.put(VARCHAR_IGNORECASE), v.getString()); + break; + case Value.CHAR: + writeString(buff.put(CHAR), v.getString()); break; case Value.DOUBLE: { double x = v.getDouble(); @@ -335,94 +427,138 @@ private void writeValue(WriteBuffer buff, Value v) { } else { long d = Double.doubleToLongBits(x); if (d == ValueDouble.ZERO_BITS) { - buff.put((byte) DOUBLE_0_1); + buff.put(DOUBLE_0_1); } else { - buff.put((byte) type). + buff.put(DOUBLE). putVarLong(Long.reverse(d)); } } break; } - case Value.FLOAT: { + case Value.REAL: { float x = v.getFloat(); if (x == 1.0f) { - buff.put((byte) (FLOAT_0_1 + 1)); + buff.put((byte) (REAL_0_1 + 1)); } else { int f = Float.floatToIntBits(x); - if (f == ValueFloat.ZERO_BITS) { - buff.put((byte) FLOAT_0_1); + if (f == ValueReal.ZERO_BITS) { + buff.put(REAL_0_1); } else { - buff.put((byte) type). + buff.put(REAL). 
putVarInt(Integer.reverse(f)); } } break; } - case Value.BLOB: - case Value.CLOB: { - buff.put((byte) type); - ValueLobDb lob = (ValueLobDb) v; - byte[] small = lob.getSmall(); - if (small == null) { + case Value.BLOB: { + buff.put(BLOB); + ValueBlob lob = (ValueBlob) v; + LobData lobData = lob.getLobData(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; buff.putVarInt(-3). - putVarInt(lob.getTableId()). - putVarLong(lob.getLobId()). - putVarLong(lob.getPrecision()); + putVarInt(lobDataDatabase.getTableId()). + putVarLong(lobDataDatabase.getLobId()). + putVarLong(lob.octetLength()); } else { + byte[] small = ((LobDataInMemory) lobData).getSmall(); buff.putVarInt(small.length). put(small); } break; } - case Value.ARRAY: { - Value[] list = ((ValueArray) v).getList(); - buff.put((byte) type).putVarInt(list.length); + case Value.CLOB: { + buff.put(CLOB); + ValueClob lob = (ValueClob) v; + LobData lobData = lob.getLobData(); + if (lobData instanceof LobDataDatabase) { + LobDataDatabase lobDataDatabase = (LobDataDatabase) lobData; + buff.putVarInt(-3). + putVarInt(lobDataDatabase.getTableId()). + putVarLong(lobDataDatabase.getLobId()). + putVarLong(lob.octetLength()). + putVarLong(lob.charLength()); + } else { + byte[] small = ((LobDataInMemory) lobData).getSmall(); + buff.putVarInt(small.length). + put(small). + putVarLong(lob.charLength()); + } + break; + } + case Value.ARRAY: + case Value.ROW: { + Value[] list = ((ValueCollectionBase) v).getList(); + buff.put(type == Value.ARRAY ? 
ARRAY : ROW) + .putVarInt(list.length); for (Value x : list) { - writeValue(buff, x); + write(buff, x); } break; } - case Value.RESULT_SET: { - buff.put((byte) type); - try { - ResultSet rs = ((ValueResultSet) v).getResultSet(); - rs.beforeFirst(); - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount(); - buff.putVarInt(columnCount); - for (int i = 0; i < columnCount; i++) { - writeString(buff, meta.getColumnName(i + 1)); - buff.putVarInt(meta.getColumnType(i + 1)). - putVarInt(meta.getPrecision(i + 1)). - putVarInt(meta.getScale(i + 1)); - } - while (rs.next()) { - buff.put((byte) 1); - for (int i = 0; i < columnCount; i++) { - int t = org.h2.value.DataType. - getValueTypeFromResultSet(meta, i + 1); - Value val = org.h2.value.DataType.readValue( - null, rs, i + 1, t); - writeValue(buff, val); - } - } - buff.put((byte) 0); - rs.beforeFirst(); - } catch (SQLException e) { - throw DbException.convert(e); + case Value.GEOMETRY: + writeBinary(GEOMETRY, buff, v); + break; + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case Value.INTERVAL_MINUTE: { + ValueInterval interval = (ValueInterval) v; + int ordinal = type - Value.INTERVAL_YEAR; + if (interval.isNegative()) { + ordinal = ~ordinal; } + buff.put(INTERVAL). + put((byte) ordinal). + putVarLong(interval.getLeading()); break; } - case Value.GEOMETRY: { - byte[] b = v.getBytes(); - int len = b.length; - buff.put((byte) type). - putVarInt(len). - put(b); + case Value.INTERVAL_SECOND: + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: { + ValueInterval interval = (ValueInterval) v; + int ordinal = type - Value.INTERVAL_YEAR; + if (interval.isNegative()) { + ordinal = ~ordinal; + } + buff.put(INTERVAL). 
+ put((byte) (ordinal)). + putVarLong(interval.getLeading()). + putVarLong(interval.getRemaining()); break; } + case Value.JSON: + writeBinary((byte) JSON, buff, v); + break; default: - DbException.throwInternalError("type=" + v.getType()); + throw DbException.getInternalError("type=" + v.getValueType()); + } + } + + private static void writeBinary(byte type, WriteBuffer buff, Value v) { + byte[] b = v.getBytesNoCopy(); + buff.put(type).putVarInt(b.length).put(b); + } + + /** + * Writes a long. + * + * @param buff the target buffer + * @param x the long value + */ + public static void writeLong(WriteBuffer buff, long x) { + if (x < 0) { + buff.put(BIGINT_NEG).putVarLong(-x); + } else if (x < 8) { + buff.put((byte) (BIGINT_0_7 + x)); + } else { + buff.put(BIGINT).putVarLong(x); } } @@ -431,192 +567,247 @@ private static void writeString(WriteBuffer buff, String s) { buff.putVarInt(len).putStringData(s, len); } + private static void writeTimestampTime(WriteBuffer buff, long nanos) { + long millis = nanos / 1_000_000L; + buff.putVarLong(millis).putVarInt((int) (nanos - millis * 1_000_000L)); + } + + private static void writeTimeZone(WriteBuffer buff, int timeZoneOffset) { + // Valid JSR-310 offsets are -64,800..64,800 + // Use 1 byte for common time zones (including +8:45 etc.) + if (timeZoneOffset % 900 == 0) { + // -72..72 + buff.put((byte) (timeZoneOffset / 900)); + } else if (timeZoneOffset > 0) { + buff.put(Byte.MAX_VALUE).putVarInt(timeZoneOffset); + } else { + buff.put(Byte.MIN_VALUE).putVarInt(-timeZoneOffset); + } + } + /** * Read a value. 
* + * @param buff the source buffer + * @param columnType the data type of value, or {@code null} * @return the value */ - private Object readValue(ByteBuffer buff) { + Value readValue(ByteBuffer buff, TypeInfo columnType) { int type = buff.get() & 255; switch (type) { - case Value.NULL: + case NULL: return ValueNull.INSTANCE; case BOOLEAN_TRUE: - return ValueBoolean.get(true); + return ValueBoolean.TRUE; case BOOLEAN_FALSE: - return ValueBoolean.get(false); + return ValueBoolean.FALSE; case INT_NEG: - return ValueInt.get(-readVarInt(buff)); - case Value.INT: - return ValueInt.get(readVarInt(buff)); - case LONG_NEG: - return ValueLong.get(-readVarLong(buff)); - case Value.LONG: - return ValueLong.get(readVarLong(buff)); - case Value.BYTE: - return ValueByte.get(buff.get()); - case Value.SHORT: - return ValueShort.get(buff.getShort()); - case DECIMAL_0_1: - return ValueDecimal.ZERO; - case DECIMAL_0_1 + 1: - return ValueDecimal.ONE; - case DECIMAL_SMALL_0: - return ValueDecimal.get(BigDecimal.valueOf( - readVarLong(buff))); - case DECIMAL_SMALL: { + return ValueInteger.get(-readVarInt(buff)); + case INTEGER: + return ValueInteger.get(readVarInt(buff)); + case BIGINT_NEG: + return ValueBigint.get(-readVarLong(buff)); + case BIGINT: + return ValueBigint.get(readVarLong(buff)); + case TINYINT: + return ValueTinyint.get(buff.get()); + case SMALLINT: + return ValueSmallint.get(buff.getShort()); + case NUMERIC_0_1: + return ValueNumeric.ZERO; + case NUMERIC_0_1 + 1: + return ValueNumeric.ONE; + case NUMERIC_SMALL_0: + return ValueNumeric.get(BigDecimal.valueOf(readVarLong(buff))); + case NUMERIC_SMALL: { int scale = readVarInt(buff); - return ValueDecimal.get(BigDecimal.valueOf( - readVarLong(buff), scale)); + return ValueNumeric.get(BigDecimal.valueOf(readVarLong(buff), scale)); } - case Value.DECIMAL: { + case NUMERIC: { int scale = readVarInt(buff); - int len = readVarInt(buff); - byte[] buff2 = DataUtils.newBytes(len); - buff.get(buff2, 0, len); - BigInteger b = new 
BigInteger(buff2); - return ValueDecimal.get(new BigDecimal(b, scale)); + return ValueNumeric.get(new BigDecimal(new BigInteger(readVarBytes(buff)), scale)); + } + case DECFLOAT: { + int scale = readVarInt(buff), len = readVarInt(buff); + switch (len) { + case -3: + return ValueDecfloat.NEGATIVE_INFINITY; + case -2: + return ValueDecfloat.POSITIVE_INFINITY; + case -1: + return ValueDecfloat.NAN; + default: + byte[] b = Utils.newBytes(len); + buff.get(b, 0, len); + return ValueDecfloat.get(new BigDecimal(new BigInteger(b), scale)); + } } - case Value.DATE: { + case DATE: return ValueDate.fromDateValue(readVarLong(buff)); + case TIME: + return ValueTime.fromNanos(readTimestampTime(buff)); + case TIME_TZ: + return ValueTimeTimeZone.fromNanos(readVarInt(buff) * DateTimeUtils.NANOS_PER_SECOND + readVarInt(buff), + readTimeZone(buff)); + case TIMESTAMP: + return ValueTimestamp.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff)); + case TIMESTAMP_TZ_OLD: + return ValueTimestampTimeZone.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff), + readVarInt(buff) * 60); + case TIMESTAMP_TZ: + return ValueTimestampTimeZone.fromDateValueAndNanos(readVarLong(buff), readTimestampTime(buff), + readTimeZone(buff)); + case VARBINARY: + return ValueVarbinary.getNoCopy(readVarBytes(buff)); + case BINARY: + return ValueBinary.getNoCopy(readVarBytes(buff)); + case JAVA_OBJECT: + return ValueJavaObject.getNoCopy(readVarBytes(buff)); + case UUID: + return ValueUuid.get(buff.getLong(), buff.getLong()); + case VARCHAR: + return ValueVarchar.get(readString(buff)); + case VARCHAR_IGNORECASE: + return ValueVarcharIgnoreCase.get(readString(buff)); + case CHAR: + return ValueChar.get(readString(buff)); + case ENUM: { + int ordinal = readVarInt(buff); + if (columnType != null) { + return ((ExtTypeInfoEnum) columnType.getExtTypeInfo()).getValue(ordinal, provider); + } + return ValueInteger.get(ordinal); } - case Value.TIME: { - long nanos = readVarLong(buff) * 1000000 + 
readVarLong(buff); - return ValueTime.fromNanos(nanos); - } - case Value.TIMESTAMP: { - long dateValue = readVarLong(buff); - long nanos = readVarLong(buff) * 1000000 + readVarLong(buff); - return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos); - } - case Value.BYTES: { - int len = readVarInt(buff); - byte[] b = DataUtils.newBytes(len); - buff.get(b, 0, len); - return ValueBytes.getNoCopy(b); - } - case Value.JAVA_OBJECT: { - int len = readVarInt(buff); - byte[] b = DataUtils.newBytes(len); - buff.get(b, 0, len); - return ValueJavaObject.getNoCopy(null, b, handler); + case INTERVAL: { + int ordinal = buff.get(); + boolean negative = ordinal < 0; + if (negative) { + ordinal = ~ordinal; + } + return ValueInterval.from(IntervalQualifier.valueOf(ordinal), negative, readVarLong(buff), + ordinal < 5 ? 0 : readVarLong(buff)); } - case Value.UUID: - return ValueUuid.get(buff.getLong(), buff.getLong()); - case Value.STRING: - return ValueString.get(readString(buff)); - case Value.STRING_IGNORECASE: - return ValueStringIgnoreCase.get(readString(buff)); - case Value.STRING_FIXED: - return ValueStringFixed.get(readString(buff)); - case FLOAT_0_1: - return ValueFloat.get(0); - case FLOAT_0_1 + 1: - return ValueFloat.get(1); + case REAL_0_1: + return ValueReal.ZERO; + case REAL_0_1 + 1: + return ValueReal.ONE; case DOUBLE_0_1: - return ValueDouble.get(0); + return ValueDouble.ZERO; case DOUBLE_0_1 + 1: - return ValueDouble.get(1); - case Value.DOUBLE: - return ValueDouble.get(Double.longBitsToDouble( - Long.reverse(readVarLong(buff)))); - case Value.FLOAT: - return ValueFloat.get(Float.intBitsToFloat( - Integer.reverse(readVarInt(buff)))); - case Value.BLOB: - case Value.CLOB: { + return ValueDouble.ONE; + case DOUBLE: + return ValueDouble.get(Double.longBitsToDouble(Long.reverse(readVarLong(buff)))); + case REAL: + return ValueReal.get(Float.intBitsToFloat(Integer.reverse(readVarInt(buff)))); + case BLOB: { int smallLen = readVarInt(buff); if (smallLen >= 0) { - byte[] 
small = DataUtils.newBytes(smallLen); + byte[] small = Utils.newBytes(smallLen); buff.get(small, 0, smallLen); - return ValueLobDb.createSmallLob(type, small); + return ValueBlob.createSmall(small); } else if (smallLen == -3) { - int tableId = readVarInt(buff); - long lobId = readVarLong(buff); - long precision = readVarLong(buff); - ValueLobDb lob = ValueLobDb.create(type, - handler, tableId, lobId, null, precision); - return lob; + return new ValueBlob(readLobDataDatabase(buff), readVarLong(buff)); } else { - throw DbException.get(ErrorCode.FILE_CORRUPTED_1, - "lob type: " + smallLen); + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "lob type: " + smallLen); } } - case Value.ARRAY: { - int len = readVarInt(buff); - Value[] list = new Value[len]; - for (int i = 0; i < len; i++) { - list[i] = (Value) readValue(buff); - } - return ValueArray.get(list); - } - case Value.RESULT_SET: { - SimpleResultSet rs = new SimpleResultSet(); - rs.setAutoClose(false); - int columns = readVarInt(buff); - for (int i = 0; i < columns; i++) { - rs.addColumn(readString(buff), - readVarInt(buff), - readVarInt(buff), - readVarInt(buff)); + case CLOB: { + int smallLen = readVarInt(buff); + if (smallLen >= 0) { + byte[] small = Utils.newBytes(smallLen); + buff.get(small, 0, smallLen); + return ValueClob.createSmall(small, readVarLong(buff)); + } else if (smallLen == -3) { + return new ValueClob(readLobDataDatabase(buff), readVarLong(buff), readVarLong(buff)); + } else { + throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "lob type: " + smallLen); } - while (true) { - if (buff.get() == 0) { - break; - } - Object[] o = new Object[columns]; - for (int i = 0; i < columns; i++) { - o[i] = ((Value) readValue(buff)).getObject(); - } - rs.addRow(o); + } + case ARRAY: { + if (columnType != null) { + TypeInfo elementType = (TypeInfo) columnType.getExtTypeInfo(); + return ValueArray.get(elementType, readArrayElements(buff, elementType), provider); } - return ValueResultSet.get(rs); + return 
ValueArray.get(readArrayElements(buff, null), provider); } - case Value.GEOMETRY: { + case ROW: { int len = readVarInt(buff); - byte[] b = DataUtils.newBytes(len); - buff.get(b, 0, len); - return ValueGeometry.get(b); + Value[] list = new Value[len]; + if (columnType != null) { + ExtTypeInfoRow extTypeInfoRow = (ExtTypeInfoRow) columnType.getExtTypeInfo(); + Iterator> fields = extTypeInfoRow.getFields().iterator(); + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, fields.next().getValue()); + } + return ValueRow.get(columnType, list); + } + TypeInfo[] columnTypes = rowFactory.getColumnTypes(); + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, columnTypes[i]); + } + return ValueRow.get(list); } - case SPATIAL_KEY_2D: - return getSpatialDataType().read(buff); + case GEOMETRY: + return ValueGeometry.get(readVarBytes(buff)); + case JSON: + return ValueJson.getInternal(readVarBytes(buff)); default: if (type >= INT_0_15 && type < INT_0_15 + 16) { - return ValueInt.get(type - INT_0_15); - } else if (type >= LONG_0_7 && type < LONG_0_7 + 8) { - return ValueLong.get(type - LONG_0_7); - } else if (type >= BYTES_0_31 && type < BYTES_0_31 + 32) { - int len = type - BYTES_0_31; - byte[] b = DataUtils.newBytes(len); + int i = type - INT_0_15; + if (columnType != null && columnType.getValueType() == Value.ENUM) { + return ((ExtTypeInfoEnum) columnType.getExtTypeInfo()).getValue(i, provider); + } + return ValueInteger.get(i); + } else if (type >= BIGINT_0_7 && type < BIGINT_0_7 + 8) { + return ValueBigint.get(type - BIGINT_0_7); + } else if (type >= VARBINARY_0_31 && type < VARBINARY_0_31 + 32) { + int len = type - VARBINARY_0_31; + byte[] b = Utils.newBytes(len); buff.get(b, 0, len); - return ValueBytes.getNoCopy(b); - } else if (type >= STRING_0_31 && type < STRING_0_31 + 32) { - return ValueString.get(readString(buff, type - STRING_0_31)); + return ValueVarbinary.getNoCopy(b); + } else if (type >= VARCHAR_0_31 && type < VARCHAR_0_31 + 32) { + return 
ValueVarchar.get(readString(buff, type - VARCHAR_0_31)); } throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "type: " + type); } } - private static int readVarInt(ByteBuffer buff) { - return DataUtils.readVarInt(buff); + private LobDataDatabase readLobDataDatabase(ByteBuffer buff) { + int tableId = readVarInt(buff); + long lobId = readVarLong(buff); + LobDataDatabase lobData = new LobDataDatabase(handler, tableId, lobId); + return lobData; } - private static long readVarLong(ByteBuffer buff) { - return DataUtils.readVarLong(buff); + private Value[] readArrayElements(ByteBuffer buff, TypeInfo elementType) { + int len = readVarInt(buff); + Value[] list = new Value[len]; + for (int i = 0; i < len; i++) { + list[i] = readValue(buff, elementType); + } + return list; } - private static String readString(ByteBuffer buff, int len) { - return DataUtils.readString(buff, len); + private static byte[] readVarBytes(ByteBuffer buff) { + int len = readVarInt(buff); + byte[] b = Utils.newBytes(len); + buff.get(b, 0, len); + return b; } - private static String readString(ByteBuffer buff) { - int len = readVarInt(buff); - return DataUtils.readString(buff, len); + private static long readTimestampTime(ByteBuffer buff) { + return readVarLong(buff) * 1_000_000L + readVarInt(buff); } - @Override - public int hashCode() { - return compareMode.hashCode() ^ Arrays.hashCode(sortTypes); + private static int readTimeZone(ByteBuffer buff) { + byte b = buff.get(); + if (b == Byte.MAX_VALUE) { + return readVarInt(buff); + } else if (b == Byte.MIN_VALUE) { + return -readVarInt(buff); + } else { + return b * 900; + } } @Override @@ -630,7 +821,77 @@ public boolean equals(Object obj) { if (!compareMode.equals(v.compareMode)) { return false; } - return Arrays.equals(sortTypes, v.sortTypes); + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + int[] indexes2 = v.rowFactory == null ? 
null : v.rowFactory.getIndexes(); + return Arrays.equals(sortTypes, v.sortTypes) + && Arrays.equals(indexes, indexes2); + } + + @Override + public int hashCode() { + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + return super.hashCode() ^ Arrays.hashCode(indexes) + ^ compareMode.hashCode() ^ Arrays.hashCode(sortTypes); + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + writeIntArray(buff, sortTypes); + int columnCount = rowFactory == null ? 0 : rowFactory.getColumnCount(); + buff.putVarInt(columnCount); + int[] indexes = rowFactory == null ? null : rowFactory.getIndexes(); + writeIntArray(buff, indexes); + buff.put(rowFactory == null || rowFactory.getRowDataType().isStoreKeys() ? (byte) 1 : (byte) 0); + } + + private static void writeIntArray(WriteBuffer buff, int[] array) { + if(array == null) { + buff.putVarInt(0); + } else { + buff.putVarInt(array.length + 1); + for (int i : array) { + buff.putVarInt(i); + } + } + } + + @Override + public Factory getFactory() { + return FACTORY; + } + + private static final Factory FACTORY = new Factory(); + + public static final class Factory implements StatefulDataType.Factory { + + @Override + public DataType create(ByteBuffer buff, MetaType metaType, Database database) { + int[] sortTypes = readIntArray(buff); + int columnCount = DataUtils.readVarInt(buff); + int[] indexes = readIntArray(buff); + boolean storeKeys = buff.get() != 0; + CompareMode compareMode = database == null ? 
CompareMode.getInstance(null, 0) : database.getCompareMode(); + if (database == null) { + return new ValueDataType(); + } else if (sortTypes == null) { + return new ValueDataType(database, null); + } + RowFactory rowFactory = RowFactory.getDefaultRowFactory().createRowFactory(database, compareMode, database, + sortTypes, indexes, null, columnCount, storeKeys); + return rowFactory.getRowDataType(); + } + + private static int[] readIntArray(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff) - 1; + if(len < 0) { + return null; + } + int[] res = new int[len]; + for (int i = 0; i < res.length; i++) { + res[i] = DataUtils.readVarInt(buff); + } + return res; + } } } diff --git a/h2/src/main/org/h2/mvstore/db/package.html b/h2/src/main/org/h2/mvstore/db/package.html index 9000ca0ab9..efa1e98076 100644 --- a/h2/src/main/org/h2/mvstore/db/package.html +++ b/h2/src/main/org/h2/mvstore/db/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/package.html b/h2/src/main/org/h2/mvstore/package.html index 65eabdc54b..9ebeb43f22 100644 --- a/h2/src/main/org/h2/mvstore/package.html +++ b/h2/src/main/org/h2/mvstore/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java b/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java new file mode 100644 index 0000000000..e8b7a200f2 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/rtree/DefaultSpatial.java @@ -0,0 +1,75 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.rtree; + +import java.util.Arrays; + +/** + * Class BasicSpatialImpl. + * + * @author Andrei Tokar + */ +final class DefaultSpatial implements Spatial +{ + private final long id; + private final float[] minMax; + + /** + * Create a new key. + * + * @param id the id + * @param minMax min x, max x, min y, max y, and so on + */ + public DefaultSpatial(long id, float... 
minMax) { + this.id = id; + this.minMax = minMax; + } + + private DefaultSpatial(long id, DefaultSpatial other) { + this.id = id; + this.minMax = other.minMax.clone(); + } + + @Override + public float min(int dim) { + return minMax[dim + dim]; + } + + @Override + public void setMin(int dim, float x) { + minMax[dim + dim] = x; + } + + @Override + public float max(int dim) { + return minMax[dim + dim + 1]; + } + + @Override + public void setMax(int dim, float x) { + minMax[dim + dim + 1] = x; + } + + @Override + public Spatial clone(long id) { + return new DefaultSpatial(id, this); + } + + @Override + public long getId() { + return id; + } + + @Override + public boolean isNull() { + return minMax.length == 0; + } + + @Override + public boolean equalsIgnoringId(Spatial o) { + return Arrays.equals(minMax, ((DefaultSpatial)o).minMax); + } +} diff --git a/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java b/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java index 46b5e860bb..4b8a7a60c1 100644 --- a/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java +++ b/h2/src/main/org/h2/mvstore/rtree/MVRTreeMap.java @@ -1,56 +1,51 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.rtree; import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; +import java.util.Map; import org.h2.mvstore.CursorPos; -import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.Page; +import org.h2.mvstore.RootReference; import org.h2.mvstore.type.DataType; -import org.h2.mvstore.type.ObjectDataType; -import org.h2.util.New; /** - * An r-tree implementation. It uses the quadratic split algorithm. + * An r-tree implementation. 
It supports both the linear and the quadratic split + * algorithm. * * @param the value class */ -public class MVRTreeMap extends MVMap { +public final class MVRTreeMap extends MVMap { /** * The spatial key type. */ - final SpatialDataType keyType; + private final SpatialDataType keyType; private boolean quadraticSplit; - public MVRTreeMap(int dimensions, DataType valueType) { - super(new SpatialDataType(dimensions), valueType); - this.keyType = (SpatialDataType) getKeyType(); + public MVRTreeMap(Map config, SpatialDataType keyType, DataType valueType) { + super(config, keyType, valueType); + this.keyType = keyType; + quadraticSplit = Boolean.parseBoolean(String.valueOf(config.get("quadraticSplit"))); } - /** - * Create a new map with the given dimensions and value type. - * - * @param the value type - * @param dimensions the number of dimensions - * @param valueType the value type - * @return the map - */ - public static MVRTreeMap create(int dimensions, DataType valueType) { - return new MVRTreeMap(dimensions, valueType); + private MVRTreeMap(MVRTreeMap source) { + super(source); + this.keyType = source.keyType; + this.quadraticSplit = source.quadraticSplit; } @Override - @SuppressWarnings("unchecked") - public V get(Object key) { - return (V) get(root, key); + public MVRTreeMap cloneIt() { + return new MVRTreeMap<>(this); } /** @@ -59,14 +54,8 @@ public V get(Object key) { * @param x the rectangle * @return the iterator */ - public RTreeCursor findIntersectingKeys(SpatialKey x) { - return new RTreeCursor(root, x) { - @Override - protected boolean check(boolean leaf, SpatialKey key, - SpatialKey test) { - return keyType.isOverlap(key, test); - } - }; + public RTreeCursor findIntersectingKeys(Spatial x) { + return new IntersectsRTreeCursor<>(getRootPage(), x, keyType); } /** @@ -76,20 +65,11 @@ protected boolean check(boolean leaf, SpatialKey key, * @param x the rectangle * @return the iterator */ - public RTreeCursor findContainedKeys(SpatialKey x) { - return new 
RTreeCursor(root, x) { - @Override - protected boolean check(boolean leaf, SpatialKey key, - SpatialKey test) { - if (leaf) { - return keyType.isInside(key, test); - } - return keyType.isOverlap(key, test); - } - }; + public RTreeCursor findContainedKeys(Spatial x) { + return new ContainsRTreeCursor<>(getRootPage(), x, keyType); } - private boolean contains(Page p, int index, Object key) { + private boolean contains(Page p, int index, Object key) { return keyType.contains(p.getKey(index), key); } @@ -100,18 +80,20 @@ private boolean contains(Page p, int index, Object key) { * @param key the key * @return the value, or null if not found */ - protected Object get(Page p, Object key) { + @Override + public V get(Page p, Spatial key) { + int keyCount = p.getKeyCount(); if (!p.isLeaf()) { - for (int i = 0; i < p.getKeyCount(); i++) { + for (int i = 0; i < keyCount; i++) { if (contains(p, i, key)) { - Object o = get(p.getChildPage(i), key); + V o = get(p.getChildPage(i), key); if (o != null) { return o; } } } } else { - for (int i = 0; i < p.getKeyCount(); i++) { + for (int i = 0; i < keyCount; i++) { if (keyType.equals(p.getKey(i), key)) { return p.getValue(i); } @@ -120,154 +102,132 @@ protected Object get(Page p, Object key) { return null; } - @Override - protected synchronized Object remove(Page p, long writeVersion, Object key) { - Object result = null; - if (p.isLeaf()) { - for (int i = 0; i < p.getKeyCount(); i++) { - if (keyType.equals(p.getKey(i), key)) { - result = p.getValue(i); - p.remove(i); - break; - } - } - return result; - } - for (int i = 0; i < p.getKeyCount(); i++) { - if (contains(p, i, key)) { - Page cOld = p.getChildPage(i); - // this will mark the old page as deleted - // so we need to update the parent in any case - // (otherwise the old page might be deleted again) - Page c = cOld.copy(writeVersion); - long oldSize = c.getTotalCount(); - result = remove(c, writeVersion, key); - p.setChild(i, c); - if (oldSize == c.getTotalCount()) { - continue; 
- } - if (c.getTotalCount() == 0) { - // this child was deleted - p.remove(i); - if (p.getKeyCount() == 0) { - c.removePage(); - } - break; - } - Object oldBounds = p.getKey(i); - if (!keyType.isInside(key, oldBounds)) { - p.setKey(i, getBounds(c)); - } - break; - } - } - return result; - } - - private Object getBounds(Page x) { - Object bounds = keyType.createBoundingBox(x.getKey(0)); - for (int i = 1; i < x.getKeyCount(); i++) { - keyType.increaseBounds(bounds, x.getKey(i)); - } - return bounds; - } - - @Override - @SuppressWarnings("unchecked") - public V put(SpatialKey key, V value) { - return (V) putOrAdd(key, value, false); - } - /** - * Add a given key-value pair. The key should not exist (if it exists, the - * result is undefined). + * Remove a key-value pair, if the key exists. * - * @param key the key - * @param value the value + * @param key the key (may not be null) + * @return the old value if the key existed, or null otherwise */ - public void add(SpatialKey key, V value) { - putOrAdd(key, value, true); + @Override + public V remove(Object key) { + return operate((Spatial) key, null, DecisionMaker.REMOVE); } - private synchronized Object putOrAdd(SpatialKey key, V value, boolean alwaysAdd) { - beforeWrite(); - long v = writeVersion; - Page p = root.copy(v); - Object result; - if (alwaysAdd || get(key) == null) { - if (p.getMemory() > store.getPageSplitSize() && - p.getKeyCount() > 3) { + @Override + public V operate(Spatial key, V value, DecisionMaker decisionMaker) { + int attempt = 0; + final Collection> removedPages = isPersistent() ? 
new ArrayList<>() : null; + while(true) { + RootReference rootReference = flushAndGetRoot(); + if (attempt++ == 0 && !rootReference.isLockedByCurrentThread()) { + beforeWrite(); + } + Page p = rootReference.root; + if (removedPages != null && p.getTotalCount() > 0) { + removedPages.add(p); + } + p = p.copy(); + V result = operate(p, key, value, decisionMaker, removedPages); + if (!p.isLeaf() && p.getTotalCount() == 0) { + if (removedPages != null) { + removedPages.add(p); + } + p = createEmptyLeaf(); + } else if (p.getKeyCount() > store.getKeysPerPage() || p.getMemory() > store.getMaxPageSize() + && p.getKeyCount() > 3) { // only possible if this is the root, else we would have // split earlier (this requires pageSplitSize is fixed) long totalCount = p.getTotalCount(); - Page split = split(p, v); - Object k1 = getBounds(p); - Object k2 = getBounds(split); - Object[] keys = { k1, k2 }; - Page.PageReference[] children = { - new Page.PageReference(p, p.getPos(), p.getTotalCount()), - new Page.PageReference(split, split.getPos(), split.getTotalCount()), - new Page.PageReference(null, 0, 0) - }; - p = Page.create(this, v, - keys, null, - children, - totalCount, 0); - // now p is a node; continues + Page split = split(p); + Spatial k1 = getBounds(p); + Spatial k2 = getBounds(split); + Spatial[] keys = p.createKeyStorage(2); + keys[0] = k1; + keys[1] = k2; + Page.PageReference[] children = Page.createRefStorage(3); + children[0] = new Page.PageReference<>(p); + children[1] = new Page.PageReference<>(split); + children[2] = Page.PageReference.empty(); + p = Page.createNode(this, keys, children, totalCount, 0); + if(isPersistent()) { + store.registerUnsavedMemory(p.getMemory()); + } } - add(p, v, key, value); - result = null; - } else { - result = set(p, v, key, value); + + if (removedPages == null) { + if (updateRoot(rootReference, p, attempt)) { + return result; + } + } else { + RootReference lockedRootReference = tryLock(rootReference, attempt); + if (lockedRootReference 
!= null) { + try { + long version = lockedRootReference.version; + int unsavedMemory = 0; + for (Page page : removedPages) { + if (!page.isRemoved()) { + unsavedMemory += page.removePage(version); + } + } + store.registerUnsavedMemory(unsavedMemory); + } finally { + unlockRoot(p); + } + return result; + } + removedPages.clear(); + } + decisionMaker.reset(); } - newRoot(p); - return result; } - /** - * Update the value for the given key. The key must exist. - * - * @param p the page - * @param writeVersion the write version - * @param key the key - * @param value the new value - * @return the old value (never null) - */ - private Object set(Page p, long writeVersion, Object key, Object value) { + private V operate(Page p, Spatial key, V value, DecisionMaker decisionMaker, + Collection> removedPages) { + V result; if (p.isLeaf()) { - for (int i = 0; i < p.getKeyCount(); i++) { + int index = -1; + int keyCount = p.getKeyCount(); + for (int i = 0; i < keyCount; i++) { if (keyType.equals(p.getKey(i), key)) { - return p.setValue(i, value); + index = i; } } - } else { - for (int i = 0; i < p.getKeyCount(); i++) { - if (contains(p, i, key)) { - Page c = p.getChildPage(i); - if (get(c, key) != null) { - c = c.copy(writeVersion); - Object result = set(c, writeVersion, key, value); - p.setChild(i, c); - return result; + result = index < 0 ? 
null : p.getValue(index); + Decision decision = decisionMaker.decide(result, value); + switch (decision) { + case REPEAT: + case ABORT: + break; + case REMOVE: + if(index >= 0) { + p.remove(index); } - } + break; + case PUT: + value = decisionMaker.selectValue(result, value); + if(index < 0) { + p.insertLeaf(p.getKeyCount(), key, value); + } else { + p.setKey(index, key); + p.setValue(index, value); + } + break; } + return result; } - throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, - "Not found: {0}", key); - } - private void add(Page p, long writeVersion, Object key, Object value) { - if (p.isLeaf()) { - p.insertLeaf(p.getKeyCount(), key, value); - return; - } - // p is a node + // p is an internal node int index = -1; for (int i = 0; i < p.getKeyCount(); i++) { if (contains(p, i, key)) { - index = i; - break; + Page c = p.getChildPage(i); + if(get(c, key) != null) { + index = i; + break; + } + if(index < 0) { + index = i; + } } } if (index < 0) { @@ -282,41 +242,80 @@ private void add(Page p, long writeVersion, Object key, Object value) { } } } - Page c = p.getChildPage(index).copy(writeVersion); - if (c.getMemory() > store.getPageSplitSize() && c.getKeyCount() > 4) { + Page c = p.getChildPage(index); + if (removedPages != null) { + removedPages.add(c); + } + c = c.copy(); + if (c.getKeyCount() > store.getKeysPerPage() || c.getMemory() > store.getMaxPageSize() + && c.getKeyCount() > 4) { // split on the way down - Page split = split(c, writeVersion); + Page split = split(c); p.setKey(index, getBounds(c)); p.setChild(index, c); p.insertNode(index, getBounds(split), split); // now we are not sure where to add - add(p, writeVersion, key, value); - return; - } - add(c, writeVersion, key, value); - Object bounds = p.getKey(index); - keyType.increaseBounds(bounds, key); - p.setKey(index, bounds); - p.setChild(index, c); + result = operate(p, key, value, decisionMaker, removedPages); + } else { + result = operate(c, key, value, decisionMaker, 
removedPages); + Spatial bounds = p.getKey(index); + if (!keyType.contains(bounds, key)) { + bounds = keyType.createBoundingBox(bounds); + keyType.increaseBounds(bounds, key); + p.setKey(index, bounds); + } + if (c.getTotalCount() > 0) { + p.setChild(index, c); + } else { + p.remove(index); + } + } + return result; + } + + private Spatial getBounds(Page x) { + Spatial bounds = keyType.createBoundingBox(x.getKey(0)); + int keyCount = x.getKeyCount(); + for (int i = 1; i < keyCount; i++) { + keyType.increaseBounds(bounds, x.getKey(i)); + } + return bounds; + } + + @Override + public V put(Spatial key, V value) { + return operate(key, value, DecisionMaker.PUT); } - private Page split(Page p, long writeVersion) { + /** + * Add a given key-value pair. The key should not exist (if it exists, the + * result is undefined). + * + * @param key the key + * @param value the value + */ + public void add(Spatial key, V value) { + operate(key, value, DecisionMaker.PUT); + } + + private Page split(Page p) { return quadraticSplit ? 
- splitQuadratic(p, writeVersion) : - splitLinear(p, writeVersion); + splitQuadratic(p) : + splitLinear(p); } - private Page splitLinear(Page p, long writeVersion) { - ArrayList keys = New.arrayList(); - for (int i = 0; i < p.getKeyCount(); i++) { + private Page splitLinear(Page p) { + int keyCount = p.getKeyCount(); + ArrayList keys = new ArrayList<>(keyCount); + for (int i = 0; i < keyCount; i++) { keys.add(p.getKey(i)); } int[] extremes = keyType.getExtremes(keys); if (extremes == null) { - return splitQuadratic(p, writeVersion); + return splitQuadratic(p); } - Page splitA = newPage(p.isLeaf(), writeVersion); - Page splitB = newPage(p.isLeaf(), writeVersion); + Page splitA = newPage(p.isLeaf()); + Page splitB = newPage(p.isLeaf()); move(p, splitA, extremes[0]); if (extremes[1] > extremes[0]) { extremes[1]--; @@ -342,14 +341,15 @@ private Page splitLinear(Page p, long writeVersion) { return splitA; } - private Page splitQuadratic(Page p, long writeVersion) { - Page splitA = newPage(p.isLeaf(), writeVersion); - Page splitB = newPage(p.isLeaf(), writeVersion); + private Page splitQuadratic(Page p) { + Page splitA = newPage(p.isLeaf()); + Page splitB = newPage(p.isLeaf()); float largest = Float.MIN_VALUE; int ia = 0, ib = 0; - for (int a = 0; a < p.getKeyCount(); a++) { + int keyCount = p.getKeyCount(); + for (int a = 0; a < keyCount; a++) { Object objA = p.getKey(a); - for (int b = 0; b < p.getKeyCount(); b++) { + for (int b = 0; b < keyCount; b++) { if (a == b) { continue; } @@ -372,7 +372,8 @@ private Page splitQuadratic(Page p, long writeVersion) { while (p.getKeyCount() > 0) { float diff = 0, bestA = 0, bestB = 0; int best = 0; - for (int i = 0; i < p.getKeyCount(); i++) { + keyCount = p.getKeyCount(); + for (int i = 0; i < keyCount; i++) { Object o = p.getKey(i); float incA = keyType.getAreaIncrease(boundsA, o); float incB = keyType.getAreaIncrease(boundsB, o); @@ -398,29 +399,21 @@ private Page splitQuadratic(Page p, long writeVersion) { return splitA; } - 
private Page newPage(boolean leaf, long writeVersion) { - Object[] values; - Page.PageReference[] refs; - if (leaf) { - values = Page.EMPTY_OBJECT_ARRAY; - refs = null; - } else { - values = null; - refs = new Page.PageReference[] { - new Page.PageReference(null, 0, 0)}; + private Page newPage(boolean leaf) { + Page page = leaf ? createEmptyLeaf() : createEmptyNode(); + if(isPersistent()) { + store.registerUnsavedMemory(page.getMemory()); } - return Page.create(this, writeVersion, - Page.EMPTY_OBJECT_ARRAY, values, - refs, 0, 0); + return page; } - private static void move(Page source, Page target, int sourceIndex) { - Object k = source.getKey(sourceIndex); + private static void move(Page source, Page target, int sourceIndex) { + Spatial k = source.getKey(sourceIndex); if (source.isLeaf()) { - Object v = source.getValue(sourceIndex); + V v = source.getValue(sourceIndex); target.insertLeaf(0, k, v); } else { - Page c = source.getChildPage(sourceIndex); + Page c = source.getChildPage(sourceIndex); target.insertNode(0, k, c); } source.remove(sourceIndex); @@ -433,15 +426,17 @@ private static void move(Page source, Page target, int sourceIndex) { * @param list the list * @param p the root page */ - public void addNodeKeys(ArrayList list, Page p) { + public void addNodeKeys(ArrayList list, Page p) { if (p != null && !p.isLeaf()) { - for (int i = 0; i < p.getKeyCount(); i++) { - list.add((SpatialKey) p.getKey(i)); + int keyCount = p.getKeyCount(); + for (int i = 0; i < keyCount; i++) { + list.add(p.getKey(i)); addNodeKeys(list, p.getChildPage(i)); } } } + @SuppressWarnings("unused") public boolean isQuadraticSplit() { return quadraticSplit; } @@ -451,22 +446,22 @@ public void setQuadraticSplit(boolean quadraticSplit) { } @Override - protected int getChildPageCount(Page p) { + protected int getChildPageCount(Page p) { return p.getRawChildPageCount() - 1; } /** * A cursor to iterate over a subset of the keys. 
*/ - public static class RTreeCursor implements Iterator { + public abstract static class RTreeCursor implements Iterator { - private final SpatialKey filter; - private CursorPos pos; - private SpatialKey current; - private final Page root; + private final Spatial filter; + private CursorPos pos; + private Spatial current; + private final Page root; private boolean initialized; - protected RTreeCursor(Page root, SpatialKey filter) { + protected RTreeCursor(Page root, Spatial filter) { this.root = root; this.filter = filter; } @@ -475,7 +470,7 @@ protected RTreeCursor(Page root, SpatialKey filter) { public boolean hasNext() { if (!initialized) { // init - pos = new CursorPos(root, 0, null); + pos = new CursorPos<>(root, 0, null); fetchNext(); initialized = true; } @@ -495,30 +490,24 @@ public void skip(long n) { } @Override - public SpatialKey next() { + public Spatial next() { if (!hasNext()) { return null; } - SpatialKey c = current; + Spatial c = current; fetchNext(); return c; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException( - "Removing is not supported"); - } - /** * Fetch the next entry if there is one. 
*/ - protected void fetchNext() { + void fetchNext() { while (pos != null) { - Page p = pos.page; + Page p = pos.page; if (p.isLeaf()) { while (pos.index < p.getKeyCount()) { - SpatialKey c = (SpatialKey) p.getKey(pos.index++); + Spatial c = p.getKey(pos.index++); if (filter == null || check(true, c, filter)) { current = c; return; @@ -528,10 +517,10 @@ protected void fetchNext() { boolean found = false; while (pos.index < p.getKeyCount()) { int index = pos.index++; - SpatialKey c = (SpatialKey) p.getKey(index); + Spatial c = p.getKey(index); if (filter == null || check(false, c, filter)) { - Page child = pos.page.getChildPage(index); - pos = new CursorPos(child, 0, pos); + Page child = pos.page.getChildPage(index); + pos = new CursorPos<>(child, 0, pos); found = true; break; } @@ -554,10 +543,38 @@ protected void fetchNext() { * @param test the user-supplied test key * @return true if there is a match */ - protected boolean check(boolean leaf, SpatialKey key, SpatialKey test) { - return true; + protected abstract boolean check(boolean leaf, Spatial key, Spatial test); + } + + private static final class IntersectsRTreeCursor extends RTreeCursor { + private final SpatialDataType keyType; + + public IntersectsRTreeCursor(Page root, Spatial filter, SpatialDataType keyType) { + super(root, filter); + this.keyType = keyType; + } + + @Override + protected boolean check(boolean leaf, Spatial key, + Spatial test) { + return keyType.isOverlap(key, test); + } + } + + private static final class ContainsRTreeCursor extends RTreeCursor { + private final SpatialDataType keyType; + + public ContainsRTreeCursor(Page root, Spatial filter, SpatialDataType keyType) { + super(root, filter); + this.keyType = keyType; } + @Override + protected boolean check(boolean leaf, Spatial key, Spatial test) { + return leaf ? 
+ keyType.isInside(key, test) : + keyType.isOverlap(key, test); + } } @Override @@ -570,17 +587,15 @@ public String getType() { * * @param the value type */ - public static class Builder implements - MVMap.MapBuilder, SpatialKey, V> { + public static class Builder extends MVMap.BasicBuilder, Spatial, V> { private int dimensions = 2; - private DataType valueType; /** * Create a new builder for maps with 2 dimensions. */ public Builder() { - // default + setKeyType(new SpatialDataType(dimensions)); } /** @@ -591,6 +606,7 @@ public Builder() { */ public Builder dimensions(int dimensions) { this.dimensions = dimensions; + setKeyType(new SpatialDataType(dimensions)); return this; } @@ -600,19 +616,15 @@ public Builder dimensions(int dimensions) { * @param valueType the key type * @return this */ - public Builder valueType(DataType valueType) { - this.valueType = valueType; + @Override + public Builder valueType(DataType valueType) { + setValueType(valueType); return this; } @Override - public MVRTreeMap create() { - if (valueType == null) { - valueType = new ObjectDataType(); - } - return new MVRTreeMap(dimensions, valueType); + public MVRTreeMap create(Map config) { + return new MVRTreeMap<>(config, (SpatialDataType)getKeyType(), getValueType()); } - } - } diff --git a/h2/src/main/org/h2/mvstore/rtree/Spatial.java b/h2/src/main/org/h2/mvstore/rtree/Spatial.java new file mode 100644 index 0000000000..1b9682d354 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/rtree/Spatial.java @@ -0,0 +1,76 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.rtree; + +/** + * Interface Spatial represents boxes in 2+ dimensional space, + * where total ordering is not that straight-forward. + * They can be used as keys for MVRTree. 
+ * + * @author Andrei Tokar + */ +public interface Spatial +{ + /** + * Get the minimum value for the given dimension. + * + * @param dim the dimension + * @return the value + */ + float min(int dim); + + /** + * Set the minimum value for the given dimension. + * + * @param dim the dimension + * @param x the value + */ + void setMin(int dim, float x); + + /** + * Get the maximum value for the given dimension. + * + * @param dim the dimension + * @return the value + */ + float max(int dim); + + /** + * Set the maximum value for the given dimension. + * + * @param dim the dimension + * @param x the value + */ + void setMax(int dim, float x); + + /** + * Creates a copy of this Spatial object with different id. + * + * @param id for the new Spatial object + * @return a clone + */ + Spatial clone(long id); + + /** + * Get id of this Spatial object + * @return id + */ + long getId(); + + /** + * Test whether this object has no value + * @return true if it is NULL, false otherwise + */ + boolean isNull(); + + /** + * Check whether two objects are equals, but do not compare the id fields. + * + * @param o the other key + * @return true if the contents are the same + */ + boolean equalsIgnoringId(Spatial o); +} diff --git a/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java b/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java index 340e305cad..6af8a5887e 100644 --- a/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java +++ b/h2/src/main/org/h2/mvstore/rtree/SpatialDataType.java @@ -1,22 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.mvstore.rtree; import java.nio.ByteBuffer; import java.util.ArrayList; + import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.BasicDataType; /** * A spatial data type. This class supports up to 31 dimensions. Each dimension * can have a minimum and a maximum value of type float. For each dimension, the * maximum value is only stored when it is not the same as the minimum. */ -public class SpatialDataType implements DataType { +public class SpatialDataType extends BasicDataType { private final int dimensions; @@ -30,8 +31,24 @@ public SpatialDataType(int dimensions) { this.dimensions = dimensions; } + /** + * Creates spatial object with specified parameters. + * + * @param id the ID + * @param minMax min x, max x, min y, max y, and so on + * @return the spatial object + */ + protected Spatial create(long id, float... minMax) { + return new DefaultSpatial(id, minMax); + } + @Override - public int compare(Object a, Object b) { + public Spatial[] createStorage(int size) { + return new Spatial[size]; + } + + @Override + public int compare(Spatial a, Spatial b) { if (a == b) { return 0; } else if (a == null) { @@ -39,9 +56,9 @@ public int compare(Object a, Object b) { } else if (b == null) { return 1; } - long la = ((SpatialKey) a).getId(); - long lb = ((SpatialKey) b).getId(); - return la < lb ? -1 : la > lb ? 
1 : 0; + long la = a.getId(); + long lb = b.getId(); + return Long.compare(la, lb); } /** @@ -57,37 +74,23 @@ public boolean equals(Object a, Object b) { } else if (a == null || b == null) { return false; } - long la = ((SpatialKey) a).getId(); - long lb = ((SpatialKey) b).getId(); + long la = ((Spatial) a).getId(); + long lb = ((Spatial) b).getId(); return la == lb; } @Override - public int getMemory(Object obj) { + public int getMemory(Spatial obj) { return 40 + dimensions * 4; } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public void write(WriteBuffer buff, Object obj) { - if (obj == null) { + public void write(WriteBuffer buff, Spatial k) { + if (k.isNull()) { buff.putVarInt(-1); + buff.putVarLong(k.getId()); return; } - SpatialKey k = (SpatialKey) obj; int flags = 0; for (int i = 0; i < dimensions; i++) { if (k.min(i) == k.max(i)) { @@ -105,10 +108,11 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff) { + public Spatial read(ByteBuffer buff) { int flags = DataUtils.readVarInt(buff); if (flags == -1) { - return null; + long id = DataUtils.readVarLong(buff); + return create(id); } float[] minMax = new float[dimensions * 2]; for (int i = 0; i < dimensions; i++) { @@ -123,19 +127,20 @@ public Object read(ByteBuffer buff) { minMax[i + i + 1] = max; } long id = DataUtils.readVarLong(buff); - return new SpatialKey(id, minMax); + return create(id, minMax); } /** * Check whether the two objects overlap. 
* - * @param objA the first object - * @param objB the second object + * @param a the first object + * @param b the second object * @return true if they overlap */ - public boolean isOverlap(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + public boolean isOverlap(Spatial a, Spatial b) { + if (a.isNull() || b.isNull()) { + return false; + } for (int i = 0; i < dimensions; i++) { if (a.max(i) < b.min(i) || a.min(i) > b.max(i)) { return false; @@ -151,11 +156,20 @@ public boolean isOverlap(Object objA, Object objB) { * @param add the value */ public void increaseBounds(Object bounds, Object add) { - SpatialKey b = (SpatialKey) bounds; - SpatialKey a = (SpatialKey) add; + Spatial a = (Spatial) add; + Spatial b = (Spatial) bounds; + if (a.isNull() || b.isNull()) { + return; + } for (int i = 0; i < dimensions; i++) { - b.setMin(i, Math.min(b.min(i), a.min(i))); - b.setMax(i, Math.max(b.max(i), a.max(i))); + float v = a.min(i); + if (v < b.min(i)) { + b.setMin(i, v); + } + v = a.max(i); + if (v > b.max(i)) { + b.setMax(i, v); + } } } @@ -167,8 +181,11 @@ public void increaseBounds(Object bounds, Object add) { * @return the area */ public float getAreaIncrease(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial b = (Spatial) objB; + Spatial a = (Spatial) objA; + if (a.isNull() || b.isNull()) { + return 0; + } float min = a.min(0); float max = a.max(0); float areaOld = max - min; @@ -194,8 +211,13 @@ public float getAreaIncrease(Object objA, Object objB) { * @return the area */ float getCombinedArea(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; + if (a.isNull()) { + return getArea(b); + } else if (b.isNull()) { + return getArea(a); + } float area = 1; for (int i = 0; i < dimensions; i++) { float min = Math.min(a.min(i), b.min(i)); @@ -205,6 +227,17 @@ 
float getCombinedArea(Object objA, Object objB) { return area; } + private float getArea(Spatial a) { + if (a.isNull()) { + return 0; + } + float area = 1; + for (int i = 0; i < dimensions; i++) { + area *= a.max(i) - a.min(i); + } + return area; + } + /** * Check whether a contains b. * @@ -213,8 +246,11 @@ float getCombinedArea(Object objA, Object objB) { * @return the area */ public boolean contains(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; + if (a.isNull() || b.isNull()) { + return false; + } for (int i = 0; i < dimensions; i++) { if (a.min(i) > b.min(i) || a.max(i) < b.max(i)) { return false; @@ -232,8 +268,11 @@ public boolean contains(Object objA, Object objB) { * @return true if a is completely inside b */ public boolean isInside(Object objA, Object objB) { - SpatialKey a = (SpatialKey) objA; - SpatialKey b = (SpatialKey) objB; + Spatial a = (Spatial) objA; + Spatial b = (Spatial) objB; + if (a.isNull() || b.isNull()) { + return false; + } for (int i = 0; i < dimensions; i++) { if (a.min(i) <= b.min(i) || a.max(i) >= b.max(i)) { return false; @@ -248,14 +287,12 @@ public boolean isInside(Object objA, Object objB) { * @param objA the object * @return the bounding box */ - Object createBoundingBox(Object objA) { - float[] minMax = new float[dimensions * 2]; - SpatialKey a = (SpatialKey) objA; - for (int i = 0; i < dimensions; i++) { - minMax[i + i] = a.min(i); - minMax[i + i + 1] = a.max(i); + Spatial createBoundingBox(Object objA) { + Spatial a = (Spatial) objA; + if (a.isNull()) { + return a; } - return new SpatialKey(0, minMax); + return a.clone(0); } /** @@ -267,15 +304,18 @@ Object createBoundingBox(Object objA) { * @return the indexes of the extremes */ public int[] getExtremes(ArrayList list) { - SpatialKey bounds = (SpatialKey) createBoundingBox(list.get(0)); - SpatialKey boundsInner = (SpatialKey) createBoundingBox(bounds); + list = 
getNotNull(list); + if (list.isEmpty()) { + return null; + } + Spatial bounds = createBoundingBox(list.get(0)); + Spatial boundsInner = createBoundingBox(bounds); for (int i = 0; i < dimensions; i++) { float t = boundsInner.min(i); boundsInner.setMin(i, boundsInner.max(i)); boundsInner.setMax(i, t); } - for (int i = 0; i < list.size(); i++) { - Object o = list.get(i); + for (Object o : list) { increaseBounds(bounds, o); increaseMaxInnerBounds(boundsInner, o); } @@ -301,7 +341,7 @@ public int[] getExtremes(ArrayList list) { int firstIndex = -1, lastIndex = -1; for (int i = 0; i < list.size() && (firstIndex < 0 || lastIndex < 0); i++) { - SpatialKey o = (SpatialKey) list.get(i); + Spatial o = (Spatial) list.get(i); if (firstIndex < 0 && o.max(bestDim) == min) { firstIndex = i; } else if (lastIndex < 0 && o.min(bestDim) == max) { @@ -311,9 +351,31 @@ public int[] getExtremes(ArrayList list) { return new int[] { firstIndex, lastIndex }; } + private static ArrayList getNotNull(ArrayList list) { + boolean foundNull = false; + for (Object o : list) { + Spatial a = (Spatial) o; + if (a.isNull()) { + foundNull = true; + break; + } + } + if (!foundNull) { + return list; + } + ArrayList result = new ArrayList<>(); + for (Object o : list) { + Spatial a = (Spatial) o; + if (!a.isNull()) { + result.add(a); + } + } + return result; + } + private void increaseMaxInnerBounds(Object bounds, Object add) { - SpatialKey b = (SpatialKey) bounds; - SpatialKey a = (SpatialKey) add; + Spatial b = (Spatial) bounds; + Spatial a = (Spatial) add; for (int i = 0; i < dimensions; i++) { b.setMin(i, Math.min(b.min(i), a.max(i))); b.setMax(i, Math.max(b.max(i), a.min(i))); diff --git a/h2/src/main/org/h2/mvstore/rtree/SpatialKey.java b/h2/src/main/org/h2/mvstore/rtree/SpatialKey.java deleted file mode 100644 index ac05524cd7..0000000000 --- a/h2/src/main/org/h2/mvstore/rtree/SpatialKey.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.mvstore.rtree; - -import java.util.Arrays; - -/** - * A unique spatial key. - */ -public class SpatialKey { - - private final long id; - private final float[] minMax; - - /** - * Create a new key. - * - * @param id the id - * @param minMax min x, max x, min y, max y, and so on - */ - public SpatialKey(long id, float... minMax) { - this.id = id; - this.minMax = minMax; - } - - /** - * Get the minimum value for the given dimension. - * - * @param dim the dimension - * @return the value - */ - public float min(int dim) { - return minMax[dim + dim]; - } - - /** - * Set the minimum value for the given dimension. - * - * @param dim the dimension - * @param x the value - */ - public void setMin(int dim, float x) { - minMax[dim + dim] = x; - } - - /** - * Get the maximum value for the given dimension. - * - * @param dim the dimension - * @return the value - */ - public float max(int dim) { - return minMax[dim + dim + 1]; - } - - /** - * Set the maximum value for the given dimension. 
- * - * @param dim the dimension - * @param x the value - */ - public void setMax(int dim, float x) { - minMax[dim + dim + 1] = x; - } - - public long getId() { - return id; - } - - @Override - public String toString() { - StringBuilder buff = new StringBuilder(); - buff.append(id).append(": ("); - for (int i = 0; i < minMax.length; i += 2) { - if (i > 0) { - buff.append(", "); - } - buff.append(minMax[i]).append('/').append(minMax[i + 1]); - } - return buff.append(")").toString(); - } - - @Override - public int hashCode() { - return (int) ((id >>> 32) ^ id); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (!(other instanceof SpatialKey)) { - return false; - } - SpatialKey o = (SpatialKey) other; - if (id != o.id) { - return false; - } - return equalsIgnoringId(o); - } - - /** - * Check whether two objects are equals, but do not compare the id fields. - * - * @param o the other key - * @return true if the contents are the same - */ - public boolean equalsIgnoringId(SpatialKey o) { - return Arrays.equals(minMax, o.minMax); - } - -} diff --git a/h2/src/main/org/h2/mvstore/rtree/package.html b/h2/src/main/org/h2/mvstore/rtree/package.html index 997e6112e4..240224c617 100644 --- a/h2/src/main/org/h2/mvstore/rtree/package.html +++ b/h2/src/main/org/h2/mvstore/rtree/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java new file mode 100644 index 0000000000..f3867b3b86 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/CommitDecisionMaker.java @@ -0,0 +1,65 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import org.h2.mvstore.MVMap; +import org.h2.value.VersionedValue; + +/** + * Class CommitDecisionMaker makes a decision during post-commit processing + * about how to transform uncommitted map entry into committed one, + * based on undo log information. + * + * @author Andrei Tokar + */ +final class CommitDecisionMaker extends MVMap.DecisionMaker> { + private long undoKey; + private MVMap.Decision decision; + + void setUndoKey(long undoKey) { + this.undoKey = undoKey; + reset(); + } + + @Override + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + assert decision == null; + if (existingValue == null || + // map entry was treated as already committed, and then + // it has been removed by another transaction (committed and closed by now) + existingValue.getOperationId() != undoKey) { + // this is not a final undo log entry for this key, + // or map entry was treated as already committed and then + // overwritten by another transaction + // see TxDecisionMaker.decide() + + decision = MVMap.Decision.ABORT; + } else /* this is final undo log entry for this key */ if (existingValue.getCurrentValue() == null) { + decision = MVMap.Decision.REMOVE; + } else { + decision = MVMap.Decision.PUT; + } + return decision; + } + + @SuppressWarnings("unchecked") + @Override + public > T selectValue(T existingValue, T providedValue) { + assert decision == MVMap.Decision.PUT; + assert existingValue != null; + return (T) VersionedValueCommitted.getInstance(existingValue.getCurrentValue()); + } + + @Override + public void reset() { + decision = null; + } + + @Override + public String toString() { + return "commit " + TransactionStore.getTransactionId(undoKey); + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/Record.java b/h2/src/main/org/h2/mvstore/tx/Record.java new file mode 100644 index 0000000000..4da15fdb44 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/Record.java @@ 
-0,0 +1,118 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.nio.ByteBuffer; +import org.h2.engine.Constants; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.value.VersionedValue; + +/** + * Class Record is a value for undoLog. + * It contains information about a single change of some map. + * + * @author Andrei Tokar + */ +final class Record { + + // -1 is a bogus map id + static final Record COMMIT_MARKER = new Record<>(-1, null, null); + + /** + * Map id for this change is related to + */ + final int mapId; + + /** + * Key of the changed map entry key + */ + final K key; + + /** + * Value of the entry before change. + * It is null if entry did not exist before the change (addition). + */ + final VersionedValue oldValue; + + Record(int mapId, K key, VersionedValue oldValue) { + this.mapId = mapId; + this.key = key; + this.oldValue = oldValue; + } + + @Override + public String toString() { + return "mapId=" + mapId + ", key=" + key + ", value=" + oldValue; + } + + /** + * A data type for undo log values + */ + static final class Type extends BasicDataType> { + private final TransactionStore transactionStore; + + Type(TransactionStore transactionStore) { + this.transactionStore = transactionStore; + } + + @Override + public int getMemory(Record record) { + int result = Constants.MEMORY_OBJECT + 4 + 3 * Constants.MEMORY_POINTER; + if (record.mapId >= 0) { + MVMap> map = transactionStore.getMap(record.mapId); + result += map.getKeyType().getMemory(record.key) + + map.getValueType().getMemory(record.oldValue); + } + return result; + } + + @Override + public int compare(Record aObj, Record bObj) { + throw new UnsupportedOperationException(); + } + + @Override + public void write(WriteBuffer 
buff, Record record) { + buff.putVarInt(record.mapId); + if (record.mapId >= 0) { + MVMap> map = transactionStore.getMap(record.mapId); + map.getKeyType().write(buff, record.key); + VersionedValue oldValue = record.oldValue; + if (oldValue == null) { + buff.put((byte) 0); + } else { + buff.put((byte) 1); + map.getValueType().write(buff, oldValue); + } + } + } + + @SuppressWarnings("unchecked") + @Override + public Record read(ByteBuffer buff) { + int mapId = DataUtils.readVarInt(buff); + if (mapId < 0) { + return (Record)COMMIT_MARKER; + } + MVMap> map = transactionStore.getMap(mapId); + K key = map.getKeyType().read(buff); + VersionedValue oldValue = null; + if (buff.get() == 1) { + oldValue = map.getValueType().read(buff); + } + return new Record<>(mapId, key, oldValue); + } + + @SuppressWarnings("unchecked") + @Override + public Record[] createStorage(int size) { + return new Record[size]; + } + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java new file mode 100644 index 0000000000..923605ed56 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/RollbackDecisionMaker.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import org.h2.mvstore.MVMap; +import org.h2.value.VersionedValue; + +/** + * Class RollbackDecisionMaker process undo log record during transaction rollback. 
+ * + * @author Andrei Tokar + */ +final class RollbackDecisionMaker extends MVMap.DecisionMaker> { + private final TransactionStore store; + private final long transactionId; + private final long toLogId; + private final TransactionStore.RollbackListener listener; + private MVMap.Decision decision; + + RollbackDecisionMaker(TransactionStore store, long transactionId, long toLogId, + TransactionStore.RollbackListener listener) { + this.store = store; + this.transactionId = transactionId; + this.toLogId = toLogId; + this.listener = listener; + } + + @SuppressWarnings({"unchecked","rawtypes"}) + @Override + public MVMap.Decision decide(Record existingValue, Record providedValue) { + assert decision == null; + if (existingValue == null) { + // normally existingValue will always be there except of db initialization + // where some undo log entry was captured on disk but actual map entry was not + decision = MVMap.Decision.ABORT; + } else { + VersionedValue valueToRestore = existingValue.oldValue; + long operationId; + if (valueToRestore == null || + (operationId = valueToRestore.getOperationId()) == 0 || + TransactionStore.getTransactionId(operationId) == transactionId + && TransactionStore.getLogId(operationId) < toLogId) { + int mapId = existingValue.mapId; + MVMap> map = store.openMap(mapId); + if (map != null && !map.isClosed()) { + Object key = existingValue.key; + VersionedValue previousValue = map.operate(key, valueToRestore, + MVMap.DecisionMaker.DEFAULT); + listener.onRollback(map, key, previousValue, valueToRestore); + } + } + decision = MVMap.Decision.REMOVE; + } + return decision; + } + + @Override + public void reset() { + decision = null; + } + + @Override + public String toString() { + return "rollback-" + transactionId; + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/Snapshot.java b/h2/src/main/org/h2/mvstore/tx/Snapshot.java new file mode 100644 index 0000000000..224d1ce1ff --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/Snapshot.java @@ -0,0 +1,54 
@@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.BitSet; + +import org.h2.mvstore.RootReference; + +/** + * Snapshot of the map root and committing transactions. + */ +final class Snapshot { + + /** + * The root reference. + */ + final RootReference root; + + /** + * The committing transactions (see also TransactionStore.committingTransactions). + */ + final BitSet committingTransactions; + + Snapshot(RootReference root, BitSet committingTransactions) { + this.root = root; + this.committingTransactions = committingTransactions; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + committingTransactions.hashCode(); + result = prime * result + root.hashCode(); + return result; + } + + @SuppressWarnings("unchecked") + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Snapshot)) { + return false; + } + Snapshot other = (Snapshot) obj; + return committingTransactions == other.committingTransactions && root == other.root; + } + +} diff --git a/h2/src/main/org/h2/mvstore/tx/Transaction.java b/h2/src/main/org/h2/mvstore/tx/Transaction.java new file mode 100644 index 0000000000..892bf4ef79 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/Transaction.java @@ -0,0 +1,807 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.engine.IsolationLevel; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.RootReference; +import org.h2.mvstore.type.DataType; +import org.h2.value.VersionedValue; + +/** + * A transaction. + */ +public final class Transaction { + + /** + * The status of a closed transaction (committed or rolled back). + */ + public static final int STATUS_CLOSED = 0; + + /** + * The status of an open transaction. + */ + public static final int STATUS_OPEN = 1; + + /** + * The status of a prepared transaction. + */ + public static final int STATUS_PREPARED = 2; + + /** + * The status of a transaction that has been logically committed or rather + * marked as committed, because it might be still listed among prepared, + * if it was prepared for commit. Undo log entries might still exists for it + * and not all of it's changes within map's are re-written as committed yet. + * Nevertheless, those changes should be already viewed by other + * transactions as committed. + * This transaction's id can not be re-used until all of the above is completed + * and transaction is closed. + * A transactions can be observed in this state when the store was + * closed while the transaction was not closed yet. + * When opening a store, such transactions will automatically + * be processed and closed as committed. + */ + public static final int STATUS_COMMITTED = 3; + + /** + * The status of a transaction that currently in a process of rolling back + * to a savepoint. + */ + private static final int STATUS_ROLLING_BACK = 4; + + /** + * The status of a transaction that has been rolled back completely, + * but undo operations are not finished yet. 
+ */ + private static final int STATUS_ROLLED_BACK = 5; + + private static final String[] STATUS_NAMES = { + "CLOSED", "OPEN", "PREPARED", "COMMITTED", "ROLLING_BACK", "ROLLED_BACK" + }; + /** + * How many bits of the "operation id" we store in the transaction belong to the + * log id (the rest belong to the transaction id). + */ + static final int LOG_ID_BITS = 40; + private static final int LOG_ID_BITS1 = LOG_ID_BITS + 1; + private static final long LOG_ID_LIMIT = 1L << LOG_ID_BITS; + private static final long LOG_ID_MASK = (1L << LOG_ID_BITS1) - 1; + private static final int STATUS_BITS = 4; + private static final int STATUS_MASK = (1 << STATUS_BITS) - 1; + + + /** + * The transaction store. + */ + final TransactionStore store; + + /** + * Listener for this transaction's rollback changes. + */ + final TransactionStore.RollbackListener listener; + + /** + * The transaction id. + * More appropriate name for this field would be "slotId" + */ + final int transactionId; + + /** + * This is really a transaction identity, because it's not re-used. + */ + final long sequenceNum; + + /* + * Transaction state is an atomic composite field: + * bit 45 : flag whether transaction had rollback(s) + * bits 44-41 : status + * bits 40 : overflow control bit, 1 indicates overflow + * bits 39-0 : log id of the last entry in the undo log map + */ + private final AtomicLong statusAndLogId; + + /** + * Reference to a counter for an earliest store version used by this transaction. + * Referenced version and all newer ones can not be discarded + * at least until this transaction ends. + */ + private MVStore.TxCounter txCounter; + + /** + * Transaction name. + */ + private String name; + + /** + * Indicates whether this transaction was stored in preparedTransactions map + */ + boolean wasStored; + + /** + * How long to wait for blocking transaction to commit or rollback. 
+ */ + int timeoutMillis; + + /** + * Identification of the owner of this transaction, + * usually the owner is a database session. + */ + private final int ownerId; + + /** + * Blocking transaction, if any + */ + private volatile Transaction blockingTransaction; + + /** + * Map on which this transaction is blocked. + */ + private String blockingMapName; + + /** + * Key in blockingMap on which this transaction is blocked. + */ + private Object blockingKey; + + /** + * Whether other transaction(s) are waiting for this to close. + */ + private volatile boolean notificationRequested; + + /** + * RootReferences for undo log snapshots + */ + private RootReference>[] undoLogRootReferences; + + /** + * Map of transactional maps for this transaction + */ + private final Map> transactionMaps = new HashMap<>(); + + /** + * The current isolation level. + */ + final IsolationLevel isolationLevel; + + + Transaction(TransactionStore store, int transactionId, long sequenceNum, int status, + String name, long logId, int timeoutMillis, int ownerId, + IsolationLevel isolationLevel, TransactionStore.RollbackListener listener) { + this.store = store; + this.transactionId = transactionId; + this.sequenceNum = sequenceNum; + this.statusAndLogId = new AtomicLong(composeState(status, logId, false)); + this.name = name; + setTimeoutMillis(timeoutMillis); + this.ownerId = ownerId; + this.isolationLevel = isolationLevel; + this.listener = listener; + } + + public int getId() { + return transactionId; + } + + public long getSequenceNum() { + return sequenceNum; + } + + public int getStatus() { + return getStatus(statusAndLogId.get()); + } + + RootReference>[] getUndoLogRootReferences() { + return undoLogRootReferences; + } + + /** + * Changes transaction status to a specified value + * @param status to be set + * @return transaction state as it was before status change + */ + private long setStatus(int status) { + while (true) { + long currentState = statusAndLogId.get(); + long logId = 
getLogId(currentState); + int currentStatus = getStatus(currentState); + boolean valid; + switch (status) { + case STATUS_ROLLING_BACK: + valid = currentStatus == STATUS_OPEN; + break; + case STATUS_PREPARED: + valid = currentStatus == STATUS_OPEN; + break; + case STATUS_COMMITTED: + valid = currentStatus == STATUS_OPEN || + currentStatus == STATUS_PREPARED || + // this case is only possible if called + // from endLeftoverTransactions() + currentStatus == STATUS_COMMITTED; + break; + case STATUS_ROLLED_BACK: + valid = currentStatus == STATUS_OPEN || + currentStatus == STATUS_PREPARED || + currentStatus == STATUS_ROLLING_BACK; + break; + case STATUS_CLOSED: + valid = currentStatus == STATUS_COMMITTED || + currentStatus == STATUS_ROLLED_BACK; + break; + case STATUS_OPEN: + default: + valid = false; + break; + } + if (!valid) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, + "Transaction was illegally transitioned from {0} to {1}", + getStatusName(currentStatus), getStatusName(status)); + } + long newState = composeState(status, logId, hasRollback(currentState)); + if (statusAndLogId.compareAndSet(currentState, newState)) { + return currentState; + } + } + } + + /** + * Determine if any database changes were made as part of this transaction. + * + * @return true if there are changes to commit, false otherwise + */ + public boolean hasChanges() { + return hasChanges(statusAndLogId.get()); + } + + public void setName(String name) { + checkNotClosed(); + this.name = name; + store.storeTransaction(this); + } + + public String getName() { + return name; + } + + public int getBlockerId() { + Transaction blocker = this.blockingTransaction; + return blocker == null ? 0 : blocker.ownerId; + } + + /** + * Create a new savepoint. + * + * @return the savepoint id + */ + public long setSavepoint() { + return getLogId(); + } + + /** + * Returns whether statement dependencies are currently set. 
+ * + * @return whether statement dependencies are currently set + */ + public boolean hasStatementDependencies() { + return !transactionMaps.isEmpty(); + } + + /** + * Returns the isolation level of this transaction. + * + * @return the isolation level of this transaction + */ + public IsolationLevel getIsolationLevel() { + return isolationLevel; + } + + boolean isReadCommitted() { + return isolationLevel == IsolationLevel.READ_COMMITTED; + } + + /** + * Whether this transaction has isolation level READ_COMMITTED or below. + * @return true if isolation level is READ_COMMITTED or READ_UNCOMMITTED + */ + public boolean allowNonRepeatableRead() { + return isolationLevel.allowNonRepeatableRead(); + } + + /** + * Mark an entry into a new SQL statement execution within this transaction. + * + * @param maps + * set of maps used by transaction or statement is about to be executed + */ + @SuppressWarnings({"unchecked","rawtypes"}) + public void markStatementStart(HashSet>> maps) { + markStatementEnd(); + if (txCounter == null) { + txCounter = store.store.registerVersionUsage(); + } + + if (maps != null && !maps.isEmpty()) { + // The purpose of the following loop is to get a coherent picture + // In order to get such a "snapshot", we wait for a moment of silence, + // when no new transaction were committed / closed. 
+ BitSet committingTransactions; + do { + committingTransactions = store.committingTransactions.get(); + for (MVMap> map : maps) { + TransactionMap txMap = openMapX(map); + txMap.setStatementSnapshot(new Snapshot(map.flushAndGetRoot(), committingTransactions)); + } + if (isReadCommitted()) { + undoLogRootReferences = store.collectUndoLogRootReferences(); + } + } while (committingTransactions != store.committingTransactions.get()); + // Now we have a snapshot, where each map RootReference point to state of the map, + // undoLogRootReferences captures the state of undo logs + // and committingTransactions mask tells us which of seemingly uncommitted changes + // should be considered as committed. + // Subsequent processing uses this snapshot info only. + for (MVMap> map : maps) { + TransactionMap txMap = openMapX(map); + txMap.promoteSnapshot(); + } + } + } + + /** + * Mark an exit from SQL statement execution within this transaction. + */ + public void markStatementEnd() { + if (allowNonRepeatableRead()) { + releaseSnapshot(); + } + for (TransactionMap transactionMap : transactionMaps.values()) { + transactionMap.setStatementSnapshot(null); + } + } + + private void markTransactionEnd() { + if (!allowNonRepeatableRead()) { + releaseSnapshot(); + } + } + + private void releaseSnapshot() { + transactionMaps.clear(); + undoLogRootReferences = null; + MVStore.TxCounter counter = txCounter; + if (counter != null) { + txCounter = null; + store.store.deregisterVersionUsage(counter); + } + } + + /** + * Add a log entry. 
+ * + * @param logRecord to append + * + * @return key for the newly added undo log entry + */ + long log(Record logRecord) { + long currentState = statusAndLogId.getAndIncrement(); + long logId = getLogId(currentState); + if (logId >= LOG_ID_LIMIT) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TRANSACTION_TOO_BIG, + "Transaction {0} has too many changes", + transactionId); + } + int currentStatus = getStatus(currentState); + checkOpen(currentStatus); + long undoKey = store.addUndoLogRecord(transactionId, logId, logRecord); + return undoKey; + } + + /** + * Remove the last log entry. + */ + void logUndo() { + long currentState = statusAndLogId.decrementAndGet(); + long logId = getLogId(currentState); + if (logId >= LOG_ID_LIMIT) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TRANSACTION_CORRUPT, + "Transaction {0} has internal error", + transactionId); + } + int currentStatus = getStatus(currentState); + checkOpen(currentStatus); + store.removeUndoLogRecord(transactionId); + } + + /** + * Open a data map. + * + * @param the key type + * @param the value type + * @param name the name of the map + * @return the transaction map + */ + public TransactionMap openMap(String name) { + return openMap(name, null, null); + } + + /** + * Open the map to store the data. + * + * @param the key type + * @param the value type + * @param name the name of the map + * @param keyType the key data type + * @param valueType the value data type + * @return the transaction map + */ + public TransactionMap openMap(String name, + DataType keyType, + DataType valueType) { + MVMap> map = store.openVersionedMap(name, keyType, valueType); + return openMapX(map); + } + + /** + * Open the transactional version of the given map. 
+ * + * @param the key type + * @param the value type + * @param map the base map + * @return the transactional map + */ + @SuppressWarnings("unchecked") + public TransactionMap openMapX(MVMap> map) { + checkNotClosed(); + int id = map.getId(); + TransactionMap transactionMap = (TransactionMap)transactionMaps.get(id); + if (transactionMap == null) { + transactionMap = new TransactionMap<>(this, map); + transactionMaps.put(id, transactionMap); + } + return transactionMap; + } + + /** + * Prepare the transaction. Afterwards, the transaction can only be + * committed or completely rolled back. + */ + public void prepare() { + setStatus(STATUS_PREPARED); + store.storeTransaction(this); + } + + /** + * Commit the transaction. Afterwards, this transaction is closed. + */ + public void commit() { + assert store.openTransactions.get().get(transactionId); + markTransactionEnd(); + Throwable ex = null; + boolean hasChanges = false; + int previousStatus = STATUS_OPEN; + try { + long state = setStatus(STATUS_COMMITTED); + hasChanges = hasChanges(state); + previousStatus = getStatus(state); + if (hasChanges) { + store.commit(this, previousStatus == STATUS_COMMITTED); + } + } catch (Throwable e) { + ex = e; + throw e; + } finally { + if (isActive(previousStatus)) { + try { + store.endTransaction(this, hasChanges); + } catch (Throwable e) { + if (ex == null) { + throw e; + } else { + ex.addSuppressed(e); + } + } + } + } + } + + /** + * Roll back to the given savepoint. This is only allowed if the + * transaction is open. 
+ * + * @param savepointId the savepoint id + */ + public void rollbackToSavepoint(long savepointId) { + long lastState = setStatus(STATUS_ROLLING_BACK); + long logId = getLogId(lastState); + boolean success; + try { + store.rollbackTo(this, logId, savepointId); + } finally { + notifyAllWaitingTransactions(); + long expectedState = composeState(STATUS_ROLLING_BACK, logId, hasRollback(lastState)); + long newState = composeState(STATUS_OPEN, savepointId, true); + do { + success = statusAndLogId.compareAndSet(expectedState, newState); + } while (!success && statusAndLogId.get() == expectedState); + } + // this is moved outside of finally block to avert masking original exception, if any + if (!success) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, + "Transaction {0} concurrently modified while rollback to savepoint was in progress", + transactionId); + } + } + + /** + * Roll the transaction back. Afterwards, this transaction is closed. + */ + public void rollback() { + markTransactionEnd(); + Throwable ex = null; + int status = STATUS_OPEN; + try { + long lastState = setStatus(STATUS_ROLLED_BACK); + status = getStatus(lastState); + long logId = getLogId(lastState); + if (logId > 0) { + store.rollbackTo(this, logId, 0); + } + } catch (Throwable e) { + status = getStatus(); + if (isActive(status)) { + ex = e; + throw e; + } + } finally { + try { + if (isActive(status)) { + store.endTransaction(this, true); + } + } catch (Throwable e) { + if (ex == null) { + throw e; + } else { + ex.addSuppressed(e); + } + } + } + } + + private static boolean isActive(int status) { + return status != STATUS_CLOSED + && status != STATUS_COMMITTED + && status != STATUS_ROLLED_BACK; + } + + /** + * Get the list of changes, starting with the latest change, up to the + * given savepoint (in reverse order than they occurred). The value of + * the change is the value before the change was applied. 
+ * + * @param savepointId the savepoint id, 0 meaning the beginning of the + * transaction + * @return the changes + */ + public Iterator getChanges(long savepointId) { + return store.getChanges(this, getLogId(), savepointId); + } + + /** + * Sets the new lock timeout. + * + * @param timeoutMillis the new lock timeout in milliseconds + */ + public void setTimeoutMillis(int timeoutMillis) { + this.timeoutMillis = timeoutMillis > 0 ? timeoutMillis : store.timeoutMillis; + } + + private long getLogId() { + return getLogId(statusAndLogId.get()); + } + + /** + * Check whether this transaction is open. + */ + private void checkOpen(int status) { + if (status != STATUS_OPEN) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE, + "Transaction {0} has status {1}, not OPEN", transactionId, getStatusName(status)); + } + } + + /** + * Check whether this transaction is open or prepared. + */ + private void checkNotClosed() { + if (getStatus() == STATUS_CLOSED) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_CLOSED, "Transaction {0} is closed", transactionId); + } + } + + /** + * Transition this transaction into a closed state. + */ + void closeIt() { + transactionMaps.clear(); + long lastState = setStatus(STATUS_CLOSED); + store.store.deregisterVersionUsage(txCounter); + if((hasChanges(lastState) || hasRollback(lastState))) { + notifyAllWaitingTransactions(); + } + } + + private void notifyAllWaitingTransactions() { + if (notificationRequested) { + synchronized (this) { + notifyAll(); + } + } + } + + /** + * Make this transaction to wait for the specified transaction to be closed, + * because both of them try to modify the same map entry. 
+ * + * @param toWaitFor transaction to wait for + * @param mapName name of the map containing blocking entry + * @param key of the blocking entry + * @return true if other transaction was closed and this one can proceed, false if timed out + */ + public boolean waitFor(Transaction toWaitFor, String mapName, Object key) { + blockingTransaction = toWaitFor; + blockingMapName = mapName; + blockingKey = key; + if (isDeadlocked(toWaitFor)) { + tryThrowDeadLockException(false); + } + boolean result = toWaitFor.waitForThisToEnd(timeoutMillis, this); + blockingMapName = null; + blockingKey = null; + blockingTransaction = null; + return result; + } + + private boolean isDeadlocked(Transaction toWaitFor) { + // use transaction sequence No as a tie-breaker + // the youngest transaction should be selected as a victim + Transaction youngest = toWaitFor; + int backstop = store.getMaxTransactionId(); + for(Transaction tx = toWaitFor, nextTx; + (nextTx = tx.blockingTransaction) != null && tx.getStatus() == Transaction.STATUS_OPEN && backstop > 0; + tx = nextTx, --backstop) { + + if (nextTx.sequenceNum > youngest.sequenceNum) { + youngest = nextTx; + } + + if (nextTx == this) { + if (youngest == this) { + return true; + } + Transaction btx = youngest.blockingTransaction; + if (btx != null) { + youngest.setStatus(STATUS_ROLLING_BACK); + btx.notifyAllWaitingTransactions(); + return false; + } + } + } + return false; + } + + private void tryThrowDeadLockException(boolean throwIt) { + BitSet visited = new BitSet(); + StringBuilder details = new StringBuilder( + String.format("Transaction %d has been chosen as a deadlock victim. 
Details:%n", transactionId)); + for (Transaction tx = this, nextTx; + !visited.get(tx.transactionId) && (nextTx = tx.blockingTransaction) != null; tx = nextTx) { + visited.set(tx.transactionId); + details.append(String.format( + "Transaction %d attempts to update map <%s> entry with key <%s> modified by transaction %s%n", + tx.transactionId, tx.blockingMapName, tx.blockingKey, tx.blockingTransaction)); + if (nextTx == this) { + throwIt = true; + } + } + if (throwIt) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, "{0}", details.toString()); + } + } + + private synchronized boolean waitForThisToEnd(int millis, Transaction waiter) { + long until = System.currentTimeMillis() + millis; + notificationRequested = true; + long state; + int status; + while((status = getStatus(state = statusAndLogId.get())) != STATUS_CLOSED + && status != STATUS_ROLLED_BACK && !hasRollback(state)) { + if (waiter.getStatus() != STATUS_OPEN) { + waiter.tryThrowDeadLockException(true); + } + long dur = until - System.currentTimeMillis(); + if(dur <= 0) { + return false; + } + try { + wait(dur); + } catch (InterruptedException ex) { + return false; + } + } + return true; + } + + /** + * Remove the map. + * + * @param the key type + * @param the value type + * @param map the map + */ + public void removeMap(TransactionMap map) { + store.removeMap(map); + } + + @Override + public String toString() { + return transactionId + "(" + sequenceNum + ") " + stateToString(); + } + + private String stateToString() { + return stateToString(statusAndLogId.get()); + } + + private static String stateToString(long state) { + return getStatusName(getStatus(state)) + (hasRollback(state) ? 
"<" : "") + " " + getLogId(state); + } + + + private static int getStatus(long state) { + return (int)(state >>> LOG_ID_BITS1) & STATUS_MASK; + } + + private static long getLogId(long state) { + return state & LOG_ID_MASK; + } + + private static boolean hasRollback(long state) { + return (state & (1L << (STATUS_BITS + LOG_ID_BITS1))) != 0; + } + + private static boolean hasChanges(long state) { + return getLogId(state) != 0; + } + + private static long composeState(int status, long logId, boolean hasRollback) { + assert logId < LOG_ID_LIMIT : logId; + assert (status & ~STATUS_MASK) == 0 : status; + + if (hasRollback) { + status |= 1 << STATUS_BITS; + } + return ((long)status << LOG_ID_BITS1) | logId; + } + + private static String getStatusName(int status) { + return status >= 0 && status < STATUS_NAMES.length ? STATUS_NAMES[status] : "UNKNOWN_STATUS_" + status; + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/TransactionMap.java b/h2/src/main/org/h2/mvstore/tx/TransactionMap.java new file mode 100644 index 0000000000..2c5d7f2a63 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/TransactionMap.java @@ -0,0 +1,1127 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.BitSet; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; + +import org.h2.engine.IsolationLevel; +import org.h2.mvstore.Cursor; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.RootReference; +import org.h2.mvstore.type.DataType; +import org.h2.value.VersionedValue; + +/** + * A map that supports transactions. + * + *

    + * Methods of this class may be changed at any time without notice. If + * you use this class directly make sure that your application or library + * requires exactly the same version of MVStore or H2 jar as the version that + * you use during its development and build. + *

    + * + * @param the key type + * @param the value type + */ +public final class TransactionMap extends AbstractMap { + + /** + * The map used for writing (the latest version). + *

    + * Key: key the key of the data. + * Value: { transactionId, oldVersion, value } + */ + public final MVMap> map; + + /** + * The transaction which is used for this map. + */ + private final Transaction transaction; + + /** + * Snapshot of this map as of beginning of transaction or + * first usage within transaction or + * beginning of the statement, depending on isolation level + */ + private Snapshot> snapshot; + + /** + * Snapshot of this map as of beginning of beginning of the statement + */ + private Snapshot> statementSnapshot; + + /** + * Indicates whether underlying map was modified from within related transaction + */ + private boolean hasChanges; + + private final TxDecisionMaker txDecisionMaker; + private final TxDecisionMaker ifAbsentDecisionMaker; + private final TxDecisionMaker lockDecisionMaker; + + + TransactionMap(Transaction transaction, MVMap> map) { + this.transaction = transaction; + this.map = map; + this.txDecisionMaker = new TxDecisionMaker<>(map.getId(), transaction); + this.ifAbsentDecisionMaker = new TxDecisionMaker.PutIfAbsentDecisionMaker<>(map.getId(), + transaction, this::getFromSnapshot); + this.lockDecisionMaker = transaction.allowNonRepeatableRead() + ? new TxDecisionMaker.LockDecisionMaker<>(map.getId(), transaction) + : new TxDecisionMaker.RepeatableReadLockDecisionMaker<>(map.getId(), transaction, + map.getValueType(), this::getFromSnapshot); + + } + + /** + * Get a clone of this map for the given transaction. + * + * @param transaction the transaction + * @return the map + */ + public TransactionMap getInstance(Transaction transaction) { + return transaction.openMapX(map); + } + + /** + * Get the number of entries, as a integer. {@link Integer#MAX_VALUE} is + * returned if there are more than this entries. + * + * @return the number of entries, as an integer + * @see #sizeAsLong() + */ + @Override + public int size() { + long size = sizeAsLong(); + return size > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) size; + } + + /** + * Get the size of the raw map. This includes uncommitted entries, and + * transiently removed entries, so it is the maximum number of entries. + * + * @return the maximum size + */ + public long sizeAsLongMax() { + return map.sizeAsLong(); + } + + /** + * Get the size of the map as seen by this transaction. + * + * @return the size + */ + public long sizeAsLong() { + IsolationLevel isolationLevel = transaction.getIsolationLevel(); + if (!isolationLevel.allowNonRepeatableRead() && hasChanges) { + return sizeAsLongRepeatableReadWithChanges(); + } + // getting coherent picture of the map, committing transactions, and undo logs + // either from values stored in transaction (never loops in that case), + // or current values from the transaction store (loops until moment of silence) + Snapshot> snapshot; + RootReference>[] undoLogRootReferences; + do { + snapshot = getSnapshot(); + undoLogRootReferences = getTransaction().getUndoLogRootReferences(); + } while (!snapshot.equals(getSnapshot())); + + RootReference> mapRootReference = snapshot.root; + long size = mapRootReference.getTotalCount(); + long undoLogsTotalSize = undoLogRootReferences == null ? size + : TransactionStore.calculateUndoLogsTotalSize(undoLogRootReferences); + // if we are looking at the map without any uncommitted values + if (undoLogsTotalSize == 0) { + return size; + } + return adjustSize(undoLogRootReferences, mapRootReference, + isolationLevel == IsolationLevel.READ_UNCOMMITTED ? 
null : snapshot.committingTransactions, + size, undoLogsTotalSize); + } + + private long adjustSize(RootReference>[] undoLogRootReferences, + RootReference> mapRootReference, BitSet committingTransactions, long size, + long undoLogsTotalSize) { + // Entries describing removals from the map by this transaction and all transactions, + // which are committed but not closed yet, + // and entries about additions to the map by other uncommitted transactions were counted, + // but they should not contribute into total count. + if (2 * undoLogsTotalSize > size) { + // the undo log is larger than half of the map - scan the entries of the map directly + Cursor> cursor = map.cursor(mapRootReference, null, null, false); + while (cursor.hasNext()) { + cursor.next(); + VersionedValue currentValue = cursor.getValue(); + assert currentValue != null; + long operationId = currentValue.getOperationId(); + if (operationId != 0 && // skip committed entries + isIrrelevant(operationId, currentValue, committingTransactions)) { + --size; + } + } + } else { + assert undoLogRootReferences != null; + // The undo logs are much smaller than the map - scan all undo logs, + // and then lookup relevant map entry. + for (RootReference> undoLogRootReference : undoLogRootReferences) { + if (undoLogRootReference != null) { + Cursor> cursor = undoLogRootReference.root.map.cursor(undoLogRootReference, + null, null, false); + while (cursor.hasNext()) { + cursor.next(); + Record op = cursor.getValue(); + if (op.mapId == map.getId()) { + @SuppressWarnings("unchecked") + VersionedValue currentValue = map.get(mapRootReference.root, (K)op.key); + // If map entry is not there, then we never counted + // it, in the first place, so skip it. + // This is possible when undo entry exists because + // it belongs to a committed but not yet closed + // transaction, and it was later deleted by some + // other already committed and closed transaction. 
+ if (currentValue != null) { + // only the last undo entry for any given map + // key should be considered + long operationId = cursor.getKey(); + assert operationId != 0; + if (currentValue.getOperationId() == operationId && + isIrrelevant(operationId, currentValue, committingTransactions)) { + --size; + } + } + } + } + } + } + } + return size; + } + + private boolean isIrrelevant(long operationId, VersionedValue currentValue, BitSet committingTransactions) { + Object v; + if (committingTransactions == null) { + v = currentValue.getCurrentValue(); + } else { + int txId = TransactionStore.getTransactionId(operationId); + v = txId == transaction.transactionId || committingTransactions.get(txId) + ? currentValue.getCurrentValue() : currentValue.getCommittedValue(); + } + return v == null; + } + + private long sizeAsLongRepeatableReadWithChanges() { + long count = 0L; + RepeatableIterator iterator = new RepeatableIterator<>(this, null, null, false, false); + while (iterator.fetchNext() != null) { + count++; + } + return count; + } + + /** + * Remove an entry. + *

    + * If the row is locked, this method will retry until the row could be + * updated or until a lock timeout. + * + * @param key the key + * @throws MVStoreException if a lock timeout occurs + * @throws ClassCastException if type of the specified key is not compatible with this map + */ + @SuppressWarnings("unchecked") + @Override + public V remove(Object key) { + return set((K)key, (V)null); + } + + /** + * Update the value for the given key. + *

    + * If the row is locked, this method will retry until the row could be + * updated or until a lock timeout. + * + * @param key the key + * @param value the new value (not null) + * @return the old value + * @throws MVStoreException if a lock timeout occurs + */ + @Override + public V put(K key, V value) { + DataUtils.checkArgument(value != null, "The value may not be null"); + return set(key, value); + } + + /** + * Put the value for the given key if entry for this key does not exist. + * It is atomic equivalent of the following expression: + * contains(key) ? get(k) : put(key, value); + * + * @param key the key + * @param value the new value (not null) + * @return the old value + */ + @Override + public V putIfAbsent(K key, V value) { + DataUtils.checkArgument(value != null, "The value may not be null"); + ifAbsentDecisionMaker.initialize(key, value); + V result = set(key, ifAbsentDecisionMaker); + if (ifAbsentDecisionMaker.getDecision() == MVMap.Decision.ABORT) { + result = ifAbsentDecisionMaker.getLastValue(); + } + return result; + } + + /** + * Appends entry to underlying map. This method may be used concurrently, + * but latest appended values are not guaranteed to be visible. + * @param key should be higher in map's order than any existing key + * @param value to be appended + */ + public void append(K key, V value) { + map.append(key, VersionedValueUncommitted.getInstance( + transaction.log(new Record<>(map.getId(), key, null)), value, null)); + hasChanges = true; + } + + /** + * Lock row for the given key. + *

    + * If the row is locked, this method will retry until the row could be + * updated or until a lock timeout. + * + * @param key the key + * @return the locked value + * @throws MVStoreException if a lock timeout occurs + */ + public V lock(K key) { + lockDecisionMaker.initialize(key, null); + return set(key, lockDecisionMaker); + } + + /** + * Update the value for the given key, without adding an undo log entry. + * + * @param key the key + * @param value the value + * @return the old value + */ + @SuppressWarnings("UnusedReturnValue") + public V putCommitted(K key, V value) { + DataUtils.checkArgument(value != null, "The value may not be null"); + VersionedValue newValue = VersionedValueCommitted.getInstance(value); + VersionedValue oldValue = map.put(key, newValue); + V result = oldValue == null ? null : oldValue.getCurrentValue(); + return result; + } + + private V set(K key, V value) { + txDecisionMaker.initialize(key, value); + return set(key, txDecisionMaker); + } + + private V set(Object key, TxDecisionMaker decisionMaker) { + Transaction blockingTransaction; + VersionedValue result; + String mapName = null; + do { + assert transaction.getBlockerId() == 0; + @SuppressWarnings("unchecked") + K k = (K) key; + // second parameter (value) is not really used, + // since TxDecisionMaker has it embedded + result = map.operate(k, null, decisionMaker); + + MVMap.Decision decision = decisionMaker.getDecision(); + assert decision != null; + assert decision != MVMap.Decision.REPEAT; + blockingTransaction = decisionMaker.getBlockingTransaction(); + if (decision != MVMap.Decision.ABORT || blockingTransaction == null) { + hasChanges |= decision != MVMap.Decision.ABORT; + V res = result == null ? 
null : result.getCurrentValue(); + return res; + } + decisionMaker.reset(); + if (mapName == null) { + mapName = map.getName(); + } + } while (transaction.waitFor(blockingTransaction, mapName, key)); + + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTION_LOCKED, + "Map entry <{0}> with key <{1}> and value {2} is locked by tx {3} and can not be updated by tx {4}" + + " within allocated time interval {5} ms.", + mapName, key, result, blockingTransaction.transactionId, transaction.transactionId, + transaction.timeoutMillis); + } + + /** + * Try to remove the value for the given key. + *

    + * This will fail if the row is locked by another transaction (that + * means, if another open transaction changed the row). + * + * @param key the key + * @return whether the entry could be removed + */ + public boolean tryRemove(K key) { + return trySet(key, null); + } + + /** + * Try to update the value for the given key. + *

    + * This will fail if the row is locked by another transaction (that + * means, if another open transaction changed the row). + * + * @param key the key + * @param value the new value + * @return whether the entry could be updated + */ + public boolean tryPut(K key, V value) { + DataUtils.checkArgument(value != null, "The value may not be null"); + return trySet(key, value); + } + + /** + * Try to set or remove the value. When updating only unchanged entries, + * then the value is only changed if it was not changed after opening + * the map. + * + * @param key the key + * @param value the new value (null to remove the value) + * @return true if the value was set, false if there was a concurrent + * update + */ + public boolean trySet(K key, V value) { + try { + // TODO: effective transaction.timeoutMillis should be set to 0 here + // and restored before return + // TODO: eliminate exception usage as part of normal control flaw + set(key, value); + return true; + } catch (MVStoreException e) { + return false; + } + } + + /** + * Get the effective value for the given key. + * + * @param key the key + * @return the value or null + * @throws ClassCastException if type of the specified key is not compatible with this map + */ + @SuppressWarnings("unchecked") + @Override + public V get(Object key) { + return getImmediate((K)key); + } + + /** + * Get the value for the given key, or null if value does not exist in accordance with transactional rules. 
+ * Value is taken from a snapshot, appropriate for an isolation level of the related transaction + * + * @param key the key + * @return the value, or null if not found + */ + public V getFromSnapshot(K key) { + switch (transaction.isolationLevel) { + case READ_UNCOMMITTED: { + Snapshot> snapshot = getStatementSnapshot(); + VersionedValue data = map.get(snapshot.root.root, key); + if (data != null) { + return data.getCurrentValue(); + } + return null; + } + case REPEATABLE_READ: + case SNAPSHOT: + case SERIALIZABLE: + if (transaction.hasChanges()) { + Snapshot> snapshot = getStatementSnapshot(); + VersionedValue data = map.get(snapshot.root.root, key); + if (data != null) { + long id = data.getOperationId(); + if (id != 0L && transaction.transactionId == TransactionStore.getTransactionId(id)) { + return data.getCurrentValue(); + } + } + } + //$FALL-THROUGH$ + case READ_COMMITTED: + default: + Snapshot> snapshot = getSnapshot(); + return getFromSnapshot(snapshot.root, snapshot.committingTransactions, key); + } + } + + private V getFromSnapshot(RootReference> rootRef, BitSet committingTransactions, K key) { + VersionedValue data = map.get(rootRef.root, key); + if (data == null) { + // doesn't exist + return null; + } + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx != transaction.transactionId && !committingTransactions.get(tx)) { + // added/modified/removed by uncommitted transaction, change should not be visible + return data.getCommittedValue(); + } + } + // added/modified/removed by this transaction or another transaction which is committed by now + return data.getCurrentValue(); + } + + /** + * Get the value for the given key, or null if not found. + * Operation is performed on a snapshot of the map taken during this call. 
+ * + * @param key the key + * @return the value, or null if not found + */ + public V getImmediate(K key) { + return useSnapshot((rootReference, committedTransactions) -> + getFromSnapshot(rootReference, committedTransactions, key)); + } + + Snapshot> getSnapshot() { + return snapshot == null ? createSnapshot() : snapshot; + } + + Snapshot> getStatementSnapshot() { + return statementSnapshot == null ? createSnapshot() : statementSnapshot; + } + + void setStatementSnapshot(Snapshot> snapshot) { + statementSnapshot = snapshot; + } + + void promoteSnapshot() { + if (snapshot == null) { + snapshot = statementSnapshot; + } + } + + /** + * Create a new snapshot for this map. + * + * @return the snapshot + */ + Snapshot> createSnapshot() { + return useSnapshot(Snapshot::new); + } + + /** + * Gets a coherent picture of committing transactions and root reference, + * passes it to the specified function, and returns its result. + * + * @param type of the result + * + * @param snapshotConsumer + * function to invoke on a snapshot + * @return function's result + */ + R useSnapshot(BiFunction>, BitSet, R> snapshotConsumer) { + // The purpose of the following loop is to get a coherent picture + // of a state of two independent volatile / atomic variables, + // which they had at some recent moment in time. + // In order to get such a "snapshot", we wait for a moment of silence, + // when neither of the variables concurrently changes it's value. + AtomicReference holder = transaction.store.committingTransactions; + BitSet committingTransactions = holder.get(); + while (true) { + BitSet prevCommittingTransactions = committingTransactions; + RootReference> root = map.getRoot(); + committingTransactions = holder.get(); + if (committingTransactions == prevCommittingTransactions) { + return snapshotConsumer.apply(root, committingTransactions); + } + } + } + + /** + * Whether the map contains the key. 
+ * + * @param key the key + * @return true if the map contains an entry for this key + * @throws ClassCastException if type of the specified key is not compatible with this map + */ + @SuppressWarnings("unchecked") + @Override + public boolean containsKey(Object key) { + return getImmediate((K)key) != null; + } + + /** + * Check if the row was deleted by this transaction. + * + * @param key the key + * @return {@code true} if it was + */ + public boolean isDeletedByCurrentTransaction(K key) { + VersionedValue data = map.get(key); + if (data != null) { + long id = data.getOperationId(); + return id != 0 && TransactionStore.getTransactionId(id) == transaction.transactionId + && data.getCurrentValue() == null; + } + return false; + } + + /** + * Whether the entry for this key was added or removed from this + * session. + * + * @param key the key + * @return true if yes + */ + public boolean isSameTransaction(K key) { + VersionedValue data = map.get(key); + if (data == null) { + // doesn't exist or deleted by a committed transaction + return false; + } + int tx = TransactionStore.getTransactionId(data.getOperationId()); + return tx == transaction.transactionId; + } + + /** + * Check whether this map is closed. + * + * @return true if closed + */ + public boolean isClosed() { + return map.isClosed(); + } + + /** + * Clear the map. + */ + @Override + public void clear() { + // TODO truncate transactionally? + map.clear(); + hasChanges = true; + } + + @Override + public Set> entrySet() { + return new AbstractSet>() { + + @Override + public Iterator> iterator() { + return entryIterator(null, null); + } + + @Override + public int size() { + return TransactionMap.this.size(); + } + + @Override + public boolean contains(Object o) { + return TransactionMap.this.containsKey(o); + } + + }; + } + + /** + * Get the first entry. 
+ * + * @return the first entry, or null if empty + */ + public Entry firstEntry() { + return this.>chooseIterator(null, null, false, true).fetchNext(); + } + + /** + * Get the first key. + * + * @return the first key, or null if empty + */ + public K firstKey() { + return this.chooseIterator(null, null, false, false).fetchNext(); + } + + /** + * Get the last entry. + * + * @return the last entry, or null if empty + */ + public Entry lastEntry() { + return this.>chooseIterator(null, null, true, true).fetchNext(); + } + + /** + * Get the last key. + * + * @return the last key, or null if empty + */ + public K lastKey() { + return this.chooseIterator(null, null, true, false).fetchNext(); + } + + /** + * Get the entry with smallest key that is larger than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry higherEntry(K key) { + return higherLowerEntry(key, false); + } + + /** + * Get the smallest key that is larger than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public K higherKey(K key) { + return higherLowerKey(key, false); + } + + /** + * Get the entry with smallest key that is larger than or equal to this key, + * or null if no such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry ceilingEntry(K key) { + return this.>chooseIterator(key, null, false, true).fetchNext(); + } + + /** + * Get the smallest key that is larger than or equal to this key, + * or null if no such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public K ceilingKey(K key) { + return this.chooseIterator(key, null, false, false).fetchNext(); + } + + /** + * Get the entry with largest key that is smaller than or equal to this key, + * or null if no such key exists. 
+ * + * @param key the key (may not be null) + * @return the result + */ + public Entry floorEntry(K key) { + return this.>chooseIterator(key, null, true, true).fetchNext(); + } + + /** + * Get the largest key that is smaller than or equal to this key, + * or null if no such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public K floorKey(K key) { + return this.chooseIterator(key, null, true, false).fetchNext(); + } + + /** + * Get the entry with largest key that is smaller than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public Entry lowerEntry(K key) { + return higherLowerEntry(key, true); + } + + /** + * Get the largest key that is smaller than the given key, or null if no + * such key exists. + * + * @param key the key (may not be null) + * @return the result + */ + public K lowerKey(K key) { + return higherLowerKey(key, true); + } + + private Entry higherLowerEntry(K key, boolean lower) { + TMIterator> it = chooseIterator(key, null, lower, true); + Entry result = it.fetchNext(); + if (result != null && map.getKeyType().compare(key, result.getKey()) == 0) { + result = it.fetchNext(); + } + return result; + } + + private K higherLowerKey(K key, boolean lower) { + TMIterator it = chooseIterator(key, null, lower, false); + K result = it.fetchNext(); + if (result != null && map.getKeyType().compare(key, result) == 0) { + result = it.fetchNext(); + } + return result; + } + + /** + * Iterate over keys. + * + * @param from the first key to return + * @return the iterator + */ + public Iterator keyIterator(K from) { + return chooseIterator(from, null, false, false); + } + + /** + * Iterate over keys in the specified order. 
+ * + * @param from the first key to return + * @param reverse if true, iterate in reverse (descending) order + * @return the iterator + */ + public TMIterator keyIterator(K from, boolean reverse) { + return chooseIterator(from, null, reverse, false); + } + + /** + * Iterate over keys. + * + * @param from the first key to return + * @param to the last key to return or null if there is no limit + * @return the iterator + */ + public TMIterator keyIterator(K from, K to) { + return chooseIterator(from, to, false, false); + } + + /** + * Iterate over keys, including keys from uncommitted entries. + * + * @param from the first key to return + * @param to the last key to return or null if there is no limit + * @return the iterator + */ + public TMIterator keyIteratorUncommitted(K from, K to) { + return new ValidationIterator<>(this, from, to); + } + + /** + * Iterate over entries. + * + * @param from the first key to return + * @param to the last key to return + * @return the iterator + */ + public TMIterator> entryIterator(final K from, final K to) { + return chooseIterator(from, to, false, true); + } + + private TMIterator chooseIterator(K from, K to, boolean reverse, boolean forEntries) { + switch (transaction.isolationLevel) { + case READ_UNCOMMITTED: + return new UncommittedIterator<>(this, from, to, reverse, forEntries); + case REPEATABLE_READ: + case SNAPSHOT: + case SERIALIZABLE: + if (hasChanges) { + return new RepeatableIterator<>(this, from, to, reverse, forEntries); + } + //$FALL-THROUGH$ + case READ_COMMITTED: + default: + return new CommittedIterator<>(this, from, to, reverse, forEntries); + } + } + + public Transaction getTransaction() { + return transaction; + } + + public DataType getKeyType() { + return map.getKeyType(); + } + + /** + * The iterator for read uncommitted isolation level. This iterator is also + * used for unique indexes. 
+ * + * @param + * the type of keys + * @param + * the type of elements + */ + private static class UncommittedIterator extends TMIterator { + UncommittedIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.createSnapshot(), reverse, forEntries); + } + + UncommittedIterator(TransactionMap transactionMap, K from, K to, Snapshot> snapshot, + boolean reverse, boolean forEntries) { + super(transactionMap, from, to, snapshot, reverse, forEntries); + } + + @Override + public final X fetchNext() { + while (cursor.hasNext()) { + K key = cursor.next(); + VersionedValue data = cursor.getValue(); + if (data != null) { + Object currentValue = data.getCurrentValue(); + if (currentValue != null || shouldIgnoreRemoval(data)) { + return toElement(key, currentValue); + } + } + } + return null; + } + + boolean shouldIgnoreRemoval(VersionedValue data) { + return false; + } + } + + + // This iterator should include all entries applicable for unique index validation, + // committed and otherwise, only excluding keys removed by the current transaction + // or by some other already committed (but not closed yet) transactions + private static final class ValidationIterator extends UncommittedIterator { + ValidationIterator(TransactionMap transactionMap, K from, K to) { + super(transactionMap, from, to, transactionMap.createSnapshot(), false, false); + } + + @Override + boolean shouldIgnoreRemoval(VersionedValue data) { + assert data.getCurrentValue() == null; + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + return transactionId != tx && !committingTransactions.get(tx); + } + return false; + } + } + + /** + * The iterator for read committed isolation level. Can also be used on + * higher levels when the transaction doesn't have own changes. 
+ * + * @param + * the type of keys + * @param + * the type of elements + */ + private static final class CommittedIterator extends TMIterator { + CommittedIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.getSnapshot(), reverse, forEntries); + } + + @Override + public X fetchNext() { + while (cursor.hasNext()) { + K key = cursor.next(); + VersionedValue data = cursor.getValue(); + // If value doesn't exist or it was deleted by a committed transaction, + // or if value is a committed one, just return it. + if (data != null) { + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx != transactionId && !committingTransactions.get(tx)) { + // current value comes from another uncommitted transaction + // take committed value instead + Object committedValue = data.getCommittedValue(); + if (committedValue == null) { + continue; + } + return toElement(key, committedValue); + } + } + Object currentValue = data.getCurrentValue(); + if (currentValue != null) { + return toElement(key, currentValue); + } + } + } + return null; + } + } + + /** + * The iterator for repeatable read and serializable isolation levels. 
+ * + * @param + * the type of keys + * @param + * the type of elements + */ + private static final class RepeatableIterator extends TMIterator { + private final DataType keyType; + + private K snapshotKey; + + private Object snapshotValue; + + private final Cursor> uncommittedCursor; + + private K uncommittedKey; + + private V uncommittedValue; + + RepeatableIterator(TransactionMap transactionMap, K from, K to, boolean reverse, boolean forEntries) { + super(transactionMap, from, to, transactionMap.getSnapshot(), reverse, forEntries); + keyType = transactionMap.map.getKeyType(); + Snapshot> snapshot = transactionMap.getStatementSnapshot(); + uncommittedCursor = transactionMap.map.cursor(snapshot.root, from, to, reverse); + } + + @Override + public X fetchNext() { + X next = null; + do { + if (snapshotKey == null) { + fetchSnapshot(); + } + if (uncommittedKey == null) { + fetchUncommitted(); + } + if (snapshotKey == null && uncommittedKey == null) { + break; + } + int cmp = snapshotKey == null ? 1 : + uncommittedKey == null ? -1 : + keyType.compare(snapshotKey, uncommittedKey); + if (cmp < 0) { + next = toElement(snapshotKey, snapshotValue); + snapshotKey = null; + break; + } + if (uncommittedValue != null) { + // This entry was added / updated by this transaction, use the new value + next = toElement(uncommittedKey, uncommittedValue); + } + if (cmp == 0) { // This entry was updated / deleted + snapshotKey = null; + } + uncommittedKey = null; + } while (next == null); + return next; + } + + private void fetchSnapshot() { + while (cursor.hasNext()) { + K key = cursor.next(); + VersionedValue data = cursor.getValue(); + // If value doesn't exist or it was deleted by a committed transaction, + // or if value is a committed one, just return it. 
+ if (data != null) { + Object value = data.getCommittedValue(); + long id = data.getOperationId(); + if (id != 0) { + int tx = TransactionStore.getTransactionId(id); + if (tx == transactionId || committingTransactions.get(tx)) { + // value comes from this transaction or another committed transaction + // take current value instead instead of committed one + value = data.getCurrentValue(); + } + } + if (value != null) { + snapshotKey = key; + snapshotValue = value; + return; + } + } + } + } + + private void fetchUncommitted() { + while (uncommittedCursor.hasNext()) { + K key = uncommittedCursor.next(); + VersionedValue data = uncommittedCursor.getValue(); + if (data != null) { + long id = data.getOperationId(); + if (id != 0L && transactionId == TransactionStore.getTransactionId(id)) { + uncommittedKey = key; + uncommittedValue = data.getCurrentValue(); + return; + } + } + } + } + } + + public abstract static class TMIterator implements Iterator { + final int transactionId; + + final BitSet committingTransactions; + + protected final Cursor> cursor; + + private final boolean forEntries; + + X current; + + TMIterator(TransactionMap transactionMap, K from, K to, Snapshot> snapshot, + boolean reverse, boolean forEntries) { + Transaction transaction = transactionMap.getTransaction(); + this.transactionId = transaction.transactionId; + this.forEntries = forEntries; + this.cursor = transactionMap.map.cursor(snapshot.root, from, to, reverse); + this.committingTransactions = snapshot.committingTransactions; + } + + @SuppressWarnings("unchecked") + final X toElement(K key, Object value) { + return (X) (forEntries ? new AbstractMap.SimpleImmutableEntry<>(key, value) : key); + } + + /** + * Fetches a next entry. + * + * This method cannot be used together with {@link #hasNext()} and + * {@link #next()}. 
+ * + * @return the next entry or {@code null} + */ + public abstract X fetchNext(); + + @Override + public final boolean hasNext() { + return current != null || (current = fetchNext()) != null; + } + + @Override + public final X next() { + X result = current; + if (result == null) { + if ((result = fetchNext()) == null) { + throw new NoSuchElementException(); + } + } else { + current = null; + } + return result; + } + + } + +} diff --git a/h2/src/main/org/h2/mvstore/tx/TransactionStore.java b/h2/src/main/org/h2/mvstore/tx/TransactionStore.java new file mode 100644 index 0000000000..bd4d43cdd9 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/TransactionStore.java @@ -0,0 +1,961 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicReferenceArray; +import org.h2.engine.IsolationLevel; +import org.h2.mvstore.Cursor; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.RootReference; +import org.h2.mvstore.rtree.MVRTreeMap; +import org.h2.mvstore.rtree.SpatialDataType; +import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.LongDataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.ObjectDataType; +import org.h2.mvstore.type.StringDataType; +import org.h2.util.StringUtils; +import org.h2.value.VersionedValue; + +/** + * A store that supports concurrent MVCC read-committed transactions. + */ +public class TransactionStore { + + /** + * The store. 
+ */ + final MVStore store; + + /** + * Default blocked transaction timeout + */ + final int timeoutMillis; + + /** + * The persisted map of prepared transactions. + * Key: transactionId, value: [ status, name ]. + */ + private final MVMap preparedTransactions; + + private final MVMap> typeRegistry; + + /** + * Undo logs. + *

    + * If the first entry for a transaction doesn't have a logId + * of 0, then the transaction is partially committed (which means rollback + * is not possible). Log entries are written before the data is changed + * (write-ahead). + *

    + * Key: opId, value: [ mapId, key, oldValue ]. + */ + @SuppressWarnings("unchecked") + final MVMap>[] undoLogs = new MVMap[MAX_OPEN_TRANSACTIONS]; + private final MVMap.Builder> undoLogBuilder; + + private final DataType dataType; + + /** + * This BitSet is used as vacancy indicator for transaction slots in transactions[]. + * It provides easy way to find first unoccupied slot, and also allows for copy-on-write + * non-blocking updates. + */ + final AtomicReference openTransactions = new AtomicReference<>(new VersionedBitSet()); + + /** + * This is intended to be the source of ultimate truth about transaction being committed. + * Once bit is set, corresponding transaction is logically committed, + * although it might be plenty of "uncommitted" entries in various maps + * and undo record are still around. + * Nevertheless, all of those should be considered by other transactions as committed. + */ + final AtomicReference committingTransactions = new AtomicReference<>(new BitSet()); + + private boolean init; + + /** + * Soft limit on the number of concurrently opened transactions. + * Not really needed but used by some test. + */ + private int maxTransactionId = MAX_OPEN_TRANSACTIONS; + + /** + * Array holding all open transaction objects. + * Position in array is "transaction id". + * VolatileReferenceArray would do the job here, but there is no such thing in Java yet + */ + private final AtomicReferenceArray transactions = + new AtomicReferenceArray<>(MAX_OPEN_TRANSACTIONS + 1); + + private static final String TYPE_REGISTRY_NAME = "_"; + + /** + * The prefix for undo log entries. 
+ */ + public static final String UNDO_LOG_NAME_PREFIX = "undoLog"; + + // must come before open in lexicographical order + private static final char UNDO_LOG_COMMITTED = '-'; + + private static final char UNDO_LOG_OPEN = '.'; + + /** + * Hard limit on the number of concurrently opened transactions + */ + // TODO: introduce constructor parameter instead of a static field, driven by URL parameter + private static final int MAX_OPEN_TRANSACTIONS = 65535; + + /** + * Generate a string used to name undo log map for a specific transaction. + * This name will contain transaction id. + * + * @param transactionId of the corresponding transaction + * @return undo log name + */ + private static String getUndoLogName(int transactionId) { + return transactionId > 0 ? UNDO_LOG_NAME_PREFIX + UNDO_LOG_OPEN + transactionId + : UNDO_LOG_NAME_PREFIX + UNDO_LOG_OPEN; + } + + /** + * Create a new transaction store. + * + * @param store the store + */ + public TransactionStore(MVStore store) { + this(store, new ObjectDataType()); + } + + public TransactionStore(MVStore store, DataType dataType) { + this(store, new MetaType<>(null, store.backgroundExceptionHandler), dataType, 0); + } + + /** + * Create a new transaction store. 
+ * @param store the store + * @param metaDataType the data type for type registry map values + * @param dataType default data type for map keys and values + * @param timeoutMillis lock acquisition timeout in milliseconds, 0 means no wait + */ + public TransactionStore(MVStore store, MetaType metaDataType, DataType dataType, int timeoutMillis) { + this.store = store; + this.dataType = dataType; + this.timeoutMillis = timeoutMillis; + this.typeRegistry = openTypeRegistry(store, metaDataType); + this.preparedTransactions = store.openMap("openTransactions", new MVMap.Builder<>()); + this.undoLogBuilder = createUndoLogBuilder(); + } + + @SuppressWarnings({"unchecked","rawtypes"}) + MVMap.Builder> createUndoLogBuilder() { + return new MVMap.Builder>() + .singleWriter() + .keyType(LongDataType.INSTANCE) + .valueType(new Record.Type(this)); + } + + private static MVMap> openTypeRegistry(MVStore store, MetaType metaDataType) { + MVMap.Builder> typeRegistryBuilder = + new MVMap.Builder>() + .keyType(StringDataType.INSTANCE) + .valueType(metaDataType); + return store.openMap(TYPE_REGISTRY_NAME, typeRegistryBuilder); + } + + /** + * Initialize the store without any RollbackListener. + * @see #init(RollbackListener) + */ + public void init() { + init(ROLLBACK_LISTENER_NONE); + } + + /** + * Initialize the store. This is needed before a transaction can be opened. + * If the transaction store is corrupt, this method can throw an exception, + * in which case the store can only be used for reading. + * + * @param listener to notify about transaction rollback + */ + public void init(RollbackListener listener) { + if (!init) { + for (String mapName : store.getMapNames()) { + if (mapName.startsWith(UNDO_LOG_NAME_PREFIX)) { + // Unexpectedly short name may be encountered upon upgrade from older version + // where undo log was persisted as a single map, remove it. 
+ if (mapName.length() > UNDO_LOG_NAME_PREFIX.length()) { + // make a decision about tx status based on a log name + // to handle upgrade from a previous versions + boolean committed = mapName.charAt(UNDO_LOG_NAME_PREFIX.length()) == UNDO_LOG_COMMITTED; + if (store.hasData(mapName)) { + int transactionId = StringUtils.parseUInt31(mapName, UNDO_LOG_NAME_PREFIX.length() + 1, + mapName.length()); + VersionedBitSet openTxBitSet = openTransactions.get(); + if (!openTxBitSet.get(transactionId)) { + Object[] data = preparedTransactions.get(transactionId); + int status; + String name; + if (data == null) { + status = Transaction.STATUS_OPEN; + name = null; + } else { + status = (Integer) data[0]; + name = (String) data[1]; + } + MVMap> undoLog = store.openMap(mapName, undoLogBuilder); + undoLogs[transactionId] = undoLog; + Long lastUndoKey = undoLog.lastKey(); + assert lastUndoKey != null; + assert getTransactionId(lastUndoKey) == transactionId; + long logId = getLogId(lastUndoKey) + 1; + if (committed) { + // give it a proper name and used marker record instead + store.renameMap(undoLog, getUndoLogName(transactionId)); + markUndoLogAsCommitted(transactionId); + } else { + committed = logId > LOG_ID_MASK; + } + if (committed) { + status = Transaction.STATUS_COMMITTED; + lastUndoKey = undoLog.lowerKey(lastUndoKey); + assert lastUndoKey == null || getTransactionId(lastUndoKey) == transactionId; + logId = lastUndoKey == null ? 0 : getLogId(lastUndoKey) + 1; + } + registerTransaction(transactionId, status, name, logId, timeoutMillis, 0, + IsolationLevel.READ_COMMITTED, listener); + continue; + } + } + } + + if (!store.isReadOnly()) { + store.removeMap(mapName); + } + } + } + init = true; + } + } + + private void markUndoLogAsCommitted(int transactionId) { + addUndoLogRecord(transactionId, LOG_ID_MASK, Record.COMMIT_MARKER); + } + + /** + * Commit all transactions that are in the committed state, and + * rollback all open transactions. 
+ */ + public void endLeftoverTransactions() { + List list = getOpenTransactions(); + for (Transaction t : list) { + int status = t.getStatus(); + if (status == Transaction.STATUS_COMMITTED) { + t.commit(); + } else if (status != Transaction.STATUS_PREPARED) { + t.rollback(); + } + } + } + + int getMaxTransactionId() { + return maxTransactionId; + } + + /** + * Set the maximum transaction id, after which ids are re-used. If the old + * transaction is still in use when re-using an old id, the new transaction + * fails. + * + * @param max the maximum id + */ + public void setMaxTransactionId(int max) { + DataUtils.checkArgument(max <= MAX_OPEN_TRANSACTIONS, + "Concurrent transactions limit is too high: {0}", max); + this.maxTransactionId = max; + } + + /** + * Check whether a given map exists. + * + * @param name the map name + * @return true if it exists + */ + public boolean hasMap(String name) { + return store.hasMap(name); + } + + private static final int LOG_ID_BITS = Transaction.LOG_ID_BITS; + private static final long LOG_ID_MASK = (1L << LOG_ID_BITS) - 1; + + /** + * Combine the transaction id and the log id to an operation id. + * + * @param transactionId the transaction id + * @param logId the log id + * @return the operation id + */ + static long getOperationId(int transactionId, long logId) { + DataUtils.checkArgument(transactionId >= 0 && transactionId < (1 << (64 - LOG_ID_BITS)), + "Transaction id out of range: {0}", transactionId); + DataUtils.checkArgument(logId >= 0 && logId <= LOG_ID_MASK, + "Transaction log id out of range: {0}", logId); + return ((long) transactionId << LOG_ID_BITS) | logId; + } + + /** + * Get the transaction id for the given operation id. + * + * @param operationId the operation id + * @return the transaction id + */ + static int getTransactionId(long operationId) { + return (int) (operationId >>> LOG_ID_BITS); + } + + /** + * Get the log id for the given operation id. 
+ * + * @param operationId the operation id + * @return the log id + */ + static long getLogId(long operationId) { + return operationId & LOG_ID_MASK; + } + + /** + * Get the list of unclosed transactions that have pending writes. + * + * @return the list of transactions (sorted by id) + */ + public List getOpenTransactions() { + if(!init) { + init(); + } + ArrayList list = new ArrayList<>(); + int transactionId = 0; + BitSet bitSet = openTransactions.get(); + while((transactionId = bitSet.nextSetBit(transactionId + 1)) > 0) { + Transaction transaction = getTransaction(transactionId); + if(transaction != null) { + if(transaction.getStatus() != Transaction.STATUS_CLOSED) { + list.add(transaction); + } + } + } + return list; + } + + /** + * Close the transaction store. + */ + public synchronized void close() { + store.commit(); + } + + /** + * Begin a new transaction. + * + * @return the transaction + */ + public Transaction begin() { + return begin(ROLLBACK_LISTENER_NONE, timeoutMillis, 0, IsolationLevel.READ_COMMITTED); + } + + /** + * Begin a new transaction. + * @param listener to be notified in case of a rollback + * @param timeoutMillis to wait for a blocking transaction + * @param ownerId of the owner (Session?) 
to be reported by getBlockerId + * @param isolationLevel of new transaction + * @return the transaction + */ + public Transaction begin(RollbackListener listener, int timeoutMillis, int ownerId, + IsolationLevel isolationLevel) { + Transaction transaction = registerTransaction(0, Transaction.STATUS_OPEN, null, 0, + timeoutMillis, ownerId, isolationLevel, listener); + return transaction; + } + + private Transaction registerTransaction(int txId, int status, String name, long logId, + int timeoutMillis, int ownerId, + IsolationLevel isolationLevel, RollbackListener listener) { + int transactionId; + long sequenceNo; + boolean success; + do { + VersionedBitSet original = openTransactions.get(); + if (txId == 0) { + transactionId = original.nextClearBit(1); + } else { + transactionId = txId; + assert !original.get(transactionId); + } + if (transactionId > maxTransactionId) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, + "There are {0} open transactions", + transactionId - 1); + } + VersionedBitSet clone = original.clone(); + clone.set(transactionId); + sequenceNo = clone.getVersion() + 1; + clone.setVersion(sequenceNo); + success = openTransactions.compareAndSet(original, clone); + } while(!success); + + Transaction transaction = new Transaction(this, transactionId, sequenceNo, status, name, logId, + timeoutMillis, ownerId, isolationLevel, listener); + + assert transactions.get(transactionId) == null; + transactions.set(transactionId, transaction); + + if (undoLogs[transactionId] == null) { + String undoName = getUndoLogName(transactionId); + MVMap> undoLog = store.openMap(undoName, undoLogBuilder); + undoLogs[transactionId] = undoLog; + } + return transaction; + } + + /** + * Store a transaction. 
+ * + * @param t the transaction + */ + void storeTransaction(Transaction t) { + if (t.getStatus() == Transaction.STATUS_PREPARED || + t.getName() != null) { + Object[] v = { t.getStatus(), t.getName() }; + preparedTransactions.put(t.getId(), v); + t.wasStored = true; + } + } + + /** + * Add an undo log entry. + * + * @param transactionId id of the transaction + * @param logId sequential number of the log record within transaction + * @param record Record(mapId, key, previousValue) to add + * @return key for the added record + */ + long addUndoLogRecord(int transactionId, long logId, Record record) { + MVMap> undoLog = undoLogs[transactionId]; + long undoKey = getOperationId(transactionId, logId); + if (logId == 0 && !undoLog.isEmpty()) { + throw DataUtils.newMVStoreException( + DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS, + "An old transaction with the same id " + + "is still open: {0}", + transactionId); + } + undoLog.append(undoKey, record); + return undoKey; + } + + /** + * Remove an undo log entry. + * @param transactionId id of the transaction + */ + void removeUndoLogRecord(int transactionId) { + undoLogs[transactionId].trimLast(); + } + + /** + * Remove the given map. + * + * @param map the map + */ + void removeMap(TransactionMap map) { + store.removeMap(map.map); + } + + /** + * Commit a transaction. + * @param t transaction to commit + * @param recovery if called during initial transaction recovery procedure + * therefore undo log is stored under "committed" name already + */ + void commit(Transaction t, boolean recovery) { + if (!store.isClosed()) { + int transactionId = t.transactionId; + // First, mark log as "committed". + // It does not change the way this transaction is treated by others, + // but preserves fact of commit in case of abrupt termination. 
+ MVMap> undoLog = undoLogs[transactionId]; + Cursor> cursor; + if(recovery) { + removeUndoLogRecord(transactionId); + cursor = undoLog.cursor(null); + } else { + cursor = undoLog.cursor(null); + markUndoLogAsCommitted(transactionId); + } + + // this is an atomic action that causes all changes + // made by this transaction, to be considered as "committed" + flipCommittingTransactionsBit(transactionId, true); + + CommitDecisionMaker commitDecisionMaker = new CommitDecisionMaker<>(); + try { + while (cursor.hasNext()) { + Long undoKey = cursor.next(); + Record op = cursor.getValue(); + int mapId = op.mapId; + MVMap> map = openMap(mapId); + if (map != null && !map.isClosed()) { // might be null if map was removed later + Object key = op.key; + commitDecisionMaker.setUndoKey(undoKey); + // second parameter (value) is not really + // used by CommitDecisionMaker + map.operate(key, null, commitDecisionMaker); + } + } + } finally { + try { + undoLog.clear(); + } finally { + flipCommittingTransactionsBit(transactionId, false); + } + } + } + } + + private void flipCommittingTransactionsBit(int transactionId, boolean flag) { + boolean success; + do { + BitSet original = committingTransactions.get(); + assert original.get(transactionId) != flag : flag ? "Double commit" : "Mysterious bit's disappearance"; + BitSet clone = (BitSet) original.clone(); + clone.set(transactionId, flag); + success = committingTransactions.compareAndSet(original, clone); + } while(!success); + } + + MVMap> openVersionedMap(String name, DataType keyType, DataType valueType) { + VersionedValueType vt = valueType == null ? null : new VersionedValueType<>(valueType); + return openMap(name, keyType, vt); + } + + /** + * Open the map with the given name. 
+ * + * @param the key type + * @param the value type + * @param name the map name + * @param keyType the key type + * @param valueType the value type + * @return the map + */ + public MVMap openMap(String name, DataType keyType, DataType valueType) { + return store.openMap(name, new TxMapBuilder(typeRegistry, dataType) + .keyType(keyType).valueType(valueType)); + } + + /** + * Open the map with the given id. + * + * @param key type + * @param value type + * + * @param mapId the id + * @return the map + */ + MVMap> openMap(int mapId) { + MVMap> map = store.getMap(mapId); + if (map == null) { + String mapName = store.getMapName(mapId); + if (mapName == null) { + // the map was removed later on + return null; + } + MVMap.Builder> txMapBuilder = new TxMapBuilder<>(typeRegistry, dataType); + map = store.openMap(mapId, txMapBuilder); + } + return map; + } + + MVMap> getMap(int mapId) { + MVMap> map = store.getMap(mapId); + if (map == null && !init) { + map = openMap(mapId); + } + assert map != null : "map with id " + mapId + " is missing" + + (init ? "" : " during initialization"); + return map; + } + + /** + * End this transaction. Change status to CLOSED and vacate transaction slot. + * Will try to commit MVStore if autocommitDelay is 0 or if database is idle + * and amount of unsaved changes is sizable. 
+ * + * @param t the transaction + * @param hasChanges true if transaction has done any updates + * (even if they are fully rolled back), + * false if it just performed a data access + */ + void endTransaction(Transaction t, boolean hasChanges) { + t.closeIt(); + int txId = t.transactionId; + transactions.set(txId, null); + + boolean success; + do { + VersionedBitSet original = openTransactions.get(); + assert original.get(txId); + VersionedBitSet clone = original.clone(); + clone.clear(txId); + success = openTransactions.compareAndSet(original, clone); + } while(!success); + + if (hasChanges) { + boolean wasStored = t.wasStored; + if (wasStored && !preparedTransactions.isClosed()) { + preparedTransactions.remove(txId); + } + + if (store.getFileStore() != null) { + if (wasStored || store.getAutoCommitDelay() == 0) { + store.commit(); + } else { + if (isUndoEmpty()) { + // to avoid having to store the transaction log, + // if there is no open transaction, + // and if there have been many changes, store them now + int unsaved = store.getUnsavedMemory(); + int max = store.getAutoCommitMemory(); + // save at 3/4 capacity + if (unsaved * 4 > max * 3) { + store.tryCommit(); + } + } + } + } + } + } + + /** + * Get the root references (snapshots) for undo-log maps. + * Those snapshots can potentially be used to optimize TransactionMap.size(). 
+ * + * @return the array of root references or null if snapshotting is not possible + */ + RootReference>[] collectUndoLogRootReferences() { + BitSet opentransactions = openTransactions.get(); + @SuppressWarnings("unchecked") + RootReference>[] undoLogRootReferences = new RootReference[opentransactions.length()]; + for (int i = opentransactions.nextSetBit(0); i >= 0; i = opentransactions.nextSetBit(i+1)) { + MVMap> undoLog = undoLogs[i]; + if (undoLog != null) { + RootReference> rootReference = undoLog.getRoot(); + if (rootReference.needFlush()) { + // abort attempt to collect snapshots for all undo logs + // because map's append buffer can't be flushed from a non-owning thread + return null; + } + undoLogRootReferences[i] = rootReference; + } + } + return undoLogRootReferences; + } + + /** + * Calculate the size for undo log entries. + * + * @param undoLogRootReferences the root references + * @return the number of key-value pairs + */ + static long calculateUndoLogsTotalSize(RootReference>[] undoLogRootReferences) { + long undoLogsTotalSize = 0; + for (RootReference> rootReference : undoLogRootReferences) { + if (rootReference != null) { + undoLogsTotalSize += rootReference.getTotalCount(); + } + } + return undoLogsTotalSize; + } + + private boolean isUndoEmpty() { + BitSet openTrans = openTransactions.get(); + for (int i = openTrans.nextSetBit(0); i >= 0; i = openTrans.nextSetBit(i + 1)) { + MVMap> undoLog = undoLogs[i]; + if (undoLog != null && !undoLog.isEmpty()) { + return false; + } + } + return true; + } + + /** + * Get Transaction object for a transaction id. + * + * @param transactionId id for an open transaction + * @return Transaction object. + */ + Transaction getTransaction(int transactionId) { + return transactions.get(transactionId); + } + + /** + * Rollback to an old savepoint. 
+ * + * @param t the transaction + * @param maxLogId the last log id + * @param toLogId the log id to roll back to + */ + void rollbackTo(Transaction t, long maxLogId, long toLogId) { + int transactionId = t.getId(); + MVMap> undoLog = undoLogs[transactionId]; + RollbackDecisionMaker decisionMaker = new RollbackDecisionMaker(this, transactionId, toLogId, t.listener); + for (long logId = maxLogId - 1; logId >= toLogId; logId--) { + Long undoKey = getOperationId(transactionId, logId); + undoLog.operate(undoKey, null, decisionMaker); + decisionMaker.reset(); + } + } + + /** + * Get the changes of the given transaction, starting from the latest log id + * back to the given log id. + * + * @param t the transaction + * @param maxLogId the maximum log id + * @param toLogId the minimum log id + * @return the changes + */ + Iterator getChanges(final Transaction t, final long maxLogId, + final long toLogId) { + + final MVMap> undoLog = undoLogs[t.getId()]; + return new Iterator() { + + private long logId = maxLogId - 1; + private Change current; + + private void fetchNext() { + int transactionId = t.getId(); + while (logId >= toLogId) { + Long undoKey = getOperationId(transactionId, logId); + Record op = undoLog.get(undoKey); + logId--; + if (op == null) { + // partially rolled back: load previous + undoKey = undoLog.floorKey(undoKey); + if (undoKey == null || getTransactionId(undoKey) != transactionId) { + break; + } + logId = getLogId(undoKey); + continue; + } + int mapId = op.mapId; + MVMap> m = openMap(mapId); + if (m != null) { // could be null if map was removed later on + VersionedValue oldValue = op.oldValue; + current = new Change(m.getName(), op.key, + oldValue == null ? 
null : oldValue.getCurrentValue()); + return; + } + } + current = null; + } + + @Override + public boolean hasNext() { + if(current == null) { + fetchNext(); + } + return current != null; + } + + @Override + public Change next() { + if(!hasNext()) { + throw DataUtils.newUnsupportedOperationException("no data"); + } + Change result = current; + current = null; + return result; + } + + }; + } + + /** + * A change in a map. + */ + public static class Change { + + /** + * The name of the map where the change occurred. + */ + public final String mapName; + + /** + * The key. + */ + public final Object key; + + /** + * The value. + */ + public final Object value; + + public Change(String mapName, Object key, Object value) { + this.mapName = mapName; + this.key = key; + this.value = value; + } + } + + /** + * This listener can be registered with the transaction to be notified of + * every compensating change during transaction rollback. + * Normally this is not required, if no external resources were modified, + * because state of all transactional maps will be restored automatically. + * Only state of external resources, possibly modified by triggers + * need to be restored. 
+ */ + public interface RollbackListener { + + /** + * Notified of a single map change (add/update/remove) + * @param map modified + * @param key of the modified entry + * @param existingValue value in the map (null if delete is rolled back) + * @param restoredValue value to be restored (null if add is rolled back) + */ + void onRollback(MVMap> map, Object key, + VersionedValue existingValue, VersionedValue restoredValue); + } + + private static final RollbackListener ROLLBACK_LISTENER_NONE = (map, key, existingValue, restoredValue) -> {}; + + private static final class TxMapBuilder extends MVMap.Builder { + + private final MVMap> typeRegistry; + private final DataType defaultDataType; + + TxMapBuilder(MVMap> typeRegistry, DataType defaultDataType) { + this.typeRegistry = typeRegistry; + this.defaultDataType = defaultDataType; + } + + private void registerDataType(DataType dataType) { + String key = getDataTypeRegistrationKey(dataType); + DataType registeredDataType = typeRegistry.putIfAbsent(key, dataType); + if(registeredDataType != null) { + // TODO: ensure type consistency + } + } + + static String getDataTypeRegistrationKey(DataType dataType) { + return Integer.toHexString(Objects.hashCode(dataType)); + } + + @SuppressWarnings("unchecked") + @Override + public MVMap create(MVStore store, Map config) { + DataType keyType = getKeyType(); + if (keyType == null) { + String keyTypeKey = (String) config.remove("key"); + if (keyTypeKey != null) { + keyType = (DataType)typeRegistry.get(keyTypeKey); + if (keyType == null) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_UNKNOWN_DATA_TYPE, + "Data type with hash {0} can not be found", keyTypeKey); + } + setKeyType(keyType); + } + } else { + registerDataType(keyType); + } + + DataType valueType = getValueType(); + if (valueType == null) { + String valueTypeKey = (String) config.remove("val"); + if (valueTypeKey != null) { + valueType = (DataType)typeRegistry.get(valueTypeKey); + if (valueType == null) { + throw 
DataUtils.newMVStoreException(DataUtils.ERROR_UNKNOWN_DATA_TYPE, + "Data type with hash {0} can not be found", valueTypeKey); + } + setValueType(valueType); + } + } else { + registerDataType(valueType); + } + + if (getKeyType() == null) { + setKeyType(defaultDataType); + registerDataType(getKeyType()); + } + if (getValueType() == null) { + setValueType((DataType) new VersionedValueType(defaultDataType)); + registerDataType(getValueType()); + } + + config.put("store", store); + config.put("key", getKeyType()); + config.put("val", getValueType()); + return create(config); + } + + @Override + @SuppressWarnings("unchecked") + protected MVMap create(Map config) { + if ("rtree".equals(config.get("type"))) { + MVMap map = (MVMap) new MVRTreeMap<>(config, (SpatialDataType) getKeyType(), + getValueType()); + return map; + } + return new TMVMap<>(config, getKeyType(), getValueType()); + } + + private static final class TMVMap extends MVMap { + private final String type; + + TMVMap(Map config, DataType keyType, DataType valueType) { + super(config, keyType, valueType); + type = (String)config.get("type"); + } + + private TMVMap(MVMap source) { + super(source); + type = source.getType(); + } + + @Override + protected MVMap cloneIt() { + return new TMVMap<>(this); + } + + @Override + public String getType() { + return type; + } + + @Override + protected String asString(String name) { + StringBuilder buff = new StringBuilder(); + buff.append(super.asString(name)); + DataUtils.appendMap(buff, "key", getDataTypeRegistrationKey(getKeyType())); + DataUtils.appendMap(buff, "val", getDataTypeRegistrationKey(getValueType())); + return buff.toString(); + } + } + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java b/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java new file mode 100644 index 0000000000..2ab6535b6d --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/TxDecisionMaker.java @@ -0,0 +1,383 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.function.Function; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVMap.Decision; +import org.h2.mvstore.type.DataType; +import org.h2.value.VersionedValue; + +/** + * Class TxDecisionMaker is a base implementation of MVMap.DecisionMaker + * to be used for TransactionMap modification. + * + * @author Andrei Tokar + */ +class TxDecisionMaker extends MVMap.DecisionMaker> { + /** + * Map to decide upon + */ + private final int mapId; + + /** + * Key for the map entry to decide upon + */ + protected K key; + + /** + * Value for the map entry + */ + private V value; + + /** + * Transaction we are operating within + */ + private final Transaction transaction; + + /** + * Id for the undo log entry created for this modification + */ + private long undoKey; + + /** + * Id of the last operation, we decided to + * {@link org.h2.mvstore.MVMap.Decision#REPEAT}. + */ + private long lastOperationId; + + private Transaction blockingTransaction; + private MVMap.Decision decision; + private V lastValue; + + TxDecisionMaker(int mapId, Transaction transaction) { + this.mapId = mapId; + this.transaction = transaction; + } + + void initialize(K key, V value) { + this.key = key; + this.value = value; + decision = null; + reset(); + } + + @Override + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + assert decision == null; + long id; + int blockingId; + // if map does not have that entry yet + if (existingValue == null || + // or entry is a committed one + (id = existingValue.getOperationId()) == 0 || + // or it came from the same transaction + isThisTransaction(blockingId = TransactionStore.getTransactionId(id))) { + logAndDecideToPut(existingValue, existingValue == null ? 
null : existingValue.getCommittedValue()); + } else if (isCommitted(blockingId)) { + // Condition above means that entry belongs to a committing transaction. + // We assume that we are looking at the final value for this transaction, + // and if it's not the case, then it will fail later, + // because a tree root has definitely been changed. + V currentValue = existingValue.getCurrentValue(); + logAndDecideToPut(currentValue == null ? null : VersionedValueCommitted.getInstance(currentValue), + currentValue); + } else if (getBlockingTransaction() != null) { + // this entry comes from a different transaction, and this + // transaction is not committed yet + // should wait on blockingTransaction that was determined earlier + lastValue = existingValue.getCurrentValue(); + decision = MVMap.Decision.ABORT; + } else if (isRepeatedOperation(id)) { + // There is no transaction with that id, and we've tried it just + // before, but map root has not changed (which must be the case if + // we just missed a closed transaction), therefore we came back here + // again. + // Now we assume it's a leftover after unclean shutdown (map update + // was written but not undo log), and will effectively roll it back + // (just assume committed value and overwrite). + V committedValue = existingValue.getCommittedValue(); + logAndDecideToPut(committedValue == null ? 
null : VersionedValueCommitted.getInstance(committedValue), + committedValue); + } else { + // transaction has been committed/rolled back and is closed by now, so + // we can retry immediately and either that entry become committed + // or we'll hit case above + decision = MVMap.Decision.REPEAT; + } + return decision; + } + + @Override + public final void reset() { + if (decision != MVMap.Decision.REPEAT) { + lastOperationId = 0; + if (decision == MVMap.Decision.PUT) { + // positive decision has been made already and undo record created, + // but map was updated afterwards and undo record deletion required + transaction.logUndo(); + } + } + blockingTransaction = null; + decision = null; + lastValue = null; + } + + @SuppressWarnings("unchecked") + @Override + // always return value (ignores existingValue) + public > T selectValue(T existingValue, T providedValue) { + return (T) VersionedValueUncommitted.getInstance(undoKey, getNewValue(existingValue), lastValue); + } + + /** + * Get the new value. + * This implementation always return the current value (ignores the parameter). + * + * @param existingValue the parameter value + * @return the current value. 
+ */ + V getNewValue(VersionedValue existingValue) { + return value; + } + + /** + * Create undo log entry and record for future references + * {@link org.h2.mvstore.MVMap.Decision#PUT} decision along with last known + * committed value + * + * @param valueToLog previous value to be logged + * @param lastValue last known committed value + * @return {@link org.h2.mvstore.MVMap.Decision#PUT} + */ + MVMap.Decision logAndDecideToPut(VersionedValue valueToLog, V lastValue) { + undoKey = transaction.log(new Record<>(mapId, key, valueToLog)); + this.lastValue = lastValue; + return setDecision(MVMap.Decision.PUT); + } + + final MVMap.Decision decideToAbort(V lastValue) { + this.lastValue = lastValue; + return setDecision(Decision.ABORT); + } + + final boolean allowNonRepeatableRead() { + return transaction.allowNonRepeatableRead(); + } + + final MVMap.Decision getDecision() { + return decision; + } + + final Transaction getBlockingTransaction() { + return blockingTransaction; + } + + final V getLastValue() { + return lastValue; + } + + /** + * Check whether specified transaction id belongs to "current" transaction + * (transaction we are acting within). + * + * @param transactionId to check + * @return true it it is "current" transaction's id, false otherwise + */ + final boolean isThisTransaction(int transactionId) { + return transactionId == transaction.transactionId; + } + + /** + * Determine whether specified id corresponds to a logically committed transaction. + * In case of pending transaction, reference to actual Transaction object (if any) + * is preserved for future use. 
+ * + * @param transactionId to use + * @return true if transaction should be considered as committed, false otherwise + */ + final boolean isCommitted(int transactionId) { + Transaction blockingTx; + boolean result; + TransactionStore store = transaction.store; + do { + blockingTx = store.getTransaction(transactionId); + result = store.committingTransactions.get().get(transactionId); + } while (blockingTx != store.getTransaction(transactionId)); + + if (!result) { + blockingTransaction = blockingTx; + } + return result; + } + + /** + * Store operation id provided, but before that, compare it against last stored one. + * This is to prevent an infinite loop in case of uncommitted "leftover" entry + * (one without a corresponding undo log entry, most likely as a result of unclean shutdown). + * + * @param id + * for the operation we decided to + * {@link org.h2.mvstore.MVMap.Decision#REPEAT} + * @return true if the same as last operation id, false otherwise + */ + final boolean isRepeatedOperation(long id) { + if (id == lastOperationId) { + return true; + } + lastOperationId = id; + return false; + } + + /** + * Record for future references specified value as a decision that has been made. 
+ * + * @param decision made + * @return argument provided + */ + final MVMap.Decision setDecision(MVMap.Decision decision) { + return this.decision = decision; + } + + @Override + public final String toString() { + return "txdm " + transaction.transactionId; + } + + + + public static final class PutIfAbsentDecisionMaker extends TxDecisionMaker { + private final Function oldValueSupplier; + + PutIfAbsentDecisionMaker(int mapId, Transaction transaction, Function oldValueSupplier) { + super(mapId, transaction); + this.oldValueSupplier = oldValueSupplier; + } + + @Override + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + assert getDecision() == null; + int blockingId; + // if map does not have that entry yet + if (existingValue == null) { + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + // value exists in a snapshot but not in current map, therefore + // it was removed and committed by another transaction + return decideToAbort(snapshotValue); + } + return logAndDecideToPut(null, null); + } else { + long id = existingValue.getOperationId(); + if (id == 0 // entry is a committed one + // or it came from the same transaction + || isThisTransaction(blockingId = TransactionStore.getTransactionId(id))) { + if(existingValue.getCurrentValue() != null) { + return decideToAbort(existingValue.getCurrentValue()); + } + if (id == 0) { + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + return decideToAbort(snapshotValue); + } + } + return logAndDecideToPut(existingValue, existingValue.getCommittedValue()); + } else if (isCommitted(blockingId)) { + // entry belongs to a committing transaction + // and therefore will be committed soon + if(existingValue.getCurrentValue() != null) { + return decideToAbort(existingValue.getCurrentValue()); + } + // even if that commit will result in entry removal + // current operation should fail within repeatable read transaction + // if initial snapshot 
carries some value + V snapshotValue = getValueInSnapshot(); + if (snapshotValue != null) { + return decideToAbort(snapshotValue); + } + return logAndDecideToPut(null, null); + } else if (getBlockingTransaction() != null) { + // this entry comes from a different transaction, and this + // transaction is not committed yet + // should wait on blockingTransaction that was determined + // earlier and then try again + return decideToAbort(existingValue.getCurrentValue()); + } else if (isRepeatedOperation(id)) { + // There is no transaction with that id, and we've tried it + // just before, but map root has not changed (which must be + // the case if we just missed a closed transaction), + // therefore we came back here again. + // Now we assume it's a leftover after unclean shutdown (map + // update was written but not undo log), and will + // effectively roll it back (just assume committed value and + // overwrite). + V committedValue = existingValue.getCommittedValue(); + if (committedValue != null) { + return decideToAbort(committedValue); + } + return logAndDecideToPut(null, null); + } else { + // transaction has been committed/rolled back and is closed + // by now, so we can retry immediately and either that entry + // become committed or we'll hit case above + return setDecision(MVMap.Decision.REPEAT); + } + } + } + + private V getValueInSnapshot() { + return allowNonRepeatableRead() ? 
null : oldValueSupplier.apply(key); + } + } + + + public static class LockDecisionMaker extends TxDecisionMaker { + + LockDecisionMaker(int mapId, Transaction transaction) { + super(mapId, transaction); + } + + @Override + public MVMap.Decision decide(VersionedValue existingValue, VersionedValue providedValue) { + MVMap.Decision decision = super.decide(existingValue, providedValue); + if (existingValue == null) { + assert decision == MVMap.Decision.PUT; + decision = setDecision(MVMap.Decision.REMOVE); + } + return decision; + } + + @Override + V getNewValue(VersionedValue existingValue) { + return existingValue == null ? null : existingValue.getCurrentValue(); + } + } + + public static final class RepeatableReadLockDecisionMaker extends LockDecisionMaker { + + private final DataType> valueType; + + private final Function snapshotValueSupplier; + + RepeatableReadLockDecisionMaker(int mapId, Transaction transaction, + DataType> valueType, Function snapshotValueSupplier) { + super(mapId, transaction); + this.valueType = valueType; + this.snapshotValueSupplier = snapshotValueSupplier; + } + + @Override + Decision logAndDecideToPut(VersionedValue valueToLog, V value) { + V snapshotValue = snapshotValueSupplier.apply(key); + if (snapshotValue != null && (valueToLog == null + || valueType.compare(VersionedValueCommitted.getInstance(snapshotValue), valueToLog) != 0)) { + throw DataUtils.newMVStoreException(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, ""); + } + return super.logAndDecideToPut(valueToLog, value); + } + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java b/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java new file mode 100644 index 0000000000..e0d8351195 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/VersionedBitSet.java @@ -0,0 +1,33 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.util.BitSet; + +/** + * Class VersionedBitSet extends standard BitSet to add a version field. + * This will allow bit set and version to be changed atomically. + */ +final class VersionedBitSet extends BitSet { + private static final long serialVersionUID = 1L; + + private long version; + + public VersionedBitSet() {} + + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + + @Override + public VersionedBitSet clone() { + return (VersionedBitSet)super.clone(); + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java new file mode 100644 index 0000000000..3d0df25758 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueCommitted.java @@ -0,0 +1,53 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import org.h2.value.VersionedValue; + +/** + * Class CommittedVersionedValue. + * + * @author Andrei Tokar + */ +class VersionedValueCommitted extends VersionedValue { + /** + * The current value. + */ + public final T value; + + VersionedValueCommitted(T value) { + this.value = value; + } + + /** + * Either cast to VersionedValue, or wrap in VersionedValueCommitted + * + * @param type of the value to get the VersionedValue for + * + * @param value the object to cast/wrap + * @return VersionedValue instance + */ + @SuppressWarnings("unchecked") + static VersionedValue getInstance(X value) { + assert value != null; + return value instanceof VersionedValue ? 
(VersionedValue)value : new VersionedValueCommitted<>(value); + } + + @Override + public T getCurrentValue() { + return value; + } + + @Override + public T getCommittedValue() { + return value; + } + + @Override + public String toString() { + return String.valueOf(value); + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java new file mode 100644 index 0000000000..a088b70c41 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueType.java @@ -0,0 +1,164 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import java.nio.ByteBuffer; +import org.h2.engine.Constants; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.mvstore.type.DataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.StatefulDataType; +import org.h2.value.VersionedValue; + +/** + * The value type for a versioned value. + */ +public class VersionedValueType extends BasicDataType> implements StatefulDataType { + + private final DataType valueType; + private final Factory factory = new Factory<>(); + + + public VersionedValueType(DataType valueType) { + this.valueType = valueType; + } + + @Override + @SuppressWarnings("unchecked") + public VersionedValue[] createStorage(int size) { + return new VersionedValue[size]; + } + + @Override + public int getMemory(VersionedValue v) { + if(v == null) return 0; + int res = Constants.MEMORY_OBJECT + 8 + 2 * Constants.MEMORY_POINTER + + getValMemory(v.getCurrentValue()); + if (v.getOperationId() != 0) { + res += getValMemory(v.getCommittedValue()); + } + return res; + } + + private int getValMemory(T obj) { + return obj == null ? 
0 : valueType.getMemory(obj); + } + + @Override + public void read(ByteBuffer buff, Object storage, int len) { + if (buff.get() == 0) { + // fast path (no op ids or null entries) + for (int i = 0; i < len; i++) { + cast(storage)[i] = VersionedValueCommitted.getInstance(valueType.read(buff)); + } + } else { + // slow path (some entries may be null) + for (int i = 0; i < len; i++) { + cast(storage)[i] = read(buff); + } + } + } + + @Override + public VersionedValue read(ByteBuffer buff) { + long operationId = DataUtils.readVarLong(buff); + if (operationId == 0) { + return VersionedValueCommitted.getInstance(valueType.read(buff)); + } else { + byte flags = buff.get(); + T value = (flags & 1) != 0 ? valueType.read(buff) : null; + T committedValue = (flags & 2) != 0 ? valueType.read(buff) : null; + return VersionedValueUncommitted.getInstance(operationId, value, committedValue); + } + } + + @Override + public void write(WriteBuffer buff, Object storage, int len) { + boolean fastPath = true; + for (int i = 0; i < len; i++) { + VersionedValue v = cast(storage)[i]; + if (v.getOperationId() != 0 || v.getCurrentValue() == null) { + fastPath = false; + } + } + if (fastPath) { + buff.put((byte) 0); + for (int i = 0; i < len; i++) { + VersionedValue v = cast(storage)[i]; + valueType.write(buff, v.getCurrentValue()); + } + } else { + // slow path: + // store op ids, and some entries may be null + buff.put((byte) 1); + for (int i = 0; i < len; i++) { + write(buff, cast(storage)[i]); + } + } + } + + @Override + public void write(WriteBuffer buff, VersionedValue v) { + long operationId = v.getOperationId(); + buff.putVarLong(operationId); + if (operationId == 0) { + valueType.write(buff, v.getCurrentValue()); + } else { + T committedValue = v.getCommittedValue(); + int flags = (v.getCurrentValue() == null ? 0 : 1) | (committedValue == null ? 
0 : 2); + buff.put((byte) flags); + if (v.getCurrentValue() != null) { + valueType.write(buff, v.getCurrentValue()); + } + if (committedValue != null) { + valueType.write(buff, committedValue); + } + } + } + + @Override + @SuppressWarnings("unchecked") + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (!(obj instanceof VersionedValueType)) { + return false; + } + VersionedValueType other = (VersionedValueType) obj; + return valueType.equals(other.valueType); + } + + @Override + public int hashCode() { + return super.hashCode() ^ valueType.hashCode(); + } + + @Override + public void save(WriteBuffer buff, MetaType metaType) { + metaType.write(buff, valueType); + } + + @Override + public int compare(VersionedValue a, VersionedValue b) { + return valueType.compare(a.getCurrentValue(), b.getCurrentValue()); + } + + @Override + public Factory getFactory() { + return factory; + } + + public static final class Factory implements StatefulDataType.Factory { + @SuppressWarnings("unchecked") + @Override + public DataType create(ByteBuffer buff, MetaType metaType, D database) { + DataType> valueType = (DataType>)metaType.read(buff); + return new VersionedValueType,D>(valueType); + } + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java b/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java new file mode 100644 index 0000000000..dad0b330c3 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/VersionedValueUncommitted.java @@ -0,0 +1,61 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.tx; + +import org.h2.value.VersionedValue; + +/** + * Class VersionedValueUncommitted. 
+ * + * @author Andrei Tokar + */ +class VersionedValueUncommitted extends VersionedValueCommitted { + private final long operationId; + private final T committedValue; + + private VersionedValueUncommitted(long operationId, T value, T committedValue) { + super(value); + assert operationId != 0; + this.operationId = operationId; + this.committedValue = committedValue; + } + + /** + * Create new VersionedValueUncommitted. + * + * @param type of the value to get the VersionedValue for + * + * @param operationId combined log/transaction id + * @param value value before commit + * @param committedValue value after commit + * @return VersionedValue instance + */ + static VersionedValue getInstance(long operationId, X value, X committedValue) { + return new VersionedValueUncommitted<>(operationId, value, committedValue); + } + + @Override + public boolean isCommitted() { + return false; + } + + @Override + public long getOperationId() { + return operationId; + } + + @Override + public T getCommittedValue() { + return committedValue; + } + + @Override + public String toString() { + return super.toString() + + " " + TransactionStore.getTransactionId(operationId) + "/" + + TransactionStore.getLogId(operationId) + " " + committedValue; + } +} diff --git a/h2/src/main/org/h2/mvstore/tx/package.html b/h2/src/main/org/h2/mvstore/tx/package.html new file mode 100644 index 0000000000..08b0f02706 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/tx/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Helper classes to use the MVStore in a transactional manner. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/mvstore/type/BasicDataType.java b/h2/src/main/org/h2/mvstore/type/BasicDataType.java new file mode 100644 index 0000000000..d9c79e6f08 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/BasicDataType.java @@ -0,0 +1,98 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * The base class for data type implementations. + * + * @author Andrei Tokar + */ +public abstract class BasicDataType implements DataType { + + @Override + public abstract int getMemory(T obj); + + @Override + public abstract void write(WriteBuffer buff, T obj); + + @Override + public abstract T read(ByteBuffer buff); + + @Override + public int compare(T a, T b) { + throw DataUtils.newUnsupportedOperationException("Can not compare"); + } + + @Override + public boolean isMemoryEstimationAllowed() { + return true; + } + + @Override + public int binarySearch(T key, Object storageObj, int size, int initialGuess) { + T[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = compare(key, storage[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return ~low; + } + + @Override + public void write(WriteBuffer buff, Object storage, int len) { + for (int i = 0; i < len; i++) { + write(buff, cast(storage)[i]); + } + } + + @Override + public void read(ByteBuffer buff, Object storage, int len) { + for (int i = 0; i < len; i++) { + cast(storage)[i] 
= read(buff); + } + } + + @Override + public int hashCode() { + return getClass().getName().hashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj != null && getClass().equals(obj.getClass()); + } + + /** + * Cast the storage object to an array of type T. + * + * @param storage the storage object + * @return the array + */ + @SuppressWarnings("unchecked") + protected final T[] cast(Object storage) { + return (T[])storage; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java b/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java new file mode 100644 index 0000000000..9fb8546268 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/ByteArrayDataType.java @@ -0,0 +1,46 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; +import java.nio.ByteBuffer; + +/** + * Class ByteArrayDataType. 
+ * + * @author Andrei Tokar + */ +public final class ByteArrayDataType extends BasicDataType +{ + public static final ByteArrayDataType INSTANCE = new ByteArrayDataType(); + + private ByteArrayDataType() {} + + @Override + public int getMemory(byte[] data) { + return data.length; + } + + @Override + public void write(WriteBuffer buff, byte[] data) { + buff.putVarInt(data.length); + buff.put(data); + } + + @Override + public byte[] read(ByteBuffer buff) { + int size = DataUtils.readVarInt(buff); + byte[] data = new byte[size]; + buff.get(data); + return data; + } + + @Override + public byte[][] createStorage(int size) { + return new byte[size][]; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/DataType.java b/h2/src/main/org/h2/mvstore/type/DataType.java index efb0a73e28..4066cbc057 100644 --- a/h2/src/main/org/h2/mvstore/type/DataType.java +++ b/h2/src/main/org/h2/mvstore/type/DataType.java @@ -1,18 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.type; import java.nio.ByteBuffer; +import java.util.Comparator; import org.h2.mvstore.WriteBuffer; /** * A data type. */ -public interface DataType { +public interface DataType extends Comparator { /** * Compare two keys. @@ -22,15 +23,32 @@ public interface DataType { * @return -1 if the first key is smaller, 1 if larger, and 0 if equal * @throws UnsupportedOperationException if the type is not orderable */ - int compare(Object a, Object b); + @Override + int compare(T a, T b); /** - * Estimate the used memory in bytes. 
+ * Perform binary search for the key within the storage + * @param key to search for + * @param storage to search within (an array of type T) + * @param size number of data items in the storage + * @param initialGuess for key position + * @return index of the key , if found, - index of the insertion point, if not + */ + int binarySearch(T key, Object storage, int size, int initialGuess); + + /** + * Calculates the amount of used memory in bytes. * * @param obj the object * @return the used memory */ - int getMemory(Object obj); + int getMemory(T obj); + + /** + * Whether memory estimation based on previously seen values is allowed/desirable + * @return true if memory estimation is allowed + */ + boolean isMemoryEstimationAllowed(); /** * Write an object. @@ -38,17 +56,16 @@ public interface DataType { * @param buff the target buffer * @param obj the value */ - void write(WriteBuffer buff, Object obj); + void write(WriteBuffer buff, T obj); /** * Write a list of objects. * * @param buff the target buffer - * @param obj the objects + * @param storage the objects * @param len the number of objects to write - * @param key whether the objects are keys */ - void write(WriteBuffer buff, Object[] obj, int len, boolean key); + void write(WriteBuffer buff, Object storage, int len); /** * Read an object. @@ -56,17 +73,23 @@ public interface DataType { * @param buff the source buffer * @return the object */ - Object read(ByteBuffer buff); + T read(ByteBuffer buff); /** * Read a list of objects. 
* * @param buff the target buffer - * @param obj the objects + * @param storage the objects * @param len the number of objects to read - * @param key whether the objects are keys */ - void read(ByteBuffer buff, Object[] obj, int len, boolean key); + void read(ByteBuffer buff, Object storage, int len); + /** + * Create storage object of array type to hold values + * + * @param size number of values to hold + * @return storage object + */ + T[] createStorage(int size); } diff --git a/h2/src/main/org/h2/mvstore/type/LongDataType.java b/h2/src/main/org/h2/mvstore/type/LongDataType.java new file mode 100644 index 0000000000..1fbca0eb7f --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/LongDataType.java @@ -0,0 +1,83 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * Class LongDataType. + *
      + *
    • 8/21/17 6:52 PM initial creation + *
    + * + * @author Andrei Tokar + */ +public class LongDataType extends BasicDataType { + + public static final LongDataType INSTANCE = new LongDataType(); + + private static final Long[] EMPTY_LONG_ARR = new Long[0]; + + private LongDataType() {} + + @Override + public int getMemory(Long obj) { + return 8; + } + + @Override + public void write(WriteBuffer buff, Long data) { + buff.putVarLong(data); + } + + @Override + public Long read(ByteBuffer buff) { + return DataUtils.readVarLong(buff); + } + + @Override + public Long[] createStorage(int size) { + return size == 0 ? EMPTY_LONG_ARR : new Long[size]; + } + + @Override + public int compare(Long one, Long two) { + return Long.compare(one, two); + } + + @Override + public int binarySearch(Long keyObj, Object storageObj, int size, int initialGuess) { + long key = keyObj; + Long[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + return binarySearch(key, storage, low, high, x); + } + + private static int binarySearch(long key, Long[] storage, int low, int high, int x) { + while (low <= high) { + long midVal = storage[x]; + if (key > midVal) { + low = x + 1; + } else if (key < midVal) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; + } + return -(low + 1); + } +} diff --git a/h2/src/main/org/h2/mvstore/type/MetaType.java b/h2/src/main/org/h2/mvstore/type/MetaType.java new file mode 100644 index 0000000000..d522ca17c0 --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/MetaType.java @@ -0,0 +1,108 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.WriteBuffer; + +/** + * Class DBMetaType is a type for values in the type registry map. + * + * @param type of opaque parameter passed as an operational context to Factory.create() + * + * @author Andrei Tokar + */ +public final class MetaType extends BasicDataType> { + + private final D database; + private final Thread.UncaughtExceptionHandler exceptionHandler; + private final Map cache = new HashMap<>(); + + public MetaType(D database, Thread.UncaughtExceptionHandler exceptionHandler) { + this.database = database; + this.exceptionHandler = exceptionHandler; + } + + @Override + public int compare(DataType a, DataType b) { + throw new UnsupportedOperationException(); + } + + @Override + public int getMemory(DataType obj) { + return Constants.MEMORY_OBJECT; + } + + @SuppressWarnings("unchecked") + @Override + public void write(WriteBuffer buff, DataType obj) { + Class clazz = obj.getClass(); + StatefulDataType statefulDataType = null; + if (obj instanceof StatefulDataType) { + statefulDataType = (StatefulDataType) obj; + StatefulDataType.Factory factory = statefulDataType.getFactory(); + if (factory != null) { + clazz = factory.getClass(); + } + } + String className = clazz.getName(); + int len = className.length(); + buff.putVarInt(len) + .putStringData(className, len); + if (statefulDataType != null) { + statefulDataType.save(buff, this); + } + } + + @SuppressWarnings("unchecked") + @Override + public DataType read(ByteBuffer buff) { + int len = DataUtils.readVarInt(buff); + String className = DataUtils.readString(buff, len); + try { + Object o = cache.get(className); + if (o != null) { + if (o instanceof StatefulDataType.Factory) { + return ((StatefulDataType.Factory) o).create(buff, this, database); + } + return (DataType) o; 
+ } + Class clazz = Class.forName(className); + boolean singleton = false; + Object obj; + try { + obj = clazz.getDeclaredField("INSTANCE").get(null); + singleton = true; + } catch (ReflectiveOperationException | NullPointerException e) { + obj = clazz.getDeclaredConstructor().newInstance(); + } + if (obj instanceof StatefulDataType.Factory) { + StatefulDataType.Factory factory = (StatefulDataType.Factory) obj; + cache.put(className, factory); + return factory.create(buff, this, database); + } + if (singleton) { + cache.put(className, obj); + } + return (DataType) obj; + } catch (ReflectiveOperationException | SecurityException | IllegalArgumentException e) { + if (exceptionHandler != null) { + exceptionHandler.uncaughtException(Thread.currentThread(), e); + } + throw new RuntimeException(e); + } + } + + @Override + public DataType[] createStorage(int size) { + return new DataType[size]; + } +} diff --git a/h2/src/main/org/h2/mvstore/type/ObjectDataType.java b/h2/src/main/org/h2/mvstore/type/ObjectDataType.java index 140d1018a2..3b41c930d8 100644 --- a/h2/src/main/org/h2/mvstore/type/ObjectDataType.java +++ b/h2/src/main/org/h2/mvstore/type/ObjectDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.type; @@ -19,13 +19,13 @@ import java.util.UUID; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; -import org.h2.util.New; +import org.h2.util.Utils; /** * A data type implementation for the most common data types, including * serializable objects. */ -public class ObjectDataType implements DataType { +public class ObjectDataType extends BasicDataType { /** * The type constants are also used as tag values. 
@@ -94,77 +94,101 @@ public class ObjectDataType implements DataType { Float.class, Double.class, BigDecimal.class, String.class, UUID.class, Date.class }; - private static final HashMap, Integer> COMMON_CLASSES_MAP = New - .hashMap(); + private static class Holder { + private static final HashMap, Integer> COMMON_CLASSES_MAP = new HashMap<>(32); - private AutoDetectDataType last = new StringType(this); + static { + for (int i = 0, size = COMMON_CLASSES.length; i < size; i++) { + COMMON_CLASSES_MAP.put(COMMON_CLASSES[i], i); + } + } - @Override - public int compare(Object a, Object b) { - return last.compare(a, b); + /** + * Get the class id, or null if not found. + * + * @param clazz the class + * @return the class id or null + */ + static Integer getCommonClassId(Class clazz) { + return COMMON_CLASSES_MAP.get(clazz); + } } + @SuppressWarnings("unchecked") + private AutoDetectDataType last = selectDataType(TYPE_NULL); + @Override - public int getMemory(Object obj) { - return last.getMemory(obj); + public Object[] createStorage(int size) { + return new Object[size]; } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + public int compare(Object a, Object b) { + int typeId = getTypeId(a); + int typeDiff = typeId - getTypeId(b); + if (typeDiff == 0) { + return newType(typeId).compare(a, b); } + return Integer.signum(typeDiff); } @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } + public int getMemory(Object obj) { + return switchType(obj).getMemory(obj); } @Override public void write(WriteBuffer buff, Object obj) { - last.write(buff, obj); + switchType(obj).write(buff, obj); + } + + @SuppressWarnings("unchecked") + private AutoDetectDataType newType(int typeId) { + if (typeId == last.typeId) { + return last; + } + return selectDataType(typeId); } - private AutoDetectDataType 
newType(int typeId) { + @SuppressWarnings("rawtypes") + private AutoDetectDataType selectDataType(int typeId) { switch (typeId) { case TYPE_NULL: - return new NullType(this); + return NullType.INSTANCE; case TYPE_BOOLEAN: - return new BooleanType(this); + return BooleanType.INSTANCE; case TYPE_BYTE: - return new ByteType(this); + return ByteType.INSTANCE; case TYPE_SHORT: - return new ShortType(this); + return ShortType.INSTANCE; case TYPE_CHAR: - return new CharacterType(this); + return CharacterType.INSTANCE; case TYPE_INT: - return new IntegerType(this); + return IntegerType.INSTANCE; case TYPE_LONG: - return new LongType(this); + return LongType.INSTANCE; case TYPE_FLOAT: - return new FloatType(this); + return FloatType.INSTANCE; case TYPE_DOUBLE: - return new DoubleType(this); + return DoubleType.INSTANCE; case TYPE_BIG_INTEGER: - return new BigIntegerType(this); + return BigIntegerType.INSTANCE; case TYPE_BIG_DECIMAL: - return new BigDecimalType(this); + return BigDecimalType.INSTANCE; case TYPE_STRING: - return new StringType(this); + return StringType.INSTANCE; case TYPE_UUID: - return new UUIDType(this); + return UUIDType.INSTANCE; case TYPE_DATE: - return new DateType(this); + return DateType.INSTANCE; case TYPE_ARRAY: - return new ObjectArrayType(this); + return new ObjectArrayType(); case TYPE_SERIALIZED_OBJECT: return new SerializedObjectType(this); + default: + throw DataUtils.newMVStoreException(DataUtils.ERROR_INTERNAL, + "Unsupported type {0}", typeId); } - throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, - "Unsupported type {0}", typeId); } @Override @@ -219,13 +243,13 @@ public Object read(ByteBuffer buff) { && tag <= TAG_BYTE_ARRAY_0_15 + 15) { typeId = TYPE_ARRAY; } else { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_FILE_CORRUPT, "Unknown tag {0}", tag); } } } - AutoDetectDataType t = last; + AutoDetectDataType t = last; if (typeId != t.typeId) { last = t = 
newType(typeId); } @@ -273,9 +297,9 @@ private static int getTypeId(Object obj) { * @param obj the object * @return the auto-detected type used */ - AutoDetectDataType switchType(Object obj) { + AutoDetectDataType switchType(Object obj) { int typeId = getTypeId(obj); - AutoDetectDataType l = last; + AutoDetectDataType l = last; if (typeId != l.typeId) { last = l = newType(typeId); } @@ -289,7 +313,7 @@ AutoDetectDataType switchType(Object obj) { * @return true if yes */ static boolean isBigInteger(Object obj) { - return obj instanceof BigInteger && obj.getClass() == BigInteger.class; + return obj != null && obj.getClass() == BigInteger.class; } /** @@ -299,7 +323,7 @@ static boolean isBigInteger(Object obj) { * @return true if yes */ static boolean isBigDecimal(Object obj) { - return obj instanceof BigDecimal && obj.getClass() == BigDecimal.class; + return obj != null && obj.getClass() == BigDecimal.class; } /** @@ -309,7 +333,7 @@ static boolean isBigDecimal(Object obj) { * @return true if yes */ static boolean isDate(Object obj) { - return obj instanceof Date && obj.getClass() == Date.class; + return obj != null && obj.getClass() == Date.class; } /** @@ -322,23 +346,6 @@ static boolean isArray(Object obj) { return obj != null && obj.getClass().isArray(); } - /** - * Get the class id, or null if not found. - * - * @param clazz the class - * @return the class id or null - */ - static Integer getCommonClassId(Class clazz) { - HashMap, Integer> map = COMMON_CLASSES_MAP; - if (map.size() == 0) { - // lazy initialization - for (int i = 0, size = COMMON_CLASSES.length; i < size; i++) { - COMMON_CLASSES_MAP.put(COMMON_CLASSES[i], i); - } - } - return map.get(clazz); - } - /** * Serialize the object to a byte array. * @@ -404,10 +411,19 @@ public static int compareNotNull(byte[] data1, byte[] data2) { /** * The base class for auto-detect data types. 
*/ - abstract static class AutoDetectDataType implements DataType { + abstract static class AutoDetectDataType extends BasicDataType { + + private final ObjectDataType base; - protected final ObjectDataType base; - protected final int typeId; + /** + * The type id. + */ + final int typeId; + + AutoDetectDataType(int typeId) { + this.base = null; + this.typeId = typeId; + } AutoDetectDataType(ObjectDataType base, int typeId) { this.base = base; @@ -415,55 +431,22 @@ abstract static class AutoDetectDataType implements DataType { } @Override - public int getMemory(Object o) { + public int getMemory(T o) { return getType(o).getMemory(o); } @Override - public int compare(Object aObj, Object bObj) { - AutoDetectDataType aType = getType(aObj); - AutoDetectDataType bType = getType(bObj); - int typeDiff = aType.typeId - bType.typeId; - if (typeDiff == 0) { - return aType.compare(aObj, bObj); - } - return Integer.signum(typeDiff); - } - - @Override - public void write(WriteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - - @Override - public void write(WriteBuffer buff, Object o) { + public void write(WriteBuffer buff, T o) { getType(o).write(buff, o); } - @Override - public void read(ByteBuffer buff, Object[] obj, - int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); - } - } - - @Override - public final Object read(ByteBuffer buff) { - throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL, - "Internal error"); - } - /** * Get the type for the given object. * * @param o the object * @return the type */ - AutoDetectDataType getType(Object o) { + DataType getType(Object o) { return base.switchType(o); } @@ -481,38 +464,42 @@ AutoDetectDataType getType(Object o) { /** * The type for the null value */ - static class NullType extends AutoDetectDataType { + static class NullType extends AutoDetectDataType { + + /** + * The only instance of this type. 
+ */ + static final NullType INSTANCE = new NullType(); + + private NullType() { + super(TYPE_NULL); + } - NullType(ObjectDataType base) { - super(base, TYPE_NULL); + @Override + public Object[] createStorage(int size) { + return null; } @Override public int compare(Object aObj, Object bObj) { - if (aObj == null && bObj == null) { - return 0; - } else if (aObj == null) { - return -1; - } else if (bObj == null) { - return 1; - } - return super.compare(aObj, bObj); + return 0; } @Override public int getMemory(Object obj) { - return obj == null ? 0 : super.getMemory(obj); + return 0; } @Override public void write(WriteBuffer buff, Object obj) { - if (obj != null) { - super.write(buff, obj); - return; - } buff.put((byte) TYPE_NULL); } + @Override + public Object read(ByteBuffer buff) { + return null; + } + @Override public Object read(ByteBuffer buff, int tag) { return null; @@ -523,81 +510,92 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for boolean true and false. */ - static class BooleanType extends AutoDetectDataType { + static class BooleanType extends AutoDetectDataType { - BooleanType(ObjectDataType base) { - super(base, TYPE_BOOLEAN); + /** + * The only instance of this type. + */ + static final BooleanType INSTANCE = new BooleanType(); + + private BooleanType() { + super(TYPE_BOOLEAN); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Boolean && bObj instanceof Boolean) { - Boolean a = (Boolean) aObj; - Boolean b = (Boolean) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Boolean[] createStorage(int size) { + return new Boolean[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Boolean ? 0 : super.getMemory(obj); + public int compare(Boolean a, Boolean b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Boolean)) { - super.write(buff, obj); - return; - } - int tag = ((Boolean) obj) ? 
TAG_BOOLEAN_TRUE : TYPE_BOOLEAN; + public int getMemory(Boolean obj) { + return 0; + } + + @Override + public void write(WriteBuffer buff, Boolean obj) { + int tag = obj ? TAG_BOOLEAN_TRUE : TYPE_BOOLEAN; buff.put((byte) tag); } @Override - public Object read(ByteBuffer buff, int tag) { - return tag == TYPE_BOOLEAN ? Boolean.FALSE : Boolean.TRUE; + public Boolean read(ByteBuffer buff) { + return buff.get() == TAG_BOOLEAN_TRUE ? Boolean.TRUE : Boolean.FALSE; } + @Override + public Boolean read(ByteBuffer buff, int tag) { + return tag == TYPE_BOOLEAN ? Boolean.FALSE : Boolean.TRUE; + } } /** * The type for byte objects. */ - static class ByteType extends AutoDetectDataType { + static class ByteType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final ByteType INSTANCE = new ByteType(); - ByteType(ObjectDataType base) { - super(base, TYPE_BYTE); + private ByteType() { + super(TYPE_BYTE); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Byte && bObj instanceof Byte) { - Byte a = (Byte) aObj; - Byte b = (Byte) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Byte[] createStorage(int size) { + return new Byte[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Byte ? 
0 : super.getMemory(obj); + public int compare(Byte a, Byte b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Byte)) { - super.write(buff, obj); - return; - } + public int getMemory(Byte obj) { + return 1; + } + + @Override + public void write(WriteBuffer buff, Byte obj) { buff.put((byte) TYPE_BYTE); - buff.put(((Byte) obj).byteValue()); + buff.put(obj); + } + + @Override + public Byte read(ByteBuffer buff) { + return buff.get(); } @Override public Object read(ByteBuffer buff, int tag) { - return Byte.valueOf(buff.get()); + return buff.get(); } } @@ -605,116 +603,127 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for character objects. */ - static class CharacterType extends AutoDetectDataType { + static class CharacterType extends AutoDetectDataType { - CharacterType(ObjectDataType base) { - super(base, TYPE_CHAR); + /** + * The only instance of this type. + */ + static final CharacterType INSTANCE = new CharacterType(); + + private CharacterType() { + super(TYPE_CHAR); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Character && bObj instanceof Character) { - Character a = (Character) aObj; - Character b = (Character) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Character[] createStorage(int size) { + return new Character[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Character ? 
24 : super.getMemory(obj); + public int compare(Character a, Character b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Character)) { - super.write(buff, obj); - return; - } + public int getMemory(Character obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Character obj) { buff.put((byte) TYPE_CHAR); - buff.putChar(((Character) obj).charValue()); + buff.putChar(obj); } @Override - public Object read(ByteBuffer buff, int tag) { - return Character.valueOf(buff.getChar()); + public Character read(ByteBuffer buff) { + return buff.getChar(); } + @Override + public Character read(ByteBuffer buff, int tag) { + return buff.getChar(); + } } /** * The type for short objects. */ - static class ShortType extends AutoDetectDataType { + static class ShortType extends AutoDetectDataType { - ShortType(ObjectDataType base) { - super(base, TYPE_SHORT); + /** + * The only instance of this type. + */ + static final ShortType INSTANCE = new ShortType(); + + private ShortType() { + super(TYPE_SHORT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Short && bObj instanceof Short) { - Short a = (Short) aObj; - Short b = (Short) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Short[] createStorage(int size) { + return new Short[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Short ? 
24 : super.getMemory(obj); + public int compare(Short a, Short b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Short)) { - super.write(buff, obj); - return; - } + public int getMemory(Short obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Short obj) { buff.put((byte) TYPE_SHORT); - buff.putShort(((Short) obj).shortValue()); + buff.putShort(obj); } @Override - public Object read(ByteBuffer buff, int tag) { - return Short.valueOf(buff.getShort()); + public Short read(ByteBuffer buff) { + return read(buff, buff.get()); } + @Override + public Short read(ByteBuffer buff, int tag) { + return buff.getShort(); + } } /** * The type for integer objects. */ - static class IntegerType extends AutoDetectDataType { + static class IntegerType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final IntegerType INSTANCE = new IntegerType(); - IntegerType(ObjectDataType base) { - super(base, TYPE_INT); + private IntegerType() { + super(TYPE_INT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Integer && bObj instanceof Integer) { - Integer a = (Integer) aObj; - Integer b = (Integer) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Integer[] createStorage(int size) { + return new Integer[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Integer ? 
24 : super.getMemory(obj); + public int compare(Integer a, Integer b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Integer)) { - super.write(buff, obj); - return; - } - int x = (Integer) obj; + public int getMemory(Integer obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Integer obj) { + int x = obj; if (x < 0) { // -Integer.MIN_VALUE is smaller than 0 if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_INT_MAX) { @@ -732,7 +741,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Integer read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Integer read(ByteBuffer buff, int tag) { switch (tag) { case TYPE_INT: return DataUtils.readVarInt(buff); @@ -743,40 +757,40 @@ public Object read(ByteBuffer buff, int tag) { } return tag - TAG_INTEGER_0_15; } - } /** * The type for long objects. */ - static class LongType extends AutoDetectDataType { + static class LongType extends AutoDetectDataType { - LongType(ObjectDataType base) { - super(base, TYPE_LONG); + /** + * The only instance of this type. + */ + static final LongType INSTANCE = new LongType(); + + private LongType() { + super(TYPE_LONG); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Long && bObj instanceof Long) { - Long a = (Long) aObj; - Long b = (Long) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Long[] createStorage(int size) { + return new Long[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Long ? 
30 : super.getMemory(obj); + public int compare(Long a, Long b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Long)) { - super.write(buff, obj); - return; - } - long x = (Long) obj; + public int getMemory(Long obj) { + return 30; + } + + @Override + public void write(WriteBuffer buff, Long obj) { + long x = obj; if (x < 0) { // -Long.MIN_VALUE is smaller than 0 if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_LONG_MAX) { @@ -798,7 +812,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Long read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Long read(ByteBuffer buff, int tag) { switch (tag) { case TYPE_LONG: return DataUtils.readVarLong(buff); @@ -807,42 +826,42 @@ public Object read(ByteBuffer buff, int tag) { case TAG_LONG_FIXED: return buff.getLong(); } - return Long.valueOf(tag - TAG_LONG_0_7); + return (long) (tag - TAG_LONG_0_7); } - } /** * The type for float objects. */ - static class FloatType extends AutoDetectDataType { + static class FloatType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final FloatType INSTANCE = new FloatType(); - FloatType(ObjectDataType base) { - super(base, TYPE_FLOAT); + private FloatType() { + super(TYPE_FLOAT); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Float && bObj instanceof Float) { - Float a = (Float) aObj; - Float b = (Float) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Float[] createStorage(int size) { + return new Float[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Float ? 
24 : super.getMemory(obj); + public int compare(Float a, Float b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Float)) { - super.write(buff, obj); - return; - } - float x = (Float) obj; + public int getMemory(Float obj) { + return 24; + } + + @Override + public void write(WriteBuffer buff, Float obj) { + float x = obj; int f = Float.floatToIntBits(x); if (f == ObjectDataType.FLOAT_ZERO_BITS) { buff.put((byte) TAG_FLOAT_0); @@ -859,7 +878,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Float read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Float read(ByteBuffer buff, int tag) { switch (tag) { case TAG_FLOAT_0: return 0f; @@ -877,34 +901,35 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for double objects. */ - static class DoubleType extends AutoDetectDataType { + static class DoubleType extends AutoDetectDataType { - DoubleType(ObjectDataType base) { - super(base, TYPE_DOUBLE); + /** + * The only instance of this type. + */ + static final DoubleType INSTANCE = new DoubleType(); + + private DoubleType() { + super(TYPE_DOUBLE); } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof Double && bObj instanceof Double) { - Double a = (Double) aObj; - Double b = (Double) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public Double[] createStorage(int size) { + return new Double[size]; } @Override - public int getMemory(Object obj) { - return obj instanceof Double ? 
30 : super.getMemory(obj); + public int compare(Double a, Double b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof Double)) { - super.write(buff, obj); - return; - } - double x = (Double) obj; + public int getMemory(Double obj) { + return 30; + } + + @Override + public void write(WriteBuffer buff, Double obj) { + double x = obj; long d = Double.doubleToLongBits(x); if (d == ObjectDataType.DOUBLE_ZERO_BITS) { buff.put((byte) TAG_DOUBLE_0); @@ -923,7 +948,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public Double read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Double read(ByteBuffer buff, int tag) { switch (tag) { case TAG_DOUBLE_0: return 0d; @@ -935,40 +965,39 @@ public Object read(ByteBuffer buff, int tag) { return Double.longBitsToDouble(Long.reverse(DataUtils .readVarLong(buff))); } - } /** * The type for BigInteger objects. */ - static class BigIntegerType extends AutoDetectDataType { + static class BigIntegerType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final BigIntegerType INSTANCE = new BigIntegerType(); - BigIntegerType(ObjectDataType base) { - super(base, TYPE_BIG_INTEGER); + private BigIntegerType() { + super(TYPE_BIG_INTEGER); } @Override - public int compare(Object aObj, Object bObj) { - if (isBigInteger(aObj) && isBigInteger(bObj)) { - BigInteger a = (BigInteger) aObj; - BigInteger b = (BigInteger) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public BigInteger[] createStorage(int size) { + return new BigInteger[size]; } @Override - public int getMemory(Object obj) { - return isBigInteger(obj) ? 
100 : super.getMemory(obj); + public int compare(BigInteger a, BigInteger b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isBigInteger(obj)) { - super.write(buff, obj); - return; - } - BigInteger x = (BigInteger) obj; + public int getMemory(BigInteger obj) { + return 100; + } + + @Override + public void write(WriteBuffer buff, BigInteger x) { if (BigInteger.ZERO.equals(x)) { buff.put((byte) TAG_BIG_INTEGER_0); } else if (BigInteger.ONE.equals(x)) { @@ -987,7 +1016,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public BigInteger read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public BigInteger read(ByteBuffer buff, int tag) { switch (tag) { case TAG_BIG_INTEGER_0: return BigInteger.ZERO; @@ -997,44 +1031,43 @@ public Object read(ByteBuffer buff, int tag) { return BigInteger.valueOf(DataUtils.readVarLong(buff)); } int len = DataUtils.readVarInt(buff); - byte[] bytes = DataUtils.newBytes(len); + byte[] bytes = Utils.newBytes(len); buff.get(bytes); return new BigInteger(bytes); } - } /** * The type for BigDecimal objects. */ - static class BigDecimalType extends AutoDetectDataType { + static class BigDecimalType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final BigDecimalType INSTANCE = new BigDecimalType(); - BigDecimalType(ObjectDataType base) { - super(base, TYPE_BIG_DECIMAL); + private BigDecimalType() { + super(TYPE_BIG_DECIMAL); } @Override - public int compare(Object aObj, Object bObj) { - if (isBigDecimal(aObj) && isBigDecimal(bObj)) { - BigDecimal a = (BigDecimal) aObj; - BigDecimal b = (BigDecimal) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public BigDecimal[] createStorage(int size) { + return new BigDecimal[size]; } @Override - public int getMemory(Object obj) { - return isBigDecimal(obj) ? 
150 : super.getMemory(obj); + public int compare(BigDecimal a, BigDecimal b) { + return a.compareTo(b); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isBigDecimal(obj)) { - super.write(buff, obj); - return; - } - BigDecimal x = (BigDecimal) obj; + public int getMemory(BigDecimal obj) { + return 150; + } + + @Override + public void write(WriteBuffer buff, BigDecimal x) { if (BigDecimal.ZERO.equals(x)) { buff.put((byte) TAG_BIG_DECIMAL_0); } else if (BigDecimal.ONE.equals(x)) { @@ -1060,7 +1093,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public BigDecimal read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public BigDecimal read(ByteBuffer buff, int tag) { switch (tag) { case TAG_BIG_DECIMAL_0: return BigDecimal.ZERO; @@ -1074,7 +1112,7 @@ public Object read(ByteBuffer buff, int tag) { } int scale = DataUtils.readVarInt(buff); int len = DataUtils.readVarInt(buff); - byte[] bytes = DataUtils.newBytes(len); + byte[] bytes = Utils.newBytes(len); buff.get(bytes); BigInteger b = new BigInteger(bytes); return new BigDecimal(b, scale); @@ -1085,35 +1123,34 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for string objects. */ - static class StringType extends AutoDetectDataType { + static class StringType extends AutoDetectDataType { - StringType(ObjectDataType base) { - super(base, TYPE_STRING); + /** + * The only instance of this type. 
+ */ + static final StringType INSTANCE = new StringType(); + + private StringType() { + super(TYPE_STRING); } @Override - public int getMemory(Object obj) { - if (!(obj instanceof String)) { - return super.getMemory(obj); - } - return 24 + 2 * obj.toString().length(); + public String[] createStorage(int size) { + return new String[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof String && bObj instanceof String) { - return aObj.toString().compareTo(bObj.toString()); - } - return super.compare(aObj, bObj); + public int getMemory(String obj) { + return 24 + 2 * obj.length(); } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof String)) { - super.write(buff, obj); - return; - } - String s = (String) obj; + public int compare(String aObj, String bObj) { + return aObj.compareTo(bObj); + } + + @Override + public void write(WriteBuffer buff, String s) { int len = s.length(); if (len <= 15) { buff.put((byte) (TAG_STRING_0_15 + len)); @@ -1124,7 +1161,12 @@ public void write(WriteBuffer buff, Object obj) { } @Override - public Object read(ByteBuffer buff, int tag) { + public String read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public String read(ByteBuffer buff, int tag) { int len; if (tag == TYPE_STRING) { len = DataUtils.readVarInt(buff); @@ -1139,41 +1181,46 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for UUID objects. */ - static class UUIDType extends AutoDetectDataType { + static class UUIDType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final UUIDType INSTANCE = new UUIDType(); - UUIDType(ObjectDataType base) { - super(base, TYPE_UUID); + private UUIDType() { + super(TYPE_UUID); } @Override - public int getMemory(Object obj) { - return obj instanceof UUID ? 
40 : super.getMemory(obj); + public UUID[] createStorage(int size) { + return new UUID[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (aObj instanceof UUID && bObj instanceof UUID) { - UUID a = (UUID) aObj; - UUID b = (UUID) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public int getMemory(UUID obj) { + return 40; } @Override - public void write(WriteBuffer buff, Object obj) { - if (!(obj instanceof UUID)) { - super.write(buff, obj); - return; - } + public int compare(UUID a, UUID b) { + return a.compareTo(b); + } + + @Override + public void write(WriteBuffer buff, UUID a) { buff.put((byte) TYPE_UUID); - UUID a = (UUID) obj; buff.putLong(a.getMostSignificantBits()); buff.putLong(a.getLeastSignificantBits()); } @Override - public Object read(ByteBuffer buff, int tag) { + public UUID read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public UUID read(ByteBuffer buff, int tag) { long a = buff.getLong(), b = buff.getLong(); return new UUID(a, b); } @@ -1183,40 +1230,45 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for java.util.Date objects. */ - static class DateType extends AutoDetectDataType { + static class DateType extends AutoDetectDataType { + + /** + * The only instance of this type. + */ + static final DateType INSTANCE = new DateType(); - DateType(ObjectDataType base) { - super(base, TYPE_DATE); + private DateType() { + super(TYPE_DATE); } @Override - public int getMemory(Object obj) { - return isDate(obj) ? 
40 : super.getMemory(obj); + public Date[] createStorage(int size) { + return new Date[size]; } @Override - public int compare(Object aObj, Object bObj) { - if (isDate(aObj) && isDate(bObj)) { - Date a = (Date) aObj; - Date b = (Date) bObj; - return a.compareTo(b); - } - return super.compare(aObj, bObj); + public int getMemory(Date obj) { + return 40; } @Override - public void write(WriteBuffer buff, Object obj) { - if (!isDate(obj)) { - super.write(buff, obj); - return; - } + public int compare(Date a, Date b) { + return a.compareTo(b); + } + + @Override + public void write(WriteBuffer buff, Date a) { buff.put((byte) TYPE_DATE); - Date a = (Date) obj; buff.putLong(a.getTime()); } @Override - public Object read(ByteBuffer buff, int tag) { + public Date read(ByteBuffer buff) { + return read(buff, buff.get()); + } + + @Override + public Date read(ByteBuffer buff, int tag) { long a = buff.getLong(); return new Date(a); } @@ -1226,12 +1278,16 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for object arrays. 
*/ - static class ObjectArrayType extends AutoDetectDataType { - + static class ObjectArrayType extends AutoDetectDataType { private final ObjectDataType elementType = new ObjectDataType(); - ObjectArrayType(ObjectDataType base) { - super(base, TYPE_ARRAY); + ObjectArrayType() { + super(TYPE_ARRAY); + } + + @Override + public Object[] createStorage(int size) { + return new Object[size]; } @Override @@ -1243,21 +1299,13 @@ public int getMemory(Object obj) { Class type = obj.getClass().getComponentType(); if (type.isPrimitive()) { int len = Array.getLength(obj); - if (type == boolean.class) { + if (type == boolean.class || type == byte.class) { size += len; - } else if (type == byte.class) { - size += len; - } else if (type == char.class) { - size += len * 2; - } else if (type == short.class) { + } else if (type == char.class || type == short.class) { size += len * 2; - } else if (type == int.class) { + } else if (type == int.class || type == float.class) { size += len * 4; - } else if (type == float.class) { - size += len * 4; - } else if (type == double.class) { - size += len * 8; - } else if (type == long.class) { + } else if (type == double.class || type == long.class) { size += len * 8; } } else { @@ -1283,8 +1331,8 @@ public int compare(Object aObj, Object bObj) { Class type = aObj.getClass().getComponentType(); Class bType = bObj.getClass().getComponentType(); if (type != bType) { - Integer classA = getCommonClassId(type); - Integer classB = getCommonClassId(bType); + Integer classA = Holder.getCommonClassId(type); + Integer classB = Holder.getCommonClassId(bType); if (classA != null) { if (classB != null) { return classA.compareTo(classB); @@ -1318,7 +1366,7 @@ public int compare(Object aObj, Object bObj) { } else if (type == int.class) { int a = ((int[]) aObj)[i]; int b = ((int[]) bObj)[i]; - x = a == b ? 0 : a < b ? 
-1 : 1; + x = Integer.compare(a, b); } else if (type == float.class) { x = Float.compare(((float[]) aObj)[i], ((float[]) bObj)[i]); @@ -1328,7 +1376,7 @@ public int compare(Object aObj, Object bObj) { } else { long a = ((long[]) aObj)[i]; long b = ((long[]) bObj)[i]; - x = a == b ? 0 : a < b ? -1 : 1; + x = Long.compare(a, b); } if (x != 0) { return x; @@ -1344,7 +1392,7 @@ public int compare(Object aObj, Object bObj) { } } } - return aLen == bLen ? 0 : aLen < bLen ? -1 : 1; + return Integer.compare(aLen, bLen); } @Override @@ -1354,7 +1402,7 @@ public void write(WriteBuffer buff, Object obj) { return; } Class type = obj.getClass().getComponentType(); - Integer classId = getCommonClassId(type); + Integer classId = Holder.getCommonClassId(type); if (classId != null) { if (type.isPrimitive()) { if (type == byte.class) { @@ -1406,12 +1454,17 @@ public void write(WriteBuffer buff, Object obj) { } } + @Override + public Object read(ByteBuffer buff) { + return read(buff, buff.get()); + } + @Override public Object read(ByteBuffer buff, int tag) { if (tag != TYPE_ARRAY) { byte[] data; int len = tag - TAG_BYTE_ARRAY_0_15; - data = DataUtils.newBytes(len); + data = Utils.newBytes(len); buff.get(data); return data; } @@ -1423,7 +1476,7 @@ public Object read(ByteBuffer buff, int tag) { try { clazz = Class.forName(componentType); } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_SERIALIZATION, "Could not get class {0}", componentType, e); } @@ -1434,7 +1487,7 @@ public Object read(ByteBuffer buff, int tag) { try { obj = Array.newInstance(clazz, len); } catch (Exception e) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_SERIALIZATION, "Could not create array of type {0} length {1}", clazz, len, e); @@ -1473,22 +1526,27 @@ public Object read(ByteBuffer buff, int tag) { /** * The type for serialized objects. 
*/ - static class SerializedObjectType extends AutoDetectDataType { + static class SerializedObjectType extends AutoDetectDataType { - private int averageSize = 10000; + private int averageSize = 10_000; SerializedObjectType(ObjectDataType base) { super(base, TYPE_SERIALIZED_OBJECT); } + @Override + public Object[] createStorage(int size) { + return new Object[size]; + } + @SuppressWarnings("unchecked") @Override public int compare(Object aObj, Object bObj) { if (aObj == bObj) { return 0; } - DataType ta = getType(aObj); - DataType tb = getType(bObj); + DataType ta = getType(aObj); + DataType tb = getType(bObj); if (ta != this || tb != this) { if (ta == tb) { return ta.compare(aObj, bObj); @@ -1514,7 +1572,7 @@ public int compare(Object aObj, Object bObj) { @Override public int getMemory(Object obj) { - DataType t = getType(obj); + DataType t = getType(obj); if (t == this) { return averageSize; } @@ -1523,7 +1581,7 @@ public int getMemory(Object obj) { @Override public void write(WriteBuffer buff, Object obj) { - DataType t = getType(obj); + DataType t = getType(obj); if (t != this) { t.write(buff, obj); return; @@ -1534,15 +1592,24 @@ public void write(WriteBuffer buff, Object obj) { int size = data.length * 2; // adjust the average size // using an exponential moving average - averageSize = (size + 15 * averageSize) / 16; + averageSize = (int) ((size + 15L * averageSize) / 16); buff.put((byte) TYPE_SERIALIZED_OBJECT).putVarInt(data.length) .put(data); } + @Override + public Object read(ByteBuffer buff) { + return read(buff, buff.get()); + } + @Override public Object read(ByteBuffer buff, int tag) { int len = DataUtils.readVarInt(buff); - byte[] data = DataUtils.newBytes(len); + byte[] data = Utils.newBytes(len); + int size = data.length * 2; + // adjust the average size + // using an exponential moving average + averageSize = (int) ((size + 15L * averageSize) / 16); buff.get(data); return deserialize(data); } diff --git 
a/h2/src/main/org/h2/mvstore/type/StatefulDataType.java b/h2/src/main/org/h2/mvstore/type/StatefulDataType.java new file mode 100644 index 0000000000..9a53c2cdda --- /dev/null +++ b/h2/src/main/org/h2/mvstore/type/StatefulDataType.java @@ -0,0 +1,47 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.mvstore.type; + +import java.nio.ByteBuffer; + +import org.h2.mvstore.WriteBuffer; + +/** + * A data type that allows to save its state. + * + * @param type of opaque parameter passed as an operational context to Factory.create() + * + * @author Andrei Tokar + */ +public interface StatefulDataType { + + /** + * Save the state. + * + * @param buff the target buffer + * @param metaType the meta type + */ + void save(WriteBuffer buff, MetaType metaType); + + Factory getFactory(); + + /** + * A factory for data types. + * + * @param the database type + */ + interface Factory { + /** + * Reads the data type. + * + * @param buff the buffer the source buffer + * @param metaDataType the type + * @param database the database + * @return the data type + */ + DataType create(ByteBuffer buff, MetaType metaDataType, D database); + } +} diff --git a/h2/src/main/org/h2/mvstore/type/StringDataType.java b/h2/src/main/org/h2/mvstore/type/StringDataType.java index 28a4fa2d19..63f907c90e 100644 --- a/h2/src/main/org/h2/mvstore/type/StringDataType.java +++ b/h2/src/main/org/h2/mvstore/type/StringDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.mvstore.type; @@ -12,46 +12,61 @@ /** * A string type. 
*/ -public class StringDataType implements DataType { +public class StringDataType extends BasicDataType { public static final StringDataType INSTANCE = new StringDataType(); + private static final String[] EMPTY_STRING_ARR = new String[0]; + @Override - public int compare(Object a, Object b) { - return a.toString().compareTo(b.toString()); + public String[] createStorage(int size) { + return size == 0 ? EMPTY_STRING_ARR : new String[size]; } @Override - public int getMemory(Object obj) { - return 24 + 2 * obj.toString().length(); + public int compare(String a, String b) { + return a.compareTo(b); } @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] = read(buff); + public int binarySearch(String key, Object storageObj, int size, int initialGuess) { + String[] storage = cast(storageObj); + int low = 0; + int high = size - 1; + // the cached index minus one, so that + // for the first time (when cachedCompare is 0), + // the default value is used + int x = initialGuess - 1; + if (x < 0 || x > high) { + x = high >>> 1; + } + while (low <= high) { + int compare = key.compareTo(storage[x]); + if (compare > 0) { + low = x + 1; + } else if (compare < 0) { + high = x - 1; + } else { + return x; + } + x = (low + high) >>> 1; } + return -(low + 1); } - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } + public int getMemory(String obj) { + return 24 + 2 * obj.length(); } @Override public String read(ByteBuffer buff) { - int len = DataUtils.readVarInt(buff); - return DataUtils.readString(buff, len); + return DataUtils.readString(buff); } @Override - public void write(WriteBuffer buff, Object obj) { - String s = obj.toString(); + public void write(WriteBuffer buff, String s) { int len = s.length(); buff.putVarInt(len).putStringData(s, len); } - } diff --git a/h2/src/main/org/h2/mvstore/type/package.html 
b/h2/src/main/org/h2/mvstore/type/package.html index 88e83b5d08..110f3d7863 100644 --- a/h2/src/main/org/h2/mvstore/type/package.html +++ b/h2/src/main/org/h2/mvstore/type/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/package.html b/h2/src/main/org/h2/package.html index ef4fc2bc3f..77e208421c 100644 --- a/h2/src/main/org/h2/package.html +++ b/h2/src/main/org/h2/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/res/_messages_cs.prop b/h2/src/main/org/h2/res/_messages_cs.prop index 105ed9b04d..f827d3dd88 100644 --- a/h2/src/main/org/h2/res/_messages_cs.prop +++ b/h2/src/main/org/h2/res/_messages_cs.prop @@ -5,10 +5,17 @@ 21S02=Počet sloupců nesouhlasí 22001=Příliš dlouhá hodnota pro sloupec {0}: {1} 22003=Číselná hodnota je mimo rozsah: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=Nelze zpracovat konstantu {0} {1} 22012=Dělení nulou: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Chyba při převodu dat {0} 22025=Chyba v LIKE escapování: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=Pro sloupec {0} není hodnota NULL povolena 23503=Nedodržení omezení referenční integrity: {0} 23505=Nedodržení unikátního indexu nebo primárního klíče: {0} @@ -20,23 +27,29 @@ 40001=Detekován deadlock. Probíhající transakce byla vrácena zpět. Podrobnosti: {0} 42000=Chyba syntaxe v SQL příkazu {0} 42001=Chyba syntaxe v SQL příkazu {0}; očekáváno {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. 
The maximum length is {1} 42S01=Tabulka {0} již existuje 42S02=Tabulka {0} nenalezena +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Index {0} již existuje 42S12=Index {0} nenalezen 42S21=Duplicitní název sloupce {0} 42S22=Sloupec {0} nenalezen -42S32=Nastavení {0} nenalezeno +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Příkaz byl zrušen nebo připojení vypršelo 90000=Funkce {0} musí vracet výsledek 90001=Metoda neumožňuje dotazování. Použijte execute nebo executeQuery namísto executeUpdate 90002=Metoda umožňuje pouze pro dotazování. Použijte execute nebo executeUpdate namísto executeQuery 90003=Hexadecimální řetězec s lichým počtem znaků: {0} +90005=#Invalid trigger flags: {0} 90004=Hexadecimální řetězec obsahuje neplatný znak: {0} 90006=#Sequence {0} has run out of numbers 90007=Tento objekt byl již uzavřen 90008=Neplatná hodnota {0} pro parametr {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 
90012=Parametr {0} není nastaven @@ -82,6 +95,7 @@ 90053=Skalární vnořený dotaz obsahuje více než jeden řádek 90054=Neplatné použití agregátní funkce {0} 90055=Nepodporované šifrování {0} +90056=#Function {0}: Invalid date format: {1} 90057=Omezení {0} nenalezeno 90058=Vkládání nebo vrácení změn není povoleno uvnitř triggeru 90059=Dvojsmyslný název sloupce {0} @@ -133,6 +147,7 @@ 90107=Nelze odstranit {0}, protože {1} na něm závisí 90108=Nedostatek paměti. 90109=Pohled {0} je neplatný: {1} +90110=#Values of types {0} and {1} are not comparable 90111=Chyba přístupu propojené tabulky s SQL příkazem {0}, příčina: {1} 90112=Řádek nebyl nalezen při pokusu o smazání z indexu {0} 90113=Nepodporované nastavení připojení {0} @@ -141,10 +156,10 @@ 90116=Definice tohoto druhu nejsou povoleny 90117=Vzdálené připojení není na tomto serveru povoleno, zkontrolujte volbu -tcpAllowOthers 90118=Nelze odstranit tabulku {0} -90119=Uživatelský datový typ {0} již existuje -90120=Uživatelský datový typ {0} nenalezen +90119=Doména {0} již existuje +90120=Doména {0} nenalezen 90121=Databáze byla již ukončena (pro deaktivaci automatického ukončení při zastavení virtuálního stroje přidejte parametr ";DB_CLOSE_ON_EXIT=FALSE" do URL databáze) -90122=Operace není podporována pro tabulku {0}, pokud na tabulku existují pohledy: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 
90123=Nelze vzájemně míchat indexované a neindexované parametry 90124=Soubor nenalezen: {0} 90125=Neplatná třída, očekáváno {0}, ale obdrženo {1} @@ -158,13 +173,28 @@ 90133=Nelze změnit nastavení {0}, pokud je již databáze otevřena 90134=Přístup ke třídě {0} byl odepřen 90135=Databáze je spuštěna ve vyhrazeném režimu; nelze otevřít další spojení -90136=Nepodporovaná podmínka vnějšího spojení: {0} +90136=#Window not found: {0} 90137=Lze přiřadit pouze proměnné, nikoli: {0} 90138=Neplatný název databáze: {0} 90139=Nenalezena veřejná statická Java metoda: {0} 90140=Vrácený výsledek je pouze pro čtení. Možná budete muset použít conn.createStatement(..., ResultSet.CONCUR_UPDATABLE). 90141=#Serializer cannot be changed because there is a data table: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Obecná chyba: {0} HY004=Neznámý datový typ: {0} HYC00=Vlastnost není podporována: {0} diff --git a/h2/src/main/org/h2/res/_messages_de.prop 
b/h2/src/main/org/h2/res/_messages_de.prop index 4429851d06..f91951e045 100644 --- a/h2/src/main/org/h2/res/_messages_de.prop +++ b/h2/src/main/org/h2/res/_messages_de.prop @@ -3,12 +3,19 @@ 07001=Ungültige Anzahl Parameter für {0}, erwartet: {1} 08000=Fehler beim Öffnen der Datenbank: {0} 21S02=Anzahl der Felder stimmt nicht überein -22001=Wert zu gross / lang für Feld {0}: {1} -22003=Zahlenwert ausserhalb des Bereichs: {0} +22001=Wert zu groß / lang für Feld {0}: {1} +22003=Numerischer Wert außerhalb des Bereichs: {0} +22004=Numerischer Wert außerhalb des Bereichs: {0} in Feld {1} 22007=Kann {0} {1} nicht umwandeln 22012=Division durch 0: {0} +22013=Ungültige PRECEDING oder FOLLOWING Größe in Window-Funktion: {0} 22018=Datenumwandlungsfehler beim Umwandeln von {0} 22025=Fehler in LIKE ESCAPE: {0} +2202E=Fehlerhaftes Array-Element: {0}, erwartet: {1} +22030=Wert nicht erlaubt für Feld {0}: {1} +22031=Wert nicht Teil der Aufzählung {0}: {1} +22032=Leere Aufzählungen sind nicht erlaubt +22033=Doppelte Nennungen sind nicht erlaubt für Aufzählungstypen: {0} 23502=NULL nicht zulässig für Feld {0} 23503=Referentielle Integrität verletzt: {0} 23505=Eindeutiger Index oder Primärschlüssel verletzt: {0} @@ -16,29 +23,35 @@ 23507=Kein Vorgabewert für Feld {0} 23513=Bedingung verletzt: {0} 23514=Ungültige Bedingung: {0} -28000=Falscher Benutzer Name oder Passwort +28000=Falscher Benutzername oder Passwort 40001=Eine Verklemmung (Deadlock) ist aufgetreten. Die aktuelle Transaktion wurde rückgängig gemacht. Details: {0} 42000=Syntax Fehler in SQL Befehl {0} 42001=Syntax Fehler in SQL Befehl {0}; erwartet {1} +42602=Ungültiger Name {0} +42622=Der Name mit {0} beginnt ist zu lang. 
Die maximale Länge beträgt {1} 42S01=Tabelle {0} besteht bereits 42S02=Tabelle {0} nicht gefunden +42S03=Tabelle {0} nicht gefunden (mögliche Kandidaten: {1}) +42S04=Tabelle {0} nicht gefunden (diese Datenbank ist leer) 42S11=Index {0} besteht bereits 42S12=Index {0} nicht gefunden 42S21=Doppelter Feldname {0} 42S22=Feld {0} nicht gefunden -42S32=Einstellung {0} nicht gefunden +42S31=Es sollten identische Ausdrücke verwendet werden; erwartet {0}, tatsächlich {1} +54011=Zu viele Felder definiert. Maximale Anzahl von Felder: {0} 57014=Befehl wurde abgebrochen oder das Session-Timeout ist abgelaufen 90000=Funktion {0} muss Zeilen zurückgeben 90001=Methode nicht zulässig für eine Abfrage. Erlaubt sind execute oder executeQuery, nicht jedoch executeUpdate -90002=Methode nur zulässig for eine Abfrage. Erlaubt sind execute oder executeUpdate, nicht jedoch executeQuery +90002=Methode nur zulässig für eine Abfrage. Erlaubt sind execute oder executeUpdate, nicht jedoch executeQuery 90003=Hexadezimal Zahl mit einer ungeraden Anzahl Zeichen: {0} 90004=Hexadezimal Zahl enthält unerlaubtes Zeichen: {0} +90005=Ungültige Triggeroptionen: {0} 90006=Die Sequenz {0} hat keine freien Nummern mehr 90007=Das Objekt wurde bereits geschlossen 90008=Unerlaubter Wert {0} für Parameter {1} -90009=Kann die Sequenz {0} nicht ändern aufgrund falscher Attribute (Start-Wert {1}, Minimal-Wert {2}, Maximal-Wert {3}, Inkrement {4}) +90009=Kann die Sequenz {0} nicht ändern aufgrund falscher Attribute (Basiswert {1}, Start-Wert {2}, Minimal-Wert {3}, Maximal-Wert {4}, Inkrement {5}, Cachegröße {6}) 90010=Ungültiges TO_CHAR Format {0} -90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. +90011=Ein implizit relativer Pfad zum Arbeitsverzeichnis ist nicht erlaubt in der Datenbank URL {0}. Bitte absolute Pfade, ~/name, ./name, oder baseDir verwenden. 
90012=Parameter {0} wurde nicht gesetzt 90013=Datenbank {0} nicht gefunden 90014=Fehler beim Parsen von {0} @@ -50,11 +63,11 @@ 90020=Datenbank wird wahrscheinlich bereits benutzt: {0}. Mögliche Lösungen: alle Verbindungen schliessen; Server Modus verwenden 90021=Diese Kombination von Einstellungen wird nicht unterstützt {0} 90022=Funktion {0} nicht gefunden -90023=Feld {0} darf nicht NULL nicht erlauben +90023=Feld {0} darf nicht nullable sein 90024=Fehler beim Umbenennen der Datei {0} nach {1} 90025=Kann Datei {0} nicht löschen 90026=Serialisierung fehlgeschlagen, Grund: {0} -90027=De-Serialisierung fehlgeschlagen, Grund: {1} +90027=De-Serialisierung fehlgeschlagen, Grund: {0} 90028=Eingabe/Ausgabe Fehler: {0} 90029=Im Moment nicht auf einer veränderbaren Zeile 90030=Datei fehlerhaft beim Lesen des Datensatzes: {0}. Mögliche Lösung: Recovery Werkzeug verwenden @@ -66,7 +79,7 @@ 90036=Sequenz {0} nicht gefunden 90037=View {0} nicht gefunden 90038=View {0} besteht bereits -90039=#This CLOB or BLOB reference timed out: {0} +90039=Diese CLOB oder BLOB Reference ist abgelaufen: {0} 90040=Für diese Operation werden Administrator-Rechte benötigt 90041=Trigger {0} besteht bereits 90042=Trigger {0} nicht gefunden @@ -74,14 +87,15 @@ 90044=Fehler beim Ausführen des Triggers {0}, Klasse {1}, Grund: {1}; siehe Ursache für Details 90045=Bedingung {0} besteht bereits 90046=URL Format Fehler; erwartet {0}, erhalten {1} -90047=Falsche Version, Treiber Version ist {0}, Server Version ist {1} +90047=Falsche Version, Treiberversion ist {0}, Serverversion ist {1} 90048=Datenbank Datei Version wird nicht unterstützt oder ungültiger Dateikopf in Datei {0} 90049=Verschlüsselungsfehler in Datei {0} -90050=Falsches Passwort Format, benötigt wird: Datei-Passwort Benutzer-Passwort +90050=Falsches Passwortformat, benötigt wird: Datei-Passwort Benutzer-Passwort 90052=Unterabfrage gibt mehr als eine Feld zurück 90053=Skalar-Unterabfrage enthält mehr als eine Zeile 90054=Ungültige Verwendung 
der Aggregat Funktion {0} 90055=Chiffre nicht unterstützt: {0} +90056=Funktion {0}: Ungültiges Datums-Format: {1} 90057=Bedingung {0} nicht gefunden 90058=Innerhalb eines Triggers sind Commit und Rollback ist nicht erlaubt 90059=Mehrdeutiger Feldname {0} @@ -93,7 +107,7 @@ 90065=Savepoint hat einen Namen 90066=Doppeltes Merkmahl {0} 90067=Verbindung ist unterbrochen: {0} -90068=Sortier-Ausdruck {0} muss in diesem Fall im Resultat vorkommen +90068=Sortierausdruck {0} muss in diesem Fall im Resultat vorkommen 90069=Rolle {0} besteht bereits 90070=Rolle {0} nicht gefunden 90071=Benutzer or Rolle {0} nicht gefunden @@ -105,7 +119,7 @@ 90077=Funktions-Alias {0} nicht gefunden 90078=Schema {0} besteht bereits 90079=Schema {0} nicht gefunden -90080=Schema Namen müssen übereinstimmen +90080=Schemanamen müssen übereinstimmen 90081=Feld {0} enthält NULL Werte 90082=Sequenz {0} gehört zu einer Tabelle 90083=Feld wird referenziert durch {0} @@ -118,7 +132,7 @@ 90090=Schema {0} kann nicht gelöscht werden 90091=Rolle {0} kann nicht gelöscht werden 90093=Clustering Fehler - Datenbank läuft bereits im autonomen Modus -90094=Clustering Fehler - Datenbank läuft bereits im Cluster Modus, Serverliste: {0} +90094=Clustering Fehler - Datenbank läuft bereits im Cluster-Modus, Serverliste: {0} 90095=Textformat Fehler: {0} 90096=Nicht genug Rechte für Objekt {0} 90097=Die Datenbank ist schreibgeschützt @@ -127,12 +141,13 @@ 90101=Falsches XID Format: {0} 90102=Datenkompressions-Option nicht unterstützt: {0} 90103=Datenkompressions-Algorithmus nicht unterstützt: {0} -90104=Datenkompressions Fehler +90104=Datenkompressions-Fehler 90105=Fehler beim Aufruf eine benutzerdefinierten Funktion: {0} 90106=Kann {0} nicht zurücksetzen per TRUNCATE 90107=Kann {0} nicht löschen weil {1} davon abhängt 90108=Nicht genug Hauptspeicher. 
90109=View {0} ist ungültig: {1} +90110=Werte des Typs {0} und {1} sind nicht vergleichbar 90111=Fehler beim Zugriff auf eine verknüpfte Tabelle mit SQL Befehl {0}, Grund: {1} 90112=Zeile nicht gefunden beim Löschen von Index {0} 90113=Datenbank-Verbindungs Option {0} nicht unterstützt @@ -141,10 +156,10 @@ 90116=Literal dieser Art nicht zugelassen 90117=Verbindungen von anderen Rechnern sind nicht freigegeben, siehe -tcpAllowOthers 90118=Kann Tabelle nicht löschen {0} -90119=Benutzer-Datentyp {0} besteht bereits -90120=Benutzer-Datentyp {0} nicht gefunden +90119=Domäne {0} besteht bereits +90120=Domäne {0} nicht gefunden 90121=Die Datenbank wurde bereits geschlossen (um das automatische Schliessen beim Stopp der VM zu deaktivieren, die Datenbank URL mit ";DB_CLOSE_ON_EXIT=FALSE" ergänzen) -90122=Funktion nicht unterstützt für Tabelle {0} wenn Views auf die Tabelle vorhanden sind: {1} +90122=Der WITH TIES Ausdruck ist ohne zugehörigem ORDER BY Ausdruck nicht erlaubt. 90123=Kann nicht indizierte und nicht indizierte Parameter mischen 90124=Datei nicht gefunden: {0} 90125=Ungültig Klasse, erwartet {0} erhalten {1} @@ -158,13 +173,28 @@ 90133=Kann das Setting {0} nicht ändern wenn die Datenbank bereits geöffnet ist 90134=Der Zugriff auf die Klasse {0} ist nicht erlaubt 90135=Die Datenbank befindet sich im Exclusiv Modus; es können keine zusätzlichen Verbindungen geöffnet werden -90136=Diese Outer Join Bedingung wird nicht unterstützt: {0} +90136=Bereich (Window) nicht gefunden: {0} 90137=Werte können nur einer Variablen zugewiesen werden, nicht an: {0} -90138=Ungültiger Datenbank Name: {0} -90139=Die (public static) Java Funktion wurde nicht gefunden: {0} +90138=Ungültiger Datenbankname: {0} +90139=Die (public static) Java-Funktion wurde nicht gefunden: {0} 90140=Die Resultat-Zeilen können nicht verändert werden. Mögliche Lösung: conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=Serialisierer kann nicht geändert werden wenn eine Daten-Tabelle existiert: {0} -90142=#Step size must not be zero +90142=Schrittgröße darf nicht 0 sein +90143=Zeile {1} nicht gefunden im Primärschlüssel {0} +90144=Authenticator ist für die Datenbank {0} nicht aktiviert +90145=FOR UPDATE ist in einem DISTINCT oder gruppiertem Select nicht erlaubt +90146=Datenbank {0} nicht gefunden und IFEXISTS=true, daher können wir sie nicht automatisch anlegen +90147=Methode {0} ist nicht erlaubt, wenn sich die Verbindung im auto-commit Modus befindet +90148=Der aktuelle Wert der Sequenz {0} ist in dieser Session noch nicht definiert +90149=Datenbank {0} nicht gefunden. Entweder legen Sie sie an oder erlauben das Anlegen einer Datenbank aus der Ferne (nicht empfohlen in sicherheitsrelevanten Umgebungen) +90150=Genauigkeit ({0}) muss zwischen {1} und {2} inklusive liegen +90151=Genauigkeit von Skalierung oder anteiligen Sekunden ({0}) muss zwischen {1} und {2} inklusive liegen +90152=Referentielle Integrität {0} wird von referentieller Integrität {1} genutzt +90153=Spalte {0} bezieht sich auf nicht vergleichbare Spalte {1} +90154=Erzeugte Spalte {0} kann nicht zugewiesen werden +90155=Erzeugte Spalte {0} kann nicht durch eine referentielle Integrität mit dem Ausdruck {1} veränderbar sein +90156=Spalten-Alias ist nicht für den Audruck {0} angegeben +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Allgemeiner Fehler: {0} HY004=Unbekannter Datentyp: {0} HYC00=Dieses Feature wird nicht unterstützt: {0} diff --git a/h2/src/main/org/h2/res/_messages_en.prop b/h2/src/main/org/h2/res/_messages_en.prop index 21ee868385..85844f6d1e 100644 --- a/h2/src/main/org/h2/res/_messages_en.prop +++ b/h2/src/main/org/h2/res/_messages_en.prop @@ -5,10 +5,17 @@ 21S02=Column count does not match 22001=Value too long for column {0}: {1} 22003=Numeric value out of range: {0} +22004=Numeric value out of range: {0} in column {1} 22007=Cannot parse {0} constant {1} 
22012=Division by zero: {0} +22013=Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Data conversion error converting {0} 22025=Error in LIKE ESCAPE: {0} +2202E=Array element error: {0}, expected {1} +22030=Value not permitted for column {0}: {1} +22031=Value not a member of enumerators {0}: {1} +22032=Empty enums are not allowed +22033=Duplicate enumerators are not allowed for enum types: {0} 23502=NULL not allowed for column {0} 23503=Referential integrity constraint violation: {0} 23505=Unique index or primary key violation: {0} @@ -20,23 +27,29 @@ 40001=Deadlock detected. The current transaction was rolled back. Details: {0} 42000=Syntax error in SQL statement {0} 42001=Syntax error in SQL statement {0}; expected {1} +42602=Invalid name {0} +42622=The name that starts with {0} is too long. The maximum length is {1} 42S01=Table {0} already exists 42S02=Table {0} not found +42S03=Table {0} not found (candidates are: {1}) +42S04=Table {0} not found (this database is empty) 42S11=Index {0} already exists 42S12=Index {0} not found 42S21=Duplicate column name {0} 42S22=Column {0} not found -42S32=Setting {0} not found +42S31=Identical expressions should be used; expected {0}, found {1} +54011=Too many columns. The maximum count is {0} 57014=Statement was canceled or the session timed out 90000=Function {0} must return a result set 90001=Method is not allowed for a query. Use execute or executeQuery instead of executeUpdate 90002=Method is only allowed for a query. 
Use execute or executeUpdate instead of executeQuery 90003=Hexadecimal string with odd number of characters: {0} 90004=Hexadecimal string contains non-hex character: {0} +90005=Invalid trigger flags: {0} 90006=Sequence {0} has run out of numbers 90007=The object is already closed 90008=Invalid value {0} for parameter {1} -90009=Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=Invalid TO_CHAR format {0} 90011=A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parameter {0} is not set @@ -82,6 +95,7 @@ 90053=Scalar subquery contains more than one row 90054=Invalid use of aggregate function {0} 90055=Unsupported cipher {0} +90056=Function {0}: Invalid date format: {1} 90057=Constraint {0} not found 90058=Commit or rollback is not allowed within a trigger 90059=Ambiguous column name {0} @@ -133,6 +147,7 @@ 90107=Cannot drop {0} because {1} depends on it 90108=Out of memory. 
90109=View {0} is invalid: {1} +90110=Values of types {0} and {1} are not comparable 90111=Error accessing linked table with SQL statement {0}, cause: {1} 90112=Row not found when trying to delete from index {0} 90113=Unsupported connection setting {0} @@ -141,10 +156,10 @@ 90116=Literals of this kind are not allowed 90117=Remote connections to this server are not allowed, see -tcpAllowOthers 90118=Cannot drop table {0} -90119=User data type {0} already exists -90120=User data type {0} not found +90119=Domain {0} already exists +90120=Domain {0} not found 90121=Database is already closed (to disable automatic closing at VM shutdown, add ";DB_CLOSE_ON_EXIT=FALSE" to the db URL) -90122=Operation not supported for table {0} when there are views on the table: {1} +90122=The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=Cannot mix indexed and non-indexed parameters 90124=File not found: {0} 90125=Invalid class, expected {0} but got {1} @@ -158,13 +173,28 @@ 90133=Cannot change the setting {0} when the database is already open 90134=Access to the class {0} is denied 90135=The database is open in exclusive mode; can not open additional connections -90136=Unsupported outer join condition: {0} +90136=Window not found: {0} 90137=Can only assign to a variable, not to: {0} 90138=Invalid database name: {0} 90139=The public static Java method was not found: {0} 90140=The result set is readonly. You may need to use conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=Serializer cannot be changed because there is a data table: {0} 90142=Step size must not be zero +90143=Row {1} not found in primary index {0} +90144=Authenticator not enabled on database {0} +90145=FOR UPDATE is not allowed in DISTINCT or grouped select +90146=Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=Method {0} is not allowed when connection is in auto-commit mode +90148=Current value of sequence {0} is not yet defined in this session +90149=Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=Precision ({0}) must be between {1} and {2} inclusive +90151=Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=Constraint {0} is used by constraint {1} +90153=Column {0} references uncomparable column {1} +90154=Generated column {0} cannot be assigned +90155=Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=Column alias is not specified for expression {0} +90157=Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=General error: {0} HY004=Unknown data type: {0} HYC00=Feature not supported: {0} diff --git a/h2/src/main/org/h2/res/_messages_es.prop b/h2/src/main/org/h2/res/_messages_es.prop index f5e9d848d9..50089a49b0 100644 --- a/h2/src/main/org/h2/res/_messages_es.prop +++ b/h2/src/main/org/h2/res/_messages_es.prop @@ -5,10 +5,17 @@ 21S02=La cantidad de columnas no coincide 22001=Valor demasiado largo para la columna {0}: {1} 22003=Valor numerico fuera de rango: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=Imposible interpretar la constante {0} {1} 22012=División por cero: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Conversión de datos fallida, convirtiendo {0} 22025=Error en LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} +22030=Valor no permitido para la columna {0}: {1} +22031=#Value not a 
member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=La columna {0} no permite valores nulos (NULL) 23503=Violación de una restricción de Integridad Referencial: {0} 23505=Violación de indice de Unicidad ó Clave primaria: {0} @@ -20,23 +27,29 @@ 40001=Deadlock - Punto muerto detectado. La transacción actual fue retrotraída (rollback). Detalles: {0} 42000=Error de Sintaxis en sentencia SQL {0} 42001=Error de Sintaxis en sentencia SQL {0}; se esperaba {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabla {0} ya existe 42S02=Tabla {0} no encontrada +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Indice {0} ya existe 42S12=Indice {0} no encontrado 42S21=Nombre de columna Duplicada {0} 42S22=Columna {0} no encontrada -42S32=Setting {0} no encontrado +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Ls sentencia fue cancelado ó la sesión expiró por tiempo vencido 90000=Función {0} debe devolver un set de resultados (ResultSet) 90001=Metodo no permitido en un query. Use execute ó executeQuery en lugar de executeUpdate 90002=Metodo permitido unicamente en un query. 
Use execute ó executeUpdate en lugar de executeQuery 90003=Cadena Hexadecimal con cantidad impar de caracteres: {0} 90004=Cadena Hexadecimal contiene caracteres invalidos: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=El objeto ya está cerrado 90008=Valor Invalido {0} para el parametro {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parametro {0} no está fijado @@ -82,6 +95,7 @@ 90053=El Subquery escalar contiene mas de una fila 90054=Uso Invalido de la función de columna agregada {0} 90055=Cipher No soportado {0} +90056=Function {0}: Invalid date format: {1} 90057=Constraint {0} no encontrado 90058=Commit ó rollback no permitido dentro de un trigger 90059=Nombre de columna ambigua {0} @@ -112,7 +126,7 @@ 90084=Imposible eliminar la ultima columna {0} 90085=Index {0} pertenece a un constraint {1} 90086=Class {0} no encontrada -90087=Method {0} no encontrado +90087=#Method {0} not found 90088=Modo desconocido {0} 90089=Collation no puede ser cambiado debido a que existe una tabla de datos: {0} 90090=Schema {0} no puede ser eliminado @@ -133,6 +147,7 @@ 90107=Imposible eliminar {0} debido a que {1} depende de él. 90108=Memoria Insuficiente - Out of memory. 
Tamaño: {0} 90109=La Vista {0} es invalida: {1} +90110=#Values of types {0} and {1} are not comparable 90111=Error accediendo Linked Table con sentencia SQL {0}, causa: {1} 90112=Fila no encontrada mientras se intentaba borrar del indice {0} 90113=Parametro de conexión No soportado {0} @@ -141,10 +156,10 @@ 90116=Literales de este tipo no estan permitidos 90117=Este server no permite Conexiones Remotas, vea -tcpAllowOthers 90118=Imposible eliminar tabla {0} -90119=Tipo de dato de usuario {0} ya existe -90120=Tipo de dato de usuario {0} no encontrado +90119=Dominio {0} ya existe +90120=Dominio {0} no encontrado 90121=La base de datos ya esta cerrada (para des-habilitar el cerrado automatico durante el shutdown de la VM, agregue ";DB_CLOSE_ON_EXIT=FALSE" a la URL de conexión) -90122=Operación no soportada para la tabla {0} cuando existen vistas sobre la tabla: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=No se puede mezclar parametros indexados y no-indexados 90124=Archivo no encontrado: {0} 90125=Clase Invalida, se esperaba {0} pero se obtuvo {1} @@ -158,13 +173,28 @@ 90133=No puede cambiar el setting {0} cuando la base de datos esta abierta 90134=Acceso denegado a la clase {0} 90135=La base de datos esta abierta en modo EXCLUSIVO; no puede abrir conexiones adicionales -90136=Condición No soportada en Outer join : {0} +90136=#Window not found: {0} 90137=Solo puede asignarse a una variable, no a: {0} 90138=Nombre de base de datos Invalido: {0} 90139=El metodo Java (publico y estatico) : {0} no fue encontrado 90140=El conjunto de resultados es de solo lectura. Puede ser necesario usar conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=#Serializer cannot be changed because there is a data table: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Error General : {0} HY004=Tipo de dato desconocido : {0} HYC00=Caracteristica no soportada: {0} diff --git a/h2/src/main/org/h2/res/_messages_fr.prop b/h2/src/main/org/h2/res/_messages_fr.prop new file mode 100644 index 0000000000..69671ba7fe --- /dev/null +++ b/h2/src/main/org/h2/res/_messages_fr.prop @@ -0,0 +1,201 @@ +.translator=Xavier Bouclet +02000=Aucune donnée disponible +07001=Nombre de paramètre invalide pour {0}, nombre de paramètre attendu: {1} +08000=Une erreur est survenue lors de l''ouverture de la base de données: {0} +21S02=Le nombre de colonnes ne correspond pas +22001=Valeur trop longue pour la colonne {0}: {1} +22003=Valeur numérique hors de portée: {0} +22004=#Numeric value out of range: {0} in column {1} +22007=Impossible d''analyser {0} constante {1} +22012=Division par zéro: {0} +22013=#Invalid PRECEDING or 
FOLLOWING size in window function: {0} +22018=Erreur lors de la conversion de données {0} +22025=Erreur dans LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} +22030=Valeur non permise pour la colonne {0}: {1} +22031=La valeur n''est pas un membre de l''énumération {0}: {1} +22032=Les enums vides ne sont pas permis +22033=Les valeurs énumérées en double ne sont pas autorisées pour les types énumérés: {0} +23502=NULL non permis pour la colonne {0} +23503=Intégrité référentielle violation de contrainte: {0} +23505=Violation d''index unique ou clé primaire: {0} +23506=Intégrité référentielle violation de contrainte: {0} +23507=Pas de valeur par défaut initialisée pour la colonne {0} +23513=Vérifiez la violation de contrainte: {0} +23514=Vérifiez la contrainte invalide: {0} +28000=Mauvais nom d''utilisateur ou mot de passe +40001=Deadlock détecté. La transaction courante a été annulée. Détails: {0} +42000=Erreur de syntaxe dans l''instruction SQL {0} +42001=Erreur de syntaxe dans l''instruction SQL {0}; attendu {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} +42S01=La table {0} existe déjà +42S02=Table {0} non trouvée +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) +42S11=L''index {0} existe déjà +42S12=Index {0} non trouvé +42S21=Duplication du nom de colonnes {0} +42S22=Colonne {0} non trouvée +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} +57014=L''instruction a été annulée ou la session a expiré +90000=La fonction {0} doit retourner un résultat +90001=Méthode non autorisée pour une requête. Utilisez execute ou executeQuery à la place d''executeUpdate +90002=Méthode est autorisée uniquement pour une requête. 
Utilisez execute ou executeUpdate à la place d''executeQuery +90003=Chaîne héxadecimale contenant un nombre impair de caractères: {0} +90004=Chaîne héxadecimale contenant un caractère non-héxa: {0} +90005=#Invalid trigger flags: {0} +90006=La séquence {0} a épuisé ses éléments +90007=L''objet est déjà fermé +90008=Valeur invalide {0} pour le paramètre {1} +90009=Impossible de créer ou modifier la séquence {0} car les attributs sont invalides (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) +90010=Format invalide TO_CHAR {0} +90011=Un chemin de fichier implicitement relatif au répertoire de travail actuel n''est pas autorisé dans l''URL de la base de données {0}. Utilisez un chemin absolu, ~/nom, ./nom ou le paramètre baseDir à la place. +90012=Le paramètre {0} n''est pas initialisé +90013=Base de données {0} non trouvée +90014=Analyse d''erreur {0} +90015=SUM ou AVG sur le mauvais type de données pour {0} +90016=La colonne {0} doit être dans la liste du GROUP BY +90017=Tentative de définir une seconde clé primaire +90018=La connexion n''a pas été fermée et a été récupérée par le ramasse miette. +90019=Impossible de supprimer l''utilisateur actuel +90020=La base de données est peut-être en cours d''utilisation: {0}. Solutions possibles: fermer toutes les autres connexions; utilisez le mode serveur +90021=Cette combinaison de paramètres de base de données n''est pas supportée: {0} +90022=La fonction {0} n''a pas été trouvée +90023=La colonne {0} ne doit pas être nulle +90024=Erreur lors du renommage du fichier {0} vers {1} +90025=Impossible de supprimer le fichier {0} +90026=La sérialisation a échoué, cause: {0} +90027=La désérialisation a échoué, cause: {0} +90028=IO Exception: {0} +90029=Actuellement sur une ligne non actualisable +90030=Fichier corrompu lors de la lecture de l''enregistrement: {0}. 
Solution possible: utiliser l''outil de récupération +90031=IO Exception: {0}; {1} +90032=Utilisateur {0} non trouvé +90033=L''utilisateur {0} existe déjà +90034=Erreur du fichier journal: {0}, cause: {1} +90035=La séquence {0} existe déjà +90036=Séquence {0} non trouvée +90037=Vue {0} non trouvée +90038=La vue {0} existe déjà +90039=La référence CLOB ou BLOB a expiré: {0} +90040=Les droits admins sont requis pour cette opération +90041=Le trigger {0} existe déjà +90042=Trigger {0} non trouvé +90043=Erreur lors de la création ou l''initialisation du trigger {0} object, class {1}, cause: {2}; voir la racine de l''erreur pour les détails +90044=Erreur lors de l''exécution du trigger {0}, class {1}, cause : {2}; voir la racine de l''erreur pour les détails +90045=La contrainte {0} existe déjà +90046=Erreur dans le format de l''URL; doit être {0} mais est {1} +90047=Version non correspondante, la version du driver est {0} mais la version du serveur est {1} +90048=Version de fichier de base de données non supportée ou entête de fichier invalide dans le fichier {0} +90049=Erreur de cryptage dans le fichier {0} +90050=Mauvais format de mot de passe, doit être: mot de passe du fichier mot de passe de l''utilisateur +90052=La sous requête n''est pas une requête sur une seule colonne +90053=La sous-requête scalaire contient plus d''une rangée +90054=Utilisation invalide de la fonction agrégée {0} +90055=Chiffrement non pris en charge {0} +90056=Fonction {0}: Format de date invalide: {1} +90057=Contrainte {0} non trouvée +90058=Commit ou rollback n''est pas autorisé à l''intérieur d''un trigger +90059=Nom de colonne ambigu {0} +90060=Méthode de verrouillage de fichier non prise en charge {0} +90061=Exception à l''ouverture du port {0} (le port est peut-être en cours d''utilisation), cause: {1} +90062=Erreur lors de la création du fichier {0} +90063=Le point de sauvegarde est invalide: {0} +90064=Le point de sauvegarde est sans nom +90065=Le point de sauvegarde est nommé 
+90066=Propriété dupliquée {0} +90067=La connexion est cassée: {0} +90068=L''expression Order by {0} doit être dans ce cas dans la liste des résultats +90069=Le rôle {0} existe déjà +90070=Rôle {0} non trouvé +90071=Utilisateur ou rôle {0} non trouvé +90072=Les rôles et les droits ne peuvent être mélangés +90073=Les méthodes Java correspondantes doivent avoir un nombre de paramètres différents: {0} et {1} +90074=Le rôle {0} est déjà accordé +90075=La colonne fait partie de l''index {0} +90076=L''alias de fonction {0} existe déjà +90077=Alias de fonction {0} non trouvé +90078=Le schéma {0} existe déjà +90079=Schéma {0} non trouvé +90080=Le nom de schéma doit correspondre +90081=La colonne {0} contient des valeurs nulles +90082=La séquence {0} appartient à une table +90083=La colonne doit être référencée par {0} +90084=Impossible de supprimer la dernière colonne {0} +90085=L''index {0} appartient à la contrainte {1} +90086=Classe {0} non trouvée +90087=Méthode {0} non trouvée +90088=Mode inconnu {0} +90089=La collation ne peut pas être changée parce qu''il y a des données dans la table: {0} +90090=Le schéma {0} ne peut pas être supprimé +90091=Le rôle {0} ne peut pas être supprimé +90093=Erreur de clustering - la base de données s''exécute actuellement en mode autonome +90094=Erreur de clustering - la base de données s''exécute actuellement en mode cluster, liste de serveurs: {0} +90095=Erreur de format de chaîne: {0} +90096=Pas assez de droit pour l''objet {0} +90097=La base de données est en lecture seule +90098=La base de données a été fermée +90099=Erreur lors du paramétrage de l''auditeur d''événements de la base de données {0}, cause: {1} +90101=Mauvais format XID: {0} +90102=Options de compression non supportées: {0} +90103=Algorithme de compression non supporté: {0} +90104=Erreur de compression +90105=Exception lors de l''appel de la fonction définie par l''utilisateur: {0} +90106=Impossible de tronquer {0} +90107=Impossible de supprimer {0} car {1} dépend de 
lui +90108=Mémoire insuffisante. +90109=La vue {0} est invalide: {1} +90110=#Values of types {0} and {1} are not comparable +90111=Erreur lors de l''accès à la table liée à l''aide de l''instruction SQL {0}, cause: {1} +90112=Ligne non trouvée lors de la tentative de suppression à partir de l''index {0} +90113=Paramétrage de connexion non pris en charge {0} +90114=La constante {0} existe déjà +90115=Constante {0} non trouvée +90116=Les littérals de ce type ne sont pas permis +90117=Les connexions à distance à ce serveur ne sont pas autorisées, voir -tcpAllowOthers +90118=Impossible de supprimer la table {0} +90119=Le domaine {0} existe déjà +90120=Le domaine {0} non trouvé +90121=La base de données est déjà fermée (pour désactiver la fermeture automatique à l''arrêt de la VM, ajoutez ";DB_CLOSE_ON_EXIT=FALSE" à l''URL db) +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. +90123=Impossible de mélanger des paramètres indexés et non-indexés +90124=Fichier non trouvé: {0} +90125=Classe invalide, attendue {0} mais obtenue {1} +90126=La base de données n''est pas persistante +90127=L''ensemble des résultats ne peut pas être mis à jour. La requête doit sélectionner toutes les colonnes à partir d''une clé unique. Seule une table peut être sélectionnée. +90128=L''ensemble des résultats n''est pas scrollable et ne peut pas être réinitialisé. Vous pouvez avoir besoin d''utiliser conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ..). +90129=Transaction {0} non trouvée +90130=Cette méthode n''est pas autorisée pour une instruction paramétrée; à la place utilisez une instruction régulière. 
+90131=Mise à jour concurrente dans la table {0}: une autre transaction a mis à jour ou supprimé la même ligne +90132=Agrégat {0} non trouvé +90133=Impossible de changer le paramétrage {0} lorsque la base de données est déjà ouverte +90134=L''accès à la classe {0} est interdit +90135=La base de données est ouverte en mode exclusif; impossible d''ouvrir des connexions additionnelles +90136=#Window not found: {0} +90137=Peut seulement être assigné à une variable, pas à: {0} +90138=Nom de la base de données invalide: {0} +90139=La méthode Java public static n''a pas été trouvée: {0} +90140=L''ensemble des résultats est en lecture seule. Vous pouvez avoir besoin d''utiliser conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). +90141=Le sérialiseur ne peut être changé parce qu''il y a des données dans la table: {0} +90142=La taille de l''étape ne doit pas être de 0 +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} +HY000=Erreur générale: {0} +HY004=Type de données inconnu: {0} 
+HYC00=Fonctionnalité non supportée: {0} +HYT00=Dépassement du temps lors du vérrouillage de la table {0} diff --git a/h2/src/main/org/h2/res/_messages_ja.prop b/h2/src/main/org/h2/res/_messages_ja.prop index 8741a3dcc5..9eab01d8e5 100644 --- a/h2/src/main/org/h2/res/_messages_ja.prop +++ b/h2/src/main/org/h2/res/_messages_ja.prop @@ -5,40 +5,53 @@ 21S02=列番号が一致しません 22001=列 {0} の値が長過ぎます: {1} 22003=範囲外の数値です: {0} +22004=#Numeric value out of range: {0} in column {1} 22007={0} 定数 {1} を解析できません 22012=ゼロで除算しました: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=データ変換中にエラーが発生しました {0} 22025=LIKE ESCAPE にエラーがあります: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=列 {0} にはnull値が許されていません 23503=参照整合性制約違反: {0} 23505=ユニークインデックス、またはプライマリキー違反: {0} 23506=参照整合性制約違反: {0} 23507=列 {0} にデフォルト値が設定されていません 23513=制約違反を確認してください: {0} -23514=#Check constraint invalid: {0} +23514=制約が無効です。確認してください: {0} 28000=ユーザ名またはパスワードが不正です 40001=デッドロックが検出されました。現在のトランザクションはロールバックされました。詳細: {0} 42000=SQLステートメントに文法エラーがあります {0} 42001=SQLステートメントに文法エラーがあります {0}; 期待されるステートメント {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=テーブル {0} はすでに存在します 42S02=テーブル {0} が見つかりません +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=インデックス {0} はすでに存在します 42S12=インデックス {0} が見つかりません 42S21=列名 {0} が重複しています 42S22=列 {0} が見つかりません -42S32=設定 {0} が見つかりません +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. 
The maximum count is {0} 57014=ステートメントがキャンセルされたか、セッションがタイムアウトしました 90000=関数 {0} はリザルトセットを返さなければなりません 90001=メソッドはクエリをサポートしていません。executeUpdateのかわりに、excute、またはexecuteQueryを使用してください 90002=メソッドはクエリしかサポートしていません。executeQueryのかわりに、excecute、またはexecuteUpdateを使用してください 90003=文字数が奇数の16進文字列です: {0} 90004=16進文字列に不正な文字が含まれています: {0} -90006=#Sequence {0} has run out of numbers +90005=#Invalid trigger flags: {0} +90006=シーケンス {0} を使い果たしました 90007=オブジェクトはすでに閉じられています 90008=パラメータ {1} に対する値 {0} が不正です -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) -90010=#Invalid TO_CHAR format {0} -90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. +90009=#無効な属性により、シーケンス {0} の作成または変更ができません。(base value {1}, 開始値 {2}, 最小値 {3}, 最大値 {4}, 増分 {5}, cache size {6}) +90010=無効な TO_CHAR フォーマット {0} +90011=暗黙的なカレントディレクトリからの相対ファイルパスをデータベースURL({0})に指定することは許可されていません。代わりに絶対パスか相対パス( ~/name, ./name)あるいは baseDir を指定して下さい. 90012=パラメータ {0} がセットされていません 90013=データベース {0} が見つかりません 90014=解析エラー {0} @@ -48,13 +61,13 @@ 90018=アプリケーションにより閉じられていない接続がガベージコレクトされました 90019=使用中のユーザをドロップすることはできません 90020=データベースが使用中です: {0}. 
可能な解決策: 他の接続を全て閉じる; サーバモードを使う -90021=#This combination of database settings is not supported: {0} +90021=この組み合わせのデータベース設定はサポートされていません: {0} 90022=関数 {0} が見つかりません 90023=列 {0} にはnull値を許すべきてはありません 90024=ファイル名を {0} から {1} に変更中にエラーが発生しました 90025=ファイル {0} を削除できません -90026=直列化に失敗しました -90027=直列化復元に失敗しました +90026=直列化に失敗しました: {0} +90027=直列化復元に失敗しました: {0} 90028=入出力例外: {0} 90029=現在行は更新不可です 90030=レコード {0} を読み込み中にファイルの破損を検出しました。可能な解決策: リカバリツールを使用してください @@ -66,7 +79,7 @@ 90036=シーケンス {0} が見つかりません 90037=ビュー {0} が見つかりません 90038=ビュー {0} はすでに存在します -90039=#This CLOB or BLOB reference timed out: {0} +90039=この CLOB または BLOB の参照がタイムアウトしました: {0} 90040=この操作には管理権限が必要です 90041=トリガ {0} はすでに存在します 90042=トリガ {0} が見つかりません @@ -82,6 +95,7 @@ 90053=数値サブクエリが複数の行を含んでいます 90054=集約関数 {0} の不正な使用 90055={0} は未サポートの暗号です +90056=関数 {0}: 無効な日付フォーマット: {1} 90057=制約 {0} が見つかりません 90058=トリガ内でのコミット、ロールバックは許されていません 90059=列名 {0} があいまいです @@ -133,6 +147,7 @@ 90107={1} が依存しているため、{0} をドロップすることはできません 90108=メモリが不足しています 90109=ビュー {0} は無効です: {1} +90110=#Values of types {0} and {1} are not comparable 90111=SQLステートメント {0} による結合テーブルアクセスエラー 90112=インデックス {0} から削除を試みましたが、行が見つかりません 90113=未サポートの接続設定 {0} @@ -141,10 +156,10 @@ 90116=この種類のリテラルは許されていません 90117=このサーバへのリモート接続は許されていません, -tcpAllowOthersを参照 90118=テーブル {0} はドロップできません -90119=ユーザデータ型 {0} はすでに存在します -90120=ユーザデータ型 {0} が見つかりません +90119=ドメイン {0} はすでに存在します +90120=ドメイン {0} が見つかりません 90121=データベースはすでに閉じられています (VM終了時の自動データベースクローズを無効にするためには、db URLに ";DB_CLOSE_ON_EXIT=FALSE" を追加してください) -90122=ビューが存在するテーブル {0} に対する操作はサポートされていません: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 
90123=インデックスの付いたパラメータと付いていないパラメータを混在させることはできません 90124=ファイルが見つかりません: {0} 90125=無効なクラス, {0} が期待されているにもかかわらず {1} を取得しました @@ -158,13 +173,28 @@ 90133=データベースオープン中には、設定 {0} を変更できません 90134=クラス {0} へのアクセスが拒否されました 90135=データベースは排他モードでオープンされています; 接続を追加することはできません -90136=未サポートの外部結合条件: {0} +90136=#Window not found: {0} 90137=割り当ては変数にのみ可能です。{0} にはできません 90138=不正なデータベース名: {0} 90139=public staticであるJavaメソッドが見つかりません: {0} 90140=リザルトセットは読み込み専用です。conn.createStatement(.., ResultSet.CONCUR_UPDATABLE) を使う必要があるかもしれません -90141=#Serializer cannot be changed because there is a data table: {0} -90142=#Step size must not be zero +90141=データテーブル {0} があるため、シリアライザを変更することはできません +90142=ステップサイズに0は指定できません +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=一般エラー: {0} HY004=不明なデータ型: {0} HYC00=機能はサポートされていません: {0} diff --git a/h2/src/main/org/h2/res/_messages_pl.prop b/h2/src/main/org/h2/res/_messages_pl.prop index 287d78e121..44d4eebd9a 100644 --- a/h2/src/main/org/h2/res/_messages_pl.prop +++ 
b/h2/src/main/org/h2/res/_messages_pl.prop @@ -5,10 +5,17 @@ 21S02=Niezgodna ilość kolumn 22001=Wartość za długa dla kolumny {0}: {1} 22003=Wartość numeryczna poza zakresem: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=Nie można odczytać {0} jako {1} 22012=Dzielenie przez zero: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Błąd konwersji danych {0} 22025=Błąd w LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=Pole nie może być NULL{0} 23503=Naruszenie więzów integralności: {0} 23505=Naruszenie ograniczenia Klucza Głównego lub Indeksu Unikalnego: {0} @@ -20,23 +27,29 @@ 40001=Wykryto zakleszczenie. Bieżąca transakcja została wycofana. Szczegóły : {0} 42000=Błąd składniowy w wyrażeniu SQL {0} 42001=Błąd składniowy w wyrażeniu SQL {0}; oczekiwano {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabela {0} już istnieje 42S02=Tabela {0} nie istnieje +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Indeks {0} już istnieje 42S12=Indeks {0} nie istnieje 42S21=Zduplikowana nazwa kolumny {0} 42S22=Kolumna {0} nie istnieje -42S32=Ustawienie {0} nie istnieje +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. 
The maximum count is {0} 57014=Kwerenda została anulowana albo sesja wygasła 90000=Funkcja {0} musi zwrócić dane 90001=Metoda nie jest dozwolona w kwerendzie 90002=Metoda jest dozwolona tylko w kwerendzie 90003=Heksadecymalny string z nieparzystą liczbą znaków: {0} 90004=Heksadecymalny string zawiera niedozwolony znak: {0} +90005=#Invalid trigger flags: {0} 90006=Sekwencja {0} została wyczerpana 90007=Obiekt jest zamknięty 90008=Nieprawidłowa wartość {0} parametru {1} -90009=Nie można utworzyć/zmienić sekwencji {0} ponieważ podane atrybuty są nieprawidłowe (wartość początkowa {1}, wartość minimalna {2}, wartość maksymalna {3}, przyrost {4}) +90009=#Nie można utworzyć/zmienić sekwencji {0} ponieważ podane atrybuty są nieprawidłowe (base value {1}, wartość początkowa {2}, wartość minimalna {3}, wartość maksymalna {4}, przyrost {5}, cache size {6}) 90010=Nieprawidłowy format TO_CHAR {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parametr o numerze {0} nie jest ustalony @@ -82,6 +95,7 @@ 90053=Skalarna pod-kwerenda zawiera więcej niż jeden wiersz 90054=Nieprawidłowe użycie funkcji agregującej {0} 90055=Nieobsługiwany szyfr {0} +90056=Function {0}: Invalid date format: {1} 90057=Ograniczenie {0} nie istnieje 90058=Zatwierdzenie lub wycofanie transakcji nie jest dozwolone w wyzwalaczu 90059=Niejednoznaczna nazwa kolumny {0} @@ -112,7 +126,7 @@ 90084=Nie można skasować ostatniej kolumny {0} 90085=Indeks {0} należy do ograniczenia {1} 90086=Klasa {0} nie istnieje -90087=Metoda {0} nie istnieje +90087=#Method {0} not found 90088=Nieznany stan {0} 90089=Metoda porównywania językowego nie może być zmieniona z powodu istnienia danych w tabeli {0} 90090=Schemat {0} nie może zostać skasowany @@ -133,6 +147,7 @@ 90107=Nie można skasować {0} ponieważ zależy od {1} 90108=Brak pamięci. 
90109=Widok {0} jest nieprawidłowy +90110=#Values of types {0} and {1} are not comparable 90111=Błąd dostępu do tabeli skrzyżowań przy pomocy zapytania SQL {0}, błąd: {1} 90112=Rekord nie znaleziony przy probie kasowania z indeksu {0} 90113=Nie wspierana opcja połączenia {0} @@ -141,10 +156,10 @@ 90116=Literał tego typu nie jest dozwolony 90117=Zdalne połączenia do tego serwera nie są dozwolone, zobacz -tcpAllowOthers 90118=Nie można skasować tabeli {0} -90119=Typ danych użytkownika {0} już istnieje -90120=Typ danych użytkownika {0} nie istnieje +90119=Domena {0} już istnieje +90120=Domena {0} nie istnieje 90121=Baza danych jest już zamknięta (aby zablokować samoczynne zamykanie podczas zamknięcia VM dodaj ";DB_CLOSE_ON_EXIT=FALSE" do URL bazy danych) -90122=Operacja nie jest dozwolona dla tabeli {0} gdy istnieją widoki oparte na tabeli: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=Nie można mieszać parametrów indeksowych z nieindeksowymi 90124=Plik nie istnieje: {0} 90125=Nieprawidłowa klasa, oczekiwano {0}, a jest {1} @@ -158,13 +173,28 @@ 90133=Nie można zmienić ustawienia {0} gdy baza danych jest otwarta 90134=Dostęp do klasy {0} jest zabroniony 90135=Baza danych jest otwarta w trybie wyłączności, nie można otworzyć dodatkowych połączeń -90136=Nieobsługiwany warunek złączenia zewnętrznego: {0} +90136=#Window not found: {0} 90137=Można przypisywać tylko do zmiennych, nie do: {0} 90138=Nieprawidłowa nazwa bazy danych: {0} 90139=Publiczna, statyczna metoda Java nie znaleziona: {0} 90140=Wyniki są tylko do odczytu. Być może potrzebujesz użyć conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=Serializator nie może być zmieniony ponieważ istnieje tabela z danymi: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Błąd ogólny: {0} HY004=Nieznany typ danych: {0} HYC00=Cecha nie jest wspierana: {0} diff --git a/h2/src/main/org/h2/res/_messages_pt_br.prop b/h2/src/main/org/h2/res/_messages_pt_br.prop index a59186496e..e9383f5128 100644 --- a/h2/src/main/org/h2/res/_messages_pt_br.prop +++ b/h2/src/main/org/h2/res/_messages_pt_br.prop @@ -5,10 +5,17 @@ 21S02=A quantidade de colunas não corresponde 22001=Valor muito longo para a coluna {0}: {1} 22003=Valor númerico não esta dentro do limite: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=Não é possível converter {1} para {0} 22012=Divisão por zero: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Erro na conversão de dado, convertendo {0} 22025=Erro em LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted 
for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=NULL não é permitido para a coluna {0} 23503=Violação da integridade de restrição: {0} 23505=Violação de índice único ou de chave primária: {0} @@ -20,23 +27,29 @@ 40001=#Deadlock detected. The current transaction was rolled back. Details: {0} 42000=Erro de sintax na declaração SQL {0} 42001=Erro de sintax na declaração SQL {0}; esperado {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabela {0} já existe 42S02=Tabela {0} não foi encontrada +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=índice {0} já existe 42S12=índice {0} não foi encontrado 42S21=Nome duplicado da coluna {0} 42S22=Coluna {0} não foi encontrada -42S32=Definição {0} não foi encontrada +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=#Statement was canceled or the session timed out 90000=Função {0} deve retornar algum resultado 90001=O método não esta hábilitado para consulta. Use o execute ou o executeQuery em vez de executeUpdate 90002=O método é apenas para consulta. 
Use o execute ou o executeUpdate em vez de executeQuery 90003=Sequência Hexadecimal com número ímpar de caracteres: {0} 90004=Sequência Hexadecimal contêm caracteres inválidos: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=O objeto está fechado 90008=Valor inválido {0} para o parâmetro {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parâmetro {0} não esta definido @@ -82,6 +95,7 @@ 90053=A Subquery contém mais de uma linha 90054=Uso inválido da função {0} agregada 90055=Cipher {0} não é suportado +90056=#Function {0}: Invalid date format: {1} 90057=Restrição {0} não foi encontrada 90058=#Commit or rollback is not allowed within a trigger 90059=Nome da coluna {0} é ambíguo. @@ -112,7 +126,7 @@ 90084=Não pode apagar a última coluna {0} 90085=índice {0} pertence a uma restrição {1} 90086=Classe {0} não foi encontrada -90087=Método {0} não foi encontrado +90087=#Method {0} not found 90088=Modo {0} desconhecido 90089=A coleção não pode ser alterada, porque existe uma tabela de dados: {0} 90090=Esquema {0} não pode ser apagado @@ -133,6 +147,7 @@ 90107=Não pode apagar {0} por que depende de {1} 90108=#Out of memory. 
90109=Vista {0} é inválida: {1} +90110=#Values of types {0} and {1} are not comparable 90111=Erro ao acessar a tabela lincada com a instrução SQL {0}, causa: {1} 90112=A linha não foi encontrada ao tentar eliminar apartir do índice {0} 90113=Não suporta a definição de conecção {0} @@ -141,10 +156,10 @@ 90116=Literais deste tipo não são permitidas 90117=Conecções remotas para este servidor não estão habilitadas, veja -tcpAllowOthers 90118=Não pode apagar a tabela {0} -90119=Tipo de dados do usuário {0} já existe -90120=Tipo de dados do usuário {0} não foram encontrados +90119=Domínio {0} já existe +90120=Domínio {0} não foram encontrados 90121=Base de dados já está fechada (para desabilitar o fechamento automático quando a VM terminar, addicione ";DB_CLOSE_ON_EXIT=FALSE" na url da base de dados) -90122=Operação não suportada para a tabela {0} quando existe alguma vista sobre a tabela: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=Não pode combinar parâmetros de índices com não índices 90124=Arquivo não encontrado: {0} 90125=Classe inválida, experada {0} mas está {1} @@ -158,13 +173,28 @@ 90133=#Cannot change the setting {0} when the database is already open 90134=#Access to the class {0} is denied 90135=#The database is open in exclusive mode; can not open additional connections -90136=#Unsupported outer join condition: {0} +90136=#Window not found: {0} 90137=#Can only assign to a variable, not to: {0} 90138=#Invalid database name: {0} 90139=#The public static Java method was not found: {0} 90140=#The result set is readonly. You may need to use conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=#Serializer cannot be changed because there is a data table: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Erro geral: {0} HY004=Tipo de dados desconhecido: {0} HYC00=Recurso não suportado: {0} diff --git a/h2/src/main/org/h2/res/_messages_ru.prop b/h2/src/main/org/h2/res/_messages_ru.prop index 626d4363b2..c037c350ff 100644 --- a/h2/src/main/org/h2/res/_messages_ru.prop +++ b/h2/src/main/org/h2/res/_messages_ru.prop @@ -1,44 +1,57 @@ -.translator=Sergi Vladykin +.translator=Sergi Vladykin; Evgenij Ryazanov 02000=Нет данных 07001=Неверное количество параметров для функции {0}, ожидаемое количество: {1} 08000=Ошибка при открытии базы данных: {0} 21S02=Неверное количество столбцов 22001=Значение слишком длинное для поля {0}: {1} 22003=Численное значение вне допустимого диапазона: {0} +22004=Численное значение вне допустимого диапазона: {0} в столбце {1} 22007=Невозможно преобразование строки {1} в тип {0} 22012=Деление на ноль: {0} 
+22013=Недопустимое значение PRECEDING или FOLLOWING в оконной функции: {0} 22018=Ошибка преобразования данных при конвертации {0} 22025=Ошибка в LIKE ESCAPE: {0} +2202E=Недопустимый элемент массива: {0}, ожидался {1} +22030=Недопустимое значение для столбца {0}: {1} +22031=Значение не указано в перечислимом типе {0}: {1} +22032=Пустые перечислимые типы не допускаются +22033=Повторяющиеся значения в перечислимом типе: {0} 23502=Значение NULL не разрешено для поля {0} 23503=Нарушение ссылочной целостности: {0} 23505=Нарушение уникального индекса или первичного ключа: {0} 23506=Нарушение ссылочной целостности: {0} 23507=Для поля {0} не установлено значение по умолчанию 23513=Нарушение ограничения: {0} -23514=#Check constraint invalid: {0} +23514=Неправильное ограничение CHECK: {0} 28000=Неверное имя пользователя или пароль 40001=Обнаружена взаимная блокировка потоков. Текущая транзакция была откачена. Детали: {0} 42000=Синтаксическая ошибка в выражении SQL {0} 42001=Синтаксическая ошибка в выражении SQL {0}; ожидалось {1} +42602=Недопустимое имя {0} +42622=Имя, начинающееся с {0}, слишком длинное. Максимальная длина: {1} 42S01=Таблица {0} уже существует 42S02=Таблица {0} не найдена +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Индекс {0} уже существует 42S12=Индекс {0} не найден 42S21=Повтор имени столбца {0} 42S22=Столбец {0} не найден -42S32=Настройка {0} не найдена +42S31=Должны использоваться идентичные выражения; ожидалось {0}, получено {1} +54011=Слишком много столбцов. Максимальное количество {0} 57014=Запрос был отменен или закончилось время ожидания сессии 90000=Функция {0} должна возвращать набор записей 90001=Метод не разрешен для запросов. Используйте execute или executeQuery вместо executeUpdate 90002=Метод разрешен только для запросов. 
Используйте execute или executeUpdate вместо executeQuery 90003=Шестнадцатиричная строка содержит нечетное количество символов: {0} 90004=Шестнадцатиричная строка содержит нешестнадцатиричные символы: {0} -90006=#Sequence {0} has run out of numbers +90005=Недопустимые флаги триггера: {0} +90006=Последовательность {0} вышла за границы (MINVALUE, MAXVALUE) 90007=Объект уже закрыт 90008=Недопустимое значение {0} для параметра {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) -90010=#Invalid TO_CHAR format {0} -90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. +90009=Невозможно создать или изменить последовательность {0} из-за неправильных атрибутов (базовое значение {1}, начальное значение {2}, минимальное значение {3}, максимальное значение {4}, приращение {5}, кэш {6}) +90010=Неправильный формат TO_CHAR {0} +90011=Путь неявно является относительным для текущего рабочего каталога и не допустим в URL базы данных {0}. Используйте абсолютный путь, ~/name, ./name, или настройку baseDir. 90012=Параметр {0} не установлен 90013=База данных {0} не найдена 90014=Ошибка при разборе {0} @@ -48,7 +61,7 @@ 90018=Незакрытое приложением соединение уничтожено сборщиком мусора 90019=Невозможно удалить текущего пользователя 90020=База данных уже используется: {0}. 
Возможные решения: закрыть все другие соединения; использовать режим сервера -90021=#This combination of database settings is not supported: {0} +90021=Такое сочетание настроек базы данных не поддерживается: {0} 90022=Функция {0} не найдена 90023=Поле {0} не должно поддерживать значение NULL 90024=Ошибка при переименовании файла {0} в {1} @@ -66,7 +79,7 @@ 90036=Последовательность {0} не найдена 90037=Представление {0} не найдено 90038=Представление {0} уже существует -90039=#This CLOB or BLOB reference timed out: {0} +90039=Этот CLOB или BLOB объект закрыт по таймауту: {0} 90040=Для выполнения данной операции необходимы права администратора 90041=Триггер {0} уже существует 90042=Триггер {0} не найден @@ -82,6 +95,7 @@ 90053=Подзапрос выбирает более одной строки 90054=Некорректное использование агрегирующей функции {0} 90055=Метод шифрования {0} не поддерживается +90056=Функция {0}: Неверный формат даты: {1} 90057=Ограничение {0} не найдено 90058=Commit или rollback внутри триггера не допускается 90059=Неоднозначное имя столбца {0} @@ -133,6 +147,8 @@ 90107=Невозможно удалить {0}, пока существует зависимый объект {1} 90108=Ошибка нехватки памяти 90109=Представление {0} содержит ошибки: {1} +90110=Сравнение массива (ARRAY) со скалярным значением +90110=Значения типов данных {0} и {1} не сравнимы друг с другом 90111=Ошибка при обращении к линкованной таблице SQL запросом {0}, причина: {1} 90112=Запись не найдена при удалении из индекса {0} 90113=Неподдерживаемая опция соединения {0} @@ -141,10 +157,10 @@ 90116=Вычисление литералов запрещено 90117=Удаленные соединения к данному серверу запрещены, см. 
-tcpAllowOthers 90118=Невозможно удалить таблицу {0} -90119=Объект с именем {0} уже существует +90119=Домен {0} уже существует 90120=Домен {0} не найден 90121=База данных уже закрыта (чтобы отключить автоматическое закрытие базы данных при останове JVM, добавьте ";DB_CLOSE_ON_EXIT=FALSE" в URL) -90122=Операция для таблицы {0} не поддерживается, пока существуют представления: {1} +90122=Ограничение WITH TIES использовано без соответствующего раздела ORDER BY. 90123=Одновременное использование индексированных и неиндексированных параметров в запросе не поддерживается 90124=Файл не найден: {0} 90125=Недопустимый класс, ожидался {0}, но получен {1} @@ -158,13 +174,28 @@ 90133=Невозможно изменить опцию {0}, когда база данных уже открыта 90134=Доступ к классу {0} запрещен 90135=База данных открыта в эксклюзивном режиме, открыть дополнительные соединения невозможно -90136=Данное условие не поддерживается в OUTER JOIN : {0} +90136=Окно не найдено: {0} 90137=Присваивать значения возможно только переменным, но не: {0} 90138=Недопустимое имя базы данных: {0} 90139=public static Java метод не найден: {0} 90140=Набор записей не является обновляемым. Возможно необходимо использовать conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
-90141=#Serializer cannot be changed because there is a data table: {0} -90142=#Step size must not be zero +90141=Serializer не может быть изменен, потому что есть таблица данных: {0} +90142=Размер шага не должен быть равен нулю +90143=Строка {1} не найдена в первичном индексе {0} +90144=Внешняя аутентификация не включена в базе данных {0} +90145=FOR UPDATE не допускается в запросе с DISTINCT или запросе с группировкой +90146=База данных {0} не найдена и её автоматическое создание запрещено флагом IFEXISTS=true +90147=Нельзя использовать метод {0} при включённом автовыполнении +90148=Текущее значение последовательности {0} ещё не определено в этой сессии +90149=База данных {0} не найдена, создайте её предварительно или разрешите удалённое создание баз данных (не рекомендуется в защищённых системах) +90150=Диапазон или точность ({0}) должны быть в пределах от {1} до {2} включительно +90151=Масштаб или точность долей секунды ({0}) должны быть в пределах от {1} до {2} включительно +90152=Ограничение {0} используется ограничением {1} +90153=Столбец {0} ссылается на столбец {1}, не имеющий допустимой операции сравнения +90154=Нельзя присвоить значение генерируемому столбцу {0} +90155=Генерируемый столбец {0} не может обновляться ссылочным ограничением с пунктом {1} +90156=Имя столбца не указано для выражения {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Внутренняя ошибка: {0} HY004=Неизвестный тип данных: {0} HYC00=Данная функция не поддерживается: {0} diff --git a/h2/src/main/org/h2/res/_messages_sk.prop b/h2/src/main/org/h2/res/_messages_sk.prop index b0393fa056..b86a883353 100644 --- a/h2/src/main/org/h2/res/_messages_sk.prop +++ b/h2/src/main/org/h2/res/_messages_sk.prop @@ -5,10 +5,17 @@ 21S02=Počet stĺpcov sa nezhoduje 22001=Hodnota je príliš dlhá pre stĺpec {0}: {1} 22003=Číselná hodnota mimo rozsah: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=Nemožem rozobrať {0} konštantu {1} 22012=Delenie nulou: {0} 
+22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=Chyba konverzie dát pre {0} 22025=Chyba v LIKE ESCAPE: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed for enum types: {0} 23502=NULL nie je povolený pre stĺpec {0} 23503=Porušenie obmedzenia (constraint) referenčnej integrity: {0} 23505=Porušenie jedinečnosti (unique) indexu alebo primárneho kľúča: {0} @@ -20,23 +27,29 @@ 40001=Mŕtvy bod (deadlock) detegovaný. Aktuálna transakcia bude odvolaná (rolled back). Podrobnosti: {0} 42000=Syntaktická chyba v SQL príkaze {0} 42001=Syntaktická chyba v SQL príkaze {0}; očakávané {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01=Tabuľka {0} už existuje 42S02=Tabuľka {0} nenájdená +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=Index {0} už existuje 42S12=Index {0} nenájdený 42S21=Duplicitné meno stĺpca {0} 42S22=Stĺpec {0} nenájdený -42S32=Nastavenie {0} nenájdené +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=Príkaz bol zrušený alebo vypršal časový limit sedenia 90000=Funkcia {0} musí vracať výsledok (result set) 90001=Metóda nie je povolená pre dopyt (query). Použite execute alebo executeQuery namiesto executeUpdate 90002=Metóda je povolená iba pre dopyt (query). 
Použite execute alebo executeUpdate namiesto executeQuery 90003=Hexadecimálny reťazec s nepárnym počtom znakov: {0} 90004=Hexadecimálny reťazec obsahuje nepovolené znaky pre šestnáskovú sústavu: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=Objekt už je zatvorený 90008=Nesprávna hodnota {0} parametra {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=Parameter {0} nie je nastavený @@ -82,6 +95,7 @@ 90053=Skalárny vnorený dopyt (scalar subquery) obsahuje viac ako jeden riadok 90054=Nesprávne použitie agregačnej funkcie {0} 90055=Nepodporovaný typ šifry {0} +90056=#Function {0}: Invalid date format: {1} 90057=Obmedzenie (constraint) {0} nenájdený 90058=Commit alebo Rollback nie je povolené použiť v spúšťači (trigger) 90059=Nejednoznačné meno stĺpca {0} @@ -133,6 +147,7 @@ 90107=Nemôžem zmazať {0} lebo {1} zavisí na {0} 90108=Nedostatok pamäte. 
90109=Pohľad (view) {0} je nesprávny: {1} +90110=#Values of types {0} and {1} are not comparable 90111=Chyba prístupu k linkovanej tabuľke SQL príkazom {0}, dôvod: {1} 90112=Riadok nenájdený pri pokuse o vymazanie cez index {0} 90113=Nepodporované nastavenie spojenia {0} @@ -141,10 +156,10 @@ 90116=Písmená (literals) tohto druhu nie sú povolené 90117=Vzdialené pripojenia na tento server nie sú povolené, pozrite -tcpAllowOthers 90118=Nemôžem zmazať tabuľku {0} -90119=Používateľský dátový typ {0} už existuje -90120=Používateľský dátový typ {0} nenájdený +90119=Doména {0} už existuje +90120=Doména {0} nenájdený 90121=Databáza už je zatvorená (na zamedzenie automatického zatvárania pri ukončení VM, pridajte ";DB_CLOSE_ON_EXIT=FALSE" do DB URL) -90122=Operácia pre tabuľku {0} nie je podporovaná, kedže existujú na tabuľku pohľady (views): {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=Nemožno miešať indexované a neindexované parametre 90124=Súbor nenájdený: {0} 90125=Nesprávna trieda {1}, očakávana je {0} @@ -158,13 +173,28 @@ 90133=Nemôžem zmeniť nastavenie {0} keď už je databáza otvorená 90134=Prístup k triede {0} odoprený 90135=Databáza je otvorená vo výhradnom (exclusive) móde; nemôžem na ňu otvoriť ďalšie pripojenia -90136=Nepodporovaná "outer join" podmienka: {0} +90136=#Window not found: {0} 90137=Môžete priradiť len do premennej, nie do: {0} 90138=Nesprávne meno databázy: {0} 90139=Verejná statická Java metóda nebola nájdená: {0} 90140=Výsledok (result set) je iba na čítanie. Je potrebné použiť conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 
90141=#Serializer cannot be changed because there is a data table: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP BY clause is outside valid range 1 - {1} HY000=Všeobecná chyba: {0} HY004=Neznámy dátový typ: {0} HYC00=Vlastnosť nie je podporovaná: {0} diff --git a/h2/src/main/org/h2/res/_messages_zh_cn.prop b/h2/src/main/org/h2/res/_messages_zh_cn.prop index 8373f0b34d..03d1079e61 100644 --- a/h2/src/main/org/h2/res/_messages_zh_cn.prop +++ b/h2/src/main/org/h2/res/_messages_zh_cn.prop @@ -5,10 +5,17 @@ 21S02=字段数目不匹配 22001=字段 {0}数值太大: {1} 22003=数值超出范围: {0} +22004=#Numeric value out of range: {0} in column {1} 22007=不能解析字段 {0} 的数值 :{1} 22012=除数为零: {0} +22013=#Invalid PRECEDING or FOLLOWING size in window function: {0} 22018=转换数据{0}期间出现转换错误 22025=LIKE ESCAPE(转义符)存在错误: {0} +2202E=#Array element error: {0}, expected {1} +22030=#Value not permitted for column {0}: {1} +22031=#Value not a member of enumerators {0}: {1} +22032=#Empty enums are not allowed +22033=#Duplicate enumerators are not allowed 
for enum types: {0} 23502=字段 {0} 不允许为NULL值 23503=违反引用完整性约束: {0} 23505=违反唯一索引或逐渐约束: {0} @@ -20,23 +27,29 @@ 40001=检测到死锁.当前事务已回滚.详情: {0} 42000=SQL语法错误 {0} 42001=SQL语法错误 {0}; 预期: {1} +42602=#Invalid name {0} +42622=#The name that starts with {0} is too long. The maximum length is {1} 42S01= {0}表已存在 42S02=找不到表 {0} +42S03=#Table {0} not found (candidates are: {1}) +42S04=#Table {0} not found (this database is empty) 42S11=索引 {0} 已存在 42S12=找不到索引 {0} 42S21=重复的字段: {0} 42S22=找不到字段 {0} -42S32=找不到设置 {0} +42S31=#Identical expressions should be used; expected {0}, found {1} +54011=#Too many columns. The maximum count is {0} 57014=语句已取消执行或会话已过期 90000={0} 函数必须返回一个结果集 90001=不允许在查询内使用的方法,使用execute 或 executeQuery 代替 executeUpdate 90002=只允许在查询内使用的方法. 使用 execute 或 executeUpdate 代替 executeQuery 90003=十六进制字符串包含奇数个数字字符: {0} 90004=十六进制字符串包含非十六进制字符: {0} +90005=#Invalid trigger flags: {0} 90006=#Sequence {0} has run out of numbers 90007=对象已关闭 90008=被发现非法的数值 {0} 在参数 {1} -90009=#Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4}) +90009=#Unable to create or alter sequence {0} because of invalid attributes (base value {1}, start value {2}, min value {3}, max value {4}, increment {5}, cache size {6}) 90010=#Invalid TO_CHAR format {0} 90011=#A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead. 90012=参数 {0} 的值还没有设置 @@ -82,6 +95,7 @@ 90053=标量子查询(Scalar subquery)包含多于一行结果 90054=非法使用聚合函数 {0} 90055=不支持的加密算法 {0} +90056=#Function {0}: Invalid date format: {1} 90057=约束 {0} 找不到 90058=提交或回滚不能办函触发器 90059=不明确的字段名 {0} @@ -133,6 +147,7 @@ 90107=不能删除 {0} ,因为 {1} 依赖着它 90108=内存不足. 
90109=视图 {0} 无效: {1} +90110=#Values of types {0} and {1} are not comparable 90111=SQL语句访问表连接错误 {0}, 原因: {1} 90112=尝试从索引中删除 {0}的时候找不到行 90113=不支持的连接设置 {0} @@ -141,10 +156,10 @@ 90116=不允许此类型的字面值 90117=不允许远程连接到本服务器, 参见 -tcpAllowOthers 90118=不能删除表 {0} -90119=用户数据类型 {0} 已存在 -90120=找不到用户数据类型 {0} +90119=域 {0} 已存在 +90120=找不到域 {0} 90121=数据库已关闭 (若需要禁用在虚拟机关闭的时候同时关闭数据库,请加上 ";DB_CLOSE_ON_EXIT=FALSE" 到数据库连接的 URL) -90122={0}表不支持本操作,因为在表上存在视图: {1} +90122=#The WITH TIES clause is not allowed without a corresponding ORDER BY clause. 90123=不能混合已索引和未索引的参数 90124=文件 找不到: {0} 90125=无效的类, 取代找到 {0} 但得到 {1} @@ -158,13 +173,28 @@ 90133=数据库有已启动的时候不允许更改设置{0} 90134=访问 {0}类时被拒绝 90135=数据库运行在独占模式(exclusive mode); 不能打开额外的连接 -90136=不支持的外连接条件: {0} +90136=#Window not found: {0} 90137=只能赋值到一个变量,而不是: {0} 90138=无效数据库名称: {0} 90139=找不到公用Java静态方法: {0} 90140=结果集是只读的. 你可以使用 conn.createStatement(.., ResultSet.CONCUR_UPDATABLE). 90141=#Serializer cannot be changed because there is a data table: {0} 90142=#Step size must not be zero +90143=#Row {1} not found in primary index {0} +90144=#Authenticator not enabled on database {0} +90145=#FOR UPDATE is not allowed in DISTINCT or grouped select +90146=#Database {0} not found, and IFEXISTS=true, so we can't auto-create it +90147=#Method {0} is not allowed when connection is in auto-commit mode +90148=#Current value of sequence {0} is not yet defined in this session +90149=#Database {0} not found, either pre-create it or allow remote database creation (not recommended in secure environments) +90150=#Precision ({0}) must be between {1} and {2} inclusive +90151=#Scale or fractional seconds precision ({0}) must be between {1} and {2} inclusive +90152=#Constraint {0} is used by constraint {1} +90153=#Column {0} references uncomparable column {1} +90154=#Generated column {0} cannot be assigned +90155=#Generated column {0} cannot be updatable by a referential constraint with {1} clause +90156=#Column alias is not specified for expression {0} +90157=#Column index {0} in GROUP 
BY clause is outside valid range 1 - {1} HY000=常规错误: {0} HY004=位置数据类型: {0} HYC00=不支持的特性: {0} diff --git a/h2/src/main/org/h2/res/help.csv b/h2/src/main/org/h2/res/help.csv index f7d29224a7..d783fa770a 100644 --- a/h2/src/main/org/h2/res/help.csv +++ b/h2/src/main/org/h2/res/help.csv @@ -1,1457 +1,7474 @@ -# Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, -# and the EPL 1.0 (http://h2database.com/html/license.html). -# Initial Developer: H2 Group) -"SECTION","TOPIC","SYNTAX","TEXT" +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). +# Initial Developer: H2 Group + +"SECTION","TOPIC","SYNTAX","TEXT","EXAMPLE" + "Commands (DML)","SELECT"," -SELECT [ TOP term ] [ DISTINCT | ALL ] selectExpression [,...] -FROM tableExpression [,...] [ WHERE expression ] -[ GROUP BY expression [,...] ] [ HAVING expression ] -[ { UNION [ ALL ] | MINUS | EXCEPT | INTERSECT } select ] [ ORDER BY order [,...] ] -[ LIMIT expression [ OFFSET expression ] [ SAMPLE_SIZE rowCountInt ] ] -[ FOR UPDATE ] -"," -Selects data from a table or multiple tables." +SELECT [ DISTINCT @h2@ [ ON ( expression [,...] ) ] | ALL ] +selectExpression [,...] +[ FROM tableExpression [,...] ] +[ WHERE expression ] +[ GROUP BY groupingElement [,...] ] [ HAVING expression ] +[ WINDOW { { windowName AS windowSpecification } [,...] } ] +@h2@ [ QUALIFY expression ] +[ { UNION [ ALL ] | EXCEPT | INTERSECT } query ] +[ ORDER BY selectOrder [,...] ] +[ OFFSET expression { ROW | ROWS } ] +[ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } + { ONLY | WITH TIES } ] +@h2@ [ FOR UPDATE ] +"," +Selects data from a table or multiple tables. + +Command is executed in the following logical order: + +1. Data is taken from table value expressions that are specified in the FROM clause, joins are executed. +If FROM clause is not specified a single row is constructed. + +2. WHERE filters rows. 
Aggregate or window functions are not allowed in this clause. + +3. GROUP BY groups the result by the given expression(s). +If GROUP BY clause is not specified, but non-window aggregate functions are used or HAVING is specified +all rows are grouped together. + +4. Aggregate functions are evaluated. + +5. HAVING filters rows after grouping and evaluation of aggregate functions. +Non-window aggregate functions are allowed in this clause. + +6. Window functions are evaluated. + +7. QUALIFY filters rows after evaluation of window functions. +Aggregate and window functions are allowed in this clause. + +8. DISTINCT removes duplicates. +If DISTINCT ON is used only the specified expressions are checked for duplicates; +ORDER BY clause, if any, is used to determine preserved rows. +First row is each DISTINCT ON group is preserved. +In absence of ORDER BY preserved rows are not determined, database may choose any row from each DISTINCT ON group. + +9. UNION, EXCEPT, and INTERSECT combine the result of this query with the results of another query. +INTERSECT has higher precedence than UNION and EXCEPT. +Operators with equal precedence are evaluated from left to right. + +10. ORDER BY sorts the result by the given column(s) or expression(s). + +11. Number of rows in output can be limited with OFFSET and FETCH clauses. +OFFSET specifies how many rows to skip. +Please note that queries with high offset values can be slow. +FETCH FIRST/NEXT limits the number of rows returned by the query. +If PERCENT is specified number of rows is specified as a percent of the total number of rows +and should be an integer value between 0 and 100 inclusive. +WITH TIES can be used only together with ORDER BY and means that all additional rows that have the same sorting position +as the last row will be also returned. + +WINDOW clause specifies window definitions for window functions and window aggregate functions. +This clause can be used to reuse the same definition in multiple functions. 
+ +If FOR UPDATE is specified, the tables or rows are locked for writing. +This clause is not allowed in DISTINCT queries and in queries with non-window aggregates, GROUP BY, or HAVING clauses. +Only the selected rows are locked as in an UPDATE statement. +Rows from the right side of a left join and from the left side of a right join, including nested joins, aren't locked. +Locking behavior for rows that were excluded from result using OFFSET / FETCH / LIMIT / TOP or QUALIFY is undefined, +to avoid possible locking of excessive rows try to filter out unneeded rows with the WHERE criteria when possible. +Rows are processed one by one. Each row is read, tested with WHERE criteria, locked, read again and re-tested, +because its value may be changed by concurrent transaction before lock acquisition. +Note that new uncommitted rows from other transactions are not visible unless read uncommitted isolation level is used +and therefore cannot be selected and locked. +Modified uncommitted rows from other transactions that satisfy the WHERE criteria cause this SELECT to wait for +commit or rollback of those transactions. +"," +SELECT * FROM TEST; +SELECT * FROM TEST ORDER BY NAME; +SELECT ID, COUNT(*) FROM TEST GROUP BY ID; +SELECT NAME, COUNT(*) FROM TEST GROUP BY NAME HAVING COUNT(*) > 2; +SELECT 'ID' COL, MAX(ID) AS MAX FROM TEST UNION SELECT 'NAME', MAX(NAME) FROM TEST; +SELECT * FROM TEST OFFSET 1000 ROWS FETCH FIRST 1000 ROWS ONLY; +SELECT A, B FROM TEST ORDER BY A FETCH FIRST 10 ROWS WITH TIES; +SELECT * FROM (SELECT ID, COUNT(*) FROM TEST + GROUP BY ID UNION SELECT NULL, COUNT(*) FROM TEST) + ORDER BY 1 NULLS LAST; +SELECT DISTINCT C1, C2 FROM TEST; +SELECT DISTINCT ON(C1) C1, C2 FROM TEST ORDER BY C1; +" + "Commands (DML)","INSERT"," -INSERT INTO tableName -{ [ ( columnName [,...] ) ] - { VALUES { ( { DEFAULT | expression } [,...] ) } [,...] | [ DIRECT ] [ SORTED ] select } } | - { SET { columnName = { DEFAULT | expression } } [,...] 
} +INSERT INTO [schemaName.]tableName [ ( columnName [,...] ) ] +{ [ overrideClause ] { insertValues | @h2@ [ DIRECT ] query } } + | DEFAULT VALUES "," -Inserts a new row / new rows into a table." -"Commands (DML)","UPDATE"," -UPDATE tableName [ [ AS ] newTableAlias ] SET -{ { columnName = { DEFAULT | expression } } [,...] } | - { ( columnName [,...] ) = ( select ) } -[ WHERE expression ] [ ORDER BY order [,...] ] [ LIMIT expression ] +Inserts a new row / new rows into a table. + +When using DIRECT, then the results from the query are directly applied in the target table without any intermediate step. "," -Updates data in a table." +INSERT INTO TEST VALUES(1, 'Hello') +" + +"Commands (DML)","UPDATE"," +UPDATE [schemaName.]tableName [ [ AS ] newTableAlias ] SET setClauseList +[ WHERE expression ] @c@ [ ORDER BY sortSpecificationList ] +@h2@ FETCH { FIRST | NEXT } [ expression ] { ROW | ROWS } ONLY +"," +Updates data in a table. +ORDER BY is supported for MySQL compatibility, but it is ignored. +If FETCH is specified, at most the specified number of rows are updated (no limit if null or smaller than zero). +"," +UPDATE TEST SET NAME='Hi' WHERE ID=1; +UPDATE PERSON P SET NAME=(SELECT A.NAME FROM ADDRESS A WHERE A.ID=P.ID); +" + "Commands (DML)","DELETE"," -DELETE [ TOP term ] FROM tableName [ WHERE expression ] [ LIMIT term ] +DELETE FROM [schemaName.]tableName +[ WHERE expression ] +@h2@ FETCH { FIRST | NEXT } [ expression ] { ROW | ROWS } ONLY +"," +Deletes rows form a table. +If FETCH is specified, at most the specified number of rows are deleted (no limit if null or smaller than zero). "," -Deletes rows form a table." +DELETE FROM TEST WHERE ID=2 +" + "Commands (DML)","BACKUP"," -BACKUP TO fileNameString +@h2@ BACKUP TO fileNameString "," -Backs up the database files to a ." +Backs up the database files to a .zip file. Objects are not locked, but +the backup is transactionally consistent because the transaction log is also copied. 
+Admin rights are required to execute this command. +"," +BACKUP TO 'backup.zip' +" + "Commands (DML)","CALL"," CALL expression "," -Calculates a simple expression." +Calculates a simple expression. This statement returns a result set with one row, +except if the called function returns a result set itself. +If the called function returns an array, then each element in this array is returned as a column. +"," +CALL 15*25 +" + +"Commands (DML)","EXECUTE IMMEDIATE"," +EXECUTE IMMEDIATE sqlString +"," +Dynamically prepares and executes the SQL command specified as a string. Query commands may not be used. +"," +EXECUTE IMMEDIATE 'ALTER TABLE TEST DROP CONSTRAINT ' || + QUOTE_IDENT((SELECT CONSTRAINT_NAME + FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS + WHERE TABLE_SCHEMA = 'PUBLIC' AND TABLE_NAME = 'TEST' + AND CONSTRAINT_TYPE = 'UNIQUE')); +" + "Commands (DML)","EXPLAIN"," -EXPLAIN { [ PLAN FOR ] | ANALYZE } { select | insert | update | delete | merge } -"," -Shows the execution plan for a statement." -"Commands (DML)","MERGE"," -MERGE INTO tableName [ ( columnName [,...] ) ] -[ KEY ( columnName [,...] ) ] -{ VALUES { ( { DEFAULT | expression } [,...] ) } [,...] | select } -"," -Updates existing rows, and insert rows that don't exist." +@h2@ EXPLAIN { [ PLAN FOR ] | ANALYZE } +@h2@ { query | insert | update | delete | mergeInto | mergeUsing } +"," +Shows the execution plan for a statement. +When using EXPLAIN ANALYZE, the statement is actually executed, and the query plan +will include the actual row scan count for each table. +"," +EXPLAIN SELECT * FROM TEST WHERE ID=1 +" + +"Commands (DML)","MERGE INTO"," +@h2@ MERGE INTO [schemaName.]tableName [ ( columnName [,...] ) ] +@h2@ [ KEY ( columnName [,...] ) ] +@h2@ { insertValues | query } +"," +Updates existing rows, and insert rows that don't exist. If no key column is +specified, the primary key columns are used to find the row. If more than one +row per new row is affected, an exception is thrown. 
+"," +MERGE INTO TEST KEY(ID) VALUES(2, 'World') +" + +"Commands (DML)","MERGE USING"," +MERGE INTO [schemaName.]targetTableName [ [AS] targetAlias] +USING tableExpression +ON expression +mergeWhenClause [,...] +"," +Updates or deletes existing rows, and insert rows that don't exist. + +The ON clause specifies the matching column expression. + +Different rows from a source table may not match with the same target row +(this is not ensured by H2 if target table is an updatable view). +One source row may be matched with multiple target rows. + +If statement doesn't need a source table a DUAL table can be substituted. +"," +MERGE INTO TARGET_TABLE AS T USING SOURCE_TABLE AS S + ON T.ID = S.ID + WHEN MATCHED AND T.COL2 <> 'FINAL' THEN + UPDATE SET T.COL1 = S.COL1 + WHEN MATCHED AND T.COL2 = 'FINAL' THEN + DELETE + WHEN NOT MATCHED THEN + INSERT (ID, COL1, COL2) VALUES(S.ID, S.COL1, S.COL2); +MERGE INTO TARGET_TABLE AS T USING (SELECT * FROM SOURCE_TABLE) AS S + ON T.ID = S.ID + WHEN MATCHED AND T.COL2 <> 'FINAL' THEN + UPDATE SET T.COL1 = S.COL1 + WHEN MATCHED AND T.COL2 = 'FINAL' THEN + DELETE + WHEN NOT MATCHED THEN + INSERT VALUES (S.ID, S.COL1, S.COL2); +MERGE INTO TARGET T USING (VALUES (1, 4), (2, 15)) S(ID, V) + ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET V = S.V + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +MERGE INTO TARGET_TABLE USING DUAL ON ID = 1 + WHEN NOT MATCHED THEN INSERT VALUES (1, 'Test') + WHEN MATCHED THEN UPDATE SET NAME = 'Test'; +" + "Commands (DML)","RUNSCRIPT"," -RUNSCRIPT FROM fileNameString scriptCompressionEncryption -[ CHARSET charsetString ] -"," -Runs a SQL script from a file." +@h2@ RUNSCRIPT FROM fileNameString scriptCompressionEncryption +@h2@ [ CHARSET charsetString ] +@h2@ { [ QUIRKS_MODE ] [ VARIABLE_BINARY ] | FROM_1X } +"," +Runs a SQL script from a file. The script is a text file containing SQL +statements; each statement must end with ';'. This command can be used to +restore a database from a backup. 
The password must be in single quotes; it is +case sensitive and can contain spaces. + +Instead of a file name, a URL may be used. +To read a stream from the classpath, use the prefix 'classpath:'. +See the [Pluggable File System](https://h2database.com/html/advanced.html#file_system) section. + +The compression algorithm must match the one used when creating the script. +Instead of a file, a URL may be used. + +If ""QUIRKS_MODE"" is specified, the various compatibility quirks for scripts from older versions of H2 are enabled. +Use this clause when you import script that was generated by H2 1.4.200 or an older version into more recent version. + +If ""VARIABLE_BINARY"" is specified, the ""BINARY"" data type will be parsed as ""VARBINARY"". +Use this clause when you import script that was generated by H2 1.4.200 or an older version into more recent version. + +If ""FROM_1X"" is specified, quirks for scripts exported from H2 1.*.* are enabled. +Use this flag to populate a new database with the data exported from 1.*.* versions of H2. +This flag also enables ""QUIRKS_MODE"" and ""VARIABLE_BINARY"" implicitly. + +Admin rights are required to execute this command. +"," +RUNSCRIPT FROM 'backup.sql' +RUNSCRIPT FROM 'classpath:/com/acme/test.sql' +RUNSCRIPT FROM 'dump_from_1_4_200.sql' FROM_1X +" + "Commands (DML)","SCRIPT"," -SCRIPT [ SIMPLE ] [ NODATA ] [ NOPASSWORDS ] [ NOSETTINGS ] -[ DROP ] [ BLOCKSIZE blockSizeInt ] -[ TO fileNameString scriptCompressionEncryption +@h2@ SCRIPT { [ NODATA ] | [ SIMPLE ] [ COLUMNS ] } +@h2@ [ NOPASSWORDS ] @h2@ [ NOSETTINGS ] +@h2@ [ DROP ] @h2@ [ BLOCKSIZE blockSizeInt ] +@h2@ [ TO fileNameString scriptCompressionEncryption [ CHARSET charsetString ] ] -[ TABLE tableName [, ...] ] -[ SCHEMA schemaName [, ...] ] -"," -Creates a SQL script from the database." +@h2@ [ TABLE [schemaName.]tableName [, ...] ] +@h2@ [ SCHEMA schemaName [, ...] ] +"," +Creates a SQL script from the database. + +NODATA will not emit INSERT statements. 
+SIMPLE does not use multi-row insert statements. +COLUMNS includes column name lists into insert statements. +If the DROP option is specified, drop statements are created for tables, views, +and sequences. If the block size is set, CLOB and BLOB values larger than this +size are split into separate blocks. +BLOCKSIZE is used when writing out LOB data, and specifies the point at the +values transition from being inserted as inline values, to be inserted using +out-of-line commands. +NOSETTINGS turns off dumping the database settings (the SET XXX commands) + +If no 'TO fileName' clause is specified, the +script is returned as a result set. This command can be used to create a backup +of the database. For long term storage, it is more portable than copying the +database files. + +If a 'TO fileName' clause is specified, then the whole +script (including insert statements) is written to this file, and a result set +without the insert statements is returned. + +The password must be in single quotes; it is case sensitive and can contain spaces. + +This command locks objects while it is running. +Admin rights are required to execute this command. + +When using the TABLE or SCHEMA option, only the selected table(s) / schema(s) are included. +"," +SCRIPT NODATA +" + "Commands (DML)","SHOW"," -SHOW { SCHEMAS | TABLES [ FROM schemaName ] | +@c@ SHOW { SCHEMAS | TABLES [ FROM schemaName ] | COLUMNS FROM tableName [ FROM schemaName ] } "," -Lists the schemas, tables, or the columns of a table." +Lists the schemas, tables, or the columns of a table. +"," +SHOW TABLES +" + +"Commands (DML)","Explicit table"," +TABLE [schemaName.]tableName +[ ORDER BY selectOrder [,...] ] +[ OFFSET expression { ROW | ROWS } ] +[ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } + { ONLY | WITH TIES } ] +"," +Selects data from a table. + +This command is an equivalent to SELECT * FROM tableName. 
+See [SELECT](https://h2database.com/html/commands.html#select) command for description of ORDER BY, OFFSET, and FETCH. +"," +TABLE TEST; +TABLE TEST ORDER BY ID FETCH FIRST ROW ONLY; +" + +"Commands (DML)","Table value"," +VALUES rowValueExpression [,...] +[ ORDER BY selectOrder [,...] ] +[ OFFSET expression { ROW | ROWS } ] +[ FETCH { FIRST | NEXT } [ expression [ PERCENT ] ] { ROW | ROWS } + { ONLY | WITH TIES } ] +"," +A list of rows that can be used like a table. +See [SELECT](https://h2database.com/html/commands.html#select) command for description of ORDER BY, OFFSET, and FETCH. +The column list of the resulting table is C1, C2, and so on. +"," +VALUES (1, 'Hello'), (2, 'World'); +" + +"Commands (DML)","WITH"," +WITH [ RECURSIVE ] { name [( columnName [,...] )] AS ( query ) [,...] } +{ query | @h2@ { insert | update | delete | mergeInto | mergeUsing | createTable } } +"," +Can be used to create a recursive or non-recursive query (common table expression). +For recursive queries the first select has to be a UNION. +One or more common table entries can be referred to by name. +Column name declarations are now optional - the column names will be inferred from the named select queries. +The final action in a WITH statement can be a select, insert, update, merge, delete or create table. +"," +WITH RECURSIVE cte(n) AS ( + SELECT 1 + UNION ALL + SELECT n + 1 + FROM cte + WHERE n < 100 +) +SELECT sum(n) FROM cte; + +Example 2: +WITH cte1 AS ( + SELECT 1 AS FIRST_COLUMN +), cte2 AS ( + SELECT FIRST_COLUMN+1 AS FIRST_COLUMN FROM cte1 +) +SELECT sum(FIRST_COLUMN) FROM cte2; +" + +"Commands (DDL)","ALTER DOMAIN"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +{ SET DEFAULT expression } + | { DROP DEFAULT } + | @h2@ { SET ON UPDATE expression } + | @h2@ { DROP ON UPDATE } +"," +Changes the default or on update expression of a domain. +Schema owner rights are required to execute this command. + +SET DEFAULT changes the default expression of a domain. 
+ +DROP DEFAULT removes the default expression of a domain. +Old expression is copied into domains and columns that use this domain and don't have an own default expression. + +SET ON UPDATE changes the expression that is set on update if value for this domain is not specified in update +statement. + +DROP ON UPDATE removes the expression that is set on update of a column with this domain. +Old expression is copied into domains and columns that use this domain and don't have an own on update expression. + +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D1 SET DEFAULT ''; +ALTER DOMAIN D1 DROP DEFAULT; +ALTER DOMAIN D1 SET ON UPDATE CURRENT_TIMESTAMP; +ALTER DOMAIN D1 DROP ON UPDATE; +" + +"Commands (DDL)","ALTER DOMAIN ADD CONSTRAINT"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +ADD [ constraintNameDefinition ] +CHECK (condition) @h2@ [ CHECK | NOCHECK ] +"," +Adds a constraint to a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D ADD CONSTRAINT D_POSITIVE CHECK (VALUE > 0) +" + +"Commands (DDL)","ALTER DOMAIN DROP CONSTRAINT"," +ALTER DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName +DROP CONSTRAINT @h2@ [ IF EXISTS ] [schemaName.]constraintName +"," +Removes a constraint from a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D DROP CONSTRAINT D_POSITIVE +" + +"Commands (DDL)","ALTER DOMAIN RENAME"," +@h2@ ALTER DOMAIN [ IF EXISTS ] [schemaName.]domainName RENAME TO newName +"," +Renames a domain. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. 
+"," +ALTER DOMAIN TEST RENAME TO MY_TYPE +" + +"Commands (DDL)","ALTER DOMAIN RENAME CONSTRAINT"," +@h2@ ALTER DOMAIN [ IF EXISTS ] [schemaName.]domainName +@h2@ RENAME CONSTRAINT [schemaName.]oldConstraintName +@h2@ TO newConstraintName +"," +Renames a constraint. +This command commits an open transaction in this connection. +"," +ALTER DOMAIN D RENAME CONSTRAINT FOO TO BAR +" + "Commands (DDL)","ALTER INDEX RENAME"," -ALTER INDEX indexName RENAME TO newIndexName +@h2@ ALTER INDEX [ IF EXISTS ] [schemaName.]indexName RENAME TO newIndexName +"," +Renames an index. +This command commits an open transaction in this connection. "," -Renames an index." +ALTER INDEX IDXNAME RENAME TO IDX_TEST_NAME +" + "Commands (DDL)","ALTER SCHEMA RENAME"," -ALTER SCHEMA schema RENAME TO newSchemaName +@h2@ ALTER SCHEMA [ IF EXISTS ] schemaName RENAME TO newSchemaName "," -Renames a schema." +Renames a schema. +Schema admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER SCHEMA TEST RENAME TO PRODUCTION +" + "Commands (DDL)","ALTER SEQUENCE"," -ALTER SEQUENCE sequenceName [ RESTART WITH long ] [ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] +ALTER SEQUENCE @h2@ [ IF EXISTS ] [schemaName.]sequenceName alterSequenceOption [...] "," -Changes the parameters of a sequence." -"Commands (DDL)","ALTER TABLE ADD"," -ALTER TABLE tableName ADD [ COLUMN ] -{ [ IF NOT EXISTS ] columnDefinition [ { BEFORE | AFTER } columnName ] - | ( { columnDefinition } [,...] ) } +Changes the parameters of a sequence. +Schema owner rights are required to execute this command. +This command does not commit the current transaction; however the new value is used by other +transactions immediately, and rolling back this command has no effect. "," -Adds a new column to a table." 
+ALTER SEQUENCE SEQ_ID RESTART WITH 1000 +" + +"Commands (DDL)","ALTER TABLE ADD"," +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName ADD [ COLUMN ] +{ @h2@ [ IF NOT EXISTS ] columnName columnDefinition @h2@ [ USING initialValueExpression ] + | @h2@ { ( { columnName columnDefinition | tableConstraintDefinition } [,...] ) } } +@h2@ [ { { BEFORE | AFTER } columnName } | FIRST ] +"," +Adds a new column to a table. +This command commits an open transaction in this connection. + +If USING is specified the provided expression is used to generate initial value of the new column for each row. +The expression may reference existing columns of the table. +Otherwise the DEFAULT expression is used, if any. +If neither USING nor DEFAULT are specified, the NULL is used. +"," +ALTER TABLE TEST ADD CREATEDATE TIMESTAMP +" + "Commands (DDL)","ALTER TABLE ADD CONSTRAINT"," -ALTER TABLE tableName ADD constraint [ CHECK | NOCHECK ] -"," -Adds a constraint to a table." +ALTER TABLE @h2@ [ IF EXISTS ] tableName ADD tableConstraintDefinition @h2@ [ CHECK | NOCHECK ] +"," +Adds a constraint to a table. If NOCHECK is specified, existing rows are not +checked for consistency (the default is to check consistency for existing rows). +The required indexes are automatically created if they don't exist yet. +It is not possible to disable checking for unique constraints. +This command commits an open transaction in this connection. +"," +ALTER TABLE TEST ADD CONSTRAINT NAME_UNIQUE UNIQUE(NAME) +" + +"Commands (DDL)","ALTER TABLE RENAME CONSTRAINT"," +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName +@h2@ RENAME CONSTRAINT [schemaName.]oldConstraintName +@h2@ TO newConstraintName +"," +Renames a constraint. +This command commits an open transaction in this connection. 
+"," +ALTER TABLE TEST RENAME CONSTRAINT FOO TO BAR +" + "Commands (DDL)","ALTER TABLE ALTER COLUMN"," -ALTER TABLE tableName ALTER COLUMN columnName -{ { dataType [ DEFAULT expression ] [ [ NOT ] NULL ] [ AUTO_INCREMENT | IDENTITY ] } - | { RENAME TO name } - | { RESTART WITH long } - | { SELECTIVITY int } +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName +ALTER COLUMN @h2@ [ IF EXISTS ] columnName +{ @h2@ { columnDefinition } + | @h2@ { RENAME TO name } + | SET GENERATED { ALWAYS | BY DEFAULT } [ alterIdentityColumnOption [...] ] + | alterIdentityColumnOption [...] + | DROP IDENTITY + | @h2@ { SELECTIVITY int } | { SET DEFAULT expression } - | { SET NULL } - | { SET NOT NULL } } + | { DROP DEFAULT } + | DROP EXPRESSION + | @h2@ { SET ON UPDATE expression } + | @h2@ { DROP ON UPDATE } + | @h2@ { SET DEFAULT ON NULL } + | @h2@ { DROP DEFAULT ON NULL } + | { SET NOT NULL } + | { DROP NOT NULL } | @c@ { SET NULL } + | { SET DATA TYPE dataTypeOrDomain @h2@ [ USING newValueExpression ] } + | @h2@ { SET { VISIBLE | INVISIBLE } } } "," Changes the data type of a column, rename a column, -change the identity value, or change the selectivity." +change the identity value, or change the selectivity. + +Changing the data type fails if the data can not be converted. + +SET GENERATED ALWAYS, SET GENERATED BY DEFAULT, or identity options convert the column into identity column +(if it wasn't an identity column) and set new values of specified options for its sequence. + +DROP IDENTITY removes identity status of a column. + +SELECTIVITY sets the selectivity (1-100) for a column. +Setting the selectivity to 0 means the default value. +Selectivity is used by the cost based optimizer to calculate the estimated cost of an index. +Selectivity 100 means values are unique, 10 means every distinct value appears 10 times on average. + +SET DEFAULT changes the default value of a column. +This command doesn't affect generated and identity columns. 
+ +DROP DEFAULT removes the default value of a column. + +DROP EXPRESSION converts generated column into base column. + +SET ON UPDATE changes the value that is set on update if value for this column is not specified in update statement. +This command doesn't affect generated and identity columns. + +DROP ON UPDATE removes the value that is set on update of a column. + +SET DEFAULT ON NULL makes NULL value work as DEFAULT value in assignments to this column. + +DROP DEFAULT ON NULL makes NULL value work as NULL value in assignments to this column. + +SET NOT NULL sets a column to not allow NULL. Rows may not contain NULL in this column. + +DROP NOT NULL and SET NULL set a column to allow NULL. +The column may not be part of a primary key and may not be an identity column. + +SET DATA TYPE changes the data type of a column, for each row old value is converted to this data type +unless USING is specified with a custom expression. +USING expression may reference previous value of the modified column by its name and values of other columns. + +SET INVISIBLE makes the column hidden, i.e. it will not appear in SELECT * results. +SET VISIBLE has the reverse effect. + +This command commits an open transaction in this connection. +"," +ALTER TABLE TEST ALTER COLUMN NAME CLOB; +ALTER TABLE TEST ALTER COLUMN NAME RENAME TO TEXT; +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 10000; +ALTER TABLE TEST ALTER COLUMN NAME SELECTIVITY 100; +ALTER TABLE TEST ALTER COLUMN NAME SET DEFAULT ''; +ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; +ALTER TABLE TEST ALTER COLUMN NAME SET NULL; +ALTER TABLE TEST ALTER COLUMN NAME SET VISIBLE; +ALTER TABLE TEST ALTER COLUMN NAME SET INVISIBLE; +" + "Commands (DDL)","ALTER TABLE DROP COLUMN"," -ALTER TABLE tableName DROP COLUMN [ IF EXISTS ] columnName +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName +DROP [ COLUMN ] @h2@ [ IF EXISTS ] +@h2@ { ( columnName [,...] ) } | columnName @c@ [,...] +"," +Removes column(s) from a table. 
+This command commits an open transaction in this connection. "," -Removes a column from a table." +ALTER TABLE TEST DROP COLUMN NAME +ALTER TABLE TEST DROP COLUMN (NAME1, NAME2) +" + "Commands (DDL)","ALTER TABLE DROP CONSTRAINT"," -ALTER TABLE tableName DROP { CONSTRAINT [ IF EXISTS ] constraintName | PRIMARY KEY } +ALTER TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName DROP +CONSTRAINT @h2@ [ IF EXISTS ] [schemaName.]constraintName [ RESTRICT | CASCADE ] | @c@ { PRIMARY KEY } "," -Removes a constraint or a primary key from a table." -"Commands (DDL)","ALTER TABLE SET"," -ALTER TABLE tableName SET REFERENTIAL_INTEGRITY - { FALSE | TRUE [ CHECK | NOCHECK ] } +Removes a constraint or a primary key from a table. +If CASCADE is specified, unique or primary key constraint is dropped together with all +referential constraints that reference the specified constraint. +This command commits an open transaction in this connection. "," -Disables or enables referential integrity checking for a table." +ALTER TABLE TEST DROP CONSTRAINT UNIQUE_NAME RESTRICT +" + +"Commands (DDL)","ALTER TABLE SET"," +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName +SET REFERENTIAL_INTEGRITY +@h2@ { FALSE | TRUE } @h2@ [ CHECK | NOCHECK ] +"," +Disables or enables referential integrity checking for a table. This command can +be used inside a transaction. Enabling referential integrity does not check +existing data, except if CHECK is specified. Use SET REFERENTIAL_INTEGRITY to +disable it for all tables; the global flag and the flag for each table are +independent. + +This command commits an open transaction in this connection. +"," +ALTER TABLE TEST SET REFERENTIAL_INTEGRITY FALSE +" + "Commands (DDL)","ALTER TABLE RENAME"," -ALTER TABLE tableName RENAME TO newName +@h2@ ALTER TABLE [ IF EXISTS ] [schemaName.]tableName RENAME TO newName +"," +Renames a table. +This command commits an open transaction in this connection. "," -Renames a table." 
+ALTER TABLE TEST RENAME TO MY_DATA +" + "Commands (DDL)","ALTER USER ADMIN"," -ALTER USER userName ADMIN { TRUE | FALSE } +@h2@ ALTER USER userName ADMIN { TRUE | FALSE } "," -Switches the admin flag of a user on or off." -"Commands (DDL)","ALTER USER RENAME"," -ALTER USER userName RENAME TO newUserName +Switches the admin flag of a user on or off. + +Only unquoted or uppercase user names are allowed. +Admin rights are required to execute this command. +This command commits an open transaction in this connection. "," -Renames a user." +ALTER USER TOM ADMIN TRUE +" + +"Commands (DDL)","ALTER USER RENAME"," +@h2@ ALTER USER userName RENAME TO newUserName +"," +Renames a user. +After renaming a user, the password becomes invalid and needs to be changed as well. + +Only unquoted or uppercase user names are allowed. +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER USER TOM RENAME TO THOMAS +" + "Commands (DDL)","ALTER USER SET PASSWORD"," -ALTER USER userName SET { PASSWORD string | SALT bytes HASH bytes } -"," -Changes the password of a user." -"Commands (DDL)","ALTER VIEW"," -ALTER VIEW viewName RECOMPILE -"," -Recompiles a view after the underlying tables have been changed or created." +@h2@ ALTER USER userName SET { PASSWORD string | SALT bytes HASH bytes } +"," +Changes the password of a user. +Only unquoted or uppercase user names are allowed. +The password must be enclosed in single quotes. It is case sensitive +and can contain spaces. The salt and hash values are hex strings. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER USER SA SET PASSWORD 'rioyxlgt' +" + +"Commands (DDL)","ALTER VIEW RECOMPILE"," +@h2@ ALTER VIEW [ IF EXISTS ] [schemaName.]viewName RECOMPILE +"," +Recompiles a view after the underlying tables have been changed or created. +Schema owner rights are required to execute this command. 
+This command is used for views created using CREATE FORCE VIEW. +This command commits an open transaction in this connection. +"," +ALTER VIEW ADDRESS_VIEW RECOMPILE +" + +"Commands (DDL)","ALTER VIEW RENAME"," +@h2@ ALTER VIEW [ IF EXISTS ] [schemaName.]viewName RENAME TO newName +"," +Renames a view. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +ALTER VIEW TEST RENAME TO MY_VIEW +" + "Commands (DDL)","ANALYZE"," -ANALYZE [ SAMPLE_SIZE rowCountInt ] -"," -Updates the selectivity statistics of all tables." -"Commands (DDL)","COMMENT"," -COMMENT ON -{ { COLUMN [ schemaName. ] tableName.columnName } +@h2@ ANALYZE [ TABLE [schemaName.]tableName ] [ SAMPLE_SIZE rowCountInt ] +"," +Updates the selectivity statistics of tables. +If no table name is given, all tables are analyzed. +The selectivity is used by the +cost based optimizer to select the best index for a given query. If no sample +size is set, up to 10000 rows per table are read. The value 0 means all rows are +read. The selectivity can be set manually using ALTER TABLE ALTER COLUMN +SELECTIVITY. Manual values are overwritten by this statement. The selectivity is +available in the INFORMATION_SCHEMA.COLUMNS table. + +This command commits an open transaction in this connection. +"," +ANALYZE SAMPLE_SIZE 1000 +" + +"Commands (DDL)","COMMENT ON"," +@h2@ COMMENT ON +@h2@ { { COLUMN [schemaName.]tableName.columnName } | { { TABLE | VIEW | CONSTANT | CONSTRAINT | ALIAS | INDEX | ROLE - | SCHEMA | SEQUENCE | TRIGGER | USER | DOMAIN } [ schemaName. ] objectName } } -IS expression -"," -Sets the comment of a database object." + | SCHEMA | SEQUENCE | TRIGGER | USER | DOMAIN } [schemaName.]objectName } } +@h2@ IS expression +"," +Sets the comment of a database object. Use NULL or empty string to remove the comment. + +Admin rights are required to execute this command if object is a USER or ROLE. 
+Schema owner rights are required to execute this command for all other types of objects. +This command commits an open transaction in this connection. +"," +COMMENT ON TABLE TEST IS 'Table used for testing' +" + "Commands (DDL)","CREATE AGGREGATE"," -CREATE AGGREGATE [ IF NOT EXISTS ] newAggregateName FOR className -"," -Creates a new user-defined aggregate function." +@h2@ CREATE AGGREGATE [ IF NOT EXISTS ] [schemaName.]aggregateName FOR classNameString +"," +Creates a new user-defined aggregate function. The method name must be the full +qualified class name. The class must implement the interface +""org.h2.api.Aggregate"" or ""org.h2.api.AggregateFunction"". + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +CREATE AGGREGATE SIMPLE_MEDIAN FOR 'com.acme.db.Median' +" + "Commands (DDL)","CREATE ALIAS"," -CREATE ALIAS [ IF NOT EXISTS ] newFunctionAliasName [ DETERMINISTIC ] -[ NOBUFFER ] { FOR classAndMethodName | AS sourceCodeString } -"," -Creates a new function alias." +@h2@ CREATE ALIAS [ IF NOT EXISTS ] [schemaName.]functionAliasName +@h2@ [ DETERMINISTIC ] +@h2@ { FOR classAndMethodString | AS sourceCodeString } +"," +Creates a new function alias. If this is a ResultSet returning function, +by default the return value is cached in a local temporary file. + +DETERMINISTIC - Deterministic functions must always return the same value for the same parameters. + +The method name must be the full qualified class and method name, +and may optionally include the parameter classes as in +""java.lang.Integer.parseInt(java.lang.String, int)"". The class and the method +must both be public, and the method must be static. The class must be available +in the classpath of the database engine (when using the server mode, +it must be in the classpath of the server). + +When defining a function alias with source code, the Sun ""javac"" is compiler +is used if the file ""tools.jar"" is in the classpath. 
If not, ""javac"" is run as a separate process. +Only the source code is stored in the database; the class is compiled each time +the database is re-opened. Source code is usually passed +as dollar quoted text to avoid escaping problems. If import statements are used, +then the tag @CODE must be added before the method. + +If the method throws an SQLException, it is directly re-thrown to the calling application; +all other exceptions are first converted to a SQLException. + +If the first parameter of the Java function is a ""java.sql.Connection"", then a +connection to the database is provided. This connection must not be closed. +If the class contains multiple methods with the given name but different +parameter count, all methods are mapped. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. + +If you have the Groovy jar in your classpath, it is also possible to write methods using Groovy. +"," +CREATE ALIAS MY_SQRT FOR 'java.lang.Math.sqrt'; +CREATE ALIAS MY_ROUND FOR 'java.lang.Math.round(double)'; +CREATE ALIAS GET_SYSTEM_PROPERTY FOR 'java.lang.System.getProperty'; +CALL GET_SYSTEM_PROPERTY('java.class.path'); +CALL GET_SYSTEM_PROPERTY('com.acme.test', 'true'); +CREATE ALIAS REVERSE AS 'String reverse(String s) { return new StringBuilder(s).reverse().toString(); }'; +CALL REVERSE('Test'); +CREATE ALIAS tr AS '@groovy.transform.CompileStatic + static String tr(String str, String sourceSet, String replacementSet){ + return str.tr(sourceSet, replacementSet); + } +' +" + "Commands (DDL)","CREATE CONSTANT"," -CREATE CONSTANT [ IF NOT EXISTS ] newConstantName VALUE expression +@h2@ CREATE CONSTANT [ IF NOT EXISTS ] [schemaName.]constantName +VALUE expression "," -Creates a new constant." -"Commands (DDL)","CREATE DOMAIN"," -CREATE DOMAIN [ IF NOT EXISTS ] newDomainName AS dataType -[ DEFAULT expression ] [ [ NOT ] NULL ] [ SELECTIVITY selectivity ] -[ CHECK condition ] +Creates a new constant. 
+Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. "," -Creates a new data type (domain)." +CREATE CONSTANT ONE VALUE 1 +" + +"Commands (DDL)","CREATE DOMAIN"," +CREATE DOMAIN @h2@ [ IF NOT EXISTS ] [schemaName.]domainName +[ AS ] dataTypeOrDomain +[ DEFAULT expression ] +@h2@ [ ON UPDATE expression ] +@h2@ [ COMMENT expression ] +[ CHECK (condition) ] [...] +"," +Creates a new domain to define a set of permissible values. +Schema owner rights are required to execute this command. +Domains can be used as data types. +The domain constraints must evaluate to TRUE or to UNKNOWN. +In the conditions, the term VALUE refers to the value being tested. + +This command commits an open transaction in this connection. +"," +CREATE DOMAIN EMAIL AS VARCHAR(255) CHECK (POSITION('@', VALUE) > 1) +" + "Commands (DDL)","CREATE INDEX"," -CREATE -{ [ UNIQUE ] [ HASH | SPATIAL] INDEX [ [ IF NOT EXISTS ] newIndexName ] - | PRIMARY KEY [ HASH ] } -ON tableName ( indexColumn [,...] ) -"," -Creates a new index." +@h2@ CREATE [ UNIQUE | SPATIAL ] INDEX +@h2@ [ [ IF NOT EXISTS ] [schemaName.]indexName ] +@h2@ ON [schemaName.]tableName ( indexColumn [,...] ) +@h2@ [ INCLUDE ( indexColumn [,...] ) ] +"," +Creates a new index. +This command commits an open transaction in this connection. + +INCLUDE clause may only be specified for UNIQUE indexes. +With this clause additional columns are included into index, but aren't used in unique checks. + +Spatial indexes are supported only on GEOMETRY columns. +They may contain only one column and are used by the +[spatial overlapping operator](https://h2database.com/html/grammar.html#compare). 
+"," +CREATE INDEX IDXNAME ON TEST(NAME) +" + "Commands (DDL)","CREATE LINKED TABLE"," -CREATE [ FORCE ] [ [ GLOBAL | LOCAL ] TEMPORARY ] -LINKED TABLE [ IF NOT EXISTS ] -name ( driverString, urlString, userString, passwordString, -[ originalSchemaString, ] originalTableString ) [ EMIT UPDATES | READONLY ] -"," -Creates a table link to an external table." +@h2@ CREATE [ FORCE ] [ [ GLOBAL | LOCAL ] TEMPORARY ] +@h2@ LINKED TABLE [ IF NOT EXISTS ] +@h2@ [schemaName.]tableName ( driverString, urlString, userString, passwordString, +@h2@ [ originalSchemaString, ] @h2@ originalTableString ) +@h2@ [ EMIT UPDATES | READONLY ] [ FETCH_SIZE sizeInt] [AUTOCOMMIT ON|OFF] +"," +Creates a table link to an external table. The driver name may be empty if the +driver is already loaded. If the schema name is not set, only one table with +that name may exist in the target database. + +FORCE - Create the LINKED TABLE even if the remote database/table does not exist. + +EMIT UPDATES - Usually, for update statements, the old rows are deleted first and then the new +rows are inserted. It is possible to emit update statements (except on +rollback), however in this case multi-row unique key updates may not always +work. Linked tables to the same database share one connection. + +READONLY - if set, the remote table may not be updated. This is enforced by H2. + +FETCH_SIZE - the number of rows fetched, a hint with non-negative number of rows to fetch from the external table +at once, may be ignored by the driver of external database. 0 is default and means no hint. +The value is passed to ""java.sql.Statement.setFetchSize()"" method. + +AUTOCOMMIT - if set to ON, the auto-commit mode is enabled. OFF disables it. +The value is passed to ""java.sql.Connection.setAutoCommit()"" method. + +If the connection to the source database is lost, the connection is re-opened +(this is a workaround for MySQL that disconnects after 8 hours of inactivity by default). 
+ +If a query is used instead of the original table name, the table is read only. +Queries must be enclosed in parenthesis: ""(SELECT * FROM ORDERS)"". + +To use JNDI to get the connection, the driver class must be a +javax.naming.Context (for example ""javax.naming.InitialContext""), and the URL must +be the resource name (for example ""java:comp/env/jdbc/Test""). + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +CREATE LINKED TABLE LINK('org.h2.Driver', 'jdbc:h2:./test2', + 'sa', 'sa', 'TEST'); +CREATE LINKED TABLE LINK('', 'jdbc:h2:./test2', 'sa', 'sa', + '(SELECT * FROM TEST WHERE ID>0)'); +CREATE LINKED TABLE LINK('javax.naming.InitialContext', + 'java:comp/env/jdbc/Test', NULL, NULL, + '(SELECT * FROM TEST WHERE ID>0)'); +" + "Commands (DDL)","CREATE ROLE"," -CREATE ROLE [ IF NOT EXISTS ] newRoleName +CREATE ROLE @h2@ [ IF NOT EXISTS ] newRoleName "," -Creates a new role." -"Commands (DDL)","CREATE SCHEMA"," -CREATE SCHEMA [ IF NOT EXISTS ] name [ AUTHORIZATION ownerUserName ] +Creates a new role. +This command commits an open transaction in this connection. "," -Creates a new schema." +CREATE ROLE READONLY +" + +"Commands (DDL)","CREATE SCHEMA"," +CREATE SCHEMA @h2@ [ IF NOT EXISTS ] +{ name [ AUTHORIZATION ownerName ] | [ AUTHORIZATION ownerName ] } +@h2@ [ WITH tableEngineParamName [,...] ] +"," +Creates a new schema. +Schema admin rights are required to execute this command. + +If schema name is not specified, the owner name is used as a schema name. +If schema name is specified, but no owner is specified, the current user is used as an owner. + +Schema owners can create, rename, and drop objects in the schema. +Schema owners can drop the schema itself, but cannot rename it. +Some objects may still require admin rights for their creation, +see documentation of their CREATE statements for details. 
+ +Optional table engine parameters are used when CREATE TABLE command +is run on this schema without having its engine params set. + +This command commits an open transaction in this connection. +"," +CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA +" + "Commands (DDL)","CREATE SEQUENCE"," -CREATE SEQUENCE [ IF NOT EXISTS ] newSequenceName [ START WITH long ] -[ INCREMENT BY long ] -[ MINVALUE long | NOMINVALUE | NO MINVALUE ] -[ MAXVALUE long | NOMAXVALUE | NO MAXVALUE ] -[ CYCLE long | NOCYCLE | NO CYCLE ] -[ CACHE long | NOCACHE | NO CACHE ] -"," -Creates a new sequence." +CREATE SEQUENCE @h2@ [ IF NOT EXISTS ] [schemaName.]sequenceName +[ { AS dataType | sequenceOption } [...] ] +"," +Creates a new sequence. +Schema owner rights are required to execute this command. + +The data type of a sequence must be a numeric type, the default is BIGINT. +Sequence can produce only integer values. +For TINYINT the allowed values are between -128 and 127. +For SMALLINT the allowed values are between -32768 and 32767. +For INTEGER the allowed values are between -2147483648 and 2147483647. +For BIGINT the allowed values are between -9223372036854775808 and 9223372036854775807. +For NUMERIC and DECFLOAT the allowed values depend on precision, +but cannot exceed the range of BIGINT data type (from -9223372036854775808 to 9223372036854775807); +the scale of NUMERIC must be 0. +For REAL the allowed values are between -16777216 and 16777216. +For DOUBLE PRECISION the allowed values are between -9007199254740992 and 9007199254740992. + +Used values are never re-used, even when the transaction is rolled back. + +This command commits an open transaction in this connection. +"," +CREATE SEQUENCE SEQ_ID; +CREATE SEQUENCE SEQ2 AS INTEGER START WITH 10; +" + "Commands (DDL)","CREATE TABLE"," -CREATE [ CACHED | MEMORY ] [ TEMP | [ GLOBAL | LOCAL ] TEMPORARY ] -TABLE [ IF NOT EXISTS ] name -[ ( { columnDefinition | constraint } [,...] 
) ] -[ ENGINE tableEngineName [ WITH tableEngineParamName [,...] ] ] -[ NOT PERSISTENT ] [ TRANSACTIONAL ] -[ AS select ]"," -Creates a new table." +CREATE @h2@ [ CACHED | MEMORY ] [ @c@ { TEMP } | [ GLOBAL | LOCAL ] TEMPORARY ] +TABLE @h2@ [ IF NOT EXISTS ] [schemaName.]tableName +[ ( { columnName [columnDefinition] | tableConstraintDefinition } [,...] ) ] +@h2@ [ ENGINE tableEngineName ] +@h2@ [ WITH tableEngineParamName [,...] ] +@h2@ [ NOT PERSISTENT ] @h2@ [ TRANSACTIONAL ] +[ AS query [ WITH [ NO ] DATA ] ]"," +Creates a new table. + +Cached tables (the default for regular tables) are persistent, +and the number of rows is not limited by the main memory. +Memory tables (the default for temporary tables) are persistent, +but the index data is kept in main memory, +that means memory tables should not get too large. + +Temporary tables are deleted when closing or opening a database. +Temporary tables can be global (accessible by all connections) +or local (only accessible by the current connection). +The default for temporary tables is global. +Indexes of temporary tables are kept fully in main memory, +unless the temporary table is created using CREATE CACHED TABLE. + +The ENGINE option is only required when custom table implementations are used. +The table engine class must implement the interface ""org.h2.api.TableEngine"". +Any table engine parameters are passed down in the tableEngineParams field of the CreateTableData object. + +Either ENGINE, or WITH (table engine params), or both may be specified. If ENGINE is not specified +in CREATE TABLE, then the engine specified by DEFAULT_TABLE_ENGINE option of database params is used. + +Tables with the NOT PERSISTENT modifier are kept fully in memory, and all +rows are lost when the database is closed. + +The column definitions are optional if a query is specified. +In that case the column list of the query is used. 
+If the query is specified its results are inserted into created table unless WITH NO DATA is specified. + +This command commits an open transaction, except when using +TRANSACTIONAL (only supported for temporary tables). +"," +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) +" + "Commands (DDL)","CREATE TRIGGER"," -CREATE TRIGGER [ IF NOT EXISTS ] newTriggerName { BEFORE | AFTER | INSTEAD OF } -{ INSERT | UPDATE | DELETE | SELECT | ROLLBACK } [,...] ON tableName [ FOR EACH ROW ] -[ QUEUE int ] [ NOWAIT ] { CALL triggeredClassName | AS sourceCodeString } -"," -Creates a new trigger." +CREATE TRIGGER @h2@ [ IF NOT EXISTS ] [schemaName.]triggerName +{ BEFORE | AFTER | INSTEAD OF } +{ INSERT | UPDATE | DELETE | @h2@ { SELECT | ROLLBACK } } +@h2@ [,...] ON [schemaName.]tableName [ FOR EACH { ROW | STATEMENT } ] +@c@ [ QUEUE int ] @h2@ [ NOWAIT ] +@h2@ { CALL triggeredClassNameString | AS sourceCodeString } +"," +Creates a new trigger. +Admin rights are required to execute this command. + +The trigger class must be public and implement ""org.h2.api.Trigger"". +Inner classes are not supported. +The class must be available in the classpath of the database engine +(when using the server mode, it must be in the classpath of the server). + +The sourceCodeString must define a single method with no parameters that returns ""org.h2.api.Trigger"". +See CREATE ALIAS for requirements regarding the compilation. +Alternatively, javax.script.ScriptEngineManager can be used to create an instance of ""org.h2.api.Trigger"". +Currently javascript (included in every JRE) and ruby (with JRuby) are supported. +In that case the source must begin respectively with ""//javascript"" or ""#ruby"". + +BEFORE triggers are called after data conversion is made, default values are set, +null and length constraint checks have been made; +but before other constraints have been checked. +If there are multiple triggers, the order in which they are called is undefined. 
+ +ROLLBACK can be specified in combination with INSERT, UPDATE, and DELETE. +Only row based AFTER trigger can be called on ROLLBACK. +Exceptions that occur within such triggers are ignored. +As the operations that occur within a trigger are part of the transaction, +ROLLBACK triggers are only required if an operation communicates outside of the database. + +INSTEAD OF triggers are implicitly row based and behave like BEFORE triggers. +Only the first such trigger is called. Such triggers on views are supported. +They can be used to make views updatable. +These triggers on INSERT and UPDATE must update the passed new row to values that were actually inserted +by the trigger; they are used for [FINAL TABLE](https://h2database.com/html/grammar.html#data_change_delta_table) +and for retrieval of generated keys. + +A BEFORE SELECT trigger is fired just before the database engine tries to read from the table. +The trigger can be used to update a table on demand. +The trigger is called with both 'old' and 'new' set to null. + +The MERGE statement will call both INSERT and UPDATE triggers. +Not supported are SELECT triggers with the option FOR EACH ROW, +and AFTER SELECT triggers. + +Committing or rolling back a transaction within a trigger is not allowed, except for SELECT triggers. + +By default a trigger is called once for each statement, without the old and new rows. +FOR EACH ROW triggers are called once for each inserted, updated, or deleted row. + +QUEUE is implemented for syntax compatibility with HSQL and has no effect. + +The trigger needs to be created in the same schema as the table. +The schema name does not need to be specified when creating the trigger. + +This command commits an open transaction in this connection.
+"," +CREATE TRIGGER TRIG_INS BEFORE INSERT ON TEST FOR EACH ROW CALL 'MyTrigger'; +CREATE TRIGGER TRIG_SRC BEFORE INSERT ON TEST AS + 'org.h2.api.Trigger create() { return new MyTrigger(""constructorParam""); }'; +CREATE TRIGGER TRIG_JS BEFORE INSERT ON TEST AS '//javascript +return new Packages.MyTrigger(""constructorParam"");'; +CREATE TRIGGER TRIG_RUBY BEFORE INSERT ON TEST AS '#ruby +Java::MyPackage::MyTrigger.new(""constructorParam"")'; +" "Commands (DDL)","CREATE USER"," -CREATE USER [ IF NOT EXISTS ] newUserName -{ PASSWORD string | SALT bytes HASH bytes } [ ADMIN ] -"," -Creates a new user." +@h2@ CREATE USER [ IF NOT EXISTS ] newUserName +@h2@ { PASSWORD string | SALT bytes HASH bytes } @h2@ [ ADMIN ] +"," +Creates a new user. For compatibility, only unquoted or uppercase user names are allowed. +The password must be in single quotes. It is case sensitive and can contain spaces. +The salt and hash values are hex strings. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +CREATE USER GUEST PASSWORD 'abc' +" + "Commands (DDL)","CREATE VIEW"," -CREATE [ OR REPLACE ] [ FORCE ] VIEW [ IF NOT EXISTS ] newViewName -[ ( columnName [,...] ) ] AS select -"," -Creates a new view." +CREATE @h2@ [ OR REPLACE ] @h2@ [ FORCE ] +VIEW @h2@ [ IF NOT EXISTS ] [schemaName.]viewName +[ ( columnName [,...] ) ] AS query +"," +Creates a new view. If the force option is used, then the view is created even +if the underlying table(s) don't exist. +Schema owner rights are required to execute this command. + +If the OR REPLACE clause is used an existing view will be replaced, and any +dependent views will not need to be recreated. If dependent views will become +invalid as a result of the change an error will be generated, but this error +can be ignored if the FORCE clause is also used. + +Views are not updatable except when using 'instead of' triggers. 
+ +This command commits an open transaction in this connection. +"," +CREATE VIEW TEST_VIEW AS SELECT * FROM TEST WHERE ID < 100 +" + "Commands (DDL)","DROP AGGREGATE"," -DROP AGGREGATE [ IF EXISTS ] aggregateName +@h2@ DROP AGGREGATE [ IF EXISTS ] aggregateName +"," +Drops an existing user-defined aggregate function. +Schema owner rights are required to execute this command. + +This command commits an open transaction in this connection. "," -Drops an existing user-defined aggregate function." +DROP AGGREGATE SIMPLE_MEDIAN +" + "Commands (DDL)","DROP ALIAS"," -DROP ALIAS [ IF EXISTS ] existingFunctionAliasName +@h2@ DROP ALIAS [ IF EXISTS ] [schemaName.]aliasName +"," +Drops an existing function alias. +Schema owner rights are required to execute this command. + +This command commits an open transaction in this connection. "," -Drops an existing function alias." +DROP ALIAS MY_SQRT +" + "Commands (DDL)","DROP ALL OBJECTS"," -DROP ALL OBJECTS [ DELETE FILES ] +@h2@ DROP ALL OBJECTS [ DELETE FILES ] "," Drops all existing views, tables, sequences, schemas, function aliases, roles, -user-defined aggregate functions, domains, and users (except the current user)." +user-defined aggregate functions, domains, and users (except the current user). +If DELETE FILES is specified, the database files will be removed when the last +user disconnects from the database. Warning: this command can not be rolled +back. + +Admin rights are required to execute this command. +"," +DROP ALL OBJECTS +" + "Commands (DDL)","DROP CONSTANT"," -DROP CONSTANT [ IF EXISTS ] constantName +@h2@ DROP CONSTANT [ IF EXISTS ] [schemaName.]constantName "," -Drops a constant." -"Commands (DDL)","DROP DOMAIN"," -DROP DOMAIN [ IF EXISTS ] domainName +Drops a constant. +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. "," -Drops a data type (domain)." 
+DROP CONSTANT ONE +" + +"Commands (DDL)","DROP DOMAIN"," +DROP DOMAIN @h2@ [ IF EXISTS ] [schemaName.]domainName [ RESTRICT | CASCADE ] +"," +Drops a data type (domain). +Schema owner rights are required to execute this command. + +The command will fail if it is referenced by a column or another domain (the default). +Column descriptors are replaced with original definition of specified domain if the CASCADE clause is used. +Default and on update expressions are copied into domains and columns that use this domain and don't have own +expressions. Domain constraints are copied into domains that use this domain and to columns (as check constraints) that +use this domain. +This command commits an open transaction in this connection. +"," +DROP DOMAIN EMAIL +" + "Commands (DDL)","DROP INDEX"," -DROP INDEX [ IF EXISTS ] indexName +@h2@ DROP INDEX [ IF EXISTS ] [schemaName.]indexName +"," +Drops an index. +This command commits an open transaction in this connection. "," -Drops an index." +DROP INDEX IF EXISTS IDXNAME +" + "Commands (DDL)","DROP ROLE"," -DROP ROLE [ IF EXISTS ] roleName +DROP ROLE @h2@ [ IF EXISTS ] roleName "," -Drops a role." +Drops a role. +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +DROP ROLE READONLY +" + "Commands (DDL)","DROP SCHEMA"," -DROP SCHEMA [ IF EXISTS ] schemaName +DROP SCHEMA @h2@ [ IF EXISTS ] schemaName [ RESTRICT | CASCADE ] +"," +Drops a schema. +Schema owner rights are required to execute this command. +The command will fail if objects in this schema exist and the RESTRICT clause is used (the default). +All objects in this schema are dropped as well if the CASCADE clause is used. +This command commits an open transaction in this connection. "," -Drops a schema." +DROP SCHEMA TEST_SCHEMA +" + "Commands (DDL)","DROP SEQUENCE"," -DROP SEQUENCE [ IF EXISTS ] sequenceName +DROP SEQUENCE @h2@ [ IF EXISTS ] [schemaName.]sequenceName +"," +Drops a sequence. 
+Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. "," -Drops a sequence." +DROP SEQUENCE SEQ_ID +" + "Commands (DDL)","DROP TABLE"," -DROP TABLE [ IF EXISTS ] tableName [,...] [ RESTRICT | CASCADE ] +DROP TABLE @h2@ [ IF EXISTS ] [schemaName.]tableName @h2@ [,...] +[ RESTRICT | CASCADE ] "," -Drops an existing table, or a list of tables." +Drops an existing table, or a list of tables. +The command will fail if dependent objects exist and the RESTRICT clause is used (the default). +All dependent views and constraints are dropped as well if the CASCADE clause is used. +This command commits an open transaction in this connection. +"," +DROP TABLE TEST +" + "Commands (DDL)","DROP TRIGGER"," -DROP TRIGGER [ IF EXISTS ] triggerName +DROP TRIGGER @h2@ [ IF EXISTS ] [schemaName.]triggerName +"," +Drops an existing trigger. +This command commits an open transaction in this connection. "," -Drops an existing trigger." +DROP TRIGGER TRIG_INS +" + "Commands (DDL)","DROP USER"," -DROP USER [ IF EXISTS ] userName +@h2@ DROP USER [ IF EXISTS ] userName +"," +Drops a user. The current user cannot be dropped. +For compatibility, only unquoted or uppercase user names are allowed. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. "," -Drops a user." +DROP USER TOM +" + "Commands (DDL)","DROP VIEW"," -DROP VIEW [ IF EXISTS ] viewName [ RESTRICT | CASCADE ] +DROP VIEW @h2@ [ IF EXISTS ] [schemaName.]viewName [ RESTRICT | CASCADE ] "," -Drops an existing view." -"Commands (DDL)","TRUNCATE TABLE"," -TRUNCATE TABLE tableName +Drops an existing view. +Schema owner rights are required to execute this command. +All dependent views are dropped as well if the CASCADE clause is used (the default). +The command will fail if dependent views exist and the RESTRICT clause is used. +This command commits an open transaction in this connection. 
"," -Removes all rows from a table." +DROP VIEW TEST_VIEW +" + +"Commands (DDL)","TRUNCATE TABLE"," +TRUNCATE TABLE [schemaName.]tableName [ [ CONTINUE | RESTART ] IDENTITY ] +"," +Removes all rows from a table. +Unlike DELETE FROM without where clause, this command can not be rolled back. +This command is faster than DELETE without where clause. +Only regular data tables without foreign key constraints can be truncated +(except if referential integrity is disabled for this database or for this table). +Linked tables can't be truncated. +If RESTART IDENTITY is specified next values for identity columns are restarted. + +This command commits an open transaction in this connection. +"," +TRUNCATE TABLE TEST +" + "Commands (Other)","CHECKPOINT"," -CHECKPOINT +@h2@ CHECKPOINT +"," +Flushes the data to disk. + +Admin rights are required to execute this command. "," -Flushes the data to disk." +CHECKPOINT +" + "Commands (Other)","CHECKPOINT SYNC"," -CHECKPOINT SYNC +@h2@ CHECKPOINT SYNC +"," +Flushes the data to disk and forces all system buffers to be written +to the underlying device. + +Admin rights are required to execute this command. "," -Flushes the data to disk and and forces all system buffers be written -to the underlying device." +CHECKPOINT SYNC +" + "Commands (Other)","COMMIT"," COMMIT [ WORK ] "," -Commits a transaction." +Commits a transaction. +"," +COMMIT +" + "Commands (Other)","COMMIT TRANSACTION"," -COMMIT TRANSACTION transactionName +@h2@ COMMIT TRANSACTION transactionName "," -Sets the resolution of an in-doubt transaction to 'commit'." -"Commands (Other)","GRANT RIGHT"," -GRANT { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON -tableName [,...] TO { PUBLIC | userName | roleName } +Sets the resolution of an in-doubt transaction to 'commit'. + +Admin rights are required to execute this command. +This command is part of the 2-phase-commit protocol. "," -Grants rights for a table to a user or role."
+COMMIT TRANSACTION XID_TEST +" + +"Commands (Other)","GRANT RIGHT"," +GRANT { { SELECT | INSERT | UPDATE | DELETE } [,..] | ALL [ PRIVILEGES ] } ON +{ @h2@ { SCHEMA schemaName } | { [ TABLE ] [schemaName.]tableName @h2@ [,...] } } +TO { PUBLIC | userName | roleName } +"," +Grants rights for a table to a user or role. + +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +GRANT SELECT ON TEST TO READONLY +" + "Commands (Other)","GRANT ALTER ANY SCHEMA"," -GRANT ALTER ANY SCHEMA TO userName -"," -Grant schema altering rights to a user." +@h2@ GRANT ALTER ANY SCHEMA TO userName +"," +Grant schema admin rights to a user. + +Schema admin can create, rename, or drop schemas and also has schema owner rights in every schema. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +GRANT ALTER ANY SCHEMA TO Bob +" + "Commands (Other)","GRANT ROLE"," -GRANT roleName TO { PUBLIC | userName | roleName } +GRANT { roleName [,...] } TO { PUBLIC | userName | roleName } +"," +Grants a role to a user or role. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. "," -Grants a role to a user or role." +GRANT READONLY TO PUBLIC +" + "Commands (Other)","HELP"," -HELP [ anything [...] ] +@h2@ HELP [ anything [...] ] +"," +Displays the help pages of SQL commands or keywords. "," -Displays the help pages of SQL commands or keywords." +HELP SELECT +" + "Commands (Other)","PREPARE COMMIT"," -PREPARE COMMIT newTransactionName +@h2@ PREPARE COMMIT newTransactionName "," -Prepares committing a transaction." -"Commands (Other)","REVOKE RIGHT"," -REVOKE { SELECT | INSERT | UPDATE | DELETE | ALL } [,...] ON -tableName [,...] FROM { PUBLIC | userName | roleName } +Prepares committing a transaction. +This command is part of the 2-phase-commit protocol. 
"," -Removes rights for a table from a user or role." +PREPARE COMMIT XID_TEST +" + +"Commands (Other)","REVOKE RIGHT"," +REVOKE { { SELECT | INSERT | UPDATE | DELETE } [,..] | ALL [ PRIVILEGES ] } ON +{ @h2@ { SCHEMA schemaName } | { [ TABLE ] [schemaName.]tableName @h2@ [,...] } } +FROM { PUBLIC | userName | roleName } +"," +Removes rights for a table from a user or role. + +Schema owner rights are required to execute this command. +This command commits an open transaction in this connection. +"," +REVOKE SELECT ON TEST FROM READONLY +" + +"Commands (Other)","REVOKE ALTER ANY SCHEMA"," +@h2@ REVOKE ALTER ANY SCHEMA FROM userName +"," +Removes schema admin rights from a user. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +REVOKE ALTER ANY SCHEMA FROM Bob +" + "Commands (Other)","REVOKE ROLE"," -REVOKE roleName FROM { PUBLIC | userName | roleName } +REVOKE { roleName [,...] } FROM { PUBLIC | userName | roleName } +"," +Removes a role from a user or role. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. "," -Removes a role from a user or role." +REVOKE READONLY FROM TOM +" + "Commands (Other)","ROLLBACK"," -ROLLBACK [ TO SAVEPOINT savepointName ] +ROLLBACK [ WORK ] [ TO SAVEPOINT savepointName ] "," -Rolls back a transaction." +Rolls back a transaction. If a savepoint name is used, the transaction is only +rolled back to the specified savepoint. +"," +ROLLBACK +" + "Commands (Other)","ROLLBACK TRANSACTION"," -ROLLBACK TRANSACTION transactionName +@h2@ ROLLBACK TRANSACTION transactionName +"," +Sets the resolution of an in-doubt transaction to 'rollback'. + +Admin rights are required to execute this command. +This command is part of the 2-phase-commit protocol. "," -Sets the resolution of an in-doubt transaction to 'rollback'."
+ROLLBACK TRANSACTION XID_TEST +" + "Commands (Other)","SAVEPOINT"," SAVEPOINT savepointName "," -Create a new savepoint." +Create a new savepoint. See also ROLLBACK. +Savepoints are only valid until the transaction is committed or rolled back. +"," +SAVEPOINT HALF_DONE +" + "Commands (Other)","SET @"," -SET @variableName [ = ] expression +@h2@ SET @variableName [ = ] expression "," -Updates a user-defined variable." -"Commands (Other)","SET ALLOW_LITERALS"," -SET ALLOW_LITERALS { NONE | ALL | NUMBERS } +Updates a user-defined variable. +Variables are not persisted and session scoped, that means only visible from within the session in which they are defined. +This command does not commit a transaction, and rollback does not affect it. "," -This setting can help solve the SQL injection problem." +SET @TOTAL=0 +" + +"Commands (Other)","SET ALLOW_LITERALS"," +@h2@ SET ALLOW_LITERALS { NONE | ALL | NUMBERS } +"," +This setting can help solve the SQL injection problem. By default, text and +number literals are allowed in SQL statements. However, this enables SQL +injection if the application dynamically builds SQL statements. SQL injection is +not possible if user data is set using parameters ('?'). + +NONE means literals of any kind are not allowed, only parameters and constants +are allowed. NUMBERS mean only numerical and boolean literals are allowed. ALL +means all literals are allowed (default). + +See also CREATE CONSTANT. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;ALLOW_LITERALS=NONE"" +"," +SET ALLOW_LITERALS NONE +" + "Commands (Other)","SET AUTOCOMMIT"," -SET AUTOCOMMIT { TRUE | ON | FALSE | OFF } -"," -Switches auto commit on or off." +@h2@ SET AUTOCOMMIT { TRUE | ON | FALSE | OFF } +"," +Switches auto commit on or off. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;AUTOCOMMIT=OFF"" - +however this will not work as expected when using a connection pool +(the connection pool manager will re-enable autocommit when returning +the connection to the pool, so autocommit will only be disabled the first +time the connection is used). +"," +SET AUTOCOMMIT OFF +" + "Commands (Other)","SET CACHE_SIZE"," -SET CACHE_SIZE int -"," -Sets the size of the cache in KB (each KB being 1024 bytes) for the current database." +@h2@ SET CACHE_SIZE int +"," +Sets the size of the cache in KB (each KB being 1024 bytes) for the current database. +The default is 65536 per available GB of RAM, i.e. 64 MB per GB. +The value is rounded to the next higher power of two. +Depending on the virtual machine, the actual memory required may be higher. + +This setting is persistent and affects all connections as there is only one cache per database. +Using a very small value (especially 0) will reduce performance a lot. +This setting only affects the database engine (the server in a client/server environment; +in embedded mode, the database engine is in the same process as the application). +It has no effect for in-memory databases. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;CACHE_SIZE=8192"" +"," +SET CACHE_SIZE 8192 +" + "Commands (Other)","SET CLUSTER"," -SET CLUSTER serverListString +@h2@ SET CLUSTER serverListString "," This command should not be used directly by an application, the statement is -executed automatically by the system." -"Commands (Other)","SET BINARY_COLLATION"," -SET BINARY_COLLATION -{ UNSIGNED | SIGNED } ] } -"," -Sets the collation used for comparing BINARY columns, the default is SIGNED -for version 1."
The behavior may change in future +releases. Sets the cluster server list. An empty string switches off the cluster +mode. Switching on the cluster mode requires admin rights, but any user can +switch it off (this is automatically done when the client detects the other +server is not responding). + +This command is effective immediately, but does not commit an open transaction. +"," +SET CLUSTER '' +" + +"Commands (Other)","SET BUILTIN_ALIAS_OVERRIDE"," +@h2@ SET BUILTIN_ALIAS_OVERRIDE { TRUE | FALSE } +"," +Allows the overriding of the builtin system date/time functions +for unit testing purposes. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +SET BUILTIN_ALIAS_OVERRIDE TRUE +" + +"Commands (Other)","SET CATALOG"," +SET CATALOG { catalogString | @h2@ { catalogName } } +"," +This command has no effect if the specified name matches the name of the database, otherwise it throws an exception. + +This command does not commit a transaction. +"," +SET CATALOG 'DB' +SET CATALOG DB_NAME +" + "Commands (Other)","SET COLLATION"," -SET [ DATABASE ] COLLATION -{ OFF | collationName [ STRENGTH { PRIMARY | SECONDARY | TERTIARY | IDENTICAL } ] } -"," -Sets the collation used for comparing strings." -"Commands (Other)","SET COMPRESS_LOB"," -SET COMPRESS_LOB { NO | LZF | DEFLATE } -"," -This feature is only available for the PageStore storage engine." +@h2@ SET [ DATABASE ] COLLATION +@h2@ { OFF | collationName + [ STRENGTH { PRIMARY | SECONDARY | TERTIARY | IDENTICAL } ] } +"," +Sets the collation used for comparing strings. +This command can only be executed if there are no tables defined. +See ""java.text.Collator"" for details about the supported collations and the STRENGTH +(PRIMARY is usually case- and umlaut-insensitive; SECONDARY is case-insensitive but umlaut-sensitive; +TERTIARY is both case- and umlaut-sensitive; IDENTICAL is sensitive to all differences and only affects ordering). 
+ +The ICU4J collator is used if it is in the classpath. +It is also used if the collation name starts with ICU4J_ +(in that case, the ICU4J must be in the classpath, otherwise an exception is thrown). +The default collator is used if the collation name starts with DEFAULT_ +(even if ICU4J is in the classpath). +The charset collator is used if the collation name starts with CHARSET_ (e.g. CHARSET_CP500). This collator sorts +strings according to the binary representation in the given charset. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;COLLATION='ENGLISH'"" +"," +SET COLLATION ENGLISH +SET COLLATION CHARSET_CP500 +" + "Commands (Other)","SET DATABASE_EVENT_LISTENER"," -SET DATABASE_EVENT_LISTENER classNameString -"," -Sets the event listener class." +@h2@ SET DATABASE_EVENT_LISTENER classNameString +"," +Sets the event listener class. An empty string ('') means no listener should be +used. This setting is not persistent. + +Admin rights are required to execute this command, except if it is set when +opening the database (in this case it is reset just after opening the database). +This setting can be appended to the database URL: ""jdbc:h2:./test;DATABASE_EVENT_LISTENER='sample.MyListener'"" +"," +SET DATABASE_EVENT_LISTENER 'sample.MyListener' +" + "Commands (Other)","SET DB_CLOSE_DELAY"," -SET DB_CLOSE_DELAY int -"," -Sets the delay for closing a database if all connections are closed." +@h2@ SET DB_CLOSE_DELAY int +"," +Sets the delay for closing a database if all connections are closed. +The value -1 means the database is never closed until the close delay is set to some other value or SHUTDOWN is called. +The value 0 means no delay (default; the database is closed if the last connection to it is closed). 
+Values 1 and larger mean the number of seconds the database is left open after closing the last connection. + +If the application exits normally or System.exit is called, the database is closed immediately, even if a delay is set. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;DB_CLOSE_DELAY=-1"" +"," +SET DB_CLOSE_DELAY -1 +" + "Commands (Other)","SET DEFAULT_LOCK_TIMEOUT"," -SET DEFAULT LOCK_TIMEOUT int +@h2@ SET DEFAULT LOCK_TIMEOUT int "," Sets the default lock timeout (in milliseconds) in this database that is used -for the new sessions." +for the new sessions. The default value for this setting is 1000 (one second). + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +"," +SET DEFAULT_LOCK_TIMEOUT 5000 +" + +"Commands (Other)","SET DEFAULT_NULL_ORDERING"," +@h2@ SET DEFAULT_NULL_ORDERING { LOW | HIGH | FIRST | LAST } +"," +Changes the default ordering of NULL values. +This setting affects new indexes without explicit NULLS FIRST or NULLS LAST columns, +and ordering clauses of other commands without explicit null ordering. +This setting doesn't affect ordering of NULL values inside ARRAY or ROW values +(""ARRAY[NULL]"" is always considered as smaller than ""ARRAY[1]"" during sorting). + +LOW is the default one, NULL values are considered as smaller than other values during sorting. + +With HIGH default ordering NULL values are considered as larger than other values during sorting. + +With FIRST default ordering NULL values are sorted before other values, +no matter if ascending or descending order is used. + +With LAST default ordering NULL values are sorted after other values, +no matter if ascending or descending order is used.
+ +This setting is not persistent, but indexes are persisted with explicit NULLS FIRST or NULLS LAST ordering +and aren't affected by changes in this setting. +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting can be appended to the database URL: ""jdbc:h2:./test;DEFAULT_NULL_ORDERING=HIGH"" +"," +SET DEFAULT_NULL_ORDERING HIGH +" + "Commands (Other)","SET DEFAULT_TABLE_TYPE"," -SET DEFAULT_TABLE_TYPE { MEMORY | CACHED } -"," -Sets the default table storage type that is used when creating new tables." +@h2@ SET DEFAULT_TABLE_TYPE { MEMORY | CACHED } +"," +Sets the default table storage type that is used when creating new tables. +Memory tables are kept fully in the main memory (including indexes), however +the data is still stored in the database file. The size of memory tables is +limited by the memory. The default is CACHED. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +It has no effect for in-memory databases. +"," +SET DEFAULT_TABLE_TYPE MEMORY +" + "Commands (Other)","SET EXCLUSIVE"," -SET EXCLUSIVE { 0 | 1 | 2 } -"," -Switched the database to exclusive mode (1, 2) and back to normal mode (0)." +@h2@ SET EXCLUSIVE { 0 | 1 | 2 } +"," +Switches the database to exclusive mode (1, 2) and back to normal mode (0). + +In exclusive mode, new connections are rejected, and operations by +other connections are paused until the exclusive mode is disabled. +When using the value 1, existing connections stay open. +When using the value 2, all existing connections are closed +(and current transactions are rolled back) except the connection +that executes SET EXCLUSIVE. +Only the connection that set the exclusive mode can disable it. +When the connection is closed, it is automatically disabled.
+ +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +"," +SET EXCLUSIVE 1 +" + "Commands (Other)","SET IGNORECASE"," -SET IGNORECASE { TRUE | FALSE } +@h2@ SET IGNORECASE { TRUE | FALSE } "," If IGNORECASE is enabled, text columns in newly created tables will be -case-insensitive." +case-insensitive. Already existing tables are not affected. The effect of +case-insensitive columns is similar to using a collation with strength PRIMARY. +Case-insensitive columns are compared faster than when using a collation. +String literals and parameters are however still considered case sensitive even if this option is set. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;IGNORECASE=TRUE"" +"," +SET IGNORECASE TRUE +" + +"Commands (Other)","SET IGNORE_CATALOGS"," +@c@ SET IGNORE_CATALOGS { TRUE | FALSE } +"," +If IGNORE_CATALOGS is enabled, catalog names in front of schema names will be ignored. This can be used if +multiple catalogs used by the same connections must be simulated. Caveat: if both catalogs contain schemas of the +same name and if those schemas contain objects of the same name, this will lead to errors, when trying to manage, +access or change these objects. +This setting can be appended to the database URL: ""jdbc:h2:./test;IGNORE_CATALOGS=TRUE"" +"," +SET IGNORE_CATALOGS TRUE +" + "Commands (Other)","SET JAVA_OBJECT_SERIALIZER"," -SET JAVA_OBJECT_SERIALIZER -{ null | className } -"," -Sets the object used to serialize and deserialize java objects being stored in column of type OTHER." -"Commands (Other)","SET LOG"," -SET LOG int -"," -Sets the transaction log mode." 
+@h2@ SET JAVA_OBJECT_SERIALIZER { null | className } +"," +Sets the object used to serialize and deserialize java objects being stored in column of type OTHER. +The serializer class must be public and implement ""org.h2.api.JavaObjectSerializer"". +Inner classes are not supported. +The class must be available in the classpath of the database engine +(when using the server mode, it must be both in the classpath of the server and the client). +This command can only be executed if there are no tables defined. + +Admin rights are required to execute this command. +This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'"" +"," +SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName' +" + +"Commands (Other)","SET LAZY_QUERY_EXECUTION"," +@h2@ SET LAZY_QUERY_EXECUTION int +"," +Sets the lazy query execution mode. The values 0, 1 are supported. + +If true, then large results are retrieved in chunks. + +Note that not all queries support this feature, queries which do not are processed normally. + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;LAZY_QUERY_EXECUTION=1"" +"," +SET LAZY_QUERY_EXECUTION 1 +" + "Commands (Other)","SET LOCK_MODE"," -SET LOCK_MODE int -"," -Sets the lock mode." +@h2@ SET LOCK_MODE int +"," +Sets the lock mode. The values 0, 1, 2, and 3 are supported. The default is 3. +This setting affects all connections. + +The value 0 means no locking (should only be used for testing). +Please note that using SET LOCK_MODE 0 while at the same time +using multiple connections may result in inconsistent transactions. + +The value 3 means row-level locking for write operations. + +The values 1 and 2 have the same effect as 3. + +Admin rights are required to execute this command, as it affects all connections. 
+This command commits an open transaction in this connection. +This setting is persistent. +This setting can be appended to the database URL: ""jdbc:h2:./test;LOCK_MODE=0"" +"," +SET LOCK_MODE 0 +" + "Commands (Other)","SET LOCK_TIMEOUT"," -SET LOCK_TIMEOUT int +@h2@ SET LOCK_TIMEOUT int "," -Sets the lock timeout (in milliseconds) for the current session." -"Commands (Other)","SET MAX_LENGTH_INPLACE_LOB"," -SET MAX_LENGTH_INPLACE_LOB int +Sets the lock timeout (in milliseconds) for the current session. The default +value for this setting is 1000 (one second). + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;LOCK_TIMEOUT=10000"" "," -Sets the maximum size of an in-place LOB object." +SET LOCK_TIMEOUT 1000 +" + +"Commands (Other)","SET MAX_LENGTH_INPLACE_LOB"," +@h2@ SET MAX_LENGTH_INPLACE_LOB int +"," +Sets the maximum size of an in-place LOB object. + +This is the maximum length of an LOB that is stored with the record itself, +and the default value is 256. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +"," +SET MAX_LENGTH_INPLACE_LOB 128 +" + "Commands (Other)","SET MAX_LOG_SIZE"," -SET MAX_LOG_SIZE int -"," -Sets the maximum size of the transaction log, in megabytes." +@h2@ SET MAX_LOG_SIZE int +"," +Sets the maximum size of the transaction log, in megabytes. +If the log is larger, and if there is no open transaction, the transaction log is truncated. +If there is an open transaction, the transaction log will continue to grow however. +The default max size is 16 MB. +This setting has no effect for in-memory databases. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. 
+"," +SET MAX_LOG_SIZE 2 +" + "Commands (Other)","SET MAX_MEMORY_ROWS"," -SET MAX_MEMORY_ROWS int -"," -The maximum number of rows in a result set that are kept in-memory." +@h2@ SET MAX_MEMORY_ROWS int +"," +The maximum number of rows in a result set that are kept in-memory. If more rows +are read, then the rows are buffered to disk. +The default is 40000 per GB of available RAM. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +It has no effect for in-memory databases. +"," +SET MAX_MEMORY_ROWS 1000 +" + "Commands (Other)","SET MAX_MEMORY_UNDO"," -SET MAX_MEMORY_UNDO int -"," -The maximum number of undo records per a session that are kept in-memory." +@h2@ SET MAX_MEMORY_UNDO int +"," +The maximum number of undo records per a session that are kept in-memory. +If a transaction is larger, the records are buffered to disk. +The default value is 50000. +Changes to tables without a primary key can not be buffered to disk. +This setting is not supported when using multi-version concurrency. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. +It has no effect for in-memory databases. +"," +SET MAX_MEMORY_UNDO 1000 +" + "Commands (Other)","SET MAX_OPERATION_MEMORY"," -SET MAX_OPERATION_MEMORY int -"," -Sets the maximum memory used for large operations (delete and insert), in bytes." +@h2@ SET MAX_OPERATION_MEMORY int +"," +Sets the maximum memory used for large operations (delete and insert), in bytes. +Operations that use more memory are buffered to disk, slowing down the +operation. The default max size is 100000. 0 means no limit. + +This setting is not persistent. +Admin rights are required to execute this command, as it affects all connections. +It has no effect for in-memory databases. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;MAX_OPERATION_MEMORY=10000"" +"," +SET MAX_OPERATION_MEMORY 0 +" + "Commands (Other)","SET MODE"," -SET MODE { REGULAR | DB2 | DERBY | HSQLDB | MSSQLSERVER | MYSQL | ORACLE | POSTGRESQL } -"," -Changes to another database compatibility mode." -"Commands (Other)","SET MULTI_THREADED"," -SET MULTI_THREADED { 0 | 1 } -"," -Enabled (1) or disabled (0) multi-threading inside the database engine." +@h2@ SET MODE { REGULAR | STRICT | LEGACY | DB2 | DERBY | HSQLDB | MSSQLSERVER | MYSQL | ORACLE | POSTGRESQL } +"," +Changes to another database compatibility mode. For details, see +[Compatibility Modes](https://h2database.com/html/features.html#compatibility_modes). + +This setting is not persistent. +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting can be appended to the database URL: ""jdbc:h2:./test;MODE=MYSQL"" +"," +SET MODE HSQLDB +" + +"Commands (Other)","SET NON_KEYWORDS"," +@h2@ SET NON_KEYWORDS [ name [,...] ] +"," +Converts the specified tokens from keywords to plain identifiers for the current session. +This setting may break some commands and should be used with caution and only when necessary. +Use [quoted identifiers](https://h2database.com/html/grammar.html#quoted_name) instead of this setting if possible. + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;NON_KEYWORDS=KEY,VALUE"" +"," +SET NON_KEYWORDS KEY, VALUE +" + "Commands (Other)","SET OPTIMIZE_REUSE_RESULTS"," -SET OPTIMIZE_REUSE_RESULTS { 0 | 1 } -"," -Enabled (1) or disabled (0) the result reuse optimization." +@h2@ SET OPTIMIZE_REUSE_RESULTS { 0 | 1 } +"," +Enabled (1) or disabled (0) the result reuse optimization. If enabled, +subqueries and views used as subqueries are only re-run if the data in one of +the tables was changed. 
This option is enabled by default.
+
+Admin rights are required to execute this command, as it affects all connections.
+This command commits an open transaction in this connection.
+This setting can be appended to the database URL: ""jdbc:h2:./test;OPTIMIZE_REUSE_RESULTS=0""
+","
+SET OPTIMIZE_REUSE_RESULTS 0
+"
+
 "Commands (Other)","SET PASSWORD","
-SET PASSWORD string
+@h2@ SET PASSWORD string
 ","
-Changes the password of the current user."
-"Commands (Other)","SET QUERY_STATISTICS","
-SET QUERY_STATISTICS { TRUE | FALSE }
+Changes the password of the current user. The password must be in single quotes.
+It is case sensitive and can contain spaces.
+
+This command commits an open transaction in this connection.
 ","
-Disabled or enables query statistics gathering for the whole database."
+SET PASSWORD 'abcstzri!.5'
+"
+
+"Commands (Other)","SET QUERY_STATISTICS","
+@h2@ SET QUERY_STATISTICS { TRUE | FALSE }
+","
+Disables or enables query statistics gathering for the whole database.
+The statistics are reflected in the INFORMATION_SCHEMA.QUERY_STATISTICS meta-table.
+
+This setting is not persistent.
+This command commits an open transaction in this connection.
+Admin rights are required to execute this command, as it affects all connections.
+","
+SET QUERY_STATISTICS FALSE
+"
+
+"Commands (Other)","SET QUERY_STATISTICS_MAX_ENTRIES","
+@h2@ SET QUERY_STATISTICS_MAX_ENTRIES int
+","
+Set the maximum number of entries in query statistics meta-table.
+Default value is 100.
+
+This setting is not persistent.
+This command commits an open transaction in this connection.
+Admin rights are required to execute this command, as it affects all connections.
+","
+SET QUERY_STATISTICS_MAX_ENTRIES 500
+"
+
 "Commands (Other)","SET QUERY_TIMEOUT","
-SET QUERY_TIMEOUT int
+@h2@ SET QUERY_TIMEOUT int
 ","
-Set the query timeout of the current session to the given value."
-"Commands (Other)","SET REFERENTIAL_INTEGRITY"," -SET REFERENTIAL_INTEGRITY { TRUE | FALSE } +Set the query timeout of the current session to the given value. The timeout is +in milliseconds. All kinds of statements will throw an exception if they take +longer than the given value. The default timeout is 0, meaning no timeout. + +This command does not commit a transaction, and rollback does not affect it. "," -Disabled or enables referential integrity checking for the whole database." +SET QUERY_TIMEOUT 10000 +" + +"Commands (Other)","SET REFERENTIAL_INTEGRITY"," +@h2@ SET REFERENTIAL_INTEGRITY { TRUE | FALSE } +"," +Disabled or enables referential integrity checking for the whole database. +Enabling it does not check existing data. Use ALTER TABLE SET to disable it only +for one table. + +This setting is not persistent. +This command commits an open transaction in this connection. +Admin rights are required to execute this command, as it affects all connections. +"," +SET REFERENTIAL_INTEGRITY FALSE +" + "Commands (Other)","SET RETENTION_TIME"," -SET RETENTION_TIME int -"," -This property is only used when using the MVStore storage engine." +@h2@ SET RETENTION_TIME int +"," +How long to retain old, persisted data, in milliseconds. +The default is 45000 (45 seconds), 0 means overwrite data as early as possible. +It is assumed that a file system and hard disk will flush all write buffers within this time. +Using a lower value might be dangerous, unless the file system and hard disk flush the buffers earlier. +To manually flush the buffers, use CHECKPOINT SYNC, +however please note that according to various tests this does not always work as expected +depending on the operating system and hardware. + +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. +This setting is persistent. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;RETENTION_TIME=0"" +"," +SET RETENTION_TIME 0 +" + "Commands (Other)","SET SALT HASH"," -SET SALT bytes HASH bytes +@h2@ SET SALT bytes HASH bytes "," -Sets the password salt and hash for the current user." -"Commands (Other)","SET SCHEMA"," -SET SCHEMA schemaName +Sets the password salt and hash for the current user. The password must be in +single quotes. It is case sensitive and can contain spaces. + +This command commits an open transaction in this connection. "," -Changes the default schema of the current connection." +SET SALT '00' HASH '1122' +" + +"Commands (Other)","SET SCHEMA"," +SET SCHEMA { schemaString | @h2@ { schemaName } } +"," +Changes the default schema of the current connection. The default schema is used +in statements where no schema is set explicitly. The default schema for new +connections is PUBLIC. + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;SCHEMA=ABC"" +"," +SET SCHEMA 'PUBLIC' +SET SCHEMA INFORMATION_SCHEMA +" + "Commands (Other)","SET SCHEMA_SEARCH_PATH"," -SET SCHEMA_SEARCH_PATH schemaName [,...] -"," -Changes the schema search path of the current connection." +@h2@ SET SCHEMA_SEARCH_PATH schemaName [,...] +"," +Changes the schema search path of the current connection. The default schema is +used in statements where no schema is set explicitly. The default schema for new +connections is PUBLIC. + +This command does not commit a transaction, and rollback does not affect it. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;SCHEMA_SEARCH_PATH=ABC,DEF"" +"," +SET SCHEMA_SEARCH_PATH INFORMATION_SCHEMA, PUBLIC +" + +"Commands (Other)","SET SESSION CHARACTERISTICS"," +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL +{ READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE } +"," +Changes the transaction isolation level of the current session. +The actual support of isolation levels depends on the database engine. + +This command commits an open transaction in this session. +"," +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE +" + "Commands (Other)","SET THROTTLE"," -SET THROTTLE int -"," -Sets the throttle for the current connection." +@h2@ SET THROTTLE int +"," +Sets the throttle for the current connection. The value is the number of +milliseconds delay after each 50 ms. The default value is 0 (throttling +disabled). + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;THROTTLE=50"" +"," +SET THROTTLE 200 +" + +"Commands (Other)","SET TIME ZONE"," +SET TIME ZONE { LOCAL | intervalHourToMinute | @h2@ { intervalHourToSecond | string } } +"," +Sets the current time zone for the session. + +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;TIME ZONE='1:00'"" + +Time zone offset used for [CURRENT_TIME](https://h2database.com/html/functions.html#current_time), +[CURRENT_TIMESTAMP](https://h2database.com/html/functions.html#current_timestamp), +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date), +[LOCALTIME](https://h2database.com/html/functions.html#localtime), +and [LOCALTIMESTAMP](https://h2database.com/html/functions.html#localtimestamp) is adjusted, +so these functions will return new values based on the same UTC timestamp after execution of this command. 
+"," +SET TIME ZONE LOCAL +SET TIME ZONE '-5:00' +SET TIME ZONE INTERVAL '1:00' HOUR TO MINUTE +SET TIME ZONE 'Europe/London' +" + "Commands (Other)","SET TRACE_LEVEL"," -SET { TRACE_LEVEL_FILE | TRACE_LEVEL_SYSTEM_OUT } int -"," -Sets the trace level for file the file or system out stream." +@h2@ SET { TRACE_LEVEL_FILE | TRACE_LEVEL_SYSTEM_OUT } int +"," +Sets the trace level for file the file or system out stream. Levels are: 0=off, +1=error, 2=info, 3=debug. The default level is 1 for file and 0 for system out. +To use SLF4J, append "";TRACE_LEVEL_FILE=4"" to the database URL when opening the database. + +This setting is not persistent. +Admin rights are required to execute this command, as it affects all connections. +This command does not commit a transaction, and rollback does not affect it. +This setting can be appended to the database URL: ""jdbc:h2:./test;TRACE_LEVEL_SYSTEM_OUT=3"" +"," +SET TRACE_LEVEL_SYSTEM_OUT 3 +" + "Commands (Other)","SET TRACE_MAX_FILE_SIZE"," -SET TRACE_MAX_FILE_SIZE int -"," -Sets the maximum trace file size." -"Commands (Other)","SET UNDO_LOG"," -SET UNDO_LOG int -"," -Enables (1) or disables (0) the per session undo log." +@h2@ SET TRACE_MAX_FILE_SIZE int +"," +Sets the maximum trace file size. If the file exceeds the limit, the file is +renamed to .old and a new file is created. If another .old file exists, it is +deleted. The default max size is 16 MB. + +This setting is persistent. +Admin rights are required to execute this command, as it affects all connections. +This command commits an open transaction in this connection. 
+This setting can be appended to the database URL: ""jdbc:h2:./test;TRACE_MAX_FILE_SIZE=3""
+","
+SET TRACE_MAX_FILE_SIZE 10
+"
+
+"Commands (Other)","SET TRUNCATE_LARGE_LENGTH","
+@h2@ SET TRUNCATE_LARGE_LENGTH { TRUE | FALSE }
+","
+If ""TRUE"" is specified, the ""CHARACTER"", ""CHARACTER VARYING"", ""VARCHAR_IGNORECASE"", ""BINARY"",
+""BINARY_VARYING"", ""JAVA_OBJECT"" and ""JSON"" data types with too large length will be treated as these data types with
+maximum allowed length instead.
+By default, or if ""FALSE"" is specified, such definitions throw an exception.
+This setting can be used for compatibility with definitions from older versions of H2.
+
+This setting can be appended to the database URL: ""jdbc:h2:./test;TRUNCATE_LARGE_LENGTH=TRUE""
+","
+SET TRUNCATE_LARGE_LENGTH TRUE
+"
+
+"Commands (Other)","SET VARIABLE_BINARY","
+@h2@ SET VARIABLE_BINARY { TRUE | FALSE }
+","
+If ""TRUE"" is specified, the ""BINARY"" data type will be parsed as ""VARBINARY"" in the current session.
+It can be used for compatibility with older versions of H2.
+
+This setting can be appended to the database URL: ""jdbc:h2:./test;VARIABLE_BINARY=TRUE""
+","
+SET VARIABLE_BINARY TRUE
+"
+
 "Commands (Other)","SET WRITE_DELAY","
-SET WRITE_DELAY int
-","
-Set the maximum delay between a commit and flushing the log, in milliseconds."
+@h2@ SET WRITE_DELAY int
+","
+Set the maximum delay between a commit and flushing the log, in milliseconds.
+This setting is persistent. The default is 500 ms.
+
+Admin rights are required to execute this command, as it affects all connections.
+This command commits an open transaction in this connection.
+This setting can be appended to the database URL: ""jdbc:h2:./test;WRITE_DELAY=0""
+","
+SET WRITE_DELAY 2000
+"
+
 "Commands (Other)","SHUTDOWN","
-SHUTDOWN [ IMMEDIATELY | COMPACT | DEFRAG ]
+@h2@ SHUTDOWN [ IMMEDIATELY | COMPACT | DEFRAG ]
 ","
 This statement closes all open connections to the database and closes the
-database.
This command is usually not required, as the database is +closed automatically when the last connection to it is closed. + +If no option is used, then the database is closed normally. +All connections are closed, open transactions are rolled back. + +SHUTDOWN COMPACT fully compacts the database (re-creating the database may further reduce the database size). +If the database is closed normally (using SHUTDOWN or by closing all connections), then the database is also compacted, +but only for at most the time defined by the database setting ""h2.maxCompactTime"" in milliseconds (see there). + +SHUTDOWN IMMEDIATELY closes the database files without any cleanup and without compacting. + +SHUTDOWN DEFRAG is currently equivalent to COMPACT. + +Admin rights are required to execute this command. +"," +SHUTDOWN COMPACT +" + +"Literals","Value"," +string | @h2@ { dollarQuotedString } | numeric | dateAndTime | boolean | bytes + | interval | array | @h2@ { geometry | json | uuid } | null +"," +A literal value of any data type, or null. +"," +10 +" + +"Literals","Approximate numeric"," +[ + | - ] { { number [ . number ] } | { . number } } +E [ + | - ] expNumber +"," +An approximate numeric value. +Approximate numeric values have [DECFLOAT](https://h2database.com/html/datatypes.html#decfloat_type) data type. +To define a [DOUBLE PRECISION](https://h2database.com/html/datatypes.html#double_precision_type) value, use +""CAST(X AS DOUBLE PRECISION)"". +To define a [REAL](https://h2database.com/html/datatypes.html#real_type) value, use ""CAST(X AS REAL)"". +There are some special REAL, DOUBLE PRECISION, and DECFLOAT values: +to represent positive infinity, use ""CAST('Infinity' AS dataType)""; +for negative infinity, use ""CAST('-Infinity' AS dataType)""; +for ""NaN"" (not a number), use ""CAST('NaN' AS dataType)"". +"," +-1.4e-10 +CAST(1e2 AS REAL) +CAST('NaN' AS DOUBLE PRECISION) +" + +"Literals","Array"," +ARRAY '[' [ expression [,...] ] ']' +"," +An array of values. 
+"," +ARRAY[1, 2] +ARRAY[1] +ARRAY[] +" + +"Literals","Boolean"," +TRUE | FALSE | UNKNOWN +"," +A boolean value. +UNKNOWN is a NULL value with the boolean data type. +"," +TRUE +" + +"Literals","Bytes"," +X'hex' [ 'hex' [...] ] +"," +A binary string value. The hex value is not case sensitive and may contain space characters as separators. +If there are more than one group of quoted hex values, groups must be separated with whitespace. +"," +X'' +X'01FF' +X'01 bc 2a' +X'01' '02' +" + +"Literals","Date"," +DATE '[-]yyyy-MM-dd' +"," +A date literal. +"," +DATE '2004-12-31' +" + +"Literals","Date and time"," +date | time | timeWithTimeZone | timestamp | timestampWithTimeZone +"," +A literal value of any date-time data type. +"," +TIMESTAMP '1999-01-31 10:00:00' +" + +"Literals","Dollar Quoted String"," +@h2@ $$anythingExceptTwoDollarSigns$$ +"," +A string starts and ends with two dollar signs. Two dollar signs are not allowed +within the text. A whitespace is required before the first set of dollar signs. +No escaping is required within the text. +"," +$$John's car$$ +" + +"Literals","Exact numeric"," +[ + | - ] { { number [ . number ] } | { . number } } +"," +An exact numeric value. +Exact numeric values with dot have [NUMERIC](https://h2database.com/html/datatypes.html#numeric_type) data type, values +without dot small enough to fit into [INTEGER](https://h2database.com/html/datatypes.html#integer_type) data type have +this type, larger values small enough to fit into [BIGINT](https://h2database.com/html/datatypes.html#bigint_type) data +type have this type, others also have NUMERIC data type. +"," +-1600.05 +" + +"Literals","Hex Number"," +@h2@ [ + | - ] @h2@ 0x { digit | a-f | A-F } [...] +"," +A number written in hexadecimal notation. +"," +0xff +" + +"Literals","Int"," +[ + | - ] number +"," +The maximum integer number is 2147483647, the minimum is -2147483648. 
+"," +10 +" + +"Literals","GEOMETRY"," +@h2@ GEOMETRY { bytes | string } +"," +A binary string or character string with GEOMETRY object. + +A binary string should contain Well-known Binary Representation (WKB) from OGC 06-103r4. +Dimension system marks may be specified either in both OGC WKB or in PostGIS EWKB formats. +Optional SRID from EWKB may be specified. +POINT EMPTY stored with NaN values as specified in OGC 12-128r15 is supported. + +A character string should contain Well-known Text Representation (WKT) from OGC 06-103r4 +with optional SRID from PostGIS EWKT extension. + +"," +GEOMETRY 'GEOMETRYCOLLECTION (POINT (1 2))' +GEOMETRY X'00000000013ff00000000000003ff0000000000000' +" + +"Literals","JSON"," +@h2@ JSON { bytes | string } +"," +A binary or character string with a RFC 8259-compliant JSON text and data format. +JSON text is parsed into internal representation. +Order of object members is preserved as is. +Duplicate object member names are allowed. +"," +JSON '{""id"":10,""name"":""What''s this?""}' +JSON '[1, ' '2]'; +JSON X'7472' '7565' +" + +"Literals","Long"," +[ + | - ] number +"," +Long numbers are between -9223372036854775808 and 9223372036854775807. +"," +100000 +" + +"Literals","Null"," +NULL +"," +NULL is a value without data type and means 'unknown value'. +"," +NULL +" + +"Literals","Number"," +digit [...] +"," +The maximum length of the number depends on the data type used. +"," +100 +" + +"Literals","Numeric"," +exactNumeric | approximateNumeric | int | long | @h2@ { hexNumber } +"," +The data type of a numeric literal is the one of numeric data types, such as NUMERIC, DECFLOAT, BIGINT, or INTEGER +depending on format and value. + +An explicit CAST can be used to change the data type. +"," +-1600.05 +CAST(0 AS DOUBLE PRECISION) +-1.4e-10 +" + +"Literals","String"," +[N]'anythingExceptSingleQuote' [...] 
+ | U&{'anythingExceptSingleQuote' [...]} [ UESCAPE 'singleCharacter' ]
+","
+A character string literal starts and ends with a single quote.
+Two single quotes can be used to create a single quote inside a string.
+Prefix ""N"" means a national character string literal;
+H2 does not distinguish regular and national character string literals in any way, this prefix has no effect in H2.
+
+String literals starting with ""U&"" are Unicode character string literals.
+All character string literals in H2 may have Unicode characters,
+but Unicode character string literals may contain Unicode escape sequences ""\0000"" or ""\+000000"",
+where \ is an escape character, ""0000"" and ""000000"" are Unicode character codes in hexadecimal notation.
+Optional ""UESCAPE"" clause may be used to specify another escape character,
+with exception for single quote, double quote, plus sign, and hexadecimal digits (0-9, a-f, and A-F).
+By default the backslash is used.
+Two escape characters can be used to include a single character inside a string.
+Two single quotes can be used to create a single quote inside a string.
+","
+'John''s car'
+'A' 'B' 'C'
+U&'W\00f6rter ' '\\ \+01f600 /'
+U&'|00a1' UESCAPE '|'
+"
+
+"Literals","UUID","
+@h2@ UUID '{ digit | a-f | A-F | - } [...]'
+","
+A UUID literal.
+Must contain 32 hexadecimal digits. Digits may be separated with - signs.
+","
+UUID '12345678-1234-1234-1234-123456789ABC'
+"
+
+"Literals","Time","
+TIME [ WITHOUT TIME ZONE ] 'hh:mm:ss[.nnnnnnnnn]'
+","
+A time literal. A value is between 0:00:00 and 23:59:59.999999999
+and has nanosecond resolution.
+","
+TIME '23:59:59'
+"
+
+"Literals","Time with time zone","
+TIME WITH TIME ZONE 'hh:mm:ss[.nnnnnnnnn]{ @h2@ { Z } | { - | + } timeZoneOffsetString}'
+","
+A time with time zone literal. A value is between 0:00:00 and 23:59:59.999999999
+and has nanosecond resolution.
+"," +TIME WITH TIME ZONE '23:59:59+01' +TIME WITH TIME ZONE '10:15:30.334-03:30' +TIME WITH TIME ZONE '0:00:00Z' +" + +"Literals","Timestamp"," +TIMESTAMP [ WITHOUT TIME ZONE ] '[-]yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]' +"," +A timestamp literal. +"," +TIMESTAMP '2005-12-31 23:59:59' +" + +"Literals","Timestamp with time zone"," +TIMESTAMP WITH TIME ZONE '[-]yyyy-MM-dd hh:mm:ss[.nnnnnnnnn] +[ @h2@ { Z } | { - | + } timeZoneOffsetString | @h2@ { timeZoneNameString } ]' +"," +A timestamp with time zone literal. +If name of time zone is specified it will be converted to time zone offset. +"," +TIMESTAMP WITH TIME ZONE '2005-12-31 23:59:59Z' +TIMESTAMP WITH TIME ZONE '2005-12-31 23:59:59-10:00' +TIMESTAMP WITH TIME ZONE '2005-12-31 23:59:59.123+05' +TIMESTAMP WITH TIME ZONE '2005-12-31 23:59:59.123456789 Europe/London' +" + +"Literals","Interval"," +intervalYear | intervalMonth | intervalDay | intervalHour | intervalMinute + | intervalSecond | intervalYearToMonth | intervalDayToHour + | intervalDayToMinute | intervalDayToSecond | intervalHourToMinute + | intervalHourToSecond | intervalMinuteToSecond +"," +An interval literal. +"," +INTERVAL '1-2' YEAR TO MONTH +" + +"Literals","INTERVAL YEAR"," +INTERVAL [-|+] '[-|+]yearInt' YEAR +"," +An INTERVAL YEAR literal. +"," +INTERVAL '10' YEAR +" + +"Literals","INTERVAL MONTH"," +INTERVAL [-|+] '[-|+]monthInt' MONTH +"," +An INTERVAL MONTH literal. +"," +INTERVAL '10' MONTH +" + +"Literals","INTERVAL DAY"," +INTERVAL [-|+] '[-|+]dayInt' DAY +"," +An INTERVAL DAY literal. +"," +INTERVAL '10' DAY +" + +"Literals","INTERVAL HOUR"," +INTERVAL [-|+] '[-|+]hourInt' HOUR +"," +An INTERVAL HOUR literal. +"," +INTERVAL '10' HOUR +" + +"Literals","INTERVAL MINUTE"," +INTERVAL [-|+] '[-|+]minuteInt' MINUTE +"," +An INTERVAL MINUTE literal. +"," +INTERVAL '10' MINUTE +" + +"Literals","INTERVAL SECOND"," +INTERVAL [-|+] '[-|+]secondInt[.nnnnnnnnn]' SECOND +"," +An INTERVAL SECOND literal. 
+"," +INTERVAL '10.123' SECOND +" + +"Literals","INTERVAL YEAR TO MONTH"," +INTERVAL [-|+] '[-|+]yearInt-monthInt' YEAR TO MONTH +"," +An INTERVAL YEAR TO MONTH literal. +"," +INTERVAL '1-6' YEAR TO MONTH +" + +"Literals","INTERVAL DAY TO HOUR"," +INTERVAL [-|+] '[-|+]dayInt hoursInt' DAY TO HOUR +"," +An INTERVAL DAY TO HOUR literal. +"," +INTERVAL '10 11' DAY TO HOUR +" + +"Literals","INTERVAL DAY TO MINUTE"," +INTERVAL [-|+] '[-|+]dayInt hh:mm' DAY TO MINUTE +"," +An INTERVAL DAY TO MINUTE literal. +"," +INTERVAL '10 11:12' DAY TO MINUTE +" + +"Literals","INTERVAL DAY TO SECOND"," +INTERVAL [-|+] '[-|+]dayInt hh:mm:ss[.nnnnnnnnn]' DAY TO SECOND +"," +An INTERVAL DAY TO SECOND literal. +"," +INTERVAL '10 11:12:13.123' DAY TO SECOND +" + +"Literals","INTERVAL HOUR TO MINUTE"," +INTERVAL [-|+] '[-|+]hh:mm' HOUR TO MINUTE +"," +An INTERVAL HOUR TO MINUTE literal. +"," +INTERVAL '10:11' HOUR TO MINUTE +" + +"Literals","INTERVAL HOUR TO SECOND"," +INTERVAL [-|+] '[-|+]hh:mm:ss[.nnnnnnnnn]' HOUR TO SECOND +"," +An INTERVAL HOUR TO SECOND literal. +"," +INTERVAL '10:11:12.123' HOUR TO SECOND +" + +"Literals","INTERVAL MINUTE TO SECOND"," +INTERVAL [-|+] '[-|+]mm:ss[.nnnnnnnnn]' MINUTE TO SECOND +"," +An INTERVAL MINUTE TO SECOND literal. +"," +INTERVAL '11:12.123' MINUTE TO SECOND +" + +"Datetime fields","Datetime field"," +yearField | monthField | dayOfMonthField + | hourField | minuteField | secondField + | timezoneHourField | timezoneMinuteField + | @h2@ { timezoneSecondField + | millenniumField | centuryField | decadeField + | quarterField + | millisecondField | microsecondField | nanosecondField + | dayOfYearField + | isoDayOfWeekField | isoWeekField | isoWeekYearField + | dayOfWeekField | weekField | weekYearField + | epochField } +"," +Fields for EXTRACT, DATEADD, DATEDIFF, and DATE_TRUNC functions. +"," +YEAR +" + +"Datetime fields","Year field"," +YEAR | @c@ { YYYY | YY | SQL_TSI_YEAR } +"," +Year. 
+"," +YEAR +" + +"Datetime fields","Month field"," +MONTH | @c@ { MM | M | SQL_TSI_MONTH } +"," +Month (1-12). +"," +MONTH +" + +"Datetime fields","Day of month field"," +DAY | @c@ { DD | D | SQL_TSI_DAY } +"," +Day of month (1-31). +"," +DAY +" + +"Datetime fields","Hour field"," +HOUR | @c@ { HH | SQL_TSI_HOUR } +"," +Hour (0-23). +"," +HOUR +" + +"Datetime fields","Minute field"," +MINUTE | @c@ { MI | N | SQL_TSI_MINUTE } +"," +Minute (0-59). +"," +MINUTE +" + +"Datetime fields","Second field"," +SECOND | @c@ { SS | S | SQL_TSI_SECOND } +"," +Second (0-59). +"," +SECOND +" + +"Datetime fields","Timezone hour field"," +TIMEZONE_HOUR +"," +Timezone hour (from -18 to +18). +"," +TIMEZONE_HOUR +" + +"Datetime fields","Timezone minute field"," +TIMEZONE_MINUTE +"," +Timezone minute (from -59 to +59). +"," +TIMEZONE_MINUTE +" + +"Datetime fields","Timezone second field"," +@h2@ TIMEZONE_SECOND +"," +Timezone second (from -59 to +59). +Local mean time (LMT) used in the past may have offsets with seconds. +Standard time doesn't use such offsets. +"," +TIMEZONE_SECOND +" + +"Datetime fields","Millennium field"," +@h2@ MILLENNIUM +"," +Century, or one thousand years (2001-01-01 to 3000-12-31). +"," +MILLENNIUM +" + +"Datetime fields","Century field"," +@h2@ CENTURY +"," +Century, or one hundred years (2001-01-01 to 2100-12-31). +"," +CENTURY +" + +"Datetime fields","Decade field"," +@h2@ DECADE +"," +Decade, or ten years (2020-01-01 to 2029-12-31). +"," +DECADE +" + +"Datetime fields","Quarter field"," +@h2@ QUARTER +"," +Quarter (1-4). +"," +QUARTER +" + +"Datetime fields","Millisecond field"," +@h2@ { MILLISECOND } | @c@ { MS } +"," +Millisecond (0-999). +"," +MILLISECOND +" + +"Datetime fields","Microsecond field"," +@h2@ { MICROSECOND } | @c@ { MCS } +"," +Microsecond (0-999999). +"," +MICROSECOND +" + +"Datetime fields","Nanosecond field"," +@h2@ { NANOSECOND } | @c@ { NS } +"," +Nanosecond (0-999999999). 
+"," +NANOSECOND +" + +"Datetime fields","Day of year field"," +@h2@ { DAYOFYEAR | DAY_OF_YEAR } | @c@ { DOY | DY } +"," +Day of year (1-366). +"," +DAYOFYEAR +" + +"Datetime fields","ISO day of week field"," +@h2@ { ISO_DAY_OF_WEEK } | @c@ { ISODOW } +"," +ISO day of week (1-7). Monday is 1. +"," +ISO_DAY_OF_WEEK +" + +"Datetime fields","ISO week field"," +@h2@ ISO_WEEK +"," +ISO week of year (1-53). +ISO definition is used when first week of year should have at least four days +and week is started with Monday. +"," +ISO_WEEK +" + +"Datetime fields","ISO week year field"," +@h2@ { ISO_WEEK_YEAR } | @c@ { ISO_YEAR | ISOYEAR } +"," +Returns the ISO week-based year from a date/time value. +"," +ISO_WEEK_YEAR +" + +"Datetime fields","Day of week field"," +@h2@ { DAY_OF_WEEK | DAYOFWEEK } | @c@ { DOW } +"," +Day of week (1-7), locale-specific. +"," +DAY_OF_WEEK +" + +"Datetime fields","Week field"," +@h2@ { WEEK } | @c@ { WW | W | SQL_TSI_WEEK } +"," +Week of year (1-53) using local rules. +"," +WEEK +" + +"Datetime fields","Week year field"," +@h2@ { WEEK_YEAR } +"," +Returns the week-based year (locale-specific) from a date/time value. +"," +WEEK_YEAR +" + +"Datetime fields","Epoch field"," +@h2@ EPOCH +"," +For TIMESTAMP values number of seconds since 1970-01-01 00:00:00 in local time zone. +For TIMESTAMP WITH TIME ZONE values number of seconds since 1970-01-01 00:00:00 in UTC time zone. +For DATE values number of seconds since 1970-01-01. +For TIME values number of seconds since midnight. +"," +EPOCH +" + "Other Grammar","Alias"," name "," -An alias is a name that is only valid in the context of the statement." +An alias is a name that is only valid in the context of the statement. +"," +A +" + "Other Grammar","And Condition"," condition [ { AND condition } [...] ] "," -Value or condition." -"Other Grammar","Array"," -( [ expression, [ expression [,...] ] ] ) -"," -An array of values." -"Other Grammar","Boolean"," -TRUE | FALSE -"," -A boolean value." 
-"Other Grammar","Bytes"," -X'hex' -"," -A binary value." -"Other Grammar","Case"," -CASE expression { WHEN expression THEN expression } [...] +Value or condition. +"," +ID=1 AND NAME='Hi' +" + +"Other Grammar","Array element reference"," +array '[' indexInt ']' +"," +Returns array element at specified index or NULL if array is null or index is null. +"," +A[2] +" + +"Other Grammar","Field reference"," +(expression).fieldName +"," +Returns field value from the row value or NULL if row value is null. +Row value expression must be enclosed in parentheses. +"," +(R).COL1 +" + +"Other Grammar","Array value constructor by query"," +ARRAY (query) +"," +Collects values from the subquery into array. + +The subquery should have exactly one column. +Number of elements in the returned array is the number of rows in the subquery. +NULL values are included into array. +"," +ARRAY(SELECT * FROM SYSTEM_RANGE(1, 10)); +" + +"Other Grammar","Case expression"," +simpleCase | searchedCase +"," +Performs conditional evaluation of expressions. +"," +CASE A WHEN 'a' THEN 1 ELSE 2 END +CASE WHEN V > 10 THEN 1 WHEN V < 0 THEN 2 END +CASE WHEN A IS NULL THEN 'Null' ELSE 'Not null' END +" + +"Other Grammar","Simple case"," +CASE expression +{ WHEN { expression | conditionRightHandSide } [,...] THEN expression } [...] [ ELSE expression ] END "," -Returns the first expression where the value is equal to the test expression." -"Other Grammar","Case When"," -CASE { WHEN expression THEN expression} [...] +Returns then expression from the first when clause where one of its operands was was evaluated to ""TRUE"" +for the case expression. +If there are no such clauses, returns else expression or NULL if it is absent. + +Plain expressions are tested for equality with the case expression, ""NULL"" is not equal to ""NULL"". +Right sides of conditions are evaluated with the case expression on the left side. 
+"," +CASE CNT WHEN IS NULL THEN 'Null' WHEN 0 THEN 'No' WHEN 1 THEN 'One' WHEN 2, 3 THEN 'Few' ELSE 'Some' END +" + +"Other Grammar","Searched case"," +CASE { WHEN expression THEN expression } [...] [ ELSE expression ] END "," -Returns the first expression where the condition is true." +Returns the first expression where the condition is true. If no else part is +specified, return NULL. +"," +CASE WHEN CNT<10 THEN 'Low' ELSE 'High' END +CASE WHEN A IS NULL THEN 'Null' ELSE 'Not null' END +" + +"Other Grammar","Cast specification"," +CAST(value AS dataTypeOrDomain) +"," +Converts a value to another data type. The following conversion rules are used: +When converting a number to a boolean, 0 is false and every other value is true. +When converting a boolean to a number, false is 0 and true is 1. +When converting a number to a number of another type, the value is checked for overflow. +When converting a string to binary, UTF-8 encoding is used. +Note that some data types may need explicitly specified precision to avoid overflow or rounding. +"," +CAST(NAME AS INT); +CAST(TIMESTAMP '2010-01-01 10:40:00.123456' AS TIME(6)) +" + "Other Grammar","Cipher"," -AES -"," -Only the algorithm AES (""AES-128"") is supported currently." -"Other Grammar","Column Definition"," -columnName dataType -[ { DEFAULT expression | AS computedColumnExpression } ] [ [ NOT ] NULL ] -[ { AUTO_INCREMENT | IDENTITY } [ ( startInt [, incrementInt ] ) ] ] -[ SELECTIVITY selectivity ] [ COMMENT expression ] -[ PRIMARY KEY [ HASH ] | UNIQUE ] [ CHECK condition ] +@h2@ AES "," -Default expressions are used if no explicit value was used when adding a row." -"Other Grammar","Comments"," --- anythingUntilEndOfLine | // anythingUntilEndOfLine | /* anythingUntilEndComment */ +Only the algorithm AES (""AES-128"") is supported currently. "," -Comments can be used anywhere in a command and are ignored by the database." 
+AES
+"
+
+"Other Grammar","Column Definition","
+dataTypeOrDomain @h2@ [ VISIBLE | INVISIBLE ]
+[ { DEFAULT expression
+    | GENERATED ALWAYS AS (generatedColumnExpression)
+    | GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY [(sequenceOption [...])]} ]
+@h2@ [ ON UPDATE expression ]
+@h2@ [ DEFAULT ON NULL ]
+@h2@ [ SELECTIVITY selectivityInt ] @h2@ [ COMMENT expression ]
+[ columnConstraintDefinition ] [...]
+","
+The default expression is used if no explicit value was used when adding a row
+and when DEFAULT value was specified in an update command.
+
+A column is either a generated column or a base column.
+The generated column has a generated column expression.
+The generated column expression is evaluated and assigned whenever the row changes.
+This expression may reference base columns of the table, but may not reference other data.
+The value of the generated column cannot be set explicitly.
+Generated columns may not have DEFAULT or ON UPDATE expressions.
+
+On update column expression is used if row is updated,
+at least one column has a new value that is different from its previous value
+and value for this column is not set explicitly in update statement.
+
+Identity column is a column generated with a sequence.
+The column declared as the identity column with IDENTITY data type or with IDENTITY () clause
+is implicitly the primary key column of this table.
+GENERATED ALWAYS AS IDENTITY, GENERATED BY DEFAULT AS IDENTITY, and AUTO_INCREMENT clauses
+do not create the primary key constraint automatically.
+GENERATED ALWAYS AS IDENTITY clause indicates that column can only be generated by the sequence,
+its value cannot be set explicitly.
+Identity column has implicit NOT NULL constraint.
+Identity column may not have DEFAULT or ON UPDATE expressions.
+
+DEFAULT ON NULL makes NULL value work as DEFAULT value in assignments to this column.
+
+The invisible column will not be displayed as a result of SELECT * query.
+Otherwise, it works as normal column.
+
+Column constraint definitions are not supported for ALTER statements.
+","
+CREATE TABLE TEST(ID INT PRIMARY KEY,
+    NAME VARCHAR(255) DEFAULT '' NOT NULL);
+CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+    QUANTITY INT, PRICE NUMERIC(10, 2),
+    AMOUNT NUMERIC(20, 2) GENERATED ALWAYS AS (QUANTITY * PRICE));
+"
+
+"Other Grammar","Column Constraint Definition","
+[ constraintNameDefinition ]
+NOT NULL | PRIMARY KEY | UNIQUE | referencesSpecification | CHECK (condition)
+","
+NOT NULL disallows NULL value for a column.
+
+PRIMARY KEY and UNIQUE require unique values.
+PRIMARY KEY also disallows NULL values and marks the column as a primary key.
+
+Referential constraint requires values that exist in other column (usually in another table).
+
+Check constraint requires a specified condition to return TRUE or UNKNOWN (NULL).
+It can reference columns of the table, and can reference objects that exist while the statement is executed.
+Conditions are only checked when a row is added or modified in the table where the constraint exists.
+","
+NOT NULL
+PRIMARY KEY
+UNIQUE
+REFERENCES T2(ID)
+CHECK (VALUE > 0)
+"
+
+"Other Grammar","Comment","
+bracketedComment | -- anythingUntilEndOfLine | @c@ // anythingUntilEndOfLine
+","
+Comments can be used anywhere in a command and are ignored by the database.
+Line comments ""--"" and ""//"" end with a newline.
+","
+-- comment
+/* comment */
+"
+
+"Other Grammar","Bracketed comment","
+/* [ [ bracketedComment ] [ anythingUntilCommentStartOrEnd ] [...] ] */
+","
+Comments can be used anywhere in a command and are ignored by the database.
+Bracketed comments ""/* */"" can be nested and can be multiple lines long.
+","
+/* comment */
+/* comment /* nested comment */ comment */
+"
+
 "Other Grammar","Compare","
-<> | <= | >= | = | < | > | != | &&
+<> | <= | >= | = | < | > | @c@ { != } | @h2@ &&
-"Other Grammar","Condition"," -operand [ conditionRightHandSide ] | NOT condition | EXISTS ( select ) +Comparison operator. The operator != is the same as <>. +The operator ""&&"" means overlapping; it can only be used with geometry types. "," -Boolean value or condition." +<> +" + +"Other Grammar","Condition"," +operand [ conditionRightHandSide ] + | NOT condition + | EXISTS ( query ) + | UNIQUE ( query ) + | @h2@ INTERSECTS (operand, operand) +"," +Boolean value or condition. + +""NOT"" condition negates the result of subcondition and returns ""TRUE"", ""FALSE"", or ""UNKNOWN"" (""NULL""). + +""EXISTS"" predicate tests whether the result of the specified subquery is not empty and returns ""TRUE"" or ""FALSE"". + +""UNIQUE"" predicate tests absence of duplicate rows in the specified subquery and returns ""TRUE"" or ""FALSE"". +Rows with ""NULL"" value in any column are ignored. + +""INTERSECTS"" checks whether 2D bounding boxes of specified geometries intersect with each other +and returns ""TRUE"" or ""FALSE"". +"," +ID <> 2 +NOT(A OR B) +EXISTS (SELECT NULL FROM TEST T WHERE T.GROUP_ID = P.ID) +UNIQUE (SELECT A, B FROM TEST T WHERE T.CATEGORY = CAT) +INTERSECTS(GEOM1, GEOM2) +" + "Other Grammar","Condition Right Hand Side"," -compare { { { ALL | ANY | SOME } ( select ) } | operand } - | IS [ NOT ] NULL - | IS [ NOT ] [ DISTINCT FROM ] operand - | BETWEEN operand AND operand - | IN ( { select | expression [,...] } ) - | [ NOT ] LIKE operand [ ESCAPE string ] - | [ NOT ] REGEXP operand -"," -The right hand side of a condition." 
-"Other Grammar","Constraint"," +comparisonRightHandSide + | quantifiedComparisonRightHandSide + | nullPredicateRightHandSide + | distinctPredicateRightHandSide + | quantifiedDistinctPredicateRightHandSide + | booleanTestRightHandSide + | typePredicateRightHandSide + | jsonPredicateRightHandSide + | betweenPredicateRightHandSide + | inPredicateRightHandSide + | likePredicateRightHandSide + | regexpPredicateRightHandSide +"," +The right hand side of a condition. +"," +> 10 +IS NULL +IS NOT NULL +IS NOT DISTINCT FROM B +IS OF (DATE, TIMESTAMP, TIMESTAMP WITH TIME ZONE) +IS JSON OBJECT WITH UNIQUE KEYS +LIKE 'Jo%' +" + +"Other Grammar","Comparison Right Hand Side"," +compare operand +"," +Right side of comparison predicates. +"," +> 10 +" + +"Other Grammar","Quantified Comparison Right Hand Side"," +compare { ALL | ANY | SOME } ( query ) +"," +Right side of quantified comparison predicates. + +Quantified comparison predicate ALL returns TRUE if specified comparison operation between +left size of condition and each row from a subquery returns TRUE, including case when there are no rows. +ALL predicate returns FALSE if at least one such comparison returns FALSE. +Otherwise it returns UNKNOWN. + +Quantified comparison predicates ANY and SOME return TRUE if specified comparison operation between +left size of condition and at least one row from a subquery returns TRUE. +ANY and SOME predicates return FALSE if all such comparisons return FALSE. +Otherwise they return UNKNOWN. + +Note that these predicates have priority over ANY and SOME aggregate functions with subquery on the right side. +Use parentheses around aggregate function. +"," +< ALL(SELECT V FROM TEST) +" + +"Other Grammar","Null Predicate Right Hand Side"," +IS [ NOT ] NULL +"," +Right side of null predicate. + +Check whether the specified value(s) are NULL values. +To test multiple values a row value must be specified. 
+""IS NULL"" returns ""TRUE"" if and only if all values are ""NULL"" values; otherwise it returns ""FALSE"". +""IS NOT NULL"" returns ""TRUE"" if and only if all values are not ""NULL"" values; otherwise it returns ""FALSE"". +"," +IS NULL +" + +"Other Grammar","Distinct Predicate Right Hand Side"," +IS [ NOT ] [ DISTINCT FROM ] operand +"," +Right side of distinct predicate. + +Distinct predicate is null-safe, meaning NULL is considered the same as NULL, +and the condition never evaluates to UNKNOWN. +"," +IS NOT DISTINCT FROM OTHER +" + +"Other Grammar","Quantified Distinct Predicate Right Hand Side"," +@h2@ IS [ NOT ] [ DISTINCT FROM ] { ALL | ANY | SOME } ( query ) +"," +Right side of quantified distinct predicate. + +Quantified distinct predicate is null-safe, meaning NULL is considered the same as NULL, +and the condition never evaluates to UNKNOWN. + +Quantified distinct predicate ALL returns TRUE if specified distinct predicate between +left size of condition and each row from a subquery returns TRUE, including case when there are no rows. +Otherwise it returns FALSE. + +Quantified distinct predicates ANY and SOME return TRUE if specified distinct predicate between +left size of condition and at least one row from a subquery returns TRUE. +Otherwise they return FALSE. + +Note that these predicates have priority over ANY and SOME aggregate functions with subquery on the right side. +Use parentheses around aggregate function. +"," +IS DISTINCT FROM ALL(SELECT V FROM TEST) +" + +"Other Grammar","Boolean Test Right Hand Side"," +IS [ NOT ] { TRUE | FALSE | UNKNOWN } +"," +Right side of boolean test. + +Checks whether the specified value is (not) ""TRUE"", ""FALSE"", or ""UNKNOWN"" (""NULL"") +and return ""TRUE"" or ""FALSE"". +This test is null-safe. +"," +IS TRUE +" + +"Other Grammar","Type Predicate Right Hand Side"," +IS [ NOT ] OF (dataType [,...]) +"," +Right side of type predicate. 
+ +Checks whether the data type of the specified operand is one of the specified data types. +Some data types have multiple names, these names are considered as equal here. +Domains and their base data types are currently not distinguished from each other. +Precision and scale are also ignored. +If operand is NULL, the result is UNKNOWN. +"," +IS OF (INTEGER, BIGINT) +" + +"Other Grammar","JSON Predicate Right Hand Side"," +IS [ NOT ] JSON [ VALUE | ARRAY | OBJECT | SCALAR ] + [ [ WITH | WITHOUT ] UNIQUE [ KEYS ] ] +"," +Right side of JSON predicate. + +Checks whether value of the specified string, binary data, or a JSON is a valid JSON. +If ""ARRAY"", ""OBJECT"", or ""SCALAR"" is specified, only JSON items of the specified type are considered as valid. +If ""WITH UNIQUE [ KEYS ]"" is specified only JSON with unique keys is considered as valid. +This predicate isn't null-safe, it returns UNKNOWN if operand is NULL. +"," +IS JSON OBJECT WITH UNIQUE KEYS +" + +"Other Grammar","Between Predicate Right Hand Side"," +[ NOT ] BETWEEN [ ASYMMETRIC | SYMMETRIC ] operand AND operand +"," +Right side of between predicate. + +Checks whether the value is within the range inclusive. +""V BETWEEN [ ASYMMETRIC ] A AND B"" is equivalent to ""A <= V AND V <= B"". +""V BETWEEN SYMMETRIC A AND B"" is equivalent to ""A <= V AND V <= B OR A >= V AND V >= B"". +"," +BETWEEN LOW AND HIGH +" + +"Other Grammar","In Predicate Right Hand Side"," +[ NOT ] IN ( { query | expression [,...] } ) +"," +Right side of in predicate. + +Checks presence of value in the specified list of values or in result of the specified query. + +Returns ""TRUE"" if row value on the left side is equal to one of values on the right side, +""FALSE"" if all comparison operations were evaluated to ""FALSE"" or right side has no values, +and ""UNKNOWN"" otherwise. + +This operation is logically equivalent to ""OR"" between comparison operations +comparing left side and each value from the right side. 
+"," +IN (A, B, C) +IN (SELECT V FROM TEST) +" + +"Other Grammar","Like Predicate Right Hand Side"," +[ NOT ] { LIKE | @h2@ { ILIKE } } operand [ ESCAPE string ] +"," +Right side of like predicate. + +The wildcards characters are ""_"" (any one character) and ""%"" (any characters). +The database uses an index when comparing with LIKE except if the operand starts with a wildcard. +To search for the characters ""%"" and ""_"", the characters need to be escaped. +The default escape character is "" \ "" (backslash). +To select no escape character, use ""ESCAPE ''"" (empty string). +At most one escape character is allowed. +Each character that follows the escape character in the pattern needs to match exactly. +Patterns that end with an escape character are invalid and the expression returns NULL. + +ILIKE does a case-insensitive compare. +"," +LIKE 'a%' +" + +"Other Grammar","Regexp Predicate Right Hand Side"," +@h2@ { [ NOT ] REGEXP operand } +"," +Right side of Regexp predicate. + +Regular expression matching is used. +See Java ""Matcher.find"" for details. +"," +REGEXP '[a-z]' +" + +"Other Grammar","Table Constraint Definition"," [ constraintNameDefinition ] -{ CHECK expression - | UNIQUE ( columnName [,...] ) +{ PRIMARY KEY @h2@ [ HASH ] ( columnName [,...] ) } + | UNIQUE ( { columnName [,...] | VALUE } ) | referentialConstraint - | PRIMARY KEY [ HASH ] ( columnName [,...] ) } -"," -Defines a constraint." + | CHECK (condition) +"," +Defines a constraint. + +PRIMARY KEY and UNIQUE require unique values. +PRIMARY KEY also disallows NULL values and marks the column as a primary key, a table can have only one primary key. +UNIQUE constraint supports NULL values and rows with NULL value in any column are considered as unique. +UNIQUE (VALUE) creates a unique constraint on entire row, excluding invisible columns; +but if new columns will be added to the table, they will not be included into this constraint. 
+ +Referential constraint requires values that exist in other column(s) (usually in another table). + +Check constraint requires a specified condition to return TRUE or UNKNOWN (NULL). +It can reference columns of the table, and can reference objects that exist while the statement is executed. +Conditions are only checked when a row is added or modified in the table where the constraint exists. +"," +PRIMARY KEY(ID, NAME) +" + "Other Grammar","Constraint Name Definition"," -CONSTRAINT [ IF NOT EXISTS ] newConstraintName +CONSTRAINT @h2@ [ IF NOT EXISTS ] newConstraintName "," -Defines a constraint name." +Defines a constraint name. +"," +CONSTRAINT CONST_ID +" + "Other Grammar","Csv Options"," -charsetString [, fieldSepString [, fieldDelimString [, escString [, nullString]]]]] +@h2@ charsetString [, fieldSepString [, fieldDelimString [, escString [, nullString]]]] | optionString "," -Optional parameters for CSVREAD and CSVWRITE." +Optional parameters for CSVREAD and CSVWRITE. +Instead of setting the options one by one, all options can be +combined into a space separated key-value pairs, as follows: +""STRINGDECODE('charset=UTF-8 escape=\"" fieldDelimiter=\"" fieldSeparator=, ' ||"" +""'lineComment=# lineSeparator=\n null= rowSeparator=')"". +The following options are supported: + +""caseSensitiveColumnNames"" (true or false; disabled by default), + +""charset"" (for example 'UTF-8'), + +""escape"" (the character that escapes the field delimiter), + +""fieldDelimiter"" (a double quote by default), + +""fieldSeparator"" (a comma by default), + +""lineComment"" (disabled by default), + +""lineSeparator"" (the line separator used for writing; ignored for reading), + +""null"", Support reading existing CSV files that contain explicit ""null"" delimiters. +Note that an empty, unquoted values are also treated as null. + +""preserveWhitespace"" (true or false; disabled by default), + +""writeColumnHeader"" (true or false; enabled by default). 
+ +For a newline or other special character, use STRINGDECODE as in the example above. +A space needs to be escaped with a backslash (""'\ '""), and +a backslash needs to be escaped with another backslash (""'\\'""). +All other characters are not to be escaped, that means +newline and tab characters are written as such. +"," +CALL CSVWRITE('test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=|'); +" + +"Other Grammar","Data Change Delta Table"," +{ OLD | NEW | FINAL } TABLE +( { insert | update | delete | @h2@ { mergeInto } | mergeUsing } ) +"," +Executes the inner data change command and returns old, new, or final rows. + +""OLD"" is not allowed for ""INSERT"" command. It returns old rows. + +""NEW"" and ""FINAL"" are not allowed for ""DELETE"" command. + +""NEW"" returns new rows after evaluation of default expressions, but before execution of triggers. + +""FINAL"" returns new rows after execution of triggers. +"," +SELECT ID FROM FINAL TABLE (INSERT INTO TEST (A, B) VALUES (1, 2)) +" + +"Other Grammar","Data Type or Domain"," +dataType | [schemaName.]domainName +"," +A data type or domain name. +"," +INTEGER +MY_DOMAIN +" + "Other Grammar","Data Type"," -intType | booleanType | tinyintType | smallintType | bigintType | identityType - | decimalType | doubleType | realType | dateType | timeType | timestampType - | binaryType | otherType | varcharType | varcharIgnorecaseType | charType - | blobType | clobType | uuidType | arrayType -"," -A data type definition." -"Other Grammar","Date"," -DATE 'yyyy-MM-dd' -"," -A date literal." -"Other Grammar","Decimal"," -[ + | - ] { { number [ . number ] } | { . number } } [ E [ + | - ] expNumber [...] ] ] -"," -A decimal number with fixed precision and scale." +predefinedType | arrayType | rowType +"," +A data type. 
+"," +INTEGER +" + +"Other Grammar","Predefined Type"," +characterType | characterVaryingType | characterLargeObjectType + | binaryType | binaryVaryingType | binaryLargeObjectType + | booleanType + | smallintType | integerType | bigintType + | numericType | realType | doublePrecisionType | decfloatType + | dateType | timeType | timeWithTimeZoneType + | timestampType | timestampWithTimeZoneType + | intervalType + | @h2@ { tinyintType | javaObjectType | enumType + | geometryType | jsonType | uuidType } +"," +A predefined data type. +"," +INTEGER +" + "Other Grammar","Digit"," 0-9 "," -A digit." -"Other Grammar","Dollar Quoted String"," -$$anythingExceptTwoDollarSigns$$ +A digit. "," -A string starts and ends with two dollar signs." +0 +" + "Other Grammar","Expression"," andCondition [ { OR andCondition } [...] ] "," -Value or condition." +Value or condition. +"," +ID=1 OR NAME='Hi' +" + "Other Grammar","Factor"," -term [ { { * | / | % } term } [...] ] +term [ { { * | / | @c@ { % } } term } [...] ] +"," +A value or a numeric factor. "," -A value or a numeric factor." +ID * 10 +" + +"Other Grammar","Grouping element"," +expression | (expression [, ...]) | () +"," +A grouping element of GROUP BY clause. +"," +A +(B, C) +() +" + "Other Grammar","Hex"," -{ { digit | a-f | A-F } { digit | a-f | A-F } } [...] +[' ' [...]] { { digit | a-f | A-F } [' ' [...]] { digit | a-f | A-F } [' ' [...]] } [...] "," -The hexadecimal representation of a number or of bytes." -"Other Grammar","Hex Number"," -[ + | - ] 0x hex +The hexadecimal representation of a number or of bytes with optional space characters. +Two hexadecimal digit characters are one byte. "," -A number written in hexadecimal notation." +cafe +11 22 33 +a b c d +" + "Other Grammar","Index Column"," columnName [ ASC | DESC ] [ NULLS { FIRST | LAST } ] "," -Indexes this column in ascending or descending order." 
-"Other Grammar","Int"," -[ + | - ] number -"," -The maximum integer number is 2147483647, the minimum is -2147483648." -"Other Grammar","Long"," -[ + | - ] number -"," -Long numbers are between -9223372036854775808 and 9223372036854775807." +Indexes this column in ascending or descending order. Usually it is not required +to specify the order; however doing so will speed up large queries that order +the column in the same way. +"," +NAME +" + +"Other Grammar","Insert values"," +VALUES { DEFAULT|expression | [ROW] ({DEFAULT|expression} [,...]) }, [,...] +"," +Values for INSERT statement. +"," +VALUES (1, 'Test') +" + +"Other Grammar","Interval qualifier"," +YEAR [(precisionInt)] [ TO MONTH ] + | MONTH [(precisionInt)] + | DAY [(precisionInt)] [ TO { HOUR | MINUTE | SECOND [(scaleInt)] } ] + | HOUR [(precisionInt)] [ TO { MINUTE | SECOND [(scaleInt)] } ] + | MINUTE [(precisionInt)] [ TO SECOND [(scaleInt)] ] + | SECOND [(precisionInt [, scaleInt])] +"," +An interval qualifier. +"," +DAY TO SECOND +" + +"Other Grammar","Join specification"," +ON expression | USING (columnName [,...]) +"," +Specifies a join condition or column names. +"," +ON B.ID = A.PARENT_ID +USING (ID) +" + +"Other Grammar","Merge when clause"," +mergeWhenMatchedClause|mergeWhenNotMatchedClause +"," +WHEN MATCHED or WHEN NOT MATCHED clause for MERGE USING command. +"," +WHEN MATCHED THEN DELETE +" + +"Other Grammar","Merge when matched clause"," +WHEN MATCHED [ AND expression ] THEN +UPDATE SET setClauseList | DELETE +"," +WHEN MATCHED clause for MERGE USING command. +"," +WHEN MATCHED THEN UPDATE SET NAME = S.NAME +WHEN MATCHED THEN DELETE +" + +"Other Grammar","Merge when not matched clause"," +WHEN NOT MATCHED [ AND expression ] THEN INSERT +[ ( columnName [,...] ) ] +[ overrideClause ] +VALUES ({DEFAULT|expression} [,...]) +"," +WHEN NOT MATCHED clause for MERGE USING command. 
+"," +WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME) +" + "Other Grammar","Name"," -{ { A-Z|_ } [ { A-Z|_|0-9 } [...] ] } | quotedName -"," -Names are not case sensitive." -"Other Grammar","Null"," -NULL -"," -NULL is a value without data type and means 'unknown value'." -"Other Grammar","Number"," -digit [...] -"," -The maximum length of the number depends on the data type used." -"Other Grammar","Numeric"," -decimal | int | long | hexNumber -"," -The data type of a numeric value is always the lowest possible for the given value." +{ { A-Z|_ } [ { A-Z|_|0-9 } [...] ] } | quotedName +"," +With default settings unquoted names are converted to upper case. +The maximum name length is 256 characters. + +Identifiers in H2 are case sensitive by default. +Because unquoted names are converted to upper case, they can be written in any case anyway. +When both quoted and unquoted names are used for the same identifier the quoted names must be written in upper case. +Identifiers with lowercase characters can be written only as a quoted name, they aren't accessible with unquoted names. + +If DATABASE_TO_UPPER setting is set to FALSE the unquoted names aren't converted to upper case. + +If DATABASE_TO_LOWER setting is set to TRUE the unquoted names are converted to lower case instead. + +If CASE_INSENSITIVE_IDENTIFIERS setting is set to TRUE all identifiers are case insensitive. +"," +TEST +" + "Other Grammar","Operand"," summand [ { || summand } [...] ] "," -A value or a concatenation of values." -"Other Grammar","Order"," -{ int | expression } [ ASC | DESC ] [ NULLS { FIRST | LAST } ] -"," -Sorts the result by the given column number, or by an expression." +Performs the concatenation of character string, binary string, or array values. +In the default mode, the result is NULL if either parameter is NULL. +In compatibility modes result of string concatenation with NULL parameter can be different. 
+"," +'Hi' || ' Eva' +X'AB' || X'CD' +ARRAY[1, 2] || 3 +1 || ARRAY[2, 3] +ARRAY[1, 2] || ARRAY[3, 4] +" + +"Other Grammar","Override clause"," +OVERRIDING { USER | SYSTEM } VALUE +"," +If OVERRIDING USER VALUE is specified, INSERT statement ignores the provided value for identity column +and generates a new one instead. + +If OVERRIDING SYSTEM VALUE is specified, INSERT statement assigns the provided value to identity column. + +If neither clauses are specified, INSERT statement assigns the provided value to +GENERATED BY DEFAULT AS IDENTITY column, +but throws an exception if value is specified for GENERATED ALWAYS AS IDENTITY column. +"," +OVERRIDING SYSTEM VALUE +OVERRIDING USER VALUE +" + +"Other Grammar","Query"," +select | explicitTable | tableValue +"," +A query, such as SELECT, explicit table, or table value. +"," +SELECT ID FROM TEST; +TABLE TEST; +VALUES (1, 2), (3, 4); +" + "Other Grammar","Quoted Name"," ""anythingExceptDoubleQuote"" -"," -Quoted names are case sensitive, and can contain spaces." + | U&""anythingExceptDoubleQuote"" [ UESCAPE 'singleCharacter' ] +"," +Case of characters in quoted names is preserved as is. Such names can contain spaces. +The maximum name length is 256 characters. +Two double quotes can be used to create a single double quote inside an identifier. +With default settings identifiers in H2 are case sensitive. + +Identifiers staring with ""U&"" are Unicode identifiers. +All identifiers in H2 may have Unicode characters, +but Unicode identifiers may contain Unicode escape sequences ""\0000"" or ""\+000000"", +where \ is an escape character, ""0000"" and ""000000"" are Unicode character codes in hexadecimal notation. +Optional ""UESCAPE"" clause may be used to specify another escape character, +with exception for single quote, double quote, plus sign, and hexadecimal digits (0-9, a-f, and A-F). +By default the backslash is used. +Two escape characters can be used to include a single character inside an Unicode identifier. 
+Two double quotes can be used to create a single double quote inside an Unicode identifier. +"," +""FirstName"" +U&""\00d6ffnungszeit"" +U&""/00d6ffnungszeit"" UESCAPE '/' +" + "Other Grammar","Referential Constraint"," -FOREIGN KEY ( columnName [,...] ) +FOREIGN KEY ( columnName [,...] ) referencesSpecification +"," +Defines a referential constraint. +"," +FOREIGN KEY(ID) REFERENCES TEST(ID) +" + +"Other Grammar","References Specification"," REFERENCES [ refTableName ] [ ( refColumnName [,...] ) ] [ ON DELETE referentialAction ] [ ON UPDATE referentialAction ] "," -Defines a referential constraint." +Defines a referential specification of a referential constraint. +If the table name is not specified, then the same table is referenced. +RESTRICT is the default action. +If the referenced columns are not specified, then the primary key columns are used. +Referential constraint requires an existing unique or primary key constraint on referenced columns, +this constraint must include all referenced columns in any order and must not include any other columns. +Some tables may not be referenced, such as metadata tables. +"," +REFERENCES TEST(ID) +" + "Other Grammar","Referential Action"," CASCADE | RESTRICT | NO ACTION | SET { DEFAULT | NULL } "," -The action CASCADE will cause conflicting rows in the referencing (child) table to be deleted or updated." -"Other Grammar","Script Compression Encryption"," -[ COMPRESSION { DEFLATE | LZF | ZIP | GZIP } ] [ CIPHER cipher PASSWORD string ] +The action CASCADE will cause conflicting rows in the referencing (child) table to be deleted or updated. +RESTRICT is the default action. +As this database does not support deferred checking, RESTRICT and NO ACTION will both throw an exception if the constraint is violated. +The action SET DEFAULT will set the column in the referencing (child) table to the default value, while SET NULL will set it to NULL. "," -The compression and encryption algorithm to use for script files." 
+CASCADE +SET NULL +" + +"Other Grammar","Script Compression Encryption"," +@h2@ [ COMPRESSION { DEFLATE | LZF | ZIP | GZIP } ] +@h2@ [ CIPHER cipher PASSWORD string ] +"," +The compression and encryption algorithm to use for script files. +When using encryption, only DEFLATE and LZF are supported. +LZF is faster but uses more space. +"," +COMPRESSION LZF +" + +"Other Grammar","Select order"," +{ expression | @c@ { int } } [ ASC | DESC ] [ NULLS { FIRST | LAST } ] +"," +Sorts the result by the given column number, or by an expression. If the +expression is a single parameter, then the value is interpreted as a column +number. Negative column numbers reverse the sort order. +"," +NAME DESC NULLS LAST +" + +"Other Grammar","Row value expression"," +ROW (expression, [,...]) + | ( [ expression, expression [,...] ] ) + | expression +"," +A row value expression. +"," +ROW (1) +(1, 2) +1 +" + "Other Grammar","Select Expression"," -* | expression [ [ AS ] columnAlias ] | tableAlias.* -"," -An expression in a SELECT statement." -"Other Grammar","String"," -'anythingExceptSingleQuote' -"," -A string starts and ends with a single quote." +wildcardExpression | expression [ [ AS ] columnAlias ] +"," +An expression in a SELECT statement. +"," +ID AS DOCUMENT_ID +" + +"Other Grammar","Sequence value expression"," +{ NEXT | @h2@ { CURRENT } } VALUE FOR [schemaName.]sequenceName +"," +The next or current value of a sequence. + +When the next value is requested the sequence is incremented and the current value of the sequence +and the last identity in the current session are updated with the generated value. +The next value of the sequence is generated only once for each processed row. +If this expression is used multiple times with the same sequence it returns the same value within a processed row. +Used values are never re-used, even when the transaction is rolled back. + +Current value may only be requested after generation of the sequence value in the current session. 
+It returns the latest generated value for the current session. + +If a single command contains next and current value expressions for the same sequence there is no guarantee that +the next value expression will be evaluated before the evaluation of current value expression. +"," +NEXT VALUE FOR SEQ1 +CURRENT VALUE FOR SCHEMA2.SEQ2 +" + +"Other Grammar","Sequence option"," +START WITH long + | @h2@ { RESTART WITH long } + | basicSequenceOption +"," +Option of a sequence. + +START WITH is used to set the initial value of the sequence. +If initial value is not defined, MINVALUE for incrementing sequences and MAXVALUE for decrementing sequences is used. + +RESTART is used to immediately restart the sequence with the specified value. +"," +START WITH 10000 +NO CACHE +" + +"Other Grammar","Alter sequence option"," +@h2@ { START WITH long } + | RESTART [ WITH long ] + | basicSequenceOption +"," +Option of a sequence. + +START WITH is used to change the initial value of the sequence. +It does not affect the current value of the sequence, +it only changes the preserved initial value that is used for simple RESTART without a value. + +RESTART is used to restart the sequence from its initial value or with the specified value. +"," +START WITH 10000 +NO CACHE +" + +"Other Grammar","Alter identity column option"," +@h2@ { START WITH long } + | RESTART [ WITH long ] + | SET basicSequenceOption +"," +Option of an identity column. + +START WITH is used to set or change the initial value of the sequence. +START WITH does not affect the current value of the sequence, +it only changes the preserved initial value that is used for simple RESTART without a value. + +RESTART is used to restart the sequence from its initial value or with the specified value. 
+"," +START WITH 10000 +SET NO CACHE +" + +"Other Grammar","Basic sequence option"," +INCREMENT BY long + | MINVALUE long | NO MINVALUE | @c@ { NOMINVALUE } + | MAXVALUE long | NO MAXVALUE | @c@ { NOMAXVALUE } + | CYCLE | NO CYCLE | @h2@ { EXHAUSTED } | @c@ { NOCYCLE } + | @h2@ { CACHE long } | @h2@ { NO CACHE } | @c@ { NOCACHE } +"," +Basic option of a sequence. + +INCREMENT BY specifies the step of the sequence, may be positive or negative, but may not be zero. +The default is 1. + +MINVALUE and MAXVALUE specify the bounds of the sequence. + +Sequences with CYCLE option start the generation again from +MINVALUE (incrementing sequences) or MAXVALUE (decrementing sequences) instead of exhausting with an error. +Sequences with EXHAUSTED option can't return values until they will be restarted. + +The CACHE option sets the number of pre-allocated numbers. +If the system crashes without closing the database, at most this many numbers are lost. +The default cache size is 32 if sequence has enough range of values. +NO CACHE option or the cache size 1 or lower disable the cache. +If CACHE option is specified, it cannot be larger than the total number of values +that sequence can produce within a cycle. +"," +MAXVALUE 100000 +CYCLE +NO CACHE +" + +"Other Grammar","Set clause list"," +{ { columnName = { DEFAULT | expression } } + | { ( columnName [,...] ) = { rowValueExpression | (query) } } } [,...] +"," +List of SET clauses. +"," +NAME = 'Test', PRICE = 2 +(A, B) = (1, 2) +(A, B) = (1, 2), C = 3 +(A, B) = (SELECT X, Y FROM OTHER T2 WHERE T1.ID = T2.ID) +" + +"Other Grammar","Sort specification"," +expression [ ASC | DESC ] [ NULLS { FIRST | LAST } ] +"," +Sorts the result by an expression. +"," +X ASC NULLS FIRST +" + +"Other Grammar","Sort specification list"," +sortSpecification [,...] +"," +Sorts the result by expressions. +"," +V +A, B DESC NULLS FIRST +" + "Other Grammar","Summand"," factor [ { { + | - } factor } [...] ] "," -A value or a numeric sum." 
+A value or a numeric sum. + +Please note the text concatenation operator is ""||"". +"," +ID + 20 +" + "Other Grammar","Table Expression"," -{ [ schemaName. ] tableName | ( select ) | valuesExpression } [ [ AS ] newTableAlias ] +{ [ schemaName. ] tableName + | ( query ) + | unnest + | table + | dataChangeDeltaTable } +[ [ AS ] newTableAlias [ ( columnName [,...] ) ] ] +@h2@ [ USE INDEX ([ indexName [,...] ]) ] [ { { LEFT | RIGHT } [ OUTER ] | [ INNER ] | CROSS | NATURAL } - JOIN tableExpression [ ON expression ] ] -"," -Joins a table." -"Other Grammar","Values Expression"," -VALUES { ( expression [,...] ) } [,...] -"," -A list of rows that can be used like a table." + JOIN tableExpression [ joinSpecification ] ] +"," +Joins a table. The join specification is not supported for cross and natural joins. +A natural join is an inner join, where the condition is automatically on the +columns with the same name. +"," +TEST1 AS T1 LEFT JOIN TEST2 AS T2 ON T1.ID = T2.PARENT_ID +" + +"Other Grammar","Within group specification"," +WITHIN GROUP (ORDER BY sortSpecificationList) +"," +Group specification for ordered set functions. +"," +WITHIN GROUP (ORDER BY ID DESC) +" + +"Other Grammar","Wildcard expression"," +[[schemaName.]tableAlias.]* +@h2@ [EXCEPT ([[schemaName.]tableAlias.]columnName, [,...])] +"," +A wildcard expression in a SELECT statement. +A wildcard expression represents all visible columns. Some columns can be excluded with optional EXCEPT clause. +"," +* +* EXCEPT (DATA) +" + +"Other Grammar","Window name or specification"," +windowName | windowSpecification +"," +A window name or inline specification for a window function or aggregate. + +Window functions in H2 may require a lot of memory for large queries. +"," +W1 +(ORDER BY ID) +" + +"Other Grammar","Window specification"," +([existingWindowName] +[PARTITION BY expression [,...]] [ORDER BY sortSpecificationList] +[windowFrame]) +"," +A window specification for a window, window function or aggregate. 
+
+If name of an existing window is specified its clauses are used by default.
+
+Optional window partition clause separates rows into independent partitions.
+Each partition is processed separately.
+If this clause is not present there is one implicit partition with all rows.
+
+Optional window order clause specifies order of rows in the partition.
+If some rows have the same order position they are considered as a group of rows in optional window frame clause.
+
+Optional window frame clause specifies which rows are processed by a window function,
+see its documentation for more details.
+","
+()
+(W1 ORDER BY ID)
+(PARTITION BY CATEGORY)
+(PARTITION BY CATEGORY ORDER BY NAME, ID)
+(ORDER BY Y RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE TIES)
+"
+
+"Other Grammar","Window frame","
+ROWS|RANGE|GROUPS
+{windowFramePreceding|BETWEEN windowFrameBound AND windowFrameBound}
+[EXCLUDE {CURRENT ROW|GROUP|TIES|NO OTHERS}]
+","
+A window frame clause.
+May be specified only for aggregates and FIRST_VALUE(), LAST_VALUE(), and NTH_VALUE() window functions.
+
+If this clause is not specified for an aggregate or window function that supports this clause
+the default window frame depends on window order clause.
+If window order clause is also not specified
+the default window frame contains all the rows in the partition.
+If window order clause is specified
+the default window frame contains all preceding rows and all rows from the current group.
+
+Window frame unit determines how rows or groups of rows are selected and counted.
+If ROWS is specified rows are not grouped in any way and relative numbers of rows are used in bounds.
+If RANGE is specified rows are grouped according to window order clause,
+preceding and following values mean the difference between value in the current row and in the target rows,
+and CURRENT ROW in bound specification means current group of rows.
+If GROUPS is specified rows are grouped according to window order clause,
+preceding and following values mean relative number of groups of rows,
+and CURRENT ROW in bound specification means current group of rows.
+
+If only window frame preceding clause is specified it is treated as
+BETWEEN windowFramePreceding AND CURRENT ROW.
+
+Optional window frame exclusion clause specifies rows that should be excluded from the frame.
+EXCLUDE CURRENT ROW excludes only the current row regardless of the window frame unit.
+EXCLUDE GROUP excludes the whole current group of rows, including the current row.
+EXCLUDE TIES excludes the current group of rows, but not the current row.
+EXCLUDE NO OTHERS is default and it does not exclude anything.
+","
+ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE TIES
+"
+
+"Other Grammar","Window frame preceding","
+UNBOUNDED PRECEDING|value PRECEDING|CURRENT ROW
+","
+A window frame preceding clause.
+If value is specified it should not be negative.
+","
+UNBOUNDED PRECEDING
+1 PRECEDING
+CURRENT ROW
+"
+
+"Other Grammar","Window frame bound","
+UNBOUNDED PRECEDING|value PRECEDING|CURRENT ROW
+ |value FOLLOWING|UNBOUNDED FOLLOWING
+","
+A window frame bound clause.
+If value is specified it should not be negative.
+","
+UNBOUNDED PRECEDING
+UNBOUNDED FOLLOWING
+1 FOLLOWING
+CURRENT ROW
+"
+
 "Other Grammar","Term","
-value
-  | columnName
+{ value
+  | column
   | ?[ int ]
-  | NEXT VALUE FOR sequenceName
+  | sequenceValueExpression
   | function
   | { - | + } term
   | ( expression )
-  | select
-  | case
-  | caseWhen
-  | tableAlias.columnName
-","
-A value."
-"Other Grammar","Time","
-TIME 'hh:mm:ss'
-","
-A time literal."
-"Other Grammar","Timestamp","
-TIMESTAMP 'yyyy-MM-dd hh:mm:ss[.nnnnnnnnn]'
-","
-A timestamp literal."
-"Other Grammar","Value","
-string | dollarQuotedString | numeric | date | time | timestamp | boolean | bytes | array | null
-","
-A literal value of any data type, or null."
-"Data Types","INT Type"," -INT | INTEGER | MEDIUMINT | INT4 | SIGNED -"," -Possible values: -2147483648 to 2147483647." + | arrayElementReference + | fieldReference + | query + | caseExpression + | castSpecification + | userDefinedFunctionName } +[ timeZone | intervalQualifier ] +"," +A value. Parameters can be indexed, for example ""?1"" meaning the first parameter. + +Interval qualifier may only be specified for a compatible value +or for a subtraction operation between two datetime values. +The subtraction operation ignores the leading field precision of the qualifier. +"," +'Hello' + +" + +"Other Grammar","Time zone"," +AT { TIME ZONE { intervalHourToMinute | intervalHourToSecond | @h2@ { string } } | LOCAL } +"," +A time zone. Converts the timestamp with or without time zone into timestamp with time zone at specified time zone. +If a day-time interval is specified as a time zone, +it may not have fractional seconds and must be between -18 to 18 hours inclusive. +"," +AT LOCAL +AT TIME ZONE '2' +AT TIME ZONE '-6:00' +AT TIME ZONE INTERVAL '10:00' HOUR TO MINUTE +AT TIME ZONE INTERVAL '10:00:00' HOUR TO SECOND +AT TIME ZONE 'UTC' +AT TIME ZONE 'Europe/London' +" + +"Other Grammar","Column"," +[[schemaName.]tableAlias.] { columnName | @h2@ { _ROWID_ } } +"," +A column name with optional table alias and schema. +_ROWID_ can be used to access unique row identifier. +"," +ID +" + +"Data Types","CHARACTER Type"," +{ CHARACTER | CHAR | NATIONAL { CHARACTER | CHAR } | NCHAR } +[ ( lengthInt [CHARACTERS|OCTETS] ) ] +"," +A Unicode String of fixed length. + +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. +The allowed length is from 1 to 1048576 characters. +If length is not specified, 1 character is used by default. + +The whole text is kept in memory when using this data type. +For variable-length strings use [CHARACTER VARYING](https://h2database.com/html/datatypes.html#character_varying_type) +data type instead. 
+For large text data [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +should be used; see there for details. + +Too short strings are right-padded with space characters. +Too long strings are truncated by CAST specification and rejected by column assignment. + +Two CHARACTER strings of different length are considered as equal if all additional characters in the longer string +are space characters. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". +"," +CHARACTER +CHAR(10) +" + +"Data Types","CHARACTER VARYING Type"," +{ { CHARACTER | CHAR } VARYING + | VARCHAR + | { NATIONAL { CHARACTER | CHAR } | NCHAR } VARYING + | @c@ { LONGVARCHAR | VARCHAR2 | NVARCHAR | NVARCHAR2 } + | @h2@ { VARCHAR_CASESENSITIVE } } +[ ( lengthInt [CHARACTERS|OCTETS] ) ] +"," +A Unicode String. +Use two single quotes ('') to create a quote. + +The allowed length is from 1 to 1048576 characters. +The length is a size constraint; only the actual data is persisted. +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +The whole text is loaded into memory when using this data type. +For large text data [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +should be used; see there for details. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". +"," +CHARACTER VARYING(100) +VARCHAR(255) +" + +"Data Types","CHARACTER LARGE OBJECT Type"," +{ { CHARACTER | CHAR } LARGE OBJECT | CLOB + | { NATIONAL CHARACTER | NCHAR } LARGE OBJECT | NCLOB + | @c@ { TINYTEXT | TEXT | MEDIUMTEXT | LONGTEXT | NTEXT } } +[ ( lengthLong [K|M|G|T|P] [CHARACTERS|OCTETS]) ] +"," +CHARACTER LARGE OBJECT is intended for very large Unicode character string values. 
+Unlike when using [CHARACTER VARYING](https://h2database.com/html/datatypes.html#character_varying_type), +large CHARACTER LARGE OBJECT values are not kept fully in-memory; instead, they are streamed. +CHARACTER LARGE OBJECT should be used for documents and texts with arbitrary size such as XML or +HTML documents, text files, or memo fields of unlimited size. +Use ""PreparedStatement.setCharacterStream"" to store values. +See also [Large Objects](https://h2database.com/html/advanced.html#large_objects) section. + +CHARACTER VARYING should be used for text with relatively short average size (for example +shorter than 200 characters). Short CHARACTER LARGE OBJECT values are stored inline, but there is +an overhead compared to CHARACTER VARYING. + +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +Mapped to ""java.sql.Clob"" (""java.io.Reader"" is also supported). +"," +CHARACTER LARGE OBJECT +CLOB(10K) +" + +"Data Types","VARCHAR_IGNORECASE Type"," +@h2@ VARCHAR_IGNORECASE +[ ( lengthInt [CHARACTERS|OCTETS] ) ] +"," +Same as VARCHAR, but not case sensitive when comparing. +Stored in mixed case. + +The allowed length is from 1 to 1048576 characters. +The length is a size constraint; only the actual data is persisted. +Length, if any, should be specified in characters, CHARACTERS and OCTETS units have no effect in H2. + +The whole text is loaded into memory when using this data type. +For large text data CLOB should be used; see there for details. + +See also [string](https://h2database.com/html/grammar.html#string) literal grammar. +Mapped to ""java.lang.String"". +"," +VARCHAR_IGNORECASE +" + +"Data Types","BINARY Type"," +BINARY [ ( lengthInt ) ] +"," +Represents a binary string (byte array) of fixed predefined length. + +The allowed length is from 1 to 1048576 bytes. +If length is not specified, 1 byte is used by default. + +The whole binary string is kept in memory when using this data type. 
+For variable-length binary strings use [BINARY VARYING](https://h2database.com/html/datatypes.html#binary_varying_type)
+data type instead.
+For large binary data [BINARY LARGE OBJECT](https://h2database.com/html/datatypes.html#binary_large_object_type)
+should be used; see there for details.
+
+Too short binary strings are right-padded with zero bytes.
+Too long binary strings are truncated by CAST specification and rejected by column assignment.
+
+Binary strings of different length are considered as not equal to each other.
+
+See also [bytes](https://h2database.com/html/grammar.html#bytes) literal grammar.
+Mapped to byte[].
+","
+BINARY
+BINARY(1000)
+"
+
+"Data Types","BINARY VARYING Type","
+{ BINARY VARYING | VARBINARY
+  | @c@ { LONGVARBINARY | RAW | BYTEA } }
+[ ( lengthInt ) ]
+","
+Represents a byte array.
+
+The allowed length is from 1 to 1048576 bytes.
+The length is a size constraint; only the actual data is persisted.
+
+The whole binary string is kept in memory when using this data type.
+For large binary data [BINARY LARGE OBJECT](https://h2database.com/html/datatypes.html#binary_large_object_type)
+should be used; see there for details.
+
+See also [bytes](https://h2database.com/html/grammar.html#bytes) literal grammar.
+Mapped to byte[].
+","
+BINARY VARYING(100)
+VARBINARY(1000)
+"
+
+"Data Types","BINARY LARGE OBJECT Type","
+{ BINARY LARGE OBJECT | BLOB
+  | @c@ { TINYBLOB | MEDIUMBLOB | LONGBLOB | IMAGE } }
+[ ( lengthLong [K|M|G|T|P]) ]
+","
+BINARY LARGE OBJECT is intended for very large binary values such as files or images.
+Unlike when using [BINARY VARYING](https://h2database.com/html/datatypes.html#binary_varying_type),
+large objects are not kept fully in-memory; instead, they are streamed.
+Use ""PreparedStatement.setBinaryStream"" to store values.
+See also [CHARACTER LARGE OBJECT](https://h2database.com/html/datatypes.html#character_large_object_type) +and [Large Objects](https://h2database.com/html/advanced.html#large_objects) section. + +Mapped to ""java.sql.Blob"" (""java.io.InputStream"" is also supported). +"," +BINARY LARGE OBJECT +BLOB(10K) +" + "Data Types","BOOLEAN Type"," -BOOLEAN | BIT | BOOL +BOOLEAN | @c@ { BIT | BOOL } +"," +Possible values: TRUE, FALSE, and UNKNOWN (NULL). + +See also [boolean](https://h2database.com/html/grammar.html#boolean) literal grammar. +Mapped to ""java.lang.Boolean"". "," -Possible values: TRUE and FALSE." +BOOLEAN +" + "Data Types","TINYINT Type"," -TINYINT +@h2@ TINYINT +"," +Possible values are: -128 to 127. + +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. + +In JDBC this data type is mapped to ""java.lang.Integer"". +""java.lang.Byte"" is also supported. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.lang.Byte"". + "," -Possible values are: -128 to 127." +TINYINT +" + "Data Types","SMALLINT Type"," -SMALLINT | INT2 | YEAR -"," -Possible values: -32768 to 32767." +SMALLINT | @c@ { INT2 } +"," +Possible values: -32768 to 32767. + +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. + +In JDBC this data type is mapped to ""java.lang.Integer"". +""java.lang.Short"" is also supported. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.lang.Short"". +"," +SMALLINT +" + +"Data Types","INTEGER Type"," +INTEGER | INT | @c@ { MEDIUMINT | INT4 | SIGNED } +"," +Possible values: -2147483648 to 2147483647. + +See also [integer](https://h2database.com/html/grammar.html#int) literal grammar. +Mapped to ""java.lang.Integer"". 
+"," +INTEGER +INT +" + "Data Types","BIGINT Type"," -BIGINT | INT8 -"," -Possible values: -9223372036854775808 to 9223372036854775807." -"Data Types","IDENTITY Type"," -IDENTITY -"," -Auto-Increment value." -"Data Types","DECIMAL Type"," -{ DECIMAL | NUMBER | DEC | NUMERIC } ( precisionInt [ , scaleInt ] ) -"," -Data type with fixed precision and scale." -"Data Types","DOUBLE Type"," -{ DOUBLE [ PRECISION ] | FLOAT | FLOAT8 } -"," -A floating point number." +BIGINT | @c@ INT8 +"," +Possible values: -9223372036854775808 to 9223372036854775807. + +See also [long](https://h2database.com/html/grammar.html#long) literal grammar. +Mapped to ""java.lang.Long"". +"," +BIGINT +" + +"Data Types","NUMERIC Type"," +{ NUMERIC | DECIMAL | DEC } [ ( precisionInt [ , scaleInt ] ) ] +"," +Data type with fixed decimal precision and scale. +This data type is recommended for storing currency values. + +If precision is specified, it must be from 1 to 100000. +If scale is specified, it must be from 0 to 100000, 0 is default. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.math.BigDecimal"". +"," +NUMERIC(20, 2) +" + "Data Types","REAL Type"," -{ REAL | FLOAT4 } -"," -A single precision floating point number." -"Data Types","TIME Type"," -TIME -"," -The time data type." +REAL | FLOAT ( precisionInt ) | @c@ { FLOAT4 } +"," +A single precision floating point number. +Should not be used to represent currency values, because of rounding problems. +Precision value for FLOAT type name should be from 1 to 24. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.lang.Float"". +"," +REAL +" + +"Data Types","DOUBLE PRECISION Type"," +DOUBLE PRECISION | FLOAT [ ( precisionInt ) ] | @c@ { DOUBLE | FLOAT8 } +"," +A double precision floating point number. +Should not be used to represent currency values, because of rounding problems. 
+If precision value is specified for FLOAT type name, it should be from 25 to 53. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.lang.Double"". +"," +DOUBLE PRECISION +" + +"Data Types","DECFLOAT Type"," +DECFLOAT [ ( precisionInt ) ] +"," +Decimal floating point number. +This data type is not recommended to represent currency values, because of variable scale. + +If precision is specified, it must be from 1 to 100000. + +See also [numeric](https://h2database.com/html/grammar.html#numeric) literal grammar. +Mapped to ""java.math.BigDecimal"". +There are three special values: 'Infinity', '-Infinity', and 'NaN'. +These special values can't be read or set as ""BigDecimal"" values, +but they can be read or set using ""java.lang.String"", float, or double. +"," +DECFLOAT +DECFLOAT(20) +" + "Data Types","DATE Type"," DATE "," -The date data type." -"Data Types","TIMESTAMP Type"," -{ TIMESTAMP | DATETIME | SMALLDATETIME } +The date data type. The proleptic Gregorian calendar is used. + +See also [date](https://h2database.com/html/grammar.html#date) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Date"", with the time set to ""00:00:00"" +(or to the next possible time if midnight doesn't exist for the given date and time zone due to a daylight saving change). +""java.time.LocalDate"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalDate"". + +If your time zone had LMT (local mean time) in the past and you use such old dates +(depends on the time zone, usually 100 or more years ago), +don't use ""java.sql.Date"" to read and write them. 
+ +If you deal with very old dates (before 1582-10-15) note that ""java.sql.Date"" uses a mixed Julian/Gregorian calendar, +""java.util.GregorianCalendar"" can be configured to proleptic Gregorian with +""setGregorianChange(new java.util.Date(Long.MIN_VALUE))"" and used to read or write fields of dates. "," -The timestamp data type." -"Data Types","BINARY Type"," -{ BINARY | VARBINARY | LONGVARBINARY | RAW | BYTEA } [ ( precisionInt ) ] -"," -Represents a byte array." -"Data Types","OTHER Type"," -OTHER -"," -This type allows storing serialized Java objects." -"Data Types","VARCHAR Type"," -{ VARCHAR | LONGVARCHAR | VARCHAR2 | NVARCHAR - | NVARCHAR2 | VARCHAR_CASESENSITIVE} [ ( precisionInt ) ] -"," -A Unicode String." -"Data Types","VARCHAR_IGNORECASE Type"," -VARCHAR_IGNORECASE [ ( precisionInt ) ] -"," -Same as VARCHAR, but not case sensitive when comparing." -"Data Types","CHAR Type"," -{ CHAR | CHARACTER | NCHAR } [ ( precisionInt ) ] -"," -A Unicode String." -"Data Types","BLOB Type"," -{ BLOB | TINYBLOB | MEDIUMBLOB | LONGBLOB | IMAGE | OID } [ ( precisionInt ) ] +DATE +" + +"Data Types","TIME Type"," +TIME [ ( precisionInt ) ] [ WITHOUT TIME ZONE ] +"," +The time data type. The format is hh:mm:ss[.nnnnnnnnn]. +If fractional seconds precision is specified it should be from 0 to 9, 0 is default. + +See also [time](https://h2database.com/html/grammar.html#time) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Time"". +""java.time.LocalTime"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalTime"". + +Use ""java.time.LocalTime"" or ""String"" instead of ""java.sql.Time"" when non-zero precision is needed. 
+Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up; +if result of rounding is higher than maximum supported value 23:59:59.999999999 the value is rounded down instead. +The CAST operation to TIMESTAMP and TIMESTAMP WITH TIME ZONE data types uses the +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date) for date fields. "," -Like BINARY, but intended for very large values such as files or images." -"Data Types","CLOB Type"," -{ CLOB | TINYTEXT | TEXT | MEDIUMTEXT | LONGTEXT | NTEXT | NCLOB } [ ( precisionInt ) ] +TIME +TIME(9) +" + +"Data Types","TIME WITH TIME ZONE Type"," +TIME [ ( precisionInt ) ] WITH TIME ZONE +"," +The time with time zone data type. +If fractional seconds precision is specified it should be from 0 to 9, 0 is default. + +See also [time with time zone](https://h2database.com/html/grammar.html#time_with_time_zone) literal grammar. +Mapped to ""java.time.OffsetTime"". +Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up; +if result of rounding is higher than maximum supported value 23:59:59.999999999 the value is rounded down instead. +The CAST operation to TIMESTAMP and TIMESTAMP WITH TIME ZONE data types uses the +[CURRENT_DATE](https://h2database.com/html/functions.html#current_date) for date fields. +"," +TIME WITH TIME ZONE +TIME(9) WITH TIME ZONE +" + +"Data Types","TIMESTAMP Type"," +TIMESTAMP [ ( precisionInt ) ] [ WITHOUT TIME ZONE ] + | @c@ { DATETIME [ ( precisionInt ) ] | SMALLDATETIME } +"," +The timestamp data type. The proleptic Gregorian calendar is used. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +Fractional seconds precision of SMALLDATETIME is always 0 and cannot be specified. + +This data type holds the local date and time without time zone information. +It cannot distinguish timestamps near transitions from DST to normal time. 
+For absolute timestamps use the [TIMESTAMP WITH TIME ZONE](https://h2database.com/html/datatypes.html#timestamp_with_time_zone_type) data type instead. + +See also [timestamp](https://h2database.com/html/grammar.html#timestamp) literal grammar. + +In JDBC this data type is mapped to ""java.sql.Timestamp"" (""java.util.Date"" may be used too). +""java.time.LocalDateTime"" is also supported and recommended. + +In ""org.h2.api.Aggregate"", ""org.h2.api.AggregateFunction"", and ""org.h2.api.Trigger"" +this data type is mapped to ""java.time.LocalDateTime"". + +If your time zone had LMT (local mean time) in the past and you use such old dates +(depends on the time zone, usually 100 or more years ago), +don't use ""java.sql.Timestamp"" and ""java.util.Date"" to read and write them. + +If you deal with very old dates (before 1582-10-15) note that ""java.sql.Timestamp"" and ""java.util.Date"" +use a mixed Julian/Gregorian calendar, ""java.util.GregorianCalendar"" can be configured to proleptic Gregorian with +""setGregorianChange(new java.util.Date(Long.MIN_VALUE))"" and used to read or write fields of timestamps. + +Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up. +"," +TIMESTAMP +TIMESTAMP(9) +" + +"Data Types","TIMESTAMP WITH TIME ZONE Type"," +TIMESTAMP [ ( precisionInt ) ] WITH TIME ZONE +"," +The timestamp with time zone data type. The proleptic Gregorian calendar is used. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. + +See also [timestamp with time zone](https://h2database.com/html/grammar.html#timestamp_with_time_zone) literal grammar. +Mapped to ""java.time.OffsetDateTime"". +""java.time.ZonedDateTime"" and ""java.time.Instant"" are also supported. + +Values of this data type are compared by UTC values. It means that ""2010-01-01 10:00:00+01"" is greater than ""2010-01-01 11:00:00+03"". 
+ +Conversion to ""TIMESTAMP"" uses time zone offset to get UTC time and converts it to local time using the system time zone. +Conversion from ""TIMESTAMP"" does the same operations in reverse and sets time zone offset to offset of the system time zone. +Cast from higher fractional seconds precision to lower fractional seconds precision performs round half up. +"," +TIMESTAMP WITH TIME ZONE +TIMESTAMP(9) WITH TIME ZONE +" + +"Data Types","INTERVAL Type"," +intervalYearType | intervalMonthType | intervalDayType + | intervalHourType | intervalMinuteType | intervalSecondType + | intervalYearToMonthType | intervalDayToHourType + | intervalDayToMinuteType | intervalDayToSecondType + | intervalHourToMinuteType | intervalHourToSecondType + | intervalMinuteToSecondType +"," +Interval data type. +There are two classes of intervals. Year-month intervals can store years and months. +Day-time intervals can store days, hours, minutes, and seconds. +Year-month intervals are comparable only with other year-month intervals. +Day-time intervals are comparable only with other day-time intervals. + +Mapped to ""org.h2.api.Interval"". +"," +INTERVAL DAY TO SECOND +" + +"Data Types","JAVA_OBJECT Type"," +@h2@ { JAVA_OBJECT | OBJECT | OTHER } [ ( lengthInt ) ] +"," +This type allows storing serialized Java objects. Internally, a byte array with serialized form is used. +The allowed length is from 1 (useful only with custom serializer) to 1048576 bytes. +The length is a size constraint; only the actual data is persisted. + +Serialization and deserialization are done on the client side only with two exclusions described below. +Deserialization is only done when ""getObject"" is called. +Java operations cannot be executed inside the database engine for security reasons. +Use ""PreparedStatement.setObject"" with ""Types.JAVA_OBJECT"" or ""H2Type.JAVA_OBJECT"" +as a third argument to store values. 
+ +If Java method alias has ""Object"" parameter(s), values are deserialized during invocation of this method +on the server side. + +If a [linked table](https://h2database.com/html/advanced.html#linked_tables) has a column with ""Types.JAVA_OBJECT"" +JDBC data type and its database is not an another H2, Java objects need to be serialized and deserialized during +interaction between H2 and database that owns the table on the server side of H2. + +This data type needs special attention in secure environments. + +Mapped to ""java.lang.Object"" (or any subclass). +"," +JAVA_OBJECT +JAVA_OBJECT(10000) +" + +"Data Types","ENUM Type"," +@h2@ ENUM (string [, ... ]) +"," +A type with enumerated values. +Mapped to ""java.lang.String"". + +Duplicate and empty values are not permitted. +The maximum allowed length of value is 1048576 characters. +The maximum number of values is 65536. +"," +ENUM('clubs', 'diamonds', 'hearts', 'spades') +" + +"Data Types","GEOMETRY Type"," +@h2@ GEOMETRY + [({ GEOMETRY | + { POINT + | LINESTRING + | POLYGON + | MULTIPOINT + | MULTILINESTRING + | MULTIPOLYGON + | GEOMETRYCOLLECTION } [Z|M|ZM]} + [, sridInt] )] +"," +A spatial geometry type. +If additional constraints are not specified this type accepts all supported types of geometries. +A constraint with required geometry type and dimension system can be set by specifying name of the type and +dimension system. A whitespace between them is optional. +2D dimension system does not have a name and assumed if only a geometry type name is specified. +POINT means 2D point, POINT Z or POINTZ means 3D point. +GEOMETRY constraint means no restrictions on type or dimension system of geometry. +A constraint with required spatial reference system identifier (SRID) can be set by specifying this identifier. + +Mapped to ""org.locationtech.jts.geom.Geometry"" if JTS library is in classpath and to ""java.lang.String"" otherwise. 
+May be represented in textual format using the WKT (well-known text) or EWKT (extended well-known text) format. +Values are stored internally in EWKB (extended well-known binary) format, the maximum allowed length is 1048576 bytes. +Only a subset of EWKB and EWKT features is supported. +Supported objects are POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, and GEOMETRYCOLLECTION. +Supported dimension systems are 2D (XY), Z (XYZ), M (XYM), and ZM (XYZM). +SRID (spatial reference system identifier) is supported. + +Use a quoted string containing a WKT/EWKT formatted string or ""PreparedStatement.setObject()"" to store values, +and ""ResultSet.getObject(..)"" or ""ResultSet.getString(..)"" to retrieve the values. "," -CLOB is like VARCHAR, but intended for very large values." +GEOMETRY +GEOMETRY(POINT) +GEOMETRY(POINT Z) +GEOMETRY(POINT Z, 4326) +GEOMETRY(GEOMETRY, 4326) +" + +"Data Types","JSON Type"," +@h2@ JSON [(lengthInt)] +"," +A RFC 8259-compliant JSON text. + +See also [json](https://h2database.com/html/grammar.html#json) literal grammar. +Mapped to ""byte[]"". +The allowed length is from 1 to 1048576 bytes. +The length is a size constraint; only the actual data is persisted. + +To set a JSON value with ""java.lang.String"" in a PreparedStatement use a ""FORMAT JSON"" data format +(""INSERT INTO TEST(ID, DATA) VALUES (?, ? FORMAT JSON)""). +Without the data format VARCHAR values are converted to a JSON string values. + +Order of object members is preserved as is. +Duplicate object member names are allowed. +"," +JSON +" + "Data Types","UUID Type"," -UUID +@h2@ UUID +"," +Universally unique identifier. This is a 128 bit value. +To store values, use ""PreparedStatement.setBytes"", +""setString"", or ""setObject(uuid)"" (where ""uuid"" is a ""java.util.UUID""). +""ResultSet.getObject"" will return a ""java.util.UUID"". 
+ +Please note that using an index on randomly generated data will +result on poor performance once there are millions of rows in a table. +The reason is that the cache behavior is very bad with randomly distributed data. +This is a problem for any database system. + +For details, see the documentation of ""java.util.UUID"". "," -Universally unique identifier." +UUID +" + "Data Types","ARRAY Type"," -ARRAY -"," -An array of values." -"Data Types","GEOMETRY Type"," -GEOMETRY -"," -A spatial geometry type, based on the ""com." -"Functions (Aggregate)","AVG"," -AVG ( [ DISTINCT ] { numeric } ) -"," -The average (mean) value." -"Functions (Aggregate)","BIT_AND"," -BIT_AND(expression) -"," -The bitwise AND of all non-null values." -"Functions (Aggregate)","BIT_OR"," -BIT_OR(expression) -"," -The bitwise OR of all non-null values." -"Functions (Aggregate)","BOOL_AND"," -BOOL_AND(boolean) -"," -Returns true if all expressions are true." -"Functions (Aggregate)","BOOL_OR"," -BOOL_OR(boolean) -"," -Returns true if any expression is true." -"Functions (Aggregate)","COUNT"," -COUNT( { * | { [ DISTINCT ] expression } } ) -"," -The count of all row, or of the non-null values." -"Functions (Aggregate)","GROUP_CONCAT"," -GROUP_CONCAT ( [ DISTINCT ] string -[ ORDER BY { expression [ ASC | DESC ] } [,...] ] -[ SEPARATOR expression ] ) -"," -Concatenates strings with a separator." -"Functions (Aggregate)","MAX"," -MAX(value) -"," -The highest value." -"Functions (Aggregate)","MIN"," -MIN(value) -"," -The lowest value." -"Functions (Aggregate)","SUM"," -SUM( [ DISTINCT ] { numeric } ) -"," -The sum of all values." -"Functions (Aggregate)","SELECTIVITY"," -SELECTIVITY(value) -"," -Estimates the selectivity (0-100) of a value." -"Functions (Aggregate)","STDDEV_POP"," -STDDEV_POP( [ DISTINCT ] numeric ) -"," -The population standard deviation." -"Functions (Aggregate)","STDDEV_SAMP"," -STDDEV_SAMP( [ DISTINCT ] numeric ) -"," -The sample standard deviation." 
-"Functions (Aggregate)","VAR_POP"," -VAR_POP( [ DISTINCT ] numeric ) -"," -The population variance (square of the population standard deviation)." -"Functions (Aggregate)","VAR_SAMP"," -VAR_SAMP( [ DISTINCT ] numeric ) -"," -The sample variance (square of the sample standard deviation)." +baseDataType ARRAY [ '[' maximumCardinalityInt ']' ] +"," +A data type for array of values. +Base data type specifies the data type of elements. +Array may have NULL elements. +Maximum cardinality, if any, specifies maximum allowed number of elements in the array. +The allowed cardinality is from 0 to 65536 elements. + +See also [array](https://h2database.com/html/grammar.html#array) literal grammar. +Mapped to ""java.lang.Object[]"" (arrays of any non-primitive type are also supported). + +Use ""PreparedStatement.setArray(..)"" or ""PreparedStatement.setObject(.., new Object[] {..})"" to store values, +and ""ResultSet.getObject(..)"" or ""ResultSet.getArray(..)"" to retrieve the values. +"," +BOOLEAN ARRAY +VARCHAR(100) ARRAY +INTEGER ARRAY[10] +" + +"Data Types","ROW Type"," +ROW (fieldName dataType [,...]) +"," +A row value data type. This data type should not be normally used as data type of a column. + +See also [row value expression](https://h2database.com/html/grammar.html#row_value_expression) grammar. +Mapped to ""java.sql.ResultSet"". +"," +ROW(A INT, B VARCHAR(10)) +" + +"Interval Data Types","INTERVAL YEAR Type"," +INTERVAL YEAR [ ( precisionInt ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. + +See also [year interval](https://h2database.com/html/grammar.html#interval_year) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Period"" is also supported. +"," +INTERVAL YEAR +" + +"Interval Data Types","INTERVAL MONTH Type"," +INTERVAL MONTH [ ( precisionInt ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. 
+ +See also [month interval](https://h2database.com/html/grammar.html#interval_month) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Period"" is also supported. +"," +INTERVAL MONTH +" + +"Interval Data Types","INTERVAL DAY Type"," +INTERVAL DAY [ ( precisionInt ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. + +See also [day interval](https://h2database.com/html/grammar.html#interval_day) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL DAY +" + +"Interval Data Types","INTERVAL HOUR Type"," +INTERVAL HOUR [ ( precisionInt ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. + +See also [hour interval](https://h2database.com/html/grammar.html#interval_hour) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL HOUR +" + +"Interval Data Types","INTERVAL MINUTE Type"," +INTERVAL MINUTE [ ( precisionInt ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. + +See also [minute interval](https://h2database.com/html/grammar.html#interval_minute) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL MINUTE +" + +"Interval Data Types","INTERVAL SECOND Type"," +INTERVAL SECOND [ ( precisionInt [, fractionalPrecisionInt ] ) ] +"," +Interval data type. +If precision is specified it should be from 1 to 18, 2 is default. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. + +See also [second interval](https://h2database.com/html/grammar.html#interval_second) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL SECOND +" + +"Interval Data Types","INTERVAL YEAR TO MONTH Type"," +INTERVAL YEAR [ ( precisionInt ) ] TO MONTH +"," +Interval data type. 
+If leading field precision is specified it should be from 1 to 18, 2 is default. + +See also [year to month interval](https://h2database.com/html/grammar.html#interval_year_to_month) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Period"" is also supported. +"," +INTERVAL YEAR TO MONTH +" + +"Interval Data Types","INTERVAL DAY TO HOUR Type"," +INTERVAL DAY [ ( precisionInt ) ] TO HOUR +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. + +See also [day to hour interval](https://h2database.com/html/grammar.html#interval_day_to_hour) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL DAY TO HOUR +" + +"Interval Data Types","INTERVAL DAY TO MINUTE Type"," +INTERVAL DAY [ ( precisionInt ) ] TO MINUTE +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. + +See also [day to minute interval](https://h2database.com/html/grammar.html#interval_day_to_minute) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL DAY TO MINUTE +" + +"Interval Data Types","INTERVAL DAY TO SECOND Type"," +INTERVAL DAY [ ( precisionInt ) ] TO SECOND [ ( fractionalPrecisionInt ) ] +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. + +See also [day to second interval](https://h2database.com/html/grammar.html#interval_day_to_second) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL DAY TO SECOND +" + +"Interval Data Types","INTERVAL HOUR TO MINUTE Type"," +INTERVAL HOUR [ ( precisionInt ) ] TO MINUTE +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. 
+ +See also [hour to minute interval](https://h2database.com/html/grammar.html#interval_hour_to_minute) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL HOUR TO MINUTE +" + +"Interval Data Types","INTERVAL HOUR TO SECOND Type"," +INTERVAL HOUR [ ( precisionInt ) ] TO SECOND [ ( fractionalPrecisionInt ) ] +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. + +See also [hour to second interval](https://h2database.com/html/grammar.html#interval_hour_to_second) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL HOUR TO SECOND +" + +"Interval Data Types","INTERVAL MINUTE TO SECOND Type"," +INTERVAL MINUTE [ ( precisionInt ) ] TO SECOND [ ( fractionalPrecisionInt ) ] +"," +Interval data type. +If leading field precision is specified it should be from 1 to 18, 2 is default. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. + +See also [minute to second interval](https://h2database.com/html/grammar.html#interval_minute_to_second) literal grammar. +Mapped to ""org.h2.api.Interval"". +""java.time.Duration"" is also supported. +"," +INTERVAL MINUTE TO SECOND +" + "Functions (Numeric)","ABS"," -ABS ( { numeric } ) -"," -See also Java ""Math." +ABS( { numeric | interval } ) +"," +Returns the absolute value of a specified value. +The returned value is of the same data type as the parameter. + +Note that TINYINT, SMALLINT, INT, and BIGINT data types cannot represent absolute values +of their minimum negative values, because they have more negative values than positive. +For example, for INT data type allowed values are from -2147483648 to 2147483647. +ABS(-2147483648) should be 2147483648, but this value is not allowed for this data type. +It leads to an exception. 
+To avoid it cast argument of this function to a higher data type. +"," +ABS(I) +ABS(CAST(I AS BIGINT)) +" + "Functions (Numeric)","ACOS"," ACOS(numeric) "," -Calculate the arc cosine." +Calculate the arc cosine. +See also Java ""Math.acos"". +This method returns a double. +"," +ACOS(D) +" + "Functions (Numeric)","ASIN"," ASIN(numeric) "," -Calculate the arc sine." +Calculate the arc sine. +See also Java ""Math.asin"". +This method returns a double. +"," +ASIN(D) +" + "Functions (Numeric)","ATAN"," ATAN(numeric) "," -Calculate the arc tangent." +Calculate the arc tangent. +See also Java ""Math.atan"". +This method returns a double. +"," +ATAN(D) +" + "Functions (Numeric)","COS"," COS(numeric) "," -Calculate the trigonometric cosine." +Calculate the trigonometric cosine. +See also Java ""Math.cos"". +This method returns a double. +"," +COS(ANGLE) +" + "Functions (Numeric)","COSH"," COSH(numeric) "," -Calculate the hyperbolic cosine." +Calculate the hyperbolic cosine. +See also Java ""Math.cosh"". +This method returns a double. +"," +COSH(X) +" + "Functions (Numeric)","COT"," -COT(numeric) +@h2@ COT(numeric) +"," +Calculate the trigonometric cotangent (""1/TAN(ANGLE)""). +See also Java ""Math.*"" functions. +This method returns a double. "," -Calculate the trigonometric cotangent (""1/TAN(ANGLE)"")." +COT(ANGLE) +" + "Functions (Numeric)","SIN"," SIN(numeric) "," -Calculate the trigonometric sine." +Calculate the trigonometric sine. +See also Java ""Math.sin"". +This method returns a double. +"," +SIN(ANGLE) +" + "Functions (Numeric)","SINH"," SINH(numeric) "," -Calculate the hyperbolic sine." +Calculate the hyperbolic sine. +See also Java ""Math.sinh"". +This method returns a double. +"," +SINH(ANGLE) +" + "Functions (Numeric)","TAN"," TAN(numeric) "," -Calculate the trigonometric tangent." +Calculate the trigonometric tangent. +See also Java ""Math.tan"". +This method returns a double. 
+"," +TAN(ANGLE) +" + "Functions (Numeric)","TANH"," TANH(numeric) "," -Calculate the hyperbolic tangent." +Calculate the hyperbolic tangent. +See also Java ""Math.tanh"". +This method returns a double. +"," +TANH(X) +" + "Functions (Numeric)","ATAN2"," -ATAN2(numeric, numeric) +@h2@ ATAN2(numeric, numeric) "," -Calculate the angle when converting the rectangular coordinates to polar coordinates." +Calculate the angle when converting the rectangular coordinates to polar coordinates. +See also Java ""Math.atan2"". +This method returns a double. +"," +ATAN2(X, Y) +" + "Functions (Numeric)","BITAND"," -BITAND(long, long) +@h2@ BITAND(expression, expression) +"," +The bitwise AND operation. +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_AND_AGG](https://h2database.com/html/functions-aggregate.html#bit_and_agg). "," -The bitwise AND operation." +BITAND(A, B) +" + "Functions (Numeric)","BITOR"," -BITOR(long, long) +@h2@ BITOR(expression, expression) "," -The bitwise OR operation." -"Functions (Numeric)","BITXOR"," -BITXOR(long, long) +The bitwise OR operation. +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_OR_AGG](https://h2database.com/html/functions-aggregate.html#bit_or_agg). "," -The bitwise XOR operation." +BITOR(A, B) +" + +"Functions (Numeric)","BITXOR"," +@h2@ BITXOR(expression, expression) +"," +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_XOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_xor_agg). +"," +The bitwise XOR operation. 
+"," +BITXOR(A, B) +" + +"Functions (Numeric)","BITNOT"," +@h2@ BITNOT(expression) +"," +The bitwise NOT operation. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. +"," +BITNOT(A) +" + +"Functions (Numeric)","BITNAND"," +@h2@ BITNAND(expression, expression) +"," +The bitwise NAND operation equivalent to ""BITNOT(BITAND(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_NAND_AGG](https://h2database.com/html/functions-aggregate.html#bit_nand_agg). +"," +BITNAND(A, B) +" + +"Functions (Numeric)","BITNOR"," +@h2@ BITNOR(expression, expression) +"," +The bitwise NOR operation equivalent to ""BITNOT(BITOR(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_NOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_nor_agg). +"," +BITNOR(A, B) +" + +"Functions (Numeric)","BITXNOR"," +@h2@ BITXNOR(expression, expression) +"," +The bitwise XNOR operation equivalent to ""BITNOT(BITXOR(expression, expression))"". +Arguments should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +For aggregate function see [BIT_XNOR_AGG](https://h2database.com/html/functions-aggregate.html#bit_xnor_agg). +"," +BITXNOR(A, B) +" + +"Functions (Numeric)","BITGET"," +@h2@ BITGET(expression, long) +"," +Returns true if and only if the first argument has a bit set in the +position specified by the second parameter. +The first argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This method returns a boolean. 
+The second argument is zero-indexed; the least significant bit has position 0. +"," +BITGET(A, 1) +" + +"Functions (Numeric)","BITCOUNT"," +@h2@ BITCOUNT(expression) +"," +Returns count of set bits in the specified value. +Value should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This method returns a long. +"," +BITCOUNT(A) +" + +"Functions (Numeric)","LSHIFT"," +@h2@ LSHIFT(expression, long) +"," +The bitwise signed left shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +If number of bits is negative, a signed right shift is performed instead. +For numeric values a sign bit is used for left-padding (with negative offset). +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. +"," +LSHIFT(A, B) +" + +"Functions (Numeric)","RSHIFT"," +@h2@ RSHIFT(expression, long) +"," +The bitwise signed right shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +If number of bits is negative, a signed left shift is performed instead. +For numeric values a sign bit is used for left-padding (with positive offset). +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. +"," +RSHIFT(A, B) +" + +"Functions (Numeric)","ULSHIFT"," +@h2@ ULSHIFT(expression, long) +"," +The bitwise unsigned left shift operation. +Shifts the first argument by the number of bits given by the second argument. 
+Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +If number of bits is negative, an unsigned right shift is performed instead. +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. +"," +ULSHIFT(A, B) +" + +"Functions (Numeric)","URSHIFT"," +@h2@ URSHIFT(expression, long) +"," +The bitwise unsigned right shift operation. +Shifts the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. + +If number of bits is negative, an unsigned left shift is performed instead. +If number of bits is equal to or larger than number of bits in value all bits are pushed out from the value. +For binary string arguments signed and unsigned shifts return the same results. +"," +URSHIFT(A, B) +" + +"Functions (Numeric)","ROTATELEFT"," +@h2@ ROTATELEFT(expression, long) +"," +The bitwise left rotation operation. +Rotates the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. +"," +ROTATELEFT(A, B) +" + +"Functions (Numeric)","ROTATERIGHT"," +@h2@ ROTATERIGHT(expression, long) +"," +The bitwise right rotation operation. +Rotates the first argument by the number of bits given by the second argument. +Argument should have TINYINT, SMALLINT, INTEGER, BIGINT, BINARY, or BINARY VARYING data type. +This function returns result of the same data type. +"," +ROTATERIGHT(A, B) +" + "Functions (Numeric)","MOD"," -MOD(long, long) -"," -The modulo operation." 
-"Functions (Numeric)","CEILING"," -{ CEILING | CEIL } (numeric) -"," -See also Java ""Math." +MOD(dividendNumeric, divisorNumeric) +"," +The modulus expression. + +Result has the same type as divisor. +Result is NULL if either of arguments is NULL. +If divisor is 0, an exception is raised. +Result has the same sign as dividend or is equal to 0. + +Usually arguments should have scale 0, but it isn't required by H2. +"," +MOD(A, B) +" + +"Functions (Numeric)","CEIL"," +{ CEIL | CEILING } (numeric) +"," +Returns the smallest integer value that is greater than or equal to the argument. +This method returns value of the same type as argument, but with scale set to 0 and adjusted precision, if applicable. +"," +CEIL(A) +" + "Functions (Numeric)","DEGREES"," -DEGREES(numeric) +@h2@ DEGREES(numeric) "," -See also Java ""Math." +See also Java ""Math.toDegrees"". +This method returns a double. +"," +DEGREES(A) +" + "Functions (Numeric)","EXP"," EXP(numeric) "," -See also Java ""Math." +See also Java ""Math.exp"". +This method returns a double. +"," +EXP(A) +" + "Functions (Numeric)","FLOOR"," FLOOR(numeric) "," -See also Java ""Math." -"Functions (Numeric)","LOG"," -{ LOG | LN } (numeric) +Returns the largest integer value that is less than or equal to the argument. +This method returns value of the same type as argument, but with scale set to 0 and adjusted precision, if applicable. +"," +FLOOR(A) +" + +"Functions (Numeric)","LN"," +LN(numeric) +"," +Calculates the natural (base e) logarithm as a double value. +Argument must be a positive numeric value. "," -See also Java ""Math." +LN(A) +" + +"Functions (Numeric)","LOG"," +LOG({baseNumeric, numeric | @c@{numeric}}) +"," +Calculates the logarithm with specified base as a double value. +Argument and base must be positive numeric values. +Base cannot be equal to 1. + +The default base is e (natural logarithm), in the PostgreSQL mode the default base is base 10. 
+In MSSQLServer mode the optional base is specified after the argument. + +Single-argument variant of LOG function is deprecated, use [LN](https://h2database.com/html/functions.html#ln) +or [LOG10](https://h2database.com/html/functions.html#log10) instead. +"," +LOG(2, A) +" + "Functions (Numeric)","LOG10"," LOG10(numeric) "," -See also Java ""Math." +Calculates the base 10 logarithm as a double value. +Argument must be a positive numeric value. +"," +LOG10(A) +" + +"Functions (Numeric)","ORA_HASH"," +@c@ ORA_HASH(expression [, bucketLong [, seedLong]]) +"," +Computes a hash value. +Optional bucket argument determines the maximum returned value. +This argument should be between 0 and 4294967295, default is 4294967295. +Optional seed argument is combined with the given expression to return the different values for the same expression. +This argument should be between 0 and 4294967295, default is 0. +This method returns a long value between 0 and the specified or default bucket value inclusive. +"," +ORA_HASH(A) +" + "Functions (Numeric)","RADIANS"," -RADIANS(numeric) +@h2@ RADIANS(numeric) +"," +See also Java ""Math.toRadians"". +This method returns a double. "," -See also Java ""Math." +RADIANS(A) +" + "Functions (Numeric)","SQRT"," SQRT(numeric) "," -See also Java ""Math." +See also Java ""Math.sqrt"". +This method returns a double. +"," +SQRT(A) +" + "Functions (Numeric)","PI"," -PI() +@h2@ PI() +"," +See also Java ""Math.PI"". +This method returns a double. "," -See also Java ""Math." +PI() +" + "Functions (Numeric)","POWER"," POWER(numeric, numeric) "," -See also Java ""Math." +See also Java ""Math.pow"". +This method returns a double. +"," +POWER(A, B) +" + "Functions (Numeric)","RAND"," -{ RAND | RANDOM } ( [ int ] ) +@h2@ { RAND | RANDOM } ( [ int ] ) +"," +Calling the function without parameter returns the next pseudo random number. +Calling it with a parameter seeds the session's random number generator. 
+This method returns a double between 0 (including) and 1 (excluding). "," -Calling the function without parameter returns the next a pseudo random number." +RAND() +" + "Functions (Numeric)","RANDOM_UUID"," -RANDOM_UUID() +@h2@ { RANDOM_UUID | UUID } () "," -Returns a new UUID with 122 pseudo random bits." +Returns a new UUID with 122 pseudo random bits. + +Please note that using an index on randomly generated data will +result on poor performance once there are millions of rows in a table. +The reason is that the cache behavior is very bad with randomly distributed data. +This is a problem for any database system. +"," +RANDOM_UUID() +" + "Functions (Numeric)","ROUND"," -ROUND(numeric [, digitsInt]) +@h2@ ROUND(numeric [, digitsInt]) "," -Rounds to a number of digits, or to the nearest long if the number of digits if not set." -"Functions (Numeric)","ROUNDMAGIC"," -ROUNDMAGIC(numeric) +Rounds to a number of fractional digits. +This method returns value of the same type as argument, but with adjusted precision and scale, if applicable. "," -This function rounds numbers in a good way, but it is slow." +ROUND(N, 2) +" + +"Functions (Numeric)","ROUNDMAGIC"," +@h2@ ROUNDMAGIC(numeric) +"," +This function rounds numbers in a good way, but it is slow. +It has a special handling for numbers around 0. +Only numbers smaller or equal +/-1000000000000 are supported. +The value is converted to a String internally, and then the last 4 characters are checked. +'000x' becomes '0000' and '999x' becomes '999999', which is rounded automatically. +This method returns a double. +"," +ROUNDMAGIC(N/3*3) +" + "Functions (Numeric)","SECURE_RAND"," -SECURE_RAND(int) +@h2@ SECURE_RAND(int) +"," +Generates a number of cryptographically secure random numbers. +This method returns bytes. "," -Generates a number of cryptographically secure random numbers." 
+CALL SECURE_RAND(16) +" + "Functions (Numeric)","SIGN"," -SIGN ( { numeric } ) +@h2@ SIGN( { numeric | interval } ) +"," +Returns -1 if the value is smaller than 0, 0 if zero or NaN, and otherwise 1. "," -Returns -1 if the value is smaller 0, 0 if zero, and otherwise 1." +SIGN(N) +" + "Functions (Numeric)","ENCRYPT"," -ENCRYPT(algorithmString, keyBytes, dataBytes) +@h2@ ENCRYPT(algorithmString, keyBytes, dataBytes) "," -Encrypts data using a key." -"Functions (Numeric)","DECRYPT"," -DECRYPT(algorithmString, keyBytes, dataBytes) +Encrypts data using a key. +The supported algorithm is AES. +The block size is 16 bytes. +This method returns bytes. "," -Decrypts data using a key." -"Functions (Numeric)","HASH"," -HASH(algorithmString, dataBytes, iterationInt) +CALL ENCRYPT('AES', '00', STRINGTOUTF8('Test')) +" + +"Functions (Numeric)","DECRYPT"," +@h2@ DECRYPT(algorithmString, keyBytes, dataBytes) "," -Calculate the hash value using an algorithm, and repeat this process for a number of iterations." -"Functions (Numeric)","TRUNCATE"," -{ TRUNC | TRUNCATE } ( { {numeric, digitsInt} | timestamp } ) +Decrypts data using a key. +The supported algorithm is AES. +The block size is 16 bytes. +This method returns bytes. "," -Truncates to a number of digits (to the next value closer to 0)." +CALL TRIM(CHAR(0) FROM UTF8TOSTRING( + DECRYPT('AES', '00', '3fabb4de8f1ee2e97d7793bab2db1116'))) +" + +"Functions (Numeric)","HASH"," +@h2@ HASH(algorithmString, expression [, iterationInt]) +"," +Calculate the hash value using an algorithm, and repeat this process for a number of iterations. + +This function supports MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, SHA3-224, SHA3-256, SHA3-384, and SHA3-512 +algorithms. +SHA-224, SHA-384, and SHA-512 may be unavailable in some JREs. + +MD5 and SHA-1 algorithms should not be considered as secure. 
+ +If this function is used to encrypt a password, a random salt should be concatenated with a password and this salt and +result of the function should be stored to prevent a rainbow table attack and number of iterations should be large +enough to slow down a dictionary or a brute force attack. + +This method returns bytes. +"," +CALL HASH('SHA-256', 'Text', 1000) +CALL HASH('SHA3-256', X'0102') +" + +"Functions (Numeric)","TRUNC"," +@h2@ { TRUNC | TRUNCATE } ( { {numeric [, digitsInt] } + | @c@ { timestamp | timestampWithTimeZone | date | timestampString } } ) +"," +When a numeric argument is specified, truncates it to a number of digits (to the next value closer to 0) +and returns value of the same type as argument, but with adjusted precision and scale, if applicable. + +This function with datetime or string argument is deprecated, use +[DATE_TRUNC](https://h2database.com/html/functions.html#date_trunc) instead. +When used with a timestamp, truncates the timestamp to a date (day) value +and returns a timestamp with or without time zone depending on type of the argument. +When used with a date, returns a timestamp at start of this date. +When used with a timestamp as string, truncates the timestamp to a date (day) value +and returns a timestamp without time zone. +"," +TRUNCATE(N, 2) +" + "Functions (Numeric)","COMPRESS"," -COMPRESS(dataBytes [, algorithmString]) +@h2@ COMPRESS(dataBytes [, algorithmString]) +"," +Compresses the data using the specified compression algorithm. +Supported algorithms are: LZF (faster but lower compression; default), and DEFLATE (higher compression). +Compression does not always reduce size. Very small objects and objects with little redundancy may get larger. +This method returns bytes. "," -Compresses the data using the specified compression algorithm." +COMPRESS(STRINGTOUTF8('Test')) +" + "Functions (Numeric)","EXPAND"," -EXPAND(bytes) +@h2@ EXPAND(bytes) +"," +Expands data that was compressed using the COMPRESS function. 
+This method returns bytes. "," -Expands data that was compressed using the COMPRESS function." +UTF8TOSTRING(EXPAND(COMPRESS(STRINGTOUTF8('Test')))) +" + "Functions (Numeric)","ZERO"," -ZERO() +@h2@ ZERO() +"," +Returns the value 0. This function can be used even if numeric literals are disabled. "," -Returns the value 0." +ZERO() +" + "Functions (String)","ASCII"," -ASCII(string) +@h2@ ASCII(string) +"," +Returns the ASCII value of the first character in the string. +This method returns an int. "," -Returns the ASCII value of the first character in the string." +ASCII('Hi') +" "Functions (String)","BIT_LENGTH"," -BIT_LENGTH(string) +@h2@ BIT_LENGTH(bytes) +"," +Returns the number of bits in a binary string. +This method returns a long. "," -Returns the number of bits in a string." -"Functions (String)","LENGTH"," -{ LENGTH | CHAR_LENGTH | CHARACTER_LENGTH } ( string ) +BIT_LENGTH(NAME) +" + +"Functions (String)","CHAR_LENGTH"," +{ CHAR_LENGTH | CHARACTER_LENGTH | @c@ { LENGTH } } ( string ) "," -Returns the number of characters in a string." +Returns the number of characters in a character string. +This method returns a long. +"," +CHAR_LENGTH(NAME) +" + "Functions (String)","OCTET_LENGTH"," -OCTET_LENGTH(string) +OCTET_LENGTH(bytes) +"," +Returns the number of bytes in a binary string. +This method returns a long. "," -Returns the number of bytes in a string." +OCTET_LENGTH(NAME) +" + "Functions (String)","CHAR"," -{ CHAR | CHR } ( int ) +@h2@ { CHAR | CHR } ( int ) +"," +Returns the character that represents the ASCII value. +This method returns a string. "," -Returns the character that represents the ASCII value." +CHAR(65) +" + "Functions (String)","CONCAT"," -CONCAT(string, string [,...]) +@h2@ CONCAT(string, string [,...]) "," -Combines strings." -"Functions (String)","CONCAT_WS"," -CONCAT_WS(separatorString, string, string [,...]) +Combines strings. +Unlike with the operator ""||"", NULL parameters are ignored, +and do not cause the result to become NULL. 
+If all parameters are NULL the result is an empty string. +This method returns a string. "," -Combines strings with separator." +CONCAT(NAME, '!') +" + +"Functions (String)","CONCAT_WS"," +@h2@ CONCAT_WS(separatorString, string, string [,...]) +"," +Combines strings with separator. +If separator is NULL it is treated like an empty string. +Other NULL parameters are ignored. +Remaining non-NULL parameters, if any, are concatenated with the specified separator. +If there are no remaining parameters the result is an empty string. +This method returns a string. +"," +CONCAT_WS(',', NAME, '!') +" + "Functions (String)","DIFFERENCE"," -DIFFERENCE(string, string) -"," -Returns the difference between the sounds of two strings." +@h2@ DIFFERENCE(string, string) +"," +Returns the difference between the sounds of two strings. +The difference is calculated as a number of matched characters +in the same positions in SOUNDEX representations of arguments. +This method returns an int between 0 and 4 inclusive, or null if any of its parameters is null. +Note that value of 0 means that strings are not similar to each other. +Value of 4 means that strings are fully similar to each other (have the same SOUNDEX representation). +"," +DIFFERENCE(T1.NAME, T2.NAME) +" + "Functions (String)","HEXTORAW"," -HEXTORAW(string) +@h2@ HEXTORAW(string) "," -Converts a hex representation of a string to a string." +Converts a hex representation of a string to a string. +4 hex characters per string character are used. +"," +HEXTORAW(DATA) +" + "Functions (String)","RAWTOHEX"," -RAWTOHEX(string) +@h2@ RAWTOHEX({string|bytes}) "," -Converts a string to the hex representation." -"Functions (String)","INSTR"," -INSTR(string, searchString, [, startInt]) +Converts a string or bytes to the hex representation. +4 hex characters per string character are used. +This method returns a string. "," -Returns the location of a search string in a string." 
+RAWTOHEX(DATA)
+"
+
"Functions (String)","INSERT Function","
-INSERT(originalString, startInt, lengthInt, addString)
+@h2@ INSERT(originalString, startInt, lengthInt, addString)
+","
+Inserts an additional string into the original string at a specified start position.
+The length specifies the number of characters that are removed at the start position in the original string.
+This method returns a string.
","
-Inserts a additional string into the original string at a specified start position."
+INSERT(NAME, 1, 1, ' ')
+"
+
"Functions (String)","LOWER","
-{ LOWER | LCASE } ( string )
+{ LOWER | @c@ { LCASE } } ( string )
+","
+Converts a string to lowercase.
","
-Converts a string to lowercase."
+LOWER(NAME)
+"
+
"Functions (String)","UPPER","
-{ UPPER | UCASE } ( string )
+{ UPPER | @c@ { UCASE } } ( string )
","
-Converts a string to uppercase."
+Converts a string to uppercase.
+","
+UPPER(NAME)
+"
+
"Functions (String)","LEFT","
-LEFT(string, int)
+@h2@ LEFT(string, int)
","
-Returns the leftmost number of characters."
-"Functions (String)","RIGHT","
-RIGHT(string, int)
+Returns the leftmost number of characters.
","
-Returns the rightmost number of characters."
-"Functions (String)","LOCATE","
-LOCATE(searchString, string [, startInt])
+LEFT(NAME, 3)
+"
+
+"Functions (String)","RIGHT","
+@h2@ RIGHT(string, int)
","
-Returns the location of a search string in a string."
-"Functions (String)","POSITION","
-POSITION(searchString, string)
+Returns the rightmost number of characters.
","
-Returns the location of a search string in a string."
+RIGHT(NAME, 3)
+"
+
+"Functions (String)","LOCATE","
+@h2@ { LOCATE(searchString, string [, startInt]) }
+ | @c@ { INSTR(string, searchString [, startInt]) }
+ | @c@ { POSITION(searchString, string) }
+","
+Returns the location of a search string in a string.
+If a start position is used, the characters before it are ignored.
+If position is negative, the rightmost location is returned.
+0 is returned if the search string is not found. +Please note this function is case sensitive, even if the parameters are not. +"," +LOCATE('.', NAME) +" + "Functions (String)","LPAD"," -LPAD(string, int[, paddingString]) +@h2@ LPAD(string, int[, paddingString]) +"," +Left pad the string to the specified length. +If the length is shorter than the string, it will be truncated at the end. +If the padding string is not set, spaces will be used. "," -Left pad the string to the specified length." +LPAD(AMOUNT, 10, '*') +" + "Functions (String)","RPAD"," -RPAD(string, int[, paddingString]) +@h2@ RPAD(string, int[, paddingString]) "," -Right pad the string to the specified length." +Right pad the string to the specified length. +If the length is shorter than the string, it will be truncated. +If the padding string is not set, spaces will be used. +"," +RPAD(TEXT, 10, '-') +" + "Functions (String)","LTRIM"," -LTRIM(string) +@c@ LTRIM(string) +"," +Removes all leading spaces from a string. + +This function is deprecated, use [TRIM](https://h2database.com/html/functions.html#trim) instead of it. "," -Removes all leading spaces from a string." +LTRIM(NAME) +" + "Functions (String)","RTRIM"," -RTRIM(string) +@c@ RTRIM(string) +"," +Removes all trailing spaces from a string. + +This function is deprecated, use [TRIM](https://h2database.com/html/functions.html#trim) instead of it. "," -Removes all trailing spaces from a string." +RTRIM(NAME) +" + "Functions (String)","TRIM"," -TRIM ( [ { LEADING | TRAILING | BOTH } [ string ] FROM ] string ) +TRIM ( [ [ LEADING | TRAILING | BOTH ] [ string ] FROM ] string ) "," -Removes all leading spaces, trailing spaces, or spaces at both ends, from a string." -"Functions (String)","REGEXP_REPLACE"," -REGEXP_REPLACE(inputString, regexString, replacementString) +Removes all leading spaces, trailing spaces, or spaces at both ends, from a string. +Other characters can be removed as well. 
"," -Replaces each substring that matches a regular expression." +TRIM(BOTH '_' FROM NAME) +" + +"Functions (String)","REGEXP_REPLACE"," +@h2@ REGEXP_REPLACE(inputString, regexString, replacementString [, flagsString]) +"," +Replaces each substring that matches a regular expression. +For details, see the Java ""String.replaceAll()"" method. +If any parameter is null (except optional flagsString parameter), the result is null. + +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. + +'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'c' disables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'n' allows the period to match the newline character (Pattern.DOTALL) + +'m' enables multiline mode (Pattern.MULTILINE) + +"," +REGEXP_REPLACE('Hello World', ' +', ' ') +REGEXP_REPLACE('Hello WWWWorld', 'w+', 'W', 'i') +" + +"Functions (String)","REGEXP_LIKE"," +@h2@ REGEXP_LIKE(inputString, regexString [, flagsString]) +"," +Matches string to a regular expression. +For details, see the Java ""Matcher.find()"" method. +If any parameter is null (except optional flagsString parameter), the result is null. + +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. 
+ +'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'c' disables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'n' allows the period to match the newline character (Pattern.DOTALL) + +'m' enables multiline mode (Pattern.MULTILINE) + +"," +REGEXP_LIKE('Hello World', '[A-Z ]*', 'i') +" + +"Functions (String)","REGEXP_SUBSTR"," +@h2@ REGEXP_SUBSTR(inputString, regexString [, positionInt, occurrenceInt, flagsString, groupInt]) +"," +Matches string to a regular expression and returns the matched substring. +For details, see the java.util.regex.Pattern and related functionality. + +The parameter position specifies where in inputString the match should start. Occurrence indicates +which occurrence of pattern in inputString to search for. + +Flags values are limited to 'i', 'c', 'n', 'm'. Other symbols cause exception. +Multiple symbols could be used in one flagsString parameter (like 'im'). +Later flags override first ones, for example 'ic' is equivalent to case sensitive matching 'c'. + +'i' enables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'c' disables case insensitive matching (Pattern.CASE_INSENSITIVE) + +'n' allows the period to match the newline character (Pattern.DOTALL) + +'m' enables multiline mode (Pattern.MULTILINE) + +If the pattern has groups, the group parameter can be used to specify which group to return. + +"," +REGEXP_SUBSTR('2020-10-01', '\d{4}') +REGEXP_SUBSTR('2020-10-01', '(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 2) +" + "Functions (String)","REPEAT"," -REPEAT(string, int) +@h2@ REPEAT(string, int) "," -Returns a string repeated some number of times." +Returns a string repeated some number of times. +"," +REPEAT(NAME || ' ', 10) +" + "Functions (String)","REPLACE"," -REPLACE(string, searchString [, replacementString]) +@h2@ REPLACE(string, searchString [, replacementString]) +"," +Replaces all occurrences of a search string in a text with another string. 
+If no replacement is specified, the search string is removed from the original string.
+If any parameter is null, the result is null.
","
-Replaces all occurrences of a search string in a text with another string."
+REPLACE(NAME, ' ')
+"
+
"Functions (String)","SOUNDEX","
-SOUNDEX(string)
+@h2@ SOUNDEX(string)
+","
+Returns a four character code representing the sound of a string.
+This method returns a string, or null if parameter is null.
+See https://en.wikipedia.org/wiki/Soundex for more information.
","
-Returns a four character code representing the sound of a string."
+SOUNDEX(NAME)
+"
+
"Functions (String)","SPACE","
-SPACE(int)
+@h2@ SPACE(int)
","
-Returns a string consisting of a number of spaces."
+Returns a string consisting of a number of spaces.
+","
+SPACE(80)
+"
+
"Functions (String)","STRINGDECODE","
-STRINGDECODE(string)
+@h2@ STRINGDECODE(string)
+","
+Converts an encoded string using the Java string literal encoding format.
+Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u.
+This method returns a string.
","
-Converts a encoded string using the Java string literal encoding format."
+CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2'))
+"
+
"Functions (String)","STRINGENCODE","
-STRINGENCODE(string)
+@h2@ STRINGENCODE(string)
+","
+Encodes special characters in a string using the Java string literal encoding format.
+Special characters are \b, \t, \n, \f, \r, \"", \\, \, \u.
+This method returns a string.
","
-Encodes special characters in a string using the Java string literal encoding format."
+CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2'))
+"
+
"Functions (String)","STRINGTOUTF8","
-STRINGTOUTF8(string)
+@h2@ STRINGTOUTF8(string)
","
-Encodes a string to a byte array using the UTF8 encoding format."
+Encodes a string to a byte array using the UTF8 encoding format.
+This method returns bytes.
+"," +CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) +" + "Functions (String)","SUBSTRING"," -{ SUBSTRING | SUBSTR } ( string, startInt [, lengthInt ] ) +SUBSTRING ( {string|bytes} FROM startInt [ FOR lengthInt ] ) + | @c@ { { SUBSTRING | SUBSTR } ( {string|bytes}, startInt [, lengthInt ] ) } +"," +Returns a substring of a string starting at a position. +If the start index is negative, then the start index is relative to the end of the string. +The length is optional. "," -Returns a substring of a string starting at a position." +CALL SUBSTRING('[Hello]' FROM 2 FOR 5); +CALL SUBSTRING('hour' FROM 2); +" + "Functions (String)","UTF8TOSTRING"," -UTF8TOSTRING(bytes) +@h2@ UTF8TOSTRING(bytes) +"," +Decodes a byte array in the UTF8 format to a string. "," -Decodes a byte array in the UTF8 format to a string." +CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')) +" + +"Functions (String)","QUOTE_IDENT"," +@h2@ QUOTE_IDENT(string) +"," +Quotes the specified identifier. +Identifier is surrounded by double quotes. +If identifier contains double quotes they are repeated twice. +"," +QUOTE_IDENT('Column 1') +" + "Functions (String)","XMLATTR"," -XMLATTR(nameString, valueString) +@h2@ XMLATTR(nameString, valueString) +"," +Creates an XML attribute element of the form ""name=value"". +The value is encoded as XML text. +This method returns a string. "," -Creates an XML attribute element of the form ""name=value""." +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com')) +" + "Functions (String)","XMLNODE"," -XMLNODE(elementString [, attributesString [, contentString [, indentBoolean]]]) +@h2@ XMLNODE(elementString [, attributesString [, contentString [, indentBoolean]]]) "," -Create an XML node element." +Create an XML node element. +An empty or null attribute string means no attributes are set. +An empty or null content string means the node is empty. +The content is indented by default if it contains a newline. +This method returns a string. 
+"," +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com'), 'H2') +" + "Functions (String)","XMLCOMMENT"," -XMLCOMMENT(commentString) +@h2@ XMLCOMMENT(commentString) +"," +Creates an XML comment. +Two dashes (""--"") are converted to ""- -"". +This method returns a string. "," -Creates an XML comment." +CALL XMLCOMMENT('Test') +" + "Functions (String)","XMLCDATA"," -XMLCDATA(valueString) +@h2@ XMLCDATA(valueString) +"," +Creates an XML CDATA element. +If the value contains ""]]>"", an XML text element is created instead. +This method returns a string. "," -Creates an XML CDATA element." +CALL XMLCDATA('data') +" + "Functions (String)","XMLSTARTDOC"," -XMLSTARTDOC() +@h2@ XMLSTARTDOC() "," -Returns the XML declaration." +Returns the XML declaration. +The result is always """". +"," +CALL XMLSTARTDOC() +" + "Functions (String)","XMLTEXT"," -XMLTEXT(valueString [, escapeNewlineBoolean]) +@h2@ XMLTEXT(valueString [, escapeNewlineBoolean]) +"," +Creates an XML text element. +If enabled, newline and linefeed is converted to an XML entity (&#). +This method returns a string. "," -Creates an XML text element." +CALL XMLTEXT('test') +" + "Functions (String)","TO_CHAR"," -TO_CHAR(value [, formatString[, nlsParamString]]) +@c@ TO_CHAR(value [, formatString[, nlsParamString]]) +"," +Oracle-compatible TO_CHAR function that can format a timestamp, a number, or text. "," -Oracle-compatible TO_CHAR function that can format a timestamp, a number, or text." +CALL TO_CHAR(TIMESTAMP '2010-01-01 00:00:00', 'DD MON, YYYY') +" + "Functions (String)","TRANSLATE"," -TRANSLATE(value , searchString, replacementString]]) +@c@ TRANSLATE(value, searchString, replacementString) "," -Oracle-compatible TRANSLATE function that replaces a sequence of characters in a string with another set of characters." 
-"Functions (Time and Date)","CURRENT_DATE"," -{ CURRENT_DATE [ () ] | CURDATE() | SYSDATE | TODAY } +Oracle-compatible TRANSLATE function that replaces a sequence of characters in a string with another set of characters. "," -Returns the current date." +CALL TRANSLATE('Hello world', 'eo', 'EO') +" + +"Functions (Time and Date)","CURRENT_DATE"," +CURRENT_DATE | @c@ { CURDATE() | SYSDATE | TODAY } +"," +Returns the current date. + +These functions return the same value within a transaction (default) +or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for these functions using the same original UTC timestamp of transaction. +"," +CURRENT_DATE +" + "Functions (Time and Date)","CURRENT_TIME"," -{ CURRENT_TIME [ () ] | CURTIME() } -"," -Returns the current time." +CURRENT_TIME [ (int) ] +"," +Returns the current time with time zone. +If fractional seconds precision is specified it should be from 0 to 9, 0 is default. +The specified value can be used only to limit precision of a result. +The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. +Higher precision is not available before Java 9. + +This function returns the same value within a transaction (default) +or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for this function using the same original UTC timestamp of transaction. +"," +CURRENT_TIME +CURRENT_TIME(9) +" + "Functions (Time and Date)","CURRENT_TIMESTAMP"," -{ CURRENT_TIMESTAMP [ ( [ int ] ) ] | NOW( [ int ] ) } -"," -Returns the current timestamp." +CURRENT_TIMESTAMP [ (int) ] +"," +Returns the current timestamp with time zone. +Time zone offset is set to a current time zone offset. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. 
+The specified value can be used only to limit precision of a result. +The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. +Higher precision is not available before Java 9. + +This function returns the same value within a transaction (default) +or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for this function using the same original UTC timestamp of transaction. +"," +CURRENT_TIMESTAMP +CURRENT_TIMESTAMP(9) +" + +"Functions (Time and Date)","LOCALTIME"," +LOCALTIME [ (int) ] | @c@ CURTIME([ int ]) +"," +Returns the current time without time zone. +If fractional seconds precision is specified it should be from 0 to 9, 0 is default. +The specified value can be used only to limit precision of a result. +The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. +Higher precision is not available before Java 9. + +These functions return the same value within a transaction (default) +or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) command reevaluates the value +for these functions using the same original UTC timestamp of transaction. +"," +LOCALTIME +LOCALTIME(9) +" + +"Functions (Time and Date)","LOCALTIMESTAMP"," +LOCALTIMESTAMP [ (int) ] | @c@ NOW( [ int ] ) +"," +Returns the current timestamp without time zone. +If fractional seconds precision is specified it should be from 0 to 9, 6 is default. +The specified value can be used only to limit precision of a result. +The actual maximum available precision depends on operating system and JVM and can be 3 (milliseconds) or higher. +Higher precision is not available before Java 9. + +The returned value has date and time without time zone information. 
+If time zone has DST transitions the returned values are ambiguous during transition from DST to normal time. +For absolute timestamps use the [CURRENT_TIMESTAMP](https://h2database.com/html/functions.html#current_timestamp) +function and [TIMESTAMP WITH TIME ZONE](https://h2database.com/html/datatypes.html#timestamp_with_time_zone_type) +data type. + +These functions return the same value within a transaction (default) +or within a command depending on database mode. + +[SET TIME ZONE](https://h2database.com/html/commands.html#set_time_zone) reevaluates the value +for these functions using the same original UTC timestamp of transaction. +"," +LOCALTIMESTAMP +LOCALTIMESTAMP(9) +" + "Functions (Time and Date)","DATEADD"," -{ DATEADD| TIMESTAMPADD } (unitString, addIntLong, timestamp) -"," -Adds units to a timestamp." +@h2@ { DATEADD| TIMESTAMPADD } @h2@ (datetimeField, addIntLong, dateAndTime) +"," +Adds units to a date-time value. The datetimeField indicates the unit. +Use negative values to subtract units. +addIntLong may be a long value when manipulating milliseconds, +microseconds, or nanoseconds otherwise its range is restricted to int. +This method returns a value with the same type as specified value if unit is compatible with this value. +If specified field is a HOUR, MINUTE, SECOND, MILLISECOND, etc and value is a DATE value DATEADD returns combined TIMESTAMP. +Fields DAY, MONTH, YEAR, WEEK, etc are not allowed for TIME values. +Fields TIMEZONE_HOUR, TIMEZONE_MINUTE, and TIMEZONE_SECOND are only allowed for TIMESTAMP WITH TIME ZONE values. +"," +DATEADD(MONTH, 1, DATE '2001-01-31') +" + "Functions (Time and Date)","DATEDIFF"," -{ DATEDIFF | TIMESTAMPDIFF } (unitString, aTimestamp, bTimestamp) +@h2@ { DATEDIFF | TIMESTAMPDIFF } @h2@ (datetimeField, aDateAndTime, bDateAndTime) +"," +Returns the number of crossed unit boundaries between two date/time values. +This method returns a long. +The datetimeField indicates the unit. 
+Only TIMEZONE_HOUR, TIMEZONE_MINUTE, and TIMEZONE_SECOND fields use the time zone offset component. +With all other fields if date/time values have time zone offset component it is ignored. "," -Returns the the number of crossed unit boundaries between two timestamps." +DATEDIFF(YEAR, T1.CREATED, T2.CREATED) +" + +"Functions (Time and Date)","DATE_TRUNC"," +@h2@ DATE_TRUNC (datetimeField, dateAndTime) +"," +Truncates the specified date-time value to the specified field. +"," +DATE_TRUNC(DAY, TIMESTAMP '2010-01-03 10:40:00'); +" + "Functions (Time and Date)","DAYNAME"," -DAYNAME(date) +@h2@ DAYNAME(dateAndTime) +"," +Returns the name of the day (in English). "," -Returns the name of the day (in English)." +DAYNAME(CREATED) +" + "Functions (Time and Date)","DAY_OF_MONTH"," -DAY_OF_MONTH(date) +@c@ DAY_OF_MONTH({dateAndTime|interval}) "," -Returns the day of the month (1-31)." +Returns the day of the month (1-31). + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. +"," +DAY_OF_MONTH(CREATED) +" + "Functions (Time and Date)","DAY_OF_WEEK"," -DAY_OF_WEEK(date) +@c@ DAY_OF_WEEK(dateAndTime) +"," +Returns the day of the week (1-7), locale-specific. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. +"," +DAY_OF_WEEK(CREATED) +" + +"Functions (Time and Date)","ISO_DAY_OF_WEEK"," +@c@ ISO_DAY_OF_WEEK(dateAndTime) "," -Returns the day of the week (1 means Sunday)." +Returns the ISO day of the week (1 means Monday). + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. +"," +ISO_DAY_OF_WEEK(CREATED) +" + "Functions (Time and Date)","DAY_OF_YEAR"," -DAY_OF_YEAR(date) +@c@ DAY_OF_YEAR({dateAndTime|interval}) +"," +Returns the day of the year (1-366). + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. 
"," -Returns the day of the year (1-366)." +DAY_OF_YEAR(CREATED) +" + "Functions (Time and Date)","EXTRACT"," -EXTRACT ( { YEAR | YY | MONTH | MM | WEEK | DAY | DD | DAY_OF_YEAR - | DOY | HOUR | HH | MINUTE | MI | SECOND | SS | MILLISECOND | MS } - FROM timestamp ) +EXTRACT ( datetimeField FROM { dateAndTime | interval }) +"," +Returns a value of the specific time unit from a date/time value. +This method returns a numeric value with EPOCH field and +an int for all other fields. "," -Returns a specific value from a timestamps." +EXTRACT(SECOND FROM CURRENT_TIMESTAMP) +" + "Functions (Time and Date)","FORMATDATETIME"," -FORMATDATETIME ( timestamp, formatString +@h2@ FORMATDATETIME ( dateAndTime, formatString [ , localeString [ , timeZoneString ] ] ) "," -Formats a date, time or timestamp as a string." +Formats a date, time or timestamp as a string. +The most important format characters are: +y year, M month, d day, H hour, m minute, s second. +For details of the format, see ""java.time.format.DateTimeFormatter"". + +If timeZoneString is specified, it is used in formatted string if formatString has time zone. +If TIMESTAMP WITH TIME ZONE is passed and timeZoneString is specified, +the timestamp is converted to the specified time zone and its UTC value is preserved. + +This method returns a string. +"," +CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', + 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT') +" + "Functions (Time and Date)","HOUR"," -HOUR(timestamp) +@c@ HOUR({dateAndTime|interval}) "," -Returns the hour (0-23) from a timestamp." +Returns the hour (0-23) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. +"," +HOUR(CREATED) +" + "Functions (Time and Date)","MINUTE"," -MINUTE(timestamp) +@c@ MINUTE({dateAndTime|interval}) +"," +Returns the minute (0-59) from a date/time value. 
+ +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," -Returns the minute (0-59) from a timestamp." +MINUTE(CREATED) +" + "Functions (Time and Date)","MONTH"," -MONTH(timestamp) +@c@ MONTH({dateAndTime|interval}) +"," +Returns the month (1-12) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," -Returns the month (1-12) from a timestamp." +MONTH(CREATED) +" + "Functions (Time and Date)","MONTHNAME"," -MONTHNAME(date) +@h2@ MONTHNAME(dateAndTime) "," -Returns the name of the month (in English)." +Returns the name of the month (in English). +"," +MONTHNAME(CREATED) +" + "Functions (Time and Date)","PARSEDATETIME"," -PARSEDATETIME(string, formatString +@h2@ PARSEDATETIME(string, formatString [, localeString [, timeZoneString]]) "," -Parses a string and returns a timestamp." +Parses a string and returns a TIMESTAMP WITH TIME ZONE value. +The most important format characters are: +y year, M month, d day, H hour, m minute, s second. +For details of the format, see ""java.time.format.DateTimeFormatter"". + +If timeZoneString is specified, it is used as default. +"," +CALL PARSEDATETIME('Sat, 3 Feb 2001 03:05:06 GMT', + 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT') +" + "Functions (Time and Date)","QUARTER"," -QUARTER(timestamp) +@c@ QUARTER(dateAndTime) +"," +Returns the quarter (1-4) from a date/time value. + +This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it. "," -Returns the quarter (1-4) from a timestamp." +QUARTER(CREATED) +" + "Functions (Time and Date)","SECOND"," -SECOND(timestamp) +@c@ SECOND(dateAndTime) "," -Returns the second (0-59) from a timestamp." -"Functions (Time and Date)","WEEK"," -WEEK(timestamp) +Returns the second (0-59) from a date/time value. 
+
+This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it.
","
-Returns the week (1-53) from a timestamp."
+SECOND(CREATED)
+"
+
+"Functions (Time and Date)","WEEK","
+@c@ WEEK(dateAndTime)
+","
+Returns the week (1-53) from a date/time value.
+
+This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it.
+
+This function uses the current system locale.
+","
+WEEK(CREATED)
+"
+
+"Functions (Time and Date)","ISO_WEEK","
+@c@ ISO_WEEK(dateAndTime)
+","
+Returns the ISO week (1-53) from a date/time value.
+
+This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it.
+
+This function uses the ISO definition when
+first week of year should have at least four days
+and week is started with Monday.
+","
+ISO_WEEK(CREATED)
+"
+
"Functions (Time and Date)","YEAR","
-YEAR(timestamp)
-","
-Returns the year from a timestamp."
+@c@ YEAR({dateAndTime|interval})
+","
+Returns the year from a date/time value.
+
+This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it.
+","
+YEAR(CREATED)
+"
+
+"Functions (Time and Date)","ISO_YEAR","
+@c@ ISO_YEAR(dateAndTime)
+","
+Returns the ISO week year from a date/time value.
+
+This function is deprecated, use [EXTRACT](https://h2database.com/html/functions.html#extract) instead of it.
+","
+ISO_YEAR(CREATED)
+"
+
+"Functions (System)","ABORT_SESSION","
+@h2@ ABORT_SESSION(sessionInt)
+","
+Cancels the currently executing statement of another session. Closes the session and releases the allocated resources.
+Returns true if the session was closed, false if no session with the given id was found.
+
+If a client was connected while its session was aborted it will see an error.
+
+Admin rights are required to execute this command.
+"," +CALL ABORT_SESSION(3) +" + "Functions (System)","ARRAY_GET"," -ARRAY_GET(arrayExpression, indexExpression) -"," -Returns one element of an array." -"Functions (System)","ARRAY_LENGTH"," -ARRAY_LENGTH(arrayExpression) -"," -Returns the length of an array." +@c@ ARRAY_GET(arrayExpression, indexExpression) +"," +Returns element at the specified 1-based index from an array. + +This function is deprecated, use +[array element reference](https://www.h2database.com/html/grammar.html#array_element_reference) instead of it. + +Returns NULL if array or index is NULL. +"," +CALL ARRAY_GET(ARRAY['Hello', 'World'], 2) +" + +"Functions (System)","CARDINALITY"," +{ CARDINALITY | @c@ { ARRAY_LENGTH } } (arrayExpression) +"," +Returns the length of an array. +Returns NULL if the specified array is NULL. +"," +CALL CARDINALITY(ARRAY['Hello', 'World']) +" + "Functions (System)","ARRAY_CONTAINS"," -ARRAY_CONTAINS(arrayExpression, value) -"," -Returns a boolean true if the array contains the value." +@h2@ ARRAY_CONTAINS(arrayExpression, value) +"," +Returns a boolean TRUE if the array contains the value or FALSE if it does not contain it. +Returns NULL if the specified array is NULL. +"," +CALL ARRAY_CONTAINS(ARRAY['Hello', 'World'], 'Hello') +" + +"Functions (System)","ARRAY_CAT"," +@c@ ARRAY_CAT(arrayExpression, arrayExpression) +"," +Returns the concatenation of two arrays. + +This function is deprecated, use ""||"" instead of it. + +Returns NULL if any parameter is NULL. +"," +CALL ARRAY_CAT(ARRAY[1, 2], ARRAY[3, 4]) +" + +"Functions (System)","ARRAY_APPEND"," +@c@ ARRAY_APPEND(arrayExpression, value) +"," +Append an element to the end of an array. + +This function is deprecated, use ""||"" instead of it. + +Returns NULL if any parameter is NULL. +"," +CALL ARRAY_APPEND(ARRAY[1, 2], 3) +" + +"Functions (System)","ARRAY_MAX_CARDINALITY"," +ARRAY_MAX_CARDINALITY(arrayExpression) +"," +Returns the maximum allowed array cardinality (length) of the declared data type of argument. 
+"," +SELECT ARRAY_MAX_CARDINALITY(COL1) FROM TEST FETCH FIRST ROW ONLY; +" + +"Functions (System)","TRIM_ARRAY"," +TRIM_ARRAY(arrayExpression, int) +"," +Removes the specified number of elements from the end of the array. + +Returns NULL if second parameter is NULL or if first parameter is NULL and second parameter is not negative. +Throws exception if second parameter is negative or larger than number of elements in array. +Otherwise returns the truncated array. +"," +CALL TRIM_ARRAY(ARRAY[1, 2, 3, 4], 1) +" + +"Functions (System)","ARRAY_SLICE"," +@h2@ ARRAY_SLICE(arrayExpression, lowerBoundInt, upperBoundInt) +"," +Returns elements from the array as specified by the lower and upper bound parameters. +Both parameters are inclusive and the first element has index 1, i.e. ARRAY_SLICE(a, 2, 2) has only the second element. +Returns NULL if any parameter is NULL or if an index is out of bounds. +"," +CALL ARRAY_SLICE(ARRAY[1, 2, 3, 4], 1, 3) +" + "Functions (System)","AUTOCOMMIT"," -AUTOCOMMIT() +@h2@ AUTOCOMMIT() "," -Returns true if auto commit is switched on for this session." +Returns true if auto commit is switched on for this session. +"," +AUTOCOMMIT() +" + "Functions (System)","CANCEL_SESSION"," -CANCEL_SESSION(sessionInt) +@h2@ CANCEL_SESSION(sessionInt) "," -Cancels the currently executing statement of another session." +Cancels the currently executing statement of another session. +Returns true if the statement was canceled, false if the session is closed or no statement is currently executing. + +Admin rights are required to execute this command. +"," +CANCEL_SESSION(3) +" + "Functions (System)","CASEWHEN Function"," -CASEWHEN(boolean, aValue, bValue) +@c@ CASEWHEN(boolean, aValue, bValue) "," -Returns 'a' if the boolean expression is true, otherwise 'b'." -"Functions (System)","CAST"," -CAST(value AS dataType) +Returns 'aValue' if the boolean expression is true, otherwise 'bValue'. 
+ +This function is deprecated, use [CASE](https://h2database.com/html/grammar.html#searched_case) instead of it. "," -Converts a value to another data type." +CASEWHEN(ID=1, 'A', 'B') +" + "Functions (System)","COALESCE"," -{ COALESCE | NVL } (aValue, bValue [,...]) +{ COALESCE | @c@ { NVL } } (aValue, bValue [,...]) + | @c@ IFNULL(aValue, bValue) "," -Returns the first value that is not null." -"Functions (System)","CONVERT"," -CONVERT(value, dataType) +Returns the first value that is not null. "," -Converts a value to another data type." -"Functions (System)","CURRVAL"," -CURRVAL( [ schemaName, ] sequenceString ) +COALESCE(A, B, C) +" + +"Functions (System)","CONVERT"," +@c@ CONVERT(value, dataTypeOrDomain) "," -Returns the current (last) value of the sequence, independent of the session." -"Functions (System)","CSVREAD"," -CSVREAD(fileNameString [, columnsString [, csvOptions ] ] ) +Converts a value to another data type. + +This function is deprecated, use [CAST](https://h2database.com/html/grammar.html#cast_specification) instead of it. "," -Returns the result set of reading the CSV (comma separated values) file." +CONVERT(NAME, INT) +" + +"Functions (System)","CURRVAL"," +@c@ CURRVAL( [ schemaNameString, ] sequenceString ) +"," +Returns the latest generated value of the sequence for the current session. +Current value may only be requested after generation of the sequence value in the current session. +This method exists only for compatibility, when it isn't required use +[CURRENT VALUE FOR sequenceName](https://h2database.com/html/grammar.html#sequence_value_expression) +instead. +If the schema name is not set, the current schema is used. +When sequence is not found, the uppercase name is also checked. +This method returns a long. +"," +CURRVAL('TEST_SEQ') +" + "Functions (System)","CSVWRITE"," -CSVWRITE ( fileNameString, queryString [, csvOptions [, lineSepString] ] ) -"," -Writes a CSV (comma separated values)." 
-"Functions (System)","DATABASE"," -DATABASE() -"," -Returns the name of the database." +@h2@ CSVWRITE ( fileNameString, queryString [, csvOptions [, lineSepString] ] ) +"," +Writes a CSV (comma separated values). The file is overwritten if it exists. +If only a file name is specified, it will be written to the current working directory. +For each parameter, NULL means the default value should be used. +The default charset is the default value for this system, and the default field separator is a comma. + +The values are converted to text using the default string representation; +if another conversion is required you need to change the select statement accordingly. +The parameter nullString is used when writing NULL (by default nothing is written +when NULL appears). The default line separator is the default value for this +system (system property ""line.separator""). + +The returned value is the number of rows written. +Admin rights are required to execute this command. +"," +CALL CSVWRITE('data/test.csv', 'SELECT * FROM TEST'); +CALL CSVWRITE('data/test2.csv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=|'); +-- Write a tab-separated file +CALL CSVWRITE('data/test.tsv', 'SELECT * FROM TEST', 'charset=UTF-8 fieldSeparator=' || CHAR(9)); +" + +"Functions (System)","CURRENT_SCHEMA"," +CURRENT_SCHEMA | @c@ SCHEMA() +"," +Returns the name of the default schema for this session. +"," +CALL CURRENT_SCHEMA +" + +"Functions (System)","CURRENT_CATALOG"," +CURRENT_CATALOG | @c@ DATABASE() +"," +Returns the name of the database. +"," +CALL CURRENT_CATALOG +" + "Functions (System)","DATABASE_PATH"," -DATABASE_PATH() -"," -Returns the directory of the database files and the database name, if it is file based." +@h2@ DATABASE_PATH() +"," +Returns the directory of the database files and the database name, if it is file based. +Returns NULL otherwise. 
+"," +CALL DATABASE_PATH(); +" + +"Functions (System)","DATA_TYPE_SQL"," +@h2@ DATA_TYPE_SQL +@h2@ (objectSchemaString, objectNameString, objectTypeString, typeIdentifierString) +"," +Returns SQL representation of data type of the specified +constant, domain, table column, routine result or argument. + +For constants object type is 'CONSTANT' and type identifier is the value of +""INFORMATION_SCHEMA.CONSTANTS.DTD_IDENTIFIER"". + +For domains object type is 'DOMAIN' and type identifier is the value of +""INFORMATION_SCHEMA.DOMAINS.DTD_IDENTIFIER"". + +For columns object type is 'TABLE' and type identifier is the value of +""INFORMATION_SCHEMA.COLUMNS.DTD_IDENTIFIER"". + +For routines object name is the value of ""INFORMATION_SCHEMA.ROUTINES.SPECIFIC_NAME"", +object type is 'ROUTINE', and type identifier is the value of +""INFORMATION_SCHEMA.ROUTINES.DTD_IDENTIFIER"" for data type of the result and the value of +""INFORMATION_SCHEMA.PARAMETERS.DTD_IDENTIFIER"" for data types of arguments. +Aggregate functions aren't supported by this function, because their data type isn't statically known. + +This function returns NULL if any argument is NULL, object type is not valid, or object isn't found. +"," +DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', 'TYPE') +DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', 'TYPE') +DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', '1') +DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', 'RESULT') +DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', '1') +COALESCE( + QUOTE_IDENT(DOMAIN_SCHEMA) || '.' || QUOTE_IDENT(DOMAIN_NAME), + DATA_TYPE_SQL(TABLE_SCHEMA, TABLE_NAME, 'TABLE', DTD_IDENTIFIER)) +" + +"Functions (System)","DB_OBJECT_ID"," +@h2@ DB_OBJECT_ID({{'ROLE'|'SETTING'|'SCHEMA'|'USER'}, objectNameString + | {'CONSTANT'|'CONSTRAINT'|'DOMAIN'|'INDEX'|'ROUTINE'|'SEQUENCE' + |'SYNONYM'|'TABLE'|'TRIGGER'}, schemaNameString, objectNameString }) +"," +Returns internal identifier of the specified database object as integer value or NULL if object doesn't exist. 
+ +Admin rights are required to execute this function. +"," +CALL DB_OBJECT_ID('ROLE', 'MANAGER'); +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', 'MY_TABLE'); +" + +"Functions (System)","DB_OBJECT_SQL"," +@h2@ DB_OBJECT_SQL({{'ROLE'|'SETTING'|'SCHEMA'|'USER'}, objectNameString + | {'CONSTANT'|'CONSTRAINT'|'DOMAIN'|'INDEX'|'ROUTINE'|'SEQUENCE' + |'SYNONYM'|'TABLE'|'TRIGGER'}, schemaNameString, objectNameString }) +"," +Returns internal SQL definition of the specified database object or NULL if object doesn't exist +or it is a system object without SQL definition. + +This function should not be used to analyze structure of the object by machine code. +Internal SQL representation may contain undocumented non-standard clauses +and may be different in different versions of H2. +Objects are described in the ""INFORMATION_SCHEMA"" in machine-readable way. + +Admin rights are required to execute this function. +"," +CALL DB_OBJECT_SQL('ROLE', 'MANAGER'); +CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'MY_TABLE'); +" + "Functions (System)","DECODE"," -DECODE(value, whenValue, thenValue [,...]) +@c@ DECODE(value, whenValue, thenValue [,...]) "," -Returns the first matching value." -"Functions (System)","DISK_SPACE_USED"," -DISK_SPACE_USED(tableNameString) +Returns the first matching value. NULL is considered to match NULL. +If no match was found, then NULL or the last parameter (if the parameter count is even) is returned. +This function is provided for Oracle compatibility, +use [CASE](https://h2database.com/html/grammar.html#case_expression) instead of it. "," -Returns the approximate amount of space used by the table specified." +CALL DECODE(RAND()>0.5, 0, 'Red', 1, 'Black'); +" + +"Functions (System)","DISK_SPACE_USED"," +@h2@ DISK_SPACE_USED(tableNameString) +"," +Returns the approximate amount of space used by the table specified. +Does not currently take into account indexes or LOB's. +This function may be expensive since it has to load every page in the table. 
+"," +CALL DISK_SPACE_USED('my_table'); +" + +"Functions (System)","SIGNAL"," +@h2@ SIGNAL(sqlStateString, messageString) +"," +Throw an SQLException with the passed SQLState and reason. +"," +CALL SIGNAL('23505', 'Duplicate user ID: ' || user_id); +" + +"Functions (System)","ESTIMATED_ENVELOPE"," +@h2@ ESTIMATED_ENVELOPE(tableNameString, columnNameString) +"," +Returns the estimated minimum bounding box that encloses all specified GEOMETRY values. +Only 2D coordinate plane is supported. +NULL values are ignored. +Column must have a spatial index. +This function is fast, but estimation may include uncommitted data (including data from other transactions), +may return approximate bounds, or be different with actual value due to other reasons. +Use with caution. +If estimation is not available this function returns NULL. +For accurate and reliable result use ESTIMATE aggregate function instead. +"," +CALL ESTIMATED_ENVELOPE('MY_TABLE', 'GEOMETRY_COLUMN'); +" + "Functions (System)","FILE_READ"," -FILE_READ(fileNameString [,encodingString]) -"," -Returns the contents of a file." +@h2@ FILE_READ(fileNameString [,encodingString]) +"," +Returns the contents of a file. If only one parameter is supplied, the data are +returned as a BLOB. If two parameters are used, the data is returned as a CLOB +(text). The second parameter is the character set to use, NULL meaning the +default character set for this system. + +File names and URLs are supported. +To read a stream from the classpath, use the prefix ""classpath:"". + +Admin rights are required to execute this command. +"," +SELECT LENGTH(FILE_READ('~/.h2.server.properties')) LEN; +SELECT FILE_READ('http://localhost:8182/stylesheet.css', NULL) CSS; +" + +"Functions (System)","FILE_WRITE"," +@h2@ FILE_WRITE(blobValue, fileNameString) +"," +Write the supplied parameter into a file. Return the number of bytes written. + +Write access to folder, and admin rights are required to execute this command. 
+"," +SELECT FILE_WRITE('Hello world', '/tmp/hello.txt') LEN; +" + "Functions (System)","GREATEST"," -GREATEST(aValue, bValue [,...]) -"," -Returns the largest value that is not NULL, or NULL if all values are NULL." -"Functions (System)","IDENTITY"," -IDENTITY() +@h2@ GREATEST(aValue, bValue [,...]) "," -Returns the last inserted identity value for this session." -"Functions (System)","IFNULL"," -IFNULL(aValue, bValue) +Returns the largest value that is not NULL, or NULL if all values are NULL. "," -Returns the value of 'a' if it is not null, otherwise 'b'." +CALL GREATEST(1, 2, 3); +" + "Functions (System)","LEAST"," -LEAST(aValue, bValue [,...]) +@h2@ LEAST(aValue, bValue [,...]) "," -Returns the smallest value that is not NULL, or NULL if all values are NULL." +Returns the smallest value that is not NULL, or NULL if all values are NULL. +"," +CALL LEAST(1, 2, 3); +" + "Functions (System)","LOCK_MODE"," -LOCK_MODE() +@h2@ LOCK_MODE() +"," +Returns the current lock mode. See SET LOCK_MODE. +This method returns an int. "," -Returns the current lock mode." +CALL LOCK_MODE(); +" + "Functions (System)","LOCK_TIMEOUT"," -LOCK_TIMEOUT() +@h2@ LOCK_TIMEOUT() "," -Returns the lock timeout of the current session (in milliseconds)." -"Functions (System)","LINK_SCHEMA"," -LINK_SCHEMA(targetSchemaString, driverString, urlString, -userString, passwordString, sourceSchemaString) +Returns the lock timeout of the current session (in milliseconds). "," -Creates table links for all tables in a schema." +LOCK_TIMEOUT() +" + "Functions (System)","MEMORY_FREE"," -MEMORY_FREE() +@h2@ MEMORY_FREE() +"," +Returns the free memory in KB (where 1024 bytes is a KB). +This method returns a long. +The garbage is run before returning the value. +Admin rights are required to execute this command. "," -Returns the free memory in KB (where 1024 bytes is a KB)." 
+MEMORY_FREE() +" + "Functions (System)","MEMORY_USED"," -MEMORY_USED() +@h2@ MEMORY_USED() "," -Returns the used memory in KB (where 1024 bytes is a KB)." -"Functions (System)","NEXTVAL"," -NEXTVAL ( [ schemaName, ] sequenceString ) +Returns the used memory in KB (where 1024 bytes is a KB). +This method returns a long. +The garbage is run before returning the value. +Admin rights are required to execute this command. "," -Returns the next value of the sequence." +MEMORY_USED() +" + +"Functions (System)","NEXTVAL"," +@c@ NEXTVAL ( [ schemaNameString, ] sequenceString ) +"," +Increments the sequence and returns its value. +The current value of the sequence and the last identity in the current session are updated with the generated value. +Used values are never re-used, even when the transaction is rolled back. +This method exists only for compatibility, it's recommended to use the standard +[NEXT VALUE FOR sequenceName](https://h2database.com/html/grammar.html#sequence_value_expression) +instead. +If the schema name is not set, the current schema is used. +When sequence is not found, the uppercase name is also checked. +This method returns a long. +"," +NEXTVAL('TEST_SEQ') +" + "Functions (System)","NULLIF"," NULLIF(aValue, bValue) "," -Returns NULL if 'a' is equals to 'b', otherwise 'a'." -"Functions (System)","NVL2"," -NVL2(testValue, aValue, bValue) +Returns NULL if 'a' is equal to 'b', otherwise 'a'. "," -If the test value is null, then 'b' is returned." +NULLIF(A, B) +A / NULLIF(B, 0) +" + +"Functions (System)","NVL2"," +@c@ NVL2(testValue, aValue, bValue) +"," +If the test value is null, then 'b' is returned. Otherwise, 'a' is returned. +The data type of the returned value is the data type of 'a' if this is a text type. + +This function is provided for Oracle compatibility, +use [CASE](https://h2database.com/html/grammar.html#case_expression) +or [COALESCE](https://h2database.com/html/functions.html#coalesce) instead of it. 
+"," +NVL2(X, 'not null', 'null') +" + "Functions (System)","READONLY"," -READONLY() -"," -Returns true if the database is read-only." -"Functions (System)","ROWNUM"," -{ ROWNUM() } | { ROW_NUMBER() OVER() } -"," -Returns the number of the current row." -"Functions (System)","SCHEMA"," -SCHEMA() +@h2@ READONLY() "," -Returns the name of the default schema for this session." -"Functions (System)","SCOPE_IDENTITY"," -SCOPE_IDENTITY() +Returns true if the database is read-only. "," -Returns the last inserted identity value for this session for the current scope -(ie." +READONLY() +" + +"Functions (System)","ROWNUM"," +@h2@ ROWNUM() +"," +Returns the number of the current row. +This method returns a long value. +It is supported for SELECT statements, as well as for DELETE and UPDATE. +The first row has the row number 1, and is calculated before ordering and grouping the result set, +but after evaluating index conditions (even when the index conditions are specified in an outer query). +Use the [ROW_NUMBER() OVER ()](https://h2database.com/html/functions-window.html#row_number) +function to get row numbers after grouping or in specified order. +"," +SELECT ROWNUM(), * FROM TEST; +SELECT ROWNUM(), * FROM (SELECT * FROM TEST ORDER BY NAME); +SELECT ID FROM (SELECT T.*, ROWNUM AS R FROM TEST T) WHERE R BETWEEN 2 AND 3; +" + "Functions (System)","SESSION_ID"," -SESSION_ID() +@h2@ SESSION_ID() +"," +Returns the unique session id number for the current database connection. +This id stays the same while the connection is open. +This method returns an int. +The database engine may re-use a session id after the connection is closed. "," -Returns the unique session id number for the current database connection." +CALL SESSION_ID() +" + "Functions (System)","SET"," -SET(@variableName, value) +@h2@ SET(@variableName, value) "," -Updates a variable with the given value." -"Functions (System)","TABLE"," -{ TABLE | TABLE_DISTINCT } ( { name dataType = expression } [,...] 
) +Updates a variable with the given value. +The new value is returned. +When used in a query, the value is updated in the order the rows are read. +When used in a subquery, not all rows might be read depending on the query plan. +This can be used to implement running totals / cumulative sums. "," -Returns the result set." +SELECT X, SET(@I, COALESCE(@I, 0)+X) RUNNING_TOTAL FROM SYSTEM_RANGE(1, 10) +" + "Functions (System)","TRANSACTION_ID"," -TRANSACTION_ID() -"," -Returns the current transaction id for this session." +@h2@ TRANSACTION_ID() +"," +Returns the current transaction id for this session. +This method returns NULL if there is no uncommitted change, or if the database is not persisted. +Otherwise a value of the following form is returned: +""logFileId-position-sessionId"". +This method returns a string. +The value is unique across database restarts (values are not re-used). +"," +CALL TRANSACTION_ID() +" + "Functions (System)","TRUNCATE_VALUE"," -TRUNCATE_VALUE(value, precisionInt, forceBoolean) +@h2@ TRUNCATE_VALUE(value, precisionInt, forceBoolean) +"," +Truncate a value to the required precision. +If force flag is set to ""FALSE"" fixed precision values are not truncated. +The method returns a value with the same data type as the first parameter. +"," +CALL TRUNCATE_VALUE(X, 10, TRUE); +" + +"Functions (System)","CURRENT_PATH"," +CURRENT_PATH "," -Truncate a value to the required precision." -"Functions (System)","USER"," -{ USER | CURRENT_USER } () +Returns the comma-separated list of quoted schema names where user-defined functions are searched +when they are referenced without the schema name. "," -Returns the name of the current user of this session." +CURRENT_PATH +" + +"Functions (System)","CURRENT_ROLE"," +CURRENT_ROLE +"," +Returns the name of the PUBLIC role. +"," +CURRENT_ROLE +" + +"Functions (System)","CURRENT_USER"," +CURRENT_USER | SESSION_USER | SYSTEM_USER | USER +"," +Returns the name of the current user of this session. 
+"," +CURRENT_USER +" + "Functions (System)","H2VERSION"," +@h2@ H2VERSION() +"," +Returns the H2 version as a String. +"," H2VERSION() +" + +"Functions (JSON)","JSON_OBJECT"," +JSON_OBJECT( +[{{[KEY] string VALUE expression} | {string : expression}} [,...] ] +[ { NULL | ABSENT } ON NULL ] +[ { WITH | WITHOUT } UNIQUE KEYS ] +) +"," +Returns a JSON object constructed from the specified properties. +If ABSENT ON NULL is specified properties with NULL value are not included in the object. +If WITH UNIQUE KEYS is specified the constructed object is checked for uniqueness of keys, +nested objects, if any, are checked too. +"," +JSON_OBJECT('id': 100, 'name': 'Joe', 'groups': '[2,5]' FORMAT JSON); +" + +"Functions (JSON)","JSON_ARRAY"," +JSON_ARRAY( +[expression [,...]]|{(query) [FORMAT JSON]} +[ { NULL | ABSENT } ON NULL ] +) +"," +Returns a JSON array constructed from the specified values or from the specified single-column subquery. +If NULL ON NULL is specified NULL values are included in the array. +"," +JSON_ARRAY(10, 15, 20); +JSON_ARRAY(JSON_DATA_A FORMAT JSON, JSON_DATA_B FORMAT JSON); +JSON_ARRAY((SELECT J FROM PROPS) FORMAT JSON); +" + +"Functions (Table)","CSVREAD"," +@h2@ CSVREAD(fileNameString [, columnsString [, csvOptions ] ] ) +"," +Returns the result set of reading the CSV (comma separated values) file. +For each parameter, NULL means the default value should be used. + +If the column names are specified (a list of column names separated with the +fieldSeparator), those are used, otherwise (or if they are set to NULL) the first line of +the file is interpreted as the column names. +In that case, column names that contain no special characters (only letters, '_', +and digits; similar to the rule for Java identifiers) are processed in the same way as unquoted identifiers +and therefore case of characters may be changed. +Other column names are processed as quoted identifiers and case of characters is preserved. 
+To preserve the case of column names unconditionally use +[caseSensitiveColumnNames](https://h2database.com/html/grammar.html#csv_options) option. + +The default charset is the default value for this system, and the default field separator +is a comma. Missing unquoted values as well as data that matches nullString is +parsed as NULL. All columns are of type VARCHAR. + +The BOM (the byte-order-mark) character 0xfeff at the beginning of the file is ignored. + +This function can be used like a table: ""SELECT * FROM CSVREAD(...)"". + +Instead of a file, a URL may be used, for example +""jar:file:///c:/temp/example.zip!/org/example/nested.csv"". +To read a stream from the classpath, use the prefix ""classpath:"". +To read from HTTP, use the prefix ""http:"" (as in a browser). + +For performance reasons, CSVREAD should not be used inside a join. +Instead, import the data first (possibly into a temporary table) and then use the table. + +Admin rights are required to execute this command. +"," +SELECT * FROM CSVREAD('test.csv'); +-- Read a file containing the columns ID, NAME with UTF-8 charset and | as field separator +SELECT * FROM CSVREAD('test2.csv', 'ID|NAME', 'charset=UTF-8 fieldSeparator=|'); +SELECT * FROM CSVREAD('data/test.csv', null, 'rowSeparator=;'); +-- Read a tab-separated file +SELECT * FROM CSVREAD('data/test.tsv', null, 'rowSeparator=' || CHAR(9)); +SELECT ""Last Name"" FROM CSVREAD('address.csv'); +SELECT ""Last Name"" FROM CSVREAD('classpath:/org/acme/data/address.csv'); +" + +"Functions (Table)","LINK_SCHEMA"," +@h2@ LINK_SCHEMA (targetSchemaString, driverString, urlString, +@h2@ userString, passwordString, sourceSchemaString) +"," +Creates table links for all tables in a schema. +If tables with the same name already exist, they are dropped first. +The target schema is created automatically if it does not yet exist. +The driver name may be empty if the driver is already loaded. +The list of tables linked is returned in the form of a result set. 
+Admin rights are required to execute this command. +"," +SELECT * FROM LINK_SCHEMA('TEST2', '', 'jdbc:h2:./test2', 'sa', 'sa', 'PUBLIC'); +" + +"Functions (Table)","TABLE"," +@h2@ { TABLE | TABLE_DISTINCT } +@h2@ ( { name dataTypeOrDomain = {array|rowValueExpression} } [,...] ) +"," +Returns the result set. TABLE_DISTINCT removes duplicate rows. +"," +SELECT * FROM TABLE(V INT = ARRAY[1, 2]); +SELECT * FROM TABLE(ID INT=(1, 2), NAME VARCHAR=('Hello', 'World')); +" + +"Functions (Table)","UNNEST"," +UNNEST(array, [,...]) [WITH ORDINALITY] +"," +Returns the result set. +Number of columns is equal to number of arguments, +plus one additional column with row number if WITH ORDINALITY is specified. +Number of rows is equal to length of longest specified array. +If multiple arguments are specified and they have different length, cells with missing values will contain null values. +"," +SELECT * FROM UNNEST(ARRAY['a', 'b', 'c']); +" + +"Aggregate Functions (General)","AVG"," +AVG ( [ DISTINCT|ALL ] { numeric | interval } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The average (mean) value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +The data type of result is DOUBLE PRECISION for TINYINT, SMALLINT, INTEGER, and REAL arguments, +NUMERIC with additional 10 decimal digits of precision and scale for BIGINT and NUMERIC arguments; +DECFLOAT with additional 10 decimal digits of precision for DOUBLE PRECISION and DECFLOAT arguments; +INTERVAL with the same leading field precision, all additional smaller datetime units in interval qualifier, +and the maximum scale for INTERVAL arguments. +"," +AVG(X) +" + +"Aggregate Functions (General)","MAX"," +MAX(value) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The highest value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +The returned value is of the same data type as the parameter. 
"," -Returns the H2 version as a String." +MAX(NAME) +" + +"Aggregate Functions (General)","MIN"," +MIN(value) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The lowest value. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +The returned value is of the same data type as the parameter. +"," +MIN(NAME) +" + +"Aggregate Functions (General)","SUM"," +SUM( [ DISTINCT|ALL ] { numeric | interval | @h2@ { boolean } } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sum of all values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +The data type of result is BIGINT for BOOLEAN, TINYINT, SMALLINT, and INTEGER arguments; +NUMERIC with additional 10 decimal digits of precision for BIGINT and NUMERIC arguments; +DOUBLE PRECISION for REAL arguments, +DECFLOAT with additional 10 decimal digits of precision for DOUBLE PRECISION and DECFLOAT arguments; +INTERVAL with maximum precision and the same interval qualifier and scale for INTERVAL arguments. +"," +SUM(X) +" + +"Aggregate Functions (General)","EVERY"," +{EVERY| @c@ {BOOL_AND}}(boolean) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Returns true if all expressions are true. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +EVERY(ID>10) +" + +"Aggregate Functions (General)","ANY"," +{ANY|SOME| @c@ {BOOL_OR}}(boolean) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Returns true if any expression is true. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +Note that if ANY or SOME aggregate function is placed on the right side of comparison operation or distinct predicate +and argument of this function is a subquery additional parentheses around aggregate function are required, +otherwise it will be parsed as quantified predicate. 
+"," +ANY(NAME LIKE 'W%') +A = (ANY((SELECT B FROM T))) +" + +"Aggregate Functions (General)","COUNT"," +COUNT( { * | { [ DISTINCT|ALL ] expression } } ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The count of all rows, or of the non-null values. +This method returns a long. +If no rows are selected, the result is 0. +Aggregates are only allowed in select statements. +"," +COUNT(*) +" + +"Aggregate Functions (General)","STDDEV_POP"," +STDDEV_POP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population standard deviation. +This method returns a double. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +STDDEV_POP(X) +" + +"Aggregate Functions (General)","STDDEV_SAMP"," +STDDEV_SAMP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample standard deviation. +This method returns a double. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +STDDEV_SAMP(X) +" + +"Aggregate Functions (General)","VAR_POP"," +VAR_POP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population variance (square of the population standard deviation). +This method returns a double. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +VAR_POP(X) +" + +"Aggregate Functions (General)","VAR_SAMP"," +VAR_SAMP( [ DISTINCT|ALL ] numeric ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample variance (square of the sample standard deviation). +This method returns a double. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. 
+"," +VAR_SAMP(X) +" + +"Aggregate Functions (General)","BIT_AND_AGG"," +{@h2@{BIT_AND_AGG}|@c@{BIT_AND}}@h2@(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise AND of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITAND](https://h2database.com/html/functions.html#bitand). +"," +BIT_AND_AGG(X) +" + +"Aggregate Functions (General)","BIT_OR_AGG"," +{@h2@{BIT_OR_AGG}|@c@{BIT_OR}}@h2@(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise OR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITOR](https://h2database.com/html/functions.html#bitor). +"," +BIT_OR_AGG(X) +" + +"Aggregate Functions (General)","BIT_XOR_AGG"," +@h2@ BIT_XOR_AGG( [ DISTINCT|ALL ] expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise XOR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITXOR](https://h2database.com/html/functions.html#bitxor). +"," +BIT_XOR_AGG(X) +" + +"Aggregate Functions (General)","BIT_NAND_AGG"," +@h2@ BIT_NAND_AGG(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise NAND of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITNAND](https://h2database.com/html/functions.html#bitnand). +"," +BIT_NAND_AGG(X) +" + +"Aggregate Functions (General)","BIT_NOR_AGG"," +@h2@ BIT_NOR_AGG(expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise NOR of all non-null values. +If no rows are selected, the result is NULL. 
+Aggregates are only allowed in select statements. + +For non-aggregate function see [BITNOR](https://h2database.com/html/functions.html#bitnor). +"," +BIT_NOR_AGG(X) +" + +"Aggregate Functions (General)","BIT_XNOR_AGG"," +@h2@ BIT_XNOR_AGG( [ DISTINCT|ALL ] expression) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +The bitwise XNOR of all non-null values. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. + +For non-aggregate function see [BITXNOR](https://h2database.com/html/functions.html#bitxnor). +"," +BIT_XNOR_AGG(X) +" + +"Aggregate Functions (General)","ENVELOPE"," +@h2@ ENVELOPE( value ) +@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the minimum bounding box that encloses all specified GEOMETRY values. +Only 2D coordinate plane is supported. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +ENVELOPE(X) +" + +"Aggregate Functions (Binary Set)","COVAR_POP"," +COVAR_POP(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The population covariance. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +COVAR_POP(Y, X) +" + +"Aggregate Functions (Binary Set)","COVAR_SAMP"," +COVAR_SAMP(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The sample covariance. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If less than two rows are selected, the result is NULL. +Aggregates are only allowed in select statements. 
+"," +COVAR_SAMP(Y, X) +" + +"Aggregate Functions (Binary Set)","CORR"," +CORR(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Pearson's correlation coefficient. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +CORR(Y, X) +" + +"Aggregate Functions (Binary Set)","REGR_SLOPE"," +REGR_SLOPE(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The slope of the line. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +REGR_SLOPE(Y, X) +" + +"Aggregate Functions (Binary Set)","REGR_INTERCEPT"," +REGR_INTERCEPT(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The y-intercept of the regression line. +This method returns a double. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +REGR_INTERCEPT(Y, X) +" + +"Aggregate Functions (Binary Set)","REGR_COUNT"," +REGR_COUNT(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Returns the number of rows in the group. +This method returns a long. +Rows in which either argument is NULL are ignored in the calculation. +If no rows are selected, the result is 0. +Aggregates are only allowed in select statements. +"," +REGR_COUNT(Y, X) +" + +"Aggregate Functions (Binary Set)","REGR_R2"," +REGR_R2(dependentExpression, independentExpression) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +The coefficient of determination. +This method returns a double. 
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+REGR_R2(Y, X)
+"
+
+"Aggregate Functions (Binary Set)","REGR_AVGX","
+REGR_AVGX(dependentExpression, independentExpression)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+The average (mean) value of independent expression.
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+For details about the data type see [AVG](https://h2database.com/html/functions-aggregate.html#avg).
+Aggregates are only allowed in select statements.
+","
+REGR_AVGX(Y, X)
+"
+
+"Aggregate Functions (Binary Set)","REGR_AVGY","
+REGR_AVGY(dependentExpression, independentExpression)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+The average (mean) value of dependent expression.
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+For details about the data type see [AVG](https://h2database.com/html/functions-aggregate.html#avg).
+Aggregates are only allowed in select statements.
+","
+REGR_AVGY(Y, X)
+"
+
+"Aggregate Functions (Binary Set)","REGR_SXX","
+REGR_SXX(dependentExpression, independentExpression)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+The sum of squares of independent expression.
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+REGR_SXX(Y, X)
+"
+
+"Aggregate Functions (Binary Set)","REGR_SYY","
+REGR_SYY(dependentExpression, independentExpression)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+The sum of squares of dependent expression.
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+REGR_SYY(Y, X)
+"
+
+"Aggregate Functions (Binary Set)","REGR_SXY","
+REGR_SXY(dependentExpression, independentExpression)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+The sum of products of independent expression times dependent expression.
+Rows in which either argument is NULL are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+REGR_SXY(Y, X)
+"
+
+"Aggregate Functions (Ordered)","LISTAGG","
+LISTAGG ( [ DISTINCT|ALL ] string [, separatorString]
+[ ON OVERFLOW { ERROR
+ | TRUNCATE [ filterString ] { WITH | WITHOUT } COUNT } ] )
+withinGroupSpecification
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+Concatenates strings with a separator.
+The default separator is a ',' (without space).
+This method returns a string.
+NULL values are ignored in the calculation, COALESCE can be used to replace them.
+If no rows are selected, the result is NULL.
+
+If ""ON OVERFLOW TRUNCATE"" is specified, values that don't fit into returned string are truncated
+and replaced with filter string placeholder ('...' by default) and count of truncated elements in parentheses.
+If ""WITHOUT COUNT"" is specified, count of truncated elements is not appended.
+
+Aggregates are only allowed in select statements.
+","
+LISTAGG(NAME, ', ') WITHIN GROUP (ORDER BY ID)
+LISTAGG(COALESCE(NAME, 'null'), ', ') WITHIN GROUP (ORDER BY ID)
+LISTAGG(ID, ', ') WITHIN GROUP (ORDER BY ID) OVER (ORDER BY ID)
+LISTAGG(ID, ';' ON OVERFLOW TRUNCATE 'etc' WITHOUT COUNT) WITHIN GROUP (ORDER BY ID)
+"
+
+"Aggregate Functions (Ordered)","ARRAY_AGG","
+ARRAY_AGG ( @h2@ [ DISTINCT|ALL ] value
+[ ORDER BY sortSpecificationList ] )
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+Aggregate the value into an array.
+This method returns an array.
+NULL values are included in the array, FILTER clause can be used to exclude them. +If no rows are selected, the result is NULL. +If ORDER BY is not specified order of values is not determined. +When this aggregate is used with OVER clause that contains ORDER BY subclause +it does not enforce exact order of values. +This aggregate needs additional own ORDER BY clause to make it deterministic. +Aggregates are only allowed in select statements. +"," +ARRAY_AGG(NAME ORDER BY ID) +ARRAY_AGG(NAME ORDER BY ID) FILTER (WHERE NAME IS NOT NULL) +ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID) +" + +"Aggregate Functions (Hypothetical Set)","RANK aggregate"," +RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the rank of the hypothetical row in specified collection of rows. +The rank of a row is the number of rows that precede this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank from the first row with the same values. +It means that gaps in ranks are possible. + +See [RANK](https://h2database.com/html/functions-window.html#rank) for a window function with the same name. +"," +SELECT RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; +" + +"Aggregate Functions (Hypothetical Set)","DENSE_RANK aggregate"," +DENSE_RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the dense rank of the hypothetical row in specified collection of rows. +The rank of a row is the number of groups of rows with the same values in ORDER BY columns that precede group with this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank. +Gaps in ranks are not possible. + +See [DENSE_RANK](https://h2database.com/html/functions-window.html#dense_rank) for a window function with the same name. 
+"," +SELECT DENSE_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; +" + +"Aggregate Functions (Hypothetical Set)","PERCENT_RANK aggregate"," +PERCENT_RANK(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the relative rank of the hypothetical row in specified collection of rows. +The relative rank is calculated as (RANK - 1) / (NR - 1), +where RANK is a rank of the row and NR is a total number of rows in the collection including hypothetical row. + +See [PERCENT_RANK](https://h2database.com/html/functions-window.html#percent_rank) for a window function with the same name. +"," +SELECT PERCENT_RANK(5) WITHIN GROUP (ORDER BY V) FROM TEST; +" + +"Aggregate Functions (Hypothetical Set)","CUME_DIST aggregate"," +CUME_DIST(value [,...]) +withinGroupSpecification +[FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification] +"," +Returns the relative rank of the hypothetical row in specified collection of rows. +The relative rank is calculated as NP / NR +where NP is a number of rows that precede the current row or have the same values in ORDER BY columns +and NR is a total number of rows in the collection including hypothetical row. + +See [CUME_DIST](https://h2database.com/html/functions-window.html#cume_dist) for a window function with the same name. +"," +SELECT CUME_DIST(5) WITHIN GROUP (ORDER BY V) FROM TEST; +" + +"Aggregate Functions (Inverse Distribution)","PERCENTILE_CONT"," +PERCENTILE_CONT(numeric) WITHIN GROUP (ORDER BY sortSpecification) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Return percentile of values from the group with interpolation. +Interpolation is only supported for numeric, date-time, and interval data types. +Argument must be between 0 and 1 inclusive. +Argument must be the same for all rows in the same group. +If argument is NULL, the result is NULL. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. 
+Aggregates are only allowed in select statements.
+","
+PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY V)
+"
+
+"Aggregate Functions (Inverse Distribution)","PERCENTILE_DISC","
+PERCENTILE_DISC(numeric) WITHIN GROUP (ORDER BY sortSpecification)
+[FILTER (WHERE expression)] [OVER windowNameOrSpecification]
+","
+Return percentile of values from the group.
+Interpolation is not performed.
+Argument must be between 0 and 1 inclusive.
+Argument must be the same for all rows in the same group.
+If argument is NULL, the result is NULL.
+NULL values are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY V)
+"
+
+"Aggregate Functions (Inverse Distribution)","MEDIAN","
+@h2@ MEDIAN( [ DISTINCT|ALL ] value )
+@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification]
+","
+The value separating the higher half of values from the lower half.
+Returns the middle value or an interpolated value between two middle values if number of values is even.
+Interpolation is only supported for numeric, date-time, and interval data types.
+NULL values are ignored in the calculation.
+If no rows are selected, the result is NULL.
+Aggregates are only allowed in select statements.
+","
+MEDIAN(X)
+"
+
+"Aggregate Functions (Inverse Distribution)","MODE","
+@h2@ { MODE() WITHIN GROUP (ORDER BY sortSpecification) }
+ | @c@ { MODE( value [ ORDER BY sortSpecification ] ) }
+@h2@ [FILTER (WHERE expression)] @h2@ [OVER windowNameOrSpecification]
+","
+Returns the value that occurs with the greatest frequency.
+If there are multiple values with the same frequency only one value will be returned.
+In this situation value will be chosen based on optional ORDER BY clause
+that should specify exactly the same expression as argument of this function.
+Use ascending order to get smallest value or descending order to get largest value +from multiple values with the same frequency. +If this clause is not specified the exact chosen value is not determined in this situation. +NULL values are ignored in the calculation. +If no rows are selected, the result is NULL. +Aggregates are only allowed in select statements. +"," +MODE() WITHIN GROUP (ORDER BY X) +" + +"Aggregate Functions (JSON)","JSON_OBJECTAGG"," +JSON_OBJECTAGG( +{[KEY] string VALUE value} | {string : value} +[ { NULL | ABSENT } ON NULL ] +[ { WITH | WITHOUT } UNIQUE KEYS ] +) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Aggregates the keys with values into a JSON object. +If ABSENT ON NULL is specified properties with NULL value are not included in the object. +If WITH UNIQUE KEYS is specified the constructed object is checked for uniqueness of keys, +nested objects, if any, are checked too. +If no values are selected, the result is SQL NULL value. +"," +JSON_OBJECTAGG(NAME: VAL); +JSON_OBJECTAGG(KEY NAME VALUE VAL); +" + +"Aggregate Functions (JSON)","JSON_ARRAYAGG"," +JSON_ARRAYAGG( @h2@ [ DISTINCT|ALL ] expression +[ ORDER BY sortSpecificationList ] +[ { NULL | ABSENT } ON NULL ] ) +[FILTER (WHERE expression)] [OVER windowNameOrSpecification] +"," +Aggregates the values into a JSON array. +If NULL ON NULL is specified NULL values are included in the array. +If no values are selected, the result is SQL NULL value. +"," +JSON_ARRAYAGG(NUMBER) +" + +"Window Functions (Row Number)","ROW_NUMBER"," +ROW_NUMBER() OVER windowNameOrSpecification +"," +Returns the number of the current row starting with 1. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. 
+"," +SELECT ROW_NUMBER() OVER (), * FROM TEST; +SELECT ROW_NUMBER() OVER (ORDER BY ID), * FROM TEST; +SELECT ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Rank)","RANK"," +RANK() OVER windowNameOrSpecification +"," +Returns the rank of the current row. +The rank of a row is the number of rows that precede this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank from the first row with the same values. +It means that gaps in ranks are possible. +This function requires window order clause. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. + +See [RANK aggregate](https://h2database.com/html/functions-aggregate.html#rank_aggregate) for a hypothetical set function with the same name. +"," +SELECT RANK() OVER (ORDER BY ID), * FROM TEST; +SELECT RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Rank)","DENSE_RANK"," +DENSE_RANK() OVER windowNameOrSpecification +"," +Returns the dense rank of the current row. +The rank of a row is the number of groups of rows with the same values in ORDER BY columns that precede group with this row plus 1. +If two or more rows have the same values in ORDER BY columns, these rows get the same rank. +Gaps in ranks are not possible. +This function requires window order clause. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. + +See [DENSE_RANK aggregate](https://h2database.com/html/functions-aggregate.html#dense_rank_aggregate) for a hypothetical set function with the same name. 
+"," +SELECT DENSE_RANK() OVER (ORDER BY ID), * FROM TEST; +SELECT DENSE_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Rank)","PERCENT_RANK"," +PERCENT_RANK() OVER windowNameOrSpecification +"," +Returns the relative rank of the current row. +The relative rank is calculated as (RANK - 1) / (NR - 1), +where RANK is a rank of the row and NR is a number of rows in window partition with this row. +Note that result is always 0 if window order clause is not specified. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. + +See [PERCENT_RANK aggregate](https://h2database.com/html/functions-aggregate.html#percent_rank_aggregate) for a hypothetical set function with the same name. +"," +SELECT PERCENT_RANK() OVER (ORDER BY ID), * FROM TEST; +SELECT PERCENT_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Rank)","CUME_DIST"," +CUME_DIST() OVER windowNameOrSpecification +"," +Returns the relative rank of the current row. +The relative rank is calculated as NP / NR +where NP is a number of rows that precede the current row or have the same values in ORDER BY columns +and NR is a number of rows in window partition with this row. +Note that result is always 1 if window order clause is not specified. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. + +See [CUME_DIST aggregate](https://h2database.com/html/functions-aggregate.html#cume_dist_aggregate) for a hypothetical set function with the same name. 
+"," +SELECT CUME_DIST() OVER (ORDER BY ID), * FROM TEST; +SELECT CUME_DIST() OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Lead or Lag)","LEAD"," +LEAD(value [, offsetInt [, defaultValue]]) [{RESPECT|IGNORE} NULLS] +OVER windowNameOrSpecification +"," +Returns the value in a next row with specified offset relative to the current row. +Offset must be non-negative. +If IGNORE NULLS is specified rows with null values in selected expression are skipped. +If number of considered rows is less than specified relative number this function returns NULL +or the specified default value, if any. +If offset is 0 the value from the current row is returned unconditionally. +This function requires window order clause. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. +"," +SELECT LEAD(X) OVER (ORDER BY ID), * FROM TEST; +SELECT LEAD(X, 2, 0) IGNORE NULLS OVER ( + PARTITION BY CATEGORY ORDER BY ID +), * FROM TEST; +" + +"Window Functions (Lead or Lag)","LAG"," +LAG(value [, offsetInt [, defaultValue]]) [{RESPECT|IGNORE} NULLS] +OVER windowNameOrSpecification +"," +Returns the value in a previous row with specified offset relative to the current row. +Offset must be non-negative. +If IGNORE NULLS is specified rows with null values in selected expression are skipped. +If number of considered rows is less than specified relative number this function returns NULL +or the specified default value, if any. +If offset is 0 the value from the current row is returned unconditionally. +This function requires window order clause. +Window frame clause is not allowed for this function. + +Window functions in H2 may require a lot of memory for large queries. 
+"," +SELECT LAG(X) OVER (ORDER BY ID), * FROM TEST; +SELECT LAG(X, 2, 0) IGNORE NULLS OVER ( + PARTITION BY CATEGORY ORDER BY ID +), * FROM TEST; +" + +"Window Functions (Nth Value)","FIRST_VALUE"," +FIRST_VALUE(value) [{RESPECT|IGNORE} NULLS] +OVER windowNameOrSpecification +"," +Returns the first value in a window. +If IGNORE NULLS is specified null values are skipped and the function returns first non-null value, if any. + +Window functions in H2 may require a lot of memory for large queries. +"," +SELECT FIRST_VALUE(X) OVER (ORDER BY ID), * FROM TEST; +SELECT FIRST_VALUE(X) IGNORE NULLS OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST; +" + +"Window Functions (Nth Value)","LAST_VALUE"," +LAST_VALUE(value) [{RESPECT|IGNORE} NULLS] +OVER windowNameOrSpecification +"," +Returns the last value in a window. +If IGNORE NULLS is specified null values are skipped and the function returns last non-null value before them, if any; +if there is no non-null value it returns NULL. +Note that the last value is actually a value in the current group of rows +if window order clause is specified and window frame clause is not specified. + +Window functions in H2 may require a lot of memory for large queries. +"," +SELECT LAST_VALUE(X) OVER (ORDER BY ID), * FROM TEST; +SELECT LAST_VALUE(X) IGNORE NULLS OVER ( + PARTITION BY CATEGORY ORDER BY ID + RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING +), * FROM TEST; +" + +"Window Functions (Nth Value)","NTH_VALUE"," +NTH_VALUE(value, nInt) [FROM {FIRST|LAST}] [{RESPECT|IGNORE} NULLS] +OVER windowNameOrSpecification +"," +Returns the value in a row with a specified relative number in a window. +Relative row number must be positive. +If FROM LAST is specified rows a counted backwards from the last row. +If IGNORE NULLS is specified rows with null values in selected expression are skipped. +If number of considered rows is less than specified relative number this function returns NULL. 
+Note that the last row is actually a last row in the current group of rows
+if window order clause is specified and window frame clause is not specified.
+
+Window functions in H2 may require a lot of memory for large queries.
+","
+SELECT NTH_VALUE(X, 2) OVER (ORDER BY ID), * FROM TEST;
+SELECT NTH_VALUE(X, 2) IGNORE NULLS OVER (
+ PARTITION BY CATEGORY ORDER BY ID
+ RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+), * FROM TEST;
+"
+
+"Window Functions (Other)","NTILE","
+NTILE(long) OVER windowNameOrSpecification
+","
+Distributes the rows into a specified number of groups.
+Number of groups should be a positive long value.
+NTILE returns the 1-based number of the group to which the current row belongs.
+First groups will have more rows if number of rows is not divisible by number of groups.
+For example, if 5 rows are distributed into 2 groups this function returns 1 for the first 3 rows and 2 for the last 2 rows.
+This function requires window order clause.
+Window frame clause is not allowed for this function.
+
+Window functions in H2 may require a lot of memory for large queries.
+","
+SELECT NTILE(10) OVER (ORDER BY ID), * FROM TEST;
+SELECT NTILE(5) OVER (PARTITION BY CATEGORY ORDER BY ID), * FROM TEST;
+"
+
+"Window Functions (Other)","RATIO_TO_REPORT","
+@h2@ RATIO_TO_REPORT(value)
+@h2@ OVER windowNameOrSpecification
+","
+Returns the ratio of a value to the sum of all values.
+If argument is NULL or sum of all values is 0, then the value of function is NULL.
+Window ordering and window frame clauses are not allowed for this function.
+
+Window functions in H2 may require a lot of memory for large queries.
+","
+SELECT X, RATIO_TO_REPORT(X) OVER (PARTITION BY CATEGORY), CATEGORY FROM TEST;
+"
+
 "System Tables","Information Schema","
 INFORMATION_SCHEMA
 ","
 To get the list of system tables, execute the statement SELECT * FROM
-INFORMATION_SCHEMA."
+INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' +"," +" + "System Tables","Range Table"," -SYSTEM_RANGE(start, end) +@h2@ SYSTEM_RANGE(start, end [, step]) +"," +Contains all values from start to end (this is a dynamic table). "," -Contains all values from start to end (this is a dynamic table)." +SYSTEM_RANGE(0, 100) +" diff --git a/h2/src/main/org/h2/res/javadoc.properties b/h2/src/main/org/h2/res/javadoc.properties index db9f1d3f0c..fabf642fa4 100644 --- a/h2/src/main/org/h2/res/javadoc.properties +++ b/h2/src/main/org/h2/res/javadoc.properties @@ -4,38 +4,34 @@ org.h2.jmx.DatabaseInfoMBean.getCacheSizeMax=The maximum cache size in KB. org.h2.jmx.DatabaseInfoMBean.getFileReadCount=The file read count since the database was opened. org.h2.jmx.DatabaseInfoMBean.getFileSize=The database file size in KB. org.h2.jmx.DatabaseInfoMBean.getFileWriteCount=The number of write operations since the database was opened. -org.h2.jmx.DatabaseInfoMBean.getFileWriteCountTotal=The number of write operations since the database was created. -org.h2.jmx.DatabaseInfoMBean.getLogMode=The transaction log mode (0 disabled, 1 without sync, 2 enabled). org.h2.jmx.DatabaseInfoMBean.getMode=The database compatibility mode (REGULAR if no compatibility mode is\n used). org.h2.jmx.DatabaseInfoMBean.getTraceLevel=The trace level (0 disabled, 1 error, 2 info, 3 debug). org.h2.jmx.DatabaseInfoMBean.getVersion=The database version. org.h2.jmx.DatabaseInfoMBean.isExclusive=Is the database open in exclusive mode? -org.h2.jmx.DatabaseInfoMBean.isMultiThreaded=Is multi-threading enabled? -org.h2.jmx.DatabaseInfoMBean.isMvcc=Is MVCC (multi version concurrency) enabled? org.h2.jmx.DatabaseInfoMBean.isReadOnly=Is the database read-only? org.h2.jmx.DatabaseInfoMBean.listSessions=List sessions, including the queries that are in\n progress, and locked tables. org.h2.jmx.DatabaseInfoMBean.listSettings=List the database settings. 
-org.h2.tools.Backup=Creates a backup of a database.\nThis tool copies all database files. The database must be closed before using\n this tool. To create a backup while the database is in use, run the BACKUP\n SQL statement. In an emergency, for example if the application is not\n responding, creating a backup using the Backup tool is possible by using the\n quiet mode. However, if the database is changed while the backup is running\n in quiet mode, the backup could be corrupt. -org.h2.tools.Backup.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-file ] The target file name (default\: backup.zip)\n[-dir ] The source directory (default\: .)\n[-db ] Source database; not required if there is only one\n[-quiet] Do not print progress information -org.h2.tools.ChangeFileEncryption=Allows changing the database file encryption password or algorithm.\nThis tool can not be used to change a password of a user.\n The database must be closed before using this tool. -org.h2.tools.ChangeFileEncryption.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-cipher type] The encryption type (AES)\n[-dir ] The database directory (default\: .)\n[-db ] Database name (all databases if not set)\n[-decrypt ] The decryption password (if not set\: not yet encrypted)\n[-encrypt ] The encryption password (if not set\: do not encrypt)\n[-quiet] Do not print progress information +org.h2.tools.Backup=Creates a backup of a database.\n\n This tool copies all database files. The database must be closed before using\n this tool. To create a backup while the database is in use, run the BACKUP\n SQL statement. In an emergency, for example if the application is not\n responding, creating a backup using the Backup tool is possible by using the\n quiet mode. However, if the database is changed while the backup is running\n in quiet mode, the backup could be corrupt. 
+org.h2.tools.Backup.main=Options are case sensitive.\nSupported options are\:[-help] or [-?]Print the list of options\n[-file ] The target file name (default\: backup.zip)\n[-dir ] The source directory (default\: .)\n[-db ] Source database; not required if there is only one\n[-quiet] Do not print progress information +org.h2.tools.ChangeFileEncryption=Allows changing the database file encryption password or algorithm.\n\n This tool can not be used to change a password of a user.\n The database must be closed before using this tool. +org.h2.tools.ChangeFileEncryption.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-cipher type] The encryption type (AES)\n[-dir ] The database directory (default\: .)\n[-db ] Database name (all databases if not set)\n[-decrypt ] The decryption password (if not set\: not yet encrypted)\n[-encrypt ] The encryption password (if not set\: do not encrypt)\n[-quiet] Do not print progress information org.h2.tools.Console=Starts the H2 Console (web-) server, as well as the TCP and PG server. -org.h2.tools.Console.main=When running without options, -tcp, -web, -browser and -pg are started.\nOptions are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url] Start a browser and connect to this URL\n[-driver] Used together with -url\: the driver\n[-user] Used together with -url\: the user name\n[-password] Used together with -url\: the password\n[-web] Start the web server with the H2 Console\n[-tool] Start the icon or window that allows to start a browser\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-pg] Start the PG server\nFor each Server, additional options are available;\n for details, see the Server tool.\nIf a service can not be started, the program\n terminates with an exit code of 1. 
-org.h2.tools.ConvertTraceFile=Converts a .trace.db file to a SQL script and Java source code.\nSQL statement statistics are listed as well. -org.h2.tools.ConvertTraceFile.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-traceFile ] The trace file name (default\: test.trace.db)\n[-script ] The script file name (default\: test.sql)\n[-javaClass ] The Java directory and class file name (default\: Test) -org.h2.tools.CreateCluster=Creates a cluster from a standalone database.\nCopies a database to another location if required. -org.h2.tools.CreateCluster.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-urlSource ""] The database URL of the source database (jdbc\:h2\:...)\n[-urlTarget ""] The database URL of the target database (jdbc\:h2\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-serverList ] The comma separated list of host names or IP addresses -org.h2.tools.DeleteDbFiles=Deletes all files belonging to a database.\nThe database must be closed before calling this tool. -org.h2.tools.DeleteDbFiles.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] 
Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name\n[-quiet] Do not print progress information +org.h2.tools.Console.main=When running without options, -tcp, -web, -browser and -pg are started.\n\n Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url] Start a browser and connect to this URL\n[-driver] Used together with -url\: the driver\n[-user] Used together with -url\: the user name\n[-password] Used together with -url\: the password\n[-web] Start the web server with the H2 Console\n[-tool] Start the icon or window that allows to start a browser\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-pg] Start the PG server\nFor each Server, additional options are available;\n for details, see the Server tool.\n If a service can not be started, the program\n terminates with an exit code of 1. +org.h2.tools.ConvertTraceFile=Converts a .trace.db file to a SQL script and Java source code.\n\n SQL statement statistics are listed as well. +org.h2.tools.ConvertTraceFile.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-traceFile ] The trace file name (default\: test.trace.db)\n[-script ] The script file name (default\: test.sql)\n[-javaClass ] The Java directory and class file name (default\: Test) +org.h2.tools.CreateCluster=Creates a cluster from a stand-alone database.\n\n Copies a database to another location if required. 
+org.h2.tools.CreateCluster.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-urlSource ""] The database URL of the source database (jdbc\:h2\:...)\n[-urlTarget ""] The database URL of the target database (jdbc\:h2\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-serverList ] The comma separated list of host names or IP addresses +org.h2.tools.DeleteDbFiles=Deletes all files belonging to a database.\n\n The database must be closed before calling this tool. +org.h2.tools.DeleteDbFiles.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name\n[-quiet] Do not print progress information org.h2.tools.Recover=Helps recovering a corrupted database. -org.h2.tools.Recover.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name (all databases if not set)\n[-trace] Print additional trace information\n[-transactionLog] Print the transaction log\nEncrypted databases need to be decrypted first. +org.h2.tools.Recover.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-dir ] The directory (default\: .)\n[-db ] The database name (all databases if not set)\n[-trace] Print additional trace information\n[-transactionLog] Print the transaction log\nEncrypted databases need to be decrypted first. org.h2.tools.Restore=Restores a H2 database by extracting the database files from a .zip file. -org.h2.tools.Restore.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-file ] The source file name (default\: backup.zip)\n[-dir ] The target directory (default\: .)\n[-db ] The target database name (as stored if not set)\n[-quiet] Do not print progress information +org.h2.tools.Restore.main=Options are case sensitive. 
Supported options\nSupported options[-help] or [-?]Print the list of options\n[-file ] The source file name (default\: backup.zip)\n[-dir ] The target directory (default\: .)\n[-db ] The target database name (as stored if not set)\n[-quiet] Do not print progress information org.h2.tools.RunScript=Runs a SQL script against a database. -org.h2.tools.RunScript.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The script file to run (default\: backup.sql)\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-showResults] Show the statements and the results of queries\n[-checkResults] Check if the query results match the expected results\n[-continueOnError] Continue even if the script contains errors\n[-options ...] RUNSCRIPT options (embedded H2; -*Results not supported) +org.h2.tools.RunScript.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The script file to run (default\: backup.sql)\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-showResults] Show the statements and the results of queries\n[-checkResults] Check if the query results match the expected results\n[-continueOnError] Continue even if the script contains errors\n[-options ...] RUNSCRIPT options (embedded H2; -*Results not supported) org.h2.tools.Script=Creates a SQL script file by extracting the schema and data of a database. -org.h2.tools.Script.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The target script file name (default\: backup.sql)\n[-options ...] 
A list of options (only for embedded H2, see SCRIPT)\n[-quiet] Do not print progress information +org.h2.tools.Script.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:...)\n[-user ] The user name (default\: sa)\n[-password ] The password\n[-script ] The target script file name (default\: backup.sql)\n[-options ...] A list of options (only for embedded H2, see SCRIPT)\n[-quiet] Do not print progress information org.h2.tools.Server=Starts the H2 Console (web-) server, TCP, and PG server. -org.h2.tools.Server.main=When running without options, -tcp, -web, -browser and -pg are started.\nOptions are case sensitive. Supported options are\:\n[-help] or [-?] Print the list of options\n[-web] Start the web server with the H2 Console\n[-webAllowOthers] Allow other computers to connect - see below\n[-webDaemon] Use a daemon thread\n[-webPort ] The port (default\: 8082)\n[-webSSL] Use encrypted (HTTPS) connections\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-tcpAllowOthers] Allow other computers to connect - see below\n[-tcpDaemon] Use a daemon thread\n[-tcpPort ] The port (default\: 9092)\n[-tcpSSL] Use encrypted (SSL) connections\n[-tcpPassword ] The password for shutting down a TCP server\n[-tcpShutdown ""] Stop the TCP server; example\: tcp\://localhost\n[-tcpShutdownForce] Do not wait until all connections are closed\n[-pg] Start the PG server\n[-pgAllowOthers] Allow other computers to connect - see below\n[-pgDaemon] Use a daemon thread\n[-pgPort ] The port (default\: 5435)\n[-properties ""] Server properties (default\: ~, disable\: null)\n[-baseDir ] The base directory for H2 databases (all servers)\n[-ifExists] Only existing databases may be opened (all servers)\n[-trace] Print additional trace information (all servers)\n[-key ] Allows to map a database name to another (all servers)\nThe options -xAllowOthers are potentially risky.\nFor details, see 
Advanced Topics / Protection against Remote Access. +org.h2.tools.Server.main=When running without options, -tcp, -web, -browser and -pg are started.\n\n Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-web] Start the web server with the H2 Console\n[-webAllowOthers] Allow other computers to connect - see below\n[-webDaemon] Use a daemon thread\n[-webPort ] The port (default\: 8082)\n[-webSSL] Use encrypted (HTTPS) connections\n[-webAdminPassword] Password of DB Console administrator\n[-browser] Start a browser connecting to the web server\n[-tcp] Start the TCP server\n[-tcpAllowOthers] Allow other computers to connect - see below\n[-tcpDaemon] Use a daemon thread\n[-tcpPort ] The port (default\: 9092)\n[-tcpSSL] Use encrypted (SSL) connections\n[-tcpPassword ] The password for shutting down a TCP server\n[-tcpShutdown ""] Stop the TCP server; example\: tcp\://localhost\n[-tcpShutdownForce] Do not wait until all connections are closed\n[-pg] Start the PG server\n[-pgAllowOthers] Allow other computers to connect - see below\n[-pgDaemon] Use a daemon thread\n[-pgPort ] The port (default\: 5435)\n[-properties ""] Server properties (default\: ~, disable\: null)\n[-baseDir ] The base directory for H2 databases (all servers)\n[-ifExists] Only existing databases may be opened (all servers)\n[-ifNotExists] Databases are created when accessed\n[-trace] Print additional trace information (all servers)\n[-key ] Allows to map a database name to another (all servers)\nThe options -xAllowOthers are potentially risky.\n\n For details, see Advanced Topics / Protection against Remote Access. org.h2.tools.Shell=Interactive command line tool to access a database using JDBC. -org.h2.tools.Shell.main=Options are case sensitive. Supported options are\:\n[-help] or [-?] 
Print the list of options\n[-url ""] The database URL (jdbc\:h2\:...)\n[-user ] The user name\n[-password ] The password\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-sql ""] Execute the SQL statements and exit\n[-properties ""] Load the server properties from this directory\nIf special characters don't work as expected, you may need to use\n -Dfile.encoding\=UTF-8 (Mac OS X) or CP850 (Windows). +org.h2.tools.Shell.main=Options are case sensitive.\nSupported options[-help] or [-?]Print the list of options\n[-url ""] The database URL (jdbc\:h2\:...)\n[-user ] The user name\n[-password ] The password\n[-driver ] The JDBC driver class to use (not required in most cases)\n[-sql ""] Execute the SQL statements and exit\n[-properties ""] Load the server properties from this directory\nIf special characters don't work as expected, you may need to use\n -Dfile.encoding\=UTF-8 (Mac OS X) or CP850 (Windows). diff --git a/h2/src/main/org/h2/result/DefaultRow.java b/h2/src/main/org/h2/result/DefaultRow.java new file mode 100644 index 0000000000..a9fe6c4063 --- /dev/null +++ b/h2/src/main/org/h2/result/DefaultRow.java @@ -0,0 +1,116 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.Constants; +import org.h2.value.Value; +import org.h2.value.ValueBigint; + +/** + * The default implementation of a row in a table. + */ +public class DefaultRow extends Row { + + /** + * The constant that means "memory usage is unknown and needs to be calculated first". + */ + public static final int MEMORY_CALCULATE = -1; + + /** + * The values of the row (one entry per column). 
+ */ + protected final Value[] data; + + private int memory; + + DefaultRow(int columnCount) { + this.data = new Value[columnCount]; + this.memory = MEMORY_CALCULATE; + } + + public DefaultRow(Value[] data) { + this.data = data; + this.memory = MEMORY_CALCULATE; + } + + public DefaultRow(Value[] data, int memory) { + this.data = data; + this.memory = memory; + } + + @Override + public Value getValue(int i) { + return i == ROWID_INDEX ? ValueBigint.get(key) : data[i]; + } + + @Override + public void setValue(int i, Value v) { + if (i == ROWID_INDEX) { + key = v.getLong(); + } else { + data[i] = v; + } + } + + @Override + public int getColumnCount() { + return data.length; + } + + @Override + public int getMemory() { + if (memory != MEMORY_CALCULATE) { + return memory; + } + return memory = calculateMemory(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("( /* key:").append(key).append(" */ "); + for (int i = 0, length = data.length; i < length; i++) { + if (i > 0) { + builder.append(", "); + } + Value v = data[i]; + builder.append(v == null ? "null" : v.getTraceSQL()); + } + return builder.append(')').toString(); + } + + /** + * Calculate the estimated memory used for this row, in bytes. 
+ * + * @return the memory + */ + protected int calculateMemory() { + int m = Constants.MEMORY_ROW + Constants.MEMORY_ARRAY + data.length * Constants.MEMORY_POINTER; + for (Value v : data) { + if (v != null) { + m += v.getMemory(); + } + } + return m; + } + + @Override + public Value[] getValueList() { + return data; + } + + @Override + public boolean hasSharedData(Row other) { + return other instanceof DefaultRow && data == ((DefaultRow) other).data; + } + + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + for (int i = 0; i < getColumnCount(); i++) { + setValue(i, source.getValue(i)); + } + } +} diff --git a/h2/src/main/org/h2/result/FetchedResult.java b/h2/src/main/org/h2/result/FetchedResult.java new file mode 100644 index 0000000000..6882ede34c --- /dev/null +++ b/h2/src/main/org/h2/result/FetchedResult.java @@ -0,0 +1,69 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.Session; +import org.h2.value.Value; + +/** + * Abstract fetched result. 
+ */ +public abstract class FetchedResult implements ResultInterface { + + long rowId = -1; + + Value[] currentRow; + + Value[] nextRow; + + boolean afterLast; + + FetchedResult() { + } + + @Override + public final Value[] currentRow() { + return currentRow; + } + + @Override + public final boolean next() { + if (hasNext()) { + rowId++; + currentRow = nextRow; + nextRow = null; + return true; + } + if (!afterLast) { + rowId++; + currentRow = null; + afterLast = true; + } + return false; + } + + @Override + public final boolean isAfterLast() { + return afterLast; + } + + @Override + public final long getRowId() { + return rowId; + } + + @Override + public final boolean needToClose() { + return true; + } + + @Override + public final ResultInterface createShallowCopy(Session targetSession) { + // The operation is not supported on fetched result. + return null; + } + +} diff --git a/h2/src/main/org/h2/result/LazyResult.java b/h2/src/main/org/h2/result/LazyResult.java new file mode 100644 index 0000000000..66c6187343 --- /dev/null +++ b/h2/src/main/org/h2/result/LazyResult.java @@ -0,0 +1,160 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.message.DbException; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Lazy execution support for queries. 
+ * + * @author Sergi Vladykin + */ +public abstract class LazyResult extends FetchedResult { + + private final SessionLocal session; + private final Expression[] expressions; + private boolean closed; + private long limit; + + public LazyResult(SessionLocal session, Expression[] expressions) { + this.session = session; + this.expressions = expressions; + } + + public void setLimit(long limit) { + this.limit = limit; + } + + @Override + public boolean isLazy() { + return true; + } + + @Override + public void reset() { + if (closed) { + throw DbException.getInternalError(); + } + rowId = -1L; + afterLast = false; + currentRow = null; + nextRow = null; + } + + /** + * Go to the next row and skip it. + * + * @return true if a row exists + */ + public boolean skip() { + if (closed || afterLast) { + return false; + } + currentRow = null; + if (nextRow != null) { + nextRow = null; + return true; + } + if (skipNextRow()) { + return true; + } + afterLast = true; + return false; + } + + @Override + public boolean hasNext() { + if (closed || afterLast) { + return false; + } + if (nextRow == null && (limit <= 0 || rowId + 1 < limit)) { + nextRow = fetchNextRow(); + } + return nextRow != null; + } + + /** + * Fetch next row or null if none available. + * + * @return next row or null + */ + protected abstract Value[] fetchNextRow(); + + /** + * Skip next row. 
+ * + * @return true if next row was available + */ + protected boolean skipNextRow() { + return fetchNextRow() != null; + } + + @Override + public long getRowCount() { + throw DbException.getUnsupportedException("Row count is unknown for lazy result."); + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public void close() { + closed = true; + } + + @Override + public String getAlias(int i) { + return expressions[i].getAlias(session, i); + } + + @Override + public String getSchemaName(int i) { + return expressions[i].getSchemaName(); + } + + @Override + public String getTableName(int i) { + return expressions[i].getTableName(); + } + + @Override + public String getColumnName(int i) { + return expressions[i].getColumnName(session, i); + } + + @Override + public TypeInfo getColumnType(int i) { + return expressions[i].getType(); + } + + @Override + public boolean isIdentity(int i) { + return expressions[i].isIdentity(); + } + + @Override + public int getNullable(int i) { + return expressions[i].getNullable(); + } + + @Override + public void setFetchSize(int fetchSize) { + // ignore + } + + @Override + public int getFetchSize() { + // We always fetch rows one by one. + return 1; + } + +} diff --git a/h2/src/main/org/h2/result/LocalResult.java b/h2/src/main/org/h2/result/LocalResult.java index 65ee34aae8..fa630ed495 100644 --- a/h2/src/main/org/h2/result/LocalResult.java +++ b/h2/src/main/org/h2/result/LocalResult.java @@ -1,22 +1,30 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; -import java.sql.ResultSet; -import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.TreeMap; + import org.h2.engine.Database; import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.engine.SysProperties; import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; import org.h2.message.DbException; -import org.h2.util.New; -import org.h2.util.ValueHashMap; -import org.h2.value.DataType; +import org.h2.mvstore.db.MVTempResult; +import org.h2.table.Column; +import org.h2.table.Table; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueLob; +import org.h2.value.ValueRow; /** * A local result set contains all row data of a result set. @@ -26,40 +34,79 @@ */ public class LocalResult implements ResultInterface, ResultTarget { + /** + * Constructs a new local result object for the specified table. + * + * @param session + * the session + * @param table + * the table + * @return the local result + */ + public static LocalResult forTable(SessionLocal session, Table table) { + Column[] columns = table.getColumns(); + int degree = columns.length; + Expression[] expressions = new Expression[degree + 1]; + Database database = session.getDatabase(); + for (int i = 0; i < degree; i++) { + expressions[i] = new ExpressionColumn(database, columns[i]); + } + Column rowIdColumn = table.getRowIdColumn(); + expressions[degree] = rowIdColumn != null ? 
new ExpressionColumn(database, rowIdColumn) + : new ExpressionColumn(database, null, table.getName()); + return new LocalResult(session, expressions, degree, degree + 1); + } + private int maxMemoryRows; - private Session session; + private final SessionLocal session; private int visibleColumnCount; + private int resultColumnCount; private Expression[] expressions; - private int rowId, rowCount; + private boolean forDataChangeDeltaTable; + private long rowId, rowCount; private ArrayList rows; private SortOrder sort; - private ValueHashMap distinctRows; + // HashSet cannot be used here, because we need to compare values of + // different type or scale properly. + private TreeMap distinctRows; private Value[] currentRow; - private int offset; - private int limit = -1; + private long offset; + private long limit = -1; + private boolean fetchPercent; + private SortOrder withTiesSortOrder; + private boolean limitsWereApplied; private ResultExternal external; - private int diskOffset; private boolean distinct; - private boolean randomAccess; + private int[] distinctIndexes; private boolean closed; private boolean containsLobs; + private Boolean containsNull; /** * Construct a local result object. */ public LocalResult() { - // nothing to do + this(null); + } + + private LocalResult(SessionLocal session) { + this.session = session; } /** * Construct a local result object. 
* - * @param session the session - * @param expressions the expression array - * @param visibleColumnCount the number of visible columns + * @param session + * the session + * @param expressions + * the expression array + * @param visibleColumnCount + * the number of visible columns + * @param resultColumnCount + * the number of columns including visible columns and additional + * virtual columns for ORDER BY and DISTINCT ON clauses */ - public LocalResult(Session session, Expression[] expressions, - int visibleColumnCount) { + public LocalResult(SessionLocal session, Expression[] expressions, int visibleColumnCount, int resultColumnCount) { this.session = session; if (session == null) { this.maxMemoryRows = Integer.MAX_VALUE; @@ -71,43 +118,34 @@ public LocalResult(Session session, Expression[] expressions, this.maxMemoryRows = Integer.MAX_VALUE; } } - rows = New.arrayList(); + rows = Utils.newSmallArrayList(); this.visibleColumnCount = visibleColumnCount; + this.resultColumnCount = resultColumnCount; rowId = -1; this.expressions = expressions; } + @Override + public boolean isLazy() { + return false; + } + + /** + * Redefine count of maximum rows holds in memory for the result. + * + * @param maxValue Maximum rows count in memory. + * + * @see SysProperties#MAX_MEMORY_ROWS + */ public void setMaxMemoryRows(int maxValue) { this.maxMemoryRows = maxValue; } /** - * Construct a local result set by reading all data from a regular result - * set. - * - * @param session the session - * @param rs the result set - * @param maxrows the maximum number of rows to read (0 for no limit) - * @return the local result set + * Sets value collection mode for data change delta tables. 
*/ - public static LocalResult read(Session session, ResultSet rs, int maxrows) { - Expression[] cols = Expression.getExpressionColumns(session, rs); - int columnCount = cols.length; - LocalResult result = new LocalResult(session, cols, columnCount); - try { - for (int i = 0; (maxrows == 0 || i < maxrows) && rs.next(); i++) { - Value[] list = new Value[columnCount]; - for (int j = 0; j < columnCount; j++) { - int type = result.getColumnType(j); - list[j] = DataType.readValue(session, rs, j + 1, type); - } - result.addRow(list); - } - } catch (SQLException e) { - throw DbException.convert(e); - } - result.done(); - return result; + public void setForDataChangeDeltaTable() { + forDataChangeDeltaTable = true; } /** @@ -117,6 +155,7 @@ public static LocalResult read(Session session, ResultSet rs, int maxrows) { * @param targetSession the session of the copy * @return the copy if possible, or null if copying is not possible */ + @Override public LocalResult createShallowCopy(Session targetSession) { if (external == null && (rows == null || rows.size() < rowCount)) { return null; @@ -131,10 +170,10 @@ public LocalResult createShallowCopy(Session targetSession) { return null; } } - LocalResult copy = new LocalResult(); + LocalResult copy = new LocalResult((SessionLocal) targetSession); copy.maxMemoryRows = this.maxMemoryRows; - copy.session = targetSession; copy.visibleColumnCount = this.visibleColumnCount; + copy.resultColumnCount = this.resultColumnCount; copy.expressions = this.expressions; copy.rowId = -1; copy.rowCount = this.rowCount; @@ -142,17 +181,18 @@ public LocalResult createShallowCopy(Session targetSession) { copy.sort = this.sort; copy.distinctRows = this.distinctRows; copy.distinct = distinct; - copy.randomAccess = randomAccess; + copy.distinctIndexes = distinctIndexes; copy.currentRow = null; copy.offset = 0; copy.limit = -1; copy.external = e2; - copy.diskOffset = this.diskOffset; + copy.containsNull = containsNull; return copy; } /** - * Set the sort 
order. + * Sets sort order to be used by this result. When rows are presorted by the + * query this method should not be used. * * @param sort the sort order */ @@ -164,33 +204,27 @@ public void setSortOrder(SortOrder sort) { * Remove duplicate rows. */ public void setDistinct() { + assert distinctIndexes == null; distinct = true; - distinctRows = ValueHashMap.newInstance(); + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); } /** - * Random access is required (containsDistinct). + * Remove rows with duplicates in columns with specified indexes. + * + * @param distinctIndexes distinct indexes */ - public void setRandomAccess() { - this.randomAccess = true; + public void setDistinct(int[] distinctIndexes) { + assert !distinct; + this.distinctIndexes = distinctIndexes; + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); } /** - * Remove the row from the result set if it exists. - * - * @param values the row + * @return whether this result is a distinct result */ - public void removeDistinct(Value[] values) { - if (!distinct) { - DbException.throwInternalError(); - } - if (distinctRows != null) { - ValueArray array = ValueArray.get(values); - distinctRows.remove(array); - rowCount = distinctRows.size(); - } else { - rowCount = external.removeRow(values); - } + private boolean isAnyDistinct() { + return distinct || distinctIndexes != null; } /** @@ -200,38 +234,87 @@ public void removeDistinct(Value[] values) { * @return true if the row exists */ public boolean containsDistinct(Value[] values) { + assert values.length == visibleColumnCount; if (external != null) { return external.contains(values); } if (distinctRows == null) { - distinctRows = ValueHashMap.newInstance(); + distinctRows = new TreeMap<>(session.getDatabase().getCompareMode()); for (Value[] row : rows) { - if (row.length > visibleColumnCount) { - Value[] r2 = new Value[visibleColumnCount]; - System.arraycopy(row, 0, r2, 0, visibleColumnCount); - row = r2; - } - 
ValueArray array = ValueArray.get(row); - distinctRows.put(array, row); + ValueRow array = getDistinctRow(row); + distinctRows.put(array, array.getList()); } } - ValueArray array = ValueArray.get(values); + ValueRow array = ValueRow.get(values); return distinctRows.get(array) != null; } + /** + * Check if this result set contains a NULL value. This method may reset + * this result. + * + * @return true if there is a NULL value + */ + public boolean containsNull() { + Boolean r = containsNull; + if (r == null) { + r = false; + reset(); + loop: while (next()) { + Value[] row = currentRow; + for (int i = 0; i < visibleColumnCount; i++) { + if (row[i].containsNull()) { + r = true; + break loop; + } + } + } + reset(); + containsNull = r; + } + return r; + } + + /** + * Remove the row from the result set if it exists. + * + * @param values the row + */ + public void removeDistinct(Value[] values) { + if (!distinct) { + throw DbException.getInternalError(); + } + assert values.length == visibleColumnCount; + if (distinctRows != null) { + distinctRows.remove(ValueRow.get(values)); + rowCount = distinctRows.size(); + } else { + rowCount = external.removeRow(values); + } + } + @Override public void reset() { rowId = -1; + currentRow = null; if (external != null) { external.reset(); - if (diskOffset > 0) { - for (int i = 0; i < diskOffset; i++) { - external.next(); - } - } } } + /** + * Retrieve the current row + * @return row + */ + public Row currentRowForTable() { + int degree = visibleColumnCount; + Value[] currentRow = this.currentRow; + Row row = session.getDatabase().getRowFactory() + .createRow(Arrays.copyOf(currentRow, degree), SearchRow.MEMORY_CALCULATE); + row.setKey(currentRow[degree].getLong()); + return row; + } + @Override public Value[] currentRow() { return currentRow; @@ -245,7 +328,7 @@ public boolean next() { if (external != null) { currentRow = external.next(); } else { - currentRow = rows.get(rowId); + currentRow = rows.get((int) rowId); } return true; } 
@@ -255,56 +338,108 @@ public boolean next() { } @Override - public int getRowId() { + public long getRowId() { return rowId; } + @Override + public boolean isAfterLast() { + return rowId >= rowCount; + } + private void cloneLobs(Value[] values) { for (int i = 0; i < values.length; i++) { Value v = values[i]; - Value v2 = v.copyToResult(); - if (v2 != v) { - containsLobs = true; - session.addTemporaryLob(v2); - values[i] = v2; + if (v instanceof ValueLob) { + if (forDataChangeDeltaTable) { + containsLobs = true; + } else { + ValueLob v2 = ((ValueLob) v).copyToResult(); + if (v2 != v) { + containsLobs = true; + values[i] = session.addTemporaryLob(v2); + } + } } } } + private ValueRow getDistinctRow(Value[] values) { + if (distinctIndexes != null) { + int cnt = distinctIndexes.length; + Value[] newValues = new Value[cnt]; + for (int i = 0; i < cnt; i++) { + newValues[i] = values[distinctIndexes[i]]; + } + values = newValues; + } else if (values.length > visibleColumnCount) { + values = Arrays.copyOf(values, visibleColumnCount); + } + return ValueRow.get(values); + } + + private void createExternalResult() { + external = MVTempResult.of(session.getDatabase(), expressions, distinct, distinctIndexes, visibleColumnCount, + resultColumnCount, sort); + } + + /** + * Add a row for a table. + * + * @param row the row to add + */ + public void addRowForTable(Row row) { + int degree = visibleColumnCount; + Value[] values = new Value[degree + 1]; + for (int i = 0; i < degree; i++) { + values[i] = row.getValue(i); + } + values[degree] = ValueBigint.get(row.getKey()); + addRowInternal(values); + } + /** * Add a row to this object. * * @param values the row to add */ @Override - public void addRow(Value[] values) { + public void addRow(Value... values) { + assert values.length == resultColumnCount; cloneLobs(values); - if (distinct) { + addRowInternal(values); + } + + private void addRowInternal(Value... 
values) { + if (isAnyDistinct()) { if (distinctRows != null) { - ValueArray array = ValueArray.get(values); - distinctRows.put(array, values); + ValueRow distinctRow = getDistinctRow(values); + Value[] previous = distinctRows.get(distinctRow); + if (previous == null || sort != null && sort.compare(previous, values) > 0) { + distinctRows.put(distinctRow, values); + } rowCount = distinctRows.size(); if (rowCount > maxMemoryRows) { - external = new ResultTempTable(session, expressions, true, sort); + createExternalResult(); rowCount = external.addRows(distinctRows.values()); distinctRows = null; } } else { rowCount = external.addRow(values); } - return; - } - rows.add(values); - rowCount++; - if (rows.size() > maxMemoryRows) { - if (external == null) { - external = new ResultTempTable(session, expressions, false, sort); + } else { + rows.add(values); + rowCount++; + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); } - addRowsToDisk(); } } private void addRowsToDisk() { + if (external == null) { + createExternalResult(); + } rowCount = external.addRows(rows); rows.clear(); } @@ -318,82 +453,160 @@ public int getVisibleColumnCount() { * This method is called after all rows have been added. 
*/ public void done() { - if (distinct) { - if (distinctRows != null) { - rows = distinctRows.values(); - } else { - if (external != null && sort != null) { - // external sort - ResultExternal temp = external; - external = null; - temp.reset(); - rows = New.arrayList(); - // TODO use offset directly if possible - while (true) { - Value[] list = temp.next(); - if (list == null) { - break; - } - if (external == null) { - external = new ResultTempTable(session, expressions, true, sort); - } - rows.add(list); - if (rows.size() > maxMemoryRows) { - rowCount = external.addRows(rows); - rows.clear(); - } - } - temp.close(); - // the remaining data in rows is written in the following - // lines - } - } - } if (external != null) { addRowsToDisk(); - external.done(); } else { - if (sort != null) { - if (offset > 0 || limit > 0) { - sort.sort(rows, offset, limit < 0 ? rows.size() : limit); + if (isAnyDistinct()) { + rows = new ArrayList<>(distinctRows.values()); + } + if (sort != null && limit != 0 && !limitsWereApplied) { + boolean withLimit = limit > 0 && withTiesSortOrder == null; + if (offset > 0 || withLimit) { + int endExclusive = rows.size(); + if (offset < endExclusive) { + int fromInclusive = (int) offset; + if (withLimit && limit < endExclusive - fromInclusive) { + endExclusive = fromInclusive + (int) limit; + } + sort.sort(rows, fromInclusive, endExclusive); + } } else { sort.sort(rows); } } } - applyOffset(); - applyLimit(); + applyOffsetAndLimit(); reset(); } + private void applyOffsetAndLimit() { + if (limitsWereApplied) { + return; + } + long offset = Math.max(this.offset, 0); + long limit = this.limit; + if (offset == 0 && limit < 0 && !fetchPercent || rowCount == 0) { + return; + } + if (fetchPercent) { + if (limit < 0 || limit > 100) { + throw DbException.getInvalidValueException("FETCH PERCENT", limit); + } + // Oracle rounds percent up, do the same for now + limit = (limit * rowCount + 99) / 100; + } + boolean clearAll = offset >= rowCount || limit == 0; + 
if (!clearAll) { + long remaining = rowCount - offset; + limit = limit < 0 ? remaining : Math.min(remaining, limit); + if (offset == 0 && remaining <= limit) { + return; + } + } else { + limit = 0; + } + distinctRows = null; + rowCount = limit; + if (external == null) { + if (clearAll) { + rows.clear(); + return; + } + int to = (int) (offset + limit); + if (withTiesSortOrder != null) { + Value[] expected = rows.get(to - 1); + while (to < rows.size() && withTiesSortOrder.compare(expected, rows.get(to)) == 0) { + to++; + rowCount++; + } + } + if (offset != 0 || to != rows.size()) { + // avoid copying the whole array for each row + rows = new ArrayList<>(rows.subList((int) offset, to)); + } + } else { + if (clearAll) { + external.close(); + external = null; + return; + } + trimExternal(offset, limit); + } + } + + private void trimExternal(long offset, long limit) { + ResultExternal temp = external; + external = null; + temp.reset(); + while (--offset >= 0) { + temp.next(); + } + Value[] row = null; + while (--limit >= 0) { + row = temp.next(); + rows.add(row); + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); + } + } + if (withTiesSortOrder != null && row != null) { + Value[] expected = row; + while ((row = temp.next()) != null && withTiesSortOrder.compare(expected, row) == 0) { + rows.add(row); + rowCount++; + if (rows.size() > maxMemoryRows) { + addRowsToDisk(); + } + } + } + if (external != null) { + addRowsToDisk(); + } + temp.close(); + } + @Override - public int getRowCount() { + public long getRowCount() { return rowCount; } + @Override + public void limitsWereApplied() { + this.limitsWereApplied = true; + } + + @Override + public boolean hasNext() { + return !closed && rowId < rowCount - 1; + } + /** * Set the number of rows that this result will return at the maximum. 
* * @param limit the limit (-1 means no limit, 0 means no rows) */ - public void setLimit(int limit) { + public void setLimit(long limit) { this.limit = limit; } - private void applyLimit() { - if (limit < 0) { - return; - } - if (external == null) { - if (rows.size() > limit) { - rows = New.arrayList(rows.subList(0, limit)); - rowCount = limit; - } - } else { - if (limit < rowCount) { - rowCount = limit; - } - } + /** + * @param fetchPercent whether limit expression specifies percentage of rows + */ + public void setFetchPercent(boolean fetchPercent) { + this.fetchPercent = fetchPercent; + } + + /** + * Enables inclusion of tied rows to result and sets the sort order for tied + * rows. The specified sort order must be the same as sort order if sort + * order was set. Passed value will be used if sort order was not set that + * is possible when rows are presorted. + * + * @param withTiesSortOrder the sort order for tied rows + */ + public void setWithTies(SortOrder withTiesSortOrder) { + assert sort == null || sort == withTiesSortOrder; + this.withTiesSortOrder = withTiesSortOrder; } @Override @@ -412,7 +625,7 @@ public void close() { @Override public String getAlias(int i) { - return expressions[i].getAlias(); + return expressions[i].getAlias(session, i); } @Override @@ -425,39 +638,24 @@ public String getSchemaName(int i) { return expressions[i].getSchemaName(); } - @Override - public int getDisplaySize(int i) { - return expressions[i].getDisplaySize(); - } - @Override public String getColumnName(int i) { - return expressions[i].getColumnName(); + return expressions[i].getColumnName(session, i); } @Override - public int getColumnType(int i) { + public TypeInfo getColumnType(int i) { return expressions[i].getType(); } - @Override - public long getColumnPrecision(int i) { - return expressions[i].getPrecision(); - } - @Override public int getNullable(int i) { return expressions[i].getNullable(); } @Override - public boolean isAutoIncrement(int i) { - return 
expressions[i].isAutoIncrement(); - } - - @Override - public int getColumnScale(int i) { - return expressions[i].getScale(); + public boolean isIdentity(int i) { + return expressions[i].isIdentity(); } /** @@ -465,34 +663,10 @@ public int getColumnScale(int i) { * * @param offset the offset */ - public void setOffset(int offset) { + public void setOffset(long offset) { this.offset = offset; } - private void applyOffset() { - if (offset <= 0) { - return; - } - if (external == null) { - if (offset >= rows.size()) { - rows.clear(); - rowCount = 0; - } else { - // avoid copying the whole array for each row - int remove = Math.min(offset, rows.size()); - rows = New.arrayList(rows.subList(remove, rows.size())); - rowCount -= remove; - } - } else { - if (offset >= rowCount) { - rowCount = 0; - } else { - diskOffset = offset; - rowCount -= offset; - } - } - } - @Override public String toString() { return super.toString() + " columns: " + visibleColumnCount + @@ -504,6 +678,7 @@ public String toString() { * * @return true if it is */ + @Override public boolean isClosed() { return closed; } diff --git a/h2/src/main/org/h2/result/MergedResult.java b/h2/src/main/org/h2/result/MergedResult.java new file mode 100644 index 0000000000..57545821e5 --- /dev/null +++ b/h2/src/main/org/h2/result/MergedResult.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.h2.util.Utils; +import org.h2.value.Value; + +/** + * Merged result. Used to combine several results into one. Merged result will + * contain rows from all appended results. 
Results are not required to have the + * same lists of columns, but required to have compatible column definitions, + * for example, if one result has a {@link java.sql.Types#VARCHAR} column + * {@code NAME} then another results that have {@code NAME} column should also + * define it with the same type. + */ +public final class MergedResult { + private final ArrayList> data = Utils.newSmallArrayList(); + + private final ArrayList columns = Utils.newSmallArrayList(); + + /** + * Appends a result. + * + * @param result + * result to append + */ + public void add(ResultInterface result) { + int count = result.getVisibleColumnCount(); + if (count == 0) { + return; + } + SimpleResult.Column[] cols = new SimpleResult.Column[count]; + for (int i = 0; i < count; i++) { + SimpleResult.Column c = new SimpleResult.Column(result.getAlias(i), result.getColumnName(i), + result.getColumnType(i)); + cols[i] = c; + if (!columns.contains(c)) { + columns.add(c); + } + } + while (result.next()) { + if (count == 1) { + data.add(Collections.singletonMap(cols[0], result.currentRow()[0])); + } else { + HashMap map = new HashMap<>(); + for (int i = 0; i < count; i++) { + SimpleResult.Column ci = cols[i]; + map.put(ci, result.currentRow()[i]); + } + data.add(map); + } + } + } + + /** + * Returns merged results. 
+ * + * @return result with rows from all appended result sets + */ + public SimpleResult getResult() { + SimpleResult result = new SimpleResult(); + for (SimpleResult.Column c : columns) { + result.addColumn(c); + } + for (Map map : data) { + Value[] row = new Value[columns.size()]; + for (Map.Entry entry : map.entrySet()) { + row[columns.indexOf(entry.getKey())] = entry.getValue(); + } + result.addRow(row); + } + return result; + } + + @Override + public String toString() { + return columns + ": " + data.size(); + } + +} diff --git a/h2/src/main/org/h2/result/ResultColumn.java b/h2/src/main/org/h2/result/ResultColumn.java index 14ef845ef3..f8cc1a51f5 100644 --- a/h2/src/main/org/h2/result/ResultColumn.java +++ b/h2/src/main/org/h2/result/ResultColumn.java @@ -1,13 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import java.io.IOException; +import org.h2.engine.Constants; import org.h2.value.Transfer; +import org.h2.value.TypeInfo; /** * A result set column of a remote result. @@ -35,29 +37,14 @@ public class ResultColumn { final String columnName; /** - * The value type of this column. + * The column type. */ - final int columnType; + final TypeInfo columnType; /** - * The precision. + * True if this is an identity column. */ - final long precision; - - /** - * The scale. - */ - final int scale; - - /** - * The expected display size. - */ - final int displaySize; - - /** - * True if this is an autoincrement column. - */ - final boolean autoIncrement; + final boolean identity; /** * True if this column is nullable. 
@@ -74,11 +61,11 @@ public class ResultColumn { schemaName = in.readString(); tableName = in.readString(); columnName = in.readString(); - columnType = in.readInt(); - precision = in.readLong(); - scale = in.readInt(); - displaySize = in.readInt(); - autoIncrement = in.readBoolean(); + columnType = in.readTypeInfo(); + if (in.getVersion() < Constants.TCP_PROTOCOL_VERSION_20) { + in.readInt(); + } + identity = in.readBoolean(); nullable = in.readInt(); } @@ -88,6 +75,7 @@ public class ResultColumn { * @param out the object to where to write the data * @param result the result * @param i the column index + * @throws IOException on failure */ public static void writeColumn(Transfer out, ResultInterface result, int i) throws IOException { @@ -95,11 +83,12 @@ public static void writeColumn(Transfer out, ResultInterface result, int i) out.writeString(result.getSchemaName(i)); out.writeString(result.getTableName(i)); out.writeString(result.getColumnName(i)); - out.writeInt(result.getColumnType(i)); - out.writeLong(result.getColumnPrecision(i)); - out.writeInt(result.getColumnScale(i)); - out.writeInt(result.getDisplaySize(i)); - out.writeBoolean(result.isAutoIncrement(i)); + TypeInfo type = result.getColumnType(i); + out.writeTypeInfo(type); + if (out.getVersion() < Constants.TCP_PROTOCOL_VERSION_20) { + out.writeInt(type.getDisplaySize()); + } + out.writeBoolean(result.isIdentity(i)); out.writeInt(result.getNullable(i)); } diff --git a/h2/src/main/org/h2/result/ResultExternal.java b/h2/src/main/org/h2/result/ResultExternal.java index e9f05c4a04..c61b5a176b 100644 --- a/h2/src/main/org/h2/result/ResultExternal.java +++ b/h2/src/main/org/h2/result/ResultExternal.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; -import java.util.ArrayList; +import java.util.Collection; import org.h2.value.Value; /** @@ -40,12 +40,7 @@ public interface ResultExternal { * @param rows the list of rows to add * @return the new number of rows in this object */ - int addRows(ArrayList rows); - - /** - * This method is called after all rows have been added. - */ - void done(); + int addRows(Collection rows); /** * Close this object and delete the temporary file. diff --git a/h2/src/main/org/h2/result/ResultInterface.java b/h2/src/main/org/h2/result/ResultInterface.java index 0eb1bc43da..c9ac258198 100644 --- a/h2/src/main/org/h2/result/ResultInterface.java +++ b/h2/src/main/org/h2/result/ResultInterface.java @@ -1,17 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; +import org.h2.engine.Session; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** * The result interface is used by the LocalResult and ResultRemote class. * A result may contain rows, or just an update count. */ -public interface ResultInterface { +public interface ResultInterface extends AutoCloseable { /** * Go to the beginning of the result, that means @@ -39,7 +41,14 @@ public interface ResultInterface { * * @return the row id */ - int getRowId(); + long getRowId(); + + /** + * Check if the current position is after last row. + * + * @return true if after last + */ + boolean isAfterLast(); /** * Get the number of visible columns. @@ -54,7 +63,14 @@ public interface ResultInterface { * * @return the number of rows */ - int getRowCount(); + long getRowCount(); + + /** + * Check if this result has more rows to fetch. 
+ * + * @return true if it has + */ + boolean hasNext(); /** * Check if this result set should be closed, for example because it is @@ -67,6 +83,7 @@ public interface ResultInterface { /** * Close the result and delete any temporary files */ + @Override void close(); /** @@ -107,60 +124,59 @@ public interface ResultInterface { * @param i the column number (starting with 0) * @return the column data type */ - int getColumnType(int i); + TypeInfo getColumnType(int i); /** - * Get the precision for this column. + * Check if this is an identity column. * * @param i the column number (starting with 0) - * @return the precision + * @return true for identity columns */ - long getColumnPrecision(int i); + boolean isIdentity(int i); /** - * Get the scale for this column. + * Check if this column is nullable. * * @param i the column number (starting with 0) - * @return the scale + * @return Column.NULLABLE_* */ - int getColumnScale(int i); + int getNullable(int i); /** - * Get the display size for this column. + * Set the fetch size for this result set. * - * @param i the column number (starting with 0) - * @return the display size + * @param fetchSize the new fetch size */ - int getDisplaySize(int i); + void setFetchSize(int fetchSize); /** - * Check if this is an auto-increment column. + * Get the current fetch size for this result set. * - * @param i the column number (starting with 0) - * @return true for auto-increment columns + * @return the fetch size */ - boolean isAutoIncrement(int i); + int getFetchSize(); /** - * Check if this column is nullable. + * Check if this a lazy execution result. * - * @param i the column number (starting with 0) - * @return Column.NULLABLE_* + * @return true if it is a lazy result */ - int getNullable(int i); + boolean isLazy(); /** - * Set the fetch size for this result set. + * Check if this result set is closed. 
* - * @param fetchSize the new fetch size + * @return true if it is */ - void setFetchSize(int fetchSize); + boolean isClosed(); /** - * Get the current fetch size for this result set. + * Create a shallow copy of the result set. The data and a temporary table + * (if there is any) is not copied. * - * @return the fetch size + * @param targetSession the session of the copy + * @return the copy if possible, or null if copying is not possible */ - int getFetchSize(); + ResultInterface createShallowCopy(Session targetSession); } diff --git a/h2/src/main/org/h2/result/ResultRemote.java b/h2/src/main/org/h2/result/ResultRemote.java index f0b6ae2eb5..e3e5a532e6 100644 --- a/h2/src/main/org/h2/result/ResultRemote.java +++ b/h2/src/main/org/h2/result/ResultRemote.java @@ -1,18 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import java.io.IOException; import java.util.ArrayList; + +import org.h2.api.ErrorCode; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.util.New; import org.h2.value.Transfer; +import org.h2.value.TypeInfo; import org.h2.value.Value; /** @@ -20,16 +22,15 @@ * In many cases, the complete data is kept on the client side, * but for large results only a subset is in-memory. 
*/ -public class ResultRemote implements ResultInterface { +public final class ResultRemote extends FetchedResult { private int fetchSize; private SessionRemote session; private Transfer transfer; private int id; private final ResultColumn[] columns; - private Value[] currentRow; - private final int rowCount; - private int rowId, rowOffset; + private long rowCount; + private long rowOffset; private ArrayList result; private final Trace trace; @@ -40,14 +41,32 @@ public ResultRemote(SessionRemote session, Transfer transfer, int id, this.transfer = transfer; this.id = id; this.columns = new ResultColumn[columnCount]; - rowCount = transfer.readInt(); + rowCount = transfer.readRowCount(); for (int i = 0; i < columnCount; i++) { columns[i] = new ResultColumn(transfer); } rowId = -1; - result = New.arrayList(); this.fetchSize = fetchSize; - fetchRows(false); + if (rowCount >= 0) { + fetchSize = (int) Math.min(rowCount, fetchSize); + result = new ArrayList<>(fetchSize); + } else { + result = new ArrayList<>(); + } + synchronized (session) { + try { + if (fetchRows(fetchSize)) { + rowCount = result.size(); + } + } catch (IOException e) { + throw DbException.convertIOException(e, null); + } + } + } + + @Override + public boolean isLazy() { + return rowCount < 0L; } @Override @@ -71,28 +90,13 @@ public String getColumnName(int i) { } @Override - public int getColumnType(int i) { + public TypeInfo getColumnType(int i) { return columns[i].columnType; } @Override - public long getColumnPrecision(int i) { - return columns[i].precision; - } - - @Override - public int getColumnScale(int i) { - return columns[i].scale; - } - - @Override - public int getDisplaySize(int i) { - return columns[i].displaySize; - } - - @Override - public boolean isAutoIncrement(int i) { - return columns[i].autoIncrement; + public boolean isIdentity(int i) { + return columns[i].identity; } @Override @@ -102,8 +106,13 @@ public int getNullable(int i) { @Override public void reset() { + if (rowCount < 0L || 
rowOffset > 0L) { + throw DbException.get(ErrorCode.RESULT_SET_NOT_SCROLLABLE); + } rowId = -1; currentRow = null; + nextRow = null; + afterLast = false; if (session == null) { return; } @@ -119,40 +128,37 @@ public void reset() { } @Override - public Value[] currentRow() { - return currentRow; + public int getVisibleColumnCount() { + return columns.length; } @Override - public boolean next() { - if (rowId < rowCount) { - rowId++; - remapIfOld(); - if (rowId < rowCount) { - if (rowId - rowOffset >= result.size()) { - fetchRows(true); - } - currentRow = result.get(rowId - rowOffset); - return true; - } - currentRow = null; + public long getRowCount() { + if (rowCount < 0L) { + throw DbException.getUnsupportedException("Row count is unknown for lazy result."); } - return false; - } - - @Override - public int getRowId() { - return rowId; - } - - @Override - public int getVisibleColumnCount() { - return columns.length; + return rowCount; } @Override - public int getRowCount() { - return rowCount; + public boolean hasNext() { + if (afterLast) { + return false; + } + if (nextRow == null) { + if (rowCount < 0L || rowId < rowCount - 1) { + long nextRowId = rowId + 1; + if (session != null) { + remapIfOld(); + if (nextRowId - rowOffset >= result.size()) { + fetchAdditionalRows(); + } + } + int index = (int) (nextRowId - rowOffset); + nextRow = index < result.size() ? 
result.get(index) : null; + } + } + return nextRow != null; } private void sendClose() { @@ -180,9 +186,6 @@ public void close() { } private void remapIfOld() { - if (session == null) { - return; - } try { if (id <= session.getCurrentId() - SysProperties.SERVER_CACHED_OBJECTS / 2) { // object is too old - we need to map it to a new id @@ -199,44 +202,58 @@ private void remapIfOld() { } } - private void fetchRows(boolean sendFetch) { + private void fetchAdditionalRows() { synchronized (session) { session.checkClosed(); try { rowOffset += result.size(); result.clear(); - int fetch = Math.min(fetchSize, rowCount - rowOffset); - if (sendFetch) { - session.traceOperation("RESULT_FETCH_ROWS", id); - transfer.writeInt(SessionRemote.RESULT_FETCH_ROWS). - writeInt(id).writeInt(fetch); - session.done(transfer); - } - for (int r = 0; r < fetch; r++) { - boolean row = transfer.readBoolean(); - if (!row) { - break; - } - int len = columns.length; - Value[] values = new Value[len]; - for (int i = 0; i < len; i++) { - Value v = transfer.readValue(); - values[i] = v; - } - result.add(values); - } - if (rowOffset + result.size() >= rowCount) { - sendClose(); + int fetch = fetchSize; + if (rowCount >= 0) { + fetch = (int) Math.min(fetch, rowCount - rowOffset); + } else if (fetch == Integer.MAX_VALUE) { + fetch = SysProperties.SERVER_RESULT_SET_FETCH_SIZE; } + session.traceOperation("RESULT_FETCH_ROWS", id); + transfer.writeInt(SessionRemote.RESULT_FETCH_ROWS).writeInt(id).writeInt(fetch); + session.done(transfer); + fetchRows(fetch); } catch (IOException e) { throw DbException.convertIOException(e, null); } } } + private boolean fetchRows(int fetch) throws IOException { + int len = columns.length; + for (int r = 0; r < fetch; r++) { + switch (transfer.readByte()) { + case 1: { + Value[] values = new Value[len]; + for (int i = 0; i < len; i++) { + values[i] = transfer.readValue(columns[i].columnType); + } + result.add(values); + break; + } + case 0: + sendClose(); + return true; + 
case -1: + throw SessionRemote.readException(transfer); + default: + throw DbException.getInternalError(); + } + } + if (rowCount >= 0L && rowOffset + result.size() >= rowCount) { + sendClose(); + } + return false; + } + @Override public String toString() { - return "columns: " + columns.length + " rows: " + rowCount + " pos: " + rowId; + return "columns: " + columns.length + (rowCount < 0L ? " lazy" : " rows: " + rowCount) + " pos: " + rowId; } @Override @@ -250,8 +267,8 @@ public void setFetchSize(int fetchSize) { } @Override - public boolean needToClose() { - return true; + public boolean isClosed() { + return result == null; } } diff --git a/h2/src/main/org/h2/result/ResultTarget.java b/h2/src/main/org/h2/result/ResultTarget.java index ff8e146066..cca53de6cd 100644 --- a/h2/src/main/org/h2/result/ResultTarget.java +++ b/h2/src/main/org/h2/result/ResultTarget.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; @@ -17,13 +17,20 @@ public interface ResultTarget { * * @param values the values */ - void addRow(Value[] values); + void addRow(Value... values); /** * Get the number of rows. * * @return the number of rows */ - int getRowCount(); + long getRowCount(); + + /** + * A hint that sorting, offset and limit may be ignored by this result + * because they were applied during the query. This is useful for WITH TIES + * clause because result may contain tied rows above limit. 
+ */ + void limitsWereApplied(); } diff --git a/h2/src/main/org/h2/result/ResultTempTable.java b/h2/src/main/org/h2/result/ResultTempTable.java deleted file mode 100644 index 0a7cda6048..0000000000 --- a/h2/src/main/org/h2/result/ResultTempTable.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import java.util.ArrayList; -import java.util.Arrays; - -import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.index.Cursor; -import org.h2.index.Index; -import org.h2.index.IndexType; -import org.h2.schema.Schema; -import org.h2.table.Column; -import org.h2.table.IndexColumn; -import org.h2.table.Table; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * This class implements the temp table buffer for the LocalResult class. 
- */ -public class ResultTempTable implements ResultExternal { - - private static final String COLUMN_NAME = "DATA"; - private final boolean distinct; - private final SortOrder sort; - private Index index; - private Session session; - private Table table; - private Cursor resultCursor; - private int rowCount; - private int columnCount; - - private final ResultTempTable parent; - private boolean closed; - private int childCount; - private boolean containsLob; - - ResultTempTable(Session session, Expression[] expressions, boolean distinct, SortOrder sort) { - this.session = session; - this.distinct = distinct; - this.sort = sort; - this.columnCount = expressions.length; - Schema schema = session.getDatabase().getSchema(Constants.SCHEMA_MAIN); - CreateTableData data = new CreateTableData(); - for (int i = 0; i < expressions.length; i++) { - int type = expressions[i].getType(); - Column col = new Column(COLUMN_NAME + i, - type); - if (type == Value.CLOB || type == Value.BLOB) { - containsLob = true; - } - data.columns.add(col); - } - data.id = session.getDatabase().allocateObjectId(); - data.tableName = "TEMP_RESULT_SET_" + data.id; - data.temporary = true; - data.persistIndexes = false; - data.persistData = true; - data.create = true; - data.session = session; - table = schema.createTable(data); - if (sort != null || distinct) { - createIndex(); - } - parent = null; - } - - private ResultTempTable(ResultTempTable parent) { - this.parent = parent; - this.columnCount = parent.columnCount; - this.distinct = parent.distinct; - this.session = parent.session; - this.table = parent.table; - this.index = parent.index; - this.rowCount = parent.rowCount; - this.sort = parent.sort; - this.containsLob = parent.containsLob; - reset(); - } - - private void createIndex() { - IndexColumn[] indexCols = null; - if (sort != null) { - int[] colIndex = sort.getQueryColumnIndexes(); - indexCols = new IndexColumn[colIndex.length]; - for (int i = 0; i < colIndex.length; i++) { - IndexColumn 
indexColumn = new IndexColumn(); - indexColumn.column = table.getColumn(colIndex[i]); - indexColumn.sortType = sort.getSortTypes()[i]; - indexColumn.columnName = COLUMN_NAME + i; - indexCols[i] = indexColumn; - } - } else { - indexCols = new IndexColumn[columnCount]; - for (int i = 0; i < columnCount; i++) { - IndexColumn indexColumn = new IndexColumn(); - indexColumn.column = table.getColumn(i); - indexColumn.columnName = COLUMN_NAME + i; - indexCols[i] = indexColumn; - } - } - String indexName = table.getSchema().getUniqueIndexName(session, - table, Constants.PREFIX_INDEX); - int indexId = session.getDatabase().allocateObjectId(); - IndexType indexType = IndexType.createNonUnique(true); - index = table.addIndex(session, indexName, indexId, indexCols, - indexType, true, null); - } - - @Override - public synchronized ResultExternal createShallowCopy() { - if (parent != null) { - return parent.createShallowCopy(); - } - if (closed) { - return null; - } - childCount++; - return new ResultTempTable(this); - } - - @Override - public int removeRow(Value[] values) { - Row row = convertToRow(values); - Cursor cursor = find(row); - if (cursor != null) { - row = cursor.get(); - table.removeRow(session, row); - rowCount--; - } - return rowCount; - } - - @Override - public boolean contains(Value[] values) { - return find(convertToRow(values)) != null; - } - - @Override - public int addRow(Value[] values) { - Row row = convertToRow(values); - if (distinct) { - Cursor cursor = find(row); - if (cursor == null) { - table.addRow(session, row); - rowCount++; - } - } else { - table.addRow(session, row); - rowCount++; - } - return rowCount; - } - - @Override - public int addRows(ArrayList rows) { - // speeds up inserting, but not really needed: - if (sort != null) { - sort.sort(rows); - } - for (Value[] values : rows) { - addRow(values); - } - return rowCount; - } - - private synchronized void closeChild() { - if (--childCount == 0 && closed) { - dropTable(); - } - } - - @Override - 
public synchronized void close() { - if (closed) { - return; - } - closed = true; - if (parent != null) { - parent.closeChild(); - } else { - if (childCount == 0) { - dropTable(); - } - } - } - - private void dropTable() { - if (table == null) { - return; - } - if (containsLob) { - // contains BLOB or CLOB: can not truncate now, - // otherwise the BLOB and CLOB entries are removed - return; - } - try { - Database database = session.getDatabase(); - // Need to lock because not all of the code-paths - // that reach here have already taken this lock, - // notably via the close() paths. - synchronized (session) { - synchronized (database) { - table.truncate(session); - } - } - // This session may not lock the sys table (except if it already has - // locked it) because it must be committed immediately, otherwise - // other threads can not access the sys table. If the table is not - // removed now, it will be when the database is opened the next - // time. (the table is truncated, so this is just one record) - if (!database.isSysTableLocked()) { - Session sysSession = database.getSystemSession(); - table.removeChildrenAndResources(sysSession); - if (index != null) { - // need to explicitly do this, - // as it's not registered in the system session - session.removeLocalTempTableIndex(index); - } - // the transaction must be committed immediately - // TODO this synchronization cascade is very ugly - synchronized (session) { - synchronized (sysSession) { - synchronized (database) { - sysSession.commit(false); - } - } - } - } - } finally { - table = null; - } - } - - @Override - public void done() { - // nothing to do - } - - @Override - public Value[] next() { - if (resultCursor == null) { - Index idx; - if (distinct || sort != null) { - idx = index; - } else { - idx = table.getScanIndex(session); - } - if (session.getDatabase().getMvStore() != null) { - // sometimes the transaction is already committed, - // in which case we can't use the session - if 
(idx.getRowCount(session) == 0 && rowCount > 0) { - // this means querying is not transactional - resultCursor = idx.find((Session) null, null, null); - } else { - // the transaction is still open - resultCursor = idx.find(session, null, null); - } - } else { - resultCursor = idx.find(session, null, null); - } - } - if (!resultCursor.next()) { - return null; - } - Row row = resultCursor.get(); - return row.getValueList(); - } - - @Override - public void reset() { - resultCursor = null; - } - - private Row convertToRow(Value[] values) { - if (values.length < columnCount) { - Value[] v2 = Arrays.copyOf(values, columnCount); - for (int i = values.length; i < columnCount; i++) { - v2[i] = ValueNull.INSTANCE; - } - values = v2; - } - return new Row(values, Row.MEMORY_CALCULATE); - } - - private Cursor find(Row row) { - if (index == null) { - // for the case "in(select ...)", the query might - // use an optimization and not create the index - // up front - createIndex(); - } - Cursor cursor = index.find(session, row, row); - while (cursor.next()) { - SearchRow found = cursor.getSearchRow(); - boolean ok = true; - Database db = session.getDatabase(); - for (int i = 0; i < row.getColumnCount(); i++) { - if (!db.areEqual(row.getValue(i), found.getValue(i))) { - ok = false; - break; - } - } - if (ok) { - return cursor; - } - } - return null; - } - -} - diff --git a/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java b/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java new file mode 100644 index 0000000000..62a8427285 --- /dev/null +++ b/h2/src/main/org/h2/result/ResultWithGeneratedKeys.java @@ -0,0 +1,72 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +/** + * Result of update command with optional generated keys. 
+ */ +public class ResultWithGeneratedKeys { + /** + * Result of update command with generated keys; + */ + public static final class WithKeys extends ResultWithGeneratedKeys { + private final ResultInterface generatedKeys; + + /** + * Creates a result with update count and generated keys. + * + * @param updateCount + * update count + * @param generatedKeys + * generated keys + */ + public WithKeys(long updateCount, ResultInterface generatedKeys) { + super(updateCount); + this.generatedKeys = generatedKeys; + } + + @Override + public ResultInterface getGeneratedKeys() { + return generatedKeys; + } + } + + /** + * Returns a result with only update count. + * + * @param updateCount + * update count + * @return the result. + */ + public static ResultWithGeneratedKeys of(long updateCount) { + return new ResultWithGeneratedKeys(updateCount); + } + + private final long updateCount; + + ResultWithGeneratedKeys(long updateCount) { + this.updateCount = updateCount; + } + + /** + * Returns generated keys, or {@code null}. + * + * @return generated keys, or {@code null} + */ + public ResultInterface getGeneratedKeys() { + return null; + } + + /** + * Returns update count. + * + * @return update count + */ + public long getUpdateCount() { + return updateCount; + } + +} diff --git a/h2/src/main/org/h2/result/ResultWithPaddedStrings.java b/h2/src/main/org/h2/result/ResultWithPaddedStrings.java new file mode 100644 index 0000000000..d195f91504 --- /dev/null +++ b/h2/src/main/org/h2/result/ResultWithPaddedStrings.java @@ -0,0 +1,193 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import java.util.Arrays; +import org.h2.engine.Session; +import org.h2.util.MathUtils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueVarchar; + +/** + * Result with padded fixed length strings. 
+ */ +public class ResultWithPaddedStrings implements ResultInterface { + + private final ResultInterface source; + + /** + * Returns wrapped result if necessary, or original result if it does not + * contain visible CHAR columns. + * + * @param source + * source result + * @return wrapped result or original result + */ + public static ResultInterface get(ResultInterface source) { + int count = source.getVisibleColumnCount(); + for (int i = 0; i < count; i++) { + if (source.getColumnType(i).getValueType() == Value.CHAR) { + return new ResultWithPaddedStrings(source); + } + } + return source; + } + + /** + * Creates new instance of result. + * + * @param source + * the source result + */ + private ResultWithPaddedStrings(ResultInterface source) { + this.source = source; + } + + @Override + public void reset() { + source.reset(); + } + + @Override + public Value[] currentRow() { + int count = source.getVisibleColumnCount(); + Value[] row = Arrays.copyOf(source.currentRow(), count); + for (int i = 0; i < count; i++) { + TypeInfo type = source.getColumnType(i); + if (type.getValueType() == Value.CHAR) { + long precision = type.getPrecision(); + if (precision == Integer.MAX_VALUE) { + // CHAR is CHAR(1) + precision = 1; + } + String s = row[i].getString(); + if (s != null && s.length() < precision) { + /* + * Use ValueString to avoid truncation of spaces. There is + * no difference between ValueStringFixed and ValueString + * for JDBC layer anyway. 
+ */ + row[i] = ValueVarchar.get(rightPadWithSpaces(s, MathUtils.convertLongToInt(precision))); + } + } + } + return row; + } + + private static String rightPadWithSpaces(String s, int length) { + int used = s.length(); + if (length <= used) { + return s; + } + char[] res = new char[length]; + s.getChars(0, used, res, 0); + Arrays.fill(res, used, length, ' '); + return new String(res); + } + + @Override + public boolean next() { + return source.next(); + } + + @Override + public long getRowId() { + return source.getRowId(); + } + + @Override + public boolean isAfterLast() { + return source.isAfterLast(); + } + + @Override + public int getVisibleColumnCount() { + return source.getVisibleColumnCount(); + } + + @Override + public long getRowCount() { + return source.getRowCount(); + } + + @Override + public boolean hasNext() { + return source.hasNext(); + } + + @Override + public boolean needToClose() { + return source.needToClose(); + } + + @Override + public void close() { + source.close(); + } + + @Override + public String getAlias(int i) { + return source.getAlias(i); + } + + @Override + public String getSchemaName(int i) { + return source.getSchemaName(i); + } + + @Override + public String getTableName(int i) { + return source.getTableName(i); + } + + @Override + public String getColumnName(int i) { + return source.getColumnName(i); + } + + @Override + public TypeInfo getColumnType(int i) { + return source.getColumnType(i); + } + + @Override + public boolean isIdentity(int i) { + return source.isIdentity(i); + } + + @Override + public int getNullable(int i) { + return source.getNullable(i); + } + + @Override + public void setFetchSize(int fetchSize) { + source.setFetchSize(fetchSize); + } + + @Override + public int getFetchSize() { + return source.getFetchSize(); + } + + @Override + public boolean isLazy() { + return source.isLazy(); + } + + @Override + public boolean isClosed() { + return source.isClosed(); + } + + @Override + public ResultInterface 
createShallowCopy(Session targetSession) { + ResultInterface copy = source.createShallowCopy(targetSession); + return copy != null ? new ResultWithPaddedStrings(copy) : null; + } + +} diff --git a/h2/src/main/org/h2/result/Row.java b/h2/src/main/org/h2/result/Row.java index 086bb58814..29dbc80417 100644 --- a/h2/src/main/org/h2/result/Row.java +++ b/h2/src/main/org/h2/result/Row.java @@ -1,179 +1,77 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; -import org.h2.engine.Constants; -import org.h2.store.Data; -import org.h2.util.StatementBuilder; +import java.util.Arrays; + import org.h2.value.Value; -import org.h2.value.ValueLong; /** * Represents a row in a table. */ -public class Row implements SearchRow { - - public static final int MEMORY_CALCULATE = -1; - public static final Row[] EMPTY_ARRAY = {}; - - private long key; - private final Value[] data; - private int memory; - private int version; - private boolean deleted; - private int sessionId; - - public Row(Value[] data, int memory) { - this.data = data; - this.memory = memory; - } +public abstract class Row extends SearchRow { /** - * Get a copy of the row that is distinct from (not equal to) this row. - * This is used for FOR UPDATE to allow pseudo-updating a row. + * Creates a new row. 
* - * @return a new row with the same data + * @param data values of columns, or null + * @param memory used memory + * @return the allocated row */ - public Row getCopy() { - Value[] d2 = new Value[data.length]; - System.arraycopy(data, 0, d2, 0, data.length); - Row r2 = new Row(d2, memory); - r2.key = key; - r2.version = version + 1; - r2.sessionId = sessionId; - return r2; - } - - @Override - public void setKeyAndVersion(SearchRow row) { - setKey(row.getKey()); - setVersion(row.getVersion()); - } - - @Override - public int getVersion() { - return version; - } - - public void setVersion(int version) { - this.version = version; - } - - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - - @Override - public Value getValue(int i) { - return i == -1 ? ValueLong.get(key) : data[i]; + public static Row get(Value[] data, int memory) { + return new DefaultRow(data, memory); } /** - * Get the number of bytes required for the data. + * Creates a new row with the specified key. 
* - * @param dummy the template buffer - * @return the number of bytes + * @param data values of columns, or null + * @param memory used memory + * @param key the key + * @return the allocated row */ - public int getByteCount(Data dummy) { - int size = 0; - for (Value v : data) { - size += dummy.getValueLen(v); - } - return size; - } - - @Override - public void setValue(int i, Value v) { - if (i == -1) { - this.key = v.getLong(); - } else { - data[i] = v; - } - } - - public boolean isEmpty() { - return data == null; - } - - @Override - public int getColumnCount() { - return data.length; - } - - @Override - public int getMemory() { - if (memory != MEMORY_CALCULATE) { - return memory; - } - int m = Constants.MEMORY_ROW; - if (data != null) { - int len = data.length; - m += Constants.MEMORY_OBJECT + len * Constants.MEMORY_POINTER; - for (int i = 0; i < len; i++) { - Value v = data[i]; - if (v != null) { - m += v.getMemory(); - } - } - } - this.memory = m; - return m; - } - - @Override - public String toString() { - StatementBuilder buff = new StatementBuilder("( /* key:"); - buff.append(getKey()); - if (version != 0) { - buff.append(" v:" + version); - } - if (isDeleted()) { - buff.append(" deleted"); - } - buff.append(" */ "); - if (data != null) { - for (Value v : data) { - buff.appendExceptFirst(", "); - buff.append(v == null ? "null" : v.getTraceSQL()); - } - } - return buff.append(')').toString(); - } - - public void setDeleted(boolean deleted) { - this.deleted = deleted; - } - - public void setSessionId(int sessionId) { - this.sessionId = sessionId; - } - - public int getSessionId() { - return sessionId; + public static Row get(Value[] data, int memory, long key) { + Row r = new DefaultRow(data, memory); + r.setKey(key); + return r; } /** - * This record has been committed. The session id is reset. + * Get values. 
+ * + * @return values */ - public void commit() { - this.sessionId = 0; - } + public abstract Value[] getValueList(); - public boolean isDeleted() { - return deleted; + /** + * Check whether values of this row are equal to values of other row. + * + * @param other + * the other row + * @return {@code true} if values are equal, + * {@code false} otherwise + */ + public boolean hasSameValues(Row other) { + return Arrays.equals(getValueList(), other.getValueList()); } - public Value[] getValueList() { - return data; + /** + * Check whether this row and the specified row share the same underlying + * data with values. This method must return {@code false} when values are + * not equal and may return either {@code true} or {@code false} when they + * are equal. This method may be used only for optimizations and should not + * perform any slow checks, such as equality checks for all pairs of values. + * + * @param other + * the other row + * @return {@code true} if rows share the same underlying data, + * {@code false} otherwise or when unknown + */ + public boolean hasSharedData(Row other) { + return false; } } diff --git a/h2/src/main/org/h2/result/RowFactory.java b/h2/src/main/org/h2/result/RowFactory.java new file mode 100644 index 0000000000..0a257fd7c1 --- /dev/null +++ b/h2/src/main/org/h2/result/RowFactory.java @@ -0,0 +1,207 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.engine.CastDataProvider; +import org.h2.mvstore.db.RowDataType; +import org.h2.store.DataHandler; +import org.h2.table.IndexColumn; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; +import org.h2.value.Typed; +import org.h2.value.Value; + +/** + * Creates rows. 
+ * + * @author Sergi Vladykin + * @author Andrei Tokar + */ +public abstract class RowFactory { + + private static final class Holder { + static final RowFactory EFFECTIVE = DefaultRowFactory.INSTANCE; + } + + public static DefaultRowFactory getDefaultRowFactory() { + return DefaultRowFactory.INSTANCE; + } + + public static RowFactory getRowFactory() { + return Holder.EFFECTIVE; + } + + /** + * Create a new row factory. + * + * @param provider the cast provider + * @param compareMode the compare mode + * @param handler the data handler + * @param columns the list of columns + * @param indexColumns the list of index columns + * @param storeKeys whether row keys are stored + * @return the (possibly new) row factory + */ + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + Typed[] columns, IndexColumn[] indexColumns, boolean storeKeys) { + return this; + } + + /** + * Create a new row. + * + * @param data the values + * @param memory the estimated memory usage in bytes + * @return the created row + */ + public abstract Row createRow(Value[] data, int memory); + + /** + * Create new row. + * + * @return the created row + */ + public abstract SearchRow createRow(); + + public abstract RowDataType getRowDataType(); + + public abstract int[] getIndexes(); + + public abstract TypeInfo[] getColumnTypes(); + + public abstract int getColumnCount(); + + public abstract boolean getStoreKeys(); + + + /** + * Default implementation of row factory. 
+ */ + public static final class DefaultRowFactory extends RowFactory { + private final RowDataType dataType; + private final int columnCount; + private final int[] indexes; + private TypeInfo[] columnTypes; + private final int[] map; + + public static final DefaultRowFactory INSTANCE = new DefaultRowFactory(); + + DefaultRowFactory() { + this(new RowDataType(null, CompareMode.getInstance(null, 0), null, null, null, 0, true), 0, null, null); + } + + private DefaultRowFactory(RowDataType dataType, int columnCount, int[] indexes, TypeInfo[] columnTypes) { + this.dataType = dataType; + this.columnCount = columnCount; + this.indexes = indexes; + if (indexes == null) { + map = null; + } else { + map = new int[columnCount]; + for (int i = 0, l = indexes.length; i < l;) { + map[indexes[i]] = ++i; + } + } + this.columnTypes = columnTypes; + } + + @Override + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + Typed[] columns, IndexColumn[] indexColumns, boolean storeKeys) { + int[] indexes = null; + int[] sortTypes = null; + TypeInfo[] columnTypes = null; + int columnCount = 0; + if (columns != null) { + columnCount = columns.length; + if (indexColumns == null) { + sortTypes = new int[columnCount]; + for (int i = 0; i < columnCount; i++) { + sortTypes[i] = SortOrder.ASCENDING; + } + } else { + int len = indexColumns.length; + indexes = new int[len]; + sortTypes = new int[len]; + for (int i = 0; i < len; i++) { + IndexColumn indexColumn = indexColumns[i]; + indexes[i] = indexColumn.column.getColumnId(); + sortTypes[i] = indexColumn.sortType; + } + } + columnTypes = new TypeInfo[columnCount]; + for (int i = 0; i < columnCount; i++) { + columnTypes[i] = columns[i].getType(); + } + } + return createRowFactory(provider, compareMode, handler, sortTypes, indexes, columnTypes, columnCount, + storeKeys); + } + + /** + * Create a new row factory. 
+ * + * @param provider the cast provider + * @param compareMode the compare mode + * @param handler the data handler + * @param sortTypes the sort types + * @param indexes the list of indexed columns + * @param columnTypes the list of column data type information + * @param columnCount the number of columns + * @param storeKeys whether row keys are stored + * @return the (possibly new) row factory + */ + public RowFactory createRowFactory(CastDataProvider provider, CompareMode compareMode, DataHandler handler, + int[] sortTypes, int[] indexes, TypeInfo[] columnTypes, int columnCount, boolean storeKeys) { + RowDataType rowDataType = new RowDataType(provider, compareMode, handler, sortTypes, indexes, columnCount, + storeKeys); + RowFactory rowFactory = new DefaultRowFactory(rowDataType, columnCount, indexes, columnTypes); + rowDataType.setRowFactory(rowFactory); + return rowFactory; + } + + @Override + public Row createRow(Value[] data, int memory) { + return new DefaultRow(data, memory); + } + + @Override + public SearchRow createRow() { + if (indexes == null) { + return new DefaultRow(columnCount); + } else if (indexes.length == 1) { + return new SimpleRowValue(columnCount, indexes[0]); + } else { + return new Sparse(columnCount, indexes.length, map); + } + } + + @Override + public RowDataType getRowDataType() { + return dataType; + } + + @Override + public int[] getIndexes() { + return indexes; + } + + @Override + public TypeInfo[] getColumnTypes() { + return columnTypes; + } + + @Override + public int getColumnCount() { + return columnCount; + } + + @Override + public boolean getStoreKeys() { + return dataType.isStoreKeys(); + } + } +} diff --git a/h2/src/main/org/h2/result/RowList.java b/h2/src/main/org/h2/result/RowList.java deleted file mode 100644 index cdfc851bb6..0000000000 --- a/h2/src/main/org/h2/result/RowList.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import java.util.ArrayList; -import org.h2.engine.Constants; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.store.Data; -import org.h2.store.FileStore; -import org.h2.util.New; -import org.h2.value.Value; - -/** - * A list of rows. If the list grows too large, it is buffered to disk - * automatically. - */ -public class RowList { - - private final Session session; - private final ArrayList list = New.arrayList(); - private int size; - private int index, listIndex; - private FileStore file; - private Data rowBuff; - private ArrayList lobs; - private final int maxMemory; - private int memory; - private boolean written; - private boolean readUncached; - - /** - * Construct a new row list for this session. - * - * @param session the session - */ - public RowList(Session session) { - this.session = session; - if (session.getDatabase().isPersistent()) { - maxMemory = session.getDatabase().getMaxOperationMemory(); - } else { - maxMemory = 0; - } - } - - private void writeRow(Data buff, Row r) { - buff.checkCapacity(1 + Data.LENGTH_INT * 8); - buff.writeByte((byte) 1); - buff.writeInt(r.getMemory()); - int columnCount = r.getColumnCount(); - buff.writeInt(columnCount); - buff.writeLong(r.getKey()); - buff.writeInt(r.getVersion()); - buff.writeInt(r.isDeleted() ? 
1 : 0); - buff.writeInt(r.getSessionId()); - for (int i = 0; i < columnCount; i++) { - Value v = r.getValue(i); - buff.checkCapacity(1); - if (v == null) { - buff.writeByte((byte) 0); - } else { - buff.writeByte((byte) 1); - if (v.getType() == Value.CLOB || v.getType() == Value.BLOB) { - // need to keep a reference to temporary lobs, - // otherwise the temp file is deleted - if (v.getSmall() == null && v.getTableId() == 0) { - if (lobs == null) { - lobs = New.arrayList(); - } - // need to create a copy, otherwise, - // if stored multiple times, it may be renamed - // and then not found - v = v.copyToTemp(); - lobs.add(v); - } - } - buff.checkCapacity(buff.getValueLen(v)); - buff.writeValue(v); - } - } - } - - private void writeAllRows() { - if (file == null) { - Database db = session.getDatabase(); - String fileName = db.createTempFile(); - file = db.openFile(fileName, "rw", false); - file.setCheckedWriting(false); - file.seek(FileStore.HEADER_LENGTH); - rowBuff = Data.create(db, Constants.DEFAULT_PAGE_SIZE); - file.seek(FileStore.HEADER_LENGTH); - } - Data buff = rowBuff; - initBuffer(buff); - for (int i = 0, size = list.size(); i < size; i++) { - if (i > 0 && buff.length() > Constants.IO_BUFFER_SIZE) { - flushBuffer(buff); - initBuffer(buff); - } - Row r = list.get(i); - writeRow(buff, r); - } - flushBuffer(buff); - file.autoDelete(); - list.clear(); - memory = 0; - } - - private static void initBuffer(Data buff) { - buff.reset(); - buff.writeInt(0); - } - - private void flushBuffer(Data buff) { - buff.checkCapacity(1); - buff.writeByte((byte) 0); - buff.fillAligned(); - buff.setInt(0, buff.length() / Constants.FILE_BLOCK_SIZE); - file.write(buff.getBytes(), 0, buff.length()); - } - - /** - * Add a row to the list. 
- * - * @param r the row to add - */ - public void add(Row r) { - list.add(r); - memory += r.getMemory() + Constants.MEMORY_POINTER; - if (maxMemory > 0 && memory > maxMemory) { - writeAllRows(); - } - size++; - } - - /** - * Remove all rows from the list. - */ - public void reset() { - index = 0; - if (file != null) { - listIndex = 0; - if (!written) { - writeAllRows(); - written = true; - } - list.clear(); - file.seek(FileStore.HEADER_LENGTH); - } - } - - /** - * Check if there are more rows in this list. - * - * @return true it there are more rows - */ - public boolean hasNext() { - return index < size; - } - - private Row readRow(Data buff) { - if (buff.readByte() == 0) { - return null; - } - int mem = buff.readInt(); - int columnCount = buff.readInt(); - long key = buff.readLong(); - int version = buff.readInt(); - if (readUncached) { - key = 0; - } - boolean deleted = buff.readInt() == 1; - int sessionId = buff.readInt(); - Value[] values = new Value[columnCount]; - for (int i = 0; i < columnCount; i++) { - Value v; - if (buff.readByte() == 0) { - v = null; - } else { - v = buff.readValue(); - if (v.isLinked()) { - // the table id is 0 if it was linked when writing - // a temporary entry - if (v.getTableId() == 0) { - session.unlinkAtCommit(v); - } - } - } - values[i] = v; - } - Row row = new Row(values, mem); - row.setKey(key); - row.setVersion(version); - row.setDeleted(deleted); - row.setSessionId(sessionId); - return row; - } - - /** - * Get the next row from the list. 
- * - * @return the next row - */ - public Row next() { - Row r; - if (file == null) { - r = list.get(index++); - } else { - if (listIndex >= list.size()) { - list.clear(); - listIndex = 0; - Data buff = rowBuff; - buff.reset(); - int min = Constants.FILE_BLOCK_SIZE; - file.readFully(buff.getBytes(), 0, min); - int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; - buff.checkCapacity(len); - if (len - min > 0) { - file.readFully(buff.getBytes(), min, len - min); - } - while (true) { - r = readRow(buff); - if (r == null) { - break; - } - list.add(r); - } - } - index++; - r = list.get(listIndex++); - } - return r; - } - - /** - * Get the number of rows in this list. - * - * @return the number of rows - */ - public int size() { - return size; - } - - /** - * Do not use the cache. - */ - public void invalidateCache() { - readUncached = true; - } - - /** - * Close the result list and delete the temporary file. - */ - public void close() { - if (file != null) { - file.autoDelete(); - file.closeAndDeleteSilently(); - file = null; - rowBuff = null; - } - } - -} diff --git a/h2/src/main/org/h2/result/SearchRow.java b/h2/src/main/org/h2/result/SearchRow.java index e4763866a7..80babceb2a 100644 --- a/h2/src/main/org/h2/result/SearchRow.java +++ b/h2/src/main/org/h2/result/SearchRow.java @@ -1,29 +1,58 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; +import org.h2.engine.CastDataProvider; +import org.h2.value.CompareMode; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueNull; /** - * The interface for rows stored in a table, and for partial rows stored in the + * The base class for rows stored in a table, and for partial rows stored in the * index. 
*/ -public interface SearchRow { +public abstract class SearchRow extends Value { /** - * An empty array of SearchRow objects. + * Index of a virtual "_ROWID_" column within a row or a table */ - SearchRow[] EMPTY_ARRAY = {}; + public static final int ROWID_INDEX = -1; + + /** + * If the key is this value, then the key is considered equal to all other + * keys, when comparing. + */ + public static long MATCH_ALL_ROW_KEY = Long.MIN_VALUE + 1; + + /** + * The constant that means "memory usage is unknown and needs to be calculated first". + */ + public static final int MEMORY_CALCULATE = -1; + + /** + * The row key. + */ + protected long key; /** * Get the column count. * * @return the column count */ - int getColumnCount(); + public abstract int getColumnCount(); + + /** + * Determine if specified column contains NULL + * @param index column index + * @return true if NULL + */ + public boolean isNull(int index) { + return getValue(index) == ValueNull.INSTANCE; + } /** * Get the value for the column @@ -31,7 +60,7 @@ public interface SearchRow { * @param index the column number (starting with 0) * @return the value */ - Value getValue(int index); + public abstract Value getValue(int index); /** * Set the value for given column @@ -39,41 +68,79 @@ public interface SearchRow { * @param index the column number (starting with 0) * @param v the new value */ - void setValue(int index, Value v); - - /** - * Set the position and version to match another row. - * - * @param old the other row. - */ - void setKeyAndVersion(SearchRow old); - - /** - * Get the version of the row. - * - * @return the version - */ - int getVersion(); + public abstract void setValue(int index, Value v); /** * Set the unique key of the row. * * @param key the key */ - void setKey(long key); + public void setKey(long key) { + this.key = key; + } /** * Get the unique key of the row. 
* * @return the key */ - long getKey(); + public long getKey() { + return key; + } /** * Get the estimated memory used for this row, in bytes. * * @return the memory */ - int getMemory(); + @Override + public abstract int getMemory(); + + /** + * Copy all relevant values from the source to this row. + * @param source source of column values + */ + public abstract void copyFrom(SearchRow source); + + @Override + public TypeInfo getType() { + return TypeInfo.TYPE_ROW_EMPTY; + } + + @Override + public int getValueType() { + return Value.ROW; + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + builder.append("ROW ("); + for (int index = 0, count = getColumnCount(); index < count; index++) { + if (index != 0) { + builder.append(", "); + } + getValue(index).getSQL(builder, sqlFlags); + } + return builder.append(')'); + } + + @Override + public String getString() { + return getTraceSQL(); + } + + @Override + public int hashCode() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object other) { + throw new UnsupportedOperationException(); + } + @Override + public int compareTypeSafe(Value v, CompareMode mode, CastDataProvider provider) { + throw new UnsupportedOperationException(); + } } diff --git a/h2/src/main/org/h2/result/SimpleResult.java b/h2/src/main/org/h2/result/SimpleResult.java new file mode 100644 index 0000000000..c47a315d61 --- /dev/null +++ b/h2/src/main/org/h2/result/SimpleResult.java @@ -0,0 +1,302 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import java.sql.ResultSetMetaData; +import java.util.ArrayList; +import java.util.Comparator; + +import org.h2.engine.Session; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Simple in-memory result. 
+ */ +public class SimpleResult implements ResultInterface, ResultTarget { + + /** + * Column info for the simple result. + */ + static final class Column { + /** Column alias. */ + final String alias; + + /** Column name. */ + final String columnName; + + /** Column type. */ + final TypeInfo columnType; + + Column(String alias, String columnName, TypeInfo columnType) { + if (alias == null || columnName == null) { + throw new NullPointerException(); + } + this.alias = alias; + this.columnName = columnName; + this.columnType = columnType; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + alias.hashCode(); + result = prime * result + columnName.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || getClass() != obj.getClass()) + return false; + Column other = (Column) obj; + return alias.equals(other.alias) && columnName.equals(other.columnName); + } + + @Override + public String toString() { + if (alias.equals(columnName)) { + return columnName; + } + return columnName + ' ' + alias; + } + + } + + private final ArrayList columns; + + private final ArrayList rows; + + private final String schemaName, tableName; + + private int rowId; + + /** + * Creates new instance of simple result. + */ + public SimpleResult() { + this("", ""); + } + + /** + * Creates new instance of simple result. 
+ * + * @param schemaName + * the name of the schema + * @param tableName + * the name of the table + */ + public SimpleResult(String schemaName, String tableName) { + this.columns = Utils.newSmallArrayList(); + this.rows = new ArrayList<>(); + this.schemaName = schemaName; + this.tableName = tableName; + this.rowId = -1; + } + + private SimpleResult(ArrayList columns, ArrayList rows, String schemaName, String tableName) { + this.columns = columns; + this.rows = rows; + this.schemaName = schemaName; + this.tableName = tableName; + this.rowId = -1; + } + + /** + * Add column to the result. + * + * @param alias + * Column's alias. + * @param columnName + * Column's name. + * @param columnType + * Column's value type. + * @param columnPrecision + * Column's precision. + * @param columnScale + * Column's scale. + */ + public void addColumn(String alias, String columnName, int columnType, long columnPrecision, int columnScale) { + addColumn(alias, columnName, TypeInfo.getTypeInfo(columnType, columnPrecision, columnScale, null)); + } + + /** + * Add column to the result. + * + * @param columnName + * Column's name. + * @param columnType + * Column's type. + */ + public void addColumn(String columnName, TypeInfo columnType) { + addColumn(new Column(columnName, columnName, columnType)); + } + + /** + * Add column to the result. + * + * @param alias + * Column's alias. + * @param columnName + * Column's name. + * @param columnType + * Column's type. + */ + public void addColumn(String alias, String columnName, TypeInfo columnType) { + addColumn(new Column(alias, columnName, columnType)); + } + + /** + * Add column to the result. + * + * @param column + * Column info. + */ + void addColumn(Column column) { + assert rows.isEmpty(); + columns.add(column); + } + + @Override + public void addRow(Value... 
values) { + assert values.length == columns.size(); + rows.add(values); + } + + @Override + public void reset() { + rowId = -1; + } + + @Override + public Value[] currentRow() { + return rows.get(rowId); + } + + @Override + public boolean next() { + int count = rows.size(); + if (rowId < count) { + return ++rowId < count; + } + return false; + } + + @Override + public long getRowId() { + return rowId; + } + + @Override + public boolean isAfterLast() { + return rowId >= rows.size(); + } + + @Override + public int getVisibleColumnCount() { + return columns.size(); + } + + @Override + public long getRowCount() { + return rows.size(); + } + + @Override + public boolean hasNext() { + return rowId < rows.size() - 1; + } + + @Override + public boolean needToClose() { + return false; + } + + @Override + public void close() { + // Do nothing for now + } + + @Override + public String getAlias(int i) { + return columns.get(i).alias; + } + + @Override + public String getSchemaName(int i) { + return schemaName; + } + + @Override + public String getTableName(int i) { + return tableName; + } + + @Override + public String getColumnName(int i) { + return columns.get(i).columnName; + } + + @Override + public TypeInfo getColumnType(int i) { + return columns.get(i).columnType; + } + + @Override + public boolean isIdentity(int i) { + return false; + } + + @Override + public int getNullable(int i) { + return ResultSetMetaData.columnNullableUnknown; + } + + @Override + public void setFetchSize(int fetchSize) { + // Ignored + } + + @Override + public int getFetchSize() { + return 1; + } + + @Override + public boolean isLazy() { + return false; + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public SimpleResult createShallowCopy(Session targetSession) { + return new SimpleResult(columns, rows, schemaName, tableName); + } + + @Override + public void limitsWereApplied() { + // Nothing to do + } + + /** + * Sort rows in the list. 
+ * + * @param comparator + * the comparator + */ + public void sortRows(Comparator comparator) { + rows.sort(comparator); + } + +} diff --git a/h2/src/main/org/h2/result/SimpleRow.java b/h2/src/main/org/h2/result/SimpleRow.java deleted file mode 100644 index 703e6b42cc..0000000000 --- a/h2/src/main/org/h2/result/SimpleRow.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.result; - -import org.h2.engine.Constants; -import org.h2.util.StatementBuilder; -import org.h2.value.Value; - -/** - * Represents a simple row without state. - */ -public class SimpleRow implements SearchRow { - - private long key; - private int version; - private final Value[] data; - private int memory; - - public SimpleRow(Value[] data) { - this.data = data; - } - - @Override - public int getColumnCount() { - return data.length; - } - - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - - @Override - public void setKeyAndVersion(SearchRow row) { - key = row.getKey(); - version = row.getVersion(); - } - - @Override - public int getVersion() { - return version; - } - - @Override - public void setValue(int i, Value v) { - data[i] = v; - } - - @Override - public Value getValue(int i) { - return data[i]; - } - - @Override - public String toString() { - StatementBuilder buff = new StatementBuilder("( /* key:"); - buff.append(getKey()); - if (version != 0) { - buff.append(" v:" + version); - } - buff.append(" */ "); - for (Value v : data) { - buff.appendExceptFirst(", "); - buff.append(v == null ? 
"null" : v.getTraceSQL()); - } - return buff.append(')').toString(); - } - - @Override - public int getMemory() { - if (memory == 0) { - int len = data.length; - memory = Constants.MEMORY_OBJECT + len * Constants.MEMORY_POINTER; - for (int i = 0; i < len; i++) { - Value v = data[i]; - if (v != null) { - memory += v.getMemory(); - } - } - } - return memory; - } - -} diff --git a/h2/src/main/org/h2/result/SimpleRowValue.java b/h2/src/main/org/h2/result/SimpleRowValue.java index 7794ac28c1..84181cde70 100644 --- a/h2/src/main/org/h2/result/SimpleRowValue.java +++ b/h2/src/main/org/h2/result/SimpleRowValue.java @@ -1,20 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; import org.h2.engine.Constants; import org.h2.value.Value; +import org.h2.value.ValueBigint; +import org.h2.value.ValueNull; /** * A simple row that contains data for only one column. 
*/ -public class SimpleRowValue implements SearchRow { +public class SimpleRowValue extends SearchRow { - private long key; - private int version; private int index; private final int virtualColumnCount; private Value data; @@ -23,15 +23,9 @@ public SimpleRowValue(int columnCount) { this.virtualColumnCount = columnCount; } - @Override - public void setKeyAndVersion(SearchRow row) { - key = row.getKey(); - version = row.getVersion(); - } - - @Override - public int getVersion() { - return version; + public SimpleRowValue(int columnCount, int index) { + this.virtualColumnCount = columnCount; + this.index = index; } @Override @@ -39,23 +33,19 @@ public int getColumnCount() { return virtualColumnCount; } - @Override - public long getKey() { - return key; - } - - @Override - public void setKey(long key) { - this.key = key; - } - @Override public Value getValue(int idx) { + if (idx == ROWID_INDEX) { + return ValueBigint.get(getKey()); + } return idx == index ? data : null; } @Override public void setValue(int idx, Value v) { + if (idx == ROWID_INDEX) { + setKey(v.getLong()); + } index = idx; data = v; } @@ -68,7 +58,17 @@ public String toString() { @Override public int getMemory() { - return Constants.MEMORY_OBJECT + (data == null ? 0 : data.getMemory()); + return Constants.MEMORY_ROW + (data == null ? 0 : data.getMemory()); + } + + @Override + public boolean isNull(int index) { + return index != this.index || data == null || data == ValueNull.INSTANCE; } + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + setValue(index, source.getValue(index)); + } } diff --git a/h2/src/main/org/h2/result/SortOrder.java b/h2/src/main/org/h2/result/SortOrder.java index 2570fc6f2e..65b9782468 100644 --- a/h2/src/main/org/h2/result/SortOrder.java +++ b/h2/src/main/org/h2/result/SortOrder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.result; @@ -9,23 +9,23 @@ import java.util.Collections; import java.util.Comparator; -import org.h2.command.dml.SelectOrderBy; +import org.h2.command.query.QueryOrderBy; import org.h2.engine.Database; -import org.h2.engine.SysProperties; +import org.h2.engine.SessionLocal; import org.h2.expression.Expression; import org.h2.expression.ExpressionColumn; +import org.h2.mode.DefaultNullOrdering; import org.h2.table.Column; import org.h2.table.TableFilter; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; import org.h2.util.Utils; import org.h2.value.Value; import org.h2.value.ValueNull; +import org.h2.value.ValueRow; /** * A sort order represents an ORDER BY clause in a query. */ -public class SortOrder implements Comparator { +public final class SortOrder implements Comparator { /** * This bit mask means the values should be sorted in ascending order. @@ -49,13 +49,7 @@ public class SortOrder implements Comparator { */ public static final int NULLS_LAST = 4; - /** - * The default sort order for NULL. - */ - private static final int DEFAULT_NULL_SORT = - SysProperties.SORT_NULLS_HIGH ? 1 : -1; - - private final Database database; + private final SessionLocal session; /** * The column indexes of the order by expressions within the query. @@ -70,19 +64,29 @@ public class SortOrder implements Comparator { /** * The order list. */ - private final ArrayList orderList; + private final ArrayList orderList; + + /** + * Construct a new sort order object with default sort directions. + * + * @param session the session + * @param queryColumnIndexes the column index list + */ + public SortOrder(SessionLocal session, int[] queryColumnIndexes) { + this (session, queryColumnIndexes, new int[queryColumnIndexes.length], null); + } /** * Construct a new sort order object. 
* - * @param database the database + * @param session the session * @param queryColumnIndexes the column index list * @param sortType the sort order bit masks * @param orderList the original query order list (if this is a query) */ - public SortOrder(Database database, int[] queryColumnIndexes, - int[] sortType, ArrayList orderList) { - this.database = database; + public SortOrder(SessionLocal session, int[] queryColumnIndexes, int[] sortType, + ArrayList orderList) { + this.session = session; this.queryColumnIndexes = queryColumnIndexes; this.sortTypes = sortType; this.orderList = orderList; @@ -92,50 +96,41 @@ public SortOrder(Database database, int[] queryColumnIndexes, * Create the SQL snippet that describes this sort order. * This is the SQL snippet that usually appears after the ORDER BY clause. * + * @param builder string builder to append to * @param list the expression list * @param visible the number of columns in the select list - * @return the SQL snippet + * @param sqlFlags formatting flags + * @return the specified string builder */ - public String getSQL(Expression[] list, int visible) { - StatementBuilder buff = new StatementBuilder(); + public StringBuilder getSQL(StringBuilder builder, Expression[] list, int visible, int sqlFlags) { int i = 0; for (int idx : queryColumnIndexes) { - buff.appendExceptFirst(", "); + if (i > 0) { + builder.append(", "); + } if (idx < visible) { - buff.append(idx + 1); + builder.append(idx + 1); } else { - buff.append('=').append(StringUtils.unEnclose(list[idx].getSQL())); - } - int type = sortTypes[i++]; - if ((type & DESCENDING) != 0) { - buff.append(" DESC"); - } - if ((type & NULLS_FIRST) != 0) { - buff.append(" NULLS FIRST"); - } else if ((type & NULLS_LAST) != 0) { - buff.append(" NULLS LAST"); + list[idx].getUnenclosedSQL(builder, sqlFlags); } + typeToString(builder, sortTypes[i++]); } - return buff.toString(); + return builder; } /** - * Compare two expressions where one of them is NULL. 
- * - * @param aNull whether the first expression is null - * @param sortType the sort bit mask to use - * @return the result of the comparison (-1 meaning the first expression - * should appear before the second, 0 if they are equal) + * Appends type information (DESC, NULLS FIRST, NULLS LAST) to the specified statement builder. + * @param builder string builder + * @param type sort type */ - public static int compareNull(boolean aNull, int sortType) { - if ((sortType & NULLS_FIRST) != 0) { - return aNull ? -1 : 1; - } else if ((sortType & NULLS_LAST) != 0) { - return aNull ? 1 : -1; - } else { - // see also JdbcDatabaseMetaData.nullsAreSorted* - int comp = aNull ? DEFAULT_NULL_SORT : -DEFAULT_NULL_SORT; - return (sortType & DESCENDING) == 0 ? comp : -comp; + public static void typeToString(StringBuilder builder, int type) { + if ((type & DESCENDING) != 0) { + builder.append(" DESC"); + } + if ((type & NULLS_FIRST) != 0) { + builder.append(" NULLS FIRST"); + } else if ((type & NULLS_LAST) != 0) { + builder.append(" NULLS LAST"); } } @@ -158,9 +153,9 @@ public int compare(Value[] a, Value[] b) { if (aNull == bNull) { continue; } - return compareNull(aNull, type); + return session.getDatabase().getDefaultNullOrdering().compareNull(aNull, type); } - int comp = database.compare(ao, bo); + int comp = session.compare(ao, bo); if (comp != 0) { return (type & DESCENDING) == 0 ? comp : -comp; } @@ -174,34 +169,24 @@ public int compare(Value[] a, Value[] b) { * @param rows the list of rows */ public void sort(ArrayList rows) { - Collections.sort(rows, this); + rows.sort(this); } /** * Sort a list of rows using offset and limit. 
* * @param rows the list of rows - * @param offset the offset - * @param limit the limit + * @param fromInclusive the start index, inclusive + * @param toExclusive the end index, exclusive */ - public void sort(ArrayList rows, int offset, int limit) { - int rowsSize = rows.size(); - if (rows.isEmpty() || offset >= rowsSize || limit == 0) { - return; - } - if (offset < 0) { - offset = 0; - } - if (offset + limit > rowsSize) { - limit = rowsSize - offset; - } - if (limit == 1 && offset == 0) { + public void sort(ArrayList rows, int fromInclusive, int toExclusive) { + if (toExclusive == 1 && fromInclusive == 0) { rows.set(0, Collections.min(rows, this)); return; } - Value[][] arr = rows.toArray(new Value[rowsSize][]); - Utils.sortTopN(arr, offset, limit, this); - for (int i = 0, end = Math.min(offset + limit, rowsSize); i < end; i++) { + Value[][] arr = rows.toArray(new Value[0][]); + Utils.sortTopN(arr, fromInclusive, toExclusive, this); + for (int i = fromInclusive; i < toExclusive; i++) { rows.set(i, arr[i]); } } @@ -233,7 +218,7 @@ public Column getColumn(int index, TableFilter filter) { if (orderList == null) { return null; } - SelectOrderBy order = orderList.get(index); + QueryOrderBy order = orderList.get(index); Expression expr = order.expression; if (expr == null) { return null; @@ -261,4 +246,51 @@ public int[] getSortTypes() { return sortTypes; } + /** + * Returns the original query order list. + * + * @return the original query order list + */ + public ArrayList getOrderList() { + return orderList; + } + + /** + * Returns sort order bit masks with {@link SortOrder#NULLS_FIRST} or + * {@link SortOrder#NULLS_LAST} explicitly set. + * + * @return bit masks with either {@link SortOrder#NULLS_FIRST} or {@link SortOrder#NULLS_LAST} + * explicitly set. 
+ */ + public int[] getSortTypesWithNullOrdering() { + return addNullOrdering(session.getDatabase(), sortTypes.clone()); + } + + /** + * Add explicit {@link SortOrder#NULLS_FIRST} or {@link SortOrder#NULLS_LAST} where they + * aren't already specified. + * + * @param database + * the database + * @param sortTypes + * bit masks + * @return the specified array with possibly modified bit masks + */ + public static int[] addNullOrdering(Database database, int[] sortTypes) { + DefaultNullOrdering defaultNullOrdering = database.getDefaultNullOrdering(); + for (int i = 0, length = sortTypes.length; i < length; i++) { + sortTypes[i] = defaultNullOrdering.addExplicitNullOrdering(sortTypes[i]); + } + return sortTypes; + } + + /** + * Returns comparator for row values. + * + * @return comparator for row values. + */ + public Comparator getRowValueComparator() { + return (o1, o2) -> compare(((ValueRow) o1).getList(), ((ValueRow) o2).getList()); + } + } diff --git a/h2/src/main/org/h2/result/Sparse.java b/h2/src/main/org/h2/result/Sparse.java new file mode 100644 index 0000000000..828cd05197 --- /dev/null +++ b/h2/src/main/org/h2/result/Sparse.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.result; + +import org.h2.value.Value; +import org.h2.value.ValueBigint; + +/** + * Class Sparse. + *
      + *
    • 11/16/19 7:35 PM initial creation + *
    + * + * @author Andrei Tokar + */ +public final class Sparse extends DefaultRow { + private final int columnCount; + private final int[] map; + + Sparse(int columnCount, int capacity, int[] map) { + super(new Value[capacity]); + this.columnCount = columnCount; + this.map = map; + } + + @Override + public int getColumnCount() { + return columnCount; + } + + @Override + public Value getValue(int i) { + if (i == ROWID_INDEX) { + return ValueBigint.get(getKey()); + } + int index = map[i]; + return index > 0 ? super.getValue(index - 1) : null; + } + + @Override + public void setValue(int i, Value v) { + if (i == ROWID_INDEX) { + setKey(v.getLong()); + } + int index = map[i]; + if (index > 0) { + super.setValue(index - 1, v); + } + } + + @Override + public void copyFrom(SearchRow source) { + setKey(source.getKey()); + for (int i = 0; i < map.length; i++) { + int index = map[i]; + if (index > 0) { + super.setValue(index - 1, source.getValue(i)); + } + } + } +} diff --git a/h2/src/main/org/h2/result/UpdatableRow.java b/h2/src/main/org/h2/result/UpdatableRow.java index 49bd29e735..fb3e7077de 100644 --- a/h2/src/main/org/h2/result/UpdatableRow.java +++ b/h2/src/main/org/h2/result/UpdatableRow.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.result; @@ -12,14 +12,18 @@ import java.util.ArrayList; import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.Session; +import org.h2.engine.SessionRemote; import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcResultSet; import org.h2.message.DbException; -import org.h2.util.New; -import org.h2.util.StatementBuilder; +import org.h2.util.JdbcUtils; import org.h2.util.StringUtils; -import org.h2.value.DataType; +import org.h2.util.Utils; import org.h2.value.Value; import org.h2.value.ValueNull; +import org.h2.value.ValueToObjectConverter; /** * This class is used for updatable result sets. An updatable row provides @@ -41,12 +45,16 @@ public class UpdatableRow { * * @param conn the database connection * @param result the result + * @throws SQLException on failure */ public UpdatableRow(JdbcConnection conn, ResultInterface result) throws SQLException { this.conn = conn; this.result = result; columnCount = result.getVisibleColumnCount(); + if (columnCount == 0) { + return; + } for (int i = 0; i < columnCount; i++) { String t = result.getTableName(i); String s = result.getSchemaName(i); @@ -64,24 +72,26 @@ public UpdatableRow(JdbcConnection conn, ResultInterface result) return; } } + String type = "BASE TABLE"; + Session session = conn.getSession(); + if (session instanceof SessionRemote + && ((SessionRemote) session).getClientVersion() <= Constants.TCP_PROTOCOL_VERSION_19) { + type = "TABLE"; + } final DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getTables(null, StringUtils.escapeMetaDataPattern(schemaName), StringUtils.escapeMetaDataPattern(tableName), - new String[] { "TABLE" }); + new String[] { type }); if (!rs.next()) { return; } - if (rs.getString("SQL") == null) { - // system table - return; - } String table = rs.getString("TABLE_NAME"); // if the table name in the database meta data is lower case, // but the table in the result set meta data is not, then the 
column // in the database meta data is also lower case boolean toUpper = !table.equals(tableName) && table.equalsIgnoreCase(tableName); - key = New.arrayList(); + key = Utils.newSmallArrayList(); rs = meta.getPrimaryKeys(null, StringUtils.escapeMetaDataPattern(schemaName), tableName); @@ -118,7 +128,7 @@ public UpdatableRow(JdbcConnection conn, ResultInterface result) } private boolean isIndexUsable(ArrayList indexColumns) { - if (indexColumns.size() == 0) { + if (indexColumns.isEmpty()) { return false; } for (String c : indexColumns) { @@ -156,29 +166,30 @@ private int getColumnIndex(String columnName) { return index; } - private void appendColumnList(StatementBuilder buff, boolean set) { - buff.resetCount(); + private void appendColumnList(StringBuilder builder, boolean set) { for (int i = 0; i < columnCount; i++) { - buff.appendExceptFirst(","); + if (i > 0) { + builder.append(','); + } String col = result.getColumnName(i); - buff.append(StringUtils.quoteIdentifier(col)); + StringUtils.quoteIdentifier(builder, col); if (set) { - buff.append("=? "); + builder.append("=? 
"); } } } - private void appendKeyCondition(StatementBuilder buff) { - buff.append(" WHERE "); - buff.resetCount(); - for (String k : key) { - buff.appendExceptFirst(" AND "); - buff.append(StringUtils.quoteIdentifier(k)).append("=?"); + private void appendKeyCondition(StringBuilder builder) { + builder.append(" WHERE "); + for (int i = 0; i < key.size(); i++) { + if (i > 0) { + builder.append(" AND "); + } + StringUtils.quoteIdentifier(builder, key.get(i)).append("=?"); } } - private void setKey(PreparedStatement prep, int start, Value[] current) - throws SQLException { + private void setKey(PreparedStatement prep, int start, Value[] current) throws SQLException { for (int i = 0, size = key.size(); i < size; i++) { String col = key.get(i); int idx = getColumnIndex(col); @@ -188,7 +199,7 @@ private void setKey(PreparedStatement prep, int start, Value[] current) // as multiple such rows could exist throw DbException.get(ErrorCode.NO_DATA_AVAILABLE); } - v.set(prep, start + i); + JdbcUtils.set(prep, start + i, v, conn); } } @@ -204,11 +215,11 @@ private void setKey(PreparedStatement prep, int start, Value[] current) // return rs.getInt(1) == 0; // } - private void appendTableName(StatementBuilder buff) { + private void appendTableName(StringBuilder builder) { if (schemaName != null && schemaName.length() > 0) { - buff.append(StringUtils.quoteIdentifier(schemaName)).append('.'); + StringUtils.quoteIdentifier(builder, schemaName).append('.'); } - buff.append(StringUtils.quoteIdentifier(tableName)); + StringUtils.quoteIdentifier(builder, tableName); } /** @@ -216,23 +227,23 @@ private void appendTableName(StatementBuilder buff) { * * @param row the values that contain the key * @return the row + * @throws SQLException on failure */ public Value[] readRow(Value[] row) throws SQLException { - StatementBuilder buff = new StatementBuilder("SELECT "); - appendColumnList(buff, false); - buff.append(" FROM "); - appendTableName(buff); - appendKeyCondition(buff); - 
PreparedStatement prep = conn.prepareStatement(buff.toString()); + StringBuilder builder = new StringBuilder("SELECT "); + appendColumnList(builder, false); + builder.append(" FROM "); + appendTableName(builder); + appendKeyCondition(builder); + PreparedStatement prep = conn.prepareStatement(builder.toString()); setKey(prep, 1, row); - ResultSet rs = prep.executeQuery(); + JdbcResultSet rs = (JdbcResultSet) prep.executeQuery(); if (!rs.next()) { throw DbException.get(ErrorCode.NO_DATA_AVAILABLE); } Value[] newRow = new Value[columnCount]; for (int i = 0; i < columnCount; i++) { - int type = result.getColumnType(i); - newRow[i] = DataType.readValue(conn.getSession(), rs, i + 1, type); + newRow[i] = ValueToObjectConverter.readValue(conn.getSession(), rs, i + 1); } return newRow; } @@ -244,10 +255,10 @@ public Value[] readRow(Value[] row) throws SQLException { * @throws SQLException if this row has already been deleted */ public void deleteRow(Value[] current) throws SQLException { - StatementBuilder buff = new StatementBuilder("DELETE FROM "); - appendTableName(buff); - appendKeyCondition(buff); - PreparedStatement prep = conn.prepareStatement(buff.toString()); + StringBuilder builder = new StringBuilder("DELETE FROM "); + appendTableName(builder); + appendKeyCondition(builder); + PreparedStatement prep = conn.prepareStatement(builder.toString()); setKey(prep, 1, current); int count = prep.executeUpdate(); if (count != 1) { @@ -264,22 +275,22 @@ public void deleteRow(Value[] current) throws SQLException { * @throws SQLException if the row has been deleted */ public void updateRow(Value[] current, Value[] updateRow) throws SQLException { - StatementBuilder buff = new StatementBuilder("UPDATE "); - appendTableName(buff); - buff.append(" SET "); - appendColumnList(buff, true); + StringBuilder builder = new StringBuilder("UPDATE "); + appendTableName(builder); + builder.append(" SET "); + appendColumnList(builder, true); // TODO updatable result set: we could add all 
current values to the // where clause // - like this optimistic ('no') locking is possible - appendKeyCondition(buff); - PreparedStatement prep = conn.prepareStatement(buff.toString()); + appendKeyCondition(builder); + PreparedStatement prep = conn.prepareStatement(builder.toString()); int j = 1; for (int i = 0; i < columnCount; i++) { Value v = updateRow[i]; if (v == null) { v = current[i]; } - v.set(prep, j++); + JdbcUtils.set(prep, j++, v, conn); } setKey(prep, j, current); int count = prep.executeUpdate(); @@ -296,27 +307,28 @@ public void updateRow(Value[] current, Value[] updateRow) throws SQLException { * @throws SQLException if the row could not be inserted */ public void insertRow(Value[] row) throws SQLException { - StatementBuilder buff = new StatementBuilder("INSERT INTO "); - appendTableName(buff); - buff.append('('); - appendColumnList(buff, false); - buff.append(")VALUES("); - buff.resetCount(); + StringBuilder builder = new StringBuilder("INSERT INTO "); + appendTableName(builder); + builder.append('('); + appendColumnList(builder, false); + builder.append(")VALUES("); for (int i = 0; i < columnCount; i++) { - buff.appendExceptFirst(","); + if (i > 0) { + builder.append(','); + } Value v = row[i]; if (v == null) { - buff.append("DEFAULT"); + builder.append("DEFAULT"); } else { - buff.append('?'); + builder.append('?'); } } - buff.append(')'); - PreparedStatement prep = conn.prepareStatement(buff.toString()); + builder.append(')'); + PreparedStatement prep = conn.prepareStatement(builder.toString()); for (int i = 0, j = 0; i < columnCount; i++) { Value v = row[i]; if (v != null) { - v.set(prep, j++ + 1); + JdbcUtils.set(prep, j++ + 1, v, conn); } } int count = prep.executeUpdate(); diff --git a/h2/src/main/org/h2/result/package.html b/h2/src/main/org/h2/result/package.html index dd0359d7c0..0629958272 100644 --- a/h2/src/main/org/h2/result/package.html +++ b/h2/src/main/org/h2/result/package.html @@ -1,7 +1,7 @@ diff --git 
a/h2/src/main/org/h2/schema/Constant.java b/h2/src/main/org/h2/schema/Constant.java index f2f8f18142..bcf523ab79 100644 --- a/h2/src/main/org/h2/schema/Constant.java +++ b/h2/src/main/org/h2/schema/Constant.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.expression.ValueExpression; import org.h2.message.DbException; import org.h2.message.Trace; @@ -17,28 +17,25 @@ * A user-defined constant as created by the SQL statement * CREATE CONSTANT */ -public class Constant extends SchemaObjectBase { +public final class Constant extends SchemaObject { private Value value; private ValueExpression expression; public Constant(Schema schema, int id, String name) { - initSchemaObjectBase(schema, id, name, Trace.SCHEMA); + super(schema, id, name, Trace.SCHEMA); } @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override public String getCreateSQL() { - return "CREATE CONSTANT " + getSQL() + " VALUE " + value.getSQL(); + StringBuilder builder = new StringBuilder("CREATE CONSTANT "); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" VALUE "); + return value.getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override @@ -47,16 +44,11 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } - @Override - public void checkRename() { - // ok 
- } - public void setValue(Value value) { this.value = value; expression = ValueExpression.get(value); diff --git a/h2/src/main/org/h2/schema/Domain.java b/h2/src/main/org/h2/schema/Domain.java new file mode 100644 index 0000000000..1003a2105a --- /dev/null +++ b/h2/src/main/org/h2/schema/Domain.java @@ -0,0 +1,224 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.ArrayList; +import org.h2.constraint.Constraint; +import org.h2.constraint.ConstraintDomain; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Expression; +import org.h2.expression.ValueExpression; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.table.ColumnTemplate; +import org.h2.table.Table; +import org.h2.util.Utils; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Represents a domain. + */ +public final class Domain extends SchemaObject implements ColumnTemplate { + + private TypeInfo type; + + /** + * Parent domain. 
+ */ + private Domain domain; + + private Expression defaultExpression; + + private Expression onUpdateExpression; + + private ArrayList constraints; + + public Domain(Schema schema, int id, String name) { + super(schema, id, name, Trace.SCHEMA); + } + + @Override + public String getCreateSQLForCopy(Table table, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public String getDropSQL() { + StringBuilder builder = new StringBuilder("DROP DOMAIN IF EXISTS "); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = getSQL(new StringBuilder("CREATE DOMAIN "), DEFAULT_SQL_FLAGS).append(" AS "); + if (domain != null) { + domain.getSQL(builder, DEFAULT_SQL_FLAGS); + } else { + type.getSQL(builder, DEFAULT_SQL_FLAGS); + } + if (defaultExpression != null) { + defaultExpression.getUnenclosedSQL(builder.append(" DEFAULT "), DEFAULT_SQL_FLAGS); + } + if (onUpdateExpression != null) { + onUpdateExpression.getUnenclosedSQL(builder.append(" ON UPDATE "), DEFAULT_SQL_FLAGS); + } + return builder.toString(); + } + + public void setDataType(TypeInfo type) { + this.type = type; + } + + public TypeInfo getDataType() { + return type; + } + + @Override + public void setDomain(Domain domain) { + this.domain = domain; + } + + @Override + public Domain getDomain() { + return domain; + } + + @Override + public void setDefaultExpression(SessionLocal session, Expression defaultExpression) { + // also to test that no column names are used + if (defaultExpression != null) { + defaultExpression = defaultExpression.optimize(session); + if (defaultExpression.isConstant()) { + defaultExpression = ValueExpression.get(defaultExpression.getValue(session)); + } + } + this.defaultExpression = defaultExpression; + } + + @Override + public Expression getDefaultExpression() { + return defaultExpression; + } + + @Override + public Expression getEffectiveDefaultExpression() { + return 
defaultExpression != null ? defaultExpression + : domain != null ? domain.getEffectiveDefaultExpression() : null; + } + + @Override + public String getDefaultSQL() { + return defaultExpression == null ? null + : defaultExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public void setOnUpdateExpression(SessionLocal session, Expression onUpdateExpression) { + // also to test that no column names are used + if (onUpdateExpression != null) { + onUpdateExpression = onUpdateExpression.optimize(session); + if (onUpdateExpression.isConstant()) { + onUpdateExpression = ValueExpression.get(onUpdateExpression.getValue(session)); + } + } + this.onUpdateExpression = onUpdateExpression; + } + + @Override + public Expression getOnUpdateExpression() { + return onUpdateExpression; + } + + @Override + public Expression getEffectiveOnUpdateExpression() { + return onUpdateExpression != null ? onUpdateExpression + : domain != null ? domain.getEffectiveOnUpdateExpression() : null; + } + + @Override + public String getOnUpdateSQL() { + return onUpdateExpression == null ? null + : onUpdateExpression.getUnenclosedSQL(new StringBuilder(), DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public void prepareExpressions(SessionLocal session) { + if (defaultExpression != null) { + defaultExpression = defaultExpression.optimize(session); + } + if (onUpdateExpression != null) { + onUpdateExpression = onUpdateExpression.optimize(session); + } + if (domain != null) { + domain.prepareExpressions(session); + } + } + + /** + * Add a constraint to the domain. + * + * @param constraint the constraint to add + */ + public void addConstraint(ConstraintDomain constraint) { + if (constraints == null) { + constraints = Utils.newSmallArrayList(); + } + if (!constraints.contains(constraint)) { + constraints.add(constraint); + } + } + + public ArrayList getConstraints() { + return constraints; + } + + /** + * Remove the given constraint from the list. 
+ * + * @param constraint the constraint to remove + */ + public void removeConstraint(Constraint constraint) { + if (constraints != null) { + constraints.remove(constraint); + } + } + + @Override + public int getType() { + return DbObject.DOMAIN; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + if (constraints != null && !constraints.isEmpty()) { + for (ConstraintDomain constraint : constraints.toArray(new ConstraintDomain[0])) { + database.removeSchemaObject(session, constraint); + } + constraints = null; + } + database.removeMeta(session, getId()); + } + + /** + * Check the specified value. + * + * @param session the session + * @param value the value + */ + public void checkConstraints(SessionLocal session, Value value) { + if (constraints != null) { + for (ConstraintDomain constraint : constraints) { + constraint.check(session, value); + } + } + if (domain != null) { + domain.checkConstraints(session, value); + } + } + +} diff --git a/h2/src/main/org/h2/schema/FunctionAlias.java b/h2/src/main/org/h2/schema/FunctionAlias.java new file mode 100644 index 0000000000..47caf1ecf9 --- /dev/null +++ b/h2/src/main/org/h2/schema/FunctionAlias.java @@ -0,0 +1,561 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.lang.reflect.Array; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; + +import org.h2.Driver; +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.expression.Alias; +import org.h2.expression.Expression; +import org.h2.expression.ExpressionColumn; +import org.h2.jdbc.JdbcConnection; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.result.LocalResult; +import org.h2.result.ResultInterface; +import org.h2.table.Column; +import org.h2.util.JdbcUtils; +import org.h2.util.SourceCompiler; +import org.h2.util.StringUtils; +import org.h2.util.Utils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueNull; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueToObjectConverter2; + +/** + * Represents a user-defined function, or alias. + * + * @author Thomas Mueller + * @author Gary Tong + */ +public final class FunctionAlias extends UserDefinedFunction { + + private String methodName; + private String source; + private JavaMethod[] javaMethods; + private boolean deterministic; + + private FunctionAlias(Schema schema, int id, String name) { + super(schema, id, name, Trace.FUNCTION); + } + + /** + * Create a new alias based on a method name. 
+ * + * @param schema the schema + * @param id the id + * @param name the name + * @param javaClassMethod the class and method name + * @param force create the object even if the class or method does not exist + * @return the database object + */ + public static FunctionAlias newInstance( + Schema schema, int id, String name, String javaClassMethod, + boolean force) { + FunctionAlias alias = new FunctionAlias(schema, id, name); + int paren = javaClassMethod.indexOf('('); + int lastDot = javaClassMethod.lastIndexOf('.', paren < 0 ? + javaClassMethod.length() : paren); + if (lastDot < 0) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, javaClassMethod); + } + alias.className = javaClassMethod.substring(0, lastDot); + alias.methodName = javaClassMethod.substring(lastDot + 1); + alias.init(force); + return alias; + } + + /** + * Create a new alias based on source code. + * + * @param schema the schema + * @param id the id + * @param name the name + * @param source the source code + * @param force create the object even if the class or method does not exist + * @return the database object + */ + public static FunctionAlias newInstanceFromSource( + Schema schema, int id, String name, String source, boolean force) { + FunctionAlias alias = new FunctionAlias(schema, id, name); + alias.source = source; + alias.init(force); + return alias; + } + + private void init(boolean force) { + try { + // at least try to compile the class, otherwise the data type is not + // initialized if it could be + load(); + } catch (DbException e) { + if (!force) { + throw e; + } + } + } + + private synchronized void load() { + if (javaMethods != null) { + return; + } + if (source != null) { + loadFromSource(); + } else { + loadClass(); + } + } + + private void loadFromSource() { + SourceCompiler compiler = database.getCompiler(); + synchronized (compiler) { + String fullClassName = Constants.USER_PACKAGE + "." 
+ getName(); + compiler.setSource(fullClassName, source); + try { + Method m = compiler.getMethod(fullClassName); + JavaMethod method = new JavaMethod(m, 0); + javaMethods = new JavaMethod[] { + method + }; + } catch (DbException e) { + throw e; + } catch (Exception e) { + throw DbException.get(ErrorCode.SYNTAX_ERROR_1, e, source); + } + } + } + + private void loadClass() { + Class javaClass = JdbcUtils.loadUserClass(className); + Method[] methods = javaClass.getMethods(); + ArrayList list = new ArrayList<>(1); + for (int i = 0, len = methods.length; i < len; i++) { + Method m = methods[i]; + if (!Modifier.isStatic(m.getModifiers())) { + continue; + } + if (m.getName().equals(methodName) || + getMethodSignature(m).equals(methodName)) { + JavaMethod javaMethod = new JavaMethod(m, i); + for (JavaMethod old : list) { + if (old.getParameterCount() == javaMethod.getParameterCount()) { + throw DbException.get(ErrorCode. + METHODS_MUST_HAVE_DIFFERENT_PARAMETER_COUNTS_2, + old.toString(), javaMethod.toString()); + } + } + list.add(javaMethod); + } + } + if (list.isEmpty()) { + throw DbException.get( + ErrorCode.PUBLIC_STATIC_JAVA_METHOD_NOT_FOUND_1, + methodName + " (" + className + ")"); + } + javaMethods = list.toArray(new JavaMethod[0]); + // Sort elements. Methods with a variable number of arguments must be at + // the end. Reason: there could be one method without parameters and one + // with a variable number. The one without parameters needs to be used + // if no parameters are given. 
+ Arrays.sort(javaMethods); + } + + private static String getMethodSignature(Method m) { + StringBuilder buff = new StringBuilder(m.getName()); + buff.append('('); + Class[] parameterTypes = m.getParameterTypes(); + for (int i = 0, length = parameterTypes.length; i < length; i++) { + if (i > 0) { + // do not use a space here, because spaces are removed + // in CreateFunctionAlias.setJavaClassMethod() + buff.append(','); + } + Class p = parameterTypes[i]; + if (p.isArray()) { + buff.append(p.getComponentType().getName()).append("[]"); + } else { + buff.append(p.getName()); + } + } + return buff.append(')').toString(); + } + + @Override + public String getDropSQL() { + return getSQL(new StringBuilder("DROP ALIAS IF EXISTS "), DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = new StringBuilder("CREATE FORCE ALIAS "); + getSQL(builder, DEFAULT_SQL_FLAGS); + if (deterministic) { + builder.append(" DETERMINISTIC"); + } + if (source != null) { + StringUtils.quoteStringSQL(builder.append(" AS "), source); + } else { + StringUtils.quoteStringSQL(builder.append(" FOR "), className + '.' + methodName); + } + return builder.toString(); + } + + @Override + public int getType() { + return DbObject.FUNCTION_ALIAS; + } + + @Override + public synchronized void removeChildrenAndResources(SessionLocal session) { + database.removeMeta(session, getId()); + className = null; + methodName = null; + javaMethods = null; + invalidate(); + } + + /** + * Find the Java method that matches the arguments. 
+ * + * @param args the argument list + * @return the Java method + * @throws DbException if no matching method could be found + */ + public JavaMethod findJavaMethod(Expression[] args) { + load(); + int parameterCount = args.length; + for (JavaMethod m : javaMethods) { + int count = m.getParameterCount(); + if (count == parameterCount || (m.isVarArgs() && + count <= parameterCount + 1)) { + return m; + } + } + throw DbException.get(ErrorCode.METHOD_NOT_FOUND_1, getName() + " (" + + className + ", parameter count: " + parameterCount + ")"); + } + + public String getJavaMethodName() { + return this.methodName; + } + + /** + * Get the Java methods mapped by this function. + * + * @return the Java methods. + */ + public JavaMethod[] getJavaMethods() { + load(); + return javaMethods; + } + + public void setDeterministic(boolean deterministic) { + this.deterministic = deterministic; + } + + public boolean isDeterministic() { + return deterministic; + } + + public String getSource() { + return source; + } + + /** + * There may be multiple Java methods that match a function name. + * Each method must have a different number of parameters however. + * This helper class represents one such method. 
+ */ + public static class JavaMethod implements Comparable { + private final int id; + private final Method method; + private final TypeInfo dataType; + private boolean hasConnectionParam; + private boolean varArgs; + private Class varArgClass; + private int paramCount; + + JavaMethod(Method method, int id) { + this.method = method; + this.id = id; + Class[] paramClasses = method.getParameterTypes(); + paramCount = paramClasses.length; + if (paramCount > 0) { + Class paramClass = paramClasses[0]; + if (Connection.class.isAssignableFrom(paramClass)) { + hasConnectionParam = true; + paramCount--; + } + } + if (paramCount > 0) { + Class lastArg = paramClasses[paramClasses.length - 1]; + if (lastArg.isArray() && method.isVarArgs()) { + varArgs = true; + varArgClass = lastArg.getComponentType(); + } + } + Class returnClass = method.getReturnType(); + dataType = ResultSet.class.isAssignableFrom(returnClass) ? null + : ValueToObjectConverter2.classToType(returnClass); + } + + @Override + public String toString() { + return method.toString(); + } + + /** + * Check if this function requires a database connection. + * + * @return if the function requires a connection + */ + public boolean hasConnectionParam() { + return this.hasConnectionParam; + } + + /** + * Call the user-defined function and return the value. + * + * @param session the session + * @param args the argument list + * @param columnList true if the function should only return the column + * list + * @return the value + */ + public Value getValue(SessionLocal session, Expression[] args, boolean columnList) { + Object returnValue = execute(session, args, columnList); + if (Value.class.isAssignableFrom(method.getReturnType())) { + return (Value) returnValue; + } + return ValueToObjectConverter.objectToValue(session, returnValue, dataType.getValueType()) + .convertTo(dataType, session); + } + + /** + * Call the table user-defined function and return the value. 
+ * + * @param session the session + * @param args the argument list + * @param columnList true if the function should only return the column + * list + * @return the value + */ + public ResultInterface getTableValue(SessionLocal session, Expression[] args, boolean columnList) { + Object o = execute(session, args, columnList); + if (o == null) { + throw DbException.get(ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, method.getName()); + } + if (ResultInterface.class.isAssignableFrom(method.getReturnType())) { + return (ResultInterface) o; + } + return resultSetToResult(session, (ResultSet) o, columnList ? 0 : Integer.MAX_VALUE); + } + + /** + * Create a result for the given result set. + * + * @param session the session + * @param rs the result set + * @param maxrows the maximum number of rows to read (0 to just read the + * meta data) + * @return the value + */ + public static ResultInterface resultSetToResult(SessionLocal session, ResultSet rs, int maxrows) { + try { + ResultSetMetaData meta = rs.getMetaData(); + int columnCount = meta.getColumnCount(); + Expression[] columns = new Expression[columnCount]; + for (int i = 0; i < columnCount; i++) { + String alias = meta.getColumnLabel(i + 1); + String name = meta.getColumnName(i + 1); + String columnTypeName = meta.getColumnTypeName(i + 1); + int columnType = DataType.convertSQLTypeToValueType(meta.getColumnType(i + 1), columnTypeName); + int precision = meta.getPrecision(i + 1); + int scale = meta.getScale(i + 1); + TypeInfo typeInfo; + if (columnType == Value.ARRAY && columnTypeName.endsWith(" ARRAY")) { + typeInfo = TypeInfo + .getTypeInfo(Value.ARRAY, -1L, 0, + TypeInfo.getTypeInfo(DataType.getTypeByName( + columnTypeName.substring(0, columnTypeName.length() - 6), + session.getMode()).type)); + } else { + typeInfo = TypeInfo.getTypeInfo(columnType, precision, scale, null); + } + Expression e = new ExpressionColumn(session.getDatabase(), new Column(name, typeInfo)); + if (!alias.equals(name)) { + e = new Alias(e, 
alias, false); + } + columns[i] = e; + } + LocalResult result = new LocalResult(session, columns, columnCount, columnCount); + for (int i = 0; i < maxrows && rs.next(); i++) { + Value[] list = new Value[columnCount]; + for (int j = 0; j < columnCount; j++) { + list[j] = ValueToObjectConverter.objectToValue(session, rs.getObject(j + 1), + columns[j].getType().getValueType()); + } + result.addRow(list); + } + result.done(); + return result; + } catch (SQLException e) { + throw DbException.convert(e); + } + } + + private Object execute(SessionLocal session, Expression[] args, boolean columnList) { + Class[] paramClasses = method.getParameterTypes(); + Object[] params = new Object[paramClasses.length]; + int p = 0; + JdbcConnection conn = session.createConnection(columnList); + if (hasConnectionParam && params.length > 0) { + params[p++] = conn; + } + + // allocate array for varArgs parameters + Object varArg = null; + if (varArgs) { + int len = args.length - params.length + 1 + + (hasConnectionParam ? 1 : 0); + varArg = Array.newInstance(varArgClass, len); + params[params.length - 1] = varArg; + } + + for (int a = 0, len = args.length; a < len; a++, p++) { + boolean currentIsVarArg = varArgs && + p >= paramClasses.length - 1; + Class paramClass; + if (currentIsVarArg) { + paramClass = varArgClass; + } else { + paramClass = paramClasses[p]; + } + Value v = args[a].getValue(session); + Object o; + if (Value.class.isAssignableFrom(paramClass)) { + o = v; + } else { + boolean primitive = paramClass.isPrimitive(); + if (v == ValueNull.INSTANCE) { + if (primitive) { + if (columnList) { + // If the column list is requested, the parameters + // may be null. Need to set to default value, + // otherwise the function can't be called at all. + o = DataType.getDefaultForPrimitiveType(paramClass); + } else { + // NULL for a java primitive: return NULL + return null; + } + } else { + o = null; + } + } else { + o = ValueToObjectConverter.valueToObject( + (Class) (primitive ? 
Utils.getNonPrimitiveClass(paramClass) : paramClass), v, conn); + } + } + if (currentIsVarArg) { + Array.set(varArg, p - params.length + 1, o); + } else { + params[p] = o; + } + } + boolean old = session.getAutoCommit(); + Value identity = session.getLastIdentity(); + boolean defaultConnection = session.getDatabase(). + getSettings().defaultConnection; + try { + session.setAutoCommit(false); + Object returnValue; + try { + if (defaultConnection) { + Driver.setDefaultConnection(session.createConnection(columnList)); + } + returnValue = method.invoke(null, params); + if (returnValue == null) { + return null; + } + } catch (InvocationTargetException e) { + StringBuilder builder = new StringBuilder(method.getName()).append('('); + for (int i = 0, length = params.length; i < length; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(params[i]); + } + builder.append(')'); + throw DbException.convertInvocation(e, builder.toString()); + } catch (Exception e) { + throw DbException.convert(e); + } + return returnValue; + } finally { + session.setLastIdentity(identity); + session.setAutoCommit(old); + if (defaultConnection) { + Driver.setDefaultConnection(null); + } + } + } + + public Class[] getColumnClasses() { + return method.getParameterTypes(); + } + + /** + * Returns data type information for regular functions or {@code null} + * for table value functions. + * + * @return data type information for regular functions or {@code null} + * for table value functions + */ + public TypeInfo getDataType() { + return dataType; + } + + public int getParameterCount() { + return paramCount; + } + + public boolean isVarArgs() { + return varArgs; + } + + @Override + public int compareTo(JavaMethod m) { + if (varArgs != m.varArgs) { + return varArgs ? 1 : -1; + } + if (paramCount != m.paramCount) { + return paramCount - m.paramCount; + } + if (hasConnectionParam != m.hasConnectionParam) { + return hasConnectionParam ? 
1 : -1; + } + return id - m.id; + } + + } + +} diff --git a/h2/src/main/org/h2/schema/InformationSchema.java b/h2/src/main/org/h2/schema/InformationSchema.java new file mode 100644 index 0000000000..a958166363 --- /dev/null +++ b/h2/src/main/org/h2/schema/InformationSchema.java @@ -0,0 +1,77 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.table.InformationSchemaTable; +import org.h2.table.InformationSchemaTableLegacy; +import org.h2.table.Table; + +/** + * Information schema. + */ +public final class InformationSchema extends MetaSchema { + + private volatile HashMap newTables; + + private volatile HashMap oldTables; + + /** + * Creates new instance of information schema. + * + * @param database + * the database + * @param owner + * the owner of the schema (system user) + */ + public InformationSchema(Database database, User owner) { + super(database, Constants.INFORMATION_SCHEMA_ID, database.sysIdentifier("INFORMATION_SCHEMA"), owner); + } + + @Override + protected Map getMap(SessionLocal session) { + if (session == null) { + return Collections.emptyMap(); + } + boolean old = session.isOldInformationSchema(); + HashMap map = old ? oldTables : newTables; + if (map == null) { + map = fillMap(old); + } + return map; + } + + private synchronized HashMap fillMap(boolean old) { + HashMap map = old ? 
oldTables : newTables; + if (map == null) { + map = database.newStringMap(64); + if (old) { + for (int type = 0; type < InformationSchemaTableLegacy.META_TABLE_TYPE_COUNT; type++) { + InformationSchemaTableLegacy table = new InformationSchemaTableLegacy(this, + Constants.INFORMATION_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + oldTables = map; + } else { + for (int type = 0; type < InformationSchemaTable.META_TABLE_TYPE_COUNT; type++) { + InformationSchemaTable table = new InformationSchemaTable(this, + Constants.INFORMATION_SCHEMA_ID - type, type); + map.put(table.getName(), table); + } + newTables = map; + } + } + return map; + } + +} diff --git a/h2/src/main/org/h2/schema/MetaSchema.java b/h2/src/main/org/h2/schema/MetaSchema.java new file mode 100644 index 0000000000..867421ddc1 --- /dev/null +++ b/h2/src/main/org/h2/schema/MetaSchema.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; + +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.engine.User; +import org.h2.table.Table; + +/** + * Meta data schema. + */ +public abstract class MetaSchema extends Schema { + + /** + * Creates a new instance of meta data schema. 
+ * + * @param database + * the database + * @param id + * the object id + * @param schemaName + * the schema name + * @param owner + * the owner of the schema + */ + public MetaSchema(Database database, int id, String schemaName, User owner) { + super(database, id, schemaName, owner, true); + } + + @Override + public Table findTableOrView(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.findTableOrView(session, name); + } + + @Override + public Collection

    Command line options
    [-dump <fileName>]Dump the contents of the file
    [-info <fileName>]
    getAllTablesAndViews(SessionLocal session) { + Collection
    userTables = super.getAllTablesAndViews(session); + if (session == null) { + return userTables; + } + Collection
    systemTables = getMap(session).values(); + if (userTables.isEmpty()) { + return systemTables; + } + ArrayList
    list = new ArrayList<>(systemTables.size() + userTables.size()); + list.addAll(systemTables); + list.addAll(userTables); + return list; + } + + @Override + public Table getTableOrView(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.getTableOrView(session, name); + } + + @Override + public Table getTableOrViewByName(SessionLocal session, String name) { + Map map = getMap(session); + Table table = map.get(name); + if (table != null) { + return table; + } + return super.getTableOrViewByName(session, name); + } + + /** + * Returns map of tables in this schema. + * + * @param session the session + * @return map of tables in this schema + */ + protected abstract Map getMap(SessionLocal session); + + @Override + public boolean isEmpty() { + return false; + } + +} diff --git a/h2/src/main/org/h2/schema/Schema.java b/h2/src/main/org/h2/schema/Schema.java index 543ff0af64..9002a5c8a9 100644 --- a/h2/src/main/org/h2/schema/Schema.java +++ b/h2/src/main/org/h2/schema/Schema.java @@ -1,58 +1,62 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.schema; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Collection; import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import org.h2.api.ErrorCode; -import org.h2.api.TableEngine; +import org.h2.command.ddl.CreateSynonymData; import org.h2.command.ddl.CreateTableData; import org.h2.constraint.Constraint; import org.h2.engine.Database; import org.h2.engine.DbObject; -import org.h2.engine.DbObjectBase; -import org.h2.engine.FunctionAlias; -import org.h2.engine.Session; +import org.h2.engine.DbSettings; +import org.h2.engine.Right; +import org.h2.engine.RightOwner; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; -import org.h2.engine.User; import org.h2.index.Index; import org.h2.message.DbException; import org.h2.message.Trace; -import org.h2.mvstore.db.MVTableEngine; -import org.h2.table.RegularTable; +import org.h2.table.MetaTable; import org.h2.table.Table; import org.h2.table.TableLink; -import org.h2.util.JdbcUtils; -import org.h2.util.New; +import org.h2.table.TableSynonym; +import org.h2.util.Utils; /** * A schema as created by the SQL statement * CREATE SCHEMA */ -public class Schema extends DbObjectBase { +public class Schema extends DbObject { - private User owner; + private RightOwner owner; private final boolean system; - - private final HashMap tablesAndViews; - private final HashMap indexes; - private final HashMap sequences; - private final HashMap triggers; - private final HashMap constraints; - private final HashMap constants; - private final HashMap functions; + private ArrayList tableEngineParams; + + private final ConcurrentHashMap tablesAndViews; + private final ConcurrentHashMap domains; + private final ConcurrentHashMap synonyms; + private final ConcurrentHashMap indexes; + private final ConcurrentHashMap sequences; + private final ConcurrentHashMap triggers; + private final ConcurrentHashMap 
constraints; + private final ConcurrentHashMap constants; + private final ConcurrentHashMap functionsAndAggregates; /** * The set of returned unique names that are not yet stored. It is used to * avoid returning the same unique name twice when multiple threads * concurrently create objects. */ - private final HashSet temporaryUniqueNames = New.hashSet(); + private final HashSet temporaryUniqueNames = new HashSet<>(); /** * Create a new schema object. @@ -64,16 +68,17 @@ public class Schema extends DbObjectBase { * @param system if this is a system schema (such a schema can not be * dropped) */ - public Schema(Database database, int id, String schemaName, User owner, - boolean system) { - tablesAndViews = database.newStringMap(); - indexes = database.newStringMap(); - sequences = database.newStringMap(); - triggers = database.newStringMap(); - constraints = database.newStringMap(); - constants = database.newStringMap(); - functions = database.newStringMap(); - initDbObjectBase(database, id, schemaName, Trace.SCHEMA); + public Schema(Database database, int id, String schemaName, RightOwner owner, boolean system) { + super(database, id, schemaName, Trace.SCHEMA); + tablesAndViews = database.newConcurrentStringMap(); + domains = database.newConcurrentStringMap(); + synonyms = database.newConcurrentStringMap(); + indexes = database.newConcurrentStringMap(); + sequences = database.newConcurrentStringMap(); + triggers = database.newConcurrentStringMap(); + constraints = database.newConcurrentStringMap(); + constants = database.newConcurrentStringMap(); + functionsAndAggregates = database.newConcurrentStringMap(); this.owner = owner; this.system = system; } @@ -89,12 +94,7 @@ public boolean canDrop() { @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); - } - - @Override - public String getDropSQL() { - return null; + throw DbException.getInternalError(toString()); } @Override @@ -102,8 +102,10 @@ public 
String getCreateSQL() { if (system) { return null; } - return "CREATE SCHEMA IF NOT EXISTS " + - getSQL() + " AUTHORIZATION " + owner.getSQL(); + StringBuilder builder = new StringBuilder("CREATE SCHEMA IF NOT EXISTS "); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" AUTHORIZATION "); + owner.getSQL(builder, DEFAULT_SQL_FLAGS); + return builder.toString(); } @Override @@ -111,60 +113,88 @@ public int getType() { return DbObject.SCHEMA; } + /** + * Return whether is this schema is empty (does not contain any objects). + * + * @return {@code true} if this schema is empty, {@code false} otherwise + */ + public boolean isEmpty() { + return tablesAndViews.isEmpty() && domains.isEmpty() && synonyms.isEmpty() && indexes.isEmpty() + && sequences.isEmpty() && triggers.isEmpty() && constraints.isEmpty() && constants.isEmpty() + && functionsAndAggregates.isEmpty(); + } + @Override - public void removeChildrenAndResources(Session session) { - while (triggers != null && triggers.size() > 0) { - TriggerObject obj = (TriggerObject) triggers.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (constraints != null && constraints.size() > 0) { - Constraint obj = (Constraint) constraints.values().toArray()[0]; - database.removeSchemaObject(session, obj); + public ArrayList getChildren() { + ArrayList children = Utils.newSmallArrayList(); + ArrayList rights = database.getAllRights(); + for (Right right : rights) { + if (right.getGrantedObject() == this) { + children.add(right); + } } + return children; + } + + @Override + public void removeChildrenAndResources(SessionLocal session) { + removeChildrenFromMap(session, triggers); + removeChildrenFromMap(session, constraints); // There can be dependencies between tables e.g. using computed columns, // so we might need to loop over them multiple times. - boolean runLoopAgain = false; - do { - runLoopAgain = false; - if (tablesAndViews != null) { - // Loop over a copy because the map is modified underneath us. 
- for (Table obj : New.arrayList(tablesAndViews.values())) { - // Check for null because multiple tables might be deleted - // in one go underneath us. - if (obj.getName() != null) { - if (database.getDependentTable(obj, obj) == null) { - database.removeSchemaObject(session, obj); - } else { - runLoopAgain = true; - } + boolean modified = true; + while (!tablesAndViews.isEmpty()) { + boolean newModified = false; + for (Table obj : tablesAndViews.values()) { + if (obj.getName() != null) { + // Database.removeSchemaObject() removes the object from + // the map too, but it is safe for ConcurrentHashMap. + Table dependentTable = database.getDependentTable(obj, obj); + if (dependentTable == null) { + database.removeSchemaObject(session, obj); + newModified = true; + } else if (dependentTable.getSchema() != this) { + throw DbException.get(ErrorCode.CANNOT_DROP_2, // + obj.getTraceSQL(), dependentTable.getTraceSQL()); + } else if (!modified) { + dependentTable.removeColumnExpressionsDependencies(session); + dependentTable.setModified(); + database.updateMeta(session, dependentTable); } } } - } while (runLoopAgain); - while (indexes != null && indexes.size() > 0) { - Index obj = (Index) indexes.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (sequences != null && sequences.size() > 0) { - Sequence obj = (Sequence) sequences.values().toArray()[0]; - database.removeSchemaObject(session, obj); + modified = newModified; } - while (constants != null && constants.size() > 0) { - Constant obj = (Constant) constants.values().toArray()[0]; - database.removeSchemaObject(session, obj); - } - while (functions != null && functions.size() > 0) { - FunctionAlias obj = (FunctionAlias) functions.values().toArray()[0]; - database.removeSchemaObject(session, obj); + removeChildrenFromMap(session, domains); + removeChildrenFromMap(session, indexes); + removeChildrenFromMap(session, sequences); + removeChildrenFromMap(session, constants); + 
removeChildrenFromMap(session, functionsAndAggregates); + for (Right right : database.getAllRights()) { + if (right.getGrantedObject() == this) { + database.removeDatabaseObject(session, right); + } } database.removeMeta(session, getId()); owner = null; invalidate(); } - @Override - public void checkRename() { - // ok + private void removeChildrenFromMap(SessionLocal session, ConcurrentHashMap map) { + if (!map.isEmpty()) { + for (SchemaObject obj : map.values()) { + /* + * Referential constraints are dropped when unique or PK + * constraint is dropped, but iterator may return already + * removed objects in some cases. + */ + if (obj.isValid()) { + // Database.removeSchemaObject() removes the object from + // the map too, but it is safe for ConcurrentHashMap. + database.removeSchemaObject(session, obj); + } + } + } } /** @@ -172,17 +202,40 @@ public void checkRename() { * * @return the owner */ - public User getOwner() { + public RightOwner getOwner() { return owner; } + /** + * Get table engine params of this schema. + * + * @return default table engine params + */ + public ArrayList getTableEngineParams() { + return tableEngineParams; + } + + /** + * Set table engine params of this schema. 
+ * @param tableEngineParams default table engine params + */ + public void setTableEngineParams(ArrayList tableEngineParams) { + this.tableEngineParams = tableEngineParams; + } + @SuppressWarnings("unchecked") - private HashMap getMap(int type) { - HashMap result; + private Map getMap(int type) { + Map result; switch (type) { case DbObject.TABLE_OR_VIEW: result = tablesAndViews; break; + case DbObject.DOMAIN: + result = domains; + break; + case DbObject.SYNONYM: + result = synonyms; + break; case DbObject.SEQUENCE: result = sequences; break; @@ -199,12 +252,13 @@ private HashMap getMap(int type) { result = constants; break; case DbObject.FUNCTION_ALIAS: - result = functions; + case DbObject.AGGREGATE: + result = functionsAndAggregates; break; default: - throw DbException.throwInternalError("type=" + type); + throw DbException.getInternalError("type=" + type); } - return (HashMap) result; + return (Map) result; } /** @@ -215,15 +269,14 @@ private HashMap getMap(int type) { * @param obj the object to add */ public void add(SchemaObject obj) { - if (SysProperties.CHECK && obj.getSchema() != this) { - DbException.throwInternalError("wrong schema"); + if (obj.getSchema() != this) { + throw DbException.getInternalError("wrong schema"); } String name = obj.getName(); - HashMap map = getMap(obj.getType()); - if (SysProperties.CHECK && map.get(name) != null) { - DbException.throwInternalError("object already exists: " + name); + Map map = getMap(obj.getType()); + if (map.putIfAbsent(name, obj) != null) { + throw DbException.getInternalError("object already exists: " + name); } - map.put(name, obj); freeUniqueName(name); } @@ -235,13 +288,13 @@ public void add(SchemaObject obj) { */ public void rename(SchemaObject obj, String newName) { int type = obj.getType(); - HashMap map = getMap(type); + Map map = getMap(type); if (SysProperties.CHECK) { - if (!map.containsKey(obj.getName())) { - DbException.throwInternalError("not found: " + obj.getName()); + if 
(!map.containsKey(obj.getName()) && !(obj instanceof MetaTable)) { + throw DbException.getInternalError("not found: " + obj.getName()); } if (obj.getName().equals(newName) || map.containsKey(newName)) { - DbException.throwInternalError("object already exists: " + newName); + throw DbException.getInternalError("object already exists: " + newName); } } obj.checkRename(); @@ -255,13 +308,13 @@ public void rename(SchemaObject obj, String newName) { /** * Try to find a table or view with this name. This method returns null if * no object with this name exists. Local temporary tables are also - * returned. + * returned. Synonyms are not returned or resolved. * * @param session the session * @param name the object name * @return the object or null */ - public Table findTableOrView(Session session, String name) { + public Table findTableOrView(SessionLocal session, String name) { Table table = tablesAndViews.get(name); if (table == null && session != null) { table = session.findLocalTempTable(name); @@ -269,6 +322,48 @@ public Table findTableOrView(Session session, String name) { return table; } + /** + * Try to find a table or view with this name. This method returns null if + * no object with this name exists. Local temporary tables are also + * returned. If a synonym with this name exists, the backing table of the + * synonym is returned + * + * @param session the session + * @param name the object name + * @return the object or null + */ + public Table resolveTableOrView(SessionLocal session, String name) { + Table table = findTableOrView(session, name); + if (table == null) { + TableSynonym synonym = synonyms.get(name); + if (synonym != null) { + return synonym.getSynonymFor(); + } + } + return table; + } + + /** + * Try to find a synonym with this name. This method returns null if + * no object with this name exists. 
+ * + * @param name the object name + * @return the object or null + */ + public TableSynonym getSynonym(String name) { + return synonyms.get(name); + } + + /** + * Get the domain if it exists, or null if not. + * + * @param name the name of the domain + * @return the domain or null + */ + public Domain findDomain(String name) { + return domains.get(name); + } + /** * Try to find an index with this name. This method returns null if * no object with this name exists. @@ -277,7 +372,7 @@ public Table findTableOrView(Session session, String name) { * @param name the object name * @return the object or null */ - public Index findIndex(Session session, String name) { + public Index findIndex(SessionLocal session, String name) { Index index = indexes.get(name); if (index == null) { index = session.findLocalTempTableIndex(name); @@ -315,7 +410,7 @@ public Sequence findSequence(String sequenceName) { * @param name the object name * @return the object or null */ - public Constraint findConstraint(Session session, String name) { + public Constraint findConstraint(SessionLocal session, String name) { Constraint constraint = constraints.get(name); if (constraint == null) { constraint = session.findLocalTempTableConstraint(name); @@ -342,7 +437,46 @@ public Constant findConstant(String constantName) { * @return the object or null */ public FunctionAlias findFunction(String functionAlias) { - return functions.get(functionAlias); + UserDefinedFunction userDefinedFunction = findFunctionOrAggregate(functionAlias); + return userDefinedFunction instanceof FunctionAlias ? (FunctionAlias) userDefinedFunction : null; + } + + /** + * Get the user defined aggregate function if it exists. This method returns + * null if no object with this name exists. 
+ * + * @param name the name of the user defined aggregate function + * @return the aggregate function or null + */ + public UserAggregate findAggregate(String name) { + UserDefinedFunction userDefinedFunction = findFunctionOrAggregate(name); + return userDefinedFunction instanceof UserAggregate ? (UserAggregate) userDefinedFunction : null; + } + + /** + * Try to find a user defined function or aggregate function with the + * specified name. This method returns null if no object with this name + * exists. + * + * @param name + * the object name + * @return the object or null + */ + public UserDefinedFunction findFunctionOrAggregate(String name) { + return functionsAndAggregates.get(name); + } + + /** + * Reserve a unique object name. + * + * @param name the object name + */ + public void reserveUniqueName(String name) { + if (name != null) { + synchronized (temporaryUniqueNames) { + temporaryUniqueNames.add(name); + } + } } /** @@ -358,30 +492,26 @@ public void freeUniqueName(String name) { } } - private String getUniqueName(DbObject obj, - HashMap map, String prefix) { - String hash = Integer.toHexString(obj.getName().hashCode()).toUpperCase(); - String name = null; + private String getUniqueName(DbObject obj, Map map, String prefix) { + StringBuilder nameBuilder = new StringBuilder(prefix); + String hash = Integer.toHexString(obj.getName().hashCode()); synchronized (temporaryUniqueNames) { - for (int i = 1, len = hash.length(); i < len; i++) { - name = prefix + hash.substring(0, i); - if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) { - break; + for (int i = 0, len = hash.length(); i < len; i++) { + char c = hash.charAt(i); + String name = nameBuilder.append(c >= 'a' ? 
(char) (c - 0x20) : c).toString(); + if (!map.containsKey(name) && temporaryUniqueNames.add(name)) { + return name; } - name = null; } - if (name == null) { - prefix = prefix + hash + "_"; - for (int i = 0;; i++) { - name = prefix + i; - if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) { - break; - } + int nameLength = nameBuilder.append('_').length(); + for (int i = 0;; i++) { + String name = nameBuilder.append(i).toString(); + if (!map.containsKey(name) && temporaryUniqueNames.add(name)) { + return name; } + nameBuilder.setLength(nameLength); } - temporaryUniqueNames.add(name); } - return name; } /** @@ -391,8 +521,8 @@ private String getUniqueName(DbObject obj, * @param table the constraint table * @return the unique name */ - public String getUniqueConstraintName(Session session, Table table) { - HashMap tableConstraints; + public String getUniqueConstraintName(SessionLocal session, Table table) { + Map tableConstraints; if (table.isTemporary() && !table.isGlobalTemporary()) { tableConstraints = session.getLocalTempTableConstraints(); } else { @@ -401,6 +531,17 @@ public String getUniqueConstraintName(Session session, Table table) { return getUniqueName(table, tableConstraints, "CONSTRAINT_"); } + /** + * Create a unique constraint name. + * + * @param session the session + * @param domain the constraint domain + * @return the unique name + */ + public String getUniqueDomainConstraintName(SessionLocal session, Domain domain) { + return getUniqueName(domain, constraints, "CONSTRAINT_"); + } + /** * Create a unique index name. 
* @@ -409,8 +550,8 @@ public String getUniqueConstraintName(Session session, Table table) { * @param prefix the index name prefix * @return the unique name */ - public String getUniqueIndexName(Session session, Table table, String prefix) { - HashMap tableIndexes; + public String getUniqueIndexName(SessionLocal session, Table table, String prefix) { + Map tableIndexes; if (table.isTemporary() && !table.isGlobalTemporary()) { tableIndexes = session.getLocalTempTableIndexes(); } else { @@ -428,7 +569,7 @@ public String getUniqueIndexName(Session session, Table table, String prefix) { * @return the table or view * @throws DbException if no such object exists */ - public Table getTableOrView(Session session, String name) { + public Table getTableOrView(SessionLocal session, String name) { Table table = tablesAndViews.get(name); if (table == null) { if (session != null) { @@ -441,6 +582,21 @@ public Table getTableOrView(Session session, String name) { return table; } + /** + * Get the domain with the given name. + * + * @param name the domain name + * @return the domain + * @throws DbException if no such object exists + */ + public Domain getDomain(String name) { + Domain domain = domains.get(name); + if (domain == null) { + throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, name); + } + return domain; + } + /** * Get the index with the given name. * @@ -504,40 +660,91 @@ public Sequence getSequence(String sequenceName) { /** * Get all objects. 
* - * @return a (possible empty) list of all objects + * @param addTo + * list to add objects to, or {@code null} to allocate a new + * list + * @return the specified list with added objects, or a new (possibly empty) list + * with all objects */ - public ArrayList getAll() { - ArrayList all = New.arrayList(); - all.addAll(getMap(DbObject.TABLE_OR_VIEW).values()); - all.addAll(getMap(DbObject.SEQUENCE).values()); - all.addAll(getMap(DbObject.INDEX).values()); - all.addAll(getMap(DbObject.TRIGGER).values()); - all.addAll(getMap(DbObject.CONSTRAINT).values()); - all.addAll(getMap(DbObject.CONSTANT).values()); - all.addAll(getMap(DbObject.FUNCTION_ALIAS).values()); - return all; + public ArrayList getAll(ArrayList addTo) { + if (addTo == null) { + addTo = Utils.newSmallArrayList(); + } + addTo.addAll(tablesAndViews.values()); + addTo.addAll(domains.values()); + addTo.addAll(synonyms.values()); + addTo.addAll(sequences.values()); + addTo.addAll(indexes.values()); + addTo.addAll(triggers.values()); + addTo.addAll(constraints.values()); + addTo.addAll(constants.values()); + addTo.addAll(functionsAndAggregates.values()); + return addTo; } /** * Get all objects of the given type. * - * @param type the object type - * @return a (possible empty) list of all objects + * @param type + * the object type + * @param addTo + * list to add objects to */ - public ArrayList getAll(int type) { - HashMap map = getMap(type); - return New.arrayList(map.values()); + public void getAll(int type, ArrayList addTo) { + addTo.addAll(getMap(type).values()); + } + + public Collection getAllDomains() { + return domains.values(); + } + + public Collection getAllConstraints() { + return constraints.values(); + } + + public Collection getAllConstants() { + return constants.values(); + } + + public Collection getAllSequences() { + return sequences.values(); + } + + public Collection getAllTriggers() { + return triggers.values(); } /** * Get all tables and views. 
* + * @param session the session, {@code null} to exclude meta tables * @return a (possible empty) list of all objects */ - public ArrayList
    getAllTablesAndViews() { - synchronized (database) { - return New.arrayList(tablesAndViews.values()); - } + public Collection
    getAllTablesAndViews(SessionLocal session) { + return tablesAndViews.values(); + } + + public Collection getAllIndexes() { + return indexes.values(); + } + + public Collection getAllSynonyms() { + return synonyms.values(); + } + + public Collection getAllFunctionsAndAggregates() { + return functionsAndAggregates.values(); + } + + /** + * Get the table with the given name, if any. + * + * @param session the session + * @param name the table name + * @return the table or null if not found + */ + public Table getTableOrViewByName(SessionLocal session, String name) { + return tablesAndViews.get(name); } /** @@ -547,11 +754,10 @@ public ArrayList
    getAllTablesAndViews() { */ public void remove(SchemaObject obj) { String objName = obj.getName(); - HashMap map = getMap(obj.getType()); - if (SysProperties.CHECK && !map.containsKey(objName)) { - DbException.throwInternalError("not found: " + objName); + Map map = getMap(obj.getType()); + if (map.remove(objName) == null) { + throw DbException.getInternalError("not found: " + objName); } - map.remove(objName); freeUniqueName(objName); } @@ -567,21 +773,33 @@ public Table createTable(CreateTableData data) { database.lockMeta(data.session); } data.schema = this; - if (data.tableEngine == null) { - if (database.getSettings().mvStore) { - data.tableEngine = MVTableEngine.class.getName(); + String tableEngine = data.tableEngine; + if (tableEngine == null) { + DbSettings s = database.getSettings(); + tableEngine = s.defaultTableEngine; + if (tableEngine == null) { + return database.getStore().createTable(data); } + data.tableEngine = tableEngine; } - if (data.tableEngine != null) { - TableEngine engine; - try { - engine = (TableEngine) JdbcUtils.loadUserClass(data.tableEngine).newInstance(); - } catch (Exception e) { - throw DbException.convert(e); - } - return engine.createTable(data); + if (data.tableEngineParams == null) { + data.tableEngineParams = this.tableEngineParams; } - return new RegularTable(data); + return database.getTableEngine(tableEngine).createTable(data); + } + } + + /** + * Add a table synonym to the schema. 
+ * + * @param data the create synonym information + * @return the created {@link TableSynonym} object + */ + public TableSynonym createSynonym(CreateSynonymData data) { + synchronized (database) { + database.lockMeta(data.session); + data.schema = this; + return new TableSynonym(data); } } diff --git a/h2/src/main/org/h2/schema/SchemaObject.java b/h2/src/main/org/h2/schema/SchemaObject.java index 62882d9cef..f777d038cf 100644 --- a/h2/src/main/org/h2/schema/SchemaObject.java +++ b/h2/src/main/org/h2/schema/SchemaObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; @@ -10,14 +10,42 @@ /** * Any database object that is stored in a schema. */ -public interface SchemaObject extends DbObject { +public abstract class SchemaObject extends DbObject { + + private final Schema schema; + + /** + * Initialize some attributes of this object. 
+ * + * @param newSchema the schema + * @param id the object id + * @param name the name + * @param traceModuleId the trace module id + */ + protected SchemaObject(Schema newSchema, int id, String name, int traceModuleId) { + super(newSchema.getDatabase(), id, name, traceModuleId); + this.schema = newSchema; + } /** * Get the schema in which this object is defined * * @return the schema */ - Schema getSchema(); + public final Schema getSchema() { + return schema; + } + + @Override + public String getSQL(int sqlFlags) { + return getSQL(new StringBuilder(), sqlFlags).toString(); + } + + @Override + public StringBuilder getSQL(StringBuilder builder, int sqlFlags) { + schema.getSQL(builder, sqlFlags).append('.'); + return super.getSQL(builder, sqlFlags); + } /** * Check whether this is a hidden object that doesn't appear in the meta @@ -25,6 +53,8 @@ public interface SchemaObject extends DbObject { * * @return true if it is hidden */ - boolean isHidden(); + public boolean isHidden() { + return false; + } } diff --git a/h2/src/main/org/h2/schema/SchemaObjectBase.java b/h2/src/main/org/h2/schema/SchemaObjectBase.java deleted file mode 100644 index f5093f8ca9..0000000000 --- a/h2/src/main/org/h2/schema/SchemaObjectBase.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.schema; - -import org.h2.engine.DbObjectBase; - -/** - * The base class for classes implementing SchemaObject. - */ -public abstract class SchemaObjectBase extends DbObjectBase implements - SchemaObject { - - private Schema schema; - - /** - * Initialize some attributes of this object. 
- * - * @param newSchema the schema - * @param id the object id - * @param name the name - * @param traceModule the trace module name - */ - protected void initSchemaObjectBase(Schema newSchema, int id, String name, - String traceModule) { - initDbObjectBase(newSchema.getDatabase(), id, name, traceModule); - this.schema = newSchema; - } - - @Override - public Schema getSchema() { - return schema; - } - - @Override - public String getSQL() { - return schema.getSQL() + "." + super.getSQL(); - } - - @Override - public boolean isHidden() { - return false; - } - -} diff --git a/h2/src/main/org/h2/schema/Sequence.java b/h2/src/main/org/h2/schema/Sequence.java index fd4c3ad2c3..f21b918132 100644 --- a/h2/src/main/org/h2/schema/Sequence.java +++ b/h2/src/main/org/h2/schema/Sequence.java @@ -1,185 +1,334 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; -import java.math.BigInteger; - import org.h2.api.ErrorCode; +import org.h2.command.ddl.SequenceOptions; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.table.Table; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueBigint; /** * A sequence is created using the statement * CREATE SEQUENCE */ -public class Sequence extends SchemaObjectBase { +public final class Sequence extends SchemaObject { + + /** + * CYCLE clause and sequence state. + */ + public enum Cycle { + + /** + * Sequence is cycled. + */ + CYCLE, + + /** + * Sequence is not cycled and isn't exhausted yet. + */ + NO_CYCLE, + + /** + * Sequence is not cycled and was already exhausted. 
+ */ + EXHAUSTED; + + /** + * Return whether sequence is cycled. + * + * @return {@code true} if sequence is cycled, {@code false} if sequence + * is not cycled + */ + public boolean isCycle() { + return this == CYCLE; + } + + } /** * The default cache size for sequences. */ public static final int DEFAULT_CACHE_SIZE = 32; - private long value; - private long valueWithMargin; + private long baseValue; + private long margin; + + private TypeInfo dataType; + private long increment; private long cacheSize; + private long startValue; private long minValue; private long maxValue; - private boolean cycle; + private Cycle cycle; private boolean belongsToTable; + private boolean writeWithMargin; /** - * Creates a new sequence for an auto-increment column. + * Creates a new sequence. * - * @param schema the schema - * @param id the object id - * @param name the sequence name - * @param startValue the first value to return - * @param increment the increment count + * @param session + * the session + * @param schema + * the schema + * @param id + * the object id + * @param name + * the sequence name + * @param options + * the sequence options + * @param belongsToTable + * whether this sequence belongs to a table (for generated + * columns) */ - public Sequence(Schema schema, int id, String name, long startValue, - long increment) { - this(schema, id, name, startValue, increment, null, null, null, false, - true); + public Sequence(SessionLocal session, Schema schema, int id, String name, SequenceOptions options, + boolean belongsToTable) { + super(schema, id, name, Trace.SEQUENCE); + dataType = options.getDataType(); + if (dataType == null) { + options.setDataType(dataType = session.getMode().decimalSequences ? TypeInfo.TYPE_NUMERIC_BIGINT + : TypeInfo.TYPE_BIGINT); + } + long bounds[] = options.getBounds(); + Long t = options.getIncrement(session); + long increment = t != null ? 
t : 1; + Long start = options.getStartValue(session); + Long min = options.getMinValue(null, session); + Long max = options.getMaxValue(null, session); + long minValue = min != null ? min : getDefaultMinValue(start, increment, bounds); + long maxValue = max != null ? max : getDefaultMaxValue(start, increment, bounds); + long startValue = start != null ? start : increment >= 0 ? minValue : maxValue; + Long restart = options.getRestartValue(session, startValue); + long baseValue = restart != null ? restart : startValue; + t = options.getCacheSize(session); + long cacheSize; + boolean mayAdjustCacheSize; + if (t != null) { + cacheSize = t; + mayAdjustCacheSize = false; + } else { + cacheSize = DEFAULT_CACHE_SIZE; + mayAdjustCacheSize = true; + } + cacheSize = checkOptions(baseValue, startValue, minValue, maxValue, increment, cacheSize, mayAdjustCacheSize); + Cycle cycle = options.getCycle(); + if (cycle == null) { + cycle = Cycle.NO_CYCLE; + } else if (cycle == Cycle.EXHAUSTED) { + baseValue = startValue; + } + this.margin = this.baseValue = baseValue; + this.increment = increment; + this.cacheSize = cacheSize; + this.startValue = startValue; + this.minValue = minValue; + this.maxValue = maxValue; + this.cycle = cycle; + this.belongsToTable = belongsToTable; } /** - * Creates a new sequence. - * - * @param schema the schema - * @param id the object id - * @param name the sequence name - * @param startValue the first value to return - * @param increment the increment count - * @param cacheSize the number of entries to pre-fetch - * @param minValue the minimum value - * @param maxValue the maximum value - * @param cycle whether to jump back to the min value if needed - * @param belongsToTable whether this sequence belongs to a table (for - * auto-increment columns) + * Allows the base value, start value, min value, max value, increment and + * cache size to be updated atomically, including atomic validation. 
Useful + * because setting these attributes one after the other could otherwise + * result in an invalid sequence state (e.g. min value > max value, start + * value < min value, etc). + * @param baseValue + * the base value ({@code null} if restart is not requested) + * @param startValue + * the new start value ({@code null} if no change) + * @param minValue + * the new min value ({@code null} if no change) + * @param maxValue + * the new max value ({@code null} if no change) + * @param increment + * the new increment ({@code null} if no change) + * @param cycle + * the new cycle value, or {@code null} if no change + * @param cacheSize + * the new cache size ({@code null} if no change) */ - public Sequence(Schema schema, int id, String name, Long startValue, - Long increment, Long cacheSize, Long minValue, Long maxValue, - boolean cycle, boolean belongsToTable) { - initSchemaObjectBase(schema, id, name, Trace.SEQUENCE); - this.increment = increment != null ? - increment : 1; - this.minValue = minValue != null ? - minValue : getDefaultMinValue(startValue, this.increment); - this.maxValue = maxValue != null ? - maxValue : getDefaultMaxValue(startValue, this.increment); - this.value = startValue != null ? - startValue : getDefaultStartValue(this.increment); - this.valueWithMargin = value; - this.cacheSize = cacheSize != null ? - Math.max(1, cacheSize) : DEFAULT_CACHE_SIZE; - this.cycle = cycle; - this.belongsToTable = belongsToTable; - if (!isValid(this.value, this.minValue, this.maxValue, this.increment)) { - throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID, name, - String.valueOf(this.value), String.valueOf(this.minValue), - String.valueOf(this.maxValue), - String.valueOf(this.increment)); + public synchronized void modify(Long baseValue, Long startValue, Long minValue, Long maxValue, Long increment, + Cycle cycle, Long cacheSize) { + long baseValueAsLong = baseValue != null ? baseValue : this.baseValue; + long startValueAsLong = startValue != null ? 
startValue : this.startValue; + long minValueAsLong = minValue != null ? minValue : this.minValue; + long maxValueAsLong = maxValue != null ? maxValue : this.maxValue; + long incrementAsLong = increment != null ? increment : this.increment; + long cacheSizeAsLong; + boolean mayAdjustCacheSize; + if (cacheSize != null) { + cacheSizeAsLong = cacheSize; + mayAdjustCacheSize = false; + } else { + cacheSizeAsLong = this.cacheSize; + mayAdjustCacheSize = true; + } + cacheSizeAsLong = checkOptions(baseValueAsLong, startValueAsLong, minValueAsLong, maxValueAsLong, + incrementAsLong, cacheSizeAsLong, mayAdjustCacheSize); + if (cycle == null) { + cycle = this.cycle; + if (cycle == Cycle.EXHAUSTED && baseValue != null) { + cycle = Cycle.NO_CYCLE; + } + } else if (cycle == Cycle.EXHAUSTED) { + baseValueAsLong = startValueAsLong; } + this.margin = this.baseValue = baseValueAsLong; + this.startValue = startValueAsLong; + this.minValue = minValueAsLong; + this.maxValue = maxValueAsLong; + this.increment = incrementAsLong; + this.cacheSize = cacheSizeAsLong; + this.cycle = cycle; } /** - * Allows the start value, increment, min value and max value to be updated - * atomically, including atomic validation. Useful because setting these - * attributes one after the other could otherwise result in an invalid - * sequence state (e.g. min value > max value, start value < min value, - * etc). + * Validates the specified prospective base value, start value, min value, + * max value, increment, and cache size relative to each other, since each + * of their respective validities are contingent on the values of the other + * parameters. 
* - * @param startValue the new start value (null if no change) - * @param minValue the new min value (null if no change) - * @param maxValue the new max value (null if no change) - * @param increment the new increment (null if no change) + * @param baseValue + * the prospective base value + * @param startValue + * the prospective start value + * @param minValue + * the prospective min value + * @param maxValue + * the prospective max value + * @param increment + * the prospective increment + * @param cacheSize + * the prospective cache size + * @param mayAdjustCacheSize + * whether cache size may be adjusted, cache size 0 is adjusted + * unconditionally to 1 + * @return the prospective or adjusted cache size */ - public synchronized void modify(Long startValue, Long minValue, - Long maxValue, Long increment) { - if (startValue == null) { - startValue = this.value; - } - if (minValue == null) { - minValue = this.minValue; - } - if (maxValue == null) { - maxValue = this.maxValue; - } - if (increment == null) { - increment = this.increment; + private long checkOptions(long baseValue, long startValue, long minValue, long maxValue, long increment, + long cacheSize, boolean mayAdjustCacheSize) { + if (minValue <= baseValue && baseValue <= maxValue // + && minValue <= startValue && startValue <= maxValue // + && minValue < maxValue && increment != 0L) { + long range = maxValue - minValue; + if (Long.compareUnsigned(Math.abs(increment), range) <= 0 && cacheSize >= 0L) { + if (cacheSize <= 1L) { + return 1L; + } + long maxCacheSize = getMaxCacheSize(range, increment); + if (cacheSize <= maxCacheSize) { + return cacheSize; + } + if (mayAdjustCacheSize) { + return maxCacheSize; + } + } } - if (!isValid(startValue, minValue, maxValue, increment)) { - throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID, - getName(), String.valueOf(startValue), - String.valueOf(minValue), - String.valueOf(maxValue), - String.valueOf(increment)); + throw 
DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID_7, getName(), Long.toString(baseValue), + Long.toString(startValue), Long.toString(minValue), Long.toString(maxValue), Long.toString(increment), + Long.toString(cacheSize)); + } + + private static long getMaxCacheSize(long range, long increment) { + if (increment > 0L) { + if (range < 0) { + range = Long.MAX_VALUE; + } else { + range += increment; + if (range < 0) { + range = Long.MAX_VALUE; + } + } + } else { + range = -range; + if (range > 0) { + range = Long.MIN_VALUE; + } else { + range += increment; + if (range >= 0) { + range = Long.MIN_VALUE; + } + } } - this.value = startValue; - this.valueWithMargin = startValue; - this.minValue = minValue; - this.maxValue = maxValue; - this.increment = increment; + return range / increment; } /** - * Validates the specified prospective start value, min value, max value and - * increment relative to each other, since each of their respective - * validities are contingent on the values of the other parameters. + * Calculates default min value. * - * @param value the prospective start value - * @param minValue the prospective min value - * @param maxValue the prospective max value - * @param increment the prospective increment + * @param startValue the start value of the sequence. + * @param increment the increment of the sequence value. + * @param bounds min and max bounds of data type of the sequence + * @return min value. 
*/ - private static boolean isValid(long value, long minValue, long maxValue, - long increment) { - return minValue <= value && - maxValue >= value && - maxValue > minValue && - increment != 0 && - // Math.abs(increment) < maxValue - minValue - // use BigInteger to avoid overflows when maxValue and minValue - // are really big - BigInteger.valueOf(increment).abs().compareTo( - BigInteger.valueOf(maxValue).subtract(BigInteger.valueOf(minValue))) < 0; - } - - private static long getDefaultMinValue(Long startValue, long increment) { - long v = increment >= 0 ? 1 : Long.MIN_VALUE; + public static long getDefaultMinValue(Long startValue, long increment, long[] bounds) { + long v = increment >= 0 ? 1 : bounds[0]; if (startValue != null && increment >= 0 && startValue < v) { v = startValue; } return v; } - private static long getDefaultMaxValue(Long startValue, long increment) { - long v = increment >= 0 ? Long.MAX_VALUE : -1; + /** + * Calculates default max value. + * + * @param startValue the start value of the sequence. + * @param increment the increment of the sequence value. + * @param bounds min and max bounds of data type of the sequence + * @return min value. + */ + public static long getDefaultMaxValue(Long startValue, long increment, long[] bounds) { + long v = increment >= 0 ? bounds[1] : -1; if (startValue != null && increment < 0 && startValue > v) { v = startValue; } return v; } - private long getDefaultStartValue(long increment) { - return increment >= 0 ? 
minValue : maxValue; - } - public boolean getBelongsToTable() { return belongsToTable; } + public TypeInfo getDataType() { + return dataType; + } + + public int getEffectivePrecision() { + TypeInfo dataType = this.dataType; + switch (dataType.getValueType()) { + case Value.NUMERIC: { + int p = (int) dataType.getPrecision(); + int s = dataType.getScale(); + if (p - s > ValueBigint.DECIMAL_PRECISION) { + return ValueBigint.DECIMAL_PRECISION + s; + } + return p; + } + case Value.DECFLOAT: + return Math.min((int) dataType.getPrecision(), ValueBigint.DECIMAL_PRECISION); + default: + return (int) dataType.getPrecision(); + } + } + public long getIncrement() { return increment; } + public long getStartValue() { + return startValue; + } + public long getMinValue() { return minValue; } @@ -188,89 +337,173 @@ public long getMaxValue() { return maxValue; } - public boolean getCycle() { + public Cycle getCycle() { return cycle; } - public void setCycle(boolean cycle) { - this.cycle = cycle; - } - @Override public String getDropSQL() { if (getBelongsToTable()) { return null; } - return "DROP SEQUENCE IF EXISTS " + getSQL(); + StringBuilder builder = new StringBuilder("DROP SEQUENCE IF EXISTS "); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); } @Override public String getCreateSQLForCopy(Table table, String quotedName) { - throw DbException.throwInternalError(); + throw DbException.getInternalError(toString()); } @Override - public synchronized String getCreateSQL() { - StringBuilder buff = new StringBuilder("CREATE SEQUENCE "); - buff.append(getSQL()).append(" START WITH ").append(value); + public String getCreateSQL() { + StringBuilder builder = getSQL(new StringBuilder("CREATE SEQUENCE "), DEFAULT_SQL_FLAGS); + if (dataType.getValueType() != Value.BIGINT) { + dataType.getSQL(builder.append(" AS "), DEFAULT_SQL_FLAGS); + } + builder.append(' '); + synchronized (this) { + getSequenceOptionsSQL(builder, writeWithMargin ? 
margin : baseValue); + } + if (belongsToTable) { + builder.append(" BELONGS_TO_TABLE"); + } + return builder.toString(); + } + + /** + * Append the options part of the SQL statement to create the sequence. + * + * @param builder the builder + * @return the builder + */ + public synchronized StringBuilder getSequenceOptionsSQL(StringBuilder builder) { + return getSequenceOptionsSQL(builder, baseValue); + } + + private StringBuilder getSequenceOptionsSQL(StringBuilder builder, long value) { + builder.append("START WITH ").append(startValue); + if (value != startValue && cycle != Cycle.EXHAUSTED) { + builder.append(" RESTART WITH ").append(value); + } if (increment != 1) { - buff.append(" INCREMENT BY ").append(increment); + builder.append(" INCREMENT BY ").append(increment); } - if (minValue != getDefaultMinValue(value, increment)) { - buff.append(" MINVALUE ").append(minValue); + long[] bounds = SequenceOptions.getBounds(dataType); + if (minValue != getDefaultMinValue(value, increment, bounds)) { + builder.append(" MINVALUE ").append(minValue); } - if (maxValue != getDefaultMaxValue(value, increment)) { - buff.append(" MAXVALUE ").append(maxValue); + if (maxValue != getDefaultMaxValue(value, increment, bounds)) { + builder.append(" MAXVALUE ").append(maxValue); } - if (cycle) { - buff.append(" CYCLE"); + if (cycle == Cycle.CYCLE) { + builder.append(" CYCLE"); + } else if (cycle == Cycle.EXHAUSTED) { + builder.append(" EXHAUSTED"); } if (cacheSize != DEFAULT_CACHE_SIZE) { - buff.append(" CACHE ").append(cacheSize); - } - if (belongsToTable) { - buff.append(" BELONGS_TO_TABLE"); + if (cacheSize == 1) { + builder.append(" NO CACHE"); + } else if (cacheSize > DEFAULT_CACHE_SIZE // + || cacheSize != getMaxCacheSize(maxValue - minValue, increment)) { + builder.append(" CACHE ").append(cacheSize); + } } - return buff.toString(); + return builder; } /** - * Get the next value for this sequence. + * Get the next value for this sequence. 
Should not be called directly, use + * {@link SessionLocal#getNextValueFor(Sequence, org.h2.command.Prepared)} instead. * * @param session the session * @return the next value */ - public synchronized long getNext(Session session) { - boolean needsFlush = false; - if ((increment > 0 && value >= valueWithMargin) || - (increment < 0 && value <= valueWithMargin)) { - valueWithMargin += increment * cacheSize; - needsFlush = true; - } - if ((increment > 0 && value > maxValue) || - (increment < 0 && value < minValue)) { - if (cycle) { - value = increment > 0 ? minValue : maxValue; - valueWithMargin = value + (increment * cacheSize); - needsFlush = true; - } else { + public Value getNext(SessionLocal session) { + long result; + boolean needsFlush; + synchronized (this) { + if (cycle == Cycle.EXHAUSTED) { throw DbException.get(ErrorCode.SEQUENCE_EXHAUSTED, getName()); } + result = baseValue; + long newBase = result + increment; + needsFlush = increment > 0 ? increment(result, newBase) : decrement(result, newBase); } if (needsFlush) { flush(session); } - long v = value; - value += increment; - return v; + return ValueBigint.get(result).castTo(dataType, session); + } + + private boolean increment(long oldBase, long newBase) { + boolean needsFlush = false; + /* + * If old base is not negative and new base is negative there is an + * overflow. + */ + if (newBase > maxValue || (~oldBase & newBase) < 0) { + newBase = minValue; + needsFlush = true; + if (cycle == Cycle.CYCLE) { + margin = newBase + increment * (cacheSize - 1); + } else { + margin = newBase; + cycle = Cycle.EXHAUSTED; + } + } else if (newBase > margin) { + long newMargin = newBase + increment * (cacheSize - 1); + if (newMargin > maxValue || (~newBase & newMargin) < 0) { + /* + * Don't cache values near the end of the sequence for + * simplicity. 
+ */ + newMargin = newBase; + } + margin = newMargin; + needsFlush = true; + } + baseValue = newBase; + return needsFlush; + } + + private boolean decrement(long oldBase, long newBase) { + boolean needsFlush = false; + /* + * If old base is negative and new base is not negative there is an + * overflow. + */ + if (newBase < minValue || (oldBase & ~newBase) < 0) { + newBase = maxValue; + needsFlush = true; + if (cycle == Cycle.CYCLE) { + margin = newBase + increment * (cacheSize - 1); + } else { + margin = newBase; + cycle = Cycle.EXHAUSTED; + } + } else if (newBase < margin) { + long newMargin = newBase + increment * (cacheSize - 1); + if (newMargin < minValue || (newBase & ~newMargin) < 0) { + /* + * Don't cache values near the end of the sequence for + * simplicity. + */ + newMargin = newBase; + } + margin = newMargin; + needsFlush = true; + } + baseValue = newBase; + return needsFlush; } /** * Flush the current value to disk. */ public void flushWithoutMargin() { - if (valueWithMargin != value) { - valueWithMargin = value; + if (margin != baseValue) { + margin = baseValue; flush(null); } } @@ -280,12 +513,15 @@ public void flushWithoutMargin() { * * @param session the session */ - public synchronized void flush(Session session) { - if (session == null || !database.isSysTableLocked()) { - // This session may not lock the sys table (except if it already has + public void flush(SessionLocal session) { + if (isTemporary()) { + return; + } + if (session == null || !database.isSysTableLockedBy(session)) { + // This session may not lock the sys table (except if it has already // locked it) because it must be committed immediately, otherwise // other threads can not access the sys table. 
- Session sysSession = database.getSystemSession(); + SessionLocal sysSession = database.getSystemSession(); synchronized (sysSession) { flushInternal(sysSession); sysSession.commit(false); @@ -297,16 +533,17 @@ public synchronized void flush(Session session) { } } - private void flushInternal(Session session) { - // just for this case, use the value with the margin for the script - long realValue = value; + private void flushInternal(SessionLocal session) { + final boolean metaWasLocked = database.lockMeta(session); + // just for this case, use the value with the margin try { - value = valueWithMargin; - if (!isTemporary()) { - database.updateMeta(session, this); - } + writeWithMargin = true; + database.updateMeta(session, this); } finally { - value = realValue; + writeWithMargin = false; + if (!metaWasLocked) { + database.unlockMeta(session); + } } } @@ -323,28 +560,24 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { database.removeMeta(session, getId()); invalidate(); } - @Override - public void checkRename() { - // nothing to do + public synchronized long getBaseValue() { + // Use synchronized because baseValue is not volatile + return baseValue; } public synchronized long getCurrentValue() { - return value - increment; + return baseValue - increment; } public void setBelongsToTable(boolean b) { this.belongsToTable = b; } - public void setCacheSize(long cacheSize) { - this.cacheSize = Math.max(1, cacheSize); - } - public long getCacheSize() { return cacheSize; } diff --git a/h2/src/main/org/h2/schema/TriggerObject.java b/h2/src/main/org/h2/schema/TriggerObject.java index 29b480ed7b..fbf2b462ea 100644 --- a/h2/src/main/org/h2/schema/TriggerObject.java +++ b/h2/src/main/org/h2/schema/TriggerObject.java @@ -1,36 +1,41 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.schema; import java.lang.reflect.Method; import java.sql.Connection; +import java.sql.ResultSet; import java.sql.SQLException; +import java.util.Arrays; import org.h2.api.ErrorCode; import org.h2.api.Trigger; -import org.h2.command.Parser; import org.h2.engine.Constants; import org.h2.engine.DbObject; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcResultSet; import org.h2.message.DbException; import org.h2.message.Trace; import org.h2.result.Row; +import org.h2.result.SimpleResult; +import org.h2.table.Column; import org.h2.table.Table; +import org.h2.tools.TriggerAdapter; import org.h2.util.JdbcUtils; import org.h2.util.SourceCompiler; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; -import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueToObjectConverter; /** *A trigger is created using the statement * CREATE TRIGGER */ -public class TriggerObject extends SchemaObjectBase { +public final class TriggerObject extends SchemaObject { /** * The default queue size. 
@@ -51,7 +56,7 @@ public class TriggerObject extends SchemaObjectBase { private Trigger triggerCallback; public TriggerObject(Schema schema, int id, String name, Table table) { - initSchemaObjectBase(schema, id, name, Trace.TRIGGER); + super(schema, id, name, Trace.TRIGGER); this.table = table; setTemporary(table.isTemporary()); } @@ -60,6 +65,10 @@ public void setBefore(boolean before) { this.before = before; } + public boolean isInsteadOf() { + return insteadOf; + } + public void setInsteadOf(boolean insteadOf) { this.insteadOf = insteadOf; } @@ -69,11 +78,11 @@ private synchronized void load() { return; } try { - Session sysSession = database.getSystemSession(); + SessionLocal sysSession = database.getSystemSession(); Connection c2 = sysSession.createConnection(false); Object obj; if (triggerClassName != null) { - obj = JdbcUtils.loadUserClass(triggerClassName).newInstance(); + obj = JdbcUtils.loadUserClass(triggerClassName).getDeclaredConstructor().newInstance(); } else { obj = loadFromSource(); } @@ -94,11 +103,15 @@ private Trigger loadFromSource() { String fullClassName = Constants.USER_PACKAGE + ".trigger." 
+ getName(); compiler.setSource(fullClassName, triggerSource); try { - Method m = compiler.getMethod(fullClassName); - if (m.getParameterTypes().length > 0) { - throw new IllegalStateException("No parameters are allowed for a trigger"); + if (SourceCompiler.isJavaxScriptSource(triggerSource)) { + return (Trigger) compiler.getCompiledScript(fullClassName).eval(); + } else { + final Method m = compiler.getMethod(fullClassName); + if (m.getParameterTypes().length > 0) { + throw new IllegalStateException("No parameters are allowed for a trigger"); + } + return (Trigger) m.invoke(null); } - return (Trigger) m.invoke(null); } catch (DbException e) { throw e; } catch (Exception e) { @@ -150,7 +163,7 @@ private void setTriggerAction(String triggerClassName, String source, boolean fo * @param type the trigger type * @param beforeAction if this method is called before applying the changes */ - public void fire(Session session, int type, boolean beforeAction) { + public void fire(SessionLocal session, int type, boolean beforeAction) { if (rowBased || before != beforeAction || (typeMask & type) == 0) { return; } @@ -160,28 +173,31 @@ public void fire(Session session, int type, boolean beforeAction) { if (type != Trigger.SELECT) { old = session.setCommitOrRollbackDisabled(true); } - Value identity = session.getLastScopeIdentity(); + Value identity = session.getLastIdentity(); try { - triggerCallback.fire(c2, null, null); + if (triggerCallback instanceof TriggerAdapter) { + ((TriggerAdapter) triggerCallback).fire(c2, (ResultSet) null, (ResultSet) null); + } else { + triggerCallback.fire(c2, null, null); + } } catch (Throwable e) { - throw DbException.get(ErrorCode.ERROR_EXECUTING_TRIGGER_3, e, getName(), - triggerClassName != null ? 
triggerClassName : "..source..", e.toString()); + throw getErrorExecutingTrigger(e); } finally { - session.setLastScopeIdentity(identity); + session.setLastIdentity(identity); if (type != Trigger.SELECT) { session.setCommitOrRollbackDisabled(old); } } } - private static Object[] convertToObjectList(Row row) { + private static Object[] convertToObjectList(Row row, JdbcConnection conn) { if (row == null) { return null; } int len = row.getColumnCount(); Object[] list = new Object[len]; for (int i = 0; i < len; i++) { - list[i] = row.getValue(i).getObject(); + list[i] = ValueToObjectConverter.valueToDefaultObject(row.getValue(i), conn, false); } return list; } @@ -193,6 +209,7 @@ private static Object[] convertToObjectList(Row row) { * times for each statement. * * @param session the session + * @param table the table * @param oldRow the old row * @param newRow the new row * @param beforeAction true if this method is called before the operation is @@ -200,7 +217,7 @@ private static Object[] convertToObjectList(Row row) { * @param rollback when the operation occurred within a rollback * @return true if no further action is required (for 'instead of' triggers) */ - public boolean fireRow(Session session, Row oldRow, Row newRow, + public boolean fireRow(SessionLocal session, Table table, Row oldRow, Row newRow, boolean beforeAction, boolean rollback) { if (!rowBased || before != beforeAction) { return false; @@ -209,8 +226,6 @@ public boolean fireRow(Session session, Row oldRow, Row newRow, return false; } load(); - Object[] oldList; - Object[] newList; boolean fire = false; if ((typeMask & Trigger.INSERT) != 0) { if (oldRow == null && newRow != null) { @@ -230,28 +245,56 @@ public boolean fireRow(Session session, Row oldRow, Row newRow, if (!fire) { return false; } - oldList = convertToObjectList(oldRow); - newList = convertToObjectList(newRow); - Object[] newListBackup; - if (before && newList != null) { - newListBackup = new Object[newList.length]; - 
System.arraycopy(newList, 0, newListBackup, 0, newList.length); - } else { - newListBackup = null; - } - Connection c2 = session.createConnection(false); + JdbcConnection c2 = session.createConnection(false); boolean old = session.getAutoCommit(); boolean oldDisabled = session.setCommitOrRollbackDisabled(true); - Value identity = session.getLastScopeIdentity(); + Value identity = session.getLastIdentity(); try { session.setAutoCommit(false); - triggerCallback.fire(c2, oldList, newList); - if (newListBackup != null) { - for (int i = 0; i < newList.length; i++) { - Object o = newList[i]; - if (o != newListBackup[i]) { - Value v = DataType.convertToValue(session, o, Value.UNKNOWN); - newRow.setValue(i, v); + if (triggerCallback instanceof TriggerAdapter) { + JdbcResultSet oldResultSet = oldRow != null ? createResultSet(c2, table, oldRow, false) : null; + JdbcResultSet newResultSet = newRow != null ? createResultSet(c2, table, newRow, before) : null; + try { + ((TriggerAdapter) triggerCallback).fire(c2, oldResultSet, newResultSet); + } catch (Throwable e) { + throw getErrorExecutingTrigger(e); + } + if (newResultSet != null) { + Value[] updatedList = newResultSet.getUpdateRow(); + if (updatedList != null) { + boolean modified = false; + for (int i = 0, l = updatedList.length; i < l; i++) { + Value v = updatedList[i]; + if (v != null) { + modified = true; + newRow.setValue(i, v); + } + } + if (modified) { + table.convertUpdateRow(session, newRow, true); + } + } + } + } else { + Object[] oldList = convertToObjectList(oldRow, c2); + Object[] newList = convertToObjectList(newRow, c2); + Object[] newListBackup = before && newList != null ? 
Arrays.copyOf(newList, newList.length) : null; + try { + triggerCallback.fire(c2, oldList, newList); + } catch (Throwable e) { + throw getErrorExecutingTrigger(e); + } + if (newListBackup != null) { + boolean modified = false; + for (int i = 0; i < newList.length; i++) { + Object o = newList[i]; + if (o != newListBackup[i]) { + modified = true; + newRow.setValue(i, ValueToObjectConverter.objectToValue(session, o, Value.UNKNOWN)); + } + } + if (modified) { + table.convertUpdateRow(session, newRow, true); } } } @@ -262,13 +305,50 @@ public boolean fireRow(Session session, Row oldRow, Row newRow, throw DbException.convert(e); } } finally { - session.setLastScopeIdentity(identity); + session.setLastIdentity(identity); session.setCommitOrRollbackDisabled(oldDisabled); session.setAutoCommit(old); } return insteadOf; } + private static JdbcResultSet createResultSet(JdbcConnection conn, Table table, Row row, boolean updatable) + throws SQLException { + SimpleResult result = new SimpleResult(table.getSchema().getName(), table.getName()); + for (Column c : table.getColumns()) { + result.addColumn(c.getName(), c.getType()); + } + /* + * Old implementation works with and without next() invocation, so add + * the row twice for compatibility. + */ + result.addRow(row.getValueList()); + result.addRow(row.getValueList()); + JdbcResultSet resultSet = new JdbcResultSet(conn, null, null, result, -1, false, false, updatable); + resultSet.next(); + return resultSet; + } + + private DbException getErrorExecutingTrigger(Throwable e) { + if (e instanceof DbException) { + return (DbException) e; + } + if (e instanceof SQLException) { + return DbException.convert(e); + } + return DbException.get(ErrorCode.ERROR_EXECUTING_TRIGGER_3, e, getName(), + triggerClassName != null ? triggerClassName : "..source..", e.toString()); + } + + /** + * Returns the trigger type. + * + * @return the trigger type + */ + public int getTypeMask() { + return typeMask; + } + /** * Set the trigger type. 
* @@ -282,6 +362,10 @@ public void setRowBased(boolean rowBased) { this.rowBased = rowBased; } + public boolean isRowBased() { + return rowBased; + } + public void setQueueSize(int size) { this.queueSize = size; } @@ -302,68 +386,84 @@ public void setOnRollback(boolean onRollback) { this.onRollback = onRollback; } - @Override - public String getDropSQL() { - return null; + public boolean isOnRollback() { + return onRollback; } @Override public String getCreateSQLForCopy(Table targetTable, String quotedName) { - StringBuilder buff = new StringBuilder("CREATE FORCE TRIGGER "); - buff.append(quotedName); + StringBuilder builder = new StringBuilder("CREATE FORCE TRIGGER "); + builder.append(quotedName); if (insteadOf) { - buff.append(" INSTEAD OF "); + builder.append(" INSTEAD OF "); } else if (before) { - buff.append(" BEFORE "); + builder.append(" BEFORE "); } else { - buff.append(" AFTER "); + builder.append(" AFTER "); } - buff.append(getTypeNameList()); - buff.append(" ON ").append(targetTable.getSQL()); + getTypeNameList(builder).append(" ON "); + targetTable.getSQL(builder, DEFAULT_SQL_FLAGS); if (rowBased) { - buff.append(" FOR EACH ROW"); + builder.append(" FOR EACH ROW"); } if (noWait) { - buff.append(" NOWAIT"); + builder.append(" NOWAIT"); } else { - buff.append(" QUEUE ").append(queueSize); + builder.append(" QUEUE ").append(queueSize); } if (triggerClassName != null) { - buff.append(" CALL ").append(Parser.quoteIdentifier(triggerClassName)); + StringUtils.quoteStringSQL(builder.append(" CALL "), triggerClassName); } else { - buff.append(" AS ").append(StringUtils.quoteStringSQL(triggerSource)); + StringUtils.quoteStringSQL(builder.append(" AS "), triggerSource); } - return buff.toString(); + return builder.toString(); } - public String getTypeNameList() { - StatementBuilder buff = new StatementBuilder(); + /** + * Append the trigger types to the given string builder. 
+ * + * @param builder the builder + * @return the passed string builder + */ + public StringBuilder getTypeNameList(StringBuilder builder) { + boolean f = false; if ((typeMask & Trigger.INSERT) != 0) { - buff.appendExceptFirst(", "); - buff.append("INSERT"); + f = true; + builder.append("INSERT"); } if ((typeMask & Trigger.UPDATE) != 0) { - buff.appendExceptFirst(", "); - buff.append("UPDATE"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("UPDATE"); } if ((typeMask & Trigger.DELETE) != 0) { - buff.appendExceptFirst(", "); - buff.append("DELETE"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("DELETE"); } if ((typeMask & Trigger.SELECT) != 0) { - buff.appendExceptFirst(", "); - buff.append("SELECT"); + if (f) { + builder.append(", "); + } + f = true; + builder.append("SELECT"); } if (onRollback) { - buff.appendExceptFirst(", "); - buff.append("ROLLBACK"); + if (f) { + builder.append(", "); + } + builder.append("ROLLBACK"); } - return buff.toString(); + return builder; } @Override public String getCreateSQL() { - return getCreateSQLForCopy(table, getSQL()); + return getCreateSQLForCopy(table, getSQL(DEFAULT_SQL_FLAGS)); } @Override @@ -372,7 +472,7 @@ public int getType() { } @Override - public void removeChildrenAndResources(Session session) { + public void removeChildrenAndResources(SessionLocal session) { table.removeTrigger(this); database.removeMeta(session, getId()); if (triggerCallback != null) { @@ -389,11 +489,6 @@ public void removeChildrenAndResources(Session session) { invalidate(); } - @Override - public void checkRename() { - // nothing to do - } - /** * Get the table of this trigger. * @@ -427,6 +522,7 @@ public String getTriggerSource() { /** * Close the trigger. 
+ * @throws SQLException on failure */ public void close() throws SQLException { if (triggerCallback != null) { diff --git a/h2/src/main/org/h2/schema/UserAggregate.java b/h2/src/main/org/h2/schema/UserAggregate.java new file mode 100644 index 0000000000..45ee8b42df --- /dev/null +++ b/h2/src/main/org/h2/schema/UserAggregate.java @@ -0,0 +1,119 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.schema; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.h2.api.Aggregate; +import org.h2.api.AggregateFunction; +import org.h2.engine.DbObject; +import org.h2.engine.SessionLocal; +import org.h2.message.DbException; +import org.h2.message.Trace; +import org.h2.util.JdbcUtils; +import org.h2.util.StringUtils; +import org.h2.value.DataType; +import org.h2.value.TypeInfo; + +/** + * Represents a user-defined aggregate function. + */ +public final class UserAggregate extends UserDefinedFunction { + + private Class javaClass; + + public UserAggregate(Schema schema, int id, String name, String className, + boolean force) { + super(schema, id, name, Trace.FUNCTION); + this.className = className; + if (!force) { + getInstance(); + } + } + + public Aggregate getInstance() { + if (javaClass == null) { + javaClass = JdbcUtils.loadUserClass(className); + } + Object obj; + try { + obj = javaClass.getDeclaredConstructor().newInstance(); + Aggregate agg; + if (obj instanceof Aggregate) { + agg = (Aggregate) obj; + } else { + agg = new AggregateWrapper((AggregateFunction) obj); + } + return agg; + } catch (Exception e) { + throw DbException.convert(e); + } + } + + @Override + public String getDropSQL() { + StringBuilder builder = new StringBuilder("DROP AGGREGATE IF EXISTS "); + return getSQL(builder, DEFAULT_SQL_FLAGS).toString(); + } + + @Override + public String getCreateSQL() { + StringBuilder builder = new 
StringBuilder("CREATE FORCE AGGREGATE "); + getSQL(builder, DEFAULT_SQL_FLAGS).append(" FOR "); + return StringUtils.quoteStringSQL(builder, className).toString(); + } + + @Override + public int getType() { + return DbObject.AGGREGATE; + } + + @Override + public synchronized void removeChildrenAndResources(SessionLocal session) { + database.removeMeta(session, getId()); + className = null; + javaClass = null; + invalidate(); + } + + /** + * Wrap {@link AggregateFunction} in order to behave as + * {@link org.h2.api.Aggregate} + **/ + private static class AggregateWrapper implements Aggregate { + private final AggregateFunction aggregateFunction; + + AggregateWrapper(AggregateFunction aggregateFunction) { + this.aggregateFunction = aggregateFunction; + } + + @Override + public void init(Connection conn) throws SQLException { + aggregateFunction.init(conn); + } + + @Override + public int getInternalType(int[] inputTypes) throws SQLException { + int[] sqlTypes = new int[inputTypes.length]; + for (int i = 0; i < inputTypes.length; i++) { + sqlTypes[i] = DataType.convertTypeToSQLType(TypeInfo.getTypeInfo(inputTypes[i])); + } + return DataType.convertSQLTypeToValueType(aggregateFunction.getType(sqlTypes)); + } + + @Override + public void add(Object value) throws SQLException { + aggregateFunction.add(value); + } + + @Override + public Object getResult() throws SQLException { + return aggregateFunction.getResult(); + } + } + +} diff --git a/h2/src/main/org/h2/schema/UserDefinedFunction.java b/h2/src/main/org/h2/schema/UserDefinedFunction.java new file mode 100644 index 0000000000..7a3c6c8954 --- /dev/null +++ b/h2/src/main/org/h2/schema/UserDefinedFunction.java @@ -0,0 +1,36 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.schema; + +import org.h2.message.DbException; +import org.h2.table.Table; + +/** + * User-defined Java function or aggregate function. + */ +public abstract class UserDefinedFunction extends SchemaObject { + + String className; + + UserDefinedFunction(Schema newSchema, int id, String name, int traceModuleId) { + super(newSchema, id, name, traceModuleId); + } + + @Override + public final String getCreateSQLForCopy(Table table, String quotedName) { + throw DbException.getInternalError(toString()); + } + + @Override + public final void checkRename() { + throw DbException.getUnsupportedException("RENAME"); + } + + public final String getJavaClassName() { + return className; + } + +} diff --git a/h2/src/main/org/h2/schema/package.html b/h2/src/main/org/h2/schema/package.html index b073c6eb2a..815a65a659 100644 --- a/h2/src/main/org/h2/schema/package.html +++ b/h2/src/main/org/h2/schema/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/security/AES.java b/h2/src/main/org/h2/security/AES.java index 28d202e581..24a73257f8 100644 --- a/h2/src/main/org/h2/security/AES.java +++ b/h2/src/main/org/h2/security/AES.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; +import org.h2.util.Bits; + /** * An implementation of the AES block cipher algorithm, * also known as Rijndael. Only AES-128 is supported by this class. 
@@ -135,14 +137,10 @@ public void decrypt(byte[] bytes, int off, int len) { private void encryptBlock(byte[] in, byte[] out, int off) { int[] k = encKey; - int x0 = ((in[off] << 24) | ((in[off + 1] & 255) << 16) - | ((in[off + 2] & 255) << 8) | (in[off + 3] & 255)) ^ k[0]; - int x1 = ((in[off + 4] << 24) | ((in[off + 5] & 255) << 16) - | ((in[off + 6] & 255) << 8) | (in[off + 7] & 255)) ^ k[1]; - int x2 = ((in[off + 8] << 24) | ((in[off + 9] & 255) << 16) - | ((in[off + 10] & 255) << 8) | (in[off + 11] & 255)) ^ k[2]; - int x3 = ((in[off + 12] << 24) | ((in[off + 13] & 255) << 16) - | ((in[off + 14] & 255) << 8) | (in[off + 15] & 255)) ^ k[3]; + int x0 = Bits.readInt(in, off) ^ k[0]; + int x1 = Bits.readInt(in, off + 4) ^ k[1]; + int x2 = Bits.readInt(in, off + 8) ^ k[2]; + int x3 = Bits.readInt(in, off + 12) ^ k[3]; int y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255] ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[4]; int y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255] @@ -223,26 +221,18 @@ private void encryptBlock(byte[] in, byte[] out, int off) { | (FS[(y0 >> 8) & 255] << 8) | FS[y1 & 255]) ^ k[42]; x3 = ((FS[(y3 >> 24) & 255] << 24) | (FS[(y0 >> 16) & 255] << 16) | (FS[(y1 >> 8) & 255] << 8) | FS[y2 & 255]) ^ k[43]; - out[off] = (byte) (x0 >> 24); out[off+1] = (byte) (x0 >> 16); - out[off+2] = (byte) (x0 >> 8); out[off+3] = (byte) x0; - out[off+4] = (byte) (x1 >> 24); out[off+5] = (byte) (x1 >> 16); - out[off+6] = (byte) (x1 >> 8); out[off+7] = (byte) x1; - out[off+8] = (byte) (x2 >> 24); out[off+9] = (byte) (x2 >> 16); - out[off+10] = (byte) (x2 >> 8); out[off+11] = (byte) x2; - out[off+12] = (byte) (x3 >> 24); out[off+13] = (byte) (x3 >> 16); - out[off+14] = (byte) (x3 >> 8); out[off+15] = (byte) x3; + Bits.writeInt(out, off, x0); + Bits.writeInt(out, off + 4, x1); + Bits.writeInt(out, off + 8, x2); + Bits.writeInt(out, off + 12, x3); } private void decryptBlock(byte[] in, byte[] out, int off) { int[] k = decKey; - int x0 = ((in[off] << 24) | ((in[off + 1] 
& 255) << 16) - | ((in[off + 2] & 255) << 8) | (in[off + 3] & 255)) ^ k[0]; - int x1 = ((in[off + 4] << 24) | ((in[off + 5] & 255) << 16) - | ((in[off + 6] & 255) << 8) | (in[off + 7] & 255)) ^ k[1]; - int x2 = ((in[off + 8] << 24) | ((in[off + 9] & 255) << 16) - | ((in[off + 10] & 255) << 8) | (in[off + 11] & 255)) ^ k[2]; - int x3 = ((in[off + 12] << 24) | ((in[off + 13] & 255) << 16) - | ((in[off + 14] & 255) << 8) | (in[off + 15] & 255)) ^ k[3]; + int x0 = Bits.readInt(in, off) ^ k[0]; + int x1 = Bits.readInt(in, off + 4) ^ k[1]; + int x2 = Bits.readInt(in, off + 8) ^ k[2]; + int x3 = Bits.readInt(in, off + 12) ^ k[3]; int y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255] ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[4]; int y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255] @@ -323,15 +313,10 @@ private void decryptBlock(byte[] in, byte[] out, int off) { | (RS[(y0 >> 8) & 255] << 8) | RS[y3 & 255]) ^ k[42]; x3 = ((RS[(y3 >> 24) & 255] << 24) | (RS[(y2 >> 16) & 255] << 16) | (RS[(y1 >> 8) & 255] << 8) | RS[y0 & 255]) ^ k[43]; - out[off] = (byte) (x0 >> 24); - out[off + 1] = (byte) (x0 >> 16); - out[off+2] = (byte) (x0 >> 8); out[off+3] = (byte) x0; - out[off+4] = (byte) (x1 >> 24); out[off+5] = (byte) (x1 >> 16); - out[off+6] = (byte) (x1 >> 8); out[off+7] = (byte) x1; - out[off+8] = (byte) (x2 >> 24); out[off+9] = (byte) (x2 >> 16); - out[off+10] = (byte) (x2 >> 8); out[off+11] = (byte) x2; - out[off+12] = (byte) (x3 >> 24); out[off+13] = (byte) (x3 >> 16); - out[off+14] = (byte) (x3 >> 8); out[off+15] = (byte) x3; + Bits.writeInt(out, off, x0); + Bits.writeInt(out, off + 4, x1); + Bits.writeInt(out, off + 8, x2); + Bits.writeInt(out, off + 12, x3); } @Override diff --git a/h2/src/main/org/h2/security/BlockCipher.java b/h2/src/main/org/h2/security/BlockCipher.java index be236ce18b..6e4cca4fab 100644 --- a/h2/src/main/org/h2/security/BlockCipher.java +++ b/h2/src/main/org/h2/security/BlockCipher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; diff --git a/h2/src/main/org/h2/security/CipherFactory.java b/h2/src/main/org/h2/security/CipherFactory.java index 753db7808b..0477e9afa7 100644 --- a/h2/src/main/org/h2/security/CipherFactory.java +++ b/h2/src/main/org/h2/security/CipherFactory.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; @@ -17,13 +17,18 @@ import java.security.KeyFactory; import java.security.KeyStore; import java.security.PrivateKey; +import java.security.Security; import java.security.cert.Certificate; import java.security.cert.CertificateFactory; import java.security.spec.PKCS8EncodedKeySpec; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; import java.util.Properties; + import javax.net.ServerSocketFactory; import javax.net.ssl.SSLServerSocket; import javax.net.ssl.SSLServerSocketFactory; @@ -33,6 +38,7 @@ import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; import org.h2.message.DbException; +import org.h2.mvstore.DataUtils; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; import org.h2.util.StringUtils; @@ -48,6 +54,20 @@ public class CipherFactory { public static final String KEYSTORE_PASSWORD = "h2pass"; + /** + * The security property which can prevent anonymous TLS connections. + * Introduced into Java 6, 7, 8 in updates from July 2015. 
+ */ + public static final String LEGACY_ALGORITHMS_SECURITY_KEY = + "jdk.tls.legacyAlgorithms"; + + /** + * The value of {@value #LEGACY_ALGORITHMS_SECURITY_KEY} security + * property at the time of class initialization. + * Null if it is not set. + */ + public static final String DEFAULT_LEGACY_ALGORITHMS = getLegacyAlgorithmsSilently(); + private static final String KEYSTORE = "~/.h2.keystore"; private static final String KEYSTORE_KEY = @@ -55,6 +75,7 @@ public class CipherFactory { private static final String KEYSTORE_PASSWORD_KEY = "javax.net.ssl.keyStorePassword"; + private CipherFactory() { // utility class } @@ -83,10 +104,10 @@ public static BlockCipher getBlockCipher(String algorithm) { * @param address the address to connect to * @param port the port * @return the socket + * @throws IOException on failure */ public static Socket createSocket(InetAddress address, int port) throws IOException { - Socket socket = null; setKeystore(); SSLSocketFactory f = (SSLSocketFactory) SSLSocketFactory.getDefault(); SSLSocket secureSocket = (SSLSocket) f.createSocket(); @@ -100,22 +121,30 @@ public static Socket createSocket(InetAddress address, int port) secureSocket.getSupportedCipherSuites()); secureSocket.setEnabledCipherSuites(list); } - socket = secureSocket; - return socket; + return secureSocket; } /** - * Create a secure server socket. If a bind address is specified, the socket - * is only bound to this address. + * Create a secure server socket. If a bind address is specified, the + * socket is only bound to this address. + * If h2.enableAnonymousTLS is true, an attempt is made to modify + * the security property jdk.tls.legacyAlgorithms (in newer JVMs) to allow + * anonymous TLS. This system change is effectively permanent for the + * lifetime of the JVM. 
+ * @see #removeAnonFromLegacyAlgorithms() * * @param port the port to listen on * @param bindAddress the address to bind to, or null to bind to all * addresses * @return the server socket + * @throws IOException on failure */ public static ServerSocket createServerSocket(int port, InetAddress bindAddress) throws IOException { ServerSocket socket = null; + if (SysProperties.ENABLE_ANONYMOUS_TLS) { + removeAnonFromLegacyAlgorithms(); + } setKeystore(); ServerSocketFactory f = SSLServerSocketFactory.getDefault(); SSLServerSocket secureSocket; @@ -137,13 +166,102 @@ public static ServerSocket createServerSocket(int port, return socket; } + /** + * Removes DH_anon and ECDH_anon from a comma separated list of ciphers. + * Only the first occurrence is removed. + * If there is nothing to remove, returns the reference to the argument. + * @param list a list of names separated by commas (and spaces) + * @return a new string without DH_anon and ECDH_anon items, + * or the original if none were found + */ + public static String removeDhAnonFromCommaSeparatedList(String list) { + if (list == null) { + return list; + } + List algorithms = new LinkedList<>(Arrays.asList(list.split("\\s*,\\s*"))); + boolean dhAnonRemoved = algorithms.remove("DH_anon"); + boolean ecdhAnonRemoved = algorithms.remove("ECDH_anon"); + if (dhAnonRemoved || ecdhAnonRemoved) { + String string = Arrays.toString(algorithms.toArray(new String[algorithms.size()])); + return (!algorithms.isEmpty()) ? string.substring(1, string.length() - 1): ""; + } + return list; + } + + /** + * Attempts to weaken the security properties to allow anonymous TLS. + * New JREs would not choose an anonymous cipher suite in a TLS handshake + * if server-side security property + * {@value #LEGACY_ALGORITHMS_SECURITY_KEY} + * were not modified from the default value. + *

    + * NOTE: In current (as of 2016) default implementations of JSSE which use + * this security property, the value is permanently cached inside the + * ServerHandshake class upon its first use. + * Therefore the modification accomplished by this method has to be done + * before the first use of a server SSL socket. + * Later changes to this property will not have any effect on server socket + * behavior. + */ + public static synchronized void removeAnonFromLegacyAlgorithms() { + String legacyOriginal = getLegacyAlgorithmsSilently(); + if (legacyOriginal == null) { + return; + } + String legacyNew = removeDhAnonFromCommaSeparatedList(legacyOriginal); + if (!legacyOriginal.equals(legacyNew)) { + setLegacyAlgorithmsSilently(legacyNew); + } + } + + /** + * Attempts to resets the security property to the default value. + * The default value of {@value #LEGACY_ALGORITHMS_SECURITY_KEY} was + * obtained at time of class initialization. + *

    + * NOTE: Resetting the property might not have any effect on server + * socket behavior. + * @see #removeAnonFromLegacyAlgorithms() + */ + public static synchronized void resetDefaultLegacyAlgorithms() { + setLegacyAlgorithmsSilently(DEFAULT_LEGACY_ALGORITHMS); + } + + /** + * Returns the security property {@value #LEGACY_ALGORITHMS_SECURITY_KEY}. + * Ignores security exceptions. + * + * @return the value of the security property, or null if not set + * or not accessible + */ + public static String getLegacyAlgorithmsSilently() { + String defaultLegacyAlgorithms = null; + try { + defaultLegacyAlgorithms = Security.getProperty(LEGACY_ALGORITHMS_SECURITY_KEY); + } catch (SecurityException e) { + // ignore + } + return defaultLegacyAlgorithms; + } + + private static void setLegacyAlgorithmsSilently(String legacyAlgorithms) { + if (legacyAlgorithms == null) { + return; + } + try { + Security.setProperty(LEGACY_ALGORITHMS_SECURITY_KEY, legacyAlgorithms); + } catch (SecurityException e) { + // ignore + } + } + private static byte[] getKeyStoreBytes(KeyStore store, String password) throws IOException { ByteArrayOutputStream bout = new ByteArrayOutputStream(); try { store.store(bout, password.toCharArray()); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } return bout.toByteArray(); } @@ -153,6 +271,7 @@ private static byte[] getKeyStoreBytes(KeyStore store, String password) * * @param password the keystore password * @return the keystore + * @throws IOException on failure */ public static KeyStore getKeyStore(String password) throws IOException { try { @@ -160,7 +279,7 @@ public static KeyStore getKeyStore(String password) throws IOException { // if you have a keystore file. // This code is (hopefully) more Java version independent // than using keystores directly. 
See also: - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4887561 + // https://bugs.openjdk.java.net/browse/JDK-4887561 // (1.4.2 cannot read keystore written with 1.4.1) // --- generated code start --- @@ -233,7 +352,7 @@ public static KeyStore getKeyStore(String password) throws IOException { // --- generated code end --- return store; } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } @@ -258,7 +377,7 @@ private static void setKeystore() throws IOException { out.write(data); out.close(); } catch (Exception e) { - throw DbException.convertToIOException(e); + throw DataUtils.convertToIOException(e); } } String absolutePath = FileUtils.toRealPath(fileName); @@ -270,21 +389,19 @@ private static void setKeystore() throws IOException { } private static String[] enableAnonymous(String[] enabled, String[] supported) { - HashSet set = new HashSet(); - Collections.addAll(set, enabled); + LinkedHashSet set = new LinkedHashSet<>(); for (String x : supported) { - if (!x.startsWith("SSL") && - x.indexOf("_anon_") >= 0 && - x.indexOf("_AES_") >= 0 && - x.indexOf("_SHA") >= 0) { + if (!x.startsWith("SSL") && x.contains("_anon_") && + (x.contains("_AES_") || x.contains("_3DES_")) && x.contains("_SHA")) { set.add(x); } } + Collections.addAll(set, enabled); return set.toArray(new String[0]); } private static String[] disableSSL(String[] enabled) { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (String x : enabled) { if (!x.startsWith("SSL")) { set.add(x); diff --git a/h2/src/main/org/h2/security/Fog.java b/h2/src/main/org/h2/security/Fog.java index 74b31dced5..ab5d61fc1b 100644 --- a/h2/src/main/org/h2/security/Fog.java +++ b/h2/src/main/org/h2/security/Fog.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; -import org.h2.util.Utils; +import org.h2.util.Bits; /** * A pseudo-encryption algorithm that makes the data appear to be @@ -31,63 +31,35 @@ public void decrypt(byte[] bytes, int off, int len) { } private void encryptBlock(byte[] in, byte[] out, int off) { - int x0 = (in[off] << 24) | ((in[off+1] & 255) << 16) | - ((in[off+2] & 255) << 8) | (in[off+3] & 255); - int x1 = (in[off+4] << 24) | ((in[off+5] & 255) << 16) | - ((in[off+6] & 255) << 8) | (in[off+7] & 255); - int x2 = (in[off+8] << 24) | ((in[off+9] & 255) << 16) | - ((in[off+10] & 255) << 8) | (in[off+11] & 255); - int x3 = (in[off+12] << 24) | ((in[off+13] & 255) << 16) | - ((in[off+14] & 255) << 8) | (in[off+15] & 255); + int x0 = Bits.readInt(in, off); + int x1 = Bits.readInt(in, off + 4); + int x2 = Bits.readInt(in, off + 8); + int x3 = Bits.readInt(in, off + 12); int k = key; - int s = x1 & 31; - x0 ^= k; - x0 = (x0 << s) | (x0 >>> (32 - s)); - x2 ^= k; - x2 = (x2 << s) | (x2 >>> (32 - s)); - s = x0 & 31; - x1 ^= k; - x1 = (x1 << s) | (x1 >>> (32 - s)); - x3 ^= k; - x3 = (x3 << s) | (x3 >>> (32 - s)); - out[off] = (byte) (x0 >> 24); out[off+1] = (byte) (x0 >> 16); - out[off+2] = (byte) (x0 >> 8); out[off+3] = (byte) x0; - out[off+4] = (byte) (x1 >> 24); out[off+5] = (byte) (x1 >> 16); - out[off+6] = (byte) (x1 >> 8); out[off+7] = (byte) x1; - out[off+8] = (byte) (x2 >> 24); out[off+9] = (byte) (x2 >> 16); - out[off+10] = (byte) (x2 >> 8); out[off+11] = (byte) x2; - out[off+12] = (byte) (x3 >> 24); out[off+13] = (byte) (x3 >> 16); - out[off+14] = (byte) (x3 >> 8); out[off+15] = (byte) x3; + x0 = Integer.rotateLeft(x0 ^ k, x1); + x2 = Integer.rotateLeft(x2 ^ k, x1); + x1 = Integer.rotateLeft(x1 ^ k, x0); + x3 = Integer.rotateLeft(x3 ^ k, x0); + Bits.writeInt(out, off, x0); + Bits.writeInt(out, off + 4, x1); + Bits.writeInt(out, off + 8, x2); + 
Bits.writeInt(out, off + 12, x3); } private void decryptBlock(byte[] in, byte[] out, int off) { - int x0 = (in[off] << 24) | ((in[off+1] & 255) << 16) | - ((in[off+2] & 255) << 8) | (in[off+3] & 255); - int x1 = (in[off+4] << 24) | ((in[off+5] & 255) << 16) | - ((in[off+6] & 255) << 8) | (in[off+7] & 255); - int x2 = (in[off+8] << 24) | ((in[off+9] & 255) << 16) | - ((in[off+10] & 255) << 8) | (in[off+11] & 255); - int x3 = (in[off+12] << 24) | ((in[off+13] & 255) << 16) | - ((in[off+14] & 255) << 8) | (in[off+15] & 255); + int x0 = Bits.readInt(in, off); + int x1 = Bits.readInt(in, off + 4); + int x2 = Bits.readInt(in, off + 8); + int x3 = Bits.readInt(in, off + 12); int k = key; - int s = 32 - (x0 & 31); - x1 = (x1 << s) | (x1 >>> (32 - s)); - x1 ^= k; - x3 = (x3 << s) | (x3 >>> (32 - s)); - x3 ^= k; - s = 32 - (x1 & 31); - x0 = (x0 << s) | (x0 >>> (32 - s)); - x0 ^= k; - x2 = (x2 << s) | (x2 >>> (32 - s)); - x2 ^= k; - out[off] = (byte) (x0 >> 24); out[off+1] = (byte) (x0 >> 16); - out[off+2] = (byte) (x0 >> 8); out[off+3] = (byte) x0; - out[off+4] = (byte) (x1 >> 24); out[off+5] = (byte) (x1 >> 16); - out[off+6] = (byte) (x1 >> 8); out[off+7] = (byte) x1; - out[off+8] = (byte) (x2 >> 24); out[off+9] = (byte) (x2 >> 16); - out[off+10] = (byte) (x2 >> 8); out[off+11] = (byte) x2; - out[off+12] = (byte) (x3 >> 24); out[off+13] = (byte) (x3 >> 16); - out[off+14] = (byte) (x3 >> 8); out[off+15] = (byte) x3; + x1 = Integer.rotateRight(x1, x0) ^ k; + x3 = Integer.rotateRight(x3, x0) ^ k; + x0 = Integer.rotateRight(x0, x1) ^ k; + x2 = Integer.rotateRight(x2, x1) ^ k; + Bits.writeInt(out, off, x0); + Bits.writeInt(out, off + 4, x1); + Bits.writeInt(out, off + 8, x2); + Bits.writeInt(out, off + 12, x3); } @Override @@ -97,7 +69,7 @@ public int getKeyLength() { @Override public void setKey(byte[] key) { - this.key = (int) Utils.readLong(key, 0); + this.key = (int) Bits.readLong(key, 0); } } diff --git a/h2/src/main/org/h2/security/SHA256.java 
b/h2/src/main/org/h2/security/SHA256.java index 8d609e6402..1b372893c4 100644 --- a/h2/src/main/org/h2/security/SHA256.java +++ b/h2/src/main/org/h2/security/SHA256.java @@ -1,42 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; +import java.security.GeneralSecurityException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.h2.util.Bits; + /** * This class implements the cryptographic hash function SHA-256. */ public class SHA256 { - /** - * The first 32 bits of the fractional parts of the cube roots of the first - * sixty-four prime numbers. - */ - private static final int[] K = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, - 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, - 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, - 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, - 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, - 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, - 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, - 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, - 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, - 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, - 0xc67178f2 }; - - private static final int[] HH = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, - 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; - - private final byte[] result = new 
byte[32]; - private final int[] w = new int[64]; - private final int[] hh = new int[8]; + private SHA256() { + } /** * Calculate the hash code by using the given salt. The salt is appended @@ -93,44 +78,20 @@ public static byte[] getKeyPasswordHash(String userName, char[] password) { * @return the hash */ public static byte[] getHMAC(byte[] key, byte[] message) { - key = normalizeKeyForHMAC(key); - int len = message.length; - int byteLen = 64 + Math.max(32, len); - int intLen = getIntCount(byteLen); - byte[] byteBuff = new byte[intLen * 4]; - int[] intBuff = new int[intLen]; - SHA256 sha = new SHA256(); - byte[] iKey = new byte[64 + len]; - byte[] oKey = new byte[64 + 32]; - sha.calculateHMAC(key, message, len, iKey, oKey, byteBuff, intBuff); - return sha.result; - } - - private void calculateHMAC(byte[] key, byte[] message, int len, - byte[] iKey, byte[] oKey, byte[] byteBuff, int[] intBuff) { - Arrays.fill(iKey, 0, 64, (byte) 0x36); - xor(iKey, key, 64); - System.arraycopy(message, 0, iKey, 64, len); - calculateHash(iKey, 64 + len, byteBuff, intBuff); - Arrays.fill(oKey, 0, 64, (byte) 0x5c); - xor(oKey, key, 64); - System.arraycopy(result, 0, oKey, 64, 32); - calculateHash(oKey, 64 + 32, byteBuff, intBuff); + return initMac(key).doFinal(message); } - private static byte[] normalizeKeyForHMAC(byte[] key) { - if (key.length > 64) { - key = getHash(key, false); + private static Mac initMac(byte[] key) { + // Java forbids empty keys + if (key.length == 0) { + key = new byte[1]; } - if (key.length < 64) { - key = Arrays.copyOf(key, 64); - } - return key; - } - - private static void xor(byte[] target, byte[] data, int len) { - for (int i = 0; i < len; i++) { - target[i] ^= data[i]; + try { + Mac mac = Mac.getInstance("HmacSHA256"); + mac.init(new SecretKeySpec(key, "HmacSHA256")); + return mac; + } catch (GeneralSecurityException e) { + throw new RuntimeException(e); } } @@ -146,33 +107,28 @@ private static void xor(byte[] target, byte[] data, int len) { public static 
byte[] getPBKDF2(byte[] password, byte[] salt, int iterations, int resultLen) { byte[] result = new byte[resultLen]; - byte[] key = normalizeKeyForHMAC(password); - SHA256 sha = new SHA256(); + Mac mac = initMac(password); int len = 64 + Math.max(32, salt.length + 4); byte[] message = new byte[len]; - int intLen = getIntCount(len); - byte[] byteBuff = new byte[intLen * 4]; - int[] intBuff = new int[intLen]; - byte[] iKey = new byte[64 + len]; - byte[] oKey = new byte[64 + 32]; + byte[] macRes = null; for (int k = 1, offset = 0; offset < resultLen; k++, offset += 32) { for (int i = 0; i < iterations; i++) { if (i == 0) { System.arraycopy(salt, 0, message, 0, salt.length); - writeInt(message, salt.length, k); + Bits.writeInt(message, salt.length, k); len = salt.length + 4; } else { - System.arraycopy(sha.result, 0, message, 0, 32); + System.arraycopy(macRes, 0, message, 0, 32); len = 32; } - sha.calculateHMAC(key, message, len, iKey, oKey, byteBuff, intBuff); + mac.update(message, 0, len); + macRes = mac.doFinal(); for (int j = 0; j < 32 && j + offset < resultLen; j++) { - result[j + offset] ^= sha.result[j]; + result[j + offset] ^= macRes[j]; } } } Arrays.fill(password, (byte) 0); - Arrays.fill(key, (byte) 0); return result; } @@ -185,102 +141,16 @@ public static byte[] getPBKDF2(byte[] password, byte[] salt, * @return the hash code */ public static byte[] getHash(byte[] data, boolean nullData) { - int len = data.length; - int intLen = getIntCount(len); - byte[] byteBuff = new byte[intLen * 4]; - int[] intBuff = new int[intLen]; - SHA256 sha = new SHA256(); - sha.calculateHash(data, len, byteBuff, intBuff); + byte[] result; + try { + result = MessageDigest.getInstance("SHA-256").digest(data); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } if (nullData) { - sha.fillWithNull(); - Arrays.fill(intBuff, 0); - Arrays.fill(byteBuff, (byte) 0); Arrays.fill(data, (byte) 0); } - return sha.result; - } - - private static int getIntCount(int 
byteCount) { - return ((byteCount + 9 + 63) / 64) * 16; - } - - private void fillWithNull() { - Arrays.fill(w, 0); - Arrays.fill(hh, 0); - } - - private void calculateHash(byte[] data, int len, - byte[] byteBuff, int[] intBuff) { - int[] w = this.w; - int[] hh = this.hh; - byte[] result = this.result; - int intLen = getIntCount(len); - System.arraycopy(data, 0, byteBuff, 0, len); - byteBuff[len] = (byte) 0x80; - Arrays.fill(byteBuff, len + 1, intLen * 4, (byte) 0); - for (int i = 0, j = 0; j < intLen; i += 4, j++) { - intBuff[j] = readInt(byteBuff, i); - } - intBuff[intLen - 2] = len >>> 29; - intBuff[intLen - 1] = len << 3; - System.arraycopy(HH, 0, hh, 0, 8); - for (int block = 0; block < intLen; block += 16) { - for (int i = 0; i < 16; i++) { - w[i] = intBuff[block + i]; - } - for (int i = 16; i < 64; i++) { - int x = w[i - 2]; - int theta1 = rot(x, 17) ^ rot(x, 19) ^ (x >>> 10); - x = w[i - 15]; - int theta0 = rot(x, 7) ^ rot(x, 18) ^ (x >>> 3); - w[i] = theta1 + w[i - 7] + theta0 + w[i - 16]; - } - - int a = hh[0], b = hh[1], c = hh[2], d = hh[3]; - int e = hh[4], f = hh[5], g = hh[6], h = hh[7]; - - for (int i = 0; i < 64; i++) { - int t1 = h + (rot(e, 6) ^ rot(e, 11) ^ rot(e, 25)) - + ((e & f) ^ ((~e) & g)) + K[i] + w[i]; - int t2 = (rot(a, 2) ^ rot(a, 13) ^ rot(a, 22)) - + ((a & b) ^ (a & c) ^ (b & c)); - h = g; - g = f; - f = e; - e = d + t1; - d = c; - c = b; - b = a; - a = t1 + t2; - } - hh[0] += a; - hh[1] += b; - hh[2] += c; - hh[3] += d; - hh[4] += e; - hh[5] += f; - hh[6] += g; - hh[7] += h; - } - for (int i = 0; i < 8; i++) { - writeInt(result, i * 4, hh[i]); - } - } - - private static int rot(int i, int count) { - return Integer.rotateRight(i, count); - } - - private static int readInt(byte[] b, int i) { - return ((b[i] & 0xff) << 24) + ((b[i + 1] & 0xff) << 16) - + ((b[i + 2] & 0xff) << 8) + (b[i + 3] & 0xff); - } - - private static void writeInt(byte[] b, int i, int value) { - b[i] = (byte) (value >> 24); - b[i + 1] = (byte) (value >> 16); - b[i 
+ 2] = (byte) (value >> 8); - b[i + 3] = (byte) value; + return result; } } diff --git a/h2/src/main/org/h2/security/SHA3.java b/h2/src/main/org/h2/security/SHA3.java new file mode 100644 index 0000000000..cc22b7bde5 --- /dev/null +++ b/h2/src/main/org/h2/security/SHA3.java @@ -0,0 +1,289 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.security; + +import java.security.MessageDigest; +import java.util.Arrays; + +import org.h2.util.Bits; + +/** + * SHA-3 message digest family. + */ +public final class SHA3 extends MessageDigest { + + private static final long[] ROUND_CONSTANTS; + + static { + long[] rc = new long[24]; + byte l = 1; + for (int i = 0; i < 24; i++) { + rc[i] = 0; + for (int j = 0; j < 7; j++) { + byte t = l; + l = (byte) (t < 0 ? t << 1 ^ 0x71 : t << 1); + if ((t & 1) != 0) { + rc[i] ^= 1L << (1 << j) - 1; + } + } + } + ROUND_CONSTANTS = rc; + } + + /** + * Returns a new instance of SHA3-224 message digest. + * + * @return SHA3-224 message digest + */ + public static SHA3 getSha3_224() { + return new SHA3("SHA3-224", 28); + } + + /** + * Returns a new instance of SHA3-256 message digest. + * + * @return SHA3-256 message digest + */ + public static SHA3 getSha3_256() { + return new SHA3("SHA3-256", 32); + } + + /** + * Returns a new instance of SHA3-384 message digest. + * + * @return SHA3-384 message digest + */ + public static SHA3 getSha3_384() { + return new SHA3("SHA3-384", 48); + } + + /** + * Returns a new instance of SHA3-512 message digest. 
+ * + * @return SHA3-512 message digest + */ + public static SHA3 getSha3_512() { + return new SHA3("SHA3-512", 64); + } + + private final int digestLength; + + private final int rate; + + private long state00, state01, state02, state03, state04, state05, state06, state07, state08, state09, // + state10, state11, state12, state13, state14, state15, state16, state17, state18, state19, // + state20, state21, state22, state23, state24; + + private final byte[] buf; + + private int bufcnt; + + private SHA3(String algorithm, int digestLength) { + super(algorithm); + this.digestLength = digestLength; + buf = new byte[this.rate = 200 - digestLength * 2]; + } + + @Override + protected byte[] engineDigest() { + buf[bufcnt] = 0b110; + Arrays.fill(buf, bufcnt + 1, rate, (byte) 0); + buf[rate - 1] |= 0x80; + absorbQueue(); + byte[] r = new byte[digestLength]; + switch (digestLength) { + case 64: + Bits.writeLongLE(r, 56, state07); + Bits.writeLongLE(r, 48, state06); + //$FALL-THROUGH$ + case 48: + Bits.writeLongLE(r, 40, state05); + Bits.writeLongLE(r, 32, state04); + //$FALL-THROUGH$ + case 32: + Bits.writeLongLE(r, 24, state03); + break; + case 28: + Bits.writeIntLE(r, 24, (int) state03); + } + Bits.writeLongLE(r, 16, state02); + Bits.writeLongLE(r, 8, state01); + Bits.writeLongLE(r, 0, state00); + engineReset(); + return r; + } + + @Override + protected int engineGetDigestLength() { + return digestLength; + } + + @Override + protected void engineReset() { + state24 = state23 = state22 = state21 = state20 // + = state19 = state18 = state17 = state16 = state15 // + = state14 = state13 = state12 = state11 = state10 // + = state09 = state08 = state07 = state06 = state05 // + = state04 = state03 = state02 = state01 = state00 = 0L; + Arrays.fill(buf, (byte) 0); + bufcnt = 0; + } + + @Override + protected void engineUpdate(byte input) { + buf[bufcnt++] = input; + if (bufcnt == rate) { + absorbQueue(); + } + } + + @Override + protected void engineUpdate(byte[] input, int offset, 
int len) { + while (len > 0) { + if (bufcnt == 0 && len >= rate) { + do { + absorb(input, offset); + offset += rate; + len -= rate; + } while (len >= rate); + } else { + int partialBlock = Math.min(len, rate - bufcnt); + System.arraycopy(input, offset, buf, bufcnt, partialBlock); + bufcnt += partialBlock; + offset += partialBlock; + len -= partialBlock; + if (bufcnt == rate) { + absorbQueue(); + } + } + } + } + + private void absorbQueue() { + absorb(buf, 0); + bufcnt = 0; + } + + private void absorb(byte[] data, int offset) { + /* + * There is no need to copy 25 state* fields into local variables, + * because so large number of local variables only hurts performance. + */ + switch (digestLength) { + case 28: + state17 ^= Bits.readLongLE(data, offset + 136); + //$FALL-THROUGH$ + case 32: + state13 ^= Bits.readLongLE(data, offset + 104); + state14 ^= Bits.readLongLE(data, offset + 112); + state15 ^= Bits.readLongLE(data, offset + 120); + state16 ^= Bits.readLongLE(data, offset + 128); + //$FALL-THROUGH$ + case 48: + state09 ^= Bits.readLongLE(data, offset + 72); + state10 ^= Bits.readLongLE(data, offset + 80); + state11 ^= Bits.readLongLE(data, offset + 88); + state12 ^= Bits.readLongLE(data, offset + 96); + } + state00 ^= Bits.readLongLE(data, offset); + state01 ^= Bits.readLongLE(data, offset + 8); + state02 ^= Bits.readLongLE(data, offset + 16); + state03 ^= Bits.readLongLE(data, offset + 24); + state04 ^= Bits.readLongLE(data, offset + 32); + state05 ^= Bits.readLongLE(data, offset + 40); + state06 ^= Bits.readLongLE(data, offset + 48); + state07 ^= Bits.readLongLE(data, offset + 56); + state08 ^= Bits.readLongLE(data, offset + 64); + for (int i = 0; i < 24; i++) { + long c0 = state00 ^ state05 ^ state10 ^ state15 ^ state20; + long c1 = state01 ^ state06 ^ state11 ^ state16 ^ state21; + long c2 = state02 ^ state07 ^ state12 ^ state17 ^ state22; + long c3 = state03 ^ state08 ^ state13 ^ state18 ^ state23; + long c4 = state04 ^ state09 ^ state14 ^ state19 ^ 
state24; + long dX = (c1 << 1 | c1 >>> 63) ^ c4; + state00 ^= dX; + state05 ^= dX; + state10 ^= dX; + state15 ^= dX; + state20 ^= dX; + dX = (c2 << 1 | c2 >>> 63) ^ c0; + state01 ^= dX; + state06 ^= dX; + state11 ^= dX; + state16 ^= dX; + state21 ^= dX; + dX = (c3 << 1 | c3 >>> 63) ^ c1; + state02 ^= dX; + state07 ^= dX; + state12 ^= dX; + state17 ^= dX; + state22 ^= dX; + dX = (c4 << 1 | c4 >>> 63) ^ c2; + state03 ^= dX; + state08 ^= dX; + state13 ^= dX; + state18 ^= dX; + state23 ^= dX; + dX = (c0 << 1 | c0 >>> 63) ^ c3; + state04 ^= dX; + state09 ^= dX; + state14 ^= dX; + state19 ^= dX; + state24 ^= dX; + long s00 = state00; + long s01 = state06 << 44 | state06 >>> 20; + long s02 = state12 << 43 | state12 >>> 21; + long s03 = state18 << 21 | state18 >>> 43; + long s04 = state24 << 14 | state24 >>> 50; + long s05 = state03 << 28 | state03 >>> 36; + long s06 = state09 << 20 | state09 >>> 44; + long s07 = state10 << 3 | state10 >>> 61; + long s08 = state16 << 45 | state16 >>> 19; + long s09 = state22 << 61 | state22 >>> 3; + long s10 = state01 << 1 | state01 >>> 63; + long s11 = state07 << 6 | state07 >>> 58; + long s12 = state13 << 25 | state13 >>> 39; + long s13 = state19 << 8 | state19 >>> 56; + long s14 = state20 << 18 | state20 >>> 46; + long s15 = state04 << 27 | state04 >>> 37; + long s16 = state05 << 36 | state05 >>> 28; + long s17 = state11 << 10 | state11 >>> 54; + long s18 = state17 << 15 | state17 >>> 49; + long s19 = state23 << 56 | state23 >>> 8; + long s20 = state02 << 62 | state02 >>> 2; + long s21 = state08 << 55 | state08 >>> 9; + long s22 = state14 << 39 | state14 >>> 25; + long s23 = state15 << 41 | state15 >>> 23; + long s24 = state21 << 2 | state21 >>> 62; + state00 = s00 ^ ~s01 & s02 ^ ROUND_CONSTANTS[i]; + state01 = s01 ^ ~s02 & s03; + state02 = s02 ^ ~s03 & s04; + state03 = s03 ^ ~s04 & s00; + state04 = s04 ^ ~s00 & s01; + state05 = s05 ^ ~s06 & s07; + state06 = s06 ^ ~s07 & s08; + state07 = s07 ^ ~s08 & s09; + state08 = s08 ^ ~s09 & s05; + 
state09 = s09 ^ ~s05 & s06; + state10 = s10 ^ ~s11 & s12; + state11 = s11 ^ ~s12 & s13; + state12 = s12 ^ ~s13 & s14; + state13 = s13 ^ ~s14 & s10; + state14 = s14 ^ ~s10 & s11; + state15 = s15 ^ ~s16 & s17; + state16 = s16 ^ ~s17 & s18; + state17 = s17 ^ ~s18 & s19; + state18 = s18 ^ ~s19 & s15; + state19 = s19 ^ ~s15 & s16; + state20 = s20 ^ ~s21 & s22; + state21 = s21 ^ ~s22 & s23; + state22 = s22 ^ ~s23 & s24; + state23 = s23 ^ ~s24 & s20; + state24 = s24 ^ ~s20 & s21; + } + } + +} diff --git a/h2/src/main/org/h2/security/SecureFileStore.java b/h2/src/main/org/h2/security/SecureFileStore.java index 88fb54c6c9..2e70aaa14a 100644 --- a/h2/src/main/org/h2/security/SecureFileStore.java +++ b/h2/src/main/org/h2/security/SecureFileStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.security; @@ -8,6 +8,7 @@ import org.h2.engine.Constants; import org.h2.store.DataHandler; import org.h2.store.FileStore; +import org.h2.util.Bits; import org.h2.util.MathUtils; /** @@ -70,7 +71,7 @@ public void write(byte[] b, int off, int len) { } @Override - protected void readFullyDirect(byte[] b, int off, int len) { + public void readFullyDirect(byte[] b, int off, int len) { super.readFully(b, off, len); pos += len; } @@ -98,15 +99,7 @@ private void xorInitVector(byte[] b, int off, int len, long p) { byte[] iv = bufferForInitVector; while (len > 0) { for (int i = 0; i < Constants.FILE_BLOCK_SIZE; i += 8) { - long block = (p + i) >>> 3; - iv[i] = (byte) (block >> 56); - iv[i + 1] = (byte) (block >> 48); - iv[i + 2] = (byte) (block >> 40); - iv[i + 3] = (byte) (block >> 32); - iv[i + 4] = (byte) (block >> 24); - iv[i + 5] = (byte) (block >> 16); - iv[i + 6] = (byte) (block >> 8); - iv[i + 7] = (byte) block; + Bits.writeLong(iv, i, (p + i) >>> 3); } cipherForInitVector.encrypt(iv, 0, Constants.FILE_BLOCK_SIZE); for (int i = 0; i < Constants.FILE_BLOCK_SIZE; i++) { diff --git a/h2/src/main/org/h2/security/XTEA.java b/h2/src/main/org/h2/security/XTEA.java index a8a3444013..01f2192bf5 100644 --- a/h2/src/main/org/h2/security/XTEA.java +++ b/h2/src/main/org/h2/security/XTEA.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.security; -import org.h2.engine.SysProperties; import org.h2.message.DbException; +import org.h2.util.Bits; /** * An implementation of the XTEA block cipher algorithm. 
@@ -25,9 +25,8 @@ public class XTEA implements BlockCipher { @Override public void setKey(byte[] b) { int[] key = new int[4]; - for (int i = 0; i < 16;) { - key[i / 4] = (b[i++] << 24) + ((b[i++] & 255) << 16) - + ((b[i++] & 255) << 8) + (b[i++] & 255); + for (int i = 0; i < 16; i += 4) { + key[i / 4] = Bits.readInt(b, i); } int[] r = new int[32]; for (int i = 0, sum = 0; i < 32;) { @@ -47,10 +46,8 @@ public void setKey(byte[] b) { @Override public void encrypt(byte[] bytes, int off, int len) { - if (SysProperties.CHECK) { - if (len % ALIGN != 0) { - DbException.throwInternalError("unaligned len " + len); - } + if (len % ALIGN != 0) { + throw DbException.getInternalError("unaligned len " + len); } for (int i = off; i < off + len; i += 8) { encryptBlock(bytes, bytes, i); @@ -59,10 +56,8 @@ public void encrypt(byte[] bytes, int off, int len) { @Override public void decrypt(byte[] bytes, int off, int len) { - if (SysProperties.CHECK) { - if (len % ALIGN != 0) { - DbException.throwInternalError("unaligned len " + len); - } + if (len % ALIGN != 0) { + throw DbException.getInternalError("unaligned len " + len); } for (int i = off; i < off + len; i += 8) { decryptBlock(bytes, bytes, i); @@ -70,10 +65,8 @@ public void decrypt(byte[] bytes, int off, int len) { } private void encryptBlock(byte[] in, byte[] out, int off) { - int y = (in[off] << 24) | ((in[off + 1] & 255) << 16) - | ((in[off + 2] & 255) << 8) | (in[off + 3] & 255); - int z = (in[off + 4] << 24) | ((in[off + 5] & 255) << 16) - | ((in[off + 6] & 255) << 8) | (in[off + 7] & 255); + int y = Bits.readInt(in, off); + int z = Bits.readInt(in, off + 4); y += (((z << 4) ^ (z >>> 5)) + z) ^ k0; z += (((y >>> 5) ^ (y << 4)) + y) ^ k1; y += (((z << 4) ^ (z >>> 5)) + z) ^ k2; @@ -106,21 +99,13 @@ private void encryptBlock(byte[] in, byte[] out, int off) { z += (((y >>> 5) ^ (y << 4)) + y) ^ k29; y += (((z << 4) ^ (z >>> 5)) + z) ^ k30; z += (((y >>> 5) ^ (y << 4)) + y) ^ k31; - out[off] = (byte) (y >> 24); - out[off + 1] = 
(byte) (y >> 16); - out[off + 2] = (byte) (y >> 8); - out[off + 3] = (byte) y; - out[off + 4] = (byte) (z >> 24); - out[off + 5] = (byte) (z >> 16); - out[off + 6] = (byte) (z >> 8); - out[off + 7] = (byte) z; + Bits.writeInt(out, off, y); + Bits.writeInt(out, off + 4, z); } private void decryptBlock(byte[] in, byte[] out, int off) { - int y = (in[off] << 24) | ((in[off + 1] & 255) << 16) - | ((in[off + 2] & 255) << 8) | (in[off + 3] & 255); - int z = (in[off + 4] << 24) | ((in[off + 5] & 255) << 16) - | ((in[off + 6] & 255) << 8) | (in[off + 7] & 255); + int y = Bits.readInt(in, off); + int z = Bits.readInt(in, off + 4); z -= (((y >>> 5) ^ (y << 4)) + y) ^ k31; y -= (((z << 4) ^ (z >>> 5)) + z) ^ k30; z -= (((y >>> 5) ^ (y << 4)) + y) ^ k29; @@ -153,14 +138,8 @@ private void decryptBlock(byte[] in, byte[] out, int off) { y -= (((z << 4) ^ (z >>> 5)) + z) ^ k2; z -= (((y >>> 5) ^ (y << 4)) + y) ^ k1; y -= (((z << 4) ^ (z >>> 5)) + z) ^ k0; - out[off] = (byte) (y >> 24); - out[off + 1] = (byte) (y >> 16); - out[off + 2] = (byte) (y >> 8); - out[off + 3] = (byte) y; - out[off + 4] = (byte) (z >> 24); - out[off + 5] = (byte) (z >> 16); - out[off + 6] = (byte) (z >> 8); - out[off + 7] = (byte) z; + Bits.writeInt(out, off, y); + Bits.writeInt(out, off + 4, z); } @Override diff --git a/h2/src/main/org/h2/security/auth/AuthConfigException.java b/h2/src/main/org/h2/security/auth/AuthConfigException.java new file mode 100644 index 0000000000..6135f6d590 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/AuthConfigException.java @@ -0,0 +1,30 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +/** + * Exception thrown when an issue occurs during the authentication configuration + * + */ +public class AuthConfigException extends RuntimeException { + private static final long serialVersionUID = 1L; + + public AuthConfigException() { + super(); + } + + public AuthConfigException(String message) { + super(message); + } + + public AuthConfigException(Throwable cause) { + super(cause); + } + + public AuthConfigException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/h2/src/main/org/h2/security/auth/AuthenticationException.java b/h2/src/main/org/h2/security/auth/AuthenticationException.java new file mode 100644 index 0000000000..df054b2b56 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/AuthenticationException.java @@ -0,0 +1,30 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +/** + * Exception thrown in case of errors during authentication + */ +public class AuthenticationException extends Exception { + private static final long serialVersionUID = 1L; + + public AuthenticationException() { + super(); + } + + public AuthenticationException(String message) { + super(message); + } + + public AuthenticationException(Throwable cause) { + super(cause); + } + + public AuthenticationException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/h2/src/main/org/h2/security/auth/AuthenticationInfo.java b/h2/src/main/org/h2/security/auth/AuthenticationInfo.java new file mode 100644 index 0000000000..ab9ecfd9cf --- /dev/null +++ b/h2/src/main/org/h2/security/auth/AuthenticationInfo.java @@ -0,0 +1,89 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import org.h2.engine.ConnectionInfo; +import org.h2.util.StringUtils; + +/** + * Input data for authenticators; it wraps ConnectionInfo + */ +public class AuthenticationInfo { + + private ConnectionInfo connectionInfo; + + private String password; + + private String realm; + + /** + * Can be used by authenticator to hold information. + */ + Object nestedIdentity; + + public AuthenticationInfo(ConnectionInfo connectionInfo) { + this.connectionInfo = connectionInfo; + this.realm = connectionInfo.getProperty("AUTHREALM", null); + if (this.realm != null) { + this.realm = StringUtils.toUpperEnglish(this.realm); + } + this.password = connectionInfo.getProperty("AUTHZPWD", null); + } + + public String getUserName() { + return connectionInfo.getUserName(); + } + + public String getRealm() { + return realm; + } + + public String getPassword() { + return password; + } + + public ConnectionInfo getConnectionInfo() { + return connectionInfo; + } + + public String getFullyQualifiedName() { + if (realm == null) { + return connectionInfo.getUserName(); + } else { + return connectionInfo.getUserName() + "@" + realm; + } + } + + /** + * Gets nested identity object that can be used by authenticator to hold information. + * + * @return nested identity object. + */ + public Object getNestedIdentity() { + return nestedIdentity; + } + + /** + * Method used by authenticators to hold information about authenticated + * user + * + * @param nestedIdentity + * = nested identity object + */ + public void setNestedIdentity(Object nestedIdentity) { + this.nestedIdentity = nestedIdentity; + } + + /** + * Clean authentication data. 
+ */ + public void clean() { + this.password = null; + this.nestedIdentity = null; + connectionInfo.cleanAuthenticationInfo(); + } + +} diff --git a/h2/src/main/org/h2/security/auth/Authenticator.java b/h2/src/main/org/h2/security/auth/Authenticator.java new file mode 100644 index 0000000000..c5ea0b1b73 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/Authenticator.java @@ -0,0 +1,34 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import org.h2.engine.Database; +import org.h2.engine.User; + +/** + * Low level interface to implement full authentication process. + */ +public interface Authenticator { + + /** + * Perform user authentication. + * + * @param authenticationInfo authentication info. + * @param database target database instance. + * @return valid database user or null if user doesn't exists in the + * database + * @throws AuthenticationException on failure + */ + User authenticate(AuthenticationInfo authenticationInfo, Database database) throws AuthenticationException; + + /** + * Initialize the authenticator. This method is invoked by databases when + * the authenticator is set when the authenticator is set. + * + * @param database target database + */ + void init(Database database) throws AuthConfigException; +} diff --git a/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java b/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java new file mode 100644 index 0000000000..c099ac5a1d --- /dev/null +++ b/h2/src/main/org/h2/security/auth/AuthenticatorFactory.java @@ -0,0 +1,20 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +/** + * Authenticator factory + */ +public class AuthenticatorFactory { + + /** + * Factory method. + * @return authenticator instance. + */ + public static Authenticator createAuthenticator() { + return DefaultAuthenticator.getInstance(); + } +} diff --git a/h2/src/main/org/h2/security/auth/ConfigProperties.java b/h2/src/main/org/h2/security/auth/ConfigProperties.java new file mode 100644 index 0000000000..0dc19bf20d --- /dev/null +++ b/h2/src/main/org/h2/security/auth/ConfigProperties.java @@ -0,0 +1,115 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; + +import org.h2.util.Utils; + +/** + * wrapper for configuration properties + */ +public class ConfigProperties { + + private HashMap properties; + + public ConfigProperties() { + properties = new HashMap<>(); + } + + public ConfigProperties(PropertyConfig... configProperties) { + this(configProperties == null ? null : Arrays.asList(configProperties)); + } + + public ConfigProperties(Collection configProperties) { + properties = new HashMap<>(); + if (configProperties != null) { + for (PropertyConfig currentProperty : configProperties) { + if (properties.putIfAbsent(currentProperty.getName(), currentProperty.getValue()) != null) { + throw new AuthConfigException("duplicate property " + currentProperty.getName()); + } + } + } + } + + /** + * Returns the string value of specified property. + * + * @param name property name. + * @param defaultValue default value. + * @return the string property value or {@code defaultValue} if the property is missing. 
+ */ + public String getStringValue(String name, String defaultValue) { + String result = properties.get(name); + if (result == null) { + return defaultValue; + } + return result; + } + + /** + * Returns the string value of specified property. + * + * @param name property name. + * @return the string property value. + * @throws AuthConfigException if the property is missing. + */ + public String getStringValue(String name) { + String result = properties.get(name); + if (result == null) { + throw new AuthConfigException("missing config property " + name); + } + return result; + } + + /** + * Returns the integer value of specified property. + * + * @param name property name. + * @param defaultValue default value. + * @return the integer property value or {@code defaultValue} if the property is missing. + */ + public int getIntValue(String name, int defaultValue) { + String result = properties.get(name); + if (result == null) { + return defaultValue; + } + return Integer.parseInt(result); + } + + /** + * Returns the integer value of specified property. + * + * @param name property name. + * @return the integer property value. + * @throws AuthConfigException if the property is missing. + */ + public int getIntValue(String name) { + String result = properties.get(name); + if (result == null) { + throw new AuthConfigException("missing config property " + name); + } + return Integer.parseInt(result); + } + + /** + * Returns the boolean value of specified property. + * + * @param name property name. + * @param defaultValue default value. + * @return the boolean property value or {@code defaultValue} if the property is missing. 
+ */ + public boolean getBooleanValue(String name, boolean defaultValue) { + String result = properties.get(name); + if (result == null) { + return defaultValue; + } + return Utils.parseBoolean(result, defaultValue, true); + } + +} diff --git a/h2/src/main/org/h2/security/auth/Configurable.java b/h2/src/main/org/h2/security/auth/Configurable.java new file mode 100644 index 0000000000..56191e1b65 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/Configurable.java @@ -0,0 +1,17 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +/** + * describe how to perform objects runtime configuration + */ +public interface Configurable { + /** + * configure the component + * @param configProperties = configuration properties + */ + void configure(ConfigProperties configProperties); +} diff --git a/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java b/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java new file mode 100644 index 0000000000..052270ef17 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/DefaultAuthenticator.java @@ -0,0 +1,364 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.xml.parsers.ParserConfigurationException; +import org.h2.api.CredentialsValidator; +import org.h2.api.UserToRolesMapper; +import org.h2.engine.Database; +import org.h2.engine.Right; +import org.h2.engine.Role; +import org.h2.engine.SysProperties; +import org.h2.engine.User; +import org.h2.engine.UserBuilder; +import org.h2.message.Trace; +import org.h2.security.auth.impl.AssignRealmNameRole; +import org.h2.security.auth.impl.JaasCredentialsValidator; +import org.h2.util.StringUtils; +import org.xml.sax.SAXException; + +/** + * Default authenticator implementation. + *

    + * When client connectionInfo contains property AUTHREALM={realName} credentials + * (typically user id and password) are validated by + * {@link org.h2.api.CredentialsValidator} configured for that realm. + *

    + *

    + * When client connectionInfo doesn't contain AUTHREALM property credentials + * are validated internally on the database + *

    + *

    + * Rights assignment can be managed through {@link org.h2.api.UserToRolesMapper} + *

    + *

    + * Default configuration has a realm H2 that validates credentials through JAAS + * api (appName=h2). To customize configuration set h2.authConfigFile system + * property to refer to a valid h2auth.xml config file + *

    + */ +public class DefaultAuthenticator implements Authenticator { + + public static final String DEFAULT_REALMNAME = "H2"; + + private Map realms = new HashMap<>(); + private List userToRolesMappers = new ArrayList<>(); + private boolean allowUserRegistration; + private boolean persistUsers; + private boolean createMissingRoles; + private boolean skipDefaultInitialization; + private boolean initialized; + private static DefaultAuthenticator instance; + + protected static final DefaultAuthenticator getInstance() { + if (instance == null) { + instance = new DefaultAuthenticator(); + } + return instance; + } + + /** + * Create the Authenticator with default configurations + */ + public DefaultAuthenticator() { + } + + /** + * Create authenticator and optionally skip the default configuration. This + * option is useful when the authenticator is configured at code level + * + * @param skipDefaultInitialization + * if true default initialization is skipped + */ + public DefaultAuthenticator(boolean skipDefaultInitialization) { + this.skipDefaultInitialization = skipDefaultInitialization; + } + + /** + * If set save users externals defined during the authentication. + * + * @return {@code true} if user will be persisted, + * otherwise returns {@code false} + */ + public boolean isPersistUsers() { + return persistUsers; + } + + /** + * If set to {@code true} saves users externals defined during the authentication. + * + * @param persistUsers {@code true} if user will be persisted, + * otherwise {@code false}. + */ + public void setPersistUsers(boolean persistUsers) { + this.persistUsers = persistUsers; + } + + /** + * If set create external users in the database if not present. + * + * @return {@code true} if creation external user is allowed, + * otherwise returns {@code false} + */ + public boolean isAllowUserRegistration() { + return allowUserRegistration; + } + + /** + * If set to{@code true} creates external users in the database if not present. 
+ * + * @param allowUserRegistration {@code true} if creation external user is allowed, + * otherwise returns {@code false} + */ + public void setAllowUserRegistration(boolean allowUserRegistration) { + this.allowUserRegistration = allowUserRegistration; + } + + /** + * When set create roles not found in the database. If not set roles not + * found in the database are silently skipped. + * + * @return {@code true} if not found roles will be created, + * {@code false} roles are silently skipped. + */ + public boolean isCreateMissingRoles() { + return createMissingRoles; + } + + /** + * Sets the flag that define behavior in case external roles not found in the database. + * + * + * @param createMissingRoles when is {@code true} not found roles are created, + * when is {@code false} roles are silently skipped. + */ + public void setCreateMissingRoles(boolean createMissingRoles) { + this.createMissingRoles = createMissingRoles; + } + + /** + * Add an authentication realm. Realms are case insensitive + * + * @param name + * realm name + * @param credentialsValidator + * credentials validator for realm + */ + public void addRealm(String name, CredentialsValidator credentialsValidator) { + realms.put(StringUtils.toUpperEnglish(name), credentialsValidator); + } + + /** + * UserToRoleMappers assign roles to authenticated users + * + * @return current UserToRoleMappers active + */ + public List getUserToRolesMappers() { + return userToRolesMappers; + } + + public void setUserToRolesMappers(UserToRolesMapper... userToRolesMappers) { + List userToRolesMappersList = new ArrayList<>(); + for (UserToRolesMapper current : userToRolesMappers) { + userToRolesMappersList.add(current); + } + this.userToRolesMappers = userToRolesMappersList; + } + + /** + * Initializes the authenticator. + * + * this method is skipped if skipDefaultInitialization is set Order of + * initialization is + *
      + *
    1. Check h2.authConfigFile system property.
    2. + *
    3. Use the hard-coded default configuration
    4. + *
    + * + * @param database where authenticator is initialized + */ + @Override + public void init(Database database) throws AuthConfigException { + if (skipDefaultInitialization) { + return; + } + if (initialized) { + return; + } + synchronized (this) { + if (initialized) { + return; + } + Trace trace = database.getTrace(Trace.DATABASE); + URL h2AuthenticatorConfigurationUrl = null; + try { + String configFile = SysProperties.AUTH_CONFIG_FILE; + if (configFile != null) { + if (trace.isDebugEnabled()) { + trace.debug("DefaultAuthenticator.config: configuration read from system property" + + " h2auth.configurationfile={0}", configFile); + } + h2AuthenticatorConfigurationUrl = new URL(configFile); + } + if (h2AuthenticatorConfigurationUrl == null) { + if (trace.isDebugEnabled()) { + trace.debug("DefaultAuthenticator.config: default configuration"); + } + defaultConfiguration(); + } else { + configureFromUrl(h2AuthenticatorConfigurationUrl); + } + } catch (Exception e) { + trace.error(e, "DefaultAuthenticator.config: an error occurred during configuration from {0} ", + h2AuthenticatorConfigurationUrl); + throw new AuthConfigException( + "Failed to configure authentication from " + h2AuthenticatorConfigurationUrl, e); + } + initialized = true; + } + } + + private void defaultConfiguration() { + createMissingRoles = false; + allowUserRegistration = true; + realms = new HashMap<>(); + CredentialsValidator jaasCredentialsValidator = new JaasCredentialsValidator(); + jaasCredentialsValidator.configure(new ConfigProperties()); + realms.put(DEFAULT_REALMNAME, jaasCredentialsValidator); + UserToRolesMapper assignRealmNameRole = new AssignRealmNameRole(); + assignRealmNameRole.configure(new ConfigProperties()); + userToRolesMappers.add(assignRealmNameRole); + } + + /** + * Configure the authenticator from a configuration file + * + * @param configUrl URL of configuration file + * @throws AuthenticationException on failure + * @throws SAXException on failure + * @throws 
IOException on failure + * @throws ParserConfigurationException on failure + */ + public void configureFromUrl(URL configUrl) throws AuthenticationException, + SAXException, IOException, ParserConfigurationException { + H2AuthConfig config = H2AuthConfigXml.parseFrom(configUrl); + configureFrom(config); + } + + private void configureFrom(H2AuthConfig config) throws AuthenticationException { + allowUserRegistration = config.isAllowUserRegistration(); + createMissingRoles = config.isCreateMissingRoles(); + HashMap newRealms = new HashMap<>(); + for (RealmConfig currentRealmConfig : config.getRealms()) { + String currentRealmName = currentRealmConfig.getName(); + if (currentRealmName == null) { + throw new AuthenticationException("Missing realm name"); + } + currentRealmName = currentRealmName.toUpperCase(); + CredentialsValidator currentValidator = null; + try { + currentValidator = (CredentialsValidator) Class.forName(currentRealmConfig.getValidatorClass()) + .getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw new AuthenticationException("invalid validator class fo realm " + currentRealmName, e); + } + currentValidator.configure(new ConfigProperties(currentRealmConfig.getProperties())); + if (newRealms.putIfAbsent(currentRealmConfig.getName().toUpperCase(), currentValidator) != null) { + throw new AuthenticationException("Duplicate realm " + currentRealmConfig.getName()); + } + } + this.realms = newRealms; + List newUserToRolesMapper = new ArrayList<>(); + for (UserToRolesMapperConfig currentUserToRolesMapperConfig : config.getUserToRolesMappers()) { + UserToRolesMapper currentUserToRolesMapper = null; + try { + currentUserToRolesMapper = (UserToRolesMapper) Class + .forName(currentUserToRolesMapperConfig.getClassName()).getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw new AuthenticationException("Invalid class in UserToRolesMapperConfig", e); + } + currentUserToRolesMapper.configure(new 
ConfigProperties(currentUserToRolesMapperConfig.getProperties())); + newUserToRolesMapper.add(currentUserToRolesMapper); + } + this.userToRolesMappers = newUserToRolesMapper; + } + + private boolean updateRoles(AuthenticationInfo authenticationInfo, User user, Database database) + throws AuthenticationException { + boolean updatedDb = false; + Set roles = new HashSet<>(); + for (UserToRolesMapper currentUserToRolesMapper : userToRolesMappers) { + Collection currentRoles = currentUserToRolesMapper.mapUserToRoles(authenticationInfo); + if (currentRoles != null && !currentRoles.isEmpty()) { + roles.addAll(currentRoles); + } + } + for (String currentRoleName : roles) { + if (currentRoleName == null || currentRoleName.isEmpty()) { + continue; + } + Role currentRole = database.findRole(currentRoleName); + if (currentRole == null && isCreateMissingRoles()) { + synchronized (database.getSystemSession()) { + currentRole = new Role(database, database.allocateObjectId(), currentRoleName, false); + database.addDatabaseObject(database.getSystemSession(), currentRole); + database.getSystemSession().commit(false); + updatedDb = true; + } + } + if (currentRole == null) { + continue; + } + if (user.getRightForRole(currentRole) == null) { + // NON PERSISTENT + Right currentRight = new Right(database, -1, user, currentRole); + currentRight.setTemporary(true); + user.grantRole(currentRole, currentRight); + } + } + return updatedDb; + } + + @Override + public final User authenticate(AuthenticationInfo authenticationInfo, Database database) + throws AuthenticationException { + String userName = authenticationInfo.getFullyQualifiedName(); + User user = database.findUser(userName); + if (user == null && !isAllowUserRegistration()) { + throw new AuthenticationException("User " + userName + " not found in db"); + } + CredentialsValidator validator = realms.get(authenticationInfo.getRealm()); + if (validator == null) { + throw new AuthenticationException("realm " + 
authenticationInfo.getRealm() + " not configured"); + } + try { + if (!validator.validateCredentials(authenticationInfo)) { + return null; + } + } catch (Exception e) { + throw new AuthenticationException(e); + } + if (user == null) { + synchronized (database.getSystemSession()) { + user = UserBuilder.buildUser(authenticationInfo, database, isPersistUsers()); + database.addDatabaseObject(database.getSystemSession(), user); + database.getSystemSession().commit(false); + } + } + user.revokeTemporaryRightsOnRoles(); + updateRoles(authenticationInfo, user, database); + return user; + } +} diff --git a/h2/src/main/org/h2/security/auth/H2AuthConfig.java b/h2/src/main/org/h2/security/auth/H2AuthConfig.java new file mode 100644 index 0000000000..9fe168883d --- /dev/null +++ b/h2/src/main/org/h2/security/auth/H2AuthConfig.java @@ -0,0 +1,98 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.util.ArrayList; +import java.util.List; + +/** + * Describe configuration of H2 DefaultAuthenticator. + */ +public class H2AuthConfig { + + private boolean allowUserRegistration=true; + private boolean createMissingRoles=true; + private List realms; + private List userToRolesMappers; + + /** + * Allow user registration flag. If set to {@code true} + * creates external users in the database if not present. + * + * @return {@code true} in case user registration is allowed, + * otherwise returns {@code false}. + */ + public boolean isAllowUserRegistration() { + return allowUserRegistration; + } + + /** + * @param allowUserRegistration Allow user registration flag. + */ + public void setAllowUserRegistration(boolean allowUserRegistration) { + this.allowUserRegistration = allowUserRegistration; + } + + /** + * When set create roles not found in the database. 
If not set roles not + * found in the database are silently skipped. + * @return {@code true} if the flag is set, otherwise returns {@code false}. + */ + public boolean isCreateMissingRoles() { + return createMissingRoles; + } + + /** + * When set create roles not found in the database. If not set roles not + * found in the database are silently skipped + * @param createMissingRoles missing roles flag. + */ + public void setCreateMissingRoles(boolean createMissingRoles) { + this.createMissingRoles = createMissingRoles; + } + + /** + * Gets configuration of authentication realms. + * + * @return configuration of authentication realms. + */ + public List getRealms() { + if (realms == null) { + realms = new ArrayList<>(); + } + return realms; + } + + /** + * Sets configuration of authentication realms. + * + * @param realms configuration of authentication realms. + */ + public void setRealms(List realms) { + this.realms = realms; + } + + /** + * Gets configuration of the mappers external users to database roles. + * + * @return configuration of the mappers external users to database roles. + */ + public List getUserToRolesMappers() { + if (userToRolesMappers == null) { + userToRolesMappers = new ArrayList<>(); + } + return userToRolesMappers; + } + + /** + * Sets configuration of the mappers external users to database roles. + * + * @param userToRolesMappers configuration of the mappers external users to database roles. + */ + public void setUserToRolesMappers(List userToRolesMappers) { + this.userToRolesMappers = userToRolesMappers; + } +} diff --git a/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java b/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java new file mode 100644 index 0000000000..b1f6888d59 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/H2AuthConfigXml.java @@ -0,0 +1,128 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.parsers.SAXParser; +import javax.xml.parsers.SAXParserFactory; +import org.xml.sax.Attributes; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; + +/** + * Parser of external authentication XML configuration file + */ +public class H2AuthConfigXml extends DefaultHandler{ + + private H2AuthConfig result; + private HasConfigProperties lastConfigProperties; + + @Override + public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { + switch (qName) { + case "h2Auth": + result = new H2AuthConfig(); + result.setAllowUserRegistration("true".equals( + getAttributeValueOr("allowUserRegistration",attributes,"false"))); + result.setCreateMissingRoles("true".equals( + getAttributeValueOr("createMissingRoles",attributes, "true"))); + break; + case "realm": + RealmConfig realmConfig = new RealmConfig(); + realmConfig.setName(getMandatoryAttributeValue("name", attributes)); + realmConfig.setValidatorClass(getMandatoryAttributeValue("validatorClass", attributes)); + result.getRealms().add(realmConfig); + lastConfigProperties=realmConfig; + break; + case "userToRolesMapper": + UserToRolesMapperConfig userToRolesMapperConfig = new UserToRolesMapperConfig(); + userToRolesMapperConfig.setClassName(getMandatoryAttributeValue("className", attributes)); + result.getUserToRolesMappers().add(userToRolesMapperConfig); + lastConfigProperties=userToRolesMapperConfig; + break; + case "property": + if (lastConfigProperties==null) { + throw new SAXException("property element in the wrong place"); + } + lastConfigProperties.getProperties().add(new PropertyConfig( + getMandatoryAttributeValue("name", attributes), + getMandatoryAttributeValue("value", attributes))); + break; + default: + 
throw new SAXException("unexpected element "+qName); + } + + } + + @Override + public void endElement(String uri, String localName, String qName) throws SAXException { + if (lastConfigProperties!=null && qName.equals("property")==false) { + lastConfigProperties=null; + } + } + + private static String getMandatoryAttributeValue(String attributeName, Attributes attributes) throws SAXException { + String attributeValue=attributes.getValue(attributeName); + if (attributeValue==null || attributeValue.trim().equals("")) { + throw new SAXException("missing attribute "+attributeName); + } + return attributeValue; + + } + + private static String getAttributeValueOr(String attributeName, Attributes attributes, String defaultValue) { + String attributeValue=attributes.getValue(attributeName); + if (attributeValue==null || attributeValue.trim().equals("")) { + return defaultValue; + } + return attributeValue; + } + + /** + * Returns parsed authenticator configuration. + * + * @return Authenticator configuration. + */ + public H2AuthConfig getResult() { + return result; + } + + /** + * Parse the xml. + * + * @param url the source of the xml configuration. + * @return Authenticator configuration. + * @throws ParserConfigurationException if a parser cannot be created. + * @throws SAXException for SAX errors. + * @throws IOException If an I/O error occurs + */ + public static H2AuthConfig parseFrom(URL url) + throws SAXException, IOException, ParserConfigurationException { + try (InputStream inputStream = url.openStream()) { + return parseFrom(inputStream); + } + } + + /** + * Parse the xml. + * + * @param inputStream the source of the xml configuration. + * @return Authenticator configuration. + * @throws ParserConfigurationException if a parser cannot be created. + * @throws SAXException for SAX errors. 
+ * @throws IOException If an I/O error occurs + */ + public static H2AuthConfig parseFrom(InputStream inputStream) + throws SAXException, IOException, ParserConfigurationException { + SAXParser saxParser = SAXParserFactory.newInstance().newSAXParser(); + H2AuthConfigXml xmlHandler = new H2AuthConfigXml(); + saxParser.parse(inputStream, xmlHandler); + return xmlHandler.getResult(); + } +} diff --git a/h2/src/main/org/h2/security/auth/HasConfigProperties.java b/h2/src/main/org/h2/security/auth/HasConfigProperties.java new file mode 100644 index 0000000000..93856bffc0 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/HasConfigProperties.java @@ -0,0 +1,15 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.util.List; + +/** + * Interface for objects with configuration properties. + */ +public interface HasConfigProperties { + List getProperties(); +} diff --git a/h2/src/main/org/h2/security/auth/PropertyConfig.java b/h2/src/main/org/h2/security/auth/PropertyConfig.java new file mode 100644 index 0000000000..2f049cf492 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/PropertyConfig.java @@ -0,0 +1,40 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +/** + * Configuration property + */ +public class PropertyConfig { + + private String name; + + private String value; + + public PropertyConfig() { + } + + public PropertyConfig(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } +} diff --git a/h2/src/main/org/h2/security/auth/RealmConfig.java b/h2/src/main/org/h2/security/auth/RealmConfig.java new file mode 100644 index 0000000000..f020fca229 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/RealmConfig.java @@ -0,0 +1,64 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.util.ArrayList; +import java.util.List; + +/** + * Configuration for authentication realm. + */ +public class RealmConfig implements HasConfigProperties { + + private String name; + private String validatorClass; + private List properties; + + /** + * Gets realm's name. + * + * @return realm's name. + */ + public String getName() { + return name; + } + + /** + * Sets realm's name. + * + * @param name realm's name. + */ + public void setName(String name) { + this.name = name; + } + + /** + * Gets validator class name. + * + * @return validator class name. + */ + public String getValidatorClass() { + return validatorClass; + } + + /** + * Sets validator class name. + * + * @param validatorClass validator class name. 
+ */ + public void setValidatorClass(String validatorClass) { + this.validatorClass = validatorClass; + } + + @Override + public List getProperties() { + if (properties == null) { + properties = new ArrayList<>(); + } + return properties; + } + +} diff --git a/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java b/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java new file mode 100644 index 0000000000..16df852a16 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/UserToRolesMapperConfig.java @@ -0,0 +1,46 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth; + +import java.util.ArrayList; +import java.util.List; + +/** + * Configuration for class that maps users to their roles. + * + * @see org.h2.api.UserToRolesMapper + */ +public class UserToRolesMapperConfig implements HasConfigProperties { + + private String className; + private List properties; + + /** + * @return Mapper class name. + */ + public String getClassName() { + return className; + } + + /** + * @param className mapper class name. + */ + public void setClassName(String className) { + this.className = className; + } + + /** + * @return Mapper properties. + */ + @Override + public List getProperties() { + if (properties == null) { + properties = new ArrayList<>(); + } + return properties; + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java b/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java new file mode 100644 index 0000000000..825ce3928c --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/AssignRealmNameRole.java @@ -0,0 +1,48 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth.impl; + +import java.util.Arrays; +import java.util.Collection; + +import org.h2.api.UserToRolesMapper; +import org.h2.security.auth.AuthenticationException; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.ConfigProperties; + +/** + * Assign to user a role based on realm name + * + * *
+ * <p>
+ * Configuration parameters:
+ * </p>
+ * <ul>
+ * <li>roleNameFormat, optional by default is @{realm}</li>
+ * </ul>
    + */ +public class AssignRealmNameRole implements UserToRolesMapper{ + + private String roleNameFormat; + + public AssignRealmNameRole() { + this("@%s"); + } + + public AssignRealmNameRole(String roleNameFormat) { + this.roleNameFormat = roleNameFormat; + } + + @Override + public void configure(ConfigProperties configProperties) { + roleNameFormat=configProperties.getStringValue("roleNameFormat",roleNameFormat); + } + + @Override + public Collection mapUserToRoles(AuthenticationInfo authenticationInfo) throws AuthenticationException { + return Arrays.asList(String.format(roleNameFormat, authenticationInfo.getRealm())); + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java new file mode 100644 index 0000000000..9b43a30f2b --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/JaasCredentialsValidator.java @@ -0,0 +1,85 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth.impl; + +import java.io.IOException; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.auth.login.LoginContext; + +import org.h2.api.CredentialsValidator; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.ConfigProperties; + +/** + * Validate credentials by using standard Java Authentication and Authorization Service + * + *
+ * <p>
+ * Configuration parameters:
+ * </p>
+ * <ul>
+ * <li>appName inside the JAAS configuration (by default h2)</li>
+ * </ul>
    + * + */ +public class JaasCredentialsValidator implements CredentialsValidator { + + public static final String DEFAULT_APPNAME="h2"; + + private String appName; + + public JaasCredentialsValidator() { + this(DEFAULT_APPNAME); + } + + /** + * Create the validator with the given name of JAAS configuration + * @param appName = name of JAAS configuration + */ + public JaasCredentialsValidator(String appName) { + this.appName=appName; + } + + @Override + public void configure(ConfigProperties configProperties) { + appName=configProperties.getStringValue("appName",appName); + } + + static class AuthenticationInfoCallbackHandler implements CallbackHandler { + + AuthenticationInfo authenticationInfo; + + AuthenticationInfoCallbackHandler(AuthenticationInfo authenticationInfo) { + this.authenticationInfo = authenticationInfo; + } + + @Override + public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { + for (int i = 0; i < callbacks.length; i++) { + if (callbacks[i] instanceof NameCallback) { + ((NameCallback) callbacks[i]).setName(authenticationInfo.getUserName()); + } else if (callbacks[i] instanceof PasswordCallback) { + ((PasswordCallback) callbacks[i]).setPassword(authenticationInfo.getPassword().toCharArray()); + } + } + } + + } + + @Override + public boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception { + LoginContext loginContext = new LoginContext(appName, + new AuthenticationInfoCallbackHandler(authenticationInfo)); + loginContext.login(); + authenticationInfo.setNestedIdentity(loginContext.getSubject()); + return true; + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java new file mode 100644 index 0000000000..e1e85c8222 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/LdapCredentialsValidator.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth.impl; + +import java.util.Hashtable; + +import javax.naming.Context; +import javax.naming.directory.DirContext; +import javax.naming.directory.InitialDirContext; + +import org.h2.api.CredentialsValidator; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.ConfigProperties; + +/** + * Validate credentials by performing an LDAP bind + *
+ * <p>
+ * Configuration parameters:
+ * </p>
+ * <ul>
+ * <li>bindDnPattern bind dn pattern with %u instead of username
+ * (example: uid=%u,ou=users,dc=example,dc=com)</li>
+ * <li>host ldap server</li>
+ * <li>port of ldap service; optional, by default 389 for insecure, 636 for secure</li>
+ * <li>secure, optional by default is true (use SSL)</li>
+ * </ul>
    + */ +public class LdapCredentialsValidator implements CredentialsValidator { + + private String bindDnPattern; + private String host; + private int port; + private boolean secure; + private String url; + + @Override + public void configure(ConfigProperties configProperties) { + bindDnPattern = configProperties.getStringValue("bindDnPattern"); + host = configProperties.getStringValue("host"); + secure = configProperties.getBooleanValue("secure", true); + port = configProperties.getIntValue("port", secure ? 636 : 389); + url = "ldap" + (secure ? "s" : "") + "://" + host + ":" + port; + } + + @Override + public boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception { + DirContext dirContext = null; + try { + String dn=bindDnPattern.replace("%u", authenticationInfo.getUserName()); + Hashtable env = new Hashtable<>(); + env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory"); + env.put(Context.PROVIDER_URL, url); + env.put(Context.SECURITY_AUTHENTICATION, "simple"); + env.put(Context.SECURITY_PRINCIPAL, dn); + env.put(Context.SECURITY_CREDENTIALS, authenticationInfo.getPassword()); + if (secure) { + env.put(Context.SECURITY_PROTOCOL,"ssl"); + } + dirContext = new InitialDirContext(env); + authenticationInfo.setNestedIdentity(dn); + return true; + } finally { + if (dirContext != null) { + dirContext.close(); + } + } + + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java b/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java new file mode 100644 index 0000000000..adbed395ac --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/StaticRolesMapper.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth.impl; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; + +import org.h2.api.UserToRolesMapper; +import org.h2.security.auth.AuthenticationException; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.ConfigProperties; + +/** + * Assign static roles to authenticated users + *
+ * <p>
+ * Configuration parameters:
+ * </p>
+ * <ul>
+ * <li>roles role list separated by comma</li>
+ * </ul>
    + * + */ +public class StaticRolesMapper implements UserToRolesMapper { + + private Collection roles; + + public StaticRolesMapper() { + } + + public StaticRolesMapper(String... roles) { + this.roles=Arrays.asList(roles); + } + + @Override + public void configure(ConfigProperties configProperties) { + String rolesString=configProperties.getStringValue("roles", ""); + if (rolesString!=null) { + roles = new HashSet<>(Arrays.asList(rolesString.split(","))); + } + } + + @Override + public Collection mapUserToRoles(AuthenticationInfo authenticationInfo) throws AuthenticationException { + return roles; + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java b/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java new file mode 100644 index 0000000000..edee8de558 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/StaticUserCredentialsValidator.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: Alessandro Ventura + */ +package org.h2.security.auth.impl; + +import java.util.regex.Pattern; + +import org.h2.api.CredentialsValidator; +import org.h2.security.SHA256; +import org.h2.security.auth.AuthenticationException; +import org.h2.security.auth.AuthenticationInfo; +import org.h2.security.auth.ConfigProperties; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; +import org.h2.util.Utils; + +/** + * This credentials validator matches the user and password with the configured + * Usage should be limited to test purposes + * + */ +public class StaticUserCredentialsValidator implements CredentialsValidator { + + private Pattern userNamePattern; + private String password; + private byte[] salt; + private byte[] hashWithSalt; + + public StaticUserCredentialsValidator() { + } + + public StaticUserCredentialsValidator(String userNamePattern,String password) { + if (userNamePattern!=null) { + this.userNamePattern=Pattern.compile(userNamePattern.toUpperCase()); + } + salt=MathUtils.secureRandomBytes(256); + hashWithSalt=SHA256.getHashWithSalt(password.getBytes(), salt); + } + + @Override + public boolean validateCredentials(AuthenticationInfo authenticationInfo) throws AuthenticationException { + if (userNamePattern!=null) { + if (!userNamePattern.matcher(authenticationInfo.getUserName()).matches()) { + return false; + } + } + if (password!=null) { + return password.equals(authenticationInfo.getPassword()); + } + return Utils.compareSecure(hashWithSalt, + SHA256.getHashWithSalt(authenticationInfo.getPassword().getBytes(), salt)); + } + + @Override + public void configure(ConfigProperties configProperties) { + String userNamePatternString=configProperties.getStringValue("userNamePattern",null); + if (userNamePatternString!=null) { + userNamePattern = Pattern.compile(userNamePatternString); + } + password=configProperties.getStringValue("password",password); + String saltString =configProperties.getStringValue("salt",null); + if 
(saltString!=null) { + salt=StringUtils.convertHexToBytes(saltString); + } + String hashString=configProperties.getStringValue("hash", null); + if (hashString!=null) { + hashWithSalt = SHA256.getHashWithSalt(StringUtils.convertHexToBytes(hashString), salt); + } + } + +} diff --git a/h2/src/main/org/h2/security/auth/impl/package.html b/h2/src/main/org/h2/security/auth/impl/package.html new file mode 100644 index 0000000000..429db14800 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/impl/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Authentication classes. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/security/auth/package.html b/h2/src/main/org/h2/security/auth/package.html new file mode 100644 index 0000000000..429db14800 --- /dev/null +++ b/h2/src/main/org/h2/security/auth/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Authentication classes. + +

    \ No newline at end of file diff --git a/h2/src/main/org/h2/security/package.html b/h2/src/main/org/h2/security/package.html index 45a0e10dac..44e27d75a6 100644 --- a/h2/src/main/org/h2/security/package.html +++ b/h2/src/main/org/h2/security/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/Service.java b/h2/src/main/org/h2/server/Service.java index ea0793e342..dfcd8b0ceb 100644 --- a/h2/src/main/org/h2/server/Service.java +++ b/h2/src/main/org/h2/server/Service.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server; @@ -19,6 +19,7 @@ public interface Service { * Initialize the service from command line options. * * @param args the command line options + * @throws Exception on failure */ void init(String... args) throws Exception; @@ -32,6 +33,7 @@ public interface Service { /** * Start the service. This usually means create the server socket. * This method must not block. + * @throws SQLException on failure */ void start() throws SQLException; diff --git a/h2/src/main/org/h2/server/ShutdownHandler.java b/h2/src/main/org/h2/server/ShutdownHandler.java index 91d3d4b27e..49b24d3dbc 100644 --- a/h2/src/main/org/h2/server/ShutdownHandler.java +++ b/h2/src/main/org/h2/server/ShutdownHandler.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server; diff --git a/h2/src/main/org/h2/server/TcpServer.java b/h2/src/main/org/h2/server/TcpServer.java index cb7dde5620..fe90ba41ba 100644 --- a/h2/src/main/org/h2/server/TcpServer.java +++ b/h2/src/main/org/h2/server/TcpServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server; @@ -9,26 +9,24 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; -import java.util.Properties; import java.util.Set; -import org.h2.Driver; +import java.util.concurrent.ConcurrentHashMap; + import org.h2.api.ErrorCode; import org.h2.engine.Constants; +import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; -import org.h2.util.JdbcUtils; +import org.h2.util.MathUtils; import org.h2.util.NetUtils; -import org.h2.util.New; import org.h2.util.StringUtils; import org.h2.util.Tool; +import org.h2.util.Utils10; /** * The TCP server implements the native H2 database server protocol. 
@@ -49,8 +47,7 @@ public class TcpServer implements Service { */ private static final String MANAGEMENT_DB_PREFIX = "management_db_"; - private static final Map SERVERS = - Collections.synchronizedMap(new HashMap()); + private static final ConcurrentHashMap SERVERS = new ConcurrentHashMap<>(); private int port; private boolean portIsSet; @@ -64,8 +61,8 @@ public class TcpServer implements Service { private String baseDir; private boolean allowOthers; private boolean isDaemon; - private boolean ifExists; - private Connection managementDb; + private boolean ifExists = true; + private JdbcConnection managementDb; private PreparedStatement managementDbAdd; private PreparedStatement managementDbRemove; private String managementPassword = ""; @@ -85,27 +82,23 @@ public static String getManagementDbName(int port) { } private void initManagementDb() throws SQLException { - Properties prop = new Properties(); - prop.setProperty("user", ""); - prop.setProperty("password", managementPassword); + if (managementPassword.isEmpty()) { + managementPassword = StringUtils.convertBytesToHex(MathUtils.secureRandomBytes(32)); + } // avoid using the driver manager - Connection conn = Driver.load().connect("jdbc:h2:" + - getManagementDbName(port), prop); + JdbcConnection conn = new JdbcConnection("jdbc:h2:" + getManagementDbName(port), null, "", managementPassword, + false); managementDb = conn; - Statement stat = null; - try { - stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS STOP_SERVER FOR \"" + - TcpServer.class.getName() + ".stopServer\""); + + try (Statement stat = conn.createStatement()) { + stat.execute("CREATE ALIAS IF NOT EXISTS STOP_SERVER FOR '" + TcpServer.class.getName() + ".stopServer'"); stat.execute("CREATE TABLE IF NOT EXISTS SESSIONS" + - "(ID INT PRIMARY KEY, URL VARCHAR, USER VARCHAR, " + - "CONNECTED TIMESTAMP)"); + "(ID INT PRIMARY KEY, URL VARCHAR, `USER` VARCHAR, " + + "CONNECTED TIMESTAMP(9) WITH TIME ZONE)"); managementDbAdd = 
conn.prepareStatement( - "INSERT INTO SESSIONS VALUES(?, ?, ?, NOW())"); + "INSERT INTO SESSIONS VALUES(?, ?, ?, CURRENT_TIMESTAMP(9))"); managementDbRemove = conn.prepareStatement( "DELETE FROM SESSIONS WHERE ID=?"); - } finally { - JdbcUtils.closeSilently(stat); } SERVERS.put(port, this); } @@ -191,9 +184,10 @@ public void init(String... args) { isDaemon = true; } else if (Tool.isOption(a, "-ifExists")) { ifExists = true; + } else if (Tool.isOption(a, "-ifNotExists")) { + ifExists = false; } } - org.h2.Driver.load(); } @Override @@ -206,6 +200,16 @@ public int getPort() { return port; } + /** + * Returns whether a secure protocol is used. + * + * @return {@code true} if SSL socket is used, {@code false} if plain socket + * is used + */ + public boolean getSSL() { + return ssl; + } + /** * Check if this socket may connect to this server. Remote connections are * not allowed if the flag allowOthers is set. @@ -248,9 +252,11 @@ public void listen() { try { while (!stop) { Socket s = serverSocket.accept(); - TcpServerThread c = new TcpServerThread(s, this, nextThreadId++); + Utils10.setTcpQuickack(s, true); + int id = nextThreadId++; + TcpServerThread c = new TcpServerThread(s, this, id); running.add(c); - Thread thread = new Thread(c, threadName + " thread"); + Thread thread = new Thread(c, threadName + " thread-" + id); thread.setDaemon(isDaemon); c.setThread(thread); thread.start(); @@ -309,7 +315,7 @@ public void stop() { } } // TODO server: using a boolean 'now' argument? a timeout? 
- for (TcpServerThread c : New.arrayList(running)) { + for (TcpServerThread c : new ArrayList<>(running)) { if (c != null) { c.close(); try { @@ -426,6 +432,7 @@ boolean getIfExists() { * @param force if the server should be stopped immediately * @param all whether all TCP servers that are running in the JVM should be * stopped + * @throws SQLException on failure */ public static synchronized void shutdown(String url, String password, boolean force, boolean all) throws SQLException { @@ -439,17 +446,9 @@ public static synchronized void shutdown(String url, String password, } } String db = getManagementDbName(port); - try { - org.h2.Driver.load(); - } catch (Throwable e) { - throw DbException.convert(e); - } for (int i = 0; i < 2; i++) { - Connection conn = null; - PreparedStatement prep = null; - try { - conn = DriverManager.getConnection("jdbc:h2:" + url + "/" + db, "", password); - prep = conn.prepareStatement("CALL STOP_SERVER(?, ?, ?)"); + try (JdbcConnection conn = new JdbcConnection("jdbc:h2:" + url + '/' + db, null, "", password, true)) { + PreparedStatement prep = conn.prepareStatement("CALL STOP_SERVER(?, ?, ?)"); prep.setInt(1, all ? 0 : port); prep.setString(2, password); prep.setInt(3, force ? 
SHUTDOWN_FORCE : SHUTDOWN_NORMAL); @@ -469,9 +468,6 @@ public static synchronized void shutdown(String url, String password, if (i == 1) { throw e; } - } finally { - JdbcUtils.closeSilently(prep); - JdbcUtils.closeSilently(conn); } } } catch (Exception e) { @@ -486,7 +482,7 @@ public static synchronized void shutdown(String url, String password, * @param statementId the statement id */ void cancelStatement(String sessionId, int statementId) { - for (TcpServerThread c : New.arrayList(running)) { + for (TcpServerThread c : new ArrayList<>(running)) { if (c != null) { c.cancelStatement(sessionId, statementId); } diff --git a/h2/src/main/org/h2/server/TcpServerThread.java b/h2/src/main/org/h2/server/TcpServerThread.java index f41621be21..82c210f441 100644 --- a/h2/src/main/org/h2/server/TcpServerThread.java +++ b/h2/src/main/org/h2/server/TcpServerThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server; @@ -14,30 +14,37 @@ import java.net.Socket; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Objects; import org.h2.api.ErrorCode; import org.h2.command.Command; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Engine; +import org.h2.engine.GeneratedKeysMode; import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.engine.SessionRemote; import org.h2.engine.SysProperties; import org.h2.expression.Parameter; import org.h2.expression.ParameterInterface; import org.h2.expression.ParameterRemote; -import org.h2.jdbc.JdbcSQLException; +import org.h2.jdbc.JdbcException; +import org.h2.jdbc.meta.DatabaseMetaServer; import org.h2.message.DbException; import org.h2.result.ResultColumn; import org.h2.result.ResultInterface; +import org.h2.result.ResultWithGeneratedKeys; import org.h2.store.LobStorageInterface; import org.h2.util.IOUtils; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SmallLRUCache; import org.h2.util.SmallMap; -import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.value.Transfer; import org.h2.value.Value; -import org.h2.value.ValueLobDb; +import org.h2.value.ValueLob; /** * One server thread is opened per client connection. 
@@ -46,7 +53,7 @@ public class TcpServerThread implements Runnable { protected final Transfer transfer; private final TcpServer server; - private Session session; + private SessionLocal session; private boolean stop; private Thread thread; private Command commit; @@ -59,12 +66,12 @@ public class TcpServerThread implements Runnable { private final int threadId; private int clientVersion; private String sessionId; + private long lastRemoteSettingsId; TcpServerThread(Socket socket, TcpServer server, int id) { this.server = server; this.threadId = id; - transfer = new Transfer(null); - transfer.setSocket(socket); + transfer = new Transfer(null, socket); } private void trace(String s) { @@ -79,22 +86,31 @@ public void run() { // TODO server: should support a list of allowed databases // and a list of allowed clients try { + Socket socket = transfer.getSocket(); + if (socket == null) { + // the transfer is already closed, prevent NPE in TcpServer#allow(Socket) + return; + } if (!server.allow(transfer.getSocket())) { throw DbException.get(ErrorCode.REMOTE_CONNECTION_NOT_ALLOWED); } int minClientVersion = transfer.readInt(); - if (minClientVersion < Constants.TCP_PROTOCOL_VERSION_6) { - throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2, - "" + clientVersion, "" + Constants.TCP_PROTOCOL_VERSION_6); - } else if (minClientVersion > Constants.TCP_PROTOCOL_VERSION_15) { + if (minClientVersion < 6) { throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2, - "" + clientVersion, "" + Constants.TCP_PROTOCOL_VERSION_15); + Integer.toString(minClientVersion), "" + Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED); } int maxClientVersion = transfer.readInt(); - if (maxClientVersion >= Constants.TCP_PROTOCOL_VERSION_15) { - clientVersion = Constants.TCP_PROTOCOL_VERSION_15; + if (maxClientVersion < Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED) { + throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2, + Integer.toString(maxClientVersion), "" + 
Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED); + } else if (minClientVersion > Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED) { + throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2, + Integer.toString(minClientVersion), "" + Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED); + } + if (maxClientVersion >= Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED) { + clientVersion = Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED; } else { - clientVersion = minClientVersion; + clientVersion = maxClientVersion; } transfer.setVersion(clientVersion); String db = transfer.readString(); @@ -136,29 +152,45 @@ public void run() { ci.setBaseDir(baseDir); } if (server.getIfExists()) { - ci.setProperty("IFEXISTS", "TRUE"); + ci.setProperty("FORBID_CREATION", "TRUE"); } transfer.writeInt(SessionRemote.STATUS_OK); transfer.writeInt(clientVersion); transfer.flush(); - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_13) { - if (ci.getFilePasswordHash() != null) { - ci.setFileEncryptionKey(transfer.readBytes()); - } + if (ci.getFilePasswordHash() != null) { + ci.setFileEncryptionKey(transfer.readBytes()); } - session = Engine.getInstance().createSession(ci); + ci.setNetworkConnectionInfo(new NetworkConnectionInfo( + NetUtils.ipToShortForm(new StringBuilder(server.getSSL() ? 
"ssl://" : "tcp://"), + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), + new StringBuilder().append('P').append(clientVersion).toString())); + if (clientVersion < Constants.TCP_PROTOCOL_VERSION_20) { + // For DatabaseMetaData + ci.setProperty("OLD_INFORMATION_SCHEMA", "TRUE"); + // For H2 Console + ci.setProperty("NON_KEYWORDS", "VALUE"); + } + session = Engine.createSession(ci); transfer.setSession(session); server.addConnection(threadId, originalURL, ci.getUserName()); trace("Connected"); + lastRemoteSettingsId = session.getDatabase().getRemoteSettingsId(); + } catch (OutOfMemoryError e) { + // catch this separately otherwise such errors will never hit the console + server.traceError(e); + sendError(e, true); + stop = true; } catch (Throwable e) { - sendError(e); + sendError(e,true); stop = true; } while (!stop) { try { process(); } catch (Throwable e) { - sendError(e); + sendError(e, true); } } trace("Disconnect"); @@ -172,23 +204,12 @@ public void run() { private void closeSession() { if (session != null) { RuntimeException closeError = null; - try { - Command rollback = session.prepareLocal("ROLLBACK"); - rollback.executeUpdate(); - } catch (RuntimeException e) { - closeError = e; - server.traceError(e); - } catch (Exception e) { - server.traceError(e); - } try { session.close(); server.removeConnection(threadId); } catch (RuntimeException e) { - if (closeError == null) { - closeError = e; - server.traceError(e); - } + closeError = e; + server.traceError(e); } catch (Exception e) { server.traceError(e); } finally { @@ -216,7 +237,7 @@ void close() { } } - private void sendError(Throwable t) { + private void sendError(Throwable t, boolean withStatus) { try { SQLException e = DbException.convert(t).getSQLException(); StringWriter writer = new StringWriter(); @@ -224,15 +245,18 @@ private void sendError(Throwable t) { String trace = 
writer.toString(); String message; String sql; - if (e instanceof JdbcSQLException) { - JdbcSQLException j = (JdbcSQLException) e; + if (e instanceof JdbcException) { + JdbcException j = (JdbcException) e; message = j.getOriginalMessage(); sql = j.getSQL(); } else { message = e.getMessage(); sql = null; } - transfer.writeInt(SessionRemote.STATUS_ERROR). + if (withStatus) { + transfer.writeInt(SessionRemote.STATUS_ERROR); + } + transfer. writeString(e.getSQLState()).writeString(message). writeString(sql).writeInt(e.getErrorCode()).writeString(trace).flush(); } catch (Exception e2) { @@ -249,15 +273,15 @@ private void setParameters(Command command) throws IOException { ArrayList params = command.getParameters(); for (int i = 0; i < len; i++) { Parameter p = (Parameter) params.get(i); - p.setValue(transfer.readValue()); + p.setValue(transfer.readValue(null)); } } private void process() throws IOException { int operation = transfer.readInt(); switch (operation) { - case SessionRemote.SESSION_PREPARE_READ_PARAMS: - case SessionRemote.SESSION_PREPARE: { + case SessionRemote.SESSION_PREPARE: + case SessionRemote.SESSION_PREPARE_READ_PARAMS2: { int id = transfer.readInt(); String sql = transfer.readString(); int old = session.getModificationId(); @@ -265,10 +289,19 @@ private void process() throws IOException { boolean readonly = command.isReadOnly(); cache.addObject(id, command); boolean isQuery = command.isQuery(); - ArrayList params = command.getParameters(); + transfer.writeInt(getState(old)).writeBoolean(isQuery). 
- writeBoolean(readonly).writeInt(params.size()); - if (operation == SessionRemote.SESSION_PREPARE_READ_PARAMS) { + writeBoolean(readonly); + + if (operation != SessionRemote.SESSION_PREPARE) { + transfer.writeInt(command.getCommandType()); + } + + ArrayList params = command.getParameters(); + + transfer.writeInt(params.size()); + + if (operation != SessionRemote.SESSION_PREPARE) { for (ParameterInterface p : params) { ParameterRemote.writeMetaData(transfer, p); } @@ -288,7 +321,7 @@ private void process() throws IOException { commit = session.prepareLocal("COMMIT"); } int old = session.getModificationId(); - commit.executeUpdate(); + commit.executeUpdate(null); transfer.writeInt(getState(old)).flush(); break; } @@ -300,7 +333,7 @@ private void process() throws IOException { cache.addObject(objectId, result); int columnCount = result.getVisibleColumnCount(); transfer.writeInt(SessionRemote.STATUS_OK). - writeInt(columnCount).writeInt(0); + writeInt(columnCount).writeRowCount(0L); for (int i = 0; i < columnCount; i++) { ResultColumn.writeColumn(transfer, result, i); } @@ -310,7 +343,7 @@ private void process() throws IOException { case SessionRemote.COMMAND_EXECUTE_QUERY: { int id = transfer.readInt(); int objectId = transfer.readInt(); - int maxRows = transfer.readInt(); + long maxRows = transfer.readRowCount(); int fetchSize = transfer.readInt(); Command command = (Command) cache.getObject(id, false); setParameters(command); @@ -323,15 +356,12 @@ private void process() throws IOException { int columnCount = result.getVisibleColumnCount(); int state = getState(old); transfer.writeInt(state).writeInt(columnCount); - int rowCount = result.getRowCount(); - transfer.writeInt(rowCount); + long rowCount = result.isLazy() ? 
-1L : result.getRowCount(); + transfer.writeRowCount(rowCount); for (int i = 0; i < columnCount; i++) { ResultColumn.writeColumn(transfer, result, i); } - int fetch = Math.min(rowCount, fetchSize); - for (int i = 0; i < fetch; i++) { - sendRow(result); - } + sendRows(result, rowCount >= 0L ? Math.min(rowCount, fetchSize) : fetchSize); transfer.flush(); break; } @@ -339,19 +369,66 @@ private void process() throws IOException { int id = transfer.readInt(); Command command = (Command) cache.getObject(id, false); setParameters(command); + boolean writeGeneratedKeys = true; + Object generatedKeysRequest; + int mode = transfer.readInt(); + switch (mode) { + case GeneratedKeysMode.NONE: + generatedKeysRequest = false; + writeGeneratedKeys = false; + break; + case GeneratedKeysMode.AUTO: + generatedKeysRequest = true; + break; + case GeneratedKeysMode.COLUMN_NUMBERS: { + int len = transfer.readInt(); + int[] keys = new int[len]; + for (int i = 0; i < len; i++) { + keys[i] = transfer.readInt(); + } + generatedKeysRequest = keys; + break; + } + case GeneratedKeysMode.COLUMN_NAMES: { + int len = transfer.readInt(); + String[] keys = new String[len]; + for (int i = 0; i < len; i++) { + keys[i] = transfer.readString(); + } + generatedKeysRequest = keys; + break; + } + default: + throw DbException.get(ErrorCode.CONNECTION_BROKEN_1, + "Unsupported generated keys' mode " + mode); + } int old = session.getModificationId(); - int updateCount; + ResultWithGeneratedKeys result; synchronized (session) { - updateCount = command.executeUpdate(); + result = command.executeUpdate(generatedKeysRequest); } int status; if (session.isClosed()) { status = SessionRemote.STATUS_CLOSED; + stop = true; } else { status = getState(old); } - transfer.writeInt(status).writeInt(updateCount). 
- writeBoolean(session.getAutoCommit()); + transfer.writeInt(status); + transfer.writeRowCount(result.getUpdateCount()); + transfer.writeBoolean(session.getAutoCommit()); + if (writeGeneratedKeys) { + ResultInterface generatedKeys = result.getGeneratedKeys(); + int columnCount = generatedKeys.getVisibleColumnCount(); + transfer.writeInt(columnCount); + long rowCount = generatedKeys.getRowCount(); + transfer.writeRowCount(rowCount); + for (int i = 0; i < columnCount; i++) { + ResultColumn.writeColumn(transfer, generatedKeys, i); + } + sendRows(generatedKeys, rowCount); + generatedKeys.close(); + } transfer.flush(); break; } @@ -369,9 +446,7 @@ private void process() throws IOException { int count = transfer.readInt(); ResultInterface result = (ResultInterface) cache.getObject(id, false); transfer.writeInt(SessionRemote.STATUS_OK); - for (int i = 0; i < count; i++) { - sendRow(result); - } + sendRows(result, count); transfer.flush(); break; } @@ -400,9 +475,12 @@ private void process() throws IOException { } case SessionRemote.SESSION_SET_ID: { sessionId = transfer.readString(); - transfer.writeInt(SessionRemote.STATUS_OK); - transfer.writeBoolean(session.getAutoCommit()); - transfer.flush(); + if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_20) { + session.setTimeZone(TimeZoneProvider.ofId(transfer.readString())); + } + transfer.writeInt(SessionRemote.STATUS_OK) + .writeBoolean(session.getAutoCommit()) + .flush(); break; } case SessionRemote.SESSION_SET_AUTOCOMMIT: { @@ -418,40 +496,15 @@ private void process() throws IOException { } case SessionRemote.LOB_READ: { long lobId = transfer.readLong(); - byte[] hmac; - CachedInputStream in; - boolean verifyMac; - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_11) { - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - hmac = transfer.readBytes(); - verifyMac = true; - } else { - hmac = null; - verifyMac = false; - } - in = lobs.get(lobId); - if (in == null && verifyMac) { - in = new 
CachedInputStream(null); - lobs.put(lobId, in); - } - } else { - verifyMac = false; - hmac = null; - in = lobs.get(lobId); - } + byte[] hmac = transfer.readBytes(); long offset = transfer.readLong(); int length = transfer.readInt(); - if (verifyMac) { - transfer.verifyLobMac(hmac, lobId); - } - if (in == null) { - throw DbException.get(ErrorCode.OBJECT_CLOSED); - } - if (in.getPos() != offset) { + transfer.verifyLobMac(hmac, lobId); + CachedInputStream in = lobs.get(lobId); + if (in == null || in.getPos() != offset) { LobStorageInterface lobStorage = session.getDataHandler().getLobStorage(); // only the lob id is used - ValueLobDb lob = ValueLobDb.create(Value.BLOB, null, -1, lobId, hmac, -1); - InputStream lobIn = lobStorage.getInputStream(lob, hmac, -1); + InputStream lobIn = lobStorage.getInputStream(lobId, -1); in = new CachedInputStream(lobIn); lobs.put(lobId, in); lobIn.skip(offset); @@ -466,47 +519,87 @@ private void process() throws IOException { transfer.flush(); break; } + case SessionRemote.GET_JDBC_META: { + int code = transfer.readInt(); + int length = transfer.readInt(); + Value[] args = new Value[length]; + for (int i = 0; i < length; i++) { + args[i] = transfer.readValue(null); + } + int old = session.getModificationId(); + ResultInterface result; + synchronized (session) { + result = DatabaseMetaServer.process(session, code, args); + } + int columnCount = result.getVisibleColumnCount(); + int state = getState(old); + transfer.writeInt(state).writeInt(columnCount); + long rowCount = result.getRowCount(); + transfer.writeRowCount(rowCount); + for (int i = 0; i < columnCount; i++) { + ResultColumn.writeColumn(transfer, result, i); + } + sendRows(result, rowCount); + transfer.flush(); + break; + } default: trace("Unknown operation: " + operation); - closeSession(); close(); } } private int getState(int oldModificationId) { + if (session == null) { + return SessionRemote.STATUS_CLOSED; + } if (session.getModificationId() == oldModificationId) { - return 
SessionRemote.STATUS_OK; + long remoteSettingsId = session.getDatabase().getRemoteSettingsId(); + if (lastRemoteSettingsId == remoteSettingsId) { + return SessionRemote.STATUS_OK; + } + lastRemoteSettingsId = remoteSettingsId; } return SessionRemote.STATUS_OK_STATE_CHANGED; } - private void sendRow(ResultInterface result) throws IOException { - if (result.next()) { - transfer.writeBoolean(true); - Value[] v = result.currentRow(); - for (int i = 0; i < result.getVisibleColumnCount(); i++) { - if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) { - transfer.writeValue(v[i]); + private void sendRows(ResultInterface result, long count) throws IOException { + int columnCount = result.getVisibleColumnCount(); + boolean lazy = result.isLazy(); + Session oldSession = lazy ? session.setThreadLocalSession() : null; + try { + while (count-- > 0L) { + boolean hasNext; + try { + hasNext = result.next(); + } catch (Exception e) { + transfer.writeByte((byte) -1); + sendError(e, false); + break; + } + if (hasNext) { + transfer.writeByte((byte) 1); + Value[] values = result.currentRow(); + for (int i = 0; i < columnCount; i++) { + Value v = values[i]; + if (lazy && v instanceof ValueLob) { + ValueLob v2 = ((ValueLob) v).copyToResult(); + if (v2 != v) { + v = session.addTemporaryLob(v2); + } + } + transfer.writeValue(v); + } } else { - writeValue(v[i]); + transfer.writeByte((byte) 0); + break; } } - } else { - transfer.writeBoolean(false); - } - } - - private void writeValue(Value v) throws IOException { - if (v.getType() == Value.CLOB || v.getType() == Value.BLOB) { - if (v instanceof ValueLobDb) { - ValueLobDb lob = (ValueLobDb) v; - if (lob.isStored()) { - long id = lob.getLobId(); - lobs.put(id, new CachedInputStream(null)); - } + } finally { + if (lazy) { + session.resetThreadLocalSession(oldSession); } } - transfer.writeValue(v); } void setThread(Thread thread) { @@ -524,7 +617,7 @@ Thread getThread() { * @param statementId the statement to cancel */ void 
cancelStatement(String targetSessionId, int statementId) { - if (StringUtils.equals(targetSessionId, this.sessionId)) { + if (Objects.equals(targetSessionId, this.sessionId)) { Command cmd = (Command) cache.getObject(statementId, false); cmd.cancel(); } diff --git a/h2/src/main/org/h2/server/package.html b/h2/src/main/org/h2/server/package.html index 15458bcf1c..05dde64b0c 100644 --- a/h2/src/main/org/h2/server/package.html +++ b/h2/src/main/org/h2/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/pg/PgServer.java b/h2/src/main/org/h2/server/pg/PgServer.java index 3f86944882..94a59dd41d 100644 --- a/h2/src/main/org/h2/server/pg/PgServer.java +++ b/h2/src/main/org/h2/server/pg/PgServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.pg; @@ -9,30 +9,25 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; - import org.h2.api.ErrorCode; -import org.h2.engine.Constants; import org.h2.message.DbException; import org.h2.server.Service; import org.h2.util.NetUtils; -import org.h2.util.New; import org.h2.util.Tool; +import org.h2.util.Utils10; +import org.h2.value.TypeInfo; +import org.h2.value.Value; /** * This class implements a subset of the PostgreSQL protocol as described here: - * http://developer.postgresql.org/pgdocs/postgres/protocol.html + * https://www.postgresql.org/docs/devel/protocol.html * The PostgreSQL catalog is described here: - * http://www.postgresql.org/docs/7.4/static/catalogs.html + * https://www.postgresql.org/docs/7.4/catalogs.html * * @author Thomas Mueller * @author Sergi Vladykin 2009-07-03 (convertType) @@ -50,11 +45,6 @@ public class PgServer implements Service { */ public static final int PG_TYPE_VARCHAR = 1043; - /** - * The integer array type (for the column pg_index.indkey). 
- */ - public static final int PG_TYPE_INT2VECTOR = 22; - public static final int PG_TYPE_BOOL = 16; public static final int PG_TYPE_BYTEA = 17; public static final int PG_TYPE_BPCHAR = 1042; @@ -62,17 +52,20 @@ public class PgServer implements Service { public static final int PG_TYPE_INT2 = 21; public static final int PG_TYPE_INT4 = 23; public static final int PG_TYPE_TEXT = 25; - public static final int PG_TYPE_OID = 26; public static final int PG_TYPE_FLOAT4 = 700; public static final int PG_TYPE_FLOAT8 = 701; public static final int PG_TYPE_UNKNOWN = 705; - public static final int PG_TYPE_TEXTARRAY = 1009; + public static final int PG_TYPE_INT2_ARRAY = 1005; + public static final int PG_TYPE_INT4_ARRAY = 1007; + public static final int PG_TYPE_VARCHAR_ARRAY = 1015; public static final int PG_TYPE_DATE = 1082; public static final int PG_TYPE_TIME = 1083; - public static final int PG_TYPE_TIMESTAMP_NO_TMZONE = 1114; + public static final int PG_TYPE_TIMETZ = 1266; + public static final int PG_TYPE_TIMESTAMP = 1114; + public static final int PG_TYPE_TIMESTAMPTZ = 1184; public static final int PG_TYPE_NUMERIC = 1700; - private final HashSet typeSet = New.hashSet(); + private final HashSet typeSet = new HashSet<>(); private int port = PgServer.DEFAULT_PORT; private boolean portIsSet; @@ -85,7 +78,7 @@ public class PgServer implements Service { private String baseDir; private boolean allowOthers; private boolean isDaemon; - private boolean ifExists; + private boolean ifExists = true; private String key, keyDatabase; @Override @@ -106,12 +99,13 @@ public void init(String... 
args) { isDaemon = true; } else if (Tool.isOption(a, "-ifExists")) { ifExists = true; + } else if (Tool.isOption(a, "-ifNotExists")) { + ifExists = false; } else if (Tool.isOption(a, "-key")) { key = args[++i]; keyDatabase = args[++i]; } } - org.h2.Driver.load(); // int testing; // trace = true; } @@ -198,10 +192,12 @@ public void listen() { trace("Connection not allowed"); s.close(); } else { + Utils10.setTcpQuickack(s, true); PgServerThread c = new PgServerThread(s, this); running.add(c); - c.setProcessId(pid.incrementAndGet()); - Thread thread = new Thread(c, threadName+" thread"); + int id = pid.incrementAndGet(); + c.setProcessId(id); + Thread thread = new Thread(c, threadName + " thread-" + id); thread.setDaemon(isDaemon); c.setThread(thread); thread.start(); @@ -230,7 +226,7 @@ public void stop() { } } // TODO server: using a boolean 'now' argument? a timeout? - for (PgServerThread c : New.arrayList(running)) { + for (PgServerThread c : new ArrayList<>(running)) { c.close(); try { Thread t = c.getThread(); @@ -268,7 +264,7 @@ public boolean isRunning(boolean traceError) { * @return the thread */ PgServerThread getThread(int processId) { - for (PgServerThread c : New.arrayList(running)) { + for (PgServerThread c : new ArrayList<>(running)) { if (c.getProcessId() == processId) { return c; } @@ -300,205 +296,84 @@ boolean getIfExists() { } /** - * The Java implementation of the PostgreSQL function pg_get_indexdef. The - * method is used to get CREATE INDEX command for an index, or the column - * definition of one column in the index. 
- * - * @param conn the connection - * @param indexId the index id - * @param ordinalPosition the ordinal position (null if the SQL statement - * should be returned) - * @param pretty this flag is ignored - * @return the SQL statement or the column name - */ - public static String getIndexColumn(Connection conn, int indexId, - Integer ordinalPosition, Boolean pretty) throws SQLException { - if (ordinalPosition == null || ordinalPosition.intValue() == 0) { - PreparedStatement prep = conn.prepareStatement( - "select sql from information_schema.indexes where id=?"); - prep.setInt(1, indexId); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return ""; - } - PreparedStatement prep = conn.prepareStatement( - "select column_name from information_schema.indexes " + - "where id=? and ordinal_position=?"); - prep.setInt(1, indexId); - prep.setInt(2, ordinalPosition.intValue()); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return ""; - } - - /** - * Get the name of the current schema. - * This method is called by the database. - * - * @param conn the connection - * @return the schema name - */ - public static String getCurrentSchema(Connection conn) throws SQLException { - ResultSet rs = conn.createStatement().executeQuery("call schema()"); - rs.next(); - return rs.getString(1); - } - - /** - * Get the OID of an object. This method is called by the database. 
- * - * @param conn the connection - * @param tableName the table name - * @return the oid - */ - public static int getOid(Connection conn, String tableName) - throws SQLException { - if (tableName.startsWith("\"") && tableName.endsWith("\"")) { - tableName = tableName.substring(1, tableName.length() - 1); - } - PreparedStatement prep = conn.prepareStatement( - "select oid from pg_class where relName = ?"); - prep.setString(1, tableName); - ResultSet rs = prep.executeQuery(); - if (!rs.next()) { - return 0; - } - return rs.getInt(1); - } - - /** - * Get the name of this encoding code. - * This method is called by the database. + * Returns the name of the given type. * - * @param code the encoding code - * @return the encoding name - */ - public static String getEncodingName(int code) { - switch (code) { - case 0: - return "SQL_ASCII"; - case 6: - return "UTF8"; - case 8: - return "LATIN1"; - default: - return code < 40 ? "UTF8" : ""; - } - } - - /** - * Get the version. This method must return PostgreSQL to keep some clients - * happy. This method is called by the database. - * - * @return the server name and version - */ - public static String getVersion() { - return "PostgreSQL 8.1.4 server protocol using H2 " + - Constants.getFullVersion(); - } - - /** - * Get the current system time. - * This method is called by the database. - * - * @return the current system time - */ - public static Timestamp getStartTime() { - return new Timestamp(System.currentTimeMillis()); - } - - /** - * Get the user name for this id. - * This method is called by the database. 
- * - * @param conn the connection - * @param id the user id - * @return the user name - */ - public static String getUserById(Connection conn, int id) throws SQLException { - PreparedStatement prep = conn.prepareStatement( - "SELECT NAME FROM INFORMATION_SCHEMA.USERS WHERE ID=?"); - prep.setInt(1, id); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); - } - return null; - } - - /** - * Check if the this session has the given database privilege. - * This method is called by the database. - * - * @param id the session id - * @param privilege the privilege to check - * @return true - */ - public static boolean hasDatabasePrivilege(int id, String privilege) { - return true; - } - - /** - * Check if the current session has access to this table. - * This method is called by the database. - * - * @param table the table name - * @param privilege the privilege to check - * @return true - */ - public static boolean hasTablePrivilege(String table, String privilege) { - return true; - } - - /** - * Get the current transaction id. - * This method is called by the database. - * - * @param table the table name - * @param id the id - * @return 1 - */ - public static int getCurrentTid(String table, String id) { - return 1; - } - - /** - * A fake wrapper around pg_get_expr(expr_text, relation_oid), in PostgreSQL - * it "decompiles the internal form of an expression, assuming that any vars - * in it refer to the relation indicated by the second parameter". - * - * @param exprText the expression text - * @param relationOid the relation object id - * @return always null - */ - public static String getPgExpr(String exprText, int relationOid) { - return null; - } - - /** - * Check if the current session has access to this table. - * This method is called by the database. 
- * - * @param conn the connection * @param pgType the PostgreSQL type oid - * @param typeMod the type modifier (typically -1) * @return the name of the given type */ - public static String formatType(Connection conn, int pgType, int typeMod) - throws SQLException { - PreparedStatement prep = conn.prepareStatement( - "select typname from pg_catalog.pg_type where oid = ? and typtypmod = ?"); - prep.setInt(1, pgType); - prep.setInt(2, typeMod); - ResultSet rs = prep.executeQuery(); - if (rs.next()) { - return rs.getString(1); + public static String formatType(int pgType) { + int valueType; + switch (pgType) { + case 0: + return "-"; + case PG_TYPE_BOOL: + valueType = Value.BOOLEAN; + break; + case PG_TYPE_BYTEA: + valueType = Value.VARBINARY; + break; + case 18: + return "char"; + case 19: + return "name"; + case PG_TYPE_INT8: + valueType = Value.BIGINT; + break; + case PG_TYPE_INT2: + valueType = Value.SMALLINT; + break; + case 22: + return "int2vector"; + case PG_TYPE_INT4: + valueType = Value.INTEGER; + break; + case 24: + return "regproc"; + case PG_TYPE_TEXT: + valueType = Value.CLOB; + break; + case PG_TYPE_FLOAT4: + valueType = Value.REAL; + break; + case PG_TYPE_FLOAT8: + valueType = Value.DOUBLE; + break; + case PG_TYPE_INT2_ARRAY: + return "smallint[]"; + case PG_TYPE_INT4_ARRAY: + return "integer[]"; + case PG_TYPE_VARCHAR_ARRAY: + return "character varying[]"; + case PG_TYPE_BPCHAR: + valueType = Value.CHAR; + break; + case PG_TYPE_VARCHAR: + valueType = Value.VARCHAR; + break; + case PG_TYPE_DATE: + valueType = Value.DATE; + break; + case PG_TYPE_TIME: + valueType = Value.TIME; + break; + case PG_TYPE_TIMETZ: + valueType = Value.TIME_TZ; + break; + case PG_TYPE_TIMESTAMP: + valueType = Value.TIMESTAMP; + break; + case PG_TYPE_TIMESTAMPTZ: + valueType = Value.TIMESTAMP_TZ; + break; + case PG_TYPE_NUMERIC: + valueType = Value.NUMERIC; + break; + case 2205: + return "regclass"; + default: + return "???"; } - return null; + return 
Value.getTypeName(valueType); } /** @@ -507,40 +382,56 @@ public static String formatType(Connection conn, int pgType, int typeMod) * @param type the SQL type * @return the PostgreSQL type */ - public static int convertType(final int type) { - switch (type) { - case Types.BOOLEAN: + public static int convertType(TypeInfo type) { + switch (type.getValueType()) { + case Value.BOOLEAN: return PG_TYPE_BOOL; - case Types.VARCHAR: + case Value.VARCHAR: return PG_TYPE_VARCHAR; - case Types.CLOB: + case Value.NULL: + case Value.CLOB: return PG_TYPE_TEXT; - case Types.CHAR: + case Value.CHAR: return PG_TYPE_BPCHAR; - case Types.SMALLINT: + case Value.SMALLINT: return PG_TYPE_INT2; - case Types.INTEGER: + case Value.INTEGER: return PG_TYPE_INT4; - case Types.BIGINT: + case Value.BIGINT: return PG_TYPE_INT8; - case Types.DECIMAL: + case Value.NUMERIC: + case Value.DECFLOAT: return PG_TYPE_NUMERIC; - case Types.REAL: + case Value.REAL: return PG_TYPE_FLOAT4; - case Types.DOUBLE: + case Value.DOUBLE: return PG_TYPE_FLOAT8; - case Types.TIME: + case Value.TIME: return PG_TYPE_TIME; - case Types.DATE: + case Value.TIME_TZ: + return PG_TYPE_TIMETZ; + case Value.DATE: return PG_TYPE_DATE; - case Types.TIMESTAMP: - return PG_TYPE_TIMESTAMP_NO_TMZONE; - case Types.VARBINARY: + case Value.TIMESTAMP: + return PG_TYPE_TIMESTAMP; + case Value.TIMESTAMP_TZ: + return PG_TYPE_TIMESTAMPTZ; + case Value.BINARY: + case Value.VARBINARY: return PG_TYPE_BYTEA; - case Types.BLOB: - return PG_TYPE_OID; - case Types.ARRAY: - return PG_TYPE_TEXTARRAY; + case Value.ARRAY: { + type = (TypeInfo) type.getExtTypeInfo(); + switch (type.getValueType()) { + case Value.SMALLINT: + return PG_TYPE_INT2_ARRAY; + case Value.INTEGER: + return PG_TYPE_INT4_ARRAY; + case Value.VARCHAR: + return PG_TYPE_VARCHAR_ARRAY; + default: + return PG_TYPE_VARCHAR_ARRAY; + } + } default: return PG_TYPE_UNKNOWN; } diff --git a/h2/src/main/org/h2/server/pg/PgServerThread.java b/h2/src/main/org/h2/server/pg/PgServerThread.java 
index 4a1eb97353..aba652af6a 100644 --- a/h2/src/main/org/h2/server/pg/PgServerThread.java +++ b/h2/src/main/org/h2/server/pg/PgServerThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.pg; @@ -12,66 +12,114 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.OutputStream; -import java.io.Reader; import java.io.StringReader; +import java.math.BigDecimal; +import java.math.BigInteger; import java.net.Socket; -import java.sql.Connection; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Properties; +import java.util.regex.Pattern; +import org.h2.api.ErrorCode; import org.h2.command.CommandInterface; import org.h2.engine.ConnectionInfo; +import org.h2.engine.Constants; +import org.h2.engine.Database; +import org.h2.engine.Engine; +import org.h2.engine.SessionLocal; import org.h2.engine.SysProperties; -import org.h2.jdbc.JdbcConnection; -import org.h2.jdbc.JdbcPreparedStatement; -import org.h2.jdbc.JdbcStatement; +import org.h2.expression.ParameterInterface; import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; +import org.h2.result.ResultInterface; +import org.h2.schema.Schema; +import org.h2.table.Column; +import org.h2.table.Table; +import 
org.h2.util.DateTimeUtils; import org.h2.util.MathUtils; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.ScriptReader; import org.h2.util.StringUtils; +import org.h2.util.TimeZoneProvider; import org.h2.util.Utils; +import org.h2.util.Utils10; import org.h2.value.CaseInsensitiveMap; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueDate; +import org.h2.value.ValueDouble; +import org.h2.value.ValueInteger; +import org.h2.value.ValueNull; +import org.h2.value.ValueReal; +import org.h2.value.ValueSmallint; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * One server thread is opened for each client. */ -public class PgServerThread implements Runnable { +public final class PgServerThread implements Runnable { + + private static final boolean INTEGER_DATE_TYPES = false; + + private static final Pattern SHOULD_QUOTE = Pattern.compile(".*[\",\\\\{}].*"); + + private static String pgTimeZone(String value) { + if (value.startsWith("GMT+")) { + return convertTimeZone(value, "GMT-"); + } else if (value.startsWith("GMT-")) { + return convertTimeZone(value, "GMT+"); + } else if (value.startsWith("UTC+")) { + return convertTimeZone(value, "UTC-"); + } else if (value.startsWith("UTC-")) { + return convertTimeZone(value, "UTC+"); + } else { + return value; + } + } + + private static String convertTimeZone(String value, String prefix) { + int length = value.length(); + return new StringBuilder(length).append(prefix).append(value, 4, length).toString(); + } + private final PgServer server; private Socket socket; - private Connection conn; + private SessionLocal session; private boolean stop; private DataInputStream dataInRaw; private DataInputStream dataIn; 
private OutputStream out; private int messageType; - private ByteArrayOutputStream outBuffer; + private ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); private DataOutputStream dataOut; private Thread thread; private boolean initDone; private String userName; private String databaseName; private int processId; - private int secret; - private JdbcStatement activeRequest; + private final int secret; + private CommandInterface activeRequest; private String clientEncoding = SysProperties.PG_DEFAULT_CLIENT_ENCODING; - private String dateStyle = "ISO"; + private String dateStyle = "ISO, MDY"; + private TimeZoneProvider timeZone = DateTimeUtils.getTimeZone(); private final HashMap prepared = - new CaseInsensitiveMap(); + new CaseInsensitiveMap<>(); private final HashMap portals = - new CaseInsensitiveMap(); + new CaseInsensitiveMap<>(); PgServerThread(Socket socket, PgServer server) { this.server = server; @@ -109,7 +157,7 @@ private String readString() throws IOException { } buff.write(x); } - return new String(buff.toByteArray(), getEncoding()); + return Utils10.byteArrayOutputStreamToString(buff, getEncoding()); } private int readInt() throws IOException { @@ -141,7 +189,7 @@ private void process() throws IOException { } int len = dataInRaw.readInt(); len -= 4; - byte[] data = DataUtils.newBytes(len); + byte[] data = Utils.newBytes(len); dataInRaw.readFully(data, 0, len); dataIn = new DataInputStream(new ByteArrayInputStream(data, 0, len)); switch (x) { @@ -171,19 +219,40 @@ private void process() throws IOException { " (" + (version >> 16) + "." 
+ (version & 0xff) + ")"); while (true) { String param = readString(); - if (param.length() == 0) { + if (param.isEmpty()) { break; } String value = readString(); - if ("user".equals(param)) { + switch (param) { + case "user": this.userName = value; - } else if ("database".equals(param)) { + break; + case "database": this.databaseName = server.checkKeyAndGetDatabaseName(value); - } else if ("client_encoding".equals(param)) { + break; + case "client_encoding": + // node-postgres will send "'utf-8'" + int length = value.length(); + if (length >= 2 && value.charAt(0) == '\'' + && value.charAt(length - 1) == '\'') { + value = value.substring(1, length - 1); + } // UTF8 clientEncoding = value; - } else if ("DateStyle".equals(param)) { + break; + case "DateStyle": + if (value.indexOf(',') < 0) { + value += ", MDY"; + } dateStyle = value; + break; + case "TimeZone": + try { + timeZone = TimeZoneProvider.ofId(pgTimeZone(value)); + } catch (Exception e) { + server.trace("Unknown TimeZone: " + value); + } + break; } // extra_float_digits 2 // geqo on (Genetic Query Optimization) @@ -199,10 +268,10 @@ private void process() throws IOException { try { Properties info = new Properties(); info.put("MODE", "PostgreSQL"); - info.put("USER", userName); - info.put("PASSWORD", password); + info.put("DATABASE_TO_LOWER", "TRUE"); + info.put("DEFAULT_NULL_ORDERING", "HIGH"); String url = "jdbc:h2:" + databaseName; - ConnectionInfo ci = new ConnectionInfo(url, info); + ConnectionInfo ci = new ConnectionInfo(url, info, userName, password); String baseDir = server.getBaseDir(); if (baseDir == null) { baseDir = SysProperties.getBaseDir(); @@ -211,12 +280,14 @@ private void process() throws IOException { ci.setBaseDir(baseDir); } if (server.getIfExists()) { - ci.setProperty("IFEXISTS", "TRUE"); + ci.setProperty("FORBID_CREATION", "TRUE"); } - conn = new JdbcConnection(ci, false); - // can not do this because when called inside - // DriverManager.getConnection, a deadlock occurs - // conn = 
DriverManager.getConnection(url, userName, password); + ci.setNetworkConnectionInfo(new NetworkConnectionInfo( // + NetUtils.ipToShortForm(new StringBuilder("pg://"), // + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), null)); + session = Engine.createSession(ci); initDb(); sendAuthenticationOk(); } catch (Exception e) { @@ -230,15 +301,29 @@ private void process() throws IOException { Prepared p = new Prepared(); p.name = readString(); p.sql = getSQL(readString()); - int count = readShort(); - p.paramType = new int[count]; - for (int i = 0; i < count; i++) { - int type = readInt(); - server.checkType(type); - p.paramType[i] = type; + int paramTypesCount = readShort(); + int[] paramTypes = null; + if (paramTypesCount > 0) { + paramTypes = new int[paramTypesCount]; + for (int i = 0; i < paramTypesCount; i++) { + paramTypes[i] = readInt(); + } } try { - p.prep = (JdbcPreparedStatement) conn.prepareStatement(p.sql); + p.prep = session.prepareLocal(p.sql); + ArrayList parameters = p.prep.getParameters(); + int count = parameters.size(); + p.paramType = new int[count]; + for (int i = 0; i < count; i++) { + int type; + if (i < paramTypesCount && paramTypes[i] != 0) { + type = paramTypes[i]; + server.checkType(type); + } else { + type = PgServer.convertType(parameters.get(i).getType()); + } + p.paramType[i] = type; + } prepared.put(p.name, p); sendParseComplete(); } catch (Exception e) { @@ -265,8 +350,9 @@ private void process() throws IOException { } int paramCount = readShort(); try { + ArrayList parameters = prep.prep.getParameters(); for (int i = 0; i < paramCount; i++) { - setParameter(prep.prep, prep.paramType[i], i, formatCodes); + setParameter(parameters, prep.paramType[i], i, formatCodes); } } catch (Exception e) { sendErrorResponse(e); @@ -287,10 +373,13 @@ private void process() throws IOException { if (type == 'S') { Prepared p = 
prepared.remove(name); if (p != null) { - JdbcUtils.closeSilently(p.prep); + p.close(); } } else if (type == 'P') { - portals.remove(name); + Portal p = portals.remove(name); + if (p != null) { + p.prep.closeResult(); + } } else { server.trace("expected S or P, got " + type); sendErrorResponse("expected S or P"); @@ -308,17 +397,21 @@ private void process() throws IOException { if (p == null) { sendErrorResponse("Prepared not found: " + name); } else { - sendParameterDescription(p); + try { + sendParameterDescription(p.prep.getParameters(), p.paramType); + sendRowDescription(p.prep.getMetaData(), null); + } catch (Exception e) { + sendErrorResponse(e); + } } } else if (type == 'P') { Portal p = portals.get(name); if (p == null) { sendErrorResponse("Portal not found: " + name); } else { - PreparedStatement prep = p.prep.prep; + CommandInterface prep = p.prep.prep; try { - ResultSetMetaData meta = prep.getMetaData(); - sendRowDescription(meta); + sendRowDescription(prep.getMetaData(), p.resultColumnFormat); } catch (Exception e) { sendErrorResponse(e); } @@ -337,34 +430,19 @@ private void process() throws IOException { sendErrorResponse("Portal not found: " + name); break; } - int maxRows = readShort(); + int maxRows = readInt(); Prepared prepared = p.prep; - JdbcPreparedStatement prep = prepared.prep; + CommandInterface prep = prepared.prep; server.trace(prepared.sql); try { - prep.setMaxRows(maxRows); setActiveRequest(prep); - boolean result = prep.execute(); - if (result) { - try { - ResultSet rs = prep.getResultSet(); - // the meta-data is sent in the prior 'Describe' - while (rs.next()) { - sendDataRow(rs); - } - sendCommandComplete(prep, 0); - } catch (Exception e) { - sendErrorResponse(e); - } + if (prep.isQuery()) { + executeQuery(prepared, prep, p.resultColumnFormat, maxRows); } else { - sendCommandComplete(prep, prep.getUpdateCount()); + sendCommandComplete(prep, prep.executeUpdate(null).getUpdateCount()); } } catch (Exception e) { - if 
(prep.wasCancelled()) { - sendCancelQueryResponse(); - } else { - sendErrorResponse(e); - } + sendErrorOrCancelResponse(e); } finally { setActiveRequest(null); } @@ -378,43 +456,31 @@ private void process() throws IOException { case 'Q': { server.trace("Query"); String query = readString(); + @SuppressWarnings("resource") ScriptReader reader = new ScriptReader(new StringReader(query)); while (true) { - JdbcStatement stat = null; - try { - String s = reader.readStatement(); - if (s == null) { - break; - } - s = getSQL(s); - stat = (JdbcStatement) conn.createStatement(); - setActiveRequest(stat); - boolean result = stat.execute(s); - if (result) { - ResultSet rs = stat.getResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - try { - sendRowDescription(meta); - while (rs.next()) { - sendDataRow(rs); + String s = reader.readStatement(); + if (s == null) { + break; + } + s = getSQL(s); + try (CommandInterface command = session.prepareLocal(s)) { + setActiveRequest(command); + if (command.isQuery()) { + try (ResultInterface result = command.executeQuery(0, false)) { + sendRowDescription(result, null); + while (result.next()) { + sendDataRow(result, null); } - sendCommandComplete(stat, 0); - } catch (Exception e) { - sendErrorResponse(e); - break; + sendCommandComplete(command, 0); } } else { - sendCommandComplete(stat, stat.getUpdateCount()); - } - } catch (SQLException e) { - if (stat != null && stat.wasCancelled()) { - sendCancelQueryResponse(); - } else { - sendErrorResponse(e); + sendCommandComplete(command, command.executeUpdate(null).getUpdateCount()); } + } catch (Exception e) { + sendErrorOrCancelResponse(e); break; } finally { - JdbcUtils.closeSilently(stat); setActiveRequest(null); } } @@ -432,6 +498,36 @@ private void process() throws IOException { } } + private void executeQuery(Prepared prepared, CommandInterface prep, int[] resultColumnFormat, int maxRows) + throws Exception { + ResultInterface result = prepared.result; + if (result == null) { + 
result = prep.executeQuery(0L, false); + } + try { + // the meta-data is sent in the prior 'Describe' + if (maxRows == 0) { + while (result.next()) { + sendDataRow(result, resultColumnFormat); + } + } else { + for (; maxRows > 0 && result.next(); maxRows--) { + sendDataRow(result, resultColumnFormat); + } + if (result.hasNext()) { + prepared.result = result; + sendCommandSuspended(); + return; + } + } + prepared.closeResult(); + sendCommandComplete(prep, 0); + } catch (Exception e) { + prepared.closeResult(); + throw e; + } + } + private String getSQL(String s) { String lower = StringUtils.toLowerEnglish(s); if (lower.startsWith("show max_identifier_length")) { @@ -446,21 +542,20 @@ private String getSQL(String s) { return s; } - private void sendCommandComplete(JdbcStatement stat, int updateCount) - throws IOException { + private void sendCommandComplete(CommandInterface command, long updateCount) throws IOException { startMessage('C'); - switch (stat.getLastExecutedCommandType()) { + switch (command.getCommandType()) { case CommandInterface.INSERT: writeStringPart("INSERT 0 "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.UPDATE: writeStringPart("UPDATE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.DELETE: writeStringPart("DELETE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); break; case CommandInterface.SELECT: case CommandInterface.CALL: @@ -470,135 +565,349 @@ private void sendCommandComplete(JdbcStatement stat, int updateCount) writeString("BEGIN"); break; default: - server.trace("check CommandComplete tag for command " + stat); + server.trace("check CommandComplete tag for command " + command); writeStringPart("UPDATE "); - writeString(Integer.toString(updateCount)); + writeString(Long.toString(updateCount)); } sendMessage(); } - private void sendDataRow(ResultSet 
rs) throws Exception { - ResultSetMetaData metaData = rs.getMetaData(); - int columns = metaData.getColumnCount(); + private void sendCommandSuspended() throws IOException { + startMessage('s'); + sendMessage(); + } + + private void sendDataRow(ResultInterface result, int[] formatCodes) throws IOException { + int columns = result.getVisibleColumnCount(); startMessage('D'); writeShort(columns); - for (int i = 1; i <= columns; i++) { - writeDataColumn(rs, i, PgServer.convertType(metaData.getColumnType(i))); + Value[] row = result.currentRow(); + for (int i = 0; i < columns; i++) { + int pgType = PgServer.convertType(result.getColumnType(i)); + boolean text = formatAsText(pgType, formatCodes, i); + writeDataColumn(row[i], pgType, text); } sendMessage(); } - private void writeDataColumn(ResultSet rs, int column, int pgType) - throws Exception { - if (formatAsText(pgType)) { + private static long toPostgreDays(long dateValue) { + return DateTimeUtils.absoluteDayFromDateValue(dateValue) - 10_957; + } + + private void writeDataColumn(Value v, int pgType, boolean text) throws IOException { + if (v == ValueNull.INSTANCE) { + writeInt(-1); + return; + } + if (text) { // plain text switch (pgType) { case PgServer.PG_TYPE_BOOL: writeInt(1); - dataOut.writeByte(rs.getBoolean(column) ? 't' : 'f'); + dataOut.writeByte(v.getBoolean() ? 
't' : 'f'); break; - default: - String s = rs.getString(column); - if (s == null) { - writeInt(-1); - } else { - byte[] data = s.getBytes(getEncoding()); - writeInt(data.length); - write(data); + case PgServer.PG_TYPE_BYTEA: { + byte[] bytes = v.getBytesNoCopy(); + int length = bytes.length; + int cnt = length; + for (int i = 0; i < length; i++) { + byte b = bytes[i]; + if (b < 32 || b > 126) { + cnt += 3; + } else if (b == 92) { + cnt++; + } + } + byte[] data = new byte[cnt]; + for (int i = 0, j = 0; i < length; i++) { + byte b = bytes[i]; + if (b < 32 || b > 126) { + data[j++] = '\\'; + data[j++] = (byte) (((b >>> 6) & 3) + '0'); + data[j++] = (byte) (((b >>> 3) & 7) + '0'); + data[j++] = (byte) ((b & 7) + '0'); + } else if (b == 92) { + data[j++] = '\\'; + data[j++] = '\\'; + } else { + data[j++] = b; + } } + writeInt(data.length); + write(data); + break; + } + case PgServer.PG_TYPE_INT2_ARRAY: + case PgServer.PG_TYPE_INT4_ARRAY: + case PgServer.PG_TYPE_VARCHAR_ARRAY: + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + baos.write('{'); + Value[] values = ((ValueArray) v).getList(); + Charset encoding = getEncoding(); + for (int i = 0; i < values.length; i++) { + if (i > 0) { + baos.write(','); + } + String s = values[i].getString(); + if (SHOULD_QUOTE.matcher(s).matches()) { + List ss = new ArrayList<>(); + for (String s0 : s.split("\\\\")) { + ss.add(s0.replace("\"", "\\\"")); + } + s = "\"" + String.join("\\\\", ss) + "\""; + } + baos.write(s.getBytes(encoding)); + } + baos.write('}'); + writeInt(baos.size()); + write(baos); + break; + default: + byte[] data = v.getString().getBytes(getEncoding()); + writeInt(data.length); + write(data); } } else { // binary switch (pgType) { + case PgServer.PG_TYPE_BOOL: + writeInt(1); + dataOut.writeByte(v.getBoolean() ? 
1 : 0); + break; case PgServer.PG_TYPE_INT2: writeInt(2); - writeShort(rs.getShort(column)); + writeShort(v.getShort()); break; case PgServer.PG_TYPE_INT4: writeInt(4); - writeInt(rs.getInt(column)); + writeInt(v.getInt()); break; case PgServer.PG_TYPE_INT8: writeInt(8); - dataOut.writeLong(rs.getLong(column)); + dataOut.writeLong(v.getLong()); break; case PgServer.PG_TYPE_FLOAT4: writeInt(4); - dataOut.writeFloat(rs.getFloat(column)); + dataOut.writeFloat(v.getFloat()); break; case PgServer.PG_TYPE_FLOAT8: writeInt(8); - dataOut.writeDouble(rs.getDouble(column)); + dataOut.writeDouble(v.getDouble()); break; - case PgServer.PG_TYPE_BYTEA: - byte[] data = rs.getBytes(column); - if (data == null) { - writeInt(-1); - } else { - writeInt(data.length); - write(data); + case PgServer.PG_TYPE_NUMERIC: + writeNumericBinary(v.getBigDecimal()); + break; + case PgServer.PG_TYPE_BYTEA: { + byte[] data = v.getBytesNoCopy(); + writeInt(data.length); + write(data); + break; + } + case PgServer.PG_TYPE_DATE: + writeInt(4); + writeInt((int) (toPostgreDays(((ValueDate) v).getDateValue()))); + break; + case PgServer.PG_TYPE_TIME: + writeTimeBinary(((ValueTime) v).getNanos(), 8); + break; + case PgServer.PG_TYPE_TIMETZ: { + ValueTimeTimeZone t = (ValueTimeTimeZone) v; + long m = t.getNanos(); + writeTimeBinary(m, 12); + dataOut.writeInt(-t.getTimeZoneOffsetSeconds()); + break; + } + case PgServer.PG_TYPE_TIMESTAMP: { + ValueTimestamp t = (ValueTimestamp) v; + long m = toPostgreDays(t.getDateValue()) * 86_400; + long nanos = t.getTimeNanos(); + writeTimestampBinary(m, nanos); + break; + } + case PgServer.PG_TYPE_TIMESTAMPTZ: { + ValueTimestampTimeZone t = (ValueTimestampTimeZone) v; + long m = toPostgreDays(t.getDateValue()) * 86_400; + long nanos = t.getTimeNanos() - t.getTimeZoneOffsetSeconds() * 1_000_000_000L; + if (nanos < 0L) { + m--; + nanos += DateTimeUtils.NANOS_PER_DAY; } + writeTimestampBinary(m, nanos); break; - + } default: throw new IllegalStateException("output binary 
format is undefined"); } } } - private String getEncoding() { + private static final int[] POWERS10 = {1, 10, 100, 1000, 10000}; + private static final int MAX_GROUP_SCALE = 4; + private static final int MAX_GROUP_SIZE = POWERS10[4]; + + private static int divide(BigInteger[] unscaled, int divisor) { + BigInteger[] bi = unscaled[0].divideAndRemainder(BigInteger.valueOf(divisor)); + unscaled[0] = bi[0]; + return bi[1].intValue(); + } + + // https://www.npgsql.org/dev/types.html + // https://github.com/npgsql/npgsql/blob/8a479081f707784b5040747b23102c3d6371b9d3/ + // src/Npgsql/TypeHandlers/NumericHandlers/NumericHandler.cs#L166 + private void writeNumericBinary(BigDecimal value) throws IOException { + int weight = 0; + List groups = new ArrayList<>(); + int scale = value.scale(); + int signum = value.signum(); + if (signum != 0) { + BigInteger[] unscaled = {null}; + if (scale < 0) { + unscaled[0] = value.setScale(0).unscaledValue(); + scale = 0; + } else { + unscaled[0] = value.unscaledValue(); + } + if (signum < 0) { + unscaled[0] = unscaled[0].negate(); + } + weight = -scale / MAX_GROUP_SCALE - 1; + int remainder = 0; + int scaleChunk = scale % MAX_GROUP_SCALE; + if (scaleChunk > 0) { + remainder = divide(unscaled, POWERS10[scaleChunk]) * POWERS10[MAX_GROUP_SCALE - scaleChunk]; + if (remainder != 0) { + weight--; + } + } + if (remainder == 0) { + while ((remainder = divide(unscaled, MAX_GROUP_SIZE)) == 0) { + weight++; + } + } + groups.add(remainder); + while (unscaled[0].signum() != 0) { + groups.add(divide(unscaled, MAX_GROUP_SIZE)); + } + } + int groupCount = groups.size(); + if (groupCount + weight > Short.MAX_VALUE || scale > Short.MAX_VALUE) { + throw DbException.get(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1, value.toString()); + } + writeInt(8 + groupCount * 2); + writeShort(groupCount); + writeShort(groupCount + weight); + writeShort(signum < 0 ? 
16384 : 0); + writeShort(scale); + for (int i = groupCount - 1; i >= 0; i--) { + writeShort(groups.get(i)); + } + } + + private void writeTimeBinary(long m, int numBytes) throws IOException { + writeInt(numBytes); + if (INTEGER_DATE_TYPES) { + // long format + m /= 1_000; + } else { + // double format + m = Double.doubleToLongBits(m * 0.000_000_001); + } + dataOut.writeLong(m); + } + + private void writeTimestampBinary(long m, long nanos) throws IOException { + writeInt(8); + if (INTEGER_DATE_TYPES) { + // long format + m = m * 1_000_000 + nanos / 1_000; + } else { + // double format + m = Double.doubleToLongBits(m + nanos * 0.000_000_001); + } + dataOut.writeLong(m); + } + + private Charset getEncoding() { if ("UNICODE".equals(clientEncoding)) { - return "UTF-8"; + return StandardCharsets.UTF_8; } - return clientEncoding; + return Charset.forName(clientEncoding); } - private void setParameter(PreparedStatement prep, - int pgType, int i, int[] formatCodes) throws SQLException, IOException { - boolean text = (i >= formatCodes.length) || (formatCodes[i] == 0); - int col = i + 1; + private void setParameter(ArrayList parameters, int pgType, int i, int[] formatCodes) + throws IOException { + boolean text = true; + if (formatCodes.length == 1) { + text = formatCodes[0] == 0; + } else if (i < formatCodes.length) { + text = formatCodes[i] == 0; + } int paramLen = readInt(); + Value value; if (paramLen == -1) { - prep.setNull(col, Types.NULL); + value = ValueNull.INSTANCE; } else if (text) { // plain text - byte[] data = DataUtils.newBytes(paramLen); + byte[] data = Utils.newBytes(paramLen); readFully(data); - prep.setString(col, new String(data, getEncoding())); + String str = new String(data, getEncoding()); + switch (pgType) { + case PgServer.PG_TYPE_DATE: { + // Strip timezone offset + int idx = str.indexOf(' '); + if (idx > 0) { + str = str.substring(0, idx); + } + break; + } + case PgServer.PG_TYPE_TIME: { + // Strip timezone offset + int idx = str.indexOf('+'); + if 
(idx <= 0) { + idx = str.indexOf('-'); + } + if (idx > 0) { + str = str.substring(0, idx); + } + break; + } + } + value = ValueVarchar.get(str, session); } else { // binary switch (pgType) { case PgServer.PG_TYPE_INT2: - checkParamLength(4, paramLen); - prep.setShort(col, readShort()); + checkParamLength(2, paramLen); + value = ValueSmallint.get(readShort()); break; case PgServer.PG_TYPE_INT4: checkParamLength(4, paramLen); - prep.setInt(col, readInt()); + value = ValueInteger.get(readInt()); break; case PgServer.PG_TYPE_INT8: checkParamLength(8, paramLen); - prep.setLong(col, dataIn.readLong()); + value = ValueBigint.get(dataIn.readLong()); break; case PgServer.PG_TYPE_FLOAT4: checkParamLength(4, paramLen); - prep.setFloat(col, dataIn.readFloat()); + value = ValueReal.get(dataIn.readFloat()); break; case PgServer.PG_TYPE_FLOAT8: checkParamLength(8, paramLen); - prep.setDouble(col, dataIn.readDouble()); + value = ValueDouble.get(dataIn.readDouble()); break; case PgServer.PG_TYPE_BYTEA: - byte[] d1 = DataUtils.newBytes(paramLen); + byte[] d1 = Utils.newBytes(paramLen); readFully(d1); - prep.setBytes(col, d1); + value = ValueVarbinary.getNoCopy(d1); break; default: server.trace("Binary format for type: "+pgType+" is unsupported"); - byte[] d2 = DataUtils.newBytes(paramLen); + byte[] d2 = Utils.newBytes(paramLen); readFully(d2); - prep.setString(col, new String(d2, getEncoding())); + value = ValueVarchar.get(new String(d2, getEncoding()), session); } } + parameters.get(i).setValue(value, true); } private static void checkParamLength(int expected, int got) { @@ -607,6 +916,14 @@ private static void checkParamLength(int expected, int got) { } } + private void sendErrorOrCancelResponse(Exception e) throws IOException { + if (e instanceof DbException && ((DbException) e).getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED) { + sendCancelQueryResponse(); + } else { + sendErrorResponse(e); + } + } + private void sendErrorResponse(Exception re) throws IOException { 
SQLException e = DbException.toSQLException(re); server.traceError(e); @@ -636,27 +953,22 @@ private void sendCancelQueryResponse() throws IOException { sendMessage(); } - private void sendParameterDescription(Prepared p) throws IOException { - try { - PreparedStatement prep = p.prep; - ParameterMetaData meta = prep.getParameterMetaData(); - int count = meta.getParameterCount(); - startMessage('t'); - writeShort(count); - for (int i = 0; i < count; i++) { - int type; - if (p.paramType != null && p.paramType[i] != 0) { - type = p.paramType[i]; - } else { - type = PgServer.PG_TYPE_VARCHAR; - } - server.checkType(type); - writeInt(type); + private void sendParameterDescription(ArrayList parameters, int[] paramTypes) + throws Exception { + int count = parameters.size(); + startMessage('t'); + writeShort(count); + for (int i = 0; i < count; i++) { + int type; + if (paramTypes != null && paramTypes[i] != 0) { + type = paramTypes[i]; + } else { + type = PgServer.PG_TYPE_VARCHAR; } - sendMessage(); - } catch (Exception e) { - sendErrorResponse(e); + server.checkType(type); + writeInt(type); } + sendMessage(); } private void sendNoData() throws IOException { @@ -664,18 +976,32 @@ private void sendNoData() throws IOException { sendMessage(); } - private void sendRowDescription(ResultSetMetaData meta) throws Exception { - if (meta == null) { + private void sendRowDescription(ResultInterface result, int[] formatCodes) throws IOException { + if (result == null) { sendNoData(); } else { - int columns = meta.getColumnCount(); + int columns = result.getVisibleColumnCount(); + int[] oids = new int[columns]; + int[] attnums = new int[columns]; int[] types = new int[columns]; int[] precision = new int[columns]; String[] names = new String[columns]; + Database database = session.getDatabase(); for (int i = 0; i < columns; i++) { - String name = meta.getColumnName(i + 1); + String name = result.getColumnName(i); + Schema schema = database.findSchema(result.getSchemaName(i)); + if 
(schema != null) { + Table table = schema.findTableOrView(session, result.getTableName(i)); + if (table != null) { + oids[i] = table.getId(); + Column column = table.findColumn(name); + if (column != null) { + attnums[i] = column.getColumnId() + 1; + } + } + } names[i] = name; - int type = meta.getColumnType(i + 1); + TypeInfo type = result.getColumnType(i); int pgType = PgServer.convertType(type); // the ODBC client needs the column pg_catalog.pg_index // to be of type 'int2vector' @@ -684,8 +1010,8 @@ private void sendRowDescription(ResultSetMetaData meta) throws Exception { // meta.getTableName(i + 1))) { // type = PgServer.PG_TYPE_INT2VECTOR; // } - precision[i] = meta.getColumnDisplaySize(i + 1); - if (type != Types.NULL) { + precision[i] = type.getDisplaySize(); + if (type.getValueType() != Value.NULL) { server.checkType(pgType); } types[i] = pgType; @@ -695,9 +1021,9 @@ private void sendRowDescription(ResultSetMetaData meta) throws Exception { for (int i = 0; i < columns; i++) { writeString(StringUtils.toLowerEnglish(names[i])); // object ID - writeInt(0); + writeInt(oids[i]); // attribute number of the column - writeShort(0); + writeShort(attnums[i]); // data type writeInt(types[i]); // pg_type.typlen @@ -705,7 +1031,7 @@ private void sendRowDescription(ResultSetMetaData meta) throws Exception { // pg_attribute.atttypmod writeInt(-1); // the format type: text = 0, binary = 1 - writeShort(formatAsText(types[i]) ? 0 : 1); + writeShort(formatAsText(types[i], formatCodes, i) ? 0 : 1); } sendMessage(); } @@ -714,16 +1040,21 @@ private void sendRowDescription(ResultSetMetaData meta) throws Exception { /** * Check whether the given type should be formatted as text. 
* - * @return true for binary + * @param pgType data type + * @param formatCodes format codes, or {@code null} + * @param column 0-based column number + * @return true for text */ - private static boolean formatAsText(int pgType) { - switch (pgType) { - // TODO: add more types to send as binary once compatibility is - // confirmed - case PgServer.PG_TYPE_BYTEA: - return false; + private static boolean formatAsText(int pgType, int[] formatCodes, int column) { + boolean text = true; + if (formatCodes != null && formatCodes.length > 0) { + if (formatCodes.length == 1) { + text = formatCodes[0] == 0; + } else if (column < formatCodes.length) { + text = formatCodes[column] == 0; + } } - return true; + return text; } private static int getTypeSize(int pgType, int precision) { @@ -765,62 +1096,19 @@ private void sendCloseComplete() throws IOException { sendMessage(); } - private void initDb() throws SQLException { - Statement stat = null; - ResultSet rs = null; - try { - synchronized (server) { - // better would be: set the database to exclusive mode - rs = conn.getMetaData().getTables(null, "PG_CATALOG", "PG_VERSION", null); - boolean tableFound = rs.next(); - stat = conn.createStatement(); - if (!tableFound) { - installPgCatalog(stat); - } - rs = stat.executeQuery("select * from pg_catalog.pg_version"); - if (!rs.next() || rs.getInt(1) < 2) { - // installation incomplete, or old version - installPgCatalog(stat); - } else { - // version 2 or newer: check the read version - int versionRead = rs.getInt(2); - if (versionRead > 2) { - throw DbException.throwInternalError("Incompatible PG_VERSION"); - } - } - } - stat.execute("set search_path = PUBLIC, pg_catalog"); - HashSet typeSet = server.getTypeSet(); - if (typeSet.size() == 0) { - rs = stat.executeQuery("select oid from pg_catalog.pg_type"); - while (rs.next()) { - typeSet.add(rs.getInt(1)); - } - } - } finally { - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(rs); + private void initDb() { + 
session.setTimeZone(timeZone); + try (CommandInterface command = session.prepareLocal("set search_path = public, pg_catalog")) { + command.executeUpdate(null); } - } - - private static void installPgCatalog(Statement stat) throws SQLException { - Reader r = null; - try { - r = new InputStreamReader(new ByteArrayInputStream(Utils - .getResource("/org/h2/server/pg/pg_catalog.sql"))); - ScriptReader reader = new ScriptReader(r); - while (true) { - String sql = reader.readStatement(); - if (sql == null) { - break; + HashSet typeSet = server.getTypeSet(); + if (typeSet.isEmpty()) { + try (CommandInterface command = session.prepareLocal("select oid from pg_catalog.pg_type"); + ResultInterface result = command.executeQuery(0, false)) { + while (result.next()) { + typeSet.add(result.currentRow()[0].getInt()); } - stat.execute(sql); } - reader.close(); - } catch (IOException e) { - throw DbException.convertIOException(e, "Can not read pg_catalog resource"); - } finally { - IOUtils.closeSilently(r); } } @@ -828,9 +1116,16 @@ private static void installPgCatalog(Statement stat) throws SQLException { * Close this connection. 
*/ void close() { + for (Prepared prep : prepared.values()) { + prep.close(); + } try { stop = true; - JdbcUtils.closeSilently(conn); + try { + session.close(); + } catch (Exception e) { + // Ignore + } if (socket != null) { socket.close(); } @@ -838,7 +1133,7 @@ void close() { } catch (Exception e) { server.traceError(e); } - conn = null; + session = null; socket = null; server.remove(this); } @@ -855,34 +1150,22 @@ private void sendAuthenticationOk() throws IOException { sendMessage(); sendParameterStatus("client_encoding", clientEncoding); sendParameterStatus("DateStyle", dateStyle); - sendParameterStatus("integer_datetimes", "off"); sendParameterStatus("is_superuser", "off"); sendParameterStatus("server_encoding", "SQL_ASCII"); - sendParameterStatus("server_version", "8.1.4"); + sendParameterStatus("server_version", Constants.PG_VERSION); sendParameterStatus("session_authorization", userName); sendParameterStatus("standard_conforming_strings", "off"); - // TODO PostgreSQL TimeZone - sendParameterStatus("TimeZone", "CET"); + sendParameterStatus("TimeZone", pgTimeZone(timeZone.getId())); + // Don't inline, see https://bugs.eclipse.org/bugs/show_bug.cgi?id=569498 + String value = INTEGER_DATE_TYPES ? "on" : "off"; + sendParameterStatus("integer_datetimes", value); sendBackendKeyData(); sendReadyForQuery(); } private void sendReadyForQuery() throws IOException { startMessage('Z'); - char c; - try { - if (conn.getAutoCommit()) { - // idle - c = 'I'; - } else { - // in a transaction block - c = 'T'; - } - } catch (SQLException e) { - // failed transaction block - c = 'E'; - } - write((byte) c); + write((byte) (session.getAutoCommit() ? 
/* idle */ 'I' : /* in a transaction block */ 'T')); sendMessage(); } @@ -914,24 +1197,30 @@ private void write(byte[] data) throws IOException { dataOut.write(data); } + private void write(ByteArrayOutputStream baos) throws IOException { + baos.writeTo(dataOut); + } + private void write(int b) throws IOException { dataOut.write(b); } private void startMessage(int newMessageType) { this.messageType = newMessageType; - outBuffer = new ByteArrayOutputStream(); + if (outBuffer.size() <= 65_536) { + outBuffer.reset(); + } else { + outBuffer = new ByteArrayOutputStream(); + } dataOut = new DataOutputStream(outBuffer); } private void sendMessage() throws IOException { dataOut.flush(); - byte[] buff = outBuffer.toByteArray(); - int len = buff.length; dataOut = new DataOutputStream(out); - dataOut.write(messageType); - dataOut.writeInt(len + 4); - dataOut.write(buff); + write(messageType); + writeInt(outBuffer.size() + 4); + write(outBuffer); dataOut.flush(); } @@ -959,7 +1248,7 @@ int getProcessId() { return this.processId; } - private synchronized void setActiveRequest(JdbcStatement statement) { + private synchronized void setActiveRequest(CommandInterface statement) { activeRequest = statement; } @@ -968,12 +1257,8 @@ private synchronized void setActiveRequest(JdbcStatement statement) { */ private synchronized void cancelRequest() { if (activeRequest != null) { - try { - activeRequest.cancel(); - activeRequest = null; - } catch (SQLException e) { - throw DbException.convert(e); - } + activeRequest.cancel(); + activeRequest = null; } } @@ -995,12 +1280,40 @@ static class Prepared { /** * The prepared statement. */ - JdbcPreparedStatement prep; + CommandInterface prep; + + /** + * The current result (for suspended portal). + */ + ResultInterface result; /** * The list of parameter types (if set). */ int[] paramType; + + /** + * Closes prepared statement and result, if any. 
+ */ + void close() { + try { + closeResult(); + prep.close(); + } catch (Exception e) { + // Ignore + } + } + + /** + * Closes the result, if any. + */ + void closeResult() { + ResultInterface result = this.result; + if (result != null) { + this.result = null; + result.close(); + } + } } /** diff --git a/h2/src/main/org/h2/server/pg/package.html b/h2/src/main/org/h2/server/pg/package.html index 2d8e201f11..0a3346d9f6 100644 --- a/h2/src/main/org/h2/server/pg/package.html +++ b/h2/src/main/org/h2/server/pg/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/pg/pg_catalog.sql b/h2/src/main/org/h2/server/pg/pg_catalog.sql deleted file mode 100644 index 4500dfbec4..0000000000 --- a/h2/src/main/org/h2/server/pg/pg_catalog.sql +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -; -drop schema if exists pg_catalog; -create schema pg_catalog; - -drop alias if exists pg_convertType; -create alias pg_convertType deterministic for "org.h2.server.pg.PgServer.convertType"; - -drop alias if exists pg_get_oid; -create alias pg_get_oid deterministic for "org.h2.server.pg.PgServer.getOid"; - -create table pg_catalog.pg_version as select 2 as version, 2 as version_read; -grant select on pg_catalog.pg_version to PUBLIC; - -create view pg_catalog.pg_roles -- (oid, rolname, rolcreaterole, rolcreatedb) -as -select - id oid, - cast(name as varchar_ignorecase) rolname, - case when admin then 't' else 'f' end as rolcreaterole, - case when admin then 't' else 'f' end as rolcreatedb -from INFORMATION_SCHEMA.users; -grant select on pg_catalog.pg_roles to PUBLIC; - -create view pg_catalog.pg_namespace -- (oid, nspname) -as -select - id oid, - cast(schema_name as varchar_ignorecase) nspname -from INFORMATION_SCHEMA.schemata; -grant select on pg_catalog.pg_namespace to PUBLIC; - -create table pg_catalog.pg_type( - oid int 
primary key, - typname varchar_ignorecase, - typnamespace int, - typlen int, - typtype varchar, - typbasetype int, - typtypmod int, - typnotnull boolean, - typinput varchar -); -grant select on pg_catalog.pg_type to PUBLIC; - -insert into pg_catalog.pg_type -select - pg_convertType(data_type) oid, - cast(type_name as varchar_ignorecase) typname, - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog') typnamespace, - -1 typlen, - 'c' typtype, - 0 typbasetype, - -1 typtypmod, - false typnotnull, - null typinput -from INFORMATION_SCHEMA.type_info -where pos = 0 - and pg_convertType(data_type) <> 705; -- not unknown - -merge into pg_catalog.pg_type values( - 19, - 'name', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 0, - 'null', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 22, - 'int2vector', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - -1, - 'c', - 0, - -1, - false, - null -); -merge into pg_catalog.pg_type values( - 2205, - 'regproc', - (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog'), - 4, - 'b', - 0, - -1, - false, - null -); - -create domain regproc as varchar_ignorecase; - -create view pg_catalog.pg_class -- (oid, relname, relnamespace, relkind, relam, reltuples, reltablespace, relpages, relhasindex, relhasrules, relhasoids, relchecks, reltriggers) -as -select - id oid, - cast(table_name as varchar_ignorecase) relname, - (select id from INFORMATION_SCHEMA.schemata where schema_name = table_schema) relnamespace, - case table_type when 'TABLE' then 'r' else 'v' end relkind, - 0 relam, - cast(0 as float) reltuples, - 0 reltablespace, - 0 relpages, - false relhasindex, - false relhasrules, - false relhasoids, - cast(0 as smallint) relchecks, - (select count(*) from 
INFORMATION_SCHEMA.triggers t where t.table_schema = table_schema and t.table_name = table_name) reltriggers -from INFORMATION_SCHEMA.tables -union all -select - id oid, - cast(index_name as varchar_ignorecase) relname, - (select id from INFORMATION_SCHEMA.schemata where schema_name = table_schema) relnamespace, - 'i' relkind, - 0 relam, - cast(0 as float) reltuples, - 0 reltablespace, - 0 relpages, - true relhasindex, - false relhasrules, - false relhasoids, - cast(0 as smallint) relchecks, - 0 reltriggers -from INFORMATION_SCHEMA.indexes; -grant select on pg_catalog.pg_class to PUBLIC; - -create table pg_catalog.pg_proc( - oid int, - proname varchar_ignorecase, - prorettype int, - pronamespace int -); -grant select on pg_catalog.pg_proc to PUBLIC; - -create table pg_catalog.pg_trigger( - oid int, - tgconstrrelid int, - tgfoid int, - tgargs int, - tgnargs int, - tgdeferrable boolean, - tginitdeferred boolean, - tgconstrname varchar_ignorecase, - tgrelid int -); -grant select on pg_catalog.pg_trigger to PUBLIC; - -create view pg_catalog.pg_attrdef -- (oid, adsrc, adrelid, adnum) -as -select - id oid, - 0 adsrc, - 0 adrelid, - 0 adnum, - null adbin -from INFORMATION_SCHEMA.tables where 1=0; -grant select on pg_catalog.pg_attrdef to PUBLIC; - -create view pg_catalog.pg_attribute -- (oid, attrelid, attname, atttypid, attlen, attnum, atttypmod, attnotnull, attisdropped, atthasdef) -as -select - t.id*10000 + c.ordinal_position oid, - t.id attrelid, - c.column_name attname, - pg_convertType(data_type) atttypid, - case when numeric_precision > 255 then -1 else numeric_precision end attlen, - c.ordinal_position attnum, - -1 atttypmod, - case c.is_nullable when 'YES' then false else true end attnotnull, - false attisdropped, - false atthasdef -from INFORMATION_SCHEMA.tables t, INFORMATION_SCHEMA.columns c -where t.table_name = c.table_name -and t.table_schema = c.table_schema -union all -select - 1000000 + t.id*10000 + c.ordinal_position oid, - i.id attrelid, - 
c.column_name attname, - pg_convertType(data_type) atttypid, - case when numeric_precision > 255 then -1 else numeric_precision end attlen, - c.ordinal_position attnum, - -1 atttypmod, - case c.is_nullable when 'YES' then false else true end attnotnull, - false attisdropped, - false atthasdef -from INFORMATION_SCHEMA.tables t, INFORMATION_SCHEMA.indexes i, INFORMATION_SCHEMA.columns c -where t.table_name = i.table_name -and t.table_schema = i.table_schema -and t.table_name = c.table_name -and t.table_schema = c.table_schema; -grant select on pg_catalog.pg_attribute to PUBLIC; - -create view pg_catalog.pg_index -- (oid, indexrelid, indrelid, indisclustered, indisunique, indisprimary, indexprs, indkey, indpred) -as -select - i.id oid, - i.id indexrelid, - t.id indrelid, - false indisclustered, - not non_unique indisunique, - primary_key indisprimary, - cast('' as varchar_ignorecase) indexprs, - cast(1 as array) indkey, - null indpred -from INFORMATION_SCHEMA.indexes i, INFORMATION_SCHEMA.tables t -where i.table_schema = t.table_schema -and i.table_name = t.table_name -and i.ordinal_position = 1 --- workaround for MS Access problem opening tables with primary key -and 1=0; -grant select on pg_catalog.pg_index to PUBLIC; - -drop alias if exists pg_get_indexdef; -create alias pg_get_indexdef for "org.h2.server.pg.PgServer.getIndexColumn"; - -drop alias if exists pg_catalog.pg_get_indexdef; -create alias pg_catalog.pg_get_indexdef for "org.h2.server.pg.PgServer.getIndexColumn"; - -drop alias if exists pg_catalog.pg_get_expr; -create alias pg_catalog.pg_get_expr for "org.h2.server.pg.PgServer.getPgExpr"; - -drop alias if exists pg_catalog.format_type; -create alias pg_catalog.format_type for "org.h2.server.pg.PgServer.formatType"; - -drop alias if exists version; -create alias version for "org.h2.server.pg.PgServer.getVersion"; - -drop alias if exists current_schema; -create alias current_schema for "org.h2.server.pg.PgServer.getCurrentSchema"; - -drop alias if exists 
pg_encoding_to_char; -create alias pg_encoding_to_char for "org.h2.server.pg.PgServer.getEncodingName"; - -drop alias if exists pg_postmaster_start_time; -create alias pg_postmaster_start_time for "org.h2.server.pg.PgServer.getStartTime"; - -drop alias if exists pg_get_userbyid; -create alias pg_get_userbyid for "org.h2.server.pg.PgServer.getUserById"; - -drop alias if exists has_database_privilege; -create alias has_database_privilege for "org.h2.server.pg.PgServer.hasDatabasePrivilege"; - -drop alias if exists has_table_privilege; -create alias has_table_privilege for "org.h2.server.pg.PgServer.hasTablePrivilege"; - -drop alias if exists currtid2; -create alias currtid2 for "org.h2.server.pg.PgServer.getCurrentTid"; - -create table pg_catalog.pg_database( - oid int, - datname varchar_ignorecase, - encoding int, - datlastsysoid int, - datallowconn boolean, - datconfig array, -- text[] - datacl array, -- aclitem[] - datdba int, - dattablespace int -); -grant select on pg_catalog.pg_database to PUBLIC; - -insert into pg_catalog.pg_database values( - 0, -- oid - 'postgres', -- datname - 6, -- encoding, UTF8 - 100000, -- datlastsysoid - true, -- datallowconn - null, -- datconfig - null, -- datacl - select min(id) from INFORMATION_SCHEMA.users where admin=true, -- datdba - 0 -- dattablespace -); - -create table pg_catalog.pg_tablespace( - oid int, - spcname varchar_ignorecase, - spclocation varchar_ignorecase, - spcowner int, - spcacl array -- aclitem[] -); -grant select on pg_catalog.pg_tablespace to PUBLIC; - -insert into pg_catalog.pg_tablespace values( - 0, - 'main', -- spcname - '?', -- spclocation - 0, -- spcowner, - null -- spcacl -); - -create table pg_catalog.pg_settings( - oid int, - name varchar_ignorecase, - setting varchar_ignorecase -); -grant select on pg_catalog.pg_settings to PUBLIC; - -insert into pg_catalog.pg_settings values -(0, 'autovacuum', 'on'), -(1, 'stats_start_collector', 'on'), -(2, 'stats_row_level', 'on'); - -create view 
pg_catalog.pg_user -- oid, usename, usecreatedb, usesuper -as -select - id oid, - cast(name as varchar_ignorecase) usename, - true usecreatedb, - true usesuper -from INFORMATION_SCHEMA.users; -grant select on pg_catalog.pg_user to PUBLIC; - -create table pg_catalog.pg_authid( - oid int, - rolname varchar_ignorecase, - rolsuper boolean, - rolinherit boolean, - rolcreaterole boolean, - rolcreatedb boolean, - rolcatupdate boolean, - rolcanlogin boolean, - rolconnlimit boolean, - rolpassword boolean, - rolvaliduntil timestamp, -- timestamptz - rolconfig array -- text[] -); -grant select on pg_catalog.pg_authid to PUBLIC; - -create table pg_catalog.pg_am(oid int, amname varchar_ignorecase); -grant select on pg_catalog.pg_am to PUBLIC; -insert into pg_catalog.pg_am values(0, 'btree'); -insert into pg_catalog.pg_am values(1, 'hash'); - -create table pg_catalog.pg_description -- (objoid, objsubid, classoid, description) -as -select - oid objoid, - 0 objsubid, - -1 classoid, - cast(datname as varchar_ignorecase) description -from pg_catalog.pg_database; -grant select on pg_catalog.pg_description to PUBLIC; - -create table pg_catalog.pg_group -- oid, groname -as -select - 0 oid, - cast('' as varchar_ignorecase) groname -from pg_catalog.pg_database where 1=0; -grant select on pg_catalog.pg_group to PUBLIC; - -create table pg_catalog.pg_inherits( - inhrelid int, - inhparent int, - inhseqno int -); -grant select on pg_catalog.pg_inherits to PUBLIC; diff --git a/h2/src/main/org/h2/server/web/ConnectionInfo.java b/h2/src/main/org/h2/server/web/ConnectionInfo.java index 2bd03a3460..2b6fcdb9ab 100644 --- a/h2/src/main/org/h2/server/web/ConnectionInfo.java +++ b/h2/src/main/org/h2/server/web/ConnectionInfo.java @@ -1,11 +1,10 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; -import org.h2.util.MathUtils; import org.h2.util.StringUtils; /** @@ -61,7 +60,7 @@ String getString() { @Override public int compareTo(ConnectionInfo o) { - return -MathUtils.compareInt(lastAccess, o.lastAccess); + return Integer.compare(o.lastAccess, lastAccess); } } diff --git a/h2/src/main/org/h2/server/web/DbStarter.java b/h2/src/main/org/h2/server/web/DbStarter.java index 90790e7cb8..3cbb46515b 100644 --- a/h2/src/main/org/h2/server/web/DbStarter.java +++ b/h2/src/main/org/h2/server/web/DbStarter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; diff --git a/h2/src/main/org/h2/server/web/JakartaDbStarter.java b/h2/src/main/org/h2/server/web/JakartaDbStarter.java new file mode 100644 index 0000000000..1547672b97 --- /dev/null +++ b/h2/src/main/org/h2/server/web/JakartaDbStarter.java @@ -0,0 +1,93 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.server.web; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; + +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletContextEvent; +import jakarta.servlet.ServletContextListener; + +import org.h2.tools.Server; +import org.h2.util.StringUtils; + +/** + * This class can be used to start the H2 TCP server (or other H2 servers, for + * example the PG server) inside a Jakarta web application container such as + * Tomcat or Jetty. 
It can also open a database connection. + */ +public class JakartaDbStarter implements ServletContextListener { + + private Connection conn; + private Server server; + + @Override + public void contextInitialized(ServletContextEvent servletContextEvent) { + try { + org.h2.Driver.load(); + + // This will get the setting from a context-param in web.xml if + // defined: + ServletContext servletContext = servletContextEvent.getServletContext(); + String url = getParameter(servletContext, "db.url", "jdbc:h2:~/test"); + String user = getParameter(servletContext, "db.user", "sa"); + String password = getParameter(servletContext, "db.password", "sa"); + + // Start the server if configured to do so + String serverParams = getParameter(servletContext, "db.tcpServer", null); + if (serverParams != null) { + String[] params = StringUtils.arraySplit(serverParams, ' ', true); + server = Server.createTcpServer(params); + server.start(); + } + + // To access the database in server mode, use the database URL: + // jdbc:h2:tcp://localhost/~/test + conn = DriverManager.getConnection(url, user, password); + servletContext.setAttribute("connection", conn); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private static String getParameter(ServletContext servletContext, + String key, String defaultValue) { + String value = servletContext.getInitParameter(key); + return value == null ? defaultValue : value; + } + + /** + * Get the connection. 
+ * + * @return the connection + */ + public Connection getConnection() { + return conn; + } + + @Override + public void contextDestroyed(ServletContextEvent servletContextEvent) { + try { + Statement stat = conn.createStatement(); + stat.execute("SHUTDOWN"); + stat.close(); + } catch (Exception e) { + e.printStackTrace(); + } + try { + conn.close(); + } catch (Exception e) { + e.printStackTrace(); + } + if (server != null) { + server.stop(); + server = null; + } + } + +} diff --git a/h2/src/main/org/h2/server/web/JakartaWebServlet.java b/h2/src/main/org/h2/server/web/JakartaWebServlet.java new file mode 100644 index 0000000000..260266e0e1 --- /dev/null +++ b/h2/src/main/org/h2/server/web/JakartaWebServlet.java @@ -0,0 +1,169 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.server.web; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.Properties; + +import jakarta.servlet.ServletConfig; +import jakarta.servlet.ServletOutputStream; +import jakarta.servlet.http.HttpServlet; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; + +import org.h2.util.NetworkConnectionInfo; + +/** + * This servlet lets the H2 Console be used in a Jakarta servlet container + * such as Tomcat or Jetty. 
+ */ +public class JakartaWebServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + private transient WebServer server; + + @Override + public void init() { + ServletConfig config = getServletConfig(); + Enumeration en = config.getInitParameterNames(); + ArrayList list = new ArrayList<>(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = config.getInitParameter(name); + if (!name.startsWith("-")) { + name = "-" + name; + } + list.add(name); + if (value.length() > 0) { + list.add(value); + } + } + String[] args = list.toArray(new String[0]); + server = new WebServer(); + server.setAllowChunked(false); + server.init(args); + } + + @Override + public void destroy() { + server.stop(); + } + + private boolean allow(HttpServletRequest req) { + if (server.getAllowOthers()) { + return true; + } + String addr = req.getRemoteAddr(); + try { + InetAddress address = InetAddress.getByName(addr); + return address.isLoopbackAddress(); + } catch (UnknownHostException | NoClassDefFoundError e) { + // Google App Engine does not allow java.net.InetAddress + return false; + } + + } + + private String getAllowedFile(HttpServletRequest req, String requestedFile) { + if (!allow(req)) { + return "notAllowed.jsp"; + } + if (requestedFile.length() == 0) { + return "index.do"; + } + return requestedFile; + } + + @Override + public void doGet(HttpServletRequest req, HttpServletResponse resp) + throws IOException { + req.setCharacterEncoding("utf-8"); + String file = req.getPathInfo(); + if (file == null) { + resp.sendRedirect(req.getRequestURI() + "/"); + return; + } else if (file.startsWith("/")) { + file = file.substring(1); + } + file = getAllowedFile(req, file); + + // extract the request attributes + Properties attributes = new Properties(); + Enumeration en = req.getAttributeNames(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = req.getAttribute(name).toString(); 
+ attributes.put(name, value); + } + en = req.getParameterNames(); + while (en.hasMoreElements()) { + String name = en.nextElement().toString(); + String value = req.getParameter(name); + attributes.put(name, value); + } + + WebSession session = null; + String sessionId = attributes.getProperty("jsessionid"); + if (sessionId != null) { + session = server.getSession(sessionId); + } + WebApp app = new WebApp(server); + app.setSession(session, attributes); + String ifModifiedSince = req.getHeader("if-modified-since"); + + String scheme = req.getScheme(); + StringBuilder builder = new StringBuilder(scheme).append("://").append(req.getServerName()); + int serverPort = req.getServerPort(); + if (!(serverPort == 80 && scheme.equals("http") || serverPort == 443 && scheme.equals("https"))) { + builder.append(':').append(serverPort); + } + String path = builder.append(req.getContextPath()).toString(); + file = app.processRequest(file, new NetworkConnectionInfo(path, req.getRemoteAddr(), req.getRemotePort())); + session = app.getSession(); + + String mimeType = app.getMimeType(); + boolean cache = app.getCache(); + + if (cache && server.getStartDateTime().equals(ifModifiedSince)) { + resp.setStatus(HttpServletResponse.SC_NOT_MODIFIED); + return; + } + byte[] bytes = server.getFile(file); + if (bytes == null) { + resp.sendError(HttpServletResponse.SC_NOT_FOUND); + bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8); + } else { + if (session != null && file.endsWith(".jsp")) { + String page = new String(bytes, StandardCharsets.UTF_8); + page = PageParser.parse(page, session.map); + bytes = page.getBytes(StandardCharsets.UTF_8); + } + resp.setContentType(mimeType); + if (!cache) { + resp.setHeader("Cache-Control", "no-cache"); + } else { + resp.setHeader("Cache-Control", "max-age=10"); + resp.setHeader("Last-Modified", server.getStartDateTime()); + } + } + if (bytes != null) { + ServletOutputStream out = resp.getOutputStream(); + out.write(bytes); + } + } + + 
@Override + public void doPost(HttpServletRequest req, HttpServletResponse resp) + throws IOException { + doGet(req, resp); + } + +} diff --git a/h2/src/main/org/h2/server/web/PageParser.java b/h2/src/main/org/h2/server/web/PageParser.java index ec5aa245e1..78f8036d99 100644 --- a/h2/src/main/org/h2/server/web/PageParser.java +++ b/h2/src/main/org/h2/server/web/PageParser.java @@ -1,15 +1,17 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; import java.text.ParseException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.h2.util.New; + +import org.h2.util.StringUtils; /** * A page parser can parse an HTML page and replace the tags there. @@ -96,9 +98,9 @@ private void parseAll() throws ParseException { List list = (List) get(items); if (list == null) { result.append("?items?"); - list = New.arrayList(); + list = new ArrayList<>(); } - if (list.size() == 0) { + if (list.isEmpty()) { parseBlockUntil(""); } for (Object o : list) { @@ -141,7 +143,7 @@ private void parseAll() throws ParseException { setError(i); return; } - String item = p.substring(i, j).trim(); + String item = StringUtils.trimSubstring(p, i, j); i = j; String s = (String) get(item); replaceTags(s); @@ -238,66 +240,67 @@ private static String escapeHtml(String s, boolean convertBreakAndSpace) { if (s == null) { return null; } + int length = s.length(); if (convertBreakAndSpace) { - if (s.length() == 0) { + if (length == 0) { return " "; } } - StringBuilder buff = new StringBuilder(s.length()); + StringBuilder builder = new StringBuilder(length); boolean convertSpace = true; - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - if (c == ' ' || c == 
'\t') { + for (int i = 0; i < length;) { + int cp = s.codePointAt(i); + if (cp == ' ' || cp == '\t') { // convert tabs into spaces - for (int j = 0; j < (c == ' ' ? 1 : TAB_WIDTH); j++) { + for (int j = 0; j < (cp == ' ' ? 1 : TAB_WIDTH); j++) { if (convertSpace && convertBreakAndSpace) { - buff.append(" "); + builder.append(" "); } else { - buff.append(' '); + builder.append(' '); convertSpace = true; } } - continue; - } - convertSpace = false; - switch (c) { - case '$': - // so that ${ } in the text is interpreted correctly - buff.append("$"); - break; - case '<': - buff.append("<"); - break; - case '>': - buff.append(">"); - break; - case '&': - buff.append("&"); - break; - case '"': - buff.append("""); - break; - case '\'': - buff.append("'"); - break; - case '\n': - if (convertBreakAndSpace) { - buff.append("
    "); - convertSpace = true; - } else { - buff.append(c); - } - break; - default: - if (c >= 128) { - buff.append("&#").append((int) c).append(';'); - } else { - buff.append(c); + } else { + convertSpace = false; + switch (cp) { + case '$': + // so that ${ } in the text is interpreted correctly + builder.append("$"); + break; + case '<': + builder.append("<"); + break; + case '>': + builder.append(">"); + break; + case '&': + builder.append("&"); + break; + case '"': + builder.append("""); + break; + case '\'': + builder.append("'"); + break; + case '\n': + if (convertBreakAndSpace) { + builder.append("
    "); + convertSpace = true; + } else { + builder.append(cp); + } + break; + default: + if (cp >= 128) { + builder.append("&#").append(cp).append(';'); + } else { + builder.append((char) cp); + } } - break; } + i += Character.charCount(cp); } - return buff.toString(); + return builder.toString(); } /** @@ -310,11 +313,12 @@ static String escapeJavaScript(String s) { if (s == null) { return null; } - if (s.length() == 0) { + int length = s.length(); + if (length == 0) { return ""; } - StringBuilder buff = new StringBuilder(s.length()); - for (int i = 0; i < s.length(); i++) { + StringBuilder buff = new StringBuilder(length); + for (int i = 0; i < length; i++) { char c = s.charAt(i); switch (c) { case '"': diff --git a/h2/src/main/org/h2/server/web/WebApp.java b/h2/src/main/org/h2/server/web/WebApp.java index c19c354a26..945403679c 100644 --- a/h2/src/main/org/h2/server/web/WebApp.java +++ b/h2/src/main/org/h2/server/web/WebApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; @@ -10,9 +10,8 @@ import java.io.PrintWriter; import java.io.StringReader; import java.io.StringWriter; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.ParameterMetaData; @@ -39,9 +38,10 @@ import org.h2.bnf.context.DbContents; import org.h2.bnf.context.DbSchema; import org.h2.bnf.context.DbTableOrView; +import org.h2.command.Parser; import org.h2.engine.Constants; import org.h2.engine.SysProperties; -import org.h2.jdbc.JdbcSQLException; +import org.h2.jdbc.JdbcException; import org.h2.message.DbException; import org.h2.security.SHA256; import org.h2.tools.Backup; @@ -55,14 +55,16 @@ import org.h2.tools.Script; import org.h2.tools.SimpleResultSet; import org.h2.util.JdbcUtils; -import org.h2.util.New; +import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.Profiler; import org.h2.util.ScriptReader; import org.h2.util.SortedProperties; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; import org.h2.util.Tool; import org.h2.util.Utils; +import org.h2.util.Utils10; +import org.h2.value.DataType; /** * For each connection to a session, an object of this class is created. @@ -70,6 +72,9 @@ */ public class WebApp { + private static final Comparator SYSTEM_SCHEMA_COMPARATOR = Comparator + .comparing(DbTableOrView::getName, String.CASE_INSENSITIVE_ORDER); + /** * The web server. */ @@ -126,10 +131,10 @@ void setSession(WebSession session, Properties attributes) { * Process an HTTP request. 
* * @param file the file that was requested - * @param hostAddr the host address + * @param networkConnectionInfo the network connection information * @return the name of the file to return to the client */ - String processRequest(String file, String hostAddr) { + String processRequest(String file, NetworkConnectionInfo networkConnectionInfo) { int index = file.lastIndexOf('.'); String suffix; if (index >= 0) { @@ -151,8 +156,9 @@ String processRequest(String file, String hostAddr) { "jsp".equals(suffix)) { cache = false; mimeType = "text/html"; - if (session == null && !file.startsWith(WebServer.TRANSFER)) { - session = server.createNewSession(hostAddr); + if (session == null) { + session = server.createNewSession( + NetUtils.ipToShortForm(null, networkConnectionInfo.getClientAddr(), false).toString()); if (!"notAllowed.jsp".equals(file)) { file = "index.do"; } @@ -167,7 +173,15 @@ String processRequest(String file, String hostAddr) { trace("mimeType=" + mimeType); trace(file); if (file.endsWith(".do")) { - file = process(file); + file = process(file, networkConnectionInfo); + } else if (file.endsWith(".jsp")) { + switch (file) { + case "admin.jsp": + case "tools.jsp": + if (!checkAdmin(file)) { + file = process("adminLogin.do", networkConnectionInfo); + } + } } return file; } @@ -204,56 +218,98 @@ private static String getComboBox(String[][] elements, String selected) { return buff.toString(); } - private String process(String file) { + private String process(String file, NetworkConnectionInfo networkConnectionInfo) { trace("process " + file); while (file.endsWith(".do")) { - if ("login.do".equals(file)) { - file = login(); - } else if ("index.do".equals(file)) { + switch (file) { + case "login.do": + file = login(networkConnectionInfo); + break; + case "index.do": file = index(); - } else if ("logout.do".equals(file)) { + break; + case "logout.do": file = logout(); - } else if ("settingRemove.do".equals(file)) { + break; + case "settingRemove.do": file = 
settingRemove(); - } else if ("settingSave.do".equals(file)) { + break; + case "settingSave.do": file = settingSave(); - } else if ("test.do".equals(file)) { - file = test(); - } else if ("query.do".equals(file)) { + break; + case "test.do": + file = test(networkConnectionInfo); + break; + case "query.do": file = query(); - } else if ("tables.do".equals(file)) { + break; + case "tables.do": file = tables(); - } else if ("editResult.do".equals(file)) { + break; + case "editResult.do": file = editResult(); - } else if ("getHistory.do".equals(file)) { + break; + case "getHistory.do": file = getHistory(); - } else if ("admin.do".equals(file)) { - file = admin(); - } else if ("adminSave.do".equals(file)) { - file = adminSave(); - } else if ("adminStartTranslate.do".equals(file)) { - file = adminStartTranslate(); - } else if ("adminShutdown.do".equals(file)) { - file = adminShutdown(); - } else if ("autoCompleteList.do".equals(file)) { + break; + case "admin.do": + file = checkAdmin(file) ? admin() : "adminLogin.do"; + break; + case "adminSave.do": + file = checkAdmin(file) ? adminSave() : "adminLogin.do"; + break; + case "adminStartTranslate.do": + file = checkAdmin(file) ? adminStartTranslate() : "adminLogin.do"; + break; + case "adminShutdown.do": + file = checkAdmin(file) ? adminShutdown() : "adminLogin.do"; + break; + case "autoCompleteList.do": file = autoCompleteList(); - } else if ("tools.do".equals(file)) { - file = tools(); - } else if ("transfer.do".equals(file)) { - file = "transfer.jsp"; - } else { + break; + case "tools.do": + file = checkAdmin(file) ? 
tools() : "adminLogin.do"; + break; + case "adminLogin.do": + file = adminLogin(); + break; + default: file = "error.jsp"; + break; } } trace("return " + file); return file; } + private boolean checkAdmin(String file) { + Boolean b = (Boolean) session.get("admin"); + if (b != null && b) { + return true; + } + String key = server.getKey(); + if (key != null && key.equals(session.get("key"))) { + return true; + } + session.put("adminBack", file); + return false; + } + + private String adminLogin() { + String password = attributes.getProperty("password"); + if (password == null || password.isEmpty() || !server.checkAdminPassword(password)) { + return "adminLogin.jsp"; + } + String back = (String) session.remove("adminBack"); + session.put("admin", true); + return back != null ? back : "admin.do"; + } + private String autoCompleteList() { String query = (String) attributes.get("query"); boolean lowercase = false; - if (query.trim().length() > 0 && - Character.isLowerCase(query.trim().charAt(0))) { + String tQuery = query.trim(); + if (!tQuery.isEmpty() && Character.isLowerCase(tQuery.charAt(0))) { lowercase = true; } try { @@ -283,7 +339,8 @@ private String autoCompleteList() { while (sql.length() > 0 && sql.charAt(0) <= ' ') { sql = sql.substring(1); } - if (sql.trim().length() > 0 && Character.isLowerCase(sql.trim().charAt(0))) { + String tSql = sql.trim(); + if (!tSql.isEmpty() && Character.isLowerCase(tSql.charAt(0))) { lowercase = true; } Bnf bnf = session.getBnf(); @@ -299,11 +356,11 @@ private String autoCompleteList() { space = " "; } } - ArrayList list = New.arrayList(map.size()); + ArrayList list = new ArrayList<>(map.size()); for (Map.Entry entry : map.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); - String type = "" + key.charAt(0); + String type = String.valueOf(key.charAt(0)); if (Integer.parseInt(type) > 2) { continue; } @@ -316,21 +373,16 @@ private String autoCompleteList() { value = space + value; } key = 
StringUtils.urlEncode(key); - key = StringUtils.replaceAll(key, "+", " "); + key = key.replace('+', ' '); value = StringUtils.urlEncode(value); - value = StringUtils.replaceAll(value, "+", " "); + value = value.replace('+', ' '); list.add(type + "#" + key + "#" + value); } Collections.sort(list); - if (query.endsWith("\n") || query.trim().endsWith(";")) { + if (query.endsWith("\n") || tQuery.endsWith(";")) { list.add(0, "1#(Newline)#\n"); } - StatementBuilder buff = new StatementBuilder(); - for (String s : list) { - buff.appendExceptFirst("|"); - buff.append(s); - } - result = buff.toString(); + result = String.join("|", list); } session.put("autoCompleteList", result); } catch (Throwable e) { @@ -340,8 +392,9 @@ private String autoCompleteList() { } private String admin() { - session.put("port", "" + server.getPort()); - session.put("allowOthers", "" + server.getAllowOthers()); + session.put("port", Integer.toString(server.getPort())); + session.put("allowOthers", Boolean.toString(server.getAllowOthers())); + session.put("webExternalNames", server.getExternalNames()); session.put("ssl", String.valueOf(server.getSSL())); session.put("sessions", server.getSessions()); return "admin.jsp"; @@ -351,16 +404,21 @@ private String adminSave() { try { Properties prop = new SortedProperties(); int port = Integer.decode((String) attributes.get("port")); - prop.setProperty("webPort", String.valueOf(port)); + prop.setProperty("webPort", Integer.toString(port)); server.setPort(port); - boolean allowOthers = Boolean.parseBoolean( - (String) attributes.get("allowOthers")); + boolean allowOthers = Utils.parseBoolean((String) attributes.get("allowOthers"), false, false); prop.setProperty("webAllowOthers", String.valueOf(allowOthers)); server.setAllowOthers(allowOthers); - boolean ssl = Boolean.parseBoolean( - (String) attributes.get("ssl")); + String externalNames = (String) attributes.get("webExternalNames"); + prop.setProperty("webExternalNames", externalNames); + 
server.setExternalNames(externalNames); + boolean ssl = Utils.parseBoolean((String) attributes.get("ssl"), false, false); prop.setProperty("webSSL", String.valueOf(ssl)); server.setSSL(ssl); + byte[] adminPassword = server.getAdminPassword(); + if (adminPassword != null) { + prop.setProperty("webAdminPassword", StringUtils.convertBytesToHex(adminPassword)); + } server.saveProperties(prop); } catch (Exception e) { trace(e.toString()); @@ -394,7 +452,7 @@ private String tools() { } else if ("CreateCluster".equals(toolName)) { tool = new CreateCluster(); } else { - throw DbException.throwInternalError(toolName); + throw DbException.getInternalError(toolName); } ByteArrayOutputStream outBuff = new ByteArrayOutputStream(); PrintStream out = new PrintStream(outBuff, false, "UTF-8"); @@ -402,7 +460,7 @@ private String tools() { try { tool.runTool(argList); out.flush(); - String o = new String(outBuff.toByteArray(), Constants.UTF8); + String o = Utils10.byteArrayOutputStreamToString(outBuff, StandardCharsets.UTF_8); String result = PageParser.escapeHtml(o); session.put("toolResult", result); } catch (Exception e) { @@ -480,26 +538,27 @@ private String getHistory() { return "query.jsp"; } - private static int addColumns(boolean mainSchema, DbTableOrView table, - StringBuilder buff, int treeIndex, boolean showColumnTypes, - StringBuilder columnsBuffer) { + private static int addColumns(boolean mainSchema, DbTableOrView table, StringBuilder builder, int treeIndex, + boolean showColumnTypes, StringBuilder columnsBuilder) { DbColumn[] columns = table.getColumns(); for (int i = 0; columns != null && i < columns.length; i++) { DbColumn column = columns[i]; - if (columnsBuffer.length() > 0) { - columnsBuffer.append(' '); + if (columnsBuilder.length() > 0) { + columnsBuilder.append(' '); } - columnsBuffer.append(column.getName()); + columnsBuilder.append(column.getName()); String col = escapeIdentifier(column.getName()); String level = mainSchema ? 
", 1, 1" : ", 2, 2"; - buff.append("setNode(" + treeIndex + level + ", 'column', '" + - PageParser.escapeJavaScript(column.getName()) + - "', 'javascript:ins(\\'" + col + "\\')');\n"); + builder.append("setNode(").append(treeIndex).append(level) + .append(", 'column', '") + .append(PageParser.escapeJavaScript(column.getName())) + .append("', 'javascript:ins(\\'").append(col).append("\\')');\n"); treeIndex++; if (mainSchema && showColumnTypes) { - buff.append("setNode(" + treeIndex + ", 2, 2, 'type', '" + - PageParser.escapeJavaScript(column.getDataType()) + - "', null);\n"); + builder.append("setNode(").append(treeIndex) + .append(", 2, 2, 'type', '") + .append(PageParser.escapeJavaScript(column.getDataType())) + .append("', null);\n"); treeIndex++; } } @@ -542,7 +601,7 @@ private static int addIndexes(boolean mainSchema, DatabaseMetaData meta, // SQLite return treeIndex; } - HashMap indexMap = New.hashMap(); + HashMap indexMap = new HashMap<>(); while (rs.next()) { String name = rs.getString("INDEX_NAME"); IndexInfo info = indexMap.get(name); @@ -576,29 +635,30 @@ private static int addIndexes(boolean mainSchema, DatabaseMetaData meta, String level = mainSchema ? ", 1, 1" : ", 2, 1"; String levelIndex = mainSchema ? ", 2, 1" : ", 3, 1"; String levelColumnType = mainSchema ? 
", 3, 2" : ", 4, 2"; - buff.append("setNode(" + treeIndex + level + - ", 'index_az', '${text.tree.indexes}', null);\n"); + buff.append("setNode(").append(treeIndex).append(level) + .append(", 'index_az', '${text.tree.indexes}', null);\n"); treeIndex++; for (IndexInfo info : indexMap.values()) { - buff.append("setNode(" + treeIndex + levelIndex + - ", 'index', '" + - PageParser.escapeJavaScript(info.name) + "', null);\n"); + buff.append("setNode(").append(treeIndex).append(levelIndex) + .append(", 'index', '") + .append(PageParser.escapeJavaScript(info.name)) + .append("', null);\n"); treeIndex++; - buff.append("setNode(" + treeIndex + levelColumnType + - ", 'type', '" + info.type + "', null);\n"); + buff.append("setNode(").append(treeIndex).append(levelColumnType) + .append(", 'type', '").append(info.type).append("', null);\n"); treeIndex++; - buff.append("setNode(" + treeIndex + levelColumnType + - ", 'type', '" + - PageParser.escapeJavaScript(info.columns) + - "', null);\n"); + buff.append("setNode(").append(treeIndex).append(levelColumnType) + .append(", 'type', '") + .append(PageParser.escapeJavaScript(info.columns)) + .append("', null);\n"); treeIndex++; } } return treeIndex; } - private int addTablesAndViews(DbSchema schema, boolean mainSchema, - StringBuilder buff, int treeIndex) throws SQLException { + private int addTablesAndViews(DbSchema schema, boolean mainSchema, StringBuilder builder, int treeIndex) + throws SQLException { if (schema == null) { return treeIndex; } @@ -612,80 +672,89 @@ private int addTablesAndViews(DbSchema schema, boolean mainSchema, if (tables == null) { return treeIndex; } - boolean isOracle = schema.getContents().isOracle(); + DbContents contents = schema.getContents(); + boolean isOracle = contents.isOracle(); boolean notManyTables = tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_INDEXES; - for (DbTableOrView table : tables) { - if (table.isView()) { - continue; - } - int tableId = treeIndex; - String tab = 
table.getQuotedName(); - if (!mainSchema) { - tab = schema.quotedName + "." + tab; + try (PreparedStatement prep = showColumns ? prepareViewDefinitionQuery(conn, contents) : null) { + if (prep != null) { + prep.setString(1, schema.name); } - tab = escapeIdentifier(tab); - buff.append("setNode(" + treeIndex + indentation + " 'table', '" + - PageParser.escapeJavaScript(table.getName()) + - "', 'javascript:ins(\\'" + tab + "\\',true)');\n"); - treeIndex++; - if (mainSchema || showColumns) { - StringBuilder columnsBuffer = new StringBuilder(); - treeIndex = addColumns(mainSchema, table, buff, treeIndex, - notManyTables, columnsBuffer); - if (!isOracle && notManyTables) { - treeIndex = addIndexes(mainSchema, meta, table.getName(), - schema.name, buff, treeIndex); + if (schema.isSystem) { + Arrays.sort(tables, SYSTEM_SCHEMA_COMPARATOR); + for (DbTableOrView table : tables) { + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, false, indentation, + isOracle, notManyTables, table, table.isView(), prep, indentNode); + } + } else { + for (DbTableOrView table : tables) { + if (table.isView()) { + continue; + } + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, showColumns, indentation, + isOracle, notManyTables, table, false, null, indentNode); + } + for (DbTableOrView table : tables) { + if (!table.isView()) { + continue; + } + treeIndex = addTableOrView(schema, mainSchema, builder, treeIndex, meta, showColumns, indentation, + isOracle, notManyTables, table, true, prep, indentNode); } - buff.append("addTable('" + - PageParser.escapeJavaScript(table.getName()) + "', '" + - PageParser.escapeJavaScript(columnsBuffer.toString()) + - "', " + tableId + ");\n"); } } - tables = schema.getTables(); - for (DbTableOrView view : tables) { - if (!view.isView()) { - continue; - } - int tableId = treeIndex; - String tab = view.getQuotedName(); - if (!mainSchema) { - tab = view.getSchema().quotedName + "." 
+ tab; + return treeIndex; + } + + private static PreparedStatement prepareViewDefinitionQuery(Connection conn, DbContents contents) { + if (contents.mayHaveStandardViews()) { + try { + return conn.prepareStatement("SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS" + + " WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?"); + } catch (SQLException e) { + contents.setMayHaveStandardViews(false); } - tab = escapeIdentifier(tab); - buff.append("setNode(" + treeIndex + indentation + " 'view', '" + - PageParser.escapeJavaScript(view.getName()) + - "', 'javascript:ins(\\'" + tab + "\\',true)');\n"); - treeIndex++; - if (mainSchema) { - StringBuilder columnsBuffer = new StringBuilder(); - treeIndex = addColumns(mainSchema, view, buff, - treeIndex, notManyTables, columnsBuffer); - if (schema.getContents().isH2()) { - PreparedStatement prep = null; - try { - prep = conn.prepareStatement("SELECT * FROM " + - "INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=?"); - prep.setString(1, view.getName()); - ResultSet rs = prep.executeQuery(); + } + return null; + } + + private static int addTableOrView(DbSchema schema, boolean mainSchema, StringBuilder builder, int treeIndex, + DatabaseMetaData meta, boolean showColumns, String indentation, boolean isOracle, boolean notManyTables, + DbTableOrView table, boolean isView, PreparedStatement prep, String indentNode) throws SQLException { + int tableId = treeIndex; + String tab = table.getQuotedName(); + if (!mainSchema) { + tab = schema.quotedName + '.' + tab; + } + tab = escapeIdentifier(tab); + builder.append("setNode(").append(treeIndex).append(indentation) + .append(" '").append(isView ? 
"view" : "table").append("', '") + .append(PageParser.escapeJavaScript(table.getName())) + .append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n"); + treeIndex++; + if (showColumns) { + StringBuilder columnsBuilder = new StringBuilder(); + treeIndex = addColumns(mainSchema, table, builder, treeIndex, notManyTables, columnsBuilder); + if (isView) { + if (prep != null) { + prep.setString(2, table.getName()); + try (ResultSet rs = prep.executeQuery()) { if (rs.next()) { - String sql = rs.getString("SQL"); - buff.append("setNode(" + treeIndex + indentNode + - " 'type', '" + - PageParser.escapeJavaScript(sql) + - "', null);\n"); - treeIndex++; + String sql = rs.getString(1); + if (sql != null) { + builder.append("setNode(").append(treeIndex).append(indentNode).append(" 'type', '") + .append(PageParser.escapeJavaScript(sql)).append("', null);\n"); + treeIndex++; + } } - rs.close(); - } finally { - JdbcUtils.closeSilently(prep); } } - buff.append("addTable('" + - PageParser.escapeJavaScript(view.getName()) + "', '" + - PageParser.escapeJavaScript(columnsBuffer.toString()) + - "', " + tableId + ");\n"); + } else if (!isOracle && notManyTables) { + treeIndex = addIndexes(mainSchema, meta, table.getName(), schema.name, builder, treeIndex); } + builder.append("addTable('") + .append(PageParser.escapeJavaScript(table.getName())).append("', '") + .append(PageParser.escapeJavaScript(columnsBuilder.toString())).append("', ") + .append(tableId).append(");\n"); } return treeIndex; } @@ -700,9 +769,10 @@ private String tables() { session.loadBnf(); isH2 = contents.isH2(); - StringBuilder buff = new StringBuilder(); - buff.append("setNode(0, 0, 0, 'database', '" + PageParser.escapeJavaScript(url) - + "', null);\n"); + StringBuilder buff = new StringBuilder() + .append("setNode(0, 0, 0, 'database', '") + .append(PageParser.escapeJavaScript(url)) + .append("', null);\n"); int treeIndex = 1; DbSchema defaultSchema = contents.getDefaultSchema(); @@ -712,79 +782,86 @@ 
private String tables() { if (schema == defaultSchema || schema == null) { continue; } - buff.append("setNode(" + treeIndex + ", 0, 1, 'folder', '" + - PageParser.escapeJavaScript(schema.name) + - "', null);\n"); + buff.append("setNode(").append(treeIndex).append(", 0, 1, 'folder', '") + .append(PageParser.escapeJavaScript(schema.name)) + .append("', null);\n"); treeIndex++; treeIndex = addTablesAndViews(schema, false, buff, treeIndex); } if (isH2) { - Statement stat = null; - try { - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + try (Statement stat = conn.createStatement()) { + ResultSet rs; + try { + rs = stat.executeQuery("SELECT SEQUENCE_NAME, BASE_VALUE, INCREMENT FROM " + + "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + } catch (SQLException e) { + rs = stat.executeQuery("SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT FROM " + + "INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME"); + } for (int i = 0; rs.next(); i++) { if (i == 0) { - buff.append("setNode(" + treeIndex + - ", 0, 1, 'sequences', '${text.tree.sequences}', null);\n"); + buff.append("setNode(").append(treeIndex) + .append(", 0, 1, 'sequences', '${text.tree.sequences}', null);\n"); treeIndex++; } - String name = rs.getString("SEQUENCE_NAME"); - String current = rs.getString("CURRENT_VALUE"); - String increment = rs.getString("INCREMENT"); - buff.append("setNode(" + treeIndex + - ", 1, 1, 'sequence', '" + - PageParser.escapeJavaScript(name) + - "', null);\n"); + String name = rs.getString(1); + String currentBase = rs.getString(2); + String increment = rs.getString(3); + buff.append("setNode(").append(treeIndex) + .append(", 1, 1, 'sequence', '") + .append(PageParser.escapeJavaScript(name)) + .append("', null);\n"); treeIndex++; - buff.append("setNode(" + treeIndex + - ", 2, 2, 'type', '${text.tree.current}: " + - PageParser.escapeJavaScript(current) + - "', null);\n"); + 
buff.append("setNode(").append(treeIndex) + .append(", 2, 2, 'type', '${text.tree.current}: ") + .append(PageParser.escapeJavaScript(currentBase)) + .append("', null);\n"); treeIndex++; if (!"1".equals(increment)) { - buff.append("setNode(" + - treeIndex + - ", 2, 2, 'type', '${text.tree.increment}: " + - PageParser.escapeJavaScript(increment) + - "', null);\n"); + buff.append("setNode(").append(treeIndex) + .append(", 2, 2, 'type', '${text.tree.increment}: ") + .append(PageParser.escapeJavaScript(increment)) + .append("', null);\n"); treeIndex++; } } rs.close(); - rs = stat.executeQuery("SELECT * FROM " + - "INFORMATION_SCHEMA.USERS ORDER BY NAME"); + try { + rs = stat.executeQuery( + "SELECT USER_NAME, IS_ADMIN FROM INFORMATION_SCHEMA.USERS ORDER BY USER_NAME"); + } catch (SQLException e) { + rs = stat.executeQuery("SELECT NAME, ADMIN FROM INFORMATION_SCHEMA.USERS ORDER BY NAME"); + } for (int i = 0; rs.next(); i++) { if (i == 0) { - buff.append("setNode(" + treeIndex + - ", 0, 1, 'users', '${text.tree.users}', null);\n"); + buff.append("setNode(").append(treeIndex) + .append(", 0, 1, 'users', '${text.tree.users}', null);\n"); treeIndex++; } - String name = rs.getString("NAME"); - String admin = rs.getString("ADMIN"); - buff.append("setNode(" + treeIndex + - ", 1, 1, 'user', '" + - PageParser.escapeJavaScript(name) + - "', null);\n"); + String name = rs.getString(1); + String admin = rs.getString(2); + buff.append("setNode(").append(treeIndex) + .append(", 1, 1, 'user', '") + .append(PageParser.escapeJavaScript(name)) + .append("', null);\n"); treeIndex++; if (admin.equalsIgnoreCase("TRUE")) { - buff.append("setNode(" + treeIndex + - ", 2, 2, 'type', '${text.tree.admin}', null);\n"); + buff.append("setNode(").append(treeIndex) + .append(", 2, 2, 'type', '${text.tree.admin}', null);\n"); treeIndex++; } } rs.close(); - } finally { - JdbcUtils.closeSilently(stat); } } DatabaseMetaData meta = session.getMetaData(); String version = meta.getDatabaseProductName() + " " 
+ meta.getDatabaseProductVersion(); - buff.append("setNode(" + treeIndex + ", 0, 0, 'info', '" + - PageParser.escapeJavaScript(version) + "', null);\n"); - buff.append("refreshQueryTables();"); + buff.append("setNode(").append(treeIndex) + .append(", 0, 0, 'info', '") + .append(PageParser.escapeJavaScript(version)) + .append("', null);\n") + .append("refreshQueryTables();"); session.put("tree", buff.toString()); } catch (Exception e) { session.put("tree", ""); @@ -814,8 +891,8 @@ private String getStackTrace(int id, Throwable e, boolean isH2) { error += " " + se.getSQLState() + "/" + se.getErrorCode(); if (isH2) { int code = se.getErrorCode(); - error += " (${text.a.help})"; } } @@ -833,14 +910,14 @@ private static String linkToSource(String s) { try { StringBuilder result = new StringBuilder(s.length()); int idx = s.indexOf("
    "); - result.append(s.substring(0, idx)); + result.append(s, 0, idx); while (true) { int start = s.indexOf("org.h2.", idx); if (start < 0) { result.append(s.substring(idx)); break; } - result.append(s.substring(idx, start)); + result.append(s, idx, start); int end = s.indexOf(')', start); if (end < 0) { result.append(s.substring(idx)); @@ -855,7 +932,7 @@ private static String linkToSource(String s) { String file = element.substring(open + 1, colon); String lineNumber = element.substring(colon + 1, element.length()); String fullFileName = packageName.replace('.', '/') + "/" + file; - result.append("" + s + ""; } - private String test() { + private String test(NetworkConnectionInfo networkConnectionInfo) { String driver = attributes.getProperty("driver", ""); String url = attributes.getProperty("url", ""); String user = attributes.getProperty("user", ""); @@ -892,7 +969,7 @@ private String test() { prof.startCollecting(); Connection conn; try { - conn = server.getConnection(driver, url, user, password); + conn = server.getConnection(driver, url, user, password, null, networkConnectionInfo); } finally { prof.stopCollecting(); profOpen = prof.getTop(3); @@ -918,7 +995,7 @@ private String test() { PageParser.escapeHtml(profClose) + ""; } else { - success = "${text.login.testSuccessful}"; + success = "
    ${text.login.testSuccessful}
    "; } session.put("error", success); // session.put("error", "${text.login.testSuccessful}"); @@ -937,14 +1014,13 @@ private String test() { * @return the formatted error message */ private String getLoginError(Exception e, boolean isH2) { - if (e instanceof JdbcSQLException && - ((JdbcSQLException) e).getErrorCode() == ErrorCode.CLASS_NOT_FOUND_1) { + if (e instanceof JdbcException && ((JdbcException) e).getErrorCode() == ErrorCode.CLASS_NOT_FOUND_1) { return "${text.login.driverNotFound}
    " + getStackTrace(0, e, isH2); } return getStackTrace(0, e, isH2); } - private String login() { + private String login(NetworkConnectionInfo networkConnectionInfo) { String driver = attributes.getProperty("driver", ""); String url = attributes.getProperty("url", ""); String user = attributes.getProperty("user", ""); @@ -954,7 +1030,8 @@ private String login() { session.put("maxrows", "1000"); boolean isH2 = url.startsWith("jdbc:h2:"); try { - Connection conn = server.getConnection(driver, url, user, password); + Connection conn = server.getConnection(driver, url, user, password, (String) session.get("key"), + networkConnectionInfo); session.setConnection(conn); session.put("url", url); session.put("user", user); @@ -986,6 +1063,7 @@ private String logout() { } catch (Exception e) { trace(e.toString()); } + session.remove("admin"); return "index.do"; } @@ -993,7 +1071,7 @@ private String query() { String sql = attributes.getProperty("sql").trim(); try { ScriptReader r = new ScriptReader(new StringReader(sql)); - final ArrayList list = New.arrayList(); + final ArrayList list = new ArrayList<>(); while (true) { String s = r.readStatement(); if (s == null) { @@ -1003,7 +1081,7 @@ private String query() { } final Connection conn = session.getConnection(); if (SysProperties.CONSOLE_STREAM && server.getAllowChunked()) { - String page = new String(server.getFile("result.jsp"), Constants.UTF8); + String page = new String(server.getFile("result.jsp"), StandardCharsets.UTF_8); int idx = page.indexOf("${result}"); // the first element of the list is the header, the last the // footer @@ -1025,10 +1103,6 @@ public String next() { query(conn, s, i - 1, list.size() - 2, b); return b.toString(); } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } }); return "result.jsp"; } @@ -1103,161 +1177,9 @@ private String editResult() { return "result.jsp"; } - private ResultSet getMetaResultSet(Connection conn, String sql) - throws SQLException { - 
DatabaseMetaData meta = conn.getMetaData(); - if (isBuiltIn(sql, "@best_row_identifier")) { - String[] p = split(sql); - int scale = p[4] == null ? 0 : Integer.parseInt(p[4]); - boolean nullable = p[5] == null ? false : Boolean.parseBoolean(p[5]); - return meta.getBestRowIdentifier(p[1], p[2], p[3], scale, nullable); - } else if (isBuiltIn(sql, "@catalogs")) { - return meta.getCatalogs(); - } else if (isBuiltIn(sql, "@columns")) { - String[] p = split(sql); - return meta.getColumns(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@column_privileges")) { - String[] p = split(sql); - return meta.getColumnPrivileges(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@cross_references")) { - String[] p = split(sql); - return meta.getCrossReference(p[1], p[2], p[3], p[4], p[5], p[6]); - } else if (isBuiltIn(sql, "@exported_keys")) { - String[] p = split(sql); - return meta.getExportedKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@imported_keys")) { - String[] p = split(sql); - return meta.getImportedKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@index_info")) { - String[] p = split(sql); - boolean unique = p[4] == null ? false : Boolean.parseBoolean(p[4]); - boolean approx = p[5] == null ? false : Boolean.parseBoolean(p[5]); - return meta.getIndexInfo(p[1], p[2], p[3], unique, approx); - } else if (isBuiltIn(sql, "@primary_keys")) { - String[] p = split(sql); - return meta.getPrimaryKeys(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@procedures")) { - String[] p = split(sql); - return meta.getProcedures(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@procedure_columns")) { - String[] p = split(sql); - return meta.getProcedureColumns(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@schemas")) { - return meta.getSchemas(); - } else if (isBuiltIn(sql, "@tables")) { - String[] p = split(sql); - String[] types = p[4] == null ? 
null : StringUtils.arraySplit(p[4], ',', false); - return meta.getTables(p[1], p[2], p[3], types); - } else if (isBuiltIn(sql, "@table_privileges")) { - String[] p = split(sql); - return meta.getTablePrivileges(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@table_types")) { - return meta.getTableTypes(); - } else if (isBuiltIn(sql, "@type_info")) { - return meta.getTypeInfo(); - } else if (isBuiltIn(sql, "@udts")) { - String[] p = split(sql); - int[] types; - if (p[4] == null) { - types = null; - } else { - String[] t = StringUtils.arraySplit(p[4], ',', false); - types = new int[t.length]; - for (int i = 0; i < t.length; i++) { - types[i] = Integer.parseInt(t[i]); - } - } - return meta.getUDTs(p[1], p[2], p[3], types); - } else if (isBuiltIn(sql, "@version_columns")) { - String[] p = split(sql); - return meta.getVersionColumns(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@memory")) { - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("Type", Types.VARCHAR, 0, 0); - rs.addColumn("KB", Types.VARCHAR, 0, 0); - rs.addRow("Used Memory", "" + Utils.getMemoryUsed()); - rs.addRow("Free Memory", "" + Utils.getMemoryFree()); - return rs; - } else if (isBuiltIn(sql, "@info")) { - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("KEY", Types.VARCHAR, 0, 0); - rs.addColumn("VALUE", Types.VARCHAR, 0, 0); - rs.addRow("conn.getCatalog", conn.getCatalog()); - rs.addRow("conn.getAutoCommit", "" + conn.getAutoCommit()); - rs.addRow("conn.getTransactionIsolation", "" + conn.getTransactionIsolation()); - rs.addRow("conn.getWarnings", "" + conn.getWarnings()); - String map; - try { - map = "" + conn.getTypeMap(); - } catch (SQLException e) { - map = e.toString(); - } - rs.addRow("conn.getTypeMap", "" + map); - rs.addRow("conn.isReadOnly", "" + conn.isReadOnly()); - rs.addRow("conn.getHoldability", "" + conn.getHoldability()); - addDatabaseMetaData(rs, meta); - return rs; - } else if (isBuiltIn(sql, "@attributes")) { - String[] p = split(sql); - return 
meta.getAttributes(p[1], p[2], p[3], p[4]); - } else if (isBuiltIn(sql, "@super_tables")) { - String[] p = split(sql); - return meta.getSuperTables(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@super_types")) { - String[] p = split(sql); - return meta.getSuperTypes(p[1], p[2], p[3]); - } else if (isBuiltIn(sql, "@prof_stop")) { - if (profiler != null) { - profiler.stopCollecting(); - SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("Top Stack Trace(s)", Types.VARCHAR, 0, 0); - rs.addRow(profiler.getTop(3)); - profiler = null; - return rs; - } - } - return null; - } - - private static void addDatabaseMetaData(SimpleResultSet rs, - DatabaseMetaData meta) { - Method[] methods = DatabaseMetaData.class.getDeclaredMethods(); - Arrays.sort(methods, new Comparator() { - @Override - public int compare(Method o1, Method o2) { - return o1.toString().compareTo(o2.toString()); - } - }); - for (Method m : methods) { - if (m.getParameterTypes().length == 0) { - try { - Object o = m.invoke(meta); - rs.addRow("meta." + m.getName(), "" + o); - } catch (InvocationTargetException e) { - rs.addRow("meta." + m.getName(), e.getTargetException().toString()); - } catch (Exception e) { - rs.addRow("meta." + m.getName(), e.toString()); - } - } - } - } - - private static String[] split(String s) { - String[] list = new String[10]; - String[] t = StringUtils.arraySplit(s, ' ', true); - System.arraycopy(t, 0, list, 0, t.length); - for (int i = 0; i < list.length; i++) { - if ("null".equals(list[i])) { - list[i] = null; - } - } - return list; - } - private int getMaxrows() { String r = (String) session.get("maxrows"); - int maxrows = r == null ? 0 : Integer.parseInt(r); - return maxrows; + return r == null ? 
0 : Integer.parseInt(r); } private String getResult(Connection conn, int id, String sql, @@ -1271,9 +1193,8 @@ private String getResult(Connection conn, int id, String sql, sqlUpper.contains("ALTER") || sqlUpper.contains("RUNSCRIPT")) { String sessionId = attributes.getProperty("jsessionid"); - buff.append(""); + buff.append(""); } Statement stat; DbContents contents = session.getContents(); @@ -1287,16 +1208,16 @@ private String getResult(Connection conn, int id, String sql, ResultSet rs; long time = System.currentTimeMillis(); boolean metadata = false; - boolean generatedKeys = false; + Object generatedKeys = null; boolean edit = false; boolean list = false; - if (isBuiltIn(sql, "@autocommit_true")) { + if (JdbcUtils.isBuiltIn(sql, "@autocommit_true")) { conn.setAutoCommit(true); return "${text.result.autoCommitOn}"; - } else if (isBuiltIn(sql, "@autocommit_false")) { + } else if (JdbcUtils.isBuiltIn(sql, "@autocommit_false")) { conn.setAutoCommit(false); return "${text.result.autoCommitOff}"; - } else if (isBuiltIn(sql, "@cancel")) { + } else if (JdbcUtils.isBuiltIn(sql, "@cancel")) { stat = session.executingStatement; if (stat != null) { stat.cancel(); @@ -1305,96 +1226,138 @@ private String getResult(Connection conn, int id, String sql, buff.append("${text.result.noRunningStatement}"); } return buff.toString(); - } else if (isBuiltIn(sql, "@edit")) { + } else if (JdbcUtils.isBuiltIn(sql, "@edit")) { edit = true; - sql = sql.substring("@edit".length()).trim(); + sql = StringUtils.trimSubstring(sql, "@edit".length()); session.put("resultSetSQL", sql); } - if (isBuiltIn(sql, "@list")) { + if (JdbcUtils.isBuiltIn(sql, "@list")) { list = true; - sql = sql.substring("@list".length()).trim(); + sql = StringUtils.trimSubstring(sql, "@list".length()); } - if (isBuiltIn(sql, "@meta")) { + if (JdbcUtils.isBuiltIn(sql, "@meta")) { metadata = true; - sql = sql.substring("@meta".length()).trim(); + sql = StringUtils.trimSubstring(sql, "@meta".length()); } - if 
(isBuiltIn(sql, "@generated")) { + if (JdbcUtils.isBuiltIn(sql, "@generated")) { generatedKeys = true; - sql = sql.substring("@generated".length()).trim(); - } else if (isBuiltIn(sql, "@history")) { + int offset = "@generated".length(); + int length = sql.length(); + for (; offset < length; offset++) { + char c = sql.charAt(offset); + if (c == '(') { + Parser p = new Parser(); + generatedKeys = p.parseColumnList(sql, offset); + offset = p.getLastParseIndex(); + break; + } + if (!Character.isWhitespace(c)) { + break; + } + } + sql = StringUtils.trimSubstring(sql, offset); + } else if (JdbcUtils.isBuiltIn(sql, "@history")) { buff.append(getCommandHistoryString()); return buff.toString(); - } else if (isBuiltIn(sql, "@loop")) { - sql = sql.substring("@loop".length()).trim(); + } else if (JdbcUtils.isBuiltIn(sql, "@loop")) { + sql = StringUtils.trimSubstring(sql, "@loop".length()); int idx = sql.indexOf(' '); int count = Integer.decode(sql.substring(0, idx)); - sql = sql.substring(idx).trim(); + sql = StringUtils.trimSubstring(sql, idx); return executeLoop(conn, count, sql); - } else if (isBuiltIn(sql, "@maxrows")) { - int maxrows = (int) Double.parseDouble( - sql.substring("@maxrows".length()).trim()); - session.put("maxrows", "" + maxrows); + } else if (JdbcUtils.isBuiltIn(sql, "@maxrows")) { + int maxrows = (int) Double.parseDouble(StringUtils.trimSubstring(sql, "@maxrows".length())); + session.put("maxrows", Integer.toString(maxrows)); return "${text.result.maxrowsSet}"; - } else if (isBuiltIn(sql, "@parameter_meta")) { - sql = sql.substring("@parameter_meta".length()).trim(); + } else if (JdbcUtils.isBuiltIn(sql, "@parameter_meta")) { + sql = StringUtils.trimSubstring(sql, "@parameter_meta".length()); PreparedStatement prep = conn.prepareStatement(sql); buff.append(getParameterResultSet(prep.getParameterMetaData())); return buff.toString(); - } else if (isBuiltIn(sql, "@password_hash")) { - sql = sql.substring("@password_hash".length()).trim(); - String[] p = 
split(sql); + } else if (JdbcUtils.isBuiltIn(sql, "@password_hash")) { + sql = StringUtils.trimSubstring(sql, "@password_hash".length()); + String[] p = JdbcUtils.split(sql); return StringUtils.convertBytesToHex( SHA256.getKeyPasswordHash(p[0], p[1].toCharArray())); - } else if (isBuiltIn(sql, "@prof_start")) { + } else if (JdbcUtils.isBuiltIn(sql, "@prof_start")) { if (profiler != null) { profiler.stopCollecting(); } profiler = new Profiler(); profiler.startCollecting(); return "Ok"; - } else if (isBuiltIn(sql, "@sleep")) { - String s = sql.substring("@sleep".length()).trim(); + } else if (JdbcUtils.isBuiltIn(sql, "@sleep")) { + String s = StringUtils.trimSubstring(sql, "@sleep".length()); int sleep = 1; if (s.length() > 0) { sleep = Integer.parseInt(s); } Thread.sleep(sleep * 1000); return "Ok"; - } else if (isBuiltIn(sql, "@transaction_isolation")) { - String s = sql.substring("@transaction_isolation".length()).trim(); + } else if (JdbcUtils.isBuiltIn(sql, "@transaction_isolation")) { + String s = StringUtils.trimSubstring(sql, "@transaction_isolation".length()); if (s.length() > 0) { int level = Integer.parseInt(s); conn.setTransactionIsolation(level); } - buff.append("Transaction Isolation: " + - conn.getTransactionIsolation() + "
    "); - buff.append(Connection.TRANSACTION_READ_UNCOMMITTED + - ": read_uncommitted
    "); - buff.append(Connection.TRANSACTION_READ_COMMITTED + - ": read_committed
    "); - buff.append(Connection.TRANSACTION_REPEATABLE_READ + - ": repeatable_read
    "); - buff.append(Connection.TRANSACTION_SERIALIZABLE + - ": serializable"); + buff.append("Transaction Isolation: ") + .append(conn.getTransactionIsolation()) + .append("
    "); + buff.append(Connection.TRANSACTION_READ_UNCOMMITTED) + .append(": read_uncommitted
    "); + buff.append(Connection.TRANSACTION_READ_COMMITTED) + .append(": read_committed
    "); + buff.append(Connection.TRANSACTION_REPEATABLE_READ) + .append(": repeatable_read
    "); + buff.append(Constants.TRANSACTION_SNAPSHOT) + .append(": snapshot
    "); + buff.append(Connection.TRANSACTION_SERIALIZABLE) + .append(": serializable"); } if (sql.startsWith("@")) { - rs = getMetaResultSet(conn, sql); + rs = JdbcUtils.getMetaResultSet(conn, sql); + if (rs == null && JdbcUtils.isBuiltIn(sql, "@prof_stop")) { + if (profiler != null) { + profiler.stopCollecting(); + SimpleResultSet simple = new SimpleResultSet(); + simple.addColumn("Top Stack Trace(s)", Types.VARCHAR, 0, 0); + simple.addRow(profiler.getTop(3)); + rs = simple; + profiler = null; + } + } if (rs == null) { - buff.append("?: " + sql); + buff.append("?: ").append(sql); return buff.toString(); } } else { int maxrows = getMaxrows(); stat.setMaxRows(maxrows); session.executingStatement = stat; - boolean isResultSet = stat.execute(sql); + boolean isResultSet; + if (generatedKeys == null) { + isResultSet = stat.execute(sql); + } else if (generatedKeys instanceof Boolean) { + isResultSet = stat.execute(sql, + ((Boolean) generatedKeys) ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS); + } else if (generatedKeys instanceof String[]) { + isResultSet = stat.execute(sql, (String[]) generatedKeys); + } else { + isResultSet = stat.execute(sql, (int[]) generatedKeys); + } session.addCommand(sql); - if (generatedKeys) { + if (generatedKeys != null) { rs = null; rs = stat.getGeneratedKeys(); } else { if (!isResultSet) { - buff.append("${text.result.updateCount}: " + stat.getUpdateCount()); + long updateCount; + try { + updateCount = stat.getLargeUpdateCount(); + } catch (UnsupportedOperationException e) { + updateCount = stat.getUpdateCount(); + } + buff.append("${text.result.updateCount}: ").append(updateCount); time = System.currentTimeMillis() - time; buff.append("
    (").append(time).append(" ms)"); stat.close(); @@ -1422,20 +1385,16 @@ private String getResult(Connection conn, int id, String sql, } } - private static boolean isBuiltIn(String sql, String builtIn) { - return StringUtils.startsWithIgnoreCase(sql, builtIn); - } - private String executeLoop(Connection conn, int count, String sql) throws SQLException { - ArrayList params = New.arrayList(); + ArrayList params = new ArrayList<>(); int idx = 0; while (!stop) { idx = sql.indexOf('?', idx); if (idx < 0) { break; } - if (isBuiltIn(sql.substring(idx), "?/*rnd*/")) { + if (JdbcUtils.isBuiltIn(sql.substring(idx), "?/*rnd*/")) { params.add(1); sql = sql.substring(0, idx) + "?" + sql.substring(idx + "/*rnd*/".length() + 1); } else { @@ -1446,15 +1405,15 @@ private String executeLoop(Connection conn, int count, String sql) boolean prepared; Random random = new Random(1); long time = System.currentTimeMillis(); - if (isBuiltIn(sql, "@statement")) { - sql = sql.substring("@statement".length()).trim(); + if (JdbcUtils.isBuiltIn(sql, "@statement")) { + sql = StringUtils.trimSubstring(sql, "@statement".length()); prepared = false; Statement stat = conn.createStatement(); for (int i = 0; !stop && i < count; i++) { String s = sql; for (Integer type : params) { idx = s.indexOf('?'); - if (type.intValue() == 1) { + if (type == 1) { s = s.substring(0, idx) + random.nextInt(count) + s.substring(idx + 1); } else { s = s.substring(0, idx) + i + s.substring(idx + 1); @@ -1474,7 +1433,7 @@ private String executeLoop(Connection conn, int count, String sql) for (int i = 0; !stop && i < count; i++) { for (int j = 0; j < params.size(); j++) { Integer type = params.get(j); - if (type.intValue() == 1) { + if (type == 1) { prep.setInt(j + 1, random.nextInt(count)); } else { prep.setInt(j + 1, i); @@ -1495,19 +1454,15 @@ private String executeLoop(Connection conn, int count, String sql) } } time = System.currentTimeMillis() - time; - StatementBuilder buff = new StatementBuilder(); - 
buff.append(time).append(" ms: ").append(count).append(" * "); - if (prepared) { - buff.append("(Prepared) "); - } else { - buff.append("(Statement) "); - } - buff.append('('); - for (int p : params) { - buff.appendExceptFirst(", "); - buff.append(p == 0 ? "i" : "rnd"); + StringBuilder builder = new StringBuilder().append(time).append(" ms: ").append(count).append(" * ") + .append(prepared ? "(Prepared) " : "(Statement) ").append('('); + for (int i = 0, size = params.size(); i < size; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(params.get(i) == 0 ? "i" : "rnd"); } - return buff.append(") ").append(sql).toString(); + return builder.append(") ").append(sql).toString(); } private String getCommandHistoryString() { @@ -1573,9 +1528,9 @@ private String getResultSet(String sql, ResultSet rs, boolean metadata, "id=\"mainForm\" target=\"h2result\">" + "" + "" + - "
    "); + "
    "); } else { - buff.append("
    "); + buff.append("
    "); } if (metadata) { SimpleResultSet r = new SimpleResultSet(); @@ -1674,10 +1629,11 @@ private String getResultSet(String sql, ResultSet rs, boolean metadata, "onmouseout = \"this.className ='icon'\" " + "class=\"icon\" alt=\"${text.resultEdit.edit}\" " + "title=\"${text.resultEdit.edit}\" border=\"1\"/>"). - append("" + - "\"${text.resultEdit.delete}\"null"; + } else if (d.length > 50_000) { + return "
    =+
    " + StringUtils.convertBytesToHex(d, 3) + "... (" + + d.length + " ${text.result.bytes})"; + } + return StringUtils.convertBytesToHex(d); + } String d = rs.getString(columnIndex); if (d == null) { return "null"; - } else if (d.length() > 100000) { - String s; - if (isBinary(rs.getMetaData().getColumnType(columnIndex))) { - s = PageParser.escapeHtml(d.substring(0, 6)) + - "... (" + (d.length() / 2) + " ${text.result.bytes})"; - } else { - s = PageParser.escapeHtml(d.substring(0, 100)) + - "... (" + d.length() + " ${text.result.characters})"; - } - return "
    =+
    " + s; + } else if (d.length() > 100_000) { + return "
    =+
    " + PageParser.escapeHtml(d.substring(0, 100)) + "... (" + + d.length() + " ${text.result.characters})"; } else if (d.equals("null") || d.startsWith("= ") || d.startsWith("=+")) { return "
    =
    " + PageParser.escapeHtml(d); } else if (d.equals("")) { @@ -1792,19 +1752,6 @@ private static String escapeData(ResultSet rs, int columnIndex) return PageParser.escapeHtml(d); } - private static boolean isBinary(int sqlType) { - switch (sqlType) { - case Types.BINARY: - case Types.BLOB: - case Types.JAVA_OBJECT: - case Types.LONGVARBINARY: - case Types.OTHER: - case Types.VARBINARY: - return true; - } - return false; - } - private void unescapeData(String x, ResultSet rs, int columnIndex) throws SQLException { if (x.equals("null")) { @@ -1833,6 +1780,10 @@ private void unescapeData(String x, ResultSet rs, int columnIndex) x = x.substring(2); } ResultSetMetaData meta = rs.getMetaData(); + if (DataType.isBinaryColumn(meta, columnIndex)) { + rs.updateBytes(columnIndex, StringUtils.convertHexToBytes(x)); + return; + } int type = meta.getColumnType(columnIndex); if (session.getContents().isH2()) { rs.updateString(columnIndex, x); @@ -1867,7 +1818,7 @@ private String settingRemove() { String setting = attributes.getProperty("name", ""); server.removeSetting(setting); ArrayList settings = server.getSettings(); - if (settings.size() > 0) { + if (!settings.isEmpty()) { attributes.put("setting", settings.get(0)); } server.saveProperties(null); diff --git a/h2/src/main/org/h2/server/web/WebServer.java b/h2/src/main/org/h2/server/web/WebServer.java index 0e339db06e..73d17644da 100644 --- a/h2/src/main/org/h2/server/web/WebServer.java +++ b/h2/src/main/org/h2/server/web/WebServer.java @@ -1,21 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.ServerSocket; import java.net.Socket; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.SQLException; -import java.text.SimpleDateFormat; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -24,19 +29,18 @@ import java.util.Map.Entry; import java.util.Properties; import java.util.Set; -import java.util.TimeZone; import org.h2.engine.Constants; import org.h2.engine.SysProperties; import org.h2.message.DbException; +import org.h2.security.SHA256; import org.h2.server.Service; import org.h2.server.ShutdownHandler; import org.h2.store.fs.FileUtils; -import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; import org.h2.util.MathUtils; import org.h2.util.NetUtils; -import org.h2.util.New; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; import org.h2.util.Tool; @@ -48,14 +52,13 @@ */ public class WebServer implements Service { - static final String TRANSFER = "transfer"; - static final String[][] LANGUAGES = { { "cs", "\u010ce\u0161tina" }, { "de", "Deutsch" }, { "en", "English" }, { "es", "Espa\u00f1ol" }, { "fr", "Fran\u00e7ais" }, + { "hi", "Hindi \u0939\u093f\u0902\u0926\u0940" }, { "hu", "Magyar"}, { "ko", "\ud55c\uad6d\uc5b4"}, { "in", "Indonesia"}, @@ -80,11 +83,25 @@ public class WebServer implements Service { private static final String[] GENERIC = { "Generic JNDI Data Source|javax.naming.InitialContext|" + "java:comp/env/jdbc/Test|sa", + "Generic 
Teradata|com.teradata.jdbc.TeraDriver|" + + "jdbc:teradata://whomooz/|", + "Generic Snowflake|com.snowflake.client.jdbc.SnowflakeDriver|" + + "jdbc:snowflake://accountName.snowflakecomputing.com|", + "Generic Redshift|com.amazon.redshift.jdbc42.Driver|" + + "jdbc:redshift://endpoint:5439/database|", + "Generic Impala|org.cloudera.impala.jdbc41.Driver|" + + "jdbc:impala://clustername:21050/default|", + "Generic Hive 2|org.apache.hive.jdbc.HiveDriver|" + + "jdbc:hive2://clustername:10000/default|", + "Generic Hive|org.apache.hadoop.hive.jdbc.HiveDriver|" + + "jdbc:hive://clustername:10000/default|", + "Generic Azure SQL|com.microsoft.sqlserver.jdbc.SQLServerDriver|" + + "jdbc:sqlserver://name.database.windows.net:1433|", "Generic Firebird Server|org.firebirdsql.jdbc.FBDriver|" + "jdbc:firebirdsql:localhost:c:/temp/firebird/test|sysdba", "Generic SQLite|org.sqlite.JDBC|" + "jdbc:sqlite:test|sa", - "Generic DB2|COM.ibm.db2.jdbc.net.DB2Driver|" + + "Generic DB2|com.ibm.db2.jcc.DB2Driver|" + "jdbc:db2://localhost/test|" , "Generic Oracle|oracle.jdbc.driver.OracleDriver|" + "jdbc:oracle:thin:@localhost:1521:XE|sa" , @@ -94,13 +111,15 @@ public class WebServer implements Service { "jdbc:sqlserver://localhost;DatabaseName=test|sa", "Generic PostgreSQL|org.postgresql.Driver|" + "jdbc:postgresql:test|" , - "Generic MySQL|com.mysql.jdbc.Driver|" + + "Generic MySQL|com.mysql.cj.jdbc.Driver|" + "jdbc:mysql://localhost:3306/test|" , + "Generic MariaDB|org.mariadb.jdbc.Driver|" + + "jdbc:mariadb://localhost:3306/test|" , "Generic HSQLDB|org.hsqldb.jdbcDriver|" + "jdbc:hsqldb:test;hsqldb.default_table_type=cached|sa" , - "Generic Derby (Server)|org.apache.derby.jdbc.ClientDriver|" + + "Generic Derby (Server)|org.apache.derby.client.ClientAutoloadedDriver|" + "jdbc:derby://localhost:1527/test;create=true|sa", - "Generic Derby (Embedded)|org.apache.derby.jdbc.EmbeddedDriver|" + + "Generic Derby (Embedded)|org.apache.derby.iapi.jdbc.AutoloadedDriver|" + 
"jdbc:derby:test;create=true|sa", "Generic H2 (Server)|org.h2.Driver|" + "jdbc:h2:tcp://localhost/~/test|sa", @@ -140,21 +159,26 @@ public class WebServer implements Service { // private URLClassLoader urlClassLoader; private int port; private boolean allowOthers; + private String externalNames; private boolean isDaemon; private final Set running = Collections.synchronizedSet(new HashSet()); private boolean ssl; - private final HashMap connInfoMap = New.hashMap(); + private byte[] adminPassword; + private final HashMap connInfoMap = new HashMap<>(); private long lastTimeoutCheck; - private final HashMap sessions = New.hashMap(); - private final HashSet languages = New.hashSet(); + private final HashMap sessions = new HashMap<>(); + private final HashSet languages = new HashSet<>(); private String startDateTime; private ServerSocket serverSocket; + private String host; private String url; private ShutdownHandler shutdownHandler; private Thread listenerThread; - private boolean ifExists; + private boolean ifExists = true; + private String key; + private boolean allowSecureCreation; private boolean trace; private TranslateThread translateThread; private boolean allowChunked = true; @@ -167,20 +191,10 @@ public class WebServer implements Service { * * @param file the file name * @return the data + * @throws IOException on failure */ byte[] getFile(String file) throws IOException { trace("getFile <" + file + ">"); - if (file.startsWith(TRANSFER + "/") && new File(TRANSFER).exists()) { - file = file.substring(TRANSFER.length() + 1); - if (!isSimpleName(file)) { - return null; - } - File f = new File(TRANSFER, file); - if (!f.exists()) { - return null; - } - return IOUtils.readBytesAndClose(new FileInputStream(f), -1); - } byte[] data = Utils.getResource("/org/h2/server/web/res/" + file); if (data == null) { trace(" null"); @@ -190,22 +204,6 @@ byte[] getFile(String file) throws IOException { return data; } - /** - * Check if this is a simple name (only contains '.', '-', 
'_', letters, or - * digits). - * - * @param s the string - * @return true if it's a simple name - */ - static boolean isSimpleName(String s) { - for (char c : s.toCharArray()) { - if (c != '.' && c != '_' && c != '-' && !Character.isLetterOrDigit(c)) { - return false; - } - } - return true; - } - /** * Remove this web thread from the set of running threads. * @@ -229,7 +227,7 @@ private static String generateSessionId() { WebSession getSession(String sessionId) { long now = System.currentTimeMillis(); if (lastTimeoutCheck + SESSION_TIMEOUT < now) { - for (String id : New.arrayList(sessions.keySet())) { + for (String id : new ArrayList<>(sessions.keySet())) { WebSession session = sessions.get(id); if (session.lastAccess + SESSION_TIMEOUT < now) { trace("timeout for " + id); @@ -272,14 +270,42 @@ WebSession createNewSession(String hostAddr) { String getStartDateTime() { if (startDateTime == null) { - SimpleDateFormat format = new SimpleDateFormat( - "EEE, d MMM yyyy HH:mm:ss z", new Locale("en", "")); - format.setTimeZone(TimeZone.getTimeZone("GMT")); - startDateTime = format.format(System.currentTimeMillis()); + startDateTime = DateTimeFormatter.ofPattern("EEE, d MMM yyyy HH:mm:ss z", Locale.ENGLISH) + .format(ZonedDateTime.now(ZoneId.of("UTC"))); } return startDateTime; } + /** + * Returns the key for privileged connections. + * + * @return key key, or null + */ + String getKey() { + return key; + } + + /** + * Sets the key for privileged connections. + * + * @param key key, or null + */ + public void setKey(String key) { + if (!allowOthers) { + this.key = key; + } + } + + /** + * @param allowSecureCreation + * whether creation of databases using the key should be allowed + */ + public void setAllowSecureCreation(boolean allowSecureCreation) { + if (!allowOthers) { + this.allowSecureCreation = allowSecureCreation; + } + } + @Override public void init(String... 
args) { // set the serverPropertiesDir, because it's used in loadProperties() @@ -295,6 +321,8 @@ public void init(String... args) { "webSSL", false); allowOthers = SortedProperties.getBooleanProperty(prop, "webAllowOthers", false); + setExternalNames(SortedProperties.getStringProperty(prop, "webExternalNames", null)); + setAdminPassword(SortedProperties.getStringProperty(prop, "webAdminPassword", null)); commandHistoryString = prop.getProperty(COMMAND_HISTORY); for (int i = 0; args != null && i < args.length; i++) { String a = args[i]; @@ -304,6 +332,8 @@ public void init(String... args) { ssl = true; } else if (Tool.isOption(a, "-webAllowOthers")) { allowOthers = true; + } else if (Tool.isOption(a, "-webExternalNames")) { + setExternalNames(args[++i]); } else if (Tool.isOption(a, "-webDaemon")) { isDaemon = true; } else if (Tool.isOption(a, "-baseDir")) { @@ -311,6 +341,10 @@ public void init(String... args) { SysProperties.setBaseDir(baseDir); } else if (Tool.isOption(a, "-ifExists")) { ifExists = true; + } else if (Tool.isOption(a, "-ifNotExists")) { + ifExists = false; + } else if (Tool.isOption(a, "-webAdminPassword")) { + setAdminPassword(args[++i]); } else if (Tool.isOption(a, "-properties")) { // already set i++; @@ -334,6 +368,9 @@ public void init(String... args) { for (String[] lang : LANGUAGES) { languages.add(lang[0]); } + if (allowOthers) { + key = null; + } updateURL(); } @@ -343,10 +380,25 @@ public String getURL() { return url; } + /** + * @return host name + */ + public String getHost() { + if (host == null) { + updateURL(); + } + return host; + } + private void updateURL() { try { - url = (ssl ? "https" : "http") + "://" + - NetUtils.getLocalAddress() + ":" + port; + host = StringUtils.toLowerEnglish(NetUtils.getLocalAddress()); + StringBuilder builder = new StringBuilder(ssl ? 
"https" : "http").append("://") + .append(host).append(':').append(port); + if (key != null && serverSocket != null) { + builder.append("?key=").append(key); + } + url = builder.toString(); } catch (NoClassDefFoundError e) { // Google App Engine does not allow java.net.InetAddress } @@ -413,10 +465,10 @@ public void stop() { } } // TODO server: using a boolean 'now' argument? a timeout? - for (WebSession session : New.arrayList(sessions.values())) { + for (WebSession session : new ArrayList<>(sessions.values())) { session.close(); } - for (WebThread c : New.arrayList(running)) { + for (WebThread c : new ArrayList<>(running)) { try { c.stopNow(); c.join(100); @@ -471,7 +523,7 @@ void readTranslations(WebSession session, String language) { trace("translation: "+language); byte[] trans = getFile("_text_"+language+".prop"); trace(" "+new String(trans)); - text = SortedProperties.fromLines(new String(trans, Constants.UTF8)); + text = SortedProperties.fromLines(new String(trans, StandardCharsets.UTF_8)); // remove starting # (if not translated yet) for (Entry entry : text.entrySet()) { String value = (String) entry.getValue(); @@ -482,11 +534,11 @@ void readTranslations(WebSession session, String language) { } catch (IOException e) { DbException.traceThrowable(e); } - session.put("text", new HashMap(text)); + session.put("text", new HashMap<>(text)); } ArrayList> getSessions() { - ArrayList> list = New.arrayList(); + ArrayList> list = new ArrayList<>(sessions.size()); for (WebSession s : sessions.values()) { list.add(s.getInfo()); } @@ -504,6 +556,9 @@ public String getName() { } void setAllowOthers(boolean b) { + if (b) { + key = null; + } allowOthers = b; } @@ -512,6 +567,14 @@ public boolean getAllowOthers() { return allowOthers; } + void setExternalNames(String externalNames) { + this.externalNames = externalNames != null ? 
StringUtils.toLowerEnglish(externalNames) : null; + } + + String getExternalNames() { + return externalNames; + } + void setSSL(boolean b) { ssl = b; } @@ -544,7 +607,7 @@ public void setCommandHistoryAllowed(boolean allowed) { } public ArrayList getCommandHistoryList() { - ArrayList result = New.arrayList(); + ArrayList result = new ArrayList<>(); if (commandHistoryString == null) { return result; } @@ -651,7 +714,7 @@ String[] getSettingNames() { * @return the list */ synchronized ArrayList getSettings() { - ArrayList settings = New.arrayList(); + ArrayList settings = new ArrayList<>(); if (connInfoMap.size() == 0) { Properties prop = loadProperties(); if (prop.size() == 0) { @@ -662,7 +725,7 @@ synchronized ArrayList getSettings() { } } else { for (int i = 0;; i++) { - String data = prop.getProperty(String.valueOf(i)); + String data = prop.getProperty(Integer.toString(i)); if (data == null) { break; } @@ -689,14 +752,17 @@ synchronized void saveProperties(Properties prop) { Properties old = loadProperties(); prop = new SortedProperties(); prop.setProperty("webPort", - "" + SortedProperties.getIntProperty(old, - "webPort", port)); + Integer.toString(SortedProperties.getIntProperty(old, "webPort", port))); prop.setProperty("webAllowOthers", - "" + SortedProperties.getBooleanProperty(old, - "webAllowOthers", allowOthers)); + Boolean.toString(SortedProperties.getBooleanProperty(old, "webAllowOthers", allowOthers))); + if (externalNames != null) { + prop.setProperty("webExternalNames", externalNames); + } prop.setProperty("webSSL", - "" + SortedProperties.getBooleanProperty(old, - "webSSL", ssl)); + Boolean.toString(SortedProperties.getBooleanProperty(old, "webSSL", ssl))); + if (adminPassword != null) { + prop.setProperty("webAdminPassword", StringUtils.convertBytesToHex(adminPassword)); + } if (commandHistoryString != null) { prop.setProperty(COMMAND_HISTORY, commandHistoryString); } @@ -706,7 +772,7 @@ synchronized void saveProperties(Properties prop) { for (int i 
= 0; i < len; i++) { ConnectionInfo info = settings.get(i); if (info != null) { - prop.setProperty(String.valueOf(len - i - 1), info.getString()); + prop.setProperty(Integer.toString(len - i - 1), info.getString()); } } if (!"null".equals(serverPropertiesDir)) { @@ -727,36 +793,19 @@ synchronized void saveProperties(Properties prop) { * @param databaseUrl the database URL * @param user the user name * @param password the password + * @param userKey the key of privileged user + * @param networkConnectionInfo the network connection information * @return the database connection + * @throws SQLException on failure */ Connection getConnection(String driver, String databaseUrl, String user, - String password) throws SQLException { + String password, String userKey, NetworkConnectionInfo networkConnectionInfo) throws SQLException { driver = driver.trim(); databaseUrl = databaseUrl.trim(); - org.h2.Driver.load(); - Properties p = new Properties(); - p.setProperty("user", user.trim()); // do not trim the password, otherwise an // encrypted H2 database with empty user password doesn't work - p.setProperty("password", password); - if (databaseUrl.startsWith("jdbc:h2:")) { - if (ifExists) { - databaseUrl += ";IFEXISTS=TRUE"; - } - // PostgreSQL would throw a NullPointerException - // if it is loaded before the H2 driver - // because it can't deal with non-String objects in the connection - // Properties - return org.h2.Driver.load().connect(databaseUrl, p); - } -// try { -// Driver dr = (Driver) urlClassLoader. 
-// loadClass(driver).newInstance(); -// return dr.connect(url, p); -// } catch(ClassNotFoundException e2) { -// throw e2; -// } - return JdbcUtils.getConnection(driver, databaseUrl, p); + return JdbcUtils.getConnection(driver, databaseUrl, user.trim(), password, networkConnectionInfo, + ifExists && (!allowSecureCreation || key == null || !key.equals(userKey))); } /** @@ -777,6 +826,7 @@ public void setShutdownHandler(ShutdownHandler shutdownHandler) { * * @param conn the connection * @return the URL of the web site to access this connection + * @throws SQLException on failure */ public String addSession(Connection conn) throws SQLException { WebSession session = createNewSession("local"); @@ -793,7 +843,7 @@ public String addSession(Connection conn) throws SQLException { */ private class TranslateThread extends Thread { - private final File file = new File("translation.properties"); + private final Path file = Paths.get("translation.properties"); private final Map translation; private volatile boolean stopNow; @@ -802,7 +852,7 @@ private class TranslateThread extends Thread { } public String getFileName() { - return file.getAbsolutePath(); + return file.toAbsolutePath().toString(); } public void stopNow() { @@ -819,12 +869,12 @@ public void run() { while (!stopNow) { try { SortedProperties sp = new SortedProperties(); - if (file.exists()) { - InputStream in = FileUtils.newInputStream(file.getName()); + if (Files.exists(file)) { + InputStream in = Files.newInputStream(file); sp.load(in); translation.putAll(sp); } else { - OutputStream out = FileUtils.newOutputStream(file.getName(), false); + OutputStream out = Files.newOutputStream(file); sp.putAll(translation); sp.store(out, "Translation"); } @@ -866,4 +916,42 @@ boolean getAllowChunked() { return allowChunked; } + byte[] getAdminPassword() { + return adminPassword; + } + + void setAdminPassword(String password) { + if (password == null || password.isEmpty()) { + adminPassword = null; + return; + } + if 
(password.length() == 128) { + try { + adminPassword = StringUtils.convertHexToBytes(password); + return; + } catch (Exception ex) {} + } + byte[] salt = MathUtils.secureRandomBytes(32); + byte[] hash = SHA256.getHashWithSalt(password.getBytes(StandardCharsets.UTF_8), salt); + byte[] total = Arrays.copyOf(salt, 64); + System.arraycopy(hash, 0, total, 32, 32); + adminPassword = total; + } + + /** + * Check the admin password. + * + * @param password the password to test + * @return true if admin password not configure, or admin password correct + */ + boolean checkAdminPassword(String password) { + if (adminPassword == null) { + return false; + } + byte[] salt = Arrays.copyOf(adminPassword, 32); + byte[] hash = new byte[32]; + System.arraycopy(adminPassword, 32, hash, 0, 32); + return Utils.compareSecure(hash, SHA256.getHashWithSalt(password.getBytes(StandardCharsets.UTF_8), salt)); + } + } diff --git a/h2/src/main/org/h2/server/web/WebServlet.java b/h2/src/main/org/h2/server/web/WebServlet.java index 78b7d0d821..752cf6bbc6 100644 --- a/h2/src/main/org/h2/server/web/WebServlet.java +++ b/h2/src/main/org/h2/server/web/WebServlet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; @@ -8,6 +8,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Enumeration; import java.util.Properties; @@ -18,8 +19,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.h2.engine.Constants; -import org.h2.util.New; +import org.h2.util.NetworkConnectionInfo; /** * This servlet lets the H2 Console be used in a standard servlet container @@ -34,7 +34,7 @@ public class WebServlet extends HttpServlet { public void init() { ServletConfig config = getServletConfig(); Enumeration en = config.getInitParameterNames(); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (en.hasMoreElements()) { String name = en.nextElement().toString(); String value = config.getInitParameter(name); @@ -46,8 +46,7 @@ public void init() { list.add(value); } } - String[] args = new String[list.size()]; - list.toArray(args); + String[] args = list.toArray(new String[0]); server = new WebServer(); server.setAllowChunked(false); server.init(args); @@ -66,12 +65,11 @@ private boolean allow(HttpServletRequest req) { try { InetAddress address = InetAddress.getByName(addr); return address.isLoopbackAddress(); - } catch (UnknownHostException e) { - return false; - } catch (NoClassDefFoundError e) { + } catch (UnknownHostException | NoClassDefFoundError e) { // Google App Engine does not allow java.net.InetAddress return false; } + } private String getAllowedFile(HttpServletRequest req, String requestedFile) { @@ -121,8 +119,14 @@ public void doGet(HttpServletRequest req, HttpServletResponse resp) app.setSession(session, attributes); String ifModifiedSince = req.getHeader("if-modified-since"); - String hostAddr = req.getRemoteAddr(); - file = app.processRequest(file, hostAddr); + String scheme = req.getScheme(); + 
StringBuilder builder = new StringBuilder(scheme).append("://").append(req.getServerName()); + int serverPort = req.getServerPort(); + if (!(serverPort == 80 && scheme.equals("http") || serverPort == 443 && scheme.equals("https"))) { + builder.append(':').append(serverPort); + } + String path = builder.append(req.getContextPath()).toString(); + file = app.processRequest(file, new NetworkConnectionInfo(path, req.getRemoteAddr(), req.getRemotePort())); session = app.getSession(); String mimeType = app.getMimeType(); @@ -135,12 +139,12 @@ public void doGet(HttpServletRequest req, HttpServletResponse resp) byte[] bytes = server.getFile(file); if (bytes == null) { resp.sendError(HttpServletResponse.SC_NOT_FOUND); - bytes = ("File not found: " + file).getBytes(Constants.UTF8); + bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8); } else { if (session != null && file.endsWith(".jsp")) { - String page = new String(bytes, Constants.UTF8); + String page = new String(bytes, StandardCharsets.UTF_8); page = PageParser.parse(page, session.map); - bytes = page.getBytes(Constants.UTF8); + bytes = page.getBytes(StandardCharsets.UTF_8); } resp.setContentType(mimeType); if (!cache) { diff --git a/h2/src/main/org/h2/server/web/WebSession.java b/h2/src/main/org/h2/server/web/WebSession.java index 2489e95a59..bda717d1a0 100644 --- a/h2/src/main/org/h2/server/web/WebSession.java +++ b/h2/src/main/org/h2/server/web/WebSession.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.server.web; @@ -19,7 +19,6 @@ import org.h2.bnf.context.DbContents; import org.h2.bnf.context.DbContextRule; import org.h2.message.DbException; -import org.h2.util.New; /** * The web session keeps all data of a user session. @@ -37,7 +36,7 @@ class WebSession { /** * The session attribute map. */ - final HashMap map = New.hashMap(); + final HashMap map = new HashMap<>(); /** * The current locale. @@ -99,9 +98,10 @@ Object get(String key) { * Remove a session attribute from the map. * * @param key the key + * @return value that was associated with the key, or null */ - void remove(String key) { - map.remove(key); + Object remove(String key) { + return map.remove(key); } /** @@ -131,6 +131,9 @@ void loadBnf() { new DbContextRule(contents, DbContextRule.SCHEMA); DbContextRule columnAliasRule = new DbContextRule(contents, DbContextRule.COLUMN_ALIAS); + DbContextRule procedure = + new DbContextRule(contents, DbContextRule.PROCEDURE); + newBnf.updateTopic("procedure", procedure); newBnf.updateTopic("column_name", columnRule); newBnf.updateTopic("new_table_alias", newAliasRule); newBnf.updateTopic("table_alias", aliasRule); @@ -165,7 +168,7 @@ void addCommand(String sql) { return; } sql = sql.trim(); - if (sql.length() == 0) { + if (sql.isEmpty()) { return; } if (commandHistory.size() > MAX_HISTORY) { @@ -196,7 +199,7 @@ ArrayList getCommandHistory() { * @return a map containing the session meta data */ HashMap getInfo() { - HashMap m = New.hashMap(); + HashMap m = new HashMap<>(); m.putAll(map); m.put("lastAccess", new Timestamp(lastAccess).toString()); try { @@ -204,7 +207,7 @@ HashMap getInfo() { "${text.admin.notConnected}" : conn.getMetaData().getURL()); m.put("user", conn == null ? "-" : conn.getMetaData().getUserName()); - m.put("lastQuery", commandHistory.size() == 0 ? + m.put("lastQuery", commandHistory.isEmpty() ? "" : commandHistory.get(0)); m.put("executing", executingStatement == null ? 
"${text.admin.no}" : "${text.admin.yes}"); diff --git a/h2/src/main/org/h2/server/web/WebThread.java b/h2/src/main/org/h2/server/web/WebThread.java index 50821b616f..2c6a7fd6b5 100644 --- a/h2/src/main/org/h2/server/web/WebThread.java +++ b/h2/src/main/org/h2/server/web/WebThread.java @@ -1,31 +1,31 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.server.web; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.InterruptedIOException; import java.io.OutputStream; -import java.io.RandomAccessFile; import java.net.Socket; import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.Locale; import java.util.Properties; import java.util.StringTokenizer; -import org.h2.engine.Constants; + import org.h2.engine.SysProperties; import org.h2.message.DbException; -import org.h2.mvstore.DataUtils; import org.h2.util.IOUtils; import org.h2.util.NetUtils; +import org.h2.util.NetworkConnectionInfo; import org.h2.util.StringUtils; +import org.h2.util.Utils; /** * For each connection to a session, an object of this class is created. 
@@ -33,11 +33,16 @@ */ class WebThread extends WebApp implements Runnable { + private static final byte[] RN = { '\r', '\n' }; + + private static final byte[] RNRN = { '\r', '\n', '\r', '\n' }; + protected OutputStream output; protected final Socket socket; private final Thread thread; private InputStream input; - private int headerBytes; + private String host; + private int dataLength; private String ifModifiedSince; WebThread(Socket socket, WebServer server) { @@ -57,6 +62,7 @@ void start() { * Wait until the thread is stopped. * * @param millis the maximum number of milliseconds to wait + * @throws InterruptedException if interrupted */ void join(int millis) throws InterruptedException { thread.join(millis); @@ -81,6 +87,9 @@ private String getAllowedFile(String requestedFile) { if (requestedFile.length() == 0) { return "index.do"; } + if (requestedFile.charAt(0) == '?') { + return "index.do" + requestedFile; + } return requestedFile; } @@ -110,112 +119,166 @@ public void run() { @SuppressWarnings("unchecked") private boolean process() throws IOException { - boolean keepAlive = false; String head = readHeaderLine(); - if (head.startsWith("GET ") || head.startsWith("POST ")) { - int begin = head.indexOf('/'), end = head.lastIndexOf(' '); - String file; - if (begin < 0 || end < begin) { - file = ""; - } else { - file = head.substring(begin + 1, end).trim(); - } - trace(head + ": " + file); - file = getAllowedFile(file); - attributes = new Properties(); - int paramIndex = file.indexOf("?"); - session = null; - if (paramIndex >= 0) { - String attrib = file.substring(paramIndex + 1); - parseAttributes(attrib); - String sessionId = attributes.getProperty("jsessionid"); - file = file.substring(0, paramIndex); - session = server.getSession(sessionId); - } - keepAlive = parseHeader(); - String hostAddr = socket.getInetAddress().getHostAddress(); - file = processRequest(file, hostAddr); - if (file.length() == 0) { - // asynchronous request - return true; + boolean get = 
head.startsWith("GET "); + if ((!get && !head.startsWith("POST ")) || !head.endsWith(" HTTP/1.1")) { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + String file = StringUtils.trimSubstring(head, get ? 4 : 5, head.length() - 9); + if (file.isEmpty() || file.charAt(0) != '/') { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + attributes = new Properties(); + boolean keepAlive = parseHeader(); + if (!checkHost(host)) { + return false; + } + file = file.substring(1); + trace(head + ": " + file); + file = getAllowedFile(file); + int paramIndex = file.indexOf('?'); + session = null; + String key = null; + if (paramIndex >= 0) { + String attrib = file.substring(paramIndex + 1); + parseAttributes(attrib); + String sessionId = attributes.getProperty("jsessionid"); + key = attributes.getProperty("key"); + file = file.substring(0, paramIndex); + session = server.getSession(sessionId); + } + parseBodyAttributes(); + file = processRequest(file, + new NetworkConnectionInfo( + NetUtils.ipToShortForm(new StringBuilder(server.getSSL() ? 
"https://" : "http://"), + socket.getLocalAddress().getAddress(), true) // + .append(':').append(socket.getLocalPort()).toString(), // + socket.getInetAddress().getAddress(), socket.getPort(), null)); + if (file.length() == 0) { + // asynchronous request + return true; + } + String message; + if (cache && ifModifiedSince != null && ifModifiedSince.equals(server.getStartDateTime())) { + writeSimple("HTTP/1.1 304 Not Modified", (byte[]) null); + return keepAlive; + } + byte[] bytes = server.getFile(file); + if (bytes == null) { + writeSimple("HTTP/1.1 404 Not Found", "File not found: " + file); + return keepAlive; + } + if (session != null && file.endsWith(".jsp")) { + if (key != null) { + session.put("key", key); } - String message; - byte[] bytes; - if (cache && ifModifiedSince != null && - ifModifiedSince.equals(server.getStartDateTime())) { - bytes = null; - message = "HTTP/1.1 304 Not Modified\r\n"; - } else { - bytes = server.getFile(file); - if (bytes == null) { - message = "HTTP/1.1 404 Not Found\r\n"; - bytes = ("File not found: " + file).getBytes(Constants.UTF8); - message += "Content-Length: " + bytes.length + "\r\n"; - } else { - if (session != null && file.endsWith(".jsp")) { - String page = new String(bytes, Constants.UTF8); - if (SysProperties.CONSOLE_STREAM) { - Iterator it = (Iterator) session.map.remove("chunks"); - if (it != null) { - message = "HTTP/1.1 200 OK\r\n"; - message += "Content-Type: " + mimeType + "\r\n"; - message += "Cache-Control: no-cache\r\n"; - message += "Transfer-Encoding: chunked\r\n"; - message += "\r\n"; - trace(message); - output.write(message.getBytes()); - while (it.hasNext()) { - String s = it.next(); - s = PageParser.parse(s, session.map); - bytes = s.getBytes(Constants.UTF8); - if (bytes.length == 0) { - continue; - } - output.write(Integer.toHexString(bytes.length).getBytes()); - output.write("\r\n".getBytes()); - output.write(bytes); - output.write("\r\n".getBytes()); - output.flush(); - } - 
output.write("0\r\n\r\n".getBytes()); - output.flush(); - return keepAlive; - } - } - page = PageParser.parse(page, session.map); - bytes = page.getBytes(Constants.UTF8); - } + String page = new String(bytes, StandardCharsets.UTF_8); + if (SysProperties.CONSOLE_STREAM) { + Iterator it = (Iterator) session.map.remove("chunks"); + if (it != null) { message = "HTTP/1.1 200 OK\r\n"; message += "Content-Type: " + mimeType + "\r\n"; - if (!cache) { - message += "Cache-Control: no-cache\r\n"; - } else { - message += "Cache-Control: max-age=10\r\n"; - message += "Last-Modified: " + server.getStartDateTime() + "\r\n"; + message += "Cache-Control: no-cache\r\n"; + message += "Transfer-Encoding: chunked\r\n"; + message += "\r\n"; + trace(message); + output.write(message.getBytes(StandardCharsets.ISO_8859_1)); + while (it.hasNext()) { + String s = it.next(); + s = PageParser.parse(s, session.map); + bytes = s.getBytes(StandardCharsets.UTF_8); + if (bytes.length == 0) { + continue; + } + output.write(Integer.toHexString(bytes.length).getBytes(StandardCharsets.ISO_8859_1)); + output.write(RN); + output.write(bytes); + output.write(RN); + output.flush(); } - message += "Content-Length: " + bytes.length + "\r\n"; + output.write('0'); + output.write(RNRN); + output.flush(); + return keepAlive; } } - message += "\r\n"; - trace(message); - output.write(message.getBytes()); - if (bytes != null) { - output.write(bytes); - } - output.flush(); + page = PageParser.parse(page, session.map); + bytes = page.getBytes(StandardCharsets.UTF_8); } + message = "HTTP/1.1 200 OK\r\n"; + message += "Content-Type: " + mimeType + "\r\n"; + if (!cache) { + message += "Cache-Control: no-cache\r\n"; + } else { + message += "Cache-Control: max-age=10\r\n"; + message += "Last-Modified: " + server.getStartDateTime() + "\r\n"; + } + message += "Content-Length: " + bytes.length + "\r\n"; + message += "\r\n"; + trace(message); + output.write(message.getBytes(StandardCharsets.ISO_8859_1)); + output.write(bytes); 
+ output.flush(); return keepAlive; } + private void writeSimple(String status, String text) throws IOException { + writeSimple(status, text != null ? text.getBytes(StandardCharsets.UTF_8) : null); + } + + private void writeSimple(String status, byte[] bytes) throws IOException { + trace(status); + output.write(status.getBytes(StandardCharsets.ISO_8859_1)); + if (bytes != null) { + output.write(RN); + String contentLength = "Content-Length: " + bytes.length; + trace(contentLength); + output.write(contentLength.getBytes(StandardCharsets.ISO_8859_1)); + output.write(RNRN); + output.write(bytes); + } else { + output.write(RNRN); + } + output.flush(); + } + + private boolean checkHost(String host) throws IOException { + if (host == null) { + writeSimple("HTTP/1.1 400 Bad Request", "Bad request"); + return false; + } + int index = host.indexOf(':'); + if (index >= 0) { + host = host.substring(0, index); + } + if (host.isEmpty()) { + return false; + } + host = StringUtils.toLowerEnglish(host); + if (host.equals(server.getHost()) || host.equals("localhost") || host.equals("127.0.0.1")) { + return true; + } + String externalNames = server.getExternalNames(); + if (externalNames != null && !externalNames.isEmpty()) { + for (String s : externalNames.split(",")) { + if (host.equals(s.trim())) { + return true; + } + } + } + writeSimple("HTTP/1.1 404 Not Found", "Host " + host + " not found"); + return false; + } + private String readHeaderLine() throws IOException { StringBuilder buff = new StringBuilder(); while (true) { - headerBytes++; int c = input.read(); if (c == -1) { throw new IOException("Unexpected EOF"); } else if (c == '\r') { - headerBytes++; if (input.read() == '\n') { return buff.length() > 0 ? 
buff.toString() : null; } @@ -227,6 +290,17 @@ private String readHeaderLine() throws IOException { } } + private void parseBodyAttributes() throws IOException { + if (dataLength > 0) { + byte[] bytes = Utils.newBytes(dataLength); + for (int pos = 0; pos < dataLength;) { + pos += input.read(bytes, pos, dataLength - pos); + } + String s = new String(bytes, StandardCharsets.UTF_8); + parseAttributes(s); + } + } + private void parseAttributes(String s) { trace("data=" + s); while (s != null) { @@ -255,16 +329,15 @@ private boolean parseHeader() throws IOException { boolean keepAlive = false; trace("parseHeader"); int len = 0; + host = null; ifModifiedSince = null; boolean multipart = false; - while (true) { - String line = readHeaderLine(); - if (line == null) { - break; - } + for (String line; (line = readHeaderLine()) != null;) { trace(" " + line); String lower = StringUtils.toLowerEnglish(line); - if (lower.startsWith("if-modified-since")) { + if (lower.startsWith("host")) { + host = getHeaderLineValue(line); + } else if (lower.startsWith("if-modified-since")) { ifModifiedSince = getHeaderLineValue(line); } else if (lower.startsWith("connection")) { String conn = getHeaderLineValue(line); @@ -283,7 +356,7 @@ private boolean parseHeader() throws IOException { boolean isWebKit = lower.contains("webkit/"); if (isWebKit && session != null) { // workaround for what seems to be a WebKit bug: - // http://code.google.com/p/chromium/issues/detail?id=6402 + // https://bugs.chromium.org/p/chromium/issues/detail?id=6402 session.put("frame-border", "1"); session.put("frameset-border", "2"); } @@ -315,64 +388,21 @@ private boolean parseHeader() throws IOException { } } } - } else if (line.trim().length() == 0) { + } else if (StringUtils.isWhitespaceOrEmpty(line)) { break; } } + dataLength = 0; if (multipart) { - uploadMultipart(input, len); - } else if (session != null && len > 0) { - byte[] bytes = DataUtils.newBytes(len); - for (int pos = 0; pos < len;) { - pos += 
input.read(bytes, pos, len - pos); - } - String s = new String(bytes); - parseAttributes(s); + // not supported + } else if (len > 0) { + dataLength = len; } return keepAlive; } - private void uploadMultipart(InputStream in, int len) throws IOException { - if (!new File(WebServer.TRANSFER).exists()) { - return; - } - String fileName = "temp.bin"; - headerBytes = 0; - String boundary = readHeaderLine(); - while (true) { - String line = readHeaderLine(); - if (line == null) { - break; - } - int index = line.indexOf("filename=\""); - if (index > 0) { - fileName = line.substring(index + - "filename=\"".length(), line.lastIndexOf('"')); - } - trace(" " + line); - } - if (!WebServer.isSimpleName(fileName)) { - return; - } - len -= headerBytes; - File file = new File(WebServer.TRANSFER, fileName); - OutputStream out = new FileOutputStream(file); - IOUtils.copy(in, out, len); - out.close(); - // remove the boundary - RandomAccessFile f = new RandomAccessFile(file, "rw"); - int testSize = (int) Math.min(f.length(), Constants.IO_BUFFER_SIZE); - f.seek(f.length() - testSize); - byte[] bytes = DataUtils.newBytes(Constants.IO_BUFFER_SIZE); - f.readFully(bytes, 0, testSize); - String s = new String(bytes, "ASCII"); - int x = s.lastIndexOf(boundary); - f.setLength(f.length() - testSize + x - 2); - f.close(); - } - private static String getHeaderLineValue(String line) { - return line.substring(line.indexOf(':') + 1).trim(); + return StringUtils.trimSubstring(line, line.indexOf(':') + 1); } @Override diff --git a/h2/src/main/org/h2/server/web/package.html b/h2/src/main/org/h2/server/web/package.html index 64ad6ce79a..4eab3b2de8 100644 --- a/h2/src/main/org/h2/server/web/package.html +++ b/h2/src/main/org/h2/server/web/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/_text_cs.prop b/h2/src/main/org/h2/server/web/res/_text_cs.prop index 93472cd6f2..4e082236b1 100644 --- a/h2/src/main/org/h2/server/web/res/_text_cs.prop +++ 
b/h2/src/main/org/h2/server/web/res/_text_cs.prop @@ -25,6 +25,7 @@ adminLoginCancel=Zrušit adminLoginOk=OK adminLogout=Odhlásit adminOthers=Povolit připojení z jiných počítačů +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Číslo portu adminPortWeb=Číslo portu webového serveru adminRestart=Změny se projeví po restartu serveru. @@ -98,6 +99,9 @@ toolbar.autoComplete=Automatické dokončování toolbar.autoComplete.full=Úplné toolbar.autoComplete.normal=Normální toolbar.autoComplete.off=Vypnuto +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Vypnuto +toolbar.autoSelect.on=#On toolbar.cancelStatement=Zrušit prováděný příkaz toolbar.clear=Vyčistit toolbar.commit=Vložit diff --git a/h2/src/main/org/h2/server/web/res/_text_de.prop b/h2/src/main/org/h2/server/web/res/_text_de.prop index c6ea67f532..846bcbd3ff 100644 --- a/h2/src/main/org/h2/server/web/res/_text_de.prop +++ b/h2/src/main/org/h2/server/web/res/_text_de.prop @@ -7,7 +7,7 @@ a.remoteConnectionsDisabled=Verbindungen von anderen Rechnern sind nicht freigeg a.title=H2 Console a.tools=Tools a.user=Benutzername -admin.executing=Aktiv +admin.executing=Aktive Ausführung admin.ip=IP admin.lastAccess=Letzter Zugriff admin.lastQuery=Letzter Befehl @@ -16,28 +16,29 @@ admin.notConnected=nicht verbunden admin.url=URL admin.yes=Ja adminAllow=Zugelassene Verbindungen -adminConnection=Verbindungs-Sicherheit -adminHttp=Unverschlüsselte HTTP Verbindungen -adminHttps=Verschlüsselte HTTPS Verbindungen +adminConnection=Verbindungssicherheit +adminHttp=Unverschlüsselte HTTP Verbindungen verwenden +adminHttps=Verschlüsselte HTTPS Verbindungen verwenden adminLocal=Nur lokale Verbindungen erlauben adminLogin=Administration Login adminLoginCancel=Abbrechen adminLoginOk=OK adminLogout=Beenden adminOthers=Verbindungen von anderen Computern erlauben -adminPort=Port +adminWebExternalNames=#External names or addresses of this server (comma-separated) +adminPort=Admin Port 
adminPortWeb=Web-Server Port adminRestart=Änderungen werden nach einem Neustart des Servers aktiv. adminSave=Speichern adminSessions=Aktive Verbindungen -adminShutdown=Shutdown +adminShutdown=Herunterfahren adminTitle=H2 Console Optionen adminTranslateHelp=Die H2 Console übersetzen oder die Übersetzung verbessern. adminTranslateStart=Übersetzen helpAction=Aktion helpAddAnotherRow=Fügt einen weiteren Datensatz hinzu helpAddDrivers=Datenbank Treiber hinzufügen -helpAddDriversText=Es ist möglich zusätzliche Datenbank-Treiber zu laden, indem die Pfade der Treiber-Dateien in den Umgebungsvariablen H2DRIVERS oder CLASSPATH eingetragen werden. Beispiel (Windows): Um den Datenbank-Treiber mit dem Jar-File C:/Programs/hsqldb/lib/hsqldb.jar hinzuzufügen, setzen Sie den die Umgebungvariable H2DRIVERS auf C:/Programs/hsqldb/lib/hsqldb.jar. +helpAddDriversText=Es ist möglich, zusätzliche Datenbank-Treiber zu laden, indem die Pfade der Treiber-Dateien in den Umgebungsvariablen H2DRIVERS oder CLASSPATH eingetragen werden. Beispiel (Windows): Um den Datenbank-Treiber mit dem Jar-File C:/Programs/hsqldb/lib/hsqldb.jar hinzuzufügen, setzen Sie die Umgebungvariable H2DRIVERS auf C:/Programs/hsqldb/lib/hsqldb.jar. helpAddRow=Fügt einen Datensatz hinzu helpCommandHistory=Zeigt die Befehls-Chronik helpCreateTable=Erzeugt eine neue Tabelle @@ -98,6 +99,9 @@ toolbar.autoComplete=Auto-Complete toolbar.autoComplete.full=Alles toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Aus +toolbar.autoSelect=Automatische Auswahl +toolbar.autoSelect.off=Aus +toolbar.autoSelect.on=An toolbar.cancelStatement=Laufenden Befehl abbrechen toolbar.clear=Leeren toolbar.commit=Commit (Abschliessen/Speichern) @@ -110,13 +114,13 @@ toolbar.run=Ausführen toolbar.runSelected=Ausgewähltes Ausführen toolbar.sqlStatement=SQL Befehl tools.backup=Backup -tools.backup.help=Erzeugt eine Sichheitskopie eine Datenbank. +tools.backup.help=Erzeugt eine Sicherheitskopie einer Datenbank. 
tools.changeFileEncryption=ChangeFileEncryption tools.changeFileEncryption.help=Erlaubt, Datei Verschlüsselungs-Passwort und -Algorithmus einer Datenbank zu ändern. tools.cipher=Verschlüsselung (AES oder XTEA) tools.commandLine=Kommandozeile tools.convertTraceFile=ConvertTraceFile -tools.convertTraceFile.help=Konvertiert eine .trace.db Datei in eine Java Applikation und ein SQL Script. +tools.convertTraceFile.help=Konvertiert eine .trace.db Datei in eine Java Applikation und ein SQL Skript. tools.createCluster=CreateCluster tools.createCluster.help=Generiert ein Cluster aus einer autonomen Datenbank. tools.databaseName=Datenbankname @@ -127,27 +131,27 @@ tools.directory=Verzeichnis tools.encryptionPassword=Verschlüsselungs-Passwort tools.javaDirectoryClassName=Java Verzeichnis- und Klassen-Name tools.recover=Recover -tools.recover.help=Hilft bei der Reparatur eine beschädigten Datenbank. +tools.recover.help=Hilft bei der Reparatur einer beschädigten Datenbank. tools.restore=Restore tools.restore.help=Stellt eine Datenbank aus einem Backup her. tools.result=Ergebnis tools.run=Start tools.runScript=RunScript -tools.runScript.help=Führt ein SQL Script aus. +tools.runScript.help=Führt ein SQL Skript aus. tools.script=Script -tools.script.help=Generiert eine SQL Script einer Datenbank für Backup- und Migrationszwecke. -tools.scriptFileName=Script Dateiname -tools.serverList=Server List +tools.script.help=Generiert ein SQL Skript einer Datenbank für Backup- und Migrationszwecke. 
+tools.scriptFileName=Skript Dateiname +tools.serverList=Server Liste tools.sourceDatabaseName=Quell-Datenbankname tools.sourceDatabaseURL=Quell-Datenbank URL tools.sourceDirectory=Quell-Verzeichnis tools.sourceFileName=Quell-Dateiname -tools.sourceScriptFileName=Dateiname des Scripts (Quelle) +tools.sourceScriptFileName=Dateiname des Skripts (Quelle) tools.targetDatabaseName=Ziel-Datenbankname tools.targetDatabaseURL=Ziel-Datenbank URL tools.targetDirectory=Ziel-Verzeichnis tools.targetFileName=Ziel-Dateiname -tools.targetScriptFileName=Dateiname des Scripts (Ziel) +tools.targetScriptFileName=Dateiname des Skripts (Ziel) tools.traceFileName=Name der Trace Datei tree.admin=Administrator tree.current=Aktueller Wert diff --git a/h2/src/main/org/h2/server/web/res/_text_en.prop b/h2/src/main/org/h2/server/web/res/_text_en.prop index 9eab3466be..b6f0fb8a0c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_en.prop +++ b/h2/src/main/org/h2/server/web/res/_text_en.prop @@ -1,7 +1,7 @@ .translator=Thomas Mueller a.help=Help a.language=English -a.lynxNotSupported=Sorry, Lynx not supported yet +a.lynxNotSupported=Sorry, Lynx is not supported yet a.password=Password a.remoteConnectionsDisabled=Sorry, remote connections ('webAllowOthers') are disabled on this server. a.title=H2 Console @@ -25,6 +25,7 @@ adminLoginCancel=Cancel adminLoginOk=OK adminLogout=Logout adminOthers=Allow connections from other computers +adminWebExternalNames=External names or addresses of this server (comma-separated) adminPort=Port number adminPortWeb=Web server port number adminRestart=Changes take effect after restarting the server. @@ -37,7 +38,7 @@ adminTranslateStart=Translate helpAction=Action helpAddAnotherRow=Add another row helpAddDrivers=Adding Database Drivers -helpAddDriversText=Additional database drivers can be registered by adding the Jar file location of the driver to the the environment variables H2DRIVERS or CLASSPATH. 
Example (Windows): to add the database driver library C:/Programs/hsqldb/lib/hsqldb.jar, set the environment variable H2DRIVERS to C:/Programs/hsqldb/lib/hsqldb.jar. +helpAddDriversText=Additional database drivers can be registered by adding the Jar file location of the driver to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the database driver library C:/Programs/hsqldb/lib/hsqldb.jar, set the environment variable H2DRIVERS to C:/Programs/hsqldb/lib/hsqldb.jar. helpAddRow=Add a new row helpCommandHistory=Shows the Command History helpCreateTable=Create a new table @@ -98,6 +99,9 @@ toolbar.autoComplete=Auto complete toolbar.autoComplete.full=Full toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Off +toolbar.autoSelect=Auto select +toolbar.autoSelect.off=Off +toolbar.autoSelect.on=On toolbar.cancelStatement=Cancel the current statement toolbar.clear=Clear toolbar.commit=Commit diff --git a/h2/src/main/org/h2/server/web/res/_text_es.prop b/h2/src/main/org/h2/server/web/res/_text_es.prop index 67e2297afa..8e41b66ce5 100644 --- a/h2/src/main/org/h2/server/web/res/_text_es.prop +++ b/h2/src/main/org/h2/server/web/res/_text_es.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Aceptar adminLogout=Desconectar adminOthers=Permitir conexiones desde otros ordenadores +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Puerto adminPortWeb=Puerto del servidor Web adminRestart=Los cambios tendrán efecto al reiniciar el servidor. 
@@ -98,6 +99,9 @@ toolbar.autoComplete=Auto completado toolbar.autoComplete.full=Completo toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Desactivado +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Desactivado +toolbar.autoSelect.on=#On toolbar.cancelStatement=Cancelar la instrucción actual toolbar.clear=Eliminar toolbar.commit=Commit diff --git a/h2/src/main/org/h2/server/web/res/_text_fr.prop b/h2/src/main/org/h2/server/web/res/_text_fr.prop index e856a622e4..792f72ecf8 100644 --- a/h2/src/main/org/h2/server/web/res/_text_fr.prop +++ b/h2/src/main/org/h2/server/web/res/_text_fr.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annuler adminLoginOk=OK adminLogout=Déconnexion adminOthers=Autoriser les connexions d'ordinateurs distants +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numéro de port adminPortWeb=Numéro de port du serveur Web adminRestart=Modifications effectuées après redémarrage du serveur. @@ -98,6 +99,9 @@ toolbar.autoComplete=Complètement automatique toolbar.autoComplete.full=Exhaustif toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Désactivé +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Désactivé +toolbar.autoSelect.on=#On toolbar.cancelStatement=Annuler l'instruction en cours toolbar.clear=Effacer toolbar.commit=Valider diff --git a/h2/src/main/org/h2/server/web/res/_text_hi.prop b/h2/src/main/org/h2/server/web/res/_text_hi.prop new file mode 100644 index 0000000000..a7d8a05293 --- /dev/null +++ b/h2/src/main/org/h2/server/web/res/_text_hi.prop @@ -0,0 +1,164 @@ +.translator=vikash verma +a.help=सहायता +a.language=Hindi(हिंदी) +a.lynxNotSupported=क्षमा करें, लिंक्स(Lynx) अभी तक समर्थित नहीं है +a.password=पासवर्ड +a.remoteConnectionsDisabled=क्षमा करें, इस सर्वर पर दूरस्थ कनेक्शन ('webAllowOthers') अक्षम हैं। +a.title=एच 2 कंसोल +a.tools=उपकरण +a.user=प्रयोक्ता नाम +admin.executing=निष्पादित +admin.ip=आईपी (IP) +admin.lastAccess=अंतिम पहुंच +admin.lastQuery=अंतिम 
प्रश्न(query) +admin.no=नहीं +admin.notConnected=जुड़े नहीं हैं +admin.url=यूआरएल (URL) +admin.yes=हाँ +adminAllow=ग्राहकों को अनुमति है +adminConnection=कनेक्शन सुरक्षा +adminHttp=अनएन्क्रिप्टेड HTTP कनेक्शन का उपयोग करें +adminHttps=एन्क्रिप्टेड एसएसएल (HTTPS) कनेक्शन का उपयोग करें +adminLocal=केवल स्थानीय कनेक्शन की अनुमति दें +adminLogin=प्रशासन लॉगिन करें +adminLoginCancel=रद्द करना +adminLoginOk=ठीक +adminLogout=लोग आउट +adminOthers=अन्य कंप्यूटर से कनेक्शन की अनुमति दें +adminWebExternalNames=#External names or addresses of this server (comma-separated) +adminPort=पोर्ट नंबर +adminPortWeb=वेब सर्वर पोर्ट नंबर +adminRestart=सर्वर को पुनरारंभ करने के बाद परिवर्तन प्रभावी होते हैं। +adminSave=रक्षित करें +adminSessions=सक्रिय सत्र +adminShutdown=बंद करना +adminTitle=एच 2 कंसोल प्राथमिकताएं +adminTranslateHelp=अनुवाद या H2 कंसोल के अनुवाद में सुधार। +adminTranslateStart=अनुवाद करना +helpAction=कर्म +helpAddAnotherRow=एक और पंक्ति जोड़ें +helpAddDrivers=डेटाबेस ड्राइवर्स जोड़ना +helpAddDriversText=अतिरिक्त डेटाबेस ड्राइवरों को पर्यावरण चर (environment variables) H2DRIVERS या CLASSPATH में ड्राइवर के जार फ़ाइल स्थान को जोड़कर पंजीकृत किया जा सकता है। उदाहरण (विंडोज़) : डेटाबेस ड्राइवर लाइब्रेरी को जोड़ने के लिए C : / Programs / hsqldb / lib / hsqldb.jar, C_: / प्रोग्राम / hsqldb (lib / hsqldb.jar) पर्यावरण चर H2DRIVERS सेट करें। +helpAddRow=एक नई पंक्ति जोड़ें +helpCommandHistory=कमांड इतिहास दिखाता है +helpCreateTable=एक नई तालिका बनाएँ +helpDeleteRow=एक पंक्ति निकालें +helpDisconnect=डेटाबेस से डिस्कनेक्ट करता है +helpDisplayThis=यह सहायता पृष्ठ प्रदर्शित करें +helpDropTable=यदि मौजूद है तो तालिका हटाएं +helpExecuteCurrent=वर्तमान SQL कथन निष्पादित करता है +helpExecuteSelected=पाठ चयन द्वारा परिभाषित SQL कथन निष्पादित करता है +helpIcon=चिह्न +helpImportantCommands=महत्वपूर्ण आदेश +helpOperations=संचालन +helpQuery=तालिका को क्वेरी करें +helpSampleSQL=नमूना एसक्यूएल स्क्रिप्ट +helpStatements=एसक्यूएल बयान +helpUpdate=एक पंक्ति में डेटा बदलें 
+helpWithColumnsIdName=आईडी और NAME कॉलम के साथ +key.alt=Alt +key.ctrl=Ctrl +key.enter=Enter +key.shift=Shift +key.space=Space +login.connect=जुडिये +login.driverClass=चालक वर्ग (Driver Class) +login.driverNotFound=डेटाबेस ड्राइवर नहीं मिला
    ड्राइवरों को जोड़ने के लिए सहायता में देखें +login.goAdmin=पसंद +login.jdbcUrl=JDBC URL +login.language=भाषा +login.login=लॉग इन करें +login.remove=हटाये +login.save=रक्षित करें +login.savedSetting=सहेजे गए सेटिंग्स +login.settingName=सेटिंग्स का नाम +login.testConnection=परीक्षण कनेक्शन +login.testSuccessful=सफल परीक्षण +login.welcome=एच 2 कंसोल +result.1row=1 पंक्ति +result.autoCommitOff=ऑटो कमिट बंद +result.autoCommitOn=ऑटो कमिट चालू +result.bytes=बाइट्स +result.characters=वर्ण +result.maxrowsSet=अधिकतम पंक्ति संख्या सेट है +result.noRows=कोई पंक्तियाँ नहीं +result.noRunningStatement=वर्तमान में कोई स्टेटमेंट नहीं चल रहा है +result.rows=पंक्तियां +result.statementWasCanceled=बयान रद्द कर दिया गया +result.updateCount=अद्यतन गणना +resultEdit.action=कर्म +resultEdit.add=जोड़ना +resultEdit.cancel=रद्द करना +resultEdit.delete=हटाये +resultEdit.edit=संपादित करें +resultEdit.editResult=संपादित करें +resultEdit.save=रक्षित करें +toolbar.all=सब +toolbar.autoCommit=ऑटो कमिट +toolbar.autoComplete=ऑटो पूर्ण +toolbar.autoComplete.full=पूर्ण +toolbar.autoComplete.normal=सामान्य +toolbar.autoComplete.off=बंद +toolbar.autoSelect=स्वतः चयन +toolbar.autoSelect.off=बंद +toolbar.autoSelect.on=पर +toolbar.cancelStatement=वर्तमान कथन को रद्द करें +toolbar.clear=स्पष्ट +toolbar.commit=कमिट +toolbar.disconnect=डिस्कनेक्ट +toolbar.history=कमान का इतिहास +toolbar.maxRows=अधिकतम पंक्तियाँ +toolbar.refresh=ताज़ा करना +toolbar.rollback=रोलबैक +toolbar.run=रन +toolbar.runSelected=चयनित चलाएं +toolbar.sqlStatement=एसक्यूएल बयान +tools.backup=बैकअप +tools.backup.help=एक डेटाबेस का बैकअप बनाता है। +tools.changeFileEncryption=ChangeFileEncryption +tools.changeFileEncryption.help=डेटाबेस फ़ाइल एन्क्रिप्शन पासवर्ड और एल्गोरिथ्म को बदलने देता है। +tools.cipher=सिफर (एईएस या एक्सटीईए) +tools.commandLine=कमांड लाइन +tools.convertTraceFile=ConvertTraceFile +tools.convertTraceFile.help=एक जावा अनुप्रयोग और SQL स्क्रिप्ट के लिए एक .trace.db फ़ाइल में कनवर्ट करता है। 
+tools.createCluster=CreateCluster +tools.createCluster.help=एक स्टैंडअलोन डेटाबेस से एक क्लस्टर बनाता है। +tools.databaseName=डेटाबेस नाम +tools.decryptionPassword=डिक्रिप्शन पासवर्ड +tools.deleteDbFiles=DeleteDbFiles +tools.deleteDbFiles.help=डेटाबेस से संबंधित सभी फ़ाइलों को हटाता है। +tools.directory=निर्देशिका +tools.encryptionPassword=एन्क्रिप्शन पासवर्ड +tools.javaDirectoryClassName=जावा निर्देशिका और वर्ग का नाम +tools.recover=वसूली +tools.recover.help=एक दूषित डेटाबेस को पुनर्प्राप्त करने में मदद करता है। +tools.restore=पुनर्स्थापित +tools.restore.help=डेटाबेस बैकअप पुनर्स्थापित करता है। +tools.result=परिणाम +tools.run=रन +tools.runScript=RunScript +tools.runScript.help=SQL स्क्रिप्ट चलाता है। +tools.script=लिपि +tools.script.help=बैकअप या माइग्रेशन के लिए डेटाबेस को SQL स्क्रिप्ट में बदलने की अनुमति देता है। +tools.scriptFileName=स्क्रिप्ट फ़ाइल नाम +tools.serverList=सर्वर सूची +tools.sourceDatabaseName=स्रोत डेटाबेस का नाम +tools.sourceDatabaseURL=स्रोत डेटाबेस URL +tools.sourceDirectory=स्रोत निर्देशिका +tools.sourceFileName=स्रोत फ़ाइल नाम +tools.sourceScriptFileName=स्रोत स्क्रिप्ट फ़ाइल नाम +tools.targetDatabaseName=लक्ष्य डेटाबेस नाम +tools.targetDatabaseURL=लक्ष्य डेटाबेस URL +tools.targetDirectory=लक्ष्य निर्देशिका +tools.targetFileName=लक्ष्य फ़ाइल नाम +tools.targetScriptFileName=लक्ष्य स्क्रिप्ट फ़ाइल नाम +tools.traceFileName=ट्रेस फ़ाइल नाम +tree.admin=व्यवस्थापक +tree.current=वर्तमान मूल्य +tree.hashed=टुकड़ों में बांटा(Hashed) +tree.increment=वृद्धि +tree.indexes=इंडेक्स +tree.nonUnique=गैर अद्वितीय +tree.sequences=दृश्यों +tree.unique=अद्वितीय +tree.users=उपयोगकर्ता diff --git a/h2/src/main/org/h2/server/web/res/_text_hu.prop b/h2/src/main/org/h2/server/web/res/_text_hu.prop index 7d25ea66c1..1406ed0e2b 100644 --- a/h2/src/main/org/h2/server/web/res/_text_hu.prop +++ b/h2/src/main/org/h2/server/web/res/_text_hu.prop @@ -25,6 +25,7 @@ adminLoginCancel=Mégse adminLoginOk=OK adminLogout=Kilépés adminOthers=Más számítógépekről kezdeményezett 
kapcsolatok engedélyezése +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=#Port number adminPortWeb=Webkiszolgáló portszáma adminRestart=A változtatások a kiszolgáló újraindítása után lépnek érvénybe @@ -98,6 +99,9 @@ toolbar.autoComplete=Automatikus kiegészítés toolbar.autoComplete.full=Teljes toolbar.autoComplete.normal=Normál toolbar.autoComplete.off=Kikapcsolva +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Kikapcsolva +toolbar.autoSelect.on=#On toolbar.cancelStatement=Aktuális utasítás végrehajtásának megszakítása toolbar.clear=Törlés toolbar.commit=Jóváhagyás diff --git a/h2/src/main/org/h2/server/web/res/_text_in.prop b/h2/src/main/org/h2/server/web/res/_text_in.prop index 92d760522e..e954ac7a4d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_in.prop +++ b/h2/src/main/org/h2/server/web/res/_text_in.prop @@ -25,6 +25,7 @@ adminLoginCancel=Batal adminLoginOk=OK adminLogout=Keluar adminOthers=Ijinkan koneksi dari komputer lain +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Nomor port adminPortWeb=Nomor port web server adminRestart=Perubahan akan efektif setelah server di-restart. 
@@ -98,6 +99,9 @@ toolbar.autoComplete=Auto-Complete toolbar.autoComplete.full=Full toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Off +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Off +toolbar.autoSelect.on=#On toolbar.cancelStatement=Batalkan pernyataan terkini toolbar.clear=Bersihkan toolbar.commit=Laksanakan diff --git a/h2/src/main/org/h2/server/web/res/_text_it.prop b/h2/src/main/org/h2/server/web/res/_text_it.prop index 1d733e0f5e..73fa39f5e5 100644 --- a/h2/src/main/org/h2/server/web/res/_text_it.prop +++ b/h2/src/main/org/h2/server/web/res/_text_it.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annulla adminLoginOk=OK adminLogout=Disconnessione adminOthers=Abilita connessioni da altri computers +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numero di porta adminPortWeb=Numero di porta del server Web adminRestart=Le modifiche saranno effettive dopo il riavvio del server. @@ -98,6 +99,9 @@ toolbar.autoComplete=Auto completamento toolbar.autoComplete.full=Pieno toolbar.autoComplete.normal=Normale toolbar.autoComplete.off=Disattivo +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Disattivo +toolbar.autoSelect.on=#On toolbar.cancelStatement=Annulla il seguente comando toolbar.clear=Annulla toolbar.commit=Esegui comando diff --git a/h2/src/main/org/h2/server/web/res/_text_ja.prop b/h2/src/main/org/h2/server/web/res/_text_ja.prop index 737af84875..f998bfda46 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ja.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ja.prop @@ -25,6 +25,7 @@ adminLoginCancel=キャンセル adminLoginOk=OK adminLogout=ログアウト adminOthers=他のコンピュータからの接続を許可 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=ポート番号 adminPortWeb=Webサーバポート番号 adminRestart=変更はサーバの再起動後に有効になります。 @@ -98,6 +99,9 @@ toolbar.autoComplete=オートコンプリート toolbar.autoComplete.full=フル toolbar.autoComplete.normal=ノーマル toolbar.autoComplete.off=オフ +toolbar.autoSelect=#Auto 
select +toolbar.autoSelect.off=オフ +toolbar.autoSelect.on=#On toolbar.cancelStatement=現在のステートメントをキャンセル toolbar.clear=クリア toolbar.commit=コミット diff --git a/h2/src/main/org/h2/server/web/res/_text_ko.prop b/h2/src/main/org/h2/server/web/res/_text_ko.prop index 854aa4cfcc..cfa58eb3bf 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ko.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ko.prop @@ -25,6 +25,7 @@ adminLoginCancel=취소 adminLoginOk=확인 adminLogout=로그아웃 adminOthers=다른 컴퓨터에서의 연결 허가 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=포트 번호 adminPortWeb=웹 서버 포트 번호 adminRestart=변경 사항은 서버 재시작 후 반영됩니다. @@ -98,6 +99,9 @@ toolbar.autoComplete=자동 완성 toolbar.autoComplete.full=전체 toolbar.autoComplete.normal=보통 toolbar.autoComplete.off=안함 +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=안함 +toolbar.autoSelect.on=#On toolbar.cancelStatement=현재 문 취소 toolbar.clear=지우기 toolbar.commit=커밋 diff --git a/h2/src/main/org/h2/server/web/res/_text_nl.prop b/h2/src/main/org/h2/server/web/res/_text_nl.prop index db7d7d3706..5c04618251 100644 --- a/h2/src/main/org/h2/server/web/res/_text_nl.prop +++ b/h2/src/main/org/h2/server/web/res/_text_nl.prop @@ -25,6 +25,7 @@ adminLoginCancel=Annuleren adminLoginOk=OK adminLogout=Uitloggen adminOthers=Sta verbindingen vanaf andere computers toe +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Poortnummer adminPortWeb=Webserver poortnummer adminRestart=Wijzigingen worden doorgevoerd na herstarten server @@ -98,6 +99,9 @@ toolbar.autoComplete=Auto aanvullen toolbar.autoComplete.full=Volledig toolbar.autoComplete.normal=Normaal toolbar.autoComplete.off=Uit +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Uit +toolbar.autoSelect.on=#On toolbar.cancelStatement=Annuleer het huidige statement toolbar.clear=Wissen toolbar.commit=Commit diff --git a/h2/src/main/org/h2/server/web/res/_text_pl.prop b/h2/src/main/org/h2/server/web/res/_text_pl.prop 
index 427110cd48..b13069bc0c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pl.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pl.prop @@ -25,6 +25,7 @@ adminLoginCancel=Anuluj adminLoginOk=OK adminLogout=Wyloguj adminOthers=Pozwalaj na połączenia zdalne +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Numer portu adminPortWeb=Numer portu serwera Web adminRestart=Zmiany będą widoczne po zrestartowaniu serwera. @@ -98,6 +99,9 @@ toolbar.autoComplete=Automatyczne uzupełnianie toolbar.autoComplete.full=Pełny toolbar.autoComplete.normal=Normalny toolbar.autoComplete.off=Wyłączony +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Wyłączony +toolbar.autoSelect.on=#On toolbar.cancelStatement=Anuluj bieżące zapytanie toolbar.clear=Wyczyść toolbar.commit=Zatwierdź diff --git a/h2/src/main/org/h2/server/web/res/_text_pt_br.prop b/h2/src/main/org/h2/server/web/res/_text_pt_br.prop index 26285385c2..56516c98c8 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pt_br.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pt_br.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Confirmar adminLogout=Sair adminOthers=Permitir conexões de outros computadores na rede +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Número da porta adminPortWeb=Número da porta do servidor adminRestart=As alterações serão aplicadas depois de reiniciar o servidor. 
@@ -92,12 +93,15 @@ resultEdit.delete=Apagar resultEdit.edit=Alterar resultEdit.editResult=Alterar resultEdit.save=Salvar -toolbar.all=Todas +toolbar.all=Todos toolbar.autoCommit=Auto commit toolbar.autoComplete=Auto complete toolbar.autoComplete.full=Total toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Desligado +toolbar.autoSelect=Auto seleção +toolbar.autoSelect.off=Desligado +toolbar.autoSelect.on=Ligado toolbar.cancelStatement=Cancelar o comando que está em execução toolbar.clear=Limpar toolbar.commit=Commit @@ -107,10 +111,10 @@ toolbar.maxRows=Número máximo de linhas toolbar.refresh=Atualizar toolbar.rollback=Rollback toolbar.run=Executar comando -toolbar.runSelected=#Run Selected +toolbar.runSelected=Executar selecionado toolbar.sqlStatement=Comando SQL tools.backup=#Backup -tools.backup.help=#Creates a backup of a database. +tools.backup.help=Cria um backup de um banco de dados. tools.changeFileEncryption=#ChangeFileEncryption tools.changeFileEncryption.help=#Allows changing the database file encryption password and algorithm. tools.cipher=#Cipher (AES or XTEA) @@ -129,7 +133,7 @@ tools.javaDirectoryClassName=#Java directory and class name tools.recover=#Recover tools.recover.help=#Helps recovering a corrupted database. tools.restore=#Restore -tools.restore.help=#Restores a database backup. +tools.restore.help=Restaura um backup de banco de dados. 
tools.result=#Result tools.run=Executar comando tools.runScript=#RunScript @@ -146,8 +150,8 @@ tools.sourceScriptFileName=#Source script file name tools.targetDatabaseName=#Target database name tools.targetDatabaseURL=#Target database URL tools.targetDirectory=#Target directory -tools.targetFileName=#Target file name -tools.targetScriptFileName=#Target script file name +tools.targetFileName=Nome do arquivo de destino +tools.targetScriptFileName=Nome do arquivo de script de destino tools.traceFileName=#Trace file name tree.admin=Administrador tree.current=Valor corrente diff --git a/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop b/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop index 733c492c1c..3323f3b3a1 100644 --- a/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop +++ b/h2/src/main/org/h2/server/web/res/_text_pt_pt.prop @@ -25,6 +25,7 @@ adminLoginCancel=Cancelar adminLoginOk=Confirmar adminLogout=Sair adminOthers=Permitir conexões a partir de outro computador na rede +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Número do porto adminPortWeb=Número do porto do servidor adminRestart=As alterações apenas serão aplicadas após reiniciar o servidor. 
@@ -98,6 +99,9 @@ toolbar.autoComplete=Auto complete toolbar.autoComplete.full=Total toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Desligado +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Desligado +toolbar.autoSelect.on=#On toolbar.cancelStatement=Cancelar o comando que se encontra em execução toolbar.clear=Limpar toolbar.commit=Commit diff --git a/h2/src/main/org/h2/server/web/res/_text_ru.prop b/h2/src/main/org/h2/server/web/res/_text_ru.prop index e44c496d09..4f23c8aa0d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_ru.prop +++ b/h2/src/main/org/h2/server/web/res/_text_ru.prop @@ -1,57 +1,58 @@ .translator=Vlad Alexahin a.help=Помощь a.language=Русский -a.lynxNotSupported=Извините, Lynx пока что не поддерживается +a.lynxNotSupported=Извините, Lynx пока не поддерживается a.password=Пароль -a.remoteConnectionsDisabled=Извините, удаленные подключения ('webAllowOthers') запрещены на этом сервере. +a.remoteConnectionsDisabled=Извините, удалённые подключения ('webAllowOthers') запрещены на этом сервере. 
a.title=H2 Console -a.tools=#Tools -a.user=Пользователь Имя +a.tools=Инструменты +a.user=Имя пользователя admin.executing=Выполняется admin.ip=IP -admin.lastAccess=Последний Вход -admin.lastQuery=Последний Запрос -admin.no=#no -admin.notConnected=#not connected +admin.lastAccess=Последний доступ +admin.lastQuery=Последний запрос +admin.no=нет +admin.notConnected=нет соединения admin.url=URL -admin.yes=#yes -adminAllow=Разрешенные клиенты +admin.yes=да +adminAllow=Разрешённые клиенты adminConnection=Безопасность подключения -adminHttp=Используйте незашифрованые HTTP-соединения +adminHttp=Используйте незашифрованные HTTP-соединения adminHttps=Используйте SSL (HTTPS) соединения adminLocal=Разрешены только локальные подключения -adminLogin=Администратор Логин +adminLogin=Административный вход adminLoginCancel=Отменить adminLoginOk=OK -adminLogout=Logout +adminLogout=Выход adminOthers=Разрешить удаленные подключения +adminWebExternalNames=Внешние имена или адреса этого сервера (через запятую) adminPort=Номер порта adminPortWeb=Порт web-сервера adminRestart=Изменения вступят в силу после перезагрузки сервера. adminSave=Сохранить adminSessions=Активные сессии adminShutdown=Выключить -adminTitle=H2 Console Preferences -adminTranslateHelp=#Translate or improve the translation of the H2 Console. -adminTranslateStart=#Translate +adminTitle=Настройки консоли H2 +adminTranslateHelp=Перевести или улучшить перевод консоли H2 +adminTranslateStart=Перевести helpAction=Действие helpAddAnotherRow=Добавить строку -helpAddDrivers=Дабавляем драйвер базы данных -helpAddDriversText=Дополнительные драйверы базы данных могут быть зарегестрированы добавлением соответствующих Jar-файлов в переменную среды H2DRIVERS или в CLASSPATH. Пример (Windows): Чтобы добаить библиотеку драйвера базы данных C:/Programs/hsqldb/lib/hsqldb.jar, установите в переменную среды H2DRIVERS значение C:/Programs/hsqldb/lib/hsqldb.jar. 
+helpAddDrivers=Добавляем драйвер базы данных +helpAddDriversText=Дополнительные драйверы базы данных могут быть зарегистрированы добавлением соответствующих Jar-файлов в переменную среды H2DRIVERS или в CLASSPATH. Пример (Windows): Чтобы добавить библиотеку драйвера базы данных C:/Programs/hsqldb/lib/hsqldb.jar, установите в переменную среды H2DRIVERS значение C:/Programs/hsqldb/lib/hsqldb.jar. helpAddRow=Добавить новую строку -helpCommandHistory=Показывает историю выполенных команд +helpCommandHistory=Показывает историю выполненных команд helpCreateTable=Создать новую таблицу helpDeleteRow=Удалить строку helpDisconnect=Отключиться от базы данных helpDisplayThis=Показывает это окно помощи helpDropTable=Удаляет таблицу, если она уже существует helpExecuteCurrent=Выполнить текущий SQL-запрос -helpExecuteSelected=#Executes the SQL statement defined by the text selection +helpExecuteSelected=Выполнить SQL-запрос, выделенный в тексте helpIcon=Иконка helpImportantCommands=Важные команды helpOperations=Операции helpQuery=Запрос к таблице -helpSampleSQL=Примеры SQL-скриптов +helpSampleSQL=Примеры скриптов SQL helpStatements=SQL-запрос helpUpdate=Изменить данные в строке helpWithColumnsIdName=с колонками ID и NAME @@ -63,7 +64,7 @@ key.space=#Space login.connect=Соединиться login.driverClass=Класс драйвера login.driverNotFound=Драйвер базы данных не найден
    Посмотрите в Помощи, как добавить драйвер базы данных -login.goAdmin=Preferences +login.goAdmin=Настройки login.jdbcUrl=JDBC URL login.language=Язык login.login=Логин @@ -77,11 +78,11 @@ login.welcome=H2 Console result.1row=1 строка result.autoCommitOff=Авто-выполнение сейчас ВЫКЛЮЧЕНО result.autoCommitOn=Авто-выполнение сейчас ВКЛЮЧЕНО -result.bytes=#bytes -result.characters=#characters +result.bytes=байт +result.characters=символов result.maxrowsSet=Установлено максимальное количество строк result.noRows=нет строк -result.noRunningStatement=Сейчас нету выполняемых запросов +result.noRunningStatement=Сейчас нет выполняемых запросов result.rows=строки result.statementWasCanceled=Запрос был отменен result.updateCount=Обновить количество @@ -98,63 +99,66 @@ toolbar.autoComplete=Авто-завершение toolbar.autoComplete.full=Все toolbar.autoComplete.normal=Нормальные toolbar.autoComplete.off=Выключено +toolbar.autoSelect=Автовыбор +toolbar.autoSelect.off=Выключено +toolbar.autoSelect.on=Включено toolbar.cancelStatement=Отменить текущий запрос toolbar.clear=Очистить -toolbar.commit=Выполнить -toolbar.disconnect=Отсоедениться +toolbar.commit=Зафиксировать транзакцию +toolbar.disconnect=Отсоединиться toolbar.history=История команд toolbar.maxRows=Максимальное количество строк toolbar.refresh=Обновить -toolbar.rollback=Вернуть назад +toolbar.rollback=Откатить транзакцию toolbar.run=Выполнить -toolbar.runSelected=#Run Selected +toolbar.runSelected=Выполнить выделенное toolbar.sqlStatement=SQL-запрос -tools.backup=#Backup -tools.backup.help=#Creates a backup of a database. -tools.changeFileEncryption=#ChangeFileEncryption -tools.changeFileEncryption.help=#Allows changing the database file encryption password and algorithm. -tools.cipher=#Cipher (AES or XTEA) -tools.commandLine=#Command line -tools.convertTraceFile=#ConvertTraceFile -tools.convertTraceFile.help=#Converts a .trace.db file to a Java application and SQL script. 
-tools.createCluster=#CreateCluster -tools.createCluster.help=#Creates a cluster from a standalone database. -tools.databaseName=#Database name -tools.decryptionPassword=#Decryption password -tools.deleteDbFiles=#DeleteDbFiles -tools.deleteDbFiles.help=#Deletes all files belonging to a database. -tools.directory=#Directory -tools.encryptionPassword=#Encryption password -tools.javaDirectoryClassName=#Java directory and class name -tools.recover=#Recover -tools.recover.help=#Helps recovering a corrupted database. -tools.restore=#Restore -tools.restore.help=#Restores a database backup. -tools.result=#Result +tools.backup=Резервное копирование +tools.backup.help=Создает резервную копию базы данных. +tools.changeFileEncryption=Изменить шифрование файла +tools.changeFileEncryption.help=Позволяет изменить алгоритм шифрования файлов базы данных и пароль. +tools.cipher=Алгоритм (AES или XTEA) +tools.commandLine=Командная строка +tools.convertTraceFile=Преобразовать trace-файл +tools.convertTraceFile.help=Преобразует .trace.db файл в приложение Java и скрипт SQL. +tools.createCluster=Создать кластер +tools.createCluster.help=Создание кластера из автономной базы данных. +tools.databaseName=Имя базы данных +tools.decryptionPassword=Пароль дешифровки +tools.deleteDbFiles=Удалить файлы БД +tools.deleteDbFiles.help=Удаляет все файлы, относящиеся к базе данных. +tools.directory=Каталог +tools.encryptionPassword=Пароль шифровки +tools.javaDirectoryClassName=Каталог и имя класса +tools.recover=Восстановление +tools.recover.help=Восстановление поврежденной базы данных. +tools.restore=Восстановить +tools.restore.help=Восстановление из резервной копии базы данных. +tools.result=Результат tools.run=Выполнить -tools.runScript=#RunScript -tools.runScript.help=#Runs a SQL script. -tools.script=#Script -tools.script.help=#Allows to convert a database to a SQL script for backup or migration. 
-tools.scriptFileName=#Script file name -tools.serverList=#Server list -tools.sourceDatabaseName=#Source database name -tools.sourceDatabaseURL=#Source database URL -tools.sourceDirectory=#Source directory -tools.sourceFileName=#Source file name -tools.sourceScriptFileName=#Source script file name -tools.targetDatabaseName=#Target database name -tools.targetDatabaseURL=#Target database URL -tools.targetDirectory=#Target directory -tools.targetFileName=#Target file name -tools.targetScriptFileName=#Target script file name -tools.traceFileName=#Trace file name +tools.runScript=Выполнить скрипт +tools.runScript.help=Выполнение скрипта SQL. +tools.script=Скрипт +tools.script.help=Позволяет преобразовать базу данных в скрипт SQL для резервного копирования или миграции. +tools.scriptFileName=Имя файла скрипта SQL +tools.serverList=Список серверов +tools.sourceDatabaseName=Имя базы данных источника +tools.sourceDatabaseURL=URL базы данных источника +tools.sourceDirectory=Каталог источника +tools.sourceFileName=Имя файла источника +tools.sourceScriptFileName=Имя файла скрипта источника +tools.targetDatabaseName=Имя базы данных назначения +tools.targetDatabaseURL=URL базы данных назначения +tools.targetDirectory=Каталог назначения +tools.targetFileName=Имя файла назначения +tools.targetScriptFileName=Имя файла скрипта назначения +tools.traceFileName=Имя trace-файла tree.admin=Администратор tree.current=Текущее значение tree.hashed=Hashed -tree.increment=Увеличить +tree.increment=Приращение tree.indexes=Индексы tree.nonUnique=Неуникальное -tree.sequences=Последовательность +tree.sequences=Последовательности tree.unique=Уникальное tree.users=Пользователи diff --git a/h2/src/main/org/h2/server/web/res/_text_sk.prop b/h2/src/main/org/h2/server/web/res/_text_sk.prop index bbfac846f9..a4f11dba77 100644 --- a/h2/src/main/org/h2/server/web/res/_text_sk.prop +++ b/h2/src/main/org/h2/server/web/res/_text_sk.prop @@ -25,6 +25,7 @@ adminLoginCancel=Zrušiť adminLoginOk=OK 
adminLogout=Odhlásiť adminOthers=Povoliť pripojenia z iných počítačov +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Číslo portu adminPortWeb=Číslo portu Web servera adminRestart=Zmeny sa vykonajú po reštarte servera @@ -98,6 +99,9 @@ toolbar.autoComplete=Auto dokončovanie toolbar.autoComplete.full=Plné toolbar.autoComplete.normal=Normálne toolbar.autoComplete.off=Vypnuté +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Vypnuté +toolbar.autoSelect.on=#On toolbar.cancelStatement=Zrušiť aktuálny príkaz toolbar.clear=Vyčistiť toolbar.commit=Commit (schváliť) diff --git a/h2/src/main/org/h2/server/web/res/_text_tr.prop b/h2/src/main/org/h2/server/web/res/_text_tr.prop index f74ce9296d..80aed9ffbc 100644 --- a/h2/src/main/org/h2/server/web/res/_text_tr.prop +++ b/h2/src/main/org/h2/server/web/res/_text_tr.prop @@ -25,6 +25,7 @@ adminLoginCancel=İptal et adminLoginOk=Tamam adminLogout=Bitir adminOthers=Başka bilgisayarlardan, veri tabanına bağlanma izni ver +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Port adminPortWeb=Web-Server Port adminRestart=Değişiklikler veri tabanı hizmetçisinin yeniden başlatılmasıyla etkinlik kazanacak. 
@@ -98,6 +99,9 @@ toolbar.autoComplete=Auto-Complete toolbar.autoComplete.full=Hepsi toolbar.autoComplete.normal=Normal toolbar.autoComplete.off=Kapalı +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Kapalı +toolbar.autoSelect.on=#On toolbar.cancelStatement=Yürütülen işlemi iptal et toolbar.clear=Temizle toolbar.commit=Degişiklikleri kaydet diff --git a/h2/src/main/org/h2/server/web/res/_text_uk.prop b/h2/src/main/org/h2/server/web/res/_text_uk.prop index 4bf458e8c5..3c71e5d54c 100644 --- a/h2/src/main/org/h2/server/web/res/_text_uk.prop +++ b/h2/src/main/org/h2/server/web/res/_text_uk.prop @@ -25,6 +25,7 @@ adminLoginCancel=Відмінити adminLoginOk=OK adminLogout=Завершення сеансу adminOthers=Дозволити під'єднання з інших копм'ютерів +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=Номер порта adminPortWeb=Номер порта веб сервера adminRestart=Зміни вступлять в силу після перезавантаження сервера. @@ -98,6 +99,9 @@ toolbar.autoComplete=Авто доповнення toolbar.autoComplete.full=Повне toolbar.autoComplete.normal=Нормальне toolbar.autoComplete.off=Виключене +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=Виключене +toolbar.autoSelect.on=#On toolbar.cancelStatement=Відмінити поточний запит toolbar.clear=Очистити toolbar.commit=Підтвердити зміни diff --git a/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop b/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop index 7570002856..5dabdcd54d 100644 --- a/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop +++ b/h2/src/main/org/h2/server/web/res/_text_zh_cn.prop @@ -25,6 +25,7 @@ adminLoginCancel=取消 adminLoginOk=确认 adminLogout=注销 adminOthers=允许来自其他远程计算机的连接 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=端口号 adminPortWeb=Web 服务器端口号 adminRestart=更新配置将在重启服务器后生效. 
@@ -98,6 +99,9 @@ toolbar.autoComplete=自动完成 toolbar.autoComplete.full=完全 toolbar.autoComplete.normal=正常 toolbar.autoComplete.off=关闭 +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=关闭 +toolbar.autoSelect.on=#On toolbar.cancelStatement=取消当前的执行语句 toolbar.clear=清除 toolbar.commit=提交 diff --git a/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop b/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop index 642a80479a..6e726c8271 100644 --- a/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop +++ b/h2/src/main/org/h2/server/web/res/_text_zh_tw.prop @@ -25,6 +25,7 @@ adminLoginCancel=取消 adminLoginOk=確定 adminLogout=登出 adminOthers=允許來自其他電腦的連接 +adminWebExternalNames=#External names or addresses of this server (comma-separated) adminPort=通訊埠 adminPortWeb=Web 伺服器的通訊埠 adminRestart=伺服器重新啟動後修改才會生效. @@ -98,6 +99,9 @@ toolbar.autoComplete=自動完成 (complete) toolbar.autoComplete.full=完整 toolbar.autoComplete.normal=標準 toolbar.autoComplete.off=關閉 +toolbar.autoSelect=#Auto select +toolbar.autoSelect.off=關閉 +toolbar.autoSelect.on=#On toolbar.cancelStatement=取消目前的SQL述句 toolbar.clear=清除 toolbar.commit=提交 diff --git a/h2/src/main/org/h2/server/web/res/admin.jsp b/h2/src/main/org/h2/server/web/res/admin.jsp index 0859d63c14..f9b3ae2337 100644 --- a/h2/src/main/org/h2/server/web/res/admin.jsp +++ b/h2/src/main/org/h2/server/web/res/admin.jsp @@ -1,7 +1,7 @@ @@ -15,7 +15,7 @@ Initial Developer: H2 Group ${text.adminTitle}

    - ${text.adminLogout} + ${text.adminLogout}


    @@ -39,6 +39,10 @@ Initial Developer: H2 Group ${text.adminOthers}

    +

    + ${text.adminWebExternalNames}:
    + +

    ${text.adminConnection}

    diff --git a/h2/src/main/org/h2/server/web/res/adminLogin.jsp b/h2/src/main/org/h2/server/web/res/adminLogin.jsp index 034fb79133..4f13e87478 100644 --- a/h2/src/main/org/h2/server/web/res/adminLogin.jsp +++ b/h2/src/main/org/h2/server/web/res/adminLogin.jsp @@ -1,7 +1,7 @@ @@ -10,7 +10,7 @@ Initial Developer: H2 Group - +
    diff --git a/h2/src/main/org/h2/server/web/res/error.jsp b/h2/src/main/org/h2/server/web/res/error.jsp index d7df18dab6..f0f26fe6b5 100644 --- a/h2/src/main/org/h2/server/web/res/error.jsp +++ b/h2/src/main/org/h2/server/web/res/error.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/favicon.ico b/h2/src/main/org/h2/server/web/res/favicon.ico index 6e0f78aeb1..fd5e73a416 100644 Binary files a/h2/src/main/org/h2/server/web/res/favicon.ico and b/h2/src/main/org/h2/server/web/res/favicon.ico differ diff --git a/h2/src/main/org/h2/server/web/res/frame.jsp b/h2/src/main/org/h2/server/web/res/frame.jsp index 190f049db3..224b6a3f60 100644 --- a/h2/src/main/org/h2/server/web/res/frame.jsp +++ b/h2/src/main/org/h2/server/web/res/frame.jsp @@ -1,7 +1,7 @@ diff --git a/h2/src/main/org/h2/server/web/res/header.jsp b/h2/src/main/org/h2/server/web/res/header.jsp index f4cf628b68..5edb39866b 100644 --- a/h2/src/main/org/h2/server/web/res/header.jsp +++ b/h2/src/main/org/h2/server/web/res/header.jsp @@ -1,7 +1,7 @@ @@ -128,6 +128,18 @@ Initial Developer: H2 Group +   + + '")) { + rs.next(); + String result = rs.getString(1); + writer.println(result); + } } -// ResultSet rsDbs = conn.createStatement().executeQuery( -// "SELECT DB RESULTS GROUP BY DBID, DB ORDER BY DBID"); -// while(rsDbs.next()) { -// writer.println("" + rsDbs.getString(1) + ""); -// } -// ResultSet rs = conn.createStatement().executeQuery( -// "SELECT TEST, UNIT FROM RESULTS " + -// "GROUP BY TESTID, TEST, UNIT ORDER BY TESTID"); -// while(rs.next()) { -// writer.println("" + rs.getString(1) + ""); -// writer.println("" + rs.getString(2) + ""); -// ResultSet rsRes = conn.createStatement().executeQuery( -// "SELECT RESULT FROM RESULTS WHERE TESTID=? 
ORDER BY DBID"); -// -// -// } - -// PrintWriter writer = -// new PrintWriter(new FileWriter("benchmark.html")); -// writer.println(""); -// for(int j=0; j" + db.getName() + ""); -// } -// writer.println(""); -// for(int i=0; i"); -// writer.println(""); -// for(int j=0; j" + v[2] + ""); -// } -// writer.println(""); -// } -// writer.println("
    Test CaseUnit
    " + res[0] + "" + res[1] + "
    "); - if (exit) { System.exit(0); } @@ -231,18 +176,21 @@ private void testAll(ArrayList dbs, ArrayList tests, db.startServer(); Connection conn = db.openNewConnection(); DatabaseMetaData meta = conn.getMetaData(); - System.out.println(" " + meta.getDatabaseProductName() + " " + - meta.getDatabaseProductVersion()); + System.out.println("Database: " + meta.getDatabaseProductName() + " " + meta.getDatabaseProductVersion()); + System.out.println("Driver: " + meta.getDriverName() + " " + meta.getDriverVersion()); runDatabase(db, tests, 1); runDatabase(db, tests, 1); + db.reset(); collect = true; runDatabase(db, tests, size); conn.close(); db.log("Executed statements", "#", db.getExecutedStatements()); db.log("Total time", "ms", db.getTotalTime()); + System.out.println("Total time: " + db.getTotalTime() + " ms"); int statPerSec = (int) (db.getExecutedStatements() * 1000L / db.getTotalTime()); - db.log("Statements per second", "#", statPerSec); + db.log("Statements per second", "#/s", statPerSec); System.out.println("Statements per second: " + statPerSec); + System.out.println("GC overhead: " + (100 * db.getTotalGCTime() / db.getTotalTime()) + "%"); collect = false; db.stopServer(); } diff --git a/h2/src/test/org/h2/test/bench/TestScalability.java b/h2/src/test/org/h2/test/bench/TestScalability.java index 0c019fc4e7..998cde64a5 100644 --- a/h2/src/test/org/h2/test/bench/TestScalability.java +++ b/h2/src/test/org/h2/test/bench/TestScalability.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.bench; @@ -15,10 +15,11 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.List; +import java.util.Properties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; +import org.h2.test.bench.Database.Measurement; /** * Used to compare scalability between the old engine and the new MVStore @@ -42,7 +43,7 @@ public class TestScalability implements Database.DatabaseTest { * @param args the command line parameters */ public static void main(String... args) throws Exception { - new TestScalability().test(); + new TestScalability().test(args); } private static Connection getResultConnection() throws SQLException { @@ -51,103 +52,118 @@ private static Connection getResultConnection() throws SQLException { } private static void openResults() throws SQLException { - Connection conn = null; - Statement stat = null; - try { - conn = getResultConnection(); - stat = conn.createStatement(); + try (Connection conn = getResultConnection(); + Statement stat = conn.createStatement()) { stat.execute( "CREATE TABLE IF NOT EXISTS RESULTS(TESTID INT, " + - "TEST VARCHAR, UNIT VARCHAR, DBID INT, " + - "DB VARCHAR, RESULT VARCHAR)"); - } finally { - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); + "TEST VARCHAR, UNIT VARCHAR, DBID INT, " + + "DB VARCHAR, TCNT INT, RESULT VARCHAR)"); } } - private void test() throws Exception { - final boolean exit = false; - FileUtils.deleteRecursive("data", true); - final String out = "benchmark.html"; - final int size = 400; - - ArrayList dbs = new ArrayList(); - int id = 1; - final String h2Url = "jdbc:h2:./data/test;" + - "LOCK_TIMEOUT=10000;LOCK_MODE=3"; - dbs.add(createDbEntry(id++, "H2", 1, h2Url)); - dbs.add(createDbEntry(id++, "H2", 10, h2Url)); - dbs.add(createDbEntry(id++, "H2", 20, h2Url)); - dbs.add(createDbEntry(id++, "H2", 30, h2Url)); - 
dbs.add(createDbEntry(id++, "H2", 40, h2Url)); - dbs.add(createDbEntry(id++, "H2", 50, h2Url)); - dbs.add(createDbEntry(id++, "H2", 100, h2Url)); - - final String mvUrl = "jdbc:h2:./data/mvTest;" + - "LOCK_TIMEOUT=10000;MV_STORE=TRUE"; - dbs.add(createDbEntry(id++, "MV", 1, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 10, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 20, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 30, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 40, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 50, mvUrl)); - dbs.add(createDbEntry(id++, "MV", 100, mvUrl)); - - final BenchB test = new BenchB(); + private void test(String... args) throws Exception { + int dbId = -1; + boolean exit = false; + String out = "scalability.html"; + int size = 400; + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + if ("-db".equals(arg)) { + dbId = Integer.parseInt(args[++i]); + } else if ("-init".equals(arg)) { + FileUtils.deleteRecursive("data", true); + } else if ("-out".equals(arg)) { + out = args[++i]; + } else if ("-trace".equals(arg)) { + trace = true; + } else if ("-exit".equals(arg)) { + exit = true; + } else if ("-size".equals(arg)) { + size = Integer.parseInt(args[++i]); + } + } + + Properties prop = loadProperties(); + + ArrayList dbs = new ArrayList<>(); + for (int id = 0; id < 100; id++) { + if (dbId != -1 && id != dbId) { + continue; + } + String dbString = prop.getProperty("db" + id); + if (dbString != null) { + Database db = Database.parse(this, id, dbString, prop); + if (db != null) { + int runCount = 8; + String valueStr = prop.getProperty("runCount" + id); + if (valueStr != null) { + runCount = Integer.parseInt(valueStr); + } + dbs.add(new RunSequence(db, runCount)); + } + } + } + + BenchB test = new BenchB() { + // Since we focus on scalability here, lets emphasize multi-threaded + // part of the test (transactions) and minimize impact of the init. 
+ @Override + protected int getTransactionsPerClient(int size) { + return size * 8; + } + }; testAll(dbs, test, size); - collect = false; - ArrayList results = dbs.get(0).getResults(); - Connection conn = null; - PreparedStatement prep = null; - Statement stat = null; - PrintWriter writer = null; - try { + List results = dbs.get(0).results.get(0); + try (Connection conn = getResultConnection()) { openResults(); - conn = getResultConnection(); - stat = conn.createStatement(); - prep = conn.prepareStatement( + try (PreparedStatement prep = conn.prepareStatement( "INSERT INTO RESULTS(TESTID, " + - "TEST, UNIT, DBID, DB, RESULT) VALUES(?, ?, ?, ?, ?, ?)"); - for (int i = 0; i < results.size(); i++) { - Object[] res = results.get(i); - prep.setInt(1, i); - prep.setString(2, res[0].toString()); - prep.setString(3, res[1].toString()); - for (Database db : dbs) { - prep.setInt(4, db.getId()); - prep.setString(5, db.getName()); - Object[] v = db.getResults().get(i); - prep.setString(6, v[2].toString()); - prep.execute(); + "TEST, UNIT, DBID, DB, TCNT, RESULT) VALUES(?, ?, ?, ?, ?, ?, ?)")) { + for (int i = 0; i < results.size(); i++) { + Measurement res = results.get(i); + prep.setInt(1, i); + prep.setString(2, res.name); + prep.setString(3, res.unit); + for (RunSequence runSequence : dbs) { + Database db = runSequence.database; + int threadCount = 1; + for (List result : runSequence.results) { + if (result.size() > i) { + Measurement measurement = result.get(i); + prep.setInt(4, db.getId()); + prep.setString(5, db.getName()); + prep.setInt(6, threadCount); + prep.setString(7, String.valueOf(measurement.value)); + prep.execute(); + threadCount <<= 1; + } + } + } } } - writer = new PrintWriter(new FileWriter(out)); - ResultSet rs = stat.executeQuery( - "CALL '" + - "' " + - "|| SELECT GROUP_CONCAT('' " + - "ORDER BY DBID SEPARATOR '') FROM " + - "(SELECT DISTINCT DBID, DB FROM RESULTS)" + - "|| '' || CHAR(10) " + - "|| SELECT GROUP_CONCAT('' || ( " + - "SELECT 
GROUP_CONCAT('' " + - "ORDER BY DBID SEPARATOR '') FROM RESULTS R2 WHERE " + - "R2.TESTID = R1.TESTID) || '' " + - "ORDER BY TESTID SEPARATOR CHAR(10)) FROM " + - "(SELECT DISTINCT TESTID, TEST, UNIT FROM RESULTS) R1" + - "|| '
    Test CaseUnit' || DB || '
    ' || " + - "TEST || '' || UNIT || '' || RESULT || '
    '"); - rs.next(); - String result = rs.getString(1); - writer.println(result); - } finally { - JdbcUtils.closeSilently(prep); - JdbcUtils.closeSilently(stat); - JdbcUtils.closeSilently(conn); - IOUtils.closeSilently(writer); + try (Statement stat = conn.createStatement(); + PrintWriter writer = new PrintWriter(new FileWriter(out)); + ResultSet rs = stat.executeQuery( + "CALL '" + + "' " + + "|| (SELECT GROUP_CONCAT('' " + + "ORDER BY TCNT SEPARATOR '') FROM " + + "(SELECT TCNT, COUNT(*) COLSPAN FROM (SELECT DISTINCT DB, TCNT FROM RESULTS) GROUP BY TCNT))" + + "|| '' || CHAR(10) " + + "|| '' || (SELECT GROUP_CONCAT('' ORDER BY TCNT, DB SEPARATOR '')" + + " FROM (SELECT DISTINCT DB, TCNT FROM RESULTS)) || '' || CHAR(10) " + + "|| (SELECT GROUP_CONCAT('' || ( " + + "SELECT GROUP_CONCAT('' ORDER BY TCNT,DB SEPARATOR '')" + + " FROM RESULTS R2 WHERE R2.TESTID = R1.TESTID) || '' " + + "ORDER BY TESTID SEPARATOR CHAR(10)) FROM " + + "(SELECT DISTINCT TESTID, TEST, UNIT FROM RESULTS) R1)" + + "|| '
    Test CaseUnit' || TCNT || '
    ' || DB || '
    ' || TEST || '' || UNIT || '' || RESULT || '
    '")) { + rs.next(); + String result = rs.getString(1); + writer.println(result); + } } if (exit) { @@ -155,52 +171,67 @@ private void test() throws Exception { } } - private Database createDbEntry(int id, String namePrefix, - int threadCount, String url) { - Database db = Database.parse(this, id, namePrefix + "(" + threadCount + - "threads), org.h2.Driver, " + url + ", sa, sa", threadCount); - return db; - } + private void testAll(ArrayList runSequences, BenchB test, int size) throws Exception { + Database lastDb = null; + Connection conn = null; + for (RunSequence runSequence : runSequences) { + Database db = runSequence.database; + try { + if (lastDb != null) { + conn.close(); + lastDb.stopServer(); + Thread.sleep(1000); + // calls garbage collection + TestBase.getMemoryUsed(); + } + String dbName = db.getName(); + System.out.println("------------------"); + System.out.println("Testing the performance of " + dbName); + db.startServer(); + // hold one connection open during the whole test to keep database up + conn = db.openNewConnection(); + test.init(db, size); + + for (int runNo = 0, threadCount = 1; runNo < runSequence.runCount; runNo++, threadCount <<= 1) { + System.out.println("Testing the performance of " + dbName + + " (" + threadCount + " threads)"); + DatabaseMetaData meta = conn.getMetaData(); + System.out.println(" " + meta.getDatabaseProductName() + " " + + meta.getDatabaseProductVersion()); + test.setThreadCount(threadCount); - private void testAll(ArrayList dbs, BenchB test, int size) - throws Exception { - for (int i = 0; i < dbs.size(); i++) { - if (i > 0) { - Thread.sleep(1000); + test.runTest(); + test.runTest(); + db.reset(); + collect = true; + test.runTest(); + + int executedStatements = db.getExecutedStatements(); + int totalTime = db.getTotalTime(); + int totalGCTime = db.getTotalGCTime(); + db.log("Executed statements", "#", executedStatements); + db.log("Total time", "ms", totalTime); + int statPerSec = (int) (executedStatements * 
1000L / totalTime); + db.log("Statements per second", "#/s", statPerSec); + collect = false; + System.out.println("Statements per second: " + statPerSec); + System.out.println("GC overhead: " + (100 * totalGCTime / totalTime) + "%"); + ArrayList measurements = db.reset(); + runSequence.results.add(measurements); + } + } catch (Throwable ex) { + ex.printStackTrace(); + } finally { + lastDb = db; } - // calls garbage collection - TestBase.getMemoryUsed(); - Database db = dbs.get(i); - System.out.println("Testing the performance of " + db.getName()); - db.startServer(); - Connection conn = db.openNewConnection(); - DatabaseMetaData meta = conn.getMetaData(); - System.out.println(" " + meta.getDatabaseProductName() + " " + - meta.getDatabaseProductVersion()); - runDatabase(db, test, 1); - runDatabase(db, test, 1); - collect = true; - runDatabase(db, test, size); + } + if (lastDb != null) { conn.close(); - db.log("Executed statements", "#", db.getExecutedStatements()); - db.log("Total time", "ms", db.getTotalTime()); - int statPerSec = (int) (db.getExecutedStatements() * - 1000L / db.getTotalTime()); - db.log("Statements per second", "#", statPerSec); - System.out.println("Statements per second: " + statPerSec); - collect = false; - db.stopServer(); + lastDb.stopServer(); } } - private static void runDatabase(Database db, BenchB bench, int size) - throws Exception { - bench.init(db, size); - bench.setThreadCount(db.getThreadsCount()); - bench.runTest(); - } - /** * Print a message to system out if trace is enabled. 
* @@ -217,4 +248,16 @@ public void trace(String s) { public boolean isCollect() { return collect; } + + private static final class RunSequence + { + final Database database; + final int runCount; + final List> results = new ArrayList<>(); + + public RunSequence(Database dataBase, int runCount) { + this.database = dataBase; + this.runCount = runCount; + } + } } diff --git a/h2/src/test/org/h2/test/bench/package.html b/h2/src/test/org/h2/test/bench/package.html index f164c9b260..e33caee6cf 100644 --- a/h2/src/test/org/h2/test/bench/package.html +++ b/h2/src/test/org/h2/test/bench/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/bench/test.properties b/h2/src/test/org/h2/test/bench/test.properties index 1239af1a7e..f81e595fe3 100644 --- a/h2/src/test/org/h2/test/bench/test.properties +++ b/h2/src/test/org/h2/test/bench/test.properties @@ -1,30 +1,29 @@ db1 = H2, org.h2.Driver, jdbc:h2:./data/test, sa, sa -#xdb1 = H2, org.h2.Driver, jdbc:h2:./data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3;DEFAULT_TABLE_ENGINE=org.h2.mvstore.db.MVTableEngine, sa, sa - -#xdb1 = H2, org.h2.Driver, jdbc:h2:./data/test;LOG=1;LOCK_TIMEOUT=10000;LOCK_MODE=3;ACCESS_MODE_DATA=rwd, sa, sa -#xdb2 = H2 (nio), org.h2.Driver, jdbc:h2:nio:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa -#xdb3 = H2 (nioMapped), org.h2.Driver, jdbc:h2:nioMapped:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa -#xdb2 = H2 (MVCC), org.h2.Driver, jdbc:h2:./data/test_mvcc;MVCC=TRUE, sa, sa -#xdb2 = H2 (XTEA), org.h2.Driver, jdbc:h2:./data/test_xtea;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=XTEA, sa, sa 123 -#xdb3 = H2 (AES), org.h2.Driver, jdbc:h2:./data/test_aes;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=AES, sa, sa 123 -#xdb4 = H2, org.h2.Driver, jdbc:h2:./data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3;write_mode_log=rws;write_delay=0, sa, sa -#xdb5 = H2_PG, org.postgresql.Driver, jdbc:postgresql://localhost:5435/h2test, sa, sa - -db2 = HSQLDB, org.hsqldb.jdbcDriver, 
jdbc:hsqldb:data/test;hsqldb.default_table_type=cached;sql.enforce_size=true, sa -db3 = Derby, org.apache.derby.jdbc.EmbeddedDriver, jdbc:derby:data/derby;create=true, sa, sa - -db4 = H2 (Server), org.h2.Driver, jdbc:h2:tcp://localhost/./data/testServer, sa, sa -db5 = HSQLDB, org.hsqldb.jdbcDriver, jdbc:hsqldb:hsql://localhost/xdb, sa -db6 = Derby, org.apache.derby.jdbc.ClientDriver, jdbc:derby://localhost/data/derbyServer;create=true, sa, sa -db7 = PostgreSQL, org.postgresql.Driver, jdbc:postgresql:test, sa, sa -db8 = MySQL, com.mysql.jdbc.Driver, jdbc:mysql://localhost/test?jdbcCompliantTruncation=false, sa, sa - -#db2 = MSSQLServer, com.microsoft.jdbc.sqlserver.SQLServerDriver, jdbc:microsoft:sqlserver://127.0.0.1:1433;DatabaseName=test, test, test -#db2 = Oracle, oracle.jdbc.driver.OracleDriver, jdbc:oracle:thin:@localhost:1521:XE, client, client -#db2 = Firebird, org.firebirdsql.jdbc.FBDriver, jdbc:firebirdsql:localhost:c:/temp/firebird/test, sysdba, masterkey -#db2 = DB2, COM.ibm.db2.jdbc.net.DB2Driver, jdbc:db2://localhost/test, test, test -#db2 = OneDollarDB, in.co.daffodil.db.jdbc.DaffodilDBDriver, jdbc:daffodilDB_embedded:school;path=C:/temp;create=true, sa +#db1 = H2 (forced), org.h2.Driver, jdbc:h2:./data/test;LOG=1;LOCK_TIMEOUT=10000;LOCK_MODE=3;ACCESS_MODE_DATA=rwd, sa, sa +#db1 = H2 (nio), org.h2.Driver, jdbc:h2:nio:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa +#db1 = H2 (nioMapped), org.h2.Driver, jdbc:h2:nioMapped:data/test;LOCK_TIMEOUT=10000;LOCK_MODE=3, sa, sa +#db1 = H2 (XTEA), org.h2.Driver, jdbc:h2:./data/test_xtea;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=XTEA, sa, sa 123 +#db1 = H2 (AES), org.h2.Driver, jdbc:h2:./data/test_aes;LOCK_TIMEOUT=10000;LOCK_MODE=3;CIPHER=AES, sa, sa 123 + +db2 = HSQLDB, org.hsqldb.jdbc.JDBCDriver, jdbc:hsqldb:file:./data/test;hsqldb.default_table_type=cached;hsqldb.write_delay_millis=1000;shutdown=true, sa +db3 = Derby, org.apache.derby.jdbc.AutoloadedDriver, jdbc:derby:data/derby;create=true, sa, sa + +db4 = H2 
(C/S), org.h2.Driver, jdbc:h2:tcp://localhost/./data/testServer, sa, sa +db5 = HSQLDB (C/S), org.hsqldb.jdbcDriver, jdbc:hsqldb:hsql://localhost/xdb, sa +db6 = Derby (C/S), org.apache.derby.jdbc.ClientDriver, jdbc:derby://localhost/data/derbyServer;create=true, sa, sa +db7 = PG (C/S), org.postgresql.Driver, jdbc:postgresql://localhost:5432/test, sa, sa +db8 = MySQL (C/S), com.mysql.cj.jdbc.Driver, jdbc:mysql://localhost:3306/test, sa, sa + +#db9 = MSSQLServer, com.microsoft.jdbc.sqlserver.SQLServerDriver, jdbc:microsoft:sqlserver://127.0.0.1:1433;DatabaseName=test, test, test +#db9 = Oracle, oracle.jdbc.driver.OracleDriver, jdbc:oracle:thin:@localhost:1521:XE, client, client +#db9 = Firebird, org.firebirdsql.jdbc.FBDriver, jdbc:firebirdsql:localhost:test?encoding=UTF8, sa, sa +#db9 = DB2, COM.ibm.db2.jdbc.net.DB2Driver, jdbc:db2://localhost/test, test, test +#db9 = OneDollarDB, in.co.daffodil.db.jdbc.DaffodilDBDriver, jdbc:daffodilDB_embedded:school;path=C:/temp;create=true, sa +#db9 = SQLite, org.sqlite.JDBC, jdbc:sqlite:data/testSQLite.db, sa, sa + +db11 = H2 (mem), org.h2.Driver, jdbc:h2:mem:test;LOCK_MODE=0, sa, sa +db12 = HSQLDB (mem), org.hsqldb.jdbcDriver, jdbc:hsqldb:mem:data/test;hsqldb.tx=mvcc;shutdown=true, sa firebirdsql.datetime = TIMESTAMP postgresql.datetime = TIMESTAMP @@ -37,3 +36,10 @@ test3 = org.h2.test.bench.BenchB test4 = org.h2.test.bench.BenchC size = 5000 + +runCount3 = 4 +runCount5 = 4 +runCount6 = 4 +runCount7 = 7 +runCount8 = 4 +runCount12 = 5 \ No newline at end of file diff --git a/h2/src/test/org/h2/test/coverage/Coverage.java b/h2/src/test/org/h2/test/coverage/Coverage.java index ea134c3494..380baea54a 100644 --- a/h2/src/test/org/h2/test/coverage/Coverage.java +++ b/h2/src/test/org/h2/test/coverage/Coverage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.coverage; @@ -14,7 +14,7 @@ import java.io.Reader; import java.io.Writer; import java.util.ArrayList; -import org.h2.util.New; +import java.util.concurrent.TimeUnit; /** * Tool to instrument java files with profiler calls. The tool can be used for @@ -24,8 +24,8 @@ public class Coverage { private static final String IMPORT = "import " + Coverage.class.getPackage().getName() + ".Profile"; - private final ArrayList files = New.arrayList(); - private final ArrayList exclude = New.arrayList(); + private final ArrayList files = new ArrayList<>(); + private final ArrayList exclude = new ArrayList<>(); private Tokenizer tokenizer; private Writer writer; private Writer data; @@ -127,10 +127,10 @@ private void addDir(String path, int recurse) { private void processAll() { int len = files.size(); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < len; i++) { - long t2 = System.currentTimeMillis(); - if (t2 - time > 1000 || i >= len - 1) { + long t2 = System.nanoTime(); + if (t2 - time > TimeUnit.SECONDS.toNanos(1) || i >= len - 1) { System.out.println((i + 1) + " of " + len + " " + (100 * i / len) + "%"); time = t2; @@ -498,7 +498,7 @@ private void setLine() { private void nextDebug() throws IOException { if (perFunction) { - int i = function.indexOf("("); + int i = function.indexOf('('); String func = i < 0 ? function : function.substring(0, i); String fileLine = file + "." + func + "("; i = file.lastIndexOf('.'); diff --git a/h2/src/test/org/h2/test/coverage/Profile.java b/h2/src/test/org/h2/test/coverage/Profile.java index e0ec2d2930..06d57be0c5 100644 --- a/h2/src/test/org/h2/test/coverage/Profile.java +++ b/h2/src/test/org/h2/test/coverage/Profile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.coverage; @@ -10,6 +10,8 @@ import java.io.FileWriter; import java.io.IOException; import java.io.LineNumberReader; +import java.util.concurrent.TimeUnit; + import org.h2.util.IOUtils; /** @@ -25,26 +27,22 @@ public class Profile extends Thread { private boolean stop; private int maxIndex; private int lastIndex; - private long lastTime; + private long lastTimeNs; private BufferedWriter trace; private Profile() { - LineNumberReader r = null; - try { - r = new LineNumberReader(new FileReader("profile.txt")); + try (LineNumberReader r = new LineNumberReader(new FileReader("profile.txt"))) { while (r.readLine() != null) { // nothing - just count lines } maxIndex = r.getLineNumber(); count = new int[maxIndex]; time = new int[maxIndex]; - lastTime = System.currentTimeMillis(); + lastTimeNs = System.nanoTime(); Runtime.getRuntime().addShutdownHook(this); } catch (Exception e) { e.printStackTrace(); System.exit(1); - } finally { - IOUtils.closeSilently(r); } } @@ -79,7 +77,7 @@ public void run() { */ public static void startCollecting() { MAIN.stop = false; - MAIN.lastTime = System.currentTimeMillis(); + MAIN.lastTimeNs = System.nanoTime(); } /** @@ -110,10 +108,10 @@ private void addVisit(int i) { if (stop) { return; } - long now = System.currentTimeMillis(); + long now = System.nanoTime(); if (TRACE) { if (trace != null) { - int duration = (int) (now - lastTime); + long duration = TimeUnit.NANOSECONDS.toMillis(now - lastTimeNs); try { trace.write(i + "\t" + duration + "\r\n"); } catch (Exception e) { @@ -123,8 +121,8 @@ private void addVisit(int i) { } } count[i]++; - time[lastIndex] += (int) (now - lastTime); - lastTime = now; + time[lastIndex] += (int) TimeUnit.NANOSECONDS.toMillis(now 
- lastTimeNs); + lastTimeNs = now; lastIndex = i; } @@ -195,9 +193,8 @@ private void listTop(String title, int[] list, int max) throws IOException { list[bigIndex] = -(big + 1); index[i] = bigIndex; } - LineNumberReader r = null; - try { - r = new LineNumberReader(new FileReader("profile.txt")); + + try (LineNumberReader r = new LineNumberReader(new FileReader("profile.txt"))) { for (int i = 0; i < maxIndex; i++) { String line = r.readLine(); int k = list[i]; @@ -215,8 +212,6 @@ private void listTop(String title, int[] list, int max) throws IOException { for (int i = 0; i < max; i++) { print(text[i]); } - } finally { - IOUtils.closeSilently(r); } } diff --git a/h2/src/test/org/h2/test/coverage/Tokenizer.java b/h2/src/test/org/h2/test/coverage/Tokenizer.java index 476be3df16..611800f001 100644 --- a/h2/src/test/org/h2/test/coverage/Tokenizer.java +++ b/h2/src/test/org/h2/test/coverage/Tokenizer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.coverage; @@ -8,6 +8,7 @@ import java.io.EOFException; import java.io.IOException; import java.io.Reader; +import java.util.Arrays; /** * Helper class for the java file parser. 
@@ -154,9 +155,7 @@ int nextToken() throws IOException { int i = 0; do { if (i >= chars.length) { - char[] nb = new char[chars.length * 2]; - System.arraycopy(chars, 0, nb, 0, chars.length); - chars = nb; + chars = Arrays.copyOf(chars, chars.length * 2); } chars[i++] = (char) c; c = read(); @@ -221,9 +220,7 @@ int nextToken() throws IOException { } if (i >= chars.length) { - char[] nb = new char[chars.length * 2]; - System.arraycopy(chars, 0, nb, 0, chars.length); - chars = nb; + chars = Arrays.copyOf(chars, chars.length * 2); } chars[i++] = (char) c; } diff --git a/h2/src/test/org/h2/test/coverage/package.html b/h2/src/test/org/h2/test/coverage/package.html index c65e08774f..72a52ae6ed 100644 --- a/h2/src/test/org/h2/test/coverage/package.html +++ b/h2/src/test/org/h2/test/coverage/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java b/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java new file mode 100644 index 0000000000..89a69297fe --- /dev/null +++ b/h2/src/test/org/h2/test/db/AbstractBaseForCommonTableExpressions.java @@ -0,0 +1,103 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; + +import org.h2.test.TestDb; + +/** + * Base class for common table expression tests + */ +public abstract class AbstractBaseForCommonTableExpressions extends TestDb { + + /** + * Test a query. 
+ * + * @param maxRetries the number of times the query is run + * @param expectedRowData the expected result data + * @param expectedColumnNames the expected columns of the result + * @param expectedNumberOfRows the expected number of rows + * @param setupSQL the SQL statement used for setup + * @param withQuery the query + * @param closeAndReopenDatabaseConnectionOnIteration whether the connection + * should be re-opened each time + * @param expectedColumnTypes the expected datatypes of the result + * @param anyOrder whether any order of rows should be allowed. + * If {@code true}, this method may sort expectedRowData. + */ + void testRepeatedQueryWithSetup(int maxRetries, String[] expectedRowData, String[] expectedColumnNames, + int expectedNumberOfRows, String setupSQL, String withQuery, + int closeAndReopenDatabaseConnectionOnIteration, String[] expectedColumnTypes, + boolean anyOrder) throws SQLException { + + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + if (anyOrder) { + Arrays.sort(expectedRowData); + } + ArrayList rowData = new ArrayList<>(); + StringBuilder buf = new StringBuilder(); + + for (int queryRunTries = 1; queryRunTries <= maxRetries; queryRunTries++) { + + Statement stat = conn.createStatement(); + stat.execute(setupSQL); + stat.close(); + + // close and re-open connection for one iteration to make sure the query work + // between connections + if (queryRunTries == closeAndReopenDatabaseConnectionOnIteration) { + conn.close(); + + conn = getConnection("commonTableExpressionQueries"); + } + prep = conn.prepareStatement(withQuery); + + rs = prep.executeQuery(); + for (int columnIndex = 1; columnIndex <= rs.getMetaData().getColumnCount(); columnIndex++) { + + assertNotNull(rs.getMetaData().getColumnLabel(columnIndex)); + assertEquals(expectedColumnNames[columnIndex - 1], rs.getMetaData().getColumnLabel(columnIndex)); + assertEquals( + 
"wrong type of column " + rs.getMetaData().getColumnLabel(columnIndex) + " on iteration #" + + queryRunTries, + expectedColumnTypes[columnIndex - 1], rs.getMetaData().getColumnTypeName(columnIndex)); + } + + rowData.clear(); + while (rs.next()) { + buf.setLength(0); + for (int columnIndex = 1; columnIndex <= rs.getMetaData().getColumnCount(); columnIndex++) { + buf.append('|').append(rs.getString(columnIndex)); + } + rowData.add(buf.toString()); + } + if (anyOrder) { + Collections.sort(rowData); + } + assertEquals(expectedRowData, rowData.toArray(new String[0])); + + rs.close(); + prep.close(); + } + + conn.close(); + deleteDb("commonTableExpressionQueries"); + + } + +} diff --git a/h2/src/test/org/h2/test/db/Db.java b/h2/src/test/org/h2/test/db/Db.java index 6e90d9c5d0..4c0542d68d 100644 --- a/h2/src/test/org/h2/test/db/Db.java +++ b/h2/src/test/org/h2/test/db/Db.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -31,7 +31,7 @@ public class Db { private Connection conn; private Statement stat; private final HashMap prepared = - new HashMap(); + new HashMap<>(); /** * Create a database object using the given connection. 
@@ -86,11 +86,11 @@ public void execute(String sql) { * @return a list of maps */ static List> query(ResultSet rs) throws SQLException { - List> list = new ArrayList>(); + List> list = new ArrayList<>(); ResultSetMetaData meta = rs.getMetaData(); int columnCount = meta.getColumnCount(); while (rs.next()) { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); for (int i = 0; i < columnCount; i++) { map.put(meta.getColumnLabel(i+1), rs.getObject(i+1)); } diff --git a/h2/src/test/org/h2/test/db/TaskDef.java b/h2/src/test/org/h2/test/db/TaskDef.java index 513f4252fb..46a2f15cf9 100644 --- a/h2/src/test/org/h2/test/db/TaskDef.java +++ b/h2/src/test/org/h2/test/db/TaskDef.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -8,6 +8,8 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; +import java.util.Arrays; + import org.h2.test.utils.SelfDestructor; /** @@ -26,7 +28,7 @@ public static void main(String... args) { TaskDef task; try { String className = args[0]; - task = (TaskDef) Class.forName(className).newInstance(); + task = (TaskDef) Class.forName(className).getDeclaredConstructor().newInstance(); System.out.println("running"); } catch (Throwable t) { System.out.println("init error: " + t); @@ -34,9 +36,7 @@ public static void main(String... 
args) { return; } try { - String[] taskArgs = new String[args.length - 1]; - System.arraycopy(args, 0, taskArgs, 0, args.length - 1); - task.run(taskArgs); + task.run(Arrays.copyOf(args, args.length - 1)); } catch (Throwable t) { System.out.println("error: " + t); t.printStackTrace(); diff --git a/h2/src/test/org/h2/test/db/TaskProcess.java b/h2/src/test/org/h2/test/db/TaskProcess.java index 40e8a78656..7fdd01d5c4 100644 --- a/h2/src/test/org/h2/test/db/TaskProcess.java +++ b/h2/src/test/org/h2/test/db/TaskProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -15,9 +15,11 @@ import java.io.OutputStreamWriter; import java.util.ArrayList; import java.util.Arrays; + +import org.h2.test.TestBase; import org.h2.test.utils.SelfDestructor; -import org.h2.util.Task; import org.h2.util.StringUtils; +import org.h2.util.Task; /** * A task that is run as an external process. This class communicates over @@ -47,8 +49,8 @@ public TaskProcess(TaskDef taskDef) { public void start(String... args) { try { String selfDestruct = SelfDestructor.getPropertyString(60); - ArrayList list = new ArrayList(); - list.add("java"); + ArrayList list = new ArrayList<>(); + list.add(TestBase.getJVM()); list.add(selfDestruct); list.add("-cp"); list.add("bin" + File.pathSeparator + "."); @@ -57,9 +59,7 @@ public void start(String... 
args) { if (args != null && args.length > 0) { list.addAll(Arrays.asList(args)); } - String[] procDef = new String[list.size()]; - list.toArray(procDef); - traceOperation("start: " + StringUtils.arrayCombine(procDef, ' ')); + String[] procDef = list.toArray(new String[0]); process = Runtime.getRuntime().exec(procDef); copyInThread(process.getErrorStream(), System.err); reader = new BufferedReader(new InputStreamReader(process.getInputStream())); @@ -70,7 +70,6 @@ public void start(String... args) { "No reply from process, command: " + StringUtils.arrayCombine(procDef, ' ')); } else if (line.startsWith("running")) { - traceOperation("got reply: " + line); } else if (line.startsWith("init error")) { throw new RuntimeException(line); } @@ -130,12 +129,4 @@ public void destroy() { process.destroy(); } - /** - * Trace the operation. Tracing is disabled by default. - * - * @param s the string to print - */ - private void traceOperation(String s) { - // ignore - } } diff --git a/h2/src/test/org/h2/test/db/TestAlter.java b/h2/src/test/org/h2/test/db/TestAlter.java index 0da52e8d56..1d27fdd419 100644 --- a/h2/src/test/org/h2/test/db/TestAlter.java +++ b/h2/src/test/org/h2/test/db/TestAlter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,14 +10,20 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.Collection; import org.h2.api.ErrorCode; +import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; +import org.h2.schema.Sequence; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test ALTER statements. 
*/ -public class TestAlter extends TestBase { +public class TestAlter extends TestDb { private Connection conn; private Statement stat; @@ -28,43 +34,31 @@ public class TestAlter extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("alter"); - conn = getConnection("alter"); + deleteDb(getTestName()); + conn = getConnection(getTestName()); stat = conn.createStatement(); - testAlterTableAlterColumnAsSelfColumn(); + testAlterTableRenameConstraint(); testAlterTableDropColumnWithReferences(); - testAlterTableAlterColumnWithConstraint(); - testAlterTableAlterColumn(); + testAlterTableDropMultipleColumns(); testAlterTableAddColumnIdentity(); testAlterTableDropIdentityColumn(); testAlterTableAddColumnIfNotExists(); testAlterTableAddMultipleColumns(); - testAlterTableAlterColumn2(); testAlterTableAddColumnBefore(); testAlterTableAddColumnAfter(); - testAlterTableModifyColumn(); + testAlterTableAddMultipleColumnsBefore(); + testAlterTableAddMultipleColumnsAfter(); conn.close(); - deleteDb("alter"); - } - - private void testAlterTableAlterColumnAsSelfColumn() throws SQLException { - stat.execute("create table test(id int, name varchar)"); - stat.execute("alter table test alter column id int as id+1"); - stat.execute("insert into test values(1, 'Hello')"); - stat.execute("update test set name='World'"); - ResultSet rs = stat.executeQuery("select * from test"); - rs.next(); - assertEquals(3, rs.getInt(1)); - stat.execute("drop table test"); + deleteDb(getTestName()); } private void testAlterTableDropColumnWithReferences() throws SQLException { - stat.execute("create table parent(id int, b int)"); + stat.execute("create table parent(id int primary key, b int)"); stat.execute("create table child(p int primary key)"); stat.execute("alter table child add foreign key(p) references 
parent(id)"); stat.execute("alter table parent drop column id"); @@ -108,55 +102,53 @@ private void testAlterTableDropColumnWithReferences() throws SQLException { } - /** - * Tests a bug we used to have where altering the name of a column that had - * a check constraint that referenced itself would result in not being able - * to re-open the DB. - */ - private void testAlterTableAlterColumnWithConstraint() throws SQLException { - if (config.memory) { - return; - } - stat.execute("create table test(id int check(id in (1,2)) )"); - stat.execute("alter table test alter id rename to id2"); - // disconnect and reconnect - conn.close(); - conn = getConnection("alter"); - stat = conn.createStatement(); - stat.execute("insert into test values(1)"); - assertThrows(ErrorCode.CHECK_CONSTRAINT_VIOLATED_1, stat). - execute("insert into test values(3)"); + private void testAlterTableDropMultipleColumns() throws SQLException { + stat.execute("create table test(id int, b varchar, c int, d int)"); + stat.execute("alter table test drop column b, c"); + stat.execute("alter table test drop d"); + stat.execute("drop table test"); + // Test-Case: Same as above but using brackets (Oracle style) + stat.execute("create table test(id int, b varchar, c int, d int)"); + stat.execute("alter table test drop column (b, c)"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat). + execute("alter table test drop column b"); + stat.execute("alter table test drop (d)"); + stat.execute("drop table test"); + // Test-Case: Error if dropping all columns + stat.execute("create table test(id int, name varchar, name2 varchar)"); + assertThrows(ErrorCode.CANNOT_DROP_LAST_COLUMN, stat). 
+ execute("alter table test drop column id, name, name2"); + stat.execute("drop table test"); + } + + private void testAlterTableRenameConstraint() throws SQLException { + stat.execute("create table test(id int, name varchar(255))"); + stat.execute("alter table test add constraint x check (id > name)"); + stat.execute("alter table test rename constraint x to x2"); stat.execute("drop table test"); } private void testAlterTableDropIdentityColumn() throws SQLException { + Session iface = ((JdbcConnection) stat.getConnection()).getSession(); + if (!(iface instanceof SessionLocal)) { + return; + } + Collection allSequences = ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences(); stat.execute("create table test(id int auto_increment, name varchar)"); stat.execute("alter table test drop column id"); - ResultSet rs = stat.executeQuery("select * from INFORMATION_SCHEMA.SEQUENCES"); - assertFalse(rs.next()); + assertEquals(0, allSequences.size()); stat.execute("drop table test"); stat.execute("create table test(id int auto_increment, name varchar)"); stat.execute("alter table test drop column name"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.SEQUENCES"); - assertTrue(rs.next()); + assertEquals(1, allSequences.size()); stat.execute("drop table test"); } - private void testAlterTableAlterColumn() throws SQLException { - stat.execute("create table t(x varchar) as select 'x'"); - assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). - execute("alter table t alter column x int"); - stat.execute("drop table t"); - stat.execute("create table t(id identity, x varchar) as select null, 'x'"); - assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). 
- execute("alter table t alter column x int"); - stat.execute("drop table t"); - } - private void testAlterTableAddColumnIdentity() throws SQLException { stat.execute("create table t(x varchar)"); - stat.execute("alter table t add id bigint identity(5, 5) not null"); + stat.execute("alter table t add id bigint generated by default as identity(start with 5 increment by 5)" + + " default on null"); stat.execute("insert into t values (null, null)"); stat.execute("insert into t values (null, null)"); ResultSet rs = stat.executeQuery("select id from t order by id"); @@ -187,6 +179,40 @@ private void testAlterTableAddMultipleColumns() throws SQLException { stat.execute("drop table t"); } + + + // column and field names must be upper-case due to getMetaData sensitivity + private void testAlterTableAddMultipleColumnsBefore() throws SQLException { + stat.execute("create table T(X varchar)"); + stat.execute("alter table T add (Y int, Z int) before X"); + DatabaseMetaData dbMeta = conn.getMetaData(); + ResultSet rs = dbMeta.getColumns(null, null, "T", null); + assertTrue(rs.next()); + assertEquals("Y", rs.getString("COLUMN_NAME")); + assertTrue(rs.next()); + assertEquals("Z", rs.getString("COLUMN_NAME")); + assertTrue(rs.next()); + assertEquals("X", rs.getString("COLUMN_NAME")); + assertFalse(rs.next()); + stat.execute("drop table T"); + } + + // column and field names must be upper-case due to getMetaData sensitivity + private void testAlterTableAddMultipleColumnsAfter() throws SQLException { + stat.execute("create table T(X varchar)"); + stat.execute("alter table T add (Y int, Z int) after X"); + DatabaseMetaData dbMeta = conn.getMetaData(); + ResultSet rs = dbMeta.getColumns(null, null, "T", null); + assertTrue(rs.next()); + assertEquals("X", rs.getString("COLUMN_NAME")); + assertTrue(rs.next()); + assertEquals("Y", rs.getString("COLUMN_NAME")); + assertTrue(rs.next()); + assertEquals("Z", rs.getString("COLUMN_NAME")); + assertFalse(rs.next()); + stat.execute("drop table 
T"); + } + // column and field names must be upper-case due to getMetaData sensitivity private void testAlterTableAddColumnBefore() throws SQLException { stat.execute("create table T(X varchar)"); @@ -215,19 +241,4 @@ private void testAlterTableAddColumnAfter() throws SQLException { stat.execute("drop table T"); } - private void testAlterTableAlterColumn2() throws SQLException { - // ensure that increasing a VARCHAR columns length takes effect because - // we optimize this case - stat.execute("create table t(x varchar(2)) as select 'x'"); - stat.execute("alter table t alter column x varchar(20)"); - stat.execute("insert into t values('Hello')"); - stat.execute("drop table t"); - } - - private void testAlterTableModifyColumn() throws SQLException { - stat.execute("create table t(x int)"); - stat.execute("alter table t modify column x varchar(20)"); - stat.execute("insert into t values('Hello')"); - stat.execute("drop table t"); - } } diff --git a/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java b/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java index edac2ac94f..fa778daf0c 100644 --- a/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java +++ b/h2/src/test/org/h2/test/db/TestAlterSchemaRename.java @@ -1,22 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + /** * Test ALTER SCHEMA RENAME statements. 
*/ -public class TestAlterSchemaRename extends TestBase { +public class TestAlterSchemaRename extends TestDb { private Connection conn; private Statement stat; @@ -27,13 +28,13 @@ public class TestAlterSchemaRename extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("alter"); - conn = getConnection("alter"); + deleteDb(getTestName()); + conn = getConnection(getTestName()); stat = conn.createStatement(); testTryToRenameSystemSchemas(); testSimpleRename(); @@ -41,7 +42,7 @@ public void test() throws Exception { testCrossSchemaViews(); testAlias(); conn.close(); - deleteDb("alter"); + deleteDb(getTestName()); } private void testTryToRenameSystemSchemas() throws SQLException { @@ -63,7 +64,7 @@ private void testSimpleRename() throws SQLException { rs = stat.executeQuery("select * from s2.tab"); assertTrue(rs.next()); assertEquals(3, rs.getInt(1)); - stat.execute("drop schema s2"); + stat.execute("drop schema s2 cascade"); } @@ -93,12 +94,12 @@ private void testCrossSchemaViews() throws SQLException { assertEquals(6, rs.getInt(1)); if (!config.memory) { conn.close(); - conn = getConnection("alter"); + conn = getConnection(getTestName()); stat = conn.createStatement(); stat.executeQuery("select * from s2_new.v1"); } - stat.execute("drop schema s1"); - stat.execute("drop schema s2_new"); + stat.execute("drop schema s1 cascade"); + stat.execute("drop schema s2_new cascade"); } /** @@ -116,11 +117,11 @@ private void testAlias() throws SQLException { assertEquals("4321", rs.getString(1)); if (!config.memory) { conn.close(); - conn = getConnection("alter"); + conn = getConnection(getTestName()); stat = conn.createStatement(); stat.executeQuery("CALL S2.REVERSE('1234')"); } - stat.execute("drop schema s2"); + stat.execute("drop schema s2 cascade"); } } \ No newline at end of file diff 
--git a/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java b/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java new file mode 100644 index 0000000000..568f3c95bd --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestAlterTableNotFound.java @@ -0,0 +1,174 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestAlterTableNotFound extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testWithoutAnyCandidate(); + testWithoutAnyCandidateWhenDatabaseToLower(); + testWithoutAnyCandidateWhenDatabaseToUpper(); + testWithoutAnyCandidateWhenCaseInsensitiveIdentifiers(); + testWithOneCandidate(); + testWithOneCandidateWhenDatabaseToLower(); + testWithOneCandidateWhenDatabaseToUpper(); + testWithOneCandidateWhenCaseInsensitiveIdentifiers(); + testWithTwoCandidates(); + } + + private void testWithoutAnyCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenDatabaseToLower() throws SQLException { + deleteDb(getTestName()); + Connection conn = 
getConnectionWithSettings("DATABASE_TO_LOWER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE T1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenDatabaseToUpper() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_LOWER=FALSE;DATABASE_TO_UPPER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `T1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"T1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithoutAnyCandidateWhenCaseInsensitiveIdentifiers() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT 
AS IDENTITY )"); + try { + stat.execute("ALTER TABLE t1 DROP COLUMN ID"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidateWhenDatabaseToLower() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_LOWER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE t1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE T1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidateWhenDatabaseToUpper() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE t1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidateWhenCaseInsensitiveIdentifiers() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, PAYLOAD INT )"); + stat.execute("ALTER TABLE t1 DROP COLUMN PAYLOAD"); + conn.close(); + deleteDb(getTestName()); + } + + private void testWithTwoCandidates() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnectionWithSettings("DATABASE_TO_UPPER=FALSE"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE Toast ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + stat.execute("CREATE TABLE TOAST ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); 
+ try { + stat.execute("ALTER TABLE toast DROP COLUMN ID"); + fail("Table `toast` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"toast\" not found (candidates are: \"TOAST, Toast\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private Connection getConnectionWithSettings(String settings) throws SQLException { + return getConnection(getTestName() + ";" + settings); + } +} diff --git a/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java b/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java new file mode 100644 index 0000000000..ca65c1470b --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestAnalyzeTableTx.java @@ -0,0 +1,61 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestAnalyzeTableTx extends TestDb { + private static final int C = 10_000; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return !config.networked && !config.big; + } + + @Override + public void test() throws Exception { + deleteDb(getTestName()); + Connection[] connections = new Connection[C]; + try (Connection shared = getConnection(getTestName())) { + Statement statement = shared.createStatement(); + statement.executeUpdate("DROP TABLE IF EXISTS TEST"); + statement.executeUpdate("CREATE TABLE TEST(ID INT PRIMARY KEY)"); + for (int i = 0; i < C; i++) { + Connection c = getConnection(getTestName()); + c.createStatement().executeUpdate("INSERT INTO TEST VALUES (" + i + ')'); + connections[i] = c; + } + try (ResultSet rs = statement.executeQuery("SELECT * FROM TEST")) { + for (int i = 0; i < C; i++) { + if (!rs.next()) + throw new Exception("next"); + if (rs.getInt(1) != i) + throw new Exception(Integer.toString(i)); + } + } + } finally { + for (Connection connection : connections) { + if (connection != null) { + try { connection.close(); } catch (Throwable ignore) {/**/} + } + } + } + } +} diff --git a/h2/src/test/org/h2/test/db/TestAutoRecompile.java b/h2/src/test/org/h2/test/db/TestAutoRecompile.java index a0a0595897..e7fb639154 100644 --- a/h2/src/test/org/h2/test/db/TestAutoRecompile.java +++ b/h2/src/test/org/h2/test/db/TestAutoRecompile.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,11 +12,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests if prepared statements are re-compiled when required. 
*/ -public class TestAutoRecompile extends TestBase { +public class TestAutoRecompile extends TestDb { /** * Run just this test. @@ -24,7 +25,7 @@ public class TestAutoRecompile extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestBackup.java b/h2/src/test/org/h2/test/db/TestBackup.java index 34e1e53464..31801b20a6 100644 --- a/h2/src/test/org/h2/test/db/TestBackup.java +++ b/h2/src/test/org/h2/test/db/TestBackup.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,11 +9,13 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import org.h2.api.DatabaseEventListener; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Backup; import org.h2.tools.Restore; import org.h2.util.Task; @@ -21,7 +23,7 @@ /** * Test for the BACKUP SQL statement. */ -public class TestBackup extends TestBase { +public class TestBackup extends TestDb { /** * Run just this test. @@ -29,14 +31,19 @@ public class TestBackup extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { testConcurrentBackup(); testBackupRestoreLobStatement(); testBackupRestoreLob(); @@ -50,7 +57,7 @@ private void testConcurrentBackup() throws SQLException { return; } deleteDb("backup"); - String url = getURL("backup;multi_threaded=true", true); + String url = getURL("backup", true); Connection conn = getConnection(url); final Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); @@ -64,7 +71,7 @@ private void testConcurrentBackup() throws SQLException { @Override public void call() throws Exception { while (!stop) { - if (System.currentTimeMillis() < updateEnd.get()) { + if (System.nanoTime() < updateEnd.get()) { stat.execute("update test set name = 'Hallo'"); stat1.execute("checkpoint"); stat.execute("update test set name = 'Hello'"); @@ -82,7 +89,7 @@ public void call() throws Exception { Statement stat2 = conn2.createStatement(); task.execute(); for (int i = 0; i < 10; i++) { - updateEnd.set(System.currentTimeMillis() + 2000); + updateEnd.set(System.nanoTime() + TimeUnit.SECONDS.toNanos(2)); stat2.execute("backup to '"+getBaseDir()+"/backup.zip'"); stat2.execute("checkpoint"); Restore.execute(getBaseDir() + "/backup.zip", getBaseDir() + "/t" + i, "backup"); @@ -108,27 +115,7 @@ public void call() throws Exception { public static class BackupListener implements DatabaseEventListener { @Override - public void closingDatabase() { - // ignore - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // ignore - } - - @Override - public void init(String url) { - // ignore - } - - @Override - public void opened() { - // ignore - } - - @Override - public void setProgress(int 
state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { try { Thread.sleep(1); } catch (InterruptedException e) { @@ -182,7 +169,7 @@ private void testBackup() throws SQLException { stat1.execute("create table testlob" + "(id int primary key, b blob, c clob)"); stat1.execute("insert into testlob values" + - "(1, space(10000), repeat('00', 10000))"); + "(1, repeat(char(0), 10000), space(10000))"); conn2 = getConnection("backup"); stat2 = conn2.createStatement(); stat2.execute("insert into test values(3, 'third')"); diff --git a/h2/src/test/org/h2/test/db/TestBigDb.java b/h2/src/test/org/h2/test/db/TestBigDb.java index a0298999aa..a4e35d0b0a 100644 --- a/h2/src/test/org/h2/test/db/TestBigDb.java +++ b/h2/src/test/org/h2/test/db/TestBigDb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,14 +10,16 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Utils; /** * Test for big databases. */ -public class TestBigDb extends TestBase { +public class TestBigDb extends TestDb { /** * Run just this test. @@ -25,17 +27,22 @@ public class TestBigDb extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory) { - return; + return false; } if (config.networked && config.big) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { testLargeTable(); testInsert(); testLeftSummary(); @@ -80,19 +87,19 @@ private void testLargeTable() throws SQLException { + "STATUS_CODE CHAR(3) DEFAULT SECURE_RAND(1)," + "INTRA_STAT_CODE CHAR(12) DEFAULT SECURE_RAND(6)," + "PRD_TITLE CHAR(50) DEFAULT SECURE_RAND(25)," - + "VALID_FROM DATE DEFAULT NOW()," - + "MOD_DATUM DATE DEFAULT NOW())"); + + "VALID_FROM DATE DEFAULT CURRENT_DATE," + + "MOD_DATUM DATE DEFAULT CURRENT_DATE)"); int len = getSize(10, 50000); try { PreparedStatement prep = conn.prepareStatement( "INSERT INTO TEST(PRD_CODE) VALUES('abc' || ?)"); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < len; i++) { if ((i % 1000) == 0) { - long t = System.currentTimeMillis(); - if (t - time > 1000) { + long t = System.nanoTime(); + if (t - time > TimeUnit.SECONDS.toNanos(1)) { time = t; - int free = Utils.getMemoryFree(); + long free = Utils.getMemoryFree(); println("i: " + i + " free: " + free + " used: " + Utils.getMemoryUsed()); } } diff --git a/h2/src/test/org/h2/test/db/TestBigResult.java b/h2/src/test/org/h2/test/db/TestBigResult.java index 967ab4fbb1..bb2e3fbba2 100644 --- a/h2/src/test/org/h2/test/db/TestBigResult.java +++ b/h2/src/test/org/h2/test/db/TestBigResult.java @@ -1,40 +1,55 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; +import java.sql.Blob; +import java.sql.Clob; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import org.h2.message.TraceSystem; import org.h2.store.FileLister; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test for big result sets. */ -public class TestBigResult extends TestBase { +public class TestBigResult extends TestDb { /** * Run just this test. * - * @param a ignored + * @param a + * ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { testLargeSubquery(); + testSortingAndDistinct(); + testLOB(); testLargeUpdateDelete(); testCloseConnectionDelete(); testOrderGroup(); @@ -49,13 +64,11 @@ private void testLargeSubquery() throws SQLException { int len = getSize(1000, 4000); stat.execute("SET MAX_MEMORY_ROWS " + (len / 10)); stat.execute("CREATE TABLE RECOVERY(TRANSACTION_ID INT, SQL_STMT VARCHAR)"); - stat.execute("INSERT INTO RECOVERY " + - "SELECT X, CASE MOD(X, 2) WHEN 0 THEN 'commit' ELSE 'begin' END " + - "FROM SYSTEM_RANGE(1, "+len+")"); - ResultSet rs = stat.executeQuery("SELECT * FROM RECOVERY " + - "WHERE SQL_STMT LIKE 'begin%' AND " + - "TRANSACTION_ID NOT IN(SELECT TRANSACTION_ID FROM RECOVERY " + - "WHERE SQL_STMT='commit' OR SQL_STMT='rollback')"); + stat.execute("INSERT INTO RECOVERY " + "SELECT X, CASE MOD(X, 2) WHEN 0 THEN 'commit' ELSE 'begin' END " + + "FROM SYSTEM_RANGE(1, " + len + ")"); + ResultSet rs = stat.executeQuery("SELECT * FROM RECOVERY " + "WHERE 
SQL_STMT LIKE 'begin%' AND " + + "TRANSACTION_ID NOT IN(SELECT TRANSACTION_ID FROM RECOVERY " + + "WHERE SQL_STMT='commit' OR SQL_STMT='rollback')"); int count = 0, last = 1; while (rs.next()) { assertEquals(last, rs.getInt(1)); @@ -66,6 +79,295 @@ private void testLargeSubquery() throws SQLException { conn.close(); } + private void testSortingAndDistinct() throws SQLException { + deleteDb("bigResult"); + Connection conn = getConnection("bigResult"); + Statement stat = conn.createStatement(); + int count = getSize(1000, 4000); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT NOT NULL)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + for (int i = 0; i < count; i++) { + ps.setInt(1, i); + ps.setInt(2, count - i); + ps.executeUpdate(); + } + // local result + testSortingAndDistinct1(stat, count, count); + // external result + testSortingAndDistinct1(stat, 10, count); + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE1 INT NOT NULL, VALUE2 INT NOT NULL)"); + ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + int partCount = count / 10; + for (int i = 0; i < count; i++) { + ps.setInt(1, i); + int a = i / 10; + int b = i % 10; + ps.setInt(2, partCount - a); + ps.setInt(3, 10 - b); + ps.executeUpdate(); + } + String sql; + /* + * Sorting only + */ + sql = "SELECT VALUE2, VALUE1 FROM (SELECT ID, VALUE2, VALUE1 FROM TEST ORDER BY VALUE2)"; + // local result + testSortingAndDistinct2(stat, sql, count, partCount); + // external result + testSortingAndDistinct2(stat, sql, 10, partCount); + /* + * Distinct only + */ + sql = "SELECT VALUE2, VALUE1 FROM (SELECT DISTINCT ID, VALUE2, VALUE1 FROM TEST)"; + // local result + testSortingAndDistinct2DistinctOnly(stat, sql, count, partCount); + // external result + testSortingAndDistinct2DistinctOnly(stat, sql, 10, partCount); + /* + * Sorting and distinct + */ + sql = "SELECT VALUE2, VALUE1 FROM (SELECT DISTINCT ID, VALUE2, 
VALUE1 FROM TEST ORDER BY VALUE2)"; + // local result + testSortingAndDistinct2(stat, sql, count, partCount); + // external result + testSortingAndDistinct2(stat, sql, 10, partCount); + /* + * One more distinct only + */ + sql = "SELECT VALUE1 FROM (SELECT DISTINCT VALUE1 FROM TEST)"; + // local result + testSortingAndDistinct3DistinctOnly(stat, sql, count, partCount); + // external result + testSortingAndDistinct3DistinctOnly(stat, sql, 1, partCount); + /* + * One more sorting and distinct + */ + sql = "SELECT VALUE1 FROM (SELECT DISTINCT VALUE1 FROM TEST ORDER BY VALUE1)"; + // local result + testSortingAndDistinct3(stat, sql, count, partCount); + // external result + testSortingAndDistinct3(stat, sql, 1, partCount); + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT)"); + ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + for (int i = 0; i < count; i++) { + ps.setInt(1, i); + int j = i / 10; + if (j == 0) { + ps.setNull(2, Types.INTEGER); + } else { + ps.setInt(2, j); + } + ps.executeUpdate(); + } + /* + * Sorting and distinct + */ + sql = "SELECT DISTINCT V FROM TEST ORDER BY V"; + // local result + testSortingAndDistinct4(stat, sql, count, partCount); + // external result + testSortingAndDistinct4(stat, sql, 1, partCount); + /* + * Distinct only + */ + sql = "SELECT DISTINCT V FROM TEST"; + // local result + testSortingAndDistinct4DistinctOnly(stat, sql, count, partCount); + // external result + testSortingAndDistinct4DistinctOnly(stat, sql, 1, partCount); + /* + * Sorting only + */ + sql = "SELECT V FROM TEST ORDER BY V"; + // local result + testSortingAndDistinct4SortingOnly(stat, sql, count, partCount); + // external result + testSortingAndDistinct4SortingOnly(stat, sql, 1, partCount); + conn.close(); + } + + private void testSortingAndDistinct1(Statement stat, int maxRows, int count) throws SQLException { + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + ResultSet rs = stat.executeQuery("SELECT V 
FROM (SELECT DISTINCT ID, V FROM TEST ORDER BY V)"); + for (int i = 1; i <= count; i++) { + assertTrue(rs.next()); + assertEquals(rs.getInt(1), i); + } + assertFalse(rs.next()); + } + + private void testSortingAndDistinct2(Statement stat, String sql, int maxRows, int partCount) throws SQLException { + ResultSet rs; + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + rs = stat.executeQuery(sql); + BitSet set = new BitSet(partCount); + for (int i = 1; i <= 10; i++) { + set.clear(); + for (int j = 1; j <= partCount; j++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + set.set(rs.getInt(2)); + } + assertEquals(partCount + 1, set.nextClearBit(1)); + } + assertFalse(rs.next()); + } + + private void testSortingAndDistinct2DistinctOnly(Statement stat, String sql, int maxRows, int partCount) + throws SQLException { + ResultSet rs; + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + rs = stat.executeQuery(sql); + BitSet set = new BitSet(partCount * 10); + for (int i = 1; i <= 10; i++) { + for (int j = 1; j <= partCount; j++) { + assertTrue(rs.next()); + set.set(rs.getInt(1) * partCount + rs.getInt(2)); + } + } + assertEquals(partCount * 11 + 1, set.nextClearBit(partCount + 1)); + assertFalse(rs.next()); + } + + private void testSortingAndDistinct3(Statement stat, String sql, int maxRows, int partCount) throws SQLException { + ResultSet rs; + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + rs = stat.executeQuery(sql); + for (int i = 1; i <= partCount; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + + private void testSortingAndDistinct3DistinctOnly(Statement stat, String sql, int maxRows, int partCount) + throws SQLException { + ResultSet rs; + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + rs = stat.executeQuery(sql); + BitSet set = new BitSet(partCount); + for (int i = 1; i <= partCount; i++) { + assertTrue(rs.next()); + set.set(rs.getInt(1)); + } + assertEquals(partCount + 1, set.nextClearBit(1)); + 
assertFalse(rs.next()); + } + + private void testSortingAndDistinct4(Statement stat, String sql, int maxRows, int count) throws SQLException { + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + ResultSet rs = stat.executeQuery(sql); + for (int i = 0; i < count; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + if (i == 0) { + assertTrue(rs.wasNull()); + } + } + assertFalse(rs.next()); + } + + private void testSortingAndDistinct4DistinctOnly(Statement stat, String sql, int maxRows, int count) + throws SQLException { + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + ResultSet rs = stat.executeQuery(sql); + BitSet set = new BitSet(); + for (int i = 0; i < count; i++) { + assertTrue(rs.next()); + int v = rs.getInt(1); + if (v == 0) { + assertTrue(rs.wasNull()); + } + assertFalse(set.get(v)); + set.set(v); + } + assertFalse(rs.next()); + assertEquals(count, set.nextClearBit(0)); + } + + private void testSortingAndDistinct4SortingOnly(Statement stat, String sql, int maxRows, int count) + throws SQLException { + stat.execute("SET MAX_MEMORY_ROWS " + maxRows); + ResultSet rs = stat.executeQuery(sql); + for (int i = 0; i < count; i++) { + for (int j = 0; j < 10; j++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + if (i == 0) { + assertTrue(rs.wasNull()); + } + } + } + assertFalse(rs.next()); + } + + private void testLOB() throws SQLException { + if (config.traceLevelFile == TraceSystem.DEBUG) { + // Trace system on this level can throw OOME with such large + // arguments as used in this test. 
+ return; + } + deleteDb("bigResult"); + Connection conn = getConnection("bigResult"); + Statement stat = conn.createStatement(); + stat.execute("SET MAX_MEMORY_ROWS " + 1); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V BLOB NOT NULL)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + int length = 1_000_000; + byte[] data = new byte[length]; + for (int i = 1; i <= 10; i++) { + ps.setInt(1, i); + Arrays.fill(data, (byte) i); + ps.setBytes(2, data); + ps.executeUpdate(); + } + Blob[] blobs = new Blob[10]; + ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); + for (int i = 1; i <= 10; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + blobs[i - 1] = rs.getBlob(2); + } + assertFalse(rs.next()); + rs.close(); + for (int i = 1; i <= 10; i++) { + Blob b = blobs[i - 1]; + byte[] bytes = b.getBytes(1, (int) b.length()); + Arrays.fill(data, (byte) i); + assertEquals(data, bytes); + b.free(); + } + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V CLOB NOT NULL)"); + ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + char[] cdata = new char[length]; + for (int i = 1; i <= 10; i++) { + ps.setInt(1, i); + Arrays.fill(cdata, (char) i); + ps.setString(2, new String(cdata)); + ps.executeUpdate(); + } + Clob[] clobs = new Clob[10]; + rs = stat.executeQuery("SELECT * FROM TEST"); + for (int i = 1; i <= 10; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + clobs[i - 1] = rs.getClob(2); + } + assertFalse(rs.next()); + rs.close(); + for (int i = 1; i <= 10; i++) { + Clob c = clobs[i - 1]; + String string = c.getSubString(1, (int) c.length()); + Arrays.fill(cdata, (char) i); + assertEquals(new String(cdata), string); + c.free(); + } + conn.close(); + } + private void testLargeUpdateDelete() throws SQLException { deleteDb("bigResult"); Connection conn = getConnection("bigResult"); @@ -90,8 +392,7 @@ private void testCloseConnectionDelete() throws 
SQLException { // rs.close(); conn.close(); deleteDb("bigResult"); - ArrayList files = FileLister.getDatabaseFiles(getBaseDir(), - "bigResult", true); + ArrayList files = FileLister.getDatabaseFiles(getBaseDir(), "bigResult", true); if (files.size() > 0) { fail("file not deleted: " + files.get(0)); } @@ -128,15 +429,10 @@ private void testOrderGroup() throws SQLException { Connection conn = getConnection("bigResult"); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("CREATE TABLE TEST(" + - "ID INT PRIMARY KEY, " + - "Name VARCHAR(255), " + - "FirstName VARCHAR(255), " + - "Points INT," + - "LicenseID INT)"); + stat.execute("CREATE TABLE TEST(" + "ID INT PRIMARY KEY, " + "Name VARCHAR(255), " + "FirstName VARCHAR(255), " + + "Points INT," + "LicenseID INT)"); int len = getSize(10, 5000); - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(?, ?, ?, ?, ?)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?, ?, ?, ?)"); for (int i = 0; i < len; i++) { prep.setInt(1, i); prep.setString(2, "Name " + i); @@ -187,8 +483,7 @@ private void testOrderGroup() throws SQLException { prep.setString(2, "" + i / 200); prep.execute(); } - Statement s2 = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, - ResultSet.CONCUR_UPDATABLE); + Statement s2 = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); rs = s2.executeQuery("SELECT NAME FROM DATA"); rs.last(); conn.setAutoCommit(true); diff --git a/h2/src/test/org/h2/test/db/TestCases.java b/h2/src/test/org/h2/test/db/TestCases.java index 9ffe39552b..d9512030bc 100644 --- a/h2/src/test/org/h2/test/db/TestCases.java +++ b/h2/src/test/org/h2/test/db/TestCases.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.StringReader; import java.sql.Connection; @@ -18,16 +19,16 @@ import java.sql.Timestamp; import java.util.List; import java.util.Random; - +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; -import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Various test cases. */ -public class TestCases extends TestBase { +public class TestCases extends TestDb { /** * Run just this test. @@ -35,11 +36,13 @@ public class TestCases extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testMinimalCoveringIndexPlan(); + testMinMaxDirectLookupIndex(); testReferenceLaterTable(); testAutoCommitInDatabaseURL(); testReferenceableIndexUsage(); @@ -54,7 +57,7 @@ public void test() throws Exception { testConvertType(); testSortedSelect(); testMaxMemoryRows(); - testDeleteTop(); + testLikeExpressions(); testUnicode(); testOuterJoin(); testCommentOnColumnWithSchemaEqualDatabase(); @@ -75,6 +78,10 @@ public void test() throws Exception { testDeleteGroup(); testDisconnect(); testExecuteTrace(); + testExplain(); + testExplainAnalyze(); + testDataChangeDeltaTable(); + testGroupSortedReset(); if (config.memory) { return; } @@ -94,7 +101,6 @@ public void test() throws Exception { testDefaultQueryReconnect(); testBigString(); testRenameReconnect(); - testAllSizes(); testCreateDrop(); testPolePos(); testQuick(); @@ -103,7 +109,6 @@ public void test() throws Exception { testDoubleRecovery(); testConstraintReconnect(); testCollation(); - testBinaryCollation(); deleteDb("cases"); } @@ -140,23 +145,19 @@ 
private void testReferenceableIndexUsage() throws SQLException { stat.execute("drop table if exists a, b"); stat.execute("create table a(id int, x int) as select 1, 100"); stat.execute("create index idx1 on a(id, x)"); + stat.execute("alter table a add unique(id)"); stat.execute("create table b(id int primary key, a_id int) as select 1, 1"); stat.execute("alter table b add constraint x " + "foreign key(a_id) references a(id)"); stat.execute("update a set x=200"); - stat.execute("drop table if exists a, b"); + stat.execute("drop table if exists a, b cascade"); conn.close(); } private void testClearSyntaxException() throws SQLException { Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - try { - stat.execute("select t.x, t.x t.y from dual t"); - fail(); - } catch (SQLException e) { - assertEquals("42000", e.getSQLState()); - } + assertThrows(42000, stat).execute("select t.x, t.x t.y from dual t"); conn.close(); } @@ -175,9 +176,9 @@ private void testViewParameters() throws SQLException { Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); stat.execute( - "create view test as select 0 value, 'x' name from dual"); + "create view test as select 0 v, 'x' name from dual"); PreparedStatement prep = conn.prepareStatement( - "select 1 from test where name=? and value=? and value<=?"); + "select 1 from test where name=? and v=? and v<=?"); prep.setString(1, "x"); prep.setInt(2, 0); prep.setInt(3, 1); @@ -267,7 +268,7 @@ private void testDependencies() throws SQLException { stat.execute("create table test(id int primary key)"); assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). 
execute("alter table test alter column id " + - "set default ifnull((select max(id) from test for update)+1, 0)"); + "set default ifnull((select max(id) from test)+1, 0)"); stat.execute("drop table test"); conn.close(); } @@ -278,7 +279,7 @@ private void testConvertType() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test as select cast(0 as dec(10, 2)) x"); ResultSetMetaData meta = stat.executeQuery("select * from test").getMetaData(); - assertEquals(2, meta.getPrecision(1)); + assertEquals(10, meta.getPrecision(1)); assertEquals(2, meta.getScale(1)); stat.execute("alter table test add column y int"); stat.execute("drop table test"); @@ -302,9 +303,9 @@ private void testMaxMemoryRows() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key)"); stat.execute("insert into test values(1), (2)"); - stat.execute("select * from dual where x not in " + + stat.execute("select * from system_range(1, 1) where x not in " + "(select id from test order by id)"); - stat.execute("select * from dual where x not in " + + stat.execute("select * from system_range(1, 1) where x not in " + "(select id from test union select id from test)"); stat.execute("(select id from test order by id) " + "intersect (select id from test order by id)"); @@ -582,7 +583,7 @@ private void testConstraintAlterTable() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - stat.execute("create table parent (pid int)"); + stat.execute("create table parent (pid int primary key)"); stat.execute("create table child (cid int primary key, pid int)"); stat.execute("alter table child add foreign key (pid) references parent(pid)"); stat.execute("alter table child add column c2 int"); @@ -633,12 +634,12 @@ private void testLobDecrypt() throws SQLException { prep.setCharacterStream(2, new StringReader(value), -1); ResultSet rs = 
prep.executeQuery(); rs.next(); - String encrypted = rs.getString(1); + byte[] encrypted = rs.getBytes(1); PreparedStatement prep2 = conn.prepareStatement( "CALL TRIM(CHAR(0) FROM " + "UTF8TOSTRING(DECRYPT('AES', RAWTOHEX(?), ?)))"); prep2.setCharacterStream(1, new StringReader(key), -1); - prep2.setCharacterStream(2, new StringReader(encrypted), -1); + prep2.setBinaryStream(2, new ByteArrayInputStream(encrypted), -1); ResultSet rs2 = prep2.executeQuery(); rs2.first(); String decrypted = rs2.getString(1); @@ -663,12 +664,11 @@ private void testReservedKeywordReconnect() throws SQLException { conn.close(); } - private void testInvalidDatabaseName() throws SQLException { + private void testInvalidDatabaseName() { if (config.memory) { return; } - assertThrows(ErrorCode.INVALID_DATABASE_NAME_1, this). - getConnection("cases/"); + assertThrows(ErrorCode.INVALID_DATABASE_NAME_1, () -> getConnection("cases/")); } private void testReuseSpace() throws SQLException { @@ -821,33 +821,30 @@ private void testDisconnect() throws Exception { } deleteDb("cases"); Connection conn = getConnection("cases"); - final Statement stat = conn.createStatement(); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID IDENTITY)"); for (int i = 0; i < 1000; i++) { stat.execute("INSERT INTO TEST() VALUES()"); } - final SQLException[] stopped = { null }; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - long time = System.currentTimeMillis(); - ResultSet rs = stat.executeQuery("SELECT MAX(T.ID) " + - "FROM TEST T, TEST, TEST, TEST, TEST, " + - "TEST, TEST, TEST, TEST, TEST, TEST"); - rs.next(); - time = System.currentTimeMillis() - time; - TestBase.logError("query was too quick; result: " + - rs.getInt(1) + " time:" + time, null); - } catch (SQLException e) { - stopped[0] = e; - // ok - } + SQLException[] stopped = { null }; + Thread t = new Thread(() -> { + try { + long time = System.nanoTime(); + ResultSet rs = stat.executeQuery("SELECT 
MAX(T.ID) " + + "FROM TEST T, TEST, TEST, TEST, TEST, " + + "TEST, TEST, TEST, TEST, TEST, TEST"); + rs.next(); + time = System.nanoTime() - time; + TestBase.logError("query was too quick; result: " + + rs.getInt(1) + " time:" + TimeUnit.NANOSECONDS.toMillis(time), null); + } catch (SQLException e) { + stopped[0] = e; + // ok } }); t.start(); Thread.sleep(300); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); conn.close(); t.join(5000); if (stopped[0] == null) { @@ -855,8 +852,8 @@ public void run() { } else { assertKnownException(stopped[0]); } - time = System.currentTimeMillis() - time; - if (time > 5000) { + time = System.nanoTime() - time; + if (time > TimeUnit.SECONDS.toNanos(5)) { if (!config.reopen) { fail("closing took " + time); } @@ -885,6 +882,231 @@ private void testExecuteTrace() throws SQLException { conn.close(); } + private void checkExplain(Statement stat, String sql, String expected) throws SQLException { + ResultSet rs = stat.executeQuery(sql); + + assertTrue(rs.next()); + + assertEquals(expected, rs.getString(1)); + } + + private void testExplain() throws SQLException { + deleteDb("cases"); + Connection conn = getConnection("cases"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE ORGANIZATION" + + "(id int primary key, name varchar(100))"); + stat.execute("CREATE TABLE PERSON" + + "(id int primary key, orgId int, name varchar(100), salary int)"); + + checkExplain(stat, "/* bla-bla */ EXPLAIN SELECT ID FROM ORGANIZATION WHERE id = ?", + "SELECT\n" + + " \"ID\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1 */\n" + + "WHERE \"ID\" = ?1"); + + checkExplain(stat, "EXPLAIN SELECT ID FROM ORGANIZATION WHERE id = 1", + "SELECT\n" + + " \"ID\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = 1 */\n" + + "WHERE \"ID\" = 1"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE id = ?", + "SELECT\n" + + " 
\"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PRIMARY_KEY_8: ID = ?1 */\n" + + "WHERE \"ID\" = ?1"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE id = 50", + "SELECT\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PRIMARY_KEY_8: ID = 50 */\n" + + "WHERE \"ID\" = 50"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE salary > ? and salary < ?", + "SELECT\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PERSON.tableScan */\n" + + "WHERE (\"SALARY\" > ?1)\n" + + " AND (\"SALARY\" < ?2)"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE salary > 1000 and salary < 2000", + "SELECT\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PERSON.tableScan */\n" + + "WHERE (\"SALARY\" > 1000)\n" + + " AND (\"SALARY\" < 2000)"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE name = lower(?)", + "SELECT\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " \"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PERSON.tableScan */\n" + + "WHERE \"NAME\" = LOWER(?1)"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON WHERE name = lower('Smith')", + "SELECT\n" + + " \"PUBLIC\".\"PERSON\".\"ID\",\n" + + " \"PUBLIC\".\"PERSON\".\"ORGID\",\n" + + " 
\"PUBLIC\".\"PERSON\".\"NAME\",\n" + + " \"PUBLIC\".\"PERSON\".\"SALARY\"\n" + + "FROM \"PUBLIC\".\"PERSON\"\n" + + " /* PUBLIC.PERSON.tableScan */\n" + + "WHERE \"NAME\" = 'smith'"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON p " + + "INNER JOIN ORGANIZATION o ON p.id = o.id WHERE o.id = ? AND p.salary > ?", + "SELECT\n" + + " \"P\".\"ID\",\n" + + " \"P\".\"ORGID\",\n" + + " \"P\".\"NAME\",\n" + + " \"P\".\"SALARY\",\n" + + " \"O\".\"ID\",\n" + + " \"O\".\"NAME\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\" \"O\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1 */\n" + + " /* WHERE O.ID = ?1\n" + + " */\n" + + "INNER JOIN \"PUBLIC\".\"PERSON\" \"P\"\n" + + " /* PUBLIC.PRIMARY_KEY_8: ID = O.ID */\n" + + " ON 1=1\n" + + "WHERE (\"P\".\"ID\" = \"O\".\"ID\")\n" + + " AND (\"O\".\"ID\" = ?1)\n" + + " AND (\"P\".\"SALARY\" > ?2)"); + + checkExplain(stat, "EXPLAIN SELECT * FROM PERSON p " + + "INNER JOIN ORGANIZATION o ON p.id = o.id WHERE o.id = 10 AND p.salary > 1000", + "SELECT\n" + + " \"P\".\"ID\",\n" + + " \"P\".\"ORGID\",\n" + + " \"P\".\"NAME\",\n" + + " \"P\".\"SALARY\",\n" + + " \"O\".\"ID\",\n" + + " \"O\".\"NAME\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\" \"O\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = 10 */\n" + + " /* WHERE O.ID = 10\n" + + " */\n" + + "INNER JOIN \"PUBLIC\".\"PERSON\" \"P\"\n" + + " /* PUBLIC.PRIMARY_KEY_8: ID = O.ID */\n" + + " ON 1=1\n" + + "WHERE (\"P\".\"ID\" = \"O\".\"ID\")\n" + + " AND (\"O\".\"ID\" = 10)\n" + + " AND (\"P\".\"SALARY\" > 1000)"); + + PreparedStatement pStat = conn.prepareStatement( + "/* bla-bla */ EXPLAIN SELECT ID FROM ORGANIZATION WHERE id = ?"); + + ResultSet rs = pStat.executeQuery(); + + assertTrue(rs.next()); + + assertEquals("SELECT\n" + + " \"ID\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1 */\n" + + "WHERE \"ID\" = ?1", + rs.getString(1)); + + conn.close(); + } + + private void testExplainAnalyze() throws SQLException { + deleteDb("cases"); + Connection conn = 
getConnection("cases"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE ORGANIZATION" + + "(id int primary key, name varchar(100))"); + stat.execute("CREATE TABLE PERSON" + + "(id int primary key, orgId int, name varchar(100), salary int)"); + + stat.execute("INSERT INTO ORGANIZATION VALUES(1, 'org1')"); + stat.execute("INSERT INTO ORGANIZATION VALUES(2, 'org2')"); + + stat.execute("INSERT INTO PERSON VALUES(1, 1, 'person1', 1000)"); + stat.execute("INSERT INTO PERSON VALUES(2, 1, 'person2', 2000)"); + stat.execute("INSERT INTO PERSON VALUES(3, 2, 'person3', 3000)"); + stat.execute("INSERT INTO PERSON VALUES(4, 2, 'person4', 4000)"); + + assertThrows(ErrorCode.PARAMETER_NOT_SET_1, stat, + "/* bla-bla */ EXPLAIN ANALYZE SELECT ID FROM ORGANIZATION WHERE id = ?"); + + PreparedStatement pStat = conn.prepareStatement( + "/* bla-bla */ EXPLAIN ANALYZE SELECT ID FROM ORGANIZATION WHERE id = ?"); + + assertThrows(ErrorCode.PARAMETER_NOT_SET_1, pStat).executeQuery(); + + pStat.setInt(1, 1); + + ResultSet rs = pStat.executeQuery(); + + assertTrue(rs.next()); + + assertEquals("SELECT\n" + + " \"ID\"\n" + + "FROM \"PUBLIC\".\"ORGANIZATION\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1 */\n" + + " /* scanCount: 2 */\n" + + "WHERE \"ID\" = ?1", + rs.getString(1)); + + pStat = conn.prepareStatement("EXPLAIN ANALYZE SELECT * FROM PERSON p " + + "INNER JOIN ORGANIZATION o ON o.id = p.id WHERE o.id = ?"); + + assertThrows(ErrorCode.PARAMETER_NOT_SET_1, pStat).executeQuery(); + + pStat.setInt(1, 1); + + rs = pStat.executeQuery(); + + assertTrue(rs.next()); + + assertEquals("SELECT\n" + + " \"P\".\"ID\",\n" + + " \"P\".\"ORGID\",\n" + + " \"P\".\"NAME\",\n" + + " \"P\".\"SALARY\",\n" + + " \"O\".\"ID\",\n" + + " \"O\".\"NAME\"\n" + + "FROM \"PUBLIC\".\"PERSON\" \"P\"\n" + + " /* PUBLIC.PRIMARY_KEY_8: ID = ?1 */\n" + + " /* scanCount: 2 */\n" + + "INNER JOIN \"PUBLIC\".\"ORGANIZATION\" \"O\"\n" + + " /* PUBLIC.PRIMARY_KEY_D: ID = ?1\n" + + " AND ID = P.ID\n" + + 
" */\n" + + " ON 1=1\n" + + " /* scanCount: 2 */\n" + + "WHERE (\"O\".\"ID\" = ?1)\n" + + " AND (\"O\".\"ID\" = \"P\".\"ID\")", + rs.getString(1)); + + conn.close(); + } + private void testAlterTableReconnect() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); @@ -915,7 +1137,7 @@ private void testAlterTableReconnect() throws SQLException { stat.execute("drop table test"); stat.execute("create table test(id identity)"); stat.execute("insert into test values(1)"); - assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, stat). + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, stat). execute("alter table test alter column id date"); conn.close(); conn = getConnection("cases"); @@ -954,45 +1176,6 @@ private void testCollation() throws SQLException { conn.close(); } - private void testBinaryCollation() throws SQLException { - deleteDb("cases"); - Connection conn = getConnection("cases"); - Statement stat = conn.createStatement(); - ResultSet rs; - - // test the default (SIGNED) - if (Constants.VERSION_MINOR < 4) { - stat.execute("create table bin( x binary(1) );"); - stat.execute("insert into bin(x) values (x'09'),(x'0a'),(x'99'),(x'aa');"); - rs = stat.executeQuery("select * from bin order by x;"); - rs.next(); - assertEquals("99", rs.getString(1)); - rs.next(); - assertEquals("aa", rs.getString(1)); - rs.next(); - assertEquals("09", rs.getString(1)); - rs.next(); - assertEquals("0a", rs.getString(1)); - stat.execute("drop table bin"); - } - - // test UNSIGNED mode - stat.execute("SET BINARY_COLLATION UNSIGNED"); - stat.execute("create table bin( x binary(1) );"); - stat.execute("insert into bin(x) values (x'09'),(x'0a'),(x'99'),(x'aa');"); - rs = stat.executeQuery("select * from bin order by x;"); - rs.next(); - assertEquals("09", rs.getString(1)); - rs.next(); - assertEquals("0a", rs.getString(1)); - rs.next(); - assertEquals("99", rs.getString(1)); - rs.next(); - assertEquals("aa", rs.getString(1)); - - conn.close(); - } - private void 
testPersistentSettings() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); @@ -1059,7 +1242,7 @@ private void testViewReconnect() throws SQLException { conn.close(); conn = getConnection("cases"); stat = conn.createStatement(); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat). execute("select * from abc"); conn.close(); } @@ -1147,7 +1330,7 @@ private void testConstraintReconnect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("drop table if exists parent"); stat.execute("drop table if exists child"); - stat.execute("create table parent(id int)"); + stat.execute("create table parent(id int primary key)"); stat.execute("create table child(c_id int, p_id int, " + "foreign key(p_id) references parent(id))"); stat.execute("insert into parent values(1), (2)"); @@ -1200,7 +1383,7 @@ private void testRenameReconnect() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); conn.createStatement().execute("CREATE TABLE TEST_SEQ" + - "(ID INT IDENTITY, NAME VARCHAR(255))"); + "(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR(255))"); conn.createStatement().execute("CREATE TABLE TEST" + "(ID INT PRIMARY KEY)"); conn.createStatement().execute("ALTER TABLE TEST RENAME TO TEST2"); @@ -1208,8 +1391,8 @@ private void testRenameReconnect() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR, UNIQUE(NAME))"); conn.close(); conn = getConnection("cases"); - conn.createStatement().execute("INSERT INTO TEST_SEQ(NAME) VALUES('Hi')"); - ResultSet rs = conn.createStatement().executeQuery("CALL IDENTITY()"); + ResultSet rs = conn.createStatement().executeQuery( + "SELECT ID FROM FINAL TABLE(INSERT INTO TEST_SEQ(NAME) VALUES('Hi'))"); rs.next(); assertEquals(1, rs.getInt(1)); conn.createStatement().execute("SELECT * FROM TEST2"); @@ -1218,46 +1401,13 @@ private void testRenameReconnect() throws SQLException { 
conn.close(); conn = getConnection("cases"); conn.createStatement().execute("SELECT * FROM TEST_B2"); - conn.createStatement().execute( - "INSERT INTO TEST_SEQ(NAME) VALUES('World')"); - rs = conn.createStatement().executeQuery("CALL IDENTITY()"); + rs = conn.createStatement().executeQuery( + "SELECT ID FROM FINAL TABLE(INSERT INTO TEST_SEQ(NAME) VALUES('World'))"); rs.next(); assertEquals(2, rs.getInt(1)); conn.close(); } - private void testAllSizes() throws SQLException { - trace("testAllSizes"); - deleteDb("cases"); - Connection conn = getConnection("cases"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(A INT, B INT, C INT, DATA VARCHAR)"); - int increment = getSize(100, 1); - for (int i = 1; i < 500; i += increment) { - StringBuilder buff = new StringBuilder(); - buff.append("CREATE TABLE TEST"); - for (int j = 0; j < i; j++) { - buff.append('a'); - } - buff.append("(ID INT)"); - String sql = buff.toString(); - stat.execute(sql); - stat.execute("INSERT INTO TEST VALUES(" + i + ", 0, 0, '" + sql + "')"); - } - conn.close(); - conn = getConnection("cases"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); - while (rs.next()) { - int id = rs.getInt(1); - String s = rs.getString("DATA"); - if (!s.endsWith(")")) { - fail("id=" + id); - } - } - conn.close(); - } - private void testSelectForUpdate() throws SQLException { trace("testSelectForUpdate"); deleteDb("cases"); @@ -1474,48 +1624,142 @@ private void testDeleteAndDropTableWithLobs(boolean useDrop) assertEquals("Lob file was not deleted: " + list, 0, list.size()); } - private void testDeleteTop() throws SQLException { + private void testMinimalCoveringIndexPlan() throws SQLException { deleteDb("cases"); Connection conn = getConnection("cases"); Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(id int) AS " + - "SELECT x FROM system_range(1, 100)"); - stat.execute("DELETE TOP 10 FROM TEST"); - ResultSet rs = 
stat.executeQuery("SELECT COUNT(*) FROM TEST"); + stat.execute("create table t(a int, b int, c int)"); + stat.execute("create index a_idx on t(a)"); + stat.execute("create index b_idx on t(b)"); + stat.execute("create index ab_idx on t(a, b)"); + stat.execute("create index abc_idx on t(a, b, c)"); + + ResultSet rs; + String plan; + + rs = stat.executeQuery("explain select a from t"); assertTrue(rs.next()); - assertEquals(90, rs.getInt(1)); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.A_IDX */"); + rs.close(); - stat.execute("DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10)"); - rs = stat.executeQuery("SELECT COUNT(*) FROM TEST"); + rs = stat.executeQuery("explain select b from t"); assertTrue(rs.next()); - assertEquals(81, rs.getInt(1)); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.B_IDX */"); + rs.close(); - rs = stat.executeQuery("EXPLAIN DELETE " + - "FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10)"); - rs.next(); - assertEquals("DELETE FROM PUBLIC.TEST\n" + - " /* PUBLIC.TEST.tableScan */\n" + - "LIMIT ((SELECT\n" + - " COUNT(*)\n" + - "FROM PUBLIC.TEST\n" + - " /* PUBLIC.TEST.tableScan */\n" + - "/* direct lookup */) / 10)", - rs.getString(1)); - - PreparedStatement prep; - prep = conn.prepareStatement("SELECT * FROM TEST LIMIT ?"); - prep.setInt(1, 10); - prep.execute(); + rs = stat.executeQuery("explain select b, a from t"); + assertTrue(rs.next()); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.AB_IDX */"); + rs.close(); - prep = conn.prepareStatement("DELETE FROM TEST LIMIT ?"); - prep.setInt(1, 10); - prep.execute(); - rs = stat.executeQuery("SELECT COUNT(*) FROM TEST"); + rs = stat.executeQuery("explain select b, a, c from t"); assertTrue(rs.next()); - assertEquals(71, rs.getInt(1)); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.ABC_IDX */"); + rs.close(); + + conn.close(); + } + + private void testMinMaxDirectLookupIndex() throws SQLException { + deleteDb("cases"); + Connection conn = 
getConnection("cases"); + Statement stat = conn.createStatement(); + stat.execute("create table t(a int, b int)"); + stat.execute("create index b_idx on t(b desc)"); + stat.execute("create index ab_idx on t(a, b)"); + + final int count = 100; + + PreparedStatement p = conn.prepareStatement("insert into t values (?,?)"); + for (int i = 0; i <= count; i++) { + p.setInt(1, i); + p.setInt(2, count - i); + assertEquals(1, p.executeUpdate()); + } + p.close(); + + ResultSet rs; + String plan; + + rs = stat.executeQuery("select max(b) from t"); + assertTrue(rs.next()); + assertEquals(count, rs.getInt(1)); + rs.close(); + + rs = stat.executeQuery("explain select max(b) from t"); + assertTrue(rs.next()); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.B_IDX */"); + assertContains(plan, "/* direct lookup */"); + rs.close(); + + rs = stat.executeQuery("select min(b) from t"); + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1)); + rs.close(); + + rs = stat.executeQuery("explain select min(b) from t"); + assertTrue(rs.next()); + plan = rs.getString(1); + assertContains(plan, "/* PUBLIC.B_IDX */"); + assertContains(plan, "/* direct lookup */"); + rs.close(); + + conn.close(); + } + + /** Tests fix for bug #682: Queries with 'like' expressions may filter rows incorrectly */ + private void testLikeExpressions() throws SQLException { + Connection conn = getConnection("cases"); + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select * from (select 'fo%' a union all select '%oo') where 'foo' like a"); + assertTrue(rs.next()); + assertEquals("fo%", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("%oo", rs.getString(1)); + conn.close(); + } + + private void testDataChangeDeltaTable() throws SQLException { + /* + * This test case didn't reproduce the issue in the TestScript. + * + * The same UPDATE is necessary before and after usage of a data change + * delta table. 
+ */ + String updateCommand = "UPDATE TEST SET V = 3 WHERE ID = 1"; + deleteDb("cases"); + Connection conn = getConnection("cases"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT, V INT)"); + assertEquals(0, stat.executeUpdate(updateCommand)); + ResultSet rs = stat.executeQuery("SELECT V FROM FINAL TABLE (INSERT INTO TEST VALUES (1, 1))"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals(1, stat.executeUpdate(updateCommand)); + rs = stat.executeQuery("SELECT V FROM TEST"); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + conn.close(); + } + + private void testGroupSortedReset() throws SQLException { + // This test case didn't reproduce the issue in the TestScript. + deleteDb("cases"); + Connection conn = getConnection("cases"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1(A INT PRIMARY KEY, B INT) AS VALUES (1, 4), (2, 5), (3, 6)"); + String sql = "SELECT B FROM T1 LEFT JOIN (VALUES 2) T2(A) USING(A) WHERE T2.A = 2 GROUP BY T1.A"; + stat.execute(sql); + stat.execute("UPDATE T1 SET B = 7 WHERE A = 3"); + stat.execute(sql); conn.close(); } -} \ No newline at end of file +} diff --git a/h2/src/test/org/h2/test/db/TestCheckpoint.java b/h2/src/test/org/h2/test/db/TestCheckpoint.java index fb5c1d6eb8..6cfc1e793f 100644 --- a/h2/src/test/org/h2/test/db/TestCheckpoint.java +++ b/h2/src/test/org/h2/test/db/TestCheckpoint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,11 +10,12 @@ import java.sql.Statement; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the CHECKPOINT SQL statement. 
*/ -public class TestCheckpoint extends TestBase { +public class TestCheckpoint extends TestDb { /** * Run just this test. @@ -22,7 +23,7 @@ public class TestCheckpoint extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/db/TestCluster.java b/h2/src/test/org/h2/test/db/TestCluster.java index b970755bfc..6884892ff8 100644 --- a/h2/src/test/org/h2/test/db/TestCluster.java +++ b/h2/src/test/org/h2/test/db/TestCluster.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -15,15 +15,16 @@ import org.h2.api.ErrorCode; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.CreateCluster; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Server; import org.h2.util.JdbcUtils; /** - * Test for the cluster feature. + * Test the cluster feature. */ -public class TestCluster extends TestBase { +public class TestCluster extends TestDb { /** * Run just this test. @@ -31,7 +32,15 @@ public class TestCluster extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + if (config.memory || config.networked || config.cipher != null) { + return false; + } + return true; } @Override @@ -46,11 +55,6 @@ public void test() throws Exception { } private void testClob() throws SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); org.h2.Driver.load(); @@ -58,9 +62,9 @@ private void testClob() throws SQLException { Connection conn; Statement stat; + Server n1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1").start(); + int port1 = n1.getPort(); String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", false); - Server n1 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port1, "-baseDir", getBaseDir() + "/node1").start(); conn = getConnection(url1, user, password); stat = conn.createStatement(); @@ -68,10 +72,11 @@ private void testClob() throws SQLException { stat.execute("insert into t1 values(1, repeat('Hello', 50))"); conn.close(); + Server n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2").start(); + int port2 = n2.getPort(); String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", false); - Server n2 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); + String serverList = "localhost:" + port1 + ",localhost:" + port2; String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", true); CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", @@ -86,11 +91,6 @@ private void testClob() throws SQLException { } private void testRecover() throws SQLException { - if (config.memory || 
config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); org.h2.Driver.load(); @@ -99,15 +99,19 @@ private void testRecover() throws SQLException { Statement stat; ResultSet rs; + + Server server1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1") + .start(); + int port1 = server1.getPort(); + Server server2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2") + .start(); + int port2 = server2.getPort(); + String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", true); String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", true); + String serverList = "localhost:" + port1 + ",localhost:" + port2; String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", true); - Server server1 = org.h2.tools.Server.createTcpServer( - "-tcpPort", "" + port1, "-baseDir", getBaseDir() + "/node1").start(); - Server server2 = org.h2.tools.Server.createTcpServer( - "-tcpPort", "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); - CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", serverList); @@ -128,7 +132,7 @@ private void testRecover() throws SQLException { rs.next(); assertEquals(5, rs.getInt(1)); - server2 = org.h2.tools.Server.createTcpServer("-tcpPort", + server2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-tcpPort", "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", @@ -149,11 +153,6 @@ private void testRecover() throws SQLException { } private void testRollback() throws SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + 
port2; deleteFiles(); org.h2.Driver.load(); @@ -162,15 +161,16 @@ private void testRollback() throws SQLException { Statement stat; ResultSet rs; + Server n1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1").start(); + int port1 = n1.getPort(); + Server n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2").start(); + int port2 = n2.getPort(); + String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", true); String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", true); + String serverList = "localhost:" + port1 + ",localhost:" + port2; String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", true); - Server n1 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port1, "-baseDir", getBaseDir() + "/node1").start(); - Server n2 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); - CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", serverList); @@ -197,11 +197,6 @@ private void testRollback() throws SQLException { } private void testCase() throws SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); org.h2.Driver.load(); @@ -210,15 +205,16 @@ private void testCase() throws SQLException { Statement stat; ResultSet rs; + + Server n1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1").start(); + int port1 = n1.getPort(); + Server n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2").start(); + int port2 = n2.getPort(); + String serverList = "localhost:" + port1 + ",localhost:" + port2; String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", true); String url2 = 
getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", true); String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", true); - Server n1 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port1, "-baseDir", getBaseDir() + "/node1").start(); - Server n2 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); - CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", serverList); @@ -254,26 +250,23 @@ private void testCase() throws SQLException { } private void testClientInfo() throws SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); org.h2.Driver.load(); String user = getUser(), password = getPassword(); Connection conn; + + Server n1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1").start(); + int port1 = n1.getPort(); + Server n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2").start(); + int port2 = n2.getPort(); + + String serverList = "localhost:" + port1 + ",localhost:" + port2; String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", true); String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", true); String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", true); - Server n1 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port1, "-baseDir", getBaseDir() + "/node1").start(); - Server n2 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); - CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", serverList); @@ -306,25 +299,18 @@ private void testClientInfo() throws SQLException { } private void testCreateClusterAtRuntime() throws 
SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } - int port1 = 9191, port2 = 9192; - String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); org.h2.Driver.load(); String user = getUser(), password = getPassword(); Connection conn; Statement stat; - String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", false); - String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", false); - String urlCluster = getURL("jdbc:h2:tcp://" + serverList + "/test", false); int len = 10; // initialize the database - Server n1 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port1, "-baseDir", getBaseDir() + "/node1").start(); + Server n1 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node1").start(); + int port1 = n1.getPort(); + String url1 = getURL("jdbc:h2:tcp://localhost:" + port1 + "/test", false); conn = getConnection(url1, user, password); stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar) as " + @@ -333,10 +319,12 @@ private void testCreateClusterAtRuntime() throws SQLException { stat.execute("grant all on test to test"); // start the second server - Server n2 = org.h2.tools.Server.createTcpServer("-tcpPort", - "" + port2 , "-baseDir", getBaseDir() + "/node2").start(); + Server n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-baseDir", getBaseDir() + "/node2").start(); + int port2 = n2.getPort(); + String url2 = getURL("jdbc:h2:tcp://localhost:" + port2 + "/test", false); // copy the database and initialize the cluster + String serverList = "localhost:" + port1 + ",localhost:" + port2; CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", serverList); @@ -347,6 +335,7 @@ private void testCreateClusterAtRuntime() throws SQLException { JdbcUtils.closeSilently(conn); // test the cluster connection + String urlCluster = 
getURL("jdbc:h2:tcp://" + serverList + "/test", false); Connection connApp = getConnection(urlCluster + ";AUTO_RECONNECT=TRUE", user, password); check(connApp, len, "'" + serverList + "'"); @@ -365,7 +354,7 @@ private void testCreateClusterAtRuntime() throws SQLException { connApp.setAutoCommit(true); // re-create the cluster - n2 = org.h2.tools.Server.createTcpServer("-tcpPort", "" + port2, + n2 = org.h2.tools.Server.createTcpServer("-ifNotExists", "-tcpPort", "" + port2, "-baseDir", getBaseDir() + "/node2").start(); CreateCluster.main("-urlSource", url1, "-urlTarget", url2, "-user", user, "-password", password, "-serverList", @@ -399,9 +388,6 @@ private void testCreateClusterAtRuntime() throws SQLException { } private void testStartStopCluster() throws SQLException { - if (config.memory || config.networked || config.cipher != null) { - return; - } int port1 = 9193, port2 = 9194; String serverList = "localhost:" + port1 + ",localhost:" + port2; deleteFiles(); @@ -442,10 +428,10 @@ private void testStartStopCluster() throws SQLException { // try to connect in standalone mode - should fail // should not be able to connect in standalone mode - assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, this). - getConnection("jdbc:h2:tcp://localhost:"+port1+"/test", user, password); - assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, this). 
- getConnection("jdbc:h2:tcp://localhost:"+port2+"/test", user, password); + assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port1 + "/test", user, password)); + assertThrows(ErrorCode.CLUSTER_ERROR_DATABASE_RUNS_CLUSTERED_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port2 + "/test", user, password)); // test a cluster connection conn = getConnection("jdbc:h2:tcp://" + serverList + "/test", user, password); @@ -524,7 +510,7 @@ private void check(Connection conn, int len, String expectedCluster) assertFalse(rs.next()); } ResultSet rs = conn.createStatement().executeQuery( - "SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'CLUSTER'"); String cluster = rs.next() ? rs.getString(1) : "''"; assertEquals(expectedCluster, cluster); } diff --git a/h2/src/test/org/h2/test/db/TestCompatibility.java b/h2/src/test/org/h2/test/db/TestCompatibility.java index 01d21419dc..b64cb97547 100644 --- a/h2/src/test/org/h2/test/db/TestCompatibility.java +++ b/h2/src/test/org/h2/test/db/TestCompatibility.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; @@ -12,14 +14,15 @@ import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; - +import java.util.Locale; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the compatibility with other databases. 
*/ -public class TestCompatibility extends TestBase { +public class TestCompatibility extends TestDb { private Connection conn; @@ -29,16 +32,14 @@ public class TestCompatibility extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { deleteDb("compatibility"); - testOnDuplicateKey(); testCaseSensitiveIdentifiers(); - testKeyAsColumnInMySQLMode(); conn = getConnection("compatibility"); testDomain(); @@ -52,33 +53,20 @@ public void test() throws SQLException { testDerby(); testSybaseAndMSSQLServer(); + testUnknownSet(); + conn.close(); + testIdentifiers(); + testIdentifiersCaseInResultSet(); + testDatabaseToLowerParser(); + testOldInformationSchema(); deleteDb("compatibility"); - } - - private void testOnDuplicateKey() throws SQLException { - Connection c = getConnection("compatibility;MODE=MYSQL"); - Statement stat = c.createStatement(); - stat.execute("set mode mysql"); - stat.execute("create schema s2"); - stat.execute("create table s2.test(id int primary key, name varchar(255))"); - stat.execute("insert into s2.test(id, name) values(1, 'a')"); - stat.execute("insert into s2.test(id, name) values(1, 'b') " + - "on duplicate key update name = values(name)"); - stat.execute("drop schema s2"); - c.close(); - } - private void testKeyAsColumnInMySQLMode() throws SQLException { - Connection c = getConnection("compatibility;MODE=MYSQL"); - Statement stat = c.createStatement(); - stat.execute("create table test(id int primary key, key varchar)"); - stat.execute("drop table test"); - c.close(); + testUnknownURL(); } private void testCaseSensitiveIdentifiers() throws SQLException { - Connection c = getConnection("compatibility;DATABASE_TO_UPPER=FALSE"); + Connection c = getConnection("compatibility;DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE"); Statement stat = 
c.createStatement(); stat.execute("create table test(id int primary key, name varchar) " + "as select 1, 'hello'"); @@ -117,6 +105,20 @@ private void testCaseSensitiveIdentifiers() throws SQLException { stat.execute("select id from test t group by T.ID"); stat.execute("drop table test"); + + rs = stat.executeQuery("select 1e10, 1000000000000000000000e10, 0xfAfBl"); + assertTrue(rs.next()); + assertEquals(1e10, rs.getDouble(1)); + assertEquals(1000000000000000000000e10, rs.getDouble(2)); + assertEquals(0xfafbL, rs.getLong(3)); + assertFalse(rs.next()); + + stat.execute("create table \"t 1\" (a int, b int)"); + stat.execute("create view v as select * from \"t 1\""); + stat.executeQuery("select * from v").close(); + stat.execute("drop view v"); + stat.execute("drop table \"t 1\""); + c.close(); } @@ -126,7 +128,7 @@ private void testDomain() throws SQLException { } Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key) as select 1"); - assertThrows(ErrorCode.USER_DATA_TYPE_ALREADY_EXISTS_1, stat). + assertThrows(ErrorCode.DOMAIN_ALREADY_EXISTS_1, stat). 
execute("create domain int as varchar"); conn.close(); conn = getConnection("compatibility"); @@ -140,22 +142,24 @@ private void testColumnAlias() throws SQLException { String[] modes = { "PostgreSQL", "MySQL", "HSQLDB", "MSSQLServer", "Derby", "Oracle", "Regular" }; String columnAlias; - columnAlias = "MySQL,Regular"; + columnAlias = "HSQLDB,MySQL,Regular"; stat.execute("CREATE TABLE TEST(ID INT)"); for (String mode : modes) { stat.execute("SET MODE " + mode); ResultSet rs = stat.executeQuery("SELECT ID I FROM TEST"); ResultSetMetaData meta = rs.getMetaData(); + assertEquals(mode + " mode", "I", meta.getColumnLabel(1)); String columnName = meta.getColumnName(1); String tableName = meta.getTableName(1); - if ("ID".equals(columnName) && "TEST".equals(tableName)) { - assertTrue(mode + " mode should not support columnAlias", - columnAlias.contains(mode)); - } else if ("I".equals(columnName) && tableName.equals("")) { - assertTrue(mode + " mode should support columnAlias", - columnAlias.indexOf(mode) < 0); + String schemaName = meta.getSchemaName(1); + if (columnAlias.contains(mode)) { + assertEquals(mode + " mode", "ID", columnName); + assertEquals(mode + " mode", "TEST", tableName); + assertEquals(mode + " mode", "PUBLIC", schemaName); } else { - fail(); + assertEquals(mode + " mode", "I", columnName); + assertEquals(mode + " mode", "", tableName); + assertEquals(mode + " mode", "", schemaName); } } stat.execute("DROP TABLE TEST"); @@ -165,7 +169,7 @@ private void testUniqueIndexSingleNull() throws SQLException { Statement stat = conn.createStatement(); String[] modes = { "PostgreSQL", "MySQL", "HSQLDB", "MSSQLServer", "Derby", "Oracle", "Regular" }; - String multiNull = "PostgreSQL,MySQL,Oracle,Regular"; + String multiNull = "PostgreSQL,MySQL,HSQLDB,Oracle,Regular"; for (String mode : modes) { stat.execute("SET MODE " + mode); stat.execute("CREATE TABLE TEST(ID INT)"); @@ -212,13 +216,6 @@ private void testHsqlDb() throws SQLException { stat.execute("CALL TODAY"); 
stat.execute("DROP TABLE TEST IF EXISTS"); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("INSERT INTO TEST VALUES(1)"); - PreparedStatement prep = conn.prepareStatement( - "SELECT LIMIT ? 1 ID FROM TEST"); - prep.setInt(1, 2); - prep.executeQuery(); - stat.execute("DROP TABLE TEST IF EXISTS"); } private void testLog(double expected, Statement stat) throws SQLException { @@ -240,52 +237,150 @@ private void testPostgreSQL() throws SQLException { assertResult("ABC", stat, "SELECT SUBSTRING('ABCDEF' FOR 3)"); assertResult("ABCD", stat, "SELECT SUBSTRING('0ABCDEF' FROM 2 FOR 4)"); + + /* --------- Behaviour of CHAR(N) --------- */ + + /* Test right-padding of CHAR(N) at INSERT */ + stat.execute("CREATE TABLE TEST(CH CHAR(10))"); + stat.execute("INSERT INTO TEST (CH) VALUES ('Hello')"); + assertResult("Hello ", stat, "SELECT CH FROM TEST"); + + /* Test that WHERE clauses accept unpadded values and will pad before comparison */ + assertResult("Hello ", stat, "SELECT CH FROM TEST WHERE CH = 'Hello'"); + + /* Test CHAR which is identical to CHAR(1) */ + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(CH CHAR)"); + stat.execute("INSERT INTO TEST (CH) VALUES ('')"); + assertResult(" ", stat, "SELECT CH FROM TEST"); + assertResult(" ", stat, "SELECT CH FROM TEST WHERE CH = ''"); + + /* Test that excessive spaces are trimmed */ + stat.execute("DELETE FROM TEST"); + stat.execute("INSERT INTO TEST (CH) VALUES ('1 ')"); + assertResult("1", stat, "SELECT CH FROM TEST"); + assertResult("1", stat, "SELECT CH FROM TEST WHERE CH = '1 '"); + + /* Test that we do not trim too far */ + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(CH CHAR(2))"); + stat.execute("INSERT INTO TEST (CH) VALUES ('1 ')"); + assertResult("1 ", stat, "SELECT CH FROM TEST"); + assertResult("1 ", stat, "SELECT CH FROM TEST WHERE CH = '1 '"); + + /* --------- Disallowed column types --------- */ + + String[] DISALLOWED_TYPES = {"NUMBER", 
"IDENTITY", "TINYINT", "BLOB"}; + for (String type : DISALLOWED_TYPES) { + stat.execute("DROP TABLE IF EXISTS TEST"); + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, stat).execute("CREATE TABLE TEST(COL " + type + ")"); + } + + /* Test MONEY data type */ + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(M MONEY)"); + stat.execute("INSERT INTO TEST(M) VALUES (-92233720368547758.08)"); + stat.execute("INSERT INTO TEST(M) VALUES (0.11111)"); + stat.execute("INSERT INTO TEST(M) VALUES (92233720368547758.07)"); + ResultSet rs = stat.executeQuery("SELECT M FROM TEST ORDER BY M"); + assertTrue(rs.next()); + assertEquals(new BigDecimal("-92233720368547758.08"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("0.11"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("92233720368547758.07"), rs.getBigDecimal(1)); + assertFalse(rs.next()); + + /* Test SET STATEMENT_TIMEOUT */ + assertEquals(0, stat.getQueryTimeout()); + conn.close(); + deleteDb("compatibility"); + // `stat.getQueryTimeout()` caches the result, so create another connection + conn = getConnection("compatibility;MODE=PostgreSQL"); + stat = conn.createStatement(); + // `STATEMENT_TIMEOUT` uses milliseconds + stat.execute("SET STATEMENT_TIMEOUT TO 30000"); + // `stat.getQueryTimeout()` returns seconds + assertEquals(30, stat.getQueryTimeout()); } private void testMySQL() throws SQLException { + // need to reconnect to change DATABASE_TO_LOWER + conn.close(); + deleteDb("compatibility"); + conn = getConnection("compatibility;MODE=MYSQL;DATABASE_TO_LOWER=TRUE"); Statement stat = conn.createStatement(); stat.execute("create schema test_schema"); stat.execute("use test_schema"); - assertResult("TEST_SCHEMA", stat, "select schema()"); + assertResult("test_schema", stat, "select schema()"); stat.execute("use public"); - assertResult("PUBLIC", stat, "select schema()"); + assertResult("public", stat, "select schema()"); 
stat.execute("SELECT 1"); stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + stat.execute("CREATE TABLE `TEST`(ID INT PRIMARY KEY, NAME VARCHAR)"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World')"); - org.h2.mode.FunctionsMySQL.register(conn); assertResult("0", stat, "SELECT UNIX_TIMESTAMP('1970-01-01 00:00:00Z')"); - assertResult("1196418619", stat, - "SELECT UNIX_TIMESTAMP('2007-11-30 10:30:19Z')"); - assertResult("1196418619", stat, - "SELECT UNIX_TIMESTAMP(FROM_UNIXTIME(1196418619))"); - assertResult("2007 November", stat, - "SELECT FROM_UNIXTIME(1196300000, '%Y %M')"); - assertResult("2003-12-31", stat, - "SELECT DATE('2003-12-31 11:02:03')"); + assertResult("1196418619", stat, "SELECT UNIX_TIMESTAMP('2007-11-30 10:30:19Z')"); + assertResult("1196418619", stat, "SELECT UNIX_TIMESTAMP(FROM_UNIXTIME(1196418619))"); + assertResult("2007 November", stat, "SELECT FROM_UNIXTIME(1196300000, '%Y %M')"); + assertResult("2003-12-31", stat, "SELECT DATE('2003-12-31 11:02:03')"); + assertResult("2003-12-31", stat, "SELECT DATE('2003-12-31 11:02:03')"); + assertResult(null, stat, "SELECT DATE('100')"); + // check the weird MySQL variant of DELETE + stat.execute("DELETE TEST FROM TEST WHERE 1=2"); + + // Check conversion between VARCHAR and VARBINARY + String string = "ABCD\u1234"; + byte[] bytes = string.getBytes(StandardCharsets.UTF_8); + stat.execute("CREATE TABLE TEST2(C VARCHAR, B VARBINARY)"); + stat.execute("INSERT INTO TEST2(C) VALUES ('" + string + "')"); + assertEquals(1, stat.executeUpdate("UPDATE TEST2 SET B = C")); + ResultSet rs = stat.executeQuery("SELECT B FROM TEST2"); + assertTrue(rs.next()); + assertEquals(bytes, rs.getBytes(1)); + assertEquals(bytes, rs.getBytes("B")); + assertEquals(1, stat.executeUpdate("UPDATE TEST2 SET C = B")); + testMySQLBytesCheck(stat, string, bytes); + PreparedStatement prep = conn.prepareStatement("UPDATE TEST2 SET C = ?"); + prep.setBytes(1, 
bytes); + assertEquals(1, prep.executeUpdate()); + testMySQLBytesCheck(stat, string, bytes); + stat.execute("DELETE FROM TEST2"); + prep = conn.prepareStatement("INSERT INTO TEST2(C) VALUES (?)"); + prep.setBytes(1, bytes); + assertEquals(1, prep.executeUpdate()); + testMySQLBytesCheck(stat, string, bytes); + prep = conn.prepareStatement("SELECT C FROM TEST2 WHERE C = ?"); + prep.setBytes(1, bytes); + testMySQLBytesCheck(prep.executeQuery(), string, bytes); + stat.execute("CREATE INDEX TEST2_C ON TEST2(C)"); + prep = conn.prepareStatement("SELECT C FROM TEST2 WHERE C = ?"); + prep.setBytes(1, bytes); + testMySQLBytesCheck(prep.executeQuery(), string, bytes); + stat.execute("DROP TABLE TEST2"); if (config.memory) { return; } // need to reconnect, because meta data tables may be initialized conn.close(); - conn = getConnection("compatibility;MODE=MYSQL"); + conn = getConnection("compatibility;MODE=MYSQL;DATABASE_TO_LOWER=TRUE"); stat = conn.createStatement(); testLog(Math.log(10), stat); DatabaseMetaData meta = conn.getMetaData(); assertTrue(meta.storesLowerCaseIdentifiers()); - assertTrue(meta.storesLowerCaseQuotedIdentifiers()); + assertFalse(meta.storesLowerCaseQuotedIdentifiers()); assertFalse(meta.storesMixedCaseIdentifiers()); assertFalse(meta.storesMixedCaseQuotedIdentifiers()); assertFalse(meta.storesUpperCaseIdentifiers()); - assertTrue(meta.storesUpperCaseQuotedIdentifiers()); + assertFalse(meta.storesUpperCaseQuotedIdentifiers()); stat = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); assertResult("test", stat, "SHOW TABLES"); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); + rs = stat.executeQuery("SELECT * FROM TEST"); rs.next(); rs.updateString(2, "Hallo"); rs.updateRow(); @@ -306,13 +401,29 @@ private void testMySQL() throws SQLException { stat.execute("CREATE TABLE TEST_4" + "(ID INT PRIMARY KEY) charset=UTF8"); stat.execute("CREATE TABLE TEST_5" + - "(ID INT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 
default charset=UTF8"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 default charset=UTF8"); stat.execute("CREATE TABLE TEST_6" + - "(ID INT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 charset=UTF8"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM default character set UTF8MB4, auto_increment 3"); stat.execute("CREATE TABLE TEST_7" + - "(ID INT, KEY TEST_7_IDX(ID) USING BTREE)"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 charset=UTF8 comment 'text'"); stat.execute("CREATE TABLE TEST_8" + - "(ID INT, UNIQUE KEY TEST_8_IDX(ID) USING BTREE)"); + "(ID INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDb auto_increment=3 character set=UTF8"); + stat.execute("CREATE TABLE TEST_9" + + "(ID INT, KEY TEST_7_IDX(ID) USING BTREE)"); + stat.execute("CREATE TABLE TEST_10" + + "(ID INT, UNIQUE KEY TEST_10_IDX(ID) USING BTREE)"); + stat.execute("CREATE TABLE TEST_11(ID INT) COLLATE UTF8"); + stat.execute("CREATE TABLE TEST_12(ID INT) DEFAULT COLLATE UTF8"); + stat.execute("CREATE TABLE TEST_13(a VARCHAR(10) COLLATE UTF8MB4)"); + stat.execute("CREATE TABLE TEST_14(a VARCHAR(10) NULL CHARACTER SET UTF8MB4 COLLATE UTF8MB4_BIN)"); + stat.execute("ALTER TABLE TEST_14 CONVERT TO CHARACTER SET UTF8MB4 COLLATE UTF8MB4_UNICODE_CI"); + stat.execute("ALTER TABLE TEST_14 MODIFY a VARCHAR(10) NOT NULL CHARACTER SET UTF8MB4 COLLATE UTF8"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat).execute("CREATE TABLE TEST_99" + + "(ID INT PRIMARY KEY) CHARSET UTF8,"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("CREATE TABLE TEST_99" + + "(ID INT PRIMARY KEY) AUTO_INCREMENT 100"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("CREATE TABLE TEST_99" + + "(ID INT) AUTO_INCREMENT 100"); // this maps to SET REFERENTIAL_INTEGRITY TRUE/FALSE stat.execute("SET foreign_key_checks = 0"); @@ -320,7 +431,7 @@ private void testMySQL() throws SQLException { // Check if mysql comments are supported, ensure clean connection conn.close(); - 
conn = getConnection("compatibility;MODE=MYSQL"); + conn = getConnection("compatibility;MODE=MYSQL;DATABASE_TO_LOWER=TRUE"); stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST_NO_COMMENT"); stat.execute("CREATE table TEST_NO_COMMENT " + @@ -342,10 +453,30 @@ private void testMySQL() throws SQLException { stat.execute("CREATE TABLE TEST2(ID INT) ROW_FORMAT=DYNAMIC"); + // check the MySQL index dropping syntax + stat.execute("ALTER TABLE TEST_COMMENT_ENGINE ADD CONSTRAINT CommentUnique UNIQUE (SOME_ITEM_ID)"); + stat.execute("ALTER TABLE TEST_COMMENT_ENGINE DROP INDEX CommentUnique"); + stat.execute("CREATE INDEX IDX_ATTACHMENT_ID ON TEST_COMMENT_ENGINE (ATTACHMENT_ID)"); + stat.execute("DROP INDEX IDX_ATTACHMENT_ID ON TEST_COMMENT_ENGINE"); + + stat.execute("DROP ALL OBJECTS"); + conn.close(); + deleteDb("compatibility"); conn = getConnection("compatibility"); } + private void testMySQLBytesCheck(Statement stat, String string, byte[] bytes) throws SQLException { + testMySQLBytesCheck(stat.executeQuery("SELECT C FROM TEST2"), string, bytes); + } + + private void testMySQLBytesCheck(ResultSet rs, String string, byte[] bytes) throws SQLException { + assertTrue(rs.next()); + assertEquals(string, rs.getString(1)); + assertEquals(bytes, rs.getBytes(1)); + assertEquals(bytes, rs.getBytes("C")); + } + private void testSybaseAndMSSQLServer() throws SQLException { Statement stat = conn.createStatement(); stat.execute("SET MODE MSSQLServer"); @@ -399,10 +530,47 @@ private void testSybaseAndMSSQLServer() throws SQLException { rs.next(); assertEquals(10, rs.getInt(1)); rs.close(); + rs = stat.executeQuery("SELECT X FROM (SELECT CONVERT(INT, '10') AS X)"); + rs.next(); + assertEquals(10, rs.getInt(1)); + rs.close(); // make sure we're ignoring the index part of the statement rs = stat.executeQuery("select * from test (index table1_index)"); rs.close(); + + // UNIQUEIDENTIFIER is MSSQL's equivalent of UUID + stat.execute("create table test3 (id 
UNIQUEIDENTIFIER)"); + + /* Test MONEY data type */ + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(M MONEY)"); + stat.execute("INSERT INTO TEST(M) VALUES (-922337203685477.5808)"); + stat.execute("INSERT INTO TEST(M) VALUES (0.11111)"); + stat.execute("INSERT INTO TEST(M) VALUES (922337203685477.5807)"); + rs = stat.executeQuery("SELECT M FROM TEST ORDER BY M"); + assertTrue(rs.next()); + assertEquals(new BigDecimal("-922337203685477.5808"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("0.1111"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("922337203685477.5807"), rs.getBigDecimal(1)); + assertFalse(rs.next()); + + /* Test SMALLMONEY data type */ + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(M SMALLMONEY)"); + stat.execute("INSERT INTO TEST(M) VALUES (-214748.3648)"); + stat.execute("INSERT INTO TEST(M) VALUES (0.11111)"); + stat.execute("INSERT INTO TEST(M) VALUES (214748.3647)"); + rs = stat.executeQuery("SELECT M FROM TEST ORDER BY M"); + assertTrue(rs.next()); + assertEquals(new BigDecimal("-214748.3648"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("0.1111"), rs.getBigDecimal(1)); + assertTrue(rs.next()); + assertEquals(new BigDecimal("214748.3647"), rs.getBigDecimal(1)); + assertFalse(rs.next()); } private void testDB2() throws SQLException { @@ -432,6 +600,8 @@ private void testDB2() throws SQLException { res.next(); assertEquals("2", res.getString(1)); assertFalse(res.next()); + conn.close(); + // test isolation-clause conn = getConnection("compatibility;MODE=DB2"); stat = conn.createStatement(); @@ -455,6 +625,30 @@ private void testDB2() throws SQLException { "fetch next 2 rows only with rs use and keep update locks"); res = stat.executeQuery("select * from test order by id " + "fetch next 2 rows only with rr use and keep exclusive locks"); + + // Test DB2 TIMESTAMP format with dash separating 
date and time + stat.execute("drop table test if exists"); + stat.execute("create table test(date TIMESTAMP)"); + stat.executeUpdate("insert into test (date) values ('2014-04-05-09.48.28.020005')"); + assertResult("2014-04-05 09:48:28.020005", stat, + "select date from test"); // <- result is always H2 format timestamp! + assertResult("2014-04-05 09:48:28.020005", stat, + "select date from test where date = '2014-04-05-09.48.28.020005'"); + assertResult("2014-04-05 09:48:28.020005", stat, + "select date from test where date = '2014-04-05 09:48:28.020005'"); + + // Test limited support for DB2's special registers + + // Standard SQL functions like LOCALTIMESTAMP, CURRENT_TIMESTAMP and + // others are used to compare values, their implementation in H2 is + // compatible with standard, but may be not really compatible with DB2. + assertResult("TRUE", stat, "SELECT LOCALTIMESTAMP = CURRENT TIMESTAMP"); + assertResult("TRUE", stat, "SELECT CAST(LOCALTIMESTAMP AS VARCHAR) = CAST(CURRENT TIMESTAMP AS VARCHAR)"); + assertResult("TRUE", stat, "SELECT CURRENT_TIMESTAMP = CURRENT TIMESTAMP WITH TIME ZONE"); + assertResult("TRUE", stat, + "SELECT CAST(CURRENT_TIMESTAMP AS VARCHAR) = CAST(CURRENT TIMESTAMP WITH TIME ZONE AS VARCHAR)"); + assertResult("TRUE", stat, "SELECT CURRENT_TIME = CURRENT TIME"); + assertResult("TRUE", stat, "SELECT CURRENT_DATE = CURRENT DATE"); } private void testDerby() throws SQLException { @@ -474,4 +668,121 @@ private void testDerby() throws SQLException { conn.close(); conn = getConnection("compatibility"); } + + private void testUnknownSet() throws SQLException { + Statement stat = conn.createStatement(); + assertThrows(ErrorCode.UNKNOWN_MODE_1, stat).execute("SET MODE UnknownMode"); + } + + private void testIdentifiers() throws SQLException { + deleteDb("compatibility"); + testIdentifiers(false, false, false); + testIdentifiers(false, false, true); + testIdentifiers(true, false, false); + testIdentifiers(true, false, true); + 
testIdentifiers(false, true, false); + testIdentifiers(false, true, true); + } + + private void testIdentifiers(boolean upper, boolean lower, boolean caseInsensitiveIdentifiers) // + throws SQLException { + try (Connection conn = getConnection("compatibility;DATABASE_TO_UPPER=" + upper + ";DATABASE_TO_LOWER=" + lower + + ";CASE_INSENSITIVE_IDENTIFIERS=" + caseInsensitiveIdentifiers)) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE Test(Id INT) AS VALUES 2"); + String schema = "PUBLIC", table = "Test", column = "Id"; + if (upper) { + table = table.toUpperCase(Locale.ROOT); + column = column.toUpperCase(Locale.ROOT); + } else if (lower) { + schema = schema.toLowerCase(Locale.ROOT); + table = table.toLowerCase(Locale.ROOT); + column = column.toLowerCase(Locale.ROOT); + } + try (ResultSet rs = stat.executeQuery("SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME" + + " FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME ILIKE 'Test'")) { + assertTrue(rs.next()); + assertEquals(schema, rs.getString(1)); + assertEquals(table, rs.getString(2)); + assertEquals(column, rs.getString(3)); + } + testIdentifiers(stat, "Test", "Id", true); + testIdentifiers(stat, "`Test`", "`Id`", true); + boolean ok = upper || lower || caseInsensitiveIdentifiers; + testIdentifiers(stat, "TEST", "ID", ok); + testIdentifiers(stat, "`TEST`", "`ID`", ok); + testIdentifiers(stat, "test", "id", ok); + testIdentifiers(stat, "`test`", "`id`", ok); + testIdentifiers(stat, '"' + table + '"', '"' + column + '"', true); + testIdentifiers(stat, "\"TeSt\"", "\"iD\"", caseInsensitiveIdentifiers); + stat.execute("CREATE TABLE T2(\"`\" INT, `\"'\"` INT) AS VALUES (1, 2)"); + try (ResultSet rs = stat.executeQuery("SELECT ````, \"\"\"'\"\"\" FROM T2")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals(2, rs.getInt(2)); + } + } finally { + deleteDb("compatibility"); + } + } + + private void testIdentifiers(Statement stat, String table, String column, boolean ok) throws 
SQLException { + String query = "SELECT _ROWID_, " + column + " FROM " + table; + if (ok) { + try (ResultSet rs = stat.executeQuery(query)) { + assertTrue(rs.next()); + assertEquals(1L, rs.getLong(1)); + assertEquals(2, rs.getInt(2)); + } + } else { + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2, stat).executeQuery(query); + } + } + + private void testUnknownURL() { + assertThrows(ErrorCode.UNKNOWN_MODE_1, () -> { + getConnection("compatibility;MODE=Unknown").close(); + deleteDb("compatibility"); + }); + } + + private void testIdentifiersCaseInResultSet() throws SQLException { + try (Connection conn = getConnection( + "compatibility;DATABASE_TO_UPPER=FALSE;CASE_INSENSITIVE_IDENTIFIERS=TRUE")) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT)"); + ResultSet rs = stat.executeQuery("SELECT a from test"); + ResultSetMetaData md = rs.getMetaData(); + assertEquals("A", md.getColumnName(1)); + rs = stat.executeQuery("SELECT a FROM (SELECT 1) t(A)"); + md = rs.getMetaData(); + assertEquals("A", md.getColumnName(1)); + } finally { + deleteDb("compatibility"); + } + } + + private void testDatabaseToLowerParser() throws SQLException { + try (Connection conn = getConnection("compatibility;DATABASE_TO_LOWER=TRUE")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT 0x1234567890AbCdEf"); + rs.next(); + assertEquals(0x1234567890ABCDEFL, rs.getLong(1)); + } finally { + deleteDb("compatibility"); + } + } + + private void testOldInformationSchema() throws SQLException { + try (Connection conn = getConnection( + "compatibility;OLD_INFORMATION_SCHEMA=TRUE")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("TABLE INFORMATION_SCHEMA.TABLE_TYPES"); + rs.next(); + assertEquals("TABLE", rs.getString(1)); + } finally { + deleteDb("compatibility"); + } + } + } diff --git a/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java 
b/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java index b61cd79cba..82ca638de7 100644 --- a/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java +++ b/h2/src/test/org/h2/test/db/TestCompatibilityOracle.java @@ -1,23 +1,29 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Timestamp; import java.sql.Types; +import java.text.SimpleDateFormat; import java.util.Arrays; +import java.util.Locale; + import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; /** * Test Oracle compatibility mode. */ -public class TestCompatibilityOracle extends TestBase { +public class TestCompatibilityOracle extends TestDb { /** * Run just this test. @@ -26,20 +32,93 @@ public class TestCompatibilityOracle extends TestBase { */ public static void main(String... 
s) throws Exception { TestBase test = TestBase.createCaller().init(); - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { + testNotNullSyntax(); testTreatEmptyStringsAsNull(); testDecimalScale(); + testPoundSymbolInColumnName(); + testToDate(); + testSpecialTypes(); + testDate(); + testSequenceNextval(); + testVarchar(); + deleteDb("oracle"); } - private void testTreatEmptyStringsAsNull() throws SQLException { + private void testNotNullSyntax() throws SQLException { deleteDb("oracle"); Connection conn = getConnection("oracle;MODE=Oracle"); Statement stat = conn.createStatement(); + // Some other variation (oracle syntax) + stat.execute("create table T (C int not null enable)"); + stat.execute("insert into T values(1)"); + stat.execute("drop table T"); + stat.execute("create table T (C int not null enable validate)"); + stat.execute("insert into T values(1)"); + stat.execute("drop table T"); + // can set NULL + // can set NULL even with 'not null syntax' (oracle) + stat.execute("create table T (C int not null disable)"); + stat.execute("insert into T values(null)"); + stat.execute("drop table T"); + // can set NULL even with 'not null syntax' (oracle) + stat.execute("create table T (C int not null enable novalidate)"); + stat.execute("insert into T values(null)"); + stat.execute("drop table T"); + // Some other variation with oracle syntax + stat.execute("create table T (C int not null)"); + stat.execute("insert into T values(1)"); + stat.execute("alter table T modify C not null"); + stat.execute("insert into T values(1)"); + stat.execute("alter table T modify C not null enable"); + stat.execute("insert into T values(1)"); + stat.execute("alter table T modify C not null enable validate"); + stat.execute("insert into T values(1)"); + stat.execute("drop table T"); + // can set NULL + stat.execute("create table T (C int null)"); + stat.execute("insert into T values(null)"); + stat.execute("alter table T modify C null enable"); + 
stat.execute("alter table T modify C null enable validate"); + stat.execute("insert into T values(null)"); + // can set NULL even with 'not null syntax' (oracle) + stat.execute("alter table T modify C not null disable"); + stat.execute("insert into T values(null)"); + // can set NULL even with 'not null syntax' (oracle) + stat.execute("alter table T modify C not null enable novalidate"); + stat.execute("insert into T values(null)"); + stat.execute("drop table T"); + + conn.close(); + } + + private void testSpecialTypes() throws SQLException { + // Test VARCHAR, VARCHAR2 with CHAR and BYTE + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + stat.execute("create table T (ID NUMBER)"); + stat.execute("alter table T add A_1 VARCHAR(1)"); + stat.execute("alter table T add A_2 VARCHAR2(1)"); + stat.execute("alter table T add B_1 VARCHAR(1 byte)"); // with BYTE + stat.execute("alter table T add B_2 VARCHAR2(1 byte)"); + stat.execute("alter table T add C_1 VARCHAR(1 char)"); // with CHAR + stat.execute("alter table T add C_2 VARCHAR2(1 char)"); + stat.execute("alter table T add B_255 VARCHAR(255 byte)"); + stat.execute("alter table T add C_255 VARCHAR(255 char)"); + stat.execute("drop table T"); + conn.close(); + } + + private void testTreatEmptyStringsAsNull() throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); stat.execute("CREATE TABLE A (ID NUMBER, X VARCHAR2(1))"); stat.execute("INSERT INTO A VALUES (1, 'a')"); stat.execute("INSERT INTO A VALUES (2, '')"); @@ -82,7 +161,7 @@ private void testTreatEmptyStringsAsNull() throws SQLException { stat, "SELECT * FROM D"); stat.execute("CREATE TABLE E (ID NUMBER, X RAW(1))"); - stat.execute("INSERT INTO E VALUES (1, '0A')"); + stat.execute("INSERT INTO E VALUES (1, HEXTORAW('0A'))"); stat.execute("INSERT INTO E VALUES (2, '')"); assertResult("2", stat, "SELECT 
COUNT(*) FROM E"); assertResult("1", stat, "SELECT COUNT(*) FROM E WHERE X IS NULL"); @@ -90,6 +169,17 @@ private void testTreatEmptyStringsAsNull() throws SQLException { assertResult(new Object[][] { { 1, new byte[] { 10 } }, { 2, null } }, stat, "SELECT * FROM E"); + stat.execute("CREATE TABLE F (ID NUMBER, X VARCHAR2(1))"); + stat.execute("INSERT INTO F VALUES (1, 'a')"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO F VALUES (2, ?)"); + prep.setString(1, ""); + prep.execute(); + assertResult("2", stat, "SELECT COUNT(*) FROM F"); + assertResult("1", stat, "SELECT COUNT(*) FROM F WHERE X IS NULL"); + assertResult("0", stat, "SELECT COUNT(*) FROM F WHERE X = ''"); + assertResult(new Object[][]{{1, "a"}, {2, null}}, stat, "SELECT * FROM F"); + conn.close(); } @@ -109,6 +199,170 @@ private void testDecimalScale() throws SQLException { conn.close(); } + /** + * Test the # in a column name for oracle compatibility + */ + private void testPoundSymbolInColumnName() throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + + stat.execute( + "CREATE TABLE TEST(ID INT PRIMARY KEY, U##NAME VARCHAR(255))"); + stat.execute( + "INSERT INTO TEST VALUES(1, 'Hello'), (2, 'HelloWorld'), (3, 'HelloWorldWorld')"); + + assertResult("1", stat, "SELECT ID FROM TEST where U##NAME ='Hello'"); + + conn.close(); + } + + private void testToDate() throws SQLException { + if (config.ci || Locale.getDefault() != Locale.ENGLISH) { + return; + } + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE DATE_TABLE (ID NUMBER PRIMARY KEY, TEST_VAL TIMESTAMP)"); + stat.execute("INSERT INTO DATE_TABLE VALUES (1, " + + "to_date('31-DEC-9999 23:59:59','DD-MON-RRRR HH24:MI:SS'))"); + stat.execute("INSERT INTO DATE_TABLE VALUES (2, " + + "to_date('01-JAN-0001 00:00:00','DD-MON-RRRR HH24:MI:SS'))"); + + 
assertResultDate("9999-12-31T23:59:59", stat, + "SELECT TEST_VAL FROM DATE_TABLE WHERE ID=1"); + assertResultDate("0001-01-01T00:00:00", stat, + "SELECT TEST_VAL FROM DATE_TABLE WHERE ID=2"); + + conn.close(); + } + + private void testDate() throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + + Timestamp t1 = Timestamp.valueOf("2011-02-03 12:11:10"); + Timestamp t2 = Timestamp.valueOf("1999-10-15 13:14:15"); + Timestamp t3 = Timestamp.valueOf("2030-11-22 11:22:33"); + Timestamp t4 = Timestamp.valueOf("2018-01-10 22:10:01"); + + stat.execute("CREATE TABLE TEST (ID INT PRIMARY KEY, D DATE)"); + stat.executeUpdate("INSERT INTO TEST VALUES(1, TIMESTAMP '2011-02-03 12:11:10.1')"); + stat.executeUpdate("INSERT INTO TEST VALUES(2, CAST ('1999-10-15 13:14:15.1' AS DATE))"); + stat.executeUpdate("INSERT INTO TEST VALUES(3, '2030-11-22 11:22:33.1')"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + ps.setInt(1, 4); + ps.setTimestamp(2, Timestamp.valueOf("2018-01-10 22:10:01.1")); + ps.executeUpdate(); + ResultSet rs = stat.executeQuery("SELECT D FROM TEST ORDER BY ID"); + rs.next(); + assertEquals(t1, rs.getTimestamp(1)); + rs.next(); + assertEquals(t2, rs.getTimestamp(1)); + rs.next(); + assertEquals(t3, rs.getTimestamp(1)); + rs.next(); + assertEquals(t4, rs.getTimestamp(1)); + assertFalse(rs.next()); + + conn.close(); + } + + private void testSequenceNextval() throws SQLException { + // Test NEXTVAL without Oracle MODE should return BIGINT + checkSequenceTypeWithMode("REGULAR", Types.BIGINT, false); + // Test NEXTVAL with Oracle MODE should return DECIMAL + checkSequenceTypeWithMode("Oracle", Types.NUMERIC, true); + } + + private void checkSequenceTypeWithMode(String mode, int expectedType, boolean usePseudoColumn) + throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=" + mode); + Statement stat = 
conn.createStatement(); + + stat.execute("CREATE SEQUENCE seq"); + ResultSet rs = stat.executeQuery( + usePseudoColumn ? "SELECT seq.NEXTVAL FROM DUAL" : "VALUES NEXT VALUE FOR seq"); + // Check type: + assertEquals(rs.getMetaData().getColumnType(1), expectedType); + conn.close(); + } + + private void testVarchar() throws SQLException { + deleteDb("oracle"); + Connection conn = getConnection("oracle;MODE=Oracle"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V VARCHAR) AS VALUES (1, 'a')"); + PreparedStatement prep = conn.prepareStatement("UPDATE TEST SET V = ? WHERE ID = ?"); + prep.setInt(2, 1); + prep.setString(1, ""); + prep.executeUpdate(); + ResultSet rs = stat.executeQuery("SELECT V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertFalse(rs.next()); + prep.setNString(1, ""); + prep.executeUpdate(); + Statement stat2 = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateString(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateString("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateNString(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateNString("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + rs.updateObject(2, ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat2.executeQuery("SELECT ID, V FROM TEST"); + assertTrue(rs.next()); + 
assertNull(rs.getString(2)); + rs.updateObject("V", ""); + rs.updateRow(); + assertFalse(rs.next()); + rs = stat.executeQuery("SELECT V FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertFalse(rs.next()); + conn.close(); + } + + private void assertResultDate(String expected, Statement stat, String sql) + throws SQLException { + SimpleDateFormat iso8601 = new SimpleDateFormat( + "yyyy-MM-dd'T'HH:mm:ss"); + ResultSet rs = stat.executeQuery(sql); + if (rs.next()) { + assertEquals(expected, iso8601.format(rs.getTimestamp(1))); + } else { + assertEquals(expected, null); + } + } + private void assertResult(Object[][] expectedRowsOfValues, Statement stat, String sql) throws SQLException { assertResult(newSimpleResultSet(expectedRowsOfValues), stat, sql); diff --git a/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java b/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java new file mode 100644 index 0000000000..5d1fa2486c --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestCompatibilitySQLServer.java @@ -0,0 +1,85 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Test MSSQLServer compatibility mode. + */ +public class TestCompatibilitySQLServer extends TestDb { + + /** + * Run just this test. + * + * @param s ignored + */ + public static void main(String... 
s) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb("sqlserver"); + + final Connection conn = getConnection("sqlserver;MODE=MSSQLServer"); + try { + testDiscardTableHints(conn); + testPrimaryKeyIdentity(conn); + } finally { + conn.close(); + deleteDb("sqlserver"); + } + } + + private void testDiscardTableHints(Connection conn) throws SQLException { + final Statement stat = conn.createStatement(); + + stat.execute("create table parent(id int primary key, name varchar(255))"); + stat.execute("create table child(" + + "id int primary key, " + + "parent_id int, " + + "name varchar(255), " + + "foreign key (parent_id) references public.parent(id))"); + + stat.execute("select * from parent"); + stat.execute("select * from parent with(nolock)"); + stat.execute("select * from parent with(nolock, index = id)"); + stat.execute("select * from parent with(nolock, index(id, name))"); + + stat.execute("select * from parent p " + + "join child ch on ch.parent_id = p.id"); + stat.execute("select * from parent p with(nolock) " + + "join child ch with(nolock) on ch.parent_id = p.id"); + stat.execute("select * from parent p with(nolock) " + + "join child ch with(nolock, index = id) on ch.parent_id = p.id"); + stat.execute("select * from parent p with(nolock) " + + "join child ch with(nolock, index(id, name)) on ch.parent_id = p.id"); + } + + private void testPrimaryKeyIdentity(Connection conn) throws SQLException { + final Statement stat = conn.createStatement(); + + // IDENTITY after PRIMARY KEY is an undocumented syntax of MS SQL + stat.execute("create table test(id int primary key identity, expected_id int)"); + stat.execute("insert into test (expected_id) VALUES (1), (2), (3)"); + + final ResultSet results = stat.executeQuery("select * from test"); + while (results.next()) { + assertEquals(results.getInt("expected_id"), results.getInt("id")); + } + + 
stat.execute("create table test2 (id int primary key not null identity)"); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestCsv.java b/h2/src/test/org/h2/test/db/TestCsv.java index 10f89994c9..3dc6b1977a 100644 --- a/h2/src/test/org/h2/test/db/TestCsv.java +++ b/h2/src/test/org/h2/test/db/TestCsv.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,6 +13,7 @@ import java.io.Reader; import java.io.StringReader; import java.io.StringWriter; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -21,14 +22,14 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Csv; import org.h2.util.IOUtils; -import org.h2.util.New; import org.h2.util.StringUtils; /** @@ -37,7 +38,7 @@ * @author Thomas Mueller * @author Sylvain Cuaz (testNull) */ -public class TestCsv extends TestBase { +public class TestCsv extends TestDb { /** * Run just this test. @@ -47,7 +48,7 @@ public class TestCsv extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -76,12 +77,12 @@ private void testWriteColumnHeader() throws Exception { Connection conn = getConnection("csv"); Statement stat = conn.createStatement(); stat.execute("call csvwrite('" + getBaseDir() + - "/test.tsv', 'select x from dual', 'writeColumnHeader=false')"); + "/test.tsv', 'select x from system_range(1, 1)', 'writeColumnHeader=false')"); String x = IOUtils.readStringAndClose(IOUtils.getReader( FileUtils.newInputStream(getBaseDir() + "/test.tsv")), -1); assertEquals("\"1\"", x.trim()); stat.execute("call csvwrite('" + getBaseDir() + - "/test.tsv', 'select x from dual', 'writeColumnHeader=true')"); + "/test.tsv', 'select x from system_range(1, 1)', 'writeColumnHeader=true')"); x = IOUtils.readStringAndClose(IOUtils.getReader( FileUtils.newInputStream(getBaseDir() + "/test.tsv")), -1); x = x.trim(); @@ -105,9 +106,7 @@ private void testWriteResultSetDataType() throws Exception { csv.setLineSeparator(";"); csv.write(writer, rs); conn.close(); - // getTimestamp().getString() needs to be used (not for H2, but for - // Oracle) - assertEquals("TS,N;0101-01-01 12:00:00.0,;", writer.toString()); + assertEquals("TS,N;-0100-01-01 12:00:00,;", writer.toString()); } private void testCaseSensitiveColumnNames() throws Exception { @@ -182,7 +181,7 @@ private void testChangeData() throws Exception { private void testOptions() { Csv csv = new Csv(); assertEquals(",", csv.getFieldSeparatorWrite()); - assertEquals(SysProperties.LINE_SEPARATOR, csv.getLineSeparator()); + assertEquals(System.lineSeparator(), csv.getLineSeparator()); assertEquals("", csv.getNullString()); assertEquals('\"', csv.getEscapeCharacter()); assertEquals('"', csv.getFieldDelimiter()); @@ -231,9 +230,7 @@ private void testOptions() { assertEquals("\0", csv.getNullString()); assertEquals("", charset); - createClassProxy(Csv.class); - 
assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, csv). - setOptions("escape=a error=b"); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> csv.setOptions("escape=a error=b")); assertEquals('a', csv.getEscapeCharacter()); } @@ -241,9 +238,9 @@ private void testPseudoBom() throws Exception { ByteArrayOutputStream out = new ByteArrayOutputStream(); // UTF-8 "BOM" / marker out.write(StringUtils.convertHexToBytes("ef" + "bb" + "bf")); - out.write("\"ID\", \"NAME\"\n1, Hello".getBytes("UTF-8")); + out.write("\"ID\", \"NAME\"\n1, Hello".getBytes(StandardCharsets.UTF_8)); byte[] buff = out.toByteArray(); - Reader r = new InputStreamReader(new ByteArrayInputStream(buff), "UTF-8"); + Reader r = new InputStreamReader(new ByteArrayInputStream(buff), StandardCharsets.UTF_8); ResultSet rs = new Csv().read(r, null); assertEquals("ID", rs.getMetaData().getColumnLabel(1)); assertEquals("NAME", rs.getMetaData().getColumnLabel(2)); @@ -305,7 +302,7 @@ private void testNull() throws Exception { OutputStream out = FileUtils.newOutputStream(fileName, false); String csvContent = "\"A\",\"B\",\"C\",\"D\"\n\\N,\"\",\"\\N\","; - byte[] b = csvContent.getBytes("UTF-8"); + byte[] b = csvContent.getBytes(StandardCharsets.UTF_8); out.write(b, 0, b.length); out.close(); Csv csv = new Csv(); @@ -351,7 +348,7 @@ private void testRandomData() throws SQLException { int len = getSize(1000, 10000); PreparedStatement prep = conn.prepareStatement( "insert into test(a, b) values(?, ?)"); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(len); Random random = new Random(1); for (int i = 0; i < len; i++) { String a = randomData(random), b = randomData(random); @@ -400,7 +397,7 @@ private void testEmptyFieldDelimiter() throws Exception { InputStreamReader reader = new InputStreamReader( FileUtils.newInputStream(fileName)); String text = IOUtils.readStringAndClose(reader, -1).trim(); - text = StringUtils.replaceAll(text, "\n", " "); + text = text.replace('\n', ' '); 
assertEquals("ID|NAME 1|Hello", text); ResultSet rs = stat.executeQuery("select * from csvread('" + fileName + "', null, null, '|', '')"); @@ -490,7 +487,7 @@ private void testAsTable() throws SQLException { assertTrue(rs.next()); assertEquals("Hello", rs.getString(1)); assertFalse(rs.next()); - rs = stat.executeQuery("call csvread('" + getBaseDir() + "/test.csv')"); + rs = stat.executeQuery("select * from csvread('" + getBaseDir() + "/test.csv')"); assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); assertEquals("Hello", rs.getString(2)); @@ -557,21 +554,21 @@ private void testWriteRead() throws SQLException { stat.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); } long time; - time = System.currentTimeMillis(); + time = System.nanoTime(); new Csv().write(conn, getBaseDir() + "/testRW.csv", "SELECT X ID, 'Ruebezahl' NAME FROM SYSTEM_RANGE(1, " + len + ")", "UTF8"); - trace("write: " + (System.currentTimeMillis() - time)); + trace("write: " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); ResultSet rs; - time = System.currentTimeMillis(); + time = System.nanoTime(); for (int i = 0; i < 30; i++) { rs = new Csv().read(getBaseDir() + "/testRW.csv", null, "UTF8"); while (rs.next()) { // ignore } } - trace("read: " + (System.currentTimeMillis() - time)); + trace("read: " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); rs = new Csv().read(getBaseDir() + "/testRW.csv", null, "UTF8"); - // stat.execute("CREATE ALIAS CSVREAD FOR \"org.h2.tools.Csv.read\""); + // stat.execute("CREATE ALIAS CSVREAD FOR 'org.h2.tools.Csv.read'"); ResultSetMetaData meta = rs.getMetaData(); assertEquals(2, meta.getColumnCount()); for (int i = 0; i < len; i++) { diff --git a/h2/src/test/org/h2/test/db/TestDateStorage.java b/h2/src/test/org/h2/test/db/TestDateStorage.java index 2238b53aa2..98a7f05b77 100644 --- a/h2/src/test/org/h2/test/db/TestDateStorage.java +++ b/h2/src/test/org/h2/test/db/TestDateStorage.java @@ -1,29 +1,32 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.Date; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Time; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.SimpleTimeZone; import java.util.TimeZone; - -import org.h2.engine.SysProperties; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.unit.TestDate; -import org.h2.util.DateTimeUtils; import org.h2.value.ValueTimestamp; /** * Tests the date transfer and storage. */ -public class TestDateStorage extends TestBase { +public class TestDateStorage extends TestDb { /** * Run just this test. @@ -31,83 +34,119 @@ public class TestDateStorage extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - System.setProperty("h2.storeLocalTime", "true"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { - deleteDb("date"); - testMoveDatabaseToAnotherTimezone(); + deleteDb(getTestName()); + testDateTimeTimestampWithCalendar(); testAllTimeZones(); testCurrentTimeZone(); } - private void testMoveDatabaseToAnotherTimezone() throws SQLException { - if (config.memory) { - return; - } - if (!SysProperties.STORE_LOCAL_TIME) { - return; - } - String db = "date;LOG=0;FILE_LOCK=NO"; - Connection conn = getConnection(db); - Statement stat; - stat = conn.createStatement(); - stat.execute("create table date_list(tz varchar, t varchar, ts timestamp)"); - conn.close(); - TimeZone defaultTimeZone = TimeZone.getDefault(); - ArrayList distinct = TestDate.getDistinctTimeZones(); + private void testDateTimeTimestampWithCalendar() throws SQLException { + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + stat.execute("create table ts(x timestamp primary key)"); + stat.execute("create table t(x time primary key)"); + stat.execute("create table d(x date)"); + Calendar utcCalendar = new GregorianCalendar(new SimpleTimeZone(0, "Z")); + stat.execute("SET TIME ZONE 'PST'"); + TimeZone old = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("PST")); try { - for (TimeZone tz : distinct) { - // println("insert using " + tz.getID()); - TimeZone.setDefault(tz); - DateTimeUtils.resetCalendar(); - conn = getConnection(db); - PreparedStatement prep = conn.prepareStatement( - "insert into date_list values(?, ?, ?)"); - prep.setString(1, tz.getID()); - for (int m = 1; m < 10; m++) { - String s = "2000-0" + m + "-01 15:00:00"; - prep.setString(2, s); - prep.setTimestamp(3, Timestamp.valueOf(s)); - prep.execute(); - } - conn.close(); - } - // printTime("inserted"); - for (TimeZone target : distinct) { - // println("select from " + 
target.getID()); - if ("Pacific/Kiritimati".equals(target.getID())) { - // there is a problem with this time zone, but it seems - // unrelated to this database (possibly wrong timezone - // information?) - continue; - } - TimeZone.setDefault(target); - DateTimeUtils.resetCalendar(); - conn = getConnection(db); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from date_list order by t"); - while (rs.next()) { - String source = rs.getString(1); - String a = rs.getString(2); - String b = rs.getString(3); - b = b.substring(0, a.length()); - if (!a.equals(b)) { - assertEquals(source + ">" + target, a, b); - } - } - conn.close(); - } + // 2010-03-14T02:15:00Z + Timestamp ts1 = Timestamp.valueOf("2010-03-13 18:15:00"); + Time t1 = new Time(ts1.getTime()); + Date d1 = new Date(ts1.getTime()); + // when converted to UTC, this is 03:15, which doesn't actually + // exist because of summer time change at that day + // 2010-03-14T03:15:00Z + Timestamp ts2 = Timestamp.valueOf("2010-03-13 19:15:00"); + Time t2 = new Time(ts2.getTime()); + Date d2 = new Date(ts2.getTime()); + PreparedStatement prep; + ResultSet rs; + prep = conn.prepareStatement("insert into ts values(?)"); + prep.setTimestamp(1, ts1, utcCalendar); + prep.execute(); + prep.setTimestamp(1, ts2, utcCalendar); + prep.execute(); + prep = conn.prepareStatement("insert into t values(?)"); + prep.setTime(1, t1, utcCalendar); + prep.execute(); + prep.setTime(1, t2, utcCalendar); + prep.execute(); + prep = conn.prepareStatement("insert into d values(?)"); + prep.setDate(1, d1, utcCalendar); + prep.execute(); + prep.setDate(1, d2, utcCalendar); + prep.execute(); + rs = stat.executeQuery("select * from ts order by x"); + rs.next(); + assertEquals("2010-03-14 02:15:00", + rs.getString(1)); + assertEquals("2010-03-13 18:15:00.0", + rs.getTimestamp(1, utcCalendar).toString()); + assertEquals("2010-03-14 03:15:00.0", + rs.getTimestamp(1).toString()); + assertEquals("2010-03-14 02:15:00", + 
rs.getString("x")); + assertEquals("2010-03-13 18:15:00.0", + rs.getTimestamp("x", utcCalendar).toString()); + assertEquals("2010-03-14 03:15:00.0", + rs.getTimestamp("x").toString()); + rs.next(); + assertEquals("2010-03-14 03:15:00", + rs.getString(1)); + assertEquals("2010-03-13 19:15:00.0", + rs.getTimestamp(1, utcCalendar).toString()); + assertEquals("2010-03-14 03:15:00.0", + rs.getTimestamp(1).toString()); + assertEquals("2010-03-14 03:15:00", + rs.getString("x")); + assertEquals("2010-03-13 19:15:00.0", + rs.getTimestamp("x", utcCalendar).toString()); + assertEquals("2010-03-14 03:15:00.0", + rs.getTimestamp("x").toString()); + rs = stat.executeQuery("select * from t order by x"); + rs.next(); + assertEquals("02:15:00", rs.getString(1)); + assertEquals("18:15:00", rs.getTime(1, utcCalendar).toString()); + assertEquals("02:15:00", rs.getTime(1).toString()); + assertEquals("02:15:00", rs.getString("x")); + assertEquals("18:15:00", rs.getTime("x", utcCalendar).toString()); + assertEquals("02:15:00", rs.getTime("x").toString()); + rs.next(); + assertEquals("03:15:00", rs.getString(1)); + assertEquals("19:15:00", rs.getTime(1, utcCalendar).toString()); + assertEquals("03:15:00", rs.getTime(1).toString()); + assertEquals("03:15:00", rs.getString("x")); + assertEquals("19:15:00", rs.getTime("x", utcCalendar).toString()); + assertEquals("03:15:00", rs.getTime("x").toString()); + rs = stat.executeQuery("select * from d order by x"); + rs.next(); + assertEquals("2010-03-14", rs.getString(1)); + assertEquals("2010-03-13", rs.getDate(1, utcCalendar).toString()); + assertEquals("2010-03-14", rs.getDate(1).toString()); + assertEquals("2010-03-14", rs.getString("x")); + assertEquals("2010-03-13", rs.getDate("x", utcCalendar).toString()); + assertEquals("2010-03-14", rs.getDate("x").toString()); + rs.next(); + assertEquals("2010-03-14", rs.getString(1)); + assertEquals("2010-03-13", rs.getDate(1, utcCalendar).toString()); + assertEquals("2010-03-14", 
rs.getDate(1).toString()); + assertEquals("2010-03-14", rs.getString("x")); + assertEquals("2010-03-13", rs.getDate("x", utcCalendar).toString()); + assertEquals("2010-03-14", rs.getDate("x").toString()); } finally { - TimeZone.setDefault(defaultTimeZone); - DateTimeUtils.resetCalendar(); + stat.execute("SET TIME ZONE LOCAL"); + TimeZone.setDefault(old); } - // printTime("done"); - conn = getConnection(db); - stat = conn.createStatement(); - stat.execute("drop table date_list"); + stat.execute("drop table ts"); + stat.execute("drop table t"); + stat.execute("drop table d"); conn.close(); } @@ -124,29 +163,39 @@ private static void testCurrentTimeZone() { } private static void test(int year, int month, int day, int hour) { - ValueTimestamp.parse(year + "-" + month + "-" + day + " " + hour + ":00:00"); + ValueTimestamp.parse(year + "-" + month + "-" + day + " " + hour + ":00:00", null); } private void testAllTimeZones() throws SQLException { - Connection conn = getConnection("date"); + Connection conn = getConnection(getTestName()); TimeZone defaultTimeZone = TimeZone.getDefault(); + PreparedStatement prepTimeZone = conn.prepareStatement("SET TIME ZONE ?"); PreparedStatement prep = conn.prepareStatement("CALL CAST(? AS DATE)"); try { ArrayList distinct = TestDate.getDistinctTimeZones(); for (TimeZone tz : distinct) { + /* + * Some OpenJDKs have unusable timezones with negative DST that + * causes IAE in SimpleTimeZone(). 
+ */ + if (tz.getID().startsWith("SystemV/")) { + if (tz.getDSTSavings() < 0) { + continue; + } + } // println(tz.getID()); + prepTimeZone.setString(1, tz.getID()); + prepTimeZone.executeUpdate(); TimeZone.setDefault(tz); - DateTimeUtils.resetCalendar(); for (int d = 101; d < 129; d++) { test(prep, d); } } } finally { TimeZone.setDefault(defaultTimeZone); - DateTimeUtils.resetCalendar(); } conn.close(); - deleteDb("date"); + deleteDb(getTestName()); } private void test(PreparedStatement prep, int d) throws SQLException { diff --git a/h2/src/test/org/h2/test/db/TestDeadlock.java b/h2/src/test/org/h2/test/db/TestDeadlock.java index 5b47bbb8cd..03d5b5ceaa 100644 --- a/h2/src/test/org/h2/test/db/TestDeadlock.java +++ b/h2/src/test/org/h2/test/db/TestDeadlock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,15 +10,15 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - -import org.h2.api.ErrorCode; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Test for deadlocks in the code, and test the deadlock detection mechanism. */ -public class TestDeadlock extends TestBase { +public class TestDeadlock extends TestDb { /** * The first connection. @@ -42,19 +42,16 @@ public class TestDeadlock extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("deadlock"); + testTemporaryTablesAndMetaDataLocking(); testDeadlockInFulltextSearch(); testConcurrentLobReadAndTempResultTableDelete(); - testDiningPhilosophers(); - testLockUpgrade(); - testThreePhilosophers(); testNoDeadlock(); - testThreeSome(); deleteDb("deadlock"); } @@ -80,8 +77,8 @@ public void call() throws Exception { } }; t.execute(); - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < 1000) { + long start = System.nanoTime(); + while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(1)) { stat2.execute("insert into test values(1, 'Hello')"); stat2.execute("delete from test"); } @@ -117,8 +114,8 @@ public void call() throws Exception { } }; t.execute(); - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < 1000) { + long start = System.nanoTime(); + while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(1)) { Reader r = rs2.getCharacterStream(2); char[] buff = new char[1024]; while (true) { @@ -232,170 +229,18 @@ public void execute() throws SQLException { } - private void testThreePhilosophers() throws Exception { - if (config.mvcc) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST_A(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_B(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_C(ID INT PRIMARY KEY)"); - c1.commit(); - c1.createStatement().execute("INSERT INTO TEST_A VALUES(1)"); - c2.createStatement().execute("INSERT INTO TEST_B VALUES(1)"); - c3.createStatement().execute("INSERT INTO TEST_C VALUES(1)"); - DoIt t2 = new DoIt() { - @Override - public void execute() throws SQLException { - c1.createStatement().execute("DELETE FROM TEST_B"); - c1.commit(); - } - }; - t2.start(); - DoIt t3 = new DoIt() { - @Override - public 
void execute() throws SQLException { - c2.createStatement().execute("DELETE FROM TEST_C"); - c2.commit(); - } - }; - t3.start(); - try { - c3.createStatement().execute("DELETE FROM TEST_A"); - c3.commit(); - } catch (SQLException e) { - catchDeadlock(e); - } - t2.join(); - t3.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c3.commit(); - c1.createStatement().execute("DROP TABLE TEST_A, TEST_B, TEST_C"); - end(); - } - - // test case for issue # 61 - // http://code.google.com/p/h2database/issues/detail?id=61) - private void testThreeSome() throws Exception { - if (config.mvcc) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST_A(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_B(ID INT PRIMARY KEY)"); - c1.createStatement().execute("CREATE TABLE TEST_C(ID INT PRIMARY KEY)"); - c1.commit(); - c1.createStatement().execute("INSERT INTO TEST_A VALUES(1)"); - c1.createStatement().execute("INSERT INTO TEST_B VALUES(1)"); - c2.createStatement().execute("INSERT INTO TEST_C VALUES(1)"); - DoIt t2 = new DoIt() { - @Override - public void execute() throws SQLException { - c3.createStatement().execute("INSERT INTO TEST_B VALUES(2)"); - c3.commit(); - } - }; - t2.start(); - DoIt t3 = new DoIt() { - @Override - public void execute() throws SQLException { - c2.createStatement().execute("INSERT INTO TEST_A VALUES(2)"); - c2.commit(); - } - }; - t3.start(); - try { - c1.createStatement().execute("INSERT INTO TEST_C VALUES(2)"); - c1.commit(); - } catch (SQLException e) { - catchDeadlock(e); - c1.rollback(); - } - t2.join(); - t3.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c3.commit(); - c1.createStatement().execute("DROP TABLE TEST_A, TEST_B, TEST_C"); - end(); - } - - private void testLockUpgrade() throws Exception { - if (config.mvcc) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE TEST(ID INT PRIMARY KEY)"); - c1.createStatement().execute("INSERT INTO TEST VALUES(1)"); - 
c1.commit(); - c1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - c2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - c1.createStatement().executeQuery("SELECT * FROM TEST"); - c2.createStatement().executeQuery("SELECT * FROM TEST"); - Thread t1 = new DoIt() { - @Override - public void execute() throws SQLException { - c1.createStatement().execute("DELETE FROM TEST"); - c1.commit(); - } - }; - t1.start(); - try { - c2.createStatement().execute("DELETE FROM TEST"); - c2.commit(); - } catch (SQLException e) { - catchDeadlock(e); - } - t1.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c1.createStatement().execute("DROP TABLE TEST"); - end(); - } - - private void testDiningPhilosophers() throws Exception { - if (config.mvcc) { - return; - } - initTest(); - c1.createStatement().execute("CREATE TABLE T1(ID INT)"); - c1.createStatement().execute("CREATE TABLE T2(ID INT)"); - c1.createStatement().execute("INSERT INTO T1 VALUES(1)"); - c2.createStatement().execute("INSERT INTO T2 VALUES(1)"); - DoIt t1 = new DoIt() { - @Override - public void execute() throws SQLException { - c1.createStatement().execute("INSERT INTO T2 VALUES(2)"); - c1.commit(); - } - }; - t1.start(); - try { - c2.createStatement().execute("INSERT INTO T1 VALUES(2)"); - } catch (SQLException e) { - catchDeadlock(e); - } - t1.join(); - checkDeadlock(); - c1.commit(); - c2.commit(); - c1.createStatement().execute("DROP TABLE T1, T2"); - end(); - } - private void checkDeadlock() throws SQLException { - assertTrue(lastException != null); - assertKnownException(lastException); - assertEquals(ErrorCode.DEADLOCK_1, lastException.getErrorCode()); - SQLException e2 = lastException.getNextException(); - if (e2 != null) { - // we have two exception, but there should only be one - throw new SQLException("Expected one exception, got multiple", e2); - } + // there was a bug in the meta data locking here + private void testTemporaryTablesAndMetaDataLocking() throws Exception { 
+ deleteDb("deadlock"); + Connection conn = getConnection("deadlock"); + Statement stmt = conn.createStatement(); + conn.setAutoCommit(false); + stmt.execute("CREATE SEQUENCE IF NOT EXISTS SEQ1 START WITH 1000000"); + stmt.execute("CREATE FORCE VIEW V1 AS WITH RECURSIVE TEMP(X) AS " + + "(SELECT x FROM DUAL) SELECT * FROM TEMP"); + stmt.executeQuery("SELECT NEXT VALUE FOR SEQ1"); + conn.close(); } } diff --git a/h2/src/test/org/h2/test/db/TestDrop.java b/h2/src/test/org/h2/test/db/TestDrop.java deleted file mode 100644 index 3f1ee7dd19..0000000000 --- a/h2/src/test/org/h2/test/db/TestDrop.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.test.TestBase; - -/** - * Test DROP statement - */ -public class TestDrop extends TestBase { - - private Connection conn; - private Statement stat; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb("drop"); - conn = getConnection("drop"); - stat = conn.createStatement(); - - testTableDependsOnView(); - testComputedColumnDependency(); - testInterSchemaDependency(); - - conn.close(); - deleteDb("drop"); - } - - private void testTableDependsOnView() throws SQLException { - stat.execute("drop all objects"); - stat.execute("create table a(x int)"); - stat.execute("create view b as select * from a"); - stat.execute("create table c(y int check (select count(*) from b) = 0)"); - stat.execute("drop all objects"); - } - - private void testComputedColumnDependency() throws SQLException { - stat.execute("DROP ALL OBJECTS"); - stat.execute("CREATE TABLE A (A INT);"); - stat.execute("CREATE TABLE B (B INT AS SELECT A FROM A);"); - stat.execute("DROP ALL OBJECTS"); - stat.execute("CREATE SCHEMA TEST_SCHEMA"); - stat.execute("CREATE TABLE TEST_SCHEMA.A (A INT);"); - stat.execute("CREATE TABLE TEST_SCHEMA.B " + - "(B INT AS SELECT A FROM TEST_SCHEMA.A);"); - stat.execute("DROP SCHEMA TEST_SCHEMA"); - } - - private void testInterSchemaDependency() throws SQLException { - stat.execute("drop all objects;"); - stat.execute("create schema table_view"); - stat.execute("set schema table_view"); - stat.execute("create table test1 (id int, name varchar(20))"); - stat.execute("create view test_view_1 as (select * from test1)"); - stat.execute("set schema public"); - stat.execute("create schema test_run"); - stat.execute("set schema test_run"); - stat.execute("create table test2 (id int, address varchar(20), " + - "constraint a_cons check (id in (select id from table_view.test1)))"); - stat.execute("set schema public"); - stat.execute("drop all objects"); - } -} diff --git a/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java b/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java index f7b696b217..841579370e 100644 --- 
a/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java +++ b/h2/src/test/org/h2/test/db/TestDuplicateKeyUpdate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,12 +11,14 @@ import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests for the ON DUPLICATE KEY UPDATE in the Insert class. */ -public class TestDuplicateKeyUpdate extends TestBase { +public class TestDuplicateKeyUpdate extends TestDb { /** * Run just this test. @@ -24,7 +26,7 @@ public class TestDuplicateKeyUpdate extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -37,6 +39,9 @@ public void test() throws SQLException { testDuplicateExpression(conn); testOnDuplicateKeyInsertBatch(conn); testOnDuplicateKeyInsertMultiValue(conn); + testPrimaryKeyAndUniqueKey(conn); + testUpdateCountAndQualifiedNames(conn); + testEnum(conn); conn.close(); deleteDb("duplicateKeyUpdate"); } @@ -109,12 +114,14 @@ private void testDuplicateOnUnique(Connection conn) throws SQLException { assertEquals("UPDATE", rs.getNString(1)); stat.execute("INSERT INTO table_test2 (a_text, some_text, updatable_text ) " + - "VALUES ('b', 'b', 'test') " + + "VALUES ('b', 'b', 'test'), ('c', 'c', 'test2') " + "ON DUPLICATE KEY UPDATE updatable_text=values(updatable_text)"); rs = stat.executeQuery("SELECT updatable_text " + - "FROM table_test2 where a_text = 'b'"); + "FROM table_test2 where a_text in ('b', 'c') order by a_text"); rs.next(); assertEquals("test", rs.getNString(1)); + rs.next(); + 
assertEquals("test2", rs.getNString(1)); } private void testDuplicateCache(Connection conn) throws SQLException { @@ -186,12 +193,12 @@ private void testOnDuplicateKeyInsertBatch(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test " + - "(key varchar(1) primary key, count int not null)"); + "(id varchar(1) primary key, count int not null)"); // Insert multiple values as a batch for (int i = 0; i <= 2; ++i) { PreparedStatement prep = conn.prepareStatement( - "insert into test(key, count) values(?, ?) " + + "insert into test(id, count) values(?, ?) " + "on duplicate key update count = count + 1"); prep.setString(1, "a"); prep.setInt(2, 1); @@ -207,7 +214,7 @@ private void testOnDuplicateKeyInsertBatch(Connection conn) // Check result ResultSet rs = stat.executeQuery( - "select count from test where key = 'a'"); + "select count from test where id = 'a'"); rs.next(); assertEquals(3, rs.getInt(1)); @@ -218,12 +225,12 @@ private void testOnDuplicateKeyInsertMultiValue(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test" + - "(key varchar(1) primary key, count int not null)"); + "(id varchar(1) primary key, count int not null)"); // Insert multiple values in single insert operation for (int i = 0; i <= 2; ++i) { PreparedStatement prep = conn.prepareStatement( - "insert into test(key, count) values(?, ?), (?, ?), (?, ?) " + + "insert into test(id, count) values(?, ?), (?, ?), (?, ?) 
" + "on duplicate key update count = count + 1"); prep.setString(1, "a"); prep.setInt(2, 1); @@ -236,11 +243,72 @@ private void testOnDuplicateKeyInsertMultiValue(Connection conn) conn.commit(); // Check result - ResultSet rs = stat.executeQuery("select count from test where key = 'a'"); + ResultSet rs = stat.executeQuery("select count from test where id = 'a'"); rs.next(); assertEquals(3, rs.getInt(1)); stat.execute("drop table test"); } + private void testPrimaryKeyAndUniqueKey(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE test (id INT, dup INT, " + + "counter INT, PRIMARY KEY(id), UNIQUE(dup))"); + stat.execute("INSERT INTO test (id, dup, counter) VALUES (1, 1, 1)"); + stat.execute("INSERT INTO test (id, dup, counter) VALUES (2, 1, 1) " + + "ON DUPLICATE KEY UPDATE counter = counter + VALUES(counter)"); + + // Check result + ResultSet rs = stat.executeQuery("SELECT counter FROM test ORDER BY id"); + rs.next(); + assertEquals(2, rs.getInt(1)); + assertEquals(false, rs.next()); + + stat.execute("drop table test"); + } + + private void testUpdateCountAndQualifiedNames(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("set mode mysql"); + stat.execute("create schema s2"); + stat.execute("create table s2.test(id int primary key, name varchar(255))"); + stat.execute("insert into s2.test(id, name) values(1, 'a')"); + assertEquals(2, stat.executeUpdate("insert into s2.test(id, name) values(1, 'b') " + + "on duplicate key update name = values(name)")); + assertEquals(0, stat.executeUpdate("insert into s2.test(id, name) values(1, 'b') " + + "on duplicate key update name = values(name)")); + assertEquals(1, stat.executeUpdate("insert into s2.test(id, name) values(2, 'c') " + + "on duplicate key update name = values(name)")); + ResultSet rs = stat.executeQuery("select id, name from s2.test order by id"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + 
assertEquals("b", rs.getString(2)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals("c", rs.getString(2)); + assertFalse(rs.next()); + // Check qualified names in ON UPDATE case + assertEquals(2, stat.executeUpdate("insert into s2.test(id, name) values(2, 'd') " + + "on duplicate key update test.name = values(name)")); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat) + .executeUpdate("insert into s2.test(id, name) values(2, 'd') " + + "on duplicate key update test2.name = values(name)"); + assertEquals(2, stat.executeUpdate("insert into s2.test(id, name) values(2, 'e') " + + "on duplicate key update s2.test.name = values(name)")); + assertThrows(ErrorCode.SCHEMA_NAME_MUST_MATCH, stat) + .executeUpdate("insert into s2.test(id, name) values(2, 'd') " + + "on duplicate key update s3.test.name = values(name)"); + stat.execute("drop schema s2 cascade"); + } + + private void testEnum(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("create table test(e enum('a', 'b') unique)"); + PreparedStatement ps = conn.prepareStatement("insert into test(e) values (?) on duplicate key update e = e"); + ps.setString(1, "a"); + assertEquals(1, ps.executeUpdate()); + assertEquals(0, ps.executeUpdate()); + stat.execute("drop table test"); + } + } diff --git a/h2/src/test/org/h2/test/db/TestEncryptedDb.java b/h2/src/test/org/h2/test/db/TestEncryptedDb.java index 87c4113663..de2f8fa27f 100644 --- a/h2/src/test/org/h2/test/db/TestEncryptedDb.java +++ b/h2/src/test/org/h2/test/db/TestEncryptedDb.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,11 +12,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test using an encrypted database. */ -public class TestEncryptedDb extends TestBase { +public class TestEncryptedDb extends TestDb { /** * Run just this test. @@ -24,35 +25,42 @@ public class TestEncryptedDb extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory || config.cipher != null) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { deleteDb("encrypted"); - Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("CHECKPOINT"); - stat.execute("SET WRITE_DELAY 0"); - stat.execute("INSERT INTO TEST VALUES(1)"); - stat.execute("SHUTDOWN IMMEDIATELY"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - - assertThrows(ErrorCode.FILE_ENCRYPTION_ERROR_1, this). 
- getConnection("encrypted;CIPHER=AES", "sa", "1234 1234"); - - conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); - - conn.close(); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, + () -> getConnection("encrypted;CIPHER=AES;PAGE_SIZE=2048", "sa", "1234 1234")); + try (Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123")) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT)"); + stat.execute("CHECKPOINT"); + stat.execute("SET WRITE_DELAY 0"); + stat.execute("INSERT INTO TEST VALUES(1)"); + stat.execute("SHUTDOWN IMMEDIATELY"); + } + + assertThrows(ErrorCode.FILE_ENCRYPTION_ERROR_1, // + () -> getConnection("encrypted;CIPHER=AES", "sa", "1234 1234")); + + try (Connection conn = getConnection("encrypted;CIPHER=AES", "sa", "123 123")) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + } +// conn.close(); deleteDb("encrypted"); } diff --git a/h2/src/test/org/h2/test/db/TestExclusive.java b/h2/src/test/org/h2/test/db/TestExclusive.java index 63d1d4e5af..0fb4c2ceab 100644 --- a/h2/src/test/org/h2/test/db/TestExclusive.java +++ b/h2/src/test/org/h2/test/db/TestExclusive.java @@ -1,23 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.concurrent.atomic.AtomicInteger; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Test for the exclusive mode. */ -public class TestExclusive extends TestBase { +public class TestExclusive extends TestDb { /** * Run just this test. @@ -25,23 +28,27 @@ public class TestExclusive extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testSetExclusiveTrueFalse(); + testSetExclusiveGetExclusive(); + } + + private void testSetExclusiveTrueFalse() throws Exception { deleteDb("exclusive"); Connection conn = getConnection("exclusive"); Statement stat = conn.createStatement(); stat.execute("set exclusive true"); - assertThrows(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE, this). 
-                getConnection("exclusive");
+        assertThrows(ErrorCode.DATABASE_IS_IN_EXCLUSIVE_MODE, () -> getConnection("exclusive"));
         stat.execute("set exclusive false");
         Connection conn2 = getConnection("exclusive");
         final Statement stat2 = conn2.createStatement();
         stat.execute("set exclusive true");
-        final AtomicInteger state = new AtomicInteger(0);
+        final AtomicInteger state = new AtomicInteger();
         Task task = new Task() {
             @Override
             public void call() throws SQLException {
@@ -64,4 +71,56 @@ public void call() throws SQLException {
         deleteDb("exclusive");
     }
 
+    private void testSetExclusiveGetExclusive() throws SQLException {
+        deleteDb("exclusive");
+        try (Connection connection = getConnection("exclusive")) {
+            assertFalse(getExclusiveMode(connection));
+
+            setExclusiveMode(connection, 1);
+            assertTrue(getExclusiveMode(connection));
+
+            setExclusiveMode(connection, 0);
+            assertFalse(getExclusiveMode(connection));
+
+            // Setting to the already-active mode should not throw an exception
+            setExclusiveMode(connection, 0);
+            assertFalse(getExclusiveMode(connection));
+
+            setExclusiveMode(connection, 1);
+            assertTrue(getExclusiveMode(connection));
+
+            // Setting to the already-active mode should not throw an exception
+            setExclusiveMode(connection, 1);
+            assertTrue(getExclusiveMode(connection));
+
+            setExclusiveMode(connection, 2);
+            assertTrue(getExclusiveMode(connection));
+
+            setExclusiveMode(connection, 0);
+            assertFalse(getExclusiveMode(connection));
+        }
+    }
+
+
+    private static void setExclusiveMode(Connection connection, int exclusiveMode) throws SQLException {
+        String sql = "SET EXCLUSIVE " + exclusiveMode;
+
+        try (PreparedStatement statement = connection.prepareStatement(sql)) {
+            statement.execute();
+        }
+    }
+
+    private static boolean getExclusiveMode(Connection connection) throws SQLException{
+        boolean exclusiveMode = false;
+
+        String sql = "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'EXCLUSIVE'";
+        try (PreparedStatement statement = connection.prepareStatement(sql)) {
+ ResultSet result = statement.executeQuery(); + if (result.next()) { + exclusiveMode = result.getBoolean(1); + } + } + + return exclusiveMode; + } } diff --git a/h2/src/test/org/h2/test/db/TestFullText.java b/h2/src/test/org/h2/test/db/TestFullText.java index 71b95ac74b..0e7da44762 100644 --- a/h2/src/test/org/h2/test/db/TestFullText.java +++ b/h2/src/test/org/h2/test/db/TestFullText.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -18,15 +18,19 @@ import java.util.Random; import java.util.StringTokenizer; import java.util.UUID; +import java.util.concurrent.TimeUnit; + import org.h2.fulltext.FullText; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.IOUtils; import org.h2.util.Task; /** * Fulltext search tests. */ -public class TestFullText extends TestBase { +public class TestFullText extends TestDb { /** * The words used in this test. @@ -42,7 +46,7 @@ public class TestFullText extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -71,10 +75,7 @@ public void test() throws Exception { testPerformance(true); testReopen(true); testDropIndex(true); - } catch (ClassNotFoundException e) { - println("Class not found, not tested: " + LUCENE_FULLTEXT_CLASS_NAME); - // ok - } catch (NoClassDefFoundError e) { + } catch (ClassNotFoundException | NoClassDefFoundError e) { println("Class not found, not tested: " + LUCENE_FULLTEXT_CLASS_NAME); // ok } @@ -84,15 +85,15 @@ public void test() throws Exception { deleteDb("fullTextReopen"); } - private static void close(Collection list) throws SQLException { + private static void close(Collection list) { for (Connection conn : list) { - conn.close(); + IOUtils.closeSilently(conn); } } private Connection getConnection(String name, Collection list) throws SQLException { - Connection conn = getConnection(name); + Connection conn = getConnection(name + ";MODE=STRICT"); list.add(conn); return conn; } @@ -102,12 +103,11 @@ private void testAutoAnalyze() throws SQLException { Connection conn; Statement stat; - ArrayList connList = new ArrayList(); + ArrayList connList = new ArrayList<>(); conn = getConnection("fullTextNative", connList); stat = conn.createStatement(); - stat.execute("create alias if not exists ft_init " + - "for \"org.h2.fulltext.FullText.init\""); + stat.execute("create alias if not exists ft_init for 'org.h2.fulltext.FullText.init'"); stat.execute("call ft_init()"); stat.execute("create table test(id int primary key, name varchar)"); stat.execute("call ft_create_index('PUBLIC', 'TEST', 'NAME')"); @@ -124,11 +124,10 @@ private void testAutoAnalyze() throws SQLException { private void testNativeFeatures() throws SQLException { deleteDb("fullTextNative"); - ArrayList connList = new ArrayList(); + ArrayList connList = new ArrayList<>(); Connection conn = getConnection("fullTextNative", connList); Statement stat = 
conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CALL FT_INIT()"); FullText.setIgnoreList(conn, "to,this"); FullText.setWhitespaceChars(conn, " ,.-"); @@ -154,8 +153,8 @@ private void testNativeFeatures() throws SQLException { assertEquals("KEYS", rs.getMetaData().getColumnLabel(4)); assertEquals("PUBLIC", rs.getString(1)); assertEquals("TEST", rs.getString(2)); - assertEquals("(ID)", rs.getString(3)); - assertEquals("(1)", rs.getString(4)); + assertEquals("[ID]", rs.getString(3)); + assertEquals("[1]", rs.getString(4)); rs = stat.executeQuery("SELECT * FROM FT_SEARCH('this', 0, 0)"); assertFalse(rs.next()); @@ -208,7 +207,7 @@ private void testTransaction(boolean lucene) throws SQLException { String prefix = lucene ? "FTL" : "FT"; deleteDb("fullTextTransaction"); FileUtils.deleteRecursive(getBaseDir() + "/fullTextTransaction", false); - ArrayList connList = new ArrayList(); + ArrayList connList = new ArrayList<>(); Connection conn = getConnection("fullTextTransaction", connList); Statement stat = conn.createStatement(); initFullText(stat, lucene); @@ -244,80 +243,80 @@ private void testMultiThreaded(boolean lucene) throws Exception { final String prefix = lucene ? 
"FTL" : "FT"; trace("Testing multithreaded " + prefix); deleteDb("fullText"); - ArrayList connList = new ArrayList(); - int len = 2; - Task[] task = new Task[len]; - for (int i = 0; i < len; i++) { - // final Connection conn = - // getConnection("fullText;MULTI_THREADED=1;LOCK_TIMEOUT=10000"); - final Connection conn = getConnection("fullText", connList); - Statement stat = conn.createStatement(); - initFullText(stat, lucene); - initFullText(stat, lucene); - final String tableName = "TEST" + i; - stat.execute("CREATE TABLE " + tableName + - "(ID INT PRIMARY KEY, DATA VARCHAR)"); - stat.execute("CALL " + prefix + - "_CREATE_INDEX('PUBLIC', '" + tableName + "', NULL)"); - task[i] = new Task() { - @Override - public void call() throws SQLException { - trace("starting thread " + Thread.currentThread()); - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO " + tableName + " VALUES(?, ?)"); - Statement stat = conn.createStatement(); - Random random = new Random(); - int x = 0; - while (!stop) { - trace("stop = " + stop + " for " + Thread.currentThread()); - StringBuilder buff = new StringBuilder(); - for (int j = 0; j < 1000; j++) { - buff.append(" ").append(random.nextInt(10000)); - buff.append(" x").append(j); - buff.append(" ").append(KNOWN_WORDS[j % KNOWN_WORDS.length]); + ArrayList connList = new ArrayList<>(); + try { + int len = 2; + Task[] task = new Task[len]; + for (int i = 0; i < len; i++) { + final Connection conn = getConnection("fullText;LOCK_TIMEOUT=60000", connList); + Statement stat = conn.createStatement(); + initFullText(stat, lucene); + initFullText(stat, lucene); + final String tableName = "TEST" + i; + stat.execute("CREATE TABLE " + tableName + + "(ID INT PRIMARY KEY, DATA VARCHAR)"); + stat.execute("CALL " + prefix + + "_CREATE_INDEX('PUBLIC', '" + tableName + "', NULL)"); + task[i] = new Task() { + @Override + public void call() throws SQLException { + trace("starting thread " + Thread.currentThread()); + PreparedStatement prep = 
conn.prepareStatement( + "INSERT INTO " + tableName + " VALUES(?, ?)"); + Statement stat = conn.createStatement(); + Random random = new Random(); + int x = 0; + while (!stop) { + trace("stop = " + stop + " for " + Thread.currentThread()); + StringBuilder buff = new StringBuilder(); + for (int j = 0; j < 1000; j++) { + buff.append(" ").append(random.nextInt(10000)); + buff.append(" x").append(j); + buff.append(" ").append(KNOWN_WORDS[j % KNOWN_WORDS.length]); + } + prep.setInt(1, x); + prep.setString(2, buff.toString()); + prep.execute(); + x++; + for (String knownWord : KNOWN_WORDS) { + trace("searching for " + knownWord + " with " + + Thread.currentThread()); + ResultSet rs = stat.executeQuery("SELECT * FROM " + + prefix + "_SEARCH('" + knownWord + + "', 0, 0)"); + assertTrue(rs.next()); + } } - prep.setInt(1, x); - prep.setString(2, buff.toString()); - prep.execute(); - x++; - for (String knownWord : KNOWN_WORDS) { - trace("searching for " + knownWord + " with " + - Thread.currentThread()); - ResultSet rs = stat.executeQuery("SELECT * FROM " + - prefix + "_SEARCH('" + knownWord + - "', 0, 0)"); - assertTrue(rs.next()); + trace("closing connection"); + if (!config.memory) { + conn.close(); } + trace("completed thread " + Thread.currentThread()); } - trace("closing connection"); - if (!config.memory) { - conn.close(); - } - trace("completed thread " + Thread.currentThread()); - } - }; - } - for (Task t : task) { - t.execute(); - } - trace("sleeping"); - Thread.sleep(1000); - - trace("setting stop to true"); - for (Task t : task) { - trace("joining " + t); - t.get(); - trace("done joining " + t); + }; + } + for (Task t : task) { + t.execute(); + } + trace("sleeping"); + Thread.sleep(1000); + + trace("setting stop to true"); + for (Task t : task) { + trace("joining " + t); + t.get(); + trace("done joining " + t); + } + } finally { + close(connList); } - close(connList); } private void testStreamLob() throws SQLException { deleteDb("fullText"); Connection conn = 
getConnection("fullText"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, DATA CLOB)"); FullText.createIndex(conn, "PUBLIC", "TEST", null); conn.setAutoCommit(false); @@ -362,8 +361,7 @@ private void testCreateDropNative() throws SQLException { FileUtils.deleteRecursive(getBaseDir() + "/fullText", false); Connection conn = getConnection("fullText"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT " + - "FOR \"org.h2.fulltext.FullText.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR 'org.h2.fulltext.FullText.init'"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); for (int i = 0; i < 10; i++) { FullText.createIndex(conn, "PUBLIC", "TEST", null); @@ -444,15 +442,26 @@ private void testPerformance(boolean lucene) throws SQLException { initFullText(stat, lucene); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute( - "CREATE TABLE TEST AS SELECT * FROM INFORMATION_SCHEMA.HELP"); - stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT NOT NULL"); - stat.execute("CREATE PRIMARY KEY ON TEST(ID)"); - long time = System.currentTimeMillis(); + "CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY," + + " SECTION VARCHAR, TOPIC VARCHAR, SYNTAX VARCHAR, TEXT VARCHAR)"); + PreparedStatement ps = conn.prepareStatement( + "INSERT INTO TEST(SECTION, TOPIC, SYNTAX, TEXT) VALUES (?, ?, ?, ?)"); + try (ResultSet rs = stat.executeQuery("HELP \"\"")) { + while (rs.next()) { + for (int i = 1; i <= 4; i++) { + ps.setString(i, rs.getString(i)); + } + ps.addBatch(); + } + } + ps.executeUpdate(); + long time = System.nanoTime(); stat.execute("CALL " + prefix + "_CREATE_INDEX('PUBLIC', 'TEST', NULL)"); - println("create " + prefix + ": " + 
(System.currentTimeMillis() - time)); + println("create " + prefix + ": " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); PreparedStatement prep = conn.prepareStatement( "SELECT * FROM " + prefix + "_SEARCH(?, 0, 0)"); - time = System.currentTimeMillis(); + time = System.nanoTime(); ResultSet rs = stat.executeQuery("SELECT TEXT FROM TEST"); int count = 0; while (rs.next()) { @@ -473,7 +482,7 @@ private void testPerformance(boolean lucene) throws SQLException { } } println("search " + prefix + ": " + - (System.currentTimeMillis() - time) + " count: " + count); + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + " count: " + count); stat.execute("CALL " + prefix + "_DROP_ALL()"); conn.close(); } @@ -483,13 +492,11 @@ private void test(boolean lucene, String dataType) throws SQLException { return; } deleteDb("fullText"); - ArrayList connList = new ArrayList(); - Connection conn = getConnection("fullText", connList); + Connection conn = getConnection("fullText"); String prefix = lucene ? "FTL_" : "FT_"; Statement stat = conn.createStatement(); String className = lucene ? "FullTextLucene" : "FullText"; - stat.execute("CREATE ALIAS IF NOT EXISTS " + - prefix + "INIT FOR \"org.h2.fulltext." + className + ".init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + "INIT FOR 'org.h2.fulltext." 
+ className + ".init'"); stat.execute("CALL " + prefix + "INIT()"); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME " + dataType + ")"); @@ -582,15 +589,15 @@ private void test(boolean lucene, String dataType) throws SQLException { if (!config.memory) { conn.close(); + conn = getConnection("fullText"); } - conn = getConnection("fullText", connList); stat = conn.createStatement(); stat.executeQuery("SELECT * FROM " + prefix + "SEARCH('World', 0, 0)"); stat.execute("CALL " + prefix + "DROP_ALL()"); - close(connList); + conn.close(); } private void testDropIndex(boolean lucene) throws SQLException { @@ -617,7 +624,6 @@ private void testDropIndex(boolean lucene) throws SQLException { "_CREATE_INDEX('PUBLIC', 'TEST', 'NAME1, NAME2')"); stat.execute("UPDATE TEST SET NAME2=NULL WHERE ID=1"); stat.execute("UPDATE TEST SET NAME2='Hello World' WHERE ID=1"); - conn.close(); conn.close(); FileUtils.deleteRecursive(getBaseDir() + "/fullTextDropIndex", false); @@ -627,8 +633,7 @@ private static void initFullText(Statement stat, boolean lucene) throws SQLException { String prefix = lucene ? "FTL" : "FT"; String className = lucene ? "FullTextLucene" : "FullText"; - stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + - "_INIT FOR \"org.h2.fulltext." + className + ".init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS " + prefix + "_INIT FOR 'org.h2.fulltext." + className + ".init'"); stat.execute("CALL " + prefix + "_INIT()"); } } diff --git a/h2/src/test/org/h2/test/db/TestFunctionOverload.java b/h2/src/test/org/h2/test/db/TestFunctionOverload.java index 9e438b55c5..fe598c665f 100644 --- a/h2/src/test/org/h2/test/db/TestFunctionOverload.java +++ b/h2/src/test/org/h2/test/db/TestFunctionOverload.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,13 +13,14 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests for overloaded user defined functions. * * @author Gary Tong */ -public class TestFunctionOverload extends TestBase { +public class TestFunctionOverload extends TestDb { private static final String ME = TestFunctionOverload.class.getName(); private Connection conn; @@ -31,7 +32,7 @@ public class TestFunctionOverload extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -51,12 +52,12 @@ public void test() throws SQLException { private void testOverloadError() throws SQLException { Statement stat = conn.createStatement(); assertThrows(ErrorCode.METHODS_MUST_HAVE_DIFFERENT_PARAMETER_COUNTS_2, stat). 
- execute("create alias overloadError for \"" + ME + ".overloadError\""); + execute("create alias overloadError for '" + ME + ".overloadError'"); } private void testControl() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload0 for \"" + ME + ".overload0\""); + stat.execute("create alias overload0 for '" + ME + ".overload0'"); ResultSet rs = stat.executeQuery("select overload0() from dual"); assertTrue(rs.next()); assertEquals("0 args", 0, rs.getInt(1)); @@ -68,7 +69,7 @@ private void testControl() throws SQLException { private void testOverload() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2 for \"" + ME + ".overload1or2\""); + stat.execute("create alias overload1or2 for '" + ME + ".overload1or2'"); ResultSet rs = stat.executeQuery("select overload1or2(1) from dual"); rs.next(); assertEquals("1 arg", 1, rs.getInt(1)); @@ -79,17 +80,16 @@ private void testOverload() throws SQLException { assertFalse(rs.next()); rs = meta.getProcedures(null, null, "OVERLOAD1OR2"); rs.next(); - assertEquals(1, rs.getInt("NUM_INPUT_PARAMS")); + assertEquals("OVERLOAD1OR2_1", rs.getString("SPECIFIC_NAME")); rs.next(); - assertEquals(2, rs.getInt("NUM_INPUT_PARAMS")); + assertEquals("OVERLOAD1OR2_2", rs.getString("SPECIFIC_NAME")); assertFalse(rs.next()); } private void testOverloadNamedArgs() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2Named for \"" + ME + - ".overload1or2(int)\""); + stat.execute("create alias overload1or2Named for '" + ME + ".overload1or2(int)'"); ResultSet rs = stat.executeQuery("select overload1or2Named(1) from dual"); assertTrue("First Row", rs.next()); @@ -104,8 +104,7 @@ private void testOverloadNamedArgs() throws SQLException { private void testOverloadWithConnection() throws SQLException { Statement stat = conn.createStatement(); - stat.execute("create alias overload1or2WithConn for \"" + ME + - 
".overload1or2WithConn\""); + stat.execute("create alias overload1or2WithConn for '" + ME + ".overload1or2WithConn'"); ResultSet rs = stat.executeQuery("select overload1or2WithConn(1) from dual"); rs.next(); diff --git a/h2/src/test/org/h2/test/db/TestFunctions.java b/h2/src/test/org/h2/test/db/TestFunctions.java index 31c31b7d1c..dd601a5050 100644 --- a/h2/src/test/org/h2/test/db/TestFunctions.java +++ b/h2/src/test/org/h2/test/db/TestFunctions.java @@ -1,11 +1,14 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; @@ -22,11 +25,22 @@ import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Timestamp; import java.sql.Types; +import java.text.DecimalFormatSymbols; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalQueries; +import java.time.temporal.WeekFields; import java.util.ArrayList; import java.util.Calendar; +import java.util.Collections; import java.util.Currency; import java.util.Date; +import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Locale; import java.util.Properties; @@ -37,17 +51,28 @@ import org.h2.api.AggregateFunction; import org.h2.api.ErrorCode; import org.h2.engine.Constants; +import org.h2.engine.SessionLocal; +import org.h2.expression.function.ToCharFunction; +import 
org.h2.expression.function.ToCharFunction.Capitalization; +import org.h2.jdbc.JdbcConnection; +import org.h2.mode.ToDateParser; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.test.ap.TestAnnotationProcessor; import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; -import org.h2.util.New; +import org.h2.util.StringUtils; +import org.h2.value.TypeInfo; import org.h2.value.Value; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; /** * Tests for user defined functions and aggregates. */ -public class TestFunctions extends TestBase implements AggregateFunction { +public class TestFunctions extends TestDb implements AggregateFunction { static int count; @@ -57,16 +82,27 @@ public class TestFunctions extends TestBase implements AggregateFunction { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + // Locale.setDefault(Locale.GERMANY); + // Locale.setDefault(Locale.US); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("functions"); - testDataType(); + testOverrideAlias(); + deleteDb("functions"); + if (!config.networked) { + JdbcConnection conn = (JdbcConnection) getConnection("functions"); + SessionLocal session = (SessionLocal) conn.getSession(); + testToDate(session); + testToDateException(session); + conn.close(); + } testVersion(); testFunctionTable(); testFunctionTableVarArgs(); + testArray(); testArrayParameters(); testDefaultConnection(); testFunctionInSchema(); @@ -79,39 +115,25 @@ public void test() throws Exception { testDeterministic(); testTransactionId(); testPrecision(); - testMathFunctions(); testVarArgs(); testAggregate(); testAggregateType(); testFunctions(); + testDateTimeFunctions(); testFileRead(); testValue(); testNvl2(); - testConcatWs(); - testTruncate(); testToCharFromDateTime(); 
testToCharFromNumber(); testToCharFromText(); - testTranslate(); - testGenerateSeries(); - // TODO - // testCachingOfDeterministicFunctionAlias(); + testFileWrite(); + testThatCurrentTimestampIsSane(); + testThatCurrentTimestampStaysTheSameWithinATransaction(); + testThatCurrentTimestampUpdatesOutsideATransaction(); + testAnnotationProcessorsOutput(); + testSignal(); deleteDb("functions"); - FileUtils.deleteRecursive(TEMP_DIR, true); - } - - private void testDataType() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - assertEquals(Types.DOUBLE, stat.executeQuery( - "select radians(x) from dual"). - getMetaData().getColumnType(1)); - assertEquals(Types.DOUBLE, stat.executeQuery( - "select power(10, 2*x) from dual"). - getMetaData().getColumnType(1)); - stat.close(); - conn.close(); } private void testVersion() throws SQLException { @@ -121,7 +143,7 @@ private void testVersion() throws SQLException { ResultSet rs = stat.executeQuery(query); assertTrue(rs.next()); String version = rs.getString(1); - assertEquals(Constants.getVersion(), version); + assertEquals(Constants.VERSION, version); assertFalse(rs.next()); rs.close(); stat.close(); @@ -131,18 +153,26 @@ private void testVersion() throws SQLException { private void testFunctionTable() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias simple_function_table for \"" + - TestFunctions.class.getName() + ".simpleFunctionTable\""); + stat.execute("create alias simple_function_table for '" + + TestFunctions.class.getName() + ".simpleFunctionTable'"); + stat.execute("create alias function_table_with_parameter for '" + + TestFunctions.class.getName() + ".functionTableWithParameter'"); stat.execute("select * from simple_function_table() " + "where a>0 and b in ('x', 'y')"); + PreparedStatement prep = conn.prepareStatement("call function_table_with_parameter(?)"); + 
prep.setInt(1, 10); + ResultSet rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + assertEquals("X", rs.getString(2)); conn.close(); } private void testFunctionTableVarArgs() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias varargs_function_table for \"" + TestFunctions.class.getName() - + ".varArgsFunctionTable\""); + stat.execute("create alias varargs_function_table for '" + TestFunctions.class.getName() + + ".varArgsFunctionTable'"); ResultSet rs = stat.executeQuery("select * from varargs_function_table(1,2,3,5,8,13)"); for (int i : new int[] { 1, 2, 3, 5, 8, 13 }) { assertTrue(rs.next()); @@ -158,7 +188,7 @@ private void testFunctionTableVarArgs() throws SQLException { * @param conn the connection * @return a result set */ - public static ResultSet simpleFunctionTable(Connection conn) { + public static ResultSet simpleFunctionTable(@SuppressWarnings("unused") Connection conn) { SimpleResultSet result = new SimpleResultSet(); result.addColumn("A", Types.INTEGER, 0, 0); result.addColumn("B", Types.CHAR, 0, 0); @@ -166,6 +196,21 @@ public static ResultSet simpleFunctionTable(Connection conn) { return result; } + /** + * This method is called via reflection from the database. + * + * @param conn the connection + * @param p the parameter + * @return a result set + */ + public static ResultSet functionTableWithParameter(@SuppressWarnings("unused") Connection conn, int p) { + SimpleResultSet result = new SimpleResultSet(); + result.addColumn("A", Types.INTEGER, 0, 0); + result.addColumn("B", Types.CHAR, 0, 0); + result.addRow(p, 'X'); + return result; + } + /** * This method is called via reflection from the database. 
* @@ -246,62 +291,15 @@ private void testNvl2() throws SQLException { conn.close(); } - private void testConcatWs() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - String createSQL = "CREATE TABLE testConcat(id BIGINT, txt1 " + - "varchar, txt2 varchar, txt3 varchar);"; - stat.execute(createSQL); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(1, 'test1', 'test2', 'test3')"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(2, 'test1', 'test2', null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(3, 'test1', null, null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(4, null, 'test2', null)"); - stat.execute("insert into testConcat(id, txt1, txt2, txt3) " + - "values(5, null, null, null)"); - - String query = "SELECT concat_ws('_',txt1, txt2, txt3), txt1 " + - "FROM testConcat order by id asc"; - ResultSet rs = stat.executeQuery(query); - rs.next(); - String actual = rs.getString(1); - assertEquals("test1_test2_test3", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test1_test2", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test1", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("test2", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("", actual); - rs.close(); - - rs = stat.executeQuery("select concat_ws(null,null,null)"); - rs.next(); - assertNull(rs.getObject(1)); - - stat.execute("drop table testConcat"); - conn.close(); - } - private void testValue() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias TO_CHAR_2 for \"" + - getClass().getName() + ".toChar\""); + stat.execute("create alias TO_CHAR_2 for '" + getClass().getName() + ".toChar'"); rs = stat.executeQuery( "call TO_CHAR_2(TIMESTAMP '2001-02-03 
04:05:06', 'format')"); rs.next(); - assertEquals("2001-02-03 04:05:06.0", rs.getString(1)); + assertEquals("2001-02-03 04:05:06", rs.getString(1)); stat.execute("drop alias TO_CHAR_2"); conn.close(); } @@ -316,14 +314,13 @@ public static Value toChar(Value... args) { if (args.length == 0) { return null; } - return args[0].convertTo(Value.STRING); + return args[0].convertTo(TypeInfo.TYPE_VARCHAR); } private void testDefaultConnection() throws SQLException { Connection conn = getConnection("functions;DEFAULT_CONNECTION=TRUE"); Statement stat = conn.createStatement(); - stat.execute("create alias test for \""+ - TestFunctions.class.getName()+".testDefaultConn\""); + stat.execute("create alias test for '" + TestFunctions.class.getName() + ".testDefaultConn'"); stat.execute("call test()"); stat.execute("drop alias test"); conn.close(); @@ -344,12 +341,12 @@ private void testFunctionInSchema() throws SQLException { stat.execute("create alias schema2.func as 'int x() { return 1; }'"); stat.execute("create view test as select schema2.func()"); ResultSet rs; - rs = stat.executeQuery("select * from information_schema.views"); + rs = stat.executeQuery("select * from information_schema.views where table_schema = 'PUBLIC'"); rs.next(); - assertTrue(rs.getString("VIEW_DEFINITION").contains("SCHEMA2.FUNC")); + assertContains(rs.getString("VIEW_DEFINITION"), "\"SCHEMA2\".\"FUNC\""); stat.execute("drop view test"); - stat.execute("drop schema schema2"); + stat.execute("drop schema schema2 cascade"); conn.close(); } @@ -385,8 +382,8 @@ private void testSource() throws SQLException { ResultSet rs; stat.execute("create force alias sayHi as 'String test(String name) {\n" + "return \"Hello \" + name;\n}'"); - rs = stat.executeQuery("SELECT ALIAS_NAME " + - "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES"); + rs = stat.executeQuery("SELECT ROUTINE_NAME " + + "FROM INFORMATION_SCHEMA.ROUTINES"); rs.next(); assertEquals("SAY" + "HI", rs.getString(1)); rs = stat.executeQuery("call 
sayHi('Joe')"); @@ -408,10 +405,9 @@ private void testDynamicArgumentAndReturn() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias dynamic deterministic for \"" + - getClass().getName() + ".dynamic\""); + stat.execute("create alias dynamic deterministic for '" + getClass().getName() + ".dynamic'"); setCount(0); - rs = stat.executeQuery("call dynamic(('a', 1))[0]"); + rs = stat.executeQuery("call dynamic(ARRAY['a', '1'])[1]"); rs.next(); String a = rs.getString(1); assertEquals("a1", a); @@ -424,8 +420,7 @@ private void testUUID() throws SQLException { Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias xorUUID for \""+ - getClass().getName()+".xorUUID\""); + stat.execute("create alias xorUUID for '" + getClass().getName() + ".xorUUID'"); setCount(0); rs = stat.executeQuery("call xorUUID(random_uuid(), random_uuid())"); rs.next(); @@ -441,8 +436,7 @@ private void testDeterministic() throws SQLException { Statement stat = conn.createStatement(); ResultSet rs; - stat.execute("create alias getCount for \""+ - getClass().getName()+".getCount\""); + stat.execute("create alias getCount for '" + getClass().getName() + ".getCount'"); setCount(0); rs = stat.executeQuery("select getCount() from system_range(1, 2)"); rs.next(); @@ -451,8 +445,7 @@ private void testDeterministic() throws SQLException { assertEquals(1, rs.getInt(1)); stat.execute("drop alias getCount"); - stat.execute("create alias getCount deterministic for \""+ - getClass().getName()+".getCount\""); + stat.execute("create alias getCount deterministic for '" + getClass().getName() + ".getCount'"); setCount(0); rs = stat.executeQuery("select getCount() from system_range(1, 2)"); rs.next(); @@ -461,11 +454,10 @@ private void testDeterministic() throws SQLException { assertEquals(0, rs.getInt(1)); stat.execute("drop alias getCount"); rs = stat.executeQuery("SELECT * FROM " + - 
"INFORMATION_SCHEMA.FUNCTION_ALIASES " + - "WHERE UPPER(ALIAS_NAME) = 'GET' || 'COUNT'"); + "INFORMATION_SCHEMA.ROUTINES " + + "WHERE UPPER(ROUTINE_NAME) = 'GET' || 'COUNT'"); assertFalse(rs.next()); - stat.execute("create alias reverse deterministic for \""+ - getClass().getName()+".reverse\""); + stat.execute("create alias reverse deterministic for '" + getClass().getName() + ".reverse'"); rs = stat.executeQuery("select reverse(x) from system_range(700, 700)"); rs.next(); assertEquals("007", rs.getString(1)); @@ -493,7 +485,7 @@ private void testTransactionId() throws SQLException { stat.execute("delete from test"); rs = stat.executeQuery("call transaction_id()"); rs.next(); - assertTrue(rs.getString(1) != null); + assertNotNull(rs.getString(1)); stat.execute("drop table test"); conn.close(); } @@ -501,42 +493,26 @@ private void testTransactionId() throws SQLException { private void testPrecision() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("create alias no_op for \""+getClass().getName()+".noOp\""); + stat.execute("create alias no_op for '" + getClass().getName() + ".noOp'"); PreparedStatement prep = conn.prepareStatement( "select * from dual where no_op(1.6)=?"); prep.setBigDecimal(1, new BigDecimal("1.6")); ResultSet rs = prep.executeQuery(); assertTrue(rs.next()); - stat.execute("create aggregate agg_sum for \""+getClass().getName()+"\""); + stat.execute("create aggregate agg_sum for '" + getClass().getName() + '\''); rs = stat.executeQuery("select agg_sum(1), sum(1.6) from dual"); rs.next(); assertEquals(1, rs.getMetaData().getScale(2)); - assertEquals(32767, rs.getMetaData().getScale(1)); - stat.executeQuery("select * from information_schema.function_aliases"); - conn.close(); - } - - private void testMathFunctions() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("CALL 
SINH(50)"); - assertTrue(rs.next()); - assertEquals(Math.sinh(50), rs.getDouble(1)); - rs = stat.executeQuery("CALL COSH(50)"); - assertTrue(rs.next()); - assertEquals(Math.cosh(50), rs.getDouble(1)); - rs = stat.executeQuery("CALL TANH(50)"); - assertTrue(rs.next()); - assertEquals(Math.tanh(50), rs.getDouble(1)); + assertEquals(ValueNumeric.MAXIMUM_SCALE / 2, rs.getMetaData().getScale(1)); + stat.executeQuery("select * from information_schema.routines"); conn.close(); } private void testVarArgs() throws SQLException { Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS mean FOR \"" + - getClass().getName() + ".mean\""); + stat.execute("CREATE ALIAS mean FOR '" + getClass().getName() + ".mean'"); ResultSet rs = stat.executeQuery( "select mean(), mean(10), mean(10, 20), mean(10, 20, 30)"); rs.next(); @@ -545,8 +521,7 @@ private void testVarArgs() throws SQLException { assertEquals(15.0, rs.getDouble(3)); assertEquals(20.0, rs.getDouble(4)); - stat.execute("CREATE ALIAS mean2 FOR \"" + - getClass().getName() + ".mean2\""); + stat.execute("CREATE ALIAS mean2 FOR '" + getClass().getName() + ".mean2'"); rs = stat.executeQuery( "select mean2(), mean2(10), mean2(10, 20)"); rs.next(); @@ -557,32 +532,31 @@ private void testVarArgs() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); rs = meta.getProcedureColumns(null, null, "MEAN2", null); assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); + assertEquals("RESULT", rs.getString("COLUMN_NAME")); assertTrue(rs.next()); assertEquals("FUNCTIONS", rs.getString("PROCEDURE_CAT")); assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); assertEquals("MEAN2", rs.getString("PROCEDURE_NAME")); - assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("P1", rs.getString("COLUMN_NAME")); assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt("COLUMN_TYPE")); - assertEquals("OTHER", rs.getString("TYPE_NAME")); - 
assertEquals(Integer.MAX_VALUE, rs.getInt("PRECISION")); - assertEquals(Integer.MAX_VALUE, rs.getInt("LENGTH")); + assertEquals("DOUBLE PRECISION ARRAY", rs.getString("TYPE_NAME")); + assertEquals(Constants.MAX_ARRAY_CARDINALITY, rs.getInt("PRECISION")); + assertEquals(Constants.MAX_ARRAY_CARDINALITY, rs.getInt("LENGTH")); assertEquals(0, rs.getInt("SCALE")); - assertEquals(DatabaseMetaData.columnNullable, + assertEquals(DatabaseMetaData.columnNullableUnknown, rs.getInt("NULLABLE")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(null, rs.getString("COLUMN_DEF")); assertEquals(0, rs.getInt("SQL_DATA_TYPE")); assertEquals(0, rs.getInt("SQL_DATETIME_SUB")); assertEquals(0, rs.getInt("CHAR_OCTET_LENGTH")); assertEquals(1, rs.getInt("ORDINAL_POSITION")); - assertEquals("YES", rs.getString("IS_NULLABLE")); - assertEquals("MEAN2", rs.getString("SPECIFIC_NAME")); + assertEquals("", rs.getString("IS_NULLABLE")); + assertEquals("MEAN2_1", rs.getString("SPECIFIC_NAME")); assertFalse(rs.next()); - stat.execute("CREATE ALIAS printMean FOR \"" + - getClass().getName() + ".printMean\""); + stat.execute("CREATE ALIAS printMean FOR '" + getClass().getName() + ".printMean'"); rs = stat.executeQuery( "select printMean('A'), printMean('A', 10), " + "printMean('BB', 10, 20), printMean ('CCC', 10, 20, 30)"); @@ -619,16 +593,52 @@ private void testFileRead() throws Exception { InputStreamReader r = new InputStreamReader(FileUtils.newInputStream(fileName)); String ps2 = IOUtils.readStringAndClose(r, -1); assertEquals(ps, ps2); + FileUtils.delete(fileName); + // Test classpath prefix using this test class as input + fileName = "/" + this.getClass().getName().replaceAll("\\.", "/") + ".class"; + rs = stat.executeQuery("SELECT LENGTH(FILE_READ('classpath:" + fileName + "')) LEN"); + rs.next(); + int fileSize = rs.getInt(1); + assertTrue(fileSize > 0); + conn.close(); + } + + + private void testFileWrite() throws Exception { + Connection conn 
= getConnection("functions"); + Statement stat = conn.createStatement(); + // Copy data into clob table + stat.execute("DROP TABLE TEST IF EXISTS"); + PreparedStatement pst = conn.prepareStatement( + "CREATE TABLE TEST(data clob) AS SELECT ? " + "data"); + Properties prop = System.getProperties(); + ByteArrayOutputStream os = new ByteArrayOutputStream(prop.size()); + prop.store(os, ""); + pst.setBinaryStream(1, new ByteArrayInputStream(os.toByteArray())); + pst.execute(); + os.close(); + String fileName = new File(getBaseDir(), "test.txt").getPath(); + FileUtils.delete(fileName); + ResultSet rs = stat.executeQuery("SELECT FILE_WRITE(data, " + + StringUtils.quoteStringSQL(fileName) + ") len from test"); + assertTrue(rs.next()); + assertEquals(os.size(), rs.getInt(1)); + InputStreamReader r = new InputStreamReader(FileUtils.newInputStream(fileName)); + // Compare expected content with written file content + String ps2 = IOUtils.readStringAndClose(r, -1); + assertEquals(os.toString(), ps2); conn.close(); FileUtils.delete(fileName); } + + /** * This median implementation keeps all objects in memory. 
*/ public static class MedianString implements AggregateFunction { - private final ArrayList list = New.arrayList(); + private final ArrayList list = new ArrayList<>(); @Override public void add(Object value) { @@ -637,6 +647,7 @@ public void add(Object value) { @Override public Object getResult() { + Collections.sort(list); return list.get(list.size() / 2); } @@ -645,11 +656,6 @@ public int getType(int[] inputType) { return Types.VARCHAR; } - @Override - public void init(Connection conn) { - // nothing to do - } - } /** @@ -657,7 +663,7 @@ public void init(Connection conn) { */ public static class MedianStringType implements Aggregate { - private final ArrayList list = New.arrayList(); + private final ArrayList list = new ArrayList<>(); @Override public void add(Object value) { @@ -671,12 +677,7 @@ public Object getResult() { @Override public int getInternalType(int[] inputTypes) throws SQLException { - return Value.STRING; - } - - @Override - public void init(Connection conn) { - // nothing to do + return Value.VARCHAR; } } @@ -685,14 +686,26 @@ private void testAggregateType() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("CREATE AGGREGATE MEDIAN FOR \"" + - MedianStringType.class.getName() + "\""); - stat.execute("CREATE AGGREGATE IF NOT EXISTS MEDIAN FOR \"" + - MedianStringType.class.getName() + "\""); + stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR '" + MedianStringType.class.getName() + '\''); + stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR '" + MedianStringType.class.getName() + '\''); ResultSet rs = stat.executeQuery( - "SELECT MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + "SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); rs.next(); assertEquals("5", rs.getString(1)); + rs = stat.executeQuery( + "SELECT SIMPLE_MEDIAN(X) FILTER (WHERE X > 2) FROM SYSTEM_RANGE(1, 9)"); + rs.next(); + assertEquals("6", rs.getString(1)); + rs = 
stat.executeQuery("SELECT SIMPLE_MEDIAN(X) OVER () FROM SYSTEM_RANGE(1, 9)"); + for (int i = 1; i < 9; i++) { + assertTrue(rs.next()); + assertEquals("5", rs.getString(1)); + } + rs = stat.executeQuery("SELECT SIMPLE_MEDIAN(X) OVER (PARTITION BY X) FROM SYSTEM_RANGE(1, 9)"); + for (int i = 1; i < 9; i++) { + assertTrue(rs.next()); + assertEquals(Integer.toString(i), rs.getString(1)); + } conn.close(); if (config.memory) { @@ -701,22 +714,22 @@ private void testAggregateType() throws SQLException { conn = getConnection("functions"); stat = conn.createStatement(); - stat.executeQuery("SELECT MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + stat.executeQuery("SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); DatabaseMetaData meta = conn.getMetaData(); - rs = meta.getProcedures(null, null, "MEDIAN"); + rs = meta.getProcedures(null, null, "SIMPLE_MEDIAN"); assertTrue(rs.next()); assertFalse(rs.next()); rs = stat.executeQuery("SCRIPT"); boolean found = false; while (rs.next()) { String sql = rs.getString(1); - if (sql.contains("MEDIAN")) { + if (sql.contains("SIMPLE_MEDIAN")) { found = true; } } assertTrue(found); - stat.execute("DROP AGGREGATE MEDIAN"); - stat.execute("DROP AGGREGATE IF EXISTS MEDIAN"); + stat.execute("DROP AGGREGATE SIMPLE_MEDIAN"); + stat.execute("DROP AGGREGATE IF EXISTS SIMPLE_MEDIAN"); conn.close(); } @@ -724,14 +737,25 @@ private void testAggregate() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - stat.execute("CREATE AGGREGATE MEDIAN FOR \"" + - MedianString.class.getName() + "\""); - stat.execute("CREATE AGGREGATE IF NOT EXISTS MEDIAN FOR \"" + - MedianString.class.getName() + "\""); - ResultSet rs = stat.executeQuery( - "SELECT MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + stat.execute("CREATE AGGREGATE SIMPLE_MEDIAN FOR '" + MedianString.class.getName() + '\''); + stat.execute("CREATE AGGREGATE IF NOT EXISTS SIMPLE_MEDIAN FOR '" + MedianString.class.getName() + '\''); + 
stat.execute("CREATE SCHEMA S1"); + stat.execute("CREATE AGGREGATE S1.MEDIAN2 FOR '" + MedianString.class.getName() + '\''); + ResultSet rs = stat.executeQuery("SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); rs.next(); assertEquals("5", rs.getString(1)); + assertThrows(ErrorCode.FUNCTION_NOT_FOUND_1, stat).executeQuery("SELECT MEDIAN2(X) FROM SYSTEM_RANGE(1, 9)"); + rs = stat.executeQuery("SELECT S1.MEDIAN2(X) FROM SYSTEM_RANGE(1, 9)"); + rs.next(); + assertEquals("5", rs.getString(1)); + + stat.execute("CREATE TABLE DATA(V INT)"); + stat.execute("INSERT INTO DATA VALUES (1), (3), (2), (1), (1), (2), (1), (1), (1), (1), (1)"); + rs = stat.executeQuery("SELECT SIMPLE_MEDIAN(V), SIMPLE_MEDIAN(DISTINCT V) FROM DATA"); + rs.next(); + assertEquals("1", rs.getString(1)); + assertEquals("2", rs.getString(2)); + conn.close(); if (config.memory) { @@ -740,22 +764,32 @@ private void testAggregate() throws SQLException { conn = getConnection("functions"); stat = conn.createStatement(); - stat.executeQuery("SELECT MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); + stat.executeQuery("SELECT SIMPLE_MEDIAN(X) FROM SYSTEM_RANGE(1, 9)"); DatabaseMetaData meta = conn.getMetaData(); - rs = meta.getProcedures(null, null, "MEDIAN"); + rs = meta.getProcedures(null, null, "SIMPLE_MEDIAN"); + assertTrue(rs.next()); + assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); + assertFalse(rs.next()); + rs = meta.getProcedures(null, null, "MEDIAN2"); assertTrue(rs.next()); + assertEquals("S1", rs.getString("PROCEDURE_SCHEM")); assertFalse(rs.next()); rs = stat.executeQuery("SCRIPT"); - boolean found = false; + boolean found1 = false, found2 = false; while (rs.next()) { String sql = rs.getString(1); - if (sql.contains("MEDIAN")) { - found = true; + if (sql.contains("\"PUBLIC\".\"SIMPLE_MEDIAN\"")) { + found1 = true; + } else if (sql.contains("\"S1\".\"MEDIAN2\"")) { + found2 = true; } } - assertTrue(found); - stat.execute("DROP AGGREGATE MEDIAN"); - stat.execute("DROP AGGREGATE IF EXISTS MEDIAN"); + 
assertTrue(found1); + assertTrue(found2); + stat.execute("DROP AGGREGATE SIMPLE_MEDIAN"); + stat.execute("DROP AGGREGATE IF EXISTS SIMPLE_MEDIAN"); + stat.execute("DROP AGGREGATE S1.MEDIAN2"); + stat.execute("DROP SCHEMA S1"); conn.close(); } @@ -768,8 +802,7 @@ private void testFunctions() throws SQLException { assertCallResult("1", stat, "abs(1)"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); - stat.execute("CREATE ALIAS ADD_ROW FOR \"" + - getClass().getName() + ".addRow\""); + stat.execute("CREATE ALIAS ADD_ROW FOR '" + getClass().getName() + ".addRow'"); ResultSet rs; rs = stat.executeQuery("CALL ADD_ROW(1, 'Hello')"); rs.next(); @@ -783,37 +816,36 @@ private void testFunctions() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); rs = meta.getProcedureColumns(null, null, "ADD_ROW", null); assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); + assertEquals("RESULT", rs.getString("COLUMN_NAME")); assertTrue(rs.next()); assertEquals("FUNCTIONS", rs.getString("PROCEDURE_CAT")); assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); assertEquals("ADD_ROW", rs.getString("PROCEDURE_NAME")); - assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("P1", rs.getString("COLUMN_NAME")); assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt("COLUMN_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); - assertEquals(10, rs.getInt("PRECISION")); - assertEquals(10, rs.getInt("LENGTH")); + assertEquals(32, rs.getInt("PRECISION")); + assertEquals(32, rs.getInt("LENGTH")); assertEquals(0, rs.getInt("SCALE")); assertEquals(DatabaseMetaData.columnNoNulls, rs.getInt("NULLABLE")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(null, rs.getString("COLUMN_DEF")); assertEquals(0, rs.getInt("SQL_DATA_TYPE")); assertEquals(0, rs.getInt("SQL_DATETIME_SUB")); assertEquals(0, rs.getInt("CHAR_OCTET_LENGTH")); assertEquals(1, rs.getInt("ORDINAL_POSITION")); - 
assertEquals("YES", rs.getString("IS_NULLABLE")); - assertEquals("ADD_ROW", rs.getString("SPECIFIC_NAME")); + assertEquals("", rs.getString("IS_NULLABLE")); + assertEquals("ADD_ROW_1", rs.getString("SPECIFIC_NAME")); assertTrue(rs.next()); - assertEquals("P3", rs.getString("COLUMN_NAME")); - assertEquals("VARCHAR", rs.getString("TYPE_NAME")); + assertEquals("P2", rs.getString("COLUMN_NAME")); + assertEquals("CHARACTER VARYING", rs.getString("TYPE_NAME")); assertFalse(rs.next()); stat.executeQuery("CALL ADD_ROW(2, 'World')"); - stat.execute("CREATE ALIAS SELECT_F FOR \"" + - getClass().getName() + ".select\""); - rs = stat.executeQuery("CALL SELECT_F('SELECT * " + + stat.execute("CREATE ALIAS SELECT_F FOR '" + getClass().getName() + ".select'"); + rs = stat.executeQuery("SELECT * FROM SELECT_F('SELECT * " + "FROM TEST ORDER BY ID')"); assertEquals(2, rs.getMetaData().getColumnCount()); rs.next(); @@ -833,26 +865,10 @@ private void testFunctions() throws SQLException { assertEquals("Hello", rs.getString(1)); assertFalse(rs.next()); - rs = stat.executeQuery("SELECT SELECT_F('SELECT * " + - "FROM TEST WHERE ID=' || ID) FROM TEST ORDER BY ID"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals("((1, Hello))", rs.getString(1)); - rs.next(); - assertEquals("((2, World))", rs.getString(1)); - assertFalse(rs.next()); - - rs = stat.executeQuery("SELECT SELECT_F('SELECT * " + - "FROM TEST ORDER BY ID') FROM DUAL"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals("((1, Hello), (2, World))", rs.getString(1)); - assertFalse(rs.next()); assertThrows(ErrorCode.SYNTAX_ERROR_2, stat). 
- executeQuery("CALL SELECT_F('ERROR')"); - stat.execute("CREATE ALIAS SIMPLE FOR \"" + - getClass().getName() + ".simpleResultSet\""); - rs = stat.executeQuery("CALL SIMPLE(2, 1, 1, 1, 1, 1, 1, 1)"); + executeQuery("SELECT * FROM SELECT_F('ERROR')"); + stat.execute("CREATE ALIAS SIMPLE FOR '" + getClass().getName() + ".simpleResultSet'"); + rs = stat.executeQuery("SELECT * FROM SIMPLE(2, 1, 1, 1, 1, 1, 1, 1)"); assertEquals(2, rs.getMetaData().getColumnCount()); rs.next(); assertEquals(0, rs.getInt(1)); @@ -869,23 +885,22 @@ private void testFunctions() throws SQLException { assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS ARRAY FOR \"" + - getClass().getName() + ".getArray\""); - rs = stat.executeQuery("CALL ARRAY()"); + stat.execute("CREATE ALIAS GET_ARRAY FOR '" + getClass().getName() + ".getArray'"); + rs = stat.executeQuery("CALL GET_ARRAY()"); assertEquals(1, rs.getMetaData().getColumnCount()); rs.next(); Array a = rs.getArray(1); Object[] array = (Object[]) a.getArray(); assertEquals(2, array.length); - assertEquals(0, ((Integer) array[0]).intValue()); + assertEquals("0", (String) array[0]); assertEquals("Hello", (String) array[1]); assertThrows(ErrorCode.INVALID_VALUE_2, a).getArray(1, -1); - assertThrows(ErrorCode.INVALID_VALUE_2, a).getArray(1, 3); + assertEquals(2, ((Object[]) a.getArray(1, 3)).length); assertEquals(0, ((Object[]) a.getArray(1, 0)).length); assertEquals(0, ((Object[]) a.getArray(2, 0)).length); assertThrows(ErrorCode.INVALID_VALUE_2, a).getArray(0, 0); assertThrows(ErrorCode.INVALID_VALUE_2, a).getArray(3, 0); - HashMap> map = New.hashMap(); + HashMap> map = new HashMap<>(); assertEquals(0, ((Object[]) a.getArray(1, 0, map)).length); assertEquals(2, ((Object[]) a.getArray(map)).length); assertEquals(2, ((Object[]) a.getArray(null)).length); @@ -933,18 +948,13 @@ private void testFunctions() throws SQLException { assertThrows(ErrorCode.OBJECT_CLOSED, a).getArray(); 
assertThrows(ErrorCode.OBJECT_CLOSED, a).getResultSet(); - stat.execute("CREATE ALIAS ROOT FOR \"" + getClass().getName() + ".root\""); + stat.execute("CREATE ALIAS ROOT FOR '" + getClass().getName() + ".root'"); rs = stat.executeQuery("CALL ROOT(9)"); rs.next(); assertEquals(3, rs.getInt(1)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS MAX_ID FOR \"" + - getClass().getName() + ".selectMaxId\""); - rs = stat.executeQuery("CALL MAX_ID()"); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); + stat.execute("CREATE ALIAS MAX_ID FOR '" + getClass().getName() + ".selectMaxId'"); rs = stat.executeQuery("SELECT * FROM MAX_ID()"); rs.next(); @@ -956,14 +966,14 @@ private void testFunctions() throws SQLException { assertEquals(0, rs.getInt(1)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS blob FOR \"" + getClass().getName() + ".blob\""); + stat.execute("CREATE ALIAS blob FOR '" + getClass().getName() + ".blob'"); rs = stat.executeQuery("SELECT blob(CAST('0102' AS BLOB)) FROM DUAL"); while (rs.next()) { // ignore } rs.close(); - stat.execute("CREATE ALIAS clob FOR \"" + getClass().getName() + ".clob\""); + stat.execute("CREATE ALIAS clob FOR '" + getClass().getName() + ".clob'"); rs = stat.executeQuery("SELECT clob(CAST('Hello' AS CLOB)) FROM DUAL"); while (rs.next()) { // ignore @@ -977,75 +987,67 @@ private void testFunctions() throws SQLException { assertTrue(rs.next()); assertEquals("Hello", rs.getString(1)); - rs = stat.executeQuery("select * from sql('select cast(''4869'' as blob)')"); + rs = stat.executeQuery("select * from sql('select cast(X''4869'' as blob)')"); assertTrue(rs.next()); assertEquals("Hi", new String(rs.getBytes(1))); - rs = stat.executeQuery("select sql('select 1 a, ''Hello'' b')"); - assertTrue(rs.next()); - rs2 = (ResultSet) rs.getObject(1); - rs2.next(); - assertEquals(1, rs2.getInt(1)); - assertEquals("Hello", rs2.getString(2)); - ResultSetMetaData meta2 = rs2.getMetaData(); + rs = stat.executeQuery("select * 
from sql('select 1 a, ''Hello'' b')"); + rs.next(); + assertEquals(1, rs.getInt(1)); + assertEquals("Hello", rs.getString(2)); + ResultSetMetaData meta2 = rs.getMetaData(); assertEquals(Types.INTEGER, meta2.getColumnType(1)); assertEquals("INTEGER", meta2.getColumnTypeName(1)); assertEquals("java.lang.Integer", meta2.getColumnClassName(1)); assertEquals(Types.VARCHAR, meta2.getColumnType(2)); - assertEquals("VARCHAR", meta2.getColumnTypeName(2)); + assertEquals("CHARACTER VARYING", meta2.getColumnTypeName(2)); assertEquals("java.lang.String", meta2.getColumnClassName(2)); - stat.execute("CREATE ALIAS blob2stream FOR \"" + - getClass().getName() + ".blob2stream\""); - stat.execute("CREATE ALIAS stream2stream FOR \"" + - getClass().getName() + ".stream2stream\""); - stat.execute("CREATE TABLE TEST_BLOB(ID INT PRIMARY KEY, VALUE BLOB)"); + stat.execute("CREATE ALIAS blob2stream FOR '" + getClass().getName() + ".blob2stream'"); + stat.execute("CREATE ALIAS stream2stream FOR '" + getClass().getName() + ".stream2stream'"); + stat.execute("CREATE TABLE TEST_BLOB(ID INT PRIMARY KEY, \"VALUE\" BLOB)"); stat.execute("INSERT INTO TEST_BLOB VALUES(0, null)"); stat.execute("INSERT INTO TEST_BLOB VALUES(1, 'edd1f011edd1f011edd1f011')"); - rs = stat.executeQuery("SELECT blob2stream(VALUE) FROM TEST_BLOB"); + rs = stat.executeQuery("SELECT blob2stream(\"VALUE\") FROM TEST_BLOB"); while (rs.next()) { // ignore } rs.close(); - rs = stat.executeQuery("SELECT stream2stream(VALUE) FROM TEST_BLOB"); + rs = stat.executeQuery("SELECT stream2stream(\"VALUE\") FROM TEST_BLOB"); while (rs.next()) { // ignore } - stat.execute("CREATE ALIAS NULL_RESULT FOR \"" + - getClass().getName() + ".nullResultSet\""); - rs = stat.executeQuery("CALL NULL_RESULT()"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals(null, rs.getString(1)); - assertFalse(rs.next()); - - rs = meta.getProcedures(null, null, "NULL_RESULT"); - rs.next(); - assertEquals("FUNCTIONS", 
rs.getString("PROCEDURE_CAT")); - assertEquals("PUBLIC", rs.getString("PROCEDURE_SCHEM")); - assertEquals("NULL_RESULT", rs.getString("PROCEDURE_NAME")); - assertEquals(0, rs.getInt("NUM_INPUT_PARAMS")); - assertEquals(0, rs.getInt("NUM_OUTPUT_PARAMS")); - assertEquals(0, rs.getInt("NUM_RESULT_SETS")); - assertEquals("", rs.getString("REMARKS")); - assertEquals(DatabaseMetaData.procedureReturnsResult, - rs.getInt("PROCEDURE_TYPE")); - assertEquals("NULL_RESULT", rs.getString("SPECIFIC_NAME")); - - rs = meta.getProcedureColumns(null, null, "NULL_RESULT", null); - assertTrue(rs.next()); - assertEquals("P0", rs.getString("COLUMN_NAME")); - assertFalse(rs.next()); - - stat.execute("CREATE ALIAS RESULT_WITH_NULL FOR \"" + - getClass().getName() + ".resultSetWithNull\""); - rs = stat.executeQuery("CALL RESULT_WITH_NULL()"); - assertEquals(1, rs.getMetaData().getColumnCount()); - rs.next(); - assertEquals(null, rs.getString(1)); - assertFalse(rs.next()); + conn.close(); + } + private void testDateTimeFunctions() throws SQLException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + Statement stat = conn.createStatement(); + ResultSet rs; + WeekFields wf = WeekFields.of(Locale.getDefault()); + for (int y = 2001; y <= 2010; y++) { + for (int d = 1; d <= 7; d++) { + String date1 = y + "-01-0" + d, date2 = y + "-01-0" + (d + 1); + LocalDate local1 = LocalDate.parse(date1), local2 = LocalDate.parse(date2); + rs = stat.executeQuery( + "SELECT EXTRACT(DAY_OF_WEEK FROM C1), EXTRACT(WEEK FROM C1), EXTRACT(WEEK_YEAR FROM C1)," + + " DATEDIFF(WEEK, C1, C2), DATE_TRUNC(WEEK, C1), DATE_TRUNC(WEEK_YEAR, C1) FROM" + + " VALUES (DATE '" + date1 + "', DATE '" + date2 + "')"); + rs.next(); + assertEquals(local1.get(wf.dayOfWeek()), rs.getInt(1)); + int w1 = local1.get(wf.weekOfWeekBasedYear()); + assertEquals(w1, rs.getInt(2)); + int weekYear = local1.get(wf.weekBasedYear()); + assertEquals(weekYear, rs.getInt(3)); + assertEquals(w1 == 
local2.get(wf.weekOfWeekBasedYear()) ? 0 : 1, rs.getInt(4)); + assertEquals(local1.minus(local1.get(wf.dayOfWeek()) - 1, ChronoUnit.DAYS), + rs.getObject(5, LocalDate.class)); + assertEquals(DateTimeFormatter.ofPattern("Y-w-e").parse(weekYear + "-1-1") + .query(TemporalQueries.localDate()), rs.getObject(6, LocalDate.class)); + } + } conn.close(); } @@ -1077,8 +1079,8 @@ private void testSchemaSearchPath() throws SQLException { stat.execute("SET SCHEMA TEST"); stat.execute("CREATE ALIAS PARSE_INT2 FOR " + "\"java.lang.Integer.parseInt(java.lang.String, int)\";"); - rs = stat.executeQuery("SELECT ALIAS_NAME FROM " + - "INFORMATION_SCHEMA.FUNCTION_ALIASES WHERE ALIAS_SCHEMA ='TEST'"); + rs = stat.executeQuery("SELECT ROUTINE_NAME FROM " + + "INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA ='TEST'"); rs.next(); assertEquals("PARSE_INT2", rs.getString(1)); stat.execute("DROP ALIAS PARSE_INT2"); @@ -1091,8 +1093,8 @@ private void testSchemaSearchPath() throws SQLException { rs = stat.executeQuery("CALL PARSE_INT2('-FF', 16)"); rs.next(); assertEquals(-255, rs.getInt(1)); - rs = stat.executeQuery("SELECT ALIAS_NAME FROM " + - "INFORMATION_SCHEMA.FUNCTION_ALIASES WHERE ALIAS_SCHEMA ='TEST'"); + rs = stat.executeQuery("SELECT ROUTINE_NAME FROM " + + "INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA ='TEST'"); rs.next(); assertEquals("PARSE_INT2", rs.getString(1)); rs = stat.executeQuery("CALL TEST.PARSE_INT2('-2147483648', 10)"); @@ -1104,145 +1106,262 @@ private void testSchemaSearchPath() throws SQLException { conn.close(); } + private void testArray() throws SQLException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + PreparedStatement prep = conn.prepareStatement("SELECT ARRAY_MAX_CARDINALITY(?)"); + prep.setObject(1, new Integer[] { 1, 2, 3 }); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(3, rs.getInt(1)); + } + conn.close(); + } + private void testArrayParameters() throws SQLException { deleteDb("functions"); 
Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); - ResultSet rs; stat.execute("create alias array_test AS " + "$$ Integer[] array_test(Integer[] in_array) " + "{ return in_array; } $$;"); - PreparedStatement stmt = conn.prepareStatement( + PreparedStatement prep = conn.prepareStatement( "select array_test(?) from dual"); - stmt.setObject(1, new Integer[] { 1, 2 }); - rs = stmt.executeQuery(); - rs.next(); - assertEquals(Integer[].class.getName(), rs.getObject(1).getClass() - .getName()); + prep.setObject(1, new Integer[] { 1, 2 }); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertTrue(rs.getObject(1) instanceof Array); + } CallableStatement call = conn.prepareCall("{ ? = call array_test(?) }"); call.setObject(2, new Integer[] { 2, 1 }); call.registerOutParameter(1, Types.ARRAY); call.execute(); - assertEquals(Integer[].class.getName(), call.getArray(1).getArray() + assertEquals(Object[].class.getName(), call.getArray(1).getArray() .getClass().getName()); - assertEquals(new Integer[] { 2, 1 }, (Integer[]) call.getObject(1)); + assertEquals(new Object[]{2, 1}, (Object[]) ((Array) call.getObject(1)).getArray()); stat.execute("drop alias array_test"); - conn.close(); - } - - private void testTruncate() throws SQLException { - deleteDb("functions"); - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - ResultSet rs = stat.executeQuery("SELECT TRUNCATE(1.234, 2) FROM dual"); - rs.next(); - assertEquals(1.23d, rs.getDouble(1)); - - rs = stat.executeQuery( - "SELECT CURRENT_TIMESTAMP(), " + - "TRUNCATE(CURRENT_TIMESTAMP()) FROM dual"); - rs.next(); - Calendar c = Calendar.getInstance(); - c.setTime(rs.getTimestamp(1)); - c.set(Calendar.HOUR_OF_DAY, 0); - c.set(Calendar.MINUTE, 0); - c.set(Calendar.SECOND, 0); - c.set(Calendar.MILLISECOND, 0); - java.util.Date nowDate = c.getTime(); - assertEquals(nowDate, rs.getTimestamp(2)); - - try { - rs = stat.executeQuery("SELECT 
TRUNCATE('bad', 1) FROM dual"); - fail("expected exception"); - } catch (SQLException ex) { - // expected - } - - // check for passing wrong data type - try { - rs = stat.executeQuery("SELECT TRUNCATE('bad') FROM dual"); - fail("expected exception"); - } catch (SQLException ex) { - // expected + stat.execute("CREATE ALIAS F DETERMINISTIC FOR '" + TestFunctions.class.getName() + ".arrayParameters1'"); + prep = conn.prepareStatement("SELECT F(ARRAY[ARRAY['1', '2'], ARRAY['3']])"); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(new Integer[][] {{1, 2}, {3}}, rs.getObject(1, Integer[][].class)); } - - // check for too many parameters - try { - rs = stat.executeQuery("SELECT TRUNCATE(1,2,3) FROM dual"); - fail("expected exception"); - } catch (SQLException ex) { - // expected + prep = conn.prepareStatement("SELECT F(ARRAY[ARRAY[1::BIGINT, 2::BIGINT], ARRAY[3::BIGINT]])"); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertEquals(new Short[][] {{1, 2}, {3}}, rs.getObject(1, Short[][].class)); } + stat.execute("DROP ALIAS F"); conn.close(); } - private void testTranslate() throws SQLException { - Connection conn = getConnection("functions"); - Statement stat = conn.createStatement(); - - String createSQL = "CREATE TABLE testTranslate(id BIGINT, " + - "txt1 varchar);"; - stat.execute(createSQL); - stat.execute("insert into testTranslate(id, txt1) " + - "values(1, 'test1')"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(2, null)"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(3, '')"); - stat.execute("insert into testTranslate(id, txt1) " + - "values(4, 'caps')"); - - String query = "SELECT translate(txt1, 'p', 'r') " + - "FROM testTranslate order by id asc"; - ResultSet rs = stat.executeQuery(query); - rs.next(); - String actual = rs.getString(1); - assertEquals("test1", actual); - rs.next(); - actual = rs.getString(1); - assertNull(actual); - rs.next(); - actual = rs.getString(1); - 
assertEquals("", actual); - rs.next(); - actual = rs.getString(1); - assertEquals("cars", actual); - rs.close(); + /** + * This method is called with reflection. + * + * @param x argument + * @return result + */ + public static Integer[][] arrayParameters1(String[][] x) { + int l = x.length; + Integer[][] result = new Integer[l][]; + for (int i = 0; i < l; i++) { + String[] x1 = x[i]; + int l1 = x1.length; + Integer[] r1 = new Integer[l1]; + for (int j = 0; j < l1; j++) { + r1[j] = Integer.parseInt(x1[j]); + } + result[i] = r1; + } + return result; + } - rs = stat.executeQuery("select translate(null,null,null)"); - rs.next(); - assertNull(rs.getObject(1)); + private void testToDateException(SessionLocal session) { + assertThrows(ErrorCode.INVALID_TO_DATE_FORMAT, + () -> ToDateParser.toDate(session, "1979-ThisWillFail-12", "YYYY-MM-DD")); + assertThrows(ErrorCode.INVALID_TO_DATE_FORMAT, // + () -> ToDateParser.toDate(session, "1-DEC-0000", "DD-MON-RRRR")); + } + + private void testToDate(SessionLocal session) { + GregorianCalendar calendar = new GregorianCalendar(); + int year = calendar.get(Calendar.YEAR); + int month = calendar.get(Calendar.MONTH) + 1; + // Default date in Oracle is the first day of the current month + String defDate = year + "-" + month + "-1 "; + ValueTimestamp date = null; + date = ValueTimestamp.parse("1979-11-12", null); + assertEquals(date, ToDateParser.toDate(session, "1979-11-12T00:00:00Z", "YYYY-MM-DD\"T\"HH24:MI:SS\"Z\"")); + assertEquals(date, ToDateParser.toDate(session, "1979*foo*1112", "YYYY\"*foo*\"MM\"\"DD")); + assertEquals(date, ToDateParser.toDate(session, "1979-11-12", "YYYY-MM-DD")); + assertEquals(date, ToDateParser.toDate(session, "1979/11/12", "YYYY/MM/DD")); + assertEquals(date, ToDateParser.toDate(session, "1979,11,12", "YYYY,MM,DD")); + assertEquals(date, ToDateParser.toDate(session, "1979.11.12", "YYYY.MM.DD")); + assertEquals(date, ToDateParser.toDate(session, "1979;11;12", "YYYY;MM;DD")); + assertEquals(date, 
ToDateParser.toDate(session, "1979:11:12", "YYYY:MM:DD")); + + date = ValueTimestamp.parse("1979-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "1979", "YYYY")); + assertEquals(date, ToDateParser.toDate(session, "1979 AD", "YYYY AD")); + assertEquals(date, ToDateParser.toDate(session, "1979 A.D.", "YYYY A.D.")); + assertEquals(date, ToDateParser.toDate(session, "1979 A.D.", "YYYY BC")); + assertEquals(date, ToDateParser.toDate(session, "+1979", "SYYYY")); + assertEquals(date, ToDateParser.toDate(session, "79", "RRRR")); + + date = ValueTimestamp.parse(defDate + "00:12:00", null); + assertEquals(date, ToDateParser.toDate(session, "12", "MI")); + + date = ValueTimestamp.parse("1970-11-01", null); + assertEquals(date, ToDateParser.toDate(session, "11", "MM")); + assertEquals(date, ToDateParser.toDate(session, "11", "Mm")); + assertEquals(date, ToDateParser.toDate(session, "11", "mM")); + assertEquals(date, ToDateParser.toDate(session, "11", "mm")); + assertEquals(date, ToDateParser.toDate(session, "XI", "RM")); + + int y = (year / 10) * 10 + 9; + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "9", "Y")); + y = (year / 100) * 100 + 79; + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "79", "YY")); + y = (year / 1_000) * 1_000 + 979; + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "979", "YYY")); + + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... 
so we adjust + date = ValueTimestamp.parse("-99-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "0100 BC", "YYYY BC")); + assertEquals(date, ToDateParser.toDate(session, "0100 B.C.", "YYYY B.C.")); + assertEquals(date, ToDateParser.toDate(session, "-0100", "SYYYY")); + assertEquals(date, ToDateParser.toDate(session, "-0100", "YYYY")); + + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust + y = -((year / 1_000) * 1_000 + 99); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "100 BC", "YYY BC")); + + // Gregorian calendar does not have a year 0. + // 0 = 0001 BC, -1 = 0002 BC, ... so we adjust + y = -((year / 100) * 100); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "01 BC", "YY BC")); + y = -((year / 10) * 10); + date = ValueTimestamp.parse(y + "-" + month + "-01", null); + assertEquals(date, ToDateParser.toDate(session, "1 BC", "Y BC")); + + date = ValueTimestamp.parse(defDate + "08:12:00", null); + assertEquals(date, ToDateParser.toDate(session, "08:12 AM", "HH:MI AM")); + assertEquals(date, ToDateParser.toDate(session, "08:12 A.M.", "HH:MI A.M.")); + assertEquals(date, ToDateParser.toDate(session, "08:12", "HH24:MI")); + + date = ValueTimestamp.parse(defDate + "08:12:00", null); + assertEquals(date, ToDateParser.toDate(session, "08:12", "HH:MI")); + assertEquals(date, ToDateParser.toDate(session, "08:12", "HH12:MI")); + + date = ValueTimestamp.parse(defDate + "08:12:34", null); + assertEquals(date, ToDateParser.toDate(session, "08:12:34", "HH:MI:SS")); + + date = ValueTimestamp.parse(defDate + "12:00:00", null); + assertEquals(date, ToDateParser.toDate(session, "12:00:00 PM", "HH12:MI:SS AM")); + + date = ValueTimestamp.parse(defDate + "00:00:00", null); + assertEquals(date, ToDateParser.toDate(session, "12:00:00 AM", "HH12:MI:SS AM")); + + date = 
ValueTimestamp.parse(defDate + "00:00:34", null); + assertEquals(date, ToDateParser.toDate(session, "34", "SS")); + + date = ValueTimestamp.parse(defDate + "08:12:34", null); + assertEquals(date, ToDateParser.toDate(session, "29554", "SSSSS")); + + date = ValueTimestamp.parse(defDate + "08:12:34.550", null); + assertEquals(date, ToDateParser.toDate(session, "08:12:34 550", "HH:MI:SS FF")); + assertEquals(date, ToDateParser.toDate(session, "08:12:34 55", "HH:MI:SS FF2")); + + date = ValueTimestamp.parse(defDate + "14:04:00", null); + assertEquals(date, ToDateParser.toDate(session, "02:04 P.M.", "HH:MI p.M.")); + assertEquals(date, ToDateParser.toDate(session, "02:04 PM", "HH:MI PM")); + + date = ValueTimestamp.parse("1970-" + month + "-12", null); + assertEquals(date, ToDateParser.toDate(session, "12", "DD")); + + date = ValueTimestamp.parse(year + (calendar.isLeapYear(year) ? "-11-11" : "-11-12"), null); + assertEquals(date, ToDateParser.toDate(session, "316", "DDD")); + assertEquals(date, ToDateParser.toDate(session, "316", "DdD")); + assertEquals(date, ToDateParser.toDate(session, "316", "dDD")); + assertEquals(date, ToDateParser.toDate(session, "316", "ddd")); + + date = ValueTimestamp.parse("2013-01-29", null); + assertEquals(date, ToDateParser.toDate(session, "2456322", "J")); + + if (Locale.getDefault().getLanguage().equals("en")) { + date = ValueTimestamp.parse("9999-12-31 23:59:59", null); + assertEquals(date, ToDateParser.toDate(session, "31-DEC-9999 23:59:59", "DD-MON-YYYY HH24:MI:SS")); + assertEquals(date, ToDateParser.toDate(session, "31-DEC-9999 23:59:59", "DD-MON-RRRR HH24:MI:SS")); + assertEquals(ValueTimestamp.parse("0001-03-01", null), + ToDateParser.toDate(session, "1-MAR-0001", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("9999-03-01", null), + ToDateParser.toDate(session, "1-MAR-9999", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2000-03-01", null), + ToDateParser.toDate(session, "1-MAR-000", "DD-MON-RRRR")); + 
assertEquals(ValueTimestamp.parse("1999-03-01", null), + ToDateParser.toDate(session, "1-MAR-099", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("0100-03-01", null), + ToDateParser.toDate(session, "1-MAR-100", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2000-03-01", null), + ToDateParser.toDate(session, "1-MAR-00", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("2049-03-01", null), + ToDateParser.toDate(session, "1-MAR-49", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("1950-03-01", null), + ToDateParser.toDate(session, "1-MAR-50", "DD-MON-RRRR")); + assertEquals(ValueTimestamp.parse("1999-03-01", null), + ToDateParser.toDate(session, "1-MAR-99", "DD-MON-RRRR")); + } - stat.execute("drop table testTranslate"); - conn.close(); + assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15", null), + ToDateParser.toTimestampTz(session, "2000-05-10 10:11:12 -8:15", "YYYY-MM-DD HH24:MI:SS TZH:TZM")); + assertEquals(ValueTimestampTimeZone.parse("2000-05-10 10:11:12-08:15", null), + ToDateParser.toTimestampTz(session, "2000-05-10 10:11:12 GMT-08:15", "YYYY-MM-DD HH24:MI:SS TZR")); + assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08", null), + ToDateParser.toTimestampTz(session, "2000-02-10 10:11:12 US/Pacific", "YYYY-MM-DD HH24:MI:SS TZR")); + assertEquals(ValueTimestampTimeZone.parse("2000-02-10 10:11:12-08", null), + ToDateParser.toTimestampTz(session, "2000-02-10 10:11:12 PST", "YYYY-MM-DD HH24:MI:SS TZD")); } private void testToCharFromDateTime() throws SQLException { + ToCharFunction.clearNames(); deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); TimeZone tz = TimeZone.getDefault(); - boolean daylight = tz.inDaylightTime(new Date()); + final Timestamp timestamp1979 = Timestamp.valueOf("1979-11-12 08:12:34.560"); + boolean daylight = tz.inDaylightTime(timestamp1979); String tzShortName = tz.getDisplayName(daylight, TimeZone.SHORT); String tzLongName = 
tz.getID(); + if (tzLongName.equals("Etc/UTC")) { + tzLongName = "UTC"; + } stat.executeUpdate("CREATE TABLE T (X TIMESTAMP(6))"); stat.executeUpdate("INSERT INTO T VALUES " + - "(TIMESTAMP '1979-11-12 08:12:34.560')"); + "(TIMESTAMP '"+timestamp1979.toString()+"')"); stat.executeUpdate("CREATE TABLE U (X TIMESTAMP(6))"); stat.executeUpdate("INSERT INTO U VALUES " + "(TIMESTAMP '-100-01-15 14:04:02.120')"); assertResult("1979-11-12 08:12:34.56", stat, "SELECT X FROM T"); - assertResult("-100-01-15 14:04:02.12", stat, "SELECT X FROM U"); - assertResult("12-NOV-79 08.12.34.560000 AM", stat, "SELECT TO_CHAR(X) FROM T"); + assertResult("-0100-01-15 14:04:02.12", stat, "SELECT X FROM U"); + String expected = String.format("%tb", timestamp1979).toUpperCase(); + expected = stripTrailingPeriod(expected); + assertResult("12-" + expected + "-79 08.12.34.560000000 AM", stat, + "SELECT TO_CHAR(X) FROM T"); assertResult("- / , . ; : text - /", stat, "SELECT TO_CHAR(X, '- / , . ; : \"text\" - /') FROM T"); assertResult("1979-11-12", stat, @@ -1274,12 +1393,26 @@ private void testToCharFromDateTime() throws SQLException { assertResult("0 BC", stat, "SELECT TO_CHAR(X, 'Y BC') FROM U"); assertResult("1979 A.D.", stat, "SELECT TO_CHAR(X, 'YYYY B.C.') FROM T"); + assertResult("2013", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'YYYY') FROM DUAL"); + assertResult("013", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'YYY') FROM DUAL"); + assertResult("13", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'YY') FROM DUAL"); + assertResult("3", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'Y') FROM DUAL"); + // ISO week year + assertResult("2014", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'IYYY') FROM DUAL"); + assertResult("014", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'IYY') FROM DUAL"); + assertResult("14", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'IY') FROM DUAL"); + assertResult("4", stat, "SELECT TO_CHAR(DATE '2013-12-30', 'I') FROM DUAL"); + assertResult("0002", stat, "SELECT TO_CHAR(DATE 
'-0001-01-01', 'IYYY') FROM DUAL"); + assertResult("0001", stat, "SELECT TO_CHAR(DATE '-0001-01-04', 'IYYY') FROM DUAL"); + assertResult("0004", stat, "SELECT TO_CHAR(DATE '-0004-01-01', 'IYYY') FROM DUAL"); assertResult("08:12 AM", stat, "SELECT TO_CHAR(X, 'HH:MI AM') FROM T"); assertResult("08:12 A.M.", stat, "SELECT TO_CHAR(X, 'HH:MI A.M.') FROM T"); assertResult("02:04 P.M.", stat, "SELECT TO_CHAR(X, 'HH:MI A.M.') FROM U"); assertResult("08:12 AM", stat, "SELECT TO_CHAR(X, 'HH:MI PM') FROM T"); assertResult("02:04 PM", stat, "SELECT TO_CHAR(X, 'HH:MI PM') FROM U"); assertResult("08:12 A.M.", stat, "SELECT TO_CHAR(X, 'HH:MI P.M.') FROM T"); + assertResult("12 PM", stat, "SELECT TO_CHAR(TIME '12:00:00', 'HH AM')"); + assertResult("12 AM", stat, "SELECT TO_CHAR(TIME '00:00:00', 'HH AM')"); assertResult("A.M.", stat, "SELECT TO_CHAR(X, 'P.M.') FROM T"); assertResult("a.m.", stat, "SELECT TO_CHAR(X, 'p.M.') FROM T"); assertResult("a.m.", stat, "SELECT TO_CHAR(X, 'p.m.') FROM T"); @@ -1289,31 +1422,38 @@ private void testToCharFromDateTime() throws SQLException { assertResult("am", stat, "SELECT TO_CHAR(X, 'pm') FROM T"); assertResult("2", stat, "SELECT TO_CHAR(X, 'D') FROM T"); assertResult("2", stat, "SELECT TO_CHAR(X, 'd') FROM T"); - assertResult("MONDAY ", stat, "SELECT TO_CHAR(X, 'DAY') FROM T"); - assertResult("Monday ", stat, "SELECT TO_CHAR(X, 'Day') FROM T"); - assertResult("monday ", stat, "SELECT TO_CHAR(X, 'day') FROM T"); - assertResult("monday ", stat, "SELECT TO_CHAR(X, 'dAY') FROM T"); - assertResult("Monday", stat, "SELECT TO_CHAR(X, 'fmDay') FROM T"); - assertResult("monday -monday-monday-monday -monday", stat, - "SELECT TO_CHAR(X, 'day-fmday-day-fmday-fmday') FROM T"); + expected = String.format("%tA", timestamp1979); + expected = expected.substring(0, 1).toUpperCase() + expected.substring(1); + String spaces = " "; + String first9 = (expected + spaces).substring(0, 9); + assertResult(StringUtils.toUpperEnglish(first9), + stat, "SELECT TO_CHAR(X, 
'DAY') FROM T"); + assertResult(first9, + stat, "SELECT TO_CHAR(X, 'Day') FROM T"); + assertResult(first9.toLowerCase(), + stat, "SELECT TO_CHAR(X, 'day') FROM T"); + assertResult(first9.toLowerCase(), + stat, "SELECT TO_CHAR(X, 'dAY') FROM T"); + assertResult(expected, + stat, "SELECT TO_CHAR(X, 'fmDay') FROM T"); assertResult("12", stat, "SELECT TO_CHAR(X, 'DD') FROM T"); assertResult("316", stat, "SELECT TO_CHAR(X, 'DDD') FROM T"); assertResult("316", stat, "SELECT TO_CHAR(X, 'DdD') FROM T"); assertResult("316", stat, "SELECT TO_CHAR(X, 'dDD') FROM T"); assertResult("316", stat, "SELECT TO_CHAR(X, 'ddd') FROM T"); - assertResult("Monday, November 12, 1979", stat, + expected = String.format("%1$tA, %1$tB %1$te, %1$tY", timestamp1979); + assertResult(expected, stat, "SELECT TO_CHAR(X, 'DL') FROM T"); - assertResult("Monday, November 12, 1979", stat, - "SELECT TO_CHAR(X, 'DL', 'NLS_DATE_LANGUAGE = English') FROM T"); assertResult("11/12/1979", stat, "SELECT TO_CHAR(X, 'DS') FROM T"); assertResult("11/12/1979", stat, "SELECT TO_CHAR(X, 'Ds') FROM T"); assertResult("11/12/1979", stat, "SELECT TO_CHAR(X, 'dS') FROM T"); assertResult("11/12/1979", stat, "SELECT TO_CHAR(X, 'ds') FROM T"); - assertResult("MON", stat, "SELECT TO_CHAR(X, 'DY') FROM T"); - assertResult("Mon", stat, "SELECT TO_CHAR(X, 'Dy') FROM T"); - assertResult("mon", stat, "SELECT TO_CHAR(X, 'dy') FROM T"); - assertResult("mon", stat, "SELECT TO_CHAR(X, 'dY') FROM T"); - assertResult("08:12:34.560000", stat, + expected = String.format("%1$ta", timestamp1979); + assertResult(expected.toUpperCase(), stat, "SELECT TO_CHAR(X, 'DY') FROM T"); + assertResult(Capitalization.CAPITALIZE.apply(expected), stat, "SELECT TO_CHAR(X, 'Dy') FROM T"); + assertResult(expected.toLowerCase(), stat, "SELECT TO_CHAR(X, 'dy') FROM T"); + assertResult(expected.toLowerCase(), stat, "SELECT TO_CHAR(X, 'dY') FROM T"); + assertResult("08:12:34.560000000", stat, "SELECT TO_CHAR(X, 'HH:MI:SS.FF') FROM T"); assertResult("08:12:34.5", 
stat, "SELECT TO_CHAR(X, 'HH:MI:SS.FF1') FROM T"); @@ -1333,10 +1473,10 @@ private void testToCharFromDateTime() throws SQLException { "SELECT TO_CHAR(X, 'HH:MI:SS.FF8') FROM T"); assertResult("08:12:34.560000000", stat, "SELECT TO_CHAR(X, 'HH:MI:SS.FF9') FROM T"); - assertResult("08:12:34.560000000", stat, - "SELECT TO_CHAR(X, 'HH:MI:SS.ff9') FROM T"); - assertResult("08:12:34.560000000", stat, - "SELECT TO_CHAR(X, 'HH:MI:SS.fF9') FROM T"); + assertResult("012345678", stat, + "SELECT TO_CHAR(TIME '0:00:00.012345678', 'FF') FROM T"); + assertResult("00", stat, + "SELECT TO_CHAR(TIME '0:00:00.000', 'FF2') FROM T"); assertResult("08:12", stat, "SELECT TO_CHAR(X, 'HH:MI') FROM T"); assertResult("08:12", stat, "SELECT TO_CHAR(X, 'HH12:MI') FROM T"); assertResult("08:12", stat, "SELECT TO_CHAR(X, 'HH24:MI') FROM T"); @@ -1353,13 +1493,25 @@ private void testToCharFromDateTime() throws SQLException { assertResult("11", stat, "SELECT TO_CHAR(X, 'Mm') FROM T"); assertResult("11", stat, "SELECT TO_CHAR(X, 'mM') FROM T"); assertResult("11", stat, "SELECT TO_CHAR(X, 'mm') FROM T"); - assertResult("NOV", stat, "SELECT TO_CHAR(X, 'MON') FROM T"); - assertResult("Nov", stat, "SELECT TO_CHAR(X, 'Mon') FROM T"); - assertResult("nov", stat, "SELECT TO_CHAR(X, 'mon') FROM T"); - assertResult("NOVEMBER ", stat, "SELECT TO_CHAR(X, 'MONTH') FROM T"); - assertResult("November ", stat, "SELECT TO_CHAR(X, 'Month') FROM T"); - assertResult("november ", stat, "SELECT TO_CHAR(X, 'month') FROM T"); - assertResult("November", stat, "SELECT TO_CHAR(X, 'fmMonth') FROM T"); + expected = String.format("%1$tb", timestamp1979); + expected = stripTrailingPeriod(expected); + expected = expected.substring(0, 1).toUpperCase() + expected.substring(1); + assertResult(expected.toUpperCase(), stat, + "SELECT TO_CHAR(X, 'MON') FROM T"); + assertResult(expected, stat, + "SELECT TO_CHAR(X, 'Mon') FROM T"); + assertResult(expected.toLowerCase(), stat, + "SELECT TO_CHAR(X, 'mon') FROM T"); + expected = 
String.format("%1$tB", timestamp1979); + expected = (expected + " ").substring(0, 9); + assertResult(expected.toUpperCase(), stat, + "SELECT TO_CHAR(X, 'MONTH') FROM T"); + assertResult(Capitalization.CAPITALIZE.apply(expected), stat, + "SELECT TO_CHAR(X, 'Month') FROM T"); + assertResult(expected.toLowerCase(), stat, + "SELECT TO_CHAR(X, 'month') FROM T"); + assertResult(Capitalization.CAPITALIZE.apply(expected.trim()), stat, + "SELECT TO_CHAR(X, 'fmMonth') FROM T"); assertResult("4", stat, "SELECT TO_CHAR(X, 'Q') FROM T"); assertResult("XI", stat, "SELECT TO_CHAR(X, 'RM') FROM T"); assertResult("xi", stat, "SELECT TO_CHAR(X, 'rm') FROM T"); @@ -1368,11 +1520,31 @@ private void testToCharFromDateTime() throws SQLException { assertResult("1979", stat, "SELECT TO_CHAR(X, 'RRRR') FROM T"); assertResult("34", stat, "SELECT TO_CHAR(X, 'SS') FROM T"); assertResult("29554", stat, "SELECT TO_CHAR(X, 'SSSSS') FROM T"); - assertResult("8:12:34 AM", stat, "SELECT TO_CHAR(X, 'TS') FROM T"); + expected = new SimpleDateFormat("h:mm:ss aa").format(timestamp1979); + if (Locale.getDefault().getLanguage().equals(Locale.ENGLISH.getLanguage())) { + assertEquals("8:12:34 AM", expected); + } + assertResult(expected, stat, "SELECT TO_CHAR(X, 'TS') FROM T"); assertResult(tzLongName, stat, "SELECT TO_CHAR(X, 'TZR') FROM T"); assertResult(tzShortName, stat, "SELECT TO_CHAR(X, 'TZD') FROM T"); - assertResult(".", stat, "SELECT TO_CHAR(X, 'X') FROM T"); - assertResult("1,979", stat, "SELECT TO_CHAR(X, 'Y,YYY') FROM T"); + assertResult("GMT+10:30", stat, + "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:30', 'TZR')"); + assertResult("GMT+10:30", stat, + "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:30', 'TZD')"); + + assertResult("-10", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00-10:00', 'TZH')"); + assertResult("+10", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:00', 'TZH')"); + assertResult("+00", stat, 
"SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZH')"); + assertResult("50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:50', 'TZM')"); + assertResult("00", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZM')"); + assertResult("-10:50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00-10:50', 'TZH:TZM')"); + assertResult("+10:50", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+10:50', 'TZH:TZM')"); + assertResult("+00:00", stat, "SELECT TO_CHAR(TIMESTAMP WITH TIME ZONE '2010-01-01 0:00:00+00:00', 'TZH:TZM')"); + + expected = String.format("%f", 1.1).substring(1, 2); + assertResult(expected, stat, "SELECT TO_CHAR(X, 'X') FROM T"); + expected = String.format("%,d", 1979); + assertResult(expected, stat, "SELECT TO_CHAR(X, 'Y,YYY') FROM T"); assertResult("1979", stat, "SELECT TO_CHAR(X, 'YYYY') FROM T"); assertResult("1979", stat, "SELECT TO_CHAR(X, 'SYYYY') FROM T"); assertResult("-0100", stat, "SELECT TO_CHAR(X, 'SYYYY') FROM U"); @@ -1380,7 +1552,19 @@ private void testToCharFromDateTime() throws SQLException { assertResult("79", stat, "SELECT TO_CHAR(X, 'YY') FROM T"); assertResult("9", stat, "SELECT TO_CHAR(X, 'Y') FROM T"); assertResult("7979", stat, "SELECT TO_CHAR(X, 'yyfxyy') FROM T"); - assertThrows("", stat, "SELECT TO_CHAR(X, 'A') FROM T"); + assertThrows(ErrorCode.INVALID_TO_CHAR_FORMAT, stat, + "SELECT TO_CHAR(X, 'A') FROM T"); + + assertResult("01-1 2000-01 1999-52", stat, "SELECT TO_CHAR(DATE '2000-01-01', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 1999-52", stat, "SELECT TO_CHAR(DATE '2000-01-02', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-03', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-04', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-05', 'MM-W 
YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-06', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-1 2000-01 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-07', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("01-2 2000-02 2000-01", stat, "SELECT TO_CHAR(DATE '2000-01-08', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("02-1 2000-05 2000-05", stat, "SELECT TO_CHAR(DATE '2000-02-01', 'MM-W YYYY-WW IYYY-IW')"); + assertResult("12-5 2000-53 2000-52", stat, "SELECT TO_CHAR(DATE '2000-12-31', 'MM-W YYYY-WW IYYY-IW')"); // check a bug we had when the month or day of the month is 1 digit stat.executeUpdate("TRUNCATE TABLE T"); @@ -1390,12 +1574,22 @@ private void testToCharFromDateTime() throws SQLException { conn.close(); } + private static String stripTrailingPeriod(String expected) { + // CLDR provider appends period on some locales + int l = expected.length() - 1; + if (expected.charAt(l) == '.') + expected = expected.substring(0, l); + return expected; + } + private void testToCharFromNumber() throws SQLException { deleteDb("functions"); Connection conn = getConnection("functions"); Statement stat = conn.createStatement(); + Locale.setDefault(new Locale("en")); - Currency currency = Currency.getInstance(Locale.getDefault()); + Locale locale = Locale.getDefault(); + Currency currency = Currency.getInstance(locale.getCountry().length() == 2 ? 
locale : Locale.US); String cc = currency.getCurrencyCode(); String cs = currency.getSymbol(); @@ -1431,12 +1625,26 @@ private void testToCharFromNumber() throws SQLException { "SELECT TO_CHAR(12345, '$999') FROM DUAL"); assertResult("######", stat, "SELECT TO_CHAR(12345, '$9999') FROM DUAL"); - assertResult(" " + cs + "12345", stat, - "SELECT TO_CHAR(12345, '$99999999') FROM DUAL"); - assertResult(" " + cs + "12,345.35", stat, - "SELECT TO_CHAR(12345.345, '$99,999,999.99') FROM DUAL"); - assertResult(" " + cs + "12,345", stat, - "SELECT TO_CHAR(12345.345, '$99g999g999') FROM DUAL"); + String expected = String.format("%,d", 12345); + if (locale == Locale.ENGLISH) { + assertResult(String.format("%5s12345", cs), stat, + "SELECT TO_CHAR(12345, '$99999999') FROM DUAL"); + assertResult(String.format("%6s12,345.35", cs), stat, + "SELECT TO_CHAR(12345.345, '$99,999,999.99') FROM DUAL"); + assertResult(String.format("%5s%s", cs, expected), stat, + "SELECT TO_CHAR(12345.345, '$99g999g999') FROM DUAL"); + assertResult(" " + cs + "123.45", stat, + "SELECT TO_CHAR(123.45, 'L999.99') FROM DUAL"); + assertResult(" -" + cs + "123.45", stat, + "SELECT TO_CHAR(-123.45, 'L999.99') FROM DUAL"); + assertResult(cs + "123.45", stat, + "SELECT TO_CHAR(123.45, 'FML999.99') FROM DUAL"); + assertResult(" " + cs + "123.45", stat, + "SELECT TO_CHAR(123.45, 'U999.99') FROM DUAL"); + assertResult(" " + cs + "123.45", stat, + "SELECT TO_CHAR(123.45, 'u999.99') FROM DUAL"); + + } assertResult(" 12,345.35", stat, "SELECT TO_CHAR(12345.345, '99,999,999.99') FROM DUAL"); assertResult("12,345.35", stat, @@ -1525,17 +1733,8 @@ private void testToCharFromNumber() throws SQLException { "SELECT TO_CHAR(123.45, 'C999g999') FROM DUAL"); assertResult(cc + "123.45", stat, "SELECT TO_CHAR(123.45, 'FMC999,999.99') FROM DUAL"); - assertResult(" " + cs + "123.45", stat, - "SELECT TO_CHAR(123.45, 'L999.99') FROM DUAL"); - assertResult(" -" + cs + "123.45", stat, - "SELECT TO_CHAR(-123.45, 'L999.99') FROM DUAL"); 
- assertResult(cs + "123.45", stat, - "SELECT TO_CHAR(123.45, 'FML999.99') FROM DUAL"); - assertResult(" " + cs + "123.45", stat, - "SELECT TO_CHAR(123.45, 'U999.99') FROM DUAL"); - assertResult(" " + cs + "123.45", stat, - "SELECT TO_CHAR(123.45, 'u999.99') FROM DUAL"); - assertResult(" .33", stat, + expected = String.format("%.2f", 0.33f).substring(1); + assertResult(" " + expected, stat, "SELECT TO_CHAR(0.326, '99D99') FROM DUAL"); assertResult(" 1.2E+02", stat, "SELECT TO_CHAR(123.456, '9.9EEEE') FROM DUAL"); @@ -1550,9 +1749,10 @@ private void testToCharFromNumber() throws SQLException { "SELECT TO_CHAR(123.456, '00.00000000EEEE') FROM DUAL"); assertResult("1.23456000E+02", stat, "SELECT TO_CHAR(123.456, 'fm00.00000000EEEE') FROM DUAL"); - assertResult(" 1,234,567", stat, + expected = String.format("%,d", 1234567); + assertResult(" " + expected, stat, "SELECT TO_CHAR(1234567, '9G999G999') FROM DUAL"); - assertResult("-1,234,567", stat, + assertResult("-" + expected, stat, "SELECT TO_CHAR(-1234567, '9G999G999') FROM DUAL"); assertResult("123.45-", stat, "SELECT TO_CHAR(-123.45, '999.99MI') FROM DUAL"); assertResult("123.45-", stat, "SELECT TO_CHAR(-123.45, '999.99mi') FROM DUAL"); @@ -1611,13 +1811,39 @@ private void testToCharFromNumber() throws SQLException { "SELECT TO_CHAR(123456789012345, 'TME') FROM DUAL"); assertResult("4.5E-01", stat, "SELECT TO_CHAR(0.45, 'TME') FROM DUAL"); assertResult("4.5E-01", stat, "SELECT TO_CHAR(0.45, 'tMe') FROM DUAL"); - assertThrows("Invalid TO_CHAR format \"999.99q\"", stat, + assertThrows(ErrorCode.INVALID_TO_CHAR_FORMAT, stat, "SELECT TO_CHAR(123.45, '999.99q') FROM DUAL"); - assertThrows("Invalid TO_CHAR format \"fm999.99q\"", stat, + assertThrows(ErrorCode.INVALID_TO_CHAR_FORMAT, stat, "SELECT TO_CHAR(123.45, 'fm999.99q') FROM DUAL"); - assertThrows("Invalid TO_CHAR format \"q999.99\"", stat, + assertThrows(ErrorCode.INVALID_TO_CHAR_FORMAT, stat, "SELECT TO_CHAR(123.45, 'q999.99') FROM DUAL"); + // ISSUE-115 + 
assertResult("0.123", stat, "select to_char(0.123, 'FM0.099') from dual;"); + assertResult("1.123", stat, "select to_char(1.1234, 'FM0.099') from dual;"); + assertResult("1.1234", stat, "select to_char(1.1234, 'FM0.0999') from dual;"); + assertResult("1.023", stat, "select to_char(1.023, 'FM0.099') from dual;"); + assertResult("0.012", stat, "select to_char(0.012, 'FM0.099') from dual;"); + assertResult("0.123", stat, "select to_char(0.123, 'FM0.099') from dual;"); + assertResult("0.001", stat, "select to_char(0.001, 'FM0.099') from dual;"); + assertResult("0.001", stat, "select to_char(0.0012, 'FM0.099') from dual;"); + assertResult("0.002", stat, "select to_char(0.0019, 'FM0.099') from dual;"); + final char decimalSeparator = DecimalFormatSymbols.getInstance().getDecimalSeparator(); + final String oneDecimal = "0" + decimalSeparator + "0"; + final String twoDecimals = "0" + decimalSeparator + "00"; + assertResult(oneDecimal, stat, "select to_char(0, 'FM0D099') from dual;"); + assertResult(twoDecimals, stat, "select to_char(0., 'FM0D009') from dual;"); + assertResult("0" + decimalSeparator + "000000000", + stat, "select to_char(0.000000000, 'FM0D999999999') from dual;"); + assertResult("0" + decimalSeparator, stat, "select to_char(0, 'FM0D9') from dual;"); + assertResult(oneDecimal, stat, "select to_char(0.0, 'FM0D099') from dual;"); + assertResult(twoDecimals, stat, "select to_char(0.00, 'FM0D009') from dual;"); + assertResult(twoDecimals, stat, "select to_char(0, 'FM0D009') from dual;"); + assertResult(oneDecimal, stat, "select to_char(0, 'FM0D09') from dual;"); + assertResult(oneDecimal, stat, "select to_char(0, 'FM0D0') from dual;"); + + assertResult("10,000,000.", stat, + "SELECT TO_CHAR(CAST(10000000 AS DOUBLE PRECISION), 'FM999,999,999.99') FROM DUAL"); conn.close(); } @@ -1629,57 +1855,159 @@ private void testToCharFromText() throws SQLException { conn.close(); } + private void testAnnotationProcessorsOutput() { + try { + 
System.setProperty(TestAnnotationProcessor.MESSAGES_KEY, "WARNING,foo1|ERROR,foo2"); + callCompiledFunction("test_annotation_processor_warn_and_error"); + fail(); + } catch (SQLException e) { + assertEquals(ErrorCode.SYNTAX_ERROR_1, e.getErrorCode()); + assertContains(e.getMessage(), "foo1"); + assertContains(e.getMessage(), "foo2"); + } finally { + System.clearProperty(TestAnnotationProcessor.MESSAGES_KEY); + } + } + + private void testSignal() throws SQLException { + deleteDb("functions"); + + Connection conn = getConnection("functions"); + Statement stat = conn.createStatement(); + + assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("call signal('00145', 'success class is invalid')"); + assertThrows(ErrorCode.INVALID_VALUE_2, stat).execute("call signal('foo', 'SQLSTATE has 5 chars')"); + assertThrows(ErrorCode.INVALID_VALUE_2, stat) + .execute("call signal('Ab123', 'SQLSTATE has only digits or upper-case letters')"); + try { + stat.execute("call signal('AB123', 'some custom error')"); + fail("Should have thrown"); + } catch (SQLException e) { + assertEquals("AB123", e.getSQLState()); + assertContains(e.getMessage(), "some custom error"); + } + + conn.close(); + } + + private void testThatCurrentTimestampIsSane() throws SQLException, + ParseException { + deleteDb("functions"); + + Date before = new Date(); - private void testGenerateSeries() throws SQLException { Connection conn = getConnection("functions"); + conn.setAutoCommit(false); Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from system_range(1,3)"); + + final String formatted; + final ResultSet rs = stat.executeQuery( + "select to_char(current_timestamp(9), 'YYYY MM DD HH24 MI SS FF3') from dual"); rs.next(); - assertEquals(1, rs.getInt(1)); + formatted = rs.getString(1); + rs.close(); + + Date after = new Date(); + + Date parsed = new SimpleDateFormat("y M d H m s S").parse(formatted); + + assertFalse(parsed.before(before)); + assertFalse(parsed.after(after)); 
+ conn.close(); + } + + + private void testThatCurrentTimestampStaysTheSameWithinATransaction() + throws SQLException, InterruptedException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + conn.setAutoCommit(false); + Statement stat = conn.createStatement(); + + Timestamp first; + ResultSet rs = stat.executeQuery("select CURRENT_TIMESTAMP from DUAL"); rs.next(); - assertEquals(2, rs.getInt(1)); + first = rs.getTimestamp(1); + rs.close(); + + Thread.sleep(1); + + Timestamp second; + rs = stat.executeQuery("select CURRENT_TIMESTAMP from DUAL"); rs.next(); - assertEquals(3, rs.getInt(1)); + second = rs.getTimestamp(1); + rs.close(); - rs = stat.executeQuery("select * from system_range(2,2)"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); + assertEquals(first, second); + conn.close(); + } - rs = stat.executeQuery("select * from system_range(2,1)"); - assertFalse(rs.next()); + private void testThatCurrentTimestampUpdatesOutsideATransaction() + throws SQLException, InterruptedException { + if (config.lazy && config.networked) { + return; + } + deleteDb("functions"); + Connection conn = getConnection("functions"); + conn.setAutoCommit(true); + Statement stat = conn.createStatement(); - rs = stat.executeQuery("select * from system_range(1,2,-1)"); - assertFalse(rs.next()); + Timestamp first; + ResultSet rs = stat.executeQuery("select CURRENT_TIMESTAMP from DUAL"); + rs.next(); + first = rs.getTimestamp(1); + rs.close(); - assertThrows(ErrorCode.STEP_SIZE_MUST_NOT_BE_ZERO, stat).executeQuery( - "select * from system_range(1,2,0)"); + Thread.sleep(1); - rs = stat.executeQuery("select * from system_range(2,1,-1)"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); + Timestamp second; + rs = stat.executeQuery("select CURRENT_TIMESTAMP from DUAL"); + rs.next(); + second = rs.getTimestamp(1); + rs.close(); - rs = stat.executeQuery("select * from system_range(1,5,2)"); - 
assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); + assertTrue(second.after(first)); + conn.close(); + } - rs = stat.executeQuery("select * from system_range(1,6,2)"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); + private void testOverrideAlias() throws SQLException { + deleteDb("functions"); + Connection conn = getConnection("functions"); + conn.setAutoCommit(true); + Statement stat = conn.createStatement(); + + assertThrows(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, stat).execute("create alias CURRENT_TIMESTAMP for '" + + getClass().getName() + ".currentTimestamp'"); + + stat.execute("set BUILTIN_ALIAS_OVERRIDE true"); + + stat.execute("create alias CURRENT_TIMESTAMP for '" + getClass().getName() + ".currentTimestampOverride'"); + + assertCallResult("3141", stat, "CURRENT_TIMESTAMP"); conn.close(); } + private void callCompiledFunction(String functionName) throws SQLException { + deleteDb("functions"); + try (Connection conn = getConnection("functions")) { + Statement stat = conn.createStatement(); + ResultSet rs; + stat.execute("create alias " + functionName + " AS " + + "$$ boolean " + functionName + "() " + + "{ return true; } $$;"); + + PreparedStatement stmt = conn.prepareStatement( + "select " + functionName + "() from dual"); + rs = stmt.executeQuery(); + rs.next(); + assertEquals(Boolean.class.getName(), rs.getObject(1).getClass().getName()); + + stat.execute("drop alias " + functionName + ""); + } + } + private void assertCallResult(String expected, Statement stat, String sql) throws SQLException { ResultSet rs = stat.executeQuery("CALL " + sql); @@ -1788,8 +2116,8 @@ public static ResultSet selectMaxId(Connection conn) throws SQLException { * * @return the test array */ - public static Object[] getArray() 
{ - return new Object[] { 0, "Hello" }; + public static String[] getArray() { + return new String[] { "0", "Hello" }; } /** @@ -1804,16 +2132,6 @@ public static ResultSet resultSetWithNull(Connection conn) throws SQLException { return statement.executeQuery(); } - /** - * This method is called via reflection from the database. - * - * @param conn the connection - * @return the result set - */ - public static ResultSet nullResultSet(Connection conn) { - return null; - } - /** * Test method to create a simple result set. * @@ -1843,10 +2161,10 @@ public static ResultSet simpleResultSet(Integer rowCount, int ip, sp != 1 || lp != 1 || byParam != 1) { throw new AssertionError("params not 1/true"); } - if (rowCount.intValue() >= 1) { + if (rowCount >= 1) { rs.addRow(0, "Hello"); } - if (rowCount.intValue() >= 2) { + if (rowCount >= 2) { rs.addRow(1, "World"); } } @@ -1971,12 +2289,21 @@ public static UUID xorUUID(UUID a, UUID b) { * @param args the argument list * @return an array of one element */ - public static Object[] dynamic(Object[] args) { + public static String[] dynamic(String[] args) { StringBuilder buff = new StringBuilder(); for (Object a : args) { buff.append(a); } - return new Object[] { buff.toString() }; + return new String[] { buff.toString() }; + } + + /** + * This method is called via reflection from the database. + * + * @return a fixed number + */ + public static long currentTimestampOverride() { + return 3141; } @Override @@ -1997,9 +2324,4 @@ public int getType(int[] inputTypes) { return Types.DECIMAL; } - @Override - public void init(Connection conn) { - // ignore - } - } diff --git a/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java b/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java new file mode 100644 index 0000000000..654da27f6e --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestGeneralCommonTableQueries.java @@ -0,0 +1,581 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import org.h2.test.TestAll; +import org.h2.test.TestBase; + +/** + * Test non-recursive queries using WITH, but more than one common table defined. + */ +public class TestGeneralCommonTableQueries extends AbstractBaseForCommonTableExpressions { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testSimpleSelect(); + testImpliedColumnNames(); + testChainedQuery(); + testParameterizedQuery(); + testNumberedParameterizedQuery(); + testColumnNames(); + + testInsert(); + testUpdate(); + testDelete(); + testMerge(); + testCreateTable(); + testNestedSQL(); + testSimple4RowRecursiveQuery(); + testSimple2By4RowRecursiveQuery(); + testSimple3RowRecursiveQueryWithLazyEval(); + testSimple3RowRecursiveQueryDropAllObjects(); + } + + private void testSimpleSelect() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + + stat = conn.createStatement(); + final String simpleTwoColumnQuery = "with " + + "t1(n) as (select 1 as first) " + + ",t2(n) as (select 2 as first) " + + "select * from t1 union all select * from t2"; + rs = stat.executeQuery(simpleTwoColumnQuery); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + + prep = conn.prepareStatement(simpleTwoColumnQuery); + rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + 
assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + + prep = conn.prepareStatement("with " + + "t1(n) as (select 2 as first) " + + ",t2(n) as (select 3 as first) " + + "select * from t1 union all select * from t2 where n<>?"); + + prep.setInt(1, 0); + rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertFalse(rs.next()); + + prep = conn.prepareStatement("with " + + "t1(n) as (select 2 as first) " + + ",t2(n) as (select 3 as first) " + + ",t3(n) as (select 4 as first) " + + "select * from t1 union all select * from t2 union all select * from t3 where n<>?"); + + prep.setInt(1, 4); + rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertFalse(rs.next()); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testImpliedColumnNames() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + prep = conn.prepareStatement("with " + + "t1 as (select 2 as first_col) " + + ",t2 as (select first_col+1 from t1) " + + ",t3 as (select 4 as first_col) " + + "select * from t1 union all select * from t2 union all select * from t3 where first_col<>?"); + + prep.setInt(1, 4); + rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt("FIRST_COL")); + assertFalse(rs.next()); + assertEquals(rs.getMetaData().getColumnCount(), 1); + assertEquals("FIRST_COL", rs.getMetaData().getColumnLabel(1)); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testChainedQuery() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement 
prep; + ResultSet rs; + + prep = conn.prepareStatement( + " WITH t1 AS (" + + " SELECT 1 AS FIRST_COLUMN" + + ")," + + " t2 AS (" + + " SELECT FIRST_COLUMN+1 AS FIRST_COLUMN FROM t1 " + + ") " + + "SELECT sum(FIRST_COLUMN) FROM t2"); + + rs = prep.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testParameterizedQuery() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + prep = conn.prepareStatement("WITH t1 AS (" + + " SELECT X, 'T1' FROM SYSTEM_RANGE(?,?)" + + ")," + + "t2 AS (" + + " SELECT X, 'T2' FROM SYSTEM_RANGE(?,?)" + + ") " + + "SELECT * FROM t1 UNION ALL SELECT * FROM t2 " + + "UNION ALL SELECT X, 'Q' FROM SYSTEM_RANGE(?,?)"); + prep.setInt(1, 1); + prep.setInt(2, 2); + prep.setInt(3, 3); + prep.setInt(4, 4); + prep.setInt(5, 5); + prep.setInt(6, 6); + rs = prep.executeQuery(); + + for (int n: new int[]{1, 2, 3, 4, 5, 6}) { + assertTrue(rs.next()); + assertEquals(n, rs.getInt(1)); + } + assertFalse(rs.next()); + + // call it twice + rs = prep.executeQuery(); + + for (int n: new int[]{1, 2, 3, 4, 5, 6}) { + assertTrue(rs.next()); + assertEquals(n, rs.getInt(1)); + } + assertFalse(rs.next()); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testNumberedParameterizedQuery() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + conn.setAutoCommit(false); + + prep = conn.prepareStatement("WITH t1 AS (" + +" SELECT R.X, 'T1' FROM SYSTEM_RANGE(?1,?2) R" + +")," + +"t2 AS (" + +" SELECT R.X, 'T2' FROM SYSTEM_RANGE(?3,?4) R" + +") " + +"SELECT * FROM t1 UNION ALL SELECT * FROM t2 UNION ALL SELECT X, 'Q' FROM SYSTEM_RANGE(?5,?6)"); + prep.setInt(1, 1); + 
prep.setInt(2, 2); + prep.setInt(3, 3); + prep.setInt(4, 4); + prep.setInt(5, 5); + prep.setInt(6, 6); + rs = prep.executeQuery(); + + for (int n : new int[] { 1, 2, 3, 4, 5, 6 }) { + assertTrue(rs.next()); + assertEquals(n, rs.getInt(1)); + } + assertEquals("X", rs.getMetaData().getColumnLabel(1)); + assertEquals("'T1'", rs.getMetaData().getColumnLabel(2)); + + assertFalse(rs.next()); + + try { + prep = conn.prepareStatement("SELECT * FROM t1 UNION ALL SELECT * FROM t2 "+ + "UNION ALL SELECT X, 'Q' FROM SYSTEM_RANGE(5,6)"); + rs = prep.executeQuery(); + fail("Temp view T1 was accessible after previous WITH statement finished "+ + "- but should not have been."); + } catch (SQLException e) { + // ensure the T1 table has been removed even without auto commit + assertContains(e.getMessage(), "Table \"T1\" not found (this database is empty);"); + } + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testInsert() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + int rowCount; + + stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY, X INT NULL, Y VARCHAR(100) NULL )"); + + prep = conn.prepareStatement("WITH v1 AS (" + + " SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(?1,?2) R" + + ")" + + "INSERT INTO T1 (X,Y) SELECT v1.X, v1.Y FROM v1"); + prep.setInt(1, 1); + prep.setInt(2, 2); + rowCount = prep.executeUpdate(); + + assertEquals(2, rowCount); + + rs = stat.executeQuery("SELECT ID, X,Y FROM T1"); + + for (int n : new int[]{1, 2}) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) != 0); + assertEquals(n, rs.getInt(2)); + assertEquals("X1", rs.getString(3)); + } + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testUpdate() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = 
getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + int rowCount; + + stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS T1 AS SELECT R.X AS ID, R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,2) R"); + + prep = conn.prepareStatement("WITH v1 AS (" + +" SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(?1,?2) R" + +")" + +"UPDATE T1 SET Y = 'Y1' WHERE X IN ( SELECT v1.X FROM v1 )"); + prep.setInt(1, 1); + prep.setInt(2, 2); + rowCount = prep.executeUpdate(); + + assertEquals(2, rowCount); + + rs = stat.executeQuery("SELECT ID, X,Y FROM T1"); + + for (int n : new int[] { 1, 2 }) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) != 0); + assertEquals(n, rs.getInt(2)); + assertEquals("Y1", rs.getString(3)); + } + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testDelete() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + int rowCount; + + stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS T1 AS SELECT R.X AS ID, R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,2) R"); + + prep = conn.prepareStatement("WITH v1 AS (" + +" SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,2) R" + +")" + +"DELETE FROM T1 WHERE X IN ( SELECT v1.X FROM v1 )"); + rowCount = prep.executeUpdate(); + + assertEquals(2, rowCount); + + rs = stat.executeQuery("SELECT ID, X,Y FROM T1"); + + assertFalse(rs.next()); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testMerge() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + int rowCount; + + stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS T1 AS SELECT R.X AS ID, R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,2) R"); + + prep = 
conn.prepareStatement("WITH v1 AS (" + +" SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,3) R" + +")" + +"MERGE INTO T1 KEY(ID) SELECT v1.X AS ID, v1.X, v1.Y FROM v1"); + rowCount = prep.executeUpdate(); + + assertEquals(3, rowCount); + + rs = stat.executeQuery("SELECT ID, X,Y FROM T1"); + + for (int n : new int[] { 1, 2, 3 }) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) != 0); + assertEquals(n, rs.getInt(2)); + assertEquals("X1", rs.getString(3)); + } + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testCreateTable() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + Statement stat; + PreparedStatement prep; + ResultSet rs; + boolean success; + + stat = conn.createStatement(); + prep = conn.prepareStatement("WITH v1 AS (" + +" SELECT R.X, 'X1' AS Y FROM SYSTEM_RANGE(1,3) R" + +")" + +"CREATE TABLE IF NOT EXISTS T1 AS SELECT v1.X AS ID, v1.X, v1.Y FROM v1"); + success = prep.execute(); + + assertEquals(false, success); + + rs = stat.executeQuery("SELECT ID, X,Y FROM T1"); + + for (int n : new int[] { 1, 2, 3 }) { + assertTrue(rs.next()); + assertTrue(rs.getInt(1) != 0); + assertEquals(n, rs.getInt(2)); + assertEquals("X1", rs.getString(3)); + } + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testNestedSQL() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + prep = conn.prepareStatement( + "WITH T1 AS ( "+ + " SELECT * "+ + " FROM TABLE ( "+ + " K VARCHAR = ('a', 'b'), "+ + " V INTEGER = (1, 2) "+ + " ) "+ + "), "+ + " "+ + " "+ + "T2 AS ( "+ + " SELECT * "+ + " FROM TABLE ( "+ + " K VARCHAR = ('a', 'b'), "+ + " V INTEGER = (3, 4) "+ + " ) "+ + "), "+ + " "+ + " "+ + "JOIN_CTE AS ( "+ + " SELECT T1.* "+ + " "+ + " FROM "+ + " T1 "+ + " JOIN T2 ON ( "+ + " T1.K = T2.K "+ + " ) "+ + ") "+ + " "+ + 
"SELECT * FROM JOIN_CTE"); + + rs = prep.executeQuery(); + + for (String keyLetter : new String[] { "a", "b" }) { + assertTrue(rs.next()); + assertContains("ab", rs.getString(1)); + assertEquals(rs.getString(1), keyLetter); + assertTrue(rs.getInt(2) != 0); + } + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testColumnNames() throws Exception { + deleteDb("commonTableExpressionQueries"); + Connection conn = getConnection("commonTableExpressionQueries"); + PreparedStatement prep; + ResultSet rs; + + conn.setAutoCommit(false); + + prep = conn.prepareStatement("WITH t1 AS (" + +" SELECT 1 AS ONE, R.X AS TWO, 'T1' AS THREE, X FROM SYSTEM_RANGE(1,1) R" + +")" + +"SELECT * FROM t1"); + rs = prep.executeQuery(); + + for (int n : new int[] { 1 }) { + assertTrue(rs.next()); + assertEquals(n, rs.getInt(1)); + assertEquals(n, rs.getInt(4)); + } + assertEquals("ONE", rs.getMetaData().getColumnLabel(1)); + assertEquals("TWO", rs.getMetaData().getColumnLabel(2)); + assertEquals("THREE", rs.getMetaData().getColumnLabel(3)); + assertEquals("X", rs.getMetaData().getColumnLabel(4)); + + assertFalse(rs.next()); + + conn.close(); + deleteDb("commonTableExpressionQueries"); + } + + private void testSimple4RowRecursiveQuery() throws Exception { + + String[] expectedRowData = new String[]{"|1", "|2", "|3"}; + String[] expectedColumnTypes = new String[]{"INTEGER"}; + String[] expectedColumnNames = new String[]{"N"}; + + String setupSQL = "-- do nothing"; + String withQuery = "with recursive r(n) as (\n"+ + "(select 1) union all (select n+1 from r where n < 3)\n"+ + ")\n"+ + "select n from r"; + + int maxRetries = 3; + int expectedNumberOfRows = expectedRowData.length; + + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + + } + + private void testSimple2By4RowRecursiveQuery() throws Exception { + + String[] expectedRowData = new 
String[]{"|0|1|10", "|1|2|11", "|2|3|12", "|3|4|13"}; + String[] expectedColumnTypes = new String[]{"INTEGER", "INTEGER", "INTEGER"}; + String[] expectedColumnNames = new String[]{"K", "N", "N2"}; + + String setupSQL = "-- do nothing"; + String withQuery = "with \n"+ + "r1(n,k) as ((select 1, 0) union all (select n+1,k+1 from r1 where n <= 3)),"+ + "r2(n,k) as ((select 10,0) union all (select n+1,k+1 from r2 where n <= 13))"+ + "select r1.k, r1.n, r2.n AS n2 from r1 inner join r2 ON r1.k= r2.k "; + + int maxRetries = 3; + int expectedNumberOfRows = expectedRowData.length; + + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + + } + + private void testSimple3RowRecursiveQueryWithLazyEval() throws Exception { + if (config.lazy && config.networked) { + return; + } + + String[] expectedRowData = new String[]{"|6"}; + String[] expectedColumnTypes = new String[]{"BIGINT"}; + String[] expectedColumnNames = new String[]{"SUM(N)"}; + + // back up the config - to restore it after this test + TestAll backupConfig = config; + config = new TestAll(); + + try { + // Test with settings: lazy mvStore memory multiThreaded + // connection url is + // mem:script;MV_STORE=true;LOG=1;LOCK_TIMEOUT=50; + // LAZY_QUERY_EXECUTION=1 + config.lazy = true; + config.memory = true; + + String setupSQL = "--no config set"; + String withQuery = "select sum(n) from (\n" + +" with recursive r(n) as (\n" + +" (select 1) union all (select n+1 from r where n < 3) \n" + +" )\n" + +" select n from r \n" + +")\n"; + + int maxRetries = 10; + int expectedNumberOfRows = expectedRowData.length; + + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, + setupSQL, withQuery, maxRetries - 1, expectedColumnTypes, false); + } finally { + config = backupConfig; + } + } + + private void testSimple3RowRecursiveQueryDropAllObjects() throws Exception { + + 
String[] expectedRowData = new String[]{"|6"}; + String[] expectedColumnTypes = new String[]{"BIGINT"}; + String[] expectedColumnNames = new String[]{"SUM(N)"}; + + String setupSQL = "DROP ALL OBJECTS;"; + String withQuery = "select sum(n) from (" + +" with recursive r(n) as (" + +" (select 1) union all (select n+1 from r where n < 3)" + +" )," + +" dummyUnusedCte(n) as (" + +" select 1 " + +" )" + +" select n from r" + +")"; + + int maxRetries = 10; + int expectedNumberOfRows = expectedRowData.length; + + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + } +} diff --git a/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java b/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java new file mode 100644 index 0000000000..7e0712016a --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestIgnoreCatalogs.java @@ -0,0 +1,240 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * @author aschoerk + */ +public class TestIgnoreCatalogs extends TestDb { + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + canCommentOn(); + canUseDefaultSchema(); + canYetIdentifyWrongCatalogName(); + canUseSettingInUrl(); + canUseSetterSyntax(); + canCatalogNameEqualSchemaName(); + canUseCatalogAtIndexName(); + canCommentOn(); + canAllCombined(); + doesNotAcceptEmptySchemaWhenNotMSSQL(); + } + + private void doesNotAcceptEmptySchemaWhenNotMSSQL() throws SQLException { + try (Connection conn = getConnection("ignoreCatalogs;IGNORE_CATALOGS=TRUE")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("set schema dbo"); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on table catalog1..test is 'table comment3'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "create table catalog1..test2(id int primary key, " + + "name varchar(255))"); + stat.execute("comment on table catalog1.dbo.test is 'table comment1'"); + stat.execute("insert into test values(1, 'Hello')"); + stat.execute("insert into cat.dbo.test values(2, 'Hello2')"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column ..test..id is 'id comment1'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canCommentOn() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + stat.execute("comment on table catalog1.dbo.test is 'table comment1'"); + stat.execute("comment on table 
dbo.test is 'table comment2'"); + stat.execute("comment on table catalog1..test is 'table comment3'"); + stat.execute("comment on table test is 'table comment4'"); + stat.execute("comment on column catalog1..test.id is 'id comment1'"); + stat.execute("comment on column catalog1.dbo.test.id is 'id comment1'"); + stat.execute("comment on column dbo.test.id is 'id comment1'"); + stat.execute("comment on column test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1...test.id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column catalog1..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column ..test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column test..id is 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column .PUBLIC.TEST.ID 'id comment1'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, stat, "comment on column .TEST.ID 'id comment1'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseDefaultSchema() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table catalog1..test(id int primary key, name varchar(255))"); + + stat.execute("create table test2(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into test values(1, 'Hello')"); + stat.execute("insert into test2 values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseSettingInUrl() throws Exception { + try 
(Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + + } + + private void canUseSetterSyntax() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table catalog1.dbo.test(id int primary key, name varchar(255))"); + // expect table already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canCatalogNameEqualSchemaName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table dbo.dbo.test(id int primary key, name varchar(255))"); + // expect object already exists + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat, + "create table catalog2.dbo.test(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canYetIdentifyWrongCatalogName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + 
prepareDb(stat); + // works, since catalog name equals database name + stat.execute("create table ignoreCatalogs.dbo.test(id int primary key, name varchar(255))"); + // schema test_x not found error + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "create table test_x.dbo.test(id int primary key, name varchar(255))"); + assertThrows(ErrorCode.DATABASE_NOT_FOUND_1, stat, "comment on column db..test.id is 'id'"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canUseCatalogAtIndexName() throws Exception { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;")) { + try (Statement stat = conn.createStatement()) { + prepareDb(stat); + stat.execute("set IGNORE_CATALOGS=TRUE"); + stat.execute("create table dbo.dbo.test(id int primary key, name varchar(255))"); + stat.execute("create index i on dbo.dbo.test(id,name)"); + stat.execute("create index dbo.i2 on dbo.dbo.test(id,name)"); + stat.execute("create index catalog.dbo.i3 on dbo.dbo.test(id,name)"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "create index dboNotExistent.i4 on dbo.dbo.test(id,name)"); + // expect object already exists + stat.execute("insert into dbo.test values(1, 'Hello')"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private void canAllCombined() throws SQLException { + try (Connection conn = getConnection("ignoreCatalogs;MODE=MSSQLSERVER;IGNORE_CATALOGS=TRUE;")) { + try (Statement stat = conn.createStatement()) { + prepareDbAndSetDefaultSchema(stat); + stat.execute("create table dbo.test(id int primary key, name varchar(255))"); + stat.execute("create table catalog1.dbo.test2(id int primary key, name varchar(255))"); + stat.execute("insert into dbo.test values(1, 'Hello')"); + stat.execute("insert into dbo.test2 values(1, 'Hello2')"); + stat.execute("set ignore_catalogs=false"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "insert into catalog1.dbo.test2 values(2, 'Hello2')"); + stat.execute("set ignore_catalogs=true"); 
+ assertResult("1", stat, "select * from test"); + assertResult("1", stat, "select * from test2"); + stat.execute("alter table xxx.dbo.test add column (a varchar(200))"); + stat.execute("alter table xxx..test add column (b varchar(200))"); + stat.execute("alter table test add column (c varchar(200))"); + stat.execute("drop table xxx.dbo.test"); + stat.execute("drop table catalog1.dbo.test2"); + stat.execute("drop table if exists xxx.dbo.test"); + stat.execute("drop table if exists catalog1.dbo.test2"); + stat.execute("set ignore_catalogs=false"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "alter table xxx.dbo.test add column (a varchar(200))"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "alter table xxx..test add column (b varchar(200))"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat, + "alter table test add column (c varchar(200))"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "drop table if exists xxx.dbo.test"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, stat, + "drop table if exists xxx2..test"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat, "drop table test"); + } + } finally { + deleteDb("ignoreCatalogs"); + } + } + + private static void prepareDb(Statement stat) throws SQLException { + stat.execute("drop all objects"); + stat.execute("create schema dbo"); + } + + private static void prepareDbAndSetDefaultSchema(Statement stat) throws SQLException { + prepareDb(stat); + stat.execute("set schema dbo"); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestIndex.java b/h2/src/test/org/h2/test/db/TestIndex.java index d2b595738f..1b2fa807d0 100644 --- a/h2/src/test/org/h2/test/db/TestIndex.java +++ b/h2/src/test/org/h2/test/db/TestIndex.java @@ -1,27 +1,34 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; import java.util.HashMap; +import java.util.HashSet; import java.util.Random; - +import java.util.concurrent.atomic.AtomicInteger; import org.h2.api.ErrorCode; -import org.h2.result.SortOrder; +import org.h2.command.query.Select; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; +import org.h2.tools.SimpleResultSet; +import org.h2.value.ValueInteger; /** * Index tests. */ -public class TestIndex extends TestBase { +public class TestIndex extends TestDb { + + private static int testFunctionIndexCounter; private Connection conn; private Statement stat; @@ -33,16 +40,24 @@ public class TestIndex extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { deleteDb("index"); + testOrderIndex(); testIndexTypes(); testHashIndexOnMemoryTable(); testErrorMessage(); testDuplicateKeyException(); + int to = config.lockTimeout; + config.lockTimeout = 50000; + try { + testConcurrentUpdate(); + } finally { + config.lockTimeout = to; + } testNonUniqueHashIndex(); testRenamePrimaryKey(); testRandomized(); @@ -78,18 +93,37 @@ public void test() throws SQLException { testLargeIndex(); testMultiColumnIndex(); // long time; - // time = System.currentTimeMillis(); + // time = System.nanoTime(); testHashIndex(true, false); testHashIndex(false, false); - // System.out.println("b-tree="+(System.currentTimeMillis()-time)); - // time = System.currentTimeMillis(); testHashIndex(true, true); testHashIndex(false, true); - // System.out.println("hash="+(System.currentTimeMillis()-time)); testMultiColumnHashIndex(); + testFunctionIndex(); + + conn.close(); + deleteDb("index"); + + // This test uses own connection + testEnumIndex(); + } + + private void testOrderIndex() throws SQLException { + Connection conn = getConnection("index"); + stat = conn.createStatement(); + stat.execute("create table test(id int, name varchar)"); + stat.execute("insert into test values (2, 'a'), (1, 'a')"); + stat.execute("create index on test(name)"); + ResultSet rs = stat.executeQuery( + "select id from test where name = 'a' order by id"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); conn.close(); deleteDb("index"); } @@ -130,10 +164,10 @@ private void testErrorMessage() throws SQLException { stat.execute("create table test(id int, name int primary key)"); testErrorMessage("PRIMARY", "KEY", " ON PUBLIC.TEST(NAME)"); stat.execute("create table test(id int, name int, unique(name))"); - 
testErrorMessage("CONSTRAINT_INDEX_2 ON PUBLIC.TEST(NAME)"); + testErrorMessage("CONSTRAINT_INDEX_2 ON PUBLIC.TEST(NAME NULLS FIRST)"); stat.execute("create table test(id int, name int, " + "constraint abc unique(name, id))"); - testErrorMessage("ABC_INDEX_2 ON PUBLIC.TEST(NAME, ID)"); + testErrorMessage("ABC_INDEX_2 ON PUBLIC.TEST(NAME NULLS FIRST, ID NULLS FIRST)"); } private void testErrorMessage(String... expected) throws SQLException { @@ -143,10 +177,10 @@ private void testErrorMessage(String... expected) throws SQLException { fail(); } catch (SQLException e) { String m = e.getMessage(); - int start = m.indexOf('\"'), end = m.indexOf('\"', start + 1); + int start = m.indexOf('"'), end = m.lastIndexOf('"'); String s = m.substring(start + 1, end); for (String t : expected) { - assertTrue(t + " not in " + s, s.contains(t)); + assertContains(s, t); } } stat.execute("drop table test"); @@ -166,12 +200,110 @@ private void testDuplicateKeyException() throws SQLException { // The format of the VALUES clause varies a little depending on the // type of the index, so just test that we're getting useful info // back. 
- assertContains(m, "IDX_TEST_NAME ON PUBLIC.TEST(NAME)"); + assertContains(m, "IDX_TEST_NAME ON PUBLIC.TEST(NAME NULLS FIRST)"); assertContains(m, "'Hello'"); } stat.execute("drop table test"); } + private static class ConcurrentUpdateThread extends Thread { + private final AtomicInteger concurrentUpdateId, concurrentUpdateValue; + + private final PreparedStatement psInsert, psDelete; + + boolean haveDuplicateKeyException; + + ConcurrentUpdateThread(Connection c, AtomicInteger concurrentUpdateId, + AtomicInteger concurrentUpdateValue) throws SQLException { + this.concurrentUpdateId = concurrentUpdateId; + this.concurrentUpdateValue = concurrentUpdateValue; + psInsert = c.prepareStatement("insert into test(id, v) values (?, ?)"); + psDelete = c.prepareStatement("delete from test where v = ?"); + } + + @Override + public void run() { + for (int i = 0; i < 10000; i++) { + try { + if (Math.random() > 0.05) { + psInsert.setInt(1, concurrentUpdateId.incrementAndGet()); + psInsert.setInt(2, concurrentUpdateValue.get()); + psInsert.executeUpdate(); + } else { + psDelete.setInt(1, concurrentUpdateValue.get()); + psDelete.executeUpdate(); + } + } catch (SQLException ex) { + switch (ex.getErrorCode()) { + case 23505: + haveDuplicateKeyException = true; + break; + case 90131: + // Unlikely but possible + break; + default: + ex.printStackTrace(); + } + } + if (Math.random() > 0.95) + concurrentUpdateValue.incrementAndGet(); + } + } + } + + private void testConcurrentUpdate() throws SQLException { + Connection c = getConnection("index"); + Statement stat = c.createStatement(); + stat.execute("create table test(id int primary key, v int)"); + stat.execute("create unique index idx_value_name on test(v)"); + PreparedStatement check = c.prepareStatement("select v from test"); + ConcurrentUpdateThread[] threads = new ConcurrentUpdateThread[4]; + AtomicInteger concurrentUpdateId = new AtomicInteger(), concurrentUpdateValue = new AtomicInteger(); + + // The same connection + for (int 
i = 0; i < threads.length; i++) { + threads[i] = new ConcurrentUpdateThread(c, concurrentUpdateId, concurrentUpdateValue); + } + testConcurrentUpdateRun(threads, check); + // Different connections + Connection[] connections = new Connection[threads.length]; + for (int i = 0; i < threads.length; i++) { + Connection c2 = getConnection("index"); + connections[i] = c2; + threads[i] = new ConcurrentUpdateThread(c2, concurrentUpdateId, concurrentUpdateValue); + } + testConcurrentUpdateRun(threads, check); + for (Connection c2 : connections) { + c2.close(); + } + stat.execute("drop table test"); + c.close(); + } + + private void testConcurrentUpdateRun(ConcurrentUpdateThread[] threads, PreparedStatement check) + throws SQLException { + for (ConcurrentUpdateThread t : threads) { + t.start(); + } + boolean haveDuplicateKeyException = false; + for (ConcurrentUpdateThread t : threads) { + try { + t.join(); + haveDuplicateKeyException |= t.haveDuplicateKeyException; + } catch (InterruptedException e) { + } + } + assertTrue("haveDuplicateKeys", haveDuplicateKeyException); + HashSet set = new HashSet<>(); + try (ResultSet rs = check.executeQuery()) { + while (rs.next()) { + if (!set.add(rs.getInt(1))) { + fail("unique index violation"); + } + } + } + } + private void testNonUniqueHashIndex() throws SQLException { reconnect(); stat.execute("create memory table test(id bigint, data bigint)"); @@ -183,7 +315,7 @@ private void testNonUniqueHashIndex() throws SQLException { "delete from test where id=?"); PreparedStatement prepSelect = conn.prepareStatement( "select count(*) from test where id=?"); - HashMap map = New.hashMap(); + HashMap map = new HashMap<>(); for (int i = 0; i < 1000; i++) { long key = rand.nextInt(10) * 1000000000L; Integer r = map.get(key); @@ -237,7 +369,7 @@ private void testRandomized() throws SQLException { Random rand = new Random(1); reconnect(); stat.execute("drop all objects"); - stat.execute("CREATE TABLE TEST(ID identity)"); + stat.execute("CREATE TABLE 
TEST(ID identity default on null)"); int len = getSize(100, 1000); for (int i = 0; i < len; i++) { switch (rand.nextInt(4)) { @@ -279,7 +411,7 @@ private void testHashIndex() throws SQLException { for (int i = 0; i < len; i++) { int x = rand.nextInt(len); String sql = ""; - switch(rand.nextInt(3)) { + switch (rand.nextInt(3)) { case 0: sql = "delete from testA where id = " + x; break; @@ -328,7 +460,6 @@ private void testDescIndex() throws SQLException { rs = conn.getMetaData().getIndexInfo(null, null, "TEST", false, false); rs.next(); assertEquals("D", rs.getString("ASC_OR_DESC")); - assertEquals(SortOrder.DESCENDING, rs.getInt("SORT_TYPE")); stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(1, 30)"); rs = stat.executeQuery( "SELECT COUNT(*) FROM TEST WHERE ID BETWEEN 10 AND 20"); @@ -338,7 +469,6 @@ private void testDescIndex() throws SQLException { rs = conn.getMetaData().getIndexInfo(null, null, "TEST", false, false); rs.next(); assertEquals("D", rs.getString("ASC_OR_DESC")); - assertEquals(SortOrder.DESCENDING, rs.getInt("SORT_TYPE")); rs = stat.executeQuery( "SELECT COUNT(*) FROM TEST WHERE ID BETWEEN 10 AND 20"); rs.next(); @@ -408,8 +538,7 @@ private void testConstraint() throws SQLException { stat.execute("CREATE TABLE CHILD(ID INT PRIMARY KEY, " + "PID INT, FOREIGN KEY(PID) REFERENCES PARENT(ID))"); reconnect(); - stat.execute("DROP TABLE PARENT"); - stat.execute("DROP TABLE CHILD"); + stat.execute("DROP TABLE PARENT, CHILD"); } private void testLargeIndex() throws SQLException { @@ -588,4 +717,67 @@ private void log(String sql) throws SQLException { trace("---done---"); } + /** + * This method is called from the database. 
+ * + * @return the result set + */ + public static ResultSet testFunctionIndexFunction() { + // There are additional callers like JdbcConnection.prepareCommand() and + // CommandContainer.recompileIfRequired() + for (StackTraceElement element : Thread.currentThread().getStackTrace()) { + if (element.getClassName().startsWith(Select.class.getName())) { + testFunctionIndexCounter++; + break; + } + } + SimpleResultSet rs = new SimpleResultSet(); + rs.addColumn("ID", Types.INTEGER, ValueInteger.PRECISION, 0); + rs.addColumn("VALUE", Types.INTEGER, ValueInteger.PRECISION, 0); + rs.addRow(1, 10); + rs.addRow(2, 20); + rs.addRow(3, 30); + return rs; + } + + private void testFunctionIndex() throws SQLException { + testFunctionIndexCounter = 0; + stat.execute("CREATE ALIAS TEST_INDEX FOR '" + TestIndex.class.getName() + ".testFunctionIndexFunction'"); + try (ResultSet rs = stat.executeQuery("SELECT * FROM TEST_INDEX() WHERE ID = 1 OR ID = 3")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals(10, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertEquals(30, rs.getInt(2)); + assertFalse(rs.next()); + } finally { + stat.execute("DROP ALIAS TEST_INDEX"); + } + assertEquals(1, testFunctionIndexCounter); + } + + private void testEnumIndex() throws SQLException { + if (config.memory || config.networked) { + return; + } + deleteDb("index"); + String url = "jdbc:h2:" + getBaseDir() + "/index;DB_CLOSE_DELAY=0"; + Connection conn = DriverManager.getConnection(url); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE TEST(ID INT, V ENUM('A', 'B'), CONSTRAINT PK PRIMARY KEY(ID, V))"); + stat.execute("INSERT INTO TEST VALUES (1, 'A'), (2, 'B')"); + + conn.close(); + conn = DriverManager.getConnection(url); + stat = conn.createStatement(); + + stat.execute("DELETE FROM TEST WHERE V = 'A'"); + stat.execute("DROP TABLE TEST"); + + conn.close(); + deleteDb("index"); + } + } diff --git 
a/h2/src/test/org/h2/test/db/TestIndexHints.java b/h2/src/test/org/h2/test/db/TestIndexHints.java new file mode 100644 index 0000000000..a992869d9d --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestIndexHints.java @@ -0,0 +1,136 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests the index hints feature of this database. + */ +public class TestIndexHints extends TestDb { + + private Connection conn; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb("indexhints"); + createDb(); + testQuotedIdentifier(); + testWithSingleIndexName(); + testWithEmptyIndexHintsList(); + testWithInvalidIndexName(); + testWithMultipleIndexNames(); + testPlanSqlHasIndexesInCorrectOrder(); + testWithTableAlias(); + testWithTableAliasCalledUse(); + conn.close(); + deleteDb("indexhints"); + } + + private void createDb() throws SQLException { + conn = getConnection("indexhints"); + Statement stat = conn.createStatement(); + stat.execute("create table test (x int, y int)"); + stat.execute("create index idx1 on test (x)"); + stat.execute("create index idx2 on test (x, y)"); + stat.execute("create index \"Idx3\" on test (y, x)"); + } + + private void testQuotedIdentifier() throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * " + + "from test use index(\"Idx3\") where x=1 and y=1"); + assertTrue(rs.next()); + String plan = rs.getString(1); + rs.close(); + 
assertTrue(plan.contains("/* PUBLIC.Idx3:")); + assertTrue(plan.contains("USE INDEX (\"Idx3\")")); + rs = stat.executeQuery("EXPLAIN ANALYZE " + plan); + assertTrue(rs.next()); + plan = rs.getString(1); + assertTrue(plan.contains("/* PUBLIC.Idx3:")); + assertTrue(plan.contains("USE INDEX (\"Idx3\")")); + } + + private void testWithSingleIndexName() throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * " + + "from test use index(idx1) where x=1 and y=1"); + rs.next(); + String result = rs.getString(1); + assertTrue(result.contains("/* PUBLIC.IDX1:")); + } + + private void testWithTableAlias() throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * " + + "from test t use index(idx2) where x=1 and y=1"); + rs.next(); + String result = rs.getString(1); + assertTrue(result.contains("/* PUBLIC.IDX2:")); + } + + private void testWithTableAliasCalledUse() throws SQLException { + // make sure that while adding new syntax for table hints, code + // that uses "USE" as a table alias still works + Statement stat = conn.createStatement(); + stat.executeQuery("explain analyze select * " + + "from test use where use.x=1 and use.y=1"); + } + + private void testWithMultipleIndexNames() throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * " + + "from test use index(idx1, idx2) where x=1 and y=1"); + rs.next(); + String result = rs.getString(1); + assertTrue(result.contains("/* PUBLIC.IDX2:")); + } + + private void testPlanSqlHasIndexesInCorrectOrder() throws SQLException { + ResultSet rs = conn.createStatement().executeQuery("explain analyze select * " + + "from test use index(idx1, idx2) where x=1 and y=1"); + rs.next(); + assertTrue(rs.getString(1).contains("USE INDEX (\"IDX1\", \"IDX2\")")); + + ResultSet rs2 = conn.createStatement().executeQuery("explain analyze select 
* " + + "from test use index(idx2, idx1) where x=1 and y=1"); + rs2.next(); + assertTrue(rs2.getString(1).contains("USE INDEX (\"IDX2\", \"IDX1\")")); + } + + private void testWithEmptyIndexHintsList() throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * " + + "from test use index () where x=1 and y=1"); + rs.next(); + String result = rs.getString(1); + assertTrue(result.contains("/* PUBLIC.TEST.tableScan")); + } + + private void testWithInvalidIndexName() throws SQLException { + Statement stat = conn.createStatement(); + assertThrows(ErrorCode.INDEX_NOT_FOUND_1, stat).executeQuery("explain analyze select * " + + "from test use index(idx_doesnt_exist) where x=1 and y=1"); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java b/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java new file mode 100644 index 0000000000..55d27c26c0 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestLIRSMemoryConsumption.java @@ -0,0 +1,103 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.util.Random; +import org.h2.mvstore.cache.CacheLongKeyLIRS; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Utils; + +/** + * Class TestLIRSMemoryConsumption. + *
      + *
    • 8/5/18 10:57 PM initial creation + *
    + * + * @author Andrei Tokar + */ +public class TestLIRSMemoryConsumption extends TestDb { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() { + testMemoryConsumption(); + System.out.println("-----------------------"); + testMemoryConsumption(); + System.out.println("-----------------------"); + testMemoryConsumption(); + } + + private static void testMemoryConsumption() { + int size = 1_000_000; + Random rng = new Random(); + CacheLongKeyLIRS.Config config = new CacheLongKeyLIRS.Config(); + for (int mb = 1; mb <= 16; mb *= 2) { + config.maxMemory = mb * 1024 * 1024; + CacheLongKeyLIRS cache = new CacheLongKeyLIRS<>(config); + long memoryUsedInitial = Utils.getMemoryUsed(); + for (int i = 0; i < size; i++) { + cache.put(i, createValue(i), getValueSize(i)); + } + for (int i = 0; i < size; i++) { + int key; + int mode = rng.nextInt(4); + switch(mode) { + default: + case 0: + key = rng.nextInt(10); + break; + case 1: + key = rng.nextInt(100); + break; + case 2: + key = rng.nextInt(10_000); + break; + case 3: + key = rng.nextInt(1_000_000); + break; + } + Object val = cache.get(key); + if (val == null) { + cache.put(key, createValue(key), getValueSize(key)); + } + } + Utils.collectGarbage(); + cache.trimNonResidentQueue(); + long memoryUsed = Utils.getMemoryUsed(); + + int sizeHot = cache.sizeHot(); + int sizeResident = cache.size(); + int sizeNonResident = cache.sizeNonResident(); + long hits = cache.getHits(); + long misses = cache.getMisses(); + System.out.println(mb + " | " + + (memoryUsed - memoryUsedInitial + 512) / 1024 + " | " + + (sizeResident+sizeNonResident) + " | " + + sizeHot + " | " + (sizeResident - sizeHot) + " | " + sizeNonResident + + " | " + (hits * 100 / (hits + misses)) ); + } + } + + private static Object createValue(long key) { +// return new Object(); + return new byte[2540]; + } + 
+ private static int getValueSize(long key) { +// return 16; + return 2560; + } +} diff --git a/h2/src/test/org/h2/test/db/TestLargeBlob.java b/h2/src/test/org/h2/test/db/TestLargeBlob.java index 3b2b60ad12..56a94cd740 100644 --- a/h2/src/test/org/h2/test/db/TestLargeBlob.java +++ b/h2/src/test/org/h2/test/db/TestLargeBlob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,11 +11,12 @@ import java.sql.ResultSet; import java.sql.Statement; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test a BLOB larger than Integer.MAX_VALUE */ -public class TestLargeBlob extends TestBase { +public class TestLargeBlob extends TestDb { /** * Run just this test. @@ -23,21 +24,24 @@ public class TestLargeBlob extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { - if (!config.big || config.memory || config.mvcc || config.networked) { - return; + public boolean isEnabled() { + if (!config.big || config.memory || config.networked) { + return false; } + return true; + } + @Override + public void test() throws Exception { deleteDb("largeBlob"); String url = getURL("largeBlob;TRACE_LEVEL_FILE=0", true); Connection conn = getConnection(url); final long testLength = Integer.MAX_VALUE + 110L; Statement stat = conn.createStatement(); - stat.execute("set COMPRESS_LOB LZF"); stat.execute("create table test(x blob)"); PreparedStatement prep = conn.prepareStatement( "insert into test values(?)"); diff --git a/h2/src/test/org/h2/test/db/TestLinkedTable.java b/h2/src/test/org/h2/test/db/TestLinkedTable.java index 0f0fd84f8f..d33f137c67 100644 --- a/h2/src/test/org/h2/test/db/TestLinkedTable.java +++ b/h2/src/test/org/h2/test/db/TestLinkedTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,15 +14,15 @@ import java.sql.SQLException; import java.sql.Statement; import java.sql.Timestamp; - import org.h2.api.ErrorCode; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the linked table feature (CREATE LINKED TABLE). */ -public class TestLinkedTable extends TestBase { +public class TestLinkedTable extends TestDb { /** * Run just this test. @@ -30,8 +30,7 @@ public class TestLinkedTable extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - // System.setProperty("h2.storeLocalTime", "true"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -52,7 +51,9 @@ public void test() throws SQLException { testLinkTwoTables(); testCachingResults(); testLinkedTableInReadOnlyDb(); - + testGeometry(); + testFetchSize(); + testFetchSizeWithAutoCommit(); deleteDb("linkedTable"); } @@ -137,7 +138,7 @@ private void testHiddenSQL() throws SQLException { "(null, 'jdbc:h2:mem:', 'sa', 'pwd', 'DUAL2')"); fail(); } catch (SQLException e) { - assertTrue(e.toString().contains("pwd")); + assertContains(e.toString(), "pwd"); } try { conn.createStatement().execute("create linked table test" + @@ -237,7 +238,7 @@ private void testMultipleSchemas() throws SQLException { assertSingleValue(sb, "SELECT * FROM T2", 2); sa.execute("DROP ALL OBJECTS"); sb.execute("DROP ALL OBJECTS"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, sa). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, sa). 
execute("SELECT * FROM TEST"); ca.close(); cb.close(); @@ -288,9 +289,9 @@ private static void testLinkOtherSchema() throws SQLException { sa.execute("CREATE TABLE GOOD (X NUMBER)"); sa.execute("CREATE SCHEMA S"); sa.execute("CREATE TABLE S.BAD (X NUMBER)"); - sb.execute("CALL LINK_SCHEMA('G', '', " + + sb.execute("SELECT * FROM LINK_SCHEMA('G', '', " + "'jdbc:h2:mem:one', 'sa', 'sa', 'PUBLIC'); "); - sb.execute("CALL LINK_SCHEMA('B', '', " + + sb.execute("SELECT * FROM LINK_SCHEMA('B', '', " + "'jdbc:h2:mem:one', 'sa', 'sa', 'S'); "); // OK sb.executeQuery("SELECT * FROM G.GOOD"); @@ -428,7 +429,7 @@ private void testLinkSchema() throws SQLException { Connection conn2 = DriverManager.getConnection(url2, "sa2", "def def"); Statement stat2 = conn2.createStatement(); - String link = "CALL LINK_SCHEMA('LINKED', '', '" + url1 + + String link = "SELECT * FROM LINK_SCHEMA('LINKED', '', '" + url1 + "', 'sa1', 'abc abc', 'PUBLIC')"; stat2.execute(link); stat2.executeQuery("SELECT * FROM LINKED.TEST1"); @@ -459,7 +460,7 @@ private void testLinkTable() throws SQLException { stat.execute("CREATE TEMP TABLE TEST_TEMP(ID INT PRIMARY KEY)"); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, " + "NAME VARCHAR(200), XT TINYINT, XD DECIMAL(10,2), " + - "XTS TIMESTAMP, XBY BINARY(255), XBO BIT, XSM SMALLINT, " + + "XTS TIMESTAMP, XBY VARBINARY(255), XBO BIT, XSM SMALLINT, " + "XBI BIGINT, XBL BLOB, XDA DATE, XTI TIME, XCL CLOB, XDO DOUBLE)"); stat.execute("CREATE INDEX IDXNAME ON TEST(NAME)"); stat.execute("INSERT INTO TEST VALUES(0, NULL, NULL, NULL, NULL, " + @@ -495,7 +496,7 @@ private void testLinkTable() throws SQLException { testRow(stat, "LINK_TEST"); ResultSet rs = stat.executeQuery("SELECT * FROM LINK_TEST"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals(10, meta.getPrecision(1)); + assertEquals(32, meta.getPrecision(1)); assertEquals(200, meta.getPrecision(2)); conn.close(); @@ -525,7 +526,7 @@ private void testLinkTable() throws SQLException { rs = 
stat.executeQuery("SELECT * FROM " + "INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='LINK_TEST'"); rs.next(); - assertEquals("TABLE LINK", rs.getString("TABLE_TYPE")); + assertEquals("TABLE LINK", rs.getString("STORAGE_TYPE")); rs.next(); rs = stat.executeQuery("SELECT * FROM LINK_TEST WHERE ID=0"); @@ -576,7 +577,7 @@ private void testRow(Statement stat, String name) throws SQLException { assertTrue(rs.getBoolean("XBO")); assertEquals(3000, rs.getShort("XSM")); assertEquals(1234567890123456789L, rs.getLong("XBI")); - assertEquals("1122aa", rs.getString("XBL")); + assertEquals(new byte[] {0x11, 0x22, (byte) 0xAA }, rs.getBytes("XBL")); assertEquals("0002-01-01", rs.getString("XDA")); assertEquals("00:00:00", rs.getString("XTI")); assertEquals("J\u00fcrg", rs.getString("XCL")); @@ -693,4 +694,84 @@ private void testLinkedTableInReadOnlyDb() throws SQLException { deleteDb("testLinkedTableInReadOnlyDb"); } + private void testGeometry() throws SQLException { + if (config.memory) { + return; + } + org.h2.Driver.load(); + Connection ca = DriverManager.getConnection("jdbc:h2:mem:one", "sa", "sa"); + Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); + Statement sa = ca.createStatement(); + Statement sb = cb.createStatement(); + sa.execute("CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY," + + " THE_GEOM GEOMETRY, THE_GEOM_2 GEOMETRY(POINT, 4326))"); + sa.execute("INSERT INTO TEST(THE_GEOM, THE_GEOM_2) VALUES" + + " (GEOMETRY 'POINT (1 1)', GEOMETRY 'SRID=4326;POINT(2 2)')"); + String sql = "CREATE LINKED TABLE T(NULL, " + + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') READONLY"; + sb.execute(sql); + try (ResultSet rs = sb.executeQuery("SELECT * FROM T")) { + assertTrue(rs.next()); + assertEquals("POINT (1 1)", rs.getString("THE_GEOM")); + assertEquals("SRID=4326;POINT (2 2)", rs.getString("THE_GEOM_2")); + } + sb.execute("DROP TABLE T"); + ca.close(); + cb.close(); + } + + private void testFetchSize() throws SQLException { + if 
(config.memory) { + return; + } + org.h2.Driver.load(); + Connection ca = DriverManager.getConnection("jdbc:h2:mem:one", "sa", "sa"); + Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); + Statement sa = ca.createStatement(); + Statement sb = cb.createStatement(); + sa.execute("DROP TABLE IF EXISTS TEST; " + + "CREATE TABLE TEST as select * from SYSTEM_RANGE(1,1000) as n;"); + String sql = "CREATE LINKED TABLE T(NULL, " + + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') FETCH_SIZE 10"; + sb.execute(sql); + try (ResultSet rs = sb.executeQuery("SELECT count(*) FROM T")) { + assertTrue(rs.next()); + assertEquals(1000, rs.getInt(1)); + } + ResultSet res = sb.executeQuery("CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T')"); + res.next(); + assertEquals("CREATE FORCE LINKED TABLE \"PUBLIC\".\"T\"(NULL, 'jdbc:h2:mem:one', 'sa', 'sa', 'TEST')" + + " FETCH_SIZE 10 /*--hide--*/", res.getString(1)); + sb.execute("DROP TABLE T"); + ca.close(); + cb.close(); + } + + private void testFetchSizeWithAutoCommit() throws SQLException { + if (config.memory) { + return; + } + org.h2.Driver.load(); + Connection ca = DriverManager.getConnection("jdbc:h2:mem:one", "sa", "sa"); + Connection cb = DriverManager.getConnection("jdbc:h2:mem:two", "sa", "sa"); + Statement sa = ca.createStatement(); + Statement sb = cb.createStatement(); + sa.execute("DROP TABLE IF EXISTS TEST; " + + "CREATE TABLE TEST as select * from SYSTEM_RANGE(1,1000) as n;"); + String sql = "CREATE LINKED TABLE T(NULL, " + + "'jdbc:h2:mem:one', 'sa', 'sa', 'TEST') FETCH_SIZE 10 AUTOCOMMIT OFF"; + sb.execute(sql); + try (ResultSet rs = sb.executeQuery("SELECT count(*) FROM T")) { + assertTrue(rs.next()); + assertEquals(1000, rs.getInt(1)); + } + ResultSet res = sb.executeQuery("CALL DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T')"); + res.next(); + assertEquals("CREATE FORCE LINKED TABLE \"PUBLIC\".\"T\"(NULL, 'jdbc:h2:mem:one', 'sa', 'sa', 'TEST')" + + " FETCH_SIZE 10 AUTOCOMMIT OFF /*--hide--*/", res.getString(1)); + 
sb.execute("DROP TABLE T"); + ca.close(); + cb.close(); + } + } diff --git a/h2/src/test/org/h2/test/db/TestListener.java b/h2/src/test/org/h2/test/db/TestListener.java index 788060e7ec..5e042743f9 100644 --- a/h2/src/test/org/h2/test/db/TestListener.java +++ b/h2/src/test/org/h2/test/db/TestListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,22 +10,23 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; import org.h2.api.DatabaseEventListener; import org.h2.test.TestBase; -import org.h2.util.JdbcUtils; +import org.h2.test.TestDb; /** * Tests the DatabaseEventListener. */ -public class TestListener extends TestBase implements DatabaseEventListener { +public class TestListener extends TestDb implements DatabaseEventListener { private long last; private int lastState = -1; private String databaseUrl; public TestListener() { - start = last = System.currentTimeMillis(); + start = last = System.nanoTime(); } /** @@ -34,14 +35,19 @@ public TestListener() { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.networked || config.cipher != null) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { deleteDb("listener"); Connection conn; conn = getConnection("listener"); @@ -68,9 +74,9 @@ public void exceptionThrown(SQLException e, String sql) { } @Override - public void setProgress(int state, String name, int current, int max) { - long time = System.currentTimeMillis(); - if (state == lastState && time < last + 1000) { + public void setProgress(int state, String name, long current, long max) { + long time = System.nanoTime(); + if (state == lastState && time < last + TimeUnit.SECONDS.toNanos(1)) { return; } if (state == STATE_STATEMENT_START || @@ -104,7 +110,7 @@ public void setProgress(int state, String name, int current, int max) { // ignore } printTime("state: " + stateName + " " + - (100 * current / max) + " " + (time - start)); + (100 * current / max) + " " + TimeUnit.NANOSECONDS.toMillis(time - start)); } @Override @@ -112,15 +118,13 @@ public void closingDatabase() { if (databaseUrl.toUpperCase().contains("CIPHER")) { return; } - Connection conn = null; - try { - conn = DriverManager.getConnection(databaseUrl, getUser(), getPassword()); + + try (Connection conn = DriverManager.getConnection(databaseUrl, + getUser(), getPassword())) { conn.createStatement().execute("DROP TABLE TEST2"); conn.close(); } catch (SQLException e) { e.printStackTrace(); - } finally { - JdbcUtils.closeSilently(conn); } } @@ -134,15 +138,13 @@ public void opened() { if (databaseUrl.toUpperCase().contains("CIPHER")) { return; } - Connection conn = null; - try { - conn = DriverManager.getConnection(databaseUrl, getUser(), getPassword()); + + try (Connection conn = DriverManager.getConnection(databaseUrl, + getUser(), getPassword())) { 
conn.createStatement().execute("CREATE TABLE IF NOT EXISTS TEST2(ID INT)"); conn.close(); } catch (SQLException e) { e.printStackTrace(); - } finally { - JdbcUtils.closeSilently(conn); } } diff --git a/h2/src/test/org/h2/test/db/TestLob.java b/h2/src/test/org/h2/test/db/TestLob.java index 4dbd938fdb..45203921a2 100644 --- a/h2/src/test/org/h2/test/db/TestLob.java +++ b/h2/src/test/org/h2/test/db/TestLob.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,6 +13,7 @@ import java.io.OutputStream; import java.io.Reader; import java.io.StringReader; +import java.nio.charset.StandardCharsets; import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; @@ -22,24 +23,34 @@ import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; -import java.util.List; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; +import org.h2.engine.Constants; import org.h2.engine.SysProperties; import org.h2.jdbc.JdbcConnection; import org.h2.message.DbException; +import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.tools.Recover; +import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; -import org.h2.util.StringUtils; import org.h2.util.Task; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; +import org.h2.value.ValueLob; /** * Tests LOB and CLOB data types. 
*/ -public class TestLob extends TestBase { +public class TestLob extends TestDb { private static final String MORE_THAN_128_CHARS = "12345678901234567890123456789012345678901234567890" + @@ -54,15 +65,16 @@ public class TestLob extends TestBase { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.big = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { + testReclamationOnInDoubtRollback(); + testRemoveAfterDeleteAndClose(); testRemovedAfterTimeout(); testConcurrentRemoveRead(); testCloseLobTwice(); - testCleaningUpLobsOnRollback(); testClobWithRandomUnicodeChars(); testCommitOnExclusiveConnection(); testReadManyLobs(); @@ -72,7 +84,6 @@ public void test() throws Exception { testBlobInputStreamSeek(true); testBlobInputStreamSeek(false); testDeadlock(); - testDeadlock2(); testCopyManyLobs(); testCopyLob(); testConcurrentCreate(); @@ -80,15 +91,13 @@ public void test() throws Exception { testUniqueIndex(); testConvert(); testCreateAsSelect(); - testDelete(); - testTempFilesDeleted(true); - testTempFilesDeleted(false); testLobServerMemory(); testUpdatingLobRow(); + testBufferedInputStreamBug(); if (config.memory) { return; } - testLobCleanupSessionTemporaries(); + testLargeClob(); testLobUpdateMany(); testLobVariable(); testLobDrop(); @@ -98,10 +107,7 @@ public void test() throws Exception { testLobRollbackStop(); testLobCopy(); testLobHibernate(); - testLobCopy(false); - testLobCopy(true); - testLobCompression(false); - testLobCompression(true); + testLobCopy2(); testManyLobs(); testClob(); testUpdateLob(); @@ -109,13 +115,119 @@ public void test() throws Exception { testLob(false); testLob(true); testJavaObject(); + testLobInValueResultSet(); + testLimits(); deleteDb("lob"); - FileUtils.deleteRecursive(TEMP_DIR, true); + } + + private void testReclamationOnInDoubtRollback() throws Exception { + if (config.memory || config.cipher != null) { + return; + } + 
deleteDb("lob"); + try (Connection conn = getConnection("lob")) { + try (Statement st = conn.createStatement()) { + st.executeUpdate("CREATE TABLE IF NOT EXISTS dataTable(" + + "dataStamp BIGINT PRIMARY KEY, " + + "data BLOB)"); + } + + conn.setAutoCommit(false); + Random rnd = new Random(0); + try (PreparedStatement pstmt = conn.prepareStatement("INSERT INTO dataTable VALUES(?, ?)")) { + for (int i = 0; i < 100; ++i) { + int numBytes = 1024 * 1024; + byte[] data = new byte[numBytes]; + rnd.nextBytes(data); + pstmt.setLong(1, i); + pstmt.setBytes(2, data); + pstmt.executeUpdate(); + } + } + try (Statement st = conn.createStatement()) { + st.executeUpdate("PREPARE COMMIT lobtx"); + st.execute("SHUTDOWN IMMEDIATELY"); + } + } + + try (Connection conn = getConnection("lob")) { + try (Statement st = conn.createStatement(); + ResultSet rs = st.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT")) { + assertTrue("No in-doubt tx", rs.first()); + assertEquals("LOBTX", rs.getString("TRANSACTION_NAME")); + assertFalse("more than one in-doubt tx", rs.next()); + st.executeUpdate("ROLLBACK TRANSACTION lobtx; CHECKPOINT SYNC"); + } + } + + try (Connection conn = getConnection("lob")) { + try (Statement st = conn.createStatement()) { + st.execute("SHUTDOWN COMPACT"); + } + } + + ArrayList dbFiles = FileLister.getDatabaseFiles(getBaseDir(), "lob", false); + assertEquals(1, dbFiles.size()); + File file = new File(dbFiles.get(0)); + assertTrue(file.exists()); + long fileSize = file.length(); + assertTrue("File size=" + fileSize, fileSize < 13000); + } + + private void testRemoveAfterDeleteAndClose() throws Exception { + if (config.memory || config.cipher != null) { + return; + } + deleteDb("lob"); + Connection conn = getConnection("lob"); + Statement stat = conn.createStatement(); + stat.execute("create table test(id int primary key, data clob)"); + for (int i = 0; i < 10; i++) { + stat.execute("insert into test values(1, space(100000))"); + if (i > 5) { + ResultSet rs = 
stat.executeQuery("select * from test"); + rs.next(); + Clob c = rs.getClob(2); + stat.execute("delete from test where id = 1"); + c.getSubString(1, 10); + } else { + stat.execute("delete from test where id = 1"); + } + } + // some clobs are removed only here (those that were queries for) + conn.close(); + Recover.execute(getBaseDir(), "lob"); + long size = FileUtils.size(getBaseDir() + "/lob.h2.sql"); + assertTrue("size: " + size, size > 1000 && size < 10000); + } + + private void testLargeClob() throws Exception { + deleteDb("lob"); + Connection conn; + conn = reconnect(null); + conn.createStatement().execute( + "CREATE TABLE TEST(ID IDENTITY, C CLOB)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST(C) VALUES(?)"); + int len = SysProperties.LOB_CLIENT_MAX_SIZE_MEMORY + 1; + prep.setCharacterStream(1, getRandomReader(len, 2), -1); + prep.execute(); + conn = reconnect(conn); + ResultSet rs = conn.createStatement().executeQuery( + "SELECT * FROM TEST ORDER BY ID"); + rs.next(); + assertEqualReaders(getRandomReader(len, 2), + rs.getCharacterStream("C"), -1); + assertFalse(rs.next()); + conn.close(); } private void testRemovedAfterTimeout() throws Exception { + if (config.lazy) { + return; + } deleteDb("lob"); - final String url = getURL("lob;lob_timeout=50", true); + final String url = getURL("lob;lob_timeout=200", true); Connection conn = getConnection(url); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, data clob)"); @@ -137,19 +249,17 @@ private void testRemovedAfterTimeout() throws Exception { stat.execute("delete from test"); c1.getSubString(1, 3); // wait until it times out - Thread.sleep(100); + Thread.sleep(250); // start a new transaction, to be sure stat.execute("delete from test"); - try { - c1.getSubString(1, 3); - fail(); - } catch (SQLException e) { - // expected - } + assertThrows(SQLException.class, c1).getSubString(1, 3); conn.close(); } private void testConcurrentRemoveRead() 
throws Exception { + if (config.lazy) { + return; + } deleteDb("lob"); final String url = getURL("lob", true); Connection conn = getConnection(url); @@ -182,28 +292,6 @@ private void testCloseLobTwice() throws SQLException { conn.close(); } - private void testCleaningUpLobsOnRollback() throws Exception { - if (config.mvStore) { - return; - } - deleteDb("lob"); - Connection conn = getConnection("lob"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE test(id int, data CLOB)"); - conn.setAutoCommit(false); - stat.executeUpdate("insert into test values (1, '" + - MORE_THAN_128_CHARS + "')"); - conn.rollback(); - ResultSet rs = stat.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(0, rs.getInt(1)); - rs = stat.executeQuery("select * from information_schema.lobs"); - rs = stat.executeQuery("select count(*) from information_schema.lob_data"); - rs.next(); - assertEquals(0, rs.getInt(1)); - conn.close(); - } - private void testReadManyLobs() throws Exception { deleteDb("lob"); Connection conn; @@ -211,7 +299,7 @@ private void testReadManyLobs() throws Exception { Statement stat = conn.createStatement(); stat.execute("create table test(id identity, data clob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?)"); + "insert into test(data) values ?"); byte[] data = new byte[256]; Random r = new Random(1); for (int i = 0; i < 1000; i++) { @@ -310,17 +398,6 @@ private void testBlobInputStreamSeek(boolean upgraded) throws Exception { prep.setBinaryStream(2, new ByteArrayInputStream(buff), -1); prep.execute(); } - if (upgraded) { - if (!config.mvStore) { - if (config.memory) { - stat.execute("update information_schema.lob_map set pos=null"); - } else { - stat.execute("alter table information_schema.lob_map drop column pos"); - conn.close(); - conn = getConnection("lob"); - } - } - } prep = conn.prepareStatement("select * from test where id = ?"); for (int i = 0; i < 1; i++) { random.setSeed(i); @@ 
-375,125 +452,20 @@ public void call() throws Exception { conn2.close(); } - /** - * A background task. - */ - private final class Deadlock2Task1 extends Task { - - public final Connection conn; - - Deadlock2Task1() throws SQLException { - this.conn = getDeadlock2Connection(); - } - - @Override - public void call() throws Exception { - Random random = new Random(); - Statement stat = conn.createStatement(); - char[] tmp = new char[1024]; - while (!stop) { - try { - ResultSet rs = stat.executeQuery( - "select name from test where id = " + random.nextInt(999)); - if (rs.next()) { - Reader r = rs.getClob("name").getCharacterStream(); - while (r.read(tmp) >= 0) { - // ignore - } - r.close(); - } - rs.close(); - } catch (SQLException ex) { - // ignore "LOB gone away", this can happen - // in the presence of concurrent updates - if (ex.getErrorCode() != ErrorCode.IO_EXCEPTION_2) { - throw ex; - } - } catch (IOException ex) { - // ignore "LOB gone away", this can happen - // in the presence of concurrent updates - Exception e = ex; - if (e.getCause() instanceof DbException) { - e = (Exception) e.getCause(); - } - if (!(e.getCause() instanceof SQLException)) { - throw ex; - } - SQLException e2 = (SQLException) e.getCause(); - if (e2.getErrorCode() != ErrorCode.IO_EXCEPTION_1) { - throw ex; - } - } catch (Exception e) { - e.printStackTrace(System.out); - throw e; - } - } - } - - } - - /** - * A background task. 
- */ - private final class Deadlock2Task2 extends Task { - - public final Connection conn; - - Deadlock2Task2() throws SQLException { - this.conn = getDeadlock2Connection(); - } - - @Override - public void call() throws Exception { - Random random = new Random(); - Statement stat = conn.createStatement(); - while (!stop) { - stat.execute("update test set counter = " + - random.nextInt(10) + " where id = " + random.nextInt(1000)); - } - } - - } - - private void testDeadlock2() throws Exception { - if (config.mvcc || config.memory) { - return; - } - deleteDb("lob"); - Connection conn = getDeadlock2Connection(); - Statement stat = conn.createStatement(); - stat.execute("create cached table test(id int not null identity, " + - "name clob, counter int)"); - stat.execute("insert into test(id, name) select x, space(100000) " + - "from system_range(1, 100)"); - Deadlock2Task1 task1 = new Deadlock2Task1(); - Deadlock2Task2 task2 = new Deadlock2Task2(); - task1.execute("task1"); - task2.execute("task2"); - for (int i = 0; i < 100; i++) { - stat.execute("insert into test values(null, space(10000 + " + i + "), 1)"); - } - task1.get(); - task1.conn.close(); - task2.get(); - task2.conn.close(); - conn.close(); - } - Connection getDeadlock2Connection() throws SQLException { - return getConnection("lob;MULTI_THREADED=TRUE;LOCK_TIMEOUT=60000"); + return getConnection("lob;LOCK_TIMEOUT=60000"); } private void testCopyManyLobs() throws Exception { deleteDb("lob"); Connection conn = getConnection("lob"); Statement stat = conn.createStatement(); - stat.execute("create table test(id identity, data clob) " + - "as select 1, space(10000)"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); - stat.execute("insert into test(id, data) select null, data from test"); + stat.execute("create table test(id identity default on 
null, data clob) " + + "as select null, space(10000)"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); + stat.execute("insert into test(data) select data from test"); stat.execute("delete from test where id < 10"); stat.execute("shutdown compact"); conn.close(); @@ -586,12 +558,7 @@ private void testUniqueIndex() throws Exception { Statement stat; conn = getConnection("lob"); stat = conn.createStatement(); - try { - stat.execute("create memory table test(x clob unique)"); - fail(); - } catch (SQLException e) { - assertEquals(ErrorCode.FEATURE_NOT_SUPPORTED_1, e.getErrorCode()); - } + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, stat).execute("create memory table test(x clob unique)"); conn.close(); } @@ -628,82 +595,6 @@ private void testCreateAsSelect() throws Exception { conn.close(); } - private void testDelete() throws Exception { - if (config.memory || config.mvStore) { - return; - } - deleteDb("lob"); - Connection conn; - Statement stat; - conn = getConnection("lob"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name clob)"); - stat.execute("insert into test values(1, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(2, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 1"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(3, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("insert into test values(4, space(10000))"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 2"); - 
assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test where id = 3"); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 1); - stat.execute("delete from test"); - conn.close(); - conn = getConnection("lob"); - stat = conn.createStatement(); - assertSingleValue(stat, - "select count(*) from information_schema.lob_data", 0); - stat.execute("drop table test"); - conn.close(); - } - - private void testTempFilesDeleted(boolean stream) throws Exception { - FileUtils.deleteRecursive(TEMP_DIR, true); - FileUtils.createDirectories(TEMP_DIR); - List list = FileUtils.newDirectoryStream(TEMP_DIR); - assertEquals("Unexpected temp file: " + list, 0, list.size()); - deleteDb("lob"); - Connection conn = getConnection("lob"); - Statement stat; - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name text)"); - PreparedStatement prep = conn.prepareStatement( - "insert into test values(2, ?)"); - if (stream) { - String large = new String(new char[1024 * 1024 * 2]).replace((char) 0, 'x'); - prep.setCharacterStream(1, new StringReader(large), -1); - large = null; - prep.execute(); - } else { - stat.execute("insert into test values(1, space(100000))"); - } - /* - list = FileUtils.newDirectoryStream(TEMP_DIR); - assertEquals("Unexpected temp file: " + list, 0, list.size()); - */ - ResultSet rs; - rs = stat.executeQuery("select * from test"); - while (rs.next()) { - rs.getCharacterStream("name").close(); - } - prep.close(); - conn.close(); - list = FileUtils.newDirectoryStream(TEMP_DIR); - assertEquals("Unexpected temp file: " + list, 0, list.size()); - } - private void testLobUpdateMany() throws SQLException { deleteDb("lob"); Connection conn = getConnection("lob"); @@ -716,35 +607,6 @@ private void testLobUpdateMany() throws SQLException { conn.close(); } - private void testLobCleanupSessionTemporaries() throws SQLException { - if (config.mvStore) { - return; - } 
- deleteDb("lob"); - Connection conn = getConnection("lob"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data clob)"); - - ResultSet rs = stat.executeQuery("select count(*) " + - "from INFORMATION_SCHEMA.LOBS"); - assertTrue(rs.next()); - assertEquals(0, rs.getInt(1)); - rs.close(); - - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO test(data) VALUES(?)"); - String name = new String(new char[200]).replace((char) 0, 'x'); - prep.setString(1, name); - prep.execute(); - prep.close(); - - rs = stat.executeQuery("select count(*) from INFORMATION_SCHEMA.LOBS"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - rs.close(); - conn.close(); - } - private void testLobServerMemory() throws SQLException { deleteDb("lob"); Connection conn = getConnection("lob"); @@ -1053,39 +915,46 @@ private void testLobHibernate() throws Exception { prep2.getQueryTimeout(); prep2.close(); conn0.getAutoCommit(); - Reader r = clob0.getCharacterStream(); + Reader r; + int ch; + r = clob0.getCharacterStream(); for (int i = 0; i < 10000; i++) { - int ch = r.read(); + ch = r.read(); + if (ch != ('0' + (i % 10))) { + fail("expected " + (char) ('0' + (i % 10)) + + " got: " + ch + " (" + (char) ch + ")"); + } + } + ch = r.read(); + if (ch != -1) { + fail("expected -1 got: " + ch); + } + r.close(); + r = clob0.getCharacterStream(1235, 1000); + for (int i = 1234; i < 2234; i++) { + ch = r.read(); if (ch != ('0' + (i % 10))) { fail("expected " + (char) ('0' + (i % 10)) + " got: " + ch + " (" + (char) ch + ")"); } } - int ch = r.read(); + ch = r.read(); if (ch != -1) { fail("expected -1 got: " + ch); } + r.close(); + assertThrows(ErrorCode.INVALID_VALUE_2, clob0).getCharacterStream(10001, 1); + assertThrows(ErrorCode.INVALID_VALUE_2, clob0).getCharacterStream(10002, 0); conn0.close(); } - private void testLobCopy(boolean compress) throws SQLException { + private void testLobCopy2() throws SQLException { deleteDb("lob"); Connection conn; conn = 
reconnect(null); Statement stat = conn.createStatement(); - if (compress) { - stat.execute("SET COMPRESS_LOB LZF"); - } else { - stat.execute("SET COMPRESS_LOB NO"); - } conn = reconnect(conn); stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select value from information_schema.settings " + - "where NAME='COMPRESS_LOB'"); - rs.next(); - assertEquals(compress ? "LZF" : "NO", rs.getString(1)); - assertFalse(rs.next()); stat.execute("create table test(text clob)"); stat.execute("create table test2(text clob)"); StringBuilder buff = new StringBuilder(); @@ -1095,7 +964,7 @@ private void testLobCopy(boolean compress) throws SQLException { String spaces = buff.toString(); stat.execute("insert into test values('" + spaces + "')"); stat.execute("insert into test2 select * from test"); - rs = stat.executeQuery("select * from test2"); + ResultSet rs = stat.executeQuery("select * from test2"); rs.next(); assertEquals(spaces, rs.getString(1)); stat.execute("drop table test"); @@ -1109,55 +978,6 @@ private void testLobCopy(boolean compress) throws SQLException { conn.close(); } - private void testLobCompression(boolean compress) throws Exception { - deleteDb("lob"); - Connection conn; - conn = reconnect(null); - if (compress) { - conn.createStatement().execute("SET COMPRESS_LOB LZF"); - } else { - conn.createStatement().execute("SET COMPRESS_LOB NO"); - } - conn.createStatement().execute("CREATE TABLE TEST(ID INT PRIMARY KEY, C CLOB)"); - PreparedStatement prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(?, ?)"); - long time = System.currentTimeMillis(); - int len = getSize(10, 40); - if (config.networked && config.big) { - len = 5; - } - StringBuilder buff = new StringBuilder(); - for (int i = 0; i < 1000; i++) { - buff.append(StringUtils.xmlNode("content", null, "This is a test " + i)); - } - String xml = buff.toString(); - for (int i = 0; i < len; i++) { - prep.setInt(1, i); - prep.setString(2, xml + i); - prep.execute(); - } - for (int i = 
0; i < len; i++) { - ResultSet rs = conn.createStatement().executeQuery( - "SELECT * FROM TEST"); - while (rs.next()) { - if (i == 0) { - assertEquals(xml + rs.getInt(1), rs.getString(2)); - } else { - Reader r = rs.getCharacterStream(2); - String result = IOUtils.readStringAndClose(r, -1); - assertEquals(xml + rs.getInt(1), result); - } - } - } - time = System.currentTimeMillis() - time; - trace("time: " + time + " compress: " + compress); - conn.close(); - if (!config.memory) { - long length = new File(getBaseDir() + "/lob.h2.db").length(); - trace("len: " + length + " compress: " + compress); - } - } - private void testManyLobs() throws Exception { deleteDb("lob"); Connection conn; @@ -1273,12 +1093,12 @@ private void testClob() throws Exception { } private Connection reconnect(Connection conn) throws SQLException { - long time = System.currentTimeMillis(); + long time = System.nanoTime(); if (conn != null) { JdbcUtils.closeSilently(conn); } conn = getConnection("lob"); - trace("re-connect=" + (System.currentTimeMillis() - time)); + trace("re-connect=" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); return conn; } @@ -1338,7 +1158,7 @@ private void testLobReconnect() throws Exception { PreparedStatement prep; prep = conn.prepareStatement("INSERT INTO TEST VALUES(1, ?)"); String s = new String(getRandomChars(10000, 1)); - byte[] data = s.getBytes("UTF-8"); + byte[] data = s.getBytes(StandardCharsets.UTF_8); // if we keep the string, debugging with Eclipse is not possible // because Eclipse wants to display the large string and fails s = ""; @@ -1376,7 +1196,7 @@ private void testLob(boolean clob) throws Exception { PreparedStatement prep; ResultSet rs; long time; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE " + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V " + (clob ? 
"CLOB" : "BLOB") + ")"); int len = getSize(1, 1000); @@ -1384,7 +1204,7 @@ private void testLob(boolean clob) throws Exception { len = 100; } - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); for (int i = 0; i < len; i += i + i + 1) { prep.setInt(1, i); @@ -1396,12 +1216,12 @@ private void testLob(boolean clob) throws Exception { } prep.execute(); } - trace("insert=" + (System.currentTimeMillis() - time)); + trace("insert=" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); traceMemory(); conn = reconnect(conn); - time = System.currentTimeMillis(); - prep = conn.prepareStatement("SELECT ID, VALUE FROM TEST"); + time = System.nanoTime(); + prep = conn.prepareStatement("SELECT ID, V FROM TEST"); rs = prep.executeQuery(); while (rs.next()) { int id = rs.getInt("ID"); @@ -1426,18 +1246,18 @@ private void testLob(boolean clob) throws Exception { (InputStream) obj, -1); } } - trace("select=" + (System.currentTimeMillis() - time)); + trace("select=" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); traceMemory(); conn = reconnect(conn); - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("DELETE FROM TEST WHERE ID=?"); for (int i = 0; i < len; i++) { prep.setInt(1, i); prep.executeUpdate(); } - trace("delete=" + (System.currentTimeMillis() - time)); + trace("delete=" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); traceMemory(); conn = reconnect(conn); @@ -1482,17 +1302,31 @@ private void testJavaObject() throws SQLException { assertFalse(rs.next()); conn.createStatement().execute("drop table test"); - stat.execute("create table test(value other)"); + stat.execute("create table test(v other)"); prep = conn.prepareStatement("insert into test values(?)"); - prep.setObject(1, JdbcUtils.serialize("", conn.getSession().getDataHandler())); + prep.setObject(1, JdbcUtils.serialize("", conn.getJavaObjectSerializer())); 
prep.execute(); - rs = stat.executeQuery("select value from test"); + rs = stat.executeQuery("select v from test"); while (rs.next()) { - assertEquals("", (String) rs.getObject("value")); + assertEquals("", (String) rs.getObject("v")); } conn.close(); } + /** + * Test a bug where the usage of BufferedInputStream in LobStorageMap was + * causing a deadlock. + */ + private void testBufferedInputStreamBug() throws SQLException { + deleteDb("lob"); + JdbcConnection conn = (JdbcConnection) getConnection("lob"); + conn.createStatement().execute("CREATE TABLE TEST(test BLOB)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST(test) VALUES(?)"); + ps.setBlob(1, new ByteArrayInputStream(new byte[257])); + ps.executeUpdate(); + conn.close(); + } + private static Reader getRandomReader(int len, int seed) { return new CharArrayReader(getRandomChars(len, seed)); } @@ -1576,7 +1410,7 @@ private void testClobWithRandomUnicodeChars() throws Exception { stat.execute("CREATE TABLE logs" + "(id int primary key auto_increment, message CLOB)"); PreparedStatement s1 = conn.prepareStatement( - "INSERT INTO logs (id, message) VALUES(null, ?)"); + "INSERT INTO logs (message) VALUES ?"); final Random rand = new Random(1); for (int i = 1; i <= 100; i++) { String data = randomUnicodeString(rand); @@ -1630,4 +1464,118 @@ private static String randomUnicodeString(Random rand) { } return new String(buffer); } + + private void testLobInValueResultSet() throws SQLException { + deleteDb("lob"); + JdbcConnection conn = (JdbcConnection) getConnection("lob"); + Statement stat = conn.createStatement(); + stat.execute("CREATE ALIAS VRS FOR '" + getClass().getName() + ".testLobInValueResultSetGet'"); + ResultSet rs = stat.executeQuery("SELECT * FROM VRS()"); + assertTrue(rs.next()); + Clob clob = rs.getClob(1); + assertFalse(rs.next()); + assertEquals(MORE_THAN_128_CHARS, clob.getSubString(1, Integer.MAX_VALUE)); + conn.close(); + } + + /** + * This method is called via reflection from 
the database. + * + * @param conn connection + * @return the result set + * @throws SQLException on exception + */ + public static SimpleResultSet testLobInValueResultSetGet(Connection conn) throws SQLException { + final Clob c = conn.createClob(); + c.setString(1, MORE_THAN_128_CHARS); + SimpleResultSet rs = new SimpleResultSet() { + @Override + public Object getObject(int columnIndex) throws SQLException { + return c; + } + }; + rs.addColumn("L", Types.CLOB, 1000, 0); + rs.addRow(MORE_THAN_128_CHARS); + return rs; + } + + private void testLimits() throws Exception { + deleteDb("lob"); + JdbcConnection conn = (JdbcConnection) getConnection("lob"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INTEGER, B BLOB, C CLOB)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + ps.setInt(1, 1); + byte[] b = new byte[Constants.MAX_STRING_LENGTH]; + Arrays.fill(b, (byte) 'A'); + String s = new String(b, StandardCharsets.UTF_8); + ps.setBytes(2, b); + ps.setString(3, s); + ps.executeUpdate(); + byte[] b2 = new byte[Constants.MAX_STRING_LENGTH + 1]; + Arrays.fill(b2, (byte) 'A'); + String s2 = new String(b2, StandardCharsets.UTF_8); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, ps).setBytes(2, b2); + ps.setBinaryStream(2, new ByteArrayInputStream(b2)); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, ps).setString(3, s2); + ps.setCharacterStream(3, new StringReader(s2)); + ps.executeUpdate(); + try (ResultSet rs = stat.executeQuery("TABLE TEST ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + testLimitsSmall(b, s, rs, 2); + testLimitsSmall(b, s, rs, 2); + testLimitsSmall(b, s, rs, 3); + testLimitsSmall(b, s, rs, 3); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + testLimitsLarge(b2, s2, rs, 2); + testLimitsLarge(b2, s2, rs, 2); + testLimitsLarge(b2, s2, rs, 3); + testLimitsLarge(b2, s2, rs, 3); + assertFalse(rs.next()); + } + conn.close(); + testLimitsSmall(b, s, 
ValueBlob.createSmall(b)); + testLimitsSmall(b, s, ValueClob.createSmall(b, Constants.MAX_STRING_LENGTH)); + testLimitsLarge(b2, s2, ValueBlob.createSmall(b2)); + testLimitsLarge(b2, s2, ValueClob.createSmall(b2, Constants.MAX_STRING_LENGTH + 1)); + } + + private void testLimitsSmall(byte[] b, String s, ResultSet rs, int index) throws SQLException { + assertEquals(b, rs.getBytes(index)); + assertEquals(s, rs.getString(index)); + } + + private void testLimitsLarge(byte[] b, String s, ResultSet rs, int index) throws SQLException, IOException { + assertThrows(ErrorCode.VALUE_TOO_LONG_2, rs).getBytes(index); + assertEquals(b, IOUtils.readBytesAndClose(rs.getBlob(index).getBinaryStream(), -1)); + assertThrows(ErrorCode.VALUE_TOO_LONG_2, rs).getString(index); + assertEquals(s, IOUtils.readStringAndClose(rs.getClob(index).getCharacterStream(), -1)); + } + + private void testLimitsSmall(byte[] b, String s, ValueLob v) { + assertEquals(b, v.getBytesNoCopy()); + assertEquals(s, v.getString()); + assertEquals(s, v.getString()); + } + + private void testLimitsLarge(byte[] b, String s, ValueLob v) throws IOException { + try { + assertEquals(b, v.getBytesNoCopy()); + throw new AssertionError(); + } catch (DbException e) { + assertEquals(ErrorCode.VALUE_TOO_LONG_2, e.getErrorCode()); + } + assertEquals(b, IOUtils.readBytesAndClose(v.getInputStream(), -1)); + for (int i = 0; i < 2; i++) { + try { + assertEquals(s, v.getString()); + throw new AssertionError(); + } catch (DbException e) { + assertEquals(ErrorCode.VALUE_TOO_LONG_2, e.getErrorCode()); + } + assertEquals(s, IOUtils.readStringAndClose(v.getReader(), -1)); + } + } } diff --git a/h2/src/test/org/h2/test/db/TestLobObject.java b/h2/src/test/org/h2/test/db/TestLobObject.java index 70a9eedaad..b150fc512b 100644 --- a/h2/src/test/org/h2/test/db/TestLobObject.java +++ b/h2/src/test/org/h2/test/db/TestLobObject.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; diff --git a/h2/src/test/org/h2/test/db/TestMemoryUsage.java b/h2/src/test/org/h2/test/db/TestMemoryUsage.java index d0259680e9..dbf367d113 100644 --- a/h2/src/test/org/h2/test/db/TestMemoryUsage.java +++ b/h2/src/test/org/h2/test/db/TestMemoryUsage.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,14 +11,17 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.Random; +import java.util.concurrent.TimeUnit; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Utils; /** * Tests the memory usage of the cache. */ -public class TestMemoryUsage extends TestBase { +public class TestMemoryUsage extends TestDb { private Connection conn; @@ -28,7 +31,7 @@ public class TestMemoryUsage extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -38,7 +41,9 @@ public void test() throws SQLException { // can't test in-memory databases return; } - testCreateDropLoop(); + // comment this out for now, not reliable when running on my 64-bit + // Java1.8 VM + // testCreateDropLoop(); testCreateIndex(); testClob(); testReconnectOften(); @@ -58,15 +63,19 @@ private void testOpenCloseConnections() throws SQLException { return; } deleteDb("memoryUsage"); - conn = getConnection("memoryUsage"); - eatMemory(4000); - for (int i = 0; i < 4000; i++) { - Connection c2 = getConnection("memoryUsage"); - c2.createStatement(); - c2.close(); + // to eliminate background thread interference + conn = getConnection("memoryUsage;WRITE_DELAY=0"); + try { + eatMemory(4000); + for (int i = 0; i < 4000; i++) { + Connection c2 = getConnection("memoryUsage"); + c2.createStatement(); + c2.close(); + } + } finally { + freeMemory(); + closeConnection(conn); } - freeMemory(); - conn.close(); } private void testCreateDropLoop() throws SQLException { @@ -78,17 +87,17 @@ private void testCreateDropLoop() throws SQLException { stat.execute("DROP TABLE TEST"); } stat.execute("checkpoint"); - int used = Utils.getMemoryUsed(); + long used = Utils.getMemoryUsed(); for (int i = 0; i < 1000; i++) { stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY)"); stat.execute("DROP TABLE TEST"); } stat.execute("checkpoint"); - int usedNow = Utils.getMemoryUsed(); + long usedNow = Utils.getMemoryUsed(); if (usedNow > used * 1.3) { // try to lower memory usage (because it might be wrong) // by forcing OOME - for (int i = 1024;; i *= 2) { + for (int i = 1024; i < (1 >> 31); i *= 2) { try { byte[] oome = new byte[1024 * 1024 * 256]; oome[0] = (byte) i; @@ -119,41 +128,42 @@ private void testClob() throws SQLException { return; } deleteDb("memoryUsageClob"); - conn = getConnection("memoryUsageClob"); + conn = 
getConnection("memoryUsageClob;WRITE_DELAY=0"); Statement stat = conn.createStatement(); stat.execute("SET MAX_LENGTH_INPLACE_LOB 8192"); stat.execute("SET CACHE_SIZE 8000"); stat.execute("CREATE TABLE TEST(ID IDENTITY, DATA CLOB)"); - freeSoftReferences(); try { - int base = Utils.getMemoryUsed(); + long base = Utils.getMemoryUsed(); for (int i = 0; i < 4; i++) { stat.execute("INSERT INTO TEST(DATA) " + "SELECT SPACE(8000) FROM SYSTEM_RANGE(1, 800)"); - freeSoftReferences(); - int used = Utils.getMemoryUsed(); + long used = Utils.getMemoryUsed(); if ((used - base) > 3 * 8192) { fail("Used: " + (used - base) + " i: " + i); } } } finally { - conn.close(); freeMemory(); + closeConnection(conn); } } /** - * Eat memory so that all soft references are garbage collected. + * Closes the specified connection. It silently consumes OUT_OF_MEMORY that + * may happen in background thread during the tests. + * + * @param conn connection to close + * @throws SQLException on other SQL exception */ - void freeSoftReferences() { + private static void closeConnection(Connection conn) throws SQLException { try { - eatMemory(1); - } catch (OutOfMemoryError e) { - // ignore + conn.close(); + } catch (SQLException e) { + if (e.getErrorCode() != ErrorCode.OUT_OF_MEMORY) { + throw e; + } } - System.gc(); - System.gc(); - freeMemory(); } private void testCreateIndex() throws SQLException { @@ -175,13 +185,18 @@ private void testCreateIndex() throws SQLException { prep.setInt(1, i); prep.executeUpdate(); } - int base = Utils.getMemoryUsed(); + long base = Utils.getMemoryUsed(); stat.execute("create index idx_test_id on test(id)"); - System.gc(); - System.gc(); - int used = Utils.getMemoryUsed(); - if ((used - base) > getSize(7500, 12000)) { - fail("Used: " + (used - base)); + for (int i = 0;; i++) { + System.gc(); + long used = Utils.getMemoryUsed() - base; + if (used <= getSize(7500, 12000)) { + break; + } + if (i < 16) { + continue; + } + fail("Used: " + used); } stat.execute("drop table 
test"); conn.close(); @@ -192,15 +207,17 @@ private void testReconnectOften() throws SQLException { Connection conn1 = getConnection("memoryUsage"); int len = getSize(1, 2000); printTimeMemory("start", 0); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < len; i++) { Connection conn2 = getConnection("memoryUsage"); conn2.close(); if (i % 10000 == 0) { - printTimeMemory("connect", System.currentTimeMillis() - time); + printTimeMemory("connect", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); } } - printTimeMemory("connect", System.currentTimeMillis() - time); + printTimeMemory("connect", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); conn1.close(); } @@ -210,14 +227,14 @@ private void insertUpdateSelectDelete() throws SQLException { int len = getSize(1, 2000); // insert - time = System.currentTimeMillis(); + time = System.nanoTime(); stat.execute("DROP TABLE IF EXISTS TEST"); - trace("drop=" + (System.currentTimeMillis() - time)); + trace("drop=" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); stat.execute("CREATE CACHED TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); PreparedStatement prep = conn.prepareStatement( "INSERT INTO TEST VALUES(?, 'Hello World')"); printTimeMemory("start", 0); - time = System.currentTimeMillis(); + time = System.nanoTime(); for (int i = 0; i < len; i++) { prep.setInt(1, i); prep.execute(); @@ -225,10 +242,10 @@ private void insertUpdateSelectDelete() throws SQLException { trace(" " + (100 * i / len) + "%"); } } - printTimeMemory("insert", System.currentTimeMillis() - time); + printTimeMemory("insert", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); // update - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement( "UPDATE TEST SET NAME='Hallo Welt' || ID WHERE ID = ?"); for (int i = 0; i < len; i++) { @@ -238,10 +255,10 @@ private void insertUpdateSelectDelete() throws SQLException { trace(" " + (100 * i 
/ len) + "%"); } } - printTimeMemory("update", System.currentTimeMillis() - time); + printTimeMemory("update", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); // select - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("SELECT * FROM TEST WHERE ID = ?"); for (int i = 0; i < len; i++) { prep.setInt(1, i); @@ -252,11 +269,12 @@ private void insertUpdateSelectDelete() throws SQLException { trace(" " + (100 * i / len) + "%"); } } - printTimeMemory("select", System.currentTimeMillis() - time); + printTimeMemory("select", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); // select randomized Random random = new Random(1); - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("SELECT * FROM TEST WHERE ID = ?"); for (int i = 0; i < len; i++) { prep.setInt(1, random.nextInt(len)); @@ -267,10 +285,11 @@ private void insertUpdateSelectDelete() throws SQLException { trace(" " + (100 * i / len) + "%"); } } - printTimeMemory("select randomized", System.currentTimeMillis() - time); + printTimeMemory("select randomized", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); // delete - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("DELETE FROM TEST WHERE ID = ?"); for (int i = 0; i < len; i++) { prep.setInt(1, random.nextInt(len)); @@ -279,7 +298,8 @@ private void insertUpdateSelectDelete() throws SQLException { trace(" " + (100 * i / len) + "%"); } } - printTimeMemory("delete", System.currentTimeMillis() - time); + printTimeMemory("delete", + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); } } diff --git a/h2/src/test/org/h2/test/db/TestMergeUsing.java b/h2/src/test/org/h2/test/db/TestMergeUsing.java new file mode 100644 index 0000000000..f0328a5a7e --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestMergeUsing.java @@ -0,0 +1,301 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import org.h2.api.Trigger; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Test merge using syntax. + */ +public class TestMergeUsing extends TestDb implements Trigger { + + private static final String GATHER_ORDERED_RESULTS_SQL = "SELECT ID, NAME FROM PARENT ORDER BY ID ASC"; + private static int triggerTestingUpdateCount; + + private String triggerName; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return true; + } + + @Override + public void test() throws Exception { + // Simple ID,NAME inserts, target table with PK initially empty + testMergeUsing( + "CREATE TABLE PARENT(ID INT, NAME VARCHAR, PRIMARY KEY(ID) );", + "MERGE INTO PARENT AS P USING (SELECT X AS ID, 'Marcy'||X AS NAME " + + "FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) " + + "WHEN MATCHED THEN " + + "UPDATE SET P.NAME = S.NAME WHERE 2 = 2 WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2)", 2); + // Simple NAME updates, target table missing PK + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S " + + "ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID WHERE 1 = 1 WHEN NOT MATCHED THEN " + + "INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + 
"SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(1,2)", + 2); + // No NAME updates, WHERE clause is always false, insert clause missing + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID WHERE 1 = 2", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2)", 0); + // No NAME updates, no WHERE clause, insert clause missing + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(1,2)", + 2); + // Two delete updates done, no WHERE clause, insert clause missing + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN DELETE", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) WHERE 1=0", + 2); + // One insert, one update one delete happens, target table missing PK + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) ) AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID WHERE P.ID = 2 " + + "WHEN MATCHED THEN DELETE WHERE P.ID = 1 WHEN NOT MATCHED THEN " + + "INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X||X AS 
NAME FROM SYSTEM_RANGE(2,2) UNION ALL " + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(3,3)", + 3); + // One insert, one update one delete happens, target table missing PK + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );" + + "CREATE TABLE SOURCE AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) );", + "MERGE INTO PARENT AS P USING SOURCE AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID WHERE P.ID = 2 " + + "WHEN MATCHED THEN DELETE WHERE P.ID = 1 WHEN NOT MATCHED THEN " + + "INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(2,2) UNION ALL " + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(3,3)", + 3); + // One insert, one update one delete happens, target table missing PK, + // no source alias + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );" + + "CREATE TABLE SOURCE AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) );", + "MERGE INTO PARENT AS P USING SOURCE ON (P.ID = SOURCE.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = SOURCE.NAME||SOURCE.ID WHERE P.ID = 2 " + + "WHEN MATCHED THEN DELETE WHERE P.ID = 1 " + + "WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (SOURCE.ID, SOURCE.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(2,2) UNION ALL " + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(3,3)", + 3); + // One insert, one update one delete happens, target table missing PK, + // no source or target alias + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2) );" + + "CREATE TABLE SOURCE AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) );", + "MERGE INTO PARENT USING SOURCE ON (PARENT.ID = SOURCE.ID) " + + "WHEN MATCHED THEN UPDATE SET PARENT.NAME = SOURCE.NAME||SOURCE.ID WHERE 
PARENT.ID = 2 " + + "WHEN MATCHED THEN DELETE WHERE PARENT.ID = 1 " + + "WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (SOURCE.ID, SOURCE.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X||X AS NAME FROM SYSTEM_RANGE(2,2) UNION ALL " + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(3,3)", + 3); + + // Only insert clause, no update or delete clauses + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,1) );" + + "DELETE FROM PARENT;", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) ) AS S ON (P.ID = S.ID) " + + "WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3)", 3); + // no insert, no update, no delete clauses - essentially a no-op + testMergeUsingException( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,1) );" + + "DELETE FROM PARENT;", + "MERGE INTO PARENT AS P USING (" + + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) ) AS S ON (P.ID = S.ID)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,3) WHERE X<0", + 0, + "WHEN\""); + // One insert, one update one delete happens, target table missing PK, + // triggers update all NAME fields + triggerTestingUpdateCount = 0; + testMergeUsing( + "CREATE TABLE PARENT AS (SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,2));" + + getCreateTriggerSQL(), + "MERGE INTO PARENT AS P USING " + + "(SELECT X AS ID, 'Marcy'||X AS NAME FROM SYSTEM_RANGE(1,4) ) AS S ON (P.ID = S.ID) " + + "WHEN MATCHED THEN UPDATE SET P.NAME = S.NAME||S.ID WHERE P.ID = 2 " + + "WHEN MATCHED THEN DELETE WHERE P.ID = 1 " + + "WHEN NOT MATCHED THEN INSERT (ID, NAME) VALUES (S.ID, S.NAME)", + GATHER_ORDERED_RESULTS_SQL, + "SELECT 2 AS ID, 'Marcy22-updated2' AS NAME UNION ALL " + + "SELECT X AS ID, 'Marcy'||X||'-inserted'||X AS NAME FROM 
SYSTEM_RANGE(3,4)", + 4); + } + + /** + * Run a test case of the merge using syntax + * + * @param setupSQL - one or more SQL statements to setup the case + * @param statementUnderTest - the merge statement being tested + * @param gatherResultsSQL - a select which gathers the results of the merge + * from the target table + * @param expectedResultsSQL - a select which returns the expected results + * in the target table + * @param expectedRowUpdateCount - how many updates should be expected from + * the merge using + */ + private void testMergeUsing(String setupSQL, String statementUnderTest, + String gatherResultsSQL, String expectedResultsSQL, + int expectedRowUpdateCount) throws Exception { + deleteDb("mergeUsingQueries"); + + try (Connection conn = getConnection("mergeUsingQueries;MODE=Oracle")) { + Statement stat = conn.createStatement(); + stat.execute(setupSQL); + + PreparedStatement prep = conn.prepareStatement(statementUnderTest); + int rowCountUpdate = prep.executeUpdate(); + + // compare actual results from SQL result set with expected results + // - by diffing (aka set MINUS operation) + ResultSet rs = stat.executeQuery("( " + gatherResultsSQL + " ) MINUS ( " + + expectedResultsSQL + " )"); + + int rowCount = 0; + StringBuilder diffBuffer = new StringBuilder(""); + while (rs.next()) { + rowCount++; + diffBuffer.append("|"); + for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) { + diffBuffer.append(rs.getObject(i)); + diffBuffer.append("|\n"); + } + } + assertEquals("Differences between expected and actual output found:" + + diffBuffer, 0, rowCount); + assertEquals("Expected update counts differ", + expectedRowUpdateCount, rowCountUpdate); + } finally { + deleteDb("mergeUsingQueries"); + } + } + + /** + * Run a test case of the merge using syntax + * + * @param setupSQL - one or more SQL statements to setup the case + * @param statementUnderTest - the merge statement being tested + * @param gatherResultsSQL - a select which gathers the results 
of the merge + * from the target table + * @param expectedResultsSQL - a select which returns the expected results + * in the target table + * @param expectedRowUpdateCount - how many updates should be expected from + * the merge using + * @param exceptionMessage - the exception message expected + */ + private void testMergeUsingException(String setupSQL, + String statementUnderTest, String gatherResultsSQL, + String expectedResultsSQL, int expectedRowUpdateCount, + String exceptionMessage) throws Exception { + try { + testMergeUsing(setupSQL, statementUnderTest, gatherResultsSQL, + expectedResultsSQL, expectedRowUpdateCount); + } catch (RuntimeException | SQLException e) { + if (!e.getMessage().contains(exceptionMessage)) { + e.printStackTrace(); + } + assertContains(e.getMessage(), exceptionMessage); + return; + } + fail("Failed to see exception with message:" + exceptionMessage); + } + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) + throws SQLException { + + if (conn == null) { + throw new AssertionError("connection is null"); + } + if (triggerName.startsWith("INS_BEFORE")) { + newRow[1] = newRow[1] + "-inserted" + (++triggerTestingUpdateCount); + } else if (triggerName.startsWith("UPD_BEFORE")) { + newRow[1] = newRow[1] + "-updated" + (++triggerTestingUpdateCount); + } else if (triggerName.startsWith("DEL_BEFORE")) { + oldRow[1] = oldRow[1] + "-deleted" + (++triggerTestingUpdateCount); + } + } + + @Override + public void init(Connection conn, String schemaName, String trigger, + String tableName, boolean before, int type) { + this.triggerName = trigger; + if (!"PARENT".equals(tableName)) { + throw new AssertionError("supposed to be PARENT"); + } + if ((trigger.endsWith("AFTER") && before) + || (trigger.endsWith("BEFORE") && !before)) { + throw new AssertionError( + "triggerName: " + trigger + " before:" + before); + } + if ((trigger.startsWith("UPD") && type != UPDATE) + || (trigger.startsWith("INS") && type != INSERT) + || 
(trigger.startsWith("DEL") && type != DELETE)) { + throw new AssertionError( + "triggerName: " + trigger + " type:" + type); + } + } + + private String getCreateTriggerSQL() { + StringBuilder buf = new StringBuilder(); + buf.append("CREATE TRIGGER INS_BEFORE " + "BEFORE INSERT ON PARENT " + + "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";"); + buf.append("CREATE TRIGGER UPD_BEFORE " + "BEFORE UPDATE ON PARENT " + + "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";"); + buf.append("CREATE TRIGGER DEL_BEFORE " + "BEFORE DELETE ON PARENT " + + "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";"); + return buf.toString(); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestMultiConn.java b/h2/src/test/org/h2/test/db/TestMultiConn.java index cad52c4561..891042cc72 100644 --- a/h2/src/test/org/h2/test/db/TestMultiConn.java +++ b/h2/src/test/org/h2/test/db/TestMultiConn.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,12 +11,13 @@ import java.sql.Statement; import org.h2.api.DatabaseEventListener; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Multi-connection tests. */ -public class TestMultiConn extends TestBase { +public class TestMultiConn extends TestDb { /** * How long to wait in milliseconds. @@ -29,7 +30,7 @@ public class TestMultiConn extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -45,7 +46,7 @@ private void testConcurrentShutdownQuery() throws Exception { Connection conn1 = getConnection("multiConn"); Connection conn2 = getConnection("multiConn"); final Statement stat1 = conn1.createStatement(); - stat1.execute("CREATE ALIAS SLEEP FOR \"java.lang.Thread.sleep(long)\""); + stat1.execute("CREATE ALIAS SLEEP FOR 'java.lang.Thread.sleep(long)'"); final Statement stat2 = conn2.createStatement(); stat1.execute("SET THROTTLE 100"); Task t = new Task() { @@ -74,15 +75,15 @@ public void call() throws Exception { private void testThreeThreads() throws Exception { deleteDb("multiConn"); - final Connection conn1 = getConnection("multiConn"); - final Connection conn2 = getConnection("multiConn"); - final Connection conn3 = getConnection("multiConn"); + Connection conn1 = getConnection("multiConn"); + Connection conn2 = getConnection("multiConn"); + Connection conn3 = getConnection("multiConn"); conn1.setAutoCommit(false); conn2.setAutoCommit(false); conn3.setAutoCommit(false); - final Statement s1 = conn1.createStatement(); - final Statement s2 = conn2.createStatement(); - final Statement s3 = conn3.createStatement(); + Statement s1 = conn1.createStatement(); + Statement s2 = conn2.createStatement(); + Statement s3 = conn3.createStatement(); s1.execute("CREATE TABLE TEST1(ID INT)"); s2.execute("CREATE TABLE TEST2(ID INT)"); s3.execute("CREATE TABLE TEST3(ID INT)"); @@ -92,28 +93,22 @@ private void testThreeThreads() throws Exception { s1.execute("SET LOCK_TIMEOUT 1000"); s2.execute("SET LOCK_TIMEOUT 1000"); s3.execute("SET LOCK_TIMEOUT 1000"); - Thread t1 = new Thread(new Runnable() { - @Override - public void run() { - try { - s3.execute("INSERT INTO TEST2 VALUES(4)"); - conn3.commit(); - } catch (SQLException e) { - TestBase.logError("insert", e); - } + Thread t1 = new Thread(() -> { + try { + s3.execute("INSERT INTO TEST2 
VALUES(4)"); + conn3.commit(); + } catch (SQLException e) { + TestBase.logError("insert", e); } }); t1.start(); Thread.sleep(20); - Thread t2 = new Thread(new Runnable() { - @Override - public void run() { - try { - s2.execute("INSERT INTO TEST1 VALUES(5)"); - conn2.commit(); - } catch (SQLException e) { - TestBase.logError("insert", e); - } + Thread t2 = new Thread(() -> { + try { + s2.execute("INSERT INTO TEST1 VALUES(5)"); + conn2.commit(); + } catch (SQLException e) { + TestBase.logError("insert", e); } }); t2.start(); @@ -145,16 +140,13 @@ private void testConcurrentOpen() throws Exception { conn.createStatement().execute("SHUTDOWN"); conn.close(); final String listener = MyDatabaseEventListener.class.getName(); - Runnable r = new Runnable() { - @Override - public void run() { - try { - Connection c1 = getConnection("multiConn;DATABASE_EVENT_LISTENER='" + listener - + "';file_lock=socket"); - c1.close(); - } catch (Exception e) { - TestBase.logError("connect", e); - } + Runnable r = () -> { + try { + Connection c1 = getConnection("multiConn;DATABASE_EVENT_LISTENER='" + listener + + "';file_lock=socket"); + c1.close(); + } catch (Exception e) { + TestBase.logError("connect", e); } }; Thread thread = new Thread(r); @@ -207,16 +199,10 @@ private void testCommitRollback() throws SQLException { /** * A database event listener used in this test. 
*/ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override - public void exceptionThrown(SQLException e, String sql) { - // do nothing - } - - @Override - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (wait > 0) { try { Thread.sleep(wait); @@ -226,20 +212,6 @@ public void setProgress(int state, String name, int x, int max) { } } - @Override - public void closingDatabase() { - // do nothing - } - - @Override - public void init(String url) { - // do nothing - } - - @Override - public void opened() { - // do nothing - } } } diff --git a/h2/src/test/org/h2/test/db/TestMultiDimension.java b/h2/src/test/org/h2/test/db/TestMultiDimension.java index db5e75da9a..afd99bde92 100644 --- a/h2/src/test/org/h2/test/db/TestMultiDimension.java +++ b/h2/src/test/org/h2/test/db/TestMultiDimension.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -11,14 +11,16 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.MultiDimension; /** * Tests the multi-dimension index tool. */ -public class TestMultiDimension extends TestBase { +public class TestMultiDimension extends TestDb { /** * Run just this test. @@ -28,7 +30,7 @@ public class TestMultiDimension extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -78,16 +80,15 @@ private void testHelperMethods() { assertEquals(y, tool.deinterleave(3, xyz, 1)); assertEquals(z, tool.deinterleave(3, xyz, 2)); } - createClassProxy(MultiDimension.class); - assertThrows(IllegalArgumentException.class, m).getMaxValue(1); - assertThrows(IllegalArgumentException.class, m).getMaxValue(33); - assertThrows(IllegalArgumentException.class, m).normalize(2, 10, 11, 12); - assertThrows(IllegalArgumentException.class, m).normalize(2, 5, 10, 0); - assertThrows(IllegalArgumentException.class, m).normalize(2, 10, 0, 9); - assertThrows(IllegalArgumentException.class, m).interleave(-1, 5); - assertThrows(IllegalArgumentException.class, m).interleave(5, -1); - assertThrows(IllegalArgumentException.class, m). - interleave(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE); + assertThrows(IllegalArgumentException.class, () -> m.getMaxValue(1)); + assertThrows(IllegalArgumentException.class, () -> m.getMaxValue(33)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 10, 11, 12)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 5, 10, 0)); + assertThrows(IllegalArgumentException.class, () -> m.normalize(2, 10, 0, 9)); + assertThrows(IllegalArgumentException.class, () -> m.interleave(-1, 5)); + assertThrows(IllegalArgumentException.class, () -> m.interleave(5, -1)); + assertThrows(IllegalArgumentException.class, + () -> m.interleave(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE)); } private void testPerformance2d() throws SQLException { @@ -95,8 +96,7 @@ private void testPerformance2d() throws SQLException { Connection conn; conn = getConnection("multiDimension"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS MAP FOR \"" + - getClass().getName() + ".interleave\""); + stat.execute("CREATE ALIAS MAP FOR '" + 
getClass().getName() + ".interleave'"); stat.execute("CREATE TABLE TEST(X INT NOT NULL, Y INT NOT NULL, " + "XY BIGINT AS MAP(X, Y), DATA VARCHAR)"); stat.execute("CREATE INDEX IDX_X ON TEST(X, Y)"); @@ -106,11 +106,11 @@ private void testPerformance2d() throws SQLException { // the MultiDimension tool is faster for 4225 (65^2) points // the more the bigger the difference int max = getSize(30, 65); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int x = 0; x < max; x++) { for (int y = 0; y < max; y++) { - long t2 = System.currentTimeMillis(); - if (t2 - time > 1000) { + long t2 = System.nanoTime(); + if (t2 - time > TimeUnit.SECONDS.toNanos(1)) { int percent = (int) (100.0 * ((double) x * max + y) / ((double) max * max)); trace(percent + "%"); @@ -139,17 +139,17 @@ private void testPerformance2d() throws SQLException { int minX = rand.nextInt(max - size); int minY = rand.nextInt(max - size); int maxX = minX + size, maxY = minY + size; - time = System.currentTimeMillis(); + time = System.nanoTime(); ResultSet rs1 = multi.getResult(prepMulti, new int[] { minX, minY }, new int[] { maxX, maxY }); - timeMulti += System.currentTimeMillis() - time; - time = System.currentTimeMillis(); + timeMulti += System.nanoTime() - time; + time = System.nanoTime(); prepRegular.setInt(1, minX); prepRegular.setInt(2, maxX); prepRegular.setInt(3, minY); prepRegular.setInt(4, maxY); ResultSet rs2 = prepRegular.executeQuery(); - timeRegular += System.currentTimeMillis() - time; + timeRegular += System.nanoTime() - time; while (rs1.next()) { assertTrue(rs2.next()); assertEquals(rs1.getInt(1), rs2.getInt(1)); @@ -159,7 +159,8 @@ private void testPerformance2d() throws SQLException { } conn.close(); deleteDb("multiDimension"); - trace("2d: regular: " + timeRegular + " MultiDimension: " + timeMulti); + trace("2d: regular: " + TimeUnit.NANOSECONDS.toMillis(timeRegular) + + " MultiDimension: " + TimeUnit.NANOSECONDS.toMillis(timeMulti)); } private void 
testPerformance3d() throws SQLException { @@ -167,8 +168,7 @@ private void testPerformance3d() throws SQLException { Connection conn; conn = getConnection("multiDimension"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS MAP FOR \"" + - getClass().getName() + ".interleave\""); + stat.execute("CREATE ALIAS MAP FOR '" + getClass().getName() + ".interleave'"); stat.execute("CREATE TABLE TEST(X INT NOT NULL, " + "Y INT NOT NULL, Z INT NOT NULL, " + "XYZ BIGINT AS MAP(X, Y, Z), DATA VARCHAR)"); @@ -179,12 +179,12 @@ private void testPerformance3d() throws SQLException { // the MultiDimension tool is faster for 8000 (20^3) points // the more the bigger the difference int max = getSize(10, 20); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int x = 0; x < max; x++) { for (int y = 0; y < max; y++) { for (int z = 0; z < max; z++) { - long t2 = System.currentTimeMillis(); - if (t2 - time > 1000) { + long t2 = System.nanoTime(); + if (t2 - time > TimeUnit.SECONDS.toNanos(1)) { int percent = (int) (100.0 * ((double) x * max + y) / ((double) max * max)); trace(percent + "%"); @@ -216,11 +216,11 @@ private void testPerformance3d() throws SQLException { int minY = rand.nextInt(max - size); int minZ = rand.nextInt(max - size); int maxX = minX + size, maxY = minY + size, maxZ = minZ + size; - time = System.currentTimeMillis(); + time = System.nanoTime(); ResultSet rs1 = multi.getResult(prepMulti, new int[] { minX, minY, minZ }, new int[] { maxX, maxY, maxZ }); - timeMulti += System.currentTimeMillis() - time; - time = System.currentTimeMillis(); + timeMulti += System.nanoTime() - time; + time = System.nanoTime(); prepRegular.setInt(1, minX); prepRegular.setInt(2, maxX); prepRegular.setInt(3, minY); @@ -228,7 +228,7 @@ private void testPerformance3d() throws SQLException { prepRegular.setInt(5, minZ); prepRegular.setInt(6, maxZ); ResultSet rs2 = prepRegular.executeQuery(); - timeRegular += System.currentTimeMillis() - time; + 
timeRegular += System.nanoTime() - time; while (rs1.next()) { assertTrue(rs2.next()); assertEquals(rs1.getInt(1), rs2.getInt(1)); @@ -238,7 +238,8 @@ private void testPerformance3d() throws SQLException { } conn.close(); deleteDb("multiDimension"); - trace("3d: regular: " + timeRegular + " MultiDimension: " + timeMulti); + trace("3d: regular: " + TimeUnit.NANOSECONDS.toMillis(timeRegular) + + " MultiDimension: " + TimeUnit.NANOSECONDS.toMillis(timeMulti)); } /** diff --git a/h2/src/test/org/h2/test/db/TestMultiThread.java b/h2/src/test/org/h2/test/db/TestMultiThread.java index 4a5659a0c6..ea6f060686 100644 --- a/h2/src/test/org/h2/test/db/TestMultiThread.java +++ b/h2/src/test/org/h2/test/db/TestMultiThread.java @@ -1,46 +1,50 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.io.StringReader; +import java.math.BigDecimal; import java.sql.Connection; -import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import org.h2.api.ErrorCode; import org.h2.test.TestAll; import org.h2.test.TestBase; -import org.h2.util.SmallLRUCache; -import org.h2.util.SynchronizedVerifier; +import org.h2.test.TestDb; +import org.h2.util.IOUtils; import org.h2.util.Task; /** * Multi-threaded tests. 
*/ -public class TestMultiThread extends TestBase implements Runnable { +public class TestMultiThread extends TestDb implements Runnable { private boolean stop; private TestMultiThread parent; private Random random; - private Connection threadConn; - private Statement threadStat; public TestMultiThread() { // nothing to do } - private TestMultiThread(TestAll config, TestMultiThread parent) - throws SQLException { + private TestMultiThread(TestAll config, TestMultiThread parent) { this.config = config; this.parent = parent; random = new Random(); - threadConn = getConnection(); - threadStat = threadConn.createStatement(); } /** @@ -49,249 +53,452 @@ private TestMultiThread(TestAll config, TestMultiThread parent) * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testConcurrentSchemaChange(); testConcurrentLobAdd(); - testConcurrentView(); testConcurrentAlter(); - testConcurrentAnalyze(); testConcurrentInsertUpdateSelect(); - testLockModeWithMultiThreaded(); + testViews(); + testConcurrentInsert(); + testConcurrentUpdate(); + testConcurrentUpdate2(); + testCheckConstraint(); } private void testConcurrentSchemaChange() throws Exception { - String db = "testConcurrentSchemaChange"; + String db = getTestName(); deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1", true); - Connection conn = getConnection(url); - Task[] tasks = new Task[2]; - for (int i = 0; i < tasks.length; i++) { - final int x = i; - Task t = new Task() { - @Override - public void call() throws Exception { - Connection c2 = getConnection(url); - Statement stat = c2.createStatement(); - try { - for (int i = 0; !stop; i++) { - stat.execute("create table test" + x + "_" + i); - c2.getMetaData().getTables(null, null, null, null); - stat.execute("drop table test" + x + "_" + i); + final String url = getURL(db + 
";LOCK_TIMEOUT=10000", true); + try (Connection conn = getConnection(url)) { + Task[] tasks = new Task[2]; + for (int i = 0; i < tasks.length; i++) { + final int x = i; + Task t = new Task() { + @Override + public void call() throws Exception { + try (Connection c2 = getConnection(url)) { + Statement stat = c2.createStatement(); + for (int i = 0; !stop; i++) { + stat.execute("create table test" + x + "_" + i); + c2.getMetaData().getTables(null, null, null, null); + stat.execute("drop table test" + x + "_" + i); + } } - } finally { - c2.close(); } - } - }; - tasks[i] = t; - t.execute(); - } - Thread.sleep(1000); - for (Task t : tasks) { - t.get(); + }; + tasks[i] = t; + t.execute(); + } + Thread.sleep(1000); + for (Task t : tasks) { + t.get(); + } } - conn.close(); } private void testConcurrentLobAdd() throws Exception { - String db = "concurrentLobAdd"; + String db = getTestName(); deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1", true); - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id identity, data clob)"); - Task[] tasks = new Task[2]; - for (int i = 0; i < tasks.length; i++) { + final String url = getURL(db, true); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id identity, data clob)"); + Task[] tasks = new Task[2]; + for (int i = 0; i < tasks.length; i++) { + Task t = new Task() { + @Override + public void call() throws Exception { + try (Connection c2 = getConnection(url)) { + PreparedStatement p2 = c2 + .prepareStatement("insert into test(data) values(?)"); + while (!stop) { + p2.setCharacterStream(1, new StringReader(new String( + new char[10 * 1024]))); + p2.execute(); + } + } + } + }; + tasks[i] = t; + t.execute(); + } + Thread.sleep(500); + for (Task t : tasks) { + t.get(); + } + } + } + + private void testConcurrentAlter() throws Exception { + deleteDb(getTestName()); + try (final 
Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); Task t = new Task() { @Override public void call() throws Exception { - Connection c2 = getConnection(url); - PreparedStatement p2 = c2 - .prepareStatement("insert into test(data) values(?)"); - try { - while (!stop) { - p2.setCharacterStream(1, new StringReader(new String( - new char[10 * 1024]))); - p2.execute(); - } - } finally { - c2.close(); + while (!stop) { + conn.prepareStatement("select * from test"); } } }; - tasks[i] = t; + stat.execute("create table test(id int)"); t.execute(); - } - Thread.sleep(500); - for (Task t : tasks) { + for (int i = 0; i < 200; i++) { + stat.execute("alter table test add column x int"); + stat.execute("alter table test drop column x"); + } t.get(); } - conn.close(); } - private void testConcurrentView() throws Exception { - if (config.mvcc) { - return; - } - String db = "concurrentView"; - deleteDb(db); - final String url = getURL(db + ";MULTI_THREADED=1", true); - final Random r = new Random(); - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - StringBuilder buff = new StringBuilder(); - buff.append("create table test(id int"); - final int len = 3; - for (int i = 0; i < len; i++) { - buff.append(", x" + i + " int"); - } - buff.append(")"); - stat.execute(buff.toString()); - stat.execute("create view test_view as select * from test"); - stat.execute("insert into test(id) select x from system_range(1, 2)"); - Task t = new Task() { - @Override - public void call() throws Exception { - Connection c2 = getConnection(url); - while (!stop) { - c2.prepareStatement("select * from test_view where x" + - r.nextInt(len) + "=1"); - } - c2.close(); + private void testConcurrentInsertUpdateSelect() throws Exception { + try (Connection conn = getConnection()) { + Statement stmt = conn.createStatement(); + stmt.execute("CREATE TABLE TEST(ID IDENTITY, NAME VARCHAR)"); + int len = getSize(10, 200); + Thread[] threads = 
new Thread[len]; + for (int i = 0; i < len; i++) { + threads[i] = new Thread(new TestMultiThread(config, this)); } - }; - t.execute(); - SynchronizedVerifier.setDetect(SmallLRUCache.class, true); - for (int i = 0; i < 1000; i++) { - conn.prepareStatement("select * from test_view where x" + - r.nextInt(len) + "=1"); - } - t.get(); - SynchronizedVerifier.setDetect(SmallLRUCache.class, false); - conn.close(); - } - - private void testConcurrentAlter() throws Exception { - deleteDb("concurrentAlter"); - final Connection conn = getConnection("concurrentAlter"); - Statement stat = conn.createStatement(); - Task t = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - conn.prepareStatement("select * from test"); - } + for (int i = 0; i < len; i++) { + threads[i].start(); } - }; - stat.execute("create table test(id int)"); - t.execute(); - for (int i = 0; i < 200; i++) { - stat.execute("alter table test add column x int"); - stat.execute("alter table test drop column x"); - } - t.get(); - conn.close(); - deleteDb("concurrentAlter"); - } - - private void testConcurrentAnalyze() throws Exception { - if (config.mvcc) { - return; - } - deleteDb("concurrentAnalyze"); - final String url = getURL("concurrentAnalyze;MULTI_THREADED=1", true); - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id bigint primary key) " + - "as select x from system_range(1, 1000)"); - Task t = new Task() { - @Override - public void call() throws SQLException { - Connection conn2; - conn2 = getConnection(url); - for (int i = 0; i < 1000; i++) { - conn2.createStatement().execute("analyze"); - } - conn2.close(); + int sleep = getSize(400, 10000); + Thread.sleep(sleep); + this.stop = true; + for (int i = 0; i < len; i++) { + threads[i].join(); } - }; - t.execute(); - Thread.yield(); - for (int i = 0; i < 1000; i++) { - conn.createStatement().execute("analyze"); - } - t.get(); - stat.execute("drop table 
test"); - conn.close(); - deleteDb("concurrentAnalyze"); - } - - private void testConcurrentInsertUpdateSelect() throws Exception { - threadConn = getConnection(); - threadStat = threadConn.createStatement(); - threadStat.execute("CREATE TABLE TEST(ID IDENTITY, NAME VARCHAR)"); - int len = getSize(10, 200); - Thread[] threads = new Thread[len]; - for (int i = 0; i < len; i++) { - threads[i] = new Thread(new TestMultiThread(config, this)); - } - for (int i = 0; i < len; i++) { - threads[i].start(); + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + trace("max id=" + rs.getInt(1)); } - int sleep = getSize(400, 10000); - Thread.sleep(sleep); - this.stop = true; - for (int i = 0; i < len; i++) { - threads[i].join(); - } - ResultSet rs = threadStat.executeQuery("SELECT COUNT(*) FROM TEST"); - rs.next(); - trace("max id=" + rs.getInt(1)); - threadConn.close(); } private Connection getConnection() throws SQLException { - return getConnection("jdbc:h2:mem:multiThread"); + return getConnection("jdbc:h2:mem:" + getTestName()); } @Override public void run() { - try { + try (Connection conn = getConnection()) { + Statement stmt = conn.createStatement(); while (!parent.stop) { - threadStat.execute("SELECT COUNT(*) FROM TEST"); - threadStat.execute("INSERT INTO TEST VALUES(NULL, 'Hi')"); - PreparedStatement prep = threadConn.prepareStatement( + stmt.execute("SELECT COUNT(*) FROM TEST"); + stmt.execute("INSERT INTO TEST(NAME) VALUES('Hi')"); + PreparedStatement prep = conn.prepareStatement( "UPDATE TEST SET NAME='Hello' WHERE ID=?"); prep.setInt(1, random.nextInt(10000)); prep.execute(); - prep = threadConn.prepareStatement("SELECT * FROM TEST WHERE ID=?"); + prep = conn.prepareStatement("SELECT * FROM TEST WHERE ID=?"); prep.setInt(1, random.nextInt(10000)); ResultSet rs = prep.executeQuery(); while (rs.next()) { rs.getString("NAME"); } } - threadConn.close(); } catch (Exception e) { logError("multi", e); } } - private void 
testLockModeWithMultiThreaded() throws Exception { - // currently the combination of LOCK_MODE=0 and MULTI_THREADED + private void testViews() throws Exception { // is not supported deleteDb("lockMode"); - final String url = getURL("lockMode;MULTI_THREADED=1", true); + String url = getURL("lockMode", true); + + // create some common tables and views + ExecutorService executor = Executors.newFixedThreadPool(8); + Connection conn = getConnection(url); + try { + Statement stat = conn.createStatement(); + stat.execute( + "CREATE TABLE INVOICE(INVOICE_ID INT PRIMARY KEY, AMOUNT DECIMAL)"); + stat.execute("CREATE VIEW INVOICE_VIEW as SELECT * FROM INVOICE"); + + stat.execute( + "CREATE TABLE INVOICE_DETAIL(DETAIL_ID INT PRIMARY KEY, " + + "INVOICE_ID INT, DESCRIPTION VARCHAR)"); + stat.execute( + "CREATE VIEW INVOICE_DETAIL_VIEW as SELECT * FROM INVOICE_DETAIL"); + + stat.close(); + + // create views that reference the common views in different threads + ArrayList> jobs = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + final int j = i; + jobs.add(executor.submit(() -> { + try (Connection conn2 = getConnection(url)) { + Statement stat2 = conn2.createStatement(); + + stat2.execute("CREATE VIEW INVOICE_VIEW" + j + + " as SELECT * FROM INVOICE_VIEW"); + + // the following query intermittently results in a + // NullPointerException + stat2.execute("CREATE VIEW INVOICE_DETAIL_VIEW" + j + + " as SELECT DTL.* FROM INVOICE_VIEW" + j + + " INV JOIN INVOICE_DETAIL_VIEW DTL " + + "ON INV.INVOICE_ID = DTL.INVOICE_ID" + + " WHERE DESCRIPTION='TEST'"); + + ResultSet rs = stat2 + .executeQuery("SELECT * FROM INVOICE_VIEW" + j); + rs.next(); + rs.close(); + + rs = stat2.executeQuery( + "SELECT * FROM INVOICE_DETAIL_VIEW" + j); + rs.next(); + rs.close(); + + stat2.close(); + } + return null; + })); + } + // check for exceptions + for (Future job : jobs) { + try { + job.get(); + } catch (ExecutionException ex) { + // ignore timeout exceptions, happens periodically when the + // 
machine is really busy and it's not the thing we are + // trying to test + if (!(ex.getCause() instanceof SQLException) + || ((SQLException) ex.getCause()).getErrorCode() != ErrorCode.LOCK_TIMEOUT_1) { + throw ex; + } + } + } + } finally { + IOUtils.closeSilently(conn); + executor.shutdown(); + executor.awaitTermination(20, TimeUnit.SECONDS); + } + + deleteDb("lockMode"); + } + + private void testConcurrentInsert() throws Exception { + deleteDb("lockMode"); + + final String url = getURL("lockMode;LOCK_TIMEOUT=10000", true); + int threadCount = 25; + ExecutorService executor = Executors.newFixedThreadPool(threadCount); Connection conn = getConnection(url); - DatabaseMetaData meta = conn.getMetaData(); - assertFalse(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_UNCOMMITTED)); - conn.close(); + try { + conn.createStatement().execute( + "CREATE TABLE IF NOT EXISTS TRAN (ID NUMBER(18,0) not null PRIMARY KEY)"); + + final ArrayList> callables = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + final long initialTransactionId = i * 1000000L; + callables.add(() -> { + try (Connection taskConn = getConnection(url)) { + taskConn.setAutoCommit(false); + PreparedStatement insertTranStmt = taskConn + .prepareStatement("INSERT INTO tran (id) VALUES(?)"); + // to guarantee uniqueness + long tranId = initialTransactionId; + for (int j = 0; j < 1000; j++) { + insertTranStmt.setLong(1, tranId++); + insertTranStmt.execute(); + taskConn.commit(); + } + } + return null; + }); + } + + final ArrayList> jobs = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + jobs.add(executor.submit(callables.get(i))); + } + // check for exceptions + for (Future job : jobs) { + job.get(5, TimeUnit.MINUTES); + } + } finally { + IOUtils.closeSilently(conn); + executor.shutdown(); + executor.awaitTermination(20, TimeUnit.SECONDS); + } + + deleteDb("lockMode"); + } + + private void testConcurrentUpdate() throws Exception { + deleteDb("lockMode"); + + final 
int objectCount = 10000; + final String url = getURL("lockMode;LOCK_TIMEOUT=10000", true); + int threadCount = 25; + ExecutorService executor = Executors.newFixedThreadPool(threadCount); + Connection conn = getConnection(url); + try { + conn.createStatement().execute( + "CREATE TABLE IF NOT EXISTS ACCOUNT" + + "(ID NUMBER(18,0) not null PRIMARY KEY, BALANCE NUMBER null)"); + final PreparedStatement mergeAcctStmt = conn + .prepareStatement("MERGE INTO Account(id, balance) key (id) VALUES (?, ?)"); + for (int i = 0; i < objectCount; i++) { + mergeAcctStmt.setLong(1, i); + mergeAcctStmt.setBigDecimal(2, BigDecimal.ZERO); + mergeAcctStmt.execute(); + } + + final ArrayList> callables = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + callables.add(() -> { + try (Connection taskConn = getConnection(url)) { + taskConn.setAutoCommit(false); + final PreparedStatement updateAcctStmt = taskConn + .prepareStatement("UPDATE account SET balance = ? WHERE id = ?"); + for (int j = 0; j < 1000; j++) { + updateAcctStmt.setDouble(1, Math.random()); + updateAcctStmt.setLong(2, (int) (Math.random() * objectCount)); + updateAcctStmt.execute(); + taskConn.commit(); + } + } + return null; + }); + } + + final ArrayList> jobs = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + jobs.add(executor.submit(callables.get(i))); + } + // check for exceptions + for (Future job : jobs) { + job.get(5, TimeUnit.MINUTES); + } + } finally { + IOUtils.closeSilently(conn); + executor.shutdown(); + executor.awaitTermination(20, TimeUnit.SECONDS); + } + deleteDb("lockMode"); } + private final class ConcurrentUpdate2 extends Thread { + private final String column; + + Throwable exception; + + ConcurrentUpdate2(String column) { + this.column = column; + } + + @Override + public void run() { + try (Connection c = getConnection("concurrentUpdate2;LOCK_TIMEOUT=10000")) { + PreparedStatement ps = c.prepareStatement("UPDATE TEST SET V = ? 
WHERE " + column + " = ?"); + for (int test = 0; test < 1000; test++) { + for (int i = 0; i < 16; i++) { + ps.setInt(1, test); + ps.setInt(2, i); + assertEquals(16, ps.executeUpdate()); + } + } + } catch (Throwable e) { + exception = e; + } + } + } + + private void testConcurrentUpdate2() throws Exception { + deleteDb("concurrentUpdate2"); + try (Connection c = getConnection("concurrentUpdate2")) { + Statement s = c.createStatement(); + s.execute("CREATE TABLE TEST(A INT, B INT, V INT, PRIMARY KEY(A, B))"); + PreparedStatement ps = c.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + for (int i = 0; i < 16; i++) { + for (int j = 0; j < 16; j++) { + ps.setInt(1, i); + ps.setInt(2, j); + ps.setInt(3, 0); + ps.executeUpdate(); + } + } + ConcurrentUpdate2 a = new ConcurrentUpdate2("A"); + ConcurrentUpdate2 b = new ConcurrentUpdate2("B"); + a.start(); + b.start(); + a.join(); + b.join(); + Throwable e = a.exception; + if (e == null) { + e = b.exception; + } + if (e != null) { + if (e instanceof Exception) { + throw (Exception) e; + } + throw (Error) e; + } + } finally { + deleteDb("concurrentUpdate2"); + } + } + + private void testCheckConstraint() throws Exception { + deleteDb("checkConstraint"); + try (Connection c = getConnection("checkConstraint")) { + Statement s = c.createStatement(); + s.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, A INT, B INT)"); + PreparedStatement ps = c.prepareStatement("INSERT INTO TEST VALUES (?, ?, ?)"); + s.execute("ALTER TABLE TEST ADD CONSTRAINT CHECK_A_B CHECK A = B"); + final int numRows = 10; + for (int i = 0; i < numRows; i++) { + ps.setInt(1, i); + ps.setInt(2, 0); + ps.setInt(3, 0); + ps.executeUpdate(); + } + int numThreads = 4; + Thread[] threads = new Thread[numThreads]; + final AtomicBoolean error = new AtomicBoolean(); + for (int i = 0; i < numThreads; i++) { + threads[i] = new Thread() { + @Override + public void run() { + try (Connection c = getConnection("checkConstraint")) { + PreparedStatement ps = 
c.prepareStatement("UPDATE TEST SET A = ?, B = ? WHERE ID = ?"); + Random r = new Random(); + for (int i = 0; i < 1_000; i++) { + int v = r.nextInt(1_000); + ps.setInt(1, v); + ps.setInt(2, v); + ps.setInt(3, r.nextInt(numRows)); + ps.executeUpdate(); + } + } catch (SQLException e) { + error.set(true); + synchronized (TestMultiThread.this) { + logError("Error in CHECK constraint", e); + } + } + } + }; + } + for (int i = 0; i < numThreads; i++) { + threads[i].start(); + } + for (int i = 0; i < numThreads; i++) { + threads[i].join(); + } + assertFalse(error.get()); + } finally { + deleteDb("checkConstraint"); + } + } + } diff --git a/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java b/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java index 35a65fad38..b700b2f8b0 100644 --- a/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java +++ b/h2/src/test/org/h2/test/db/TestMultiThreadedKernel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,15 +12,16 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Random; + import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.JdbcUtils; -import org.h2.util.New; import org.h2.util.Task; /** * A multi-threaded test case. */ -public class TestMultiThreadedKernel extends TestBase { +public class TestMultiThreadedKernel extends TestDb { /** * Stop the current thread. @@ -38,20 +39,16 @@ public class TestMultiThreadedKernel extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - if (config.mvcc) { - return; - } deleteDb("multiThreadedKernel"); testConcurrentRead(); testCache(); deleteDb("multiThreadedKernel"); - final String url = getURL("multiThreadedKernel;" + - "DB_CLOSE_DELAY=-1;MULTI_THREADED=1", true); + final String url = getURL("multiThreadedKernel;DB_CLOSE_DELAY=-1", true); final String user = getUser(), password = getPassword(); int len = 3; Thread[] threads = new Thread[len]; @@ -100,12 +97,11 @@ private void work(Connection conn) throws SQLException { } private void testConcurrentRead() throws Exception { - ArrayList list = New.arrayList(); int size = 2; final int count = 1000; + ArrayList list = new ArrayList<>(size); final Connection[] connections = new Connection[count]; - String url = getURL("multiThreadedKernel;" + - "MULTI_THREADED=TRUE;CACHE_SIZE=16", true); + String url = getURL("multiThreadedKernel;CACHE_SIZE=16", true); for (int i = 0; i < size; i++) { final Connection conn = DriverManager.getConnection( url, getUser(), getPassword()); @@ -141,12 +137,11 @@ public void call() throws Exception { } private void testCache() throws Exception { - ArrayList list = New.arrayList(); int size = 3; final int count = 100; + ArrayList list = new ArrayList<>(size); final Connection[] connections = new Connection[count]; - String url = getURL("multiThreadedKernel;" + - "MULTI_THREADED=TRUE;CACHE_SIZE=1", true); + String url = getURL("multiThreadedKernel;CACHE_SIZE=1", true); for (int i = 0; i < size; i++) { final Connection conn = DriverManager.getConnection( url, getUser(), getPassword()); @@ -181,4 +176,8 @@ public void call() throws SQLException { } } + @Override + protected String getURL(String name, boolean admin) { + return super.getURL(name + ";LOCK_TIMEOUT=2000", admin); + } } diff --git a/h2/src/test/org/h2/test/db/TestOpenClose.java 
b/h2/src/test/org/h2/test/db/TestOpenClose.java index 7ce7d9b60e..3a58f0d599 100644 --- a/h2/src/test/org/h2/test/db/TestOpenClose.java +++ b/h2/src/test/org/h2/test/db/TestOpenClose.java @@ -1,30 +1,37 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; + import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Restore; import org.h2.util.Task; /** * Tests opening and closing a database. */ -public class TestOpenClose extends TestBase { +public class TestOpenClose extends TestDb { private int nextId = 10; @@ -34,7 +41,7 @@ public class TestOpenClose extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -45,6 +52,7 @@ public void test() throws Exception { testBackup(); testCase(); testReconnectFast(); + test1_1(); deleteDb("openClose"); } @@ -55,8 +63,8 @@ private void testErrorMessageLocked() throws Exception { deleteDb("openClose"); Connection conn; conn = getConnection("jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this).getConnection( - "jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS;OPEN_NEW=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + "/openClose;FILE_LOCK=FS;OPEN_NEW=TRUE")); conn.close(); } @@ -64,16 +72,10 @@ private void testErrorMessageWrongSplit() throws Exception { if (config.memory || config.reopen) { return; } - String fn = getBaseDir() + "/openClose2"; - if (config.mvStore) { - fn += Constants.SUFFIX_MV_FILE; - } else { - fn += Constants.SUFFIX_PAGE_FILE; - } + String fn = getBaseDir() + "/openClose2" + Constants.SUFFIX_MV_FILE; FileUtils.delete("split:" + fn); Connection conn; - String url = "jdbc:h2:split:18:" + getBaseDir() + "/openClose2"; - url = getURL(url, true); + String url = getURL("jdbc:h2:split:18:" + getBaseDir() + "/openClose2", true); conn = DriverManager.getConnection(url); conn.createStatement().execute("create table test(id int, name varchar) " + "as select 1, space(1000000)"); @@ -82,11 +84,7 @@ private void testErrorMessageWrongSplit() throws Exception { c.position(c.size() * 2 - 1); c.write(ByteBuffer.wrap(new byte[1])); c.close(); - if (config.mvStore) { - assertThrows(ErrorCode.IO_EXCEPTION_1, this).getConnection(url); - } else { - assertThrows(ErrorCode.IO_EXCEPTION_2, this).getConnection(url); - } + assertThrows(ErrorCode.IO_EXCEPTION_1, () -> getConnection(url)); FileUtils.delete("split:" + fn); } @@ -97,8 +95,8 @@ private void testCloseDelay() throws Exception { Connection 
conn = DriverManager.getConnection(url, user, password); conn.close(); Thread.sleep(950); - long time = System.currentTimeMillis(); - while (System.currentTimeMillis() - time < 100) { + long time = System.nanoTime(); + while (System.nanoTime() - time < TimeUnit.MILLISECONDS.toNanos(100)) { conn = DriverManager.getConnection(url, user, password); conn.close(); } @@ -152,7 +150,7 @@ private void testReconnectFast() throws SQLException { conn.close(); conn = DriverManager.getConnection(url, user, password); stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM DUAL"); + ResultSet rs = stat.executeQuery("SELECT * FROM SYSTEM_RANGE(1, 1)"); if (rs.next()) { rs.getString(1); } @@ -220,11 +218,22 @@ synchronized int getNextId() { return nextId++; } + private void test1_1() throws IOException { + Path old = Paths.get(getBaseDir()).resolve("db" + Constants.SUFFIX_OLD_DATABASE_FILE); + Files.createFile(old); + try { + assertThrows(ErrorCode.FILE_VERSION_ERROR_1, + () -> DriverManager.getConnection("jdbc:h2:" + getBaseDir() + "/db")); + } finally { + Files.deleteIfExists(old); + } + } + + /** * A database event listener used in this test. 
*/ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void exceptionThrown(SQLException e, String sql) { @@ -232,7 +241,7 @@ public void exceptionThrown(SQLException e, String sql) { } @Override - public void setProgress(int state, String name, int current, int max) { + public void setProgress(int state, String name, long current, long max) { String stateName; switch (state) { case STATE_SCAN_FILE: @@ -258,20 +267,6 @@ public void setProgress(int state, String name, int current, int max) { // System.out.println(": " + stateName); } - @Override - public void closingDatabase() { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - - @Override - public void opened() { - // nothing to do - } } } diff --git a/h2/src/test/org/h2/test/db/TestOptimizations.java b/h2/src/test/org/h2/test/db/TestOptimizations.java index 253b646930..2395824362 100644 --- a/h2/src/test/org/h2/test/db/TestOptimizations.java +++ b/h2/src/test/org/h2/test/db/TestOptimizations.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -15,19 +15,18 @@ import java.util.HashMap; import java.util.Random; import java.util.TreeSet; - +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; -import org.h2.util.New; -import org.h2.util.StringUtils; import org.h2.util.Task; /** * Test various optimizations (query cache, optimization for MIN(..), and * MAX(..)). 
*/ -public class TestOptimizations extends TestBase { +public class TestOptimizations extends TestDb { /** * Run just this test. @@ -35,12 +34,13 @@ public class TestOptimizations extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("optimizations"); + testConditionsStackOverflow(); testIdentityIndexUsage(); testFastRowIdCondition(); testExplainRoundTrip(); @@ -58,11 +58,13 @@ public void test() throws Exception { testNestedIn(); testConstantIn1(); testConstantIn2(); + testConstantTypeConversionToColumnType(); testNestedInSelectAndLike(); testNestedInSelect(); testInSelectJoin(); testMinMaxNullOptimization(); - testUseIndexWhenAllColumnsNotInOrderBy(); + testUseCoveringIndex(); + // testUseIndexWhenAllColumnsNotInOrderBy(); if (config.networked) { return; } @@ -71,14 +73,18 @@ public void test() throws Exception { testMultiColumnRangeQuery(); testDistinctOptimization(); testQueryCacheTimestamp(); - testQueryCacheSpeed(); + if (!config.lazy) { + testQueryCacheSpeed(); + } testQueryCache(true); testQueryCache(false); testIn(); testMinMaxCountOptimization(true); testMinMaxCountOptimization(false); testOrderedIndexes(); + testIndexUseDespiteNullsFirst(); testConvertOrToIn(); + testConditionAndOrDistributiveLaw(); deleteDb("optimizations"); } @@ -108,8 +114,8 @@ private void testFastRowIdCondition() throws Exception { private void testExplainRoundTrip() throws Exception { Connection conn = getConnection("optimizations"); - assertExplainRoundTrip(conn, - "select x from dual where x > any(select x from dual)"); + assertExplainRoundTrip(conn, "SELECT \"X\" FROM SYSTEM_RANGE(1, 1)" + + " WHERE \"X\" > ANY(SELECT DISTINCT \"X\" FROM SYSTEM_RANGE(1, 1))"); conn.close(); } @@ -118,14 +124,13 @@ private void assertExplainRoundTrip(Connection conn, String sql) Statement stat = 
conn.createStatement(); ResultSet rs = stat.executeQuery("explain " + sql); rs.next(); - String plan = rs.getString(1).toLowerCase(); + String plan = rs.getString(1); plan = plan.replaceAll("\\s+", " "); plan = plan.replaceAll("/\\*[^\\*]*\\*/", ""); plan = plan.replaceAll("\\s+", " "); - plan = StringUtils.replaceAll(plan, "system_range(1, 1)", "dual"); plan = plan.replaceAll("\\( ", "\\("); plan = plan.replaceAll(" \\)", "\\)"); - assertEquals(plan, sql); + assertEquals(sql, plan); } private void testOrderByExpression() throws Exception { @@ -168,7 +173,7 @@ private void testGroupSubquery() throws Exception { private void testAnalyzeLob() throws Exception { Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); - stat.execute("create table test(v varchar, b binary, cl clob, bl blob) as " + + stat.execute("create table test(v varchar, b varbinary, cl clob, bl blob) as " + "select ' ', '00', ' ', '00' from system_range(1, 100)"); stat.execute("analyze"); ResultSet rs = stat.executeQuery("select column_name, selectivity " + @@ -211,12 +216,15 @@ private void testExistsSubquery() throws Exception { "where exists(select 1 from test, test, test) and id = 10"); rs.next(); // ensure the ID = 10 part is evaluated first - assertContains(rs.getString(1), "WHERE (ID = 10)"); + assertContains(rs.getString(1), "WHERE (\"ID\" = 10)"); stat.execute("drop table test"); conn.close(); } private void testQueryCacheConcurrentUse() throws Exception { + if (config.lazy) { + return; + } final Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, data clob)"); @@ -279,7 +287,8 @@ private void testRowId() throws SQLException { stat.execute("insert into test(data) values('World')"); stat.execute("insert into test(_rowid_, data) values(20, 'Hello')"); stat.execute( - "merge into test(_rowid_, data) key(_rowid_) values(20, 'Hallo')"); + "merge into test using 
(values(20, 'Hallo')) s(id, data) on test._rowid_ = s.id" + + " when matched then update set data = s.data"); rs = stat.executeQuery( "select _rowid_, data from test order by _rowid_"); rs.next(); @@ -289,7 +298,7 @@ private void testRowId() throws SQLException { assertEquals(11, rs.getInt(1)); assertEquals("World", rs.getString(2)); rs.next(); - assertEquals(21, rs.getInt(1)); + assertEquals(20, rs.getInt(1)); assertEquals("Hallo", rs.getString(2)); assertFalse(rs.next()); stat.execute("drop table test"); @@ -337,12 +346,12 @@ private void testSortIndex() throws SQLException { rs = stat.executeQuery("explain select * from test " + "where id > 10 order by id"); rs.next(); - assertTrue(rs.getString(1).contains("IDX_ID_ASC")); + assertContains(rs.getString(1), "IDX_ID_ASC"); rs = stat.executeQuery("explain select * from test " + "where id < 10 order by id desc"); rs.next(); - assertTrue(rs.getString(1).contains("IDX_ID_DESC")); + assertContains(rs.getString(1), "IDX_ID_DESC"); rs.next(); stat.execute("drop table test"); @@ -353,8 +362,8 @@ private void testAutoAnalyze() throws SQLException { deleteDb("optimizations"); Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select value " + - "from information_schema.settings where name='analyzeAuto'"); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'analyzeAuto'"); int auto = rs.next() ? 
rs.getInt(1) : 0; if (auto != 0) { stat.execute("create table test(id int)"); @@ -428,7 +437,7 @@ private void testConstantIn1() throws SQLException { stat.execute("create table test(id int primary key, name varchar(255))"); stat.execute("insert into test values(1, 'Hello'), (2, 'World')"); assertSingleValue(stat, - "select count(*) from test where name in ('Hello', 'World', 1)", 2); + "select count(*) from test where name in ('Hello', 'World', '1')", 2); assertSingleValue(stat, "select count(*) from test where name in ('Hello', 'World')", 2); assertSingleValue(stat, @@ -462,6 +471,26 @@ private void testConstantIn2() throws SQLException { conn.close(); } + private void testConstantTypeConversionToColumnType() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations;IGNORECASE=TRUE"); + Statement stat = conn.createStatement(); + + stat.executeUpdate("CREATE TABLE test (x int)"); + ResultSet resultSet; + resultSet = stat.executeQuery( + "EXPLAIN SELECT x FROM test WHERE x = '5'"); + + assertTrue(resultSet.next()); + // String constant '5' has been converted to int constant 5 on + // optimization + assertTrue(resultSet.getString(1).endsWith("\"X\" = 5")); + + stat.execute("drop table test"); + + conn.close(); + } + private void testNestedInSelect() throws SQLException { deleteDb("optimizations"); Connection conn = getConnection("optimizations"); @@ -504,12 +533,12 @@ private void testNestedInSelectAndLike() throws SQLException { assertFalse(rs.next()); PreparedStatement prep; - prep = conn.prepareStatement("SELECT * FROM DUAL A " + - "WHERE A.X IN (SELECT B.X FROM DUAL B WHERE B.X LIKE ?)"); + prep = conn.prepareStatement("SELECT * FROM SYSTEM_RANGE(1, 1) A " + + "WHERE A.X IN (SELECT B.X FROM SYSTEM_RANGE(1, 1) B WHERE B.X LIKE ?)"); prep.setString(1, "1"); prep.execute(); - prep = conn.prepareStatement("SELECT * FROM DUAL A " + - "WHERE A.X IN (SELECT B.X FROM DUAL B WHERE B.X IN (?, ?))"); + prep = 
conn.prepareStatement("SELECT * FROM SYSTEM_RANGE(1, 1) A " + + "WHERE A.X IN (SELECT B.X FROM SYSTEM_RANGE(1, 1) B WHERE B.X IN (?, ?))"); prep.setInt(1, 1); prep.setInt(2, 1); prep.executeQuery(); @@ -551,9 +580,7 @@ private void testOptimizeInJoinSelect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table item(id int primary key)"); stat.execute("insert into item values(1)"); - stat.execute("create alias opt for \"" + - getClass().getName() + - ".optimizeInJoinSelect\""); + stat.execute("create alias opt for '" + getClass().getName() + ".optimizeInJoinSelect'"); PreparedStatement prep = conn.prepareStatement( "select * from item where id in (select x from opt())"); ResultSet rs = prep.executeQuery(); @@ -632,10 +659,6 @@ private void testMinMaxNullOptimization() throws SQLException { ResultSet rs = stat.executeQuery( "explain select min(x), max(x) from test"); rs.next(); - if (!config.mvcc) { - String plan = rs.getString(1); - assertTrue(plan.indexOf("direct") > 0); - } rs = stat.executeQuery("select min(x), max(x) from test"); rs.next(); int min = rs.getInt(1); @@ -666,7 +689,7 @@ private void testMultiColumnRangeQuery() throws SQLException { "WHERE id < 100 and type=2 AND id<100"); rs.next(); String plan = rs.getString(1); - assertTrue(plan.indexOf("TYPE_INDEX") > 0); + assertContains(plan, "TYPE_INDEX"); conn.close(); } @@ -738,17 +761,6 @@ private void testDistinctOptimization() throws SQLException { assertEquals(i, rs.getInt(1)); } assertFalse(rs.next()); - rs = stat.executeQuery("SELECT DISTINCT TYPE FROM TEST " + - "ORDER BY TYPE LIMIT -1 OFFSET 0 SAMPLE_SIZE 3"); - // must have at least one row - assertTrue(rs.next()); - for (int i = 0; i < 3; i++) { - rs.getInt(1); - if (i > 0 && !rs.next()) { - break; - } - } - assertFalse(rs.next()); conn.close(); } @@ -779,19 +791,28 @@ private void testQueryCacheSpeed() throws SQLException { } private void testQuerySpeed(Statement stat, String sql) throws SQLException { - 
stat.execute("set OPTIMIZE_REUSE_RESULTS 0"); - stat.execute(sql); - long time = System.currentTimeMillis(); - stat.execute(sql); - time = System.currentTimeMillis() - time; - stat.execute("set OPTIMIZE_REUSE_RESULTS 1"); + long totalTime = 0; + long totalTimeOptimized = 0; + for (int i = 0; i < 3; i++) { + totalTime += measureQuerySpeed(stat, sql, false); + totalTimeOptimized += measureQuerySpeed(stat, sql, true); + } + // System.out.println( + // TimeUnit.NANOSECONDS.toMillis(totalTime) + " " + + // TimeUnit.NANOSECONDS.toMillis(totalTimeOptimized)); + if (totalTimeOptimized > totalTime) { + fail("not optimized: " + TimeUnit.NANOSECONDS.toMillis(totalTime) + + " optimized: " + TimeUnit.NANOSECONDS.toMillis(totalTimeOptimized) + + " sql:" + sql); + } + } + + private static long measureQuerySpeed(Statement stat, String sql, boolean optimized) throws SQLException { + stat.execute("set OPTIMIZE_REUSE_RESULTS " + (optimized ? "1" : "0")); stat.execute(sql); - long time2 = System.currentTimeMillis(); + long time = System.nanoTime(); stat.execute(sql); - time2 = System.currentTimeMillis() - time2; - if (time2 > time * 2) { - fail("not optimized: " + time + " optimized: " + time2 + " sql:" + sql); - } + return System.nanoTime() - time; } private void testQueryCache(boolean optimize) throws SQLException { @@ -830,11 +851,11 @@ private void testMinMaxCountOptimization(boolean memory) Connection conn = getConnection("optimizations"); Statement stat = conn.createStatement(); stat.execute("create " + (memory ? 
"memory" : "") + - " table test(id int primary key, value int)"); - stat.execute("create index idx_value_id on test(value, id);"); + " table test(id int primary key, v int)"); + stat.execute("create index idx_v_id on test(v, id);"); int len = getSize(1000, 10000); - HashMap map = New.hashMap(); - TreeSet set = new TreeSet(); + HashMap map = new HashMap<>(); + TreeSet set = new TreeSet<>(); Random random = new Random(1); for (int i = 0; i < len; i++) { if (i == len / 2) { @@ -853,7 +874,7 @@ private void testMinMaxCountOptimization(boolean memory) case 5: if (random.nextInt(1000) == 1) { stat.execute("insert into test values(" + i + ", null)"); - map.put(new Integer(i), null); + map.put(i, null); } else { int value = random.nextInt(); stat.execute("insert into test values(" + i + ", " + value + ")"); @@ -878,7 +899,7 @@ private void testMinMaxCountOptimization(boolean memory) break; } case 9: { - ArrayList list = New.arrayList(map.values()); + ArrayList list = new ArrayList<>(map.values()); int count = list.size(); Integer min = null, max = null; if (count > 0) { @@ -886,7 +907,7 @@ private void testMinMaxCountOptimization(boolean memory) max = set.last(); } ResultSet rs = stat.executeQuery( - "select min(value), max(value), count(*) from test"); + "select min(v), max(v), count(*) from test"); rs.next(); Integer minDb = (Integer) rs.getObject(1); Integer maxDb = (Integer) rs.getObject(2); @@ -918,9 +939,9 @@ private void testIn() throws SQLException { assertFalse(stat.executeQuery("select * from dual " + "where null in(null, 1)").next()); - assertFalse(stat.executeQuery("select * from dual " + + assertFalse(stat.executeQuery("select * from system_range(1, 1) " + "where 1+x in(3, 4)").next()); - assertFalse(stat.executeQuery("select * from dual d1, dual d2 " + + assertFalse(stat.executeQuery("select * from system_range(1, 1) d1, dual d2 " + "where d1.x in(3, 4)").next()); stat.execute("create table test(id int primary key, name varchar)"); @@ -1006,6 +1027,85 @@ 
private void testOrderedIndexes() throws SQLException { conn.close(); } + private void testIndexUseDespiteNullsFirst() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE my_table(K1 INT)"); + stat.execute("CREATE INDEX my_index ON my_table(K1)"); + stat.execute("INSERT INTO my_table VALUES (NULL)"); + stat.execute("INSERT INTO my_table VALUES (1)"); + stat.execute("INSERT INTO my_table VALUES (2)"); + + ResultSet rs; + String result; + + + rs = stat.executeQuery( + "EXPLAIN PLAN FOR SELECT * FROM my_table " + + "ORDER BY K1 ASC NULLS FIRST"); + rs.next(); + result = rs.getString(1); + assertContains(result, "/* index sorted */"); + + rs = stat.executeQuery( + "SELECT * FROM my_table " + + "ORDER BY K1 ASC NULLS FIRST"); + rs.next(); + assertNull(rs.getObject(1)); + rs.next(); + assertEquals(1, rs.getInt(1)); + rs.next(); + assertEquals(2, rs.getInt(1)); + + // === + rs = stat.executeQuery( + "EXPLAIN PLAN FOR SELECT * FROM my_table " + + "ORDER BY K1 DESC NULLS FIRST"); + rs.next(); + result = rs.getString(1); + if (result.contains("/* index sorted */")) { + fail(result + " does not contain: /* index sorted */"); + } + + rs = stat.executeQuery( + "SELECT * FROM my_table " + + "ORDER BY K1 DESC NULLS FIRST"); + rs.next(); + assertNull(rs.getObject(1)); + rs.next(); + assertEquals(2, rs.getInt(1)); + rs.next(); + assertEquals(1, rs.getInt(1)); + + // === + rs = stat.executeQuery( + "EXPLAIN PLAN FOR SELECT * FROM my_table " + + "ORDER BY K1 ASC NULLS LAST"); + rs.next(); + result = rs.getString(1); + if (result.contains("/* index sorted */")) { + fail(result + " does not contain: /* index sorted */"); + } + + rs = stat.executeQuery( + "SELECT * FROM my_table " + + "ORDER BY K1 ASC NULLS LAST"); + rs.next(); + assertEquals(1, rs.getInt(1)); + rs.next(); + assertEquals(2, rs.getInt(1)); + rs.next(); + assertNull(rs.getObject(1)); + + // TODO: 
Test "EXPLAIN PLAN FOR SELECT * FROM my_table ORDER BY K1 DESC NULLS FIRST" + // Currently fails, as using the index when sorting DESC is currently not supported. + + stat.execute("DROP TABLE my_table"); + conn.close(); + } + private void testConvertOrToIn() throws SQLException { deleteDb("optimizations"); Connection conn = getConnection("optimizations"); @@ -1018,7 +1118,7 @@ private void testConvertOrToIn() throws SQLException { ResultSet rs = stat.executeQuery("EXPLAIN PLAN FOR SELECT * " + "FROM test WHERE ID=1 OR ID=2 OR ID=3 OR ID=4 OR ID=5"); rs.next(); - assertContains(rs.getString(1), "ID IN(1, 2, 3, 4, 5)"); + assertContains(rs.getString(1), "\"ID\" IN(1, 2, 3, 4, 5)"); rs = stat.executeQuery("SELECT COUNT(*) FROM test " + "WHERE ID=1 OR ID=2 OR ID=3 OR ID=4 OR ID=5"); @@ -1027,4 +1127,78 @@ private void testConvertOrToIn() throws SQLException { conn.close(); } + + private void testUseCoveringIndex() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TABLE_A(id IDENTITY PRIMARY KEY NOT NULL, " + + "name VARCHAR NOT NULL, active BOOLEAN DEFAULT TRUE, " + + "CONSTRAINT TABLE_A_UK UNIQUE (name) )"); + stat.execute("CREATE TABLE TABLE_B(id IDENTITY PRIMARY KEY NOT NULL, " + + "TABLE_a_id BIGINT NOT NULL, createDate TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, " + + "CONSTRAINT TABLE_B_UK UNIQUE (table_a_id, createDate))"); + stat.execute("CREATE INDEX TABLE_B_IDX ON TABLE_B(TABLE_A_ID)"); + stat.execute("ALTER TABLE TABLE_B ADD FOREIGN KEY (table_a_id) REFERENCES TABLE_A(id)"); + stat.execute("INSERT INTO TABLE_A (name) SELECT 'package_' || CAST(X as VARCHAR) " + + "FROM SYSTEM_RANGE(1, 100) WHERE X <= 100"); + int count = config.memory ? 
30_000 : 50_000; + stat.execute("INSERT INTO TABLE_B (table_a_id, createDate) SELECT " + + "CASE WHEN table_a_id = 0 THEN 1 ELSE table_a_id END, createDate " + + "FROM ( SELECT ROUND((RAND() * 100)) AS table_a_id, " + + "DATEADD('SECOND', X, CURRENT_TIMESTAMP) as createDate FROM SYSTEM_RANGE(1, " + count + ") " + + "WHERE X < " + count + " )"); + stat.execute("ANALYZE"); + + ResultSet rs = stat.executeQuery("EXPLAIN ANALYZE SELECT MAX(b.id) as id " + + "FROM table_b b JOIN table_a a ON b.table_a_id = a.id GROUP BY b.table_a_id " + + "HAVING A.ACTIVE = TRUE"); + rs.next(); + assertContains(rs.getString(1), "/* PUBLIC.TABLE_B_IDX: TABLE_A_ID = A.ID */"); + + rs = stat.executeQuery("EXPLAIN ANALYZE SELECT MAX(id) FROM table_b GROUP BY table_a_id"); + rs.next(); + assertContains(rs.getString(1), "/* PUBLIC.TABLE_B_IDX"); + conn.close(); + } + + private void testConditionAndOrDistributiveLaw() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS TABLE_A (" + + "id int NOT NULL AUTO_INCREMENT, " + + "name VARCHAR(30) NOT NULL," + + "occupation VARCHAR(20)," + + "age int," + + "salary int," + + "PRIMARY KEY(id))"); + stat.execute("INSERT INTO TABLE_A (name,occupation,age,salary) VALUES" + + "('mark', 'doctor',25,5000)," + + "('kevin', 'artist',20,4000)," + + "('isuru', 'engineer',25,5000)," + + "('josaph', 'businessman',30,7000)," + + "('sajeewa', 'analyst',24,5000)," + + "('randil', 'engineer',25,5000)," + + "('ashan', 'developer',24,5000)"); + ResultSet rs = stat.executeQuery("SELECT * FROM TABLE_A WHERE (salary = 5000 AND name = 'isuru') OR" + + "(age = 25 AND name = 'isuru') "); + rs.next(); + assertTrue("engineer".equals(rs.getString("occupation"))); + conn.close(); + } + + private void testConditionsStackOverflow() throws SQLException { + deleteDb("optimizations"); + Connection conn = getConnection("optimizations"); + Statement stat 
= conn.createStatement(); + StringBuilder b = new StringBuilder("SELECT 1"); + for (int i=0; i<10000; i++) { + b.append(" AND 1"); + } + ResultSet rs = stat.executeQuery(b.toString()); + rs.next(); + assertTrue(rs.getBoolean(1)); + conn.close(); + } } diff --git a/h2/src/test/org/h2/test/db/TestOutOfMemory.java b/h2/src/test/org/h2/test/db/TestOutOfMemory.java index b8861b14a1..c93c5b83ab 100644 --- a/h2/src/test/org/h2/test/db/TestOutOfMemory.java +++ b/h2/src/test/org/h2/test/db/TestOutOfMemory.java @@ -1,24 +1,36 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; import org.h2.api.ErrorCode; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FileUtils; +import org.h2.store.fs.mem.FilePathMem; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Utils; /** * Tests out of memory situations. The database must not get corrupted, and * transactions must stay atomic. */ -public class TestOutOfMemory extends TestBase { +public class TestOutOfMemory extends TestDb { + + private static final String DB_NAME = "outOfMemory"; /** * Run just this test. @@ -26,59 +38,217 @@ public class TestOutOfMemory extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { - if (config.memory || config.mvcc) { - return; + public boolean isEnabled() { + if (config.vmlens) { + // running out of memory will cause the vmlens agent to stop working + return false; } - for (int i = 0; i < 5; i++) { + return true; + } + + @Override + public void test() throws Exception { + try { + if (!config.ci) { + System.gc(); + testMVStoreUsingInMemoryFileSystem(); + System.gc(); + testDatabaseUsingInMemoryFileSystem(); + } + System.gc(); + if (!config.networked) { // for some unknown reason it fails + testUpdateWhenNearlyOutOfMemory(); + } + } finally { System.gc(); } - deleteDb("outOfMemory"); - Connection conn = getConnection("outOfMemory;MAX_OPERATION_MEMORY=1000000"); - Statement stat = conn.createStatement(); - stat.execute("drop all objects"); - stat.execute("create table stuff (id int, text varchar as space(100) || id)"); - stat.execute("insert into stuff(id) select x from system_range(1, 3000)"); - PreparedStatement prep = conn.prepareStatement( - "update stuff set text = text || space(1000) || id"); - prep.execute(); - stat.execute("checkpoint"); - eatMemory(80); + } + + private void testMVStoreUsingInMemoryFileSystem() { + FilePath.register(new FilePathMem()); + String fileName = "memFS:" + getTestName(); + AtomicReference exRef = new AtomicReference<>(); + MVStore store = new MVStore.Builder() + .fileName(fileName) + .backgroundExceptionHandler((t, e) -> exRef.compareAndSet(null, e)) + .open(); try { + Map map = store.openMap("test"); + Random r = new Random(1); try { - prep.execute(); + for (int i = 0; i < 100; i++) { + byte[] data = new byte[10 * 1024 * 1024]; + r.nextBytes(data); + map.put(i, data); + } + Throwable throwable = exRef.get(); + if(throwable instanceof OutOfMemoryError) throw (OutOfMemoryError)throwable; + if(throwable instanceof MVStoreException) throw 
(MVStoreException)throwable; + fail(); + } catch (OutOfMemoryError | MVStoreException e) { + // expected + } + try { + store.close(); + } catch (MVStoreException e) { + // expected + } + store.closeImmediately(); + store = MVStore.open(fileName); + store.openMap("test"); + store.close(); + } finally { + // just in case, otherwise if this test suffers a spurious failure, + // succeeding tests will too, because they will OOM + store.closeImmediately(); + FileUtils.delete(fileName); + } + } + + private void testDatabaseUsingInMemoryFileSystem() throws SQLException, InterruptedException { + String filename = "memFS:" + getTestName(); + String url = "jdbc:h2:" + filename + "/test"; + try { + Connection conn = DriverManager.getConnection(url); + Statement stat = conn.createStatement(); + long memoryFree = Utils.getMemoryFree(); + try { + stat.execute("create table test(id int, name varchar) as " + + "select x, space(1000000+x) from system_range(1, 10000)"); fail(); } catch (SQLException e) { - assertEquals(ErrorCode.OUT_OF_MEMORY, e.getErrorCode()); + assertTrue("Unexpected error code: " + e.getErrorCode(), + ErrorCode.OUT_OF_MEMORY == e.getErrorCode() || + ErrorCode.FILE_CORRUPTED_1 == e.getErrorCode() || + ErrorCode.DATABASE_IS_CLOSED == e.getErrorCode() || + ErrorCode.GENERAL_ERROR_1 == e.getErrorCode()); } - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - freeMemory(); - conn = null; - conn = getConnection("outOfMemory"); + recoverAfterOOM(memoryFree * 3 / 4); + try { + conn.close(); + fail(); + } catch (SQLException e) { + assertTrue("Unexpected error code: " + e.getErrorCode(), + ErrorCode.OUT_OF_MEMORY == e.getErrorCode() || + ErrorCode.FILE_CORRUPTED_1 == e.getErrorCode() || + ErrorCode.DATABASE_IS_CLOSED == e.getErrorCode() || + ErrorCode.GENERAL_ERROR_1 == e.getErrorCode()); + } + recoverAfterOOM(memoryFree * 3 / 4); + conn = DriverManager.getConnection(url); stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select count(*) from 
stuff"); - rs.next(); - assertEquals(3000, rs.getInt(1)); - } catch (OutOfMemoryError e) { - freeMemory(); - // out of memory not detected - throw (Error) new AssertionError("Out of memory not detected").initCause(e); + stat.execute("SELECT 1"); + conn.close(); } finally { - freeMemory(); - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - // out of memory will / may close the database - assertKnownException(e); - } + // release the static data this test generates + FileUtils.deleteRecursive(filename, true); + } + } + + private static void recoverAfterOOM(long expectedFreeMemory) throws InterruptedException { + for (int i = 0; i < 50; i++) { + if (Utils.getMemoryFree() > expectedFreeMemory) { + break; + } + Thread.sleep(20); + } + } + + private void testUpdateWhenNearlyOutOfMemory() throws Exception { + if (config.memory) { + return; + } + deleteDb(DB_NAME); + + ProcessBuilder processBuilder = buildChild( + DB_NAME + ";MAX_OPERATION_MEMORY=1000000", + MyChild.class, + "-XX:+UseParallelGC", +// "-XX:+UseG1GC", + "-Xmx128m"); +//* + processBuilder.start().waitFor(); +/*/ + List args = processBuilder.command(); + for (Iterator iter = args.iterator(); iter.hasNext(); ) { + String arg = iter.next(); + if(arg.equals(MyChild.class.getName())) { + iter.remove(); + break; + } + iter.remove(); + } + MyChild.main(args.toArray(new String[0])); +//*/ + try (Connection conn = getConnection(DB_NAME)) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT count(*) FROM stuff"); + assertTrue(rs.next()); + assertEquals(3000, rs.getInt(1)); + + rs = stat.executeQuery("SELECT * FROM stuff WHERE id = 3000"); + assertTrue(rs.next()); + String text = rs.getString(2); + assertFalse(rs.wasNull()); + assertEquals(1004, text.length()); + + // TODO: there are intermittent failures here + // where number is about 1000 short of expected value. + // This indicates a real problem - durability failure + // and need to be looked at. 
+ rs = stat.executeQuery("SELECT sum(length(text)) FROM stuff"); + assertTrue(rs.next()); + int totalSize = rs.getInt(1); + if (3010893 > totalSize) { + TestBase.logErrorMessage("Durability failure - expected: 3010893, actual: " + totalSize); } + } finally { + deleteDb(DB_NAME); } - deleteDb("outOfMemory"); } + public static final class MyChild extends TestDb.Child { + + /** + * Run just this test. + * + * @param args the arguments + */ + public static void main(String... args) throws Exception { + new MyChild(args).init().test(); + } + + private MyChild(String... args) { + super(args); + } + + @Override + public void test() { + try (Connection conn = getConnection()) { + Statement stat = conn.createStatement(); + stat.execute("DROP ALL OBJECTS"); + stat.execute("CREATE TABLE stuff (id INT, text VARCHAR)"); + stat.execute("INSERT INTO stuff(id) SELECT x FROM system_range(1, 3000)"); + PreparedStatement prep = conn.prepareStatement( + "UPDATE stuff SET text = IFNULL(text,'') || space(1000) || id"); + prep.execute(); + stat.execute("CHECKPOINT"); + + ResultSet rs = stat.executeQuery("SELECT sum(length(text)) FROM stuff"); + assertTrue(rs.next()); + assertEquals(3010893, rs.getInt(1)); + + eatMemory(80); + prep.execute(); + fail(); + } catch (SQLException ignore) { + } finally { + freeMemory(); + } + } + } } diff --git a/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java b/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java new file mode 100644 index 0000000000..e020fbcea8 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestPersistentCommonTableExpressions.java @@ -0,0 +1,253 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import org.h2.test.TestBase; + +/** + * Test persistent common table expressions queries using WITH. 
+ */ +public class TestPersistentCommonTableExpressions extends AbstractBaseForCommonTableExpressions { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + // persistent cte tests - also tests reconnects and database reloading... + testRecursiveTable(); + testPersistentNonRecursiveTableInCreateView(); + testPersistentRecursiveTableInCreateView(); + testPersistentNonRecursiveTableInCreateViewDropAllObjects(); + testPersistentRecursiveTableInCreateViewDropAllObjects(); + } + + private void testRecursiveTable() throws Exception { + String[] expectedRowData = new String[]{"|meat|null", "|fruit|3", "|veg|2"}; + String[] expectedColumnTypes = new String[]{"CHARACTER VARYING", "NUMERIC"}; + String[] expectedColumnNames = new String[]{"VAL", + "SUM((SELECT\n" + + " X\n" + + "FROM PUBLIC.\"\" BB\n" + + "WHERE BB.A IS NOT DISTINCT FROM A.VAL))"}; + + String setupSQL = + "DROP TABLE IF EXISTS A; " + +"DROP TABLE IF EXISTS B; " + +"DROP TABLE IF EXISTS C; " + +"CREATE TABLE A(VAL VARCHAR(255)); " + +"CREATE TABLE B(A VARCHAR(255), VAL VARCHAR(255)); " + +"CREATE TABLE C(B VARCHAR(255), VAL VARCHAR(255)); " + +" " + +"INSERT INTO A VALUES('fruit'); " + +"INSERT INTO B VALUES('fruit','apple'); " + +"INSERT INTO B VALUES('fruit','banana'); " + +"INSERT INTO C VALUES('apple', 'golden delicious');" + +"INSERT INTO C VALUES('apple', 'granny smith'); " + +"INSERT INTO C VALUES('apple', 'pippin'); " + +"INSERT INTO A VALUES('veg'); " + +"INSERT INTO B VALUES('veg', 'carrot'); " + +"INSERT INTO C VALUES('carrot', 'nantes'); " + +"INSERT INTO C VALUES('carrot', 'imperator'); " + +"INSERT INTO C VALUES(null, 'banapple'); " + +"INSERT INTO A VALUES('meat'); "; + + String withQuery = "WITH BB as (SELECT \n" + + "sum(1) as X, \n" + + "a \n" + + "FROM B \n" + + "JOIN C ON B.val=C.b \n" + + "GROUP BY a) \n" + + "SELECT 
\n" + + "A.val, \n" + + "sum((SELECT X FROM BB WHERE BB.a IS NOT DISTINCT FROM A.val))\n" + + "FROM A \n" + "GROUP BY A.val"; + int maxRetries = 3; + int expectedNumberOfRows = expectedRowData.length; + + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, true); + + } + + private void testPersistentRecursiveTableInCreateView() throws Exception { + String setupSQL = "--SET TRACE_LEVEL_SYSTEM_OUT 3;\n" + +"DROP TABLE IF EXISTS my_tree; \n" + +"DROP VIEW IF EXISTS v_my_tree; \n" + +"CREATE TABLE my_tree ( \n" + +" id INTEGER, \n" + +" parent_fk INTEGER \n" + +"); \n" + +" \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 1, NULL ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 11, 1 ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 111, 11 ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 12, 1 ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 121, 12 ); \n" + +" \n" + +"CREATE OR REPLACE VIEW v_my_tree AS \n" + +"WITH RECURSIVE tree_cte (sub_tree_root_id, tree_level, parent_fk, child_fk) AS ( \n" + +" SELECT mt.ID AS sub_tree_root_id, CAST(0 AS INT) AS tree_level, mt.parent_fk, mt.id \n" + +" FROM my_tree mt \n" + +" UNION ALL \n" + +" SELECT sub_tree_root_id, mtc.tree_level + 1 AS tree_level, mtc.parent_fk, mt.id \n" + +" FROM my_tree mt \n" + +"INNER JOIN tree_cte mtc ON mtc.child_fk = mt.parent_fk \n" + +"), \n" + +"unused_cte AS ( SELECT 1 AS unUsedColumn ) \n" + +"SELECT sub_tree_root_id, tree_level, parent_fk, child_fk FROM tree_cte; \n"; + + String withQuery = "SELECT * FROM v_my_tree"; + int maxRetries = 4; + String[] expectedRowData = new String[]{"|1|0|null|1", + "|11|0|1|11", + "|111|0|11|111", + "|12|0|1|12", + "|121|0|12|121", + "|1|1|null|11", + "|11|1|1|111", + "|1|1|null|12", + "|12|1|1|121", + "|1|2|null|111", + "|1|2|null|121" + }; + String[] expectedColumnNames = new String[]{"SUB_TREE_ROOT_ID", "TREE_LEVEL", 
"PARENT_FK", "CHILD_FK"}; + String[] expectedColumnTypes = new String[]{"INTEGER", "INTEGER", "INTEGER", "INTEGER"}; + int expectedNumberOfRows = 11; + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + } + + private void testPersistentNonRecursiveTableInCreateView() throws Exception { + String setupSQL = "" + +"DROP VIEW IF EXISTS v_my_nr_tree; \n" + +"DROP TABLE IF EXISTS my_table; \n" + +"CREATE TABLE my_table ( \n" + +" id INTEGER, \n" + +" parent_fk INTEGER \n" + +"); \n" + +" \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 1, NULL ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 11, 1 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 111, 11 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 12, 1 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 121, 12 ); \n" + +" \n" + +"CREATE OR REPLACE VIEW v_my_nr_tree AS \n" + +"WITH tree_cte_nr (sub_tree_root_id, tree_level, parent_fk, child_fk) AS ( \n" + +" SELECT mt.ID AS sub_tree_root_id, CAST(0 AS INT) AS tree_level, mt.parent_fk, mt.id \n" + +" FROM my_table mt \n" + +"), \n" + +"unused_cte AS ( SELECT 1 AS unUsedColumn ) \n" + +"SELECT sub_tree_root_id, tree_level, parent_fk, child_fk FROM tree_cte_nr; \n"; + + String withQuery = "SELECT * FROM v_my_nr_tree"; + int maxRetries = 6; + String[] expectedRowData = new String[]{ + "|1|0|null|1", + "|11|0|1|11", + "|111|0|11|111", + "|12|0|1|12", + "|121|0|12|121", + }; + String[] expectedColumnNames = new String[]{"SUB_TREE_ROOT_ID", "TREE_LEVEL", "PARENT_FK", "CHILD_FK"}; + String[] expectedColumnTypes = new String[]{"INTEGER", "INTEGER", "INTEGER", "INTEGER"}; + int expectedNumberOfRows = 5; + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + } + + private void 
testPersistentNonRecursiveTableInCreateViewDropAllObjects() throws Exception { + String setupSQL = "" + +"DROP ALL OBJECTS; \n" + +"CREATE TABLE my_table ( \n" + +" id INTEGER, \n" + +" parent_fk INTEGER \n" + +"); \n" + +" \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 1, NULL ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 11, 1 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 111, 11 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 12, 1 ); \n" + +"INSERT INTO my_table ( id, parent_fk) VALUES ( 121, 12 ); \n" + +" \n" + +"CREATE OR REPLACE VIEW v_my_nr_tree AS \n" + +"WITH tree_cte_nr (sub_tree_root_id, tree_level, parent_fk, child_fk) AS ( \n" + +" SELECT mt.ID AS sub_tree_root_id, CAST(0 AS INT) AS tree_level, mt.parent_fk, mt.id \n" + +" FROM my_table mt \n" + +"), \n" + +"unused_cte AS ( SELECT 1 AS unUsedColumn ) \n" + +"SELECT sub_tree_root_id, tree_level, parent_fk, child_fk FROM tree_cte_nr; \n"; + + String withQuery = "SELECT * FROM v_my_nr_tree"; + int maxRetries = 6; + String[] expectedRowData = new String[]{ + "|1|0|null|1", + "|11|0|1|11", + "|111|0|11|111", + "|12|0|1|12", + "|121|0|12|121", + }; + String[] expectedColumnNames = new String[]{"SUB_TREE_ROOT_ID", "TREE_LEVEL", "PARENT_FK", "CHILD_FK"}; + String[] expectedColumnTypes = new String[]{"INTEGER", "INTEGER", "INTEGER", "INTEGER"}; + int expectedNumberOfRows = 5; + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + } + + private void testPersistentRecursiveTableInCreateViewDropAllObjects() throws Exception { + String setupSQL = "--SET TRACE_LEVEL_SYSTEM_OUT 3;\n" + +"DROP ALL OBJECTS; \n" + +"CREATE TABLE my_tree ( \n" + +" id INTEGER, \n" + +" parent_fk INTEGER \n" + +"); \n" + +" \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 1, NULL ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 11, 1 ); \n" + +"INSERT INTO my_tree ( 
id, parent_fk) VALUES ( 111, 11 ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 12, 1 ); \n" + +"INSERT INTO my_tree ( id, parent_fk) VALUES ( 121, 12 ); \n" + +" \n" + +"CREATE OR REPLACE VIEW v_my_tree AS \n" + +"WITH RECURSIVE tree_cte (sub_tree_root_id, tree_level, parent_fk, child_fk) AS ( \n" + +" SELECT mt.ID AS sub_tree_root_id, CAST(0 AS INT) AS tree_level, mt.parent_fk, mt.id \n" + +" FROM my_tree mt \n" + +" UNION ALL \n" + +" SELECT sub_tree_root_id, mtc.tree_level + 1 AS tree_level, mtc.parent_fk, mt.id \n" + +" FROM my_tree mt \n" + +"INNER JOIN tree_cte mtc ON mtc.child_fk = mt.parent_fk \n" + +"), \n" + +"unused_cte AS ( SELECT 1 AS unUsedColumn ) \n" + +"SELECT sub_tree_root_id, tree_level, parent_fk, child_fk FROM tree_cte; \n"; + + String withQuery = "SELECT * FROM v_my_tree"; + int maxRetries = 4; + String[] expectedRowData = new String[]{"|1|0|null|1", + "|11|0|1|11", + "|111|0|11|111", + "|12|0|1|12", + "|121|0|12|121", + "|1|1|null|11", + "|11|1|1|111", + "|1|1|null|12", + "|12|1|1|121", + "|1|2|null|111", + "|1|2|null|121" + }; + String[] expectedColumnNames = new String[]{"SUB_TREE_ROOT_ID", "TREE_LEVEL", "PARENT_FK", "CHILD_FK"}; + String[] expectedColumnTypes = new String[]{"INTEGER", "INTEGER", "INTEGER", "INTEGER"}; + int expectedNumberOfRows = 11; + testRepeatedQueryWithSetup(maxRetries, expectedRowData, expectedColumnNames, expectedNumberOfRows, setupSQL, + withQuery, maxRetries - 1, expectedColumnTypes, false); + } +} diff --git a/h2/src/test/org/h2/test/db/TestPowerOff.java b/h2/src/test/org/h2/test/db/TestPowerOff.java index 9185f592fb..e1f5e67cac 100644 --- a/h2/src/test/org/h2/test/db/TestPowerOff.java +++ b/h2/src/test/org/h2/test/db/TestPowerOff.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,14 +14,14 @@ import org.h2.api.ErrorCode; import org.h2.engine.Database; -import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.JdbcUtils; /** * Tests simulated power off conditions. */ -public class TestPowerOff extends TestBase { +public class TestPowerOff extends TestDb { private static final String DB_NAME = "powerOff"; private String dir, url; @@ -34,14 +34,19 @@ public class TestPowerOff extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { if (config.big || config.googleAppEngine) { dir = getBaseDir(); url = DB_NAME; @@ -71,18 +76,18 @@ private void testLobCrash() throws SQLException { conn = getConnection(url); stat = conn.createStatement(); stat.execute("set write_delay 0"); - ((JdbcConnection) conn).setPowerOffCount(Integer.MAX_VALUE); - stat.execute("insert into test values(null, space(11000))"); - int max = Integer.MAX_VALUE - ((JdbcConnection) conn).getPowerOffCount(); + setPowerOffCount(conn, Integer.MAX_VALUE); + stat.execute("insert into test(data) values space(11000)"); + int max = Integer.MAX_VALUE - getPowerOffCount(conn); for (int i = 0; i < max + 10; i++) { conn.close(); conn = getConnection(url); stat = conn.createStatement(); - stat.execute("insert into test values(null, space(11000))"); + stat.execute("insert into test(data) values space(11000)"); stat.execute("set write_delay 0"); - ((JdbcConnection) conn).setPowerOffCount(i); + setPowerOffCount(conn, i); try { - stat.execute("insert 
into test values(null, space(11000))"); + stat.execute("insert into test(data) values space(11000)"); } catch (SQLException e) { // ignore } @@ -150,7 +155,7 @@ private void testCrash() throws SQLException { conn = getConnection(url); Statement stat = conn.createStatement(); stat.execute("SET WRITE_DELAY 0"); - ((JdbcConnection) conn).setPowerOffCount(random.nextInt(100)); + setPowerOffCount(conn, random.nextInt(100)); try { stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE TABLE TEST" + @@ -208,7 +213,7 @@ private void testMemoryTables() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); stat.execute("CHECKPOINT"); - ((JdbcConnection) conn).setPowerOffCount(1); + setPowerOffCount(conn, 1); try { stat.execute("INSERT INTO TEST VALUES(2, 'Hello')"); stat.execute("INSERT INTO TEST VALUES(3, 'Hello')"); @@ -218,7 +223,7 @@ private void testMemoryTables() throws SQLException { assertKnownException(e); } - ((JdbcConnection) conn).setPowerOffCount(0); + setPowerOffCount(conn, 0); try { conn.close(); } catch (SQLException e) { @@ -298,8 +303,7 @@ private int testRun(boolean init) throws SQLException { stat.execute("DROP TABLE TEST"); state = 0; if (init) { - maxPowerOffCount = Integer.MAX_VALUE - - ((JdbcConnection) conn).getPowerOffCount(); + maxPowerOffCount = Integer.MAX_VALUE - getPowerOffCount(conn); } conn.close(); } catch (SQLException e) { @@ -317,7 +321,7 @@ private int recoverAndCheckConsistency() throws SQLException { int state; Database.setInitialPowerOffCount(0); Connection conn = getConnection(url); - assertEquals(0, ((JdbcConnection) conn).getPowerOffCount()); + assertEquals(0, getPowerOffCount(conn)); Statement stat = conn.createStatement(); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getTables(null, null, "TEST", null); diff --git a/h2/src/test/org/h2/test/db/TestQueryCache.java b/h2/src/test/org/h2/test/db/TestQueryCache.java index a3d75322aa..476bc6519b 
100644 --- a/h2/src/test/org/h2/test/db/TestQueryCache.java +++ b/h2/src/test/org/h2/test/db/TestQueryCache.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,11 +12,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the query cache. */ -public class TestQueryCache extends TestBase { +public class TestQueryCache extends TestDb { /** * Run just this test. @@ -24,7 +25,7 @@ public class TestQueryCache extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -36,49 +37,74 @@ public void test() throws Exception { } private void test1() throws Exception { - Connection conn = getConnection("queryCache;QUERY_CACHE_SIZE=10"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar) " + - "as select x, space(100) from system_range(1, 1000)"); - PreparedStatement prep; - conn.prepareStatement("select count(*) from test t1, test t2"); - long time; - ResultSet rs; - long first = 0; - for (int i = 0; i < 4; i++) { - // this should both ensure results are not re-used - // stat.execute("set mode regular"); - // stat.execute("create table x()"); - // stat.execute("drop table x"); - time = System.currentTimeMillis(); - prep = conn.prepareStatement("select count(*) from test t1, test t2"); - prep.executeQuery(); - rs = stat.executeQuery("select count(*) from test t1, test t2"); - rs.next(); - int c = rs.getInt(1); - assertEquals(1000000, c); - time = System.currentTimeMillis() - time; - if (first == 0) { - first = time; - } else { - 
assertSmaller(time, first); + try (Connection conn = getConnection("queryCache;QUERY_CACHE_SIZE=10")) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int, name varchar)"); + PreparedStatement prep; + // query execution may be fast here but the parsing must be slow + StringBuilder queryBuilder = new StringBuilder("select count(*) from test t1 where \n"); + for (int i = 0; i < 1000; i++) { + if (i != 0) { + queryBuilder.append(" and "); + } + queryBuilder.append(" TIMESTAMP '2005-12-31 23:59:59' = TIMESTAMP '2005-12-31 23:59:59' "); } + String query = queryBuilder.toString(); + conn.prepareStatement(query); + int firstGreater = 0; + int firstSmaller = 0; + long time; + ResultSet rs; + long first = 0; + // 1000 iterations to warm up and avoid JIT effects + for (int i = 0; i < 1005; i++) { + // this should both ensure results are not re-used + // stat.execute("set mode regular"); + // stat.execute("create table x()"); + // stat.execute("drop table x"); + time = System.nanoTime(); + prep = conn.prepareStatement(query); + execute(prep); + prep.close(); + rs = stat.executeQuery(query); + rs.next(); + int c = rs.getInt(1); + rs.close(); + assertEquals(0, c); + time = System.nanoTime() - time; + if (i == 1000) { + // take from cache and do not close, + // so that next iteration will have a cache miss + prep = conn.prepareStatement(query); + } else if (i == 1001) { + first = time; + } else if (i > 1001) { + if (first > time) { + firstGreater++; + } else { + firstSmaller++; + } + } + } + // first prepare time must be always greater because of query cache, + // but JVM is too unpredictable to assert that, so just check that + // usually this is true + assertSmaller(firstSmaller, firstGreater); + stat.execute("drop table test"); } - stat.execute("drop table test"); - conn.close(); } private void testClearingCacheWithTableStructureChanges() throws Exception { - Connection conn = getConnection("queryCache;QUERY_CACHE_SIZE=10"); - 
assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, conn). - prepareStatement("SELECT * FROM TEST"); - Statement stat = conn.createStatement(); - stat.executeUpdate("CREATE TABLE TEST(col1 bigint, col2 varchar(255))"); - PreparedStatement prep = conn.prepareStatement("SELECT * FROM TEST"); - prep.close(); - stat.executeUpdate("DROP TABLE TEST"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, conn). - prepareStatement("SELECT * FROM TEST"); - conn.close(); + try (Connection conn = getConnection("queryCache;QUERY_CACHE_SIZE=10")) { + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, conn). + prepareStatement("SELECT * FROM TEST"); + Statement stat = conn.createStatement(); + stat.executeUpdate("CREATE TABLE TEST(col1 bigint, col2 varchar(255))"); + PreparedStatement prep = conn.prepareStatement("SELECT * FROM TEST"); + prep.close(); + stat.executeUpdate("DROP TABLE TEST"); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, conn). + prepareStatement("SELECT * FROM TEST"); + } } } diff --git a/h2/src/test/org/h2/test/db/TestReadOnly.java b/h2/src/test/org/h2/test/db/TestReadOnly.java index 2b3cb07f3a..84bc97b178 100644 --- a/h2/src/test/org/h2/test/db/TestReadOnly.java +++ b/h2/src/test/org/h2/test/db/TestReadOnly.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -18,13 +18,14 @@ import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Backup; import org.h2.tools.Server; /** * Test for the read-only database feature. */ -public class TestReadOnly extends TestBase { +public class TestReadOnly extends TestDb { /** * Run just this test. 
@@ -32,14 +33,19 @@ public class TestReadOnly extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { testReadOnlyInZip(); testReadOnlyTempTableResult(); testReadOnlyConnect(); @@ -66,17 +72,18 @@ private void testReadOnlyInZip() throws SQLException { "jdbc:h2:zip:"+dir+"/readonly.zip!/readonlyInZip", getUser(), getPassword()); conn.createStatement().execute("select * from test where id=1"); conn.close(); - Server server = Server.createTcpServer("-tcpPort", "9081", "-baseDir", dir); + Server server = Server.createTcpServer("-baseDir", dir); server.start(); + int port = server.getPort(); try { conn = getConnection( - "jdbc:h2:tcp://localhost:9081/zip:readonly.zip!/readonlyInZip", + "jdbc:h2:tcp://localhost:" + port + "/zip:readonly.zip!/readonlyInZip", getUser(), getPassword()); conn.createStatement().execute("select * from test where id=1"); conn.close(); FilePathZip2.register(); conn = getConnection( - "jdbc:h2:tcp://localhost:9081/zip2:readonly.zip!/readonlyInZip", + "jdbc:h2:tcp://localhost:" + port + "/zip2:readonly.zip!/readonlyInZip", getUser(), getPassword()); conn.createStatement().execute("select * from test where id=1"); conn.close(); @@ -129,7 +136,7 @@ private void testReadOnlyFiles(boolean setReadOnly) throws Exception { File f = File.createTempFile("test", "temp"); assertTrue(f.canWrite()); f.setReadOnly(); - assertTrue(!f.canWrite()); + assertFalse(f.canWrite()); f.delete(); f = File.createTempFile("test", "temp"); @@ -137,7 +144,7 @@ private void testReadOnlyFiles(boolean setReadOnly) throws Exception { r.write(1); f.setReadOnly(); r.close(); - assertTrue(!f.canWrite()); + assertFalse(f.canWrite()); f.delete(); 
deleteDb("readonlyFiles"); @@ -146,7 +153,7 @@ private void testReadOnlyFiles(boolean setReadOnly) throws Exception { stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); stat.execute("INSERT INTO TEST VALUES(2, 'World')"); - assertTrue(!conn.isReadOnly()); + assertFalse(conn.isReadOnly()); conn.close(); if (setReadOnly) { @@ -191,8 +198,8 @@ private void testReadOnlyConnect() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id identity)"); stat.execute("insert into test select x from system_range(1, 11)"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("readonlyConnect;ACCESS_MODE_DATA=r;OPEN_NEW=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("readonlyConnect;ACCESS_MODE_DATA=r;OPEN_NEW=TRUE")); conn.close(); deleteDb("readonlyConnect"); } diff --git a/h2/src/test/org/h2/test/db/TestRecursiveQueries.java b/h2/src/test/org/h2/test/db/TestRecursiveQueries.java index 7f27edac1a..2a8d27a360 100644 --- a/h2/src/test/org/h2/test/db/TestRecursiveQueries.java +++ b/h2/src/test/org/h2/test/db/TestRecursiveQueries.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,12 +9,14 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.Statement; +import java.sql.Types; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test recursive queries using WITH. */ -public class TestRecursiveQueries extends TestBase { +public class TestRecursiveQueries extends TestDb { /** * Run just this test. 
@@ -22,13 +24,14 @@ public class TestRecursiveQueries extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testWrongLinkLargeResult(); - testSimple(); + testSimpleUnionAll(); + testSimpleUnion(); } private void testWrongLinkLargeResult() throws Exception { @@ -59,7 +62,7 @@ private void testWrongLinkLargeResult() throws Exception { deleteDb("recursiveQueries"); } - private void testSimple() throws Exception { + private void testSimpleUnionAll() throws Exception { deleteDb("recursiveQueries"); Connection conn = getConnection("recursiveQueries"); Statement stat; @@ -96,13 +99,13 @@ private void testSimple() throws Exception { assertFalse(rs.next()); prep = conn.prepareStatement("with recursive t(n) as " + - "(select @start union all select n+@inc from t where n<@end) " + + "(select @start union all select n+@inc from t where n<@end_index) " + "select * from t"); - prep2 = conn.prepareStatement("select @start:=?, @inc:=?, @end:=?"); + prep2 = conn.prepareStatement("select @start:=?, @inc:=?, @end_index:=?"); prep2.setInt(1, 10); prep2.setInt(2, 2); prep2.setInt(3, 14); - prep2.execute(); + assertTrue(prep2.executeQuery().next()); rs = prep.executeQuery(); assertTrue(rs.next()); assertEquals(10, rs.getInt(1)); @@ -115,7 +118,7 @@ private void testSimple() throws Exception { prep2.setInt(1, 100); prep2.setInt(2, 3); prep2.setInt(3, 103); - prep2.execute(); + assertTrue(prep2.executeQuery().next()); rs = prep.executeQuery(); assertTrue(rs.next()); assertEquals(100, rs.getInt(1)); @@ -123,6 +126,56 @@ private void testSimple() throws Exception { assertEquals(103, rs.getInt(1)); assertFalse(rs.next()); + prep = conn.prepareStatement("with recursive t(n) as " + + "(select ? union all select n+? 
from t where n getConnection("rights")); } } @@ -215,7 +349,7 @@ private void testGetTables() throws SQLException { stat.execute("CREATE USER IF NOT EXISTS TEST PASSWORD 'TEST'"); stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("GRANT ALL ON TEST TO TEST"); + stat.execute("GRANT ALL ON TABLE TEST TO TEST"); Connection conn2 = getConnection("rights", "TEST", getPassword("TEST")); DatabaseMetaData meta = conn2.getMetaData(); meta.getTables(null, null, "%", new String[]{"TABLE", "VIEW", "SEQUENCE"}); @@ -248,7 +382,7 @@ private void testSchemaRenameUser() throws SQLException { deleteDb("rights"); Connection conn = getConnection("rights"); stat = conn.createStatement(); - stat.execute("create user test password '' admin"); + stat.execute("create user test password ''"); stat.execute("create schema b authorization test"); stat.execute("create table b.test(id int)"); stat.execute("alter user test rename to test1"); @@ -256,12 +390,9 @@ private void testSchemaRenameUser() throws SQLException { conn = getConnection("rights"); stat = conn.createStatement(); stat.execute("select * from b.test"); - assertThrows(ErrorCode.CANNOT_DROP_2, stat). - execute("alter user test1 admin false"); assertThrows(ErrorCode.CANNOT_DROP_2, stat). 
execute("drop user test1"); - stat.execute("drop schema b"); - stat.execute("alter user test1 admin false"); + stat.execute("drop schema b cascade"); stat.execute("drop user test1"); conn.close(); } @@ -293,14 +424,16 @@ private void testSchemaAdminRole() throws SQLException { "(ID INT PRIMARY KEY, NAME VARCHAR)"); conn.close(); + String url = "rights"; + // try and fail (no rights yet) - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, "SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( "CREATE SCHEMA SCHEMA_RIGHT_TEST_WILL_FAIL"); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( "ALTER SCHEMA SCHEMA_RIGHT_TEST_EXISTS RENAME TO SCHEMA_RIGHT_TEST_WILL_FAIL"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat).execute( + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat).execute( "DROP SCHEMA SCHEMA_RIGHT_TEST_EXISTS"); conn.close(); @@ -311,7 +444,7 @@ private void testSchemaAdminRole() throws SQLException { conn.close(); // try and succeed - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, "SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); // should be able to create a schema and manipulate tables on that @@ -323,7 +456,7 @@ private void testSchemaAdminRole() throws SQLException { executeSuccess("INSERT INTO S.TEST (ID, NAME) VALUES (42, 'Adams')"); executeSuccess("UPDATE S.TEST Set NAME = 'Douglas'"); executeSuccess("DELETE FROM S.TEST"); - executeSuccess("DROP SCHEMA S"); + executeSuccess("DROP SCHEMA S CASCADE"); // ...and on other schemata executeSuccess("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); @@ -341,14 +474,14 @@ private void testSchemaAdminRole() throws SQLException { conn.close(); // try again and fail - conn = getConnection("rights;LOG=2", "SCHEMA_CREATOR", getPassword("xyz")); + conn = getConnection(url, 
"SCHEMA_CREATOR", getPassword("xyz")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). execute("CREATE SCHEMA SCHEMA_RIGHT_TEST"); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). execute("ALTER SCHEMA SCHEMA_RIGHT_TEST_EXISTS " + "RENAME TO SCHEMA_RIGHT_TEST_RENAMED"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). execute("DROP SCHEMA SCHEMA_RIGHT_TEST_EXISTS"); assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). execute("CREATE TABLE SCHEMA_RIGHT_TEST_EXISTS.TEST" + @@ -363,6 +496,61 @@ private void testSchemaAdminRole() throws SQLException { conn.close(); } + private void testTableRename() throws SQLException { + if (config.memory) { + return; + } + deleteDb("rights"); + Connection conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("create user test password '' admin"); + stat.execute("create schema b"); + stat.execute("create table b.t1(id int)"); + stat.execute("grant select on b.t1 to test"); + stat.execute("alter table b.t1 rename to b.t2"); + conn.close(); + conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("drop user test"); + conn.close(); + } + + private void testSchemaRename() throws SQLException { + if (config.memory) { + return; + } + deleteDb("rights"); + Connection conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("create user test password '' admin"); + stat.execute("create schema b"); + stat.execute("grant select on schema b to test"); + stat.execute("alter schema b rename to c"); + conn.close(); + conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("drop user test"); + conn.close(); + } + + private void testSchemaDrop() throws SQLException { + if (config.memory) { + return; + } + deleteDb("rights"); + Connection conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("create user test password '' 
admin"); + stat.execute("create schema b"); + stat.execute("grant select on schema b to test"); + stat.execute("drop schema b cascade"); + conn.close(); + conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("drop user test"); + conn.close(); + } + private void testAccessRights() throws SQLException { if (config.memory) { return; @@ -385,7 +573,8 @@ private void testAccessRights() throws SQLException { executeSuccess("GRANT SELECT, INSERT, UPDATE ON TEST TO PASS_READER"); conn.close(); - conn = getConnection("rights;LOG=2", "PASS_READER", getPassword("abc")); + String url = "rights"; + conn = getConnection(url, "PASS_READER", getPassword("abc")); stat = conn.createStatement(); executeSuccess("SELECT * FROM PASS_NAME"); executeSuccess("SELECT * FROM (SELECT * FROM PASS_NAME)"); @@ -399,7 +588,7 @@ private void testAccessRights() throws SQLException { executeError("SELECT * FROM (SELECT * FROM PASS)"); assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). execute("CREATE VIEW X AS SELECT * FROM PASS_READER"); - assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat). execute("CREATE VIEW X AS SELECT * FROM PASS_NAME"); conn.close(); @@ -458,7 +647,7 @@ private void testAccessRights() throws SQLException { } catch (SQLException e) { assertKnownException(e); } - conn = getConnection("rights;LOG=2", "TEST", getPassword("def")); + conn = getConnection(url, "TEST", getPassword("def")); stat = conn.createStatement(); assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, stat). 
@@ -525,6 +714,115 @@ private void testTableType(Connection conn, String type) throws SQLException { executeSuccess("DROP TABLE TEST"); } + private void testDropTable() throws SQLException { + deleteDb("rights"); + Connection conn = getConnection("rights"); + stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT)"); + stat.execute("CREATE USER U PASSWORD '1'"); + stat.execute("GRANT ALL PRIVILEGES ON TEST TO U"); + Connection conn2 = getConnection("rights", "U", getPassword("1")); + conn.close(); + stat = conn2.createStatement(); + assertEquals(1, stat.executeUpdate("INSERT INTO TEST VALUES 1")); + assertEquals(1, stat.executeUpdate("UPDATE TEST SET ID = 2 WHERE ID = 1")); + assertEquals(1, stat.executeUpdate("DELETE FROM TEST WHERE ID = 2")); + executeError("DROP TABLE TEST"); + conn2.close(); + } + + private void testSchemaOwner() throws SQLException { + deleteDb("rights"); + Connection connAdmin = getConnection("rights"); + Statement statAdmin = connAdmin.createStatement(); + statAdmin.execute("CREATE USER SCHEMA_ADMIN PASSWORD '1'"); + statAdmin.execute("GRANT ALTER ANY SCHEMA TO SCHEMA_ADMIN"); + Connection connSchemaAdmin = getConnection("rights", "SCHEMA_ADMIN", getPassword("1")); + Statement statSchemaAdmin = connSchemaAdmin.createStatement(); + statAdmin.execute("CREATE USER SCHEMA_OWNER PASSWORD '1'"); + Connection connSchemaOwner = getConnection("rights", "SCHEMA_OWNER", getPassword("1")); + Statement statSchemaOwner = connSchemaOwner.createStatement(); + statAdmin.execute("CREATE USER OTHER PASSWORD '1'"); + Connection connOther = getConnection("rights", "OTHER", getPassword("1")); + Statement statOther = connOther.createStatement(); + testSchemaOwner(statAdmin, statSchemaAdmin, statSchemaOwner, statOther, "SCHEMA_OWNER"); + statAdmin.execute("CREATE ROLE SCHEMA_OWNER_ROLE"); + statAdmin.execute("GRANT SCHEMA_OWNER_ROLE TO SCHEMA_OWNER"); + testSchemaOwner(statAdmin, statSchemaAdmin, statSchemaOwner, statOther, "SCHEMA_OWNER_ROLE"); + 
testAdminAndSchemaOwner(statAdmin, statSchemaAdmin); + statAdmin.close(); + statSchemaAdmin.close(); + statSchemaOwner.close(); + } + + private void testSchemaOwner(Statement statAdmin, Statement statSchemaAdmin, Statement statSchemaOwner, + Statement statOther, String authorization) throws SQLException { + executeSuccessErrorAdmin(statSchemaAdmin, statSchemaOwner, "CREATE SCHEMA S AUTHORIZATION " + authorization); + executeSuccessError(statSchemaOwner, statOther, "CREATE DOMAIN S.D INT"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D ADD CONSTRAINT S.D_C CHECK (VALUE > 0)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D DROP CONSTRAINT S.D_C"); + executeSuccessError(statSchemaOwner, statOther, "ALTER DOMAIN S.D RENAME TO S.D2"); + executeSuccessError(statSchemaOwner, statOther, "DROP DOMAIN S.D2"); + executeSuccessError(statSchemaOwner, statOther, "CREATE CONSTANT S.C VALUE 1"); + executeSuccessError(statSchemaOwner, statOther, "DROP CONSTANT S.C"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE ALIAS S.F FOR 'java.lang.Math.max(long,long)'"); + executeSuccessError(statSchemaOwner, statOther, "DROP ALIAS S.F"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, + "CREATE AGGREGATE S.A FOR \'" + TestFunctions.MedianStringType.class.getName() + '\''); + executeSuccessError(statSchemaOwner, statOther, "DROP AGGREGATE S.A"); + executeSuccessError(statSchemaOwner, statOther, "CREATE SEQUENCE S.S"); + executeSuccessError(statSchemaOwner, statOther, "ALTER SEQUENCE S.S RESTART WITH 2"); + executeSuccessError(statSchemaOwner, statOther, "DROP SEQUENCE S.S"); + executeSuccessError(statSchemaOwner, statOther, "CREATE VIEW S.V AS SELECT 1"); + executeSuccessError(statSchemaOwner, statOther, "ALTER VIEW S.V RECOMPILE"); + executeSuccessError(statSchemaOwner, statOther, "ALTER VIEW S.V RENAME TO S.V2"); + executeSuccessError(statSchemaOwner, statOther, "DROP VIEW S.V2"); + executeSuccessError(statSchemaOwner, 
statOther, "CREATE TABLE S.T(ID INT)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T ADD V INT"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T ADD CONSTRAINT S.T_C UNIQUE(V)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T DROP CONSTRAINT S.T_C"); + executeSuccessError(statSchemaOwner, statOther, "CREATE UNIQUE INDEX S.I ON S.T(V)"); + executeSuccessError(statSchemaOwner, statOther, "ALTER INDEX S.I RENAME TO S.I2"); + executeSuccessError(statSchemaOwner, statOther, "DROP INDEX S.I2"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, + "CREATE TRIGGER S.G BEFORE INSERT ON S.T FOR EACH ROW CALL \'" + TestTrigger.class.getName() + '\''); + executeSuccessError(statSchemaOwner, statOther, "DROP TRIGGER S.G"); + executeSuccessError(statSchemaOwner, statOther, "GRANT SELECT ON S.T TO OTHER"); + executeSuccessError(statSchemaOwner, statOther, "REVOKE SELECT ON S.T FROM OTHER"); + executeSuccessError(statSchemaOwner, statOther, "ALTER TABLE S.T RENAME TO S.T2"); + executeSuccessError(statSchemaOwner, statOther, "DROP TABLE S.T2"); + executeSuccessError(statSchemaOwner, statOther, "DROP SCHEMA S"); + } + + private void testAdminAndSchemaOwner(Statement statAdmin, Statement statSchemaAdmin) throws SQLException { + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "GRANT ALTER ANY SCHEMA TO OTHER"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "REVOKE ALTER ANY SCHEMA FROM OTHER"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE USER U PASSWORD '1'"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "CREATE ROLE R"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "GRANT R TO U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "REVOKE R FROM U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "DROP USER U"); + executeSuccessErrorAdmin(statAdmin, statSchemaAdmin, "DROP ROLE R"); + } + + public static class TestTrigger implements Trigger { + + 
@Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + } + + } + + private void executeSuccessErrorAdmin(Statement success, Statement error, String sql) throws SQLException { + assertThrows(ErrorCode.ADMIN_RIGHTS_REQUIRED, error).execute(sql); + success.execute(sql); + } + + private void executeSuccessError(Statement success, Statement error, String sql) throws SQLException { + assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, error).execute(sql); + success.execute(sql); + } + private void executeError(String sql) throws SQLException { assertThrows(ErrorCode.NOT_ENOUGH_RIGHTS_FOR_1, stat).execute(sql); } diff --git a/h2/src/test/org/h2/test/db/TestRunscript.java b/h2/src/test/org/h2/test/db/TestRunscript.java index 88131de14e..eeba97a95e 100644 --- a/h2/src/test/org/h2/test/db/TestRunscript.java +++ b/h2/src/test/org/h2/test/db/TestRunscript.java @@ -1,20 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Types; +import java.util.Collections; import org.h2.api.ErrorCode; import org.h2.api.Trigger; +import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.ChangeFileEncryption; import org.h2.tools.Recover; import org.h2.util.Task; @@ -22,7 +29,7 @@ /** * Tests the RUNSCRIPT SQL statement. 
*/ -public class TestRunscript extends TestBase implements Trigger { +public class TestRunscript extends TestDb implements Trigger { /** * Run just this test. @@ -30,7 +37,13 @@ public class TestRunscript extends TestBase implements Trigger { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + org.h2.test.TestAll config = new org.h2.test.TestAll(); + config.traceLevelFile = 1; + System.out.println(config); + TestBase test = createCaller(); + test.runTest(config); +// TestBase.createCaller().init().testFromMain(); } @Override @@ -51,6 +64,8 @@ public void test() throws Exception { testCancelScript(); testEncoding(); testClobPrimaryKey(); + testTruncateLargeLength(); + testVariableBinary(); deleteDb("runscript"); } @@ -59,7 +74,7 @@ private void testDropReferencedUserDefinedFunction() throws Exception { Connection conn; conn = getConnection("runscript"); Statement stat = conn.createStatement(); - stat.execute("create alias int_decode for \"java.lang.Integer.decode\""); + stat.execute("create alias int_decode for 'java.lang.Integer.decode'"); stat.execute("create table test(x varchar, y int as int_decode(x))"); stat.execute("script simple drop to '" + getBaseDir() + "/backup.sql'"); @@ -93,15 +108,15 @@ private void testScriptExcludeSchema() throws Exception { stat.execute("script schema include_schema1"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The schema 'exclude_schema1' should not be present in the script", - rs.getString(1).indexOf("exclude_schema1".toUpperCase()) == -1); + assertFalse("The schema 'exclude_schema1' should not be present in the script", + rs.getString(1).contains("exclude_schema1".toUpperCase())); } rs.close(); stat.execute("create schema include_schema2"); stat.execute("script nosettings schema include_schema1, include_schema2"); rs = stat.getResultSet(); - // user and one row per schema = 3 - 
assertResultRowCount(3, rs); + // version, user, and one row per schema = 4 + assertResultRowCount(4, rs); rs.close(); conn.close(); } @@ -122,29 +137,29 @@ private void testScriptExcludeTable() throws Exception { stat.execute("script table a.test1"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The table 'a.test2' should not be present in the script", - rs.getString(1).indexOf("a.test2".toUpperCase()) == -1); - assertTrue("The table 'b.test1' should not be present in the script", - rs.getString(1).indexOf("b.test1".toUpperCase()) == -1); - assertTrue("The table 'b.test2' should not be present in the script", - rs.getString(1).indexOf("b.test2".toUpperCase()) == -1); + assertFalse("The table 'a.test2' should not be present in the script", + rs.getString(1).contains("a.test2".toUpperCase())); + assertFalse("The table 'b.test1' should not be present in the script", + rs.getString(1).contains("b.test1".toUpperCase())); + assertFalse("The table 'b.test2' should not be present in the script", + rs.getString(1).contains("b.test2".toUpperCase())); } rs.close(); stat.execute("set schema b"); stat.execute("script table test1"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The table 'a.test1' should not be present in the script", - rs.getString(1).indexOf("a.test1".toUpperCase()) == -1); - assertTrue("The table 'a.test2' should not be present in the script", - rs.getString(1).indexOf("a.test2".toUpperCase()) == -1); - assertTrue("The table 'b.test2' should not be present in the script", - rs.getString(1).indexOf("b.test2".toUpperCase()) == -1); + assertFalse("The table 'a.test1' should not be present in the script", + rs.getString(1).contains("a.test1".toUpperCase())); + assertFalse("The table 'a.test2' should not be present in the script", + rs.getString(1).contains("a.test2".toUpperCase())); + assertFalse("The table 'b.test2' should not be present in the script", + rs.getString(1).contains("b.test2".toUpperCase())); } stat.execute("script 
nosettings table a.test1, test2"); rs = stat.getResultSet(); - // user, schemas 'a' & 'b' and 2 rows per table = 7 - assertResultRowCount(7, rs); + // version, user, schemas 'a' & 'b', and 2 rows per table = 7 + assertResultRowCount(8, rs); rs.close(); conn.close(); } @@ -158,14 +173,14 @@ private void testScriptExcludeFunctionAlias() throws Exception { stat.execute("create schema a"); stat.execute("create schema b"); stat.execute("create schema c"); - stat.execute("create alias a.int_decode for \"java.lang.Integer.decode\""); + stat.execute("create alias a.int_decode for 'java.lang.Integer.decode'"); stat.execute("create table a.test(x varchar, y int as a.int_decode(x))"); stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The function alias 'int_decode' " + + assertFalse("The function alias 'int_decode' " + "should not be present in the script", - rs.getString(1).indexOf("int_decode".toUpperCase()) == -1); + rs.getString(1).contains("int_decode".toUpperCase())); } rs.close(); conn.close(); @@ -186,9 +201,9 @@ private void testScriptExcludeConstant() throws Exception { stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The constant 'default_email' " + + assertFalse("The constant 'default_email' " + "should not be present in the script", - rs.getString(1).indexOf("default_email".toUpperCase()) == -1); + rs.getString(1).contains("default_email".toUpperCase())); } rs.close(); conn.close(); @@ -207,8 +222,8 @@ private void testScriptExcludeSequence() throws Exception { stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The sequence 'seq_id' should not be present in the script", - rs.getString(1).indexOf("seq_id".toUpperCase()) == -1); + assertFalse("The sequence 'seq_id' should not be present in the script", + rs.getString(1).contains("seq_id".toUpperCase())); } rs.close(); conn.close(); @@ -229,18 +244,18 @@ private void 
testScriptExcludeConstraint() throws Exception { stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The sequence 'unique_constraint' " + + assertFalse("The sequence 'unique_constraint' " + "should not be present in the script", - rs.getString(1).indexOf("unique_constraint".toUpperCase()) == -1); + rs.getString(1).contains("unique_constraint".toUpperCase())); } rs.close(); stat.execute("create table a.test2(x varchar, y int)"); stat.execute("script table a.test2"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The sequence 'unique_constraint' " + + assertFalse("The sequence 'unique_constraint' " + "should not be present in the script", - rs.getString(1).indexOf("unique_constraint".toUpperCase()) == -1); + rs.getString(1).contains("unique_constraint".toUpperCase())); } rs.close(); conn.close(); @@ -261,16 +276,16 @@ private void testScriptExcludeTrigger() throws Exception { stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The trigger 'trigger_insert' should not be present in the script", - rs.getString(1).indexOf("trigger_insert".toUpperCase()) == -1); + assertFalse("The trigger 'trigger_insert' should not be present in the script", + rs.getString(1).contains("trigger_insert".toUpperCase())); } rs.close(); stat.execute("create table a.test2(x varchar, y int)"); stat.execute("script table a.test2"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The trigger 'trigger_insert' should not be present in the script", - rs.getString(1).indexOf("trigger_insert".toUpperCase()) == -1); + assertFalse("The trigger 'trigger_insert' should not be present in the script", + rs.getString(1).contains("trigger_insert".toUpperCase())); } rs.close(); conn.close(); @@ -294,8 +309,8 @@ private void testScriptExcludeRight() throws Exception { stat.execute("script schema b"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The grant to 'USER_A1' should not be present in 
the script", - rs.getString(1).indexOf("to USER_A1".toUpperCase()) == -1); + assertFalse("The grant to 'USER_A1' should not be present in the script", + rs.getString(1).contains("to USER_A1".toUpperCase())); } rs.close(); stat.execute("create user USER_A2 password 'test'"); @@ -304,10 +319,10 @@ private void testScriptExcludeRight() throws Exception { stat.execute("script table a.test2"); rs = stat.getResultSet(); while (rs.next()) { - assertTrue("The grant to 'USER_A1' should not be present in the script", - rs.getString(1).indexOf("to USER_A1".toUpperCase()) == -1); - assertTrue("The grant to 'USER_B1' should not be present in the script", - rs.getString(1).indexOf("to USER_B1".toUpperCase()) == -1); + assertFalse("The grant to 'USER_A1' should not be present in the script", + rs.getString(1).contains("to USER_A1".toUpperCase())); + assertFalse("The grant to 'USER_B1' should not be present in the script", + rs.getString(1).contains("to USER_B1".toUpperCase())); } rs.close(); conn.close(); @@ -324,6 +339,10 @@ private void testRunscriptFromClasspath() throws Exception { } private void testCancelScript() throws Exception { + if (config.ci) { + // fails regularly under Travis, not sure why + return; + } deleteDb("runscript"); Connection conn; conn = getConnection("runscript"); @@ -347,7 +366,7 @@ public void call() throws SQLException { Thread.sleep(200); stat.cancel(); SQLException e = (SQLException) task.getException(); - assertTrue(e != null); + assertNotNull(e); assertEquals(ErrorCode.STATEMENT_WAS_CANCELED, e.getErrorCode()); stat.execute("set throttle 1000"); @@ -361,9 +380,10 @@ public void call() throws SQLException { } }; task.execute(); - Thread.sleep(100); + Thread.sleep(200); stat.cancel(); e = (SQLException) task.getException(); + assertNotNull(e); assertEquals(ErrorCode.STATEMENT_WAS_CANCELED, e.getErrorCode()); conn.close(); @@ -413,7 +433,7 @@ private void testClobPrimaryKey() throws SQLException { stat.execute("create table test(id int not null, 
data clob) " + "as select 1, space(4100)"); // the primary key for SYSTEM_LOB_STREAM used to be named like this - stat.execute("create primary key primary_key_e on test(id)"); + stat.execute("alter table test add constraint primary_key_e primary key(id)"); stat.execute("script to '" + getBaseDir() + "/backup.sql'"); conn.close(); deleteDb("runscript"); @@ -436,8 +456,7 @@ private void test(boolean password) throws SQLException { stat1.execute("create table test2(id int primary key) as " + "select x from system_range(1, 5000)"); stat1.execute("create sequence testSeq start with 100 increment by 10"); - stat1.execute("create alias myTest for \"" + - getClass().getName() + ".test\""); + stat1.execute("create alias myTest for '" + getClass().getName() + ".test'"); stat1.execute("create trigger myTrigger before insert " + "on test nowait call \"" + getClass().getName() + "\""); stat1.execute("create view testView as select * " + @@ -456,7 +475,7 @@ private void test(boolean password) throws SQLException { stat1.execute("grant all on testSchema.child to testUser"); stat1.execute("grant select, insert on testSchema.parent to testRole"); stat1.execute("grant testRole to testUser"); - stat1.execute("create table blob (value blob)"); + stat1.execute("create table blob (v blob)"); PreparedStatement prep = conn1.prepareStatement( "insert into blob values (?)"); prep.setBytes(1, new byte[65536]); @@ -529,7 +548,52 @@ private void test(boolean password) throws SQLException { deleteDb("runscriptRestoreRecover"); FileUtils.delete(getBaseDir() + "/backup.2.sql"); FileUtils.delete(getBaseDir() + "/backup.3.sql"); + FileUtils.delete(getBaseDir() + "/runscript.h2.sql"); + + } + + private void testTruncateLargeLength() throws Exception { + deleteDb("runscript"); + Connection conn; + Statement stat; + Files.write(Paths.get(getBaseDir() + "/backup.sql"), + Collections.singleton("CREATE TABLE TEST(V VARCHAR(2147483647))"), // + StandardOpenOption.CREATE, 
StandardOpenOption.TRUNCATE_EXISTING); + conn = getConnection("runscript"); + stat = conn.createStatement(); + assertThrows(ErrorCode.INVALID_VALUE_PRECISION, stat) + .execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql'"); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql' QUIRKS_MODE"); + assertEquals(Constants.MAX_STRING_LENGTH, stat.executeQuery("TABLE TEST").getMetaData().getPrecision(1)); + conn.close(); + deleteDb("runscript"); + FileUtils.delete(getBaseDir() + "/backup.sql"); + } + private void testVariableBinary() throws SQLException { + deleteDb("runscript"); + Connection conn; + Statement stat; + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(B BINARY)"); + assertEquals(Types.BINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + stat.execute("SCRIPT TO '" + getBaseDir() + "/backup.sql'"); + conn.close(); + deleteDb("runscript"); + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql'"); + assertEquals(Types.BINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + conn.close(); + deleteDb("runscript"); + conn = getConnection("runscript"); + stat = conn.createStatement(); + stat.execute("RUNSCRIPT FROM '" + getBaseDir() + "/backup.sql' VARIABLE_BINARY"); + assertEquals(Types.VARBINARY, stat.executeQuery("TABLE TEST").getMetaData().getColumnType(1)); + conn.close(); + deleteDb("runscript"); + FileUtils.delete(getBaseDir() + "/backup.sql"); } @Override diff --git a/h2/src/test/org/h2/test/db/TestSQLInjection.java b/h2/src/test/org/h2/test/db/TestSQLInjection.java index 4a7e2e538c..8cb9dcaec6 100644 --- a/h2/src/test/org/h2/test/db/TestSQLInjection.java +++ b/h2/src/test/org/h2/test/db/TestSQLInjection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -13,11 +13,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the ALLOW_LITERALS feature (protection against SQL injection). */ -public class TestSQLInjection extends TestBase { +public class TestSQLInjection extends TestDb { private Connection conn; private Statement stat; @@ -28,14 +29,19 @@ public class TestSQLInjection extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.reopen) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { deleteDb("sqlInjection"); reconnect("sqlInjection"); stat.execute("DROP TABLE IF EXISTS USERS"); diff --git a/h2/src/test/org/h2/test/db/TestScript.java b/h2/src/test/org/h2/test/db/TestScript.java deleted file mode 100644 index 21de095988..0000000000 --- a/h2/src/test/org/h2/test/db/TestScript.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; -import java.io.PrintStream; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Random; - -import org.h2.test.TestAll; -import org.h2.test.TestBase; -import org.h2.util.New; -import org.h2.util.StringUtils; - -/** - * This test runs a SQL script file and compares the output with the expected - * output. - */ -public class TestScript extends TestBase { - - private static final String FILENAME = "org/h2/test/testScript.sql"; - - private boolean failFast; - - private boolean reconnectOften; - private Connection conn; - private Statement stat; - private LineNumberReader in; - private int line; - private PrintStream out; - private final ArrayList result = New.arrayList(); - private String putBack; - private StringBuilder errors; - private ArrayList statements; - - private Random random = new Random(1); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - /** - * Get all SQL statements of this file. 
- * - * @param conf the configuration - * @return the list of statements - */ - public ArrayList getAllStatements(TestAll conf) throws Exception { - config = conf; - statements = New.arrayList(); - test(); - return statements; - } - - @Override - public void test() throws Exception { - if (config.networked && config.big) { - return; - } - reconnectOften = false; - if (!config.memory) { - if (config.big) { - reconnectOften = true; - } - } - testScript(); - deleteDb("script"); - } - - private void testScript() throws Exception { - deleteDb("script"); - String outFile = "test.out.txt"; - String inFile = FILENAME; - conn = getConnection("script"); - stat = conn.createStatement(); - out = new PrintStream(new FileOutputStream(outFile)); - errors = new StringBuilder(); - testFile(inFile); - conn.close(); - out.close(); - if (errors.length() > 0) { - throw new Exception("errors:\n" + errors.toString()); - } - // new File(outFile).delete(); - } - - private String readLine() throws IOException { - if (putBack != null) { - String s = putBack; - putBack = null; - return s; - } - while (true) { - String s = in.readLine(); - if (s == null) { - return s; - } - s = s.trim(); - if (s.length() > 0) { - return s; - } - } - } - - private void testFile(String inFile) throws Exception { - InputStream is = getClass().getClassLoader().getResourceAsStream(inFile); - in = new LineNumberReader(new InputStreamReader(is, "Cp1252")); - StringBuilder buff = new StringBuilder(); - while (true) { - String sql = readLine(); - if (sql == null) { - break; - } - if (sql.startsWith("--")) { - write(sql); - } else if (sql.startsWith(">")) { - // do nothing - } else if (sql.endsWith(";")) { - write(sql); - buff.append(sql.substring(0, sql.length() - 1)); - sql = buff.toString(); - buff = new StringBuilder(); - process(sql); - } else { - write(sql); - buff.append(sql); - buff.append('\n'); - } - } - } - - private boolean containsTempTables() throws SQLException { - ResultSet rs = 
conn.getMetaData().getTables(null, null, null, - new String[] { "TABLE" }); - while (rs.next()) { - String sql = rs.getString("SQL"); - if (sql != null) { - if (sql.contains("TEMPORARY")) { - return true; - } - } - } - return false; - } - - private void process(String sql) throws Exception { - if (reconnectOften) { - if (!containsTempTables()) { - boolean autocommit = conn.getAutoCommit(); - if (autocommit && random.nextInt(10) < 1) { - // reconnect 10% of the time - conn.close(); - conn = getConnection("script"); - conn.setAutoCommit(autocommit); - stat = conn.createStatement(); - } - } - } - if (statements != null) { - statements.add(sql); - } - if (sql.indexOf('?') == -1) { - processStatement(sql); - } else { - String param = readLine(); - write(param); - if (!param.equals("{")) { - throw new AssertionError("expected '{', got " + param + " in " + sql); - } - try { - PreparedStatement prep = conn.prepareStatement(sql); - int count = 0; - while (true) { - param = readLine(); - write(param); - if (param.startsWith("}")) { - break; - } - count += processPrepared(sql, prep, param); - } - writeResult(sql, "update count: " + count, null); - } catch (SQLException e) { - writeException(sql, e); - } - } - write(""); - } - - private static void setParameter(PreparedStatement prep, int i, String param) - throws SQLException { - if (param.equalsIgnoreCase("null")) { - param = null; - } - prep.setString(i, param); - } - - private int processPrepared(String sql, PreparedStatement prep, String param) - throws Exception { - try { - StringBuilder buff = new StringBuilder(); - int index = 0; - for (int i = 0; i < param.length(); i++) { - char c = param.charAt(i); - if (c == ',') { - setParameter(prep, ++index, buff.toString()); - buff = new StringBuilder(); - } else if (c == '"') { - while (true) { - c = param.charAt(++i); - if (c == '"') { - break; - } - buff.append(c); - } - } else if (c > ' ') { - buff.append(c); - } - } - if (buff.length() > 0) { - setParameter(prep, ++index, 
buff.toString()); - } - if (prep.execute()) { - writeResultSet(sql, prep.getResultSet()); - return 0; - } - return prep.getUpdateCount(); - } catch (SQLException e) { - writeException(sql, e); - return 0; - } - } - - private int processStatement(String sql) throws Exception { - try { - if (stat.execute(sql)) { - writeResultSet(sql, stat.getResultSet()); - } else { - int count = stat.getUpdateCount(); - writeResult(sql, count < 1 ? "ok" : "update count: " + count, null); - } - } catch (SQLException e) { - writeException(sql, e); - } - return 0; - } - - private static String formatString(String s) { - if (s == null) { - return "null"; - } - s = StringUtils.replaceAll(s, "\r\n", "\n"); - s = s.replace('\n', ' '); - s = StringUtils.replaceAll(s, " ", " "); - while (true) { - String s2 = StringUtils.replaceAll(s, " ", " "); - if (s2.length() == s.length()) { - break; - } - s = s2; - } - return s; - } - - private void writeResultSet(String sql, ResultSet rs) throws Exception { - boolean ordered = StringUtils.toLowerEnglish(sql).contains("order by"); - ResultSetMetaData meta = rs.getMetaData(); - int len = meta.getColumnCount(); - int[] max = new int[len]; - String[] head = new String[len]; - for (int i = 0; i < len; i++) { - String label = formatString(meta.getColumnLabel(i + 1)); - max[i] = label.length(); - head[i] = label; - } - result.clear(); - while (rs.next()) { - String[] row = new String[len]; - for (int i = 0; i < len; i++) { - String data = formatString(rs.getString(i + 1)); - if (max[i] < data.length()) { - max[i] = data.length(); - } - row[i] = data; - } - result.add(row); - } - rs.close(); - writeResult(sql, format(head, max), null); - writeResult(sql, format(null, max), null); - String[] array = new String[result.size()]; - for (int i = 0; i < result.size(); i++) { - array[i] = format(result.get(i), max); - } - if (!ordered) { - sort(array); - } - int i = 0; - for (; i < array.length; i++) { - writeResult(sql, array[i], null); - } - writeResult(sql, 
(ordered ? "rows (ordered): " : "rows: ") + i, null); - } - - private static String format(String[] row, int[] max) { - int length = max.length; - StringBuilder buff = new StringBuilder(); - for (int i = 0; i < length; i++) { - if (i > 0) { - buff.append(' '); - } - if (row == null) { - for (int j = 0; j < max[i]; j++) { - buff.append('-'); - } - } else { - int len = row[i].length(); - buff.append(row[i]); - if (i < length - 1) { - for (int j = len; j < max[i]; j++) { - buff.append(' '); - } - } - } - } - return buff.toString(); - } - - private void writeException(String sql, SQLException e) throws Exception { - writeResult(sql, "exception", e); - } - - private void writeResult(String sql, String s, SQLException e) - throws Exception { - assertKnownException(e); - s = ("> " + s).trim(); - String compare = readLine(); - if (compare != null && compare.startsWith(">")) { - if (!compare.equals(s)) { - if (reconnectOften && sql.toUpperCase().startsWith("EXPLAIN")) { - return; - } - errors.append("line: "); - errors.append(line); - errors.append("\n" + "exp: "); - errors.append(compare); - errors.append("\n" + "got: "); - errors.append(s); - errors.append("\n"); - if (e != null) { - TestBase.logError("script", e); - } - TestBase.logError(errors.toString(), null); - if (failFast) { - conn.close(); - System.exit(1); - } - } - } else { - putBack = compare; - } - write(s); - - } - - private void write(String s) { - line++; - out.println(s); - } - - private static void sort(String[] a) { - for (int i = 1, j, len = a.length; i < len; i++) { - String t = a[i]; - for (j = i - 1; j >= 0 && t.compareTo(a[j]) < 0; j--) { - a[j + 1] = a[j]; - } - a[j + 1] = t; - } - } - -} diff --git a/h2/src/test/org/h2/test/db/TestScriptSimple.java b/h2/src/test/org/h2/test/db/TestScriptSimple.java deleted file mode 100644 index 089f102a1a..0000000000 --- a/h2/src/test/org/h2/test/db/TestScriptSimple.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.h2.test.TestBase; -import org.h2.util.ScriptReader; - -/** - * This test runs a simple SQL script file and compares the output with the - * expected output. - */ -public class TestScriptSimple extends TestBase { - - private Connection conn; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - if (config.memory || config.big || config.networked) { - return; - } - deleteDb("scriptSimple"); - reconnect(); - String inFile = "org/h2/test/testSimple.in.txt"; - InputStream is = getClass().getClassLoader().getResourceAsStream(inFile); - LineNumberReader lineReader = new LineNumberReader( - new InputStreamReader(is, "Cp1252")); - ScriptReader reader = new ScriptReader(lineReader); - while (true) { - String sql = reader.readStatement(); - if (sql == null) { - break; - } - sql = sql.trim(); - try { - if ("@reconnect".equals(sql.toLowerCase())) { - reconnect(); - } else if (sql.length() == 0) { - // ignore - } else if (sql.toLowerCase().startsWith("select")) { - ResultSet rs = conn.createStatement().executeQuery(sql); - while (rs.next()) { - String expected = reader.readStatement().trim(); - String got = "> " + rs.getString(1); - assertEquals(sql, expected, got); - } - } else { - conn.createStatement().execute(sql); - } - } catch (SQLException e) { - System.out.println(sql); - throw e; - } - } - is.close(); - conn.close(); - deleteDb("scriptSimple"); - } - - private void reconnect() throws SQLException { - if (conn != null) { - conn.close(); - } - conn = 
getConnection("scriptSimple"); - } - -} diff --git a/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java b/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java deleted file mode 100644 index a8bac7cc5b..0000000000 --- a/h2/src/test/org/h2/test/db/TestSelectCountNonNullColumn.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.test.TestBase; - -/** - * Test that count(column) is converted to count(*) if the column is not - * nullable. - */ -public class TestSelectCountNonNullColumn extends TestBase { - - private static final String DBNAME = "selectCountNonNullColumn"; - private Statement stat; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - - deleteDb(DBNAME); - Connection conn = getConnection(DBNAME); - stat = conn.createStatement(); - - stat.execute("CREATE TABLE SIMPLE(KEY VARCHAR(25) " + - "PRIMARY KEY, NAME VARCHAR(25))"); - stat.execute("INSERT INTO SIMPLE(KEY) VALUES('k1')"); - stat.execute("INSERT INTO SIMPLE(KEY,NAME) VALUES('k2','name2')"); - - checkKeyCount(-1); - checkNameCount(-1); - checkStarCount(-1); - - checkKeyCount(2); - checkNameCount(1); - checkStarCount(2); - - conn.close(); - - } - - private void checkStarCount(long expect) throws SQLException { - String sql = "SELECT COUNT(*) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - // System.out.println(rs.getString(1)); - assertEquals("SELECT\n" + " COUNT(*)\n" + "FROM PUBLIC.SIMPLE\n" - + " /* PUBLIC.SIMPLE.tableScan */\n" - + "/* direct lookup */", rs.getString(1)); - } - } - - private void checkKeyCount(long expect) throws SQLException { - String sql = "SELECT COUNT(KEY) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - // System.out.println(rs.getString(1)); - assertEquals("SELECT\n" + " COUNT(KEY)\n" - + "FROM PUBLIC.SIMPLE\n" - + " /* PUBLIC.SIMPLE.tableScan */\n" - + "/* direct lookup */", rs.getString(1)); - } - } - - private void checkNameCount(long expect) throws SQLException { - String sql = "SELECT COUNT(NAME) FROM SIMPLE"; - if (expect < 0) { - sql = "EXPLAIN " + sql; - } - ResultSet rs = stat.executeQuery(sql); - rs.next(); - if (expect >= 0) { - assertEquals(expect, rs.getLong(1)); - } else { - // System.out.println(rs.getString(1)); - assertEquals("SELECT\n" + " COUNT(NAME)\n" + "FROM PUBLIC.SIMPLE\n" - + " /* 
PUBLIC.SIMPLE.tableScan */", rs.getString(1)); - } - } - -} diff --git a/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java b/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java new file mode 100644 index 0000000000..bed6108812 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestSelectTableNotFound.java @@ -0,0 +1,177 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestSelectTableNotFound extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testWithoutAnyCandidate(); + testWithOneCandidate(); + testWithTwoCandidates(); + testWithSchema(); + testWithSchemaSearchPath(); + testWhenSchemaIsEmpty(); + testWithSchemaWhenSchemaIsEmpty(); + testWithSchemaSearchPathWhenSchemaIsEmpty(); + } + + private void testWithoutAnyCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T2 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found;"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithOneCandidate() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + 
try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithTwoCandidates() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE Toast ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + stat.execute("CREATE TABLE TOAST ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM toast"); + fail("Table `toast` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"toast\" not found (candidates are: \"TOAST, Toast\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchema() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM PUBLIC.t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaSearchPath() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC"); + stat.execute("CREATE TABLE T1 ( ID INT GENERATED BY DEFAULT AS IDENTITY )"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + 
assertContains(message, "Table \"t1\" not found (candidates are: \"T1\")"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + try { + stat.executeQuery("SELECT 1 FROM PUBLIC.t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private void testWithSchemaSearchPathWhenSchemaIsEmpty() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(); + Statement stat = conn.createStatement(); + stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC"); + try { + stat.executeQuery("SELECT 1 FROM t1"); + fail("Table `t1` was accessible but should not have been."); + } catch (SQLException e) { + String message = e.getMessage(); + assertContains(message, "Table \"t1\" not found (this database is empty)"); + } + + conn.close(); + deleteDb(getTestName()); + } + + private Connection getConnection() throws SQLException { + return getConnection(getTestName() + ";DATABASE_TO_UPPER=FALSE"); + } +} diff --git a/h2/src/test/org/h2/test/db/TestSequence.java b/h2/src/test/org/h2/test/db/TestSequence.java index bbcda47657..689ada2716 100644 --- a/h2/src/test/org/h2/test/db/TestSequence.java +++ 
b/h2/src/test/org/h2/test/db/TestSequence.java @@ -1,23 +1,28 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import org.h2.api.Trigger; +import org.h2.engine.Constants; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Task; /** * Tests the sequence feature of this database. */ -public class TestSequence extends TestBase { +public class TestSequence extends TestDb { /** * Run just this test. @@ -25,11 +30,13 @@ public class TestSequence extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public void test() throws Exception { + testConcurrentCreate(); + testConcurrentNextAndCurrentValue(); testSchemaSearchPath(); testAlterSequenceColumn(); testAlterSequence(); @@ -44,6 +51,121 @@ public void test() throws SQLException { deleteDb("sequence"); } + private void testConcurrentCreate() throws Exception { + deleteDb("sequence"); + final String url = getURL("sequence;LOCK_TIMEOUT=2000", true); + Connection conn = getConnection(url); + Task[] tasks = new Task[2]; + try { + Statement stat = conn.createStatement(); + stat.execute("create table dummy(id bigint primary key)"); + stat.execute("create table test(id bigint primary key)"); + stat.execute("create sequence test_seq cache 2"); + for (int i = 0; i < tasks.length; i++) { + final int x = i; + tasks[i] = new Task() { + @Override + public void call() throws Exception { + try (Connection conn = getConnection(url)) { + PreparedStatement prep = conn.prepareStatement( + "insert into test(id) values(next value for test_seq)"); + PreparedStatement prep2 = conn.prepareStatement( + "delete from test"); + while (!stop) { + prep.execute(); + if (Math.random() < 0.01) { + prep2.execute(); + } + if (Math.random() < 0.01) { + createDropTrigger(conn); + } + } + } + } + + private void createDropTrigger(Connection conn) throws Exception { + String triggerName = "t_" + x; + Statement stat = conn.createStatement(); + stat.execute("create trigger " + triggerName + + " before insert on dummy call \"" + + TriggerTest.class.getName() + "\""); + stat.execute("drop trigger " + triggerName); + } + + }.execute(); + } + Thread.sleep(1000); + for (Task t : tasks) { + t.get(); + } + } finally { + for (Task t : tasks) { + t.join(); + } + conn.close(); + } + } + + private void testConcurrentNextAndCurrentValue() throws Exception { + deleteDb("sequence"); + final String 
url = getURL("sequence", true); + Connection conn = getConnection(url); + Task[] tasks = new Task[2]; + try { + Statement stat = conn.createStatement(); + stat.execute("CREATE SEQUENCE SEQ1"); + stat.execute("CREATE SEQUENCE SEQ2"); + for (int i = 0; i < tasks.length; i++) { + tasks[i] = new Task() { + @Override + public void call() throws Exception { + try (Connection conn = getConnection(url)) { + PreparedStatement next1 = conn.prepareStatement("CALL NEXT VALUE FOR SEQ1"); + PreparedStatement next2 = conn.prepareStatement("CALL NEXT VALUE FOR SEQ2"); + PreparedStatement current1 = conn.prepareStatement("CALL CURRENT VALUE FOR SEQ1"); + PreparedStatement current2 = conn.prepareStatement("CALL CURRENT VALUE FOR SEQ2"); + while (!stop) { + long v1, v2; + try (ResultSet rs = next1.executeQuery()) { + rs.next(); + v1 = rs.getLong(1); + } + try (ResultSet rs = next2.executeQuery()) { + rs.next(); + v2 = rs.getLong(1); + } + try (ResultSet rs = current1.executeQuery()) { + rs.next(); + if (v1 != rs.getLong(1)) { + throw new RuntimeException("Unexpected CURRENT VALUE FOR SEQ1"); + } + } + try (ResultSet rs = current2.executeQuery()) { + rs.next(); + if (v2 != rs.getLong(1)) { + throw new RuntimeException("Unexpected CURRENT VALUE FOR SEQ2"); + } + } + } + } + } + }.execute(); + } + Thread.sleep(1000); + for (Task t : tasks) { + Exception e = t.getException(); + if (e != null) { + throw new AssertionError(e.getMessage()); + } + } + } finally { + for (Task t : tasks) { + t.join(); + } + conn.close(); + } + } + private void testSchemaSearchPath() throws SQLException { deleteDb("sequence"); Connection conn = getConnection("sequence"); @@ -51,8 +173,8 @@ private void testSchemaSearchPath() throws SQLException { stat.execute("CREATE SCHEMA TEST"); stat.execute("CREATE SEQUENCE TEST.TEST_SEQ"); stat.execute("SET SCHEMA_SEARCH_PATH PUBLIC, TEST"); - stat.execute("CALL TEST_SEQ.NEXTVAL"); - stat.execute("CALL TEST_SEQ.CURRVAL"); + stat.execute("CALL NEXT VALUE FOR TEST_SEQ"); + 
stat.execute("CALL CURRENT VALUE FOR TEST_SEQ"); conn.close(); } @@ -62,7 +184,7 @@ private void testAlterSequenceColumn() throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID INT , NAME VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); - stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT IDENTITY"); + stat.execute("ALTER TABLE TEST ALTER COLUMN ID INT GENERATED BY DEFAULT AS IDENTITY"); stat.execute("ALTER TABLE test ALTER COLUMN ID RESTART WITH 3"); stat.execute("INSERT INTO TEST (name) VALUES('Other World')"); conn.close(); @@ -71,8 +193,8 @@ private void testAlterSequenceColumn() throws SQLException { private void testAlterSequence() throws SQLException { test("create sequence s; alter sequence s restart with 2", null, 2, 3, 4); test("create sequence s; alter sequence s restart with 7", null, 7, 8, 9, 10); - test("create sequence s; alter sequence s restart with 11 " + - "minvalue 3 maxvalue 12 cycle", null, 11, 12, 3, 4); + test("create sequence s; alter sequence s start with 3 restart with 11 minvalue 3 maxvalue 12 cycle", + null, 11, 12, 3, 4); test("create sequence s; alter sequence s restart with 5 cache 2", null, 5, 6, 7, 8); test("create sequence s; alter sequence s restart with 9 " + @@ -128,38 +250,35 @@ private void testMetaTable() throws SQLException { assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("A", rs.getString("SEQUENCE_NAME")); - assertEquals(0, rs.getLong("CURRENT_VALUE")); + assertEquals(1, rs.getLong("BASE_VALUE")); assertEquals(1, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(32, rs.getLong("CACHE")); - assertEquals(1, rs.getLong("MIN_VALUE")); - assertEquals(Long.MAX_VALUE, rs.getLong("MAX_VALUE")); - assertEquals(false, rs.getBoolean("IS_CYCLE")); + 
assertEquals(1, rs.getLong("MINIMUM_VALUE")); + assertEquals(Long.MAX_VALUE, rs.getLong("MAXIMUM_VALUE")); + assertEquals("NO", rs.getString("CYCLE_OPTION")); rs.next(); assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("B", rs.getString("SEQUENCE_NAME")); - assertEquals(5, rs.getLong("CURRENT_VALUE")); + assertEquals(7, rs.getLong("BASE_VALUE")); assertEquals(2, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(1, rs.getLong("CACHE")); - assertEquals(5, rs.getLong("MIN_VALUE")); - assertEquals(9, rs.getLong("MAX_VALUE")); - assertEquals(true, rs.getBoolean("IS_CYCLE")); + assertEquals(5, rs.getLong("MINIMUM_VALUE")); + assertEquals(9, rs.getLong("MAXIMUM_VALUE")); + assertEquals("YES", rs.getString("CYCLE_OPTION")); rs.next(); assertEquals("SEQUENCE", rs.getString("SEQUENCE_CATALOG")); assertEquals("PUBLIC", rs.getString("SEQUENCE_SCHEMA")); assertEquals("C", rs.getString("SEQUENCE_NAME")); - assertEquals(-2, rs.getLong("CURRENT_VALUE")); + assertEquals(-4, rs.getLong("BASE_VALUE")); assertEquals(-2, rs.getLong("INCREMENT")); - assertEquals(false, rs.getBoolean("IS_GENERATED")); - assertEquals("", rs.getString("REMARKS")); + assertNull(rs.getString("REMARKS")); assertEquals(3, rs.getLong("CACHE")); - assertEquals(-9, rs.getLong("MIN_VALUE")); - assertEquals(-3, rs.getLong("MAX_VALUE")); - assertEquals(false, rs.getBoolean("IS_CYCLE")); + assertEquals(-9, rs.getLong("MINIMUM_VALUE")); + assertEquals(-3, rs.getLong("MAXIMUM_VALUE")); + assertEquals("NO", rs.getString("CYCLE_OPTION")); assertFalse(rs.next()); conn.close(); } @@ -212,33 +331,33 @@ private void testCreationErrors() throws SQLException { stat, "create sequence a minvalue 5 start with 2", "Unable to create or alter sequence \"A\" because of " + - "invalid attributes (start value \"2\", " + + "invalid 
attributes (base value \"2\", start value \"2\", " + "min value \"5\", max value \"" + Long.MAX_VALUE + - "\", increment \"1\")"); + "\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence b maxvalue 5 start with 7", "Unable to create or alter sequence \"B\" because of " + - "invalid attributes (start value \"7\", " + - "min value \"1\", max value \"5\", increment \"1\")"); + "invalid attributes (base value \"7\", start value \"7\", " + + "min value \"1\", max value \"5\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence c minvalue 5 maxvalue 2", "Unable to create or alter sequence \"C\" because of " + - "invalid attributes (start value \"5\", " + - "min value \"5\", max value \"2\", increment \"1\")"); + "invalid attributes (base value \"5\", start value \"5\", " + + "min value \"5\", max value \"2\", increment \"1\", cache size \"32\")"); expectError( stat, "create sequence d increment by 0", "Unable to create or alter sequence \"D\" because of " + - "invalid attributes (start value \"1\", " + + "invalid attributes (base value \"1\", start value \"1\", " + "min value \"1\", max value \"" + - Long.MAX_VALUE + "\", increment \"0\")"); + Long.MAX_VALUE + "\", increment \"0\", cache size \"32\")"); expectError(stat, "create sequence e minvalue 1 maxvalue 5 increment 99", "Unable to create or alter sequence \"E\" because of " + - "invalid attributes (start value \"1\", " + - "min value \"1\", max value \"5\", increment \"99\")"); + "invalid attributes (base value \"1\", start value \"1\", " + + "min value \"1\", max value \"5\", increment \"99\", cache size \"32\")"); conn.close(); } @@ -253,23 +372,24 @@ private void testCreateSql() throws SQLException { "minvalue 2 maxvalue 9 nocycle cache 2"); stat.execute("create sequence d nomaxvalue no minvalue no cache nocycle"); stat.execute("create sequence e cache 1"); - List script = new ArrayList(); + List script = new ArrayList<>(); ResultSet rs = stat.executeQuery("script 
nodata"); while (rs.next()) { script.add(rs.getString(1)); } Collections.sort(script); - assertEquals("CREATE SEQUENCE PUBLIC.A START WITH 1;", script.get(0)); - assertEquals("CREATE SEQUENCE PUBLIC.B START " + + assertEquals("-- H2 " + Constants.VERSION + ";", script.get(0)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"A\" START WITH 1;", script.get(1)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"B\" START " + "WITH 5 INCREMENT BY 2 " + - "MINVALUE 3 MAXVALUE 7 CYCLE CACHE 1;", script.get(1)); - assertEquals("CREATE SEQUENCE PUBLIC.C START " + + "MINVALUE 3 MAXVALUE 7 CYCLE NO CACHE;", script.get(2)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"C\" START " + "WITH 3 MINVALUE 2 MAXVALUE 9 CACHE 2;", - script.get(2)); - assertEquals("CREATE SEQUENCE PUBLIC.D START " + - "WITH 1 CACHE 1;", script.get(3)); - assertEquals("CREATE SEQUENCE PUBLIC.E START " + - "WITH 1 CACHE 1;", script.get(4)); + script.get(3)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"D\" START " + + "WITH 1 NO CACHE;", script.get(4)); + assertEquals("CREATE SEQUENCE \"PUBLIC\".\"E\" START " + + "WITH 1 NO CACHE;", script.get(5)); conn.close(); } @@ -334,7 +454,7 @@ private void test(String setupSql, String finalError, long... values) getNext(stat); fail("Expected error: " + finalError); } catch (SQLException e) { - assertTrue(e.getMessage().contains(finalError)); + assertContains(e.getMessage(), finalError); } } @@ -346,7 +466,7 @@ private void expectError(Statement stat, String sql, String error) { stat.execute(sql); fail("Expected error: " + error); } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains(error)); + assertContains(e.getMessage(), error); } } @@ -356,4 +476,25 @@ private static long getNext(Statement stat) throws SQLException { long value = rs.getLong(1); return value; } + + /** + * A test trigger. 
+ */ + public static class TriggerTest implements Trigger { + + @Override + public void init(Connection conn, String schemaName, + String triggerName, String tableName, boolean before, int type) + throws SQLException { + conn.createStatement().executeQuery("call next value for test_seq"); + } + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) + throws SQLException { + // ignore + } + + } + } diff --git a/h2/src/test/org/h2/test/db/TestSessionsLocks.java b/h2/src/test/org/h2/test/db/TestSessionsLocks.java index fcec715aad..874cabe692 100644 --- a/h2/src/test/org/h2/test/db/TestSessionsLocks.java +++ b/h2/src/test/org/h2/test/db/TestSessionsLocks.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,12 +10,14 @@ import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the meta data tables information_schema.locks and sessions. */ -public class TestSessionsLocks extends TestBase { +public class TestSessionsLocks extends TestDb { /** * Run just this test. @@ -23,22 +25,25 @@ public class TestSessionsLocks extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return true; } @Override public void test() throws Exception { - if (config.mvcc) { - return; - } testCancelStatement(); testLocks(); + testAbortStatement(); deleteDb("sessionsLocks"); } private void testLocks() throws SQLException { deleteDb("sessionsLocks"); - Connection conn = getConnection("sessionsLocks;MULTI_THREADED=1"); + Connection conn = getConnection("sessionsLocks"); Statement stat = conn.createStatement(); ResultSet rs; rs = stat.executeQuery("select * from information_schema.locks " + @@ -55,24 +60,13 @@ private void testLocks() throws SQLException { assertEquals("PUBLIC", rs.getString("TABLE_SCHEMA")); assertEquals("TEST", rs.getString("TABLE_NAME")); rs.getString("SESSION_ID"); - if (config.mvcc) { - assertEquals("READ", rs.getString("LOCK_TYPE")); - } else { - assertEquals("WRITE", rs.getString("LOCK_TYPE")); - } + assertEquals("READ", rs.getString("LOCK_TYPE")); assertFalse(rs.next()); conn2.commit(); conn2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); stat2.execute("SELECT * FROM TEST"); rs = stat.executeQuery("select * from information_schema.locks " + "order by session_id"); - if (!config.mvcc) { - rs.next(); - assertEquals("PUBLIC", rs.getString("TABLE_SCHEMA")); - assertEquals("TEST", rs.getString("TABLE_NAME")); - rs.getString("SESSION_ID"); - assertEquals("READ", rs.getString("LOCK_TYPE")); - } assertFalse(rs.next()); conn2.commit(); rs = stat.executeQuery("select * from information_schema.locks " + @@ -84,40 +78,37 @@ private void testLocks() throws SQLException { private void testCancelStatement() throws Exception { deleteDb("sessionsLocks"); - Connection conn = getConnection("sessionsLocks;MULTI_THREADED=1"); + Connection conn = getConnection("sessionsLocks"); Statement stat = conn.createStatement(); ResultSet rs; rs = stat.executeQuery("select * from 
information_schema.sessions " + - "order by SESSION_START, ID"); + "order by SESSION_START, SESSION_ID"); rs.next(); - int sessionId = rs.getInt("ID"); + int sessionId = rs.getInt("SESSION_ID"); rs.getString("USER_NAME"); rs.getTimestamp("SESSION_START"); - rs.getString("STATEMENT"); - rs.getTimestamp("STATEMENT_START"); + rs.getString("EXECUTING_STATEMENT"); + rs.getTimestamp("EXECUTING_STATEMENT_START"); assertFalse(rs.next()); Connection conn2 = getConnection("sessionsLocks"); - final Statement stat2 = conn2.createStatement(); + Statement stat2 = conn2.createStatement(); rs = stat.executeQuery("select * from information_schema.sessions " + - "order by SESSION_START, ID"); + "order by SESSION_START, SESSION_ID"); assertTrue(rs.next()); - assertEquals(sessionId, rs.getInt("ID")); + assertEquals(sessionId, rs.getInt("SESSION_ID")); assertTrue(rs.next()); - int otherId = rs.getInt("ID"); + int otherId = rs.getInt("SESSION_ID"); assertTrue(otherId != sessionId); assertFalse(rs.next()); stat2.execute("set throttle 1"); - final boolean[] done = { false }; - Runnable runnable = new Runnable() { - @Override - public void run() { - try { - stat2.execute("select count(*) from " + - "system_range(1, 10000000) t1, system_range(1, 10000000) t2"); - new Error("Unexpected success").printStackTrace(); - } catch (SQLException e) { - done[0] = true; - } + boolean[] done = { false }; + Runnable runnable = () -> { + try { + stat2.execute("select count(*) from " + + "system_range(1, 10000000) t1, system_range(1, 10000000) t2"); + new Error("Unexpected success").printStackTrace(); + } catch (SQLException e) { + done[0] = true; } }; new Thread(runnable).start(); @@ -140,4 +131,58 @@ public void run() { conn.close(); } + private void testAbortStatement() throws Exception { + deleteDb("sessionsLocks"); + Connection conn = getConnection("sessionsLocks"); + Statement stat = conn.createStatement(); + ResultSet rs; + rs = stat.executeQuery("select session_id() as ID"); + rs.next(); + int 
sessionId = rs.getInt("ID"); + + // Setup session to be aborted + Connection conn2 = getConnection("sessionsLocks"); + Statement stat2 = conn2.createStatement(); + stat2.execute("create table test(id int primary key, name varchar)"); + conn2.setAutoCommit(false); + stat2.execute("insert into test values(1, 'Hello')"); + conn2.commit(); + // grab a lock + stat2.executeUpdate("update test set name = 'Again' where id = 1"); + + rs = stat2.executeQuery("select session_id() as ID"); + rs.next(); + + int otherId = rs.getInt("ID"); + assertTrue(otherId != sessionId); + assertFalse(rs.next()); + + // expect one lock + assertEquals(1, getLockCountForSession(stat, otherId)); + rs = stat.executeQuery("CALL ABORT_SESSION(" + otherId + ")"); + rs.next(); + assertTrue(rs.getBoolean(1)); + + // expect the lock to be released along with its session + assertEquals(0, getLockCountForSession(stat, otherId)); + rs = stat.executeQuery("CALL ABORT_SESSION(" + otherId + ")"); + rs.next(); + assertFalse("Session is expected to be already aborted", rs.getBoolean(1)); + + // using the connection for the aborted session is expected to throw an + // exception + assertThrows(config.networked ? ErrorCode.CONNECTION_BROKEN_1 : ErrorCode.DATABASE_CALLED_AT_SHUTDOWN, stat2) + .executeQuery("select count(*) from test"); + + conn2.close(); + conn.close(); + } + + private int getLockCountForSession(Statement stmnt, int otherId) throws SQLException { + try (ResultSet rs = stmnt + .executeQuery("select count(*) from information_schema.locks where session_id = " + otherId)) { + assertTrue(rs.next()); + return rs.getInt(1); + } + } } diff --git a/h2/src/test/org/h2/test/db/TestSetCollation.java b/h2/src/test/org/h2/test/db/TestSetCollation.java new file mode 100644 index 0000000000..7c0559f107 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestSetCollation.java @@ -0,0 +1,191 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +public class TestSetCollation extends TestDb { + private static final String[] TEST_STRINGS = new String[]{"A", "\u00c4", "AA", "B", "$", "1A", null}; + + private static final String DB_NAME = "collator"; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testDefaultCollator(); + testCp500Collator(); + testDeCollator(); + testUrlParameter(); + testReopenDatabase(); + testReopenDatabaseWithUrlParameter(); + testReopenDatabaseWithDifferentCollationInUrl(); + testReopenDatabaseWithSameCollationInUrl(); + } + + + private void testDefaultCollator() throws Exception { + assertEquals(Arrays.asList(null, "$", "1A", "A", "AA", "B", "\u00c4"), orderedWithCollator(null)); + } + + private void testDeCollator() throws Exception { + assertEquals(Arrays.asList(null, "$", "1A", "A", "\u00c4", "AA", "B"), orderedWithCollator("DE")); + assertEquals(Arrays.asList(null, "$", "1A", "A", "\u00c4", "AA", "B"), orderedWithCollator("DEFAULT_DE")); + } + + private void testCp500Collator() throws Exception { + // IBM z/OS codepage + assertEquals(Arrays.asList(null, "A", "AA", "B", "1A", "$", "\u00c4"), + orderedWithCollator("CHARSET_CP500")); + } + + private void testUrlParameter() throws Exception { + // Specifying the collator in the JDBC Url should have the same effect + // as setting it with a set statement + config.collation = "CHARSET_CP500"; + try { + 
assertEquals(Arrays.asList(null, "A", "AA", "B", "1A", "$", "\u00c4"), orderedWithCollator(null)); + } finally { + config.collation = null; + } + } + + private void testReopenDatabase() throws Exception { + if (config.memory) { + return; + } + + orderedWithCollator("DE"); + + try (Connection con = getConnection(DB_NAME)) { + insertValues(con, new String[]{"A", "\u00c4"}, 100); + + assertEquals(Arrays.asList(null, "$", "1A", "A", "A", "\u00c4", "\u00c4", "AA", "B"), + loadTableValues(con)); + } + } + + private void testReopenDatabaseWithUrlParameter() throws Exception { + if (config.memory) { + return; + } + + config.collation = "DE"; + try { + orderedWithCollator(null); + } finally { + config.collation = null; + } + + // reopen the database without specifying a collation in the url. + // This should keep the initial collation. + try (Connection con = getConnection(DB_NAME)) { + insertValues(con, new String[]{"A", "\u00c4"}, 100); + + assertEquals(Arrays.asList(null, "$", "1A", "A", "A", "\u00c4", "\u00c4", "AA", "B"), + loadTableValues(con)); + } + + } + + private void testReopenDatabaseWithDifferentCollationInUrl() throws Exception { + if (config.memory) { + return; + } + config.collation = "DE"; + try { + orderedWithCollator(null); + } finally { + config.collation = null; + } + + config.collation = "CHARSET_CP500"; + try { + getConnection(DB_NAME); + fail(); + } catch (SQLException e) { + // expected + } finally { + config.collation = null; + } + } + + private void testReopenDatabaseWithSameCollationInUrl() throws Exception { + if (config.memory) { + return; + } + config.collation = "DE"; + try { + orderedWithCollator(null); + } finally { + config.collation = null; + } + + config.collation = "DE"; + try (Connection con = getConnection(DB_NAME)) { + insertValues(con, new String[]{"A", "\u00c4"}, 100); + + assertEquals(Arrays.asList(null, "$", "1A", "A", "A", "\u00c4", "\u00c4", "AA", "B"), + loadTableValues(con)); + } finally { + config.collation = null; + } + } + 
+ + private List orderedWithCollator(String collator) throws SQLException { + deleteDb(DB_NAME); + try (Connection con = getConnection(DB_NAME); Statement statement = con.createStatement()) { + if (collator != null) { + statement.execute("SET COLLATION " + collator); + } + statement.execute("CREATE TABLE charsettable(id INT PRIMARY KEY, testvalue VARCHAR(50))"); + + insertValues(con, TEST_STRINGS, 1); + + return loadTableValues(con); + } + } + + private static void insertValues(Connection con, String[] values, int startId) throws SQLException { + PreparedStatement ps = con.prepareStatement("INSERT INTO charsettable VALUES (?, ?)"); + int id = startId; + for (String value : values) { + ps.setInt(1, id++); + ps.setString(2, value); + ps.execute(); + } + ps.close(); + } + + private static List loadTableValues(Connection con) throws SQLException { + List results = new ArrayList<>(); + Statement statement = con.createStatement(); + ResultSet resultSet = statement.executeQuery("select testvalue from charsettable order by testvalue"); + while (resultSet.next()) { + results.add(resultSet.getString(1)); + } + statement.close(); + return results; + } + +} diff --git a/h2/src/test/org/h2/test/db/TestShow.java b/h2/src/test/org/h2/test/db/TestShow.java deleted file mode 100644 index 45d6c6cfe3..0000000000 --- a/h2/src/test/org/h2/test/db/TestShow.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import org.h2.test.TestBase; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -/** - * Test of compatibility for the SHOW statement. - */ -public class TestShow extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - testPgCompatibility(); - testMysqlCompatibility(); - } - - private void testPgCompatibility() throws SQLException { - Connection conn = getConnection("mem:pg"); - Statement stat = conn.createStatement(); - - assertResult("UNICODE", stat, "SHOW CLIENT_ENCODING"); - assertResult("read committed", stat, "SHOW DEFAULT_TRANSACTION_ISOLATION"); - assertResult("read committed", stat, "SHOW TRANSACTION ISOLATION LEVEL"); - assertResult("ISO", stat, "SHOW DATESTYLE"); - assertResult("8.1.4", stat, "SHOW SERVER_VERSION"); - assertResult("UTF8", stat, "SHOW SERVER_ENCODING"); - } - - private void testMysqlCompatibility() throws SQLException { - Connection conn = getConnection("mem:pg"); - Statement stat = conn.createStatement(); - ResultSet rs; - - // show tables without a schema - stat.execute("create table person(id int, name varchar)"); - rs = stat.executeQuery("SHOW TABLES"); - assertTrue(rs.next()); - assertEquals("PERSON", rs.getString(1)); - assertEquals("PUBLIC", rs.getString(2)); - assertFalse(rs.next()); - - // show tables with a schema - assertResultRowCount(1, stat.executeQuery("SHOW TABLES FROM PUBLIC")); - - // columns - assertResultRowCount(2, stat.executeQuery("SHOW COLUMNS FROM person")); - } -} diff --git a/h2/src/test/org/h2/test/db/TestSpaceReuse.java b/h2/src/test/org/h2/test/db/TestSpaceReuse.java index 4e11161c3a..dd21cf549c 100644 --- a/h2/src/test/org/h2/test/db/TestSpaceReuse.java +++ b/h2/src/test/org/h2/test/db/TestSpaceReuse.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -8,15 +8,15 @@ import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; - import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests if disk space is reused after deleting many rows. */ -public class TestSpaceReuse extends TestBase { +public class TestSpaceReuse extends TestDb { /** * Run just this test. @@ -24,32 +24,33 @@ public class TestSpaceReuse extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { deleteDb("spaceReuse"); long max = 0, now = 0, min = Long.MAX_VALUE; for (int i = 0; i < 20; i++) { Connection conn = getConnection("spaceReuse"); Statement stat = conn.createStatement(); stat.execute("set retention_time 0"); + stat.execute("set write_delay 0"); // disable auto-commit so that free-unused runs on commit stat.execute("create table if not exists t(i int)"); stat.execute("insert into t select x from system_range(1, 500)"); conn.close(); conn = getConnection("spaceReuse"); conn.createStatement().execute("delete from t"); conn.close(); - String fileName = getBaseDir() + "/spaceReuse"; - if (Constants.VERSION_MINOR >= 4) { - fileName += Constants.SUFFIX_MV_FILE; - } else { - fileName += Constants.SUFFIX_PAGE_FILE; - } + String fileName = getBaseDir() + "/spaceReuse" + Constants.SUFFIX_MV_FILE; now = FileUtils.size(fileName); assertTrue(now > 0); if (i < 10) { diff --git a/h2/src/test/org/h2/test/db/TestSpatial.java b/h2/src/test/org/h2/test/db/TestSpatial.java index 5cc1b042f6..0de3de0f74 100644 --- a/h2/src/test/org/h2/test/db/TestSpatial.java +++ 
b/h2/src/test/org/h2/test/db/TestSpatial.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,23 +12,34 @@ import java.sql.Statement; import java.sql.Types; import java.util.Random; - -import com.vividsolutions.jts.geom.Envelope; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.util.AffineTransformation; import org.h2.api.Aggregate; +import org.h2.api.ErrorCode; +import org.h2.message.DbException; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; import org.h2.tools.SimpleRowSource; -import org.h2.value.DataType; +import org.h2.util.HasSQL; +import org.h2.value.TypeInfo; import org.h2.value.Value; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.Polygon; -import com.vividsolutions.jts.io.ParseException; -import com.vividsolutions.jts.io.WKTReader; import org.h2.value.ValueGeometry; +import org.h2.value.ValueToObjectConverter; +import org.h2.value.ValueToObjectConverter2; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.CoordinateSequence; +import org.locationtech.jts.geom.Envelope; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.MultiPoint; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; +import org.locationtech.jts.geom.PrecisionModel; +import org.locationtech.jts.geom.impl.CoordinateArraySequenceFactory; +import org.locationtech.jts.geom.util.AffineTransformation; +import org.locationtech.jts.io.ByteOrderValues; 
+import org.locationtech.jts.io.ParseException; +import org.locationtech.jts.io.WKBWriter; +import org.locationtech.jts.io.WKTReader; /** * Spatial datatype and index tests. @@ -37,9 +48,9 @@ * @author Noel Grandin * @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888 */ -public class TestSpatial extends TestBase { +public class TestSpatial extends TestDb { - private String url = "spatial"; + private static final String URL = "spatial"; /** * Run just this test. @@ -47,26 +58,30 @@ public class TestSpatial extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { - if (!config.mvStore && config.mvcc) { - return; - } - if (config.memory && config.mvcc) { - return; + public boolean isEnabled() { + if (config.memory) { + return false; } - if (DataType.GEOMETRY_CLASS != null) { - deleteDb("spatial"); - url = "spatial"; - testSpatial(); - deleteDb("spatial"); + if (ValueToObjectConverter.GEOMETRY_CLASS == null) { + return false; } + return true; + } + + @Override + public void test() throws SQLException { + deleteDb("spatial"); + testSpatial(); + deleteDb("spatial"); } private void testSpatial() throws SQLException { + testNaNs(); + testBug1(); testSpatialValues(); testOverlap(); testNotOverlap(); @@ -81,7 +96,6 @@ private void testSpatial() throws SQLException { testValueConversion(); testEquals(); testTableFunctionGeometry(); - testHashCode(); testAggregateWithGeometry(); testTableViewSpatialPredicate(); testValueGeometryScript(); @@ -90,22 +104,52 @@ private void testSpatial() throws SQLException { testStoreCorruption(); testExplainSpatialIndexWithPk(); testNullableGeometry(); + testNullableGeometryDelete(); + testNullableGeometryInsert(); + testNullableGeometryUpdate(); + testIndexUpdateNullGeometry(); + testInsertNull(); + testSpatialIndexWithOrder(); } - private void 
testHashCode() { - ValueGeometry geomA = ValueGeometry - .get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); - ValueGeometry geomB = ValueGeometry - .get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); - ValueGeometry geomC = ValueGeometry - .get("POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 5, 67 13 6))"); - assertEquals(geomA.hashCode(), geomB.hashCode()); - assertFalse(geomA.hashCode() == geomC.hashCode()); + private void testNaNs() { + GeometryFactory factory = new GeometryFactory(new PrecisionModel(), 0, + CoordinateArraySequenceFactory.instance()); + CoordinateSequence c2 = factory.getCoordinateSequenceFactory().create(1, 2, 0); + c2.setOrdinate(0, 0, 1d); + c2.setOrdinate(0, 1, 1d); + CoordinateSequence c3 = factory.getCoordinateSequenceFactory().create(1, 3, 0); + c3.setOrdinate(0, 0, 1d); + c3.setOrdinate(0, 1, 2d); + c3.setOrdinate(0, 2, 3d); + Point p2 = factory.createPoint(c2); + Point p3 = factory.createPoint(c3); + try { + ValueGeometry.getFromGeometry(new MultiPoint(new Point[] { p2, p3 }, factory)); + fail("Expected exception"); + } catch (DbException e) { + assertEquals(ErrorCode.DATA_CONVERSION_ERROR_1, e.getErrorCode()); + } + } + + private void testBug1() throws SQLException { + deleteDb("spatial"); + Connection conn = getConnection(URL); + Statement stat = conn.createStatement(); + + stat.execute("CREATE TABLE VECTORS (ID INTEGER NOT NULL, GEOM GEOMETRY, S INTEGER)"); + stat.execute("INSERT INTO VECTORS(ID, GEOM, S) " + + "VALUES(0, 'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))', 1)"); + + stat.executeQuery("select * from (select * from VECTORS) WHERE S=1 " + + "AND GEOM && 'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'"); + conn.close(); + deleteDb("spatial"); } private void testSpatialValues() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); + Connection conn = getConnection(URL); Statement stat = conn.createStatement(); stat.execute("create memory table test" + @@ -123,6 +167,13 @@ 
private void testSpatialValues() throws SQLException { new Coordinate(2, 2), new Coordinate(1, 1) }); assertTrue(polygon.equals(rs.getObject(2))); + rs.close(); + rs = stat.executeQuery("select id, cast(polygon as varchar) from test"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals("POLYGON ((1 1, 1 2, 2 2, 1 1))", rs.getObject(2)); + assertTrue(polygon.equals(rs.getObject(2, Geometry.class))); + rs.close(); rs = stat.executeQuery("select * from test where polygon = " + "'POLYGON ((1 1, 1 2, 2 2, 1 1))'"); @@ -170,8 +221,7 @@ static Geometry getRandomGeometry(Random geometryRand, private void testOverlap() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("create memory table test" + "(id int primary key, poly geometry)"); @@ -189,22 +239,21 @@ private void testOverlap() throws SQLException { assertEquals(1, rs.getInt("id")); assertFalse(rs.next()); stat.execute("drop table test"); - } finally { - conn.close(); } } private void testPersistentSpatialIndex() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("create table test" + "(id int primary key, poly geometry)"); stat.execute("insert into test values(1, " + "'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); - stat.execute("insert into test values(2, " + - "'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); + stat.execute("insert into test values(2,null)"); stat.execute("insert into test values(3, " + + "'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); + stat.execute("insert into test values(4,null)"); + stat.execute("insert into test values(5, " + "'POLYGON ((1 3, 1 4, 2 4, 1 3))')"); stat.execute("create spatial index on test(poly)"); @@ -225,17 +274,13 @@ private void testPersistentSpatialIndex() throws SQLException { assertEquals(1, 
rs.getInt("id")); assertFalse(rs.next()); rs.close(); - } finally { - // Close the database - conn.close(); } if (config.memory) { return; } - conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); ResultSet rs = stat.executeQuery( "select * from test " + @@ -244,36 +289,33 @@ private void testPersistentSpatialIndex() throws SQLException { assertEquals(1, rs.getInt("id")); assertFalse(rs.next()); stat.execute("drop table test"); - } finally { - conn.close(); } - } + private void testNotOverlap() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("create memory table test" + "(id int primary key, poly geometry)"); stat.execute("insert into test values(1, " + "'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); - stat.execute("insert into test values(2, " + - "'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); + stat.execute("insert into test values(2,null)"); stat.execute("insert into test values(3, " + + "'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); + stat.execute("insert into test values(4,null)"); + stat.execute("insert into test values(5, " + "'POLYGON ((1 3, 1 4, 2 4, 1 3))')"); ResultSet rs = stat.executeQuery( "select * from test " + "where NOT poly && 'POINT (1.5 1.5)'::Geometry"); assertTrue(rs.next()); - assertEquals(2, rs.getInt("id")); - assertTrue(rs.next()); assertEquals(3, rs.getInt("id")); + assertTrue(rs.next()); + assertEquals(5, rs.getInt("id")); assertFalse(rs.next()); stat.execute("drop table test"); - } finally { - conn.close(); } } @@ -292,6 +334,10 @@ private static void createTestTable(Statement stat) throws SQLException { "'POLYGON ((90 9, 190 9, 190 -91, 90 -91, 90 9))')"); stat.execute("insert into area values(6, " + "'POLYGON ((190 9, 290 9, 290 -91, 190 -91, 190 9))')"); + stat.execute("insert into area values(7,null)"); + stat.execute("insert into area 
values(8,null)"); + + stat.execute("create table roads(idRoad int primary key, the_geom geometry)"); stat.execute("create spatial index on roads(the_geom)"); stat.execute("insert into roads values(1, " + @@ -315,18 +361,17 @@ private static void createTestTable(Statement stat) throws SQLException { stat.execute("insert into roads values(7, " + "'LINESTRING (60.321361058601155 -13.099243856332663, " + "149.24385633270325 5.955576559546344)')"); + stat.execute("insert into roads values(8, null)"); + stat.execute("insert into roads values(9, null)"); + } private void testSpatialIndexQueryMultipleTable() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); createTestTable(stat); testRoadAndArea(stat); - } finally { - // Close the database - conn.close(); } deleteDb("spatial"); } @@ -360,13 +405,13 @@ private void testRoadAndArea(Statement stat) throws SQLException { private void testIndexTransaction() throws SQLException { // Check session management in index deleteDb("spatial"); - Connection conn = getConnection(url); - conn.setAutoCommit(false); - try { + try (Connection conn = getConnection(URL)) { + conn.setAutoCommit(false); Statement stat = conn.createStatement(); createTestTable(stat); Savepoint sp = conn.setSavepoint(); // Remove a row but do not commit + stat.execute("delete from roads where idRoad=9"); stat.execute("delete from roads where idRoad=7"); // Check if index is updated ResultSet rs = stat.executeQuery( @@ -397,10 +442,7 @@ private void testIndexTransaction() throws SQLException { conn.rollback(sp); // Check if the index is restored testRoadAndArea(stat); - } finally { - conn.close(); } - } /** @@ -408,12 +450,13 @@ private void testIndexTransaction() throws SQLException { */ private void testMemorySpatialIndex() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); + Connection conn = 
getConnection(URL); Statement stat = conn.createStatement(); stat.execute("create memory table test(id int primary key, polygon geometry)"); stat.execute("create spatial index idx_test_polygon on test(polygon)"); stat.execute("insert into test values(1, 'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); + stat.execute("insert into test values(2, null)"); ResultSet rs; // an query that can not possibly return a result @@ -426,9 +469,7 @@ private void testMemorySpatialIndex() throws SQLException { "explain select * from test " + "where polygon && 'POLYGON ((1 1, 1 2, 2 2, 1 1))'::Geometry"); rs.next(); - if (config.mvStore) { - assertContains(rs.getString(1), "/* PUBLIC.IDX_TEST_POLYGON: POLYGON &&"); - } + assertContains(rs.getString(1), "/* PUBLIC.IDX_TEST_POLYGON: POLYGON &&"); // TODO equality should probably also use the spatial index // rs = stat.executeQuery("explain select * from test " + @@ -470,11 +511,9 @@ private void testMemorySpatialIndex() throws SQLException { */ private void testJavaAlias() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_GEOM_FROM_TEXT FOR \"" + - TestSpatial.class.getName() + ".geomFromText\""); + stat.execute("CREATE ALIAS T_GEOM_FROM_TEXT FOR '" + TestSpatial.class.getName() + ".geomFromText'"); stat.execute("create table test(id int primary key " + "auto_increment, the_geom geometry)"); stat.execute("insert into test(the_geom) values(" + @@ -485,8 +524,6 @@ private void testJavaAlias() throws SQLException { assertTrue(rs.next()); assertEquals("POLYGON ((62 48, 84 48, 84 42, 56 34, 62 48))", rs.getObject(1).toString()); - } finally { - conn.close(); } deleteDb("spatial"); } @@ -496,11 +533,10 @@ private void testJavaAlias() throws SQLException { */ private void testJavaAliasTableFunction() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try 
(Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_RANDOM_GEOM_TABLE FOR \"" + - TestSpatial.class.getName() + ".getRandomGeometryTable\""); + stat.execute("CREATE ALIAS T_RANDOM_GEOM_TABLE FOR '" + + TestSpatial.class.getName() + ".getRandomGeometryTable'"); stat.execute( "create table test as " + "select * from T_RANDOM_GEOM_TABLE(42,20,-100,100,-100,100,4)"); @@ -508,8 +544,6 @@ private void testJavaAliasTableFunction() throws SQLException { ResultSet rs = stat.executeQuery("select count(*) from test"); assertTrue(rs.next()); assertEquals(20, rs.getInt(1)); - } finally { - conn.close(); } deleteDb("spatial"); } @@ -570,6 +604,7 @@ public void reset() throws SQLException { */ public static Geometry geomFromText(String text, int srid) throws SQLException { WKTReader wktReader = new WKTReader(); + wktReader.setIsOldJtsCoordinateSyntaxAllowed(false); try { Geometry geom = wktReader.read(text); geom.setSRID(srid); @@ -582,22 +617,51 @@ public static Geometry geomFromText(String text, int srid) throws SQLException { private void testGeometryDataType() { GeometryFactory geometryFactory = new GeometryFactory(); Geometry geometry = geometryFactory.createPoint(new Coordinate(0, 0)); - assertEquals(Value.GEOMETRY, DataType.getTypeFromClass(geometry.getClass())); + assertEquals(TypeInfo.TYPE_GEOMETRY, ValueToObjectConverter2.classToType(geometry.getClass())); } /** * Test serialization of Z and SRID values. 
*/ private void testWKB() { - ValueGeometry geom3d = ValueGeometry.get( - "POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))", 27572); + String ewkt = "SRID=27572;POLYGON Z ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"; + ValueGeometry geom3d = ValueGeometry.get(ewkt); + assertEquals(ewkt, geom3d.getString()); ValueGeometry copy = ValueGeometry.get(geom3d.getBytes()); - assertEquals(6, copy.getGeometry().getCoordinates()[0].z); - assertEquals(5, copy.getGeometry().getCoordinates()[1].z); - assertEquals(4, copy.getGeometry().getCoordinates()[2].z); + Geometry g = copy.getGeometry(); + assertEquals(6, g.getCoordinates()[0].getZ()); + assertEquals(5, g.getCoordinates()[1].getZ()); + assertEquals(4, g.getCoordinates()[2].getZ()); // Test SRID copy = ValueGeometry.get(geom3d.getBytes()); - assertEquals(27572, copy.getGeometry().getSRID()); + assertEquals(27572, g.getSRID()); + + Point point = new GeometryFactory().createPoint((new Coordinate(1.1d, 1.2d))); + // SRID 0 + checkSRID(ValueGeometry.getFromGeometry(point).getBytes(), 0); + checkSRID(new WKBWriter(2, ByteOrderValues.BIG_ENDIAN, false).write(point), 0); + checkSRID(new WKBWriter(2, ByteOrderValues.BIG_ENDIAN, true).write(point), 0); + checkSRID(new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, false).write(point), 0); + checkSRID(new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true).write(point), 0); + ewkt = "POINT (1.1 1.2)"; + assertEquals(ewkt, ValueGeometry.getFromGeometry(point).getString()); + assertEquals(ewkt, ValueGeometry.get(ewkt).getString()); + // SRID 1,000,000,000 + point.setSRID(1_000_000_000); + checkSRID(ValueGeometry.getFromGeometry(point).getBytes(), 1_000_000_000); + checkSRID(new WKBWriter(2, ByteOrderValues.BIG_ENDIAN, true).write(point), 1_000_000_000); + checkSRID(new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true).write(point), 1_000_000_000); + ewkt = "SRID=1000000000;POINT (1.1 1.2)"; + assertEquals(ewkt, ValueGeometry.getFromGeometry(point).getString()); + 
assertEquals(ewkt, ValueGeometry.get(ewkt).getString()); + } + + private void checkSRID(byte[] bytes, int srid) { + Point point = (Point) ValueGeometry.getFromEWKB(bytes).getGeometry(); + assertEquals(1.1, point.getX()); + assertEquals(1.2, point.getY()); + assertEquals(srid, point.getSRID()); + assertEquals(srid, point.getFactory().getSRID()); } /** @@ -605,11 +669,9 @@ private void testWKB() { */ private void testValueConversion() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); + Connection conn = getConnection(URL); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS OBJ_STRING FOR \"" + - TestSpatial.class.getName() + - ".getObjectString\""); + stat.execute("CREATE ALIAS OBJ_STRING FOR '" + TestSpatial.class.getName() + ".getObjectString'"); ResultSet rs = stat.executeQuery( "select OBJ_STRING('POINT( 15 25 )'::geometry)"); assertTrue(rs.next()); @@ -624,7 +686,7 @@ private void testValueConversion() throws SQLException { * @param object the object * @return the string representation */ - public static String getObjectString(Object object) { + public static String getObjectString(Geometry object) { return object.toString(); } @@ -634,7 +696,7 @@ public static String getObjectString(Object object) { private void testEquals() { // 3d equality test ValueGeometry geom3d = ValueGeometry.get( - "POLYGON ((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); + "POLYGON Z((67 13 6, 67 18 5, 59 18 4, 59 13 6, 67 13 6))"); ValueGeometry geom2d = ValueGeometry.get( "POLYGON ((67 13, 67 18, 59 18, 59 13, 67 13))"); assertFalse(geom3d.equals(geom2d)); @@ -642,20 +704,14 @@ private void testEquals() { GeometryFactory geometryFactory = new GeometryFactory(); Geometry geometry = geometryFactory.createPoint(new Coordinate(0, 0)); geometry.setSRID(27572); - ValueGeometry valueGeometry = - ValueGeometry.getFromGeometry(geometry); + ValueGeometry valueGeometry = ValueGeometry.getFromGeometry(geometry); Geometry geometry2 = 
geometryFactory.createPoint(new Coordinate(0, 0)); geometry2.setSRID(5326); - ValueGeometry valueGeometry2 = - ValueGeometry.getFromGeometry(geometry2); + ValueGeometry valueGeometry2 = ValueGeometry.getFromGeometry(geometry2); assertFalse(valueGeometry.equals(valueGeometry2)); - // Check illegal geometry (no WKB representation) - try { - ValueGeometry.get("POINT EMPTY"); - fail("expected this to throw IllegalArgumentException"); - } catch (IllegalArgumentException ex) { - // expected - } + ValueGeometry valueGeometry3 = ValueGeometry.getFromGeometry(geometry); + assertEquals(valueGeometry, valueGeometry3); + assertEquals(geometry.getSRID(), valueGeometry3.getGeometry().getSRID()); } /** @@ -663,11 +719,9 @@ private void testEquals() { */ private void testTableFunctionGeometry() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS POINT_TABLE FOR \"" + - TestSpatial.class.getName() + ".pointTable\""); + stat.execute("CREATE ALIAS POINT_TABLE FOR '" + TestSpatial.class.getName() + ".pointTable'"); stat.execute("create table test as select * from point_table(1, 1)"); // Read column type ResultSet columnMeta = conn.getMetaData(). 
@@ -676,8 +730,6 @@ private void testTableFunctionGeometry() throws SQLException { assertEquals("geometry", columnMeta.getString("TYPE_NAME").toLowerCase()); assertFalse(columnMeta.next()); - } finally { - conn.close(); } deleteDb("spatial"); } @@ -692,20 +744,18 @@ private void testTableFunctionGeometry() throws SQLException { public static ResultSet pointTable(double x, double y) { GeometryFactory factory = new GeometryFactory(); SimpleResultSet rs = new SimpleResultSet(); - rs.addColumn("THE_GEOM", Types.JAVA_OBJECT, "GEOMETRY", 0, 0); + rs.addColumn("THE_GEOM", Types.OTHER, "GEOMETRY", 0, 0); rs.addRow(factory.createPoint(new Coordinate(x, y))); return rs; } private void testAggregateWithGeometry() throws SQLException { deleteDb("spatialIndex"); - Connection conn = getConnection("spatialIndex"); - try { + try (Connection conn = getConnection("spatialIndex")) { Statement st = conn.createStatement(); - st.execute("CREATE AGGREGATE TABLE_ENVELOPE FOR \""+ - TableEnvelope.class.getName()+"\""); + st.execute("CREATE AGGREGATE TABLE_ENVELOPE FOR '" + TableEnvelope.class.getName() + '\''); st.execute("CREATE TABLE test(the_geom GEOMETRY)"); - st.execute("INSERT INTO test VALUES ('POINT(1 1)'), ('POINT(10 5)')"); + st.execute("INSERT INTO test VALUES ('POINT(1 1)'), (null), (null), ('POINT(10 5)')"); ResultSet rs = st.executeQuery("select TABLE_ENVELOPE(the_geom) from test"); assertEquals("geometry", rs.getMetaData(). 
getColumnTypeName(1).toLowerCase()); @@ -714,8 +764,6 @@ private void testAggregateWithGeometry() throws SQLException { assertTrue(new Envelope(1, 10, 1, 5).equals( ((Geometry) rs.getObject(1)).getEnvelopeInternal())); assertFalse(rs.next()); - } finally { - conn.close(); } deleteDb("spatialIndex"); } @@ -760,13 +808,13 @@ public Object getResult() throws SQLException { private void testTableViewSpatialPredicate() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("drop table if exists test"); stat.execute("drop view if exists test_view"); stat.execute("create table test(id int primary key, poly geometry)"); stat.execute("insert into test values(1, 'POLYGON ((1 1, 1 2, 2 2, 1 1))')"); + stat.execute("insert into test values(4, null)"); stat.execute("insert into test values(2, 'POLYGON ((3 1, 3 2, 4 2, 3 1))')"); stat.execute("insert into test values(3, 'POLYGON ((1 3, 1 4, 2 4, 1 3))')"); stat.execute("create view test_view as select * from test"); @@ -785,9 +833,6 @@ private void testTableViewSpatialPredicate() throws SQLException { assertEquals(1, rs.getInt("id")); assertFalse(rs.next()); rs.close(); - } finally { - // Close the database - conn.close(); } deleteDb("spatial"); } @@ -796,17 +841,14 @@ private void testTableViewSpatialPredicate() throws SQLException { * Check ValueGeometry conversion into SQL script */ private void testValueGeometryScript() throws SQLException { - ValueGeometry valueGeometry = ValueGeometry.get("POINT(1 1 5)"); - Connection conn = getConnection(url); - try { + ValueGeometry valueGeometry = ValueGeometry.get("POINT Z(1 1 5)"); + try (Connection conn = getConnection(URL)) { ResultSet rs = conn.createStatement().executeQuery( - "SELECT " + valueGeometry.getSQL()); + "SELECT " + valueGeometry.getSQL(HasSQL.DEFAULT_SQL_FLAGS)); assertTrue(rs.next()); Object obj = rs.getObject(1); ValueGeometry g = 
ValueGeometry.getFromGeometry(obj); assertTrue("got: " + g + " exp: " + valueGeometry, valueGeometry.equals(g)); - } finally { - conn.close(); } } @@ -815,8 +857,7 @@ private void testValueGeometryScript() throws SQLException { * be updated. */ private void testInPlaceUpdate() throws SQLException { - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { ResultSet rs = conn.createStatement().executeQuery( "SELECT 'POINT(1 1)'::geometry"); assertTrue(rs.next()); @@ -831,34 +872,28 @@ private void testInPlaceUpdate() throws SQLException { assertEquals(1, ((Point) rs.getObject(1)).getX()); assertEquals(1, ((Point) rs.getObject(1)).getY()); rs.close(); - } finally { - conn.close(); } } private void testScanIndexOnNonSpatialQuery() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("drop table if exists test"); stat.execute("create table test(id serial primary key, " + - "value double, the_geom geometry)"); + "v double, the_geom geometry)"); stat.execute("create spatial index spatial on test(the_geom)"); ResultSet rs = stat.executeQuery("explain select * from test where _ROWID_ = 5"); assertTrue(rs.next()); - assertContains(rs.getString(1), "tableScan"); - } finally { - // Close the database - conn.close(); + assertFalse(rs.getString(1).contains("/* PUBLIC.SPATIAL: _ROWID_ = " + + "5 */")); } deleteDb("spatial"); } private void testStoreCorruption() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("drop table if exists pt_cloud;\n" + "CREATE TABLE PT_CLOUD AS " + @@ -877,56 +912,88 @@ private void testStoreCorruption() throws SQLException { " system_range(1e6,1e6+50) A,system_range(6e6,6e6+50) B;\n" + "create spatial index pt_index on 
pt_cloud(the_geom);\n" + "shutdown compact;"); - } finally { - // Close the database - conn.close(); } deleteDb("spatial"); } private void testExplainSpatialIndexWithPk() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); - try { + try (Connection conn = getConnection(URL)) { Statement stat = conn.createStatement(); stat.execute("drop table if exists pt_cloud;"); - stat.execute("CREATE TABLE PT_CLOUD(id serial, the_geom geometry) AS " + - "SELECT null, CONCAT('POINT(',A.X,' ',B.X,')')::geometry the_geom " + + stat.execute("CREATE TABLE PT_CLOUD(id serial, the_geom geometry)"); + stat.execute("INSERT INTO PT_CLOUD(the_geom) " + + "SELECT 'POINT(' || A.X || ' ' || B.X || ')' " + "from system_range(0,120) A,system_range(0,10) B;"); stat.execute("create spatial index on pt_cloud(the_geom);"); - ResultSet rs = stat.executeQuery( + try (ResultSet rs = stat.executeQuery( "explain select * from PT_CLOUD " + - "where the_geom && 'POINT(1 1)'"); - try { + "where the_geom && 'POINT(1 1)'")) { assertTrue(rs.next()); assertFalse("H2 should use spatial index got this explain:\n" + rs.getString(1), rs.getString(1).contains("tableScan")); - } finally { - rs.close(); } - } finally { - // Close the database - conn.close(); } deleteDb("spatial"); } private void testNullableGeometry() throws SQLException { deleteDb("spatial"); - Connection conn = getConnection(url); + Connection conn = getConnection(URL); Statement stat = conn.createStatement(); stat.execute("create memory table test" + "(id int primary key, the_geom geometry)"); stat.execute("create spatial index on test(the_geom)"); stat.execute("insert into test values(1, null)"); + stat.execute("insert into test values(2, null)"); + stat.execute("delete from test where the_geom is null"); + stat.execute("insert into test values(1, null)"); + stat.execute("insert into test values(2, null)"); + stat.execute("insert into test values(3, " + + "'POLYGON ((1000 2000, 1000 3000, 2000 3000, 1000 2000))')"); 
+ stat.execute("insert into test values(4, null)"); + stat.execute("insert into test values(5, null)"); + stat.execute("insert into test values(6, " + + "'POLYGON ((1000 3000, 1000 4000, 2000 4000, 1000 3000))')"); + ResultSet rs = stat.executeQuery("select * from test"); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertNull(rs.getObject(2)); + int count = 0; + while (rs.next()) { + count++; + int id = rs.getInt(1); + if (id == 3 || id == 6) { + assertNotNull(rs.getObject(2)); + } else { + assertNull(rs.getObject(2)); + } + } + assertEquals(6, count); + + rs = stat.executeQuery("select * from test where the_geom is null"); + count = 0; + while (rs.next()) { + count++; + assertNull(rs.getObject(2)); + } + assertEquals(4, count); + + rs = stat.executeQuery("select * from test where the_geom is not null"); + count = 0; + while (rs.next()) { + count++; + assertNotNull(rs.getObject(2)); + } + assertEquals(2, count); + + rs = stat.executeQuery( + "select * from test " + + "where intersects(the_geom, " + + "'POLYGON ((1000 1000, 1000 2000, 2000 2000, 1000 1000))')"); + conn.close(); if (!config.memory) { - conn = getConnection(url); + conn = getConnection(URL); stat = conn.createStatement(); rs = stat.executeQuery("select * from test"); assertTrue(rs.next()); @@ -934,8 +1001,209 @@ private void testNullableGeometry() throws SQLException { assertNull(rs.getObject(2)); conn.close(); } + deleteDb("spatial"); + } + + private void testNullableGeometryDelete() throws SQLException { + deleteDb("spatial"); + Connection conn = getConnection(URL); + Statement stat = conn.createStatement(); + stat.execute("create memory table test" + + "(id int primary key, the_geom geometry)"); + stat.execute("create spatial index on test(the_geom)"); + stat.execute("insert into test values(1, null)"); + stat.execute("insert into test values(2, null)"); + stat.execute("insert into test values(3, null)"); + ResultSet rs = stat.executeQuery("select * from test order by id"); + while 
(rs.next()) { + assertNull(rs.getObject(2)); + } + stat.execute("delete from test where id = 1"); + stat.execute("delete from test where id = 2"); + stat.execute("delete from test where id = 3"); + stat.execute("insert into test values(4, null)"); + stat.execute("insert into test values(5, null)"); + stat.execute("insert into test values(6, null)"); + stat.execute("delete from test where id = 4"); + stat.execute("delete from test where id = 5"); + stat.execute("delete from test where id = 6"); + conn.close(); + deleteDb("spatial"); + } + + private void testNullableGeometryInsert() throws SQLException { + deleteDb("spatial"); + Connection conn = getConnection(URL); + Statement stat = conn.createStatement(); + stat.execute("create memory table test" + + "(id identity, the_geom geometry)"); + stat.execute("create spatial index on test(the_geom)"); + for (int i = 0; i < 1000; i++) { + stat.execute("insert into test(the_geom) values null"); + } + ResultSet rs = stat.executeQuery("select * from test"); + while (rs.next()) { + assertNull(rs.getObject(2)); + } + conn.close(); + deleteDb("spatial"); + } + private void testNullableGeometryUpdate() throws SQLException { + deleteDb("spatial"); + Connection conn = getConnection(URL); + Statement stat = conn.createStatement(); + stat.execute("create memory table test" + + "(id int primary key, the_geom geometry, description varchar2(32))"); + stat.execute("create spatial index on test(the_geom)"); + for (int i = 0; i < 1000; i++) { + stat.execute("insert into test values("+ (i + 1) +", null, null)"); + } + ResultSet rs = stat.executeQuery("select * from test"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertNull(rs.getObject(2)); + stat.execute("update test set description='DESCRIPTION' where id = 1"); + stat.execute("update test set description='DESCRIPTION' where id = 2"); + stat.execute("update test set description='DESCRIPTION' where id = 3"); + conn.close(); deleteDb("spatial"); } + private void 
testIndexUpdateNullGeometry() throws SQLException { + deleteDb("spatial"); + Connection conn = getConnection(URL); + Statement stat = conn.createStatement(); + stat.execute("drop table if exists DUMMY_11;"); + stat.execute("CREATE TABLE PUBLIC.DUMMY_11 (fid serial, GEOM GEOMETRY);"); + stat.execute("CREATE SPATIAL INDEX PUBLIC_DUMMY_11_SPATIAL_INDEX on" + + " PUBLIC.DUMMY_11(GEOM);"); + stat.execute("insert into PUBLIC.DUMMY_11(geom) values(null);"); + stat.execute("update PUBLIC.DUMMY_11 set geom =" + + " 'POLYGON ((1 1, 5 1, 5 5, 1 5, 1 1))';"); + ResultSet rs = stat.executeQuery("select fid, GEOM from DUMMY_11 " + + "where GEOM && " + + "'POLYGON" + + "((1 1,5 1,5 5,1 5,1 1))';"); + try { + assertTrue(rs.next()); + assertEquals("POLYGON ((1 1, 5 1, 5 5, 1 5, 1 1))", rs.getString(2)); + } finally { + rs.close(); + } + // Update again the geometry elsewhere + stat.execute("update PUBLIC.DUMMY_11 set geom =" + + " 'POLYGON ((10 10, 50 10, 50 50, 10 50, 10 10))';"); + + rs = stat.executeQuery("select fid, GEOM from DUMMY_11 " + + "where GEOM && " + + "'POLYGON ((10 10, 50 10, 50 50, 10 50, 10 10))';"); + try { + assertTrue(rs.next()); + assertEquals("POLYGON ((10 10, 50 10, 50 50, 10 50, 10 10))", rs.getString(2)); + } finally { + rs.close(); + } + conn.close(); + deleteDb("spatial"); + } + + private void testInsertNull() throws SQLException { + deleteDb("spatial"); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("\n" + + "drop table if exists PUBLIC.DUMMY_12;\n" + + "CREATE TABLE PUBLIC.DUMMY_12 (\n" + + " \"fid\" serial,\n" + + " Z_ID INTEGER,\n" + + " GEOM GEOMETRY,\n" + + " CONSTRAINT CONSTRAINT_DUMMY_12 PRIMARY KEY (\"fid\")\n" + + ");\n" + + "CREATE INDEX PRIMARY_KEY_DUMMY_12 ON PUBLIC.DUMMY_12 (\"fid\");\n" + + "CREATE spatial INDEX PUBLIC_DUMMY_12_SPATIAL_INDEX_ ON PUBLIC.DUMMY_12 (GEOM);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (123,3125163,NULL);\n" + + "INSERT INTO 
PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (124,3125164,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (125,3125173,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (126,3125174,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (127,3125175,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (128,3125176,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (129,3125177,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (130,3125178,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (131,3125179,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (132,3125180,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (133,3125335,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (134,3125336,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (135,3125165,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (136,3125337,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (137,3125338,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (138,3125339,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (139,3125340,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (140,3125341,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (141,3125342,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (142,3125343,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (143,3125344,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (144,3125345,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (145,3125346,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (146,3125166,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (147,3125347,NULL);\n" + + "INSERT INTO 
PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (148,3125348,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (149,3125349,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (150,3125350,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (151,3125351,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (152,3125352,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (153,3125353,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (154,3125354,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (155,3125355,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (156,3125356,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (157,3125167,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (158,3125357,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (159,3125358,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (160,3125359,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (161,3125360,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (162,3125361,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (163,3125362,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (164,3125363,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (165,3125364,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (166,3125365,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (167,3125366,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (168,3125168,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (169,3125367,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (170,3125368,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (171,3125369,NULL);\n" + + "INSERT INTO 
PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (172,3125370,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (173,3125169,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (174,3125170,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (175,3125171,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (176,3125172,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (177,-2,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (178,-1,NULL);\n" + + "INSERT INTO PUBLIC.DUMMY_12 (\"fid\",Z_ID,GEOM) VALUES (179," + + "-1,NULL);"); + try (ResultSet rs = stat.executeQuery("select * from DUMMY_12")) { + assertTrue(rs.next()); + } + } + deleteDb("spatial"); + } + + private void testSpatialIndexWithOrder() throws SQLException { + deleteDb("spatial"); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS BUILDINGS;" + + "CREATE TABLE BUILDINGS (PK BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, " + + "THE_GEOM geometry);" + + "insert into buildings(the_geom) SELECT 'POINT(1 1)" + + "'::geometry from SYSTEM_RANGE(1,10000);\n" + + "CREATE SPATIAL INDEX ON PUBLIC.BUILDINGS(THE_GEOM);\n"); + + try (ResultSet rs = stat.executeQuery("EXPLAIN SELECT * FROM " + + "BUILDINGS ORDER BY PK LIMIT 51;")) { + assertTrue(rs.next()); + assertTrue(rs.getString(1).contains("PRIMARY_KEY")); + } + } + deleteDb("spatial"); + } } diff --git a/h2/src/test/org/h2/test/db/TestSpeed.java b/h2/src/test/org/h2/test/db/TestSpeed.java index f4a4bf2706..3e4d6a80a7 100644 --- a/h2/src/test/org/h2/test/db/TestSpeed.java +++ b/h2/src/test/org/h2/test/db/TestSpeed.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,13 +9,15 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Various small performance tests. */ -public class TestSpeed extends TestBase { +public class TestSpeed extends TestDb { /** * Run just this test. @@ -23,7 +25,7 @@ public class TestSpeed extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -55,7 +57,7 @@ public void test() throws SQLException { // stat.execute("CREATE TABLE TEST_A(ID INT PRIMARY KEY, NAME // VARCHAR(255))"); // stat.execute("INSERT INTO TEST_A VALUES(0, 'Hello')"); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); // for(int i=1; i<8000; i*=2) { // stat.execute("INSERT INTO TEST_A SELECT ID+"+i+", NAME FROM TEST_A"); // @@ -68,7 +70,6 @@ public void test() throws SQLException { // rs.getString(2); // } // } - // System.out.println(System.currentTimeMillis()-time); // // stat.execute("CREATE TABLE TEST_B(ID INT PRIMARY KEY, NAME @@ -89,7 +90,7 @@ public void test() throws SQLException { // rs.getString("ID"); // stat.execute("DROP TABLE TEST"); - // long time = System.currentTimeMillis(); + // long time = System.nanoTime(); stat.execute("DROP TABLE IF EXISTS TEST"); stat.execute("CREATE CACHED TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); @@ -108,8 +109,8 @@ public void test() throws SQLException { // System.exit(0); // System.out.println("END "+Value.cacheHit+" "+Value.cacheMiss); - time = System.currentTimeMillis() - time; - trace(time + " insert"); + time = System.nanoTime() - time; + trace(TimeUnit.NANOSECONDS.toMillis(time) + " insert"); // if(true) 
return; @@ -122,7 +123,7 @@ public void test() throws SQLException { // conn.close(); - time = System.currentTimeMillis(); + time = System.nanoTime(); prep = conn.prepareStatement("UPDATE TEST " + "SET NAME='Another data row which is long' WHERE ID=?"); @@ -150,12 +151,13 @@ public void test() throws SQLException { // } // } - time = System.currentTimeMillis() - time; - trace(time + " update"); + time = System.nanoTime() - time; + trace(TimeUnit.NANOSECONDS.toMillis(time) + " update"); + time = System.nanoTime(); conn.close(); - time = System.currentTimeMillis() - time; - trace(time + " close"); + time = System.nanoTime() - time; + trace(TimeUnit.NANOSECONDS.toMillis(time) + " close"); deleteDb("speed"); } diff --git a/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java b/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java new file mode 100644 index 0000000000..48361bcf1e --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestSubqueryPerformanceOnLazyExecutionMode.java @@ -0,0 +1,167 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.h2.command.dml.SetTypes; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Test subquery performance with lazy query execution mode {@link SetTypes#LAZY_QUERY_EXECUTION}. + */ +public class TestSubqueryPerformanceOnLazyExecutionMode extends TestDb { + /** Rows count. */ + private static final int ROWS = 5000; + /** Test repeats when unexpected failure. */ + private static final int FAIL_REPEATS = 5; + + /** + * Run just this test. 
+ * + * @param a ignored + */ + public static void main(String[] a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return !config.ci; + } + + @Override + public void test() throws Exception { + deleteDb("lazySubq"); + try (Connection conn = getConnection("lazySubq")) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE one (x INTEGER, y INTEGER )"); + try (PreparedStatement prep = conn.prepareStatement("insert into one values (?,?)")) { + for (int row = 0; row < ROWS; row++) { + prep.setInt(1, row / 100); + prep.setInt(2, row); + prep.execute(); + } + } + + testSubqueryInCondition(stmt); + testSubqueryInJoin(stmt); + testSubqueryInJoinFirst(stmt); + testJoinTwoSubqueries(stmt); + testSubqueryInNestedJoin(stmt); + } + } + finally { + deleteDb("lazySubq"); + } + } + + private void testSubqueryInCondition(Statement stmt) throws Exception { + String sql = "SELECT COUNT (*) FROM one WHERE x IN (SELECT y FROM one WHERE y < 50)"; + + checkExecutionTime(stmt, sql); + } + + private void testSubqueryInJoin(Statement stmt) throws Exception { + String sql = + "SELECT COUNT (one.x) FROM one " + + "JOIN (SELECT y AS val FROM one WHERE y < 50) AS subq ON subq.val=one.x"; + + checkExecutionTime(stmt, sql); + } + + private void testSubqueryInJoinFirst(Statement stmt) throws Exception { + String sql = + "SELECT COUNT (one.x) FROM " + + "(SELECT y AS val FROM one WHERE y < 50) AS subq " + + "JOIN one ON subq.val=one.x"; + + checkExecutionTime(stmt, sql); + } + + private void testJoinTwoSubqueries(Statement stmt) throws Exception { + String sql = + "SELECT COUNT (one_sub.x) FROM " + + "(SELECT y AS val FROM one WHERE y < 50) AS subq " + + "JOIN (SELECT x FROM one) AS one_sub ON subq.val=one_sub.x"; + + checkExecutionTime(stmt, sql); + } + + private void testSubqueryInNestedJoin(Statement stmt) throws Exception { + String sql = + "SELECT COUNT (one.x) FROM one " + + "LEFT JOIN (SELECT 1 
AS val_1) AS subq0 " + + "JOIN (SELECT y AS val FROM one WHERE y < 30) AS subq1 ON subq0.val_1 < subq1.val " + + "ON one.x = subq1.val " + + "WHERE one.x < 30"; + + checkExecutionTime(stmt, sql, 3000); + } + + private void checkExecutionTime(Statement stmt, String sql) throws Exception { + checkExecutionTime(stmt, sql, ROWS); + } + + /** + * Compare execution time when lazy execution mode is disabled and enabled. + * The execution time must be almost the same. + */ + private void checkExecutionTime(Statement stmt, String sql, int expected) throws Exception { + long totalNotLazy = 0; + long totalLazy = 0; + + int successCnt = 0; + int failCnt = 0; + + for (int i = 0; i < FAIL_REPEATS; ++i) { + long tLazy = executeAndCheckResult(stmt, sql, true, expected); + long tNotLazy = executeAndCheckResult(stmt, sql, false, expected); + + totalNotLazy += tNotLazy; + totalLazy += tLazy; + + if (tNotLazy * 2 > tLazy) { + successCnt++; + if (i == 0) { + break; + } + } else { + failCnt++; + } + } + + if (failCnt > successCnt) { + fail("Lazy execution too slow. Avg lazy time: " + + (totalLazy / FAIL_REPEATS) + ", avg not lazy time: " + (totalNotLazy / FAIL_REPEATS)); + } + } + + /** + * @return Time of the query execution. + */ + private long executeAndCheckResult(Statement stmt, String sql, boolean lazy, int expected) throws SQLException { + if (lazy) { + stmt.execute("SET LAZY_QUERY_EXECUTION 1"); + } + else { + stmt.execute("SET LAZY_QUERY_EXECUTION 0"); + } + + long t0 = System.currentTimeMillis(); + try (ResultSet rs = stmt.executeQuery(sql)) { + rs.next(); + assertEquals(expected, rs.getInt(1)); + } + + return System.currentTimeMillis() - t0; + } +} diff --git a/h2/src/test/org/h2/test/db/TestSynonymForTable.java b/h2/src/test/org/h2/test/db/TestSynonymForTable.java new file mode 100644 index 0000000000..61c04084c1 --- /dev/null +++ b/h2/src/test/org/h2/test/db/TestSynonymForTable.java @@ -0,0 +1,333 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.db; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests for table synonyms. + */ +public class TestSynonymForTable extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws SQLException { + testSelectFromSynonym(); + testInsertIntoSynonym(); + testInsertWithColumnNameIntoSynonym(); + testUpdate(); + testDeleteFromSynonym(); + testTruncateSynonym(); + testExistingTableName(); + testCreateForUnknownTable(); + testMetaData(); + testCreateOrReplace(); + testCreateOrReplaceExistingTable(); + testSynonymInDifferentSchema(); + testReopenDatabase(); + testDropSynonym(); + testDropTable(); + testDropSchema(); + } + + private void testUpdate() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + insertIntoSynonym(conn, 25); + + Statement stmnt = conn.createStatement(); + assertEquals(1, stmnt.executeUpdate("UPDATE testsynonym set id = 30 WHERE id = 25")); + + assertSynonymContains(conn, 30); + + conn.close(); + } + + private void testDropSchema() throws SQLException { + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE SCHEMA IF NOT EXISTS s1"); + stat.execute("CREATE TABLE IF NOT EXISTS s1.backingtable(id INT PRIMARY KEY)"); + stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR s1.backingtable"); + stat.execute("DROP SCHEMA s1 CASCADE"); + + 
assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat).execute("SELECT id FROM testsynonym"); + conn.close(); + } + + private void testDropTable() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE backingtable"); + + // Backing table does not exist anymore. + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat).execute("SELECT id FROM testsynonym"); + + // Synonym should be dropped as well + ResultSet synonyms = conn.createStatement().executeQuery( + "SELECT * FROM INFORMATION_SCHEMA.SYNONYMS WHERE SYNONYM_NAME='TESTSYNONYM'"); + assertFalse(synonyms.next()); + conn.close(); + + // Reopening should work with dropped synonym + Connection conn2 = getConnection("synonym"); + assertThrows(ErrorCode.OBJECT_CLOSED, stat).execute("SELECT id FROM testsynonym"); + conn2.close(); + } + + private void testDropSynonym() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + Statement stat = conn.createStatement(); + + stat.execute("DROP SYNONYM testsynonym"); + + // Synonym does not exist anymore. + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat).execute("SELECT id FROM testsynonym"); + + // Dropping with "if exists" should succeed even if the synonym does not exist anymore. + stat.execute("DROP SYNONYM IF EXISTS testsynonym"); + + // Without "if exists" the command should fail if the synonym does not exist. 
+ assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat).execute("DROP SYNONYM testsynonym"); + conn.close(); + } + + private void testSynonymInDifferentSchema() throws SQLException { + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + + stat.execute("CREATE SCHEMA IF NOT EXISTS s1"); + stat.execute("CREATE TABLE IF NOT EXISTS s1.backingtable(id INT PRIMARY KEY)"); + stat.execute("TRUNCATE TABLE s1.backingtable"); + stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR s1.backingtable"); + stat.execute("INSERT INTO s1.backingtable VALUES(15)"); + assertSynonymContains(conn, 15); + conn.close(); + } + + private void testCreateOrReplaceExistingTable() throws SQLException { + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS backingtable(id INT PRIMARY KEY)"); + + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, stat) + .execute("CREATE OR REPLACE SYNONYM backingtable FOR backingtable"); + conn.close(); + } + + private void testCreateOrReplace() throws SQLException { + // start with a fresh db so the first create or replace has to actually create the synonym. + deleteDb("synonym"); + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS backingtable(id INT PRIMARY KEY)"); + stat.execute("CREATE TABLE IF NOT EXISTS backingtable2(id INT PRIMARY KEY)"); + stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR backingtable"); + insertIntoBackingTable(conn, 17); + + ResultSet rs = stat.executeQuery("SELECT id FROM testsynonym"); + assertTrue(rs.next()); + assertEquals(17, rs.getInt(1)); + + stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR backingtable2"); + + // Should not return a result, since backingtable2 is empty. 
+ ResultSet rs2 = stat.executeQuery("SELECT id FROM testsynonym"); + assertFalse(rs2.next()); + conn.close(); + + deleteDb("synonym"); + } + + private void testMetaData() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + + ResultSet tables = conn.getMetaData().getTables(null, Constants.SCHEMA_MAIN, null, + new String[]{"SYNONYM"}); + assertTrue(tables.next()); + assertEquals(tables.getString("TABLE_NAME"), "TESTSYNONYM"); + assertEquals(tables.getString("TABLE_TYPE"), "SYNONYM"); + assertFalse(tables.next()); + + ResultSet columns = conn.getMetaData().getColumns(null, Constants.SCHEMA_MAIN, "TESTSYNONYM", null); + assertTrue(columns.next()); + assertEquals(columns.getString("TABLE_NAME"), "TESTSYNONYM"); + assertEquals(columns.getString("COLUMN_NAME"), "ID"); + assertFalse(columns.next()); + + ResultSet synonyms = conn.createStatement().executeQuery("SELECT * FROM INFORMATION_SCHEMA.SYNONYMS"); + assertTrue(synonyms.next()); + assertEquals("SYNONYM", synonyms.getString("SYNONYM_CATALOG")); + assertEquals("PUBLIC", synonyms.getString("SYNONYM_SCHEMA")); + assertEquals("TESTSYNONYM", synonyms.getString("SYNONYM_NAME")); + assertEquals("BACKINGTABLE", synonyms.getString("SYNONYM_FOR")); + assertEquals("VALID", synonyms.getString("STATUS")); + assertNull(synonyms.getString("REMARKS")); + assertFalse(synonyms.next()); + conn.close(); + } + + private void testCreateForUnknownTable() throws SQLException { + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat) + .execute("CREATE SYNONYM someSynonym FOR nonexistingTable"); + conn.close(); + } + + private void testExistingTableName() throws SQLException { + Connection conn = getConnection("synonym"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS backingtable(id INT PRIMARY KEY)"); + + assertThrows(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1, 
stat) + .execute("CREATE SYNONYM backingtable FOR backingtable"); + conn.close(); + } + + /** + * Make sure, that the schema changes are persisted when reopening the database + */ + private void testReopenDatabase() throws SQLException { + if(!config.memory) { + deleteDb("synonym"); + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + insertIntoBackingTable(conn, 9); + conn.close(); + Connection conn2 = getConnection("synonym"); + assertSynonymContains(conn2, 9); + conn2.close(); + } + } + + private void testTruncateSynonym() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + + insertIntoBackingTable(conn, 7); + assertBackingTableContains(conn, 7); + + conn.createStatement().execute("TRUNCATE TABLE testsynonym"); + + assertBackingTableIsEmpty(conn); + conn.close(); + } + + private void testDeleteFromSynonym() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + + insertIntoBackingTable(conn, 7); + assertBackingTableContains(conn, 7); + deleteFromSynonym(conn, 7); + + assertBackingTableIsEmpty(conn); + conn.close(); + } + + private static void deleteFromSynonym(Connection conn, int id) throws SQLException { + PreparedStatement prep = conn.prepareStatement( + "DELETE FROM testsynonym WHERE id = ?"); + prep.setInt(1, id); + prep.execute(); + } + + private void assertBackingTableIsEmpty(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT id FROM backingtable"); + assertFalse(rs.next()); + } + + private void testInsertIntoSynonym() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + + insertIntoSynonym(conn, 5); + assertBackingTableContains(conn, 5); + conn.close(); + } + + private void testInsertWithColumnNameIntoSynonym() throws SQLException { + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + 
+ PreparedStatement prep = conn.prepareStatement( + "INSERT INTO testsynonym (id) VALUES(?)"); + prep.setInt(1, 55); + prep.execute(); + assertBackingTableContains(conn, 55); + conn.close(); + } + + private void assertBackingTableContains(Connection conn, int testValue) throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT id FROM backingtable"); + assertTrue(rs.next()); + assertEquals(testValue, rs.getInt(1)); + assertFalse(rs.next()); + } + + private void testSelectFromSynonym() throws SQLException { + deleteDb("synonym"); + Connection conn = getConnection("synonym"); + createTableWithSynonym(conn); + insertIntoBackingTable(conn, 1); + assertSynonymContains(conn, 1); + conn.close(); + } + + private void assertSynonymContains(Connection conn, int id) throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("SELECT id FROM testsynonym"); + assertTrue(rs.next()); + assertEquals(id, rs.getInt(1)); + assertFalse(rs.next()); + } + + private static void insertIntoSynonym(Connection conn, int id) throws SQLException { + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO testsynonym VALUES(?)"); + prep.setInt(1, id); + prep.execute(); + } + + private static void insertIntoBackingTable(Connection conn, int id) throws SQLException { + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO backingtable VALUES(?)"); + prep.setInt(1, id); + prep.execute(); + } + + private static void createTableWithSynonym(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS backingtable(id INT PRIMARY KEY)"); + stat.execute("CREATE OR REPLACE SYNONYM testsynonym FOR backingtable"); + stat.execute("TRUNCATE TABLE backingtable"); + } + +} diff --git a/h2/src/test/org/h2/test/db/TestTableEngines.java b/h2/src/test/org/h2/test/db/TestTableEngines.java index 96331ae8cb..a87646f7e3 100644 --- 
a/h2/src/test/org/h2/test/db/TestTableEngines.java +++ b/h2/src/test/org/h2/test/db/TestTableEngines.java @@ -1,24 +1,33 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + import org.h2.api.TableEngine; import org.h2.command.ddl.CreateTableData; -import org.h2.engine.Session; -import org.h2.expression.Expression; -import org.h2.index.BaseIndex; +import org.h2.command.query.AllColumnsForPlan; +import org.h2.engine.SessionLocal; import org.h2.index.Cursor; import org.h2.index.Index; import org.h2.index.IndexType; import org.h2.index.SingleRowCursor; +import org.h2.message.DbException; import org.h2.result.Row; import org.h2.result.SearchRow; import org.h2.result.SortOrder; @@ -26,18 +35,19 @@ import org.h2.table.Table; import org.h2.table.TableBase; import org.h2.table.TableFilter; +import org.h2.table.TableType; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.value.Value; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; import org.h2.value.ValueNull; -import org.h2.value.ValueString; /** * The class for external table engines mechanism testing. * * @author Sergi Vladykin */ -public class TestTableEngines extends TestBase { +public class TestTableEngines extends TestDb { /** * Run just this test. 
@@ -45,32 +55,17 @@ public class TestTableEngines extends TestBase { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - if (config.mvcc) { - return; - } - testEarlyFilter(); + testQueryExpressionFlag(); + testSubQueryInfo(); testEngineParams(); + testSchemaEngineParams(); testSimpleQuery(); - } - - private void testEarlyFilter() throws SQLException { - deleteDb("tableEngine"); - Connection conn = getConnection("tableEngine;EARLY_FILTER=TRUE"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE t1(id int, name varchar) ENGINE \"" + - EndlessTableEngine.class.getName() + "\""); - ResultSet rs = stat.executeQuery( - "SELECT name FROM t1 where id=1 and name is not null"); - assertTrue(rs.next()); - assertEquals("((ID = 1)\n AND (NAME IS NOT NULL))", rs.getString(1)); - rs.close(); - conn.close(); - deleteDb("tableEngine"); + testMultiColumnTreeSetIndex(); } private void testEngineParams() throws SQLException { @@ -85,6 +80,13 @@ private void testEngineParams() throws SQLException { EndlessTableEngine.createTableData.tableEngineParams.get(0)); assertEquals("param2", EndlessTableEngine.createTableData.tableEngineParams.get(1)); + stat.execute("CREATE TABLE t2(id int, name varchar) WITH \"param1\", \"param2\""); + assertEquals(2, + EndlessTableEngine.createTableData.tableEngineParams.size()); + assertEquals("param1", + EndlessTableEngine.createTableData.tableEngineParams.get(0)); + assertEquals("param2", + EndlessTableEngine.createTableData.tableEngineParams.get(1)); conn.close(); if (!config.memory) { // Test serialization of table parameters @@ -98,6 +100,28 @@ private void testEngineParams() throws SQLException { EndlessTableEngine.createTableData.tableEngineParams.get(1)); conn.close(); } + // Prevent memory leak + EndlessTableEngine.createTableData = null; + 
deleteDb("tableEngine"); + } + + private void testSchemaEngineParams() throws SQLException { + deleteDb("tableEngine"); + Connection conn = getConnection("tableEngine"); + Statement stat = conn.createStatement(); + stat.execute("CREATE SCHEMA s1 WITH \"param1\", \"param2\""); + + stat.execute("CREATE TABLE s1.t1(id int, name varchar) ENGINE \"" + + EndlessTableEngine.class.getName() + '\"'); + assertEquals(2, + EndlessTableEngine.createTableData.tableEngineParams.size()); + assertEquals("param1", + EndlessTableEngine.createTableData.tableEngineParams.get(0)); + assertEquals("param2", + EndlessTableEngine.createTableData.tableEngineParams.get(1)); + conn.close(); + // Prevent memory leak + EndlessTableEngine.createTableData = null; deleteDb("tableEngine"); } @@ -157,6 +181,326 @@ private void testStatements(Statement stat) throws SQLException { } + private void testMultiColumnTreeSetIndex() throws SQLException { + deleteDb("tableEngine"); + Connection conn = getConnection("tableEngine"); + Statement stat = conn.createStatement(); + + stat.executeUpdate("CREATE TABLE T(A INT, B VARCHAR, C BIGINT, " + + "D BIGINT DEFAULT 0) ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + + stat.executeUpdate("CREATE INDEX IDX_C_B_A ON T(C, B, A)"); + stat.executeUpdate("CREATE INDEX IDX_B_A ON T(B, A)"); + + List> dataSet = new ArrayList<>(); + + dataSet.add(Arrays.asList(1, "1", 1L)); + dataSet.add(Arrays.asList(1, "0", 2L)); + dataSet.add(Arrays.asList(2, "0", -1L)); + dataSet.add(Arrays.asList(0, "0", 1L)); + dataSet.add(Arrays.asList(0, "1", null)); + dataSet.add(Arrays.asList(2, null, 0L)); + + PreparedStatement prep = conn.prepareStatement("INSERT INTO T(A,B,C) VALUES(?,?,?)"); + for (List row : dataSet) { + for (int i = 0; i < row.size(); i++) { + prep.setObject(i + 1, row.get(i)); + } + assertEquals(1, prep.executeUpdate()); + } + prep.close(); + + checkPlan(stat, "select max(c) from t", "direct lookup"); + checkPlan(stat, "select min(c) from t", "direct 
lookup"); + checkPlan(stat, "select count(*) from t", "direct lookup"); + + checkPlan(stat, "select * from t", "scan"); + + checkPlan(stat, "select * from t order by c", "IDX_C_B_A"); + checkPlan(stat, "select * from t order by c, b", "IDX_C_B_A"); + checkPlan(stat, "select * from t order by b", "IDX_B_A"); + checkPlan(stat, "select * from t order by b, a", "IDX_B_A"); + checkPlan(stat, "select * from t order by b, c", "scan"); + checkPlan(stat, "select * from t order by a, b", "scan"); + checkPlan(stat, "select * from t order by a, c, b", "scan"); + + checkPlan(stat, "select * from t where b > ''", "IDX_B_A"); + checkPlan(stat, "select * from t where a > 0 and b > ''", "IDX_B_A"); + checkPlan(stat, "select * from t where b < ''", "IDX_B_A"); + checkPlan(stat, "select * from t where b < '' and c < 1", "IDX_C_B_A"); + checkPlan(stat, "select * from t where a = 0", "scan"); + checkPlan(stat, "select * from t where a > 0 order by c, b", "IDX_C_B_A"); + checkPlan(stat, "select * from t where a = 0 and c > 0", "IDX_C_B_A"); + checkPlan(stat, "select * from t where a = 0 and b < '0'", "IDX_B_A"); + + assertEquals(6, ((Number) query(stat, "select count(*) from t").get(0).get(0)).intValue()); + + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by a"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by b"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by c"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by c, a"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by b, a"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by c, b, a"); + checkResultsNoOrder(stat, 6, "select * from t", "select * from t order by a, c, b"); + + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by a"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order 
by b"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by c"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by c, a"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by b, a"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by c, b, a"); + checkResultsNoOrder(stat, 4, "select * from t where a > 0", + "select * from t where a > 0 order by a, c, b"); + + checkResults(6, dataSet, stat, + "select * from t order by a", null, new RowComparator(0)); + checkResults(6, dataSet, stat, + "select * from t order by a desc", null, new RowComparator(true, 0)); + checkResults(6, dataSet, stat, + "select * from t order by b, c", null, new RowComparator(1, 2)); + checkResults(6, dataSet, stat, + "select * from t order by c, a", null, new RowComparator(2, 0)); + checkResults(6, dataSet, stat, + "select * from t order by b, a", null, new RowComparator(1, 0)); + checkResults(6, dataSet, stat, + "select * from t order by c, b, a", null, new RowComparator(2, 1, 0)); + + checkResults(4, dataSet, stat, + "select * from t where a > 0", new RowFilter() { + @Override + protected boolean accept(List row) { + return getInt(row, 0) > 0; + } + }, null); + checkResults(3, dataSet, stat, "select * from t where b = '0'", new RowFilter() { + @Override + protected boolean accept(List row) { + return "0".equals(getString(row, 1)); + } + }, null); + checkResults(5, dataSet, stat, "select * from t where b >= '0'", new RowFilter() { + @Override + protected boolean accept(List row) { + String b = getString(row, 1); + return b != null && b.compareTo("0") >= 0; + } + }, null); + checkResults(2, dataSet, stat, "select * from t where b > '0'", new RowFilter() { + @Override + protected boolean accept(List row) { + String b = getString(row, 1); + return b != null && b.compareTo("0") > 0; + } + }, null); + 
checkResults(1, dataSet, stat, "select * from t where b > '0' and c > 0", new RowFilter() { + @Override + protected boolean accept(List row) { + String b = getString(row, 1); + Long c = getLong(row, 2); + return b != null && b.compareTo("0") > 0 && c != null && c > 0; + } + }, null); + checkResults(1, dataSet, stat, "select * from t where b > '0' and c < 2", new RowFilter() { + @Override + protected boolean accept(List row) { + String b = getString(row, 1); + Long c = getLong(row, 2); + return b != null && b.compareTo("0") > 0 && c != null && c < 2; + } + }, null); + checkResults(2, dataSet, stat, "select * from t where b > '0' and a < 2", new RowFilter() { + @Override + protected boolean accept(List row) { + Integer a = getInt(row, 0); + String b = getString(row, 1); + return b != null && b.compareTo("0") > 0 && a != null && a < 2; + } + }, null); + checkResults(1, dataSet, stat, "select * from t where b > '0' and a > 0", new RowFilter() { + @Override + protected boolean accept(List row) { + Integer a = getInt(row, 0); + String b = getString(row, 1); + return b != null && b.compareTo("0") > 0 && a != null && a > 0; + } + }, null); + checkResults(2, dataSet, stat, "select * from t where b = '0' and a > 0", new RowFilter() { + @Override + protected boolean accept(List row) { + Integer a = getInt(row, 0); + String b = getString(row, 1); + return "0".equals(b) && a != null && a > 0; + } + }, null); + checkResults(2, dataSet, stat, "select * from t where b = '0' and a < 2", new RowFilter() { + @Override + protected boolean accept(List row) { + Integer a = getInt(row, 0); + String b = getString(row, 1); + return "0".equals(b) && a != null && a < 2; + } + }, null); + conn.close(); + deleteDb("tableEngine"); + } + + private void testQueryExpressionFlag() throws SQLException { + deleteDb("testQueryExpressionFlag"); + Connection conn = getConnection("testQueryExpressionFlag"); + Statement stat = conn.createStatement(); + stat.execute("create table QUERY_EXPR_TEST(id int) 
ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + stat.execute("create table QUERY_EXPR_TEST_NO(id int) ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + stat.executeQuery("select 1 + (select 1 from QUERY_EXPR_TEST)").next(); + stat.executeQuery("select 1 from QUERY_EXPR_TEST_NO where id in " + + "(select id from QUERY_EXPR_TEST)"); + stat.executeQuery("select 1 from QUERY_EXPR_TEST_NO n " + + "where exists(select 1 from QUERY_EXPR_TEST y where y.id = n.id)"); + conn.close(); + deleteDb("testQueryExpressionFlag"); + } + + private void testSubQueryInfo() throws SQLException { + deleteDb("testSubQueryInfo"); + Connection conn = getConnection("testSubQueryInfo"); + Statement stat = conn.createStatement(); + stat.execute("create table SUB_QUERY_TEST(id int primary key, name varchar) ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + // test sub-queries + stat.executeQuery("select * from " + + "(select t2.id from " + + "(select t3.id from sub_query_test t3 where t3.name = '') t4, " + + "sub_query_test t2 " + + "where t2.id = t4.id) t5").next(); + // test view 1 + stat.execute("create view t4 as (select t3.id from sub_query_test t3 where t3.name = '')"); + stat.executeQuery("select * from " + + "(select t2.id from t4, sub_query_test t2 where t2.id = t4.id) t5").next(); + // test view 2 + stat.execute("create view t5 as " + + "(select t2.id from t4, sub_query_test t2 where t2.id = t4.id)"); + stat.executeQuery("select * from t5").next(); + // test select expressions + stat.execute("create table EXPR_TEST(id int) ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + stat.executeQuery("select * from (select (select id from EXPR_TEST x limit 1) a " + + "from dual where 1 = (select id from EXPR_TEST y limit 1)) z").next(); + // test select expressions 2 + stat.execute("create table EXPR_TEST2(id int) ENGINE \"" + + TreeSetIndexTableEngine.class.getName() + "\""); + stat.executeQuery("select * from (select (select 1 
from " + + "(select (select 2 from EXPR_TEST) from EXPR_TEST2) ZZ) from dual)").next(); + // test select expression plan + stat.execute("create table test_plan(id int primary key, name varchar)"); + stat.execute("create index MY_NAME_INDEX on test_plan(name)"); + checkPlan(stat, "select * from (select (select id from test_plan " + + "where name = 'z') from dual)", + "MY_NAME_INDEX"); + conn.close(); + deleteDb("testSubQueryInfo"); + } + + /** + * A static assertion method. + * + * @param condition the condition + * @param message the error message + */ + static void assert0(boolean condition, String message) { + if (!condition) { + throw new AssertionError(message); + } + } + + private void checkResultsNoOrder(Statement stat, int size, String query1, String query2) + throws SQLException { + List> res1 = query(stat, query1); + List> res2 = query(stat, query2); + if (size != res1.size() || size != res2.size()) { + fail("Wrong size: \n" + res1 + "\n" + res2); + } + if (size == 0) { + return; + } + int[] cols = new int[res1.get(0).size()]; + for (int i = 0; i < cols.length; i++) { + cols[i] = i; + } + Comparator> comp = new RowComparator(cols); + res1.sort(comp); + res2.sort(comp); + assertTrue("Wrong data: \n" + res1 + "\n" + res2, res1.equals(res2)); + } + + private void checkResults(int size, List> dataSet, + Statement stat, String query, RowFilter filter, RowComparator sort) + throws SQLException { + List> res1 = query(stat, query); + List> res2 = query(dataSet, filter, sort); + + assertTrue("Wrong size: " + size + " \n" + res1 + "\n" + res2, + res1.size() == size && res2.size() == size); + assertTrue(filter != null || sort != null); + + for (int i = 0; i < res1.size(); i++) { + List row1 = res1.get(i); + List row2 = res2.get(i); + + assertTrue("Filter failed on row " + i + " of \n" + res1 + "\n" + res2, + filter == null || filter.accept(row1)); + assertTrue("Sort failed on row " + i + " of \n" + res1 + "\n" + res2, + sort == null || sort.compare(row1, row2) == 0); 
+ } + } + + private static List> query(List> dataSet, + RowFilter filter, RowComparator sort) { + List> res = new ArrayList<>(); + if (filter == null) { + res.addAll(dataSet); + } else { + for (List row : dataSet) { + if (filter.accept(row)) { + res.add(row); + } + } + } + if (sort != null) { + res.sort(sort); + } + return res; + } + + private static List> query(Statement stat, String query) throws SQLException { + ResultSet rs = stat.executeQuery(query); + int cols = rs.getMetaData().getColumnCount(); + List> list = new ArrayList<>(); + while (rs.next()) { + List row = new ArrayList<>(cols); + for (int i = 1; i <= cols; i++) { + row.add(rs.getObject(i)); + } + list.add(row); + } + rs.close(); + return list; + } + + private void checkPlan(Statement stat, String query, String index) + throws SQLException { + String plan = query(stat, "EXPLAIN " + query).get(0).get(0).toString(); + assertTrue("Index '" + index + "' is not used in query plan: " + plan, + plan.contains(index)); + } + /** * A test table factory. */ @@ -170,16 +514,16 @@ private static class OneRowTable extends TableBase { /** * A scan index for one row. 
*/ - public class Scan extends BaseIndex { + public class Scan extends Index { Scan(Table table) { - initBaseIndex(table, table.getId(), table.getName() + "_SCAN", - IndexColumn.wrap(table.getColumns()), IndexType.createScan(false)); + super(table, table.getId(), table.getName() + "_SCAN", + IndexColumn.wrap(table.getColumns()), 0, IndexType.createScan(false)); } @Override - public long getRowCountApproximation() { - return table.getRowCountApproximation(); + public long getRowCountApproximation(SessionLocal session) { + return table.getRowCountApproximation(session); } @Override @@ -188,27 +532,22 @@ public long getDiskSpaceUsed() { } @Override - public long getRowCount(Session session) { + public long getRowCount(SessionLocal session) { return table.getRowCount(session); } @Override - public void checkRename() { - // do nothing - } - - @Override - public void truncate(Session session) { + public void truncate(SessionLocal session) { // do nothing } @Override - public void remove(Session session) { + public void remove(SessionLocal session) { // do nothing } @Override - public void remove(Session session, Row r) { + public void remove(SessionLocal session, Row r) { // do nothing } @@ -218,23 +557,24 @@ public boolean needRebuild() { } @Override - public double getCost(Session session, int[] masks, - TableFilter filter, SortOrder sortOrder) { + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { return 0; } @Override - public Cursor findFirstOrLast(Session session, boolean first) { + public Cursor findFirstOrLast(SessionLocal session, boolean first) { return new SingleRowCursor(row); } @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return new SingleRowCursor(row); } @Override - public void close(Session session) { + public void close(SessionLocal session) 
{ // do nothing } @@ -244,7 +584,7 @@ public boolean canGetFirstOrLast() { } @Override - public void add(Session session, Row r) { + public void add(SessionLocal session, Row r) { // do nothing } } @@ -259,14 +599,13 @@ public void add(Session session, Row r) { } @Override - public Index addIndex(Session session, String indexName, - int indexId, IndexColumn[] cols, IndexType indexType, - boolean create, String indexComment) { + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { return null; } @Override - public void addRow(Session session, Row r) { + public void addRow(SessionLocal session, Row r) { this.row = r; } @@ -276,7 +615,7 @@ public boolean canDrop() { } @Override - public boolean canGetRowCount() { + public boolean canGetRowCount(SessionLocal session) { return true; } @@ -286,7 +625,7 @@ public void checkSupportAlter() { } @Override - public void close(Session session) { + public void close(SessionLocal session) { // do nothing } @@ -301,33 +640,23 @@ public long getMaxDataModificationId() { } @Override - public long getRowCount(Session session) { - return getRowCountApproximation(); + public long getRowCount(SessionLocal session) { + return getRowCountApproximation(session); } @Override - public long getRowCountApproximation() { + public long getRowCountApproximation(SessionLocal session) { return row == null ? 
0 : 1; } @Override - public long getDiskSpaceUsed() { - return 0; - } - - @Override - public Index getScanIndex(Session session) { + public Index getScanIndex(SessionLocal session) { return scanIndex; } @Override - public String getTableType() { - return EXTERNAL_TABLE_ENGINE; - } - - @Override - public Index getUniqueIndex() { - return null; + public TableType getTableType() { + return TableType.EXTERNAL_TABLE_ENGINE; } @Override @@ -336,34 +665,15 @@ public boolean isDeterministic() { } @Override - public boolean isLockedExclusively() { - return false; - } - - @Override - public boolean lock(Session session, boolean exclusive, boolean force) { - // do nothing - return false; - } - - @Override - public void removeRow(Session session, Row r) { + public void removeRow(SessionLocal session, Row r) { this.row = null; } @Override - public void truncate(Session session) { + public long truncate(SessionLocal session) { + long result = row != null ? 1L : 0L; row = null; - } - - @Override - public void unlock(Session s) { - // do nothing - } - - @Override - public void checkRename() { - // do nothing + return result; } } @@ -395,7 +705,7 @@ private static class EndlessTable extends OneRowTableEngine.OneRowTable { EndlessTable(CreateTableData data) { super(data); - row = new Row(new Value[] { ValueInt.get(1), ValueNull.INSTANCE }, 0); + row = Row.get(new Value[] { ValueInteger.get(1), ValueNull.INSTANCE }, 0); scanIndex = new Auto(this); } @@ -409,25 +719,7 @@ public class Auto extends OneRowTableEngine.OneRowTable.Scan { } @Override - public Cursor find(TableFilter filter, SearchRow first, SearchRow last) { - return find(filter.getFilterCondition()); - } - - @Override - public Cursor find(Session session, SearchRow first, SearchRow last) { - return find(null); - } - - /** - * Search within the table. 
- * - * @param filter the table filter (optional) - * @return the cursor - */ - private Cursor find(Expression filter) { - if (filter != null) { - row.setValue(1, ValueString.get(filter.getSQL())); - } + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { return new SingleRowCursor(row); } @@ -449,4 +741,412 @@ public EndlessTable createTable(CreateTableData data) { } + /** + * A table engine that internally uses a tree set. + */ + public static class TreeSetIndexTableEngine implements TableEngine { + + static TreeSetTable created; + + @Override + public Table createTable(CreateTableData data) { + return created = new TreeSetTable(data); + } + } + + /** + * A table that internally uses a tree set. + */ + private static class TreeSetTable extends TableBase { + int dataModificationId; + + ArrayList indexes; + + TreeSetIndex scan = new TreeSetIndex(this, "scan", + IndexColumn.wrap(getColumns()), IndexType.createScan(false)) { + @Override + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return getCostRangeIndex(masks, getRowCount(session), filters, + filter, sortOrder, true, allColumnsSet); + } + }; + + TreeSetTable(CreateTableData data) { + super(data); + } + + @Override + public long truncate(SessionLocal session) { + long result = getRowCountApproximation(session); + if (indexes != null) { + for (Index index : indexes) { + index.truncate(session); + } + } else { + scan.truncate(session); + } + dataModificationId++; + return result; + } + + @Override + public void removeRow(SessionLocal session, Row row) { + if (indexes != null) { + for (Index index : indexes) { + index.remove(session, row); + } + } else { + scan.remove(session, row); + } + dataModificationId++; + } + + @Override + public void addRow(SessionLocal session, Row row) { + if (indexes != null) { + for (Index index : indexes) { + index.add(session, row); + } + } else { + 
scan.add(session, row); + } + dataModificationId++; + } + + @Override + public Index addIndex(SessionLocal session, String indexName, int indexId, IndexColumn[] cols, + int uniqueColumnCount, IndexType indexType, boolean create, String indexComment) { + if (indexes == null) { + indexes = new ArrayList<>(2); + // Scan must be always at 0. + indexes.add(scan); + } + Index index = new TreeSetIndex(this, indexName, cols, indexType); + for (SearchRow row : scan.set) { + index.add(session, (Row) row); + } + indexes.add(index); + dataModificationId++; + setModified(); + return index; + } + + @Override + public boolean isDeterministic() { + return false; + } + + @Override + public TableType getTableType() { + return TableType.EXTERNAL_TABLE_ENGINE; + } + + @Override + public Index getScanIndex(SessionLocal session) { + return scan; + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return getScanIndex(null).getRowCountApproximation(session); + } + + @Override + public long getRowCount(SessionLocal session) { + return scan.getRowCount(session); + } + + @Override + public long getMaxDataModificationId() { + return dataModificationId; + } + + @Override + public ArrayList getIndexes() { + return indexes; + } + + @Override + public void close(SessionLocal session) { + // No-op. + } + + @Override + public void checkSupportAlter() { + // No-op. + } + + @Override + public boolean canGetRowCount(SessionLocal session) { + return true; + } + + @Override + public boolean canDrop() { + return true; + } + } + + /** + * An index that internally uses a tree set. 
+ */ + private static class TreeSetIndex extends Index implements Comparator { + + final TreeSet set = new TreeSet<>(this); + + TreeSetIndex(Table t, String name, IndexColumn[] cols, IndexType type) { + super(t, 0, name, cols, 0, type); + } + + @Override + public int compare(SearchRow o1, SearchRow o2) { + int res = compareRows(o1, o2); + if (res == 0) { + if (o1.getKey() == Long.MAX_VALUE || o2.getKey() == Long.MIN_VALUE) { + res = 1; + } else if (o1.getKey() == Long.MIN_VALUE || o2.getKey() == Long.MAX_VALUE) { + res = -1; + } + } + return res; + } + + @Override + public void close(SessionLocal session) { + // No-op. + } + + @Override + public void add(SessionLocal session, Row row) { + set.add(row); + } + + @Override + public void remove(SessionLocal session, Row row) { + set.remove(row); + } + + private static SearchRow mark(SearchRow row, boolean first) { + if (row != null) { + // Mark this row to be a search row. + row.setKey(first ? Long.MIN_VALUE : Long.MAX_VALUE); + } + return row; + } + + @Override + public Cursor find(SessionLocal session, SearchRow first, SearchRow last) { + Set subSet; + if (first != null && last != null && compareRows(last, first) < 0) { + subSet = Collections.emptySet(); + } else { + if (first != null) { + first = set.floor(mark(first, true)); + } + if (last != null) { + last = set.ceiling(mark(last, false)); + } + if (first == null && last == null) { + subSet = set; + } else if (first != null) { + if (last != null) { + subSet = set.subSet(first, true, last, true); + } else { + subSet = set.tailSet(first, true); + } + } else if (last != null) { + subSet = set.headSet(last, true); + } else { + throw new IllegalStateException(); + } + } + return new IteratorCursor(subSet.iterator()); + } + + @Override + public double getCost(SessionLocal session, int[] masks, + TableFilter[] filters, int filter, SortOrder sortOrder, + AllColumnsForPlan allColumnsSet) { + return getCostRangeIndex(masks, set.size(), filters, filter, + sortOrder, false, 
allColumnsSet); + } + + @Override + public void remove(SessionLocal session) { + // No-op. + } + + @Override + public void truncate(SessionLocal session) { + set.clear(); + } + + @Override + public boolean canGetFirstOrLast() { + return true; + } + + @Override + public Cursor findFirstOrLast(SessionLocal session, boolean first) { + return new SingleRowCursor((Row) + (set.isEmpty() ? null : first ? set.first() : set.last())); + } + + @Override + public boolean needRebuild() { + return true; + } + + @Override + public long getRowCount(SessionLocal session) { + return set.size(); + } + + @Override + public long getRowCountApproximation(SessionLocal session) { + return getRowCount(null); + } + + } + + /** + */ + private static class IteratorCursor implements Cursor { + Iterator it; + private Row current; + + IteratorCursor(Iterator it) { + this.it = it; + } + + @Override + public boolean previous() { + throw DbException.getUnsupportedException("prev"); + } + + @Override + public boolean next() { + if (it.hasNext()) { + current = (Row) it.next(); + return true; + } + current = null; + return false; + } + + @Override + public SearchRow getSearchRow() { + return get(); + } + + @Override + public Row get() { + return current; + } + + @Override + public String toString() { + return "IteratorCursor->" + current; + } + } + + /** + * A comparator for rows (lists of comparable objects). + */ + private static class RowComparator implements Comparator> { + private int[] cols; + private boolean descending; + + RowComparator(int... cols) { + this.descending = false; + this.cols = cols; + } + + RowComparator(boolean descending, int... 
cols) { + this.descending = descending; + this.cols = cols; + } + + @SuppressWarnings("unchecked") + @Override + public int compare(List row1, List row2) { + for (int i = 0; i < cols.length; i++) { + int col = cols[i]; + Comparable o1 = (Comparable) row1.get(col); + Comparable o2 = (Comparable) row2.get(col); + if (o1 == null) { + return applyDescending(o2 == null ? 0 : -1); + } + if (o2 == null) { + return applyDescending(1); + } + int res = o1.compareTo(o2); + if (res != 0) { + return applyDescending(res); + } + } + return 0; + } + + private int applyDescending(int v) { + if (!descending) { + return v; + } + if (v == 0) { + return v; + } + return -v; + } + } + + /** + * A filter for rows (lists of objects). + */ + abstract static class RowFilter { + + /** + * Check whether the row needs to be processed. + * + * @param row the row + * @return true if yes + */ + protected abstract boolean accept(List row); + + /** + * Get an integer from a row. + * + * @param row the row + * @param col the column index + * @return the value + */ + protected Integer getInt(List row, int col) { + return (Integer) row.get(col); + } + + /** + * Get a long from a row. + * + * @param row the row + * @param col the column index + * @return the value + */ + protected Long getLong(List row, int col) { + return (Long) row.get(col); + } + + /** + * Get a string from a row. + * + * @param row the row + * @param col the column index + * @return the value + */ + protected String getString(List row, int col) { + return (String) row.get(col); + } + + } + } diff --git a/h2/src/test/org/h2/test/db/TestTempTables.java b/h2/src/test/org/h2/test/db/TestTempTables.java index 2d886bc67a..416c7ae4ed 100644 --- a/h2/src/test/org/h2/test/db/TestTempTables.java +++ b/h2/src/test/org/h2/test/db/TestTempTables.java @@ -1,24 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - import org.h2.api.ErrorCode; -import org.h2.engine.Constants; -import org.h2.store.fs.FileUtils; +import org.h2.engine.Session; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Temporary table tests. */ -public class TestTempTables extends TestBase { +public class TestTempTables extends TestDb { /** * Run just this test. @@ -26,17 +28,17 @@ public class TestTempTables extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { deleteDb("tempTables"); + testAnalyzeReuseObjectId(); testTempSequence(); testTempFileResultSet(); testTempTableResultSet(); testTransactionalTemp(); - testDeleteGlobalTempTableWhenClosing(); Connection c1 = getConnection("tempTables"); testAlter(c1); Connection c2 = getConnection("tempTables"); @@ -46,25 +48,49 @@ public void test() throws SQLException { c1.close(); c2.close(); testLotsOfTables(); + testCreateAsSelectDistinct(); deleteDb("tempTables"); } + private void testAnalyzeReuseObjectId() throws SQLException { + deleteDb("tempTables"); + Connection conn = getConnection("tempTables"); + Statement stat = conn.createStatement(); + stat.execute("create local temporary table test(id identity)"); + PreparedStatement prep = conn + .prepareStatement("insert into test default values"); + for (int i = 0; i < 10000; i++) { + prep.execute(); + } + stat.execute("create local temporary table " + + "test2(id identity) as select x from system_range(1, 10)"); + conn.close(); + } + private 
void testTempSequence() throws SQLException { deleteDb("tempTables"); Connection conn = getConnection("tempTables"); Statement stat = conn.createStatement(); stat.execute("create local temporary table test(id identity)"); - stat.execute("insert into test values(null)"); + Session iface = ((JdbcConnection) conn).getSession(); + if ((iface instanceof SessionLocal)) { + assertEquals(1, ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences().size()); + } + stat.execute("insert into test default values"); stat.execute("shutdown"); conn.close(); conn = getConnection("tempTables"); - ResultSet rs = conn.createStatement().executeQuery( - "select * from information_schema.sequences"); - assertFalse(rs.next()); + iface = ((JdbcConnection) conn).getSession(); + if ((iface instanceof SessionLocal)) { + assertEquals(0, ((SessionLocal) iface).getDatabase().getMainSchema().getAllSequences().size()); + } conn.close(); } private void testTempFileResultSet() throws SQLException { + if (config.lazy) { + return; + } deleteDb("tempTables"); Connection conn = getConnection("tempTables;MAX_MEMORY_ROWS=10"); ResultSet rs1, rs2; @@ -75,10 +101,10 @@ private void testTempFileResultSet() throws SQLException { rs1 = stat1.executeQuery("select * from system_range(1, 20)"); rs2 = stat2.executeQuery("select * from system_range(1, 20)"); for (int i = 0; i < 20; i++) { - rs1.next(); - rs2.next(); - rs1.getInt(1); - rs2.getInt(1); + assertTrue(rs1.next()); + assertTrue(rs2.next()); + assertEquals(i + 1, rs1.getInt(1)); + assertEquals(i + 1, rs2.getInt(1)); } rs2.close(); // verify the temp table is not deleted yet @@ -169,7 +195,7 @@ private void testTransactionalTemp() throws SQLException { stat.execute("commit"); stat.execute("insert into test values(2)"); stat.execute("create local temporary table temp(" + - "id int primary key, name varchar, constraint x index(name)) transactional"); + "id int primary key, name varchar, constraint x unique(name)) transactional"); 
stat.execute("insert into temp values(3, 'test')"); stat.execute("rollback"); rs = stat.executeQuery("select * from test"); @@ -180,34 +206,6 @@ private void testTransactionalTemp() throws SQLException { conn.close(); } - private void testDeleteGlobalTempTableWhenClosing() throws SQLException { - if (config.memory) { - return; - } - if (config.mvStore) { - return; - } - deleteDb("tempTables"); - Connection conn = getConnection("tempTables"); - Statement stat = conn.createStatement(); - stat.execute("create global temporary table test(id int, data varchar)"); - stat.execute("insert into test " + - "select x, space(1000) from system_range(1, 1000)"); - stat.execute("shutdown compact"); - try { - conn.close(); - } catch (SQLException e) { - // expected - } - String dbName = getBaseDir() + "/tempTables" + Constants.SUFFIX_PAGE_FILE; - long before = FileUtils.size(dbName); - assertTrue(before > 0); - conn = getConnection("tempTables"); - conn.close(); - long after = FileUtils.size(dbName); - assertEquals(after, before); - } - private void testAlter(Connection conn) throws SQLException { Statement stat; stat = conn.createStatement(); @@ -290,7 +288,7 @@ private void testTables(Connection c1, Connection c2) throws SQLException { assertResultRowCount(1, rs); c1.commit(); // test_temp should have been dropped automatically - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, s1). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, s1). 
executeQuery("select * from test_temp"); } @@ -299,6 +297,9 @@ private void testTables(Connection c1, Connection c2) throws SQLException { * transaction table in the MVStore */ private void testLotsOfTables() throws SQLException { + if (config.networked || config.throttle > 0) { + return; // just to save some testing time + } deleteDb("tempTables"); Connection conn = getConnection("tempTables"); Statement stat = conn.createStatement(); @@ -308,4 +309,24 @@ private void testLotsOfTables() throws SQLException { } conn.close(); } + + /** + * Issue #401: NPE in "SELECT DISTINCT * ORDER BY" + */ + private void testCreateAsSelectDistinct() throws SQLException { + deleteDb("tempTables"); + Connection conn = getConnection("tempTables;MAX_MEMORY_ROWS=1000"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE ONE(S1 VARCHAR(255), S2 VARCHAR(255))"); + PreparedStatement prep = conn + .prepareStatement("insert into one values(?,?)"); + for (int row = 0; row < 10000; row++) { + prep.setString(1, "abc"); + prep.setString(2, "def" + row); + prep.execute(); + } + stat.execute( + "CREATE TABLE TWO AS SELECT DISTINCT * FROM ONE ORDER BY S1"); + conn.close(); + } } diff --git a/h2/src/test/org/h2/test/db/TestTransaction.java b/h2/src/test/org/h2/test/db/TestTransaction.java index da30ca9fa7..22b8b9c014 100644 --- a/h2/src/test/org/h2/test/db/TestTransaction.java +++ b/h2/src/test/org/h2/test/db/TestTransaction.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.db; @@ -14,16 +14,16 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Random; - import org.h2.api.ErrorCode; +import org.h2.engine.Constants; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Transactional tests, including transaction isolation tests, and tests related * to savepoints. */ -public class TestTransaction extends TestBase { +public class TestTransaction extends TestDb { /** * Run just this test. @@ -31,22 +31,35 @@ public class TestTransaction extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase init = TestBase.createCaller().init(); + init.testFromMain(); } @Override - public void test() throws SQLException { + public void test() throws Exception { + testClosingConnectionWithSessionTempTable(); + testClosingConnectionWithLockedTable(); testConstraintCreationRollback(); testCommitOnAutoCommitChange(); testConcurrentSelectForUpdate(); - testLogMode(); testRollback(); testRollback2(); testForUpdate(); + testForUpdate2(); + testForUpdate3(); + testUpdate(); + testMergeUsing(); + testDelete(); testSetTransaction(); testReferential(); testSavepoint(); testIsolation(); + testIsolationLevels(); + testIsolationLevels2(); + testIsolationLevels3(); + testIsolationLevels4(); + testIsolationLevelsCountAggregate(); + testIsolationLevelsCountAggregate2(); deleteDb("transaction"); } @@ -54,16 +67,11 @@ private void testConstraintCreationRollback() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); Statement stat = conn.createStatement(); - stat.execute("create table test(id int, p int)"); - stat.execute("insert into test values(1, 2)"); - try { - stat.execute("alter table test add constraint fail " + - "foreign key(p) references test(id)"); - fail(); - } catch (SQLException e) { - // expected - } + 
stat.execute("create table test(id int unique, p int)"); stat.execute("insert into test values(1, 2)"); + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + "alter table test add constraint fail foreign key(p) references test(id)"); + stat.execute("insert into test values(2, 3)"); stat.execute("drop table test"); conn.close(); } @@ -83,15 +91,9 @@ private void testCommitOnAutoCommitChange() throws SQLException { // should have no effect conn.setAutoCommit(false); - ResultSet rs; - if (config.mvcc) { - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(0, rs.getInt(1)); - } else { - assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). - executeQuery("select count(*) from test"); - } + ResultSet rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(0, rs.getInt(1)); // should commit conn.setAutoCommit(true); @@ -105,68 +107,44 @@ private void testCommitOnAutoCommitChange() throws SQLException { conn.close(); } - private void testLogMode() throws SQLException { - if (config.memory) { - return; - } - if (config.mvStore) { - return; - } - deleteDb("transaction"); - testLogMode(0); - testLogMode(1); - testLogMode(2); - } - - private void testLogMode(int logMode) throws SQLException { - Connection conn; - Statement stat; - ResultSet rs; - conn = getConnection("transaction"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key) as select 1"); - stat.execute("set write_delay 0"); - stat.execute("set log " + logMode); - rs = stat.executeQuery( - "select value from information_schema.settings where name = 'LOG'"); - rs.next(); - assertEquals(logMode, rs.getInt(1)); - stat.execute("insert into test values(2)"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // expected - } - conn = getConnection("transaction"); - stat = conn.createStatement(); - rs = stat.executeQuery("select * from test order by id"); - 
assertTrue(rs.next()); - if (logMode != 0) { - assertTrue(rs.next()); - } - assertFalse(rs.next()); - stat.execute("drop table test"); - conn.close(); - } - private void testConcurrentSelectForUpdate() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); conn.setAutoCommit(false); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); + stat.execute("create table test2(id int primary key, name varchar)"); stat.execute("insert into test values(1, 'Hello'), (2, 'World')"); + stat.execute("insert into test2 values(1, 'A'), (2, 'B')"); conn.commit(); - PreparedStatement prep = conn.prepareStatement( - "select * from test for update"); + testConcurrentSelectForUpdateImpl(conn, "*"); + testConcurrentSelectForUpdateImpl(conn, "*, count(*) over ()"); + conn.close(); + } + + private void testConcurrentSelectForUpdateImpl(Connection conn, String expressions) throws SQLException { + Connection conn2; + PreparedStatement prep; + prep = conn.prepareStatement("select * from test for update"); prep.execute(); - Connection conn2 = getConnection("transaction"); + conn2 = getConnection("transaction"); + conn2.setAutoCommit(false); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). + execute("select " + expressions + " from test for update"); + conn2.close(); + conn.commit(); + + prep = conn.prepareStatement("select " + expressions + + " from test join test2 on test.id = test2.id for update"); + prep.execute(); + conn2 = getConnection("transaction"); conn2.setAutoCommit(false); assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). execute("select * from test for update"); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). 
+ execute("select * from test2 for update"); conn2.close(); - conn.close(); + conn.commit(); } private void testForUpdate() throws SQLException { @@ -186,15 +164,311 @@ private void testForUpdate() throws SQLException { Connection conn2 = getConnection("transaction"); conn2.setAutoCommit(false); Statement stat2 = conn2.createStatement(); - if (config.mvcc) { - stat2.execute("update test set name = 'Welt' where id = 2"); - } + stat2.execute("update test set name = 'Welt' where id = 2"); assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). execute("update test set name = 'Hallo' where id = 1"); conn2.close(); conn.close(); } + private void testForUpdate2() throws Exception { + // Exclude some configurations to avoid spending too much time in sleep() + if (config.networked || config.cipher != null) { + return; + } + deleteDb("transaction"); + Connection conn1 = getConnection("transaction"); + Connection conn2 = getConnection("transaction"); + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST (ID INT PRIMARY KEY, V INT)"); + conn1.setAutoCommit(false); + conn2.createStatement().execute("SET LOCK_TIMEOUT 2000"); + testForUpdate2(conn1, stat1, conn2, false); + testForUpdate2(conn1, stat1, conn2, true); + conn1.close(); + conn2.close(); + } + + private void testForUpdate2(Connection conn1, Statement stat1, Connection conn2, boolean forUpdate) + throws Exception { + testForUpdate2(conn1, stat1, conn2, forUpdate, false); + testForUpdate2(conn1, stat1, conn2, forUpdate, true); + } + + private void testForUpdate2(Connection conn1, Statement stat1, Connection conn2, boolean forUpdate, + boolean window) throws Exception { + testForUpdate2(conn1, stat1, conn2, forUpdate, window, false, false); + testForUpdate2(conn1, stat1, conn2, forUpdate, window, false, true); + testForUpdate2(conn1, stat1, conn2, forUpdate, window, true, false); + } + + private void testForUpdate2(Connection conn1, Statement stat1, final Connection conn2, boolean forUpdate, + boolean 
window, boolean deleted, boolean excluded) throws Exception { + stat1.execute("MERGE INTO TEST KEY(ID) VALUES (1, 1)"); + conn1.commit(); + stat1.execute(deleted ? "DELETE FROM TEST WHERE ID = 1" : "UPDATE TEST SET V = 2 WHERE ID = 1"); + final int[] res = new int[1]; + final Exception[] ex = new Exception[1]; + StringBuilder builder = new StringBuilder("SELECT V"); + if (window) { + builder.append(", RANK() OVER (ORDER BY ID)"); + } + builder.append(" FROM TEST WHERE ID = 1"); + if (excluded) { + builder.append(" AND V = 1"); + } + if (forUpdate) { + builder.append(" FOR UPDATE"); + } + String query = builder.toString(); + final PreparedStatement prep2 = conn2.prepareStatement(query); + Thread t = new Thread() { + @Override + public void run() { + try { + ResultSet resultSet = prep2.executeQuery(); + res[0] = resultSet.next() ? resultSet.getInt(1) : -1; + conn2.commit(); + } catch (SQLException e) { + ex[0] = e; + } + } + }; + t.start(); + Thread.sleep(500); + conn1.commit(); + t.join(); + if (ex[0] != null) { + throw ex[0]; + } + assertEquals(forUpdate ? (deleted || excluded) ? 
-1 : 2 : 1, res[0]); + } + + private void testForUpdate3() throws Exception { + // Exclude some configurations to avoid spending too much time in sleep() + if (config.networked || config.cipher != null) { + return; + } + deleteDb("transaction"); + Connection conn1 = getConnection("transaction"); + final Connection conn2 = getConnection("transaction"); + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST (ID INT PRIMARY KEY, V INT UNIQUE)"); + conn1.setAutoCommit(false); + conn2.createStatement().execute("SET LOCK_TIMEOUT 2000"); + stat1.execute("MERGE INTO TEST KEY(ID) VALUES (1, 1), (2, 2), (3, 3), (4, 4)"); + conn1.commit(); + stat1.execute("UPDATE TEST SET V = 10 - V"); + final Exception[] ex = new Exception[1]; + StringBuilder builder = new StringBuilder("SELECT V FROM TEST ORDER BY V FOR UPDATE"); + String query = builder.toString(); + final PreparedStatement prep2 = conn2.prepareStatement(query); + Thread t = new Thread() { + @Override + public void run() { + try { + ResultSet resultSet = prep2.executeQuery(); + int previous = -1; + while (resultSet.next()) { + int value = resultSet.getInt(1); + assertTrue(previous + ">=" + value, previous < value); + previous = value; + } + conn2.commit(); + } catch (SQLException e) { + ex[0] = e; + } + } + }; + t.start(); + Thread.sleep(500); + conn1.commit(); + t.join(); + if (ex[0] != null) { + throw ex[0]; + } + conn1.close(); + conn2.close(); + } + + private void testUpdate() throws Exception { + final int count = 50; + deleteDb("transaction"); + final Connection conn1 = getConnection("transaction"); + conn1.setAutoCommit(false); + Connection conn2 = getConnection("transaction"); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); + conn1.commit(); + stat1.executeQuery("SELECT * FROM 
TEST").close(); + stat2.executeQuery("SELECT * FROM TEST").close(); + final int[] r = new int[1]; + Thread t = new Thread() { + @Override + public void run() { + int sum = 0; + try { + PreparedStatement prep = conn1.prepareStatement( + "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? AND NOT \"VALUE\""); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn1.commit(); + } catch (SQLException e) { + // Ignore + } + r[0] = sum; + } + }; + t.start(); + int sum = 0; + PreparedStatement prep = conn2.prepareStatement( + "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? AND NOT \"VALUE\""); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn2.commit(); + t.join(); + assertEquals(count, sum + r[0]); + conn2.close(); + conn1.close(); + } + + private void testMergeUsing() throws Exception { + final int count = 50; + deleteDb("transaction"); + final Connection conn1 = getConnection("transaction"); + conn1.setAutoCommit(false); + Connection conn2 = getConnection("transaction"); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); + conn1.commit(); + stat1.executeQuery("SELECT * FROM TEST").close(); + stat2.executeQuery("SELECT * FROM TEST").close(); + final int[] r = new int[1]; + Thread t = new Thread() { + @Override + public void run() { + int sum = 0; + try { + PreparedStatement prep = conn1.prepareStatement( + "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.\"VALUE\"" + + " WHEN MATCHED THEN UPDATE SET T.\"VALUE\" = TRUE" + + " WHEN NOT MATCHED THEN INSERT VALUES (10000 + ?1, FALSE)"); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + 
prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn1.commit(); + } catch (SQLException e) { + // Ignore + } + r[0] = sum; + } + }; + t.start(); + int sum = 0; + PreparedStatement prep = conn2.prepareStatement( + "MERGE INTO TEST T USING (SELECT ?1::INT X) S ON T.ID = S.X AND NOT T.\"VALUE\"" + + " WHEN MATCHED THEN UPDATE SET T.\"VALUE\" = TRUE" + + " WHEN NOT MATCHED THEN INSERT VALUES (10000 + ?1, FALSE)"); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn2.commit(); + t.join(); + assertEquals(count * 2, sum + r[0]); + conn2.close(); + conn1.close(); + } + + private void testDelete() throws Exception { + String sql1 = "DELETE FROM TEST WHERE ID = ? AND NOT \"VALUE\""; + String sql2 = "UPDATE TEST SET \"VALUE\" = TRUE WHERE ID = ? AND NOT \"VALUE\""; + testDeleteImpl(sql1, sql2); + testDeleteImpl(sql2, sql1); + } + + private void testDeleteImpl(final String sql1, String sql2) throws Exception { + final int count = 50; + deleteDb("transaction"); + final Connection conn1 = getConnection("transaction"); + conn1.setAutoCommit(false); + Connection conn2 = getConnection("transaction"); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" BOOLEAN) AS " + + "SELECT X, FALSE FROM GENERATE_SERIES(1, " + count + ')'); + conn1.commit(); + stat1.executeQuery("SELECT * FROM TEST").close(); + stat2.executeQuery("SELECT * FROM TEST").close(); + final int[] r = new int[1]; + Thread t = new Thread() { + @Override + public void run() { + int sum = 0; + try { + PreparedStatement prep = conn1.prepareStatement(sql1); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn1.commit(); + } catch (SQLException 
e) { + // Ignore + } + r[0] = sum; + } + }; + t.start(); + int sum = 0; + PreparedStatement prep = conn2.prepareStatement( + sql2); + for (int i = 1; i <= count; i++) { + prep.setInt(1, i); + prep.addBatch(); + } + int[] a = prep.executeBatch(); + for (int i : a) { + sum += i; + } + conn2.commit(); + t.join(); + assertEquals(count, sum + r[0]); + conn2.close(); + conn1.close(); + } + private void testRollback() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); @@ -219,7 +493,7 @@ private void testRollback() throws SQLException { conn = getConnection("transaction"); stat = conn.createStatement(); - stat.execute("create table master(id int) as select 1"); + stat.execute("create table master(id int primary key) as select 1"); stat.execute("create table child1(id int references master(id) " + "on delete cascade)"); stat.execute("insert into child1 values(1), (1), (1)"); @@ -264,7 +538,7 @@ private void testRollback2() throws SQLException { conn = getConnection("transaction"); stat = conn.createStatement(); - stat.execute("create table master(id int) as select 1"); + stat.execute("create table master(id int primary key) as select 1"); stat.execute("create table child1(id int references master(id) " + "on delete cascade)"); stat.execute("insert into child1 values(1), (1)"); @@ -312,7 +586,7 @@ private void testReferential() throws SQLException { Statement s1 = c1.createStatement(); s1.execute("drop table if exists a"); s1.execute("drop table if exists b"); - s1.execute("create table a (id integer identity not null, " + + s1.execute("create table a (id integer generated by default as identity, " + "code varchar(10) not null, primary key(id))"); s1.execute("create table b (name varchar(100) not null, a integer, " + "primary key(name), foreign key(a) references a(id))"); @@ -320,20 +594,52 @@ private void testReferential() throws SQLException { c2.setAutoCommit(false); s1.executeUpdate("insert into A(code) values('one')"); 
Statement s2 = c2.createStatement(); - if (config.mvcc) { - assertThrows( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, s2). - executeUpdate("insert into B values('two', 1)"); - } else { - assertThrows(ErrorCode.LOCK_TIMEOUT_1, s2). - executeUpdate("insert into B values('two', 1)"); - } + assertThrows( + ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, s2). + executeUpdate("insert into B values('two', 1)"); c2.commit(); c1.rollback(); c1.close(); c2.close(); } + private void testClosingConnectionWithLockedTable() throws SQLException { + deleteDb("transaction"); + Connection c1 = getConnection("transaction"); + Connection c2 = getConnection("transaction"); + c1.setAutoCommit(false); + c2.setAutoCommit(false); + + Statement s1 = c1.createStatement(); + s1.execute("create table a (id integer generated by default as identity, " + + "code varchar(10) not null, primary key(id))"); + s1.executeUpdate("insert into a(code) values('one')"); + c1.commit(); + s1.executeQuery("select * from a for update"); + c1.close(); + + Statement s2 = c2.createStatement(); + s2.executeQuery("select * from a for update"); + c2.close(); + } + + private void testClosingConnectionWithSessionTempTable() throws SQLException { + deleteDb("transaction"); + Connection c1 = getConnection("transaction"); + Connection c2 = getConnection("transaction"); + c1.setAutoCommit(false); + c2.setAutoCommit(false); + + Statement s1 = c1.createStatement(); + s1.execute("create local temporary table a (id int, x BLOB)"); + c1.commit(); + c1.close(); + + Statement s2 = c2.createStatement(); + s2.execute("create table c (id int)"); + c2.close(); + } + private void testSavepoint() throws SQLException { deleteDb("transaction"); Connection conn = getConnection("transaction"); @@ -410,11 +716,9 @@ private void testIsolation() throws SQLException { Connection conn = getConnection("transaction"); trace("default TransactionIsolation=" + conn.getTransactionIsolation()); 
conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - assertTrue(conn.getTransactionIsolation() == - Connection.TRANSACTION_READ_COMMITTED); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn.getTransactionIsolation()); conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - assertTrue(conn.getTransactionIsolation() == - Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, conn.getTransactionIsolation()); Statement stat = conn.createStatement(); assertTrue(conn.getAutoCommit()); conn.setAutoCommit(false); @@ -439,12 +743,146 @@ private void testIsolation() throws SQLException { conn.close(); } + private void testIsolationLevels() throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_REPEATABLE_READ, Constants.TRANSACTION_SNAPSHOT, + Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction"); + Connection conn3 = getConnection("transaction")) { + conn3.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + Statement stat3 = conn3.createStatement(); + stat1.execute("CREATE TABLE TEST1(ID INT PRIMARY KEY) AS VALUES 1, 2"); + stat1.execute("CREATE TABLE TEST2(ID INT PRIMARY KEY, V INT) AS VALUES (1, 10), (2, 20)"); + conn2.setAutoCommit(false); + // Read committed + testIsolationLevelsCheckRowsAndCount(stat2, 1, 2); + stat1.execute("INSERT INTO TEST1 VALUES 3"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 2); + stat1.execute("INSERT INTO TEST2 VALUES (3, 30)"); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + // Repeatable read or serializable + conn2.setTransactionIsolation(isolationLevel); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + + stat1.execute("INSERT INTO TEST1 VALUES 
4"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 3); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + stat1.execute("INSERT INTO TEST2 VALUES (4, 40)"); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 3); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 4); + stat1.execute("ALTER TABLE TEST2 ADD CONSTRAINT FK FOREIGN KEY(ID) REFERENCES TEST1(ID)"); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + stat1.execute("INSERT INTO TEST1 VALUES 5"); + stat1.execute("INSERT INTO TEST2 VALUES (5, 50)"); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 4); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 4); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount(stat2, 1, 5); + testIsolationLevelsCheckRowsAndCount(stat2, 2, 5); + stat2.execute("INSERT INTO TEST1 VALUES 6"); + stat2.execute("INSERT INTO TEST2 VALUES (6, 60)"); + stat2.execute("DELETE FROM TEST2 WHERE ID IN (1, 3)"); + stat2.execute("UPDATE TEST2 SET V = 45 WHERE ID = 4"); + stat1.execute("INSERT INTO TEST1 VALUES 7"); + stat1.execute("INSERT INTO TEST2 VALUES (7, 70)"); + stat2.execute("INSERT INTO TEST1 VALUES 8"); + stat2.execute("INSERT INTO TEST2 VALUES (8, 80)"); + stat2.execute("INSERT INTO TEST1 VALUES 9"); + stat2.execute("INSERT INTO TEST2 VALUES (9, 90)"); + stat2.execute("DELETE FROM TEST2 WHERE ID = 9"); + testIsolationLevelsCheckRowsAndCount2(stat2, 1, 1, 2, 3, 4, 5, 6, 8, 9); + // Read uncommitted + testIsolationLevelsCheckRowsAndCount2(stat3, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9); + // Repeatable read or serializable + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST2")) { + rs.next(); + assertEquals(5, rs.getLong(1)); + } + try (ResultSet rs = stat2.executeQuery("SELECT ID, V FROM TEST2 ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals(20, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertEquals(45, 
rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(5, rs.getInt(1)); + assertEquals(50, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(6, rs.getInt(1)); + assertEquals(60, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(8, rs.getInt(1)); + assertEquals(80, rs.getInt(2)); + assertFalse(rs.next()); + } + stat1.execute("INSERT INTO TEST1 VALUES 11"); + stat1.execute("INSERT INTO TEST2 VALUES (11, 110)"); + conn2.commit(); + testIsolationLevelsCheckRowsAndCount2(stat1, 2, 2, 4, 5, 6, 7, 8, 11); + testIsolationLevelsCheckRowsAndCount2(stat2, 2, 2, 4, 5, 6, 7, 8, 11); + stat2.execute("INSERT INTO TEST1 VALUES 10"); + stat2.execute("INSERT INTO TEST2 VALUES (9, 90), (10, 100)"); + stat2.execute("DELETE FROM TEST2 WHERE ID = 9"); + testIsolationLevelsCheckRowsAndCount2(stat2, 2, 2, 4, 5, 6, 7, 8, 10, 11); + stat1.execute("ALTER TABLE TEST2 DROP CONSTRAINT FK"); + conn2.commit(); + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST1")) { + rs.next(); + assertEquals(11, rs.getLong(1)); + } + stat1.execute("INSERT INTO TEST2 VALUES (20, 200)"); + try (ResultSet rs = stat2.executeQuery("SELECT COUNT(*) FROM TEST2")) { + rs.next(); + assertEquals(isolationLevel != Connection.TRANSACTION_REPEATABLE_READ ? 8 : 9, rs.getLong(1)); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevelsCheckRowsAndCount(Statement stat, int table, int expected) + throws SQLException { + try (ResultSet rs = stat.executeQuery("SELECT COUNT(*) FROM TEST" + table)) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT ID FROM TEST" + table + " ORDER BY ID")) { + for (int i = 0; ++i <= expected;) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + + private void testIsolationLevelsCheckRowsAndCount2(Statement stat, int table, int... 
values) + throws SQLException { + try (ResultSet rs = stat.executeQuery("SELECT COUNT(*) FROM TEST" + table)) { + rs.next(); + assertEquals(values.length, rs.getLong(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT ID FROM TEST" + table + " ORDER BY ID")) { + for (int expected : values) { + assertTrue(rs.next()); + assertEquals(expected, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + private void testNestedResultSets(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - test(stat, "CREATE TABLE NEST1(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); - test(stat, "CREATE TABLE NEST2(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + test(stat, "CREATE TABLE NEST1(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); + test(stat, "CREATE TABLE NEST2(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); DatabaseMetaData meta = conn.getMetaData(); - ArrayList result = New.arrayList(); + ArrayList result = new ArrayList<>(); ResultSet rs1, rs2; rs1 = meta.getTables(null, null, "NEST%", null); while (rs1.next()) { @@ -458,7 +896,7 @@ private void testNestedResultSets(Connection conn) throws SQLException { } // should be NEST1.ID, NEST1.NAME, NEST2.ID, NEST2.NAME assertEquals(result.toString(), 4, result.size()); - result = New.arrayList(); + result = new ArrayList<>(); test(stat, "INSERT INTO NEST1 VALUES(1,'A')"); test(stat, "INSERT INTO NEST1 VALUES(2,'B')"); test(stat, "INSERT INTO NEST2 VALUES(1,'1')"); @@ -476,7 +914,7 @@ private void testNestedResultSets(Connection conn) throws SQLException { } // should be A/1, A/2, B/1, B/2 assertEquals(result.toString(), 4, result.size()); - result = New.arrayList(); + result = new ArrayList<>(); rs1 = s1.executeQuery("SELECT * FROM NEST1 ORDER BY ID"); rs2 = s1.executeQuery("SELECT * FROM NEST2 ORDER BY ID"); assertThrows(ErrorCode.OBJECT_CLOSED, rs1).next(); @@ -505,4 +943,329 @@ private void test(Statement stat, String sql) throws SQLException { stat.execute(sql); } + private void testIsolationLevels2() throws 
SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn2.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + // Test a table without constraints + stat1.execute("CREATE TABLE TEST(\"VALUE\" INT)"); + stat1.executeQuery("TABLE TEST").close(); + stat1.execute("DROP TABLE TEST"); + // Other tests + stat1.execute("CREATE TABLE TEST(ID VARCHAR PRIMARY KEY, \"VALUE\" INT)"); + stat1.execute("INSERT INTO TEST VALUES ('1', 1)"); + conn1.commit(); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(1, rs.getInt(2)); + } + stat2.executeUpdate("UPDATE TEST SET \"VALUE\" = \"VALUE\" + 1"); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 
2 : 1, rs.getInt(2)); + } + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE"); + conn2.commit(); + if (isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ) { + assertThrows(ErrorCode.DEADLOCK_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE"); + } else { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1' FOR UPDATE")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID = '1'")) { + rs.next(); + assertEquals(2, rs.getInt(2)); + } + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevels3() throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn2.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.execute("CREATE TABLE TEST(ID BIGINT PRIMARY KEY, ID2 INT UNIQUE, \"VALUE\" INT)"); + stat1.execute("INSERT INTO TEST VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3)"); + conn1.commit(); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 IN (1, 2)")) { + rs.next(); + assertEquals(1, rs.getInt(3)); + rs.next(); + assertEquals(2, rs.getInt(3)); + } + stat2.executeUpdate("UPDATE TEST SET ID2 = 4, \"VALUE\" = 5 WHERE ID2 = 2"); + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 IN (1, 2)")) { + 
rs.next(); + assertEquals(1, rs.getInt(3)); + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED) { + assertFalse(rs.next()); + } else { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(3)); + } + } + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED) { + assertFalse(stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 2 FOR UPDATE").next()); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE"); + } else { + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 2 FOR UPDATE"); + assertFalse(stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE").next()); + } + stat2.executeUpdate("UPDATE TEST SET \"VALUE\" = 6 WHERE ID2 = 3"); + conn2.commit(); + if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED + || isolationLevel == Connection.TRANSACTION_READ_COMMITTED) { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4 FOR UPDATE")) { + rs.next(); + assertEquals(5, rs.getInt(3)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST")) { + rs.next(); + assertEquals(1, rs.getInt(3)); + rs.next(); + assertEquals(5, rs.getInt(3)); + rs.next(); + assertEquals(6, rs.getInt(3)); + } + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 4")) { + rs.next(); + assertEquals(5, rs.getInt(3)); + } + } else { + try (ResultSet rs = stat1.executeQuery("SELECT * FROM TEST WHERE ID2 = 3")) { + rs.next(); + assertEquals(3, rs.getInt(3)); + } + assertThrows(ErrorCode.DEADLOCK_1, stat1) + .executeQuery("SELECT * FROM TEST WHERE ID2 = 3 FOR UPDATE"); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevels4() throws SQLException { + testIsolationLevels4(true); + testIsolationLevels4(false); + } + + private void testIsolationLevels4(boolean primaryKey) throws SQLException { + for (int isolationLevel : new int[] { Connection.TRANSACTION_READ_UNCOMMITTED, + 
Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ, + Constants.TRANSACTION_SNAPSHOT, Connection.TRANSACTION_SERIALIZABLE }) { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST(ID INT " + (primaryKey ? "PRIMARY KEY" : "UNIQUE") + + ", V INT) AS VALUES (1, 2)"); + conn2.setAutoCommit(false); + conn2.setTransactionIsolation(isolationLevel); + Statement stat2 = conn2.createStatement(); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("UPDATE TEST SET V = V + 1"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ ? 2 : 3, rs.getInt(1)); + assertFalse(rs.next()); + } + if (isolationLevel >= Connection.TRANSACTION_REPEATABLE_READ) { + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("DELETE FROM TEST"); + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + stat1.execute("INSERT INTO TEST VALUES (1, 2)"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + stat1.execute("DELETE FROM TEST"); + stat1.execute("INSERT INTO TEST VALUES (1, 2)"); + if (primaryKey) { + // With a delegate index the row was completely + // restored, so no error + assertEquals(1, stat2.executeUpdate("UPDATE TEST SET V = V + 2")); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST 
WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertFalse(rs.next()); + } + conn2.commit(); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertFalse(rs.next()); + } + } else { + // With a secondary index restored row is not the same + assertThrows(ErrorCode.DEADLOCK_1, stat2).executeUpdate("UPDATE TEST SET V = V + 2"); + try (ResultSet rs = stat2.executeQuery("SELECT V FROM TEST WHERE ID = 1")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + } + stat1.execute("DELETE FROM TEST"); + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat2).execute("INSERT INTO TEST VALUES (1, 3)"); + } + } + } + deleteDb("transaction"); + } + + private void testIsolationLevelsCountAggregate() throws SQLException { + testIsolationLevelsCountAggregate(Connection.TRANSACTION_READ_UNCOMMITTED, 12, 15, 15, 16); + testIsolationLevelsCountAggregate(Connection.TRANSACTION_READ_COMMITTED, 6, 9, 15, 16); + testIsolationLevelsCountAggregate(Connection.TRANSACTION_REPEATABLE_READ, 6, 9, 9, 15); + testIsolationLevelsCountAggregate(Constants.TRANSACTION_SNAPSHOT, 6, 9, 9, 15); + testIsolationLevelsCountAggregate(Connection.TRANSACTION_SERIALIZABLE, 6, 9, 9, 15); + } + + private void testIsolationLevelsCountAggregate(int isolationLevel, long uncommitted1, long uncommitted2, + long committed, long committedOther) throws SQLException { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + Statement stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE TEST(V BIGINT) AS VALUES 1, 2, 3, 4, 5, 18"); + conn1.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + PreparedStatement all = conn1.prepareStatement("SELECT COUNT(*) FROM TEST"); + PreparedStatement simple = conn1.prepareStatement("SELECT COUNT(V) FROM TEST"); + 
conn2.setAutoCommit(false); + Statement stat2 = conn2.createStatement(); + testIsolationLevelsCountAggregate(all, simple, 6); + stat2.executeUpdate("DELETE FROM TEST WHERE V IN(3, 4)"); + stat2.executeUpdate("INSERT INTO TEST SELECT * FROM SYSTEM_RANGE(10, 17)"); + testIsolationLevelsCountAggregate(all, simple, uncommitted1); + stat1.executeUpdate("DELETE FROM TEST WHERE V = 2"); + stat1.executeUpdate("INSERT INTO TEST SELECT * FROM SYSTEM_RANGE(6, 9)"); + testIsolationLevelsCountAggregate(all, simple, uncommitted2); + conn2.commit(); + testIsolationLevelsCountAggregate(all, simple, committed); + conn1.commit(); + testIsolationLevelsCountAggregate(all, simple, 15); + stat2.executeUpdate("DELETE FROM TEST WHERE V = 17"); + stat2.executeUpdate("INSERT INTO TEST VALUES 19, 20"); + conn2.commit(); + testIsolationLevelsCountAggregate(all, simple, committedOther); + } + } + + private void testIsolationLevelsCountAggregate(PreparedStatement all, PreparedStatement simple, long expected) + throws SQLException { + try (ResultSet rs = all.executeQuery()) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + try (ResultSet rs = simple.executeQuery()) { + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + } + + private void testIsolationLevelsCountAggregate2() throws SQLException { + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_READ_UNCOMMITTED); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_READ_COMMITTED); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_REPEATABLE_READ); + testIsolationLevelsCountAggregate2(Constants.TRANSACTION_SNAPSHOT); + testIsolationLevelsCountAggregate2(Connection.TRANSACTION_SERIALIZABLE); + } + + private void testIsolationLevelsCountAggregate2(int isolationLevel) + throws SQLException { + deleteDb("transaction"); + try (Connection conn1 = getConnection("transaction"); Connection conn2 = getConnection("transaction")) { + conn1.setTransactionIsolation(isolationLevel); + conn1.setAutoCommit(false); + 
Statement stat1 = conn1.createStatement(); + Statement stat2 = conn2.createStatement(); + stat1.executeUpdate( + "CREATE TABLE TEST(X INTEGER PRIMARY KEY, Y INTEGER) AS SELECT X, 1 FROM SYSTEM_RANGE(1, 100)"); + conn1.commit(); + conn2.setTransactionIsolation(isolationLevel); + conn2.setAutoCommit(false); + PreparedStatement prep = conn1.prepareStatement("SELECT COUNT(*) FROM TEST"); + // Initial count + testIsolationLevelCountAggregate2(prep, 100L); + stat1.executeUpdate("INSERT INTO TEST VALUES (101, 2)"); + stat1.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 2 AND 3"); + stat1.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 4 AND 7"); + // Own uncommitted changes + testIsolationLevelCountAggregate2(prep, 99L); + stat2.executeUpdate("INSERT INTO TEST VALUES (102, 2)"); + stat2.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 12 AND 13"); + stat2.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 14 AND 17"); + // Own and concurrent uncommitted changes + testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 98L : 99L); + conn2.commit(); + // Own uncommitted and concurrent committed changes + testIsolationLevelCountAggregate2(prep, + isolationLevel <= Connection.TRANSACTION_READ_COMMITTED ? 98L: 99L); + conn1.commit(); + // Everything is committed + testIsolationLevelCountAggregate2(prep, 98L); + stat2.executeUpdate("INSERT INTO TEST VALUES (103, 2)"); + stat2.executeUpdate("DELETE FROM TEST WHERE X BETWEEN 22 AND 23"); + stat2.executeUpdate("UPDATE TEST SET Y = 2 WHERE X BETWEEN 24 AND 27"); + // Concurrent uncommitted changes + testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 97L : 98L); + conn2.commit(); + // Concurrent committed changes + testIsolationLevelCountAggregate2(prep, + isolationLevel <= Connection.TRANSACTION_READ_COMMITTED ? 
97L: 98L); + conn1.commit(); + // Everything is committed again + testIsolationLevelCountAggregate2(prep, 97L); + stat2.executeUpdate("INSERT INTO TEST VALUES (104, 2)"); + conn1.commit(); + // Transaction was started with concurrent uncommitted change + testIsolationLevelCountAggregate2(prep, + isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED ? 98L : 97L); + } + } + + private void testIsolationLevelCountAggregate2(PreparedStatement prep, long expected) throws SQLException { + ResultSet rs; + rs = prep.executeQuery(); + rs.next(); + assertEquals(expected, rs.getLong(1)); + } + } diff --git a/h2/src/test/org/h2/test/db/TestTriggersConstraints.java b/h2/src/test/org/h2/test/db/TestTriggersConstraints.java index 3b5ac9c3be..30c3d34bbc 100644 --- a/h2/src/test/org/h2/test/db/TestTriggersConstraints.java +++ b/h2/src/test/org/h2/test/db/TestTriggersConstraints.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,17 +12,24 @@ import java.sql.Statement; import java.util.Arrays; import java.util.HashSet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerArray; import org.h2.api.ErrorCode; import org.h2.api.Trigger; +import org.h2.message.DbException; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.TriggerAdapter; +import org.h2.util.StringUtils; import org.h2.util.Task; +import org.h2.value.ValueBigint; /** * Tests for trigger and constraints. 
*/ -public class TestTriggersConstraints extends TestBase implements Trigger { +public class TestTriggersConstraints extends TestDb implements Trigger { private static boolean mustNotCallTrigger; private String triggerName; @@ -33,24 +40,27 @@ public class TestTriggersConstraints extends TestBase implements Trigger { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("trigger"); + testWrongDataType(); testTriggerDeadlock(); - testDeleteInTrigger(); testTriggerAdapter(); testTriggerSelectEachRow(); testViewTrigger(); + testViewTriggerGeneratedKeys(); testTriggerBeforeSelect(); testTriggerAlterTable(); testTriggerAsSource(); + testTriggerAsJavascript(); testTriggers(); testConstraints(); testCheckConstraintErrorMessage(); testMultiPartForeignKeys(); + testConcurrent(); deleteDb("trigger"); } @@ -65,59 +75,121 @@ public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) } } - private void testTriggerDeadlock() throws Exception { - final Connection conn, conn2; - final Statement stat, stat2; - conn = getConnection("trigger"); - conn2 = getConnection("trigger"); - stat = conn.createStatement(); - stat2 = conn2.createStatement(); - stat.execute("create table test(id int) as select 1"); - stat.execute("create table test2(id int) as select 1"); - stat.execute("create trigger test_u before update on test2 " + - "for each row call \"" + DeleteTrigger.class.getName() + "\""); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat2.execute("update test set id = 2"); - Task task = new Task() { - @Override - public void call() throws Exception { - Thread.sleep(300); - stat2.execute("update test2 set id = 4"); - } - }; - task.execute(); - Thread.sleep(100); - try { - stat.execute("update test2 set id = 3"); - task.get(); - } catch (SQLException e) { - 
assertEquals(ErrorCode.LOCK_TIMEOUT_1, e.getErrorCode()); + /** + * Trigger that sets value of the wrong data type. + */ + public static class WrongTrigger implements Trigger { + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + newRow[1] = "Wrong value"; } - conn2.rollback(); - conn.rollback(); - stat.execute("drop table test"); - stat.execute("drop table test2"); - conn.close(); - conn2.close(); } - private void testDeleteInTrigger() throws SQLException { - if (config.mvcc) { - return; + /** + * Trigger that sets value of the wrong data type. + */ + public static class WrongTriggerAdapter extends TriggerAdapter { + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + newRow.updateString(2, "Wrong value"); + } + } + + /** + * Trigger that sets null value. + */ + public static class NullTrigger implements Trigger { + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + newRow[1] = null; + } + } + + /** + * Trigger that sets null value. 
+ */ + public static class NullTriggerAdapter extends TriggerAdapter { + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + newRow.updateNull(2); + } + } + + private void testWrongDataType() throws Exception { + try (Connection conn = getConnection("trigger")) { + Statement stat = conn.createStatement(); + stat.executeUpdate("CREATE TABLE TEST(A INTEGER, B INTEGER NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES (1, 2)"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + WrongTrigger.class.getName() + '\''); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + WrongTriggerAdapter.class.getName() + '\''); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + NullTrigger.class.getName() + '\''); + assertThrows(ErrorCode.NULL_NOT_ALLOWED, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("CREATE TRIGGER TEST_TRIGGER BEFORE INSERT ON TEST FOR EACH ROW CALL '" + + NullTriggerAdapter.class.getName() + '\''); + assertThrows(ErrorCode.NULL_NOT_ALLOWED, prep).executeUpdate(); + stat.executeUpdate("DROP TRIGGER TEST_TRIGGER"); + + stat.executeUpdate("DROP TABLE TEST"); + } + } + + private void testTriggerDeadlock() throws Exception { + final CountDownLatch latch = new CountDownLatch(2); + try (Connection conn = getConnection("trigger")) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int) as select 1"); + stat.execute("create table test2(id int) as select 1"); + stat.execute("create trigger test_u before update on test2 " + 
+ "for each row call \"" + DeleteTrigger.class.getName() + "\""); + conn.setAutoCommit(false); + stat.execute("update test set id = 2"); + Task task = new Task() { + @Override + public void call() throws Exception { + try (Connection conn2 = getConnection("trigger")) { + conn2.setAutoCommit(false); + try (Statement stat2 = conn2.createStatement()) { + latch.countDown(); + latch.await(); + stat2.execute("update test2 set id = 4"); + } + conn2.rollback(); + } catch (SQLException e) { + int errorCode = e.getErrorCode(); + assertTrue(String.valueOf(errorCode), + ErrorCode.LOCK_TIMEOUT_1 == errorCode || + ErrorCode.DEADLOCK_1 == errorCode); + } + } + }; + task.execute(); + latch.countDown(); + latch.await(); + try { + stat.execute("update test2 set id = 3"); + } catch (SQLException e) { + int errorCode = e.getErrorCode(); + assertTrue(String.valueOf(errorCode), + ErrorCode.LOCK_TIMEOUT_1 == errorCode || + ErrorCode.DEADLOCK_1 == errorCode); + } + task.get(); + conn.rollback(); + stat.execute("drop table test"); + stat.execute("drop table test2"); } - Connection conn; - Statement stat; - conn = getConnection("trigger"); - stat = conn.createStatement(); - stat.execute("create table test(id int) as select 1"); - stat.execute("create trigger test_u before update on test " + - "for each row call \"" + DeleteTrigger.class.getName() + "\""); - // this threw a NullPointerException - assertThrows(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1, stat). 
- execute("update test set id = 2"); - stat.execute("drop table test"); - conn.close(); } private void testTriggerAdapter() throws SQLException { @@ -160,14 +232,9 @@ private void testTriggerSelectEachRow() throws SQLException { stat = conn.createStatement(); stat.execute("drop table if exists test"); stat.execute("create table test(id int)"); - try { - stat.execute("create trigger test_insert before select on test " + + assertThrows(ErrorCode.INVALID_TRIGGER_FLAGS_1, stat) + .execute("create trigger test_insert before select on test " + "for each row call \"" + TestTriggerAdapter.class.getName() + "\""); - fail(); - } catch (SQLException ex) { - assertEquals(ErrorCode.TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED, - ex.getErrorCode()); - } conn.close(); } @@ -203,6 +270,45 @@ private void testViewTrigger() throws SQLException { conn.close(); } + private void testViewTriggerGeneratedKeys() throws SQLException { + Connection conn; + Statement stat; + conn = getConnection("trigger"); + stat = conn.createStatement(); + stat.execute("drop table if exists test"); + stat.execute("create table test(id int generated by default as identity)"); + stat.execute("create view test_view as select * from test"); + stat.execute("create trigger test_view_insert " + + "instead of insert on test_view for each row call \"" + + TestViewGeneratedKeys.class.getName() + "\""); + if (!config.memory) { + conn.close(); + conn = getConnection("trigger"); + stat = conn.createStatement(); + } + + PreparedStatement pstat; + pstat = conn.prepareStatement( + "insert into test_view values()", new int[] { 1 }); + int count = pstat.executeUpdate(); + assertEquals(1, count); + + ResultSet gkRs; + gkRs = pstat.getGeneratedKeys(); + + assertTrue(gkRs.next()); + assertEquals(1, gkRs.getInt(1)); + assertFalse(gkRs.next()); + + ResultSet rs; + rs = stat.executeQuery("select * from test"); + assertTrue(rs.next()); + assertFalse(rs.next()); + stat.execute("drop view test_view"); + stat.execute("drop table 
test"); + conn.close(); + } + /** * A test trigger adapter implementation. */ @@ -275,14 +381,33 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) } } + } + + /** + * + */ + public static class TestViewGeneratedKeys implements Trigger { + + PreparedStatement prepInsert; + @Override - public void close() { - // ignore + public void init(Connection conn, String schemaName, + String triggerName, String tableName, boolean before, int type) + throws SQLException { + prepInsert = conn.prepareStatement( + "insert into test values()", Statement.RETURN_GENERATED_KEYS); } @Override - public void remove() { - // ignore + public void fire(Connection conn, Object[] oldRow, Object[] newRow) + throws SQLException { + if (newRow != null) { + prepInsert.execute(); + ResultSet rs = prepInsert.getGeneratedKeys(); + if (rs.next()) { + newRow[0] = ValueBigint.get(rs.getLong(1)); + } + } } } @@ -345,16 +470,6 @@ public void fire(Connection conn, Object[] oldRow, Object[] newRow) prepMeta.execute(); } - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - } /** @@ -365,13 +480,7 @@ public static class TestTriggerAlterTable implements Trigger { @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { - conn.createStatement().execute("call seq.nextval"); - } - - @Override - public void init(Connection conn, String schemaName, - String triggerName, String tableName, boolean before, int type) { - // nothing to do + conn.createStatement().execute("call next value for seq"); } @Override @@ -388,16 +497,21 @@ public void remove() { private void testTriggerAlterTable() throws SQLException { deleteDb("trigger"); - testTrigger(false); + testTrigger(null); } private void testTriggerAsSource() throws SQLException { deleteDb("trigger"); - testTrigger(true); + testTrigger("java"); } - private void testTrigger(final boolean asSource) throws SQLException { - final String callSeq = "call 
seq.nextval"; + private void testTriggerAsJavascript() throws SQLException { + deleteDb("trigger"); + testTrigger("javascript"); + } + + private void testTrigger(final String sourceLang) throws SQLException { + final String callSeq = "call next value for seq"; Connection conn = getConnection("trigger"); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); @@ -407,12 +521,19 @@ private void testTrigger(final boolean asSource) throws SQLException { conn.setAutoCommit(false); Trigger t = new org.h2.test.db.TestTriggersConstraints.TestTriggerAlterTable(); t.close(); - if (asSource) { + if ("java".equals(sourceLang)) { String triggerClassName = this.getClass().getName() + "." + TestTriggerAlterTable.class.getSimpleName(); stat.execute("create trigger test_upd before insert on test " + "as $$org.h2.api.Trigger create() " + "{ return new " + triggerClassName + "(); } $$"); + } else if ("javascript".equals(sourceLang)) { + String triggerClassName = this.getClass().getName() + "." + + TestTriggerAlterTable.class.getSimpleName(); + final String body = "//javascript\n" + + "new Packages." 
+ triggerClassName + "();"; + stat.execute("create trigger test_upd before insert on test as $$" + + body + " $$"); } else { stat.execute("create trigger test_upd before insert on test call \"" + TestTriggerAlterTable.class.getName() + "\""); @@ -449,19 +570,19 @@ private void testCheckConstraintErrorMessage() throws SQLException { + "company_id int not null, " + "foreign key(company_id) references companies(id))"); stat.execute("create table connections (id identity, company_id int not null, " - + "first int not null, second int not null, " + + "first int not null, `second` int not null, " + "foreign key (company_id) references companies(id), " + "foreign key (first) references departments(id), " - + "foreign key (second) references departments(id), " + + "foreign key (`second`) references departments(id), " + "check (select departments.company_id from departments, companies where " - + " departments.id in (first, second)) = company_id)"); + + " departments.id in (first, `second`)) = company_id)"); stat.execute("insert into companies(id) values(1)"); stat.execute("insert into departments(id, company_id) " + "values(10, 1)"); stat.execute("insert into departments(id, company_id) " + "values(20, 1)"); assertThrows(ErrorCode.CHECK_CONSTRAINT_INVALID, stat) - .execute("insert into connections(id, company_id, first, second) " + .execute("insert into connections(id, company_id, first, `second`) " + "values(100, 1, 10, 20)"); stat.execute("drop table connections"); @@ -496,8 +617,7 @@ private void testMultiPartForeignKeys() throws SQLException { assertSingleValue(stat, "select count(*) from test1", 3); assertSingleValue(stat, "select count(*) from test2", 1); - stat.execute("drop table test1"); - stat.execute("drop table test2"); + stat.execute("drop table test1, test2"); conn.close(); } @@ -512,35 +632,35 @@ private void testTriggers() throws SQLException { // [FOR EACH ROW] [QUEUE n] [NOWAIT] CALL triggeredClass stat.execute("CREATE TRIGGER IF NOT EXISTS INS_BEFORE " + 
"BEFORE INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER IF NOT EXISTS INS_BEFORE " + "BEFORE INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER INS_AFTER " + "" + "AFTER INSERT ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER UPD_BEFORE " + "BEFORE UPDATE ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("CREATE TRIGGER INS_AFTER_ROLLBACK " + "AFTER INSERT, ROLLBACK ON TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\""); + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + '\''); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); ResultSet rs; rs = stat.executeQuery("SCRIPT"); checkRows(rs, new String[] { - "CREATE FORCE TRIGGER PUBLIC.INS_BEFORE " + - "BEFORE INSERT ON PUBLIC.TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", - "CREATE FORCE TRIGGER PUBLIC.INS_AFTER " + - "AFTER INSERT ON PUBLIC.TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", - "CREATE FORCE TRIGGER PUBLIC.UPD_BEFORE " + - "BEFORE UPDATE ON PUBLIC.TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", - "CREATE FORCE TRIGGER PUBLIC.INS_AFTER_ROLLBACK " + - "AFTER INSERT, ROLLBACK ON PUBLIC.TEST " + - "FOR EACH ROW NOWAIT CALL \"" + getClass().getName() + "\";", + "CREATE FORCE TRIGGER \"PUBLIC\".\"INS_BEFORE\" " + + "BEFORE INSERT ON \"PUBLIC\".\"TEST\" " + + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", + "CREATE FORCE TRIGGER \"PUBLIC\".\"INS_AFTER\" " + + "AFTER INSERT ON \"PUBLIC\".\"TEST\" " + + "FOR EACH ROW NOWAIT CALL '" + 
getClass().getName() + "';", + "CREATE FORCE TRIGGER \"PUBLIC\".\"UPD_BEFORE\" " + + "BEFORE UPDATE ON \"PUBLIC\".\"TEST\" " + + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", + "CREATE FORCE TRIGGER \"PUBLIC\".\"INS_AFTER_ROLLBACK\" " + + "AFTER INSERT, ROLLBACK ON \"PUBLIC\".\"TEST\" " + + "FOR EACH ROW NOWAIT CALL '" + getClass().getName() + "';", }); while (rs.next()) { String sql = rs.getString(1); @@ -578,7 +698,7 @@ private void testTriggers() throws SQLException { } private void checkRows(ResultSet rs, String[] expected) throws SQLException { - HashSet set = new HashSet(Arrays.asList(expected)); + HashSet set = new HashSet<>(Arrays.asList(expected)); while (rs.next()) { set.remove(rs.getString(1)); } @@ -587,6 +707,66 @@ private void checkRows(ResultSet rs, String[] expected) throws SQLException { } } + private void testConcurrent() throws Exception { + deleteDb("trigger"); + Connection conn = getConnection("trigger"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT)"); + stat.execute("CREATE TRIGGER TEST_BEFORE BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL " + + StringUtils.quoteStringSQL(ConcurrentTrigger.class.getName())); + Thread[] threads = new Thread[ConcurrentTrigger.N_T]; + AtomicInteger a = new AtomicInteger(); + for (int i = 0; i < ConcurrentTrigger.N_T; i++) { + Thread thread = new Thread() { + @Override + public void run() { + try (Connection conn = getConnection("trigger")) { + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(A) VALUES ?"); + for (int j = 0; j < ConcurrentTrigger.N_R; j++) { + prep.setInt(1, a.getAndIncrement()); + prep.executeUpdate(); + } + } catch (SQLException e) { + throw DbException.convert(e); + } + } + }; + threads[i] = thread; + } + synchronized (TestTriggersConstraints.class) { + AtomicIntegerArray array = ConcurrentTrigger.array; + int l = array.length(); + for (int i = 0; i < l; i++) { + array.set(i, 0); + } + for (Thread thread : threads) { + 
thread.start(); + } + for (Thread thread : threads) { + thread.join(); + } + for (int i = 0; i < l; i++) { + assertEquals(1, array.get(i)); + } + } + conn.close(); + } + + public static final class ConcurrentTrigger extends TriggerAdapter { + + static final int N_T = 4; + + static final int N_R = 250; + + static final AtomicIntegerArray array = new AtomicIntegerArray(N_T * N_R); + + @Override + public void fire(Connection conn, ResultSet oldRow, ResultSet newRow) throws SQLException { + array.set(newRow.getInt(1), 1); + } + + } + @Override public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { diff --git a/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java b/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java index 97474c9aee..3f1380ba29 100644 --- a/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java +++ b/h2/src/test/org/h2/test/db/TestTwoPhaseCommit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,14 +10,13 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; - import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Tests for the two-phase-commit feature. */ -public class TestTwoPhaseCommit extends TestBase { +public class TestTwoPhaseCommit extends TestDb { /** * Run just this test. @@ -25,15 +24,19 @@ public class TestTwoPhaseCommit extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.memory || config.networked) { - return; + return false; } + return true; + } + @Override + public void test() throws SQLException { deleteDb("twoPhaseCommit"); prepare(); @@ -44,28 +47,9 @@ public void test() throws SQLException { openWith(false); test(false); - if (!config.mvStore) { - testLargeTransactionName(); - } - deleteDb("twoPhaseCommit"); - } + testInDoubtAfterShutdown(); - private void testLargeTransactionName() throws SQLException { - Connection conn = getConnection("twoPhaseCommit"); - Statement stat = conn.createStatement(); - conn.setAutoCommit(false); - stat.execute("CREATE TABLE TEST2(ID INT)"); - String name = "tx12345678"; - try { - while (true) { - stat.execute("INSERT INTO TEST2 VALUES(1)"); - name += "x"; - stat.execute("PREPARE COMMIT " + name); - } - } catch (SQLException e) { - assertKnownException(e); - } - conn.close(); + deleteDb("twoPhaseCommit"); } private void test(boolean rolledBack) throws SQLException { @@ -88,10 +72,10 @@ private void test(boolean rolledBack) throws SQLException { private void openWith(boolean rollback) throws SQLException { Connection conn = getConnection("twoPhaseCommit"); Statement stat = conn.createStatement(); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); ResultSet rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT"); while (rs.next()) { - list.add(rs.getString("TRANSACTION")); + list.add(rs.getString("TRANSACTION_NAME")); } for (String s : list) { if (rollback) { @@ -116,4 +100,58 @@ private void prepare() throws SQLException { stat.execute("PREPARE COMMIT XID_TEST_TRANSACTION_WITH_LONG_NAME"); crash(conn); } + + private void testInDoubtAfterShutdown() throws SQLException { + if (config.memory) { + return; + } + deleteDb("twoPhaseCommit"); + Connection conn = 
getConnection("twoPhaseCommit"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID INT PRIMARY KEY)"); + conn.setAutoCommit(false); + stat.execute("INSERT INTO TEST VALUES (1)"); + stat.execute("PREPARE COMMIT \"#1\""); + conn.commit(); + stat.execute("SHUTDOWN IMMEDIATELY"); + conn = getConnection("twoPhaseCommit"); + stat = conn.createStatement(); + ResultSet rs = stat.executeQuery( + "SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + assertFalse(rs.next()); + rs = stat.executeQuery("SELECT ID FROM TEST"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + conn.setAutoCommit(false); + stat.execute("INSERT INTO TEST VALUES (2)"); + stat.execute("PREPARE COMMIT \"#2\""); + conn.rollback(); + stat.execute("SHUTDOWN IMMEDIATELY"); + conn = getConnection("twoPhaseCommit"); + stat = conn.createStatement(); + rs = stat.executeQuery("SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + assertFalse(rs.next()); + rs = stat.executeQuery("SELECT ID FROM TEST"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + conn.setAutoCommit(false); + stat.execute("INSERT INTO TEST VALUES (3)"); + stat.execute("PREPARE COMMIT \"#3\""); + stat.execute("SHUTDOWN IMMEDIATELY"); + conn = getConnection("twoPhaseCommit"); + stat = conn.createStatement(); + rs = stat.executeQuery("SELECT TRANSACTION_NAME, TRANSACTION_STATE FROM INFORMATION_SCHEMA.IN_DOUBT"); + assertTrue(rs.next()); + assertEquals("#3", rs.getString("TRANSACTION_NAME")); + assertEquals("IN_DOUBT", rs.getString("TRANSACTION_STATE")); + rs = stat.executeQuery("SELECT ID FROM TEST"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + conn.close(); + deleteDb("twoPhaseCommit"); + } + } diff --git a/h2/src/test/org/h2/test/db/TestUpgrade.java b/h2/src/test/org/h2/test/db/TestUpgrade.java deleted file mode 100644 index 01f69897f8..0000000000 
--- a/h2/src/test/org/h2/test/db/TestUpgrade.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.db; - -import java.io.OutputStream; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -import org.h2.api.ErrorCode; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.upgrade.DbUpgrade; -import org.h2.util.Utils; - -/** - * Automatic upgrade test cases. - */ -public class TestUpgrade extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - if (config.mvStore) { - return; - } - if (!Utils.isClassPresent("org.h2.upgrade.v1_1.Driver")) { - return; - } - testLobs(); - testErrorUpgrading(); - testNoDb(); - testNoUpgradeOldAndNew(); - testIfExists(); - testCipher(); - } - - private void testLobs() throws Exception { - deleteDb("upgrade"); - Connection conn; - conn = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgrade;PAGE_STORE=FALSE", getUser(), getPassword()); - conn.createStatement().execute( - "create table test(data clob) as select space(100000)"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - DbUpgrade.setDeleteOldDb(true); - DbUpgrade.setScriptInTempDir(true); - conn = getConnection("upgrade"); - assertFalse(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertFalse(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - ResultSet rs = conn.createStatement().executeQuery("select * from test"); - rs.next(); - assertEquals(new String(new 
char[100000]).replace((char) 0, ' '), - rs.getString(1)); - conn.close(); - DbUpgrade.setDeleteOldDb(false); - DbUpgrade.setScriptInTempDir(false); - deleteDb("upgrade"); - } - - private void testErrorUpgrading() throws Exception { - deleteDb("upgrade"); - OutputStream out; - out = FileUtils.newOutputStream(getBaseDir() + "/upgrade.data.db", false); - out.write(new byte[10000]); - out.close(); - out = FileUtils.newOutputStream(getBaseDir() + "/upgrade.index.db", false); - out.write(new byte[10000]); - out.close(); - assertThrows(ErrorCode.FILE_VERSION_ERROR_1, this). - getConnection("upgrade"); - - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.index.db")); - deleteDb("upgrade"); - } - - private void testNoDb() throws SQLException { - deleteDb("upgrade"); - Connection conn = getConnection("upgrade"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - deleteDb("upgrade"); - - conn = getConnection("upgrade;NO_UPGRADE=TRUE"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - deleteDb("upgrade"); - } - - private void testNoUpgradeOldAndNew() throws Exception { - deleteDb("upgrade"); - deleteDb("upgradeOld"); - String additionalParameters = ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE"; - - // Create old db - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection connOld = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld;PAGE_STORE=FALSE" + additionalParameters); - // Test auto server, too - Connection connOld2 = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld;PAGE_STORE=FALSE" + additionalParameters); - Statement statOld = connOld.createStatement(); - statOld.execute("create table testOld(id int)"); - connOld.close(); - connOld2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.data.db")); - - // Create new DB - Connection connNew = 
DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParameters); - Connection connNew2 = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParameters); - Statement statNew = connNew.createStatement(); - statNew.execute("create table test(id int)"); - - // Link to old DB without upgrade - statNew.executeUpdate("CREATE LOCAL TEMPORARY LINKED TABLE " + - "linkedTestOld('org.h2.Driver', 'jdbc:h2v1_1:" + - getBaseDir() + "/upgradeOld" + additionalParameters + "', '', '', 'TestOld')"); - statNew.executeQuery("select * from linkedTestOld"); - connNew.close(); - connNew2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.data.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - connNew = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParameters); - connNew2 = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade" + additionalParameters); - statNew = connNew.createStatement(); - // Link to old DB with upgrade - statNew.executeUpdate("CREATE LOCAL TEMPORARY LINKED TABLE " + - "linkedTestOld('org.h2.Driver', 'jdbc:h2:" + - getBaseDir() + "/upgradeOld" + additionalParameters + "', '', '', 'TestOld')"); - statNew.executeQuery("select * from linkedTestOld"); - connNew.close(); - connNew2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgradeOld.h2.db")); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - deleteDb("upgradeOld"); - } - - private void testIfExists() throws Exception { - deleteDb("upgrade"); - - // Create old - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection connOld = DriverManager.getConnection( - "jdbc:h2v1_1:" + getBaseDir() + "/upgrade;PAGE_STORE=FALSE"); - // Test auto server, too - Connection connOld2 = DriverManager.getConnection( - "jdbc:h2v1_1:" + getBaseDir() + "/upgrade;PAGE_STORE=FALSE"); - Statement statOld = connOld.createStatement(); 
- statOld.execute("create table test(id int)"); - connOld.close(); - connOld2.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - - // Upgrade - Connection connOldViaNew = DriverManager.getConnection( - "jdbc:h2:" + getBaseDir() + "/upgrade;ifexists=true"); - Statement statOldViaNew = connOldViaNew.createStatement(); - statOldViaNew.executeQuery("select * from test"); - connOldViaNew.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - } - - private void testCipher() throws Exception { - deleteDb("upgrade"); - - // Create old db - Utils.callStaticMethod("org.h2.upgrade.v1_1.Driver.load"); - Connection conn = DriverManager.getConnection("jdbc:h2v1_1:" + - getBaseDir() + "/upgrade;PAGE_STORE=FALSE;" + - "CIPHER=AES", "abc", "abc abc"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.data.db")); - - // Connect to old DB with upgrade - conn = DriverManager.getConnection("jdbc:h2:" + - getBaseDir() + "/upgrade;CIPHER=AES", "abc", "abc abc"); - stat = conn.createStatement(); - stat.executeQuery("select * from test"); - conn.close(); - assertTrue(FileUtils.exists(getBaseDir() + "/upgrade.h2.db")); - - deleteDb("upgrade"); - } - - @Override - public void deleteDb(String dbName) { - super.deleteDb(dbName); - try { - Utils.callStaticMethod( - "org.h2.upgrade.v1_1.tools.DeleteDbFiles.execute", - getBaseDir(), dbName, true); - } catch (Exception e) { - throw new RuntimeException(e.getMessage()); - } - FileUtils.delete(getBaseDir() + "/" + - dbName + ".data.db.backup"); - FileUtils.delete(getBaseDir() + "/" + - dbName + ".index.db.backup"); - FileUtils.deleteRecursive(getBaseDir() + "/" + - dbName + ".lobs.db.backup", false); - } - -} \ No newline at end of file diff --git a/h2/src/test/org/h2/test/db/TestView.java b/h2/src/test/org/h2/test/db/TestView.java index 4eaddd7a6e..1dffd44bec 100644 --- 
a/h2/src/test/org/h2/test/db/TestView.java +++ b/h2/src/test/org/h2/test/db/TestView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -10,14 +10,16 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - import org.h2.api.ErrorCode; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test for views. */ -public class TestView extends TestBase { +public class TestView extends TestDb { private static int x; @@ -27,13 +29,14 @@ public class TestView extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { deleteDb("view"); - + testSubSubQuery(); + testSubQueryViewIndexCache(); testInnerSelectWithRownum(); testInnerSelectWithRange(); testEmptyColumn(); @@ -47,17 +50,85 @@ public void test() throws SQLException { testManyViews(); testReferenceView(); testViewAlterAndCommandCache(); - testViewConstraintFromColumnExpression(); deleteDb("view"); } + private void testSubSubQuery() throws SQLException { + Connection conn = getConnection("view"); + Statement stat = conn.createStatement(); + stat.execute("drop table test if exists"); + stat.execute("create table test(a int, b int, c int)"); + stat.execute("insert into test values(1, 1, 1)"); + ResultSet rs = stat.executeQuery("select 1 x from (select a, b, c from " + + "(select * from test) bbb where bbb.a >=1 and bbb.a <= 1) sp " + + "where sp.a = 1 and sp.b = 1 and sp.c = 1"); + assertTrue(rs.next()); + conn.close(); + } + + private 
void testSubQueryViewIndexCache() throws SQLException { + if (config.networked) { + return; + } + Connection conn = getConnection("view"); + Statement stat = conn.createStatement(); + stat.execute("drop table test if exists"); + stat.execute("create table test(id int primary key, " + + "name varchar(25) unique, age int unique)"); + + // check that initial cache size is empty + SessionLocal s = (SessionLocal) ((JdbcConnection) conn).getSession(); + s.clearViewIndexCache(); + assertTrue(s.getViewIndexCache(true).isEmpty()); + assertTrue(s.getViewIndexCache(false).isEmpty()); + + // create view command should not affect caches + stat.execute("create view v as select * from test"); + assertTrue(s.getViewIndexCache(true).isEmpty()); + assertTrue(s.getViewIndexCache(false).isEmpty()); + + // check view index cache + stat.executeQuery("select * from v where id > 0").next(); + int size1 = s.getViewIndexCache(false).size(); + assertTrue(size1 > 0); + assertTrue(s.getViewIndexCache(true).isEmpty()); + stat.executeQuery("select * from v where name = 'xyz'").next(); + int size2 = s.getViewIndexCache(false).size(); + assertTrue(size2 > size1); + assertTrue(s.getViewIndexCache(true).isEmpty()); + + // check we did not add anything to view cache if we run a sub-query + stat.executeQuery("select * from (select * from test) where age = 17").next(); + int size3 = s.getViewIndexCache(false).size(); + assertEquals(size2, size3); + assertTrue(s.getViewIndexCache(true).isEmpty()); + + // check clear works + s.clearViewIndexCache(); + assertTrue(s.getViewIndexCache(false).isEmpty()); + assertTrue(s.getViewIndexCache(true).isEmpty()); + + // drop everything + stat.execute("drop view v"); + stat.execute("drop table test"); + conn.close(); + } + private void testInnerSelectWithRownum() throws SQLException { Connection conn = getConnection("view"); Statement stat = conn.createStatement(); stat.execute("drop table test if exists"); stat.execute("create table test(id int primary key, name 
varchar(1))"); stat.execute("insert into test(id, name) values(1, 'b'), (3, 'a')"); - ResultSet rs = stat.executeQuery( + ResultSet rs; + rs = stat.executeQuery( + "select nr from (select rownum() as nr, " + + "a.id as id from (select id from test order by name) as a) as b " + + "where b.id = 1;"); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + rs = stat.executeQuery( "select nr from (select row_number() over() as nr, " + "a.id as id from (select id from test order by name) as a) as b " + "where b.id = 1;"); @@ -98,7 +169,7 @@ private void testEmptyColumn() throws SQLException { private void testChangeSchemaSearchPath() throws SQLException { deleteDb("view"); - Connection conn = getConnection("view;FUNCTIONS_IN_SCHEMA=TRUE"); + Connection conn = getConnection("view"); Statement stat = conn.createStatement(); stat.execute("CREATE ALIAS X AS $$ int x() { return 1; } $$;"); stat.execute("CREATE SCHEMA S"); @@ -141,7 +212,7 @@ private void testCacheFunction(boolean deterministic) throws SQLException { x = 8; stat.execute("CREATE ALIAS GET_X " + (deterministic ? "DETERMINISTIC" : "") + - " FOR \"" + getClass().getName() + ".getX\""); + " FOR '" + getClass().getName() + ".getX'"); stat.execute("CREATE VIEW V AS SELECT * FROM (SELECT GET_X())"); ResultSet rs; rs = stat.executeQuery("SELECT * FROM V"); @@ -276,47 +347,4 @@ private void testViewAlterAndCommandCache() throws SQLException { deleteDb("view"); } - /** - * Make sure that the table constraint is still available when create a view - * of other table. 
- */ - private void testViewConstraintFromColumnExpression() throws SQLException { - deleteDb("view"); - Connection conn = getConnection("view"); - Statement stat = conn.createStatement(); - stat.execute("create table t0(id1 int primary key CHECK ((ID1 % 2) = 0))"); - stat.execute("create table t1(id2 int primary key CHECK ((ID2 % 1) = 0))"); - stat.execute("insert into t0 values(0)"); - stat.execute("insert into t1 values(1)"); - stat.execute("create view v1 as select * from t0,t1"); - // Check with ColumnExpression - ResultSet rs = stat.executeQuery( - "select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V1'"); - assertTrue(rs.next()); - assertEquals("ID1", rs.getString("COLUMN_NAME")); - assertEquals("((ID1 % 2) = 0)", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("ID2", rs.getString("COLUMN_NAME")); - assertEquals("((ID2 % 1) = 0)", rs.getString("CHECK_CONSTRAINT")); - // Check with AliasExpression - stat.execute("create view v2 as select ID1 key1,ID2 key2 from t0,t1"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V2'"); - assertTrue(rs.next()); - assertEquals("KEY1", rs.getString("COLUMN_NAME")); - assertEquals("((KEY1 % 2) = 0)", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("KEY2", rs.getString("COLUMN_NAME")); - assertEquals("((KEY2 % 1) = 0)", rs.getString("CHECK_CONSTRAINT")); - // Check hide of constraint if column is an Operation - stat.execute("create view v3 as select ID1 + 1 ID1, ID2 + 1 ID2 from t0,t1"); - rs = stat.executeQuery("select * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'V3'"); - assertTrue(rs.next()); - assertEquals("ID1", rs.getString("COLUMN_NAME")); - assertEquals("", rs.getString("CHECK_CONSTRAINT")); - assertTrue(rs.next()); - assertEquals("ID2", rs.getString("COLUMN_NAME")); - assertEquals("", rs.getString("CHECK_CONSTRAINT")); - conn.close(); - deleteDb("view"); - } } diff --git 
a/h2/src/test/org/h2/test/db/TestViewAlterTable.java b/h2/src/test/org/h2/test/db/TestViewAlterTable.java index caea80c097..6e8febc5e1 100644 --- a/h2/src/test/org/h2/test/db/TestViewAlterTable.java +++ b/h2/src/test/org/h2/test/db/TestViewAlterTable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -9,13 +9,14 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import org.h2.test.TestBase; import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test the impact of ALTER TABLE statements on views. */ -public class TestViewAlterTable extends TestBase { +public class TestViewAlterTable extends TestDb { private Connection conn; private Statement stat; @@ -26,13 +27,13 @@ public class TestViewAlterTable extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("alter"); - conn = getConnection("alter"); + deleteDb(getTestName()); + conn = getConnection(getTestName()); stat = conn.createStatement(); testDropColumnWithoutViews(); @@ -45,9 +46,10 @@ public void test() throws Exception { testJoinAndAlias(); testSubSelect(); testForeignKey(); + testAlterTableDropColumnInViewWithDoubleQuotes(); conn.close(); - deleteDb("alter"); + deleteDb(getTestName()); } private void testDropColumnWithoutViews() throws SQLException { @@ -70,9 +72,9 @@ private void testAlterTableDropColumnNotInView() throws SQLException { private void testAlterTableDropColumnInView() throws SQLException { // simple stat.execute("create table test(id identity, name varchar) " + - "as select x, 'Hello'"); + "as select 1, 'Hello' from dual"); stat.execute("create view test_view as select * from test"); - assertThrows(ErrorCode.VIEW_IS_INVALID_2, stat). + assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). execute("alter table test drop name"); ResultSet rs = stat.executeQuery("select * from test_view"); assertTrue(rs.next()); @@ -82,7 +84,7 @@ private void testAlterTableDropColumnInView() throws SQLException { // nested createTestData(); // should throw exception because V1 uses column A - assertThrows(ErrorCode.VIEW_IS_INVALID_2, stat). + assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). 
execute("alter table test drop column a"); stat.execute("drop table test cascade"); } @@ -155,7 +157,7 @@ private void testForeignKey() throws SQLException { } private void createTestData() throws SQLException { - stat.execute("create table test(a int, b int, c int)"); + stat.execute("create table test(a int primary key, b int, c int)"); stat.execute("insert into test(a, b, c) values (1, 2, 3)"); stat.execute("create view v1 as select a as b, b as a from test"); // child of v1 @@ -196,4 +198,18 @@ private void checkViewRemainsValid() throws SQLException { } } + + // original error: table "XX_COPY_xx_xx" not found + private void testAlterTableDropColumnInViewWithDoubleQuotes() throws SQLException{ + // simple + stat.execute("create table \"test\"(id identity, name varchar) " + + "as select 1, 'Hello' from dual"); + stat.execute("create view test_view as select * from \"test\""); + assertThrows(ErrorCode.COLUMN_IS_REFERENCED_1, stat). + execute("alter table \"test\" drop name"); + ResultSet rs = stat.executeQuery("select * from test_view"); + assertTrue(rs.next()); + stat.execute("drop view test_view"); + stat.execute("drop table \"test\""); + } } diff --git a/h2/src/test/org/h2/test/db/TestViewDropView.java b/h2/src/test/org/h2/test/db/TestViewDropView.java index 29c6d0a8e7..6361704af8 100644 --- a/h2/src/test/org/h2/test/db/TestViewDropView.java +++ b/h2/src/test/org/h2/test/db/TestViewDropView.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.db; @@ -12,11 +12,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test the impact of DROP VIEW statements on dependent views. 
*/ -public class TestViewDropView extends TestBase { +public class TestViewDropView extends TestDb { private Connection conn; private Statement stat; @@ -27,13 +28,13 @@ public class TestViewDropView extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("alter"); - conn = getConnection("alter"); + deleteDb(getTestName()); + conn = getConnection(getTestName()); stat = conn.createStatement(); testDropViewDefaultBehaviour(); @@ -45,11 +46,11 @@ public void test() throws Exception { testCreateOrReplaceForceViewWithNowInvalidDependentViews(); conn.close(); - deleteDb("alter"); + deleteDb(getTestName()); } private void testCreateForceView() throws SQLException { - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat). execute("create view test_view as select * from test"); stat.execute("create force view test_view as select * from test"); stat.execute("create table test(id int)"); @@ -65,8 +66,8 @@ private void testCreateForceView() throws SQLException { private void testDropViewDefaultBehaviour() throws SQLException { createTestData(); - ResultSet rs = stat.executeQuery("select value " + - "from information_schema.settings where name = 'DROP_RESTRICT'"); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'DROP_RESTRICT'"); rs.next(); boolean dropRestrict = rs.getBoolean(1); if (dropRestrict) { diff --git a/h2/src/test/org/h2/test/db/package.html b/h2/src/test/org/h2/test/db/package.html index 8e72b36f11..7b975d2567 100644 --- a/h2/src/test/org/h2/test/db/package.html +++ b/h2/src/test/org/h2/test/db/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/jaqu/AliasMapTest.java b/h2/src/test/org/h2/test/jaqu/AliasMapTest.java deleted 
file mode 100644 index bb107511b9..0000000000 --- a/h2/src/test/org/h2/test/jaqu/AliasMapTest.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import java.util.List; -import org.h2.jaqu.Db; -import org.h2.test.TestBase; - -/** - * Tests that columns (p.unitsInStock) are not compared by value with the value - * (9), but by reference (using an identity hash map). - * See http://code.google.com/p/h2database/issues/detail?id=119 - * - * @author d moebius at scoop slash gmbh dot de - */ -public class AliasMapTest extends TestBase { - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - new AliasMapTest().test(); - } - - @Override - public void test() throws Exception { - Db db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.insertAll(Product.getList()); - - Product p = new Product(); - List products = db - .from(p) - .where(p.unitsInStock).is(9) - .orderBy(p.productId).select(); - - assertEquals("[]", products.toString()); - - db.close(); - } -} - diff --git a/h2/src/test/org/h2/test/jaqu/AnnotationsTest.java b/h2/src/test/org/h2/test/jaqu/AnnotationsTest.java deleted file mode 100644 index d7d8c2cb8c..0000000000 --- a/h2/src/test/org/h2/test/jaqu/AnnotationsTest.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.h2.api.ErrorCode; -import org.h2.jaqu.Db; -import org.h2.test.TestBase; -import org.h2.util.JdbcUtils; - -/** - * Test annotation processing. - */ -public class AnnotationsTest extends TestBase { - - /** - * This object represents a database (actually a connection to the - * database). - */ - private Db db; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws SQLException { - new AnnotationsTest().test(); - } - - @Override - public void test() throws SQLException { - db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.insertAll(Product.getList()); - db.insertAll(ProductAnnotationOnly.getList()); - db.insertAll(ProductMixedAnnotation.getList()); - testIndexCreation(); - testProductAnnotationOnly(); - testProductMixedAnnotation(); - testTrimStringAnnotation(); - testCreateTableIfRequiredAnnotation(); - testColumnInheritanceAnnotation(); - db.close(); - } - - private void testIndexCreation() throws SQLException { - // test indexes are created, and columns are in the right order - DatabaseMetaData meta = db.getConnection().getMetaData(); - ResultSet rs = meta.getIndexInfo(null, "PUBLIC", - "ANNOTATED" + "PRODUCT", false, true); - assertTrue(rs.next()); - assertStartsWith(rs.getString("INDEX_NAME"), "PRIMARY_KEY"); - assertTrue(rs.next()); - assertStartsWith(rs.getString("INDEX_NAME"), "ANNOTATED" + "PRODUCT_"); - assertStartsWith(rs.getString("COLUMN_NAME"), "NAME"); - assertTrue(rs.next()); - assertStartsWith(rs.getString("INDEX_NAME"), "ANNOTATED" + "PRODUCT_"); - assertStartsWith(rs.getString("COLUMN_NAME"), "CAT"); - assertFalse(rs.next()); - } - - private void testProductAnnotationOnly() { - ProductAnnotationOnly p = new 
ProductAnnotationOnly(); - assertEquals(10, db.from(p).selectCount()); - - // test JQColumn.name="cat" - assertEquals(2, db.from(p).where(p.category).is("Beverages").selectCount()); - - // test JQTable.annotationsOnly=true - // public String unmappedField is ignored by JaQu - assertEquals(0, db.from(p).where(p.unmappedField).is("unmapped").selectCount()); - - // test JQColumn.autoIncrement=true - // 10 objects, 10 autoIncremented unique values - assertEquals(10, db.from(p).selectDistinct(p.autoIncrement).size()); - - // test JQTable.primaryKey=id - try { - db.insertAll(ProductAnnotationOnly.getList()); - } catch (RuntimeException r) { - SQLException s = (SQLException) r.getCause(); - assertEquals(ErrorCode.DUPLICATE_KEY_1, s.getErrorCode()); - } - } - - private void testProductMixedAnnotation() { - ProductMixedAnnotation p = new ProductMixedAnnotation(); - - // test JQColumn.name="cat" - assertEquals(2, db.from(p).where(p.category).is("Beverages").selectCount()); - - // test JQTable.annotationsOnly=false - // public String mappedField is reflectively mapped by JaQu - assertEquals(10, db.from(p).where(p.mappedField).is("mapped").selectCount()); - - // test JQColumn.primaryKey=true - try { - db.insertAll(ProductMixedAnnotation.getList()); - } catch (RuntimeException r) { - SQLException s = (SQLException) r.getCause(); - assertEquals(ErrorCode.DUPLICATE_KEY_1, s.getErrorCode()); - } - } - - private void testTrimStringAnnotation() { - ProductAnnotationOnly p = new ProductAnnotationOnly(); - ProductAnnotationOnly prod = db.from(p).selectFirst(); - String oldValue = prod.category; - String newValue = "01234567890123456789"; - // 2 chars exceeds field max - prod.category = newValue; - db.update(prod); - - ProductAnnotationOnly newProd = db.from(p) - .where(p.productId) - .is(prod.productId) - .selectFirst(); - assertEquals(newValue.substring(0, 15), newProd.category); - - newProd.category = oldValue; - db.update(newProd); - } - - private void 
testColumnInheritanceAnnotation() { - ProductInheritedAnnotation table = new ProductInheritedAnnotation(); - Db db = Db.open("jdbc:h2:mem:", "sa", "sa"); - List inserted = ProductInheritedAnnotation - .getData(); - db.insertAll(inserted); - - List retrieved = db.from(table).select(); - - for (int j = 0; j < retrieved.size(); j++) { - ProductInheritedAnnotation i = inserted.get(j); - ProductInheritedAnnotation r = retrieved.get(j); - assertEquals(i.category, r.category); - assertEquals(i.mappedField, r.mappedField); - assertEquals(i.unitsInStock, r.unitsInStock); - assertEquals(i.unitPrice, r.unitPrice); - assertEquals(i.name(), r.name()); - assertEquals(i.id(), r.id()); - } - db.close(); - } - - private void testCreateTableIfRequiredAnnotation() { - // tests JQTable.createTableIfRequired=false - Db noCreateDb = null; - try { - noCreateDb = Db.open("jdbc:h2:mem:", "sa", "sa"); - noCreateDb.insertAll(ProductNoCreateTable.getList()); - noCreateDb.close(); - } catch (RuntimeException r) { - SQLException s = (SQLException) r.getCause(); - assertEquals(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, s.getErrorCode()); - } - if (noCreateDb != null) { - JdbcUtils.closeSilently(noCreateDb.getConnection()); - } - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ClobTest.java b/h2/src/test/org/h2/test/jaqu/ClobTest.java deleted file mode 100644 index ae58b95912..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ClobTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import static org.h2.jaqu.Define.primaryKey; -import static org.h2.jaqu.Define.tableName; -import java.text.MessageFormat; -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Db; -import org.h2.jaqu.Table; -import org.h2.test.TestBase; - -/** - * Tests if converting a CLOB to a String works. 
- */ -public class ClobTest extends TestBase { - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - new ClobTest().test(); - } - - @Override - public void test() throws Exception { - String create = "CREATE TABLE CLOB_TEST(ID INT PRIMARY KEY, WORDS {0})"; - Db db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.executeUpdate(MessageFormat.format(create, "VARCHAR(255)")); - db.insertAll(StringRecord.getList()); - testSimpleUpdate(db, "VARCHAR fail"); - db.close(); - - db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.executeUpdate(MessageFormat.format(create, "TEXT")); - db.insertAll(StringRecord.getList()); - testSimpleUpdate(db, "CLOB fail because of single quote artifacts"); - db.close(); - } - - private void testSimpleUpdate(Db db, String failureMsg) { - String newWords = "I changed the words"; - StringRecord r = new StringRecord(); - StringRecord originalRecord = db.from(r).where(r.id).is(2).selectFirst(); - String oldWords = originalRecord.words; - originalRecord.words = newWords; - db.update(originalRecord); - - StringRecord r2 = new StringRecord(); - StringRecord revisedRecord = db.from(r2).where(r2.id).is(2).selectFirst(); - assertEquals(failureMsg, newWords, revisedRecord.words); - - // undo update - originalRecord.words = oldWords; - db.update(originalRecord); - } - - /** - * A simple class used in this test. 
- */ - public static class StringRecord implements Table { - - public Integer id; - public String words; - - public StringRecord() { - // public constructor - } - - private StringRecord(int id, String words) { - this.id = id; - this.words = words; - } - - @Override - public void define() { - tableName("CLOB_TEST"); - primaryKey(id); - } - - private static StringRecord create(int id, String words) { - return new StringRecord(id, words); - } - - public static List getList() { - StringRecord[] list = { - create(1, "Once upon a midnight dreary, while I pondered weak and weary,"), - create(2, "Over many a quaint and curious volume of forgotten lore,"), - create(3, "While I nodded, nearly napping, suddenly there came a tapping,"), - create(4, "As of some one gently rapping, rapping at my chamber door."), - create(5, "`'Tis some visitor,' I muttered, `tapping at my chamber door -"), - create(6, "Only this, and nothing more.'") }; - - return Arrays.asList(list); - } - - @Override - public String toString() { - return id + ": " + words; - } - } -} - diff --git a/h2/src/test/org/h2/test/jaqu/ComplexObject.java b/h2/src/test/org/h2/test/jaqu/ComplexObject.java deleted file mode 100644 index 3cb8f255cf..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ComplexObject.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import static org.h2.jaqu.Define.primaryKey; -import java.math.BigDecimal; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.Date; -import java.util.List; -import org.h2.jaqu.Table; - -/** - * A table containing all possible data types. 
- */ -public class ComplexObject implements Table { - public Integer id; - public Long amount; - public String name; - public BigDecimal value; - public Date birthday; - public Time time; - public Timestamp created; - - static ComplexObject build(Integer id, boolean isNull) { - ComplexObject obj = new ComplexObject(); - obj.id = id; - obj.amount = isNull ? null : Long.valueOf(1); - obj.name = isNull ? null : "hello"; - obj.value = isNull ? null : new BigDecimal("1"); - obj.birthday = isNull ? null : java.sql.Date.valueOf("2001-01-01"); - obj.time = isNull ? null : Time.valueOf("10:20:30"); - obj.created = isNull ? null : Timestamp.valueOf("2002-02-02 02:02:02"); - return obj; - } - - @Override - public void define() { - primaryKey(id); - } - - public static List getList() { - return Arrays.asList(new ComplexObject[] { build(0, true), build(1, false) }); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/Customer.java b/h2/src/test/org/h2/test/jaqu/Customer.java deleted file mode 100644 index b30629dd34..0000000000 --- a/h2/src/test/org/h2/test/jaqu/Customer.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import java.util.Arrays; -import java.util.List; - -/** - * A table containing customer data. 
- */ -public class Customer { - - public String customerId; - public String region; - - public Customer() { - // public constructor - } - - public Customer(String customerId, String region) { - this.customerId = customerId; - this.region = region; - } - - @Override - public String toString() { - return customerId; - } - - public static List getList() { - Customer[] list = { - new Customer("ALFKI", "WA"), - new Customer("ANATR", "WA"), - new Customer("ANTON", "CA") }; - return Arrays.asList(list); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ModelsTest.java b/h2/src/test/org/h2/test/jaqu/ModelsTest.java deleted file mode 100644 index 98c5bebb99..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ModelsTest.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import org.h2.jaqu.Db; -import org.h2.jaqu.DbInspector; -import org.h2.jaqu.DbUpgrader; -import org.h2.jaqu.DbVersion; -import org.h2.jaqu.Table.JQDatabase; -import org.h2.jaqu.ValidationRemark; -import org.h2.test.TestBase; -import org.h2.test.jaqu.SupportedTypes.SupportedTypes2; - -/** - * Test that the mapping between classes and tables is done correctly. - */ -public class ModelsTest extends TestBase { - - /** - * This object represents a database (actually a connection to the - * database). - */ - private Db db; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... 
args) throws Exception { - ModelsTest test = new ModelsTest(); - test.init(); - test.config.traceTest = true; - test.test(); - } - - @Override - public void test() { - db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.insertAll(Product.getList()); - db.insertAll(ProductAnnotationOnly.getList()); - db.insertAll(ProductMixedAnnotation.getList()); - testValidateModels(); - testSupportedTypes(); - testModelGeneration(); - testDatabaseUpgrade(); - testTableUpgrade(); - db.close(); - } - - private void testValidateModels() { - DbInspector inspector = new DbInspector(db); - validateModel(inspector, new Product()); - validateModel(inspector, new ProductAnnotationOnly()); - validateModel(inspector, new ProductMixedAnnotation()); - } - - private void validateModel(DbInspector inspector, Object o) { - List remarks = inspector.validateModel(o, false); - if (config.traceTest && remarks.size() > 0) { - trace("Validation remarks for " + o.getClass().getName()); - for (ValidationRemark remark : remarks) { - trace(remark.toString()); - } - trace(""); - } - for (ValidationRemark remark : remarks) { - assertFalse(remark.toString(), remark.isError()); - } - } - - private void testSupportedTypes() { - List original = SupportedTypes.createList(); - db.insertAll(original); - List retrieved = db.from(SupportedTypes.SAMPLE).select(); - assertEquals(original.size(), retrieved.size()); - for (int i = 0; i < original.size(); i++) { - SupportedTypes o = original.get(i); - SupportedTypes r = retrieved.get(i); - if (!o.equivalentTo(r)) { - assertTrue(o.equivalentTo(r)); - } - } - } - - private void testModelGeneration() { - DbInspector inspector = new DbInspector(db); - List models = inspector.generateModel(null, - "SupportedTypes", - "org.h2.test.jaqu", true, true); - assertEquals(1, models.size()); - // a poor test, but a start - assertEquals(1364, models.get(0).length()); - } - - private void testDatabaseUpgrade() { - Db db = Db.open("jdbc:h2:mem:", "sa", "sa"); - - // insert a database version 
record - db.insert(new DbVersion(1)); - - TestDbUpgrader dbUpgrader = new TestDbUpgrader(); - db.setDbUpgrader(dbUpgrader); - - List original = SupportedTypes.createList(); - db.insertAll(original); - - assertEquals(1, dbUpgrader.oldVersion.get()); - assertEquals(2, dbUpgrader.newVersion.get()); - db.close(); - } - - private void testTableUpgrade() { - Db db = Db.open("jdbc:h2:mem:", "sa", "sa"); - - // insert first, this will create version record automatically - List original = SupportedTypes.createList(); - db.insertAll(original); - - // reset the dbUpgrader (clears the update check cache) - TestDbUpgrader dbUpgrader = new TestDbUpgrader(); - db.setDbUpgrader(dbUpgrader); - - SupportedTypes2 s2 = new SupportedTypes2(); - - List types = db.from(s2).select(); - assertEquals(10, types.size()); - assertEquals(1, dbUpgrader.oldVersion.get()); - assertEquals(2, dbUpgrader.newVersion.get()); - db.close(); - } - - /** - * A sample database upgrader class. - */ - @JQDatabase(version = 2) - class TestDbUpgrader implements DbUpgrader { - final AtomicInteger oldVersion = new AtomicInteger(0); - final AtomicInteger newVersion = new AtomicInteger(0); - - @Override - public boolean upgradeTable(Db db, String schema, String table, - int fromVersion, int toVersion) { - // just claims success on upgrade request - oldVersion.set(fromVersion); - newVersion.set(toVersion); - return true; - } - - @Override - public boolean upgradeDatabase(Db db, int fromVersion, int toVersion) { - // just claims success on upgrade request - oldVersion.set(fromVersion); - newVersion.set(toVersion); - return true; - } - - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/Order.java b/h2/src/test/org/h2/test/jaqu/Order.java deleted file mode 100644 index dad6620b33..0000000000 --- a/h2/src/test/org/h2/test/jaqu/Order.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import static org.h2.jaqu.Define.primaryKey; -import static org.h2.jaqu.Define.tableName; -import java.math.BigDecimal; -import java.util.Arrays; -import java.util.Date; -import java.util.List; -import org.h2.jaqu.Table; - -/** - * A table containing order data. - */ - -public class Order implements Table { - public String customerId; - public Integer orderId; - public Date orderDate; - public BigDecimal total; - - public Order(String customerId, Integer orderId, - String total, String orderDate) { - this.customerId = customerId; - this.orderId = orderId; - this.total = new BigDecimal(total); - this.orderDate = java.sql.Date.valueOf(orderDate); - } - - public Order() { - // public constructor - } - - @Override - public void define() { - tableName("Orders"); - primaryKey(customerId, orderId); - } - - public static List getList() { - Order[] list = { - new Order("ALFKI", 10702, "330.00", "2007-01-02"), - new Order("ALFKI", 10952, "471.20", "2007-02-03"), - new Order("ANATR", 10308, "88.80", "2007-01-03"), - new Order("ANATR", 10625, "479.75", "2007-03-03"), - new Order("ANATR", 10759, "320.00", "2007-04-01"), - new Order("ANTON", 10365, "403.20", "2007-02-13"), - new Order("ANTON", 10682, "375.50", "2007-03-13"), - new Order("ANTON", 10355, "480.00", "2007-04-11") }; - return Arrays.asList(list); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/Product.java b/h2/src/test/org/h2/test/jaqu/Product.java deleted file mode 100644 index 2e0b41dc44..0000000000 --- a/h2/src/test/org/h2/test/jaqu/Product.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import static org.h2.jaqu.Define.index; -import static org.h2.jaqu.Define.maxLength; -import static org.h2.jaqu.Define.primaryKey; -import static org.h2.jaqu.Define.tableName; -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Table; - -/** - * A table containing product data. - */ -public class Product implements Table { - - public Integer productId; - public String productName; - public String category; - public Double unitPrice; - public Integer unitsInStock; - - public Product() { - // public constructor - } - - private Product(int productId, String productName, - String category, double unitPrice, int unitsInStock) { - this.productId = productId; - this.productName = productName; - this.category = category; - this.unitPrice = unitPrice; - this.unitsInStock = unitsInStock; - } - - @Override - public void define() { - tableName("Product"); - primaryKey(productId); - maxLength(category, 255); - index(productName, category); - } - - private static Product create(int productId, String productName, - String category, double unitPrice, int unitsInStock) { - return new Product(productId, productName, category, - unitPrice, unitsInStock); - } - - public static List getList() { - Product[] list = { - create(1, "Chai", "Beverages", 18, 39), - create(2, "Chang", "Beverages", 19.0, 17), - create(3, "Aniseed Syrup", "Condiments", 10.0, 13), - create(4, "Chef Anton's Cajun Seasoning", "Condiments", 22.0, 53), - create(5, "Chef Anton's Gumbo Mix", "Condiments", 21.3500, 0), - create(6, "Grandma's Boysenberry Spread", "Condiments", 25.0, 120), - create(7, "Uncle Bob's Organic Dried Pears", "Produce", 30.0, 15), - create(8, "Northwoods Cranberry Sauce", "Condiments", 40.0, 6), - create(9, "Mishi Kobe Niku", "Meat/Poultry", 97.0, 29), - create(10, "Ikura", "Seafood", 31.0, 31), - }; - - return Arrays.asList(list); - } - - @Override - public String toString() { - return productName + ": " + 
unitsInStock; - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ProductAnnotationOnly.java b/h2/src/test/org/h2/test/jaqu/ProductAnnotationOnly.java deleted file mode 100644 index 854340f7d2..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ProductAnnotationOnly.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQIndex; -import org.h2.jaqu.Table.JQTable; - -/** - * A table containing product data. - */ -@JQTable(name = "AnnotatedProduct", primaryKey = "id") -@JQIndex(standard = "name, cat") -public class ProductAnnotationOnly { - - @JQColumn(autoIncrement = true) - public Integer autoIncrement; - - public String unmappedField; - - @JQColumn(name = "id") - Integer productId; - - @JQColumn(name = "cat", maxLength = 15, trimString = true) - String category; - - @JQColumn(name = "name") - private String productName; - - @JQColumn - private Double unitPrice; - - @JQColumn - private Integer unitsInStock; - - public ProductAnnotationOnly() { - // public constructor - } - - private ProductAnnotationOnly(int productId, String productName, - String category, double unitPrice, int unitsInStock, - String unmappedField) { - this.productId = productId; - this.productName = productName; - this.category = category; - this.unitPrice = unitPrice; - this.unitsInStock = unitsInStock; - this.unmappedField = unmappedField; - } - - private static ProductAnnotationOnly create(int productId, - String productName, String category, double unitPrice, - int unitsInStock, String unmappedField) { - return new ProductAnnotationOnly(productId, productName, category, - unitPrice, unitsInStock, unmappedField); - } - - public static List getList() { - String unmappedField = "unmapped"; - 
ProductAnnotationOnly[] list = { - create(1, "Chai", "Beverages", 18, 39, - unmappedField), - create(2, "Chang", "Beverages", 19.0, 17, - unmappedField), - create(3, "Aniseed Syrup", "Condiments", 10.0, 13, - unmappedField), - create(4, "Chef Anton's Cajun Seasoning", "Condiments", 22.0, 53, - unmappedField), - create(5, "Chef Anton's Gumbo Mix", "Condiments", 21.3500, 0, - unmappedField), - create(6, "Grandma's Boysenberry Spread", "Condiments", 25.0, 120, - unmappedField), - create(7, "Uncle Bob's Organic Dried Pears", "Produce", 30.0, 15, - unmappedField), - create(8, "Northwoods Cranberry Sauce", "Condiments", 40.0, 6, - unmappedField), - create(9, "Mishi Kobe Niku", "Meat/Poultry", 97.0, 29, - unmappedField), - create(10, "Ikura", "Seafood", 31.0, 31, - unmappedField), }; - return Arrays.asList(list); - } - - @Override - public String toString() { - return productName + ": " + unitsInStock; - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ProductInheritedAnnotation.java b/h2/src/test/org/h2/test/jaqu/ProductInheritedAnnotation.java deleted file mode 100644 index 0780681373..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ProductInheritedAnnotation.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Table.JQTable; - -/** - * This class inherits all its fields from a parent class which has annotated - * columns. The JQTable annotation of the parent class is ignored and only - * the JQTable annotation of this class matters. - * However, this table inherits JQColumns from its super class. 
- */ -@JQTable(inheritColumns = true, annotationsOnly = false) -public class ProductInheritedAnnotation extends ProductMixedAnnotation { - - public ProductInheritedAnnotation() { - // public constructor - } - - private ProductInheritedAnnotation(int productId, String productName, - String category, double unitPrice, int unitsInStock, - String mappedField) { - super(productId, productName, category, unitPrice, unitsInStock, - mappedField); - } - - private static ProductInheritedAnnotation create(int productId, - String productName, String category, double unitPrice, - int unitsInStock, String mappedField) { - return new ProductInheritedAnnotation(productId, productName, category, - unitPrice, unitsInStock, mappedField); - } - - public static List getData() { - String mappedField = "mapped"; - ProductInheritedAnnotation[] list = { - create(1, "Chai", "Beverages", 18, 39, mappedField), - create(2, "Chang", "Beverages", 19.0, 17, mappedField), - create(3, "Aniseed Syrup", "Condiments", 10.0, 13, mappedField), - create(4, "Chef Anton's Cajun Seasoning", "Condiments", 22.0, 53, mappedField), - create(5, "Chef Anton's Gumbo Mix", "Condiments", 21.3500, 0, mappedField), - create(6, "Grandma's Boysenberry Spread", "Condiments", 25.0, 120, mappedField), - create(7, "Uncle Bob's Organic Dried Pears", "Produce", 30.0, 15, mappedField), - create(8, "Northwoods Cranberry Sauce", "Condiments", 40.0, 6, mappedField), - create(9, "Mishi Kobe Niku", "Meat/Poultry", 97.0, 29, mappedField), - create(10, "Ikura", "Seafood", 31.0, 31, mappedField), }; - return Arrays.asList(list); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ProductMixedAnnotation.java b/h2/src/test/org/h2/test/jaqu/ProductMixedAnnotation.java deleted file mode 100644 index fbbf47723b..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ProductMixedAnnotation.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQIndex; -import org.h2.jaqu.Table.JQTable; - -/** - * A table containing product data. - */ -@JQTable(annotationsOnly = false) -@JQIndex(standard = "name, cat") -public class ProductMixedAnnotation { - - public Double unitPrice; - public Integer unitsInStock; - public String mappedField; - - @JQColumn(name = "cat", maxLength = 255) - String category; - - @JQColumn(name = "id", primaryKey = true) - private Integer productId; - - @JQColumn(name = "name") - private String productName; - - public ProductMixedAnnotation() { - // public constructor - } - - protected ProductMixedAnnotation(int productId, String productName, - String category, double unitPrice, int unitsInStock, - String mappedField) { - this.productId = productId; - this.productName = productName; - this.category = category; - this.unitPrice = unitPrice; - this.unitsInStock = unitsInStock; - this.mappedField = mappedField; - } - - private static ProductMixedAnnotation create(int productId, - String productName, String category, double unitPrice, - int unitsInStock, String mappedField) { - return new ProductMixedAnnotation(productId, productName, category, - unitPrice, unitsInStock, mappedField); - } - - public static List getList() { - String mappedField = "mapped"; - ProductMixedAnnotation[] list = { - create(1, "Chai", "Beverages", 18, 39, mappedField), - create(2, "Chang", "Beverages", 19.0, 17, mappedField), - create(3, "Aniseed Syrup", "Condiments", 10.0, 13, mappedField), - create(4, "Chef Anton's Cajun Seasoning", "Condiments", 22.0, 53, mappedField), - create(5, "Chef Anton's Gumbo Mix", "Condiments", 21.3500, 0, mappedField), - create(6, "Grandma's Boysenberry Spread", "Condiments", 25.0, 120, mappedField), - create(7, 
"Uncle Bob's Organic Dried Pears", "Produce", 30.0, 15, mappedField), - create(8, "Northwoods Cranberry Sauce", "Condiments", 40.0, 6, mappedField), - create(9, "Mishi Kobe Niku", "Meat/Poultry", 97.0, 29, mappedField), - create(10, "Ikura", "Seafood", 31.0, 31, mappedField), }; - return Arrays.asList(list); - } - - @Override - public String toString() { - return productName + ": " + unitsInStock; - } - - public int id() { - return productId; - } - - public String name() { - return productName; - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/ProductNoCreateTable.java b/h2/src/test/org/h2/test/jaqu/ProductNoCreateTable.java deleted file mode 100644 index b48a68ec12..0000000000 --- a/h2/src/test/org/h2/test/jaqu/ProductNoCreateTable.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.util.Arrays; -import java.util.List; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQTable; - -/** - * A table containing product data. 
- */ -@JQTable(createIfRequired = false) -public class ProductNoCreateTable { - - @JQColumn(name = "id") - private Integer productId; - - @JQColumn(name = "name") - private String productName; - - public ProductNoCreateTable() { - // public constructor - } - - private ProductNoCreateTable(int productId, String productName) { - this.productId = productId; - this.productName = productName; - } - - private static ProductNoCreateTable create(int productId, String productName) { - return new ProductNoCreateTable(productId, productName); - } - - public static List getList() { - ProductNoCreateTable[] list = { create(1, "Chai"), create(2, "Chang") }; - return Arrays.asList(list); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/SamplesTest.java b/h2/src/test/org/h2/test/jaqu/SamplesTest.java deleted file mode 100644 index 6d5ac61bbe..0000000000 --- a/h2/src/test/org/h2/test/jaqu/SamplesTest.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import static org.h2.jaqu.Function.count; -import static org.h2.jaqu.Function.isNull; -import static org.h2.jaqu.Function.length; -import static org.h2.jaqu.Function.max; -import static org.h2.jaqu.Function.min; -import static org.h2.jaqu.Function.not; -import static org.h2.jaqu.Function.sum; -import java.math.BigDecimal; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import org.h2.jaqu.Db; -import org.h2.jaqu.Filter; -import org.h2.test.TestBase; - -/** - * This is the implementation of the 101 LINQ Samples as described in - * http://msdn2.microsoft.com/en-us/vcsharp/aa336760.aspx - */ -public class SamplesTest extends TestBase { - - /** - * This object represents a database (actually a connection to the - * database). 
- */ - Db db; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) { - new SamplesTest().test(); - } - - @Override - public void test() { - db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.insertAll(Product.getList()); - db.insertAll(Customer.getList()); - db.insertAll(Order.getList()); - db.insertAll(ComplexObject.getList()); - // TODO support getters/setters - // TODO support all relevant data types (byte[], ...) - // TODO nested AND/OR, >, <, ... - // TODO NOT - // TODO +, -, *, /, ||, nested operations - // TODO LIKE ESCAPE... - // TODO UPDATE: FROM ... UPDATE? - // TODO SELECT UNION - // TODO DatabaseAdapter - testComplexObject(); - testComplexObject2(); - testOrAndNot(); - testDelete(); - testIsNull(); - testLike(); - testMinMax(); - testSum(); - testLength(); - testCount(); - testGroup(); - testSelectManyCompoundFrom2(); - testWhereSimple4(); - testSelectSimple2(); - testAnonymousTypes3(); - testWhereSimple2(); - testWhereSimple3(); - testReverseColumns(); - testLimitOffset(); - testKeyRetrieval(); - db.close(); - } - - /** - * A simple test table. The columns are in a different order than in the - * database. - */ - public static class TestReverse { - public String name; - public Integer id; - } - - private void testReverseColumns() { - db.executeUpdate("create table TestReverse" + - "(id int, name varchar, additional varchar)"); - TestReverse t = new TestReverse(); - t.id = 10; - t.name = "Hello"; - db.insert(t); - TestReverse check = db.from(new TestReverse()).selectFirst(); - assertEquals(t.name, check.name); - assertEquals(t.id, check.id); - } - - - private void testWhereSimple2() { - -// var soldOutProducts = -// from p in products -// where p.UnitsInStock == 0 -// select p; - - Product p = new Product(); - List soldOutProducts = - db.from(p). - where(p.unitsInStock).is(0). 
- orderBy(p.productId).select(); - - assertEquals("[Chef Anton's Gumbo Mix: 0]", soldOutProducts.toString()); - } - - private void testWhereSimple3() { - -// var expensiveInStockProducts = -// from p in products -// where p.UnitsInStock > 0 -// && p.UnitPrice > 3.00M -// select p; - - Product p = new Product(); - List expensiveInStockProducts = - db.from(p). - where(p.unitsInStock).bigger(0). - and(p.unitPrice).bigger(30.0). - orderBy(p.productId).select(); - - assertEquals("[Northwoods Cranberry Sauce: 6, Mishi Kobe Niku: 29, Ikura: 31]", - expensiveInStockProducts.toString()); - } - - private void testWhereSimple4() { - -// var waCustomers = -// from c in customers -// where c.Region == "WA" -// select c; - - Customer c = new Customer(); - List waCustomers = - db.from(c). - where(c.region).is("WA"). - select(); - - assertEquals("[ALFKI, ANATR]", waCustomers.toString()); - } - - private void testSelectSimple2() { - -// var productNames = -// from p in products -// select p.ProductName; - - Product p = new Product(); - List productNames = - db.from(p). - orderBy(p.productId).select(p.productName); - - List products = Product.getList(); - for (int i = 0; i < products.size(); i++) { - assertEquals(products.get(i).productName, productNames.get(i)); - } - } - - /** - * A result set class containing the product name and price. - */ - public static class ProductPrice { - public String productName; - public String category; - public Double price; - } - - private void testAnonymousTypes3() { - -// var productInfos = -// from p in products -// select new { -// p.ProductName, -// p.Category, -// Price = p.UnitPrice -// }; - - final Product p = new Product(); - List productInfos = - db.from(p).orderBy(p.productId). 
- select(new ProductPrice() { { - productName = p.productName; - category = p.category; - price = p.unitPrice; - }}); - - List products = Product.getList(); - assertEquals(products.size(), productInfos.size()); - for (int i = 0; i < products.size(); i++) { - ProductPrice pr = productInfos.get(i); - Product p2 = products.get(i); - assertEquals(p2.productName, pr.productName); - assertEquals(p2.category, pr.category); - assertEquals(p2.unitPrice, pr.price); - } - } - - /** - * A result set class containing customer data and the order total. - */ - public static class CustOrder { - public String customerId; - public Integer orderId; - public BigDecimal total; - @Override - public String toString() { - return customerId + ":" + orderId + ":" + total; - } - } - - private void testSelectManyCompoundFrom2() { - -// var orders = -// from c in customers, -// o in c.Orders -// where o.Total < 500.00M -// select new { -// c.CustomerID, -// o.OrderID, -// o.Total -// }; - - final Customer c = new Customer(); - final Order o = new Order(); - List orders = - db.from(c). - innerJoin(o).on(c.customerId).is(o.customerId). - where(o.total).smaller(new BigDecimal("100.00")). - orderBy(1). 
- select(new CustOrder() { { - customerId = c.customerId; - orderId = o.orderId; - total = o.total; - }}); - - assertEquals("[ANATR:10308:88.80]", orders.toString()); - } - - private void testIsNull() { - Product p = new Product(); - String sql = db.from(p).whereTrue(isNull(p.productName)).getSQL(); - assertEquals("SELECT * FROM Product WHERE (productName IS NULL)", sql); - } - - private void testDelete() { - Product p = new Product(); - int deleted = db.from(p).where(p.productName).like("A%").delete(); - assertEquals(1, deleted); - deleted = db.from(p).delete(); - assertEquals(9, deleted); - db.insertAll(Product.getList()); - db.deleteAll(Product.getList()); - assertEquals(0, db.from(p).selectCount()); - db.insertAll(Product.getList()); - } - - private void testOrAndNot() { - Product p = new Product(); - String sql = db.from(p).whereTrue(not(isNull(p.productName))).getSQL(); - assertEquals("SELECT * FROM Product WHERE (NOT productName IS NULL)", sql); - sql = db.from(p).whereTrue(not(isNull(p.productName))).getSQL(); - assertEquals("SELECT * FROM Product WHERE (NOT productName IS NULL)", sql); - sql = db.from(p).whereTrue(db.test(p.productId).is(1)).getSQL(); - assertEquals("SELECT * FROM Product WHERE ((productId = ?))", sql); - } - - private void testLength() { - Product p = new Product(); - List lengths = - db.from(p). - where(length(p.productName)).smaller(10). - orderBy(1). 
- selectDistinct(length(p.productName)); - assertEquals("[4, 5]", lengths.toString()); - } - - private void testSum() { - Product p = new Product(); - Long sum = db.from(p).selectFirst(sum(p.unitsInStock)); - assertEquals(323, sum.intValue()); - Double sumPrice = db.from(p).selectFirst(sum(p.unitPrice)); - assertEquals(313.35, sumPrice.doubleValue()); - } - - private void testMinMax() { - Product p = new Product(); - Integer min = db.from(p).selectFirst(min(p.unitsInStock)); - assertEquals(0, min.intValue()); - String minName = db.from(p).selectFirst(min(p.productName)); - assertEquals("Aniseed Syrup", minName); - Double max = db.from(p).selectFirst(max(p.unitPrice)); - assertEquals(97.0, max.doubleValue()); - } - - private void testLike() { - Product p = new Product(); - List aList = db.from(p). - where(p.productName).like("Cha%"). - orderBy(p.productName).select(); - assertEquals("[Chai: 39, Chang: 17]", aList.toString()); - } - - private void testCount() { - long count = db.from(new Product()).selectCount(); - assertEquals(10, count); - } - - private void testComplexObject() { - ComplexObject co = new ComplexObject(); - String sql = db.from(co). - where(co.id).is(1). - and(co.amount).is(1L). - and(co.birthday).smaller(new java.util.Date()). - and(co.created).smaller(java.sql.Timestamp.valueOf("2005-05-05 05:05:05")). - and(co.name).is("hello"). - and(co.time).smaller(java.sql.Time.valueOf("23:23:23")). - and(co.value).is(new BigDecimal("1")).getSQL(); - assertEquals("SELECT * FROM ComplexObject " + - "WHERE id = ? " + - "AND amount = ? " + - "AND birthday < ? " + - "AND created < ? " + - "AND name = ? " + - "AND time < ? " + - "AND value = ?", sql); - - long count = db.from(co). - where(co.id).is(1). - and(co.amount).is(1L). - and(co.birthday).smaller(new java.util.Date()). - and(co.created).smaller(java.sql.Timestamp.valueOf("2005-05-05 05:05:05")). - and(co.name).is("hello"). - and(co.time).smaller(java.sql.Time.valueOf("23:23:23")). 
- and(co.value).is(new BigDecimal("1")). - selectCount(); - assertEquals(1, count); - } - - private void testComplexObject2() { - testComplexObject2(1, "hello"); - } - - private void testComplexObject2(final int x, final String name) { - final ComplexObject co = new ComplexObject(); - - String sql = db.from(co). - where(new Filter() { @Override - public boolean where() { - return co.id == x - && co.name.equals(name) - && co.name.equals("hello"); - } }).getSQL(); - assertEquals("SELECT * FROM ComplexObject " + - "WHERE id=? " + - "AND ?=name " + - "AND 'hello'=name", sql); - - long count = db.from(co). - where(new Filter() { @Override - public boolean where() { - return co.id == x - && co.name.equals(name) - && co.name.equals("hello"); - } }).selectCount(); - - assertEquals(1, count); - } - - private void testLimitOffset() { - Set ids = new HashSet(); - Product p = new Product(); - for (int i = 0; i < 5; i++) { - List products = db.from(p).limit(2).offset(2 * i).select(); - assertTrue(products.size() == 2); - for (Product prod : products) { - assertTrue("Failed to add product id. Duplicate?", ids.add(prod.productId)); - } - } - } - - private void testKeyRetrieval() { - List list = SupportedTypes.createList(); - List keys = db.insertAllAndGetKeys(list); - Set uniqueKeys = new HashSet(); - for (Long l : keys) { - assertTrue("Failed to add key. Duplicate?", uniqueKeys.add(l)); - } - } - - /** - * A result set class containing product groups. - */ - public static class ProductGroup { - public String category; - public Long productCount; - @Override - public String toString() { - return category + ":" + productCount; - } - } - - private void testGroup() { - -// var orderGroups = -// from p in products -// group p by p.Category into g -// select new { -// Category = g.Key, -// Products = g -// }; - - final Product p = new Product(); - List list = - db.from(p). - groupBy(p.category). - orderBy(1). 
- select(new ProductGroup() { { - category = p.category; - productCount = count(); - }}); - - assertEquals("[Beverages:2, Condiments:5, " + - "Meat/Poultry:1, Produce:1, Seafood:1]", - list.toString()); - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/SupportedTypes.java b/h2/src/test/org/h2/test/jaqu/SupportedTypes.java deleted file mode 100644 index 012055fe5f..0000000000 --- a/h2/src/test/org/h2/test/jaqu/SupportedTypes.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.test.jaqu; - -import java.math.BigDecimal; -import java.sql.Timestamp; -import java.util.List; -import java.util.Random; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQTable; -import org.h2.util.New; - -/** - * A data class that contains a column for each data type. - */ -@JQTable(strictTypeMapping = true, version = 1) -public class SupportedTypes { - - static final SupportedTypes SAMPLE = new SupportedTypes(); - - @JQColumn(primaryKey = true, autoIncrement = true) - public Integer id; - - @JQColumn - private Boolean myBool = false; - - @JQColumn - private Byte myByte = 2; - - @JQColumn - private Short myShort; - - @JQColumn - private Integer myInteger; - - @JQColumn - private Long myLong; - - @JQColumn - private Float myFloat = 1.0f; - - @JQColumn - private Double myDouble; - - @JQColumn - private BigDecimal myBigDecimal; - - @JQColumn - private String myString; - - @JQColumn - private java.util.Date myUtilDate; - - @JQColumn - private java.sql.Date mySqlDate; - - @JQColumn - private java.sql.Time mySqlTime; - - @JQColumn - private java.sql.Timestamp mySqlTimestamp; - - static List createList() { - List list = New.arrayList(); - for (int i = 0; i < 10; i++) { - list.add(randomValue()); - } - return list; - } - - static SupportedTypes randomValue() { - Random rand = new Random(); - SupportedTypes s = 
new SupportedTypes(); - s.myBool = Boolean.valueOf(rand.nextBoolean()); - s.myByte = new Byte((byte) rand.nextInt(Byte.MAX_VALUE)); - s.myShort = new Short((short) rand.nextInt(Short.MAX_VALUE)); - s.myInteger = new Integer(rand.nextInt()); - s.myLong = new Long(rand.nextLong()); - s.myFloat = new Float(rand.nextFloat()); - s.myDouble = new Double(rand.nextDouble()); - s.myBigDecimal = new BigDecimal(rand.nextDouble()); - s.myString = Long.toHexString(rand.nextLong()); - s.myUtilDate = new java.util.Date(rand.nextLong()); - s.mySqlDate = new java.sql.Date(rand.nextLong()); - s.mySqlTime = new java.sql.Time(rand.nextLong()); - s.mySqlTimestamp = new java.sql.Timestamp(rand.nextLong()); - return s; - } - - public boolean equivalentTo(SupportedTypes s) { - boolean same = true; - same &= myBool.equals(s.myBool); - same &= myByte.equals(s.myByte); - same &= myShort.equals(s.myShort); - same &= myInteger.equals(s.myInteger); - same &= myLong.equals(s.myLong); - same &= myFloat.equals(s.myFloat); - same &= myDouble.equals(s.myDouble); - same &= myBigDecimal.equals(s.myBigDecimal); - Timestamp a = new Timestamp(myUtilDate.getTime()); - same &= a.toString().equals(s.myUtilDate.toString()); - same &= mySqlTimestamp.toString().equals(s.mySqlTimestamp.toString()); - same &= mySqlDate.toString().equals(s.mySqlDate.toString()); - same &= mySqlTime.toString().equals(s.mySqlTime.toString()); - same &= myString.equals(s.myString); - same &= true; - return same; - } - - /** - * This class demonstrates the table upgrade. 
- */ - @JQTable(name = "SupportedTypes", version = 2, - inheritColumns = true, strictTypeMapping = true) - public static class SupportedTypes2 extends SupportedTypes { - - public SupportedTypes2() { - // nothing to do - } - } -} diff --git a/h2/src/test/org/h2/test/jaqu/UpdateTest.java b/h2/src/test/org/h2/test/jaqu/UpdateTest.java deleted file mode 100644 index f1773879dc..0000000000 --- a/h2/src/test/org/h2/test/jaqu/UpdateTest.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jaqu; - -import org.h2.jaqu.Db; -import org.h2.test.TestBase; - -import static java.sql.Date.valueOf; - -/** - * Tests the Db.update() function. - * - * @author dmoebius at scoop slash gmbh dot de - */ -public class UpdateTest extends TestBase { - - private Db db; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... 
args) throws Exception { - new UpdateTest().test(); - } - - @Override - public void test() throws Exception { - db = Db.open("jdbc:h2:mem:", "sa", "sa"); - db.insertAll(Product.getList()); - db.insertAll(Customer.getList()); - db.insertAll(Order.getList()); - - testSimpleUpdate(); - testSimpleUpdateWithCombinedPrimaryKey(); - testSimpleMerge(); - testSimpleMergeWithCombinedPrimaryKey(); - testSetColumns(); - - db.close(); - } - - private void testSimpleUpdate() { - Product p = new Product(); - Product pChang = db.from(p).where(p.productName).is("Chang") - .selectFirst(); - // update unitPrice from 19.0 to 19.5 - pChang.unitPrice = 19.5; - // update unitsInStock from 17 to 16 - pChang.unitsInStock = 16; - db.update(pChang); - - Product p2 = new Product(); - Product pChang2 = db.from(p2).where(p2.productName).is("Chang") - .selectFirst(); - assertEquals(19.5, pChang2.unitPrice); - assertEquals(16, pChang2.unitsInStock.intValue()); - - // undo update - pChang.unitPrice = 19.0; - pChang.unitsInStock = 17; - db.update(pChang); - } - - private void testSimpleUpdateWithCombinedPrimaryKey() { - Order o = new Order(); - Order ourOrder = db.from(o).where(o.orderDate) - .is(valueOf("2007-01-02")).selectFirst(); - ourOrder.orderDate = valueOf("2007-01-03"); - db.update(ourOrder); - - Order ourUpdatedOrder = db.from(o).where(o.orderDate) - .is(valueOf("2007-01-03")).selectFirst(); - assertTrue("updated order not found", ourUpdatedOrder != null); - - // undo update - ourOrder.orderDate = valueOf("2007-01-02"); - db.update(ourOrder); - } - - private void testSimpleMerge() { - Product p = new Product(); - Product pChang = db.from(p).where(p.productName).is("Chang") - .selectFirst(); - // update unitPrice from 19.0 to 19.5 - pChang.unitPrice = 19.5; - // update unitsInStock from 17 to 16 - pChang.unitsInStock = 16; - db.merge(pChang); - - Product p2 = new Product(); - Product pChang2 = db.from(p2).where(p2.productName).is("Chang") - .selectFirst(); - assertEquals(19.5, 
pChang2.unitPrice); - assertEquals(16, pChang2.unitsInStock.intValue()); - - // undo update - pChang.unitPrice = 19.0; - pChang.unitsInStock = 17; - db.merge(pChang); - } - - private void testSimpleMergeWithCombinedPrimaryKey() { - Order o = new Order(); - Order ourOrder = db.from(o).where(o.orderDate) - .is(valueOf("2007-01-02")).selectFirst(); - ourOrder.orderDate = valueOf("2007-01-03"); - db.merge(ourOrder); - - Order ourUpdatedOrder = db.from(o).where(o.orderDate) - .is(valueOf("2007-01-03")).selectFirst(); - assertTrue("updated order not found", ourUpdatedOrder != null); - - // undo update - ourOrder.orderDate = valueOf("2007-01-02"); - db.merge(ourOrder); - } - - private void testSetColumns() { - Product p = new Product(); - Product original = db.from(p).where(p.productId).is(1).selectFirst(); - - // update string and double columns - db.from(p) - .set(p.productName).to("updated") - .increment(p.unitPrice).by(3.14) - .increment(p.unitsInStock).by(2) - .where(p.productId) - .is(1). - update(); - - // confirm the data was properly updated - Product revised = db.from(p).where(p.productId).is(1).selectFirst(); - assertEquals("updated", revised.productName); - assertEquals(original.unitPrice + 3.14, revised.unitPrice); - assertEquals(original.unitsInStock + 2, revised.unitsInStock.intValue()); - - // restore the data - db.from(p) - .set(p.productName).to(original.productName) - .set(p.unitPrice).to(original.unitPrice) - .increment(p.unitsInStock).by(-2) - .where(p.productId).is(1).update(); - - // confirm the data was properly restored - Product restored = db.from(p). - where(p.productId).is(1).selectFirst(); - assertEquals(original.productName, restored.productName); - assertEquals(original.unitPrice, restored.unitPrice); - assertEquals(original.unitsInStock, restored.unitsInStock); - - double unitPriceOld = db.from(p). 
- where(p.productId).is(1).selectFirst().unitPrice; - // double the unit price - db.from(p).increment(p.unitPrice).by(p.unitPrice).where(p.productId) - .is(1).update(); - double unitPriceNew = db.from(p). - where(p.productId).is(1).selectFirst().unitPrice; - assertEquals(unitPriceOld * 2, unitPriceNew); - - } - -} diff --git a/h2/src/test/org/h2/test/jaqu/package.html b/h2/src/test/org/h2/test/jaqu/package.html deleted file mode 100644 index d0c94b2fc1..0000000000 --- a/h2/src/test/org/h2/test/jaqu/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -JaQu (Java Query) test cases. - -

    \ No newline at end of file diff --git a/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java b/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java index a65995a890..1a153b9e16 100644 --- a/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java +++ b/h2/src/test/org/h2/test/jdbc/TestBatchUpdates.java @@ -1,12 +1,10 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; -import java.io.ByteArrayOutputStream; -import java.io.PrintStream; import java.sql.BatchUpdateException; import java.sql.CallableStatement; import java.sql.Connection; @@ -16,12 +14,14 @@ import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test for batch updates. */ -public class TestBatchUpdates extends TestBase { +public class TestBatchUpdates extends TestDb { private static final String COFFEE_UPDATE = "UPDATE TEST SET PRICE=PRICE*20 WHERE TYPE_ID=?"; @@ -57,7 +57,7 @@ public class TestBatchUpdates extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -78,15 +78,15 @@ private void testRootCause() throws SQLException { try { stat.executeBatch(); } catch (SQLException e) { - assertContains(e.toString(), "TEST_Y"); - e = e.getNextException(); - assertTrue(e != null); - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); - assertTrue(e != null); + assertNotNull(e); assertContains(e.toString(), "TEST_X"); e = e.getNextException(); - assertTrue(e == null); + assertNotNull(e); + assertContains(e.toString(), "TEST_Y"); + e = e.getNextException(); + assertNull(e); } stat.execute("create table test(id int)"); PreparedStatement prep = conn.prepareStatement("insert into test values(?)"); @@ -97,15 +97,15 @@ private void testRootCause() throws SQLException { try { prep.executeBatch(); } catch (SQLException e) { - assertContains(e.toString(), "TEST_Y"); - e = e.getNextException(); - assertTrue(e != null); - assertContains(e.toString(), "TEST_Y"); + assertContains(e.toString(), "TEST_X"); e = e.getNextException(); - assertTrue(e != null); + assertNotNull(e); assertContains(e.toString(), "TEST_X"); e = e.getNextException(); - assertTrue(e == null); + assertNotNull(e); + assertContains(e.toString(), "TEST_Y"); + e = e.getNextException(); + assertNull(e); } stat.execute("drop table test"); conn.close(); @@ -115,8 +115,7 @@ private void testExecuteCall() throws SQLException { deleteDb("batchUpdates"); conn = getConnection("batchUpdates"); stat = conn.createStatement(); - stat.execute("CREATE ALIAS updatePrices FOR \"" + - getClass().getName() + ".updatePrices\""); + stat.execute("CREATE ALIAS updatePrices FOR '" + getClass().getName() + ".updatePrices'"); CallableStatement call = conn.prepareCall("{call updatePrices(?, ?)}"); call.setString(1, "Hello"); call.setFloat(2, 1.4f); @@ -140,7 +139,7 @@ private void testExecuteCall() throws SQLException { * 
@param f the float * @return the float converted to an int */ - public static int updatePrices(String message, double f) { + public static int updatePrices(@SuppressWarnings("unused") String message, double f) { return (int) f; } @@ -154,19 +153,7 @@ private void testException() throws SQLException { prep.setString(1, "x"); prep.addBatch(); } - try { - prep.executeBatch(); - } catch (BatchUpdateException e) { - PrintStream temp = System.err; - try { - ByteArrayOutputStream buff = new ByteArrayOutputStream(); - PrintStream p = new PrintStream(buff); - System.setErr(p); - e.printStackTrace(); - } finally { - System.setErr(temp); - } - } + assertThrows(BatchUpdateException.class, prep).executeBatch(); conn.close(); } @@ -220,6 +207,7 @@ private void testAddBatch01() throws SQLException { String s = COFFEE_UPDATE; trace("Prepared Statement String:" + s); prep = conn.prepareStatement(s); + assertThrows(ErrorCode.PARAMETER_NOT_SET_1, prep).addBatch(); prep.setInt(1, 2); prep.addBatch(); prep.setInt(1, 3); @@ -549,7 +537,7 @@ private void testContinueBatch01() throws SQLException { trace("Count val is: " + count); // make sure that we have the correct error code for // the failed update. - if (!(batchUpdates[1] == -3 && count == 1)) { + if (batchUpdates[1] != -3 || count != 1) { fail("insert failed"); } } diff --git a/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java b/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java index 275fc8aa8c..c1e758553b 100644 --- a/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java +++ b/h2/src/test/org/h2/test/jdbc/TestCallableStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -10,6 +10,8 @@ import java.io.StringReader; import java.math.BigDecimal; import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Array; import java.sql.CallableStatement; import java.sql.Connection; import java.sql.Ref; @@ -20,10 +22,14 @@ import java.sql.Statement; import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.Collections; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.SimpleResultSet; import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; @@ -32,7 +38,7 @@ /** * Tests for the CallableStatement class. */ -public class TestCallableStatement extends TestBase { +public class TestCallableStatement extends TestDb { /** * Run just this test. @@ -40,7 +46,7 @@ public class TestCallableStatement extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -55,23 +61,23 @@ public void test() throws Exception { testCallWithResult(conn); testPrepare(conn); testClassLoader(conn); + testArrayArgument(conn); + testArrayReturnValue(conn); conn.close(); deleteDb("callableStatement"); } private void testOutParameter(Connection conn) throws SQLException { - conn.createStatement().execute( - "create table test(id identity) as select null"); + conn.createStatement().execute("CREATE SEQUENCE SEQ"); for (int i = 1; i < 20; i++) { - CallableStatement cs = conn.prepareCall("{ ? = call IDENTITY()}"); + CallableStatement cs = conn.prepareCall("{ ? 
= CALL NEXT VALUE FOR SEQ}"); cs.registerOutParameter(1, Types.BIGINT); cs.execute(); long id = cs.getLong(1); - assertEquals(1, id); + assertEquals(i, id); cs.close(); } - conn.createStatement().execute( - "drop table test"); + conn.createStatement().execute("DROP SEQUENCE SEQ"); } private void testUnsupportedOperations(Connection conn) throws SQLException { @@ -80,24 +86,20 @@ private void testUnsupportedOperations(Connection conn) throws SQLException { assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getURL(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getObject(1, Collections.>emptyMap()); + getObject(1, Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRef(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRowId(1); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getSQLXML(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getURL("a"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getObject("a", Collections.>emptyMap()); + getObject("a", Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRef("a"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). getRowId("a"); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - getSQLXML("a"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). setURL(1, (URL) null); @@ -105,16 +107,11 @@ private void testUnsupportedOperations(Connection conn) throws SQLException { setRef(1, (Ref) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). setRowId(1, (RowId) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). - setSQLXML(1, (SQLXML) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). setURL("a", (URL) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). setRowId("a", (RowId) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, call). 
- setSQLXML("a", (SQLXML) null); - } private void testCallWithResultSet(Connection conn) throws SQLException { @@ -147,6 +144,7 @@ private void testGetters(Connection conn) throws SQLException { assertEquals(1, call.getLong(1)); assertEquals(1, call.getByte(1)); assertEquals(1, ((Long) call.getObject(1)).longValue()); + assertEquals(1, call.getObject(1, Long.class).longValue()); assertFalse(call.wasNull()); call.setFloat(2, 1.1f); @@ -169,17 +167,20 @@ private void testGetters(Connection conn) throws SQLException { call.registerOutParameter(1, Types.DATE); call.execute(); assertEquals("2000-01-01", call.getDate(1).toString()); + assertEquals("2000-01-01", call.getObject(1, LocalDate.class).toString()); call.setTime(2, java.sql.Time.valueOf("01:02:03")); call.registerOutParameter(1, Types.TIME); call.execute(); assertEquals("01:02:03", call.getTime(1).toString()); + assertEquals("01:02:03", call.getObject(1, LocalTime.class).toString()); call.setTimestamp(2, java.sql.Timestamp.valueOf( "2001-02-03 04:05:06.789")); call.registerOutParameter(1, Types.TIMESTAMP); call.execute(); assertEquals("2001-02-03 04:05:06.789", call.getTimestamp(1).toString()); + assertEquals("2001-02-03T04:05:06.789", call.getObject(1, LocalDateTime.class).toString()); call.setBoolean(2, true); call.registerOutParameter(1, Types.BIT); @@ -237,9 +238,8 @@ private void testPrepare(Connection conn) throws Exception { assertEquals(1, rs.getInt(1)); assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - stat.execute("CREATE ALIAS testCall FOR \"" + - getClass().getName() + ".testCall\""); - call = conn.prepareCall("{CALL testCall(?, ?, ?, ?)}"); + stat.execute("CREATE ALIAS testCall FOR '" + getClass().getName() + ".testCall'"); + call = conn.prepareCall("{SELECT * FROM testCall(?, ?, ?, ?)}"); call.setInt("A", 50); call.setString("B", "abc"); long t = System.currentTimeMillis(); @@ -248,12 +248,7 @@ private void testPrepare(Connection conn) throws Exception { call.registerOutParameter(1, 
Types.INTEGER); call.registerOutParameter("B", Types.VARCHAR); call.executeUpdate(); - try { - call.getTimestamp("C"); - fail("not registered out parameter accessible"); - } catch (SQLException e) { - // expected exception - } + assertThrows(ErrorCode.INVALID_VALUE_2, call).getTimestamp("C"); call.registerOutParameter(3, Types.TIMESTAMP); call.registerOutParameter(4, Types.TIMESTAMP); call.executeUpdate(); @@ -263,10 +258,16 @@ private void testPrepare(Connection conn) throws Exception { assertEquals("2001-02-03 10:20:30.0", call.getTimestamp(4).toString()); assertEquals("2001-02-03 10:20:30.0", call.getTimestamp("D").toString()); + assertEquals("2001-02-03T10:20:30", call.getObject(4, LocalDateTime.class).toString()); + assertEquals("2001-02-03T10:20:30", call.getObject("D", LocalDateTime.class).toString()); assertEquals("10:20:30", call.getTime(4).toString()); assertEquals("10:20:30", call.getTime("D").toString()); + assertEquals("10:20:30", call.getObject(4, LocalTime.class).toString()); + assertEquals("10:20:30", call.getObject("D", LocalTime.class).toString()); assertEquals("2001-02-03", call.getDate(4).toString()); assertEquals("2001-02-03", call.getDate("D").toString()); + assertEquals("2001-02-03", call.getObject(4, LocalDate.class).toString()); + assertEquals("2001-02-03", call.getObject("D", LocalDate.class).toString()); assertEquals(100, call.getInt(1)); assertEquals(100, call.getInt("A")); @@ -297,25 +298,12 @@ private void testPrepare(Connection conn) throws Exception { assertEquals("ABC", call.getClob("B").getSubString(1, 3)); assertEquals("ABC", call.getNClob(2).getSubString(1, 3)); assertEquals("ABC", call.getNClob("B").getSubString(1, 3)); + assertEquals("ABC", call.getSQLXML(2).getString()); + assertEquals("ABC", call.getSQLXML("B").getString()); - try { - call.getString(100); - fail("incorrect parameter index value"); - } catch (SQLException e) { - // expected exception - } - try { - call.getString(0); - fail("incorrect parameter index value"); - 
} catch (SQLException e) { - // expected exception - } - try { - call.getBoolean("X"); - fail("incorrect parameter name value"); - } catch (SQLException e) { - // expected exception - } + assertThrows(ErrorCode.INVALID_VALUE_2, call).getString(100); + assertThrows(ErrorCode.INVALID_VALUE_2, call).getString(0); + assertThrows(ErrorCode.INVALID_VALUE_2, call).getBoolean("X"); call.setCharacterStream("B", new StringReader("xyz")); @@ -330,15 +318,15 @@ private void testPrepare(Connection conn) throws Exception { call.executeUpdate(); assertEquals("XYZ", call.getString("B")); call.setAsciiStream("B", - new ByteArrayInputStream("xyz".getBytes("UTF-8"))); + new ByteArrayInputStream("xyz".getBytes(StandardCharsets.UTF_8))); call.executeUpdate(); assertEquals("XYZ", call.getString("B")); call.setAsciiStream("B", - new ByteArrayInputStream("xyz-".getBytes("UTF-8")), 3); + new ByteArrayInputStream("xyz-".getBytes(StandardCharsets.UTF_8)), 3); call.executeUpdate(); assertEquals("XYZ", call.getString("B")); call.setAsciiStream("B", - new ByteArrayInputStream("xyz-".getBytes("UTF-8")), 3L); + new ByteArrayInputStream("xyz-".getBytes(StandardCharsets.UTF_8)), 3L); call.executeUpdate(); assertEquals("XYZ", call.getString("B")); @@ -362,6 +350,11 @@ private void testPrepare(Connection conn) throws Exception { call.setNString("B", "xyz"); call.executeUpdate(); assertEquals("XYZ", call.getString("B")); + SQLXML xml = conn.createSQLXML(); + xml.setString("xyz"); + call.setSQLXML("B", xml); + call.executeUpdate(); + assertEquals("XYZ", call.getString("B")); // test for exceptions after closing call.close(); @@ -378,7 +371,7 @@ private void testClassLoader(Connection conn) throws SQLException { JdbcUtils.addClassFactory(myFactory); try { Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS T_CLASSLOADER FOR \"TestClassFactory.testClassF\""); + stat.execute("CREATE ALIAS T_CLASSLOADER FOR 'TestClassFactory.testClassF'"); ResultSet rs = stat.executeQuery("SELECT 
T_CLASSLOADER(true)"); assertTrue(rs.next()); assertEquals(false, rs.getBoolean(1)); @@ -387,6 +380,92 @@ private void testClassLoader(Connection conn) throws SQLException { } } + private void testArrayArgument(Connection connection) throws SQLException { + Array array = connection.createArrayOf("Int", new Object[] {0, 1, 2}); + try (Statement statement = connection.createStatement()) { + statement.execute("CREATE ALIAS getArrayLength FOR '" + getClass().getName() + ".getArrayLength'"); + + // test setArray + try (CallableStatement callableStatement = connection + .prepareCall("{call getArrayLength(?)}")) { + callableStatement.setArray(1, array); + assertTrue(callableStatement.execute()); + + try (ResultSet resultSet = callableStatement.getResultSet()) { + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getInt(1)); + assertFalse(resultSet.next()); + } + } + + // test setObject + try (CallableStatement callableStatement = connection + .prepareCall("{call getArrayLength(?)}")) { + callableStatement.setObject(1, array); + assertTrue(callableStatement.execute()); + + try (ResultSet resultSet = callableStatement.getResultSet()) { + assertTrue(resultSet.next()); + assertEquals(3, resultSet.getInt(1)); + assertFalse(resultSet.next()); + } + } + } finally { + array.free(); + } + } + + private void testArrayReturnValue(Connection connection) throws SQLException { + Integer[][] arraysToTest = new Integer[][] { + {0, 1, 2}, + {0, 1, 2}, + {0, null, 2}, + }; + try (Statement statement = connection.createStatement()) { + statement.execute("CREATE ALIAS arrayIdentiy FOR '" + getClass().getName() + ".arrayIdentiy'"); + + for (Integer[] arrayToTest : arraysToTest) { + Array sqlInputArray = connection.createArrayOf("INTEGER", arrayToTest); + try { + try (CallableStatement callableStatement = connection + .prepareCall("{call arrayIdentiy(?)}")) { + callableStatement.setArray(1, sqlInputArray); + assertTrue(callableStatement.execute()); + + try (ResultSet resultSet = 
callableStatement.getResultSet()) { + assertTrue(resultSet.next()); + + // test getArray() + Array sqlReturnArray = resultSet.getArray(1); + try { + assertEquals( + (Object[]) sqlInputArray.getArray(), + (Object[]) sqlReturnArray.getArray()); + } finally { + sqlReturnArray.free(); + } + + // test getObject(Array.class) + sqlReturnArray = resultSet.getObject(1, Array.class); + try { + assertEquals( + (Object[]) sqlInputArray.getArray(), + (Object[]) sqlReturnArray.getArray()); + } finally { + sqlReturnArray.free(); + } + + assertFalse(resultSet.next()); + } + } + } finally { + sqlInputArray.free(); + } + + } + } + } + /** * Class factory unit test * @param b boolean value @@ -396,6 +475,26 @@ public static Boolean testClassF(Boolean b) { return !b; } + /** + * This method is called via reflection from the database. + * + * @param array the array + * @return the length of the array + */ + public static int getArrayLength(Integer[] array) { + return array == null ? 0 : array.length; + } + + /** + * This method is called via reflection from the database. + * + * @param array the array + * @return the array + */ + public static Integer[] arrayIdentiy(Integer[] array) { + return array; + } + /** * This method is called via reflection from the database. * diff --git a/h2/src/test/org/h2/test/jdbc/TestCancel.java b/h2/src/test/org/h2/test/jdbc/TestCancel.java index d26d380040..271fd71166 100644 --- a/h2/src/test/org/h2/test/jdbc/TestCancel.java +++ b/h2/src/test/org/h2/test/jdbc/TestCancel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -14,11 +14,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests Statement.cancel */ -public class TestCancel extends TestBase { +public class TestCancel extends TestDb { private static int lastVisited; @@ -28,7 +29,7 @@ public class TestCancel extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -116,8 +117,8 @@ private void testJdbcQueryTimeout() throws SQLException { assertEquals(1, stat.getQueryTimeout()); Statement s2 = conn.createStatement(); assertEquals(1, s2.getQueryTimeout()); - ResultSet rs = s2.executeQuery("SELECT VALUE " + - "FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'QUERY_TIMEOUT'"); + ResultSet rs = s2.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'QUERY_TIMEOUT'"); rs.next(); assertEquals(1000, rs.getInt(1)); assertThrows(ErrorCode.STATEMENT_WAS_CANCELED, stat). @@ -163,11 +164,14 @@ public static int visit(int x) { } private void testCancelStatement() throws Exception { + if (config.lazy && config.networked) { + return; + } deleteDb("cancel"); Connection conn = getConnection("cancel"); Statement stat = conn.createStatement(); stat.execute("DROP TABLE IF EXISTS TEST"); - stat.execute("CREATE ALIAS VISIT FOR \"" + getClass().getName() + ".visit\""); + stat.execute("CREATE ALIAS VISIT FOR '" + getClass().getName() + ".visit'"); stat.execute("CREATE MEMORY TABLE TEST" + "(ID INT PRIMARY KEY, NAME VARCHAR(255))"); PreparedStatement prep = conn.prepareStatement( @@ -189,9 +193,9 @@ private void testCancelStatement() throws Exception { cancel.start(); try { Thread.yield(); - assertThrows(ErrorCode.STATEMENT_WAS_CANCELED, query). 
- executeQuery("SELECT VISIT(ID), (SELECT SUM(X) " + - "FROM SYSTEM_RANGE(1, 10000) WHERE X<>ID) FROM TEST ORDER BY ID"); + assertThrows(ErrorCode.STATEMENT_WAS_CANCELED, query, + "SELECT VISIT(ID), (SELECT SUM(X) " + + "FROM SYSTEM_RANGE(1, 100000) WHERE X<>ID) FROM TEST ORDER BY ID"); } finally { cancel.stopNow(); cancel.join(); diff --git a/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java b/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java new file mode 100644 index 0000000000..e2f15bb8f1 --- /dev/null +++ b/h2/src/test/org/h2/test/jdbc/TestConcurrentConnectionUsage.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.jdbc; + +import java.io.ByteArrayInputStream; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Task; + +/** + * Test concurrent usage of the same connection. + */ +public class TestConcurrentConnectionUsage extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws SQLException { + testAutoCommit(); + } + + private void testAutoCommit() throws SQLException { + deleteDb(getTestName()); + final Connection conn = getConnection(getTestName()); + final PreparedStatement p1 = conn.prepareStatement("select 1 from dual"); + Task t = new Task() { + @Override + public void call() throws Exception { + while (!stop) { + p1.executeQuery(); + conn.setAutoCommit(true); + conn.setAutoCommit(false); + } + } + }.execute(); + PreparedStatement prep = conn.prepareStatement("select ? 
from dual"); + for (int i = 0; i < 10; i++) { + prep.setBinaryStream(1, new ByteArrayInputStream(new byte[1024])); + prep.executeQuery(); + } + t.get(); + conn.close(); + } + +} diff --git a/h2/src/test/org/h2/test/jdbc/TestConnection.java b/h2/src/test/org/h2/test/jdbc/TestConnection.java new file mode 100644 index 0000000000..14206376ea --- /dev/null +++ b/h2/src/test/org/h2/test/jdbc/TestConnection.java @@ -0,0 +1,402 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.jdbc; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import java.util.TimeZone; + +import org.h2.api.ErrorCode; +import org.h2.engine.Constants; +import org.h2.engine.SysProperties; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.DateTimeUtils; + +/** + * Tests the client info + */ +public class TestConnection extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testSetSupportedClientInfo(); + testSetUnsupportedClientInfo(); + testGetUnsupportedClientInfo(); + testSetSupportedClientInfoProperties(); + testSetUnsupportedClientInfoProperties(); + testSetInternalProperty(); + testSetInternalPropertyToInitialValue(); + testTransactionIsolationSetAndGet(); + testSetGetSchema(); + testCommitOnAutoCommitSetRunner(); + testRollbackOnAutoCommitSetRunner(); + testChangeTransactionLevelCommitRunner(); + testLockTimeout(); + testIgnoreUnknownSettings(); + testTimeZone(); + } + + private void testSetInternalProperty() throws SQLException { + // Use MySQL-mode since this allows all property names + // (apart from h2 internal names). + Connection conn = getConnection("clientInfoMySQL;MODE=MySQL"); + + assertThrows(SQLClientInfoException.class, conn).setClientInfo("numServers", "SomeValue"); + assertThrows(SQLClientInfoException.class, conn).setClientInfo("server23", "SomeValue"); + conn.close(); + } + + /** + * Test that no exception is thrown if the client info of a connection + * managed in a connection pool is reset to its initial values. + * + * This is needed when using h2 in websphere liberty. + */ + private void testSetInternalPropertyToInitialValue() throws SQLException { + // Use MySQL-mode since this allows all property names + // (apart from h2 internal names). 
+ Connection conn = getConnection("clientInfoMySQL;MODE=MySQL"); + String numServersPropertyName = "numServers"; + String numServers = conn.getClientInfo(numServersPropertyName); + conn.setClientInfo(numServersPropertyName, numServers); + assertEquals(conn.getClientInfo(numServersPropertyName), numServers); + conn.close(); + } + + private void testSetUnsupportedClientInfoProperties() throws SQLException { + Connection conn = getConnection("clientInfo"); + Properties properties = new Properties(); + properties.put("ClientUser", "someUser"); + assertThrows(SQLClientInfoException.class, conn).setClientInfo(properties); + conn.close(); + } + + private void testSetSupportedClientInfoProperties() throws SQLException { + Connection conn = getConnection("clientInfoDB2;MODE=DB2"); + conn.setClientInfo("ApplicationName", "Connection Test"); + + Properties properties = new Properties(); + properties.put("ClientUser", "someUser"); + conn.setClientInfo(properties); + // old property should have been removed + assertNull(conn.getClientInfo("ApplicationName")); + // new property has been set + assertEquals(conn.getClientInfo("ClientUser"), "someUser"); + conn.close(); + } + + private void testSetSupportedClientInfo() throws SQLException { + Connection conn = getConnection("clientInfoDB2;MODE=DB2"); + conn.setClientInfo("ApplicationName", "Connection Test"); + + assertEquals(conn.getClientInfo("ApplicationName"), "Connection Test"); + conn.close(); + } + + private void testSetUnsupportedClientInfo() throws SQLException { + Connection conn = getConnection("clientInfoDB2;MODE=DB2"); + assertThrows(SQLClientInfoException.class, conn).setClientInfo( + "UnsupportedName", "SomeValue"); + conn.close(); + } + + private void testGetUnsupportedClientInfo() throws SQLException { + Connection conn = getConnection("clientInfo"); + assertNull(conn.getClientInfo("UnknownProperty")); + conn.close(); + } + + private void testTransactionIsolationSetAndGet() throws Exception { + 
deleteDb("transactionIsolation"); + try (Connection conn = getConnection("transactionIsolation")) { + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + assertEquals(Connection.TRANSACTION_READ_UNCOMMITTED, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + assertEquals(Connection.TRANSACTION_REPEATABLE_READ, + conn.getTransactionIsolation()); + conn.setTransactionIsolation(Constants.TRANSACTION_SNAPSHOT); + assertEquals(Constants.TRANSACTION_SNAPSHOT, + conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, conn.getTransactionIsolation()); + } finally { + deleteDb("transactionIsolation"); + } + } + + private void testCommitOnAutoCommitSetRunner() throws Exception { + assertFalse("Default value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testCommitOnAutoCommitSet(false); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testCommitOnAutoCommitSet(true); + } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; + } + + } + + private void testCommitOnAutoCommitSet(boolean expectedPropertyEnabled) throws Exception { + assertEquals(SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT, expectedPropertyEnabled); + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(false); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.commit(); + // no error expected + + conn.setAutoCommit(true); + index = 1; + prep.setInt(index++, 2); + prep.setString(index++, "test2"); + if 
(expectedPropertyEnabled) { + prep.execute(); + try { + conn.commit(); + throw new AssertionError("SQLException expected"); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("commit()")); + assertEquals(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, e.getErrorCode()); + } + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + assertTrue(rs.getInt(1) == 2); + rs.close(); + } else { + prep.execute(); + conn.commit(); + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + assertTrue(rs.getInt(1) == 2); + rs.close(); + } + + conn.close(); + prep.close(); + } + + private void testChangeTransactionLevelCommitRunner() throws Exception { + assertFalse("Default value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testChangeTransactionLevelCommit(false); + testChangeTransactionLevelCommit(true); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testChangeTransactionLevelCommit(true); + testChangeTransactionLevelCommit(false); + } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; + } + } + + private void testChangeTransactionLevelCommit(boolean setAutoCommit) throws Exception { + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(setAutoCommit); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + + conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + // throws exception if TransactionIsolation did not commit + + conn.close(); + prep.close(); + } + + private void testRollbackOnAutoCommitSetRunner() throws Exception { + assertFalse("Default 
value must be false", SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT); + testRollbackOnAutoCommitSet(false); + try { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = true; + testRollbackOnAutoCommitSet(true); + } finally { + SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT = false; + } + } + + private void testRollbackOnAutoCommitSet(boolean expectedPropertyEnabled) throws Exception { + assertEquals(SysProperties.FORCE_AUTOCOMMIT_OFF_ON_COMMIT, expectedPropertyEnabled); + Connection conn = getConnection("clientInfo"); + conn.setAutoCommit(false); + Statement stat = conn.createStatement(); + stat.execute("DROP TABLE IF EXISTS TEST"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO TEST VALUES(?, ?)"); + int index = 1; + prep.setInt(index++, 1); + prep.setString(index++, "test1"); + prep.execute(); + conn.rollback(); + // no error expected + + + conn.setAutoCommit(true); + index = 1; + prep.setInt(index++, 2); + prep.setString(index++, "test2"); + if (expectedPropertyEnabled) { + prep.execute(); + try { + conn.rollback(); + throw new AssertionError("SQLException expected"); + } catch (SQLException e) { + assertEquals(ErrorCode.METHOD_DISABLED_ON_AUTOCOMMIT_TRUE, e.getErrorCode()); + assertTrue(e.getMessage().contains("rollback()")); + } + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + int count = rs.getInt(1); + assertTrue("Found " +count + " rows", count == 1); + rs.close(); + } else { + prep.execute(); + // rollback is permitted, however has no effects in autocommit=true + conn.rollback(); + ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM TEST"); + rs.next(); + int count = rs.getInt(1); + assertTrue("Found " + count + " rows", count == 1); + rs.close(); + } + + conn.close(); + prep.close(); + } + + private void testSetGetSchema() throws SQLException { + deleteDb("schemaSetGet"); + Connection conn = 
getConnection("schemaSetGet"); + Statement s = conn.createStatement(); + s.executeUpdate("create schema my_test_schema"); + s.executeUpdate("create table my_test_schema.my_test_table(id int, nave varchar) as values (1, 'a')"); + assertEquals("PUBLIC", conn.getSchema()); + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, s, "select * from my_test_table"); + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, conn).setSchema("my_test_table"); + conn.setSchema("MY_TEST_SCHEMA"); + assertEquals("MY_TEST_SCHEMA", conn.getSchema()); + try (ResultSet rs = s.executeQuery("select * from my_test_table")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals("a", rs.getString(2)); + assertFalse(rs.next()); + } + assertThrows(ErrorCode.SCHEMA_NOT_FOUND_1, conn).setSchema("NON_EXISTING_SCHEMA"); + assertEquals("MY_TEST_SCHEMA", conn.getSchema()); + s.executeUpdate("create schema \"otheR_schEma\""); + s.executeUpdate("create table \"otheR_schEma\".my_test_table(id int, nave varchar) as values (2, 'b')"); + conn.setSchema("otheR_schEma"); + assertEquals("otheR_schEma", conn.getSchema()); + try (ResultSet rs = s.executeQuery("select * from my_test_table")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals("b", rs.getString(2)); + assertFalse(rs.next()); + } + s.execute("SET SCHEMA \"MY_TEST_SCHEMA\""); + assertEquals("MY_TEST_SCHEMA", conn.getSchema()); + s.close(); + conn.close(); + deleteDb("schemaSetGet"); + } + + private void testLockTimeout() throws SQLException { + deleteDb("lockTimeout"); + try (Connection conn1 = getConnection("lockTimeout"); + Connection conn2 = getConnection("lockTimeout;LOCK_TIMEOUT=6000")) { + conn1.setAutoCommit(false); + conn2.setAutoCommit(false); + Statement s1 = conn1.createStatement(); + Statement s2 = conn2.createStatement(); + s1.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS VALUES (1, 2)"); + conn1.commit(); + s2.execute("INSERT INTO TEST VALUES (2, 4)"); + s1.execute("UPDATE TEST SET V = 3 
WHERE ID = 1"); + s2.execute("SET LOCK_TIMEOUT 50"); + long n = System.nanoTime(); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, s2).execute("UPDATE TEST SET V = 4 WHERE ID = 1"); + if (System.nanoTime() - n > 5_000_000_000L) { + fail("LOCK_TIMEOUT wasn't set"); + } + } finally { + deleteDb("lockTimeout"); + } + } + + private void testIgnoreUnknownSettings() throws SQLException { + deleteDb("ignoreUnknownSettings"); + assertThrows(ErrorCode.UNSUPPORTED_SETTING_1, () -> getConnection("ignoreUnknownSettings;A=1")); + try (Connection c = getConnection("ignoreUnknownSettings;IGNORE_UNKNOWN_SETTINGS=TRUE;A=1")) { + } finally { + deleteDb("ignoreUnknownSettings"); + } + } + + private void testTimeZone() throws SQLException { + deleteDb("timeZone"); + String tz1 = "Europe/London", tz2 = "Europe/Paris", tz3 = "Asia/Tokyo"; + try (Connection c = getConnection("timeZone")) { + TimeZone tz = TimeZone.getDefault(); + try { + TimeZone.setDefault(TimeZone.getTimeZone(tz1)); + DateTimeUtils.resetCalendar(); + try (Connection c1 = getConnection("timeZone")) { + TimeZone.setDefault(TimeZone.getTimeZone(tz2)); + DateTimeUtils.resetCalendar(); + try (Connection c2 = getConnection("timeZone"); + Connection c3 = getConnection("timeZone;TIME ZONE=" + tz3)) { + checkTimeZone(tz1, c1); + checkTimeZone(tz2, c2); + checkTimeZone(tz3, c3); + } + } + } finally { + TimeZone.setDefault(tz); + DateTimeUtils.resetCalendar(); + } + } finally { + deleteDb("timeZone"); + } + } + + private void checkTimeZone(String expected, Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery( + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'"); + rs.next(); + assertEquals(expected, rs.getString(1)); + } + +} diff --git a/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java b/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java index f426b1e544..072fe14280 100644 --- 
a/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java +++ b/h2/src/test/org/h2/test/jdbc/TestDatabaseEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,13 +13,13 @@ import org.h2.Driver; import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the DatabaseEventListener interface. */ -public class TestDatabaseEventListener extends TestBase { +public class TestDatabaseEventListener extends TestDb { /** * A flag to mark that the given method was called. @@ -34,7 +34,7 @@ public class TestDatabaseEventListener extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -78,21 +78,6 @@ public void opened() { } } - @Override - public void closingDatabase() { - // nothing to do - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } - - @Override - public void setProgress(int state, String name, int x, int max) { - // nothing to do - } - } private void testInit() throws SQLException { @@ -118,32 +103,29 @@ private void testIndexRebuiltOnce() throws SQLException { Properties p = new Properties(); p.setProperty("user", user); p.setProperty("password", password); - Connection conn; Statement stat; - conn = DriverManager.getConnection(url, p); - stat = conn.createStatement(); - // the old.id index head is at position 0 - stat.execute("create table old(id identity) as select 1"); - // the test.id index head is at position 1 - stat.execute("create table test(id identity) as select 1"); - 
conn.close(); - conn = DriverManager.getConnection(url, p); - stat = conn.createStatement(); - // free up space at position 0 - stat.execute("drop table old"); - stat.execute("insert into test values(2)"); - stat.execute("checkpoint sync"); - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); + try (Connection conn = DriverManager.getConnection(url, p)) { + stat = conn.createStatement(); + // the old.id index head is at position 0 + stat.execute("create table old(id identity) as select 1"); + // the test.id index head is at position 1 + stat.execute("create table test(id identity) as select 1"); + } + try (Connection conn = DriverManager.getConnection(url, p)) { + stat = conn.createStatement(); + // free up space at position 0 + stat.execute("drop table old"); + stat.execute("insert into test values(2)"); + stat.execute("checkpoint sync"); + stat.execute("shutdown immediately"); + } // now the index should be re-built - conn = DriverManager.getConnection(url, p); - conn.close(); + try (Connection conn = DriverManager.getConnection(url, p)) {/**/} calledCreateIndex = false; p.put("DATABASE_EVENT_LISTENER", MyDatabaseEventListener.class.getName()); - conn = org.h2.Driver.load().connect(url, p); - conn.close(); - assertTrue(!calledCreateIndex); + try (Connection conn = org.h2.Driver.load().connect(url, p)) {/**/} + assertFalse(calledCreateIndex); } private void testIndexNotRebuilt() throws SQLException { @@ -176,7 +158,7 @@ private void testIndexNotRebuilt() throws SQLException { MyDatabaseEventListener.class.getName()); conn = org.h2.Driver.load().connect(url, p); conn.close(); - assertTrue(!calledCreateIndex); + assertFalse(calledCreateIndex); } private void testCloseLog0(boolean shutdown) throws SQLException { @@ -205,7 +187,7 @@ private void testCloseLog0(boolean shutdown) throws SQLException { conn = org.h2.Driver.load().connect(url, p); conn.close(); if (calledOpened) { - assertTrue(!calledScan); + 
assertFalse(calledScan); } } @@ -247,31 +229,20 @@ private void testCalledForStatement() throws SQLException { /** * The database event listener for this test. */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void closingDatabase() { calledClosingDatabase = true; } - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - @Override public void opened() { calledOpened = true; } @Override - public void setProgress(int state, String name, int x, int max) { + public void setProgress(int state, String name, long x, long max) { if (state == DatabaseEventListener.STATE_SCAN_FILE) { calledScan = true; } diff --git a/h2/src/test/org/h2/test/jdbc/TestDriver.java b/h2/src/test/org/h2/test/jdbc/TestDriver.java index f30680297d..64a7eb0fa9 100644 --- a/h2/src/test/org/h2/test/jdbc/TestDriver.java +++ b/h2/src/test/org/h2/test/jdbc/TestDriver.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,12 +12,14 @@ import java.util.Properties; import org.h2.Driver; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the database driver. */ -public class TestDriver extends TestBase { +public class TestDriver extends TestDb { /** * Run just this test. @@ -25,13 +27,14 @@ public class TestDriver extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testSettingsAsProperties(); testDriverObject(); + testURLs(); } private void testSettingsAsProperties() throws Exception { @@ -40,13 +43,13 @@ private void testSettingsAsProperties() throws Exception { prop.put("password", getPassword()); prop.put("max_compact_time", "1234"); prop.put("unknown", "1234"); - String url = getURL("driver", true); + String url = getURL("jdbc:h2:mem:driver", true); Connection conn = DriverManager.getConnection(url, prop); ResultSet rs; rs = conn.createStatement().executeQuery( - "select * from information_schema.settings where name='MAX_COMPACT_TIME'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'MAX_COMPACT_TIME'"); rs.next(); - assertEquals(1234, rs.getInt(2)); + assertEquals(1234, rs.getInt(1)); conn.close(); } @@ -54,14 +57,16 @@ private void testDriverObject() throws Exception { Driver instance = Driver.load(); assertTrue(DriverManager.getDriver("jdbc:h2:~/test") == instance); Driver.unload(); - try { - java.sql.Driver d = DriverManager.getDriver("jdbc:h2:~/test"); - fail(d.toString()); - } catch (SQLException e) { - // ignore - } + assertThrows(SQLException.class, () -> DriverManager.getDriver("jdbc:h2:~/test")); Driver.load(); assertTrue(DriverManager.getDriver("jdbc:h2:~/test") == instance); } + private void testURLs() throws Exception { + java.sql.Driver instance = Driver.load(); + assertThrows(ErrorCode.URL_FORMAT_ERROR_2, instance).acceptsURL(null); + assertThrows(ErrorCode.URL_FORMAT_ERROR_2, instance).connect(null, null); + assertNull(instance.connect("jdbc:unknown", null)); + } + } diff --git a/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java b/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java new file mode 100644 index 0000000000..ebc356548c --- /dev/null +++ b/h2/src/test/org/h2/test/jdbc/TestGetGeneratedKeys.java @@ 
-0,0 +1,1751 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.jdbc; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.UUID; + +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests for the {@link Statement#getGeneratedKeys()}. + */ +public class TestGetGeneratedKeys extends TestDb { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb("getGeneratedKeys"); + Connection conn = getConnection("getGeneratedKeys"); + testBatchAndMergeInto(conn); + testPrimaryKey(conn); + testInsertWithSelect(conn); + testUpdate(conn); + testMergeUsing(conn); + testWrongStatement(conn); + testMultithreaded(conn); + testNameCase(conn); + testColumnNotFound(conn); + + testPrepareStatement_Execute(conn); + testPrepareStatement_ExecuteBatch(conn); + testPrepareStatement_ExecuteLargeBatch(conn); + testPrepareStatement_ExecuteLargeUpdate(conn); + testPrepareStatement_ExecuteUpdate(conn); + testPrepareStatement_int_Execute(conn); + testPrepareStatement_int_ExecuteBatch(conn); + testPrepareStatement_int_ExecuteLargeBatch(conn); + testPrepareStatement_int_ExecuteLargeUpdate(conn); + testPrepareStatement_int_ExecuteUpdate(conn); + testPrepareStatement_intArray_Execute(conn); + testPrepareStatement_intArray_ExecuteBatch(conn); + testPrepareStatement_intArray_ExecuteLargeBatch(conn); + testPrepareStatement_intArray_ExecuteLargeUpdate(conn); + testPrepareStatement_intArray_ExecuteUpdate(conn); + testPrepareStatement_StringArray_Execute(conn); + 
testPrepareStatement_StringArray_ExecuteBatch(conn); + testPrepareStatement_StringArray_ExecuteLargeBatch(conn); + testPrepareStatement_StringArray_ExecuteLargeUpdate(conn); + testPrepareStatement_StringArray_ExecuteUpdate(conn); + + testStatementExecute(conn); + testStatementExecute_int(conn); + testStatementExecute_intArray(conn); + testStatementExecute_StringArray(conn); + testStatementExecuteLargeUpdate(conn); + testStatementExecuteLargeUpdate_int(conn); + testStatementExecuteLargeUpdate_intArray(conn); + testStatementExecuteLargeUpdate_StringArray(conn); + testStatementExecuteUpdate(conn); + testStatementExecuteUpdate_int(conn); + testStatementExecuteUpdate_intArray(conn); + testStatementExecuteUpdate_StringArray(conn); + + conn.close(); + deleteDb("getGeneratedKeys"); + } + + /** + * Test for batch updates and MERGE INTO operator. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testBatchAndMergeInto(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT, UID UUID DEFAULT RANDOM_UUID(), V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (?), (?)", + Statement.RETURN_GENERATED_KEYS); + prep.setInt(1, 1); + prep.setInt(2, 2); + prep.addBatch(); + prep.setInt(1, 3); + prep.setInt(1, 4); + prep.addBatch(); + prep.executeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + ResultSetMetaData meta = rs.getMetaData(); + assertEquals("BIGINT", meta.getColumnTypeName(1)); + assertEquals("UUID", meta.getColumnTypeName(2)); + rs.next(); + assertEquals(1L, rs.getLong(1)); + UUID u1 = (UUID) rs.getObject(2); + assertNotNull(u1); + rs.next(); + assertEquals(2L, rs.getLong(1)); + UUID u2 = (UUID) rs.getObject(2); + assertNotNull(u2); + rs.next(); + assertEquals(3L, rs.getLong(1)); + UUID u3 = (UUID) rs.getObject(2); + assertNotNull(u3); + rs.next(); + assertEquals(4L, rs.getLong(1)); + UUID u4 = (UUID) 
rs.getObject(2); + assertNotNull(u4); + assertFalse(rs.next()); + assertFalse(u1.equals(u2)); + assertFalse(u2.equals(u3)); + assertFalse(u3.equals(u4)); + prep = conn.prepareStatement("MERGE INTO TEST(ID, V) KEY(ID) VALUES (?, ?)", + Statement.RETURN_GENERATED_KEYS); + prep.setInt(1, 2); + prep.setInt(2, 10); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals(u2, rs.getObject(2)); + assertFalse(rs.next()); + prep.setInt(1, 5); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + rs.next(); + assertEquals(Long.class, rs.getObject(1).getClass()); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test for PRIMARY KEY columns. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrimaryKey(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT PRIMARY KEY, V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(ID, V) VALUES (?, ?)", + Statement.RETURN_GENERATED_KEYS); + prep.setLong(1, 10); + prep.setInt(2, 100); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + rs.next(); + assertEquals(10L, rs.getLong(1)); + assertFalse(rs.next()); + assertEquals(1, rs.getMetaData().getColumnCount()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for INSERT ... SELECT operator. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testInsertWithSelect(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) SELECT 10", + Statement.RETURN_GENERATED_KEYS); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + rs.close(); + + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for UPDATE operator. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES 10"); + PreparedStatement prep = conn.prepareStatement("UPDATE TEST SET V = ? WHERE V = ?", + Statement.RETURN_GENERATED_KEYS); + prep.setInt(1, 20); + prep.setInt(2, 10); + assertEquals(1, prep.executeUpdate()); + ResultSet rs = prep.getGeneratedKeys(); + assertTrue(rs.next()); + assertEquals(1, rs.getLong(1)); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for MERGE USING operator. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testMergeUsing(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE SOURCE (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + " UID INT NOT NULL UNIQUE, V INT NOT NULL)"); + stat.execute("CREATE TABLE DESTINATION (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + " UID INT NOT NULL UNIQUE, V INT NOT NULL)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO SOURCE(UID, V) VALUES (?, ?)"); + for (int i = 1; i <= 100; i++) { + ps.setInt(1, i); + ps.setInt(2, i * 10 + 5); + ps.executeUpdate(); + } + // Insert first half of a rows with different values + ps = conn.prepareStatement("INSERT INTO DESTINATION(UID, V) VALUES (?, ?)"); + for (int i = 1; i <= 50; i++) { + ps.setInt(1, i); + ps.setInt(2, i * 10); + ps.executeUpdate(); + } + // And merge second half into it, first half will be updated with a new values + ps = conn.prepareStatement( + "MERGE INTO DESTINATION USING SOURCE ON (DESTINATION.UID = SOURCE.UID)" + + " WHEN MATCHED THEN UPDATE SET V = SOURCE.V" + + " WHEN NOT MATCHED THEN INSERT (UID, V) VALUES (SOURCE.UID, SOURCE.V)", + Statement.RETURN_GENERATED_KEYS); + // All rows should be either updated or inserted + assertEquals(100, ps.executeUpdate()); + ResultSet rs = ps.getGeneratedKeys(); + for (int i = 1; i <= 100; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + } + assertFalse(rs.next()); + rs.close(); + // Check merged data + rs = stat.executeQuery("SELECT ID, UID, V FROM DESTINATION ORDER BY ID"); + for (int i = 1; i <= 100; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals(i, rs.getInt(2)); + assertEquals(i * 10 + 5, rs.getInt(3)); + } + assertFalse(rs.next()); + stat.execute("DROP TABLE SOURCE"); + stat.execute("DROP TABLE DESTINATION"); + } + + /** + * Test method for incompatible statements. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testWrongStatement(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT)"); + stat.execute("INSERT INTO TEST(V) VALUES 10, 20, 30"); + stat.execute("DELETE FROM TEST WHERE V = 10", Statement.RETURN_GENERATED_KEYS); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("TRUNCATE TABLE TEST", Statement.RETURN_GENERATED_KEYS); + rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for shared connection between several statements in different + * threads. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testMultithreaded(final Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + final int count = 4, iterations = 10_000; + Thread[] threads = new Thread[count]; + final long[] keys = new long[count * iterations]; + for (int i = 0; i < count; i++) { + final int num = i; + threads[num] = new Thread("getGeneratedKeys-" + num) { + @Override + public void run() { + try { + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (?)", + Statement.RETURN_GENERATED_KEYS); + for (int i = 0; i < iterations; i++) { + int value = iterations * num + i; + prep.setInt(1, value); + prep.execute(); + ResultSet rs = prep.getGeneratedKeys(); + rs.next(); + keys[value] = rs.getLong(1); + rs.close(); + } + } catch (SQLException ex) { + ex.printStackTrace(); + } + } + }; + } + for (int i = 0; i < count; i++) { + threads[i].start(); + } + for (int i = 0; i < count; i++) { + threads[i].join(); + } + ResultSet rs = stat.executeQuery("SELECT V, ID FROM TEST ORDER BY V"); + for (int i = 0; i < 
keys.length; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + assertEquals(keys[i], rs.getLong(2)); + } + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for case of letters in column names. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testNameCase(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "\"id\" UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + // Test columns with only difference in case + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + new String[] { "id", "ID" }); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("id", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(1L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + // Test lower case name of upper case column + stat.execute("ALTER TABLE TEST DROP COLUMN \"id\""); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "id" }); + testNameCase1(prep, 2L, true); + // Test upper case name of lower case column + stat.execute("ALTER TABLE TEST ALTER COLUMN ID RENAME TO \"id\""); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "ID" }); + testNameCase1(prep, 3L, false); + stat.execute("DROP TABLE TEST"); + } + + private void testNameCase1(PreparedStatement prep, long id, boolean upper) throws SQLException { + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals(upper ? 
"ID" : "id", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(id, rs.getLong(1)); + assertFalse(rs.next()); + rs.close(); + } + + /** + * Test method for column not found exception. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testColumnNotFound(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT, V INT NOT NULL)"); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new int[] { 0 }); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new int[] { 3 }); + assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat).execute("INSERT INTO TEST(V) VALUES (1)", // + new String[] { "X" }); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String)} + * .{@link PreparedStatement#execute()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_Execute(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); + prep.execute(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String)} + * .{@link PreparedStatement#executeBatch()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_ExecuteBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String)} + * .{@link PreparedStatement#executeLargeBatch()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_ExecuteLargeBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String)} + * .{@link PreparedStatement#executeLargeUpdate()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_ExecuteLargeUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); + prep.executeLargeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String)} + * .{@link PreparedStatement#executeUpdate()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_ExecuteUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)"); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int)} + * .{@link PreparedStatement#execute()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_int_Execute(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); + prep.execute(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int)} + * .{@link PreparedStatement#executeBatch()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_int_ExecuteBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(3L, rs.getLong("ID")); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertEquals(UUID.class, rs.getObject("UID").getClass()); + assertEquals(UUID.class, rs.getObject("UID", UUID.class).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(4L, rs.getLong("ID")); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertEquals(UUID.class, rs.getObject("UID").getClass()); + assertEquals(UUID.class, rs.getObject("UID", UUID.class).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Connection#prepareStatement(String, int)} + * .{@link PreparedStatement#executeLargeBatch()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_int_ExecuteLargeBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(3L, rs.getLong("ID")); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertEquals(UUID.class, rs.getObject("UID").getClass()); + assertEquals(UUID.class, rs.getObject("UID", UUID.class).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(4L, rs.getLong("ID")); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertEquals(UUID.class, rs.getObject("UID").getClass()); + assertEquals(UUID.class, rs.getObject("UID", UUID.class).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int)} + * .{@link PreparedStatement#executeLargeUpdate()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_int_ExecuteLargeUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); + prep.executeLargeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int)} + * .{@link PreparedStatement#executeUpdate()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_int_ExecuteUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", + Statement.NO_GENERATED_KEYS); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int[])} + * .{@link PreparedStatement#execute()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_intArray_Execute(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); + prep.execute(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int[])} + * .{@link 
PreparedStatement#executeBatch()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_intArray_ExecuteBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(5L, rs.getLong(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(6L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + prep.addBatch(); + prep.addBatch(); + 
prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int[])} + * .{@link PreparedStatement#executeLargeBatch()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_intArray_ExecuteLargeBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, 
rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(5L, rs.getLong(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(6L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, int[])} + * .{@link PreparedStatement#executeLargeUpdate()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_intArray_ExecuteLargeUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); + prep.executeLargeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link 
Connection#prepareStatement(String, int[])} + * .{@link PreparedStatement#executeUpdate()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_intArray_ExecuteUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new int[0]); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } 
+ + /** + * Test method for + * {@link Connection#prepareStatement(String, String[])} + * .{@link PreparedStatement#execute()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_StringArray_Execute(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + prep.execute(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + 
rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, String[])} + * .{@link PreparedStatement#executeBatch()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_StringArray_ExecuteBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(5L, rs.getLong(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(6L, 
rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, String[])} + * .{@link PreparedStatement#executeLargeBatch()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_StringArray_ExecuteLargeBatch(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(3L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertTrue(rs.next()); + assertEquals(4L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = 
conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(5L, rs.getLong(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(6L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + prep.addBatch(); + prep.addBatch(); + prep.executeLargeBatch(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for + * {@link Connection#prepareStatement(String, String[])} + * .{@link PreparedStatement#executeLargeUpdate()}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_StringArray_ExecuteLargeUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); + prep.executeLargeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + prep.executeLargeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link 
Connection#prepareStatement(String, String[])} + * .{@link PreparedStatement#executeUpdate()}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testPrepareStatement_StringArray_ExecuteUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (10)", new String[0]); + prep.executeUpdate(); + ResultSet rs = prep.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + prep = conn.prepareStatement("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + prep.executeUpdate(); + rs = prep.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + 
stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#execute(String)}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecute(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)"); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#execute(String, int)}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecute_int(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#execute(String, int[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecute_intArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", new int[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeUpdate(String, String[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecute_StringArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.execute("INSERT INTO TEST(V) VALUES (10)", new String[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.execute("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeLargeUpdate(String)}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteLargeUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)"); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeLargeUpdate(String, int)}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteLargeUpdate_int(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeLargeUpdate(String, int[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteLargeUpdate_intArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)", new int[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeLargeUpdate(String, String[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteLargeUpdate_StringArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (10)", new String[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.executeLargeUpdate("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeUpdate(String)}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteUpdate(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)"); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeUpdate(String, int)}. + * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteUpdate_int(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL, OTHER INT DEFAULT 0)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", Statement.NO_GENERATED_KEYS); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", Statement.RETURN_GENERATED_KEYS); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeUpdate(String, int[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteUpdate_intArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", new int[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", new int[] { 1, 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (30)", new int[] { 2, 1 }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (40)", new int[] { 2 }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + + /** + * Test method for {@link Statement#executeUpdate(String, String[])}. 
+ * + * @param conn + * connection + * @throws Exception + * on exception + */ + private void testStatementExecuteUpdate_StringArray(Connection conn) throws Exception { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST (ID BIGINT PRIMARY KEY AUTO_INCREMENT," + + "UID UUID NOT NULL DEFAULT RANDOM_UUID(), V INT NOT NULL)"); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (10)", new String[0]); + ResultSet rs = stat.getGeneratedKeys(); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (20)", new String[] { "ID", "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("ID", rs.getMetaData().getColumnName(1)); + assertEquals("UID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(2L, rs.getLong(1)); + assertEquals(UUID.class, rs.getObject(2).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (30)", new String[] { "UID", "ID" }); + rs = stat.getGeneratedKeys(); + assertEquals(2, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertEquals("ID", rs.getMetaData().getColumnName(2)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertEquals(3L, rs.getLong(2)); + assertFalse(rs.next()); + rs.close(); + stat.executeUpdate("INSERT INTO TEST(V) VALUES (40)", new String[] { "UID" }); + rs = stat.getGeneratedKeys(); + assertEquals(1, rs.getMetaData().getColumnCount()); + assertEquals("UID", rs.getMetaData().getColumnName(1)); + assertTrue(rs.next()); + assertEquals(UUID.class, rs.getObject(1).getClass()); + assertFalse(rs.next()); + rs.close(); + stat.execute("DROP TABLE TEST"); + } + +} diff --git a/h2/src/test/org/h2/test/jdbc/TestJavaObject.java b/h2/src/test/org/h2/test/jdbc/TestJavaObject.java deleted file mode 100644 index 935ee35fe5..0000000000 --- 
a/h2/src/test/org/h2/test/jdbc/TestJavaObject.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jdbc; - -import java.io.Serializable; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import java.util.Arrays; -import java.util.UUID; - -import org.h2.engine.SysProperties; -import org.h2.test.TestBase; - -/** - * Tests java object values when SysProperties.SERIALIZE_JAVA_OBJECT property is - * disabled. - * - * @author Sergi Vladykin - */ -public class TestJavaObject extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase test = createCaller().init(); - test.config.traceTest = true; - test.config.memory = true; - test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); - } - - @Override - public void test() throws Exception { - SysProperties.serializeJavaObject = false; - try { - trace("Test Java Object"); - doTest(new MyObj(1), new MyObj(2), false); - doTest(Arrays.asList(UUID.randomUUID(), null), - Arrays.asList(UUID.randomUUID(), UUID.randomUUID()), true); - // doTest(new Timestamp(System.currentTimeMillis()), - // new Timestamp(System.currentTimeMillis() + 10000), - // false); - doTest(200, 100, false); - doTest(200, 100L, true); - // doTest(new Date(System.currentTimeMillis() + 1000), - // new Date(System.currentTimeMillis()), false); - // doTest(new java.util.Date(System.currentTimeMillis() + 1000), - // new java.util.Date(System.currentTimeMillis()), false); - // doTest(new Time(System.currentTimeMillis() + 1000), - // new Date(System.currentTimeMillis()), false); - // doTest(new Time(System.currentTimeMillis() + 1000), - 
// new Timestamp(System.currentTimeMillis()), false); - } finally { - SysProperties.serializeJavaObject = true; - } - } - - private void doTest(Object o1, Object o2, boolean hash) throws SQLException { - deleteDb("javaObject"); - Connection conn = getConnection("javaObject"); - Statement stat = conn.createStatement(); - stat.execute("create table t(id identity, val other)"); - - PreparedStatement ins = conn.prepareStatement( - "insert into t(val) values(?)"); - - ins.setObject(1, o1, Types.JAVA_OBJECT); - assertEquals(1, ins.executeUpdate()); - - ins.setObject(1, o2, Types.JAVA_OBJECT); - assertEquals(1, ins.executeUpdate()); - - ResultSet rs = stat.executeQuery( - "select val from t order by val limit 1"); - - assertTrue(rs.next()); - - Object smallest; - if (hash) { - if (o1.getClass() != o2.getClass()) { - smallest = o1.getClass().getName().compareTo( - o2.getClass().getName()) < 0 ? o1 : o2; - } else { - assertFalse(o1.hashCode() == o2.hashCode()); - smallest = o1.hashCode() < o2.hashCode() ? o1 : o2; - } - } else { - @SuppressWarnings("unchecked") - int compare = ((Comparable) o1).compareTo(o2); - assertFalse(compare == 0); - smallest = compare < 0 ? o1 : o2; - } - - assertEquals(smallest.toString(), rs.getString(1)); - - Object y = rs.getObject(1); - - assertTrue(smallest.equals(y)); - assertFalse(rs.next()); - rs.close(); - - PreparedStatement prep = conn.prepareStatement( - "select id from t where val = ?"); - - prep.setObject(1, o1, Types.JAVA_OBJECT); - rs = prep.executeQuery(); - assertTrue(rs.next()); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); - rs.close(); - - prep.setObject(1, o2, Types.JAVA_OBJECT); - rs = prep.executeQuery(); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); - rs.close(); - - stat.close(); - prep.close(); - - conn.close(); - deleteDb("javaObject"); - // trace("ok: " + o1.getClass().getName() + " vs " + - // o2.getClass().getName()); - } - - /** - * A test class. 
- */ - public static class MyObj implements Comparable, Serializable { - - private static final long serialVersionUID = 1L; - private final int value; - - MyObj(int value) { - this.value = value; - } - - @Override - public String toString() { - return "myObj:" + value; - } - - @Override - public int compareTo(MyObj o) { - return value - o.value; - } - - @Override - public boolean equals(Object o) { - return toString().equals(o.toString()); - } - - @Override - public int hashCode() { - return -value; - } - - } -} diff --git a/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java b/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java index 78dc6759b5..bb145a23ee 100644 --- a/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java +++ b/h2/src/test/org/h2/test/jdbc/TestJavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,6 +12,7 @@ import java.sql.Types; import org.h2.api.JavaObjectSerializer; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.JdbcUtils; /** @@ -20,7 +21,7 @@ * @author Sergi Vladykin * @author Davide Cavestro */ -public class TestJavaObjectSerializer extends TestBase { +public class TestJavaObjectSerializer extends TestDb { /** * Run just this test. @@ -32,9 +33,7 @@ public static void main(String... 
a) throws Exception { test.config.traceTest = true; test.config.memory = true; test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java b/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java deleted file mode 100644 index 17b8bfa350..0000000000 --- a/h2/src/test/org/h2/test/jdbc/TestLimitUpdates.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.jdbc; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import org.h2.test.TestBase; - -/** - * Test for limit updates. - */ -public class TestLimitUpdates extends TestBase { - - private static final String DATABASE_NAME = "limitUpdates"; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws SQLException { - testLimitUpdates(); - deleteDb(DATABASE_NAME); - } - - private void testLimitUpdates() throws SQLException { - deleteDb(DATABASE_NAME); - Connection conn = null; - PreparedStatement prep = null; - - try { - conn = getConnection(DATABASE_NAME); - prep = conn.prepareStatement( - "CREATE TABLE TEST(KEY_ID INT PRIMARY KEY, VALUE_ID INT)"); - prep.executeUpdate(); - - prep.close(); - prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); - int numRows = 10; - for (int i = 0; i < numRows; ++i) { - prep.setInt(1, i); - prep.setInt(2, 0); - prep.execute(); - } - assertEquals(numRows, countWhere(conn, 0)); - - // update all elements than available - prep.close(); - prep = conn.prepareStatement("UPDATE TEST SET VALUE_ID = ?"); - prep.setInt(1, 1); - prep.execute(); - assertEquals(numRows, countWhere(conn, 1)); - - // update less elements than available - updateLimit(conn, 2, numRows / 2); - assertEquals(numRows / 2, countWhere(conn, 2)); - - // update more elements than available - updateLimit(conn, 3, numRows * 2); - assertEquals(numRows, countWhere(conn, 3)); - - // update no elements - updateLimit(conn, 4, 0); - assertEquals(0, countWhere(conn, 4)); - } finally { - if (prep != null) { - prep.close(); - } - if (conn != null) { - conn.close(); - } - } - } - - private static int countWhere(final Connection conn, final int where) - throws SQLException { - PreparedStatement prep = null; - ResultSet rs = null; - try { - prep = conn.prepareStatement( - "SELECT COUNT(*) FROM TEST WHERE VALUE_ID = ?"); - prep.setInt(1, where); - rs = prep.executeQuery(); - rs.next(); - return rs.getInt(1); - } finally { - if (rs != null) { - rs.close(); - } - if (prep != null) { - prep.close(); - } - } - } - - private static void updateLimit(final Connection conn, final int value, - final int limit) throws SQLException { - PreparedStatement prep = null; - try { - 
prep = conn.prepareStatement( - "UPDATE TEST SET VALUE_ID = ? LIMIT ?"); - prep.setInt(1, value); - prep.setInt(2, limit); - prep.execute(); - } finally { - if (prep != null) { - prep.close(); - } - } - } -} diff --git a/h2/src/test/org/h2/test/jdbc/TestLobApi.java b/h2/src/test/org/h2/test/jdbc/TestLobApi.java index f500e8f6da..cdb6f7d92f 100644 --- a/h2/src/test/org/h2/test/jdbc/TestLobApi.java +++ b/h2/src/test/org/h2/test/jdbc/TestLobApi.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,6 +12,7 @@ import java.io.Reader; import java.io.StringReader; import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; @@ -24,12 +25,14 @@ import org.h2.api.ErrorCode; import org.h2.jdbc.JdbcConnection; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.test.utils.RandomDataUtils; import org.h2.util.IOUtils; /** * Test the Blob, Clob, and NClob implementations. */ -public class TestLobApi extends TestBase { +public class TestLobApi extends TestDb { private JdbcConnection conn; private Statement stat; @@ -40,17 +43,17 @@ public class TestLobApi extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("lob"); + deleteDb(getTestName()); testUnsupportedOperations(); testLobStaysOpenUntilCommitted(); testInputStreamThrowsException(true); testInputStreamThrowsException(false); - conn = (JdbcConnection) getConnection("lob"); + conn = (JdbcConnection) getConnection(getTestName()); stat = conn.createStatement(); stat.execute("create table test(id int, x blob)"); testBlob(0); @@ -68,7 +71,7 @@ public void test() throws Exception { } private void testUnsupportedOperations() throws Exception { - Connection conn = getConnection("lob"); + Connection conn = getConnection(getTestName()); stat = conn.createStatement(); stat.execute("create table test(id int, c clob, b blob)"); stat.execute("insert into test values(1, 'x', x'00')"); @@ -76,38 +79,30 @@ private void testUnsupportedOperations() throws Exception { rs.next(); Clob clob = rs.getClob(2); byte[] data = IOUtils.readBytesAndClose(clob.getAsciiStream(), -1); - assertEquals("x", new String(data, "UTF-8")); + assertEquals("x", new String(data, StandardCharsets.UTF_8)); assertTrue(clob.toString().endsWith("'x'")); clob.free(); - assertTrue(clob.toString().endsWith("null")); + assertTrue(clob.toString().endsWith("")); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). truncate(0); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). setAsciiStream(1); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). - setString(1, "", 0, 1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). position("", 0); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). position((Clob) null, 0); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, clob). - getCharacterStream(1, 1); Blob blob = rs.getBlob(3); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, blob). truncate(0); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, blob). 
- setBytes(1, new byte[0], 0, 0); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, blob). position(new byte[1], 0); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, blob). position((Blob) null, 0); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, blob). - getBinaryStream(1, 1); assertTrue(blob.toString().endsWith("X'00'")); blob.free(); - assertTrue(blob.toString().endsWith("null")); + assertTrue(blob.toString().endsWith("")); stat.execute("drop table test"); conn.close(); @@ -118,25 +113,35 @@ private void testUnsupportedOperations() throws Exception { * the result set is closed (see ResultSet.close). */ private void testLobStaysOpenUntilCommitted() throws Exception { - Connection conn = getConnection("lob"); + Connection conn = getConnection(getTestName()); stat = conn.createStatement(); stat.execute("create table test(id identity, c clob, b blob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?, ?)"); + "insert into test(c, b) values(?, ?)"); prep.setString(1, ""); prep.setBytes(2, new byte[0]); prep.execute(); + Random r = new Random(1); + + char[] charsSmall = new char[20]; + RandomDataUtils.randomChars(r, charsSmall); + String dSmall = new String(charsSmall); + prep.setCharacterStream(1, new StringReader(dSmall), -1); + byte[] bytesSmall = new byte[20]; + r.nextBytes(bytesSmall); + prep.setBinaryStream(2, new ByteArrayInputStream(bytesSmall), -1); + prep.execute(); + char[] chars = new char[100000]; - for (int i = 0; i < chars.length; i++) { - chars[i] = (char) r.nextInt(10000); - } + RandomDataUtils.randomChars(r, chars); String d = new String(chars); prep.setCharacterStream(1, new StringReader(d), -1); byte[] bytes = new byte[100000]; r.nextBytes(bytes); prep.setBinaryStream(2, new ByteArrayInputStream(bytes), -1); prep.execute(); + conn.setAutoCommit(false); ResultSet rs = stat.executeQuery("select * from test order by id"); rs.next(); @@ -145,29 +150,38 @@ private void testLobStaysOpenUntilCommitted() throws Exception { 
rs.next(); Clob c2 = rs.getClob(2); Blob b2 = rs.getBlob(3); + rs.next(); + Clob c3 = rs.getClob(2); + Blob b3 = rs.getBlob(3); assertFalse(rs.next()); // now close rs.close(); // but the LOBs must stay open assertEquals(0, c1.length()); assertEquals(0, b1.length()); - assertEquals(chars.length, c2.length()); - assertEquals(bytes.length, b2.length()); assertEquals("", c1.getSubString(1, 0)); assertEquals(new byte[0], b1.getBytes(1, 0)); - assertEquals(d, c2.getSubString(1, (int) c2.length())); - assertEquals(bytes, b2.getBytes(1, (int) b2.length())); + + assertEquals(charsSmall.length, c2.length()); + assertEquals(bytesSmall.length, b2.length()); + assertEquals(dSmall, c2.getSubString(1, (int) c2.length())); + assertEquals(bytesSmall, b2.getBytes(1, (int) b2.length())); + + assertEquals(chars.length, c3.length()); + assertEquals(bytes.length, b3.length()); + assertEquals(d, c3.getSubString(1, (int) c3.length())); + assertEquals(bytes, b3.getBytes(1, (int) b3.length())); stat.execute("drop table test"); conn.close(); } private void testInputStreamThrowsException(final boolean ioException) throws Exception { - Connection conn = getConnection("lob"); + Connection conn = getConnection(getTestName()); stat = conn.createStatement(); stat.execute("create table test(id identity, c clob, b blob)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?, ?)"); + "insert into test(c, b) values(?, ?)"); assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
setCharacterStream(1, new Reader() { @@ -240,30 +254,43 @@ private void testBlob(int length) throws Exception { prep.setInt(1, 2); b = conn.createBlob(); - b.setBytes(1, data); + assertEquals(length, b.setBytes(1, data)); prep.setBlob(2, b); prep.execute(); prep.setInt(1, 3); - prep.setBlob(2, new ByteArrayInputStream(data)); + Blob b2 = conn.createBlob(); + byte[] xdata = new byte[length + 2]; + System.arraycopy(data, 0, xdata, 1, length); + assertEquals(length, b2.setBytes(1, xdata, 1, length)); + prep.setBlob(2, b2); prep.execute(); prep.setInt(1, 4); + prep.setBlob(2, new ByteArrayInputStream(data)); + prep.execute(); + + prep.setInt(1, 5); prep.setBlob(2, new ByteArrayInputStream(data), -1); prep.execute(); ResultSet rs; rs = stat.executeQuery("select * from test"); rs.next(); - Blob b2 = rs.getBlob(2); - assertEquals(length, b2.length()); + Blob b3 = rs.getBlob(2); + assertEquals(length, b3.length()); byte[] bytes = b.getBytes(1, length); - byte[] bytes2 = b2.getBytes(1, length); + byte[] bytes2 = b3.getBytes(1, length); assertEquals(bytes, bytes2); rs.next(); - b2 = rs.getBlob(2); - assertEquals(length, b2.length()); - bytes2 = b2.getBytes(1, length); + b3 = rs.getBlob(2); + assertEquals(length, b3.length()); + bytes2 = b3.getBytes(1, length); + assertEquals(bytes, bytes2); + rs.next(); + b3 = rs.getBlob(2); + assertEquals(length, b3.length()); + bytes2 = b3.getBytes(1, length); assertEquals(bytes, bytes2); while (rs.next()) { bytes2 = rs.getBytes(2); @@ -314,20 +341,28 @@ private void testClob(int length) throws Exception { NClob nc; nc = conn.createNClob(); - nc.setString(1, new String(data)); + assertEquals(length, nc.setString(1, new String(data))); prep.setInt(1, 5); prep.setNClob(2, nc); prep.execute(); - prep.setInt(1, 5); + nc = conn.createNClob(); + char[] xdata = new char[length + 2]; + System.arraycopy(data, 0, xdata, 1, length); + assertEquals(length, nc.setString(1, new String(xdata), 1, length)); + prep.setInt(1, 6); + prep.setNClob(2, nc); + 
prep.execute(); + + prep.setInt(1, 7); prep.setNClob(2, new StringReader(new String(data))); prep.execute(); - prep.setInt(1, 6); + prep.setInt(1, 8); prep.setNClob(2, new StringReader(new String(data)), -1); prep.execute(); - prep.setInt(1, 7); + prep.setInt(1, 9); prep.setNString(2, new String(data)); prep.execute(); diff --git a/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java b/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java index b9d2eb77d2..d833c80977 100644 --- a/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java +++ b/h2/src/test/org/h2/test/jdbc/TestManyJdbcObjects.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,11 +13,12 @@ import java.sql.Statement; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the server by creating many JDBC objects (result sets and so on). */ -public class TestManyJdbcObjects extends TestBase { +public class TestManyJdbcObjects extends TestDb { /** * Run just this test. @@ -25,7 +26,7 @@ public class TestManyJdbcObjects extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -45,8 +46,8 @@ private void testNestedResultSets() throws SQLException { DatabaseMetaData meta = conn.getMetaData(); ResultSet rsTables = meta.getColumns(null, null, null, null); while (rsTables.next()) { - meta.getExportedKeys(null, null, null); - meta.getImportedKeys(null, null, null); + meta.getExportedKeys(null, null, "TEST"); + meta.getImportedKeys(null, null, "TEST"); } conn.close(); } diff --git a/h2/src/test/org/h2/test/jdbc/TestMetaData.java b/h2/src/test/org/h2/test/jdbc/TestMetaData.java index 7cb4f23b5d..ebf8879849 100644 --- a/h2/src/test/org/h2/test/jdbc/TestMetaData.java +++ b/h2/src/test/org/h2/test/jdbc/TestMetaData.java @@ -1,29 +1,33 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; +import static org.h2.engine.Constants.MAX_ARRAY_CARDINALITY; +import static org.h2.engine.Constants.MAX_NUMERIC_PRECISION; +import static org.h2.engine.Constants.MAX_STRING_LENGTH; + import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.Driver; import java.sql.ResultSet; import java.sql.ResultSetMetaData; -import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; import org.h2.api.ErrorCode; import org.h2.engine.Constants; +import org.h2.mode.DefaultNullOrdering; import org.h2.test.TestBase; -import org.h2.value.DataType; +import org.h2.test.TestDb; /** * Test for the DatabaseMetaData implementation. 
*/ -public class TestMetaData extends TestBase { +public class TestMetaData extends TestDb { private static final String CATALOG = "METADATA"; @@ -33,7 +37,7 @@ public class TestMetaData extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -42,20 +46,23 @@ public void test() throws SQLException { testUnwrap(); testUnsupportedOperations(); testTempTable(); - testColumnResultSetMeta(); testColumnLobMeta(); testColumnMetaData(); testColumnPrecision(); testColumnDefault(); + testColumnGenerated(); + testHiddenColumn(); testCrossReferences(); testProcedureColumns(); + testTypeInfo(); testUDTs(); testStatic(); + testNullsAreSortedAt(); testGeneral(); testAllowLiteralsNone(); testClientInfo(); - testSessionsUncommitted(); testQueryStatistics(); + testQueryStatisticsLimit(); } private void testUnwrap() throws SQLException { @@ -103,41 +110,6 @@ private void testUnsupportedOperations() throws SQLException { conn.close(); } - private void testColumnResultSetMeta() throws SQLException { - Connection conn = getConnection("metaData"); - Statement stat = conn.createStatement(); - stat.executeUpdate("create table test(data result_set)"); - stat.execute("create alias x as 'ResultSet x(Connection conn, String sql) " + - "throws SQLException { return conn.createStatement(" + - "ResultSet.TYPE_SCROLL_INSENSITIVE, " + - "ResultSet.CONCUR_READ_ONLY).executeQuery(sql); }'"); - stat.execute("insert into test values(" + - "select x('select x from system_range(1, 2)'))"); - ResultSet rs = stat.executeQuery("select * from test"); - ResultSetMetaData rsMeta = rs.getMetaData(); - assertTrue(rsMeta.toString().endsWith(": columns=1")); - assertEquals("java.sql.ResultSet", rsMeta.getColumnClassName(1)); - assertEquals(DataType.TYPE_RESULT_SET, rsMeta.getColumnType(1)); - rs.next(); - assertTrue(rs.getObject(1) instanceof java.sql.ResultSet); - 
assertEquals("org.h2.tools.SimpleResultSet", - rs.getObject(1).getClass().getName()); - stat.executeUpdate("drop alias x"); - - rs = stat.executeQuery("select 1 from dual"); - rs.next(); - rsMeta = rs.getMetaData(); - assertTrue(rsMeta.getCatalogName(1) != null); - assertEquals("1", rsMeta.getColumnLabel(1)); - assertEquals("1", rsMeta.getColumnName(1)); - assertEquals("", rsMeta.getSchemaName(1)); - assertEquals("", rsMeta.getTableName(1)); - assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); - assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, rs.getHoldability()); - stat.executeUpdate("drop table test"); - conn.close(); - } - private void testColumnLobMeta() throws SQLException { Connection conn = getConnection("metaData"); Statement stat = conn.createStatement(); @@ -165,14 +137,14 @@ private void testColumnMetaData() throws SQLException { assertEquals("C", rs.getMetaData().getColumnName(1)); Statement stat = conn.createStatement(); - stat.execute("create table a(x array)"); - stat.execute("insert into a values((1, 2))"); + stat.execute("create table a(x int array)"); + stat.execute("insert into a values(ARRAY[1, 2])"); rs = stat.executeQuery("SELECT x[1] FROM a"); ResultSetMetaData rsMeta = rs.getMetaData(); - assertEquals(Types.VARCHAR, rsMeta.getColumnType(1)); + assertEquals(Types.INTEGER, rsMeta.getColumnType(1)); rs.next(); - // assertEquals(String.class.getName(), - // rs.getObject(1).getClass().getName()); + assertEquals(Integer.class.getName(), + rs.getObject(1).getClass().getName()); stat.execute("drop table a"); conn.close(); } @@ -187,15 +159,15 @@ private void testColumnPrecision() throws SQLException { rs = stat.executeQuery("SELECT * FROM ONE"); rsMeta = rs.getMetaData(); assertEquals(12, rsMeta.getPrecision(1)); - assertEquals(17, rsMeta.getPrecision(2)); - assertEquals(Types.DECIMAL, rsMeta.getColumnType(1)); - assertEquals(Types.DOUBLE, rsMeta.getColumnType(2)); + assertEquals(53, rsMeta.getPrecision(2)); + 
assertEquals(Types.NUMERIC, rsMeta.getColumnType(1)); + assertEquals(Types.FLOAT, rsMeta.getColumnType(2)); rs = stat.executeQuery("SELECT * FROM TWO"); rsMeta = rs.getMetaData(); assertEquals(12, rsMeta.getPrecision(1)); - assertEquals(17, rsMeta.getPrecision(2)); - assertEquals(Types.DECIMAL, rsMeta.getColumnType(1)); - assertEquals(Types.DOUBLE, rsMeta.getColumnType(2)); + assertEquals(53, rsMeta.getPrecision(2)); + assertEquals(Types.NUMERIC, rsMeta.getColumnType(1)); + assertEquals(Types.FLOAT, rsMeta.getColumnType(2)); stat.execute("DROP TABLE ONE, TWO"); conn.close(); } @@ -218,25 +190,64 @@ private void testColumnDefault() throws SQLException { conn.close(); } + private void testColumnGenerated() throws SQLException { + Connection conn = getConnection("metaData"); + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs; + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT, B INT AS A + 1)"); + rs = meta.getColumns(null, null, "TEST", null); + rs.next(); + assertEquals("A", rs.getString("COLUMN_NAME")); + assertEquals("NO", rs.getString("IS_GENERATEDCOLUMN")); + rs.next(); + assertEquals("B", rs.getString("COLUMN_NAME")); + assertEquals("YES", rs.getString("IS_GENERATEDCOLUMN")); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); + conn.close(); + } + + private void testHiddenColumn() throws SQLException { + Connection conn = getConnection("metaData"); + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs; + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(A INT, B INT INVISIBLE)"); + rs = meta.getColumns(null, null, "TEST", null); + assertTrue(rs.next()); + assertEquals("A", rs.getString("COLUMN_NAME")); + assertFalse(rs.next()); + rs = meta.getPseudoColumns(null, null, "TEST", null); + assertTrue(rs.next()); + assertEquals("B", rs.getString("COLUMN_NAME")); + assertEquals("YES", rs.getString("IS_NULLABLE")); + assertTrue(rs.next()); + assertEquals("_ROWID_", 
rs.getString("COLUMN_NAME")); + assertEquals("NO", rs.getString("IS_NULLABLE")); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); + conn.close(); + } + private void testProcedureColumns() throws SQLException { Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs; Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS PROP FOR " + - "\"java.lang.System.getProperty(java.lang.String)\""); - stat.execute("CREATE ALIAS EXIT FOR \"java.lang.System.exit\""); + stat.execute("CREATE ALIAS PROP FOR 'java.lang.System.getProperty(java.lang.String)'"); + stat.execute("CREATE ALIAS EXIT FOR 'java.lang.System.exit'"); rs = meta.getProcedures(null, null, "EX%"); assertResultSetMeta(rs, 9, new String[] { "PROCEDURE_CAT", - "PROCEDURE_SCHEM", "PROCEDURE_NAME", "NUM_INPUT_PARAMS", - "NUM_OUTPUT_PARAMS", "NUM_RESULT_SETS", "REMARKS", + "PROCEDURE_SCHEM", "PROCEDURE_NAME", "RESERVED1", + "RESERVED2", "RESERVED3", "REMARKS", "PROCEDURE_TYPE", "SPECIFIC_NAME" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.INTEGER, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR }, + Types.VARCHAR, Types.VARCHAR, Types.NULL, Types.NULL, + Types.NULL, Types.VARCHAR, Types.SMALLINT, Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "EXIT", "1", "0", "0", "", - "" + DatabaseMetaData.procedureNoResult } }); + Constants.SCHEMA_MAIN, "EXIT", null, null, null, null, + "" + DatabaseMetaData.procedureNoResult, "EXIT_1" } }); rs = meta.getProcedureColumns(null, null, null, null); assertResultSetMeta(rs, 20, new String[] { "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "COLUMN_NAME", @@ -254,23 +265,151 @@ private void testProcedureColumns() throws SQLException { assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "EXIT", "P1", "" + DatabaseMetaData.procedureColumnIn, - "" + Types.INTEGER, "INTEGER", 
"10", "10", "0", "10", - "" + DatabaseMetaData.procedureNoNulls }, - { CATALOG, Constants.SCHEMA_MAIN, "PROP", "P0", + "" + Types.INTEGER, "INTEGER", "32", "32", null, "2", + "" + DatabaseMetaData.procedureNoNulls, + null, null, null, null, null, "1", "", "EXIT_1" }, + { CATALOG, Constants.SCHEMA_MAIN, "PROP", "RESULT", "" + DatabaseMetaData.procedureColumnReturn, - "" + Types.VARCHAR, "VARCHAR", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.procedureNullableUnknown }, + "" + Types.VARCHAR, "CHARACTER VARYING", "" + MAX_STRING_LENGTH, + "" + MAX_STRING_LENGTH, null, null, + "" + DatabaseMetaData.procedureNullableUnknown, + null, null, null, null, "" + MAX_STRING_LENGTH, "0", "", "PROP_1" }, { CATALOG, Constants.SCHEMA_MAIN, "PROP", "P1", "" + DatabaseMetaData.procedureColumnIn, - "" + Types.VARCHAR, "VARCHAR", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.procedureNullable }, }); + "" + Types.VARCHAR, "CHARACTER VARYING", "" + MAX_STRING_LENGTH, + "" + MAX_STRING_LENGTH, null, null, + "" + DatabaseMetaData.procedureNullableUnknown, + null, null, null, null, "" + MAX_STRING_LENGTH, "1", "", "PROP_1" }, }); stat.execute("DROP ALIAS EXIT"); stat.execute("DROP ALIAS PROP"); conn.close(); } + private void testTypeInfo() throws SQLException { + Connection conn = getConnection("metaData"); + DatabaseMetaData meta = conn.getMetaData(); + ResultSet rs; + rs = meta.getTypeInfo(); + assertResultSetMeta(rs, 18, + new String[] { "TYPE_NAME", "DATA_TYPE", "PRECISION", "LITERAL_PREFIX", "LITERAL_SUFFIX", + "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", "UNSIGNED_ATTRIBUTE", + "FIXED_PREC_SCALE", "AUTO_INCREMENT", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE", + "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "NUM_PREC_RADIX"}, + new int[] { Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, + Types.SMALLINT, Types.BOOLEAN, Types.SMALLINT, Types.BOOLEAN, 
Types.BOOLEAN, Types.BOOLEAN, + Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.INTEGER, Types.INTEGER, Types.INTEGER }, + null, null); + testTypeInfo(rs, "TINYINT", Types.TINYINT, 8, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "BIGINT", Types.BIGINT, 64, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "BINARY VARYING", Types.VARBINARY, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BINARY", Types.BINARY, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "UUID", Types.BINARY, 16, "'", "'", null, false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "CHARACTER", Types.CHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", true, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "NUMERIC", Types.NUMERIC, MAX_NUMERIC_PRECISION, null, null, "PRECISION,SCALE", false, true, + (short) 0, Short.MAX_VALUE, 10); + testTypeInfo(rs, "DECFLOAT", Types.NUMERIC, MAX_NUMERIC_PRECISION, null, null, "PRECISION", false, false, + (short) 0, (short) 0, 10); + testTypeInfo(rs, "INTEGER", Types.INTEGER, 32, null, null, null, false, false, (short) 0, + (short) 0, 2); + testTypeInfo(rs, "SMALLINT", Types.SMALLINT, 16, null, null, null, false, false, (short) 0, + (short) 0, 2); + testTypeInfo(rs, "REAL", Types.REAL, 24, null, null, null, false, false, (short) 0, (short) 0, 2); + testTypeInfo(rs, "DOUBLE PRECISION", Types.DOUBLE, 53, null, null, null, false, false, (short) 0, (short) 0, + 2); + testTypeInfo(rs, "CHARACTER VARYING", Types.VARCHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", true, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "VARCHAR_IGNORECASE", Types.VARCHAR, MAX_STRING_LENGTH, "'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BOOLEAN", Types.BOOLEAN, 1, null, null, null, false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "DATE", Types.DATE, 10, "DATE '", "'", 
null, false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "TIME", Types.TIME, 18, "TIME '", "'", "SCALE", false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "TIMESTAMP", Types.TIMESTAMP, 29, "TIMESTAMP '", "'", "SCALE", false, false, (short) 0, + (short) 9, 0); + testTypeInfo(rs, "INTERVAL YEAR", Types.OTHER, 18, "INTERVAL '", "' YEAR", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL MONTH", Types.OTHER, 18, "INTERVAL '", "' MONTH", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY", Types.OTHER, 18, "INTERVAL '", "' DAY", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL HOUR", Types.OTHER, 18, "INTERVAL '", "' HOUR", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL MINUTE", Types.OTHER, 18, "INTERVAL '", "' MINUTE", "PRECISION", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL SECOND", Types.OTHER, 18, "INTERVAL '", "' SECOND", "PRECISION,SCALE", false, false, + (short) 0, (short) 9, 0); + testTypeInfo(rs, "INTERVAL YEAR TO MONTH", Types.OTHER, 18, "INTERVAL '", "' YEAR TO MONTH", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO HOUR", Types.OTHER, 18, "INTERVAL '", "' DAY TO HOUR", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO MINUTE", Types.OTHER, 18, "INTERVAL '", "' DAY TO MINUTE", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL DAY TO SECOND", Types.OTHER, 18, "INTERVAL '", "' DAY TO SECOND", "PRECISION,SCALE", + false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "INTERVAL HOUR TO MINUTE", Types.OTHER, 18, "INTERVAL '", "' HOUR TO MINUTE", "PRECISION", + false, false, (short) 0, (short) 0, 0); + testTypeInfo(rs, "INTERVAL HOUR TO SECOND", Types.OTHER, 18, "INTERVAL '", "' HOUR TO SECOND", + "PRECISION,SCALE", false, false, (short) 0, (short) 9, 0); + 
testTypeInfo(rs, "INTERVAL MINUTE TO SECOND", Types.OTHER, 18, "INTERVAL '", "' MINUTE TO SECOND", + "PRECISION,SCALE", false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "ENUM", Types.OTHER, MAX_STRING_LENGTH, "'", "'", "ELEMENT [,...]", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "GEOMETRY", Types.OTHER, Integer.MAX_VALUE, "'", "'", "TYPE,SRID", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "JSON", Types.OTHER, MAX_STRING_LENGTH, "JSON '", "'", "LENGTH", true, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "ROW", Types.OTHER, 0, "ROW(", ")", "NAME DATA_TYPE [,...]", false, false, (short) 0, + (short) 0, 0); + testTypeInfo(rs, "JAVA_OBJECT", Types.JAVA_OBJECT, MAX_STRING_LENGTH, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "ARRAY", Types.ARRAY, MAX_ARRAY_CARDINALITY, "ARRAY[", "]", "CARDINALITY", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "BINARY LARGE OBJECT", Types.BLOB, Integer.MAX_VALUE, "X'", "'", "LENGTH", false, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "CHARACTER LARGE OBJECT", Types.CLOB, Integer.MAX_VALUE, "'", "'", "LENGTH", true, false, + (short) 0, (short) 0, 0); + testTypeInfo(rs, "TIME WITH TIME ZONE", Types.TIME_WITH_TIMEZONE, 24, "TIME WITH TIME ZONE '", "'", "SCALE", + false, false, (short) 0, (short) 9, 0); + testTypeInfo(rs, "TIMESTAMP WITH TIME ZONE", Types.TIMESTAMP_WITH_TIMEZONE, 35, "TIMESTAMP WITH TIME ZONE '", + "'", "SCALE", false, false, (short) 0, (short) 9, 0); + assertFalse(rs.next()); + conn.close(); + } + + private void testTypeInfo(ResultSet rs, String name, int type, long precision, String prefix, String suffix, + String params, boolean caseSensitive, boolean fixed, short minScale, short maxScale, int radix) + throws SQLException { + assertTrue(rs.next()); + assertEquals(name, rs.getString(1)); + assertEquals(type, rs.getInt(2)); + assertEquals(precision, rs.getLong(3)); + assertEquals(prefix, rs.getString(4)); + 
assertEquals(suffix, rs.getString(5)); + assertEquals(params, rs.getString(6)); + assertEquals(DatabaseMetaData.typeNullable, rs.getShort(7)); + assertEquals(caseSensitive, rs.getBoolean(8)); + assertEquals(DatabaseMetaData.typeSearchable, rs.getShort(9)); + assertFalse(rs.getBoolean(10)); + assertEquals(fixed, rs.getBoolean(11)); + assertFalse(rs.getBoolean(12)); + assertEquals(name, rs.getString(13)); + assertEquals(minScale, rs.getShort(14)); + assertEquals(maxScale, rs.getShort(15)); + rs.getInt(16); + assertTrue(rs.wasNull()); + rs.getInt(17); + assertTrue(rs.wasNull()); + if (radix != 0) { + assertEquals(radix, rs.getInt(18)); + } else { + rs.getInt(18); + assertTrue(rs.wasNull()); + } + } + private void testUDTs() throws SQLException { Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); @@ -280,7 +419,7 @@ private void testUDTs() throws SQLException { new String[] { "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "CLASS_NAME", "DATA_TYPE", "REMARKS", "BASE_TYPE" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, - Types.VARCHAR, Types.SMALLINT, Types.VARCHAR, + Types.VARCHAR, Types.INTEGER, Types.VARCHAR, Types.SMALLINT }, null, null); conn.close(); } @@ -299,8 +438,7 @@ private void testCrossReferences() throws SQLException { checkCrossRef(rs); rs = meta.getExportedKeys(null, "PUBLIC", "PARENT"); checkCrossRef(rs); - stat.execute("DROP TABLE PARENT"); - stat.execute("DROP TABLE CHILD"); + stat.execute("DROP TABLE PARENT, CHILD"); conn.close(); } @@ -319,13 +457,13 @@ private void checkCrossRef(ResultSet rs) throws SQLException { Constants.SCHEMA_MAIN, "CHILD", "PA", "1", "" + DatabaseMetaData.importedKeyRestrict, "" + DatabaseMetaData.importedKeyRestrict, "AB", - "PRIMARY_KEY_8", + "CONSTRAINT_8", "" + DatabaseMetaData.importedKeyNotDeferrable }, { CATALOG, Constants.SCHEMA_MAIN, "PARENT", "B", CATALOG, Constants.SCHEMA_MAIN, "CHILD", "PB", "2", "" + DatabaseMetaData.importedKeyRestrict, "" + 
DatabaseMetaData.importedKeyRestrict, "AB", - "PRIMARY_KEY_8", + "CONSTRAINT_8", "" + DatabaseMetaData.importedKeyNotDeferrable } }); } @@ -361,7 +499,7 @@ private void testStatic() throws SQLException { assertTrue(dr.jdbcCompliant()); assertEquals(0, dr.getPropertyInfo(null, null).length); - assertTrue(dr.connect("jdbc:test:false", null) == null); + assertNull(dr.connect("jdbc:test:false", null)); assertTrue(meta.getNumericFunctions().length() > 0); assertTrue(meta.getStringFunctions().length() > 0); @@ -388,7 +526,7 @@ private void testStatic() throws SQLException { meta.getDriverMinorVersion()); int majorVersion = 4; assertEquals(majorVersion, meta.getJDBCMajorVersion()); - assertEquals(0, meta.getJDBCMinorVersion()); + assertEquals(2, meta.getJDBCMinorVersion()); assertEquals("H2", meta.getDatabaseProductName()); assertEquals(Connection.TRANSACTION_READ_COMMITTED, meta.getDefaultTransactionIsolation()); @@ -423,14 +561,11 @@ private void testStatic() throws SQLException { assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, meta.getResultSetHoldability()); - assertEquals(DatabaseMetaData.sqlStateSQL99, - meta.getSQLStateType()); + assertEquals(DatabaseMetaData.sqlStateSQL, meta.getSQLStateType()); assertFalse(meta.locatorsUpdateCopy()); assertEquals("schema", meta.getSchemaTerm()); assertEquals("\\", meta.getSearchStringEscape()); - assertEquals("LIMIT,MINUS,ROWNUM,SYSDATE,SYSTIME,SYSTIMESTAMP,TODAY", - meta.getSQLKeywords()); assertTrue(meta.getURL().startsWith("jdbc:h2:")); assertTrue(meta.getUserName().length() > 1); @@ -443,10 +578,6 @@ private void testStatic() throws SQLException { assertTrue(meta.isCatalogAtStart()); assertFalse(meta.isReadOnly()); assertTrue(meta.nullPlusNonNullIsNull()); - assertFalse(meta.nullsAreSortedAtEnd()); - assertFalse(meta.nullsAreSortedAtStart()); - assertFalse(meta.nullsAreSortedHigh()); - assertTrue(meta.nullsAreSortedLow()); assertFalse(meta.othersDeletesAreVisible( ResultSet.TYPE_FORWARD_ONLY)); 
assertFalse(meta.othersDeletesAreVisible( @@ -486,7 +617,7 @@ private void testStatic() throws SQLException { assertFalse(meta.storesLowerCaseIdentifiers()); assertFalse(meta.storesLowerCaseQuotedIdentifiers()); assertFalse(meta.storesMixedCaseIdentifiers()); - assertTrue(meta.storesMixedCaseQuotedIdentifiers()); + assertFalse(meta.storesMixedCaseQuotedIdentifiers()); assertTrue(meta.storesUpperCaseIdentifiers()); assertFalse(meta.storesUpperCaseQuotedIdentifiers()); assertTrue(meta.supportsAlterTableWithAddColumn()); @@ -513,7 +644,7 @@ private void testStatic() throws SQLException { assertFalse(meta.supportsFullOuterJoins()); assertTrue(meta.supportsGetGeneratedKeys()); - assertTrue(meta.supportsMultipleOpenResults()); + assertFalse(meta.supportsMultipleOpenResults()); assertFalse(meta.supportsNamedParameters()); assertTrue(meta.supportsGroupBy()); @@ -534,8 +665,8 @@ private void testStatic() throws SQLException { assertTrue(meta.supportsOpenStatementsAcrossRollback()); assertTrue(meta.supportsOrderByUnrelated()); assertTrue(meta.supportsOuterJoins()); - assertTrue(meta.supportsPositionedDelete()); - assertTrue(meta.supportsPositionedUpdate()); + assertFalse(meta.supportsPositionedDelete()); + assertFalse(meta.supportsPositionedUpdate()); assertTrue(meta.supportsResultSetConcurrency( ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)); assertTrue(meta.supportsResultSetConcurrency( @@ -575,16 +706,12 @@ private void testStatic() throws SQLException { assertTrue(meta.supportsSubqueriesInQuantifieds()); assertTrue(meta.supportsTableCorrelationNames()); assertTrue(meta.supportsTransactions()); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_NONE)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_COMMITTED)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_READ_UNCOMMITTED)); - assertTrue(meta.supportsTransactionIsolationLevel( - 
Connection.TRANSACTION_REPEATABLE_READ)); - assertTrue(meta.supportsTransactionIsolationLevel( - Connection.TRANSACTION_SERIALIZABLE)); + assertFalse(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_NONE)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ)); + assertTrue(meta.supportsTransactionIsolationLevel(Constants.TRANSACTION_SNAPSHOT)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)); assertTrue(meta.supportsUnion()); assertTrue(meta.supportsUnionAll()); assertFalse(meta.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY)); @@ -595,6 +722,30 @@ private void testStatic() throws SQLException { conn.close(); } + private void testNullsAreSortedAt() throws SQLException { + Connection conn = getConnection("metaData"); + Statement stat = conn.createStatement(); + DatabaseMetaData meta = conn.getMetaData(); + testNullsAreSortedAt(meta, DefaultNullOrdering.LOW); + stat.execute("SET DEFAULT_NULL_ORDERING LOW"); + testNullsAreSortedAt(meta, DefaultNullOrdering.LOW); + stat.execute("SET DEFAULT_NULL_ORDERING HIGH"); + testNullsAreSortedAt(meta, DefaultNullOrdering.HIGH); + stat.execute("SET DEFAULT_NULL_ORDERING FIRST"); + testNullsAreSortedAt(meta, DefaultNullOrdering.FIRST); + stat.execute("SET DEFAULT_NULL_ORDERING LAST"); + testNullsAreSortedAt(meta, DefaultNullOrdering.LAST); + stat.execute("SET DEFAULT_NULL_ORDERING LOW"); + conn.close(); + } + + private void testNullsAreSortedAt(DatabaseMetaData meta, DefaultNullOrdering ordering) throws SQLException { + assertEquals(ordering == DefaultNullOrdering.HIGH, meta.nullsAreSortedHigh()); + assertEquals(ordering == DefaultNullOrdering.LOW, meta.nullsAreSortedLow()); + assertEquals(ordering == DefaultNullOrdering.FIRST, 
meta.nullsAreSortedAtStart()); + assertEquals(ordering == DefaultNullOrdering.LAST, meta.nullsAreSortedAtEnd()); + } + private void testMore() throws SQLException { Connection conn = getConnection("metaData"); DatabaseMetaData meta = conn.getMetaData(); @@ -658,23 +809,23 @@ private void testMore() throws SQLException { trace("getTables"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, null, new String[] { "TABLE" }); - assertResultSetMeta(rs, 11, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 10, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS", "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "SELF_REFERENCING_COL_NAME", - "REF_GENERATION", "SQL" }, new int[] { Types.VARCHAR, + "REF_GENERATION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, - Types.VARCHAR, Types.VARCHAR }, null, null); + Types.VARCHAR }, null, null); if (rs.next()) { fail("Database is not empty after dropping all tables"); } stat.executeUpdate("CREATE TABLE TEST(" + "ID INT PRIMARY KEY," - + "TEXT_V VARCHAR(120)," + "DEC_V DECIMAL(12,3)," + + "TEXT_V VARCHAR(120)," + "DEC_V DECIMAL(12,3)," + "NUM_V NUMERIC(12,3)," + "DATE_V DATETIME," + "BLOB_V BLOB," + "CLOB_V CLOB" + ")"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, null, new String[] { "TABLE" }); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "TEST", "TABLE", "" } }); + Constants.SCHEMA_MAIN, "TEST", "BASE TABLE" } }); trace("getColumns"); rs = meta.getColumns(null, null, "TEST", null); assertResultSetMeta(rs, 24, new String[] { "TABLE_CAT", "TABLE_SCHEM", @@ -684,7 +835,7 @@ private void testMore() throws SQLException { "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA", "SCOPE_TABLE", "SOURCE_DATA_TYPE", - "IS_AUTOINCREMENT", "SCOPE_CATLOG" }, new int[] { + "IS_AUTOINCREMENT", 
"IS_GENERATEDCOLUMN" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.VARCHAR, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.VARCHAR, @@ -694,32 +845,34 @@ private void testMore() throws SQLException { null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "ID", - "" + Types.INTEGER, "INTEGER", "10", "10", "0", "10", - "" + DatabaseMetaData.columnNoNulls, "", null, - "" + Types.INTEGER, "0", "10", "1", "NO" }, + "" + Types.INTEGER, "INTEGER", "32", null, "0", "2", + "" + DatabaseMetaData.columnNoNulls, null, null, + null, null, "32", "1", "NO" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TEXT_V", - "" + Types.VARCHAR, "VARCHAR", "120", "120", "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.VARCHAR, "0", "120", "2", "YES" }, + "" + Types.VARCHAR, "CHARACTER VARYING", "120", null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "120", "2", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "DEC_V", - "" + Types.DECIMAL, "DECIMAL", "12", "12", "3", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.DECIMAL, "0", "12", "3", "YES" }, + "" + Types.DECIMAL, "DECIMAL", "12", null, "3", "10", + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "12", "3", "YES" }, + { CATALOG, Constants.SCHEMA_MAIN, "TEST", "NUM_V", + "" + Types.NUMERIC, "NUMERIC", "12", null, "3", "10", + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "12", "4", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "DATE_V", - "" + Types.TIMESTAMP, "TIMESTAMP", "23", "23", "10", - "10", "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.TIMESTAMP, "0", "23", "4", "YES" }, + "" + Types.TIMESTAMP, "TIMESTAMP", "26", null, "6", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "26", "5", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "BLOB_V", - "" + 
Types.BLOB, "BLOB", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.BLOB, "0", "" + Integer.MAX_VALUE, "5", + "" + Types.BLOB, "BINARY LARGE OBJECT", "" + Integer.MAX_VALUE, null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "" + Integer.MAX_VALUE, "6", "YES" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "CLOB_V", - "" + Types.CLOB, "CLOB", "" + Integer.MAX_VALUE, - "" + Integer.MAX_VALUE, "0", "10", - "" + DatabaseMetaData.columnNullable, "", null, - "" + Types.CLOB, "0", "" + Integer.MAX_VALUE, "6", + "" + Types.CLOB, "CHARACTER LARGE OBJECT", "" + Integer.MAX_VALUE, null, "0", null, + "" + DatabaseMetaData.columnNullable, null, null, + null, null, "" + Integer.MAX_VALUE, "7", "YES" } }); /* * rs=meta.getColumns(null,null,"TEST",null); while(rs.next()) { int @@ -729,44 +882,46 @@ private void testMore() throws SQLException { stat.executeUpdate("CREATE INDEX IDX_TEXT_DEC ON TEST(TEXT_V,DEC_V)"); stat.executeUpdate("CREATE UNIQUE INDEX IDX_DATE ON TEST(DATE_V)"); rs = meta.getIndexInfo(null, null, "TEST", false, false); - assertResultSetMeta(rs, 14, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 13, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", - "CARDINALITY", "PAGES", "FILTER_CONDITION", "SORT_TYPE" }, + "CARDINALITY", "PAGES", "FILTER_CONDITION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, - Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.VARCHAR, Types.INTEGER }, null, null); + Types.VARCHAR, Types.BIGINT, Types.BIGINT, + Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "IDX_DATE", "" + DatabaseMetaData.tableIndexOther, "1", - 
"DATE_V", "A", "0", "0", "" }, + "DATE_V", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "PRIMARY_KEY_2", "" + DatabaseMetaData.tableIndexOther, - "1", "ID", "A", "0", "0", "" }, + "1", "ID", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TRUE", CATALOG, "IDX_TEXT_DEC", "" + DatabaseMetaData.tableIndexOther, - "1", "TEXT_V", "A", "0", "0", "" }, + "1", "TEXT_V", "A", "0", "0" }, { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TRUE", CATALOG, "IDX_TEXT_DEC", "" + DatabaseMetaData.tableIndexOther, - "2", "DEC_V", "A", "0", "0", "" }, }); + "2", "DEC_V", "A", "0", "0" }, }, + new int[] { 11 }); stat.executeUpdate("DROP INDEX IDX_TEXT_DEC"); stat.executeUpdate("DROP INDEX IDX_DATE"); rs = meta.getIndexInfo(null, null, "TEST", false, false); - assertResultSetMeta(rs, 14, new String[] { "TABLE_CAT", "TABLE_SCHEM", + assertResultSetMeta(rs, 13, new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", - "CARDINALITY", "PAGES", "FILTER_CONDITION", "SORT_TYPE" }, + "CARDINALITY", "PAGES", "FILTER_CONDITION" }, new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.SMALLINT, Types.SMALLINT, Types.VARCHAR, - Types.VARCHAR, Types.INTEGER, Types.INTEGER, - Types.VARCHAR, Types.INTEGER }, null, null); + Types.VARCHAR, Types.BIGINT, Types.BIGINT, + Types.VARCHAR }, null, null); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TEST", "FALSE", CATALOG, "PRIMARY_KEY_2", "" + DatabaseMetaData.tableIndexOther, "1", - "ID", "A", "0", "0", "" } }); + "ID", "A", "0", "0" } }, + new int[] { 11 }); trace("getPrimaryKeys"); rs = meta.getPrimaryKeys(null, null, "TEST"); assertResultSetMeta(rs, 6, new String[] { "TABLE_CAT", "TABLE_SCHEM", @@ -782,37 +937,37 @@ private void testMore() throws SQLException { "CREATE TABLE TX2(B INT,A VARCHAR(6),C INT,PRIMARY KEY(C,A,B))"); 
rs = meta.getTables(null, null, "T_2", null); assertResultSetOrdered(rs, new String[][] { - { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + { CATALOG, Constants.SCHEMA_MAIN, "TX2", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); trace("getTables - using a quoted _ character"); rs = meta.getTables(null, null, "T\\_2", null); assertResultSetOrdered(rs, new String[][] { { CATALOG, - Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); trace("getTables - using the % wildcard"); rs = meta.getTables(null, Constants.SCHEMA_MAIN, "%", new String[] { "TABLE" }); assertResultSetOrdered(rs, new String[][] { - { CATALOG, Constants.SCHEMA_MAIN, "TEST", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TABLE", "" }, - { CATALOG, Constants.SCHEMA_MAIN, "T_2", "TABLE", "" } }); + { CATALOG, Constants.SCHEMA_MAIN, "TEST", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "TX2", "BASE TABLE" }, + { CATALOG, Constants.SCHEMA_MAIN, "T_2", "BASE TABLE" } }); stat.execute("DROP TABLE TEST"); trace("getColumns - using wildcards"); rs = meta.getColumns(null, null, "___", "B%"); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "TX2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, + "" + Types.INTEGER, "INTEGER", "32" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, }); + "" + Types.INTEGER, "INTEGER", "32" }, }); trace("getColumns - using wildcards"); rs = meta.getColumns(null, null, "_\\__", "%"); assertResultSetOrdered(rs, new String[][] { { CATALOG, Constants.SCHEMA_MAIN, "T_2", "B", - "" + Types.INTEGER, "INTEGER", "10" }, + "" + Types.INTEGER, "INTEGER", "32" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "A", - "" + Types.VARCHAR, "VARCHAR", "6" }, + "" + Types.VARCHAR, "CHARACTER VARYING", "6" }, { CATALOG, Constants.SCHEMA_MAIN, "T_2", "C", - "" + Types.INTEGER, 
"INTEGER", "10" }, }); + "" + Types.INTEGER, "INTEGER", "32" }, }); trace("getIndexInfo"); stat.executeUpdate("CREATE UNIQUE INDEX A_INDEX ON TX2(B,C,A)"); stat.executeUpdate("CREATE INDEX B_INDEX ON TX2(A,B,C)"); @@ -844,7 +999,8 @@ private void testMore() throws SQLException { "B", "A" }, { CATALOG, Constants.SCHEMA_MAIN, "TX2", "TRUE", CATALOG, "B_INDEX", "" + DatabaseMetaData.tableIndexOther, "3", - "C", "A" }, }); + "C", "A" }, }, + new int[] { 11 }); trace("getPrimaryKeys"); rs = meta.getPrimaryKeys(null, null, "T_2"); assertResultSetOrdered(rs, new String[][] { @@ -916,9 +1072,8 @@ private void testMore() throws SQLException { */ rs = meta.getSchemas(); - assertResultSetMeta(rs, 3, new String[] { "TABLE_SCHEM", - "TABLE_CATALOG", "IS_DEFAULT" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.BOOLEAN }, null, null); + assertResultSetMeta(rs, 2, new String[] { "TABLE_SCHEM", "TABLE_CATALOG" }, + new int[] { Types.VARCHAR, Types.VARCHAR }, null, null); assertTrue(rs.next()); assertEquals("INFORMATION_SCHEMA", rs.getString(1)); assertTrue(rs.next()); @@ -926,9 +1081,8 @@ private void testMore() throws SQLException { assertFalse(rs.next()); rs = meta.getSchemas(null, null); - assertResultSetMeta(rs, 3, new String[] { "TABLE_SCHEM", - "TABLE_CATALOG", "IS_DEFAULT" }, new int[] { Types.VARCHAR, - Types.VARCHAR, Types.BOOLEAN }, null, null); + assertResultSetMeta(rs, 2, new String[] { "TABLE_SCHEM", "TABLE_CATALOG" }, + new int[] { Types.VARCHAR, Types.VARCHAR }, null, null); assertTrue(rs.next()); assertEquals("INFORMATION_SCHEMA", rs.getString(1)); assertTrue(rs.next()); @@ -943,8 +1097,9 @@ private void testMore() throws SQLException { rs = meta.getTableTypes(); assertResultSetMeta(rs, 1, new String[] { "TABLE_TYPE" }, new int[] { Types.VARCHAR }, null, null); - assertResultSetOrdered(rs, new String[][] { { "SYSTEM TABLE" }, - { "TABLE" }, { "TABLE LINK" }, { "VIEW" } }); + assertResultSetOrdered(rs, new String[][] { + { "BASE TABLE" }, { "GLOBAL TEMPORARY" }, 
+ { "LOCAL TEMPORARY" }, { "SYNONYM" }, { "VIEW" } }); rs = meta.getTypeInfo(); assertResultSetMeta(rs, 18, new String[] { "TYPE_NAME", "DATA_TYPE", @@ -976,9 +1131,9 @@ private void testMore() throws SQLException { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR }, null, null); - assertTrue(conn.getWarnings() == null); + assertNull(conn.getWarnings()); conn.clearWarnings(); - assertTrue(conn.getWarnings() == null); + assertNull(conn.getWarnings()); conn.close(); } @@ -1019,82 +1174,36 @@ private void testGeneral() throws SQLException { rs = meta.getTableTypes(); rs.next(); - assertEquals("SYSTEM TABLE", rs.getString("TABLE_TYPE")); + assertEquals("BASE TABLE", rs.getString("TABLE_TYPE")); + rs.next(); + assertEquals("GLOBAL TEMPORARY", rs.getString("TABLE_TYPE")); rs.next(); - assertEquals("TABLE", rs.getString("TABLE_TYPE")); + assertEquals("LOCAL TEMPORARY", rs.getString("TABLE_TYPE")); rs.next(); - assertEquals("TABLE LINK", rs.getString("TABLE_TYPE")); + assertEquals("SYNONYM", rs.getString("TABLE_TYPE")); rs.next(); assertEquals("VIEW", rs.getString("TABLE_TYPE")); assertFalse(rs.next()); rs = meta.getTables(null, Constants.SCHEMA_MAIN, null, new String[] { "TABLE" }); - assertTrue(rs.getStatement() == null); + assertNull(rs.getStatement()); rs.next(); assertEquals("TEST", rs.getString("TABLE_NAME")); assertFalse(rs.next()); - rs = meta.getTables(null, "INFORMATION_SCHEMA", - null, new String[] { "TABLE", "SYSTEM TABLE" }); - rs.next(); - assertEquals("CATALOGS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLLATIONS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLUMNS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("COLUMN_PRIVILEGES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CONSTANTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CONSTRAINTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("CROSS_REFERENCES", 
rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("DOMAINS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("FUNCTION_ALIASES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("FUNCTION_COLUMNS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("HELP", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("INDEXES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("IN_DOUBT", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("LOCKS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("QUERY_STATISTICS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("RIGHTS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("ROLES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SCHEMATA", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SEQUENCES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SESSIONS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SESSION_STATE", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("SETTINGS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLE_PRIVILEGES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TABLE_TYPES", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TRIGGERS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("TYPE_INFO", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("USERS", rs.getString("TABLE_NAME")); - rs.next(); - assertEquals("VIEWS", rs.getString("TABLE_NAME")); + rs = meta.getTables(null, "INFORMATION_SCHEMA", null, new String[] { "BASE TABLE", "VIEW" }); + for (String name : new String[] { "CONSTANTS", "ENUM_VALUES", + "INDEXES", "INDEX_COLUMNS", "INFORMATION_SCHEMA_CATALOG_NAME", "IN_DOUBT", "LOCKS", + "QUERY_STATISTICS", "RIGHTS", "ROLES", "SESSIONS", "SESSION_STATE", "SETTINGS", "SYNONYMS", + "USERS", "CHECK_CONSTRAINTS", "COLLATIONS", "COLUMNS", "COLUMN_PRIVILEGES", + 
"CONSTRAINT_COLUMN_USAGE", "DOMAINS", "DOMAIN_CONSTRAINTS", "ELEMENT_TYPES", "FIELDS", + "KEY_COLUMN_USAGE", "PARAMETERS", + "REFERENTIAL_CONSTRAINTS", "ROUTINES", "SCHEMATA", "SEQUENCES", "TABLES", "TABLE_CONSTRAINTS", + "TABLE_PRIVILEGES", "TRIGGERS", "VIEWS" }) { + rs.next(); + assertEquals(name, rs.getString("TABLE_NAME")); + } assertFalse(rs.next()); rs = meta.getColumns(null, null, "TEST", null); @@ -1138,11 +1247,18 @@ private void testGeneral() throws SQLException { stat.execute("DROP TABLE TEST"); rs = stat.executeQuery("SELECT * FROM INFORMATION_SCHEMA.SETTINGS"); + int mvStoreSettingsCount = 0, pageStoreSettingsCount = 0; while (rs.next()) { - String name = rs.getString("NAME"); - String value = rs.getString("VALUE"); - trace(name + "=" + value); + String name = rs.getString("SETTING_NAME"); + trace(name + '=' + rs.getString("SETTING_VALUE")); + if ("COMPRESS".equals(name) || "REUSE_SPACE".equals(name)) { + mvStoreSettingsCount++; + } else if (name.startsWith("PAGE_STORE_")) { + pageStoreSettingsCount++; + } } + assertEquals(2, mvStoreSettingsCount); + assertEquals(0, pageStoreSettingsCount); testMore(); @@ -1165,18 +1281,18 @@ private void testAllowLiteralsNone() throws SQLException { stat.execute("SET ALLOW_LITERALS NONE"); DatabaseMetaData meta = conn.getMetaData(); // meta.getAttributes(null, null, null, null); - meta.getBestRowIdentifier(null, null, null, 0, false); + meta.getBestRowIdentifier(null, null, "TEST", 0, false); meta.getCatalogs(); // meta.getClientInfoProperties(); - meta.getColumnPrivileges(null, null, null, null); + meta.getColumnPrivileges(null, null, "TEST", null); meta.getColumns(null, null, null, null); - meta.getCrossReference(null, null, null, null, null, null); - meta.getExportedKeys(null, null, null); + meta.getCrossReference(null, null, "TEST", null, null, "TEST"); + meta.getExportedKeys(null, null, "TEST"); // meta.getFunctionColumns(null, null, null, null); // meta.getFunctions(null, null, null); - 
meta.getImportedKeys(null, null, null); - meta.getIndexInfo(null, null, null, false, false); - meta.getPrimaryKeys(null, null, null); + meta.getImportedKeys(null, null, "TEST"); + meta.getIndexInfo(null, null, "TEST", false, false); + meta.getPrimaryKeys(null, null, "TEST"); meta.getProcedureColumns(null, null, null, null); meta.getProcedures(null, null, null); meta.getSchemas(); @@ -1195,36 +1311,27 @@ private void testAllowLiteralsNone() throws SQLException { private void testClientInfo() throws SQLException { Connection conn = getConnection("metaData"); - assertThrows(SQLClientInfoException.class, conn).getClientInfo("xxx"); + assertNull(conn.getClientInfo("xxx")); DatabaseMetaData meta = conn.getMetaData(); ResultSet rs = meta.getClientInfoProperties(); - assertFalse(rs.next()); - conn.close(); - deleteDb("metaData"); - } - - private void testSessionsUncommitted() throws SQLException { - if (config.mvcc || config.memory) { - return; + ResultSetMetaData rsMeta = rs.getMetaData(); + assertEquals("NAME", rsMeta.getColumnName(1)); + assertEquals("MAX_LEN", rsMeta.getColumnName(2)); + assertEquals("DEFAULT_VALUE", rsMeta.getColumnName(3)); + assertEquals("DESCRIPTION", rsMeta.getColumnName(4)); + assertEquals("VALUE", rsMeta.getColumnName(5)); + int count = 0; + while (rs.next()) { + count++; } - Connection conn = getConnection("metaData"); - conn.setAutoCommit(false); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("begin transaction"); - for (int i = 0; i < 6; i++) { - stat.execute("insert into test values (1)"); + if (config.networked) { + // server0, numServers + assertEquals(2, count); + } else { + // numServers + assertEquals(1, count); } - ResultSet rs = stat.executeQuery("select contains_uncommitted " + - "from INFORMATION_SCHEMA.SESSIONS"); - rs.next(); - assertEquals(true, rs.getBoolean(1)); rs.close(); - stat.execute("commit"); - rs = stat.executeQuery("select contains_uncommitted " + - "from 
INFORMATION_SCHEMA.SESSIONS"); - rs.next(); - assertEquals(false, rs.getBoolean(1)); conn.close(); deleteDb("metaData"); } @@ -1242,7 +1349,7 @@ private void testQueryStatistics() throws SQLException { stat.execute("SET QUERY_STATISTICS TRUE"); int count = 100; for (int i = 0; i < count; i++) { - stat.execute("select * from test limit 10"); + execute(stat, "select * from test limit 10"); } // The "order by" makes the result set more stable on windows, where the // timer resolution is not that great @@ -1252,9 +1359,51 @@ private void testQueryStatistics() throws SQLException { assertTrue(rs.next()); assertEquals("select * from test limit 10", rs.getString("SQL_STATEMENT")); assertEquals(count, rs.getInt("EXECUTION_COUNT")); - assertEquals(10 * count, rs.getInt("CUMULATIVE_ROW_COUNT")); + assertEquals(config.lazy ? 0 : 10 * count, rs.getInt("CUMULATIVE_ROW_COUNT")); rs.close(); conn.close(); deleteDb("metaData"); } + + private void testQueryStatisticsLimit() throws SQLException { + Connection conn = getConnection("metaData"); + Statement stat = conn.createStatement(); + stat.execute("create table test(id int primary key, name varchar) as " + + "select x, space(1000) from system_range(1, 2000)"); + + ResultSet rs = stat.executeQuery( + "select * from INFORMATION_SCHEMA.QUERY_STATISTICS"); + assertFalse(rs.next()); + rs.close(); + + //first, test setting the limit before activating statistics + int statisticsMaxEntries = 200; + //prevent test limit being less than or equal to default limit + assertTrue(statisticsMaxEntries > Constants.QUERY_STATISTICS_MAX_ENTRIES); + stat.execute("SET QUERY_STATISTICS_MAX_ENTRIES " + statisticsMaxEntries); + stat.execute("SET QUERY_STATISTICS TRUE"); + for (int i = 0; i < statisticsMaxEntries * 2; i++) { + stat.execute("select * from test where id = " + i); + } + rs = stat.executeQuery("select count(*) from INFORMATION_SCHEMA.QUERY_STATISTICS"); + assertTrue(rs.next()); + assertEquals(statisticsMaxEntries, rs.getInt(1)); + rs.close(); 
+ + //second, test changing the limit once statistics is activated + int statisticsMaxEntriesNew = 50; + //prevent new test limit being greater than or equal to default limit + assertTrue(statisticsMaxEntriesNew < Constants.QUERY_STATISTICS_MAX_ENTRIES); + stat.execute("SET QUERY_STATISTICS_MAX_ENTRIES " + statisticsMaxEntriesNew); + for (int i = 0; i < statisticsMaxEntriesNew * 2; i++) { + stat.execute("select * from test where id = " + i); + } + rs = stat.executeQuery("select count(*) from INFORMATION_SCHEMA.QUERY_STATISTICS"); + assertTrue(rs.next()); + assertEquals(statisticsMaxEntriesNew, rs.getInt(1)); + rs.close(); + + conn.close(); + deleteDb("metaData"); + } + } diff --git a/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java b/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java index 4c11d9d2c7..fd17319597 100644 --- a/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java +++ b/h2/src/test/org/h2/test/jdbc/TestNativeSQL.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,11 +13,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the Connection.nativeSQL method. */ -public class TestNativeSQL extends TestBase { +public class TestNativeSQL extends TestDb { private static final String[] PAIRS = { "CREATE TABLE TEST(ID INT PRIMARY KEY)", @@ -69,7 +70,7 @@ public class TestNativeSQL extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -144,7 +145,7 @@ private void testRandom() throws SQLException { StringBuilder buff = new StringBuilder("{oj }"); for (int j = random.nextInt(10); j > 0; j--) { String s; - switch(random.nextInt(7)) { + switch (random.nextInt(7)) { case 0: buff.append(" $$"); s = "{}\'\"-/* a\n"; diff --git a/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java b/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java index 3666c11373..7bbe4026b3 100644 --- a/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java +++ b/h2/src/test/org/h2/test/jdbc/TestPreparedStatement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -10,10 +10,13 @@ import java.io.InputStream; import java.io.StringReader; import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; import java.net.URL; import java.sql.Array; import java.sql.Connection; import java.sql.Date; +import java.sql.JDBCType; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -21,25 +24,34 @@ import java.sql.RowId; import java.sql.SQLException; import java.sql.Statement; -import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; import java.util.Calendar; import java.util.GregorianCalendar; -import java.util.SimpleTimeZone; import java.util.TimeZone; 
import java.util.UUID; import org.h2.api.ErrorCode; -import org.h2.api.Trigger; +import org.h2.api.H2Type; +import org.h2.api.Interval; +import org.h2.api.IntervalQualifier; import org.h2.test.TestBase; -import org.h2.util.DateTimeUtils; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Tests for the PreparedStatement implementation. */ -public class TestPreparedStatement extends TestBase { +public class TestPreparedStatement extends TestDb { private static final int LOB_SIZE = 4000, LOB_SIZE_BIG = 512 * 1024; @@ -49,7 +61,7 @@ public class TestPreparedStatement extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -59,14 +71,14 @@ public void test() throws Exception { testUnwrap(conn); testUnsupportedOperations(conn); testChangeType(conn); - testDateTimeTimestampWithCalendar(conn); testCallTablePrepared(conn); testValues(conn); testToString(conn); testExecuteUpdateCall(conn); testPrepareExecute(conn); + testEnum(conn); testUUID(conn); - testScopedGeneratedKey(conn); + testUUIDAsJavaObject(conn); testLobTempFiles(conn); testExecuteErrorTwice(conn); testTempView(conn); @@ -77,16 +89,26 @@ public void test() throws Exception { testCancelReuse(conn); testCoalesce(conn); testPreparedStatementMetaData(conn); + testBigDecimal(conn); testDate(conn); + testDate8(conn); + testTime8(conn); + testOffsetTime8(conn); + testDateTime8(conn); + testOffsetDateTime8(conn); + testZonedDateTime8(conn); + testInstant8(conn); + testInterval(conn); + testInterval8(conn); + testJson(conn); testArray(conn); - testUUIDGeneratedKeys(conn); testSetObject(conn); + testSetObject2(conn); testPreparedSubquery(conn); testLikeIndex(conn); testCasewhen(conn); testSubquery(conn); testObject(conn); - testIdentity(conn); testDataTypes(conn); testGetMoreResults(conn); testBlob(conn); @@ -94,8 +116,12 @@ public void test() throws Exception { 
testParameterMetaData(conn); testColumnMetaDataWithEquals(conn); testColumnMetaDataWithIn(conn); + testMultipleStatements(conn); + testAfterRollback(conn); conn.close(); testPreparedStatementWithLiteralsNone(); + testPreparedStatementWithIndexedParameterAndLiteralsNone(); + testPreparedStatementWithAnyParameter(); deleteDb("preparedStatement"); } @@ -143,15 +169,9 @@ private void testUnsupportedOperations(Connection conn) throws Exception { setRowId(1, (RowId) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, prep). setUnicodeStream(1, (InputStream) null, 0); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, prep). - setArray(1, (Array) null); ParameterMetaData meta = prep.getParameterMetaData(); assertTrue(meta.toString(), meta.toString().endsWith("parameterCount=1")); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, conn). - createSQLXML(); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, conn). - createArrayOf("Integer", new Object[0]); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, conn). 
createStruct("Integer", new Object[0]); } @@ -169,110 +189,8 @@ private static void testChangeType(Connection conn) throws SQLException { prep.executeQuery(); } - private void testDateTimeTimestampWithCalendar(Connection conn) - throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("create table ts(x timestamp primary key)"); - stat.execute("create table t(x time primary key)"); - stat.execute("create table d(x date)"); - Calendar utcCalendar = new GregorianCalendar(new SimpleTimeZone(0, "Z")); - TimeZone old = TimeZone.getDefault(); - DateTimeUtils.resetCalendar(); - TimeZone.setDefault(TimeZone.getTimeZone("PST")); - try { - Timestamp ts1 = Timestamp.valueOf("2010-03-13 18:15:00"); - Time t1 = new Time(ts1.getTime()); - Date d1 = new Date(ts1.getTime()); - // when converted to UTC, this is 03:15, which doesn't actually - // exist because of summer time change at that day - Timestamp ts2 = Timestamp.valueOf("2010-03-13 19:15:00"); - Time t2 = new Time(ts2.getTime()); - Date d2 = new Date(ts2.getTime()); - PreparedStatement prep; - ResultSet rs; - prep = conn.prepareStatement("insert into ts values(?)"); - prep.setTimestamp(1, ts1, utcCalendar); - prep.execute(); - prep.setTimestamp(1, ts2, utcCalendar); - prep.execute(); - prep = conn.prepareStatement("insert into t values(?)"); - prep.setTime(1, t1, utcCalendar); - prep.execute(); - prep.setTime(1, t2, utcCalendar); - prep.execute(); - prep = conn.prepareStatement("insert into d values(?)"); - prep.setDate(1, d1, utcCalendar); - prep.execute(); - prep.setDate(1, d2, utcCalendar); - prep.execute(); - rs = stat.executeQuery("select * from ts order by x"); - rs.next(); - assertEquals("2010-03-14 02:15:00.0", - rs.getString(1)); - assertEquals("2010-03-13 18:15:00.0", - rs.getTimestamp(1, utcCalendar).toString()); - assertEquals("2010-03-14 03:15:00.0", - rs.getTimestamp(1).toString()); - assertEquals("2010-03-14 02:15:00.0", - rs.getString("x")); - assertEquals("2010-03-13 18:15:00.0", - 
rs.getTimestamp("x", utcCalendar).toString()); - assertEquals("2010-03-14 03:15:00.0", - rs.getTimestamp("x").toString()); - rs.next(); - assertEquals("2010-03-14 03:15:00.0", - rs.getString(1)); - assertEquals("2010-03-13 19:15:00.0", - rs.getTimestamp(1, utcCalendar).toString()); - assertEquals("2010-03-14 03:15:00.0", - rs.getTimestamp(1).toString()); - assertEquals("2010-03-14 03:15:00.0", - rs.getString("x")); - assertEquals("2010-03-13 19:15:00.0", - rs.getTimestamp("x", utcCalendar).toString()); - assertEquals("2010-03-14 03:15:00.0", - rs.getTimestamp("x").toString()); - rs = stat.executeQuery("select * from t order by x"); - rs.next(); - assertEquals("02:15:00", rs.getString(1)); - assertEquals("18:15:00", rs.getTime(1, utcCalendar).toString()); - assertEquals("02:15:00", rs.getTime(1).toString()); - assertEquals("02:15:00", rs.getString("x")); - assertEquals("18:15:00", rs.getTime("x", utcCalendar).toString()); - assertEquals("02:15:00", rs.getTime("x").toString()); - rs.next(); - assertEquals("03:15:00", rs.getString(1)); - assertEquals("19:15:00", rs.getTime(1, utcCalendar).toString()); - assertEquals("03:15:00", rs.getTime(1).toString()); - assertEquals("03:15:00", rs.getString("x")); - assertEquals("19:15:00", rs.getTime("x", utcCalendar).toString()); - assertEquals("03:15:00", rs.getTime("x").toString()); - rs = stat.executeQuery("select * from d order by x"); - rs.next(); - assertEquals("2010-03-14", rs.getString(1)); - assertEquals("2010-03-13", rs.getDate(1, utcCalendar).toString()); - assertEquals("2010-03-14", rs.getDate(1).toString()); - assertEquals("2010-03-14", rs.getString("x")); - assertEquals("2010-03-13", rs.getDate("x", utcCalendar).toString()); - assertEquals("2010-03-14", rs.getDate("x").toString()); - rs.next(); - assertEquals("2010-03-14", rs.getString(1)); - assertEquals("2010-03-13", rs.getDate(1, utcCalendar).toString()); - assertEquals("2010-03-14", rs.getDate(1).toString()); - assertEquals("2010-03-14", rs.getString("x")); - 
assertEquals("2010-03-13", rs.getDate("x", utcCalendar).toString()); - assertEquals("2010-03-14", rs.getDate("x").toString()); - } finally { - TimeZone.setDefault(old); - DateTimeUtils.resetCalendar(); - } - stat.execute("drop table ts"); - stat.execute("drop table t"); - stat.execute("drop table d"); - } - private static void testCallTablePrepared(Connection conn) throws SQLException { - PreparedStatement prep = conn.prepareStatement("call table(x int = (1))"); + PreparedStatement prep = conn.prepareStatement("select * from table(x int = (1))"); prep.executeQuery(); prep.executeQuery(); } @@ -407,7 +325,7 @@ private void testInsertFunction(Connection conn) throws SQLException { PreparedStatement prep; ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT, H BINARY)"); + stat.execute("CREATE TABLE TEST(ID INT, H VARBINARY)"); prep = conn.prepareStatement("INSERT INTO TEST " + "VALUES(?, HASH('SHA256', STRINGTOUTF8(?), 5))"); prep.setInt(1, 1); @@ -477,18 +395,20 @@ private void testMaxRowsChange(Connection conn) throws SQLException { private void testUnknownDataType(Connection conn) throws SQLException { assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, conn). prepareStatement("SELECT * FROM (SELECT ? FROM DUAL)"); + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, conn). + prepareStatement("VALUES BITAND(?, ?)"); PreparedStatement prep = conn.prepareStatement("SELECT -?"); prep.setInt(1, 1); - prep.execute(); + execute(prep); prep = conn.prepareStatement("SELECT ?-?"); prep.setInt(1, 1); prep.setInt(2, 2); - prep.execute(); + execute(prep); } private void testCancelReuse(Connection conn) throws Exception { conn.createStatement().execute( - "CREATE ALIAS SLEEP FOR \"java.lang.Thread.sleep\""); + "CREATE ALIAS SLEEP FOR 'java.lang.Thread.sleep'"); // sleep for 10 seconds final PreparedStatement prep = conn.prepareStatement( "SELECT SLEEP(?) 
FROM SYSTEM_RANGE(1, 10000) LIMIT ?"); @@ -497,14 +417,14 @@ private void testCancelReuse(Connection conn) throws Exception { Task t = new Task() { @Override public void call() throws SQLException { - prep.execute(); + TestPreparedStatement.this.execute(prep); } }; t.execute(); Thread.sleep(100); prep.cancel(); SQLException e = (SQLException) t.getException(); - assertTrue(e != null); + assertNotNull(e); assertEquals(ErrorCode.STATEMENT_WAS_CANCELED, e.getErrorCode()); prep.setInt(1, 1); prep.setInt(2, 1); @@ -532,11 +452,15 @@ private void testPreparedStatementMetaData(Connection conn) ResultSetMetaData meta = prep.getMetaData(); assertEquals(2, meta.getColumnCount()); assertEquals("INTEGER", meta.getColumnTypeName(1)); - assertEquals("VARCHAR", meta.getColumnTypeName(2)); + assertEquals("CHARACTER VARYING", meta.getColumnTypeName(2)); prep = conn.prepareStatement("call 1"); meta = prep.getMetaData(); assertEquals(1, meta.getColumnCount()); assertEquals("INTEGER", meta.getColumnTypeName(1)); + prep = conn.prepareStatement("SELECT * FROM UNNEST(ARRAY[1, 2])"); + meta = prep.getMetaData(); + assertEquals(1, meta.getColumnCount()); + assertEquals("INTEGER", meta.getColumnTypeName(1)); } private void testArray(Connection conn) throws SQLException { @@ -551,6 +475,58 @@ private void testArray(Connection conn) throws SQLException { assertFalse(rs.next()); } + private void testEnum(Connection conn) throws SQLException { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE test_enum(size ENUM('small', 'medium', 'large'))"); + + String[] badSizes = new String[]{"green", "smol", "0"}; + for (int i = 0; i < badSizes.length; i++) { + PreparedStatement prep = conn.prepareStatement( + "INSERT INTO test_enum VALUES(?)"); + prep.setObject(1, badSizes[i]); + assertThrows(ErrorCode.ENUM_VALUE_NOT_PERMITTED, prep).execute(); + } + + String[] goodSizes = new String[]{"small", "medium", "large"}; + for (int i = 0; i < goodSizes.length; i++) { + PreparedStatement 
prep = conn.prepareStatement( + "INSERT INTO test_enum VALUES(?)"); + prep.setObject(1, goodSizes[i]); + prep.execute(); + ResultSet rs = stat.executeQuery("SELECT * FROM test_enum"); + for (int j = 0; j <= i; j++) { + rs.next(); + } + assertEquals(goodSizes[i], rs.getString(1)); + assertEquals(i + 1, rs.getInt(1)); + Object o = rs.getObject(1); + assertEquals(String.class, o.getClass()); + } + + for (int i = 0; i < goodSizes.length; i++) { + PreparedStatement prep = conn.prepareStatement("SELECT * FROM test_enum WHERE size = ?"); + prep.setObject(1, goodSizes[i]); + ResultSet rs = prep.executeQuery(); + rs.next(); + String s = rs.getString(1); + assertTrue(s.equals(goodSizes[i])); + assertFalse(rs.next()); + } + + for (int i = 0; i < badSizes.length; i++) { + PreparedStatement prep = conn.prepareStatement("SELECT * FROM test_enum WHERE size = ?"); + prep.setObject(1, badSizes[i]); + if (config.lazy && !config.networked) { + ResultSet resultSet = prep.executeQuery(); + assertThrows(ErrorCode.ENUM_VALUE_NOT_PERMITTED, resultSet).next(); + } else { + assertThrows(ErrorCode.ENUM_VALUE_NOT_PERMITTED, prep).executeQuery(); + } + } + + stat.execute("DROP TABLE test_enum"); + } + private void testUUID(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test_uuid(id uuid primary key)"); @@ -567,68 +543,26 @@ private void testUUID(Connection conn) throws SQLException { stat.execute("drop table test_uuid"); } - private void testUUIDGeneratedKeys(Connection conn) throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST_UUID(id UUID DEFAULT " + - "random_UUID() PRIMARY KEY)"); - stat.execute("INSERT INTO TEST_UUID() VALUES()"); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - byte[] data = rs.getBytes(1); - assertEquals(16, data.length); - stat.execute("INSERT INTO TEST_UUID VALUES(random_UUID())"); - rs = stat.getGeneratedKeys(); - assertFalse(rs.next()); - 
stat.execute("DROP TABLE TEST_UUID"); - } - - /** - * A trigger that creates a sequence value. - */ - public static class SequenceTrigger implements Trigger { - - @Override - public void fire(Connection conn, Object[] oldRow, Object[] newRow) - throws SQLException { - conn.setAutoCommit(false); - conn.createStatement().execute("call next value for seq"); - } - - @Override - public void init(Connection conn, String schemaName, - String triggerName, String tableName, boolean before, int type) { - // ignore - } - - @Override - public void close() { - // ignore - } - - @Override - public void remove() { - // ignore - } - - } + private void testUUIDAsJavaObject(Connection conn) throws SQLException { + String uuidStr = "12345678-1234-4321-8765-123456789012"; - private void testScopedGeneratedKey(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - Trigger t = new SequenceTrigger(); - stat.execute("create table test(id identity)"); - stat.execute("create sequence seq start with 1000"); - stat.execute("create trigger test_ins after insert on test call \"" + - t.getClass().getName() + "\""); - stat.execute("insert into test values(null)"); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(1, rs.getLong(1)); - stat.execute("insert into test values(100)"); - rs = stat.getGeneratedKeys(); + stat.execute("create table test_uuid(id uuid primary key)"); + UUID origUUID = UUID.fromString(uuidStr); + PreparedStatement prep = conn.prepareStatement("insert into test_uuid values(?)"); + prep.setObject(1, origUUID, java.sql.Types.JAVA_OBJECT); + prep.execute(); + + prep = conn.prepareStatement("select * from test_uuid where id=?"); + prep.setObject(1, origUUID, java.sql.Types.JAVA_OBJECT); + ResultSet rs = prep.executeQuery(); rs.next(); - assertEquals(100, rs.getLong(1)); - stat.execute("drop sequence seq"); - stat.execute("drop table test"); + Object o = rs.getObject(1); + assertTrue(o instanceof UUID); + UUID selectedUUID = (UUID) o; 
+ assertTrue(selectedUUID.toString().equals(uuidStr)); + assertTrue(selectedUUID.equals(origUUID)); + stat.execute("drop table test_uuid"); } private void testSetObject(Connection conn) throws SQLException { @@ -639,15 +573,15 @@ private void testSetObject(Connection conn) throws SQLException { prep.setObject(1, 'x'); prep.execute(); stat.execute("DROP TABLE TEST"); - stat.execute("CREATE TABLE TEST(ID INT, DATA BINARY, JAVA OTHER)"); + stat.execute("CREATE TABLE TEST(ID INT, DATA VARBINARY, JAVA OTHER)"); prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?, ?)"); prep.setInt(1, 1); prep.setObject(2, 11); prep.setObject(3, null); prep.execute(); prep.setInt(1, 2); - prep.setObject(2, 101, Types.OTHER); - prep.setObject(3, 103, Types.OTHER); + prep.setObject(2, 101, Types.JAVA_OBJECT); + prep.setObject(3, 103, Types.JAVA_OBJECT); prep.execute(); PreparedStatement p2 = conn.prepareStatement( "SELECT * FROM TEST ORDER BY ID"); @@ -655,7 +589,7 @@ private void testSetObject(Connection conn) throws SQLException { rs.next(); Object o = rs.getObject(2); assertTrue(o instanceof byte[]); - assertTrue(rs.getObject(3) == null); + assertNull(rs.getObject(3)); rs.next(); o = rs.getObject(2); assertTrue(o instanceof byte[]); @@ -666,6 +600,71 @@ private void testSetObject(Connection conn) throws SQLException { stat.execute("DROP TABLE TEST"); } + private void testSetObject2(Connection conn) throws SQLException { + try (PreparedStatement prep = conn.prepareStatement("VALUES (?1, ?1 IS OF(INTEGER), ?1 IS OF(BIGINT))")) { + for (int i = 1; i <= 6; i++) { + testSetObject2SetObjectType(prep, i, (long) i); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + // Parameters are converted to VARCHAR by a query + assertEquals(Integer.toString(i), rs.getString(1)); + // Use the type predicate to check a real data type + if (i == 1) { + assertFalse(rs.getBoolean(2)); + assertTrue(rs.getBoolean(3)); + } else { + assertTrue(rs.getBoolean(2)); + assertFalse(rs.getBoolean(3)); + } 
+ } + testSetObject2SetObjectType(prep, i, null); + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + assertNull(rs.getObject(1)); + } + } + prep.setObject(1, 1); + } + } + + private static void testSetObject2SetObjectType(PreparedStatement prep, int method, Object value) + throws SQLException { + switch (method) { + case 1: + prep.setObject(1, value); + break; + case 2: + prep.setObject(1, value, Types.INTEGER); + break; + case 3: + prep.setObject(1, value, JDBCType.INTEGER); + break; + case 4: + prep.setObject(1, value, Types.INTEGER, 0); + break; + case 5: + prep.setObject(1, value, JDBCType.INTEGER, 0); + break; + case 6: + prep.setObject(1, value, H2Type.INTEGER, 0); + } + } + + private void testBigDecimal(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?, ?"); + BigDecimal bd = new BigDecimal("12300").setScale(-2, RoundingMode.UNNECESSARY); + prep.setBigDecimal(1, bd); + prep.setObject(2, bd); + ResultSet rs = prep.executeQuery(); + rs.next(); + bd = rs.getBigDecimal(1); + assertEquals(12300, bd.intValue()); + assertEquals(0, bd.scale()); + bd = rs.getBigDecimal(2); + assertEquals(12300, bd.intValue()); + assertEquals(0, bd.scale()); + } + private void testDate(Connection conn) throws SQLException { PreparedStatement prep = conn.prepareStatement("SELECT ?"); Timestamp ts = Timestamp.valueOf("2001-02-03 04:05:06"); @@ -676,6 +675,256 @@ private void testDate(Connection conn) throws SQLException { assertEquals(ts.toString(), ts2.toString()); } + private void testDate8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + LocalDate localDate = LocalDate.parse("2001-02-03"); + prep.setObject(1, localDate); + ResultSet rs = prep.executeQuery(); + rs.next(); + LocalDate localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(localDate, localDate2); + rs.close(); + localDate = LocalDate.parse("-0509-01-01"); + prep.setObject(1, localDate); + rs = 
prep.executeQuery(); + rs.next(); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(localDate, localDate2); + rs.close(); + prep.setString(1, "1500-02-28"); + rs = prep.executeQuery(); + rs.next(); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(LocalDate.parse("1500-02-28"), localDate2); + rs.close(); + prep.setString(1, "-0100-02-28"); + rs = prep.executeQuery(); + rs.next(); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(LocalDate.parse("-0100-02-28"), localDate2); + rs.close(); + /* + * Test dates during Julian to Gregorian transition. + * + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset + */ + Statement stat = conn.createStatement(); + stat.execute("SET TIME ZONE '1'"); + TimeZone old = TimeZone.getDefault(); + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + try { + localDate = LocalDate.parse("1582-10-05"); + prep.setObject(1, localDate); + rs = prep.executeQuery(); + rs.next(); + localDate2 = rs.getObject(1, LocalDate.class); + assertEquals(localDate, localDate2); + assertEquals("1582-10-05", rs.getString(1)); + assertEquals(Date.valueOf("1582-09-25"), rs.getDate(1)); + GregorianCalendar gc = new GregorianCalendar(); + gc.setGregorianChange(new java.util.Date(Long.MIN_VALUE)); + gc.clear(); + gc.set(Calendar.YEAR, 1582); + gc.set(Calendar.MONTH, 9); + gc.set(Calendar.DAY_OF_MONTH, 5); + Date expected = new Date(gc.getTimeInMillis()); + gc.clear(); + assertEquals(expected, rs.getDate(1, gc)); + rs.close(); + } finally { + stat.execute("SET TIME ZONE LOCAL"); + TimeZone.setDefault(old); + } + } + + private void testTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + LocalTime localTime = LocalTime.parse("04:05:06"); + prep.setObject(1, localTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + LocalTime localTime2 = rs.getObject(1, LocalTime.class); + assertEquals(localTime, localTime2); + 
rs.close(); + localTime = LocalTime.parse("04:05:06.123456789"); + prep.setObject(1, localTime); + rs = prep.executeQuery(); + rs.next(); + localTime2 = rs.getObject(1, LocalTime.class); + assertEquals(localTime, localTime2); + rs.close(); + } + + private void testOffsetTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + OffsetTime offsetTime = OffsetTime.parse("04:05:06+02:30"); + prep.setObject(1, offsetTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + OffsetTime offsetTime2 = rs.getObject(1, OffsetTime.class); + assertEquals(offsetTime, offsetTime2); + assertFalse(rs.next()); + rs.close(); + + prep.setObject(1, offsetTime, Types.TIME_WITH_TIMEZONE); + rs = prep.executeQuery(); + rs.next(); + offsetTime2 = rs.getObject(1, OffsetTime.class); + assertEquals(offsetTime, offsetTime2); + assertFalse(rs.next()); + rs.close(); + } + + private void testDateTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + LocalDateTime localDateTime = LocalDateTime.parse("2001-02-03T04:05:06"); + prep.setObject(1, localDateTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + LocalDateTime localDateTime2 = rs.getObject(1, LocalDateTime.class); + assertEquals(localDateTime, localDateTime2); + rs.close(); + } + + private void testOffsetDateTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + OffsetDateTime offsetDateTime = OffsetDateTime.parse("2001-02-03T04:05:06+02:30"); + prep.setObject(1, offsetDateTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + OffsetDateTime offsetDateTime2 = rs.getObject(1, OffsetDateTime.class); + assertEquals(offsetDateTime, offsetDateTime2); + assertFalse(rs.next()); + rs.close(); + + prep.setObject(1, offsetDateTime, Types.TIMESTAMP_WITH_TIMEZONE); + rs = prep.executeQuery(); + rs.next(); + offsetDateTime2 = rs.getObject(1, OffsetDateTime.class); + 
assertEquals(offsetDateTime, offsetDateTime2); + // Check default mapping + rs.getObject(1); + assertFalse(rs.next()); + rs.close(); + } + + private void testZonedDateTime8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + ZonedDateTime zonedDateTime = ZonedDateTime.parse("2001-02-03T04:05:06+02:30"); + prep.setObject(1, zonedDateTime); + ResultSet rs = prep.executeQuery(); + rs.next(); + ZonedDateTime zonedDateTime2 = rs.getObject(1, ZonedDateTime.class); + assertEquals(zonedDateTime, zonedDateTime2); + assertFalse(rs.next()); + rs.close(); + + prep.setObject(1, zonedDateTime, Types.TIMESTAMP_WITH_TIMEZONE); + rs = prep.executeQuery(); + rs.next(); + zonedDateTime2 = rs.getObject(1, ZonedDateTime.class); + assertEquals(zonedDateTime, zonedDateTime2); + assertFalse(rs.next()); + rs.close(); + } + + private void testInstant8(Connection conn) throws Exception { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + testInstant8Impl(prep, Instant.now()); + testInstant8Impl(prep, Instant.parse("2000-01-15T12:13:14.123456789Z")); + testInstant8Impl(prep, Instant.parse("1500-09-10T23:22:11.123456789Z")); + } + + private void testInstant8Impl(PreparedStatement prep, Instant instant) throws SQLException { + prep.setObject(1, instant); + ResultSet rs = prep.executeQuery(); + rs.next(); + Instant instant2 = rs.getObject(1, Instant.class); + assertEquals(instant, instant2); + Timestamp ts = rs.getTimestamp(1); + assertEquals(instant, ts.toInstant()); + assertFalse(rs.next()); + rs.close(); + + prep.setTimestamp(1, ts); + rs = prep.executeQuery(); + rs.next(); + instant2 = rs.getObject(1, Instant.class); + assertEquals(instant, instant2); + assertFalse(rs.next()); + rs.close(); + } + + private void testInterval(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + Interval interval = new Interval(IntervalQualifier.MINUTE, false, 100, 0); + prep.setObject(1, interval); 
+ ResultSet rs = prep.executeQuery(); + rs.next(); + assertEquals("INTERVAL '100' MINUTE", rs.getString(1)); + assertEquals(interval, rs.getObject(1)); + assertEquals(interval, rs.getObject(1, Interval.class)); + } + + private void testInterval8(Connection conn) throws SQLException { + PreparedStatement prep = conn.prepareStatement("SELECT ?"); + testPeriod8(prep, 1, 2, "INTERVAL '1-2' YEAR TO MONTH"); + testPeriod8(prep, -1, -2, "INTERVAL '-1-2' YEAR TO MONTH"); + testPeriod8(prep, 1, -8, "INTERVAL '0-4' YEAR TO MONTH", 0, 4); + testPeriod8(prep, -1, 8, "INTERVAL '-0-4' YEAR TO MONTH", 0, -4); + testPeriod8(prep, 0, 0, "INTERVAL '0-0' YEAR TO MONTH"); + testPeriod8(prep, 100, 0, "INTERVAL '100' YEAR"); + testPeriod8(prep, -100, 0, "INTERVAL '-100' YEAR"); + testPeriod8(prep, 0, 100, "INTERVAL '100' MONTH"); + testPeriod8(prep, 0, -100, "INTERVAL '-100' MONTH"); + Period period = Period.of(0, 0, 1); + assertThrows(ErrorCode.INVALID_VALUE_2, prep).setObject(1, period); + Duration duration = Duration.ofSeconds(-4, 900_000_000); + prep.setObject(1, duration); + ResultSet rs = prep.executeQuery(); + rs.next(); + assertEquals("INTERVAL '-3.1' SECOND", rs.getString(1)); + assertEquals(duration, rs.getObject(1, Duration.class)); + } + + private void testPeriod8(PreparedStatement prep, int years, int months, String expectedString) + throws SQLException { + testPeriod8(prep, years, months, expectedString, years, months); + } + + private void testPeriod8(PreparedStatement prep, int years, int months, String expectedString, int expYears, + int expMonths) throws SQLException { + Period period = Period.of(years, months, 0); + Period expectedPeriod = Period.of(expYears, expMonths, 0); + prep.setObject(1, period); + ResultSet rs = prep.executeQuery(); + rs.next(); + assertEquals(expectedString, rs.getString(1)); + assertEquals(expectedPeriod, rs.getObject(1, Period.class)); + } + + private void testJson(Connection conn) throws SQLException { + Statement stat = 
conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT, J JSON)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES (?, ?)"); + prep.setInt(1, 1); + prep.setString(2, "[1]"); + prep.executeUpdate(); + prep = conn.prepareStatement("INSERT INTO TEST VALUES (?, ? FORMAT JSON)"); + prep.setInt(1, 2); + prep.setString(2, "[1]"); + prep.executeUpdate(); + try (ResultSet rs = stat.executeQuery("SELECT J FROM TEST ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals("\"[1]\"", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("[1]", rs.getString(1)); + assertFalse(rs.next()); + } + stat.execute("DROP TABLE TEST"); + } + private void testPreparedSubquery(Connection conn) throws SQLException { Statement s = conn.createStatement(); s.executeUpdate("CREATE TABLE TEST(ID IDENTITY, FLAG BIT)"); @@ -715,7 +964,7 @@ private void testParameterMetaData(Connection conn) throws SQLException { PreparedStatement prep = conn.prepareStatement("SELECT ?, ?, ? FROM DUAL"); ParameterMetaData pm = prep.getParameterMetaData(); assertEquals("java.lang.String", pm.getParameterClassName(1)); - assertEquals("VARCHAR", pm.getParameterTypeName(1)); + assertEquals("CHARACTER VARYING", pm.getParameterTypeName(1)); assertEquals(3, pm.getParameterCount()); assertEquals(ParameterMetaData.parameterModeIn, pm.getParameterMode(1)); assertEquals(Types.VARCHAR, pm.getParameterType(1)); @@ -730,22 +979,25 @@ private void testParameterMetaData(Connection conn) throws SQLException { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST3(ID INT, " + - "NAME VARCHAR(255), DATA DECIMAL(10,2))"); + "NAME VARCHAR(255), DATA1 DECIMAL(10,2), DATA2 NUMERIC(10,2))"); PreparedStatement prep1 = conn.prepareStatement( - "UPDATE TEST3 SET ID=?, NAME=?, DATA=?"); + "UPDATE TEST3 SET ID=?, NAME=?, DATA1=?, DATA2=?"); PreparedStatement prep2 = conn.prepareStatement( - "INSERT INTO TEST3 VALUES(?, ?, ?)"); - checkParameter(prep1, 1, "java.lang.Integer", 4, 
"INTEGER", 10, 0); - checkParameter(prep1, 2, "java.lang.String", 12, "VARCHAR", 255, 0); - checkParameter(prep1, 3, "java.math.BigDecimal", 3, "DECIMAL", 10, 2); - checkParameter(prep2, 1, "java.lang.Integer", 4, "INTEGER", 10, 0); - checkParameter(prep2, 2, "java.lang.String", 12, "VARCHAR", 255, 0); - checkParameter(prep2, 3, "java.math.BigDecimal", 3, "DECIMAL", 10, 2); + "INSERT INTO TEST3 VALUES(?, ?, ?, ?)"); + checkParameter(prep1, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep1, 2, "java.lang.String", 12, "CHARACTER VARYING", 255, 0); + checkParameter(prep1, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + checkParameter(prep1, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); + checkParameter(prep2, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep2, 2, "java.lang.String", 12, "CHARACTER VARYING", 255, 0); + checkParameter(prep2, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + checkParameter(prep2, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); PreparedStatement prep3 = conn.prepareStatement( - "SELECT * FROM TEST3 WHERE ID=? AND NAME LIKE ? AND ?>DATA"); - checkParameter(prep3, 1, "java.lang.Integer", 4, "INTEGER", 10, 0); - checkParameter(prep3, 2, "java.lang.String", 12, "VARCHAR", 0, 0); - checkParameter(prep3, 3, "java.math.BigDecimal", 3, "DECIMAL", 10, 2); + "SELECT * FROM TEST3 WHERE ID=? AND NAME LIKE ? 
AND ?>DATA1 AND ?>DATA2"); + checkParameter(prep3, 1, "java.lang.Integer", 4, "INTEGER", 32, 0); + checkParameter(prep3, 2, "java.lang.String", 12, "CHARACTER VARYING", 0, 0); + checkParameter(prep3, 3, "java.math.BigDecimal", Types.DECIMAL, "DECIMAL", 10, 2); + checkParameter(prep3, 4, "java.math.BigDecimal", Types.NUMERIC, "NUMERIC", 10, 2); stat.execute("DROP TABLE TEST3"); } @@ -762,9 +1014,9 @@ private void checkParameter(PreparedStatement prep, int index, private void testLikeIndex(Connection conn) throws SQLException { Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); - stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); - stat.execute("INSERT INTO TEST VALUES(2, 'World')"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT, NAME VARCHAR(255))"); + stat.execute("INSERT INTO TEST VALUES(1, 2, 'Hello')"); + stat.execute("INSERT INTO TEST VALUES(2, 4, 'World')"); stat.execute("create index idxname on test(name);"); PreparedStatement prep, prepExe; @@ -778,10 +1030,10 @@ private void testLikeIndex(Connection conn) throws SQLException { ResultSet rs = prep.executeQuery(); rs.next(); String plan = rs.getString(1); - assertTrue(plan.contains(".tableScan")); + assertContains(plan, ".tableScan"); rs = prepExe.executeQuery(); rs.next(); - assertEquals("World", rs.getString(2)); + assertEquals("World", rs.getString(3)); assertFalse(rs.next()); prep.setString(1, "H%"); @@ -789,10 +1041,10 @@ private void testLikeIndex(Connection conn) throws SQLException { rs = prep.executeQuery(); rs.next(); String plan1 = rs.getString(1); - assertTrue(plan1.contains("IDXNAME")); + assertContains(plan1, "IDXNAME"); rs = prepExe.executeQuery(); rs.next(); - assertEquals("Hello", rs.getString(2)); + assertEquals("Hello", rs.getString(3)); assertFalse(rs.next()); stat.execute("DROP TABLE IF EXISTS TEST"); @@ -887,15 +1139,17 @@ private void testDataTypes(Connection conn) throws SQLException { ResultSet rs; 
trace("Create tables"); stat.execute("CREATE TABLE T_INT" + - "(ID INT PRIMARY KEY,VALUE INT)"); + "(ID INT PRIMARY KEY,V INT)"); stat.execute("CREATE TABLE T_VARCHAR" + - "(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + "(ID INT PRIMARY KEY,V VARCHAR(255))"); stat.execute("CREATE TABLE T_DECIMAL_0" + - "(ID INT PRIMARY KEY,VALUE DECIMAL(30,0))"); + "(ID INT PRIMARY KEY,V DECIMAL(30,0))"); stat.execute("CREATE TABLE T_DECIMAL_10" + - "(ID INT PRIMARY KEY,VALUE DECIMAL(20,10))"); + "(ID INT PRIMARY KEY,V DECIMAL(20,10))"); stat.execute("CREATE TABLE T_DATETIME" + - "(ID INT PRIMARY KEY,VALUE DATETIME)"); + "(ID INT PRIMARY KEY,V DATETIME)"); + stat.execute("CREATE TABLE T_BIGINT" + + "(ID INT PRIMARY KEY,V DECIMAL(30,0))"); prep = conn.prepareStatement("INSERT INTO T_INT VALUES(?,?)", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); prep.setInt(1, 1); @@ -918,7 +1172,7 @@ private void testDataTypes(Connection conn) throws SQLException { prep.setString(2, "-1"); prep.executeUpdate(); prep.setInt(1, 7); - prep.setObject(2, new Integer(3)); + prep.setObject(2, 3); prep.executeUpdate(); prep.setObject(1, "8"); // should throw an exception @@ -989,9 +1243,35 @@ private void testDataTypes(Connection conn) throws SQLException { prep.setFloat(2, -40); prep.executeUpdate(); - rs = stat.executeQuery("SELECT VALUE FROM T_DECIMAL_0 ORDER BY ID"); + rs = stat.executeQuery("SELECT V FROM T_DECIMAL_0 ORDER BY ID"); checkBigDecimal(rs, new String[] { "" + Long.MAX_VALUE, "" + Long.MIN_VALUE, "10", "-20", "30", "-40" }); + prep = conn.prepareStatement("INSERT INTO T_BIGINT VALUES(?,?)"); + prep.setInt(1, 1); + prep.setObject(2, new BigInteger("" + Long.MAX_VALUE)); + prep.executeUpdate(); + prep.setInt(1, 2); + prep.setObject(2, Long.MIN_VALUE); + prep.executeUpdate(); + prep.setInt(1, 3); + prep.setObject(2, 10); + prep.executeUpdate(); + prep.setInt(1, 4); + prep.setObject(2, -20); + prep.executeUpdate(); + prep.setInt(1, 5); + prep.setObject(2, 30); + prep.executeUpdate(); 
+ prep.setInt(1, 6); + prep.setObject(2, -40); + prep.executeUpdate(); + prep.setInt(1, 7); + prep.setObject(2, new BigInteger("-60")); + prep.executeUpdate(); + + rs = stat.executeQuery("SELECT V FROM T_BIGINT ORDER BY ID"); + checkBigDecimal(rs, new String[] { "" + Long.MAX_VALUE, + "" + Long.MIN_VALUE, "10", "-20", "30", "-40", "-60" }); } private void testGetMoreResults(Connection conn) throws SQLException { @@ -1034,16 +1314,16 @@ private void testObject(Connection conn) throws SQLException { stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); PreparedStatement prep = conn.prepareStatement( - "SELECT ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? FROM TEST"); + "SELECT ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? FROM TEST"); prep.setObject(1, Boolean.TRUE); prep.setObject(2, "Abc"); prep.setObject(3, new BigDecimal("10.2")); - prep.setObject(4, new Byte((byte) 0xff)); - prep.setObject(5, new Short(Short.MAX_VALUE)); - prep.setObject(6, new Integer(Integer.MIN_VALUE)); - prep.setObject(7, new Long(Long.MAX_VALUE)); - prep.setObject(8, new Float(Float.MAX_VALUE)); - prep.setObject(9, new Double(Double.MAX_VALUE)); + prep.setObject(4, (byte) 0xff); + prep.setObject(5, Short.MAX_VALUE); + prep.setObject(6, Integer.MIN_VALUE); + prep.setObject(7, Long.MAX_VALUE); + prep.setObject(8, Float.MAX_VALUE); + prep.setObject(9, Double.MAX_VALUE); prep.setObject(10, java.sql.Date.valueOf("2001-02-03")); prep.setObject(11, java.sql.Time.valueOf("04:05:06")); prep.setObject(12, java.sql.Timestamp.valueOf( @@ -1051,29 +1331,26 @@ private void testObject(Connection conn) throws SQLException { prep.setObject(13, new java.util.Date(java.sql.Date.valueOf( "2001-02-03").getTime())); prep.setObject(14, new byte[] { 10, 20, 30 }); - prep.setObject(15, new Character('a'), Types.OTHER); + prep.setObject(15, 'a', Types.JAVA_OBJECT); prep.setObject(16, "2001-01-02", Types.DATE); // 
converting to null seems strange... prep.setObject(17, "2001-01-02", Types.NULL); prep.setObject(18, "3.725", Types.DOUBLE); prep.setObject(19, "23:22:21", Types.TIME); - prep.setObject(20, new java.math.BigInteger("12345"), Types.OTHER); + prep.setObject(20, new java.math.BigInteger("12345"), Types.JAVA_OBJECT); + prep.setArray(21, conn.createArrayOf("TINYINT", new Object[] {(byte) 1})); + prep.setArray(22, conn.createArrayOf("SMALLINT", new Object[] {(short) -2})); rs = prep.executeQuery(); rs.next(); assertTrue(rs.getObject(1).equals(Boolean.TRUE)); assertTrue(rs.getObject(2).equals("Abc")); assertTrue(rs.getObject(3).equals(new BigDecimal("10.2"))); - assertTrue(rs.getObject(4).equals((byte) 0xff)); - assertTrue(rs.getObject(5).equals( - new Short(Short.MAX_VALUE))); - assertTrue(rs.getObject(6).equals( - new Integer(Integer.MIN_VALUE))); - assertTrue(rs.getObject(7).equals( - new Long(Long.MAX_VALUE))); - assertTrue(rs.getObject(8).equals( - new Float(Float.MAX_VALUE))); - assertTrue(rs.getObject(9).equals( - new Double(Double.MAX_VALUE))); + assertTrue(rs.getObject(4).equals(Integer.valueOf(-1))); + assertTrue(rs.getObject(5).equals(Integer.valueOf(Short.MAX_VALUE))); + assertTrue(rs.getObject(6).equals(Integer.MIN_VALUE)); + assertTrue(rs.getObject(7).equals(Long.MAX_VALUE)); + assertTrue(rs.getObject(8).equals(Float.MAX_VALUE)); + assertTrue(rs.getObject(9).equals(Double.MAX_VALUE)); assertTrue(rs.getObject(10).equals( java.sql.Date.valueOf("2001-02-03"))); assertEquals("04:05:06", rs.getObject(11).toString()); @@ -1088,12 +1365,15 @@ private void testObject(Connection conn) throws SQLException { assertTrue(rs.getObject(16).equals( java.sql.Date.valueOf("2001-01-02"))); assertTrue(rs.getObject(17) == null && rs.wasNull()); - assertTrue(rs.getObject(18).equals( - new Double(3.725))); + assertTrue(rs.getObject(18).equals(3.725d)); assertTrue(rs.getObject(19).equals( java.sql.Time.valueOf("23:22:21"))); assertTrue(rs.getObject(20).equals( new 
java.math.BigInteger("12345"))); + Object[] a = (Object[]) ((Array) rs.getObject(21)).getArray(); + assertEquals(a[0], Integer.valueOf(1)); + a = (Object[]) ((Array) rs.getObject(22)).getArray(); + assertEquals(a[0], Integer.valueOf(-2)); // } else if(x instanceof java.io.Reader) { // return session.createLob(Value.CLOB, @@ -1107,60 +1387,6 @@ private void testObject(Connection conn) throws SQLException { } - private void testIdentity(Connection conn) throws SQLException { - Statement stat = conn.createStatement(); - stat.execute("CREATE SEQUENCE SEQ"); - stat.execute("CREATE TABLE TEST(ID INT)"); - PreparedStatement prep; - prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)"); - prep.execute(); - ResultSet rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new int[] { 1 }); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(3, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new String[] { "ID" }); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(4, rs.getInt(1)); - assertFalse(rs.next()); - - prep = conn.prepareStatement( - "INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, - ResultSet.HOLD_CURSORS_OVER_COMMIT); - prep.execute(); - rs = prep.getGeneratedKeys(); - rs.next(); - assertEquals(5, rs.getInt(1)); - assertFalse(rs.next()); - - stat.execute("DROP TABLE TEST"); - } - private int getLength() { return getSize(LOB_SIZE, LOB_SIZE_BIG); } @@ -1326,9 +1552,9 @@ private void 
testClob(Connection conn) throws SQLException { assertEquals(ascii2, rs.getString(3)); assertFalse(rs.next()); - assertTrue(prep.getWarnings() == null); + assertNull(prep.getWarnings()); prep.clearWarnings(); - assertTrue(prep.getWarnings() == null); + assertNull(prep.getWarnings()); assertTrue(conn == prep.getConnection()); } @@ -1351,18 +1577,88 @@ private void testPreparedStatementWithLiteralsNone() throws SQLException { deleteDb("preparedStatement"); } + private void testPreparedStatementWithIndexedParameterAndLiteralsNone() throws SQLException { + // make sure that when the analyze table kicks in, + // it works with ALLOW_LITERALS=NONE + deleteDb("preparedStatement"); + Connection conn = getConnection( + "preparedStatement;ANALYZE_AUTO=100"); + conn.createStatement().execute( + "SET ALLOW_LITERALS NONE"); + conn.prepareStatement("CREATE TABLE test (id INT)").execute(); + PreparedStatement ps = conn.prepareStatement( + "INSERT INTO test (id) VALUES (?1)"); + + ps.setInt(1, 1); + ps.executeUpdate(); + + conn.close(); + deleteDb("preparedStatement"); + } + + private void testPreparedStatementWithAnyParameter() throws SQLException { + deleteDb("preparedStatement"); + Connection conn = getConnection("preparedStatement"); + conn.prepareStatement("CREATE TABLE TEST(ID INT PRIMARY KEY, V INT UNIQUE)").execute(); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST(ID, V) VALUES (?, ?)"); + for (int i = 0; i < 10_000; i++) { + ps.setInt(1, i); + ps.setInt(2, i * 10); + ps.executeUpdate(); + } + Integer[] values = {-100, 10, 200, 3_000, 40_000, 500_000}; + int[] expected = {1, 20, 300, 4_000}; + // Ensure that other methods return the same results + ps = conn.prepareStatement("SELECT ID FROM TEST WHERE V IN (SELECT * FROM TABLE(X INT=?)) ORDER BY ID"); + anyParameterCheck(ps, values, expected); + ps = conn.prepareStatement("SELECT ID FROM TEST INNER JOIN TABLE(X INT=?) 
T ON TEST.V = T.X"); + anyParameterCheck(ps, values, expected); + // Test expression = ANY(?) + ps = conn.prepareStatement("SELECT ID FROM TEST WHERE V = ANY(?)"); + assertThrows(ErrorCode.PARAMETER_NOT_SET_1, ps).executeQuery(); + anyParameterCheck(ps, values, expected); + anyParameterCheck(ps, 300, new int[] {30}); + anyParameterCheck(ps, -5, new int[0]); + ps = conn.prepareStatement("SELECT V, CASE V WHEN = ANY(?) THEN 1 ELSE 2 END FROM" + + " (VALUES DATE '2000-01-01', DATE '2010-01-01') T(V) ORDER BY V"); + ps.setObject(1, new LocalDate[] { LocalDate.of(2000, 1, 1), LocalDate.of(2030, 1, 1) }); + try (ResultSet rs = ps.executeQuery()) { + assertTrue(rs.next()); + assertEquals(LocalDate.of(2000, 1, 1), rs.getObject(1, LocalDate.class)); + assertEquals(1, rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(LocalDate.of(2010, 1, 1), rs.getObject(1, LocalDate.class)); + assertEquals(2, rs.getInt(2)); + assertFalse(rs.next()); + assertEquals("CASE V WHEN = ANY(?1) THEN 1 ELSE 2 END", rs.getMetaData().getColumnLabel(2)); + } + conn.close(); + deleteDb("preparedStatement"); + } + + private void anyParameterCheck(PreparedStatement ps, Object values, int[] expected) throws SQLException { + ps.setObject(1, values); + try (ResultSet rs = ps.executeQuery()) { + for (int exp : expected) { + assertTrue(rs.next()); + assertEquals(exp, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + private void checkBigDecimal(ResultSet rs, String[] value) throws SQLException { for (String v : value) { assertTrue(rs.next()); java.math.BigDecimal x = rs.getBigDecimal(1); trace("v=" + v + " x=" + x); if (v == null) { - assertTrue(x == null); + assertNull(x); } else { assertTrue(x.compareTo(new java.math.BigDecimal(v)) == 0); } } - assertTrue(!rs.next()); + assertFalse(rs.next()); } private void testColumnMetaDataWithEquals(Connection conn) @@ -1394,4 +1690,70 @@ private void testColumnMetaDataWithIn(Connection conn) throws SQLException { 
ps.getParameterMetaData().getParameterType(1)); stmt.execute("DROP TABLE TEST"); } + + private void testMultipleStatements(Connection conn) throws SQLException { + assertThrows(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS, conn).prepareStatement("SELECT ?; SELECT ?1"); + assertThrows(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS, conn).prepareStatement("SELECT ?1; SELECT ?"); + Statement stmt = conn.createStatement(); + stmt.execute("CREATE TABLE TEST (ID IDENTITY, V INT)"); + PreparedStatement ps = conn.prepareStatement("INSERT INTO TEST(V) VALUES ?; INSERT INTO TEST(V) VALUES ?"); + ps.setInt(1, 1); + ps.setInt(2, 2); + ps.executeUpdate(); + ps = conn.prepareStatement("INSERT INTO TEST(V) VALUES ?2; INSERT INTO TEST(V) VALUES ?1;"); + ps.setInt(1, 3); + ps.setInt(2, 4); + ps.executeUpdate(); + try (ResultSet rs = stmt.executeQuery("SELECT V FROM TEST ORDER BY ID")) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertFalse(rs.next()); + } + stmt.execute("DROP TABLE TEST"); + ps = conn.prepareStatement("CREATE TABLE A (C1 INT);" // + + "CREATE INDEX A_IDX ON A(C1);" // + + "ALTER TABLE A ADD (C2 INT);" // + + "CREATE TABLE B AS (SELECT C1 FROM A);"); + ps.executeUpdate(); + stmt.execute("DROP TABLE A, B"); + } + + private void testAfterRollback(Connection conn) throws SQLException { + try (Statement stat = conn.createStatement()) { + try { + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255))"); + conn.setAutoCommit(false); + + // insert something into test table + stat.execute("INSERT INTO TEST VALUES(1, 'Hello')"); + + // execute 'SELECT count(*)' with prepared-statements + PreparedStatement pstmt = conn.prepareStatement("SELECT count(*) FROM TEST"); + try (ResultSet rs = pstmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(1, 
rs.getInt(1)); + } + + // rollback the insert + conn.rollback(); + + // re-execute the pstmt. + try (ResultSet rs = pstmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1)); + } + } finally { + // cleanup + stat.execute("DROP TABLE IF EXISTS TEST"); + conn.setAutoCommit(true); + } + } + } + } diff --git a/h2/src/test/org/h2/test/jdbc/TestResultSet.java b/h2/src/test/org/h2/test/jdbc/TestResultSet.java index 922d588bd2..0b0141a7f0 100644 --- a/h2/src/test/org/h2/test/jdbc/TestResultSet.java +++ b/h2/src/test/org/h2/test/jdbc/TestResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,7 +13,9 @@ import java.io.Writer; import java.math.BigDecimal; import java.math.BigInteger; +import java.nio.charset.StandardCharsets; import java.sql.Array; +import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -25,24 +27,39 @@ import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; -import java.sql.SQLXML; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.Period; +import java.time.ZonedDateTime; +import java.util.Arrays; import java.util.Calendar; import java.util.Collections; -import java.util.Locale; +import java.util.GregorianCalendar; import java.util.TimeZone; import org.h2.api.ErrorCode; +import org.h2.api.Interval; +import org.h2.api.IntervalQualifier; +import org.h2.engine.Constants; 
import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.IOUtils; +import org.h2.util.MathUtils; +import org.h2.util.StringUtils; /** * Tests for the ResultSet implementation. */ -public class TestResultSet extends TestBase { +public class TestResultSet extends TestDb { private Connection conn; private Statement stat; @@ -53,7 +70,7 @@ public class TestResultSet extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -70,7 +87,6 @@ public void test() throws Exception { testInsertRowWithUpdatableResultSetDefault(); testBeforeFirstAfterLast(); testParseSpecialValues(); - testSpecialLocale(); testSubstringPrecision(); testSubstringDataType(); testColumnLabelColumnName(); @@ -81,6 +97,8 @@ public void test() throws Exception { testFindColumn(); testColumnLength(); testArray(); + testRowValue(); + testEnum(); testLimitMaxRows(); trace("max rows=" + stat.getMaxRows()); @@ -89,11 +107,15 @@ public void test() throws Exception { assertTrue(stat.getMaxRows() == 6); testInt(); + testSmallInt(); + testBigInt(); testVarchar(); testDecimal(); testDoubleFloat(); testDatetime(); testDatetimeWithCalendar(); + testInterval(); + testInterval8(); testBlob(); testClob(); testAutoIncrement(); @@ -117,12 +139,12 @@ private void testUnwrap() throws SQLException { } private void testReuseSimpleResult() throws SQLException { - ResultSet rs = stat.executeQuery("select table(x array=((1)))"); + ResultSet rs = stat.executeQuery("select * from table(x int array=((1)))"); while (rs.next()) { rs.getString(1); } rs.close(); - rs = stat.executeQuery("select table(x array=((1)))"); + rs = stat.executeQuery("select * from table(x int array=((1)))"); while (rs.next()) { rs.getString(1); } @@ -137,9 +159,9 @@ private void testUnsupportedOperations() throws SQLException { assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). 
getUnicodeStream("x"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getObject(1, Collections.>emptyMap()); + getObject(1, Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getObject("x", Collections.>emptyMap()); + getObject("x", Collections.emptyMap()); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). getRef(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). @@ -152,34 +174,18 @@ private void testUnsupportedOperations() throws SQLException { getRowId(1); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). getRowId("x"); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getSQLXML(1); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - getSQLXML("x"); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). updateRef(1, (Ref) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). updateRef("x", (Ref) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateArray(1, (Array) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateArray("x", (Array) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). updateRowId(1, (RowId) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). updateRowId("x", (RowId) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateNClob(1, (NClob) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateNClob("x", (NClob) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateSQLXML(1, (SQLXML) null); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). - updateSQLXML("x", (SQLXML) null); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). getCursorName(); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, rs). 
- setFetchDirection(ResultSet.FETCH_FORWARD); + setFetchDirection(ResultSet.FETCH_REVERSE); } private void testAmbiguousColumnNames() throws SQLException { @@ -198,8 +204,9 @@ private void testInsertRowWithUpdatableResultSetDefault() throws Exception { PreparedStatement prep = conn.prepareStatement("select * from test", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); ResultSet rs = prep.executeQuery(); + int idx = 1; rs.moveToInsertRow(); - rs.updateInt(1, 1); + rs.updateInt(1, idx++); rs.insertRow(); rs.close(); rs = stat.executeQuery("select * from test"); @@ -220,12 +227,12 @@ private void testInsertRowWithUpdatableResultSetDefault() throws Exception { rs = prep.executeQuery(); rs.moveToInsertRow(); - rs.updateInt(1, 2); + rs.updateInt(1, idx++); rs.updateNString(2, "Hello"); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 3); + rs.updateInt(1, idx++); rs.updateNString("data", "Hello"); rs.insertRow(); @@ -233,7 +240,7 @@ private void testInsertRowWithUpdatableResultSetDefault() throws Exception { Writer w; rs.moveToInsertRow(); - rs.updateInt(1, 4); + rs.updateInt(1, idx++); c = conn.createClob(); w = c.setCharacterStream(1); w.write("Hello"); @@ -242,7 +249,7 @@ private void testInsertRowWithUpdatableResultSetDefault() throws Exception { rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 5); + rs.updateInt(1, idx++); c = conn.createClob(); w = c.setCharacterStream(1); w.write("Hello"); @@ -250,48 +257,70 @@ private void testInsertRowWithUpdatableResultSetDefault() throws Exception { rs.updateClob("data", c); rs.insertRow(); + NClob nc; + + rs.moveToInsertRow(); + rs.updateInt(1, idx++); + nc = conn.createNClob(); + w = nc.setCharacterStream(1); + w.write("Hello"); + w.close(); + rs.updateNClob(2, nc); + rs.insertRow(); + + rs.moveToInsertRow(); + rs.updateInt(1, idx++); + nc = conn.createNClob(); + w = nc.setCharacterStream(1); + w.write("Hello"); + w.close(); + rs.updateNClob("data", nc); + rs.insertRow(); + InputStream in; 
rs.moveToInsertRow(); - rs.updateInt(1, 6); - in = new ByteArrayInputStream("Hello".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream(2, in); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 7); - in = new ByteArrayInputStream("Hello".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream("data", in); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 8); - in = new ByteArrayInputStream("Hello-".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello-".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream(2, in, 5); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 9); - in = new ByteArrayInputStream("Hello-".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello-".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream("data", in, 5); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 10); - in = new ByteArrayInputStream("Hello-".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello-".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream(2, in, 5L); rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 11); - in = new ByteArrayInputStream("Hello-".getBytes("UTF-8")); + rs.updateInt(1, idx++); + in = new ByteArrayInputStream("Hello-".getBytes(StandardCharsets.UTF_8)); rs.updateAsciiStream("data", in, 5L); rs.insertRow(); rs = stat.executeQuery("select * from test"); - while (rs.next()) { + for (int i = 1; i < idx; i++) { + assertTrue(rs.next()); assertEquals("Hello", rs.getString(2)); } + assertFalse(rs.next()); stat.execute("drop table test"); } @@ -347,34 +376,14 @@ private void testParseSpecialValue(String x) throws SQLException { assertTrue(expected.equals(o)); } - private void testSpecialLocale() throws SQLException { - Locale old = Locale.getDefault(); - try { - // 
when using Turkish as the default locale, "i".toUpperCase() is - // not "I" - Locale.setDefault(new Locale("tr")); - stat.execute("create table test(I1 int, i2 int, b int, c int, d int) " + - "as select 1, 1, 1, 1, 1"); - ResultSet rs = stat.executeQuery("select * from test"); - rs.next(); - rs.getString("I1"); - rs.getString("i1"); - rs.getString("I2"); - rs.getString("i2"); - stat.execute("drop table test"); - } finally { - Locale.setDefault(old); - } - } - private void testSubstringDataType() throws SQLException { - ResultSet rs = stat.executeQuery("select substr(x, 1, 1) from dual"); + ResultSet rs = stat.executeQuery("select substr(x, 1, 1) from system_range(1, 1)"); rs.next(); assertEquals(Types.VARCHAR, rs.getMetaData().getColumnType(1)); } private void testColumnLabelColumnName() throws SQLException { - ResultSet rs = stat.executeQuery("select x as y from dual"); + ResultSet rs = stat.executeQuery("select x as y from system_range(1, 1)"); rs.next(); rs.getString("x"); rs.getString("y"); @@ -418,6 +427,7 @@ private void testFetchSize() throws SQLException { int a = stat.getFetchSize(); int b = rs.getFetchSize(); assertEquals(a, b); + rs.setFetchDirection(ResultSet.FETCH_FORWARD); rs.setFetchSize(b + 1); b = rs.getFetchSize(); assertEquals(a + 1, b); @@ -471,7 +481,7 @@ private void testSubstringPrecision() throws SQLException { trace("testSubstringPrecision"); stat.execute("CREATE TABLE TEST(ID INT, NAME VARCHAR(10))"); stat.execute("INSERT INTO TEST VALUES(1, 'Hello'), (2, 'WorldPeace')"); - checkPrecision(0, "SELECT SUBSTR(NAME, 12, 4) FROM TEST"); + checkPrecision(1, "SELECT SUBSTR(NAME, 12, 4) FROM TEST"); checkPrecision(9, "SELECT SUBSTR(NAME, 2) FROM TEST"); checkPrecision(10, "SELECT SUBSTR(NAME, ID) FROM TEST"); checkPrecision(4, "SELECT SUBSTR(NAME, 2, 4) FROM TEST"); @@ -540,20 +550,20 @@ private void testColumnLength() throws SQLException { rs = stat.executeQuery("explain select * from dual"); meta = rs.getMetaData(); - 
assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); rs = stat.executeQuery("script"); meta = rs.getMetaData(); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); rs = stat.executeQuery("select group_concat(table_name) " + "from information_schema.tables"); rs.next(); meta = rs.getMetaData(); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(1)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getColumnDisplaySize(1)); + assertEquals(Constants.MAX_STRING_LENGTH, meta.getPrecision(1)); } @@ -564,17 +574,13 @@ private void testLimitMaxRows() throws SQLException { rs = stat.executeQuery("SELECT C || C FROM one;"); ResultSetMetaData md = rs.getMetaData(); assertEquals(20, md.getPrecision(1)); - ResultSet rs2 = stat.executeQuery("SELECT UPPER (C) FROM one;"); - ResultSetMetaData md2 = rs2.getMetaData(); - assertEquals(10, md2.getPrecision(1)); - rs = stat.executeQuery("SELECT UPPER (C), CHAR(10), " + + rs = stat.executeQuery("SELECT CHAR(10), " + "CONCAT(C,C,C), HEXTORAW(C), RAWTOHEX(C) FROM one"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals(10, meta.getPrecision(1)); - assertEquals(1, meta.getPrecision(2)); - assertEquals(30, meta.getPrecision(3)); - assertEquals(3, meta.getPrecision(4)); - assertEquals(40, meta.getPrecision(5)); + assertEquals(1, meta.getPrecision(1)); + assertEquals(30, meta.getPrecision(2)); + assertEquals(2, meta.getPrecision(3)); + assertEquals(40, meta.getPrecision(4)); stat.execute("DROP TABLE one"); } @@ -584,12 +590,14 @@ private void 
testAutoIncrement() throws SQLException { ResultSet rs; stat.execute("CREATE TABLE TEST(ID IDENTITY NOT NULL, NAME VARCHAR NULL)"); - stat.execute("INSERT INTO TEST(NAME) VALUES('Hello')"); + stat.execute("INSERT INTO TEST(NAME) VALUES('Hello')", + Statement.RETURN_GENERATED_KEYS); rs = stat.getGeneratedKeys(); assertTrue(rs.next()); assertEquals(1, rs.getInt(1)); - stat.execute("INSERT INTO TEST(NAME) VALUES('World')"); + stat.execute("INSERT INTO TEST(NAME) VALUES('World')", + Statement.RETURN_GENERATED_KEYS); rs = stat.getGeneratedKeys(); assertTrue(rs.next()); assertEquals(2, rs.getInt(1)); @@ -613,7 +621,7 @@ private void testInt() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE INT)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" INT)"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,0)"); stat.execute("INSERT INTO TEST VALUES(3,1)"); @@ -641,7 +649,7 @@ private void testInt() throws SQLException { assertTrue("TEST".equals(meta.getTableName(1))); assertTrue("ID".equals(meta.getColumnName(1))); assertTrue("VALUE".equals(meta.getColumnName(2))); - assertTrue(!meta.isAutoIncrement(1)); + assertFalse(meta.isAutoIncrement(1)); assertTrue(meta.isCaseSensitive(1)); assertTrue(meta.isSearchable(1)); assertFalse(meta.isCurrency(1)); @@ -654,12 +662,12 @@ private void testInt() throws SQLException { assertFalse(meta.isDefinitelyWritable(1)); assertTrue(meta.getColumnDisplaySize(1) > 0); assertTrue(meta.getColumnDisplaySize(2) > 0); - assertEquals(null, meta.getColumnClassName(3)); + assertEquals(Void.class.getName(), meta.getColumnClassName(3)); assertTrue(rs.getRow() == 0); assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, new int[] { Types.INTEGER, Types.INTEGER, - Types.NULL }, new int[] { 10, 10, 1 }, new int[] { 0, 0, 0 }); + Types.NULL }, new int[] { 32, 32, 1 }, new int[] { 0, 0, 0 }); rs.next(); 
assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); @@ -690,11 +698,19 @@ private void testInt() throws SQLException { o = rs.getObject("value"); trace(o.getClass().getName()); assertTrue(o instanceof Integer); - assertTrue(((Integer) o).intValue() == -1); + assertTrue((Integer) o == -1); + o = rs.getObject("value", Integer.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Integer); + assertTrue((Integer) o == -1); o = rs.getObject(2); trace(o.getClass().getName()); assertTrue(o instanceof Integer); - assertTrue(((Integer) o).intValue() == -1); + assertTrue((Integer) o == -1); + o = rs.getObject(2, Integer.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Integer); + assertTrue((Integer) o == -1); assertTrue(rs.getBoolean("Value")); assertTrue(rs.getByte("Value") == (byte) -1); assertTrue(rs.getShort("Value") == (short) -1); @@ -710,7 +726,7 @@ private void testInt() throws SQLException { rs.next(); assertTrue(rs.getRow() == 2); assertTrue(rs.getInt(2) == 0 && !rs.wasNull()); - assertTrue(!rs.getBoolean(2)); + assertFalse(rs.getBoolean(2)); assertTrue(rs.getByte(2) == 0); assertTrue(rs.getShort(2) == 0); assertTrue(rs.getLong(2) == 0); @@ -740,7 +756,10 @@ private void testInt() throws SQLException { assertTrue(rs.getString(1).equals("6") && !rs.wasNull()); assertTrue(rs.getString(2) == null && rs.wasNull()); o = rs.getObject(2); - assertTrue(o == null); + assertNull(o); + assertTrue(rs.wasNull()); + o = rs.getObject(2, Integer.class); + assertNull(o); assertTrue(rs.wasNull()); assertFalse(rs.next()); assertEquals(0, rs.getRow()); @@ -750,12 +769,266 @@ private void testInt() throws SQLException { stat.setMaxRows(0); } + private void testSmallInt() throws SQLException { + trace("Test SMALLINT"); + ResultSet rs; + Object o; + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" SMALLINT)"); + stat.execute("INSERT INTO TEST VALUES(1,-1)"); + 
stat.execute("INSERT INTO TEST VALUES(2,0)"); + stat.execute("INSERT INTO TEST VALUES(3,1)"); + stat.execute("INSERT INTO TEST VALUES(4," + Short.MAX_VALUE + ")"); + stat.execute("INSERT INTO TEST VALUES(5," + Short.MIN_VALUE + ")"); + stat.execute("INSERT INTO TEST VALUES(6,NULL)"); + + // MySQL compatibility (is this required?) + // rs=stat.executeQuery("SELECT * FROM TEST T ORDER BY ID"); + // check(rs.findColumn("T.ID"), 1); + // check(rs.findColumn("T.NAME"), 2); + + rs = stat.executeQuery("SELECT *, NULL AS N FROM TEST ORDER BY ID"); + + // MySQL compatibility + assertEquals(1, rs.findColumn("TEST.ID")); + assertEquals(2, rs.findColumn("TEST.VALUE")); + + assertTrue(rs.getRow() == 0); + assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, + new int[] { Types.INTEGER, Types.SMALLINT, + Types.NULL }, new int[] { 32, 16, 1 }, new int[] { 0, 0, 0 }); + rs.next(); + + assertTrue(rs.getRow() == 1); + assertEquals(2, rs.findColumn("VALUE")); + assertEquals(2, rs.findColumn("value")); + assertEquals(2, rs.findColumn("Value")); + assertEquals(2, rs.findColumn("Value")); + assertEquals(1, rs.findColumn("ID")); + assertEquals(1, rs.findColumn("id")); + assertEquals(1, rs.findColumn("Id")); + assertEquals(1, rs.findColumn("iD")); + assertTrue(rs.getShort(2) == -1 && !rs.wasNull()); + assertTrue(rs.getShort("VALUE") == -1 && !rs.wasNull()); + assertTrue(rs.getShort("value") == -1 && !rs.wasNull()); + assertTrue(rs.getShort("Value") == -1 && !rs.wasNull()); + assertTrue(rs.getString("Value").equals("-1") && !rs.wasNull()); + + o = rs.getObject("value"); + trace(o.getClass().getName()); + assertTrue(o.getClass() == Integer.class); + assertTrue(((Number) o).intValue() == -1); + o = rs.getObject("value", Short.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Short); + assertTrue((Short) o == -1); + o = rs.getObject(2); + trace(o.getClass().getName()); + assertTrue(o.getClass() == Integer.class); + assertTrue(((Number) o).intValue() == -1); + o = 
rs.getObject(2, Short.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Short); + assertTrue((Short) o == -1); + assertTrue(rs.getBoolean("Value")); + assertTrue(rs.getByte("Value") == (byte) -1); + assertTrue(rs.getInt("Value") == -1); + assertTrue(rs.getLong("Value") == -1); + assertTrue(rs.getFloat("Value") == -1.0); + assertTrue(rs.getDouble("Value") == -1.0); + + assertTrue(rs.getString("Value").equals("-1") && !rs.wasNull()); + assertTrue(rs.getShort("ID") == 1 && !rs.wasNull()); + assertTrue(rs.getShort("id") == 1 && !rs.wasNull()); + assertTrue(rs.getShort("Id") == 1 && !rs.wasNull()); + assertTrue(rs.getShort(1) == 1 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 2); + assertTrue(rs.getShort(2) == 0 && !rs.wasNull()); + assertFalse(rs.getBoolean(2)); + assertTrue(rs.getByte(2) == 0); + assertTrue(rs.getInt(2) == 0); + assertTrue(rs.getLong(2) == 0); + assertTrue(rs.getFloat(2) == 0.0); + assertTrue(rs.getDouble(2) == 0.0); + assertTrue(rs.getString(2).equals("0") && !rs.wasNull()); + assertTrue(rs.getShort(1) == 2 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 3); + assertTrue(rs.getShort("ID") == 3 && !rs.wasNull()); + assertTrue(rs.getShort("VALUE") == 1 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 4); + assertTrue(rs.getShort("ID") == 4 && !rs.wasNull()); + assertTrue(rs.getShort("VALUE") == Short.MAX_VALUE && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 5); + assertTrue(rs.getShort("id") == 5 && !rs.wasNull()); + assertTrue(rs.getShort("value") == Short.MIN_VALUE && !rs.wasNull()); + assertTrue(rs.getString(1).equals("5") && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 6); + assertTrue(rs.getShort("id") == 6 && !rs.wasNull()); + assertTrue(rs.getShort("value") == 0 && rs.wasNull()); + assertTrue(rs.getShort(2) == 0 && rs.wasNull()); + assertTrue(rs.getShort(1) == 6 && !rs.wasNull()); + assertTrue(rs.getString(1).equals("6") && !rs.wasNull()); + 
assertTrue(rs.getString(2) == null && rs.wasNull()); + o = rs.getObject(2); + assertNull(o); + assertTrue(rs.wasNull()); + o = rs.getObject(2, Short.class); + assertNull(o); + assertTrue(rs.wasNull()); + assertFalse(rs.next()); + assertEquals(0, rs.getRow()); + + stat.execute("DROP TABLE TEST"); + stat.setMaxRows(0); + } + + private void testBigInt() throws SQLException { + trace("Test SMALLINT"); + ResultSet rs; + Object o; + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" BIGINT)"); + stat.execute("INSERT INTO TEST VALUES(1,-1)"); + stat.execute("INSERT INTO TEST VALUES(2,0)"); + stat.execute("INSERT INTO TEST VALUES(3,1)"); + stat.execute("INSERT INTO TEST VALUES(4," + Long.MAX_VALUE + ")"); + stat.execute("INSERT INTO TEST VALUES(5," + Long.MIN_VALUE + ")"); + stat.execute("INSERT INTO TEST VALUES(6,NULL)"); + + // MySQL compatibility (is this required?) + // rs=stat.executeQuery("SELECT * FROM TEST T ORDER BY ID"); + // check(rs.findColumn("T.ID"), 1); + // check(rs.findColumn("T.NAME"), 2); + + rs = stat.executeQuery("SELECT *, NULL AS N FROM TEST ORDER BY ID"); + + // MySQL compatibility + assertEquals(1, rs.findColumn("TEST.ID")); + assertEquals(2, rs.findColumn("TEST.VALUE")); + + assertTrue(rs.getRow() == 0); + assertResultSetMeta(rs, 3, new String[] { "ID", "VALUE", "N" }, + new int[] { Types.INTEGER, Types.BIGINT, + Types.NULL }, new int[] { 32, 64, 1 }, new int[] { 0, 0, 0 }); + rs.next(); + + assertTrue(rs.getRow() == 1); + assertEquals(2, rs.findColumn("VALUE")); + assertEquals(2, rs.findColumn("value")); + assertEquals(2, rs.findColumn("Value")); + assertEquals(2, rs.findColumn("Value")); + assertEquals(1, rs.findColumn("ID")); + assertEquals(1, rs.findColumn("id")); + assertEquals(1, rs.findColumn("Id")); + assertEquals(1, rs.findColumn("iD")); + assertTrue(rs.getLong(2) == -1 && !rs.wasNull()); + assertTrue(rs.getLong("VALUE") == -1 && !rs.wasNull()); + assertTrue(rs.getLong("value") == -1 && !rs.wasNull()); + 
assertTrue(rs.getLong("Value") == -1 && !rs.wasNull()); + assertTrue(rs.getString("Value").equals("-1") && !rs.wasNull()); + + o = rs.getObject("value"); + trace(o.getClass().getName()); + assertTrue(o instanceof Long); + assertTrue((Long) o == -1); + o = rs.getObject("value", Long.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Long); + assertTrue((Long) o == -1); + o = rs.getObject("value", BigInteger.class); + trace(o.getClass().getName()); + assertTrue(o instanceof BigInteger); + assertTrue(((BigInteger) o).longValue() == -1); + o = rs.getObject(2); + trace(o.getClass().getName()); + assertTrue(o instanceof Long); + assertTrue((Long) o == -1); + o = rs.getObject(2, Long.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Long); + assertTrue((Long) o == -1); + o = rs.getObject(2, BigInteger.class); + trace(o.getClass().getName()); + assertTrue(o instanceof BigInteger); + assertTrue(((BigInteger) o).longValue() == -1); + assertTrue(rs.getBoolean("Value")); + assertTrue(rs.getByte("Value") == (byte) -1); + assertTrue(rs.getShort("Value") == -1); + assertTrue(rs.getInt("Value") == -1); + assertTrue(rs.getFloat("Value") == -1.0); + assertTrue(rs.getDouble("Value") == -1.0); + + assertTrue(rs.getString("Value").equals("-1") && !rs.wasNull()); + assertTrue(rs.getLong("ID") == 1 && !rs.wasNull()); + assertTrue(rs.getLong("id") == 1 && !rs.wasNull()); + assertTrue(rs.getLong("Id") == 1 && !rs.wasNull()); + assertTrue(rs.getLong(1) == 1 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 2); + assertTrue(rs.getLong(2) == 0 && !rs.wasNull()); + assertFalse(rs.getBoolean(2)); + assertTrue(rs.getByte(2) == 0); + assertTrue(rs.getShort(2) == 0); + assertTrue(rs.getInt(2) == 0); + assertTrue(rs.getFloat(2) == 0.0); + assertTrue(rs.getDouble(2) == 0.0); + assertTrue(rs.getString(2).equals("0") && !rs.wasNull()); + assertTrue(rs.getLong(1) == 2 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 3); + 
assertTrue(rs.getLong("ID") == 3 && !rs.wasNull()); + assertTrue(rs.getLong("VALUE") == 1 && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 4); + assertTrue(rs.getLong("ID") == 4 && !rs.wasNull()); + assertTrue(rs.getLong("VALUE") == Long.MAX_VALUE && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 5); + assertTrue(rs.getLong("id") == 5 && !rs.wasNull()); + assertTrue(rs.getLong("value") == Long.MIN_VALUE && !rs.wasNull()); + assertTrue(rs.getString(1).equals("5") && !rs.wasNull()); + rs.next(); + + assertTrue(rs.getRow() == 6); + assertTrue(rs.getLong("id") == 6 && !rs.wasNull()); + assertTrue(rs.getLong("value") == 0 && rs.wasNull()); + assertTrue(rs.getLong(2) == 0 && rs.wasNull()); + assertTrue(rs.getLong(1) == 6 && !rs.wasNull()); + assertTrue(rs.getString(1).equals("6") && !rs.wasNull()); + assertTrue(rs.getString(2) == null && rs.wasNull()); + o = rs.getObject(2); + assertNull(o); + assertTrue(rs.wasNull()); + o = rs.getObject(2, Long.class); + assertNull(o); + assertTrue(rs.wasNull()); + assertFalse(rs.next()); + assertEquals(0, rs.getRow()); + + stat.execute("DROP TABLE TEST"); + stat.setMaxRows(0); + } + private void testVarchar() throws SQLException { trace("Test VARCHAR"); ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" VARCHAR(255))"); stat.execute("INSERT INTO TEST VALUES(1,'')"); stat.execute("INSERT INTO TEST VALUES(2,' ')"); stat.execute("INSERT INTO TEST VALUES(3,' ')"); @@ -770,7 +1043,7 @@ private void testVarchar() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.VARCHAR }, new int[] { - 10, 255 }, new int[] { 0, 0 }); + 32, 255 }, new int[] { 0, 0 }); String value; rs.next(); value = rs.getString(2); @@ -801,6 +1074,10 @@ private void testVarchar() throws SQLException { 
trace(o.getClass().getName()); assertTrue(o instanceof String); assertTrue(o.toString().equals("Hi")); + o = rs.getObject("value", String.class); + trace(o.getClass().getName()); + assertTrue(o instanceof String); + assertTrue(o.equals("Hi")); rs.next(); value = rs.getString(2); trace("Value: <" + value + "> (should be: < Hi >)"); @@ -831,7 +1108,7 @@ private void testVarchar() throws SQLException { trace("Value: <" + value + "> (should be: <\\%>)"); assertTrue(rs.getInt(1) == 11 && !rs.wasNull()); assertTrue(rs.getString(2).equals("\\%") && !rs.wasNull()); - assertTrue(!rs.next()); + assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } @@ -840,7 +1117,7 @@ private void testDecimal() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE DECIMAL(10,2))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DECIMAL(10,2))"); stat.execute("INSERT INTO TEST VALUES(1,-1)"); stat.execute("INSERT INTO TEST VALUES(2,.0)"); stat.execute("INSERT INTO TEST VALUES(3,1.)"); @@ -851,30 +1128,34 @@ private void testDecimal() throws SQLException { rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.DECIMAL }, new int[] { - 10, 10 }, new int[] { 0, 2 }); + 32, 10 }, new int[] { 0, 2 }); BigDecimal bd; rs.next(); assertTrue(rs.getInt(1) == 1); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(rs.getInt(2) == -1); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(2); assertTrue(bd.compareTo(new BigDecimal("-1.00")) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); o = rs.getObject(2); trace(o.getClass().getName()); assertTrue(o instanceof BigDecimal); assertTrue(((BigDecimal) o).compareTo(new BigDecimal("-1.00")) == 0); + o = rs.getObject(2, BigDecimal.class); + trace(o.getClass().getName()); + assertTrue(o instanceof BigDecimal); + assertTrue(((BigDecimal) 
o).compareTo(new BigDecimal("-1.00")) == 0); rs.next(); assertTrue(rs.getInt(1) == 2); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(rs.getInt(2) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(2); assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); rs.next(); checkColumnBigDecimal(rs, 2, 1, "1.00"); @@ -891,7 +1172,19 @@ private void testDecimal() throws SQLException { rs.next(); checkColumnBigDecimal(rs, 2, 0, null); - assertTrue(!rs.next()); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DECIMAL(22,2))"); + stat.execute("INSERT INTO TEST VALUES(1,-12345678909876543210)"); + stat.execute("INSERT INTO TEST VALUES(2,12345678901234567890.12345)"); + rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); + rs.next(); + assertEquals(new BigDecimal("-12345678909876543210.00"), rs.getBigDecimal(2)); + assertEquals(new BigInteger("-12345678909876543210"), rs.getObject(2, BigInteger.class)); + rs.next(); + assertEquals(new BigDecimal("12345678901234567890.12"), rs.getBigDecimal(2)); + assertEquals(new BigInteger("12345678901234567890"), rs.getObject(2, BigInteger.class)); stat.execute("DROP TABLE TEST"); } @@ -900,65 +1193,120 @@ private void testDoubleFloat() throws SQLException { ResultSet rs; Object o; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, D DOUBLE, R REAL)"); - stat.execute("INSERT INTO TEST VALUES(1, -1, -1)"); - stat.execute("INSERT INTO TEST VALUES(2,.0, .0)"); - stat.execute("INSERT INTO TEST VALUES(3, 1., 1.)"); - stat.execute("INSERT INTO TEST VALUES(4, 12345678.89, 12345678.89)"); - stat.execute("INSERT INTO TEST VALUES(6, 99999999.99, 99999999.99)"); - stat.execute("INSERT INTO TEST VALUES(7, -99999999.99, -99999999.99)"); - stat.execute("INSERT INTO TEST VALUES(8, NULL, NULL)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, D DOUBLE, 
R REAL, F DECFLOAT)"); + stat.execute("INSERT INTO TEST VALUES(1, -1, -1, -1)"); + stat.execute("INSERT INTO TEST VALUES(2, .0, .0, .0)"); + stat.execute("INSERT INTO TEST VALUES(3, 1., 1., 1.)"); + stat.execute("INSERT INTO TEST VALUES(4, 12345678.89, 12345678.89, 12345678.89)"); + stat.execute("INSERT INTO TEST VALUES(6, 99999999.99, 99999999.99, 99999999.99)"); + stat.execute("INSERT INTO TEST VALUES(7, -99999999.99, -99999999.99, -99999999.99)"); + stat.execute("INSERT INTO TEST VALUES(8, NULL, NULL, NULL)"); + stat.execute("INSERT INTO TEST VALUES(9, '-Infinity', '-Infinity', '-Infinity')"); + stat.execute("INSERT INTO TEST VALUES(10, 'Infinity', 'Infinity', 'Infinity')"); + stat.execute("INSERT INTO TEST VALUES(11, 'NaN', 'NaN', 'NaN')"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); - assertResultSetMeta(rs, 3, new String[] { "ID", "D", "R" }, - new int[] { Types.INTEGER, Types.DOUBLE, Types.REAL }, - new int[] { 10, 17, 7 }, new int[] { 0, 0, 0 }); + assertResultSetMeta(rs, 4, new String[] { "ID", "D", "R", "F" }, + null, + new int[] { 32, 53, 24, 100_000 }, new int[] { 0, 0, 0, 0 }); + ResultSetMetaData md = rs.getMetaData(); + assertEquals("INTEGER", md.getColumnTypeName(1)); + assertEquals("DOUBLE PRECISION", md.getColumnTypeName(2)); + assertEquals("REAL", md.getColumnTypeName(3)); + assertEquals("DECFLOAT", md.getColumnTypeName(4)); BigDecimal bd; rs.next(); assertTrue(rs.getInt(1) == 1); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(rs.getInt(2) == -1); assertTrue(rs.getInt(3) == -1); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(2); assertTrue(bd.compareTo(new BigDecimal("-1.00")) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); o = rs.getObject(2); trace(o.getClass().getName()); assertTrue(o instanceof Double); - assertTrue(((Double) o).compareTo(new Double("-1.00")) == 0); + assertTrue(((Double) o).compareTo(-1d) == 0); + o = rs.getObject(2, Double.class); + 
trace(o.getClass().getName()); + assertTrue(o instanceof Double); + assertTrue(((Double) o).compareTo(-1d) == 0); o = rs.getObject(3); trace(o.getClass().getName()); assertTrue(o instanceof Float); - assertTrue(((Float) o).compareTo(new Float("-1.00")) == 0); + assertTrue(((Float) o).compareTo(-1f) == 0); + o = rs.getObject(3, Float.class); + trace(o.getClass().getName()); + assertTrue(o instanceof Float); + assertTrue(((Float) o).compareTo(-1f) == 0); + o = rs.getObject(4); + trace(o.getClass().getName()); + assertTrue(o instanceof BigDecimal); + assertEquals(BigDecimal.valueOf(-1L, 0), o); + o = rs.getObject(4, BigDecimal.class); + trace(o.getClass().getName()); + assertTrue(o instanceof BigDecimal); + assertEquals(BigDecimal.valueOf(-1L, 0), o); rs.next(); assertTrue(rs.getInt(1) == 2); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(rs.getInt(2) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(rs.getInt(3) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); + assertTrue(rs.getInt(4) == 0); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(2); assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); bd = rs.getBigDecimal(3); assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); + bd = rs.getBigDecimal(4); + assertTrue(bd.compareTo(new BigDecimal("0.00")) == 0); + assertFalse(rs.wasNull()); rs.next(); assertEquals(1.0, rs.getDouble(2)); assertEquals(1.0f, rs.getFloat(3)); + assertEquals(BigDecimal.ONE, rs.getBigDecimal(4)); rs.next(); assertEquals(12345678.89, rs.getDouble(2)); assertEquals(12345678.89f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(12_345_678_89L, 2), rs.getBigDecimal(4)); rs.next(); assertEquals(99999999.99, rs.getDouble(2)); assertEquals(99999999.99f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(99_999_999_99L, 2), rs.getBigDecimal(4)); rs.next(); 
assertEquals(-99999999.99, rs.getDouble(2)); assertEquals(-99999999.99f, rs.getFloat(3)); + assertEquals(BigDecimal.valueOf(-99_999_999_99L, 2), rs.getBigDecimal(4)); rs.next(); checkColumnBigDecimal(rs, 2, 0, null); checkColumnBigDecimal(rs, 3, 0, null); - assertTrue(!rs.next()); + checkColumnBigDecimal(rs, 4, 0, null); + rs.next(); + assertEquals(Float.NEGATIVE_INFINITY, rs.getFloat(2)); + assertEquals(Double.NEGATIVE_INFINITY, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.NEGATIVE_INFINITY, rs.getDouble(4)); + assertEquals("-Infinity", rs.getString(4)); + rs.next(); + assertEquals(Float.POSITIVE_INFINITY, rs.getFloat(2)); + assertEquals(Double.POSITIVE_INFINITY, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.POSITIVE_INFINITY, rs.getDouble(4)); + assertEquals("Infinity", rs.getString(4)); + rs.next(); + assertEquals(Float.NaN, rs.getFloat(2)); + assertEquals(Double.NaN, rs.getDouble(3)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getBigDecimal(4); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(4); + assertEquals(Double.NaN, rs.getDouble(4)); + assertEquals("NaN", rs.getString(4)); + assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } @@ -972,39 +1320,39 @@ private void testDatetime() throws SQLException { assertEquals("99999-12-23", rs.getString(1)); rs = stat.executeQuery("call timestamp '99999-12-23 01:02:03.000'"); rs.next(); - assertEquals("99999-12-23 01:02:03.0", rs.getString(1)); + assertEquals("99999-12-23 01:02:03", rs.getString(1)); rs = stat.executeQuery("call date '-99999-12-23'"); rs.next(); assertEquals("-99999-12-23", rs.getString(1)); rs = stat.executeQuery("call timestamp '-99999-12-23 01:02:03.000'"); rs.next(); - assertEquals("-99999-12-23 
01:02:03.0", rs.getString(1)); + assertEquals("-99999-12-23 01:02:03", rs.getString(1)); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE DATETIME)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" DATETIME)"); stat.execute("INSERT INTO TEST VALUES(1,DATE '2011-11-11')"); stat.execute("INSERT INTO TEST VALUES(2,TIMESTAMP '2002-02-02 02:02:02')"); stat.execute("INSERT INTO TEST VALUES(3,TIMESTAMP '1800-1-1 0:0:0')"); stat.execute("INSERT INTO TEST VALUES(4,TIMESTAMP '9999-12-31 23:59:59')"); stat.execute("INSERT INTO TEST VALUES(5,NULL)"); rs = stat.executeQuery("SELECT 0 ID, " + - "TIMESTAMP '9999-12-31 23:59:59' VALUE FROM TEST ORDER BY ID"); + "TIMESTAMP '9999-12-31 23:59:59' \"VALUE\" FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.TIMESTAMP }, - new int[] { 10, 23 }, new int[] { 0, 10 }); + new int[] { 32, 29 }, new int[] { 0, 9 }); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.TIMESTAMP }, - new int[] { 10, 23 }, new int[] { 0, 10 }); + new int[] { 32, 26 }, new int[] { 0, 6 }); rs.next(); java.sql.Date date; java.sql.Time time; java.sql.Timestamp ts; date = rs.getDate(2); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); time = rs.getTime(2); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); ts = rs.getTimestamp(2); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace("Date: " + date.toString() + " Time:" + time.toString() + " Timestamp:" + ts.toString()); trace("Date ms: " + date.getTime() + " Time ms:" + time.getTime() + @@ -1030,47 +1378,122 @@ private void testDatetime() throws SQLException { assertTrue(((java.sql.Timestamp) o).equals( java.sql.Timestamp.valueOf("2011-11-11 00:00:00.0"))); assertFalse(rs.wasNull()); + o = rs.getObject(2, java.sql.Timestamp.class); + trace(o.getClass().getName()); + assertTrue(o instanceof 
java.sql.Timestamp); + assertTrue(((java.sql.Timestamp) o).equals( + java.sql.Timestamp.valueOf("2011-11-11 00:00:00.0"))); + assertFalse(rs.wasNull()); + o = rs.getObject(2, java.util.Date.class); + assertTrue(o.getClass() == java.util.Date.class); + assertEquals(((java.util.Date) o).getTime(), + java.sql.Timestamp.valueOf("2011-11-11 00:00:00.0").getTime()); + o = rs.getObject(2, Calendar.class); + assertTrue(o instanceof Calendar); + assertEquals(((Calendar) o).getTimeInMillis(), + java.sql.Timestamp.valueOf("2011-11-11 00:00:00.0").getTime()); rs.next(); date = rs.getDate("VALUE"); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); time = rs.getTime("VALUE"); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); ts = rs.getTimestamp("VALUE"); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace("Date: " + date.toString() + " Time:" + time.toString() + " Timestamp:" + ts.toString()); assertEquals("2002-02-02", date.toString()); assertEquals("02:02:02", time.toString()); assertEquals("2002-02-02 02:02:02.0", ts.toString()); rs.next(); - assertEquals("1800-01-01", rs.getDate("value").toString()); + + assertEquals("1800-01-01", rs.getObject("value", LocalDate.class).toString()); assertEquals("00:00:00", rs.getTime("value").toString()); - assertEquals("1800-01-01 00:00:00.0", - rs.getTimestamp("value").toString()); + assertEquals("00:00", rs.getObject("value", LocalTime.class).toString()); + assertEquals("1800-01-01T00:00", rs.getObject("value", LocalDateTime.class).toString()); rs.next(); + assertEquals("9999-12-31", rs.getDate("Value").toString()); + assertEquals("9999-12-31", rs.getObject("Value", LocalDate.class).toString()); assertEquals("23:59:59", rs.getTime("Value").toString()); - assertEquals("9999-12-31 23:59:59.0", - rs.getTimestamp("Value").toString()); + assertEquals("23:59:59", rs.getObject("Value", LocalTime.class).toString()); + assertEquals("9999-12-31 23:59:59.0", rs.getTimestamp("Value").toString()); + 
assertEquals("9999-12-31T23:59:59", rs.getObject("Value", LocalDateTime.class).toString()); rs.next(); + assertTrue(rs.getDate("Value") == null && rs.wasNull()); assertTrue(rs.getTime("vALUe") == null && rs.wasNull()); assertTrue(rs.getTimestamp(2) == null && rs.wasNull()); - assertTrue(!rs.next()); + assertTrue(rs.getObject(2, LocalDateTime.class) == null && rs.wasNull()); + assertFalse(rs.next()); rs = stat.executeQuery("SELECT DATE '2001-02-03' D, " + "TIME '14:15:16', " + "TIMESTAMP '2007-08-09 10:11:12.141516171' TS FROM TEST"); rs.next(); + date = (Date) rs.getObject(1); time = (Time) rs.getObject(2); ts = (Timestamp) rs.getObject(3); assertEquals("2001-02-03", date.toString()); assertEquals("14:15:16", time.toString()); assertEquals("2007-08-09 10:11:12.141516171", ts.toString()); + date = rs.getObject(1, Date.class); + time = rs.getObject(2, Time.class); + ts = rs.getObject(3, Timestamp.class); + assertEquals("2001-02-03", date.toString()); + assertEquals("14:15:16", time.toString()); + assertEquals("2007-08-09 10:11:12.141516171", ts.toString()); + assertEquals("2001-02-03", rs.getObject(1, LocalDate.class).toString()); + assertEquals("14:15:16", rs.getObject(2, LocalTime.class).toString()); + assertEquals("2007-08-09T10:11:12.141516171", rs.getObject(3, LocalDateTime.class).toString()); stat.execute("DROP TABLE TEST"); + + rs = stat.executeQuery("SELECT LOCALTIME, CURRENT_TIME"); + rs.next(); + assertEquals(rs.getTime(1), rs.getTime(2)); + rs = stat.executeQuery("SELECT LOCALTIMESTAMP, CURRENT_TIMESTAMP"); + rs.next(); + assertEquals(rs.getTimestamp(1), rs.getTimestamp(2)); + + rs = stat.executeQuery("SELECT DATE '-1000000000-01-01', " + "DATE '1000000000-12-31'"); + rs.next(); + assertEquals("-999999999-01-01", rs.getObject(1, LocalDate.class).toString()); + assertEquals("+999999999-12-31", rs.getObject(2, LocalDate.class).toString()); + + rs = stat.executeQuery("SELECT TIMESTAMP '-1000000000-01-01 00:00:00', " + + "TIMESTAMP '1000000000-12-31 
23:59:59.999999999'"); + rs.next(); + assertEquals("-999999999-01-01T00:00", rs.getObject(1, LocalDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999", rs.getObject(2, LocalDateTime.class).toString()); + + rs = stat.executeQuery("SELECT TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00Z', " + + "TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999Z', " + + "TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00+18', " + + "TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999-18'"); + rs.next(); + assertEquals("-999999999-01-01T00:00Z", rs.getObject(1, OffsetDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999Z", rs.getObject(2, OffsetDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00+18:00", rs.getObject(3, OffsetDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999-18:00", rs.getObject(4, OffsetDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00Z", rs.getObject(1, ZonedDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999Z", rs.getObject(2, ZonedDateTime.class).toString()); + assertEquals("-999999999-01-01T00:00+18:00", rs.getObject(3, ZonedDateTime.class).toString()); + assertEquals("+999999999-12-31T23:59:59.999999999-18:00", rs.getObject(4, ZonedDateTime.class).toString()); + assertEquals("-1000000000-01-01T00:00:00Z", rs.getObject(1, Instant.class).toString()); + assertEquals("+1000000000-12-31T23:59:59.999999999Z", rs.getObject(2, Instant.class).toString()); + assertEquals("-1000000000-01-01T00:00:00Z", rs.getObject(3, Instant.class).toString()); + assertEquals("+1000000000-12-31T23:59:59.999999999Z", rs.getObject(4, Instant.class).toString()); + + rs = stat.executeQuery("SELECT LOCALTIME, CURRENT_TIME"); + rs.next(); + assertEquals(rs.getObject(1, LocalTime.class), rs.getObject(2, LocalTime.class)); + assertEquals(rs.getObject(1, OffsetTime.class), rs.getObject(2, OffsetTime.class)); + rs = 
stat.executeQuery("SELECT LOCALTIMESTAMP, CURRENT_TIMESTAMP"); + rs.next(); + assertEquals(rs.getObject(1, LocalDateTime.class), rs.getObject(2, LocalDateTime.class)); + assertEquals(rs.getObject(1, OffsetDateTime.class), rs.getObject(2, OffsetDateTime.class)); } private void testDatetimeWithCalendar() throws SQLException { @@ -1078,11 +1501,11 @@ private void testDatetimeWithCalendar() throws SQLException { ResultSet rs; stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, " + - "D DATE, T TIME, TS TIMESTAMP)"); + "D DATE, T TIME, TS TIMESTAMP(9))"); PreparedStatement prep = conn.prepareStatement( "INSERT INTO TEST VALUES(?, ?, ?, ?)"); - Calendar regular = Calendar.getInstance(); - Calendar other = null; + GregorianCalendar regular = new GregorianCalendar(); + GregorianCalendar other = null; // search a locale that has a _different_ raw offset long testTime = java.sql.Date.valueOf("2001-02-03").getTime(); for (String s : TimeZone.getAvailableIDs()) { @@ -1094,7 +1517,7 @@ private void testDatetimeWithCalendar() throws SQLException { if (rawOffsetDiff != 0 && rawOffsetDiff != 1000 * 60 * 60 * 24) { if (regular.getTimeZone().getOffset(testTime) != zone.getOffset(testTime)) { - other = Calendar.getInstance(zone); + other = new GregorianCalendar(zone); break; } } @@ -1136,12 +1559,18 @@ private void testDatetimeWithCalendar() throws SQLException { java.sql.Timestamp.valueOf("2107-08-09 10:11:12.131415")); prep.execute(); + prep.setInt(1, 5); + prep.setDate(2, java.sql.Date.valueOf("2101-02-03"), null); + prep.setTime(3, java.sql.Time.valueOf("14:05:06"), null); + prep.setTimestamp(4, java.sql.Timestamp.valueOf("2107-08-09 10:11:12.131415"), null); + prep.execute(); + rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 4, new String[] { "ID", "D", "T", "TS" }, new int[] { Types.INTEGER, Types.DATE, Types.TIME, Types.TIMESTAMP }, - new int[] { 10, 8, 6, 23 }, new int[] { 0, 0, 0, 10 }); + new int[] { 32, 10, 8, 29 }, new int[] { 0, 0, 0, 9 
}); rs.next(); assertEquals(0, rs.getInt(1)); @@ -1184,52 +1613,154 @@ private void testDatetimeWithCalendar() throws SQLException { assertEquals("14:05:06", rs.getTime("T").toString()); assertEquals("2101-02-03", rs.getDate("D").toString()); + rs.next(); + assertEquals(5, rs.getInt("ID")); + assertEquals("2107-08-09 10:11:12.131415", + rs.getTimestamp("TS").toString()); + assertEquals("14:05:06", rs.getTime("T").toString()); + assertEquals("2101-02-03", rs.getDate("D").toString()); + assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } + private void testInterval() throws SQLException { + trace("Test INTERVAL"); + ResultSet rs; + + rs = stat.executeQuery("CALL INTERVAL '10' YEAR"); + rs.next(); + assertEquals("INTERVAL '10' YEAR", rs.getString(1)); + Interval expected = new Interval(IntervalQualifier.YEAR, false, 10, 0); + assertEquals(expected, rs.getObject(1)); + assertEquals(expected, rs.getObject(1, Interval.class)); + ResultSetMetaData metaData = rs.getMetaData(); + assertEquals(Types.OTHER, metaData.getColumnType(1)); + assertEquals("INTERVAL YEAR", metaData.getColumnTypeName(1)); + assertEquals(Interval.class.getName(), metaData.getColumnClassName(1)); + assertEquals("INTERVAL '-111222333444555666' YEAR".length(), metaData.getColumnDisplaySize(1)); + // Intervals are not numbers + assertFalse(metaData.isSigned(1)); + } + + private void testInterval8() throws SQLException { + trace("Test INTERVAL 8"); + ResultSet rs; + + rs = stat.executeQuery("CALL INTERVAL '1-2' YEAR TO MONTH"); + rs.next(); + assertEquals("INTERVAL '1-2' YEAR TO MONTH", rs.getString(1)); + assertEquals(Period.of(1, 2, 0), rs.getObject(1, Period.class)); + assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, Duration.class); + + rs = stat.executeQuery("CALL INTERVAL '-3.1' SECOND"); + rs.next(); + assertEquals("INTERVAL '-3.1' SECOND", rs.getString(1)); + assertEquals(Duration.ofSeconds(-4, 900_000_000), rs.getObject(1, Duration.class)); + 
assertThrows(ErrorCode.DATA_CONVERSION_ERROR_1, rs).getObject(1, Period.class); + } + private void testBlob() throws SQLException { trace("Test BLOB"); ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE BLOB)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" BLOB)"); stat.execute("INSERT INTO TEST VALUES(1,X'01010101')"); stat.execute("INSERT INTO TEST VALUES(2,X'02020202')"); stat.execute("INSERT INTO TEST VALUES(3,X'00')"); stat.execute("INSERT INTO TEST VALUES(4,X'ffffff')"); stat.execute("INSERT INTO TEST VALUES(5,X'0bcec1')"); - stat.execute("INSERT INTO TEST VALUES(6,NULL)"); + stat.execute("INSERT INTO TEST VALUES(6,X'03030303')"); + stat.execute("INSERT INTO TEST VALUES(7,NULL)"); + byte[] random = new byte[0x10000]; + MathUtils.randomBytes(random); + stat.execute("INSERT INTO TEST VALUES(8, X'" + StringUtils.convertBytesToHex(random) + "')"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.BLOB }, new int[] { - 10, Integer.MAX_VALUE }, new int[] { 0, 0 }); + 32, Integer.MAX_VALUE }, new int[] { 0, 0 }); rs.next(); + assertEqualsWithNull(new byte[] { (byte) 0x01, (byte) 0x01, (byte) 0x01, (byte) 0x01 }, rs.getBytes(2)); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); + assertEqualsWithNull(new byte[] { (byte) 0x01, (byte) 0x01, + (byte) 0x01, (byte) 0x01 }, + rs.getObject(2, byte[].class)); + assertFalse(rs.wasNull()); rs.next(); + assertEqualsWithNull(new byte[] { (byte) 0x02, (byte) 0x02, (byte) 0x02, (byte) 0x02 }, rs.getBytes("value")); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); + assertEqualsWithNull(new byte[] { (byte) 0x02, (byte) 0x02, + (byte) 0x02, (byte) 0x02 }, + rs.getObject("value", byte[].class)); + assertFalse(rs.wasNull()); rs.next(); + assertEqualsWithNull(new byte[] { (byte) 0x00 }, readAllBytes(rs.getBinaryStream(2))); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); 
rs.next(); + assertEqualsWithNull(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff }, readAllBytes(rs.getBinaryStream("VaLuE"))); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); rs.next(); + InputStream in = rs.getBinaryStream("value"); byte[] b = readAllBytes(in); assertEqualsWithNull(new byte[] { (byte) 0x0b, (byte) 0xce, (byte) 0xc1 }, b); - assertTrue(!rs.wasNull()); + Blob blob = rs.getObject("value", Blob.class); + try { + assertNotNull(blob); + assertEqualsWithNull(new byte[] { (byte) 0x0b, (byte) 0xce, (byte) 0xc1 }, + readAllBytes(blob.getBinaryStream())); + assertEqualsWithNull(new byte[] { (byte) 0xce, + (byte) 0xc1 }, readAllBytes(blob.getBinaryStream(2, 2))); + assertFalse(rs.wasNull()); + } finally { + blob.free(); + } + assertFalse(rs.wasNull()); + rs.next(); + + blob = rs.getObject("value", Blob.class); + try { + assertNotNull(blob); + assertEqualsWithNull(new byte[] { (byte) 0x03, (byte) 0x03, + (byte) 0x03, (byte) 0x03 }, readAllBytes(blob.getBinaryStream())); + assertEqualsWithNull(new byte[] { (byte) 0x03, + (byte) 0x03 }, readAllBytes(blob.getBinaryStream(2, 2))); + assertFalse(rs.wasNull()); + assertThrows(ErrorCode.INVALID_VALUE_2, blob).getBinaryStream(5, 1); + } finally { + blob.free(); + } rs.next(); + assertEqualsWithNull(null, readAllBytes(rs.getBinaryStream("VaLuE"))); assertTrue(rs.wasNull()); - assertTrue(!rs.next()); + rs.next(); + + blob = rs.getObject("value", Blob.class); + try { + assertNotNull(blob); + assertEqualsWithNull(random, readAllBytes(blob.getBinaryStream())); + byte[] expected = Arrays.copyOfRange(random, 100, 50102); + byte[] got = readAllBytes(blob.getBinaryStream(101, 50002)); + assertEqualsWithNull(expected, got); + assertFalse(rs.wasNull()); + assertThrows(ErrorCode.INVALID_VALUE_2, blob).getBinaryStream(0x10001, 1); + assertThrows(ErrorCode.INVALID_VALUE_2, blob).getBinaryStream(0x10002, 0); + } finally { + blob.free(); + } + + assertFalse(rs.next()); stat.execute("DROP TABLE TEST"); } @@ -1239,66 
+1770,90 @@ private void testClob() throws SQLException { String string; stat = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE CLOB)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,\"VALUE\" CLOB)"); stat.execute("INSERT INTO TEST VALUES(1,'Test')"); stat.execute("INSERT INTO TEST VALUES(2,'Hello')"); stat.execute("INSERT INTO TEST VALUES(3,'World!')"); stat.execute("INSERT INTO TEST VALUES(4,'Hallo')"); stat.execute("INSERT INTO TEST VALUES(5,'Welt!')"); - stat.execute("INSERT INTO TEST VALUES(6,NULL)"); + stat.execute("INSERT INTO TEST VALUES(6,'Test2')"); stat.execute("INSERT INTO TEST VALUES(7,NULL)"); + stat.execute("INSERT INTO TEST VALUES(8,NULL)"); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); assertResultSetMeta(rs, 2, new String[] { "ID", "VALUE" }, new int[] { Types.INTEGER, Types.CLOB }, new int[] { - 10, Integer.MAX_VALUE }, new int[] { 0, 0 }); + 32, Integer.MAX_VALUE }, new int[] { 0, 0 }); rs.next(); Object obj = rs.getObject(2); assertTrue(obj instanceof java.sql.Clob); string = rs.getString(2); assertTrue(string != null && string.equals("Test")); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); rs.next(); InputStreamReader reader = null; try { - reader = new InputStreamReader(rs.getAsciiStream(2), "ISO-8859-1"); + reader = new InputStreamReader(rs.getAsciiStream(2), StandardCharsets.ISO_8859_1); } catch (Exception e) { assertTrue(false); } string = readString(reader); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace(string); assertTrue(string != null && string.equals("Hello")); rs.next(); try { - reader = new InputStreamReader(rs.getAsciiStream("value"), "ISO-8859-1"); + reader = new InputStreamReader(rs.getAsciiStream("value"), StandardCharsets.ISO_8859_1); } catch (Exception e) { assertTrue(false); } string = readString(reader); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace(string); 
assertTrue(string != null && string.equals("World!")); rs.next(); + string = readString(rs.getCharacterStream(2)); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace(string); assertTrue(string != null && string.equals("Hallo")); + Clob clob = rs.getClob(2); + try { + assertEquals("all", readString(clob.getCharacterStream(2, 3))); + assertThrows(ErrorCode.INVALID_VALUE_2, clob).getCharacterStream(6, 1); + assertThrows(ErrorCode.INVALID_VALUE_2, clob).getCharacterStream(7, 0); + } finally { + clob.free(); + } rs.next(); + string = readString(rs.getCharacterStream("value")); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); trace(string); assertTrue(string != null && string.equals("Welt!")); rs.next(); - assertTrue(rs.getCharacterStream(2) == null); + + clob = rs.getObject("value", Clob.class); + try { + assertNotNull(clob); + string = readString(clob.getCharacterStream()); + assertTrue(string != null && string.equals("Test2")); + assertFalse(rs.wasNull()); + } finally { + clob.free(); + } + rs.next(); + + assertNull(rs.getCharacterStream(2)); assertTrue(rs.wasNull()); rs.next(); - assertTrue(rs.getAsciiStream("Value") == null); + + assertNull(rs.getAsciiStream("Value")); assertTrue(rs.wasNull()); assertTrue(rs.getStatement() == stat); - assertTrue(rs.getWarnings() == null); + assertNull(rs.getWarnings()); rs.clearWarnings(); - assertTrue(rs.getWarnings() == null); + assertNull(rs.getWarnings()); assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); assertEquals(ResultSet.CONCUR_UPDATABLE, rs.getConcurrency()); rs.next(); @@ -1308,19 +1863,23 @@ private void testClob() throws SQLException { private void testArray() throws SQLException { trace("Test ARRAY"); ResultSet rs; - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE ARRAY)"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" INTEGER ARRAY)"); PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); prep.setInt(1, 1); - prep.setObject(2, 
new Object[] { new Integer(1), new Integer(2) }); + prep.setObject(2, new Object[] { 1, 2 }); prep.execute(); prep.setInt(1, 2); prep.setObject(2, new Object[] { 11, 12 }); prep.execute(); + prep.setInt(1, 3); + prep.setObject(2, new Object[0]); + prep.execute(); prep.close(); rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); + assertEquals("INTEGER ARRAY", rs.getMetaData().getColumnTypeName(2)); rs.next(); assertEquals(1, rs.getInt(1)); - Object[] list = (Object[]) rs.getObject(2); + Object[] list = (Object[]) ((Array) rs.getObject(2)).getArray(); assertEquals(1, ((Integer) list[0]).intValue()); assertEquals(2, ((Integer) list[1]).intValue()); @@ -1330,9 +1889,10 @@ private void testArray() throws SQLException { assertEquals(2, ((Integer) list2[1]).intValue()); list2 = (Object[]) array.getArray(2, 1); assertEquals(2, ((Integer) list2[0]).intValue()); + rs.next(); assertEquals(2, rs.getInt(1)); - list = (Object[]) rs.getObject(2); + list = (Object[]) ((Array) rs.getObject(2)).getArray(); assertEquals(11, ((Integer) list[0]).intValue()); assertEquals(12, ((Integer) list[1]).intValue()); @@ -1343,13 +1903,35 @@ private void testArray() throws SQLException { list2 = (Object[]) array.getArray(2, 1); assertEquals(12, ((Integer) list2[0]).intValue()); - list2 = (Object[]) array.getArray(Collections.>emptyMap()); + list2 = (Object[]) array.getArray(Collections.emptyMap()); assertEquals(11, ((Integer) list2[0]).intValue()); - assertEquals(Types.NULL, array.getBaseType()); - assertEquals("NULL", array.getBaseTypeName()); + assertEquals(Types.INTEGER, array.getBaseType()); + assertEquals("INTEGER", array.getBaseTypeName()); + + assertTrue(array.toString().endsWith(": ARRAY [11, 12]")); + + rs.next(); + assertEquals(3, rs.getInt(1)); + list = (Object[]) ((Array) rs.getObject(2)).getArray(); + assertEquals(0, list.length); + + array = rs.getArray("VALUE"); + list2 = (Object[]) array.getArray(); + assertEquals(0, list2.length); + list2 = (Object[]) array.getArray(1, 0); 
+ assertEquals(0, list2.length); + list2 = (Object[]) array.getArray(1, 1); + assertEquals(0, list2.length); + + list2 = (Object[]) array.getArray(Collections.emptyMap()); + assertEquals(0, list2.length); + + // TODO + // assertEquals(Types.INTEGER, array.getBaseType()); + // assertEquals("INTEGER", array.getBaseTypeName()); - assertTrue(array.toString().endsWith(": (11, 12)")); + assertTrue(array.toString().endsWith(": ARRAY []")); // free array.free(); @@ -1359,9 +1941,119 @@ private void testArray() throws SQLException { assertThrows(ErrorCode.OBJECT_CLOSED, array).getResultSet(); assertFalse(rs.next()); + + try (Statement s = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE)) { + rs = s.executeQuery("SELECT * FROM TEST ORDER BY ID"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + rs.updateArray(2, conn.createArrayOf("INT", new Object[] {10, 20})); + rs.updateRow(); + assertTrue(rs.next()); + rs.updateArray("VALUE", conn.createArrayOf("INT", new Object[] {11, 22})); + rs.updateRow(); + assertTrue(rs.next()); + assertFalse(rs.next()); + rs.moveToInsertRow(); + rs.updateInt(1, 4); + rs.updateArray(2, null); + rs.insertRow(); + } + + rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertEquals(new Object[] {10, 20}, (Object[]) ((Array) rs.getObject(2)).getArray()); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertEquals(new Object[] {11, 22}, (Object[]) ((Array) rs.getObject(2)).getArray()); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertEquals(new Object[0], (Object[]) ((Array) rs.getObject(2)).getArray()); + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + assertNull(rs.getObject(2)); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); } + private void testRowValue() throws SQLException { + trace("Test ROW value"); + ResultSet rs; + rs = stat.executeQuery("SELECT (1, 'test')"); + 
assertEquals("ROW(\"C1\" INTEGER, \"C2\" CHARACTER VARYING(4))", rs.getMetaData().getColumnTypeName(1)); + rs.next(); + testRowValue((ResultSet) rs.getObject(1)); + ResultSet rowAsResultSet = rs.getObject(1, ResultSet.class); + testRowValue(rowAsResultSet); + } + + private void testRowValue(ResultSet rowAsResultSet) throws SQLException { + ResultSetMetaData md = rowAsResultSet.getMetaData(); + assertEquals(2, md.getColumnCount()); + assertEquals("C1", md.getColumnLabel(1)); + assertEquals("C1", md.getColumnName(1)); + assertEquals("C2", md.getColumnLabel(2)); + assertEquals("C2", md.getColumnName(2)); + assertEquals(Types.INTEGER, md.getColumnType(1)); + assertEquals(Types.VARCHAR, md.getColumnType(2)); + assertTrue(rowAsResultSet.next()); + assertEquals(1, rowAsResultSet.getInt(1)); + assertEquals("test", rowAsResultSet.getString(2)); + assertFalse(rowAsResultSet.next()); + } + + private void testEnum() throws SQLException { + trace("Test ENUM"); + + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, \"VALUE\" ENUM('A', 'B', 'C', 'D', 'E', 'F', 'G'))"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)"); + prep.setInt(1, 1); + prep.setString(2, "A"); + prep.executeUpdate(); + prep.setInt(1, 2); + prep.setObject(2, "B"); + prep.executeUpdate(); + prep.setInt(1, 3); + prep.setInt(2, 3); + prep.executeUpdate(); + prep.setInt(1, 4); + prep.setObject(2, "D", Types.VARCHAR); + prep.executeUpdate(); + prep.setInt(1, 5); + prep.setObject(2, "E", Types.OTHER); + prep.executeUpdate(); + prep.setInt(1, 6); + prep.setObject(2, 6, Types.OTHER); + prep.executeUpdate(); + prep.setInt(1, 7); + prep.setObject(2, 7, Types.INTEGER); + prep.executeUpdate(); + + ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); + assertEquals("ENUM('A', 'B', 'C', 'D', 'E', 'F', 'G')", rs.getMetaData().getColumnTypeName(2)); + testEnumResult(rs, 1, "A", 1); + testEnumResult(rs, 2, "B", 2); + testEnumResult(rs, 3, "C", 3); + testEnumResult(rs, 4, "D", 
4); + testEnumResult(rs, 5, "E", 5); + testEnumResult(rs, 6, "F", 6); + testEnumResult(rs, 7, "G", 7); + assertFalse(rs.next()); + + stat.execute("DROP TABLE TEST"); + } + + private void testEnumResult(ResultSet rs, int id, String name, int ordinal) throws SQLException { + assertTrue(rs.next()); + assertEquals(id, rs.getInt(1)); + assertEquals(name, rs.getString(2)); + assertEquals(name, rs.getObject(2)); + assertEquals(name, rs.getObject(2, String.class)); + assertEquals(ordinal, rs.getInt(2)); + assertEquals((Integer) ordinal, rs.getObject(2, Integer.class)); + } + private byte[] readAllBytes(InputStream in) { if (in == null) { return null; @@ -1399,7 +2091,7 @@ private void checkColumnBigDecimal(ResultSet rs, int column, int i, assertTrue(rs.wasNull()); } else { trace("BigDecimal i=" + i + " bd=" + bd + " ; i1=" + i1 + " bd1=" + bd1); - assertTrue(!rs.wasNull()); + assertFalse(rs.wasNull()); assertTrue(i1 == i); assertTrue(bd1.compareTo(new BigDecimal(bd)) == 0); } diff --git a/h2/src/test/org/h2/test/jdbc/TestSQLXML.java b/h2/src/test/org/h2/test/jdbc/TestSQLXML.java new file mode 100644 index 0000000000..940b803570 --- /dev/null +++ b/h2/src/test/org/h2/test/jdbc/TestSQLXML.java @@ -0,0 +1,217 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.jdbc; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.StringReader; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; + +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamReader; +import javax.xml.transform.Result; +import javax.xml.transform.Source; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMResult; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +import org.h2.api.ErrorCode; +import org.h2.jdbc.JdbcConnection; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.IOUtils; +import org.w3c.dom.Node; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + +/** + * Test the SQLXML implementation. + */ +public class TestSQLXML extends TestDb { + private static final String XML = "Text"; + + private JdbcConnection conn; + private Statement stat; + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb(getTestName()); + conn = (JdbcConnection) getConnection(getTestName()); + stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, X CLOB)"); + stat.execute("INSERT INTO TEST VALUES (1, NULL)"); + testGetters(); + testSetters(); + conn.close(); + deleteDb(getTestName()); + } + + private void testGetters() throws SQLException, IOException, XMLStreamException { + ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); + assertTrue(rs.next()); + assertNull(rs.getSQLXML(2)); + assertNull(rs.getSQLXML("X")); + assertEquals(1, stat.executeUpdate("UPDATE TEST SET X = '" + XML + '\'')); + rs = stat.executeQuery("SELECT * FROM TEST"); + assertTrue(rs.next()); + // ResultSet.getObject() + SQLXML sqlxml = rs.getObject(2, SQLXML.class); + assertEquals(XML, sqlxml.getString()); + + sqlxml = rs.getSQLXML(2); + // getBinaryStream() + assertEquals(XML, IOUtils.readStringAndClose(IOUtils.getReader(sqlxml.getBinaryStream()), -1)); + // getCharacterStream() + assertEquals(XML, IOUtils.readStringAndClose(sqlxml.getCharacterStream(), -1)); + // getString() + assertEquals(XML, sqlxml.getString()); + // DOMSource + DOMSource domSource = sqlxml.getSource(DOMSource.class); + Node n = domSource.getNode().getFirstChild(); + assertEquals("xml", n.getNodeName()); + assertEquals("v", n.getAttributes().getNamedItem("a").getNodeValue()); + assertEquals("Text", n.getFirstChild().getNodeValue()); + // SAXSource + SAXSource saxSource = sqlxml.getSource(SAXSource.class); + assertEquals(XML, + IOUtils.readStringAndClose(IOUtils.getReader(saxSource.getInputSource().getByteStream()), -1)); + // StAXSource + StAXSource staxSource = sqlxml.getSource(StAXSource.class); + XMLStreamReader stxReader = staxSource.getXMLStreamReader(); + assertEquals(XMLStreamReader.START_DOCUMENT, stxReader.getEventType()); + 
assertEquals(XMLStreamReader.START_ELEMENT, stxReader.next()); + assertEquals("xml", stxReader.getLocalName()); + assertEquals("a", stxReader.getAttributeLocalName(0)); + assertEquals("v", stxReader.getAttributeValue(0)); + assertEquals(XMLStreamReader.CHARACTERS, stxReader.next()); + assertEquals("Text", stxReader.getText()); + assertEquals(XMLStreamReader.END_ELEMENT, stxReader.next()); + assertEquals(XMLStreamReader.END_DOCUMENT, stxReader.next()); + // StreamSource + StreamSource streamSource = sqlxml.getSource(StreamSource.class); + assertEquals(XML, IOUtils.readStringAndClose(IOUtils.getReader(streamSource.getInputStream()), -1)); + // something illegal + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, sqlxml).getSource(Source.class); + } + + private void testSetters() throws SQLException, IOException, SAXException, ParserConfigurationException, + TransformerConfigurationException, TransformerException { + // setBinaryStream() + SQLXML sqlxml = conn.createSQLXML(); + try (OutputStream out = sqlxml.setBinaryStream()) { + out.write(XML.getBytes(StandardCharsets.UTF_8)); + } + testSettersImpl(sqlxml); + // setCharacterStream() + sqlxml = conn.createSQLXML(); + try (Writer out = sqlxml.setCharacterStream()) { + out.write(XML); + } + testSettersImpl(sqlxml); + // setString() + sqlxml = conn.createSQLXML(); + sqlxml.setString(XML); + testSettersImpl(sqlxml); + + TransformerFactory tf = TransformerFactory.newInstance(); + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + DOMSource domSource = new DOMSource(dbf.newDocumentBuilder().parse(new InputSource(new StringReader(XML)))); + // DOMResult + sqlxml = conn.createSQLXML(); + tf.newTransformer().transform(domSource, sqlxml.setResult(DOMResult.class)); + testSettersImpl(sqlxml); + // SAXResult + sqlxml = conn.createSQLXML(); + tf.newTransformer().transform(domSource, sqlxml.setResult(SAXResult.class)); + testSettersImpl(sqlxml); + // StAXResult + sqlxml = conn.createSQLXML(); + 
tf.newTransformer().transform(domSource, sqlxml.setResult(StAXResult.class)); + testSettersImpl(sqlxml); + // StreamResult + sqlxml = conn.createSQLXML(); + tf.newTransformer().transform(domSource, sqlxml.setResult(StreamResult.class)); + testSettersImpl(sqlxml); + // something illegal + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, sqlxml).setResult(Result.class); + // null + testSettersImpl(null); + } + + private void assertXML(String actual) { + if (actual.startsWith("") + 2); + } + assertEquals(XML, actual); + } + + private void testSettersImplAssert(SQLXML sqlxml) throws SQLException { + ResultSet rs = stat.executeQuery("SELECT X FROM TEST"); + assertTrue(rs.next()); + SQLXML v = rs.getSQLXML(1); + if (sqlxml != null) { + assertXML(v.getString()); + } else { + assertNull(v); + } + } + + private void testSettersImpl(SQLXML sqlxml) throws SQLException { + PreparedStatement prep = conn.prepareStatement("UPDATE TEST SET X = ?"); + prep.setSQLXML(1, sqlxml); + assertEquals(1, prep.executeUpdate()); + testSettersImplAssert(sqlxml); + + prep.setObject(1, sqlxml); + assertEquals(1, prep.executeUpdate()); + testSettersImplAssert(sqlxml); + + Statement st = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT * FROM TEST FOR UPDATE"); + assertTrue(rs.next()); + rs.updateSQLXML(2, sqlxml); + rs.updateRow(); + testSettersImplAssert(sqlxml); + rs = st.executeQuery("SELECT * FROM TEST FOR UPDATE"); + assertTrue(rs.next()); + rs.updateSQLXML("X", sqlxml); + rs.updateRow(); + testSettersImplAssert(sqlxml); + + rs = st.executeQuery("SELECT * FROM TEST FOR UPDATE"); + assertTrue(rs.next()); + rs.updateObject(2, sqlxml); + rs.updateRow(); + testSettersImplAssert(sqlxml); + } + +} diff --git a/h2/src/test/org/h2/test/jdbc/TestStatement.java b/h2/src/test/org/h2/test/jdbc/TestStatement.java index fd69432b1f..658b68fcb5 100644 --- a/h2/src/test/org/h2/test/jdbc/TestStatement.java +++ 
b/h2/src/test/org/h2/test/jdbc/TestStatement.java @@ -1,30 +1,30 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; +import java.util.Arrays; import java.util.HashMap; -import java.util.Properties; import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; import org.h2.jdbc.JdbcStatement; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Tests for the Statement implementation. */ -public class TestStatement extends TestBase { +public class TestStatement extends TestDb { private Connection conn; @@ -34,7 +34,7 @@ public class TestStatement extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -47,10 +47,13 @@ public void test() throws Exception { testSavepoint(); testConnectionRollback(); testStatement(); + testPreparedStatement(); + testCloseOnCompletion(); testIdentityMerge(); - testIdentity(); conn.close(); deleteDb("statement"); + testIdentifiers(); + deleteDb("statement"); } private void testUnwrap() throws SQLException { @@ -68,16 +71,11 @@ private void testUnwrap() throws SQLException { private void testUnsupportedOperations() throws Exception { conn.setTypeMap(null); - HashMap> map = New.hashMap(); + HashMap> map = new HashMap<>(); conn.setTypeMap(map); map.put("x", Object.class); assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, conn). 
setTypeMap(map); - - assertThrows(SQLClientInfoException.class, conn). - setClientInfo("X", "Y"); - assertThrows(SQLClientInfoException.class, conn). - setClientInfo(new Properties()); } private void testTraceError() throws Exception { @@ -207,13 +205,14 @@ private void testStatement() throws SQLException { assertEquals(ResultSet.CONCUR_READ_ONLY, stat2.getResultSetConcurrency()); assertEquals(0, stat.getMaxFieldSize()); - assertTrue(!((JdbcStatement) stat2).isClosed()); + assertFalse(stat2.isClosed()); stat2.close(); - assertTrue(((JdbcStatement) stat2).isClosed()); + assertTrue(stat2.isClosed()); ResultSet rs; int count; + long largeCount; boolean result; stat.execute("CREATE TABLE TEST(ID INT)"); @@ -239,28 +238,37 @@ private void testStatement() throws SQLException { assertTrue(stat.getQueryTimeout() == 0); trace("executeUpdate"); count = stat.executeUpdate( - "CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + "CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); assertEquals(0, count); count = stat.executeUpdate( "INSERT INTO TEST VALUES(1,'Hello')"); assertEquals(1, count); count = stat.executeUpdate( - "INSERT INTO TEST(VALUE,ID) VALUES('JDBC',2)"); + "INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); assertEquals(1, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='LDBC' WHERE ID=2 OR ID=1"); + "UPDATE TEST SET V='LDBC' WHERE ID=2 OR ID=1"); assertEquals(2, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='\\LDBC\\' WHERE VALUE LIKE 'LDBC' "); + "UPDATE TEST SET V='\\LDBC\\' WHERE V LIKE 'LDBC' "); assertEquals(2, count); count = stat.executeUpdate( - "UPDATE TEST SET VALUE='LDBC' WHERE VALUE LIKE '\\\\LDBC\\\\'"); + "UPDATE TEST SET V='LDBC' WHERE V LIKE '\\\\LDBC\\\\'"); trace("count:" + count); assertEquals(2, count); count = stat.executeUpdate("DELETE FROM TEST WHERE ID=-1"); assertEquals(0, count); count = stat.executeUpdate("DELETE FROM TEST WHERE ID=2"); assertEquals(1, count); + largeCount = 
stat.executeLargeUpdate("DELETE FROM TEST WHERE ID=-1"); + assertEquals(0, largeCount); + assertEquals(0, stat.getLargeUpdateCount()); + largeCount = stat.executeLargeUpdate("INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); + assertEquals(1, largeCount); + assertEquals(1, stat.getLargeUpdateCount()); + largeCount = stat.executeLargeUpdate("DELETE FROM TEST WHERE ID=2"); + assertEquals(1, largeCount); + assertEquals(1, stat.getLargeUpdateCount()); assertThrows(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY, stat). executeUpdate("SELECT * FROM TEST"); @@ -270,31 +278,31 @@ private void testStatement() throws SQLException { trace("execute"); result = stat.execute( - "CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); - assertTrue(!result); + "CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); + assertFalse(result); result = stat.execute("INSERT INTO TEST VALUES(1,'Hello')"); - assertTrue(!result); - result = stat.execute("INSERT INTO TEST(VALUE,ID) VALUES('JDBC',2)"); - assertTrue(!result); - result = stat.execute("UPDATE TEST SET VALUE='LDBC' WHERE ID=2"); - assertTrue(!result); + assertFalse(result); + result = stat.execute("INSERT INTO TEST(V,ID) VALUES('JDBC',2)"); + assertFalse(result); + result = stat.execute("UPDATE TEST SET V='LDBC' WHERE ID=2"); + assertFalse(result); result = stat.execute("DELETE FROM TEST WHERE ID=3"); - assertTrue(!result); + assertFalse(result); result = stat.execute("SELECT * FROM TEST"); assertTrue(result); result = stat.execute("DROP TABLE TEST"); - assertTrue(!result); + assertFalse(result); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). - executeQuery("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + executeQuery("CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,VALUE VARCHAR(255))"); + stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY,V VARCHAR(255))"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). 
executeQuery("INSERT INTO TEST VALUES(1,'Hello')"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). - executeQuery("UPDATE TEST SET VALUE='LDBC' WHERE ID=2"); + executeQuery("UPDATE TEST SET V='LDBC' WHERE ID=2"); assertThrows(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY, stat). executeQuery("DELETE FROM TEST WHERE ID=3"); @@ -316,91 +324,207 @@ private void testStatement() throws SQLException { stat.execute("DROP TABLE TEST"); stat.executeUpdate("DROP TABLE IF EXISTS TEST"); - assertTrue(stat.getWarnings() == null); + assertNull(stat.getWarnings()); stat.clearWarnings(); - assertTrue(stat.getWarnings() == null); + assertNull(stat.getWarnings()); assertTrue(conn == stat.getConnection()); stat.close(); } + private void testCloseOnCompletion() throws SQLException { + Statement stat = conn.createStatement(); + assertFalse(stat.isCloseOnCompletion()); + ResultSet rs = stat.executeQuery("VALUES 1"); + assertFalse(stat.isCloseOnCompletion()); + stat.closeOnCompletion(); + assertTrue(stat.isCloseOnCompletion()); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + rs.close(); + assertTrue(stat.isClosed()); + assertThrows(ErrorCode.OBJECT_CLOSED, stat).isCloseOnCompletion(); + assertThrows(ErrorCode.OBJECT_CLOSED, stat).closeOnCompletion(); + stat = conn.createStatement(); + stat.closeOnCompletion(); + rs = stat.executeQuery("VALUES 1"); + ResultSet rs2 = stat.executeQuery("VALUES 2"); + rs.close(); + assertFalse(stat.isClosed()); + rs2.close(); + assertTrue(stat.isClosed()); + } + private void testIdentityMerge() throws SQLException { Statement stat = conn.createStatement(); stat.execute("drop table if exists test1"); stat.execute("create table test1(id identity, x int)"); stat.execute("drop table if exists test2"); stat.execute("create table test2(id identity, x int)"); - stat.execute("merge into test1(x) key(x) values(5)"); + stat.execute("merge into test1(x) key(x) values(5)", + Statement.RETURN_GENERATED_KEYS); ResultSet keys; 
keys = stat.getGeneratedKeys(); keys.next(); assertEquals(1, keys.getInt(1)); stat.execute("insert into test2(x) values(10), (11), (12)"); - stat.execute("merge into test1(x) key(x) values(5)"); + stat.execute("merge into test1(x) key(x) values(5)", + Statement.RETURN_GENERATED_KEYS); keys = stat.getGeneratedKeys(); + keys.next(); + assertEquals(1, keys.getInt(1)); assertFalse(keys.next()); - stat.execute("merge into test1(x) key(x) values(6)"); + stat.execute("merge into test1(x) key(x) values(6)", + Statement.RETURN_GENERATED_KEYS); keys = stat.getGeneratedKeys(); keys.next(); assertEquals(2, keys.getInt(1)); stat.execute("drop table test1, test2"); } - private void testIdentity() throws SQLException { + private void testPreparedStatement() throws SQLException{ Statement stat = conn.createStatement(); - stat.execute("CREATE SEQUENCE SEQ"); - stat.execute("CREATE TABLE TEST(ID INT)"); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)"); - ResultSet rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(1, rs.getInt(1)); + stat.execute("create table test(id int primary key, name varchar(255))"); + stat.execute("insert into test values(1, 'Hello')"); + stat.execute("insert into test values(2, 'World')"); + PreparedStatement ps = conn.prepareStatement( + "select name from test where id in (select id from test where name REGEXP ?)"); + ps.setString(1, "Hello"); + ResultSet rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals("Hello", rs.getString("name")); assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(2, rs.getInt(1)); + ps.setString(1, "World"); + rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals("World", rs.getString("name")); assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new int[] { 1 }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(3, 
rs.getInt(1)); + //Changes the table structure + stat.execute("create index t_id on test(name)"); + //Test the prepared statement again to check if the internal cache attributes were reset + ps.setString(1, "Hello"); + rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals("Hello", rs.getString("name")); assertFalse(rs.next()); - stat.execute("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new String[] { "ID" }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(4, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - Statement.RETURN_GENERATED_KEYS); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(5, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new int[] { 1 }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(6, rs.getInt(1)); - assertFalse(rs.next()); - stat.executeUpdate("INSERT INTO TEST VALUES(NEXT VALUE FOR SEQ)", - new String[] { "ID" }); - rs = stat.getGeneratedKeys(); - rs.next(); - assertEquals(7, rs.getInt(1)); + ps.setString(1, "World"); + rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals("World", rs.getString("name")); assertFalse(rs.next()); + ps = conn.prepareStatement("insert into test values(?, ?)"); + ps.setInt(1, 3); + ps.setString(2, "v3"); + ps.addBatch(); + ps.setInt(1, 4); + ps.setString(2, "v4"); + ps.addBatch(); + assertTrue(Arrays.equals(new int[] {1, 1}, ps.executeBatch())); + ps.setInt(1, 5); + ps.setString(2, "v5"); + ps.addBatch(); + ps.setInt(1, 6); + ps.setString(2, "v6"); + ps.addBatch(); + assertTrue(Arrays.equals(new long[] {1, 1}, ps.executeLargeBatch())); + ps.setInt(1, 7); + ps.setString(2, "v7"); + assertEquals(1, ps.executeUpdate()); + assertEquals(1, ps.getUpdateCount()); + ps.setInt(1, 8); + ps.setString(2, "v8"); + assertEquals(1, ps.executeLargeUpdate()); + assertEquals(1, ps.getLargeUpdateCount()); + stat.execute("drop table test"); + } - 
stat.execute("CREATE TABLE TEST2(ID identity primary key)"); - stat.execute("INSERT INTO TEST2 VALUES()"); - stat.execute("SET @X = IDENTITY()"); - rs = stat.executeQuery("SELECT @X"); - rs.next(); - assertEquals(1, rs.getInt(1)); + private void testIdentifiers() throws SQLException { + Connection conn = getConnection("statement"); + + JdbcStatement stat = (JdbcStatement) conn.createStatement(); + assertEquals("SOME_ID", stat.enquoteIdentifier("SOME_ID", false)); + assertEquals("\"SOME ID\"", stat.enquoteIdentifier("SOME ID", false)); + assertEquals("\"SOME_ID\"", stat.enquoteIdentifier("SOME_ID", true)); + assertEquals("\"FROM\"", stat.enquoteIdentifier("FROM", false)); + assertEquals("\"Test\"", stat.enquoteIdentifier("Test", false)); + assertEquals("\"test\"", stat.enquoteIdentifier("test", false)); + assertEquals("\"TOP\"", stat.enquoteIdentifier("TOP", false)); + assertEquals("\"Test\"", stat.enquoteIdentifier("\"Test\"", false)); + assertEquals("\"Test\"", stat.enquoteIdentifier("\"Test\"", true)); + assertEquals("\"\"\"Test\"", stat.enquoteIdentifier("\"\"\"Test\"", true)); + assertEquals("\"\"", stat.enquoteIdentifier("", false)); + assertEquals("\"\"", stat.enquoteIdentifier("", true)); + assertEquals("U&\"\"", stat.enquoteIdentifier("U&\"\"", false)); + assertEquals("U&\"\"", stat.enquoteIdentifier("U&\"\"", true)); + assertEquals("U&\"\0100\"", stat.enquoteIdentifier("U&\"\0100\"", false)); + assertEquals("U&\"\0100\"", stat.enquoteIdentifier("U&\"\0100\"", true)); + assertThrows(NullPointerException.class, () -> stat.enquoteIdentifier(null, false)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("\"Test", true)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("\"a\"a\"", true)); + assertThrows(ErrorCode.INVALID_NAME_1, () -> stat.enquoteIdentifier("U&\"a\"a\"", true)); + assertThrows(ErrorCode.STRING_FORMAT_ERROR_1, () -> stat.enquoteIdentifier("U&\"\\111\"", true)); + assertEquals("U&\"\\02b0\"", 
stat.enquoteIdentifier("\u02B0", false)); + + assertTrue(stat.isSimpleIdentifier("SOME_ID_1")); + assertFalse(stat.isSimpleIdentifier("SOME ID")); + assertFalse(stat.isSimpleIdentifier("FROM")); + assertFalse(stat.isSimpleIdentifier("Test")); + assertFalse(stat.isSimpleIdentifier("test")); + assertFalse(stat.isSimpleIdentifier("TOP")); + assertFalse(stat.isSimpleIdentifier("_")); + assertFalse(stat.isSimpleIdentifier("_1")); + assertFalse(stat.isSimpleIdentifier("\u02B0")); - stat.execute("DROP TABLE TEST"); - stat.execute("DROP TABLE TEST2"); + conn.close(); + deleteDb("statement"); + conn = getConnection("statement;DATABASE_TO_LOWER=TRUE"); + + JdbcStatement stat2 = (JdbcStatement) conn.createStatement(); + assertEquals("some_id", stat2.enquoteIdentifier("some_id", false)); + assertEquals("\"some id\"", stat2.enquoteIdentifier("some id", false)); + assertEquals("\"some_id\"", stat2.enquoteIdentifier("some_id", true)); + assertEquals("\"from\"", stat2.enquoteIdentifier("from", false)); + assertEquals("\"Test\"", stat2.enquoteIdentifier("Test", false)); + assertEquals("\"TEST\"", stat2.enquoteIdentifier("TEST", false)); + assertEquals("\"top\"", stat2.enquoteIdentifier("top", false)); + + assertTrue(stat2.isSimpleIdentifier("some_id")); + assertFalse(stat2.isSimpleIdentifier("some id")); + assertFalse(stat2.isSimpleIdentifier("from")); + assertFalse(stat2.isSimpleIdentifier("Test")); + assertFalse(stat2.isSimpleIdentifier("TEST")); + assertFalse(stat2.isSimpleIdentifier("top")); + + conn.close(); + deleteDb("statement"); + conn = getConnection("statement;DATABASE_TO_UPPER=FALSE"); + + JdbcStatement stat3 = (JdbcStatement) conn.createStatement(); + assertEquals("SOME_ID", stat3.enquoteIdentifier("SOME_ID", false)); + assertEquals("some_id", stat3.enquoteIdentifier("some_id", false)); + assertEquals("\"SOME ID\"", stat3.enquoteIdentifier("SOME ID", false)); + assertEquals("\"some id\"", stat3.enquoteIdentifier("some id", false)); + assertEquals("\"SOME_ID\"", 
stat3.enquoteIdentifier("SOME_ID", true)); + assertEquals("\"some_id\"", stat3.enquoteIdentifier("some_id", true)); + assertEquals("\"FROM\"", stat3.enquoteIdentifier("FROM", false)); + assertEquals("\"from\"", stat3.enquoteIdentifier("from", false)); + assertEquals("Test", stat3.enquoteIdentifier("Test", false)); + assertEquals("\"TOP\"", stat3.enquoteIdentifier("TOP", false)); + assertEquals("\"top\"", stat3.enquoteIdentifier("top", false)); + + assertTrue(stat3.isSimpleIdentifier("SOME_ID")); + assertTrue(stat3.isSimpleIdentifier("some_id")); + assertFalse(stat3.isSimpleIdentifier("SOME ID")); + assertFalse(stat3.isSimpleIdentifier("some id")); + assertFalse(stat3.isSimpleIdentifier("FROM")); + assertFalse(stat3.isSimpleIdentifier("from")); + assertTrue(stat3.isSimpleIdentifier("Test")); + assertFalse(stat3.isSimpleIdentifier("TOP")); + assertFalse(stat3.isSimpleIdentifier("top")); + assertThrows(NullPointerException.class, () -> stat3.isSimpleIdentifier(null)); + + conn.close(); } } diff --git a/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java b/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java index 3c0d8f0500..234bad5c8f 100644 --- a/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java +++ b/h2/src/test/org/h2/test/jdbc/TestTransactionIsolation.java @@ -1,20 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; import java.sql.Connection; import java.sql.SQLException; - +import java.sql.Statement; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Transaction isolation level tests. 
*/ -public class TestTransactionIsolation extends TestBase { +public class TestTransactionIsolation extends TestDb { private Connection conn1, conn2; @@ -24,82 +25,87 @@ public class TestTransactionIsolation extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { - if (config.mvcc) { - // no tests yet - } else { - testTableLevelLocking(); - } + testTableLevelLocking(); } private void testTableLevelLocking() throws SQLException { deleteDb("transactionIsolation"); + conn1 = getConnection("transactionIsolation"); - assertEquals(Connection.TRANSACTION_READ_COMMITTED, - conn1.getTransactionIsolation()); - conn1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - assertEquals(Connection.TRANSACTION_SERIALIZABLE, - conn1.getTransactionIsolation()); - conn1.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); - assertEquals(Connection.TRANSACTION_READ_UNCOMMITTED, - conn1.getTransactionIsolation()); - assertSingleValue(conn1.createStatement(), "CALL LOCK_MODE()", 0); - conn1.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - assertSingleValue(conn1.createStatement(), "CALL LOCK_MODE()", 3); - assertEquals(Connection.TRANSACTION_READ_COMMITTED, - conn1.getTransactionIsolation()); - conn1.createStatement().execute("SET LOCK_MODE 1"); - assertEquals(Connection.TRANSACTION_SERIALIZABLE, - conn1.getTransactionIsolation()); - conn1.createStatement().execute("CREATE TABLE TEST(ID INT)"); - conn1.createStatement().execute("INSERT INTO TEST VALUES(1)"); conn1.setAutoCommit(false); conn2 = getConnection("transactionIsolation"); conn2.setAutoCommit(false); - conn1.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn1.getMetaData().getDefaultTransactionIsolation()); + 
assertEquals(Connection.TRANSACTION_READ_COMMITTED, conn1.getTransactionIsolation()); - // serializable: just reading - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 1); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 1); - conn1.commit(); - conn2.commit(); + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE TEST(ID INT)"); + } + testIt(Connection.TRANSACTION_READ_UNCOMMITTED); + testIt(Connection.TRANSACTION_READ_COMMITTED); + testIt(Connection.TRANSACTION_REPEATABLE_READ); + testIt(Connection.TRANSACTION_SERIALIZABLE); + + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE TEST"); + stmt.execute("CREATE TABLE TEST(ID INT UNIQUE)"); + } + testIt(Connection.TRANSACTION_READ_UNCOMMITTED); + testIt(Connection.TRANSACTION_READ_COMMITTED); + testIt(Connection.TRANSACTION_REPEATABLE_READ); + testIt(Connection.TRANSACTION_SERIALIZABLE); - // serializable: write lock - conn1.createStatement().executeUpdate("UPDATE TEST SET ID=2"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). 
- executeQuery("SELECT * FROM TEST"); - conn1.commit(); - conn2.commit(); + conn2.close(); + conn1.close(); + deleteDb("transactionIsolation"); + } - conn1.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + private void testIt(int isolationLevel2) throws SQLException { + try (Connection conn = getConnection("transactionIsolation"); + Statement stmt = conn.createStatement()) { + stmt.execute("DELETE FROM TEST"); + stmt.execute("INSERT INTO TEST VALUES(1)"); + } - // read-committed: #1 read, #2 update, #1 read again - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 2); - conn2.createStatement().executeUpdate("UPDATE TEST SET ID=3"); - conn2.commit(); - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 3); - conn1.commit(); - - // read-committed: #1 read, #2 read, #2 update, #1 delete - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 3); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 3); - conn2.createStatement().executeUpdate("UPDATE TEST SET ID=4"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn1.createStatement()). 
- executeUpdate("DELETE FROM TEST"); - conn2.commit(); - conn1.commit(); - assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 4); - assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST", 4); + conn2.setTransactionIsolation(isolationLevel2); + assertEquals(isolationLevel2, conn2.getTransactionIsolation()); - conn1.close(); - conn2.close(); - deleteDb("transactionIsolation"); + testRowLocks(Connection.TRANSACTION_READ_UNCOMMITTED); + testRowLocks(Connection.TRANSACTION_READ_COMMITTED); + testRowLocks(Connection.TRANSACTION_REPEATABLE_READ); + testRowLocks(Connection.TRANSACTION_SERIALIZABLE); + + testDirtyRead(Connection.TRANSACTION_READ_UNCOMMITTED, 1, true, true); + testDirtyRead(Connection.TRANSACTION_READ_COMMITTED, 2, false, true); + testDirtyRead(Connection.TRANSACTION_REPEATABLE_READ, 3, false, false); + testDirtyRead(Connection.TRANSACTION_SERIALIZABLE, 4, false, false); } + private void testDirtyRead(int isolationLevel, int value, boolean dirtyVisible, boolean committedVisible) + throws SQLException { + conn1.setTransactionIsolation(isolationLevel); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", value); + int newValue = value + 1; + conn2.createStatement().executeUpdate("UPDATE TEST SET ID=" + newValue); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", dirtyVisible ? newValue : value); + conn2.commit(); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", committedVisible ? 
newValue : value); + } + + private void testRowLocks(int isolationLevel) throws SQLException { + conn1.setTransactionIsolation(isolationLevel); + assertSingleValue(conn1.createStatement(), "SELECT * FROM TEST", 1); + assertSingleValue(conn2.createStatement(), "SELECT * FROM TEST FOR UPDATE", 1); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn1.createStatement()).executeUpdate("DELETE FROM TEST"); + conn2.commit(); + } } diff --git a/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java b/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java index ac3873908e..217232db0c 100644 --- a/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java +++ b/h2/src/test/org/h2/test/jdbc/TestUpdatableResultSet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,6 +12,7 @@ import java.sql.Blob; import java.sql.Connection; import java.sql.Date; +import java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; @@ -20,14 +21,20 @@ import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import org.h2.api.ErrorCode; +import org.h2.api.H2Type; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Updatable result set tests. */ -public class TestUpdatableResultSet extends TestBase { +public class TestUpdatableResultSet extends TestDb { /** * Run just this test. @@ -35,7 +42,7 @@ public class TestUpdatableResultSet extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,6 +53,7 @@ public void test() throws Exception { testUpdateDeleteInsert(); testUpdateDataType(); testUpdateResetRead(); + testUpdateObject(); deleteDb("updatableResultSet"); } @@ -63,6 +71,8 @@ private void testDetectUpdatable() throws SQLException { rs = stat.executeQuery("select name from test"); assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); stat.execute("drop table test"); + rs = stat.executeQuery("SELECT"); + assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); stat.execute("create table test(a int, b int, " + "name varchar, primary key(a, b))"); @@ -266,8 +276,13 @@ private void testScroll() throws SQLException { assertTrue(rs.absolute(3)); assertEquals(3, rs.getRow()); - assertTrue(rs.absolute(-1)); - assertEquals(3, rs.getRow()); + if (!config.lazy) { + assertTrue(rs.absolute(-1)); + assertEquals(3, rs.getRow()); + + assertTrue(rs.absolute(-2)); + assertEquals(2, rs.getRow()); + } assertFalse(rs.absolute(4)); assertTrue(rs.isAfterLast()); @@ -290,27 +305,30 @@ private void testUpdateDataType() throws Exception { Statement stat = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), " - + "DEC DECIMAL(10,2), BOO BIT, BYE TINYINT, BIN BINARY(100), " - + "D DATE, T TIME, TS TIMESTAMP, DB DOUBLE, R REAL, L BIGINT, " + + "DEC DECIMAL(10,2), BOO BIT, BYE TINYINT, BIN VARBINARY(100), " + + "D DATE, T TIME, TS TIMESTAMP(9), TSTZ TIMESTAMP(9) WITH TIME ZONE, DB DOUBLE, R REAL, L BIGINT, " + "O_I INT, SH SMALLINT, CL CLOB, BL BLOB)"); + final int clobIndex = 16, blobIndex = 17; ResultSet rs = stat.executeQuery("SELECT * FROM TEST"); ResultSetMetaData meta = rs.getMetaData(); - assertEquals("java.lang.Integer", meta.getColumnClassName(1)); - assertEquals("java.lang.String", meta.getColumnClassName(2)); - 
assertEquals("java.math.BigDecimal", meta.getColumnClassName(3)); - assertEquals("java.lang.Boolean", meta.getColumnClassName(4)); - assertEquals("java.lang.Byte", meta.getColumnClassName(5)); - assertEquals("[B", meta.getColumnClassName(6)); - assertEquals("java.sql.Date", meta.getColumnClassName(7)); - assertEquals("java.sql.Time", meta.getColumnClassName(8)); - assertEquals("java.sql.Timestamp", meta.getColumnClassName(9)); - assertEquals("java.lang.Double", meta.getColumnClassName(10)); - assertEquals("java.lang.Float", meta.getColumnClassName(11)); - assertEquals("java.lang.Long", meta.getColumnClassName(12)); - assertEquals("java.lang.Integer", meta.getColumnClassName(13)); - assertEquals("java.lang.Short", meta.getColumnClassName(14)); - assertEquals("java.sql.Clob", meta.getColumnClassName(15)); - assertEquals("java.sql.Blob", meta.getColumnClassName(16)); + int c = 0; + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.lang.String", meta.getColumnClassName(++c)); + assertEquals("java.math.BigDecimal", meta.getColumnClassName(++c)); + assertEquals("java.lang.Boolean", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("[B", meta.getColumnClassName(++c)); + assertEquals("java.sql.Date", meta.getColumnClassName(++c)); + assertEquals("java.sql.Time", meta.getColumnClassName(++c)); + assertEquals("java.sql.Timestamp", meta.getColumnClassName(++c)); + assertEquals("java.time.OffsetDateTime", meta.getColumnClassName(++c)); + assertEquals("java.lang.Double", meta.getColumnClassName(++c)); + assertEquals("java.lang.Float", meta.getColumnClassName(++c)); + assertEquals("java.lang.Long", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.lang.Integer", meta.getColumnClassName(++c)); + assertEquals("java.sql.Clob", meta.getColumnClassName(++c)); + assertEquals("java.sql.Blob", 
meta.getColumnClassName(++c)); rs.moveToInsertRow(); rs.updateInt(1, 0); rs.updateNull(2); @@ -320,22 +338,24 @@ private void testUpdateDataType() throws Exception { rs.insertRow(); rs.moveToInsertRow(); - rs.updateInt(1, 1); - rs.updateString(2, null); - rs.updateBigDecimal(3, null); - rs.updateBoolean(4, false); - rs.updateByte(5, (byte) 0); - rs.updateBytes(6, null); - rs.updateDate(7, null); - rs.updateTime(8, null); - rs.updateTimestamp(9, null); - rs.updateDouble(10, 0.0); - rs.updateFloat(11, (float) 0.0); - rs.updateLong(12, 0L); - rs.updateObject(13, null); - rs.updateShort(14, (short) 0); - rs.updateCharacterStream(15, new StringReader("test"), 0); - rs.updateBinaryStream(16, + c = 0; + rs.updateInt(++c, 1); + rs.updateString(++c, null); + rs.updateBigDecimal(++c, null); + rs.updateBoolean(++c, false); + rs.updateByte(++c, (byte) 0); + rs.updateBytes(++c, null); + rs.updateDate(++c, null); + rs.updateTime(++c, null); + rs.updateTimestamp(++c, null); + rs.updateObject(++c, null); + rs.updateDouble(++c, 0.0); + rs.updateFloat(++c, 0.0f); + rs.updateLong(++c, 0L); + rs.updateObject(++c, null); + rs.updateShort(++c, (short) 0); + rs.updateCharacterStream(++c, new StringReader("test"), 0); + rs.updateBinaryStream(++c, new ByteArrayInputStream(new byte[] { (byte) 0xff, 0x00 }), 0); rs.insertRow(); @@ -350,8 +370,10 @@ private void testUpdateDataType() throws Exception { rs.updateTime("T", Time.valueOf("21:46:28")); rs.updateTimestamp("TS", Timestamp.valueOf("2005-09-21 21:47:09.567890123")); + rs.updateObject("TSTZ", OffsetDateTime.of(LocalDate.of(2005, 9, 21), + LocalTime.ofNanoOfDay(81_189_123_456_789L), ZoneOffset.ofHours(1))); rs.updateDouble("DB", 1.725); - rs.updateFloat("R", (float) 2.5); + rs.updateFloat("R", 2.5f); rs.updateLong("L", Long.MAX_VALUE); rs.updateObject("O_I", 10); rs.updateShort("SH", Short.MIN_VALUE); @@ -370,8 +392,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 4); - 
rs.updateCharacterStream(15, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBinaryStream(16, + rs.updateCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBinaryStream(blobIndex, new ByteArrayInputStream(new byte[] { (byte) 0xab, 0x12 })); rs.insertRow(); @@ -384,8 +406,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 6); - rs.updateClob(15, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, + rs.updateClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBlob(blobIndex, new ByteArrayInputStream(new byte[] { (byte) 0xab, 0x12 })); rs.insertRow(); @@ -401,8 +423,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 8); - rs.updateNClob(15, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, b); + rs.updateNClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -413,8 +435,8 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 10); - rs.updateNClob(15, new StringReader("\u00ef\u00f6\u00fc"), -1); - rs.updateBlob(16, b); + rs.updateNClob(clobIndex, new StringReader("\u00ef\u00f6\u00fc"), -1); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -426,9 +448,9 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 12); - rs.updateNCharacterStream(15, + rs.updateNCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc"), -1); - rs.updateBlob(16, b); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs.moveToInsertRow(); @@ -440,75 +462,121 @@ private void testUpdateDataType() throws Exception { rs.moveToInsertRow(); rs.updateInt("ID", 14); - rs.updateNCharacterStream(15, + rs.updateNCharacterStream(clobIndex, new StringReader("\u00ef\u00f6\u00fc")); - rs.updateBlob(16, b); + rs.updateBlob(blobIndex, b); rs.insertRow(); rs = 
stat.executeQuery("SELECT * FROM TEST ORDER BY ID NULLS FIRST"); rs.next(); - assertTrue(rs.getInt(1) == 0); - assertTrue(rs.getString(2) == null && rs.wasNull()); - assertTrue(rs.getBigDecimal(3) == null && rs.wasNull()); - assertTrue(!rs.getBoolean(4) && rs.wasNull()); - assertTrue(rs.getByte(5) == 0 && rs.wasNull()); - assertTrue(rs.getBytes(6) == null && rs.wasNull()); - assertTrue(rs.getDate(7) == null && rs.wasNull()); - assertTrue(rs.getTime(8) == null && rs.wasNull()); - assertTrue(rs.getTimestamp(9) == null && rs.wasNull()); - assertTrue(rs.getDouble(10) == 0.0 && rs.wasNull()); - assertTrue(rs.getFloat(11) == 0.0 && rs.wasNull()); - assertTrue(rs.getLong(12) == 0 && rs.wasNull()); - assertTrue(rs.getObject(13) == null && rs.wasNull()); - assertTrue(rs.getShort(14) == 0 && rs.wasNull()); - assertTrue(rs.getCharacterStream(15) == null && rs.wasNull()); - assertTrue(rs.getBinaryStream(16) == null && rs.wasNull()); + c = 0; + assertTrue(rs.getInt(++c) == 0); + assertTrue(rs.getString(++c) == null && rs.wasNull()); + assertTrue(rs.getBigDecimal(++c) == null && rs.wasNull()); + assertTrue(!rs.getBoolean(++c) && rs.wasNull()); + assertTrue(rs.getByte(++c) == 0 && rs.wasNull()); + assertTrue(rs.getBytes(++c) == null && rs.wasNull()); + assertTrue(rs.getDate(++c) == null && rs.wasNull()); + assertTrue(rs.getTime(++c) == null && rs.wasNull()); + assertTrue(rs.getTimestamp(++c) == null && rs.wasNull()); + assertTrue(rs.getDouble(++c) == 0.0 && rs.wasNull()); + assertTrue(rs.getFloat(++c) == 0.0 && rs.wasNull()); + assertTrue(rs.getLong(++c) == 0 && rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getShort(++c) == 0 && rs.wasNull()); + assertTrue(rs.getCharacterStream(++c) == null && rs.wasNull()); + assertTrue(rs.getBinaryStream(++c) == null && rs.wasNull()); rs.next(); - assertTrue(rs.getInt(1) == 1); - assertTrue(rs.getString(2) == null && rs.wasNull()); - assertTrue(rs.getBigDecimal(3) == null && rs.wasNull()); - 
assertTrue(!rs.getBoolean(4) && !rs.wasNull()); - assertTrue(rs.getByte(5) == 0 && !rs.wasNull()); - assertTrue(rs.getBytes(6) == null && rs.wasNull()); - assertTrue(rs.getDate(7) == null && rs.wasNull()); - assertTrue(rs.getTime(8) == null && rs.wasNull()); - assertTrue(rs.getTimestamp(9) == null && rs.wasNull()); - assertTrue(rs.getDouble(10) == 0.0 && !rs.wasNull()); - assertTrue(rs.getFloat(11) == 0.0 && !rs.wasNull()); - assertTrue(rs.getLong(12) == 0 && !rs.wasNull()); - assertTrue(rs.getObject(13) == null && rs.wasNull()); - assertTrue(rs.getShort(14) == 0 && !rs.wasNull()); - assertEquals("test", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xff, 0x00 }, rs.getBytes(16)); + c = 0; + assertTrue(rs.getInt(++c) == 1); + assertTrue(rs.getString(++c) == null && rs.wasNull()); + assertTrue(rs.getBigDecimal(++c) == null && rs.wasNull()); + assertTrue(!rs.getBoolean(++c) && !rs.wasNull()); + assertTrue(rs.getByte(++c) == 0 && !rs.wasNull()); + assertTrue(rs.getBytes(++c) == null && rs.wasNull()); + assertTrue(rs.getDate(++c) == null && rs.wasNull()); + assertTrue(rs.getTime(++c) == null && rs.wasNull()); + assertTrue(rs.getTimestamp(++c) == null && rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getDouble(++c) == 0.0 && !rs.wasNull()); + assertTrue(rs.getFloat(++c) == 0.0 && !rs.wasNull()); + assertTrue(rs.getLong(++c) == 0 && !rs.wasNull()); + assertTrue(rs.getObject(++c) == null && rs.wasNull()); + assertTrue(rs.getShort(++c) == 0 && !rs.wasNull()); + assertEquals("test", rs.getString(++c)); + assertEquals(new byte[] { (byte) 0xff, 0x00 }, rs.getBytes(++c)); rs.next(); - assertTrue(rs.getInt(1) == 2); - assertEquals("+", rs.getString(2)); - assertEquals("1.20", rs.getBigDecimal(3).toString()); - assertTrue(rs.getBoolean(4)); - assertTrue((rs.getByte(5) & 0xff) == 0xff); - assertEquals(new byte[] { 0x00, (byte) 0xff }, rs.getBytes(6)); - assertEquals("2005-09-21", rs.getDate(7).toString()); - 
assertEquals("21:46:28", rs.getTime(8).toString()); - assertEquals("2005-09-21 21:47:09.567890123", rs.getTimestamp(9).toString()); - assertTrue(rs.getDouble(10) == 1.725); - assertTrue(rs.getFloat(11) == (float) 2.5); - assertTrue(rs.getLong(12) == Long.MAX_VALUE); - assertEquals(10, ((Integer) rs.getObject(13)).intValue()); - assertTrue(rs.getShort(14) == Short.MIN_VALUE); + c = 0; + assertTrue(rs.getInt(++c) == 2); + assertEquals("+", rs.getString(++c)); + assertEquals("1.20", rs.getBigDecimal(++c).toString()); + assertTrue(rs.getBoolean(++c)); + assertTrue((rs.getByte(++c) & 0xff) == 0xff); + assertEquals(new byte[] { 0x00, (byte) 0xff }, rs.getBytes(++c)); + assertEquals("2005-09-21", rs.getDate(++c).toString()); + assertEquals("21:46:28", rs.getTime(++c).toString()); + assertEquals("2005-09-21 21:47:09.567890123", rs.getTimestamp(++c).toString()); + assertEquals("2005-09-21T22:33:09.123456789+01:00", rs.getObject(++c).toString()); + assertTrue(rs.getDouble(++c) == 1.725); + assertTrue(rs.getFloat(++c) == 2.5f); + assertTrue(rs.getLong(++c) == Long.MAX_VALUE); + assertEquals(10, ((Integer) rs.getObject(++c)).intValue()); + assertTrue(rs.getShort(++c) == Short.MIN_VALUE); // auml ouml uuml - assertEquals("\u00ef\u00f6\u00fc", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(16)); + assertEquals("\u00ef\u00f6\u00fc", rs.getString(++c)); + assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(++c)); + c = 1; + rs.updateString(++c, "-"); + rs.updateBigDecimal(++c, new BigDecimal("1.30")); + rs.updateBoolean(++c, false); + rs.updateByte(++c, (byte) 0x55); + rs.updateBytes(++c, new byte[] { 0x01, (byte) 0xfe }); + rs.updateDate(++c, Date.valueOf("2005-09-22")); + rs.updateTime(++c, Time.valueOf("21:46:29")); + rs.updateTimestamp(++c, Timestamp.valueOf("2005-09-21 21:47:10.111222333")); + rs.updateObject(++c, OffsetDateTime.of(LocalDate.of(2005, 9, 22), LocalTime.ofNanoOfDay(10_111_222_333L), + ZoneOffset.ofHours(2))); + 
rs.updateDouble(++c, 2.25); + rs.updateFloat(++c, 3.5f); + rs.updateLong(++c, Long.MAX_VALUE - 1); + rs.updateInt(++c, 11); + rs.updateShort(++c, (short) -1_000); + rs.updateString(++c, "ABCD"); + rs.updateBytes(++c, new byte[] { 1, 2 }); + rs.updateRow(); for (int i = 3; i <= 14; i++) { rs.next(); assertEquals(i, rs.getInt(1)); - assertEquals("\u00ef\u00f6\u00fc", rs.getString(15)); - assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(16)); + assertEquals("\u00ef\u00f6\u00fc", rs.getString(clobIndex)); + assertEquals(new byte[] { (byte) 0xab, 0x12 }, rs.getBytes(blobIndex)); } assertFalse(rs.next()); + rs = stat.executeQuery("SELECT * FROM TEST WHERE ID = 2"); + rs.next(); + c = 0; + assertTrue(rs.getInt(++c) == 2); + assertEquals("-", rs.getString(++c)); + assertEquals("1.30", rs.getBigDecimal(++c).toString()); + assertFalse(rs.getBoolean(++c)); + assertTrue((rs.getByte(++c) & 0xff) == 0x55); + assertEquals(new byte[] { 0x01, (byte) 0xfe }, rs.getBytes(++c)); + assertEquals("2005-09-22", rs.getDate(++c).toString()); + assertEquals("21:46:29", rs.getTime(++c).toString()); + assertEquals("2005-09-21 21:47:10.111222333", rs.getTimestamp(++c).toString()); + assertEquals("2005-09-22T00:00:10.111222333+02:00", rs.getObject(++c).toString()); + assertTrue(rs.getDouble(++c) == 2.25); + assertTrue(rs.getFloat(++c) == 3.5f); + assertTrue(rs.getLong(++c) == Long.MAX_VALUE - 1); + assertEquals(11, ((Integer) rs.getObject(++c)).intValue()); + assertTrue(rs.getShort(++c) == -1_000); + assertEquals("ABCD", rs.getString(++c)); + assertEquals(new byte[] { 1, 2 }, rs.getBytes(++c)); + assertFalse(rs.next()); + stat.execute("DROP TABLE TEST"); conn.close(); } @@ -666,6 +734,89 @@ private void testScrollResultSet(Statement stat, int type, int rows) } } + private void testUpdateObject() throws SQLException { + deleteDb("updatableResultSet"); + Connection conn = getConnection("updatableResultSet"); + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID 
INT PRIMARY KEY, V INT)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES (?1, ?1)"); + for (int i = 1; i <= 12; i++) { + prep.setInt(1, i); + prep.executeUpdate(); + } + prep = conn.prepareStatement("TABLE TEST ORDER BY ID", ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_UPDATABLE); + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + rs.next(); + assertEquals(i, rs.getInt(1)); + assertEquals(i, rs.getInt(2)); + testUpdateObjectUpdateRow(rs, i, i * 10); + rs.updateRow(); + } + assertFalse(rs.next()); + } + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + assertEquals(i * 10, rs.getInt(2)); + testUpdateObjectUpdateRow(rs, i, null); + rs.updateRow(); + } + assertFalse(rs.next()); + } + try (ResultSet rs = prep.executeQuery()) { + for (int i = 1; i <= 12; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + assertNull(rs.getObject(2)); + } + assertFalse(rs.next()); + } + conn.close(); + } + + private static void testUpdateObjectUpdateRow(ResultSet rs, int method, Object value) throws SQLException { + switch (method) { + case 1: + rs.updateObject(2, value); + break; + case 2: + rs.updateObject("V", value); + break; + case 3: + rs.updateObject(2, value, 0); + break; + case 4: + rs.updateObject(2, value, JDBCType.INTEGER); + break; + case 5: + rs.updateObject(2, value, H2Type.INTEGER); + break; + case 6: + rs.updateObject("V", value, 0); + break; + case 7: + rs.updateObject("V", value, JDBCType.INTEGER); + break; + case 8: + rs.updateObject("V", value, H2Type.INTEGER); + break; + case 9: + rs.updateObject(2, value, JDBCType.INTEGER, 0); + break; + case 10: + rs.updateObject(2, value, H2Type.INTEGER, 0); + break; + case 11: + rs.updateObject("V", value, JDBCType.INTEGER, 0); + break; + case 12: + rs.updateObject("V", value, H2Type.INTEGER, 0); + } + } + private void assertState(ResultSet rs, boolean beforeFirst, 
boolean first, boolean last, boolean afterLast) throws SQLException { assertEquals(beforeFirst, rs.isBeforeFirst()); diff --git a/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java b/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java index cab32c4e61..b1e7634c3c 100644 --- a/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java +++ b/h2/src/test/org/h2/test/jdbc/TestUrlJavaObjectSerializer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -12,13 +12,14 @@ import java.sql.Types; import org.h2.api.JavaObjectSerializer; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests per-db {@link JavaObjectSerializer} when set through the JDBC URL. * * @author Davide Cavestro */ -public class TestUrlJavaObjectSerializer extends TestBase { +public class TestUrlJavaObjectSerializer extends TestDb { /** * Run just this test. @@ -30,9 +31,7 @@ public static void main(String... a) throws Exception { test.config.traceTest = true; test.config.memory = true; test.config.networked = true; - test.config.beforeTest(); - test.test(); - test.config.afterTest(); + test.testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/TestZloty.java b/h2/src/test/org/h2/test/jdbc/TestZloty.java index d265b910be..e915849826 100644 --- a/h2/src/test/org/h2/test/jdbc/TestZloty.java +++ b/h2/src/test/org/h2/test/jdbc/TestZloty.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbc; @@ -13,12 +13,13 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests a custom BigDecimal implementation, as well * as direct modification of a byte in a byte array. */ -public class TestZloty extends TestBase { +public class TestZloty extends TestDb { /** * Run just this test. @@ -26,7 +27,7 @@ public class TestZloty extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbc/package.html b/h2/src/test/org/h2/test/jdbc/package.html index ae3bc47a9d..bf78702576 100644 --- a/h2/src/test/org/h2/test/jdbc/package.html +++ b/h2/src/test/org/h2/test/jdbc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/jdbcx/SimpleXid.java b/h2/src/test/org/h2/test/jdbcx/SimpleXid.java index c09dd330d5..666239b426 100644 --- a/h2/src/test/org/h2/test/jdbcx/SimpleXid.java +++ b/h2/src/test/org/h2/test/jdbcx/SimpleXid.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; diff --git a/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java b/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java index 0dd3a5df6d..dab7d296a7 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java +++ b/h2/src/test/org/h2/test/jdbcx/TestConnectionPool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -11,17 +11,22 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + import javax.sql.DataSource; +import org.h2.api.ErrorCode; import org.h2.jdbcx.JdbcConnectionPool; import org.h2.jdbcx.JdbcDataSource; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * This class tests the JdbcConnectionPool. */ -public class TestConnectionPool extends TestBase { +public class TestConnectionPool extends TestDb { /** * Run just this test. @@ -29,7 +34,7 @@ public class TestConnectionPool extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -43,6 +48,7 @@ public void test() throws Exception { testKeepOpen(); testConnect(); testThreads(); + testUnwrap(); deleteDb("connectionPool"); deleteDb("connectionPool2"); } @@ -58,7 +64,7 @@ private void testShutdown() throws SQLException { conn1.close(); conn2.createStatement().execute("shutdown immediately"); cp.dispose(); - assertTrue(w.toString().length() > 0); + assertTrue(w.toString().length() == 0); cp.dispose(); } @@ -68,7 +74,7 @@ private void testWrongUrl() { try { cp.getConnection(); } catch (SQLException e) { - assertEquals(8001, e.getErrorCode()); + assertEquals(ErrorCode.URL_FORMAT_ERROR_2, e.getErrorCode()); } cp.dispose(); } @@ -78,9 +84,7 @@ private void testTimeout() throws Exception { String password = getPassword(); final JdbcConnectionPool man = JdbcConnectionPool.create(url, user, password); man.setLoginTimeout(1); - createClassProxy(man.getClass()); - assertThrows(IllegalArgumentException.class, man). 
- setMaxConnections(-1); + assertThrows(IllegalArgumentException.class, () -> man.setMaxConnections(-1)); man.setMaxConnections(2); // connection 1 (of 2) Connection conn = man.getConnection(); @@ -95,17 +99,22 @@ public void call() { } }; t.execute(); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); + Connection conn2 = null; try { // connection 2 (of 1 or 2) may fail - man.getConnection(); + conn2 = man.getConnection(); // connection 3 (of 1 or 2) must fail man.getConnection(); fail(); } catch (SQLException e) { - assertTrue(e.toString().toLowerCase().contains("timeout")); - time = System.currentTimeMillis() - time; - assertTrue("timeout after " + time + " ms", time > 1000); + if (conn2 != null) { + conn2.close(); + } + assertContains(e.toString().toLowerCase(), "timeout"); + time = System.nanoTime() - time; + assertTrue("timeout after " + TimeUnit.NANOSECONDS.toMillis(time) + + " ms", time > TimeUnit.SECONDS.toNanos(1)); } finally { conn.close(); t.get(); @@ -151,17 +160,17 @@ private void testPerformance() throws SQLException { JdbcConnectionPool man = JdbcConnectionPool.create(url, user, password); Connection conn = man.getConnection(); int len = 1000; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < len; i++) { man.getConnection().close(); } man.dispose(); - trace((int) (System.currentTimeMillis() - time)); - time = System.currentTimeMillis(); + trace((int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); + time = System.nanoTime(); for (int i = 0; i < len; i++) { DriverManager.getConnection(url, user, password).close(); } - trace((int) (System.currentTimeMillis() - time)); + trace((int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); conn.close(); } @@ -181,7 +190,7 @@ private void testKeepOpen() throws Exception { private void testThreads() throws Exception { final int len = getSize(4, 20); final JdbcConnectionPool man = getConnectionPool(len - 2); - final boolean[] 
stop = { false }; + final AtomicBoolean stop = new AtomicBoolean(); /** * This class gets and returns connections from the pool. @@ -190,7 +199,7 @@ class TestRunner implements Runnable { @Override public void run() { try { - while (!stop[0]) { + while (!stop.get()) { Connection conn = man.getConnection(); if (man.getActiveConnections() >= len + 1) { throw new Exception("a: " + @@ -213,7 +222,7 @@ public void run() { threads[i].start(); } Thread.sleep(1000); - stop[0] = true; + stop.set(true); for (int i = 0; i < len; i++) { threads[i].join(); } @@ -245,4 +254,16 @@ private void testConnect() throws SQLException { getConnection(null, null); } + private void testUnwrap() throws SQLException { + JdbcConnectionPool pool = JdbcConnectionPool.create(new JdbcDataSource()); + assertTrue(pool.isWrapperFor(Object.class)); + assertTrue(pool.isWrapperFor(DataSource.class)); + assertTrue(pool.isWrapperFor(pool.getClass())); + assertFalse(pool.isWrapperFor(Integer.class)); + assertTrue(pool == pool.unwrap(Object.class)); + assertTrue(pool == pool.unwrap(DataSource.class)); + assertTrue(pool == pool.unwrap(pool.getClass())); + assertThrows(ErrorCode.INVALID_VALUE_2, () -> pool.unwrap(Integer.class)); + } + } diff --git a/h2/src/test/org/h2/test/jdbcx/TestDataSource.java b/h2/src/test/org/h2/test/jdbcx/TestDataSource.java index a6cec7b18e..20c9213cbe 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestDataSource.java +++ b/h2/src/test/org/h2/test/jdbcx/TestDataSource.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -15,19 +15,22 @@ import javax.naming.spi.ObjectFactory; import javax.sql.ConnectionEvent; import javax.sql.ConnectionEventListener; +import javax.sql.DataSource; import javax.sql.XAConnection; import javax.transaction.xa.XAResource; import javax.transaction.xa.Xid; +import org.h2.api.ErrorCode; import org.h2.jdbcx.JdbcDataSource; import org.h2.jdbcx.JdbcDataSourceFactory; import org.h2.jdbcx.JdbcXAConnection; import org.h2.message.TraceSystem; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests DataSource and XAConnection. */ -public class TestDataSource extends TestBase { +public class TestDataSource extends TestDb { /** * Run just this test. @@ -35,7 +38,7 @@ public class TestDataSource extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } // public static void main(String... args) throws SQLException { @@ -50,7 +53,7 @@ public static void main(String... 
a) throws Exception { // System.setProperty(Context.PROVIDER_URL, "rmi://localhost:1099"); // // JdbcDataSource ds = new JdbcDataSource(); -// ds.setURL("jdbc:h2:test"); +// ds.setURL("jdbc:h2:./test"); // ds.setUser("test"); // ds.setPassword(""); // @@ -71,7 +74,10 @@ public void test() throws Exception { } testDataSourceFactory(); testDataSource(); + testUnwrap(); testXAConnection(); + // otherwise we sometimes can't delete the trace file when the TestAll cleanup code runs + JdbcDataSourceFactory.getTraceSystem().close(); deleteDb("dataSource"); } @@ -187,6 +193,18 @@ private void testDataSource() throws SQLException { conn = ds.getConnection(getUser(), getPassword()); stat = conn.createStatement(); stat.execute("SELECT * FROM DUAL"); + conn.close(); + } + + private void testUnwrap() throws SQLException { + JdbcDataSource ds = new JdbcDataSource(); + assertTrue(ds.isWrapperFor(Object.class)); + assertTrue(ds.isWrapperFor(DataSource.class)); + assertTrue(ds.isWrapperFor(JdbcDataSource.class)); + assertFalse(ds.isWrapperFor(String.class)); + assertTrue(ds == ds.unwrap(Object.class)); + assertTrue(ds == ds.unwrap(DataSource.class)); + assertThrows(ErrorCode.INVALID_VALUE_2, () -> ds.unwrap(String.class)); } } diff --git a/h2/src/test/org/h2/test/jdbcx/TestXA.java b/h2/src/test/org/h2/test/jdbcx/TestXA.java index ea404cd1fc..2914518649 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestXA.java +++ b/h2/src/test/org/h2/test/jdbcx/TestXA.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: James Devenish */ package org.h2.test.jdbcx; @@ -16,12 +16,13 @@ import javax.transaction.xa.Xid; import org.h2.jdbcx.JdbcDataSource; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.JdbcUtils; /** * Basic XA tests. */ -public class TestXA extends TestBase { +public class TestXA extends TestDb { private static final String DB_NAME1 = "xadb1"; private static final String DB_NAME2 = "xadb2"; @@ -32,12 +33,13 @@ public class TestXA extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testRollbackWithoutPrepare(); + testRollbackAfterPrepare(); testXAAutoCommit(); deleteDb("xa"); testMixedXaNormal(); @@ -92,6 +94,49 @@ public byte[] getBranchQualifier() { deleteDb("xa"); } + private void testRollbackAfterPrepare() throws Exception { + if (config.memory) { + return; + } + Xid xid = new Xid() { + @Override + public int getFormatId() { + return 3145; + } + @Override + public byte[] getGlobalTransactionId() { + return new byte[] { 1, 2, 3, 4, 5, 6, 6, 7, 8 }; + } + @Override + public byte[] getBranchQualifier() { + return new byte[] { 34, 43, 33, 3, 3, 3, 33, 33, 3 }; + } + }; + deleteDb("xa"); + JdbcDataSource ds = new JdbcDataSource(); + ds.setURL(getURL("xa", true)); + ds.setPassword(getPassword()); + Connection dm = ds.getConnection(); + Statement stat = dm.createStatement(); + stat.execute("CREATE TABLE IF NOT EXISTS TEST(ID INT PRIMARY KEY, VAL INT)"); + stat.execute("INSERT INTO TEST(ID,VAL) VALUES (1,1)"); + dm.close(); + XAConnection c = ds.getXAConnection(); + XAResource xa = c.getXAResource(); + Connection connection = c.getConnection(); + xa.start(xid, XAResource.TMJOIN); + PreparedStatement ps = connection.prepareStatement("UPDATE TEST SET VAL=? 
WHERE ID=?"); + ps.setInt(1, new Random().nextInt()); + ps.setInt(2, 1); + ps.close(); + xa.prepare(xid); + xa.rollback(xid); + connection.close(); + c.close(); + deleteDb("xa"); + } + + private void testMixedXaNormal() throws Exception { JdbcDataSource ds = new JdbcDataSource(); ds.setURL("jdbc:h2:mem:test"); @@ -104,13 +149,13 @@ private void testMixedXaNormal() throws Exception { XAResource res = xa.getXAResource(); res.start(xid, XAResource.TMNOFLAGS); - assertTrue(!c.getAutoCommit()); + assertFalse(c.getAutoCommit()); res.end(xid, XAResource.TMSUCCESS); res.commit(xid, true); assertTrue(c.getAutoCommit()); res.start(xid, XAResource.TMNOFLAGS); - assertTrue(!c.getAutoCommit()); + assertFalse(c.getAutoCommit()); res.end(xid, XAResource.TMFAIL); res.rollback(xid); assertTrue(c.getAutoCommit()); @@ -149,7 +194,7 @@ private void testXAAutoCommit() throws Exception { xa.getXAResource().start(xid, XAResource.TMNOFLAGS); Connection c = xa.getConnection(); - assertTrue(!c.getAutoCommit()); + assertFalse(c.getAutoCommit()); c.close(); xa.close(); } @@ -238,20 +283,20 @@ private void testXA(boolean useOneDatabase) throws SQLException { trace("stmt1.executeUpdate(\"CREATE TABLE xatest1 " + "(id INT PRIMARY KEY, value INT)\")"); stat1.executeUpdate("CREATE TABLE xatest1 " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); trace("stmt2.executeUpdate(\"CREATE TABLE xatest2 " + - "(id INT PRIMARY KEY, value INT)\")"); + "(id INT PRIMARY KEY, v INT)\")"); stat2.executeUpdate("CREATE TABLE xatest2 " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); } else { trace("stmt1.executeUpdate(\"CREATE TABLE xatest " + "(id INT PRIMARY KEY, value INT)\")"); stat1.executeUpdate("CREATE TABLE xatest " + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); trace("stmt2.executeUpdate(\"CREATE TABLE xatest " + - "(id INT PRIMARY KEY, value INT)\")"); + "(id INT PRIMARY KEY, v INT)\")"); stat2.executeUpdate("CREATE TABLE xatest 
" + - "(id INT PRIMARY KEY, value INT)"); + "(id INT PRIMARY KEY, v INT)"); } if (useOneDatabase) { @@ -298,22 +343,22 @@ private void testXA(boolean useOneDatabase) throws SQLException { if (useOneDatabase) { trace("stmt1.executeUpdate(\"UPDATE xatest1 " + - "SET value=1 WHERE id=1\")"); + "SET v=1 WHERE id=1\")"); stat1.executeUpdate("UPDATE xatest1 " + - "SET value=1 WHERE id=1"); + "SET v=1 WHERE id=1"); trace("stmt2.executeUpdate(\"UPDATE xatest2 " + - "SET value=1 WHERE id=2\")"); + "SET v=1 WHERE id=2\")"); stat2.executeUpdate("UPDATE xatest2 " + - "SET value=1 WHERE id=2"); + "SET v=1 WHERE id=2"); } else { trace("stmt1.executeUpdate(\"UPDATE xatest " + - "SET value=1 WHERE id=1\")"); + "SET v=1 WHERE id=1\")"); stat1.executeUpdate("UPDATE xatest " + - "SET value=1 WHERE id=1"); + "SET v=1 WHERE id=1"); trace("stmt2.executeUpdate(\"UPDATE xatest " + - "SET value=1 WHERE id=2\")"); + "SET v=1 WHERE id=2\")"); stat2.executeUpdate("UPDATE xatest " + - "SET value=1 WHERE id=2"); + "SET v=1 WHERE id=2"); } trace("xares1.end(xid1, XAResource.TMSUCCESS)"); diff --git a/h2/src/test/org/h2/test/jdbcx/TestXASimple.java b/h2/src/test/org/h2/test/jdbcx/TestXASimple.java index 8523fd7a69..16f68cdf3e 100644 --- a/h2/src/test/org/h2/test/jdbcx/TestXASimple.java +++ b/h2/src/test/org/h2/test/jdbcx/TestXASimple.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.jdbcx; @@ -14,12 +14,13 @@ import javax.transaction.xa.Xid; import org.h2.jdbcx.JdbcDataSource; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.JdbcUtils; /** * A simple XA test. */ -public class TestXASimple extends TestBase { +public class TestXASimple extends TestDb { /** * Run just this test. 
@@ -27,7 +28,7 @@ public class TestXASimple extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/jdbcx/package.html b/h2/src/test/org/h2/test/jdbcx/package.html index bd78701aa7..41fa5358b5 100644 --- a/h2/src/test/org/h2/test/jdbcx/package.html +++ b/h2/src/test/org/h2/test/jdbcx/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc1.java b/h2/src/test/org/h2/test/mvcc/TestMvcc1.java index d75962184c..954d27d159 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc1.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc1.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -14,11 +14,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Basic MVCC (multi version concurrency) test cases. */ -public class TestMvcc1 extends TestBase { +public class TestMvcc1 extends TestDb { private Connection c1, c2; private Statement s1, s2; @@ -30,38 +31,21 @@ public class TestMvcc1 extends TestBase { */ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.mvcc = true; - test.test(); + test.testFromMain(); } @Override - public void test() throws SQLException { - testCases(); - testSetMode(); - deleteDb("mvcc1"); + public boolean isEnabled() { + return true; } - private void testSetMode() throws SQLException { + @Override + public void test() throws SQLException { + testCases(); deleteDb("mvcc1"); - c1 = getConnection("mvcc1;MVCC=FALSE"); - Statement stat = c1.createStatement(); - ResultSet rs = stat.executeQuery( - "select * from information_schema.settings where name='MVCC'"); - rs.next(); - assertEquals("FALSE", rs.getString("VALUE")); - assertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, stat). - execute("SET MVCC TRUE"); - rs = stat.executeQuery("select * from information_schema.settings " + - "where name='MVCC'"); - rs.next(); - assertEquals("FALSE", rs.getString("VALUE")); - c1.close(); } private void testCases() throws SQLException { - if (!config.mvcc) { - return; - } ResultSet rs; // TODO Prio 1: document: exclusive table lock still used when altering @@ -77,9 +61,9 @@ private void testCases() throws SQLException { // selects new data (select * from test where id > ?) 
and deletes deleteDb("mvcc1"); - c1 = getConnection("mvcc1;MVCC=TRUE;LOCK_TIMEOUT=10"); + c1 = getConnection("mvcc1;LOCK_TIMEOUT=10"); s1 = c1.createStatement(); - c2 = getConnection("mvcc1;MVCC=TRUE;LOCK_TIMEOUT=10"); + c2 = getConnection("mvcc1;LOCK_TIMEOUT=10"); s2 = c2.createStatement(); c1.setAutoCommit(false); c2.setAutoCommit(false); @@ -103,7 +87,7 @@ private void testCases() throws SQLException { c2.commit(); // referential integrity problem - s1.execute("create table a (id integer identity not null, " + + s1.execute("create table a (id integer generated by default as identity, " + "code varchar(10) not null, primary key(id))"); s1.execute("create table b (name varchar(100) not null, a integer, " + "primary key(name), foreign key(a) references a(id))"); @@ -230,14 +214,14 @@ private void testCases() throws SQLException { s1.execute("DROP TABLE TEST"); c1.commit(); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); assertResult("0", s2, "SELECT COUNT(*) FROM TEST"); assertResult("1", s1, "SELECT COUNT(*) FROM TEST"); s1.execute("DROP TABLE TEST"); c1.commit(); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); s1.execute("INSERT INTO TEST(NAME) VALUES('Ruebezahl')"); s1.execute("DROP TABLE TEST"); @@ -252,7 +236,7 @@ private void testCases() throws SQLException { c1.commit(); Random random = new Random(1); - s1.execute("CREATE TABLE TEST(ID INT IDENTITY, NAME VARCHAR)"); + s1.execute("CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY, NAME VARCHAR)"); Statement s; Connection c; for (int i = 0; i < 1000; i++) { diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc2.java b/h2/src/test/org/h2/test/mvcc/TestMvcc2.java index 
31c22532e8..93ce063569 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc2.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -9,16 +9,15 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.util.concurrent.atomic.AtomicBoolean; - import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Additional MVCC (multi version concurrency) test cases. */ -public class TestMvcc2 extends TestBase { +public class TestMvcc2 extends TestDb { private static final String DROP_TABLE = "DROP TABLE IF EXISTS EMPLOYEE"; @@ -36,15 +35,16 @@ public class TestMvcc2 extends TestBase { */ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.mvcc = true; - test.test(); + test.testFromMain(); + } + + @Override + public boolean isEnabled() { + return true; } @Override public void test() throws Exception { - if (!config.mvcc) { - return; - } deleteDb("mvcc2"); testConcurrentInsert(); testConcurrentUpdate(); @@ -66,33 +66,26 @@ private void testConcurrentInsert() throws Exception { stat2.execute("set lock_timeout 1000"); stat.execute("create table test(id int primary key, name varchar)"); conn.setAutoCommit(false); - final AtomicBoolean committed = new AtomicBoolean(false); Task t = new Task() { @Override - public void call() throws SQLException { + public void call() { try { -//System.out.println("insert2 hallo"); stat2.execute("insert into test values(0, 'Hallo')"); -//System.out.println("insert2 hallo done"); + fail(); } catch (SQLException e) { -//System.out.println("insert2 hallo e " + e); - if (!committed.get()) { - throw e; - } + assertTrue(e.toString(), + e.getErrorCode() == ErrorCode.DUPLICATE_KEY_1 || + e.getErrorCode() == ErrorCode.CONCURRENT_UPDATE_1); } } }; -//System.out.println("insert hello"); stat.execute("insert into test values(0, 'Hello')"); t.execute(); - Thread.sleep(500); -//System.out.println("insert hello commit"); - committed.set(true); conn.commit(); t.get(); ResultSet rs; rs = stat.executeQuery("select name from test"); - rs.next(); + assertTrue(rs.next()); assertEquals("Hello", rs.getString(1)); stat.execute("drop table test"); conn2.close(); @@ -112,16 +105,17 @@ private void testConcurrentUpdate() throws Exception { @Override public void call() throws SQLException { stat2.execute("update test set name = 'Hallo'"); + assertEquals(1, stat2.getUpdateCount()); } }; stat.execute("update test set name = 'Hi'"); + assertEquals(1, stat.getUpdateCount()); t.execute(); - Thread.sleep(500); conn.commit(); t.get(); ResultSet rs; rs = stat.executeQuery("select name from test"); - rs.next(); + 
assertTrue(rs.next()); assertEquals("Hallo", rs.getString(1)); stat.execute("drop table test"); conn2.close(); @@ -129,28 +123,21 @@ public void call() throws SQLException { } private void testSelectForUpdate() throws SQLException { - Connection conn = getConnection("mvcc2;SELECT_FOR_UPDATE_MVCC=true"); - Connection conn2 = getConnection("mvcc2;SELECT_FOR_UPDATE_MVCC=true"); + Connection conn = getConnection("mvcc2"); + Connection conn2 = getConnection("mvcc2"); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); conn.setAutoCommit(false); stat.execute("insert into test select x, 'Hello' from system_range(1, 10)"); stat.execute("select * from test where id = 3 for update"); conn.commit(); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, stat). - execute("select sum(id) from test for update"); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, stat). - execute("select distinct id from test for update"); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, stat). - execute("select id from test group by id for update"); - assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, stat). - execute("select t1.id from test t1, test t2 for update"); stat.execute("select * from test where id = 3 for update"); conn2.setAutoCommit(false); conn2.createStatement().execute("select * from test where id = 4 for update"); assertThrows(ErrorCode.LOCK_TIMEOUT_1, conn2.createStatement()). execute("select * from test where id = 3 for update"); conn.close(); + conn2.close(); } private void testInsertUpdateRollback() throws SQLException { diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc3.java b/h2/src/test/org/h2/test/mvcc/TestMvcc3.java index ff0833d940..ebf6bfadbf 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvcc3.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc3.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -13,11 +13,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Additional MVCC (multi version concurrency) test cases. */ -public class TestMvcc3 extends TestBase { +public class TestMvcc3 extends TestDb { /** * Run just this test. @@ -26,8 +27,7 @@ public class TestMvcc3 extends TestBase { */ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.mvcc = true; - test.test(); + test.testFromMain(); } @Override @@ -36,7 +36,6 @@ public void test() throws SQLException { testConcurrentUpdate(); testInsertUpdateRollback(); testCreateTableAsSelect(); - testSequence(); testDisableAutoCommit(); testRollback(); deleteDb("mvcc3"); @@ -63,9 +62,6 @@ private void testFailedUpdate() throws SQLException { } private void testConcurrentUpdate() throws SQLException { - if (!config.mvcc) { - return; - } deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); c1.setAutoCommit(false); @@ -79,12 +75,7 @@ private void testConcurrentUpdate() throws SQLException { s1.execute("create unique index on test(name)"); s1.executeUpdate("update test set name = 100 where id = 1"); - try { - s2.executeUpdate("update test set name = 100 where id = 2"); - fail(); - } catch (SQLException e) { - // expected - } + assertThrows(SQLException.class, s2).executeUpdate("update test set name = 100 where id = 2"); ResultSet rs = s1.executeQuery("select * from test order by id"); assertTrue(rs.next()); @@ -107,10 +98,6 @@ private void testConcurrentUpdate() throws SQLException { } private void testInsertUpdateRollback() throws SQLException { - if (!config.mvcc) { - return; - } - deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); Statement s1 = c1.createStatement(); @@ -152,9 +139,6 @@ private void 
printRows(String s, Statement s1, Statement s2) } private void testCreateTableAsSelect() throws SQLException { - if (!config.mvcc) { - return; - } deleteDb("mvcc3"); Connection c1 = getConnection("mvcc3"); Statement s1 = c1.createStatement(); @@ -170,10 +154,6 @@ private void testCreateTableAsSelect() throws SQLException { } private void testRollback() throws SQLException { - if (!config.mvcc) { - return; - } - deleteDb("mvcc3"); Connection conn = getConnection("mvcc3"); Statement stat = conn.createStatement(); @@ -223,9 +203,6 @@ private void testRollback() throws SQLException { } private void testDisableAutoCommit() throws SQLException { - if (!config.mvcc) { - return; - } deleteDb("mvcc3"); Connection conn = getConnection("mvcc3"); Statement stat = conn.createStatement(); @@ -241,34 +218,4 @@ private void testDisableAutoCommit() throws SQLException { conn.close(); } - private void testSequence() throws SQLException { - if (config.memory) { - return; - } - - deleteDb("mvcc3"); - Connection conn; - ResultSet rs; - - conn = getConnection("mvcc3"); - conn.createStatement().execute("create sequence abc"); - conn.close(); - - conn = getConnection("mvcc3"); - rs = conn.createStatement().executeQuery("call abc.nextval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - conn.close(); - - conn = getConnection("mvcc3"); - rs = conn.createStatement().executeQuery("call abc.currval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - conn.close(); - } - } - - - - diff --git a/h2/src/test/org/h2/test/mvcc/TestMvcc4.java b/h2/src/test/org/h2/test/mvcc/TestMvcc4.java new file mode 100644 index 0000000000..b99637a2d0 --- /dev/null +++ b/h2/src/test/org/h2/test/mvcc/TestMvcc4.java @@ -0,0 +1,138 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.mvcc; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.concurrent.CountDownLatch; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Additional MVCC (multi version concurrency) test cases. + */ +public class TestMvcc4 extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.config.lockTimeout = 20000; + test.config.memory = true; + test.testFromMain(); + } + + @Override + public boolean isEnabled() { + if (config.networked) { + return false; + } + return true; + } + + @Override + public void test() throws SQLException { + testSelectForUpdateAndUpdateConcurrency(); + } + + private void testSelectForUpdateAndUpdateConcurrency() throws SQLException { + deleteDb("mvcc4"); + Connection setup = getConnection("mvcc4"); + setup.setAutoCommit(false); + + { + Statement s = setup.createStatement(); + s.executeUpdate("CREATE TABLE test (" + + "entity_id VARCHAR(100) NOT NULL PRIMARY KEY, " + + "lastUpdated TIMESTAMP NOT NULL)"); + + PreparedStatement ps = setup.prepareStatement( + "INSERT INTO test (entity_id, lastUpdated) VALUES (?, ?)"); + for (int i = 0; i < 2; i++) { + String id = "" + i; + ps.setString(1, id); + ps.setTimestamp(2, new Timestamp(System.currentTimeMillis())); + ps.executeUpdate(); + } + setup.commit(); + } + + //Create a connection from thread 1 + Connection c1 = getConnection("mvcc4;LOCK_TIMEOUT=10000"); + c1.setAutoCommit(false); + + //Fire off a concurrent update. 
+ final CountDownLatch executedUpdate = new CountDownLatch(1); + new Thread() { + @Override + public void run() { + try { + Connection c2 = getConnection("mvcc4"); + c2.setAutoCommit(false); + + PreparedStatement ps = c2.prepareStatement( + "SELECT * FROM test WHERE entity_id = ? FOR UPDATE"); + ps.setString(1, "1"); + ps.executeQuery().next(); + + executedUpdate.countDown(); + // interrogate new "blocker_id" metatable field instead of + // relying on stacktraces!? to determine when session is blocking + PreparedStatement stmt = c2.prepareStatement( + "SELECT * FROM INFORMATION_SCHEMA.SESSIONS WHERE BLOCKER_ID = SESSION_ID()"); + ResultSet resultSet; + do { + resultSet = stmt.executeQuery(); + } while(!resultSet.next()); + + c2.commit(); + c2.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + }.start(); + + //Wait until the concurrent update has executed, but not yet committed + try { + executedUpdate.await(); + } catch (InterruptedException e) { + // ignore + } + + // Execute an update. This should initially fail, and enter the waiting + // for lock case. + PreparedStatement ps = c1.prepareStatement("UPDATE test SET lastUpdated = ?"); + ps.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + assertEquals(2, ps.executeUpdate()); + + c1.commit(); + c1.close(); + + Connection verify = getConnection("mvcc4"); + + verify.setAutoCommit(false); + ps = verify.prepareStatement("SELECT COUNT(*) FROM test"); + ResultSet rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals(2,rs.getInt(1)); + verify.commit(); + verify.close(); + + setup.close(); + } +} + + + + diff --git a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java index 8c192734b3..26f3ab3e54 100644 --- a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java +++ b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.mvcc; @@ -8,16 +8,17 @@ import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; -import java.util.concurrent.CountDownLatch; - +import java.util.ArrayList; +import java.util.concurrent.CyclicBarrier; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Multi-threaded MVCC (multi version concurrency) test cases. */ -public class TestMvccMultiThreaded extends TestBase { +public class TestMvccMultiThreaded extends TestDb { /** * Run just this test. @@ -25,21 +26,59 @@ public class TestMvccMultiThreaded extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return true; } @Override public void test() throws Exception { + testConcurrentSelectForUpdate(); testMergeWithUniqueKeyViolation(); testConcurrentMerge(); - testConcurrentUpdate(""); - // not supported currently - // testConcurrentUpdate(";MULTI_THREADED=TRUE"); + testConcurrentUpdate(); + } + + private void testConcurrentSelectForUpdate() throws Exception { + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + stat.execute("create table test(id int not null primary key, updated int not null)"); + stat.execute("insert into test(id, updated) values(1, 100)"); + ArrayList tasks = new ArrayList<>(); + int count = 3; + for (int i = 0; i < count; i++) { + Task task = new Task() { + @Override + public void call() throws Exception { + try (Connection conn = getConnection(getTestName())) { + Statement stat 
= conn.createStatement(); + while (!stop) { + stat.execute("select * from test where id=1 for update"); + } + } + } + }.execute(); + tasks.add(task); + } + for (int i = 0; i < 10; i++) { + Thread.sleep(100); + ResultSet rs = stat.executeQuery("select * from test"); + assertTrue(rs.next()); + } + for (Task t : tasks) { + t.get(); + } + conn.close(); + deleteDb(getTestName()); } private void testMergeWithUniqueKeyViolation() throws Exception { - deleteDb("mvccMultiThreaded"); - Connection conn = getConnection("mvccMultiThreaded"); + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); Statement stat = conn.createStatement(); stat.execute("create table test(x int primary key, y int unique)"); stat.execute("insert into test values(1, 1)"); @@ -51,19 +90,18 @@ private void testMergeWithUniqueKeyViolation() throws Exception { } private void testConcurrentMerge() throws Exception { - deleteDb("mvccMultiThreaded"); + deleteDb(getTestName()); int len = 3; final Connection[] connList = new Connection[len]; for (int i = 0; i < len; i++) { Connection conn = getConnection( - "mvccMultiThreaded;MVCC=TRUE;LOCK_TIMEOUT=500"); + getTestName() + ";LOCK_TIMEOUT=500"); connList[i] = conn; } Connection conn = connList[0]; conn.createStatement().execute( "create table test(id int primary key, name varchar)"); Task[] tasks = new Task[len]; - final boolean[] stop = { false }; for (int i = 0; i < len; i++) { final Connection c = connList[i]; c.setAutoCommit(false); @@ -74,51 +112,54 @@ public void call() throws Exception { c.createStatement().execute( "merge into test values(1, 'x')"); c.commit(); - Thread.sleep(1); } } }; tasks[i].execute(); } Thread.sleep(1000); - stop[0] = true; for (int i = 0; i < len; i++) { tasks[i].get(); } for (int i = 0; i < len; i++) { connList[i].close(); } - deleteDb("mvccMultiThreaded"); + deleteDb(getTestName()); } - private void testConcurrentUpdate(String suffix) throws Exception { - deleteDb("mvccMultiThreaded"); + private void 
testConcurrentUpdate() throws Exception { + deleteDb(getTestName()); int len = 2; final Connection[] connList = new Connection[len]; for (int i = 0; i < len; i++) { - connList[i] = getConnection( - "mvccMultiThreaded;MVCC=TRUE" + suffix); + connList[i] = getConnection(getTestName()); } Connection conn = connList[0]; conn.createStatement().execute( - "create table test(id int primary key, value int)"); + "create table test(id int primary key, v int)"); conn.createStatement().execute( "insert into test values(0, 0)"); final int count = 1000; Task[] tasks = new Task[len]; - final CountDownLatch latch = new CountDownLatch(len); + final CyclicBarrier barrier = new CyclicBarrier(len); for (int i = 0; i < len; i++) { final int x = i; + // Recent changes exposed a race condition in this test itself. + // Without preliminary record locking, counter will be off. + connList[x].setAutoCommit(false); tasks[i] = new Task() { @Override public void call() throws Exception { for (int a = 0; a < count; a++) { + ResultSet rs = connList[x].createStatement().executeQuery( + "select v from test for update"); + assertTrue(rs.next()); connList[x].createStatement().execute( - "update test set value=value+1"); - latch.countDown(); - latch.await(); + "update test set v=v+1"); + connList[x].commit(); + barrier.await(); } } }; @@ -127,13 +168,12 @@ public void call() throws Exception { for (int i = 0; i < len; i++) { tasks[i].get(); } - ResultSet rs = conn.createStatement().executeQuery("select value from test"); + ResultSet rs = conn.createStatement().executeQuery("select v from test"); rs.next(); assertEquals(count * len, rs.getInt(1)); for (int i = 0; i < len; i++) { connList[i].close(); } - deleteDb("mvccMultiThreaded"); } } diff --git a/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java new file mode 100644 index 0000000000..1f6231eed4 --- /dev/null +++ b/h2/src/test/org/h2/test/mvcc/TestMvccMultiThreaded2.java @@ -0,0 
+1,186 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.mvcc; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; +import org.h2.message.DbException; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.IOUtils; + +/** + * Additional MVCC (multi version concurrency) test cases. + */ +public class TestMvccMultiThreaded2 extends TestDb { + + private static final int TEST_THREAD_COUNT = 100; + private static final int TEST_TIME_SECONDS = 60; + private static final boolean DISPLAY_STATS = false; + + private static final String URL = ";LOCK_TIMEOUT=120000"; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.config.lockTimeout = 120000; + test.config.memory = true; + test.testFromMain(); + } + + int getTestDuration() { + // to save some testing time + return config.big ? 
TEST_TIME_SECONDS : TEST_TIME_SECONDS / 10; + } + + @Override + public boolean isEnabled() { + return true; + } + + @Override + public void test() throws SQLException, InterruptedException { + testSelectForUpdateConcurrency(); + } + + private void testSelectForUpdateConcurrency() + throws SQLException, InterruptedException { + deleteDb(getTestName()); + Connection conn = getConnection(getTestName() + URL); + conn.setAutoCommit(false); + + String sql = "CREATE TABLE test (" + + "entity_id INTEGER NOT NULL PRIMARY KEY, " + + "lastUpdated INTEGER NOT NULL)"; + + Statement smtm = conn.createStatement(); + smtm.executeUpdate(sql); + + PreparedStatement ps = conn.prepareStatement( + "INSERT INTO test (entity_id, lastUpdated) VALUES (?, ?)"); + ps.setInt(1, 1); + ps.setInt(2, 100); + ps.executeUpdate(); + ps.setInt(1, 2); + ps.setInt(2, 200); + ps.executeUpdate(); + conn.commit(); + + CountDownLatch latch = new CountDownLatch(TEST_THREAD_COUNT + 1); + ArrayList threads = new ArrayList<>(); + for (int i = 0; i < TEST_THREAD_COUNT; i++) { + SelectForUpdate sfu = new SelectForUpdate(latch); + sfu.setName("Test SelectForUpdate Thread#"+i); + threads.add(sfu); + sfu.start(); + } + + latch.countDown(); + + // gather stats on threads after they finished + @SuppressWarnings("unused") + int minProcessed = Integer.MAX_VALUE, maxProcessed = 0, totalProcessed = 0; + + boolean allOk = true; + for (SelectForUpdate sfu : threads) { + // make sure all threads have stopped by joining with them + sfu.join(); + allOk &= sfu.ok; + totalProcessed += sfu.iterationsProcessed; + if (sfu.iterationsProcessed > maxProcessed) { + maxProcessed = sfu.iterationsProcessed; + } + if (sfu.iterationsProcessed < minProcessed) { + minProcessed = sfu.iterationsProcessed; + } + } + + if (DISPLAY_STATS) { + println(String.format( + "+ INFO: TestMvccMultiThreaded2 RUN STATS threads=%d, minProcessed=%d, maxProcessed=%d, " + + "totalProcessed=%d, averagePerThread=%d, averagePerThreadPerSecond=%d\n", + 
TEST_THREAD_COUNT, minProcessed, maxProcessed, totalProcessed, totalProcessed / TEST_THREAD_COUNT, + totalProcessed / (TEST_THREAD_COUNT * getTestDuration()))); + } + + IOUtils.closeSilently(conn); + deleteDb(getTestName()); + + assertTrue(allOk); + } + + /** + * Worker test thread selecting for update + */ + private class SelectForUpdate extends Thread + { + private final CountDownLatch latch; + public int iterationsProcessed; + + public boolean ok; + + SelectForUpdate(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void run() { + final long start = System.currentTimeMillis(); + boolean done = false; + try (Connection conn = getConnection(getTestName() + URL)) { + conn.setAutoCommit(false); + + // give the other threads a chance to start up before going into our work loop + latch.countDown(); + latch.await(); + + PreparedStatement ps = conn.prepareStatement( + "SELECT * FROM test WHERE entity_id = ? FOR UPDATE"); + while (!done) { + String id; + int value; + if ((iterationsProcessed & 1) == 0) { + id = "1"; + value = 100; + } else { + id = "2"; + value = 200; + } + ps.setString(1, id); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + assertTrue(rs.getInt(2) == value); + + conn.commit(); + iterationsProcessed++; + + long now = System.currentTimeMillis(); + if (now - start > 1000 * getTestDuration()) { + done = true; + } + } + ok = true; + } catch (InterruptedException ignore) { + } catch (SQLException e) { + TestBase.logError("SQL error from thread "+getName(), e); + throw DbException.convert(e); + } catch (Exception e) { + TestBase.logError("General error from thread "+getName(), e); + throw e; + } + } + } +} diff --git a/h2/src/test/org/h2/test/mvcc/package.html b/h2/src/test/org/h2/test/mvcc/package.html index a8dac66da1..73ab19a52e 100644 --- a/h2/src/test/org/h2/test/mvcc/package.html +++ b/h2/src/test/org/h2/test/mvcc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/otherDatabases.txt 
b/h2/src/test/org/h2/test/otherDatabases.txt index 9232ea30be..48a689b49b 100644 --- a/h2/src/test/org/h2/test/otherDatabases.txt +++ b/h2/src/test/org/h2/test/otherDatabases.txt @@ -67,9 +67,9 @@ No optimization for COUNT(*) Derby -------------------------------------------------------------------------------------------------------- To call getFD().sync() (which results in the OS call fsync()), -set the system property derby.storage.fileSyncTransactionLog to true true. +set the system property derby.storage.fileSyncTransactionLog to true. See -http://db.apache.org/derby/javadoc/engine/org/apache/derby/iapi/reference/Property.html#FILESYNC_TRANSACTION_LOG +https://db.apache.org/derby/javadoc/engine/org/apache/derby/iapi/reference/Property.html#FILESYNC_TRANSACTION_LOG Missing features: LIMIT OFFSET is not supported. No optimization for COUNT(*) diff --git a/h2/src/test/org/h2/test/package.html b/h2/src/test/org/h2/test/package.html index 6c160d4bf5..b2fcea6040 100644 --- a/h2/src/test/org/h2/test/package.html +++ b/h2/src/test/org/h2/test/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/poweroff/Listener.java b/h2/src/test/org/h2/test/poweroff/Listener.java index 762222e6b8..2b49cac156 100644 --- a/h2/src/test/org/h2/test/poweroff/Listener.java +++ b/h2/src/test/org/h2/test/poweroff/Listener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -9,6 +9,7 @@ import java.io.IOException; import java.net.ServerSocket; import java.net.Socket; +import java.util.concurrent.TimeUnit; /** * The listener application for the power off test. 
@@ -60,7 +61,7 @@ private void listen(int port) throws IOException { Socket socket = serverSocket.accept(); DataInputStream in = new DataInputStream(socket.getInputStream()); System.out.println("Connected"); - time = System.currentTimeMillis(); + time = System.nanoTime(); try { while (true) { int value = in.readInt(); @@ -72,8 +73,8 @@ private void listen(int port) throws IOException { } catch (IOException e) { System.out.println("Closed with Exception: " + e); } - time = System.currentTimeMillis() - time; - int operationsPerSecond = (int) (1000 * maxValue / time); + time = System.nanoTime() - time; + int operationsPerSecond = (int) (TimeUnit.SECONDS.toNanos(1) * maxValue / time); System.out.println("Max=" + maxValue + " operations/sec=" + operationsPerSecond); } diff --git a/h2/src/test/org/h2/test/poweroff/Test.java b/h2/src/test/org/h2/test/poweroff/Test.java index f835e0dc9b..2875236632 100644 --- a/h2/src/test/org/h2/test/poweroff/Test.java +++ b/h2/src/test/org/h2/test/poweroff/Test.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -142,21 +142,21 @@ private static void testFile(DataOutputStream out) throws IOException { private static void testDatabases(DataOutputStream out) throws Exception { Test[] dbs = { new Test("org.h2.Driver", - "jdbc:h2:test1", "sa", "", true), + "jdbc:h2:./test1", "sa", "", true), new Test("org.h2.Driver", - "jdbc:h2:test2", "sa", "", false), + "jdbc:h2:./test2", "sa", "", false), new Test("org.hsqldb.jdbcDriver", "jdbc:hsqldb:test4", "sa", "", false), - // new Test("com.mysql.jdbc.Driver", + // new Test("com.mysql.cj.jdbc.Driver", // "jdbc:mysql://localhost/test", "sa", ""), new Test("org.postgresql.Driver", "jdbc:postgresql:test", "sa", "sa", false), - new Test("org.apache.derby.jdbc.EmbeddedDriver", + new Test("org.apache.derby.iapi.jdbc.AutoloadedDriver", "jdbc:derby:test;create=true", "sa", "", false), new Test("org.h2.Driver", - "jdbc:h2:test5", "sa", "", true), + "jdbc:h2:./test5", "sa", "", true), new Test("org.h2.Driver", - "jdbc:h2:test6", "sa", "", false), }; + "jdbc:h2:./test6", "sa", "", false), }; for (int i = 0;; i++) { for (Test t : dbs) { t.insert(i); diff --git a/h2/src/test/org/h2/test/poweroff/TestRecover.java b/h2/src/test/org/h2/test/poweroff/TestRecover.java index bd3af3052a..922d43fbbb 100644 --- a/h2/src/test/org/h2/test/poweroff/TestRecover.java +++ b/h2/src/test/org/h2/test/poweroff/TestRecover.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -12,7 +12,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.PrintWriter; -import java.util.Date; import java.security.SecureRandom; import java.sql.Connection; import java.sql.Driver; @@ -21,7 +20,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.text.SimpleDateFormat; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -29,7 +29,6 @@ import java.util.zip.ZipOutputStream; import org.h2.util.IOUtils; -import org.h2.util.New; /** * This standalone test checks if recovery of a database works after power @@ -55,7 +54,7 @@ public class TestRecover { // "jdbc:derby:/temp/derby/data/test;create=true"); // private static final String DRIVER = // System.getProperty("test.driver", - // "org.apache.derby.jdbc.EmbeddedDriver"); + // "org.apache.derby.iapi.jdbc.AutoloadedDriver"); /** * This method is called when executing this application from the command @@ -104,10 +103,9 @@ private static File backup(String sourcePath, String targetPath, } oldest.delete(); } - SimpleDateFormat sd = new SimpleDateFormat("yyMMdd-HHmmss"); - String date = sd.format(new Date()); + String date = DateTimeFormatter.ofPattern("yyMMdd-HHmmss").format(LocalDateTime.now()); File zipFile = new File(root, "backup-" + date + "-" + node + ".zip"); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); File base = new File(sourcePath); listRecursive(list, base); if (list.size() == 0) { @@ -133,12 +131,9 @@ private static File backup(String sourcePath, String targetPath, } ZipEntry entry = new ZipEntry(basePath + entryName); zipOut.putNextEntry(entry); - InputStream in = null; - try { - in = new FileInputStream(fileName); + + try (InputStream in = new FileInputStream(fileName)) { IOUtils.copyAndCloseInput(in, zipOut); - } finally { - 
IOUtils.closeSilently(in); } zipOut.closeEntry(); } @@ -209,7 +204,7 @@ private static void closeConnection(Connection conn) { // ignore } try { - Driver driver = (Driver) Class.forName(DRIVER).newInstance(); + Driver driver = (Driver) Class.forName(DRIVER).getDeclaredConstructor().newInstance(); DriverManager.registerDriver(driver); } catch (Exception e) { e.printStackTrace(); diff --git a/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java b/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java index e827b8652b..20c9a4db06 100644 --- a/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java +++ b/h2/src/test/org/h2/test/poweroff/TestRecoverKillLoop.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -37,7 +37,7 @@ private void runTest(int count) throws Exception { Random random = new Random(1); for (int i = 0; i < count; i++) { String[] procDef = { - "java", "-cp", getClassPath(), + getJVM(), "-cp", getClassPath(), "-Dtest.dir=data/db", TestRecover.class.getName() }; diff --git a/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java b/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java new file mode 100644 index 0000000000..a6bfba0b95 --- /dev/null +++ b/h2/src/test/org/h2/test/poweroff/TestReorderWrites.java @@ -0,0 +1,213 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.poweroff; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Arrays; +import java.util.Map; +import java.util.Random; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.MVStoreTool; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; +import org.h2.test.utils.FilePathReorderWrites; + +/** + * Tests that the MVStore recovers from a power failure if the file system or + * disk re-ordered the write operations. + */ +public class TestReorderWrites extends TestBase { + + private static final boolean LOG = false; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testMVStore(false); + testMVStore(true); + testFileSystem(false); + testFileSystem(true); + } + + private void testMVStore(final boolean partialWrite) { + // Add partial write test + // @since 2019-07-31 little-pan + println(String.format("testMVStore(): %s partial write", partialWrite? "Enable": "Disable")); + FilePathReorderWrites.setPartialWrites(partialWrite); + + FilePathReorderWrites fs = FilePathReorderWrites.register(); + String fileName = "reorder:memFS:test.mv"; + try { + for (int i = 0; i < (config.big ? 1000 : 100); i++) { + log(i + " --------------------------------"); + // this test is not interested in power off failures during + // initial creation + fs.setPowerOffCountdown(0, 0); + // release the static data this test generates + FileUtils.delete("memFS:test.mv"); + FileUtils.delete("memFS:test.mv.copy"); + MVStore store = new MVStore.Builder(). + fileName(fileName). 
+ autoCommitDisabled().open(); + // store.setRetentionTime(10); + Map map = store.openMap("data"); + map.put(-1, new byte[1]); + store.commit(); + store.getFileStore().sync(); + Random r = new Random(i); + int stop = 4 + r.nextInt(config.big ? 150 : 20); + log("countdown start"); + fs.setPowerOffCountdown(stop, i); + try { + for (int j = 1; j < 100; j++) { + Map newMap = store.openMap("d" + j); + newMap.put(j, j * 10); + int key = r.nextInt(10); + int len = 10 * r.nextInt(1000); + if (r.nextBoolean()) { + map.remove(key); + } else { + map.put(key, new byte[len]); + } + log("op " + j + ": "); + store.commit(); + switch (r.nextInt(10)) { + case 0: + log("op compact"); + store.compact(100, 10 * 1024); + break; + case 1: + log("op compactMoveChunks"); + store.compactMoveChunks(); + log("op compactMoveChunks done"); + break; + } + } + // write has to fail at some point + fail(); + } catch (MVStoreException e) { + log("stop " + e + ", cause: " + e.getCause()); + // expected + } + try { + store.close(); + } catch (MVStoreException e) { + // expected + store.closeImmediately(); + } + log("verify"); + fs.setPowerOffCountdown(100, 0); + if (LOG) { + MVStoreTool.dump(fileName, true); + } + store = new MVStore.Builder(). + fileName(fileName). 
+ autoCommitDisabled().open(); + map = store.openMap("data"); + if (!map.containsKey(-1)) { + fail("key not found, size=" + map.size() + " i=" + i); + } else { + assertEquals("i=" + i, 1, map.get(-1).length); + } + for (int j = 0; j < 100; j++) { + Map newMap = store.openMap("d" + j); + newMap.get(j); + } + map.keySet(); + store.close(); + } + } finally { + // release the static data this test generates + FileUtils.delete("memFS:test.mv"); + FileUtils.delete("memFS:test.mv.copy"); + } + } + + private static void log(String message) { + if (LOG) { + System.out.println(message); + } + } + + private void testFileSystem(final boolean partialWrite) throws IOException { + FilePathReorderWrites fs = FilePathReorderWrites.register(); + // *disable this for now, still bug(s) in our code* + // Add partial write enable test + // @since 2019-07-31 little-pan + FilePathReorderWrites.setPartialWrites(partialWrite); + println(String.format("testFileSystem(): %s partial write", partialWrite? "Enable": "Disable")); + + String fileName = "reorder:memFS:test"; + final ByteBuffer empty = ByteBuffer.allocate(1024); + Random r = new Random(1); + long minSize = Long.MAX_VALUE; + long maxSize = 0; + int minWritten = Integer.MAX_VALUE; + int maxWritten = 0; + for (int i = 0; i < 100; i++) { + fs.setPowerOffCountdown(100, i); + FileUtils.delete(fileName); + FileChannel fc = FilePath.get(fileName).open("rw"); + for (int j = 0; j < 20; j++) { + fc.write(empty, j * 1024); + empty.flip(); + } + fs.setPowerOffCountdown(4 + r.nextInt(20), i); + int lastWritten = 0; + int lastTruncated = 0; + for (int j = 20; j >= 0; j--) { + try { + byte[] bytes = new byte[1024]; + Arrays.fill(bytes, (byte) j); + ByteBuffer data = ByteBuffer.wrap(bytes); + fc.write(data, 0); + lastWritten = j; + } catch (IOException e) { + // expected + break; + } + try { + fc.truncate(j * 1024); + lastTruncated = j * 1024; + } catch (IOException e) { + // expected + break; + } + } + if (lastTruncated <= 0 || lastWritten <= 0) { 
+ fail(); + } + fs.setPowerOffCountdown(100, 0); + fc = FilePath.get(fileName).open("rw"); + ByteBuffer data = ByteBuffer.allocate(1024); + fc.read(data, 0); + data.flip(); + int got = data.get(); + long size = fc.size(); + minSize = Math.min(minSize, size); + maxSize = Math.max(minSize, size); + minWritten = Math.min(minWritten, got); + maxWritten = Math.max(maxWritten, got); + } + assertTrue(minSize < maxSize); + assertTrue(minWritten < maxWritten); + // release the static data this test generates + FileUtils.delete(fileName); + } + +} diff --git a/h2/src/test/org/h2/test/poweroff/TestWrite.java b/h2/src/test/org/h2/test/poweroff/TestWrite.java index f87df07ddc..b7d75a0037 100644 --- a/h2/src/test/org/h2/test/poweroff/TestWrite.java +++ b/h2/src/test/org/h2/test/poweroff/TestWrite.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.poweroff; @@ -13,6 +13,7 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; /** * This test shows the raw file access performance using various file modes. @@ -38,12 +39,12 @@ public static void main(String... 
args) throws Exception { testFile("rwd", true); testFile("rws", true); testDatabase("org.h2.Driver", - "jdbc:h2:test", "sa", ""); + "jdbc:h2:./test", "sa", ""); testDatabase("org.hsqldb.jdbcDriver", "jdbc:hsqldb:test4", "sa", ""); - testDatabase("org.apache.derby.jdbc.EmbeddedDriver", + testDatabase("org.apache.derby.iapi.jdbc.AutoloadedDriver", "jdbc:derby:test;create=true", "sa", ""); - testDatabase("com.mysql.jdbc.Driver", + testDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", "sa", "sa"); testDatabase("org.postgresql.Driver", "jdbc:postgresql:test", "sa", "sa"); @@ -57,7 +58,7 @@ private static void testFile(String mode, boolean flush) throws Exception { RandomAccessFile file = new RandomAccessFile("test.txt", mode); file.setLength(0); FileDescriptor fd = file.getFD(); - long start = System.currentTimeMillis(); + long start = System.nanoTime(); byte[] data = { 0 }; file.write(data); int i = 0; @@ -67,8 +68,8 @@ private static void testFile(String mode, boolean flush) throws Exception { file.write(data); fd.sync(); if ((i & 15) == 0) { - long time = System.currentTimeMillis() - start; - if (time > 5000) { + long time = System.nanoTime() - start; + if (time > TimeUnit.SECONDS.toNanos(5)) { break; } } @@ -78,17 +79,17 @@ private static void testFile(String mode, boolean flush) throws Exception { file.seek(0); file.write(data); if ((i & 1023) == 0) { - long time = System.currentTimeMillis() - start; - if (time > 5000) { + long time = System.nanoTime() - start; + if (time > TimeUnit.SECONDS.toNanos(5)) { break; } } } } - long time = System.currentTimeMillis() - start; - System.out.println("Time: " + time); + long time = System.nanoTime() - start; + System.out.println("Time: " + TimeUnit.NANOSECONDS.toMillis(time)); System.out.println("Operations: " + i); - System.out.println("Operations/second: " + (i * 1000 / time)); + System.out.println("Operations/second: " + (i * TimeUnit.SECONDS.toNanos(1) / time)); System.out.println(); file.close(); new 
File("test.txt").delete(); @@ -108,23 +109,23 @@ private static void testDatabase(String driver, String url, String user, stat.execute("CREATE TABLE TEST(ID INT)"); PreparedStatement prep = conn.prepareStatement( "INSERT INTO TEST VALUES(?)"); - long start = System.currentTimeMillis(); + long start = System.nanoTime(); int i = 0; for (;; i++) { prep.setInt(1, i); // autocommit is on by default, so this commits as well prep.execute(); if ((i & 15) == 0) { - long time = System.currentTimeMillis() - start; - if (time > 5000) { + long time = System.nanoTime() - start; + if (time > TimeUnit.SECONDS.toNanos(5)) { break; } } } - long time = System.currentTimeMillis() - start; - System.out.println("Time: " + time); + long time = System.nanoTime() - start; + System.out.println("Time: " + TimeUnit.NANOSECONDS.toMillis(time)); System.out.println("Operations: " + i); - System.out.println("Operations/second: " + (i * 1000 / time)); + System.out.println("Operations/second: " + (i * TimeUnit.SECONDS.toNanos(1) / time)); System.out.println(); stat.execute("DROP TABLE TEST"); conn.close(); diff --git a/h2/src/test/org/h2/test/poweroff/package.html b/h2/src/test/org/h2/test/poweroff/package.html index a8dac66da1..73ab19a52e 100644 --- a/h2/src/test/org/h2/test/poweroff/package.html +++ b/h2/src/test/org/h2/test/poweroff/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/recover/RecoverLobTest.java b/h2/src/test/org/h2/test/recover/RecoverLobTest.java new file mode 100644 index 0000000000..fb93f5b1b5 --- /dev/null +++ b/h2/src/test/org/h2/test/recover/RecoverLobTest.java @@ -0,0 +1,81 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.recover; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.tools.DeleteDbFiles; +import org.h2.tools.Recover; + +/** + * Tests BLOB/CLOB recovery. + */ +public class RecoverLobTest extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + if (config.memory) { + return false; + } + return true; + } + + @Override + public void test() throws Exception { + testRecoverClob(); + } + + private void testRecoverClob() throws Exception { + DeleteDbFiles.execute(getBaseDir(), "recovery", true); + Connection conn = getConnection("recovery"); + Statement stat = conn.createStatement(); + stat.execute("create table test(id int, data clob)"); + stat.execute("insert into test values(1, space(10000))"); + stat.execute("insert into test values(2, space(20000))"); + stat.execute("insert into test values(3, space(30000))"); + stat.execute("insert into test values(4, space(40000))"); + stat.execute("insert into test values(5, space(50000))"); + stat.execute("insert into test values(6, space(60000))"); + stat.execute("insert into test values(7, space(70000))"); + stat.execute("insert into test values(8, space(80000))"); + + conn.close(); + Recover.main("-dir", getBaseDir(), "-db", "recovery"); + DeleteDbFiles.execute(getBaseDir(), "recovery", true); + conn = getConnection( + "recovery;init=runscript from '" + + getBaseDir() + "/recovery.h2.sql'"); + stat = conn.createStatement(); + + ResultSet rs = stat.executeQuery("select * from test"); + while(rs.next()){ + + int id = rs.getInt(1); + String data = rs.getString(2); + + assertNotNull(data); + assertTrue(data.length() == 10000 * id); + + } + rs.close(); + conn.close(); + } + + + +} diff --git 
a/h2/src/test/org/h2/test/recover/package.html b/h2/src/test/org/h2/test/recover/package.html new file mode 100644 index 0000000000..05ddb3e212 --- /dev/null +++ b/h2/src/test/org/h2/test/recover/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Recovery tests. + +

    \ No newline at end of file diff --git a/h2/src/test/org/h2/test/rowlock/TestRowLocks.java b/h2/src/test/org/h2/test/rowlock/TestRowLocks.java index 05afca349d..3c481d4355 100644 --- a/h2/src/test/org/h2/test/rowlock/TestRowLocks.java +++ b/h2/src/test/org/h2/test/rowlock/TestRowLocks.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.rowlock; @@ -12,12 +12,13 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * Row level locking tests. */ -public class TestRowLocks extends TestBase { +public class TestRowLocks extends TestDb { /** * The statements used in this test. @@ -32,30 +33,18 @@ public class TestRowLocks extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - testSetMode(); testCases(); - deleteDb("rowLocks"); - } - - private void testSetMode() throws SQLException { - deleteDb("rowLocks"); - c1 = getConnection("rowLocks"); - Statement stat = c1.createStatement(); - stat.execute("SET LOCK_MODE 2"); - ResultSet rs = stat.executeQuery("call lock_mode()"); - rs.next(); - assertEquals("2", rs.getString(1)); - c1.close(); + deleteDb(getTestName()); } private void testCases() throws Exception { - deleteDb("rowLocks"); - c1 = getConnection("rowLocks;MVCC=TRUE"); + deleteDb(getTestName()); + c1 = getConnection(getTestName()); s1 = c1.createStatement(); s1.execute("SET LOCK_TIMEOUT 10000"); s1.execute("CREATE TABLE TEST AS " + @@ -64,7 +53,7 @@ private void testCases() throws Exception { c1.setAutoCommit(false); s1.execute("UPDATE TEST SET NAME='Hallo' WHERE ID=1"); - c2 = getConnection("rowLocks"); + c2 = getConnection(getTestName()); c2.setAutoCommit(false); s2 = c2.createStatement(); diff --git a/h2/src/test/org/h2/test/rowlock/package.html b/h2/src/test/org/h2/test/rowlock/package.html index 72846b3981..ce78426472 100644 --- a/h2/src/test/org/h2/test/rowlock/package.html +++ b/h2/src/test/org/h2/test/rowlock/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/scripts/Aggregate1.java b/h2/src/test/org/h2/test/scripts/Aggregate1.java new file mode 100644 index 0000000000..038a93794e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Aggregate1.java @@ -0,0 +1,32 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.SQLException; + +import org.h2.api.Aggregate; +import org.h2.api.H2Type; + +/** + * An aggregate function for tests. 
+ */ +public class Aggregate1 implements Aggregate { + + @Override + public int getInternalType(int[] inputTypes) throws SQLException { + return H2Type.INTEGER.getVendorTypeNumber(); + } + + @Override + public void add(Object value) throws SQLException { + } + + @Override + public Object getResult() throws SQLException { + return 0; + } + +} diff --git a/h2/src/test/org/h2/test/scripts/TestScript.java b/h2/src/test/org/h2/test/scripts/TestScript.java new file mode 100644 index 0000000000..0e7686b693 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/TestScript.java @@ -0,0 +1,770 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.io.PrintStream; +import java.io.RandomAccessFile; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; + +import org.h2.api.ErrorCode; +import org.h2.command.CommandContainer; +import org.h2.command.CommandInterface; +import org.h2.command.Prepared; +import org.h2.command.dml.ScriptCommand; +import org.h2.command.query.Query; +import org.h2.engine.Mode.ModeEnum; +import org.h2.jdbc.JdbcConnection; +import org.h2.jdbc.JdbcPreparedStatement; +import org.h2.test.TestAll; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.StringUtils; +import org.h2.value.DataType; + +/** + * This test runs a 
SQL script file and compares the output with the expected + * output. + */ +public class TestScript extends TestDb { + + private static final String BASE_DIR = "org/h2/test/scripts/"; + + private static final boolean FIX_OUTPUT = false; + + private static final Field COMMAND; + + private static final Field PREPARED; + + private static boolean CHECK_ORDERING; + + /** If set to true, the test will exit at the first failure. */ + private boolean failFast; + /** If set to a value the test will add all executed statements to this list */ + private ArrayList statements; + + private boolean reconnectOften; + private Connection conn; + private Statement stat; + private String fileName; + private LineNumberReader in; + private PrintStream out; + private final ArrayList result = new ArrayList<>(); + private final ArrayDeque putBack = new ArrayDeque<>(); + private boolean foundErrors; + + private Random random = new Random(1); + + static { + try { + COMMAND = JdbcPreparedStatement.class.getDeclaredField("command"); + COMMAND.setAccessible(true); + PREPARED = CommandContainer.class.getDeclaredField("prepared"); + PREPARED.setAccessible(true); + } catch (ReflectiveOperationException e) { + throw new RuntimeException(e); + } + } + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + CHECK_ORDERING = true; + TestBase.createCaller().init().testFromMain(); + } + + /** + * Get all SQL statements of this file. 
+ * + * @param conf the configuration + * @return the list of statements + */ + public ArrayList getAllStatements(TestAll conf) throws Exception { + config = conf; + ArrayList result = new ArrayList<>(4000); + try { + statements = result; + test(); + } finally { + this.statements = null; + } + return result; + } + + @Override + public boolean isEnabled() { + if (config.networked && config.big) { + return false; + } + return true; + } + + @Override + public void test() throws Exception { + reconnectOften = !config.memory && config.big; + + testScript("testScript.sql"); + if (!config.memory && !config.big && !config.networked) { + testScript("testSimple.sql"); + } + testScript("dual.sql"); + testScript("indexes.sql"); + testScript("information_schema.sql"); + testScript("range_table.sql"); + testScript("altertable-index-reuse.sql"); + testScript("altertable-fk.sql"); + testScript("default-and-on_update.sql"); + + for (String s : new String[] { "add_months", "compatibility", "group_by", "strict_and_legacy"}) { + testScript("compatibility/" + s + ".sql"); + } + for (String s : new String[] { "array", "bigint", "binary", "blob", + "boolean", "char", "clob", "date", "decfloat", "double_precision", "enum", + "geometry", "identity", "int", "interval", "java_object", "json", "numeric", "real", "row", "smallint", + "time-with-time-zone", "time", "timestamp-with-time-zone", "timestamp", "tinyint", + "uuid", "varbinary", "varchar", "varchar-ignorecase" }) { + testScript("datatypes/" + s + ".sql"); + } + for (String s : new String[] { "alterDomain", "alterTableAdd", "alterTableAlterColumn", "alterTableDropColumn", + "alterTableDropConstraint", + "alterTableRename", "alterTableRenameConstraint", + "analyze", "commentOn", "createAlias", "createConstant", "createDomain", + "createIndex", "createSchema", "createSequence", "createSynonym", + "createTable", "createTrigger", "createView", "dropAllObjects", "dropDomain", "dropIndex", + "dropSchema", "dropTable", "grant", 
"truncateTable" }) { + testScript("ddl/" + s + ".sql"); + } + for (String s : new String[] { "delete", "error_reporting", "execute_immediate", "insert", "insertIgnore", + "merge", "mergeUsing", "replace", "script", "show", "update", "with" }) { + testScript("dml/" + s + ".sql"); + } + for (String s : new String[] { "any", "array_agg", "avg", "bit_and_agg", "bit_or_agg", "bit_xor_agg", + "corr", + "count", + "covar_pop", "covar_samp", + "envelope", "every", "histogram", + "json_arrayagg", "json_objectagg", + "listagg", "max", "min", "mode", "percentile", "rank", + "regr_avgx", "regr_avgy", "regr_count", "regr_intercept", "regr_r2", "regr_slope", + "regr_sxx", "regr_sxy", "regr_syy", + "stddev_pop", "stddev_samp", "sum", "var_pop", "var_samp" }) { + testScript("functions/aggregate/" + s + ".sql"); + } + for (String s : new String[] { "json_array", "json_object" }) { + testScript("functions/json/" + s + ".sql"); + } + for (String s : new String[] { "abs", "acos", "asin", "atan", "atan2", + "bitand", "bitcount", "bitget", "bitnot", "bitor", "bitxor", "ceil", "compress", + "cos", "cosh", "cot", "decrypt", "degrees", "encrypt", "exp", + "expand", "floor", "hash", "length", "log", "lshift", "mod", "ora-hash", "pi", + "power", "radians", "rand", "random-uuid", "rotate", "round", + "roundmagic", "rshift", "secure-rand", "sign", "sin", "sinh", "sqrt", + "tan", "tanh", "truncate", "zero" }) { + testScript("functions/numeric/" + s + ".sql"); + } + for (String s : new String[] { "array-to-string", + "ascii", "bit-length", "char", "concat", + "concat-ws", "difference", "hextoraw", "insert", + "left", "length", "locate", "lower", "lpad", "ltrim", + "octet-length", "quote_ident", "rawtohex", "regexp-like", + "regex-replace", "regexp-substr", "repeat", "replace", "right", "rpad", "rtrim", + "soundex", "space", "stringdecode", "stringencode", + "stringtoutf8", "substring", "to-char", "translate", "trim", + "upper", "utf8tostring", "xmlattr", "xmlcdata", "xmlcomment", + "xmlnode", 
"xmlstartdoc", "xmltext" }) { + testScript("functions/string/" + s + ".sql"); + } + for (String s : new String[] { "array-cat", "array-contains", "array-get", + "array-slice", "autocommit", "cancel-session", "casewhen", + "cardinality", "cast", "coalesce", "convert", "csvread", "csvwrite", "current_catalog", + "current_schema", "current_user", "currval", "data_type_sql", + "database-path", "db_object", "decode", "disk-space-used", + "file-read", "file-write", "greatest", "h2version", "identity", + "ifnull", "last-insert-id", "least", "link-schema", "lock-mode", "lock-timeout", + "memory-free", "memory-used", "nextval", "nullif", "nvl2", + "readonly", "rownum", "session-id", + "table", "transaction-id", "trim_array", "truncate-value", "unnest" }) { + testScript("functions/system/" + s + ".sql"); + } + for (String s : new String[] { "current_date", "current_timestamp", + "current-time", "dateadd", "datediff", "dayname", + "day-of-month", "day-of-week", "day-of-year", "extract", + "formatdatetime", "hour", "minute", "month", "monthname", + "parsedatetime", "quarter", "second", "truncate", "week", "year", "date_trunc" }) { + testScript("functions/timeanddate/" + s + ".sql"); + } + for (String s : new String[] { "lead", "nth_value", "ntile", "ratio_to_report", "row_number" }) { + testScript("functions/window/" + s + ".sql"); + } + for (String s : new String[] { "at-time-zone", "boolean-test", "case", "concatenation", "conditions", + "data-change-delta-table", "field-reference", "help", "sequence", "set" }) { + testScript("other/" + s + ".sql"); + } + for (String s : new String[] { "comments", "identifiers" }) { + testScript("parser/" + s + ".sql"); + } + for (String s : new String[] { "between", "distinct", "in", "like", "null", "type", "unique" }) { + testScript("predicates/" + s + ".sql"); + } + for (String s : new String[] { "derived-column-names", "distinct", "joins", "query-optimisations", "select", + "table", "values", "window" }) { + testScript("queries/" + s + 
".sql"); + } + testScript("other/two_phase_commit.sql"); + testScript("other/unique_include.sql"); + + deleteDb("script"); + System.out.flush(); + if (foundErrors) { + throw new Exception("errors in script found"); + } + } + + private void testScript(String scriptFileName) throws Exception { + deleteDb("script"); + + // Reset all the state in case there is anything left over from the previous file + // we processed. + conn = null; + stat = null; + fileName = null; + in = null; + out = null; + result.clear(); + putBack.clear(); + + String outFile; + if (FIX_OUTPUT) { + outFile = scriptFileName; + int idx = outFile.lastIndexOf('/'); + if (idx >= 0) { + outFile = outFile.substring(idx + 1); + } + } else { + outFile = "test.out.txt"; + } + conn = getConnection("script"); + stat = conn.createStatement(); + out = new PrintStream(new FileOutputStream(outFile)); + testFile(BASE_DIR + scriptFileName); + conn.close(); + out.close(); + if (FIX_OUTPUT) { + File file = new File(outFile); + // If there are two trailing newline characters remove one + try (RandomAccessFile r = new RandomAccessFile(file, "rw")) { + byte[] separator = System.lineSeparator().getBytes(StandardCharsets.ISO_8859_1); + int separatorLength = separator.length; + long length = r.length() - (separatorLength * 2); + truncate: if (length >= 0) { + r.seek(length); + for (int i = 0; i < 2; i++) { + for (int j = 0; j < separatorLength; j++) { + if (r.readByte() != separator[j]) { + break truncate; + } + } + } + r.setLength(length + separatorLength); + } + } + file.renameTo(new File("h2/src/test/org/h2/test/scripts/" + scriptFileName)); + return; + } + } + + private String readLine() throws IOException { + String s = putBack.pollFirst(); + return s != null ? 
s : readNextLine(); + } + + private String readNextLine() throws IOException { + String s; + boolean comment = false; + while ((s = in.readLine()) != null) { + if (s.startsWith("--")) { + write(s); + comment = true; + continue; + } + if (!FIX_OUTPUT) { + s = s.trim(); + } + if (!s.isEmpty()) { + break; + } + if (comment) { + write(""); + comment = false; + } + } + return s; + } + + private void putBack(String line) { + putBack.addLast(line); + } + + private void testFile(String inFile) throws Exception { + InputStream is = getClass().getClassLoader().getResourceAsStream(inFile); + if (is == null) { + throw new IOException("could not find " + inFile); + } + fileName = inFile; + in = new LineNumberReader(new InputStreamReader(is, StandardCharsets.UTF_8)); + StringBuilder buff = new StringBuilder(); + boolean allowReconnect = true; + for (String sql; (sql = readLine()) != null;) { + if (sql.startsWith("--")) { + write(sql); + } else if (sql.startsWith(">")) { + addWriteResultError("", sql); + } else if (sql.endsWith(";")) { + write(sql); + buff.append(sql, 0, sql.length() - 1); + sql = buff.toString(); + buff.setLength(0); + process(sql, allowReconnect); + } else if (sql.startsWith("@")) { + if (buff.length() > 0) { + addWriteResultError("", sql); + } else { + switch (sql) { + case "@reconnect": + write(sql); + write(""); + if (!config.memory) { + reconnect(conn.getAutoCommit()); + } + break; + case "@reconnect on": + write(sql); + write(""); + allowReconnect = true; + break; + case "@reconnect off": + write(sql); + write(""); + allowReconnect = false; + break; + case "@autocommit on": + conn.setAutoCommit(true); + break; + case "@autocommit off": + conn.setAutoCommit(false); + break; + default: + addWriteResultError("", sql); + } + } + } else { + write(sql); + buff.append(sql); + buff.append('\n'); + } + } + } + + private boolean containsTempTables() throws SQLException { + ResultSet rs = conn.getMetaData().getTables(null, null, null, + new String[] { "TABLE" }); + 
while (rs.next()) { + String sql = rs.getString("SQL"); + if (sql != null) { + if (sql.contains("TEMPORARY")) { + return true; + } + } + } + return false; + } + + private void process(String sql, boolean allowReconnect) throws Exception { + if (allowReconnect && reconnectOften) { + if (!containsTempTables() + && ((JdbcConnection) conn).getMode().getEnum() == ModeEnum.REGULAR + && conn.getSchema().equals("PUBLIC")) { + boolean autocommit = conn.getAutoCommit(); + if (autocommit && random.nextInt(10) < 1) { + // reconnect 10% of the time + reconnect(autocommit); + } + } + } + if (statements != null) { + statements.add(sql); + } + if (!hasParameters(sql)) { + processStatement(sql); + } else { + String param = readLine(); + write(param); + if (!param.equals("{")) { + throw new AssertionError("expected '{', got " + param + " in " + sql); + } + try { + PreparedStatement prep = conn.prepareStatement(sql); + int count = 0; + while (true) { + param = readLine(); + write(param); + if (param.startsWith("}")) { + break; + } + count += processPrepared(sql, prep, param); + } + writeResult(sql, "update count: " + count, null); + } catch (SQLException e) { + writeException(sql, e); + } + } + write(""); + } + + private static boolean hasParameters(String sql) { + int index = 0; + for (;;) { + index = sql.indexOf('?', index); + if (index < 0) { + return false; + } + int length = sql.length(); + if (++index == length || sql.charAt(index) != '?') { + return true; + } + index++; + } + } + + private void reconnect(boolean autocommit) throws SQLException { + conn.close(); + conn = getConnection("script"); + conn.setAutoCommit(autocommit); + stat = conn.createStatement(); + } + + private static void setParameter(PreparedStatement prep, int i, String param) + throws SQLException { + if (param.equalsIgnoreCase("null")) { + param = null; + } + prep.setString(i, param); + } + + private int processPrepared(String sql, PreparedStatement prep, String param) + throws Exception { + try { + 
StringBuilder buff = new StringBuilder(); + int index = 0; + for (int i = 0; i < param.length(); i++) { + char c = param.charAt(i); + if (c == ',') { + setParameter(prep, ++index, buff.toString()); + buff.setLength(0); + } else if (c == '"') { + while (true) { + c = param.charAt(++i); + if (c == '"') { + break; + } + buff.append(c); + } + } else if (c > ' ') { + buff.append(c); + } + } + if (buff.length() > 0) { + setParameter(prep, ++index, buff.toString()); + } + if (prep.execute()) { + writeResultSet(sql, prep.getResultSet()); + return 0; + } + return prep.getUpdateCount(); + } catch (SQLException e) { + writeException(sql, e); + return 0; + } + } + + private int processStatement(String sql) throws Exception { + try { + boolean res; + Statement s; + if (/* TestScript */ CHECK_ORDERING || /* TestAll */ config.memory && !config.lazy && !config.networked) { + PreparedStatement prep = conn.prepareStatement(sql); + res = prep.execute(); + s = prep; + } else { + res = stat.execute(sql); + s = stat; + } + if (res) { + writeResultSet(sql, s.getResultSet()); + } else { + int count = s.getUpdateCount(); + writeResult(sql, count < 1 ? 
"ok" : "update count: " + count, null); + } + } catch (SQLException e) { + writeException(sql, e); + } + return 0; + } + + private static String formatString(String s) { + if (s == null) { + return "null"; + } + s = StringUtils.replaceAll(s, "\r\n", "\n"); + s = s.replace('\n', ' '); + s = StringUtils.replaceAll(s, " ", " "); + while (true) { + String s2 = StringUtils.replaceAll(s, " ", " "); + if (s2.length() == s.length()) { + break; + } + s = s2; + } + return s; + } + + private static String formatBinary(byte[] b) { + if (b == null) { + return "null"; + } + return StringUtils.convertBytesToHex(new StringBuilder("X'"), b).append('\'').toString(); + } + + private void writeResultSet(String sql, ResultSet rs) throws Exception { + ResultSetMetaData meta = rs.getMetaData(); + int len = meta.getColumnCount(); + int[] max = new int[len]; + result.clear(); + while (rs.next()) { + String[] row = new String[len]; + for (int i = 0; i < len; i++) { + String data = readValue(rs, meta, i + 1); + if (max[i] < data.length()) { + max[i] = data.length(); + } + row[i] = data; + } + result.add(row); + } + String[] head = new String[len]; + for (int i = 0; i < len; i++) { + String label = formatString(meta.getColumnLabel(i + 1)); + if (max[i] < label.length()) { + max[i] = label.length(); + } + head[i] = label; + } + Boolean gotOrdered = null; + Statement st = rs.getStatement(); + if (st instanceof JdbcPreparedStatement) { + CommandInterface ci = (CommandInterface) COMMAND.get(st); + if (ci instanceof CommandContainer) { + Prepared p = (Prepared) PREPARED.get(ci); + if (p instanceof Query) { + gotOrdered = ((Query) p).hasOrder(); + } else if (p instanceof ScriptCommand) { + gotOrdered = true; + } + } + } + rs.close(); + String line = readLine(); + putBack(line); + if (line != null && line.startsWith(">> ")) { + switch (result.size()) { + case 0: + writeResult(sql, "", null, ">> "); + return; + case 1: + String[] row = result.get(0); + if (row.length == 1) { + writeResult(sql, 
row[0], null, ">> "); + } else { + writeResult(sql, "", null, ">> "); + } + return; + default: + writeResult(sql, "<" + result.size() + " rows>", null, ">> "); + return; + } + } + Boolean ordered; + for (;;) { + line = readNextLine(); + if (line == null) { + addWriteResultError("", ""); + return; + } + putBack(line); + if (line.startsWith("> rows: ")) { + ordered = false; + break; + } else if (line.startsWith("> rows (ordered): ")) { + ordered = true; + break; + } else if (line.startsWith("> rows (partially ordered): ")) { + ordered = null; + break; + } + } + if (gotOrdered != null) { + if (ordered == null || ordered) { + if (!gotOrdered) { + addWriteResultError("", ""); + } + } else { + if (gotOrdered) { + addWriteResultError("", ""); + } + } + } + writeResult(sql, format(head, max), null); + writeResult(sql, format(null, max), null); + String[] array = new String[result.size()]; + for (int i = 0; i < result.size(); i++) { + array[i] = format(result.get(i), max); + } + if (!Boolean.TRUE.equals(ordered)) { + sort(array); + } + int i = 0; + for (; i < array.length; i++) { + writeResult(sql, array[i], null); + } + writeResult(sql, + (ordered != null ? ordered ? "rows (ordered): " : "rows: " : "rows (partially ordered): ") + i, + null); + } + + private static String readValue(ResultSet rs, ResultSetMetaData meta, int column) throws SQLException { + return DataType.isBinaryColumn(meta, column) ? 
formatBinary(rs.getBytes(column)) + : formatString(rs.getString(column)); + } + + private static String format(String[] row, int[] max) { + int length = max.length; + StringBuilder buff = new StringBuilder(); + for (int i = 0; i < length; i++) { + if (i > 0) { + buff.append(' '); + } + if (row == null) { + for (int j = 0; j < max[i]; j++) { + buff.append('-'); + } + } else { + int len = row[i].length(); + buff.append(row[i]); + if (i < length - 1) { + for (int j = len; j < max[i]; j++) { + buff.append(' '); + } + } + } + } + return buff.toString(); + } + + /** Convert the error code to a symbolic name from ErrorCode. */ + private static final Map ERROR_CODE_TO_NAME = new HashMap<>(256); + static { + try { + for (Field field : ErrorCode.class.getDeclaredFields()) { + if (field.getModifiers() == (Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL) + && field.getAnnotation(Deprecated.class) == null) { + ERROR_CODE_TO_NAME.put(field.getInt(null), field.getName()); + } + } + } catch (IllegalAccessException ex) { + throw new RuntimeException(ex); + } + } + + private void writeException(String sql, SQLException ex) throws Exception { + writeResult(sql, "exception " + ERROR_CODE_TO_NAME.get(ex.getErrorCode()), ex); + } + + private void writeResult(String sql, String s, SQLException ex) throws Exception { + writeResult(sql, s, ex, "> "); + } + + private void writeResult(String sql, String s, SQLException ex, String prefix) throws Exception { + assertKnownException(sql, ex); + s = (prefix + s).trim(); + String compare = readLine(); + if (compare != null && compare.startsWith(">")) { + if (!compare.equals(s)) { + if (reconnectOften && sql.toUpperCase().startsWith("EXPLAIN")) { + return; + } + addWriteResultError(compare, s); + if (ex != null) { + TestBase.logError("script", ex); + } + if (failFast) { + conn.close(); + System.exit(1); + } + } + } else { + addWriteResultError("", s); + if (compare != null) { + putBack(compare); + } + } + write(s); + } + + private void 
addWriteResultError(String expected, String got) { + foundErrors = true; + final String msg = fileName + '\n' + // + "line: " + in.getLineNumber() + '\n' + // + "exp: " + expected + '\n' + // + "got: " + got + '\n'; + TestBase.logErrorMessage(msg); + } + + private void write(String s) { + out.println(s); + } + + private static void sort(String[] a) { + for (int i = 1, j, len = a.length; i < len; i++) { + String t = a[i]; + for (j = i - 1; j >= 0 && t.compareTo(a[j]) < 0; j--) { + a[j + 1] = a[j]; + } + a[j + 1] = t; + } + } + +} diff --git a/h2/src/test/org/h2/test/scripts/Trigger1.java b/h2/src/test/org/h2/test/scripts/Trigger1.java new file mode 100644 index 0000000000..b110511299 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Trigger1.java @@ -0,0 +1,25 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.h2.api.Trigger; + +/** + * A trigger for tests. + */ +public class Trigger1 implements Trigger { + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + if (newRow != null) { + newRow[2] = ((int) newRow[2]) * 10; + } + } + +} diff --git a/h2/src/test/org/h2/test/scripts/Trigger2.java b/h2/src/test/org/h2/test/scripts/Trigger2.java new file mode 100644 index 0000000000..ff773336d1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/Trigger2.java @@ -0,0 +1,59 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.scripts; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.h2.api.Trigger; + +/** + * A trigger for tests. 
+ */ +public class Trigger2 implements Trigger { + + @Override + public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException { + if (oldRow == null && newRow != null) { + Long id = (Long) newRow[0]; + PreparedStatement prep; + int i = 0; + if (id == null) { + prep = conn.prepareStatement("SELECT * FROM FINAL TABLE (INSERT INTO TEST VALUES (DEFAULT, ?, ?))"); + } else { + prep = conn.prepareStatement("SELECT * FROM FINAL TABLE (INSERT INTO TEST VALUES (?, ?, ?))"); + prep.setLong(++i, id); + } + prep.setInt(++i, (int) newRow[1]); + prep.setInt(++i, (int) newRow[2]); + executeAndReadFinalTable(prep, newRow); + } else if (oldRow != null && newRow != null) { + PreparedStatement prep = conn.prepareStatement( + "SELECT * FROM FINAL TABLE (UPDATE TEST SET (ID, A, B) = (?, ?, ?) WHERE ID = ?)"); + prep.setLong(1, (long) newRow[0]); + prep.setInt(2, (int) newRow[1]); + prep.setInt(3, (int) newRow[2]); + prep.setLong(4, (long) oldRow[0]); + executeAndReadFinalTable(prep, newRow); + } else if (oldRow != null && newRow == null) { + PreparedStatement prep = conn.prepareStatement("DELETE FROM TEST WHERE ID = ?"); + prep.setLong(1, (long) oldRow[0]); + prep.executeUpdate(); + } + } + + private static void executeAndReadFinalTable(PreparedStatement prep, Object[] newRow) throws SQLException { + try (ResultSet rs = prep.executeQuery()) { + rs.next(); + newRow[0] = rs.getLong(1); + newRow[1] = rs.getInt(2); + newRow[2] = rs.getInt(3); + } + } + +} diff --git a/h2/src/test/org/h2/test/scripts/altertable-fk.sql b/h2/src/test/org/h2/test/scripts/altertable-fk.sql new file mode 100644 index 0000000000..73adb9d586 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/altertable-fk.sql @@ -0,0 +1,26 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Check that constraints are properly renamed when we rename a column. 
+ +CREATE TABLE user_group (ID decimal PRIMARY KEY NOT NULL); +> ok + +CREATE TABLE login_message (ID decimal PRIMARY KEY NOT NULL, user_group_id decimal); +> ok + +ALTER TABLE login_message ADD CONSTRAINT FK_LOGIN_MESSAGE +FOREIGN KEY (user_group_id) +REFERENCES user_group(id) ON DELETE CASCADE; +> ok + +ALTER TABLE login_message ALTER COLUMN user_group_id RENAME TO user_group_id2; +> ok + +INSERT INTO user_group (ID) VALUES (1); +> update count: 1 + +DELETE FROM user_group; +> update count: 1 diff --git a/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql b/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql new file mode 100644 index 0000000000..f93f90e7e0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/altertable-index-reuse.sql @@ -0,0 +1,33 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE "domains" ("id" bigint NOT NULL auto_increment PRIMARY KEY); +> ok + +CREATE TABLE "users" ("id" bigint NOT NULL auto_increment PRIMARY KEY,"username" varchar_ignorecase(255),"domain" bigint,"desc" varchar_ignorecase(255)); +> ok + +-- adds constraint on (domain,username) and generates unique index domainusername_key_INDEX_xxx +ALTER TABLE "users" ADD CONSTRAINT "domainusername_key" UNIQUE ("domain","username"); +> ok + +-- adds foreign key on domain - if domainusername_key didn't exist it would create unique index on domain, but it reuses the existing index +ALTER TABLE "users" ADD CONSTRAINT "udomain_fkey" FOREIGN KEY ("domain") REFERENCES "domains"("id") ON DELETE RESTRICT; +> ok + +-- now we drop the domainusername_key, but domainusername_key_INDEX_xxx is used by udomain_fkey and was not being dropped +-- this was an issue, because it's a unique index and still enforcing constraint on (domain,username) +ALTER TABLE "users" DROP CONSTRAINT "domainusername_key"; +> ok + +insert into "domains" ("id") VALUES (1); +> 
update count: 1 + +insert into "users" ("username","domain","desc") VALUES ('test',1,'first user'); +> update count: 1 + +-- should work,because we dropped domainusername_key, but failed: Unique index or primary key violation +INSERT INTO "users" ("username","domain","desc") VALUES ('test',1,'second user'); +> update count: 1 diff --git a/h2/src/test/org/h2/test/scripts/compatibility/add_months.sql b/h2/src/test/org/h2/test/scripts/compatibility/add_months.sql new file mode 100644 index 0000000000..69e7100854 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/add_months.sql @@ -0,0 +1,23 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SET MODE Oracle; +> ok + +-- 01-Aug-03 + 3 months = 01-Nov-03 +SELECT ADD_MONTHS('2003-08-01', 3); +>> 2003-11-01 00:00:00 + +-- 31-Jan-03 + 1 month = 28-Feb-2003 +SELECT ADD_MONTHS('2003-01-31', 1); +>> 2003-02-28 00:00:00 + +-- 21-Aug-2003 - 3 months = 21-May-2003 +SELECT ADD_MONTHS('2003-08-21', -3); +>> 2003-05-21 00:00:00 + +-- 21-Aug-2003 00:00:00.333 - 3 months = 21-May-2003 00:00:00.333 +SELECT ADD_MONTHS('2003-08-21 00:00:00.333', -3); +>> 2003-05-21 00:00:00.333 diff --git a/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql b/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql new file mode 100644 index 0000000000..a05dec4eba --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/compatibility.sql @@ -0,0 +1,751 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- EXEC and EXECUTE in MSSQLServer mode + +CREATE ALIAS MY_NO_ARG AS 'int f() { return 1; }'; +> ok + +CREATE ALIAS MY_SQRT FOR "java.lang.Math.sqrt"; +> ok + +CREATE ALIAS MY_REMAINDER FOR "java.lang.Math.IEEEremainder"; +> ok + +EXEC MY_SQRT 4; +> exception SYNTAX_ERROR_2 + +-- PostgreSQL-style EXECUTE doesn't work with MSSQLServer-style arguments +EXECUTE MY_SQRT 4; +> exception FUNCTION_ALIAS_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +-- PostgreSQL-style PREPARE is not available in MSSQLServer mode +PREPARE TEST AS SELECT 1; +> exception SYNTAX_ERROR_2 + +-- PostgreSQL-style DEALLOCATE is not available in MSSQLServer mode +DEALLOCATE TEST; +> exception SYNTAX_ERROR_2 + +EXEC MY_NO_ARG; +>> 1 + +EXEC MY_SQRT 4; +>> 2.0 + +EXEC MY_REMAINDER 4, 3; +>> 1.0 + +EXECUTE MY_SQRT 4; +>> 2.0 + +EXEC PUBLIC.MY_SQRT 4; +>> 2.0 + +EXEC SCRIPT.PUBLIC.MY_SQRT 4; +>> 2.0 + +EXEC UNKNOWN_PROCEDURE; +> exception FUNCTION_NOT_FOUND_1 + +EXEC UNKNOWN_SCHEMA.MY_SQRT 4; +> exception SCHEMA_NOT_FOUND_1 + +EXEC UNKNOWN_DATABASE.PUBLIC.MY_SQRT 4; +> exception DATABASE_NOT_FOUND_1 + +SET MODE Regular; +> ok + +DROP ALIAS MY_NO_ARG; +> ok + +DROP ALIAS MY_SQRT; +> ok + +DROP ALIAS MY_REMAINDER; +> ok + +-- UPDATE TOP (n) in MSSQLServer mode + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (3, 4), (5, 6); +> ok + +UPDATE TOP (1) TEST SET B = 10; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +UPDATE TOP (1) TEST SET B = 10; +> update count: 1 + +SELECT COUNT(*) FILTER (WHERE B = 10) N, COUNT(*) FILTER (WHERE B <> 10) O FROM TEST; +> N O +> - - +> 1 2 +> rows: 1 + +UPDATE TEST SET B = 10 WHERE B <> 10; +> update count: 2 + +UPDATE TOP (1) TEST SET B = 10 LIMIT 1; +> exception SYNTAX_ERROR_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE A (A INT PRIMARY KEY, X INT); +> ok + +ALTER TABLE A ADD INDEX A_IDX(X); +> ok + +ALTER TABLE A DROP INDEX A_IDX_1; +> exception 
CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE A DROP INDEX IF EXISTS A_IDX_1; +> ok + +ALTER TABLE A DROP INDEX IF EXISTS A_IDX; +> ok + +ALTER TABLE A DROP INDEX A_IDX; +> exception CONSTRAINT_NOT_FOUND_1 + +CREATE TABLE B (B INT PRIMARY KEY, A INT); +> ok + +ALTER TABLE B ADD CONSTRAINT B_FK FOREIGN KEY (A) REFERENCES A(A); +> ok + +ALTER TABLE B DROP FOREIGN KEY B_FK_1; +> exception CONSTRAINT_NOT_FOUND_1 + +-- MariaDB compatibility +ALTER TABLE B DROP FOREIGN KEY IF EXISTS B_FK_1; +> ok + +ALTER TABLE B DROP FOREIGN KEY IF EXISTS B_FK; +> ok + +ALTER TABLE B DROP FOREIGN KEY B_FK; +> exception CONSTRAINT_NOT_FOUND_1 + +DROP TABLE A, B; +> ok + +SET MODE Regular; +> ok + +-- PostgreSQL-style CREATE INDEX ... USING +CREATE TABLE TEST(B1 INT, B2 INT, H INT, R GEOMETRY, T INT); +> ok + +CREATE INDEX TEST_BTREE_IDX ON TEST USING BTREE(B1, B2); +> ok + +CREATE INDEX TEST_HASH_IDX ON TEST USING HASH(H); +> ok + +CREATE INDEX TEST_RTREE_IDX ON TEST USING RTREE(R); +> ok + +SELECT INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME = 'TEST'; +> INDEX_NAME INDEX_TYPE_NAME +> -------------- --------------- +> TEST_BTREE_IDX INDEX +> TEST_HASH_IDX HASH INDEX +> TEST_RTREE_IDX SPATIAL INDEX +> rows: 3 + +SELECT INDEX_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.INDEX_COLUMNS WHERE TABLE_NAME = 'TEST'; +> INDEX_NAME COLUMN_NAME ORDINAL_POSITION +> -------------- ----------- ---------------- +> TEST_BTREE_IDX B1 1 +> TEST_BTREE_IDX B2 2 +> TEST_HASH_IDX H 1 +> TEST_RTREE_IDX R 1 +> rows: 4 + +CREATE HASH INDEX TEST_BAD_IDX ON TEST USING HASH(T); +> exception SYNTAX_ERROR_2 + +CREATE SPATIAL INDEX TEST_BAD_IDX ON TEST USING RTREE(T); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE test (id int(25) NOT NULL auto_increment, name varchar NOT NULL, PRIMARY KEY (id,name)); +> ok + +drop table test; +> ok + +create memory table word(word_id integer, name varchar); +> ok + +alter table word alter column 
word_id integer(10) auto_increment; +> ok + +insert into word(name) values('Hello'); +> update count: 1 + +alter table word alter column word_id restart with 30872; +> ok + +insert into word(name) values('World'); +> update count: 1 + +select * from word; +> WORD_ID NAME +> ------- ----- +> 1 Hello +> 30872 World +> rows: 2 + +drop table word; +> ok + +CREATE MEMORY TABLE TEST1(ID BIGINT(20) NOT NULL PRIMARY KEY COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST1; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT COMMENT 'COMMENT1' NOT NULL, "FIELD_NAME" CHARACTER VARYING(100) COMMENT 'COMMENT2' NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> rows (ordered): 4 + +CREATE TABLE TEST2(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2' COMMENT 'COMMENT3'); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST3(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY COMMENT 'COMMENT1' CHECK(ID > 0), FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +CREATE TABLE TEST4(ID BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY CHECK(ID > 0) COMMENT 'COMMENT1', FIELD_NAME VARCHAR(100) NOT NULL COMMENT 'COMMENT2'); +> ok + +DROP TABLE TEST1, TEST3, TEST4; +> ok + +SET MODE Regular; +> ok + +-- Keywords as identifiers + +CREATE TABLE TEST(KEY INT, VALUE INT); +> exception SYNTAX_ERROR_2 + +@reconnect off + +SET NON_KEYWORDS KEY, VALUE, AS, SET, DAY; +> ok + +CREATE TABLE TEST(KEY INT, VALUE INT, AS INT, SET INT, DAY INT); +> ok + +INSERT INTO TEST(KEY, VALUE, AS, SET, DAY) VALUES (1, 2, 3, 4, 5), (6, 7, 8, 9, 10); +> 
update count: 2 + +SELECT KEY, VALUE, AS, SET, DAY FROM TEST WHERE KEY <> 6 AND VALUE <> 7 AND AS <> 8 AND SET <> 9 AND DAY <> 10; +> KEY VALUE AS SET DAY +> --- ----- -- --- --- +> 1 2 3 4 5 +> rows: 1 + +DROP TABLE TEST; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'NON_KEYWORDS'; +>> AS,DAY,KEY,SET,VALUE + +SET NON_KEYWORDS; +> ok + +@reconnect on + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'NON_KEYWORDS'; +>> 0 + +CREATE TABLE TEST(KEY INT, VALUE INT); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST1(C VARCHAR(1 CHAR)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST2(C VARCHAR(1 BYTE)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST3(C BINARY_FLOAT); +> exception UNKNOWN_DATA_TYPE_1 + +CREATE TABLE TEST4(C BINARY_DOUBLE); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE Oracle; +> ok + +CREATE TABLE TEST1(C VARCHAR(1 CHAR)); +> ok + +CREATE TABLE TEST2(C VARCHAR(1 BYTE)); +> ok + +CREATE TABLE TEST3(C BINARY_FLOAT); +> ok + +CREATE TABLE TEST4(C BINARY_DOUBLE); +> ok + +SELECT TABLE_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME IN ('TEST3', 'TEST4'); +> TABLE_NAME DATA_TYPE +> ---------- ---------------- +> TEST3 REAL +> TEST4 DOUBLE PRECISION +> rows: 2 + +DROP TABLE TEST1, TEST2, TEST3, TEST4; +> ok + +SET MODE PostgreSQL; +> ok + +EXPLAIN VALUES VERSION(); +>> VALUES (VERSION()) + +SET MODE Regular; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 0; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> SIN(A) A + 1 ((((((((((A + 1) * A) + 1) * A) + 1) * A) + 1) * A) + 1) * A) + 1 +> ------ ----- ----------------------------------------------------------------- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> 0.0 1.0 +> --- --- +> 0.0 1.0 +> rows: 
1 + +DROP VIEW V; +> ok + +SET MODE DB2; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> 1 2 A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE Derby; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> 1 2 A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE MSSQLServer; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> A +> --- - - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception COLUMN_ALIAS_IS_NOT_SPECIFIED_1 + +SET MODE HSQLDB; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> C1 C2 A +> --- -- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> C1 C2 C3 +> --- -- -- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE MySQL; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> ok + +TABLE V; +> SIN(A) A + 1 Name_exp_3 +> ------ ----- ---------- +> 0.0 1 1 +> rows: 1 + +DROP VIEW V; +> ok + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> SIN(0) COS(0) +> ------ ------ +> 0.0 1.0 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE Oracle; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> SIN(A) A + 1 A +> ------ ----- - +> 0.0 1 0 +> rows: 1 + +SET MODE PostgreSQL; +> ok + +SELECT SIN(A), A+1, A FROM TEST; +> sin ?column? 
A +> --- -------- - +> 0.0 1 0 +> rows: 1 + +CREATE VIEW V AS SELECT SIN(A), A+1, (((((A + 1) * A + 1) * A + 1) * A + 1) * A + 1) * A + 1 FROM TEST; +> exception DUPLICATE_COLUMN_NAME_1 + +CREATE VIEW V AS SELECT SIN(0), COS(0); +> ok + +TABLE V; +> sin cos +> --- --- +> 0.0 1.0 +> rows: 1 + +DROP VIEW V; +> ok + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +--- sequence with manual value ------------------ + +SET MODE MySQL; +> ok + +CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); +> ok + +SET AUTOCOMMIT FALSE; +> ok + +insert into test(name) values('Hello'); +> update count: 1 + +select id from final table (insert into test(name) values('World')); +>> 2 + +select id from final table (insert into test(id, name) values(1234567890123456, 'World')); +>> 1234567890123456 + +select id from final table (insert into test(name) values('World')); +>> 1234567890123457 + +select * from test order by id; +> ID NAME +> ---------------- ----- +> 1 Hello +> 2 World +> 1234567890123456 World +> 1234567890123457 World +> rows (ordered): 4 + +SET AUTOCOMMIT TRUE; +> ok + +drop table if exists test; +> ok + +CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); +> ok + +SET AUTOCOMMIT FALSE; +> ok + +insert into test(name) values('Hello'); +> update count: 1 + +select id from final table (insert into test(name) values('World')); +>> 2 + +select id from final table (insert into test(id, name) values(1234567890123456, 'World')); +>> 1234567890123456 + +select id from final table (insert into test(name) values('World')); +>> 1234567890123457 + +select * from test order by id; +> ID NAME +> ---------------- ----- +> 1 Hello +> 2 World +> 1234567890123456 World +> 1234567890123457 World +> rows (ordered): 4 + +SET AUTOCOMMIT TRUE; +> ok + +drop table test; +> ok + +SET MODE PostgreSQL; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT LASTVAL(); +> exception 
CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +CREATE SEQUENCE SEQ START WITH 100; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 100 + +SELECT LASTVAL(); +>> 100 + +DROP SEQUENCE SEQ; +> ok + +SET MODE MSSQLServer; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT SCOPE_IDENTITY(); +>> null + +CREATE TABLE TEST(ID BIGINT IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES (10); +> update count: 1 + +SELECT SCOPE_IDENTITY(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE DB2; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT IDENTITY_VAL_LOCAL(); +>> null + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +SELECT IDENTITY_VAL_LOCAL(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE Derby; +> ok + +-- To reset last identity +DROP ALL OBJECTS; +> ok + +SELECT IDENTITY_VAL_LOCAL(); +>> null + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +SELECT IDENTITY_VAL_LOCAL(); +>> 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +SET MODE MSSQLServer; +> ok + +CREATE TABLE TEST(ID BIGINT NOT NULL IDENTITY(10, 5), NAME VARCHAR); +> ok + +INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); +> update count: 2 + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 10 Hello +> 15 World +> rows: 2 + +DROP TABLE TEST; +> ok + +SET MODE PostgreSQL; +> ok + +SELECT TO_DATE('24-12-2025','DD-MM-YYYY'); +>> 2025-12-24 + +SET TIME ZONE 'UTC'; +> ok + +SELECT TO_TIMESTAMP('24-12-2025 14:13:12','DD-MM-YYYY HH24:MI:SS'); +>> 2025-12-24 14:13:12+00 + +SET TIME ZONE LOCAL; +> ok + +SET MODE Regular; +> ok + +SELECT 1 = TRUE; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SET MODE MySQL; +> ok + +SELECT 1 = TRUE; +>> TRUE + +SELECT TRUE = 0; +>> FALSE + +SELECT 1 > TRUE; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, B BOOLEAN, I 
INTEGER); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok + +CREATE INDEX TEST_I_IDX ON TEST(I); +> ok + +INSERT INTO TEST(B, I) VALUES (TRUE, 1), (TRUE, 1), (FALSE, 0), (TRUE, 1), (UNKNOWN, NULL); +> update count: 5 + +SELECT * FROM TEST WHERE B = 1; +> ID B I +> -- ---- - +> 1 TRUE 1 +> 2 TRUE 1 +> 4 TRUE 1 +> rows: 3 + +EXPLAIN SELECT * FROM TEST WHERE B = 1; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."I" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "B" = 1 + +SELECT * FROM TEST WHERE I = TRUE; +> ID B I +> -- ---- - +> 1 TRUE 1 +> 2 TRUE 1 +> 4 TRUE 1 +> rows: 3 + +EXPLAIN SELECT * FROM TEST WHERE I = TRUE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."I" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_I_IDX: I = 1 */ WHERE "I" = 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql b/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql new file mode 100644 index 0000000000..f156ea5ebc --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/group_by.sql @@ -0,0 +1,57 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- GROUP BY column index for MySQL/MariaDB/PostgreSQL compatibility mode + +CREATE TABLE MYTAB(X INT , Y INT, Z INT) AS VALUES (1,123,2), (1,456,2), (3,789,4); +> ok + +SET MODE MySQL; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S X + Z +> --- ----- +> 579 3 +> 789 7 +> rows: 2 + +EXPLAIN SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> PLAN +> ------------------------------------------------------------------------------------------------------- +> SELECT SUM("Y") AS "S", "X" + "Z" FROM "PUBLIC"."MYTAB" /* PUBLIC.MYTAB.tableScan */ GROUP BY "X" + "Z" +> rows: 1 + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 3; +> exception GROUP_BY_NOT_IN_THE_RESULT + +SELECT MYTAB.*, SUM(Y) AS S FROM MYTAB GROUP BY 1; +> exception SYNTAX_ERROR_2 + +SET MODE MariaDB; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S X + Z +> --- ----- +> 579 3 +> 789 7 +> rows: 2 + +SET MODE PostgreSQL; +> ok + +SELECT SUM(Y) AS S , X + Z FROM MYTAB GROUP BY 2; +> S ?column? +> --- -------- +> 579 3 +> 789 7 +> rows: 2 + +SET MODE Oracle; +> ok + +SELECT SUM(Y) AS S , X FROM MYTAB GROUP BY 2; +> exception MUST_GROUP_BY_COLUMN_1 diff --git a/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql b/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql new file mode 100644 index 0000000000..7fbc8317ce --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/compatibility/strict_and_legacy.sql @@ -0,0 +1,101 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SET MODE STRICT; +> ok + +VALUES 1 IN (); +> exception SYNTAX_ERROR_2 + +SELECT TOP 1 * FROM (VALUES 1, 2); +> exception SYNTAX_ERROR_1 + +SELECT * FROM (VALUES 1, 2) LIMIT 1; +> exception SYNTAX_ERROR_1 + +CREATE TABLE TEST(ID IDENTITY); +> exception UNKNOWN_DATA_TYPE_1 + +CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT); +> exception SYNTAX_ERROR_2 + +SET MODE LEGACY; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, V INTEGER NOT NULL); +> ok + +INSERT INTO TEST(ID, V) VALUES (10, 15); +> update count: 1 + +INSERT INTO TEST(V) VALUES 20; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 15 +> 11 20 +> rows: 2 + +UPDATE TOP(1) TEST SET V = V + 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 16 +> 11 20 +> rows: 2 + +MERGE INTO TEST T USING (VALUES (10, 17), (11, 30)) I(ID, V) ON T.ID = I.ID +WHEN MATCHED THEN UPDATE SET V = I.V WHERE T.ID > 10; +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 10 16 +> 11 30 +> rows: 2 + +CREATE TABLE T2(ID BIGINT PRIMARY KEY, V INT REFERENCES TEST(V)); +> ok + +DROP TABLE T2, TEST; +> ok + +CREATE TABLE TEST(ID BIGINT IDENTITY(1, 10)); +> ok + +DROP TABLE TEST; +> ok + +CREATE SEQUENCE SEQ; +> ok + +SELECT SEQ.NEXTVAL; +>> 1 + +SELECT SEQ.CURRVAL; +>> 1 + +DROP SEQUENCE SEQ; +> ok + +SELECT 1 = TRUE; +>> TRUE + +SET MODE STRICT; +> ok + +CREATE TABLE TEST(LIMIT INTEGER, MINUS INTEGER); +> ok + +DROP TABLE TEST; +> ok + +SET MODE REGULAR; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/array.sql b/h2/src/test/org/h2/test/scripts/datatypes/array.sql new file mode 100644 index 0000000000..f083ce9947 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/array.sql @@ -0,0 +1,270 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT (10, 20, 30)[1]; +> exception INVALID_VALUE_2 + +SELECT ARRAY[]; +>> [] + +SELECT ARRAY[10]; +>> [10] + +SELECT ARRAY[10, 20, 30]; +>> [10, 20, 30] + +SELECT ARRAY[10, 20, 30][1]; +>> 10 + +SELECT ARRAY[10, 20, 30][3]; +>> 30 + +SELECT ARRAY[10, 20, 30][0]; +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT ARRAY[10, 20, 30][4]; +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT ARRAY[1, NULL] IS NOT DISTINCT FROM ARRAY[1, NULL]; +>> TRUE + +SELECT ARRAY[1, NULL] IS DISTINCT FROM ARRAY[1, NULL]; +>> FALSE + +SELECT ARRAY[1, NULL] = ARRAY[1, NULL]; +>> null + +SELECT ARRAY[1, NULL] <> ARRAY[1, NULL]; +>> null + +SELECT ARRAY[NULL] = ARRAY[NULL, NULL]; +>> FALSE + +select ARRAY[1, NULL, 2] = ARRAY[1, NULL, 1]; +>> FALSE + +select ARRAY[1, NULL, 2] <> ARRAY[1, NULL, 1]; +>> TRUE + +SELECT ARRAY[1, NULL] > ARRAY[1, NULL]; +>> null + +SELECT ARRAY[1, 2] > ARRAY[1, NULL]; +>> null + +SELECT ARRAY[1, 2, NULL] > ARRAY[1, 1, NULL]; +>> TRUE + +SELECT ARRAY[1, 1, NULL] > ARRAY[1, 2, NULL]; +>> FALSE + +SELECT ARRAY[1, 2, NULL] < ARRAY[1, 1, NULL]; +>> FALSE + +SELECT ARRAY[1, 1, NULL] <= ARRAY[1, 1, NULL]; +>> null + +SELECT ARRAY[1, NULL] IN (ARRAY[1, NULL]); +>> null + +CREATE TABLE TEST(A ARRAY); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A INTEGER ARRAY); +> ok + +INSERT INTO TEST VALUES (ARRAY[1, NULL]), (ARRAY[1, 2]); +> update count: 2 + +SELECT ARRAY[1, 2] IN (SELECT A FROM TEST); +>> TRUE + +SELECT ROW (ARRAY[1, 2]) IN (SELECT A FROM TEST); +>> TRUE + +SELECT ARRAY[1, NULL] IN (SELECT A FROM TEST); +>> null + +SELECT ROW (ARRAY[1, NULL]) IN (SELECT A FROM TEST); +>> null + +SELECT A FROM TEST WHERE A = (1, 2); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +DROP TABLE TEST; +> ok + +SELECT ARRAY[1, 2] || 3; +>> [1, 2, 3] + +SELECT 1 || ARRAY[2, 3]; +>> [1, 2, 3] + +SELECT ARRAY[1, 2] || ARRAY[3]; +>> [1, 2, 3] + +SELECT ARRAY[1, 2] || ARRAY[3, 4]; +>> [1, 2, 3, 4] + +SELECT ARRAY[1, 2] || NULL; +>> null + +SELECT NULL::INT ARRAY || 
ARRAY[2]; +>> null + +CREATE TABLE TEST(ID INT, A1 INT ARRAY, A2 INT ARRAY[2]); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, MAXIMUM_CARDINALITY + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE MAXIMUM_CARDINALITY +> ----------- --------- ------------------- +> ID INTEGER null +> A1 ARRAY 65536 +> A2 ARRAY 2 +> rows (ordered): 3 + +INSERT INTO TEST VALUES (1, ARRAY[], ARRAY[]), (2, ARRAY[1, 2], ARRAY[1, 2]); +> update count: 2 + +INSERT INTO TEST VALUES (3, ARRAY[], ARRAY[1, 2, 3]); +> exception VALUE_TOO_LONG_2 + +TABLE TEST; +> ID A1 A2 +> -- ------ ------ +> 1 [] [] +> 2 [1, 2] [1, 2] +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(A1 INT ARRAY, A2 INT ARRAY[2], A3 INT ARRAY[0]); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A1" INTEGER ARRAY, "A2" INTEGER ARRAY[2], "A3" INTEGER ARRAY[0] ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +INSERT INTO TEST(A3) VALUES ARRAY[NULL]; +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST1(I INT ARRAY, I2 INT ARRAY[2]); +> ok + +INSERT INTO TEST1 VALUES (ARRAY[1, 2, 3.0], ARRAY[1, NULL]); +> update count: 1 + +@reconnect + +TABLE TEST1; +> I I2 +> --------- --------- +> [1, 2, 3] [1, null] +> rows: 1 + +INSERT INTO TEST1 VALUES (ARRAY[], ARRAY['abc']); +> exception DATA_CONVERSION_ERROR_1 + +CREATE MEMORY TABLE TEST2 AS (TABLE TEST1) WITH NO DATA; +> ok + +CREATE MEMORY TABLE TEST3(A TIME ARRAY[10] ARRAY[2]); +> ok + +INSERT INTO TEST3 VALUES ARRAY[ARRAY[TIME '10:00:00']]; +> update count: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" 
PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "I" INTEGER ARRAY, "I2" INTEGER ARRAY[2] ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> INSERT INTO "PUBLIC"."TEST1" VALUES (ARRAY [1, 2, 3], ARRAY [1, NULL]); +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "I" INTEGER ARRAY, "I2" INTEGER ARRAY[2] ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> CREATE MEMORY TABLE "PUBLIC"."TEST3"( "A" TIME ARRAY[10] ARRAY[2] ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST3; +> INSERT INTO "PUBLIC"."TEST3" VALUES (ARRAY [ARRAY [TIME '10:00:00']]); +> rows (ordered): 9 + +DROP TABLE TEST1, TEST2, TEST3; +> ok + +VALUES CAST(ARRAY['1', '2'] AS DOUBLE PRECISION ARRAY); +>> [1.0, 2.0] + +EXPLAIN VALUES CAST(ARRAY['1', '2'] AS DOUBLE PRECISION ARRAY); +>> VALUES (CAST(ARRAY [1.0, 2.0] AS DOUBLE PRECISION ARRAY)) + +CREATE TABLE TEST(A1 TIMESTAMP ARRAY, A2 TIMESTAMP ARRAY ARRAY); +> ok + +CREATE INDEX IDX3 ON TEST(A1); +> ok + +CREATE INDEX IDX4 ON TEST(A2); +> ok + +DROP TABLE TEST; +> ok + +VALUES CAST(ARRAY[ARRAY[1, 2], ARRAY[3, 4]] AS INT ARRAY[2] ARRAY[1]); +>> [[1, 2]] + +VALUES CAST(ARRAY[ARRAY[1, 2], ARRAY[3, 4]] AS INT ARRAY[1] ARRAY[2]); +>> [[1], [3]] + +VALUES CAST(ARRAY[1, 2] AS INT ARRAY[0]); +>> [] + +VALUES ARRAY??(1??); +>> [1] + +EXPLAIN VALUES ARRAY??(1, 2??); +>> VALUES (ARRAY [1, 2]) + +VALUES ARRAY(SELECT X FROM SYSTEM_RANGE(1, 10)); +>> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + +CREATE TABLE TEST AS VALUES ARRAY(SELECT X FROM SYSTEM_RANGE(1, 1) WHERE FALSE) WITH NO DATA; +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> ARRAY + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.ELEMENT_TYPES WHERE OBJECT_NAME = 'TEST'; +>> BIGINT + +DROP TABLE TEST; +> ok + +VALUES ARRAY(SELECT); +> exception SUBQUERY_IS_NOT_SINGLE_COLUMN + +VALUES ARRAY(SELECT 1, 2); +> exception SUBQUERY_IS_NOT_SINGLE_COLUMN + +EXPLAIN VALUES ARRAY[NULL, 1, '3']; +>> VALUES (ARRAY [NULL, 1, 3]) + +CREATE TABLE TEST(A INTEGER ARRAY[65536]); +> ok + +DROP TABLE 
TEST; +> ok + +CREATE TABLE TEST(A INTEGER ARRAY[65537]); +> exception INVALID_VALUE_PRECISION diff --git a/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql b/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql new file mode 100644 index 0000000000..3b2bacf124 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/bigint.sql @@ -0,0 +1,68 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Multiplication + +SELECT CAST(-4294967296 AS BIGINT) * CAST (2147483648 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(4294967296 AS BIGINT) * CAST (-2147483648 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(-2147483648 AS BIGINT) * CAST (4294967296 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(2147483648 AS BIGINT) * CAST (-4294967296 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(4294967296 AS BIGINT) * CAST (2147483648 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT CAST(-4294967296 AS BIGINT) * CAST (-2147483648 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT CAST(2147483648 AS BIGINT) * CAST (4294967296 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT CAST(-2147483648 AS BIGINT) * CAST (-4294967296 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT CAST(-9223372036854775808 AS BIGINT) * CAST(1 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(-9223372036854775808 AS BIGINT) * CAST(-1 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT CAST(1 AS BIGINT) * CAST(-9223372036854775808 AS BIGINT); +>> -9223372036854775808 + +SELECT CAST(-1 AS BIGINT) * CAST(-9223372036854775808 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +-- Division + +SELECT CAST(1 AS BIGINT) / CAST(0 AS BIGINT); +> exception DIVISION_BY_ZERO_1 + +SELECT CAST(-9223372036854775808 AS BIGINT) / CAST(1 AS BIGINT); +>> -9223372036854775808 + +SELECT 
CAST(-9223372036854775808 AS BIGINT) / CAST(-1 AS BIGINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +SELECT 0x1L; +> 1 +> - +> 1 +> rows: 1 + +SELECT 0x1234567890abL; +> 20015998341291 +> -------------- +> 20015998341291 +> rows: 1 + +EXPLAIN VALUES (1L, -2147483648L, 2147483647L, -2147483649L, 2147483648L); +>> VALUES (CAST(1 AS BIGINT), -2147483648, CAST(2147483647 AS BIGINT), -2147483649, 2147483648) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/binary.sql b/h2/src/test/org/h2/test/scripts/datatypes/binary.sql new file mode 100644 index 0000000000..fadf19999c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/binary.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B1 BINARY, B2 BINARY(10)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE CHARACTER_OCTET_LENGTH +> ----------- --------- ---------------------- +> B1 BINARY 1 +> B2 BINARY 10 +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +SELECT CAST(X'11' AS BINARY) || CAST(NULL AS BINARY); +>> null + +SELECT CAST(NULL AS BINARY) || CAST(X'11' AS BINARY); +>> null + +EXPLAIN VALUES CAST(X'01' AS BINARY); +>> VALUES (CAST(X'01' AS BINARY(1))) + +CREATE TABLE T(C BINARY(0)); +> exception INVALID_VALUE_2 + +VALUES CAST(X'0102' AS BINARY); +>> X'01' + +CREATE TABLE T1(A BINARY(1048576)); +> ok + +CREATE TABLE T2(A BINARY(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A BINARY(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> 
ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/blob.sql b/h2/src/test/org/h2/test/scripts/datatypes/blob.sql new file mode 100644 index 0000000000..05cc2eb5ea --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/blob.sql @@ -0,0 +1,61 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B1 BLOB, B2 BINARY LARGE OBJECT, B3 TINYBLOB, B4 MEDIUMBLOB, B5 LONGBLOB, B6 IMAGE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ------------------- +> B1 BINARY LARGE OBJECT +> B2 BINARY LARGE OBJECT +> B3 BINARY LARGE OBJECT +> B4 BINARY LARGE OBJECT +> B5 BINARY LARGE OBJECT +> B6 BINARY LARGE OBJECT +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(B0 BLOB(10), B1 BLOB(10K), B2 BLOB(10M), B3 BLOB(10G), B4 BLOB(10T), B5 BLOB(10P)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH +> ----------- ------------------- ------------------------ +> B0 BINARY LARGE OBJECT 10 +> B1 BINARY LARGE OBJECT 10240 +> B2 BINARY LARGE OBJECT 10485760 +> B3 BINARY LARGE OBJECT 10737418240 +> B4 BINARY LARGE OBJECT 10995116277760 +> B5 BINARY LARGE OBJECT 11258999068426240 +> rows (ordered): 6 + +INSERT INTO TEST(B0) VALUES (X'0102030405060708091011'); +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST(B0) VALUES (X'01020304050607080910'); +> update count: 1 + +SELECT B0 FROM TEST; +>> X'01020304050607080910' + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(B BLOB(8192P)); +> exception INVALID_VALUE_2 + +EXPLAIN VALUES CAST(X'00' AS BLOB(1)); +>> VALUES (CAST(X'00' AS BINARY LARGE OBJECT(1))) + +CREATE TABLE T(C BLOB(0)); +> 
exception INVALID_VALUE_2 + +CREATE TABLE TEST(C1 BLOB(1K CHARACTERS), C2 BLOB(1K OCTETS)); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql b/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql new file mode 100644 index 0000000000..979a5e7385 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/boolean.sql @@ -0,0 +1,42 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B BOOLEAN) AS (VALUES TRUE, FALSE, UNKNOWN); +> ok + +SELECT * FROM TEST ORDER BY B; +> B +> ----- +> null +> FALSE +> TRUE +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST AS (SELECT UNKNOWN B); +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> BOOLEAN + +EXPLAIN SELECT CAST(NULL AS BOOLEAN); +>> SELECT UNKNOWN + +SELECT NOT TRUE A, NOT FALSE B, NOT NULL C, NOT UNKNOWN D; +> A B C D +> ----- ---- ---- ---- +> FALSE TRUE null null +> rows: 1 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES (TRUE, FALSE, UNKNOWN); +>> VALUES (TRUE, FALSE, UNKNOWN) + +EXPLAIN SELECT A IS TRUE OR B IS FALSE FROM (VALUES (TRUE, TRUE)) T(A, B); +>> SELECT ("A" IS TRUE) OR ("B" IS FALSE) FROM (VALUES (TRUE, TRUE)) "T"("A", "B") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/datatypes/char.sql b/h2/src/test/org/h2/test/scripts/datatypes/char.sql new file mode 100644 index 0000000000..c76241463a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/char.sql @@ -0,0 +1,198 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(C1 CHAR, C2 CHARACTER, C3 NCHAR, C4 NATIONAL CHARACTER, C5 NATIONAL CHAR); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> C1 CHARACTER +> C2 CHARACTER +> C3 CHARACTER +> C4 CHARACTER +> C5 CHARACTER +> rows (ordered): 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C CHAR(2)); +> ok + +INSERT INTO TEST VALUES 'aa', 'b'; +> update count: 2 + +SELECT * FROM TEST WHERE C = 'b'; +>> b + +SELECT * FROM TEST WHERE C = 'b '; +>> b + +SELECT * FROM TEST WHERE C = 'b '; +>> b + +SELECT C || 'x' V FROM TEST; +> V +> --- +> aax +> b x +> rows: 2 + +DROP TABLE TEST; +> ok + +SET MODE PostgreSQL; +> ok + +CREATE TABLE TEST(C CHAR(2)); +> ok + +INSERT INTO TEST VALUES 'aa', 'b'; +> update count: 2 + +SELECT * FROM TEST WHERE C = 'b'; +>> b + +SELECT * FROM TEST WHERE C = 'b '; +>> b + +SELECT * FROM TEST WHERE C = 'b '; +>> b + +SELECT C || 'x' V FROM TEST; +> V +> --- +> aax +> bx +> rows: 2 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +EXPLAIN VALUES CAST('a' AS CHAR(1)); +>> VALUES (CAST('a' AS CHAR(1))) + +EXPLAIN VALUES CAST('' AS CHAR(1)); +>> VALUES (CAST(' ' AS CHAR(1))) + +CREATE TABLE T(C CHAR(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C1 CHAR(1 CHARACTERS), C2 CHAR(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + +VALUES CAST('ab' AS CHAR); +>> a + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + +INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a * + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> A || '*' B || '*' A || B || '*' CHAR_LENGTH(A) A = B +> -------- -------- ------------- -------------- ----- +> a * a * a a * 2 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb * 
bbbb * 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + +INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a* + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> A || '*' B || '*' A || B || '*' CHAR_LENGTH(A) A = B +> -------- -------- ------------- -------------- ----- +> a* a* aa* 1 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb* bbbb* 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE PostgreSQL; +> ok + +CREATE TABLE TEST(A CHAR(2) NOT NULL, B CHAR(3) NOT NULL); +> ok + +INSERT INTO TEST VALUES ('a', 'a'), ('aa', 'aaa'), ('bb ', 'bb'); +> update count: 3 + +INSERT INTO TEST VALUES ('a a', 'a a'); +> exception VALUE_TOO_LONG_2 + +VALUES CAST('a a' AS CHAR(2)) || '*'; +>> a* + +SELECT A || '*', B || '*', A || B || '*', CHAR_LENGTH(A), A = B FROM TEST; +> ?column? ?column? ?column? char_length ?column? 
+> -------- -------- -------- ----------- -------- +> a* a* aa* 1 TRUE +> aa* aaa* aaaaa* 2 FALSE +> bb* bb* bbbb* 2 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +CREATE TABLE T1(A CHARACTER(1048576)); +> ok + +CREATE TABLE T2(A CHARACTER(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A CHARACTER(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/clob.sql b/h2/src/test/org/h2/test/scripts/datatypes/clob.sql new file mode 100644 index 0000000000..20cb6db086 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/clob.sql @@ -0,0 +1,70 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(C1 CLOB, C2 CHARACTER LARGE OBJECT, C3 TINYTEXT, C4 TEXT, C5 MEDIUMTEXT, C6 LONGTEXT, C7 NTEXT, + C8 NCLOB, C9 CHAR LARGE OBJECT, C10 NCHAR LARGE OBJECT, C11 NATIONAL CHARACTER LARGE OBJECT); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ---------------------- +> C1 CHARACTER LARGE OBJECT +> C2 CHARACTER LARGE OBJECT +> C3 CHARACTER LARGE OBJECT +> C4 CHARACTER LARGE OBJECT +> C5 CHARACTER LARGE OBJECT +> C6 CHARACTER LARGE OBJECT +> C7 CHARACTER LARGE OBJECT +> C8 CHARACTER LARGE OBJECT +> C9 CHARACTER LARGE OBJECT +> C10 CHARACTER LARGE OBJECT +> C11 CHARACTER LARGE OBJECT +> rows (ordered): 11 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C0 CLOB(10), C1 CLOB(10K), C2 CLOB(10M CHARACTERS), C3 CLOB(10G OCTETS), C4 CLOB(10T), C5 CLOB(10P)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH +> ----------- ---------------------- ------------------------ +> C0 CHARACTER LARGE OBJECT 10 +> C1 CHARACTER LARGE OBJECT 10240 +> C2 CHARACTER LARGE OBJECT 10485760 +> C3 CHARACTER LARGE OBJECT 10737418240 +> C4 CHARACTER LARGE OBJECT 10995116277760 +> C5 CHARACTER LARGE OBJECT 11258999068426240 +> rows (ordered): 6 + +INSERT INTO TEST(C0) VALUES ('12345678901'); +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST(C0) VALUES ('1234567890'); +> update count: 1 + +SELECT C0 FROM TEST; +>> 1234567890 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C CLOB(8192P)); +> exception INVALID_VALUE_2 + +EXPLAIN VALUES CAST(' ' AS CLOB(1)); +>> VALUES (CAST(' ' AS CHARACTER LARGE OBJECT(1))) + +CREATE TABLE T(C CLOB(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(C1 CLOB(1K CHARACTERS), C2 CLOB(1K OCTETS)); +> ok + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/datatypes/date.sql b/h2/src/test/org/h2/test/scripts/datatypes/date.sql new file mode 100644 index 0000000000..9d48a4b87e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/date.sql @@ -0,0 +1,60 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(D1 DATE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> D1 DATE +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +SELECT DATE '2000-01-02'; +>> 2000-01-02 + +SELECT DATE '20000102'; +>> 2000-01-02 + +SELECT DATE '-1000102'; +>> -0100-01-02 + +SELECT DATE '3001231'; +>> 0300-12-31 + +-- PostgreSQL returns 2020-12-31 +SELECT DATE '201231'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL DATE '-1000000000-01-01'; +>> -1000000000-01-01 + +CALL DATE '1000000000-12-31'; +>> 1000000000-12-31 + +CALL DATE '-1000000001-12-31'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL DATE '1000000001-01-01'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '1000000000-12-31 00:00:00' AS DATE); +>> 1000000000-12-31 + +SELECT CAST (DATE '1000000000-12-31' AS TIMESTAMP); +>> 1000000000-12-31 00:00:00 + +SELECT CAST (TIMESTAMP '-1000000000-01-01 00:00:00' AS DATE); +>> -1000000000-01-01 + +SELECT CAST (DATE '-1000000000-01-01' AS TIMESTAMP); +>> -1000000000-01-01 00:00:00 + +SELECT CAST (DATE '2000-01-01' AS TIME); +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql b/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql new file mode 100644 index 0000000000..f311f90115 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/decfloat.sql @@ -0,0 +1,283 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(D1 DECFLOAT, D2 DECFLOAT(5), D3 DECFLOAT(10), X NUMBER); +> ok + +INSERT INTO TEST VALUES(1, 1, 9999999999, 1.23); +> update count: 1 + +TABLE TEST; +> D1 D2 D3 X +> -- -- ---------- ---- +> 1 1 9999999999 1.23 +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 DECFLOAT 100000 10 null DECFLOAT null null +> D2 DECFLOAT 5 10 null DECFLOAT 5 null +> D3 DECFLOAT 10 10 null DECFLOAT 10 null +> X DECFLOAT 40 10 null DECFLOAT 40 null +> rows (ordered): 4 + +SELECT D2 + D3 A, D2 - D3 S, D2 * D3 M, D2 / D3 D FROM TEST; +> A S M D +> ----- ----------- ---------- ---------------- +> 1E+10 -9999999998 9999999999 1.0000000001E-10 +> rows: 1 + +CREATE TABLE RESULT AS SELECT D2 + D3 A, D2 - D3 S, D2 * D3 M, D2 / D3 D FROM TEST; +> ok + +TABLE RESULT; +> A S M D +> ----- ----------- ---------- ---------------- +> 1E+10 -9999999998 9999999999 1.0000000001E-10 +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'RESULT' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- 
----------------------- ------------- ------------------ -------------------------- ---------------------- +> A DECFLOAT 11 10 null DECFLOAT 11 null +> S DECFLOAT 11 10 null DECFLOAT 11 null +> M DECFLOAT 15 10 null DECFLOAT 15 null +> D DECFLOAT 11 10 null DECFLOAT 11 null +> rows (ordered): 4 + +DROP TABLE TEST, RESULT; +> ok + +EXPLAIN VALUES (CAST(-9223372036854775808 AS DECFLOAT(19)), CAST(9223372036854775807 AS DECFLOAT(19)), 1.0, -9223372036854775809, + 9223372036854775808); +>> VALUES (CAST(-9223372036854775808 AS DECFLOAT), CAST(9223372036854775807 AS DECFLOAT), 1.0, -9223372036854775809, 9223372036854775808) + +CREATE TABLE T(C DECFLOAT(0)); +> exception INVALID_VALUE_2 + +SELECT CAST(11 AS DECFLOAT(1)); +>> 1E+1 + +SELECT 1E1 IS OF(DECFLOAT); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS SMALLINT)) IS OF(REAL); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS BIGINT)) IS OF(DECFLOAT); +>> TRUE + +SELECT (CAST(1 AS REAL) + CAST(1 AS NUMERIC)) IS OF(DECFLOAT); +>> TRUE + +SELECT MOD(CAST(5 AS DECFLOAT), CAST(2 AS DECFLOAT)); +>> 1 + +EXPLAIN SELECT 1.1E0, 1E1; +>> SELECT CAST(1.1 AS DECFLOAT), CAST(1E+1 AS DECFLOAT) + +CREATE MEMORY TABLE TEST(D DECFLOAT(8)) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +@reconnect + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity -1 +> -1 1 -1 +> 0 0 0 +> 1 -1 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1 -Infinity -Infinity Infinity +> -Infinity 0 -Infinity -Infinity NaN +> -Infinity 1 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1 -Infinity 
-Infinity Infinity Infinity +> -1 -1 -2 0 1 +> -1 0 -1 -1 0 +> -1 1 0 -2 -1 +> -1 1.5 0.5 -2.5 -1.5 +> -1 Infinity Infinity -Infinity -Infinity +> -1 NaN NaN NaN NaN +> 0 -Infinity -Infinity Infinity NaN +> 0 -1 -1 1 0 +> 0 0 0 0 0 +> 0 1 1 -1 0 +> 0 1.5 1.5 -1.5 0 +> 0 Infinity Infinity -Infinity NaN +> 0 NaN NaN NaN NaN +> 1 -Infinity -Infinity Infinity -Infinity +> 1 -1 0 2 -1 +> 1 0 1 1 0 +> 1 1 2 0 1 +> 1 1.5 2.5 -0.5 1.5 +> 1 Infinity Infinity -Infinity Infinity +> 1 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1 0.5 2.5 -1.5 +> 1.5 0 1.5 1.5 0 +> 1.5 1 2.5 0.5 1.5 +> 1.5 1.5 3 0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1 Infinity Infinity -Infinity +> Infinity 0 Infinity Infinity NaN +> Infinity 1 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1 NaN NaN NaN +> NaN 0 NaN NaN NaN +> NaN 1 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D <> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ------------ ------------- +> -Infinity -Infinity NaN NaN +> -Infinity -1 Infinity NaN +> -Infinity 1 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1 -Infinity 0 -1 +> -1 -1 1 0 +> -1 1 -1 0 +> -1 1.5 -0.666666667 -1 +> -1 Infinity 0 -1 +> -1 NaN NaN NaN +> 0 -Infinity 0 0 +> 0 -1 0 0 +> 0 1 0 0 +> 0 1.5 0 0 +> 0 Infinity 0 0 +> 0 NaN NaN NaN +> 1 -Infinity 0 1 +> 1 -1 -1 0 +> 1 1 1 0 +> 1 1.5 0.666666667 1 +> 1 Infinity 0 1 +> 1 NaN NaN NaN +> 1.5 -Infinity 0 1.5 +> 1.5 -1 -1.5 0.5 +> 1.5 1 1.5 0.5 +> 1.5 1.5 1 0 +> 1.5 Infinity 0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1 -Infinity NaN +> 
Infinity 1 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1 NaN NaN +> NaN 1 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1 FALSE FALSE TRUE +> -Infinity 0 FALSE FALSE TRUE +> -Infinity 1 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE +> -Infinity NaN FALSE FALSE TRUE +> -1 -Infinity TRUE FALSE FALSE +> -1 -1 FALSE TRUE FALSE +> -1 0 FALSE FALSE TRUE +> -1 1 FALSE FALSE TRUE +> -1 1.5 FALSE FALSE TRUE +> -1 Infinity FALSE FALSE TRUE +> -1 NaN FALSE FALSE TRUE +> 0 -Infinity TRUE FALSE FALSE +> 0 -1 TRUE FALSE FALSE +> 0 0 FALSE TRUE FALSE +> 0 1 FALSE FALSE TRUE +> 0 1.5 FALSE FALSE TRUE +> 0 Infinity FALSE FALSE TRUE +> 0 NaN FALSE FALSE TRUE +> 1 -Infinity TRUE FALSE FALSE +> 1 -1 TRUE FALSE FALSE +> 1 0 TRUE FALSE FALSE +> 1 1 FALSE TRUE FALSE +> 1 1.5 FALSE FALSE TRUE +> 1 Infinity FALSE FALSE TRUE +> 1 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1 TRUE FALSE FALSE +> 1.5 0 TRUE FALSE FALSE +> 1.5 1 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1 TRUE FALSE FALSE +> Infinity 0 TRUE FALSE FALSE +> Infinity 1 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1 TRUE FALSE FALSE +> NaN 0 TRUE FALSE FALSE +> NaN 1 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS REAL) D1, CAST(D AS DOUBLE PRECISION) D2 FROM TEST ORDER BY D; 
+> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1 -1.0 -1.0 +> 0 0.0 0.0 +> 1 1.0 1.0 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS DECFLOAT), CAST('-Infinity' AS DECFLOAT), CAST('NaN' AS DECFLOAT), CAST(0 AS DECFLOAT); +>> SELECT CAST('Infinity' AS DECFLOAT), CAST('-Infinity' AS DECFLOAT), CAST('NaN' AS DECFLOAT), CAST(0 AS DECFLOAT) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" DECFLOAT(8) ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1), (0), (1), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql b/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql new file mode 100644 index 0000000000..3d86efdfb1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/double_precision.sql @@ -0,0 +1,233 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(D1 DOUBLE, D2 DOUBLE PRECISION, D3 FLOAT, D4 FLOAT(25), D5 FLOAT(53)); +> ok + +ALTER TABLE TEST ADD COLUMN D6 FLOAT(54); +> exception INVALID_VALUE_PRECISION + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- ---------------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 DOUBLE PRECISION 53 2 null DOUBLE PRECISION null null +> D2 DOUBLE PRECISION 53 2 null DOUBLE PRECISION null null +> D3 DOUBLE PRECISION 53 2 null FLOAT null null +> D4 DOUBLE PRECISION 53 2 null FLOAT 25 null +> D5 DOUBLE PRECISION 53 2 null FLOAT 53 null +> rows (ordered): 5 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" DOUBLE PRECISION, "D2" DOUBLE PRECISION, "D3" FLOAT, "D4" FLOAT(25), "D5" FLOAT(53) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST(0 AS DOUBLE); +>> VALUES (CAST(0.0 AS DOUBLE PRECISION)) + +CREATE MEMORY TABLE TEST(D DOUBLE PRECISION) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity -1 +> -1.0 1.0 -1 +> 0.0 0.0 0 +> 1.0 -1.0 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, 
B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1.0 -Infinity -Infinity Infinity +> -Infinity 0.0 -Infinity -Infinity NaN +> -Infinity 1.0 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1.0 -Infinity -Infinity Infinity Infinity +> -1.0 -1.0 -2.0 0.0 1.0 +> -1.0 0.0 -1.0 -1.0 0.0 +> -1.0 1.0 0.0 -2.0 -1.0 +> -1.0 1.5 0.5 -2.5 -1.5 +> -1.0 Infinity Infinity -Infinity -Infinity +> -1.0 NaN NaN NaN NaN +> 0.0 -Infinity -Infinity Infinity NaN +> 0.0 -1.0 -1.0 1.0 0.0 +> 0.0 0.0 0.0 0.0 0.0 +> 0.0 1.0 1.0 -1.0 0.0 +> 0.0 1.5 1.5 -1.5 0.0 +> 0.0 Infinity Infinity -Infinity NaN +> 0.0 NaN NaN NaN NaN +> 1.0 -Infinity -Infinity Infinity -Infinity +> 1.0 -1.0 0.0 2.0 -1.0 +> 1.0 0.0 1.0 1.0 0.0 +> 1.0 1.0 2.0 0.0 1.0 +> 1.0 1.5 2.5 -0.5 1.5 +> 1.0 Infinity Infinity -Infinity Infinity +> 1.0 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1.0 0.5 2.5 -1.5 +> 1.5 0.0 1.5 1.5 0.0 +> 1.5 1.0 2.5 0.5 1.5 +> 1.5 1.5 3.0 0.0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1.0 Infinity Infinity -Infinity +> Infinity 0.0 Infinity Infinity NaN +> Infinity 1.0 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1.0 NaN NaN NaN +> NaN 0.0 NaN NaN NaN +> NaN 1.0 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D <> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ------------------- ------------- +> -Infinity 
-Infinity NaN NaN +> -Infinity -1.0 Infinity NaN +> -Infinity 1.0 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1.0 -Infinity 0.0 -1.0 +> -1.0 -1.0 1.0 0.0 +> -1.0 1.0 -1.0 0.0 +> -1.0 1.5 -0.6666666666666666 -1.0 +> -1.0 Infinity 0.0 -1.0 +> -1.0 NaN NaN NaN +> 0.0 -Infinity 0.0 0.0 +> 0.0 -1.0 0.0 0.0 +> 0.0 1.0 0.0 0.0 +> 0.0 1.5 0.0 0.0 +> 0.0 Infinity 0.0 0.0 +> 0.0 NaN NaN NaN +> 1.0 -Infinity 0.0 1.0 +> 1.0 -1.0 -1.0 0.0 +> 1.0 1.0 1.0 0.0 +> 1.0 1.5 0.6666666666666666 1.0 +> 1.0 Infinity 0.0 1.0 +> 1.0 NaN NaN NaN +> 1.5 -Infinity 0.0 1.5 +> 1.5 -1.0 -1.5 0.5 +> 1.5 1.0 1.5 0.5 +> 1.5 1.5 1.0 0.0 +> 1.5 Infinity 0.0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1.0 -Infinity NaN +> Infinity 1.0 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1.0 NaN NaN +> NaN 1.0 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1.0 FALSE FALSE TRUE +> -Infinity 0.0 FALSE FALSE TRUE +> -Infinity 1.0 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE +> -Infinity NaN FALSE FALSE TRUE +> -1.0 -Infinity TRUE FALSE FALSE +> -1.0 -1.0 FALSE TRUE FALSE +> -1.0 0.0 FALSE FALSE TRUE +> -1.0 1.0 FALSE FALSE TRUE +> -1.0 1.5 FALSE FALSE TRUE +> -1.0 Infinity FALSE FALSE TRUE +> -1.0 NaN FALSE FALSE TRUE +> 0.0 -Infinity TRUE FALSE FALSE +> 0.0 -1.0 TRUE FALSE FALSE +> 0.0 0.0 FALSE TRUE FALSE +> 0.0 1.0 FALSE FALSE TRUE +> 0.0 1.5 FALSE FALSE TRUE +> 0.0 Infinity FALSE FALSE TRUE +> 0.0 NaN FALSE FALSE TRUE +> 1.0 -Infinity TRUE FALSE FALSE +> 1.0 -1.0 TRUE FALSE FALSE +> 1.0 0.0 TRUE FALSE FALSE +> 1.0 1.0 FALSE 
TRUE FALSE +> 1.0 1.5 FALSE FALSE TRUE +> 1.0 Infinity FALSE FALSE TRUE +> 1.0 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1.0 TRUE FALSE FALSE +> 1.5 0.0 TRUE FALSE FALSE +> 1.5 1.0 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1.0 TRUE FALSE FALSE +> Infinity 0.0 TRUE FALSE FALSE +> Infinity 1.0 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1.0 TRUE FALSE FALSE +> NaN 0.0 TRUE FALSE FALSE +> NaN 1.0 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS REAL) D1, CAST(D AS DECFLOAT) D2 FROM TEST ORDER BY D; +> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1.0 -1.0 -1 +> 0.0 0.0 0 +> 1.0 1.0 1 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS DOUBLE PRECISION), CAST('-Infinity' AS DOUBLE PRECISION), CAST('NaN' AS DOUBLE PRECISION), CAST(0 AS DOUBLE PRECISION); +>> SELECT CAST('Infinity' AS DOUBLE PRECISION), CAST('-Infinity' AS DOUBLE PRECISION), CAST('NaN' AS DOUBLE PRECISION), CAST(0.0 AS DOUBLE PRECISION) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" DOUBLE PRECISION ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1.0), (0.0), (1.0), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/enum.sql b/h2/src/test/org/h2/test/scripts/datatypes/enum.sql new file mode 100644 index 
0000000000..cd10233159 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/enum.sql @@ -0,0 +1,388 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +---------------- +--- ENUM support +---------------- + +--- ENUM basic operations + +create table card (rank int, suit enum('hearts', 'clubs', 'spades')); +> ok + +insert into card (rank, suit) values (0, 'clubs'), (3, 'hearts'), (4, NULL); +> update count: 3 + +alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds'); +> ok + +select * from card; +> RANK SUIT +> ---- ------ +> 0 clubs +> 3 hearts +> 4 null +> rows: 3 + +@reconnect + +select suit from card where rank = 0; +>> clubs + +alter table card alter column suit enum('a', 'b', 'c', 'd'); +> exception ENUM_VALUE_NOT_PERMITTED + +alter table card alter column suit enum('''none''', 'hearts', 'clubs', 'spades', 'diamonds'); +> ok + +select * from card order by suit; +> RANK SUIT +> ---- ------ +> 4 null +> 3 hearts +> 0 clubs +> rows (ordered): 3 + +insert into card (rank, suit) values (8, 'diamonds'), (10, 'clubs'), (7, 'hearts'); +> update count: 3 + +select suit, count(rank) from card group by suit order by suit, count(rank); +> SUIT COUNT(RANK) +> -------- ----------- +> null 1 +> hearts 2 +> clubs 2 +> diamonds 1 +> rows (ordered): 4 + +select rank from card where suit = 'diamonds'; +>> 8 + +alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds'); +> ok + +alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds', 'long_enum_value_of_128_chars_00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +> ok + +insert into card (rank, suit) values (11, 'long_enum_value_of_128_chars_00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +> update count: 1 + +--- ENUM 
integer-based operations + +select rank from card where suit = 2; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +select rank from card where cast(suit as integer) = 2; +> RANK +> ---- +> 0 +> 10 +> rows: 2 + +insert into card (rank, suit) values(5, 3); +> update count: 1 + +select * from card where cast(rank as integer) = 5; +> RANK SUIT +> ---- ------ +> 5 spades +> rows: 1 + +--- ENUM edge cases + +insert into card (rank, suit) values(6, ' '); +> exception ENUM_VALUE_NOT_PERMITTED + +alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds', 'clubs'); +> exception ENUM_DUPLICATE + +alter table card alter column suit enum('hearts', 'clubs', 'spades', 'diamonds', ''); +> exception ENUM_EMPTY + +drop table card; +> ok + +--- ENUM as custom user data type + +create type CARD_SUIT as enum('hearts', 'clubs', 'spades', 'diamonds'); +> ok + +create table card (rank int, suit CARD_SUIT); +> ok + +insert into card (rank, suit) values (0, 'clubs'), (3, 'hearts'); +> update count: 2 + +select * from card; +> RANK SUIT +> ---- ------ +> 0 clubs +> 3 hearts +> rows: 2 + +drop table card; +> ok + +drop type CARD_SUIT; +> ok + +--- ENUM in primary key with another column +create type CARD_SUIT as enum('hearts', 'clubs', 'spades', 'diamonds'); +> ok + +create table card (rank int, suit CARD_SUIT, primary key(rank, suit)); +> ok + +insert into card (rank, suit) values (0, 'clubs'), (3, 'hearts'), (1, 'clubs'); +> update count: 3 + +insert into card (rank, suit) values (0, 'clubs'); +> exception DUPLICATE_KEY_1 + +select rank from card where suit = 'clubs'; +> RANK +> ---- +> 0 +> 1 +> rows: 2 + +drop table card; +> ok + +drop type CARD_SUIT; +> ok + +--- ENUM with index +create type CARD_SUIT as enum('hearts', 'clubs', 'spades', 'diamonds'); +> ok + +create table card (rank int, suit CARD_SUIT, primary key(rank, suit)); +> ok + +insert into card (rank, suit) values (0, 'clubs'), (3, 'hearts'), (1, 'clubs'); +> update count: 3 + +create index idx_card_suite on 
card(`suit`); +> ok + +select rank from card where suit = 'clubs'; +> RANK +> ---- +> 0 +> 1 +> rows: 2 + +select rank from card where suit in ('clubs'); +> RANK +> ---- +> 0 +> 1 +> rows: 2 + +insert into card values (2, 'diamonds'); +> update count: 1 + +select rank from card where suit in ('clubs', 'hearts'); +> RANK +> ---- +> 0 +> 1 +> 3 +> rows: 3 + +select rank from card where suit in ('clubs', 'hearts') or suit = 'diamonds'; +> RANK +> ---- +> 0 +> 1 +> 2 +> 3 +> rows: 4 + +drop table card; +> ok + +drop type CARD_SUIT; +> ok + +CREATE TABLE TEST(ID INT, E1 ENUM('A', 'B') DEFAULT 'A', E2 ENUM('C', 'D') DEFAULT 'C' ON UPDATE 'D'); +> ok + +INSERT INTO TEST(ID) VALUES (1); +> update count: 1 + +SELECT * FROM TEST; +> ID E1 E2 +> -- -- -- +> 1 A C +> rows: 1 + +UPDATE TEST SET E1 = 'B'; +> update count: 1 + +SELECT * FROM TEST; +> ID E1 E2 +> -- -- -- +> 1 B D +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(E ENUM('A', 'B')); +> ok + +INSERT INTO TEST VALUES ('B'); +> update count: 1 + +CREATE VIEW V AS SELECT * FROM TEST; +> ok + +SELECT * FROM V; +>> B + +CREATE VIEW V1 AS SELECT E + 2 AS E FROM TEST; +> ok + +SELECT * FROM V1; +>> 4 + +CREATE VIEW V2 AS SELECT E + E AS E FROM TEST; +> ok + +SELECT * FROM V2; +>> 4 + +CREATE VIEW V3 AS SELECT -E AS E FROM TEST; +> ok + +SELECT * FROM V3; +>> -2 + +SELECT TABLE_NAME, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'E' ORDER BY TABLE_NAME; +> TABLE_NAME DATA_TYPE +> ---------- --------- +> TEST ENUM +> V ENUM +> V1 INTEGER +> V2 INTEGER +> V3 INTEGER +> rows (ordered): 5 + +SELECT OBJECT_NAME, OBJECT_TYPE, ENUM_IDENTIFIER, VALUE_NAME, VALUE_ORDINAL FROM INFORMATION_SCHEMA.ENUM_VALUES + WHERE OBJECT_SCHEMA = 'PUBLIC'; +> OBJECT_NAME OBJECT_TYPE ENUM_IDENTIFIER VALUE_NAME VALUE_ORDINAL +> ----------- ----------- --------------- ---------- ------------- +> TEST TABLE 1 A 1 +> TEST TABLE 1 B 2 +> V TABLE 1 A 1 +> V TABLE 1 B 2 +> rows: 4 + +DROP VIEW V; +> ok + +DROP VIEW V1; +> ok + 
+DROP VIEW V2; +> ok + +DROP VIEW V3; +> ok + +DROP TABLE TEST; +> ok + +SELECT CAST (2 AS ENUM('a', 'b', 'c', 'd')); +>> b + +CREATE TABLE TEST(E ENUM('a', 'b')); +> ok + +EXPLAIN SELECT * FROM TEST WHERE E = 'a'; +>> SELECT "PUBLIC"."TEST"."E" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "E" = CAST('a' AS ENUM('a', 'b')) + +INSERT INTO TEST VALUES ('a'); +> update count: 1 + +(SELECT * FROM TEST A) UNION ALL (SELECT * FROM TEST A); +> E +> - +> a +> a +> rows: 2 + +(SELECT * FROM TEST A) MINUS (SELECT * FROM TEST A); +> E +> - +> rows: 0 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST('A' AS ENUM('A', 'B')); +>> VALUES (CAST('A' AS ENUM('A', 'B'))) + +CREATE TABLE TEST(E1 ENUM('a', 'b'), E2 ENUM('e', 'c') ARRAY, E3 ROW(E ENUM('x', 'y'))); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME DATA_TYPE DTD_IDENTIFIER +> ----------- --------- -------------- +> E1 ENUM 1 +> E2 ARRAY 2 +> E3 ROW 3 +> rows: 3 + +SELECT COLLECTION_TYPE_IDENTIFIER, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.ELEMENT_TYPES WHERE OBJECT_NAME = 'TEST'; +> COLLECTION_TYPE_IDENTIFIER DATA_TYPE DTD_IDENTIFIER +> -------------------------- --------- -------------- +> 2 ENUM 2_ +> rows: 1 + +SELECT ROW_IDENTIFIER, FIELD_NAME, DATA_TYPE, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.FIELDS WHERE OBJECT_NAME = 'TEST'; +> ROW_IDENTIFIER FIELD_NAME DATA_TYPE DTD_IDENTIFIER +> -------------- ---------- --------- -------------- +> 3 E ENUM 3_1 +> rows: 1 + +SELECT * FROM INFORMATION_SCHEMA.ENUM_VALUES WHERE OBJECT_NAME = 'TEST'; +> OBJECT_CATALOG OBJECT_SCHEMA OBJECT_NAME OBJECT_TYPE ENUM_IDENTIFIER VALUE_NAME VALUE_ORDINAL +> -------------- ------------- ----------- ----------- --------------- ---------- ------------- +> SCRIPT PUBLIC TEST TABLE 1 a 1 +> SCRIPT PUBLIC TEST TABLE 1 b 2 +> SCRIPT PUBLIC TEST TABLE 2_ c 2 +> SCRIPT PUBLIC TEST TABLE 2_ e 1 +> SCRIPT PUBLIC TEST TABLE 3_1 x 1 +> SCRIPT PUBLIC TEST TABLE 
3_1 y 2 +> rows: 6 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A ENUM('A', 'B') ARRAY, B ROW(V ENUM('C', 'D'))); +> ok + +INSERT INTO TEST VALUES (ARRAY['A', 'B'], ROW('C')); +> update count: 1 + +TABLE TEST; +> A B +> ------ ------- +> [A, B] ROW (C) +> rows: 1 + +@reconnect + +TABLE TEST; +> A B +> ------ ------- +> [A, B] ROW (C) +> rows: 1 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql b/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql new file mode 100644 index 0000000000..4b6675bf74 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/geometry.sql @@ -0,0 +1,277 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(G GEOMETRY, G_S GEOMETRY(GEOMETRY, 1), P GEOMETRY(POINT), P_S GEOMETRY(POINT, 1), + PZ1 GEOMETRY(POINT Z), PZ2 GEOMETRY(POINTZ), PZ1_S GEOMETRY(POINT Z, 1), PZ2_S GEOMETRY(POINTZ, 1), + PM GEOMETRY(POINT M), PZM GEOMETRY(POINT ZM), PZM_S GEOMETRY(POINT ZM, -100), + LS GEOMETRY(LINESTRING), PG GEOMETRY(POLYGON), + MP GEOMETRY(MULTIPOINT), MLS GEOMETRY(MULTILINESTRING), MPG GEOMETRY(MULTIPOLYGON), + GC GEOMETRY(GEOMETRYCOLLECTION)); +> ok + +INSERT INTO TEST VALUES ('POINT EMPTY', 'SRID=1;POINT EMPTY', 'POINT EMPTY', 'SRID=1;POINT EMPTY', + 'POINT Z EMPTY', 'POINT Z EMPTY', 'SRID=1;POINT Z EMPTY', 'SRID=1;POINTZ EMPTY', + 'POINT M EMPTY', 'POINT ZM EMPTY', 'SRID=-100;POINT ZM EMPTY', + 'LINESTRING EMPTY', 'POLYGON EMPTY', + 'MULTIPOINT EMPTY', 'MULTILINESTRING EMPTY', 'MULTIPOLYGON EMPTY', + 'GEOMETRYCOLLECTION EMPTY'); +> update count: 1 + +SELECT COLUMN_NAME, DATA_TYPE, GEOMETRY_TYPE, GEOMETRY_SRID FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE GEOMETRY_TYPE GEOMETRY_SRID +> ----------- --------- ------------------ ------------- +> G GEOMETRY null null +> G_S GEOMETRY null 1 +> P 
GEOMETRY POINT null +> P_S GEOMETRY POINT 1 +> PZ1 GEOMETRY POINT Z null +> PZ2 GEOMETRY POINT Z null +> PZ1_S GEOMETRY POINT Z 1 +> PZ2_S GEOMETRY POINT Z 1 +> PM GEOMETRY POINT M null +> PZM GEOMETRY POINT ZM null +> PZM_S GEOMETRY POINT ZM -100 +> LS GEOMETRY LINESTRING null +> PG GEOMETRY POLYGON null +> MP GEOMETRY MULTIPOINT null +> MLS GEOMETRY MULTILINESTRING null +> MPG GEOMETRY MULTIPOLYGON null +> GC GEOMETRY GEOMETRYCOLLECTION null +> rows (ordered): 17 + +UPDATE TEST SET G = 'SRID=10;LINESTRING EMPTY'; +> update count: 1 + +UPDATE TEST SET GC = 'SRID=8;GEOMETRYCOLLECTION(POINT (1 1))'; +> update count: 1 + +UPDATE TEST SET G_S = 'POINT (1 1)'; +> exception DATA_CONVERSION_ERROR_1 + +UPDATE TEST SET P = 'POINT Z EMPTY'; +> exception DATA_CONVERSION_ERROR_1 + +UPDATE TEST SET P = 'POLYGON EMPTY'; +> exception DATA_CONVERSION_ERROR_1 + +UPDATE TEST SET PZ1 = 'POINT EMPTY'; +> exception DATA_CONVERSION_ERROR_1 + +SELECT * FROM TEST; +> G G_S P P_S PZ1 PZ2 PZ1_S PZ2_S PM PZM PZM_S LS PG MP MLS MPG GC +> ------------------------ ------------------ ----------- ------------------ ------------- ------------- -------------------- -------------------- ------------- -------------- ------------------------ ---------------- ------------- ---------------- --------------------- ------------------ --------------------------------------- +> SRID=10;LINESTRING EMPTY SRID=1;POINT EMPTY POINT EMPTY SRID=1;POINT EMPTY POINT Z EMPTY POINT Z EMPTY SRID=1;POINT Z EMPTY SRID=1;POINT Z EMPTY POINT M EMPTY POINT ZM EMPTY SRID=-100;POINT ZM EMPTY LINESTRING EMPTY POLYGON EMPTY MULTIPOINT EMPTY MULTILINESTRING EMPTY MULTIPOLYGON EMPTY SRID=8;GEOMETRYCOLLECTION (POINT (1 1)) +> rows: 1 + +SELECT G FROM TEST WHERE P_S = 'SRID=1;POINT EMPTY'; +>> SRID=10;LINESTRING EMPTY + +SELECT G FROM TEST WHERE P_S = 'GEOMETRYCOLLECTION Z EMPTY'; +> exception DATA_CONVERSION_ERROR_1 + +CREATE SPATIAL INDEX IDX ON TEST(GC); +> ok + +SELECT P FROM TEST WHERE GC = 'SRID=8;GEOMETRYCOLLECTION (POINT (1 
1))'; +>> POINT EMPTY + +SELECT P FROM TEST WHERE GC = 'SRID=8;GEOMETRYCOLLECTION Z (POINT (1 1 1))'; +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT)); +>> POINT EMPTY + +SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT Z)); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT, 0)); +>> POINT EMPTY + +SELECT CAST('POINT EMPTY' AS GEOMETRY(POINT, 1)); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST('POINT EMPTY' AS GEOMETRY(POLYGON)); +> exception DATA_CONVERSION_ERROR_1 + +DROP TABLE TEST; +> ok + +SELECT CAST('POINT EMPTY'::GEOMETRY AS JSON); +>> null + +SELECT CAST('null' FORMAT JSON AS GEOMETRY); +>> POINT EMPTY + +SELECT CAST('POINT (1 2)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2]} + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY); +>> POINT (1 2) + +SELECT CAST('POINT Z (1 2 3)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2,3]} + +SELECT CAST('{"type":"Point","coordinates":[1,2,3]}' FORMAT JSON AS GEOMETRY); +>> POINT Z (1 2 3) + +SELECT CAST('POINT ZM (1 2 3 4)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2,3,4]} + +SELECT CAST('{"type":"Point","coordinates":[1,2,3,4]}' FORMAT JSON AS GEOMETRY); +>> POINT ZM (1 2 3 4) + +SELECT CAST('POINT M (1 2 4)'::GEOMETRY AS JSON); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST('SRID=4326;POINT (1 2)'::GEOMETRY AS JSON); +>> {"type":"Point","coordinates":[1,2]} + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY(POINT)); +>> POINT (1 2) + +SELECT CAST('{"type":"Point","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY(GEOMETRY, 4326)); +>> SRID=4326;POINT (1 2) + +SELECT CAST('LINESTRING EMPTY'::GEOMETRY AS JSON); +>> {"type":"LineString","coordinates":[]} + +SELECT CAST('{"type":"LineString","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> LINESTRING EMPTY + +SELECT CAST('LINESTRING (1 2, 3 4)'::GEOMETRY AS JSON); +>> 
{"type":"LineString","coordinates":[[1,2],[3,4]]} + +SELECT CAST('{"type":"LineString","coordinates":[[1,2],[3,4]]}' FORMAT JSON AS GEOMETRY); +>> LINESTRING (1 2, 3 4) + +SELECT CAST('POLYGON EMPTY'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[]} + +SELECT CAST('{"type":"Polygon","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> POLYGON EMPTY + +SELECT CAST('POLYGON ((-1 -2, 10 1, 2 20, -1 -2))'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]} + +SELECT CAST('{"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]}' FORMAT JSON AS GEOMETRY); +>> POLYGON ((-1 -2, 10 1, 2 20, -1 -2)) + +SELECT CAST('POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5), EMPTY)'::GEOMETRY AS JSON); +>> {"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]],[]]} + +SELECT CAST('{"type":"Polygon","coordinates":[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]],[]]}' FORMAT JSON AS GEOMETRY); +>> POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5), EMPTY) + +SELECT CAST('MULTIPOINT EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT EMPTY + +SELECT CAST('MULTIPOINT ((1 2))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,2]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,2]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT ((1 2)) + +SELECT CAST('MULTIPOINT ((1 2), (3 4))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,2],[3,4]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,2],[3,4]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOINT ((1 2), (3 4)) + +SELECT CAST('MULTIPOINT ((1 0), EMPTY, EMPTY, (2 2))'::GEOMETRY AS JSON); +>> {"type":"MultiPoint","coordinates":[[1,0],null,null,[2,2]]} + +SELECT CAST('{"type":"MultiPoint","coordinates":[[1,0],null,null,[2,2]]}' FORMAT JSON AS 
GEOMETRY); +>> MULTIPOINT ((1 0), EMPTY, EMPTY, (2 2)) + +SELECT CAST('MULTILINESTRING EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[]} + +SELECT CAST('{"type":"MultiLineString","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING EMPTY + +SELECT CAST('MULTILINESTRING ((1 2, 3 4, 5 7))'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]]]} + +SELECT CAST('{"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]]]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING ((1 2, 3 4, 5 7)) + +SELECT CAST('MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01), EMPTY)'::GEOMETRY AS JSON); +>> {"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]],[[-1,-1],[0,0],[2,2],[4,6.01]],[]]} + +SELECT CAST('{"type":"MultiLineString","coordinates":[[[1,2],[3,4],[5,7]],[[-1,-1],[0,0],[2,2],[4,6.01]],[]]}' FORMAT JSON AS GEOMETRY); +>> MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01), EMPTY) + +SELECT CAST('MULTIPOLYGON EMPTY'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON EMPTY + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)))'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2))) + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)), ((1 2, 2 2, 3 3, 1 2)))'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]],[[[1,2],[2,2],[3,3],[1,2]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]]],[[[1,2],[2,2],[3,3],[1,2]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)), ((1 2, 2 2, 3 3, 1 2))) + +SELECT CAST('MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 
-2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5)))'::GEOMETRY AS JSON); +>> {"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]]]]} + +SELECT CAST('{"type":"MultiPolygon","coordinates":[[[[-1,-2],[1E1,1],[2,2E1],[-1,-2]],[[0.5,0.5],[1,0.5],[1,1],[0.5,0.5]]]]}' FORMAT JSON AS GEOMETRY); +>> MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5))) + +SELECT CAST('GEOMETRYCOLLECTION EMPTY'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION EMPTY + +SELECT CAST('GEOMETRYCOLLECTION (POINT (1 2))'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,2]}]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,2]}]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION (POINT (1 2)) + +SELECT CAST('GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 3)), MULTIPOINT ((4 8)))'::GEOMETRY AS JSON); +>> {"type":"GeometryCollection","geometries":[{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,3]}]},{"type":"MultiPoint","coordinates":[[4,8]]}]} + +SELECT CAST('{"type":"GeometryCollection","geometries":[{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":[1,3]}]},{"type":"MultiPoint","coordinates":[[4,8]]}]}' FORMAT JSON AS GEOMETRY); +>> GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 3)), MULTIPOINT ((4 8))) + +SELECT CAST('{"type":"Unknown","coordinates":[1,2]}' FORMAT JSON AS GEOMETRY); +> exception DATA_CONVERSION_ERROR_1 + +EXPLAIN VALUES GEOMETRY 'POINT EMPTY'; +>> VALUES (GEOMETRY 'POINT EMPTY') + +EXPLAIN VALUES GEOMETRY X'00000000017ff80000000000007ff8000000000000'; +>> VALUES (GEOMETRY 'POINT EMPTY') + +EXPLAIN VALUES CAST(CAST('POINT EMPTY' AS GEOMETRY) AS VARBINARY); +>> VALUES (CAST(X'00000000017ff80000000000007ff8000000000000' AS BINARY VARYING)) + 
+SELECT GEOMETRY X'000000000300000000'; +>> POLYGON EMPTY + +SELECT GEOMETRY X'00000000030000000100000000'; +>> POLYGON EMPTY + +SELECT CAST(GEOMETRY 'POLYGON EMPTY' AS VARBINARY); +>> X'000000000300000000' + +SELECT CAST(GEOMETRY X'00000000030000000100000000' AS VARBINARY); +>> X'000000000300000000' + +VALUES GEOMETRY 'POINT (1 2 3)'; +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/identity.sql b/h2/src/test/org/h2/test/scripts/datatypes/identity.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/identity.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/datatypes/int.sql b/h2/src/test/org/h2/test/scripts/datatypes/int.sql new file mode 100644 index 0000000000..266abcca4b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/int.sql @@ -0,0 +1,18 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Division + +SELECT CAST(1 AS INT) / CAST(0 AS INT); +> exception DIVISION_BY_ZERO_1 + +SELECT CAST(-2147483648 AS INT) / CAST(1 AS INT); +>> -2147483648 + +SELECT CAST(-2147483648 AS INT) / CAST(-1 AS INT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES 1; +>> VALUES (1) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/interval.sql b/h2/src/test/org/h2/test/scripts/datatypes/interval.sql new file mode 100644 index 0000000000..89b53900e5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/interval.sql @@ -0,0 +1,1102 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, + I01 INTERVAL YEAR, I02 INTERVAL MONTH, I03 INTERVAL DAY, I04 INTERVAL HOUR, I05 INTERVAL MINUTE, + I06 INTERVAL SECOND, I07 INTERVAL YEAR TO MONTH, I08 INTERVAL DAY TO HOUR, I09 INTERVAL DAY TO MINUTE, + I10 INTERVAL DAY TO SECOND, I11 INTERVAL HOUR TO MINUTE, I12 INTERVAL HOUR TO SECOND, + I13 INTERVAL MINUTE TO SECOND, + J01 INTERVAL YEAR(5), J02 INTERVAL MONTH(5), J03 INTERVAL DAY(5), J04 INTERVAL HOUR(5), J05 INTERVAL MINUTE(5), + J06 INTERVAL SECOND(5, 9), J07 INTERVAL YEAR(5) TO MONTH, J08 INTERVAL DAY(5) TO HOUR, + J09 INTERVAL DAY(5) TO MINUTE, J10 INTERVAL DAY(5) TO SECOND(9), J11 INTERVAL HOUR(5) TO MINUTE, + J12 INTERVAL HOUR(5) TO SECOND(9), J13 INTERVAL MINUTE(5) TO SECOND(9)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION, INTERVAL_TYPE, INTERVAL_PRECISION + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION INTERVAL_TYPE INTERVAL_PRECISION +> ----------- --------- ------------------ ---------------- ------------------ +> ID INTEGER null null null +> I01 INTERVAL 0 YEAR 2 +> I02 INTERVAL 0 MONTH 2 +> I03 INTERVAL 0 DAY 2 +> I04 INTERVAL 0 HOUR 2 +> I05 INTERVAL 0 MINUTE 2 +> I06 INTERVAL 6 SECOND 2 +> I07 INTERVAL 0 YEAR TO MONTH 2 +> I08 INTERVAL 0 DAY TO HOUR 2 +> I09 INTERVAL 0 DAY TO MINUTE 2 +> I10 INTERVAL 6 DAY TO SECOND 2 +> I11 INTERVAL 0 HOUR TO MINUTE 2 +> I12 INTERVAL 6 HOUR TO SECOND 2 +> I13 INTERVAL 6 MINUTE TO SECOND 2 +> J01 INTERVAL 0 YEAR 5 +> J02 INTERVAL 0 MONTH 5 +> J03 INTERVAL 0 DAY 5 +> J04 INTERVAL 0 HOUR 5 +> J05 INTERVAL 0 MINUTE 5 +> J06 INTERVAL 9 SECOND 5 +> J07 INTERVAL 0 YEAR TO MONTH 5 +> J08 INTERVAL 0 DAY TO HOUR 5 +> J09 INTERVAL 0 DAY TO MINUTE 5 +> J10 INTERVAL 9 DAY TO SECOND 5 +> J11 INTERVAL 0 HOUR TO MINUTE 5 +> J12 INTERVAL 9 HOUR TO SECOND 5 +> J13 INTERVAL 9 MINUTE TO SECOND 5 +> rows (ordered): 27 + +INSERT INTO TEST VALUES ( + 1, + 
INTERVAL '1' YEAR, INTERVAL '1' MONTH, INTERVAL '1' DAY, INTERVAL '1' HOUR, INTERVAL '1' MINUTE, + INTERVAL '1.123456789' SECOND, INTERVAL '1-2' YEAR TO MONTH, INTERVAL '1 2' DAY TO HOUR, + INTERVAL '1 2:3' DAY TO MINUTE, INTERVAL '1 2:3:4.123456789' DAY TO SECOND, INTERVAL '1:2' HOUR TO MINUTE, + INTERVAL '1:2:3.123456789' HOUR TO SECOND, INTERVAL '1:2.123456789' MINUTE TO SECOND, + INTERVAL '1' YEAR, INTERVAL '1' MONTH, INTERVAL '1' DAY, INTERVAL '1' HOUR, INTERVAL '1' MINUTE, + INTERVAL '1.123456789' SECOND, INTERVAL '1-2' YEAR TO MONTH, INTERVAL '1 2' DAY TO HOUR, + INTERVAL '1 2:3' DAY TO MINUTE, INTERVAL '1 2:3:4.123456789' DAY TO SECOND, INTERVAL '1:2' HOUR TO MINUTE, + INTERVAL '1:2:3.123456789' HOUR TO SECOND, INTERVAL '1:2.123456789' MINUTE TO SECOND + ), ( + 2, + INTERVAL '-1' YEAR, INTERVAL '-1' MONTH, INTERVAL '-1' DAY, INTERVAL '-1' HOUR, INTERVAL '-1' MINUTE, + INTERVAL '-1.123456789' SECOND, INTERVAL '-1-2' YEAR TO MONTH, INTERVAL '-1 2' DAY TO HOUR, + INTERVAL '-1 2:3' DAY TO MINUTE, INTERVAL '-1 2:3:4.123456789' DAY TO SECOND, INTERVAL '-1:2' HOUR TO MINUTE, + INTERVAL '-1:2:3.123456789' HOUR TO SECOND, INTERVAL '-1:2.123456789' MINUTE TO SECOND, + INTERVAL -'1' YEAR, INTERVAL -'1' MONTH, INTERVAL -'1' DAY, INTERVAL -'1' HOUR, INTERVAL -'1' MINUTE, + INTERVAL -'1.123456789' SECOND, INTERVAL -'1-2' YEAR TO MONTH, INTERVAL -'1 2' DAY TO HOUR, + INTERVAL -'1 2:3' DAY TO MINUTE, INTERVAL -'1 2:3:4.123456789' DAY TO SECOND, INTERVAL -'1:2' HOUR TO MINUTE, + INTERVAL -'1:2:3.123456789' HOUR TO SECOND, INTERVAL -'1:2.123456789' MINUTE TO SECOND); +> update count: 2 + +@reconnect + +SELECT I01, I02, I03, I04, I05, I06 FROM TEST ORDER BY ID; +> I01 I02 I03 I04 I05 I06 +> ------------------ ------------------- ----------------- ------------------ -------------------- --------------------------- +> INTERVAL '1' YEAR INTERVAL '1' MONTH INTERVAL '1' DAY INTERVAL '1' HOUR INTERVAL '1' MINUTE INTERVAL '1.123457' SECOND +> INTERVAL '-1' YEAR INTERVAL '-1' MONTH 
INTERVAL '-1' DAY INTERVAL '-1' HOUR INTERVAL '-1' MINUTE INTERVAL '-1.123457' SECOND +> rows (ordered): 2 + +SELECT I07, I08, I09, I10 FROM TEST ORDER BY ID; +> I07 I08 I09 I10 +> ----------------------------- ---------------------------- --------------------------------- ------------------------------------------- +> INTERVAL '1-2' YEAR TO MONTH INTERVAL '1 02' DAY TO HOUR INTERVAL '1 02:03' DAY TO MINUTE INTERVAL '1 02:03:04.123457' DAY TO SECOND +> INTERVAL '-1-2' YEAR TO MONTH INTERVAL '-1 02' DAY TO HOUR INTERVAL '-1 02:03' DAY TO MINUTE INTERVAL '-1 02:03:04.123457' DAY TO SECOND +> rows (ordered): 2 + +SELECT I11, I12, I12 FROM TEST ORDER BY ID; +> I11 I12 I12 +> ------------------------------- ----------------------------------------- ----------------------------------------- +> INTERVAL '1:02' HOUR TO MINUTE INTERVAL '1:02:03.123457' HOUR TO SECOND INTERVAL '1:02:03.123457' HOUR TO SECOND +> INTERVAL '-1:02' HOUR TO MINUTE INTERVAL '-1:02:03.123457' HOUR TO SECOND INTERVAL '-1:02:03.123457' HOUR TO SECOND +> rows (ordered): 2 + +SELECT J01, J02, J03, J04, J05, J06 FROM TEST ORDER BY ID; +> J01 J02 J03 J04 J05 J06 +> ------------------ ------------------- ----------------- ------------------ -------------------- ------------------------------ +> INTERVAL '1' YEAR INTERVAL '1' MONTH INTERVAL '1' DAY INTERVAL '1' HOUR INTERVAL '1' MINUTE INTERVAL '1.123456789' SECOND +> INTERVAL '-1' YEAR INTERVAL '-1' MONTH INTERVAL '-1' DAY INTERVAL '-1' HOUR INTERVAL '-1' MINUTE INTERVAL '-1.123456789' SECOND +> rows (ordered): 2 + +SELECT J07, J08, J09, J10 FROM TEST ORDER BY ID; +> J07 J08 J09 J10 +> ----------------------------- ---------------------------- --------------------------------- ---------------------------------------------- +> INTERVAL '1-2' YEAR TO MONTH INTERVAL '1 02' DAY TO HOUR INTERVAL '1 02:03' DAY TO MINUTE INTERVAL '1 02:03:04.123456789' DAY TO SECOND +> INTERVAL '-1-2' YEAR TO MONTH INTERVAL '-1 02' DAY TO HOUR INTERVAL '-1 02:03' DAY TO MINUTE 
INTERVAL '-1 02:03:04.123456789' DAY TO SECOND +> rows (ordered): 2 + +SELECT J11, J12, J12 FROM TEST ORDER BY ID; +> J11 J12 J12 +> ------------------------------- -------------------------------------------- -------------------------------------------- +> INTERVAL '1:02' HOUR TO MINUTE INTERVAL '1:02:03.123456789' HOUR TO SECOND INTERVAL '1:02:03.123456789' HOUR TO SECOND +> INTERVAL '-1:02' HOUR TO MINUTE INTERVAL '-1:02:03.123456789' HOUR TO SECOND INTERVAL '-1:02:03.123456789' HOUR TO SECOND +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +-- Year-month casts + +SELECT CAST(INTERVAL '-10' YEAR AS INTERVAL MONTH(3)); +>> INTERVAL '-120' MONTH + +SELECT CAST(INTERVAL '-10' YEAR AS INTERVAL YEAR TO MONTH); +>> INTERVAL '-10-0' YEAR TO MONTH + +SELECT CAST(INTERVAL '-20' MONTH AS INTERVAL YEAR); +>> INTERVAL '-1' YEAR + +SELECT CAST(INTERVAL '-20' MONTH AS INTERVAL YEAR TO MONTH); +>> INTERVAL '-1-8' YEAR TO MONTH + +SELECT CAST(INTERVAL '-20-10' YEAR TO MONTH AS INTERVAL YEAR); +>> INTERVAL '-20' YEAR + +SELECT CAST(INTERVAL '-20-10' YEAR TO MONTH AS INTERVAL MONTH(3)); +>> INTERVAL '-250' MONTH + +-- Day-time casts: DAY + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3)); +>> INTERVAL '-240' HOUR + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE(5)); +>> INTERVAL '-14400' MINUTE + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL SECOND(6)); +>> INTERVAL '-864000' SECOND + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO HOUR); +>> INTERVAL '-10 00' DAY TO HOUR + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-10 00:00' DAY TO MINUTE + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL DAY TO SECOND); +>> INTERVAL '-10 00:00:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3) TO MINUTE); +>> INTERVAL '-240:00' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL HOUR(3) TO SECOND); +>> INTERVAL '-240:00:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-10' DAY AS INTERVAL MINUTE(5) TO SECOND); +>> INTERVAL 
'-14400:00' MINUTE TO SECOND + +-- Day-time casts: HOUR + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE(4)); +>> INTERVAL '-1800' MINUTE + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL SECOND(6)); +>> INTERVAL '-108000' SECOND + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 06' DAY TO HOUR + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 06:00' DAY TO MINUTE + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 06:00:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-30:00' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-30:00:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-30' HOUR AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1800:00' MINUTE TO SECOND + +-- Day-time casts: MINUTE + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR); +>> INTERVAL '-26' HOUR + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL SECOND(5)); +>> INTERVAL '-94200' SECOND + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 02' DAY TO HOUR + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 02:10' DAY TO MINUTE + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 02:10:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-26:10' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-26:10:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-1570' MINUTE AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1570:00' MINUTE TO SECOND + +-- Day-time casts: SECOND + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL 
'-93784.123456789' SECOND AS INTERVAL HOUR); +>> INTERVAL '-26' HOUR + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL MINUTE(4)); +>> INTERVAL '-1563' MINUTE + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 02' DAY TO HOUR + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 02:03' DAY TO MINUTE + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 02:03:04.123457' DAY TO SECOND + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-26:03' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-26:03:04.123457' HOUR TO SECOND + +SELECT CAST(INTERVAL '-93784.123456789' SECOND AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1563:04.123457' MINUTE TO SECOND + +-- Day-time casts: DAY TO HOUR + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR); +>> INTERVAL '-26' HOUR + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE(4)); +>> INTERVAL '-1560' MINUTE + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL SECOND(5)); +>> INTERVAL '-93600' SECOND + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 02:00' DAY TO MINUTE + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 02:00:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-26:00' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-26:00:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-1 2' DAY TO HOUR AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1560:00' MINUTE TO SECOND + +-- Day-time casts: DAY TO MINUTE + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL 
'-1 2:3' DAY TO MINUTE AS INTERVAL HOUR); +>> INTERVAL '-26' HOUR + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE(4)); +>> INTERVAL '-1563' MINUTE + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL SECOND(5)); +>> INTERVAL '-93780' SECOND + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 02' DAY TO HOUR + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 02:03:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-26:03' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-26:03:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-1 2:3' DAY TO MINUTE AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1563:00' MINUTE TO SECOND + +-- Day-time casts: DAY TO SECOND + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR); +>> INTERVAL '-26' HOUR + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE(4)); +>> INTERVAL '-1563' MINUTE + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL SECOND(5)); +>> INTERVAL '-93784.123457' SECOND + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 02' DAY TO HOUR + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 02:03' DAY TO MINUTE + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-26:03' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-26:03:04.123457' HOUR TO SECOND + +SELECT CAST(INTERVAL '-1 2:3:4.123456789' DAY TO SECOND AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1563:04.123457' MINUTE TO SECOND + +-- Day-time casts: HOUR TO MINUTE + +SELECT 
CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL HOUR); +>> INTERVAL '-30' HOUR + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL MINUTE(4)); +>> INTERVAL '-1802' MINUTE + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL SECOND(6)); +>> INTERVAL '-108120' SECOND + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 06' DAY TO HOUR + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 06:02' DAY TO MINUTE + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 06:02:00' DAY TO SECOND + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-30:02:00' HOUR TO SECOND + +SELECT CAST(INTERVAL '-30:2' HOUR TO MINUTE AS INTERVAL MINUTE(4) TO SECOND); +>> INTERVAL '-1802:00' MINUTE TO SECOND + +-- Day-time casts: HOUR TO SECOND + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL HOUR); +>> INTERVAL '-30' HOUR + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE(4)); +>> INTERVAL '-1802' MINUTE + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL SECOND(6)); +>> INTERVAL '-108124.123457' SECOND + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 06' DAY TO HOUR + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 06:02' DAY TO MINUTE + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 06:02:04.123457' DAY TO SECOND + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-30:02' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-30:2:4.123456789' HOUR TO SECOND AS INTERVAL MINUTE(4) TO 
SECOND); +>> INTERVAL '-1802:04.123457' MINUTE TO SECOND + +-- Day-time casts: MINUTE TO SECOND + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY); +>> INTERVAL '-1' DAY + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL HOUR); +>> INTERVAL '-30' HOUR + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL MINUTE(4)); +>> INTERVAL '-1803' MINUTE + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL SECOND(6)); +>> INTERVAL '-108184.123457' SECOND + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY TO HOUR); +>> INTERVAL '-1 06' DAY TO HOUR + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY TO MINUTE); +>> INTERVAL '-1 06:03' DAY TO MINUTE + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL DAY TO SECOND); +>> INTERVAL '-1 06:03:04.123457' DAY TO SECOND + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '-30:03' HOUR TO MINUTE + +SELECT CAST(INTERVAL '-1803:4.123456789' MINUTE TO SECOND AS INTERVAL HOUR TO SECOND); +>> INTERVAL '-30:03:04.123457' HOUR TO SECOND + +-- Cast with fractional seconds precision + +SELECT CAST(INTERVAL '10:11.123456789' MINUTE TO SECOND AS INTERVAL SECOND(3, 9)); +>> INTERVAL '611.123456789' SECOND + +-- Casts with strings + +SELECT CAST(INTERVAL '10' YEAR AS VARCHAR); +>> INTERVAL '10' YEAR + +SELECT CAST('INTERVAL ''10'' YEAR' AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST('10' AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' MONTH AS VARCHAR); +>> INTERVAL '10' MONTH + +SELECT CAST('INTERVAL ''10'' MONTH' AS INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST('10' AS INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST(INTERVAL '10' DAY AS VARCHAR); +>> INTERVAL '10' DAY + +SELECT CAST('INTERVAL ''10'' DAY' AS INTERVAL DAY); +>> INTERVAL '10' DAY + +SELECT CAST('10' AS INTERVAL DAY); +>> 
INTERVAL '10' DAY + +SELECT CAST(INTERVAL '10' HOUR AS VARCHAR); +>> INTERVAL '10' HOUR + +SELECT CAST('INTERVAL ''10'' HOUR' AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST('10' AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(INTERVAL '10' MINUTE AS VARCHAR); +>> INTERVAL '10' MINUTE + +SELECT CAST('INTERVAL ''10'' MINUTE' AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST('10' AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST(INTERVAL '10.123456789' SECOND AS VARCHAR); +>> INTERVAL '10.123456789' SECOND + +SELECT CAST('INTERVAL ''10.123456789'' SECOND' AS INTERVAL SECOND(2, 9)); +>> INTERVAL '10.123456789' SECOND + +SELECT CAST('10.123456789' AS INTERVAL SECOND(2, 9)); +>> INTERVAL '10.123456789' SECOND + +SELECT CAST(INTERVAL '10-11' YEAR TO MONTH AS VARCHAR); +>> INTERVAL '10-11' YEAR TO MONTH + +SELECT CAST('INTERVAL ''10-11'' YEAR TO MONTH' AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-11' YEAR TO MONTH + +SELECT CAST('10-11' AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-11' YEAR TO MONTH + +SELECT CAST(INTERVAL '10 11' DAY TO HOUR AS VARCHAR); +>> INTERVAL '10 11' DAY TO HOUR + +SELECT CAST('INTERVAL ''10 11'' DAY TO HOUR' AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 11' DAY TO HOUR + +SELECT CAST('10 11' AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 11' DAY TO HOUR + +SELECT CAST(INTERVAL '10 11:12' DAY TO MINUTE AS VARCHAR); +>> INTERVAL '10 11:12' DAY TO MINUTE + +SELECT CAST('INTERVAL ''10 11:12'' DAY TO MINUTE' AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 11:12' DAY TO MINUTE + +SELECT CAST('10 11:12' AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 11:12' DAY TO MINUTE + +SELECT CAST(INTERVAL '10 11:12:13.123456789' DAY TO SECOND AS VARCHAR); +>> INTERVAL '10 11:12:13.123456789' DAY TO SECOND + +SELECT CAST('INTERVAL ''10 11:12:13.123456789'' DAY TO SECOND' AS INTERVAL DAY TO SECOND(9)); +>> INTERVAL '10 11:12:13.123456789' DAY TO SECOND + +SELECT CAST('10 11:12:13.123456789' AS INTERVAL DAY TO SECOND(9)); +>> INTERVAL '10 
11:12:13.123456789' DAY TO SECOND + +SELECT CAST(INTERVAL '11:12' HOUR TO MINUTE AS VARCHAR); +>> INTERVAL '11:12' HOUR TO MINUTE + +SELECT CAST('INTERVAL ''11:12'' HOUR TO MINUTE' AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '11:12' HOUR TO MINUTE + +SELECT CAST('11:12' AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '11:12' HOUR TO MINUTE + +SELECT CAST(INTERVAL '11:12:13.123456789' HOUR TO SECOND AS VARCHAR); +>> INTERVAL '11:12:13.123456789' HOUR TO SECOND + +SELECT CAST('INTERVAL ''11:12:13.123456789'' HOUR TO SECOND' AS INTERVAL HOUR TO SECOND(9)); +>> INTERVAL '11:12:13.123456789' HOUR TO SECOND + +SELECT CAST('11:12:13.123456789' AS INTERVAL HOUR TO SECOND(9)); +>> INTERVAL '11:12:13.123456789' HOUR TO SECOND + +SELECT CAST(INTERVAL '12:13.123456789' MINUTE TO SECOND AS VARCHAR); +>> INTERVAL '12:13.123456789' MINUTE TO SECOND + +SELECT CAST('INTERVAL ''12:13.123456789'' MINUTE TO SECOND' AS INTERVAL MINUTE TO SECOND(9)); +>> INTERVAL '12:13.123456789' MINUTE TO SECOND + +SELECT CAST('12:13.123456789' AS INTERVAL MINUTE TO SECOND(9)); +>> INTERVAL '12:13.123456789' MINUTE TO SECOND + +-- More formats + +SELECT INTERVAL +'+10' SECOND; +>> INTERVAL '10' SECOND + +SELECT CAST('INTERVAL +''+10'' SECOND' AS INTERVAL SECOND); +>> INTERVAL '10' SECOND + +SELECT INTERVAL -'-10' HOUR; +>> INTERVAL '10' HOUR + +SELECT CAST('INTERVAL -''-10'' HOUR' AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST('INTERVAL ''1'' MINUTE' AS INTERVAL SECOND); +>> INTERVAL '60' SECOND + +SELECT CAST(' interval + ''12-2'' Year To Month ' AS INTERVAL YEAR TO MONTH); +>> INTERVAL '12-2' YEAR TO MONTH + +SELECT CAST('INTERVAL''11:12''HOUR TO MINUTE' AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '11:12' HOUR TO MINUTE + +SELECT INTERVAL '-0-1' YEAR TO MONTH; +>> INTERVAL '-0-1' YEAR TO MONTH + +SELECT INTERVAL '-0.1' SECOND; +>> INTERVAL '-0.1' SECOND + +SELECT INTERVAL -'0.1' SECOND; +>> INTERVAL '-0.1' SECOND + +-- Arithmetic + +SELECT INTERVAL '1000' SECOND + INTERVAL '10' MINUTE; +>> INTERVAL 
'26:40' MINUTE TO SECOND + +SELECT INTERVAL '1000' SECOND - INTERVAL '10' MINUTE; +>> INTERVAL '6:40' MINUTE TO SECOND + +SELECT INTERVAL '10' YEAR + INTERVAL '1' MONTH; +>> INTERVAL '10-1' YEAR TO MONTH + +SELECT INTERVAL '10' YEAR - INTERVAL '1' MONTH; +>> INTERVAL '9-11' YEAR TO MONTH + +SELECT INTERVAL '1000' SECOND * 2; +>> INTERVAL '2000' SECOND + +SELECT 2 * INTERVAL '1000' SECOND; +>> INTERVAL '2000' SECOND + +SELECT INTERVAL '1000' SECOND / 2; +>> INTERVAL '500' SECOND + +SELECT INTERVAL '10' YEAR * 2; +>> INTERVAL '20' YEAR + +SELECT 2 * INTERVAL '10' YEAR; +>> INTERVAL '20' YEAR + +SELECT INTERVAL '10' YEAR / 2; +>> INTERVAL '5' YEAR + +SELECT TIME '10:00:00' + INTERVAL '30' MINUTE; +>> 10:30:00 + +SELECT INTERVAL '30' MINUTE + TIME '10:00:00'; +>> 10:30:00 + +SELECT TIME '10:00:00' - INTERVAL '30' MINUTE; +>> 09:30:00 + +SELECT DATE '2000-01-10' + INTERVAL '30' HOUR; +>> 2000-01-11 + +SELECT INTERVAL '30' HOUR + DATE '2000-01-10'; +>> 2000-01-11 + +SELECT DATE '2000-01-10' - INTERVAL '30' HOUR; +>> 2000-01-09 + +SELECT DATE '2000-01-10' + INTERVAL '1-2' YEAR TO MONTH; +>> 2001-03-10 + +SELECT INTERVAL '1-2' YEAR TO MONTH + DATE '2000-01-10'; +>> 2001-03-10 + +SELECT DATE '2000-01-10' - INTERVAL '1-2' YEAR TO MONTH; +>> 1998-11-10 + +SELECT TIMESTAMP '2000-01-01 12:00:00' + INTERVAL '25 13' DAY TO HOUR; +>> 2000-01-27 01:00:00 + +SELECT INTERVAL '25 13' DAY TO HOUR + TIMESTAMP '2000-01-01 12:00:00'; +>> 2000-01-27 01:00:00 + +SELECT TIMESTAMP '2000-01-01 12:00:00' - INTERVAL '25 13' DAY TO HOUR; +>> 1999-12-06 23:00:00 + +SELECT TIMESTAMP '2000-01-01 12:00:00' + INTERVAL '1-2' YEAR TO MONTH; +>> 2001-03-01 12:00:00 + +SELECT INTERVAL '1-2' YEAR TO MONTH + TIMESTAMP '2000-01-01 12:00:00'; +>> 2001-03-01 12:00:00 + +SELECT TIMESTAMP '2000-01-01 12:00:00' - INTERVAL '1-2' YEAR TO MONTH; +>> 1998-11-01 12:00:00 + +SELECT TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01' + INTERVAL '25 13' DAY TO HOUR; +>> 2000-01-27 01:00:00+01 + +SELECT INTERVAL '25 13' DAY 
TO HOUR + TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01'; +>> 2000-01-27 01:00:00+01 + +SELECT TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01' - INTERVAL '25 13' DAY TO HOUR; +>> 1999-12-06 23:00:00+01 + +SELECT TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01' + INTERVAL '1-2' YEAR TO MONTH; +>> 2001-03-01 12:00:00+01 + +SELECT INTERVAL '1-2' YEAR TO MONTH + TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01'; +>> 2001-03-01 12:00:00+01 + +SELECT TIMESTAMP WITH TIME ZONE '2000-01-01 12:00:00+01' - INTERVAL '1-2' YEAR TO MONTH; +>> 1998-11-01 12:00:00+01 + +SELECT -INTERVAL '1' DAY; +>> INTERVAL '-1' DAY + +-- Date-time subtraction + +SELECT TIME '10:30:15.123456789' - TIME '11:00:00'; +>> INTERVAL '-0:29:44.876543211' HOUR TO SECOND + +SELECT DATE '2010-01-15' - DATE '2009-12-31'; +>> INTERVAL '15' DAY + +SELECT TIMESTAMP '2010-01-15 12:00:00.5' - TIMESTAMP '2010-01-13 01:30:00'; +>> INTERVAL '2 10:30:00.5' DAY TO SECOND + +SELECT TIMESTAMP WITH TIME ZONE '2010-01-15 12:00:00.5+01' - TIMESTAMP WITH TIME ZONE '2010-01-13 01:30:00+01'; +>> INTERVAL '2 10:30:00.5' DAY TO SECOND + +SELECT TIMESTAMP WITH TIME ZONE '2010-01-15 12:00:00.5+01' - TIMESTAMP WITH TIME ZONE '2010-01-13 01:30:00+02'; +>> INTERVAL '2 11:30:00.5' DAY TO SECOND + +SELECT TIMESTAMP '2010-01-15 12:00:00.5+01' - TIMESTAMP WITH TIME ZONE '2010-01-13 01:30:00+02'; +>> INTERVAL '2 11:30:00.5' DAY TO SECOND + +SELECT TIMESTAMP WITH TIME ZONE '2010-01-15 12:00:00.5+01' - TIMESTAMP '2010-01-13 01:30:00+02'; +>> INTERVAL '2 11:30:00.5' DAY TO SECOND + +CREATE TABLE TEST(I INTERVAL YEAR TO MONTH); +> ok + +INSERT INTO TEST VALUES ('-0-0'), ('-0-1'), ('-1-1'), ('1-0'), ('0-1'), ('1-1'), ('-1-0'); +> update count: 7 + +SELECT * FROM TEST ORDER BY I; +> I +> ----------------------------- +> INTERVAL '-1-1' YEAR TO MONTH +> INTERVAL '-1-0' YEAR TO MONTH +> INTERVAL '-0-1' YEAR TO MONTH +> INTERVAL '0-0' YEAR TO MONTH +> INTERVAL '0-1' YEAR TO MONTH +> INTERVAL '1-0' YEAR TO MONTH +> INTERVAL '1-1' YEAR TO 
MONTH +> rows (ordered): 7 + +DROP TABLE TEST; +> ok + +-- Some precision tests + +CREATE TABLE TEST(I INTERVAL DAY, IL INTERVAL DAY(5)); +> ok + +INSERT INTO TEST VALUES ('99', '99999'), ('-99', '-99999'); +> update count: 2 + +INSERT INTO TEST(I) VALUES ('100'); +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST(I) VALUES ('-100'); +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST(IL) VALUES ('100000'); +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST(IL) VALUES ('-100000'); +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTERVAL DAY(0)); +> exception INVALID_VALUE_PRECISION + +CREATE TABLE TEST(I INTERVAL DAY(18)); +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTERVAL DAY(19)); +> exception INVALID_VALUE_PRECISION + +CREATE TABLE TEST(I INTERVAL HOUR TO SECOND(0)); +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTERVAL HOUR TO SECOND(9)); +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTERVAL HOUR TO SECOND(10)); +> exception INVALID_VALUE_SCALE + +SELECT TIMESTAMP '2018-09-10 23:30:00' - TIMESTAMP '2014-09-11 23:30:00'; +>> INTERVAL '1460 00:00:00' DAY TO SECOND + +SELECT TIMESTAMP WITH TIME ZONE '2014-09-11 23:30:00Z' - TIMESTAMP WITH TIME ZONE '2018-09-10 23:30:00Z'; +>> INTERVAL '-1460 00:00:00' DAY TO SECOND + +SELECT DATE '2018-09-10' - DATE '2014-09-11'; +>> INTERVAL '1460' DAY + +SELECT INTERVAL -'1-2' YEAR TO MONTH / INTERVAL '1' MONTH; +>> -14.0000000000000000000000000000000000000000 + +SELECT INTERVAL '1 12:03:40.123456789' DAY TO SECOND / INTERVAL '1' SECOND; +>> 129820.1234567890000000000000000000000000000000000000000000000000000000 + +SELECT INTERVAL -'0.000000001' SECOND / INTERVAL '1' SECOND; +>> -0.0000000010000000000000000000000000000000000000000000000000000000 + +SELECT INTERVAL -'1-2' YEAR TO MONTH / INTERVAL '1' DAY; +> exception FEATURE_NOT_SUPPORTED_1 + +SELECT INTERVAL '1' DAY / INTERVAL '0' DAY; +> exception DIVISION_BY_ZERO_1 + +CALL CAST(INTERVAL '999999999999999998.999999999' 
SECOND AS INTERVAL SECOND(18)); +>> INTERVAL '999999999999999999' SECOND + +CALL CAST(INTERVAL '999999999999999999.999999999' SECOND AS INTERVAL SECOND(18)); +>> INTERVAL '999999999999999999.999999' SECOND + +CALL CAST(INTERVAL '999999999999999998 23:59:59.999999999' DAY TO SECOND AS INTERVAL DAY(18) TO SECOND); +>> INTERVAL '999999999999999999 00:00:00' DAY TO SECOND + +CALL CAST(INTERVAL '999999999999999999 23:59:59.999999999' DAY TO SECOND AS INTERVAL DAY(18) TO SECOND); +>> INTERVAL '999999999999999999 23:59:59.999999' DAY TO SECOND + +CALL CAST(INTERVAL '999999999999999998:59:59.999999999' HOUR TO SECOND AS INTERVAL HOUR(18) TO SECOND); +>> INTERVAL '999999999999999999:00:00' HOUR TO SECOND + +CALL CAST(INTERVAL '999999999999999999:59:59.999999999' HOUR TO SECOND AS INTERVAL HOUR(18) TO SECOND); +>> INTERVAL '999999999999999999:59:59.999999' HOUR TO SECOND + +CALL CAST(INTERVAL '999999999999999998:59.999999999' MINUTE TO SECOND AS INTERVAL MINUTE(18) TO SECOND); +>> INTERVAL '999999999999999999:00' MINUTE TO SECOND + +CALL CAST(INTERVAL '999999999999999999:59.999999999' MINUTE TO SECOND AS INTERVAL MINUTE(18) TO SECOND); +>> INTERVAL '999999999999999999:59.999999' MINUTE TO SECOND + +CALL CAST(INTERVAL '99' DAY AS INTERVAL DAY); +>> INTERVAL '99' DAY + +CALL CAST(INTERVAL '-99' DAY AS INTERVAL DAY); +>> INTERVAL '-99' DAY + +CALL CAST(INTERVAL '100' DAY AS INTERVAL DAY); +> exception VALUE_TOO_LONG_2 + +CALL CAST(INTERVAL '-100' DAY AS INTERVAL DAY); +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00'); +>> INTERVAL '7180 09:30:00' DAY TO SECOND + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR; +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR(6); +>> INTERVAL '172329' HOUR + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - INTERVAL '1' YEAR) YEAR; +> exception SYNTAX_ERROR_2 + +SELECT (INTERVAL '10' HOUR - INTERVAL 
'1' HOUR) HOUR; +> exception SYNTAX_ERROR_2 + +SELECT (10 - 2) SECOND; +> exception SYNTAX_ERROR_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND; +> exception VALUE_TOO_LONG_2 + +SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR(6) TO SECOND; +>> INTERVAL '172329:30:00' HOUR TO SECOND + +EXPLAIN SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND; +>> SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND + +EXPLAIN SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND(9); +>> SELECT (TIMESTAMP '2010-01-01 10:00:00' - TIMESTAMP '1990-05-06 00:30:00') HOUR TO SECOND(9) + +CREATE TABLE TEST(S VARCHAR) AS VALUES '1'; +> ok + +SELECT S DAY FROM TEST; +>> INTERVAL '1' DAY + +EXPLAIN SELECT S DAY FROM TEST; +>> SELECT CAST("S" AS INTERVAL DAY) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT CAST(10 AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS INTEGER); +>> 10 + +SELECT CAST(-10 AS INTERVAL YEAR); +>> INTERVAL '-10' YEAR + +SELECT CAST(INTERVAL '-10' YEAR AS INTEGER); +>> -10 + +SELECT CAST(10::BIGINT AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS BIGINT); +>> 10 + +SELECT CAST(INTERVAL '10' YEAR AS SMALLINT); +>> 10 + +SELECT CAST(INTERVAL '10' YEAR AS TINYINT); +>> 10 + +SELECT CAST(10::DOUBLE AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(INTERVAL '10' YEAR AS REAL); +>> 10.0 + +SELECT CAST(INTERVAL '10' YEAR AS DOUBLE); +>> 10.0 + +SELECT CAST(INTERVAL '10' YEAR AS NUMERIC); +>> 10 + +SELECT CAST(INTERVAL '-10' YEAR AS NUMERIC); +>> -10 + +SELECT CAST(10.123456789123456789 AS INTERVAL YEAR); +>> INTERVAL '10' YEAR + +SELECT CAST(10 AS INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST(INTERVAL '10' MONTH AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS 
INTERVAL MONTH); +>> INTERVAL '10' MONTH + +SELECT CAST(10 AS INTERVAL DAY); +>> INTERVAL '10' DAY + +SELECT CAST(INTERVAL '10' DAY AS NUMERIC); +>> 10 + +SELECT CAST(-10 AS INTERVAL DAY); +>> INTERVAL '-10' DAY + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY); +>> INTERVAL '10' DAY + +SELECT CAST(10 AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(INTERVAL '10' HOUR AS NUMERIC); +>> 10 + +SELECT CAST(10::BIGINT AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10::DOUBLE AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR); +>> INTERVAL '10' HOUR + +SELECT CAST(10 AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST(INTERVAL '10' MINUTE AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS INTERVAL MINUTE); +>> INTERVAL '10' MINUTE + +SELECT CAST(10 AS INTERVAL SECOND); +>> INTERVAL '10' SECOND + +SELECT CAST(INTERVAL '10' SECOND AS NUMERIC); +>> 10 + +SELECT CAST(10.123456789123456789 AS INTERVAL SECOND); +>> INTERVAL '10.123457' SECOND + +SELECT CAST(INTERVAL '10.123457' SECOND AS INT); +>> 10 + +SELECT CAST(INTERVAL '10.123457' SECOND AS NUMERIC(8, 6)); +>> 10.123457 + +SELECT CAST(10 AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-0' YEAR TO MONTH + +SELECT CAST(10::DOUBLE AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-0' YEAR TO MONTH + +SELECT CAST(10.123456789123456789 AS INTERVAL YEAR TO MONTH); +>> INTERVAL '10-1' YEAR TO MONTH + +SELECT CAST(INTERVAL '10-1' YEAR TO MONTH AS NUMERIC(4, 2)); +>> 10.08 + +SELECT CAST(10 AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 00' DAY TO HOUR + +SELECT CAST(10::DOUBLE AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 00' DAY TO HOUR + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO HOUR); +>> INTERVAL '10 02' DAY TO HOUR + +SELECT CAST(INTERVAL '10 02' DAY TO HOUR AS NUMERIC(4, 2)); +>> 10.08 + +SELECT CAST(INTERVAL '-10 02' DAY TO HOUR AS NUMERIC(4, 2)); +>> -10.08 + +SELECT CAST(10 AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 00:00' DAY TO MINUTE + 
+SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO MINUTE); +>> INTERVAL '10 02:57' DAY TO MINUTE + +SELECT CAST(INTERVAL '10 02:57' DAY TO MINUTE AS NUMERIC(6, 4)); +>> 10.1229 + +SELECT CAST(10 AS INTERVAL DAY TO SECOND); +>> INTERVAL '10 00:00:00' DAY TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL DAY TO SECOND); +>> INTERVAL '10 02:57:46.66658' DAY TO SECOND + +SELECT CAST(INTERVAL '10 02:57:46.66658' DAY TO SECOND AS NUMERIC(16, 14)); +>> 10.12345678912037 + +SELECT CAST(10 AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '10:00' HOUR TO MINUTE + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR TO MINUTE); +>> INTERVAL '10:07' HOUR TO MINUTE + +SELECT CAST(INTERVAL '10:07' HOUR TO MINUTE AS NUMERIC(4, 2)); +>> 10.12 + +SELECT CAST(10 AS INTERVAL HOUR TO SECOND); +>> INTERVAL '10:00:00' HOUR TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL HOUR TO SECOND); +>> INTERVAL '10:07:24.444441' HOUR TO SECOND + +SELECT CAST(INTERVAL '10:07:24.444441' HOUR TO SECOND AS NUMERIC(15, 13)); +>> 10.1234567891667 + +SELECT CAST(10 AS INTERVAL MINUTE TO SECOND); +>> INTERVAL '10:00' MINUTE TO SECOND + +SELECT CAST(10.123456789123456789 AS INTERVAL MINUTE TO SECOND); +>> INTERVAL '10:07.407407' MINUTE TO SECOND + +SELECT CAST(INTERVAL '10:07.407407' MINUTE TO SECOND AS NUMERIC(13, 11)); +>> 10.12345678333 + +-- H2 uses 1970-01-01 as start datetime + +SELECT TIMESTAMP '2001-01-05 10:30:00' - TIME '11:45:30.5'; +>> INTERVAL '11326 22:44:29.5' DAY TO SECOND + +SELECT TIME '11:45:30.5' - TIMESTAMP '2001-01-05 10:30:00'; +>> INTERVAL '-11326 22:44:29.5' DAY TO SECOND + +EXPLAIN VALUES INTERVAL '1' DAY; +>> VALUES (INTERVAL '1' DAY) + +SELECT CAST(INTERVAL '1000000000000000' MINUTE AS BIGINT); +>> 1000000000000000 + +SELECT CAST(INTERVAL '999999999999999999:30' HOUR TO SECOND AS NUMERIC); +>> 1000000000000000000 + +SELECT CAST(INTERVAL '999999999999999999:30' HOUR TO SECOND AS NUMERIC(20, 1)); +>> 999999999999999999.5 + +SELECT CAST(INTERVAL 
'999999999999999999:30' HOUR TO MINUTE AS BIGINT); +>> 1000000000000000000 + +SELECT D1, D2, (D1 - D2) YEAR TO MONTH, (D2 - D1) YEAR TO MONTH FROM (VALUES + (DATE '1999-05-12', DATE '2020-05-11'), + (DATE '1999-05-12', DATE '2020-05-12'), + (DATE '1999-05-12', DATE '2020-05-13') +) T(D1, D2); +> D1 D2 (D1 - D2) YEAR TO MONTH (D2 - D1) YEAR TO MONTH +> ---------- ---------- ------------------------------- ------------------------------ +> 1999-05-12 2020-05-11 INTERVAL '-20-11' YEAR TO MONTH INTERVAL '20-11' YEAR TO MONTH +> 1999-05-12 2020-05-12 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> 1999-05-12 2020-05-13 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> rows: 3 + +SELECT T1, T2, (T1 - T2) YEAR TO MONTH, (T2 - T1) YEAR TO MONTH FROM (VALUES + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 11:00:00'), + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 12:00:00'), + (TIMESTAMP '1999-05-12 12:00:00', TIMESTAMP '2020-05-12 13:00:00') +) T(T1, T2); +> T1 T2 (T1 - T2) YEAR TO MONTH (T2 - T1) YEAR TO MONTH +> ------------------- ------------------- ------------------------------- ------------------------------ +> 1999-05-12 12:00:00 2020-05-12 11:00:00 INTERVAL '-20-11' YEAR TO MONTH INTERVAL '20-11' YEAR TO MONTH +> 1999-05-12 12:00:00 2020-05-12 12:00:00 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> 1999-05-12 12:00:00 2020-05-12 13:00:00 INTERVAL '-21-0' YEAR TO MONTH INTERVAL '21-0' YEAR TO MONTH +> rows: 3 + +SELECT (DATE '2010-01-02' - DATE '2000-01-01') YEAR; +>> INTERVAL '10' YEAR diff --git a/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql b/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql new file mode 100644 index 0000000000..bbe0f8ece9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/java_object.sql @@ -0,0 +1,53 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +EXPLAIN VALUES CAST(X'' AS JAVA_OBJECT); +>> VALUES (CAST(X'' AS JAVA_OBJECT)) + +VALUES CAST(CAST(X'00' AS JAVA_OBJECT) AS VARCHAR(2)); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(CAST(X'00' AS JAVA_OBJECT) AS CHAR(2)); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST('00' AS JAVA_OBJECT); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(CAST('00' AS CHAR(2)) AS JAVA_OBJECT); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(X'0000' AS JAVA_OBJECT(1)); +> exception VALUE_TOO_LONG_2 + +VALUES CAST(CAST (X'0000' AS JAVA_OBJECT) AS JAVA_OBJECT(1)); +> exception VALUE_TOO_LONG_2 + +CREATE TABLE T(C JAVA_OBJECT(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A JAVA_OBJECT(1048576)); +> ok + +CREATE TABLE T2(A JAVA_OBJECT(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A JAVA_OBJECT(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/json.sql b/h2/src/test/org/h2/test/scripts/datatypes/json.sql new file mode 100644 index 0000000000..4bf8ece132 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/json.sql @@ -0,0 +1,360 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT '{"tag1":"simple string"}' FORMAT JSON; +>> {"tag1":"simple string"} + +SELECT CAST('{"tag1":"simple string"}' FORMAT JSON AS JSON); +>> {"tag1":"simple string"} + +SELECT CAST('text' AS JSON); +>> "text" + +SELECT X'31' FORMAT JSON; +>> 1 + +SELECT 0::JSON; +>> 0 + +SELECT '0' FORMAT JSON; +>> 0 + +SELECT JSON '1', JSON X'31', JSON '1' IS OF (JSON), JSON X'31' IS OF (JSON); +> JSON '1' JSON '1' TRUE TRUE +> -------- -------- ---- ---- +> 1 1 TRUE TRUE +> rows: 1 + +SELECT JSON 'tr' 'ue', JSON X'7472' '7565', JSON 'tr' 'ue' IS OF (JSON), JSON X'7472' '7565' IS OF (JSON); +> JSON 'true' JSON 'true' TRUE TRUE +> ----------- ----------- ---- ---- +> true true TRUE TRUE +> rows: 1 + +SELECT 1::JSON; +>> 1 + +SELECT 1L::JSON; +>> 1 + +SELECT 1000000000000L::JSON; +>> 1000000000000 + +SELECT CAST(1e100::FLOAT AS JSON); +>> 1.0E100 + +SELECT CAST(1e100::DOUBLE AS JSON); +>> 1.0E100 + +SELECT CAST(1e100 AS JSON); +>> 1E100 + +SELECT CAST(TRUE AS JSON); +>> true + +SELECT CAST('true' FORMAT JSON AS JSON); +>> true + +SELECT CAST(FALSE AS JSON); +>> false + +SELECT CAST('false' FORMAT JSON AS JSON); +>> false + +SELECT CAST('null' FORMAT JSON AS JSON); +>> null + +SELECT CAST('10' FORMAT JSON AS VARBINARY); +>> X'3130' + +SELECT CAST('10' FORMAT JSON AS BLOB); +>> X'3130' + +CREATE TABLE TEST (ID INT, DATA JSON); +> ok + +INSERT INTO TEST VALUES +(1, '{"tag1":"simple string", "tag2": 333, "tag3":[1, 2, 3]}' format json), +(2, '{"tag1":"another string", "tag4":{"lvl1":"lvl2"}}' format json), +(3, '["string", 5555, {"arr":"yes"}]' format json), +(4, '{"1":"val1"}' format json); +> update count: 4 + +@reconnect + +SELECT ID, DATA FROM TEST; +> ID DATA +> -- -------------------------------------------------- +> 1 {"tag1":"simple string","tag2":333,"tag3":[1,2,3]} +> 2 {"tag1":"another string","tag4":{"lvl1":"lvl2"}} +> 3 ["string",5555,{"arr":"yes"}] +> 4 {"1":"val1"} +> rows: 4 + +INSERT INTO TEST VALUES (5, '}' FORMAT JSON); +> 
exception DATA_CONVERSION_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, S VARCHAR, B VARBINARY, J JSON) AS VALUES + (1, '{"a":1,"a":2}', STRINGTOUTF8('{"a":1,"a":2}'), '{"a":1,"a":2}' FORMAT JSON), + (2, '{"a":1,"b":2}', STRINGTOUTF8('{"a":1,"b":2}'), '{"a":1,"b":2}' FORMAT JSON), + (3, '{"a":1,"b":2', STRINGTOUTF8('{"a":1,"b":2'), null), + (4, null, null, null); +> ok + +SELECT S IS JSON, B IS JSON WITHOUT UNIQUE, J IS JSON WITHOUT UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS JSON B IS JSON J IS JSON +> --------- --------- --------- +> TRUE TRUE TRUE +> TRUE TRUE TRUE +> FALSE FALSE null +> null null null +> rows (ordered): 4 + +SELECT S IS NOT JSON, B IS NOT JSON WITHOUT UNIQUE, J IS NOT JSON WITHOUT UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS NOT JSON B IS NOT JSON J IS NOT JSON +> ------------- ------------- ------------- +> FALSE FALSE FALSE +> FALSE FALSE FALSE +> TRUE TRUE null +> null null null +> rows (ordered): 4 + +SELECT S IS JSON WITH UNIQUE KEYS, B IS JSON WITH UNIQUE, J IS JSON WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS JSON WITH UNIQUE KEYS B IS JSON WITH UNIQUE KEYS J IS JSON WITH UNIQUE KEYS +> -------------------------- -------------------------- -------------------------- +> FALSE FALSE FALSE +> TRUE TRUE TRUE +> FALSE FALSE null +> null null null +> rows (ordered): 4 + +SELECT S IS NOT JSON WITH UNIQUE KEYS, B IS NOT JSON WITH UNIQUE, J IS NOT JSON WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> S IS NOT JSON WITH UNIQUE KEYS B IS NOT JSON WITH UNIQUE KEYS J IS NOT JSON WITH UNIQUE KEYS +> ------------------------------ ------------------------------ ------------------------------ +> TRUE TRUE TRUE +> FALSE FALSE FALSE +> TRUE TRUE null +> null null null +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +SELECT 1 IS JSON; +>> FALSE + +SELECT 1 IS NOT JSON; +>> TRUE + +CREATE TABLE TEST(ID INT, S VARCHAR) AS VALUES + (1, '[{"a":1}]'), (2, '{"a":[3]}'), + (3, 'null'), (4, '{"a":1,"a":2}'), + (5, 'X'), (6, NULL); +> ok + +EXPLAIN 
SELECT S FORMAT JSON FORMAT JSON, (S FORMAT JSON) FORMAT JSON FROM TEST; +>> SELECT "S" FORMAT JSON, "S" FORMAT JSON FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +ALTER TABLE TEST ADD J JSON; +> ok + +UPDATE TEST SET J = S FORMAT JSON WHERE S IS JSON; +> update count: 4 + +SELECT S IS JSON, S IS JSON VALUE, S IS JSON ARRAY, S IS JSON OBJECT, S IS JSON SCALAR FROM TEST ORDER BY ID; +> S IS JSON S IS JSON S IS JSON ARRAY S IS JSON OBJECT S IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> rows (ordered): 6 + +SELECT J IS JSON, J IS JSON VALUE, J IS JSON ARRAY, J IS JSON OBJECT, J IS JSON SCALAR FROM TEST ORDER BY ID; +> J IS JSON J IS JSON J IS JSON ARRAY J IS JSON OBJECT J IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> null null null null null +> null null null null null +> rows (ordered): 6 + +SELECT J IS JSON WITH UNIQUE KEYS, J IS JSON VALUE WITH UNIQUE KEYS, J IS JSON ARRAY WITH UNIQUE KEYS, + J IS JSON OBJECT WITH UNIQUE KEYS, J IS JSON SCALAR WITH UNIQUE KEYS FROM TEST ORDER BY ID; +> J IS JSON WITH UNIQUE KEYS J IS JSON WITH UNIQUE KEYS J IS JSON ARRAY WITH UNIQUE KEYS J IS JSON OBJECT WITH UNIQUE KEYS J IS JSON SCALAR WITH UNIQUE KEYS +> -------------------------- -------------------------- -------------------------------- --------------------------------- --------------------------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> null null null null null +> rows (ordered): 6 + +SELECT S IS NOT JSON, S IS NOT JSON VALUE, S IS NOT JSON ARRAY, S IS NOT JSON OBJECT, 
S IS NOT JSON SCALAR + FROM TEST ORDER BY ID; +> S IS NOT JSON S IS NOT JSON S IS NOT JSON ARRAY S IS NOT JSON OBJECT S IS NOT JSON SCALAR +> ------------- ------------- ------------------- -------------------- -------------------- +> FALSE FALSE FALSE TRUE TRUE +> FALSE FALSE TRUE FALSE TRUE +> FALSE FALSE TRUE TRUE FALSE +> FALSE FALSE TRUE FALSE TRUE +> TRUE TRUE TRUE TRUE TRUE +> null null null null null +> rows (ordered): 6 + +SELECT NOT S IS NOT JSON, NOT S IS NOT JSON VALUE, NOT S IS NOT JSON ARRAY, NOT S IS NOT JSON OBJECT, + NOT S IS NOT JSON SCALAR FROM TEST ORDER BY ID; +> S IS JSON S IS JSON S IS JSON ARRAY S IS JSON OBJECT S IS JSON SCALAR +> --------- --------- --------------- ---------------- ---------------- +> TRUE TRUE TRUE FALSE FALSE +> TRUE TRUE FALSE TRUE FALSE +> TRUE TRUE FALSE FALSE TRUE +> TRUE TRUE FALSE TRUE FALSE +> FALSE FALSE FALSE FALSE FALSE +> null null null null null +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +SELECT NULL FORMAT JSON, (NULL FORMAT JSON) IS NULL; +> JSON 'null' FALSE +> ----------- ----- +> null FALSE +> rows: 1 + +CREATE MEMORY TABLE TEST(J JSON) AS VALUES ('["\u00A7''",{}]' FORMAT JSON); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ---------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "J" JSON ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (JSON '["\u00a7\u0027",{}]'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(C JSON(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(J JSON(3)); +> ok + +INSERT INTO TEST VALUES JSON '[1]'; +> update count: 1 + +INSERT INTO TEST VALUES JSON 'null'; +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +SELECT CAST(JSON 'null' AS JSON(3)); +> exception VALUE_TOO_LONG_2 + +CREATE TABLE TEST(J JSONB); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE PostgreSQL; +> ok + +CREATE 
TABLE TEST(J JSONB); +> ok + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +EXPLAIN SELECT A IS JSON AND B IS JSON FROM (VALUES (JSON 'null', 1)) T(A, B); +>> SELECT ("A" IS JSON) AND ("B" IS JSON) FROM (VALUES (JSON 'null', 1)) "T"("A", "B") /* table scan */ + +CREATE TABLE T1(A JSON(1048576)); +> ok + +CREATE TABLE T2(A JSON(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A JSON(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT JSON_OBJECT( + 'CHAR' : CAST('C' AS CHAR), + 'VARCHAR' : 'C', + 'CLOB' : CAST('C' AS CLOB), + 'IGNORECASE' : CAST('C' AS VARCHAR_IGNORECASE)); +>> {"CHAR":"C","VARCHAR":"C","CLOB":"C","IGNORECASE":"C"} + +SELECT JSON_OBJECT( + 'BINARY' : CAST(X'7b7d' AS BINARY(2)), + 'VARBINARY' : CAST(X'7b7d' AS VARBINARY), + 'BLOB' : CAST(X'7b7d' AS BLOB)); +>> {"BINARY":{},"VARBINARY":{},"BLOB":{}} + +SELECT CAST(TRUE AS JSON); +>> true + +SELECT JSON_OBJECT( + 'TINYINT' : CAST(1 AS TINYINT), + 'SMALLINT' : CAST(2 AS SMALLINT), + 'INTEGER' : 3, + 'BIGINT' : 4L, + 'NUMERIC' : 1.1, + 'REAL' : CAST(1.2 AS REAL), + 'DOUBLE' : CAST(1.3 AS DOUBLE), + 'DECFLOAT' : 1e-1); +>> {"TINYINT":1,"SMALLINT":2,"INTEGER":3,"BIGINT":4,"NUMERIC":1.1,"REAL":1.2,"DOUBLE":1.3,"DECFLOAT":0.1} + +SELECT JSON_OBJECT( + 'DATE' : DATE '2001-01-31', + 'TIME' : TIME '10:00:00.123456789', + 'TIME_TZ' : TIME WITH TIME ZONE '10:00:00.123456789+10:00'); +>> {"DATE":"2001-01-31","TIME":"10:00:00.123456789","TIME_TZ":"10:00:00.123456789+10"} + +SELECT JSON_OBJECT( + 'TIMESTAMP' : TIMESTAMP '2001-01-31 10:00:00.123456789', + 'TIMESTAMP_TZ' : TIMESTAMP WITH TIME ZONE '2001-01-31 10:00:00.123456789+10:00'); +>> 
{"TIMESTAMP":"2001-01-31T10:00:00.123456789","TIMESTAMP_TZ":"2001-01-31T10:00:00.123456789+10"} + +SELECT JSON_OBJECT( + 'GEOMETRY' : GEOMETRY 'POINT (1 2)', + 'JSON' : JSON '[]', + 'UUID' : UUID '01234567-89ab-cdef-fedc-ba9876543210'); +>> {"GEOMETRY":{"type":"Point","coordinates":[1,2]},"JSON":[],"UUID":"01234567-89ab-cdef-fedc-ba9876543210"} + +SELECT CAST(ARRAY[JSON '[]', JSON '{}'] AS JSON); +>> [[],{}] + +SELECT CAST(ARRAY[1, 2] AS JSON); +>> [1,2] diff --git a/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql b/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql new file mode 100644 index 0000000000..43536cefb0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/numeric.sql @@ -0,0 +1,188 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST( + N1 NUMERIC, N2 NUMERIC(10), N3 NUMERIC(10, 0), N4 NUMERIC(10, 2), + D1 DECIMAL, D2 DECIMAL(10), D3 DECIMAL(10, 0), D4 DECIMAL(10, 2), D5 DEC, + X1 NUMBER(10), X2 NUMBER(10, 2)); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> N1 NUMERIC 100000 10 0 NUMERIC null null +> N2 NUMERIC 10 10 0 NUMERIC 10 null +> N3 NUMERIC 10 10 0 NUMERIC 10 0 +> N4 NUMERIC 10 10 2 NUMERIC 10 2 +> D1 NUMERIC 100000 10 0 DECIMAL null null +> D2 NUMERIC 10 10 0 DECIMAL 10 null +> D3 NUMERIC 10 10 0 DECIMAL 10 0 +> D4 NUMERIC 10 10 2 DECIMAL 10 2 +> D5 NUMERIC 100000 10 0 
DECIMAL null null +> X1 NUMERIC 10 10 0 NUMERIC 10 null +> X2 NUMERIC 10 10 2 NUMERIC 10 2 +> rows (ordered): 11 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(N NUMERIC(2, -1)); +> exception INVALID_VALUE_SCALE + +CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); +> ok + +INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); +{ +0,FALSE,0,0,0,0.0,0.0,0.0 +1,TRUE,1,1,1,1.0,1.0,1.0 +4,TRUE,4,4,4,4.0,4.0,4.0 +-1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 +NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL +}; +> update count: 5 + +SELECT ID, CAST(XT AS NUMBER(10,1)), +CAST(X_SM AS NUMBER(10,1)), CAST(XB AS NUMBER(10,1)), CAST(XD AS NUMBER(10,1)), +CAST(XD2 AS NUMBER(10,1)), CAST(XR AS NUMBER(10,1)) FROM TEST; +> ID CAST(XT AS NUMERIC(10, 1)) CAST(X_SM AS NUMERIC(10, 1)) CAST(XB AS NUMERIC(10, 1)) CAST(XD AS NUMERIC(10, 1)) CAST(XD2 AS NUMERIC(10, 1)) CAST(XR AS NUMERIC(10, 1)) +> ---- -------------------------- ---------------------------- -------------------------- -------------------------- --------------------------- -------------------------- +> -1 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 +> 0 0.0 0.0 0.0 0.0 0.0 0.0 +> 1 1.0 1.0 1.0 1.0 1.0 1.0 +> 4 4.0 4.0 4.0 4.0 4.0 4.0 +> null null null null null null null +> rows: 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I NUMERIC(-1)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(I NUMERIC(-1, -1)); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST (N NUMERIC(3, 1)) AS VALUES (0), (0.0), (NULL); +> ok + +SELECT * FROM TEST; +> N +> ---- +> 0.0 +> 0.0 +> null +> rows: 3 + +DROP TABLE TEST; +> ok + +SELECT CAST(10000 AS NUMERIC(5)); +>> 10000 + +CREATE DOMAIN N AS NUMERIC(10, 1); +> ok + +CREATE TABLE TEST(V N); +> ok + +SELECT NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'V'; +>> 1 + +DROP TABLE TEST; +> ok + +DROP DOMAIN N; +> ok + +CREATE TABLE TEST(I INT PRIMARY KEY, V NUMERIC(1, 3)); +> ok + +INSERT INTO TEST VALUES (1, 1e-3), (2, 1.1e-3), (3, 1e-4); +> 
update count: 3 + +INSERT INTO TEST VALUES (4, 1e-2); +> exception VALUE_TOO_LONG_2 + +TABLE TEST; +> I V +> - ----- +> 1 0.001 +> 2 0.001 +> 3 0.000 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INT PRIMARY KEY, V NUMERIC(2)); +> ok + +INSERT INTO TEST VALUES (1, 1e-1), (2, 2e0), (3, 3e1); +> update count: 3 + +TABLE TEST; +> I V +> - -- +> 1 0 +> 2 2 +> 3 30 +> rows: 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES (CAST(-9223372036854775808 AS NUMERIC(19)), CAST(9223372036854775807 AS NUMERIC(19)), 1.0, -9223372036854775809, + 9223372036854775808); +>> VALUES (CAST(-9223372036854775808 AS NUMERIC(19)), CAST(9223372036854775807 AS NUMERIC(19)), 1.0, -9223372036854775809, 9223372036854775808) + +CREATE TABLE T(C NUMERIC(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A NUMERIC(100000)); +> ok + +CREATE TABLE T2(A NUMERIC(100001)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A NUMERIC(100001)); +> ok + +SELECT TABLE_NAME, NUMERIC_PRECISION, DECLARED_NUMERIC_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME NUMERIC_PRECISION DECLARED_NUMERIC_PRECISION +> ---------- ----------------- -------------------------- +> T1 100000 100000 +> T2 100000 100000 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SET MODE Oracle; +> ok + +CREATE TABLE TEST(N NUMERIC(2, 1)); +> ok + +INSERT INTO TEST VALUES 20; +> exception VALUE_TOO_LONG_2 + +INSERT INTO TEST VALUES CAST(20 AS NUMERIC(2)); +> exception VALUE_TOO_LONG_2 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/real.sql b/h2/src/test/org/h2/test/scripts/datatypes/real.sql new file mode 100644 index 0000000000..d3e350eb0c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/real.sql @@ -0,0 +1,247 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(D1 REAL, D2 FLOAT4, D3 FLOAT(1), D4 FLOAT(24)); +> ok + +ALTER TABLE TEST ADD COLUMN D5 FLOAT(0); +> exception INVALID_VALUE_PRECISION + +ALTER TABLE TEST ADD COLUMN D5 FLOAT(-1); +> exception INVALID_VALUE_2 + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ----------- --------- ----------------- ----------------------- ------------- ------------------ -------------------------- ---------------------- +> D1 REAL 24 2 null REAL null null +> D2 REAL 24 2 null REAL null null +> D3 REAL 24 2 null FLOAT 1 null +> D4 REAL 24 2 null FLOAT 24 null +> rows (ordered): 4 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D1" REAL, "D2" REAL, "D3" FLOAT(1), "D4" FLOAT(24) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST(0 AS REAL); +>> VALUES (CAST(0.0 AS REAL)) + +CREATE TABLE TEST(F REAL, I INT) AS VALUES (2000000000, 2000000001); +> ok + +SELECT F, I, F = I FROM TEST; +> F I F = I +> ----- ---------- ----- +> 2.0E9 2000000001 FALSE +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(D REAL) AS VALUES '-Infinity', '-1', '0', '1', '1.5', 'Infinity', 'NaN'; +> ok + +SELECT D, -D, SIGN(D) FROM TEST ORDER BY D; +> D - D SIGN(D) +> --------- --------- ------- +> -Infinity Infinity 
-1 +> -1.0 1.0 -1 +> 0.0 0.0 0 +> 1.0 -1.0 1 +> 1.5 -1.5 1 +> Infinity -Infinity 1 +> NaN NaN 0 +> rows (ordered): 7 + +SELECT A.D, B.D, A.D + B.D, A.D - B.D, A.D * B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D + B.D A.D - B.D A.D * B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity -Infinity NaN Infinity +> -Infinity -1.0 -Infinity -Infinity Infinity +> -Infinity 0.0 -Infinity -Infinity NaN +> -Infinity 1.0 -Infinity -Infinity -Infinity +> -Infinity 1.5 -Infinity -Infinity -Infinity +> -Infinity Infinity NaN -Infinity -Infinity +> -Infinity NaN NaN NaN NaN +> -1.0 -Infinity -Infinity Infinity Infinity +> -1.0 -1.0 -2.0 0.0 1.0 +> -1.0 0.0 -1.0 -1.0 0.0 +> -1.0 1.0 0.0 -2.0 -1.0 +> -1.0 1.5 0.5 -2.5 -1.5 +> -1.0 Infinity Infinity -Infinity -Infinity +> -1.0 NaN NaN NaN NaN +> 0.0 -Infinity -Infinity Infinity NaN +> 0.0 -1.0 -1.0 1.0 0.0 +> 0.0 0.0 0.0 0.0 0.0 +> 0.0 1.0 1.0 -1.0 0.0 +> 0.0 1.5 1.5 -1.5 0.0 +> 0.0 Infinity Infinity -Infinity NaN +> 0.0 NaN NaN NaN NaN +> 1.0 -Infinity -Infinity Infinity -Infinity +> 1.0 -1.0 0.0 2.0 -1.0 +> 1.0 0.0 1.0 1.0 0.0 +> 1.0 1.0 2.0 0.0 1.0 +> 1.0 1.5 2.5 -0.5 1.5 +> 1.0 Infinity Infinity -Infinity Infinity +> 1.0 NaN NaN NaN NaN +> 1.5 -Infinity -Infinity Infinity -Infinity +> 1.5 -1.0 0.5 2.5 -1.5 +> 1.5 0.0 1.5 1.5 0.0 +> 1.5 1.0 2.5 0.5 1.5 +> 1.5 1.5 3.0 0.0 2.25 +> 1.5 Infinity Infinity -Infinity Infinity +> 1.5 NaN NaN NaN NaN +> Infinity -Infinity NaN Infinity -Infinity +> Infinity -1.0 Infinity Infinity -Infinity +> Infinity 0.0 Infinity Infinity NaN +> Infinity 1.0 Infinity Infinity Infinity +> Infinity 1.5 Infinity Infinity Infinity +> Infinity Infinity Infinity NaN Infinity +> Infinity NaN NaN NaN NaN +> NaN -Infinity NaN NaN NaN +> NaN -1.0 NaN NaN NaN +> NaN 0.0 NaN NaN NaN +> NaN 1.0 NaN NaN NaN +> NaN 1.5 NaN NaN NaN +> NaN Infinity NaN NaN NaN +> NaN NaN NaN NaN NaN +> rows (ordered): 49 + +SELECT A.D, B.D, A.D / B.D, MOD(A.D, B.D) FROM TEST A JOIN TEST B WHERE B.D 
<> 0 ORDER BY A.D, B.D; +> D D A.D / B.D MOD(A.D, B.D) +> --------- --------- ---------- ------------- +> -Infinity -Infinity NaN NaN +> -Infinity -1.0 Infinity NaN +> -Infinity 1.0 -Infinity NaN +> -Infinity 1.5 -Infinity NaN +> -Infinity Infinity NaN NaN +> -Infinity NaN NaN NaN +> -1.0 -Infinity 0.0 -1.0 +> -1.0 -1.0 1.0 0.0 +> -1.0 1.0 -1.0 0.0 +> -1.0 1.5 -0.6666667 -1.0 +> -1.0 Infinity 0.0 -1.0 +> -1.0 NaN NaN NaN +> 0.0 -Infinity 0.0 0.0 +> 0.0 -1.0 0.0 0.0 +> 0.0 1.0 0.0 0.0 +> 0.0 1.5 0.0 0.0 +> 0.0 Infinity 0.0 0.0 +> 0.0 NaN NaN NaN +> 1.0 -Infinity 0.0 1.0 +> 1.0 -1.0 -1.0 0.0 +> 1.0 1.0 1.0 0.0 +> 1.0 1.5 0.6666667 1.0 +> 1.0 Infinity 0.0 1.0 +> 1.0 NaN NaN NaN +> 1.5 -Infinity 0.0 1.5 +> 1.5 -1.0 -1.5 0.5 +> 1.5 1.0 1.5 0.5 +> 1.5 1.5 1.0 0.0 +> 1.5 Infinity 0.0 1.5 +> 1.5 NaN NaN NaN +> Infinity -Infinity NaN NaN +> Infinity -1.0 -Infinity NaN +> Infinity 1.0 Infinity NaN +> Infinity 1.5 Infinity NaN +> Infinity Infinity NaN NaN +> Infinity NaN NaN NaN +> NaN -Infinity NaN NaN +> NaN -1.0 NaN NaN +> NaN 1.0 NaN NaN +> NaN 1.5 NaN NaN +> NaN Infinity NaN NaN +> NaN NaN NaN NaN +> rows (ordered): 42 + +SELECT A.D, B.D, A.D > B.D, A.D = B.D, A.D < B.D FROM TEST A JOIN TEST B ORDER BY A.D, B.D; +> D D A.D > B.D A.D = B.D A.D < B.D +> --------- --------- --------- --------- --------- +> -Infinity -Infinity FALSE TRUE FALSE +> -Infinity -1.0 FALSE FALSE TRUE +> -Infinity 0.0 FALSE FALSE TRUE +> -Infinity 1.0 FALSE FALSE TRUE +> -Infinity 1.5 FALSE FALSE TRUE +> -Infinity Infinity FALSE FALSE TRUE +> -Infinity NaN FALSE FALSE TRUE +> -1.0 -Infinity TRUE FALSE FALSE +> -1.0 -1.0 FALSE TRUE FALSE +> -1.0 0.0 FALSE FALSE TRUE +> -1.0 1.0 FALSE FALSE TRUE +> -1.0 1.5 FALSE FALSE TRUE +> -1.0 Infinity FALSE FALSE TRUE +> -1.0 NaN FALSE FALSE TRUE +> 0.0 -Infinity TRUE FALSE FALSE +> 0.0 -1.0 TRUE FALSE FALSE +> 0.0 0.0 FALSE TRUE FALSE +> 0.0 1.0 FALSE FALSE TRUE +> 0.0 1.5 FALSE FALSE TRUE +> 0.0 Infinity FALSE FALSE TRUE +> 0.0 NaN FALSE FALSE TRUE +> 1.0 
-Infinity TRUE FALSE FALSE +> 1.0 -1.0 TRUE FALSE FALSE +> 1.0 0.0 TRUE FALSE FALSE +> 1.0 1.0 FALSE TRUE FALSE +> 1.0 1.5 FALSE FALSE TRUE +> 1.0 Infinity FALSE FALSE TRUE +> 1.0 NaN FALSE FALSE TRUE +> 1.5 -Infinity TRUE FALSE FALSE +> 1.5 -1.0 TRUE FALSE FALSE +> 1.5 0.0 TRUE FALSE FALSE +> 1.5 1.0 TRUE FALSE FALSE +> 1.5 1.5 FALSE TRUE FALSE +> 1.5 Infinity FALSE FALSE TRUE +> 1.5 NaN FALSE FALSE TRUE +> Infinity -Infinity TRUE FALSE FALSE +> Infinity -1.0 TRUE FALSE FALSE +> Infinity 0.0 TRUE FALSE FALSE +> Infinity 1.0 TRUE FALSE FALSE +> Infinity 1.5 TRUE FALSE FALSE +> Infinity Infinity FALSE TRUE FALSE +> Infinity NaN FALSE FALSE TRUE +> NaN -Infinity TRUE FALSE FALSE +> NaN -1.0 TRUE FALSE FALSE +> NaN 0.0 TRUE FALSE FALSE +> NaN 1.0 TRUE FALSE FALSE +> NaN 1.5 TRUE FALSE FALSE +> NaN Infinity TRUE FALSE FALSE +> NaN NaN FALSE TRUE FALSE +> rows (ordered): 49 + +SELECT D, CAST(D AS DOUBLE PRECISION) D1, CAST(D AS DECFLOAT) D2 FROM TEST ORDER BY D; +> D D1 D2 +> --------- --------- --------- +> -Infinity -Infinity -Infinity +> -1.0 -1.0 -1 +> 0.0 0.0 0 +> 1.0 1.0 1 +> 1.5 1.5 1.5 +> Infinity Infinity Infinity +> NaN NaN NaN +> rows (ordered): 7 + +EXPLAIN SELECT CAST('Infinity' AS REAL), CAST('-Infinity' AS REAL), CAST('NaN' AS REAL), CAST(0 AS REAL); +>> SELECT CAST('Infinity' AS REAL), CAST('-Infinity' AS REAL), CAST('NaN' AS REAL), CAST(0.0 AS REAL) + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" REAL ); +> -- 7 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES ('-Infinity'), (-1.0), (0.0), (1.0), (1.5), ('Infinity'), ('NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/row.sql b/h2/src/test/org/h2/test/scripts/datatypes/row.sql new file mode 100644 index 
0000000000..d1bd2443ee --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/row.sql @@ -0,0 +1,220 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT (); +>> ROW () + +SELECT (1,); +> exception SYNTAX_ERROR_2 + +SELECT ROW (); +>> ROW () + +SELECT ROW (1,); +> exception SYNTAX_ERROR_2 + +SELECT ROW (10); +>> ROW (10) + +SELECT (10, 20, 30); +>> ROW (10, 20, 30) + +SELECT (1, NULL) IS NOT DISTINCT FROM (1, NULL); +>> TRUE + +SELECT (1, NULL) IS DISTINCT FROM ROW (1, NULL); +>> FALSE + +SELECT (1, NULL) = (1, NULL); +>> null + +SELECT (1, NULL) <> (1, NULL); +>> null + +SELECT ROW (NULL) = (NULL, NULL); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +select (1, NULL, 2) = (1, NULL, 1); +>> FALSE + +select (1, NULL, 2) <> (1, NULL, 1); +>> TRUE + +SELECT (1, NULL) > (1, NULL); +>> null + +SELECT (1, 2) > (1, NULL); +>> null + +SELECT (1, 2, NULL) > (1, 1, NULL); +>> TRUE + +SELECT (1, 1, NULL) > (1, 2, NULL); +>> FALSE + +SELECT (1, 2, NULL) < (1, 1, NULL); +>> FALSE + +SELECT (1, 1, NULL) <= (1, 1, NULL); +>> null + +SELECT (1, 2) IN (SELECT 1, 2); +>> TRUE + +SELECT (1, 2) IN (SELECT * FROM VALUES (1, 2), (1, NULL)); +>> TRUE + +SELECT (1, 2) IN (SELECT * FROM VALUES (1, 1), (1, NULL)); +>> null + +SELECT (1, 2) IN (SELECT * FROM VALUES (1, 1), (1, 3)); +>> FALSE + +SELECT (1, NULL) IN (SELECT 1, NULL); +>> null + +SELECT (1, ARRAY[1]) IN (SELECT 1, ARRAY[1]); +>> TRUE + +SELECT (1, ARRAY[1]) IN (SELECT 1, ARRAY[2]); +>> FALSE + +SELECT (1, ARRAY[NULL]) IN (SELECT 1, ARRAY[NULL]); +>> null + +CREATE TABLE TEST (R ROW(A INT, B VARCHAR)); +> ok + +INSERT INTO TEST VALUES ((1, 2)); +> update count: 1 + +INSERT INTO TEST VALUES ((1, X'3341')); +> update count: 1 + +TABLE TEST; +> R +> ----------- +> ROW (1, 2) +> ROW (1, 3A) +> rows: 2 + +DROP TABLE TEST; +> ok + +SELECT CAST((1, 2.1) AS ROW(A INT, B INT)); +>> ROW (1, 2) + +SELECT 
CAST((1, 2.1) AS ROW(A INT, B INT, C INT)); +> exception DATA_CONVERSION_ERROR_1 + +SELECT CAST(1 AS ROW(V INT)); +>> ROW (1) + +SELECT CAST((1, 2) AS ROW(A INT, A INT)); +> exception DUPLICATE_COLUMN_NAME_1 + +CREATE DOMAIN D1 AS ROW(A INT); +> ok + +CREATE DOMAIN D2 AS BIGINT ARRAY; +> ok + +CREATE TABLE TEST(A ROW(A INT, B INT ARRAY[1]) ARRAY, B BIGINT ARRAY[2] ARRAY[3], C ROW(V BIGINT, A INT ARRAY), + D D1, E D2); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DOMAIN_NAME, MAXIMUM_CARDINALITY, DTD_IDENTIFIER FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME DATA_TYPE DOMAIN_NAME MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- --------- ----------- ------------------- -------------- +> A ARRAY null 65536 1 +> B ARRAY null 3 2 +> C ROW null null 3 +> D ROW D1 null 4 +> E ARRAY D2 65536 5 +> rows: 5 + +SELECT OBJECT_NAME, OBJECT_TYPE, COLLECTION_TYPE_IDENTIFIER, DATA_TYPE, MAXIMUM_CARDINALITY, DTD_IDENTIFIER + FROM INFORMATION_SCHEMA.ELEMENT_TYPES; +> OBJECT_NAME OBJECT_TYPE COLLECTION_TYPE_IDENTIFIER DATA_TYPE MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- ----------- -------------------------- --------- ------------------- -------------- +> D2 DOMAIN TYPE BIGINT null TYPE_ +> TEST TABLE 1 ROW null 1_ +> TEST TABLE 1__2 INTEGER null 1__2_ +> TEST TABLE 2 ARRAY 2 2_ +> TEST TABLE 2_ BIGINT null 2__ +> TEST TABLE 3_2 INTEGER null 3_2_ +> TEST TABLE 5 BIGINT null 5_ +> rows: 7 + +SELECT OBJECT_NAME, OBJECT_TYPE, ROW_IDENTIFIER, FIELD_NAME, ORDINAL_POSITION, DATA_TYPE, MAXIMUM_CARDINALITY, + DTD_IDENTIFIER + FROM INFORMATION_SCHEMA.FIELDS; +> OBJECT_NAME OBJECT_TYPE ROW_IDENTIFIER FIELD_NAME ORDINAL_POSITION DATA_TYPE MAXIMUM_CARDINALITY DTD_IDENTIFIER +> ----------- ----------- -------------- ---------- ---------------- --------- ------------------- -------------- +> D1 DOMAIN TYPE A 1 INTEGER null TYPE_1 +> TEST TABLE 1_ A 1 INTEGER null 1__1 +> TEST TABLE 1_ B 2 ARRAY 1 1__2 +> TEST TABLE 3 A 2 ARRAY 65536 3_2 +> TEST TABLE 3 V 1 BIGINT 
null 3_1 +> TEST TABLE 4 A 1 INTEGER null 4_1 +> rows: 6 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D1; +> ok + +DROP DOMAIN D2; +> ok + +@reconnect off + +CREATE LOCAL TEMPORARY TABLE TEST AS (SELECT ROW(1, 2) R); +> ok + +CREATE INDEX IDX ON TEST(R); +> ok + +DROP TABLE TEST; +> ok + +CREATE LOCAL TEMPORARY TABLE TEST(R ROW(C CLOB)); +> ok + +CREATE INDEX IDX ON TEST(R); +> exception FEATURE_NOT_SUPPORTED_1 + +DROP TABLE TEST; +> ok + +@reconnect on + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception TOO_MANY_COLUMNS_1 + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(R ROW(' || (SELECT LISTAGG('C' || X || ' INTEGER') FROM SYSTEM_RANGE(1, 16384)) || '))'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(R ROW(' || (SELECT LISTAGG('C' || X || ' INTEGER') FROM SYSTEM_RANGE(1, 16385)) || '))'; +> exception TOO_MANY_COLUMNS_1 + +-- The next tests should be at the end of this file + +SET MAX_MEMORY_ROWS = 2; +> ok + +SELECT (X, X) FROM SYSTEM_RANGE(1, 100000) ORDER BY -X FETCH FIRST ROW ONLY; +>> ROW (100000, 100000) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql b/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql new file mode 100644 index 0000000000..53362fef48 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/smallint.sql @@ -0,0 +1,30 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- Division + +SELECT CAST(1 AS SMALLINT) / CAST(0 AS SMALLINT); +> exception DIVISION_BY_ZERO_1 + +SELECT CAST(-32768 AS SMALLINT) / CAST(1 AS SMALLINT); +>> -32768 + +SELECT CAST(-32768 AS SMALLINT) / CAST(-1 AS SMALLINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES CAST(1 AS SMALLINT); +>> VALUES (CAST(1 AS SMALLINT)) + +EXPLAIN VALUES CAST(1 AS YEAR); +> exception UNKNOWN_DATA_TYPE_1 + +SET MODE MySQL; +> ok + +EXPLAIN VALUES CAST(1 AS YEAR); +>> VALUES (CAST(1 AS SMALLINT)) + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql b/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql new file mode 100644 index 0000000000..b400394075 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/time-with-time-zone.sql @@ -0,0 +1,98 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(T1 TIME WITH TIME ZONE, T2 TIME WITH TIME ZONE); +> ok + +INSERT INTO TEST(T1, T2) VALUES (TIME WITH TIME ZONE '10:00:00+01', TIME WITH TIME ZONE '11:00:00+02'); +> update count: 1 + +SELECT T1, T2, T1 = T2 FROM TEST; +> T1 T2 T1 = T2 +> ----------- ----------- ------- +> 10:00:00+01 11:00:00+02 TRUE +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ------------------- +> T1 TIME WITH TIME ZONE +> T2 TIME WITH TIME ZONE +> rows (ordered): 2 + +ALTER TABLE TEST ADD (T3 TIME(0), T4 TIME(9) WITHOUT TIME ZONE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- ------------------- ------------------ +> T1 TIME WITH TIME ZONE 0 +> T2 TIME WITH TIME ZONE 0 +> T3 
TIME 0 +> T4 TIME 9 +> rows (ordered): 4 + +ALTER TABLE TEST ADD T5 TIME(10); +> exception INVALID_VALUE_SCALE + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(T TIME WITH TIME ZONE, T0 TIME(0) WITH TIME ZONE, T1 TIME(1) WITH TIME ZONE, + T2 TIME(2) WITH TIME ZONE, T3 TIME(3) WITH TIME ZONE, T4 TIME(4) WITH TIME ZONE, T5 TIME(5) WITH TIME ZONE, + T6 TIME(6) WITH TIME ZONE, T7 TIME(7) WITH TIME ZONE, T8 TIME(8) WITH TIME ZONE, T9 TIME(9) WITH TIME ZONE); +> ok + +INSERT INTO TEST VALUES ('08:00:00.123456789-01', '08:00:00.123456789Z', '08:00:00.123456789+01:02:03', + '08:00:00.123456789-3:00', '08:00:00.123456789+4:30', '08:00:00.123456789Z', '08:00:00.123456789Z', + '08:00:00.123456789Z', '08:00:00.123456789Z', '08:00:00.123456789Z', '08:00:00.123456789Z'); +> update count: 1 + +SELECT * FROM TEST; +> T T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 +> ----------- ----------- ------------------- -------------- ------------------ ---------------- ----------------- ------------------ ------------------- -------------------- --------------------- +> 08:00:00-01 08:00:00+00 08:00:00.1+01:02:03 08:00:00.12-03 08:00:00.123+04:30 08:00:00.1235+00 08:00:00.12346+00 08:00:00.123457+00 08:00:00.1234568+00 08:00:00.12345679+00 08:00:00.123456789+00 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +INSERT INTO TEST(T0, T8) VALUES ('23:59:59.999999999Z', '23:59:59.999999999Z'); +> update count: 1 + +SELECT T0 FROM TEST; +>> 23:59:59+00 + +SELECT T8 FROM TEST; +>> 23:59:59.99999999+00 + +DROP TABLE TEST; +> ok + +SELECT TIME WITH TIME ZONE '11:22:33'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TIME WITH TIME ZONE '11:22:33 Europe/London'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 11:22:33.123456789+02' AS TIME WITH TIME ZONE); +>> 11:22:33+02 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 11:22:33.123456789+02' AS TIME(9) WITH TIME ZONE); +>> 11:22:33.123456789+02 + +SELECT CAST (TIMESTAMP WITH TIME ZONE 
'-1000000000-12-31 11:22:33.123456789+02' AS TIME(9) WITH TIME ZONE); +>> 11:22:33.123456789+02 + +SELECT CAST (TIME WITH TIME ZONE '10:00:00Z' AS DATE); +> exception DATA_CONVERSION_ERROR_1 + +SELECT TIME WITH TIME ZONE '23:00:00+01' - TIME WITH TIME ZONE '00:00:30-01'; +>> INTERVAL '20:59:30' HOUR TO SECOND + +SELECT TIME WITH TIME ZONE '10:00:00-10' + INTERVAL '30' MINUTE; +>> 10:30:00-10 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/time.sql b/h2/src/test/org/h2/test/scripts/datatypes/time.sql new file mode 100644 index 0000000000..a51b23425c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/time.sql @@ -0,0 +1,128 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(T1 TIME, T2 TIME WITHOUT TIME ZONE); +> ok + +INSERT INTO TEST(T1, T2) VALUES (TIME '10:00:00', TIME WITHOUT TIME ZONE '10:00:00'); +> update count: 1 + +SELECT T1, T2, T1 = T2 FROM TEST; +> T1 T2 T1 = T2 +> -------- -------- ------- +> 10:00:00 10:00:00 TRUE +> rows: 1 + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- --------- +> T1 TIME +> T2 TIME +> rows (ordered): 2 + +ALTER TABLE TEST ADD (T3 TIME(0), T4 TIME(9) WITHOUT TIME ZONE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- --------- ------------------ +> T1 TIME 0 +> T2 TIME 0 +> T3 TIME 0 +> T4 TIME 9 +> rows (ordered): 4 + +ALTER TABLE TEST ADD T5 TIME(10); +> exception INVALID_VALUE_SCALE + +DROP TABLE TEST; +> ok + +-- Check that TIME is allowed as a column name +CREATE TABLE TEST(TIME TIME); +> ok + +INSERT INTO TEST VALUES (TIME '08:00:00'); +> update count: 1 + +SELECT TIME FROM TEST; +>> 08:00:00 + +DROP 
TABLE TEST; +> ok + +CREATE TABLE TEST(T TIME, T0 TIME(0), T1 TIME(1), T2 TIME(2), T3 TIME(3), T4 TIME(4), T5 TIME(5), T6 TIME(6), + T7 TIME(7), T8 TIME(8), T9 TIME(9)); +> ok + +INSERT INTO TEST VALUES ('08:00:00.123456789', '08:00:00.123456789', '08:00:00.123456789', '08:00:00.123456789', + '08:00:00.123456789', '08:00:00.123456789', '08:00:00.123456789', '08:00:00.123456789', '08:00:00.123456789', + '08:00:00.123456789', '08:00:00.123456789'); +> update count: 1 + +SELECT * FROM TEST; +> T T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 +> -------- -------- ---------- ----------- ------------ ------------- -------------- --------------- ---------------- ----------------- ------------------ +> 08:00:00 08:00:00 08:00:00.1 08:00:00.12 08:00:00.123 08:00:00.1235 08:00:00.12346 08:00:00.123457 08:00:00.1234568 08:00:00.12345679 08:00:00.123456789 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +INSERT INTO TEST(T0, T8) VALUES ('23:59:59.999999999', '23:59:59.999999999'); +> update count: 1 + +SELECT T0 FROM TEST; +>> 23:59:59 + +SELECT T8 FROM TEST; +>> 23:59:59.99999999 + +DROP TABLE TEST; +> ok + +SELECT TIME '11:22:33'; +>> 11:22:33 + +SELECT TIME '11:22'; +>> 11:22:00 + +SELECT TIME '112233'; +>> 11:22:33 + +SELECT TIME '1122'; +>> 11:22:00 + +SELECT TIME '12233'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TIME '122'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TIME '11:22:33.1'; +>> 11:22:33.1 + +SELECT TIME '112233.1'; +>> 11:22:33.1 + +SELECT TIME '12233.1'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TIME '1122.1'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '1000000000-12-31 11:22:33.123456789' AS TIME); +>> 11:22:33 + +SELECT CAST (TIMESTAMP '1000000000-12-31 11:22:33.123456789' AS TIME(9)); +>> 11:22:33.123456789 + +SELECT CAST (TIMESTAMP '-1000000000-12-31 11:22:33.123456789' AS TIME(9)); +>> 11:22:33.123456789 + +SELECT CAST (TIME '10:00:00' AS DATE); +> exception DATA_CONVERSION_ERROR_1 diff --git 
a/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql b/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql new file mode 100644 index 0000000000..290d975fe9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/timestamp-with-time-zone.sql @@ -0,0 +1,138 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE tab_with_timezone(x TIMESTAMP WITH TIME ZONE); +> ok + +INSERT INTO tab_with_timezone(x) VALUES ('2017-01-01'); +> update count: 1 + +SELECT CAST("Query".X AS TIMESTAMP) FROM (select * from tab_with_timezone where x > '2016-01-01') AS "Query"; +>> 2017-01-01 00:00:00 + +DELETE FROM tab_with_timezone; +> update count: 1 + +INSERT INTO tab_with_timezone VALUES ('2018-03-25 01:59:00 Europe/Berlin'), ('2018-03-25 03:00:00 Europe/Berlin'); +> update count: 2 + +SELECT * FROM tab_with_timezone ORDER BY X; +> X +> ---------------------- +> 2018-03-25 01:59:00+01 +> 2018-03-25 03:00:00+02 +> rows (ordered): 2 + +SELECT TIMESTAMP WITH TIME ZONE '2000-01-10 00:00:00 -02' AS A, + TIMESTAMP WITH TIME ZONE '2000-01-10 00:00:00.000000000 +02:00' AS B, + TIMESTAMP WITH TIME ZONE '2000-01-10 00:00:00.000000000+02:00' AS C, + TIMESTAMP WITH TIME ZONE '2000-01-10T00:00:00.000000000+09:00[Asia/Tokyo]' AS D; +> A B C D +> ---------------------- ---------------------- ---------------------- ---------------------- +> 2000-01-10 00:00:00-02 2000-01-10 00:00:00+02 2000-01-10 00:00:00+02 2000-01-10 00:00:00+09 +> rows: 1 + +CREATE TABLE TEST(T1 TIMESTAMP WITH TIME ZONE, T2 TIMESTAMP(0) WITH TIME ZONE, T3 TIMESTAMP(9) WITH TIME ZONE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- ------------------------ ------------------ +> T1 TIMESTAMP WITH TIME 
ZONE 6 +> T2 TIMESTAMP WITH TIME ZONE 0 +> T3 TIMESTAMP WITH TIME ZONE 9 +> rows (ordered): 3 + +ALTER TABLE TEST ADD T4 TIMESTAMP (10) WITH TIME ZONE; +> exception INVALID_VALUE_SCALE + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(T TIMESTAMP WITH TIME ZONE, T0 TIMESTAMP(0) WITH TIME ZONE, T1 TIMESTAMP(1) WITH TIME ZONE, + T2 TIMESTAMP(2) WITH TIME ZONE, T3 TIMESTAMP(3) WITH TIME ZONE, T4 TIMESTAMP(4) WITH TIME ZONE, + T5 TIMESTAMP(5) WITH TIME ZONE, T6 TIMESTAMP(6) WITH TIME ZONE, T7 TIMESTAMP(7) WITH TIME ZONE, + T8 TIMESTAMP(8) WITH TIME ZONE, T9 TIMESTAMP(9) WITH TIME ZONE); +> ok + +INSERT INTO TEST VALUES ('2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', + '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', + '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', + '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z', '2000-01-01 08:00:00.123456789Z'); +> update count: 1 + +SELECT T, T0, T1, T2 FROM TEST; +> T T0 T1 T2 +> ----------------------------- ---------------------- ------------------------ ------------------------- +> 2000-01-01 08:00:00.123457+00 2000-01-01 08:00:00+00 2000-01-01 08:00:00.1+00 2000-01-01 08:00:00.12+00 +> rows: 1 + +SELECT T3, T4, T5, T6 FROM TEST; +> T3 T4 T5 T6 +> -------------------------- --------------------------- ---------------------------- ----------------------------- +> 2000-01-01 08:00:00.123+00 2000-01-01 08:00:00.1235+00 2000-01-01 08:00:00.12346+00 2000-01-01 08:00:00.123457+00 +> rows: 1 + +SELECT T7, T8, T9 FROM TEST; +> T7 T8 T9 +> ------------------------------ ------------------------------- -------------------------------- +> 2000-01-01 08:00:00.1234568+00 2000-01-01 08:00:00.12345679+00 2000-01-01 08:00:00.123456789+00 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +INSERT INTO TEST(T0) VALUES ('2000-01-01 23:59:59.999999999Z'); +> update count: 1 + +SELECT T0 FROM TEST; +>> 
2000-01-02 00:00:00+00 + +DROP TABLE TEST; +> ok + +SELECT (LOCALTIMESTAMP + 1) = (CURRENT_TIMESTAMP + 1); +>> TRUE + +SELECT (TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01' + 1) A, + (1 + TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01') B; +> A B +> ---------------------- ---------------------- +> 2010-01-02 10:00:00+01 2010-01-02 10:00:00+01 +> rows: 1 + +SELECT (LOCALTIMESTAMP - 1) = (CURRENT_TIMESTAMP - 1); +>> TRUE + +SELECT (TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01' - 1) A; +> A +> ---------------------- +> 2009-12-31 10:00:00+01 +> rows: 1 + +CALL TIMESTAMP WITH TIME ZONE '-1000000000-01-01 00:00:00Z'; +>> -1000000000-01-01 00:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999Z'; +>> 1000000000-12-31 23:59:59.999999999+00 + +CALL TIMESTAMP WITH TIME ZONE '-1000000001-12-31 23:59:59.999999999Z'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL TIMESTAMP WITH TIME ZONE '1000000001-01-01 00:00:00Z'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '2000-01-01 23:59:59.999999999Z' AS TIMESTAMP WITH TIME ZONE); +>> 2000-01-02 00:00:00+00 + +SELECT CAST (TIMESTAMP WITH TIME ZONE '1000000000-12-31 23:59:59.999999999Z' AS TIMESTAMP WITH TIME ZONE); +>> 1000000000-12-31 23:59:59.999999+00 + +SELECT CAST (CAST (TIMESTAMP '1000000000-12-31 23:59:59.999999999' AS TIMESTAMP(9) WITH TIME ZONE) AS TIMESTAMP(9)); +>> 1000000000-12-31 23:59:59.999999999 + +SELECT CAST (CAST (TIMESTAMP '-1000000000-12-31 00:00:00' AS TIMESTAMP(9) WITH TIME ZONE) AS TIMESTAMP(9)); +>> -1000000000-12-31 00:00:00 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql b/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql new file mode 100644 index 0000000000..b2bfa5f0d0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/timestamp.sql @@ -0,0 +1,173 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(T1 TIMESTAMP, T2 TIMESTAMP WITHOUT TIME ZONE); +> ok + +INSERT INTO TEST(T1, T2) VALUES (TIMESTAMP '2010-01-01 10:00:00', TIMESTAMP WITHOUT TIME ZONE '2010-01-01 10:00:00'); +> update count: 1 + +SELECT T1, T2, T1 = T2 FROM TEST; +> T1 T2 T1 = T2 +> ------------------- ------------------- ------- +> 2010-01-01 10:00:00 2010-01-01 10:00:00 TRUE +> rows: 1 + +ALTER TABLE TEST ADD (T3 TIMESTAMP(0), T4 TIMESTAMP(9) WITHOUT TIME ZONE, + DT1 DATETIME, DT2 DATETIME(0), DT3 DATETIME(9), + DT2_1 DATETIME2, DT2_2 DATETIME2(0), DT2_3 DATETIME2(7), + SDT1 SMALLDATETIME); +> ok + +SELECT COLUMN_NAME, DATA_TYPE, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE DATETIME_PRECISION +> ----------- --------- ------------------ +> T1 TIMESTAMP 6 +> T2 TIMESTAMP 6 +> T3 TIMESTAMP 0 +> T4 TIMESTAMP 9 +> DT1 TIMESTAMP 6 +> DT2 TIMESTAMP 0 +> DT3 TIMESTAMP 9 +> DT2_1 TIMESTAMP 6 +> DT2_2 TIMESTAMP 0 +> DT2_3 TIMESTAMP 7 +> SDT1 TIMESTAMP 0 +> rows (ordered): 11 + +ALTER TABLE TEST ADD T5 TIMESTAMP(10); +> exception INVALID_VALUE_SCALE + +ALTER TABLE TEST ADD DT4 DATETIME(10); +> exception INVALID_VALUE_SCALE + +ALTER TABLE TEST ADD DT2_4 DATETIME2(10); +> exception INVALID_VALUE_SCALE + +ALTER TABLE TEST ADD STD2 SMALLDATETIME(1); +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +-- Check that TIMESTAMP is allowed as a column name +CREATE TABLE TEST(TIMESTAMP TIMESTAMP(0)); +> ok + +INSERT INTO TEST VALUES (TIMESTAMP '1999-12-31 08:00:00'); +> update count: 1 + +SELECT TIMESTAMP FROM TEST; +>> 1999-12-31 08:00:00 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(T TIMESTAMP, T0 TIMESTAMP(0), T1 TIMESTAMP(1), T2 TIMESTAMP(2), T3 TIMESTAMP(3), T4 TIMESTAMP(4), + T5 TIMESTAMP(5), T6 TIMESTAMP(6), T7 TIMESTAMP(7), T8 TIMESTAMP(8), T9 TIMESTAMP(9)); +> ok + +INSERT INTO TEST VALUES ('2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', + 
'2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', + '2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', + '2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789', '2000-01-01 08:00:00.123456789'); +> update count: 1 + +SELECT T, T0, T1, T2 FROM TEST; +> T T0 T1 T2 +> -------------------------- ------------------- --------------------- ---------------------- +> 2000-01-01 08:00:00.123457 2000-01-01 08:00:00 2000-01-01 08:00:00.1 2000-01-01 08:00:00.12 +> rows: 1 + +SELECT T3, T4, T5, T6 FROM TEST; +> T3 T4 T5 T6 +> ----------------------- ------------------------ ------------------------- -------------------------- +> 2000-01-01 08:00:00.123 2000-01-01 08:00:00.1235 2000-01-01 08:00:00.12346 2000-01-01 08:00:00.123457 +> rows: 1 + +SELECT T7, T8, T9 FROM TEST; +> T7 T8 T9 +> --------------------------- ---------------------------- ----------------------------- +> 2000-01-01 08:00:00.1234568 2000-01-01 08:00:00.12345679 2000-01-01 08:00:00.123456789 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +INSERT INTO TEST(T0) VALUES ('2000-01-01 23:59:59.999999999'); +> update count: 1 + +SELECT T0 FROM TEST; +>> 2000-01-02 00:00:00 + +DROP TABLE TEST; +> ok + +create table test(id int, d timestamp); +> ok + +insert into test values(1, '2006-01-01 12:00:00.000'); +> update count: 1 + +insert into test values(1, '1999-12-01 23:59:00.000'); +> update count: 1 + +select * from test where d= '1999-12-01 23:59:00.000'; +> ID D +> -- ------------------- +> 1 1999-12-01 23:59:00 +> rows: 1 + +select * from test where d= timestamp '2006-01-01 12:00:00.000'; +> ID D +> -- ------------------- +> 1 2006-01-01 12:00:00 +> rows: 1 + +drop table test; +> ok + +SELECT TIMESTAMP '2000-01-02 11:22:33'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '2000-01-02T11:22:33'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '20000102 11:22:33'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP 
'20000102T11:22:33'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '2000-01-02 112233'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '2000-01-02T112233'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '20000102 112233'; +>> 2000-01-02 11:22:33 + +SELECT TIMESTAMP '20000102T112233'; +>> 2000-01-02 11:22:33 + +CALL TIMESTAMP '-1000000000-01-01 00:00:00'; +>> -1000000000-01-01 00:00:00 + +CALL TIMESTAMP '1000000000-12-31 23:59:59.999999999'; +>> 1000000000-12-31 23:59:59.999999999 + +CALL TIMESTAMP '-1000000001-12-31 23:59:59.999999999'; +> exception INVALID_DATETIME_CONSTANT_2 + +CALL TIMESTAMP '1000000001-01-01 00:00:00'; +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT CAST (TIMESTAMP '2000-01-01 23:59:59.999999999' AS TIMESTAMP); +>> 2000-01-02 00:00:00 + +SELECT CAST (TIMESTAMP '1000000000-12-31 23:59:59.999999999' AS TIMESTAMP); +>> 1000000000-12-31 23:59:59.999999 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql b/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql new file mode 100644 index 0000000000..c389b6e17f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/tinyint.sql @@ -0,0 +1,18 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Division + +SELECT CAST(1 AS TINYINT) / CAST(0 AS TINYINT); +> exception DIVISION_BY_ZERO_1 + +SELECT CAST(-128 AS TINYINT) / CAST(1 AS TINYINT); +>> -128 + +SELECT CAST(-128 AS TINYINT) / CAST(-1 AS TINYINT); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 + +EXPLAIN VALUES CAST(1 AS TINYINT); +>> VALUES (CAST(1 AS TINYINT)) diff --git a/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql b/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql new file mode 100644 index 0000000000..39686caa06 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/uuid.sql @@ -0,0 +1,42 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(U UUID) AS (SELECT * FROM VALUES + ('00000000-0000-0000-0000-000000000000'), ('00000000-0000-0000-9000-000000000000'), + ('11111111-1111-1111-1111-111111111111'), ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')); +> ok + +SELECT U FROM TEST ORDER BY U; +> U +> ------------------------------------ +> 00000000-0000-0000-0000-000000000000 +> 00000000-0000-0000-9000-000000000000 +> 11111111-1111-1111-1111-111111111111 +> aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES UUID '11111111-1111-1111-1111-111111111111'; +>> VALUES (UUID '11111111-1111-1111-1111-111111111111') + +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDE' AS UUID); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(X'0123456789ABCDEF0123456789ABCD' AS UUID); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDEF' AS UUID); +>> 01234567-89ab-cdef-0123-456789abcdef + +VALUES CAST(X'0123456789ABCDEF0123456789ABCDEF' AS UUID); +>> 01234567-89ab-cdef-0123-456789abcdef + +VALUES CAST('01234567-89AB-CDEF-0123-456789ABCDEF-0' AS UUID); +> exception DATA_CONVERSION_ERROR_1 + +VALUES CAST(X'0123456789ABCDEF0123456789ABCDEF01' AS UUID); +> exception DATA_CONVERSION_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql b/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql new file mode 100644 index 0000000000..881b3a7923 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/varbinary.sql @@ -0,0 +1,143 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B1 VARBINARY, B2 BINARY VARYING, B3 RAW, B4 BYTEA, B5 LONG RAW, B6 LONGVARBINARY); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- -------------- +> B1 BINARY VARYING +> B2 BINARY VARYING +> B3 BINARY VARYING +> B4 BINARY VARYING +> B5 BINARY VARYING +> B6 BINARY VARYING +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST AS (VALUES X'11' || X'25'); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C1" BINARY VARYING(2) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (X'1125'); +> rows (ordered): 4 + +EXPLAIN SELECT C1 || X'10' FROM TEST; +>> SELECT "C1" || X'10' FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT X'11' || CAST(NULL AS VARBINARY); +>> null + +SELECT CAST(NULL AS VARBINARY) || X'11'; +>> null + +SELECT X'1'; +> exception HEX_STRING_ODD_1 + +SELECT X'1' '1'; +> exception HEX_STRING_ODD_1 + +SELECT X' 1 2 3 4 '; +>> X'1234' + +SELECT X'1 2 3'; +> exception HEX_STRING_ODD_1 + +SELECT X'~'; +> exception HEX_STRING_WRONG_1 + +SELECT X'G'; +> exception HEX_STRING_WRONG_1 + +SELECT X'TT'; +> exception HEX_STRING_WRONG_1 + +SELECT X' TT'; +> exception HEX_STRING_WRONG_1 + +SELECT X'AB' 'CD'; +>> X'abcd' + +SELECT X'AB' /* comment*/ 'CD' 'EF'; +>> X'abcdef' + +SELECT X'AB' 'CX'; +> exception HEX_STRING_WRONG_1 + +SELECT 0xabcd; +>> 43981 + +SET MODE MSSQLServer; +> ok + +SELECT 0x, 0x12ab; +> +> --- ------- +> X'' X'12ab' +> rows: 1 + +SELECT 0xZ; +> exception HEX_STRING_WRONG_1 + +SET MODE MySQL; +> ok + +SELECT 0x, 0x12ab; +> X'' X'12ab' +> --- ------- +> X'' X'12ab' +> rows: 1 + +SELECT 0xZ; +> exception 
HEX_STRING_WRONG_1 + +SET MODE Regular; +> ok + +EXPLAIN VALUES X''; +>> VALUES (X'') + +CREATE TABLE T(C VARBINARY(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T1(A BINARY VARYING(1048576)); +> ok + +CREATE TABLE T2(A BINARY VARYING(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A BINARY VARYING(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_OCTET_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_OCTET_LENGTH +> ---------- ---------------------- +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT X'ab''cd'; +> exception SYNTAX_ERROR_1 diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql b/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql new file mode 100644 index 0000000000..268b906706 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/varchar-ignorecase.sql @@ -0,0 +1,191 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(C1 VARCHAR_IGNORECASE); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ------------------ +> C1 VARCHAR_IGNORECASE +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (N VARCHAR_IGNORECASE) AS VALUES 'A', 'a', NULL; +> ok + +SELECT DISTINCT * FROM TEST; +> N +> ---- +> A +> null +> rows: 2 + +SELECT * FROM TEST; +> N +> ---- +> A +> a +> null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (N VARCHAR_IGNORECASE) AS VALUES 'A', 'a', 'C', NULL; +> ok + +CREATE INDEX TEST_IDX ON TEST(N); +> ok + +SELECT N FROM TEST WHERE N IN ('a', 'A', 'B'); +> N +> - +> A +> a +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a', 1), ('A', 2), ('B', 3) T(A, B)); +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ WHERE "N" IN( SELECT DISTINCT ON("B") "A" FROM (VALUES ('a', 1), ('A', 2), ('B', 3)) "T"("A", "B") /* table scan */) + +SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a', 1), ('A', 2), ('B', 3) T(A, B)); +> N +> - +> A +> a +> rows: 2 + +SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a'::VARCHAR_IGNORECASE, 1), + ('A'::VARCHAR_IGNORECASE, 2), ('B'::VARCHAR_IGNORECASE, 3) T(A, B)); +> N +> - +> A +> a +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IN (SELECT DISTINCT ON(B) A FROM VALUES ('a'::VARCHAR_IGNORECASE(1), 1), + ('A'::VARCHAR_IGNORECASE(1), 2), ('B'::VARCHAR_IGNORECASE(1), 3) T(A, B)); +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX: N IN(SELECT DISTINCT ON(B) A FROM (VALUES (CAST('a' AS VARCHAR_IGNORECASE(1)), 1), (CAST('A' AS VARCHAR_IGNORECASE(1)), 2), (CAST('B' AS VARCHAR_IGNORECASE(1)), 3)) T(A, B) /* table scan */) */ WHERE "N" IN( SELECT DISTINCT ON("B") "A" FROM (VALUES (CAST('a' AS VARCHAR_IGNORECASE(1)), 1), (CAST('A' AS VARCHAR_IGNORECASE(1)), 2), (CAST('B' 
AS VARCHAR_IGNORECASE(1)), 3)) "T"("A", "B") /* table scan */) + +DROP INDEX TEST_IDX; +> ok + +CREATE UNIQUE INDEX TEST_IDX ON TEST(N); +> exception DUPLICATE_KEY_1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(N VARCHAR_IGNORECASE) AS VALUES ('A'), ('a'), ('C'), (NULL); +> ok + +CREATE HASH INDEX TEST_IDX ON TEST(N); +> ok + +SELECT N FROM TEST WHERE N = 'A'; +> N +> - +> A +> a +> rows: 2 + +DROP INDEX TEST_IDX; +> ok + +CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); +> exception DUPLICATE_KEY_1 + +DELETE FROM TEST WHERE N = 'A' LIMIT 1; +> update count: 1 + +CREATE UNIQUE HASH INDEX TEST_IDX ON TEST(N); +> ok + +SELECT 1 FROM TEST WHERE N = 'A'; +>> 1 + +INSERT INTO TEST VALUES (NULL); +> update count: 1 + +SELECT N FROM TEST WHERE N IS NULL; +> N +> ---- +> null +> null +> rows: 2 + +DELETE FROM TEST WHERE N IS NULL LIMIT 1; +> update count: 1 + +SELECT N FROM TEST WHERE N IS NULL; +>> null + +DROP TABLE TEST; +> ok + +EXPLAIN VALUES CAST('a' AS VARCHAR_IGNORECASE(1)); +>> VALUES (CAST('a' AS VARCHAR_IGNORECASE(1))) + +CREATE TABLE T(C VARCHAR_IGNORECASE(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C1 VARCHAR_IGNORECASE(1 CHARACTERS), C2 VARCHAR_IGNORECASE(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + +SELECT 'I' ILIKE CHAR(0x130); +>> TRUE + +SET COLLATION TURKISH STRENGTH IDENTICAL; +> ok + +CREATE TABLE TEST(V VARCHAR_IGNORECASE UNIQUE); +> ok + +INSERT INTO TEST VALUES 'I', 'i'; +> update count: 2 + +INSERT INTO TEST VALUES CHAR(0x0130); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES CHAR(0x0131); +> exception DUPLICATE_KEY_1 + +DROP TABLE TEST; +> ok + +SET COLLATION OFF; +> ok + + +CREATE TABLE T1(A VARCHAR_IGNORECASE(1048576)); +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> 
TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql b/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql new file mode 100644 index 0000000000..d7ebecfa0b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/datatypes/varchar.sql @@ -0,0 +1,126 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT 'A' 'b' + 'c'; +>> Abc + +SELECT N'A' 'b' + 'c'; +>> Abc + +CREATE TABLE TEST(C1 VARCHAR, C2 CHARACTER VARYING, C3 VARCHAR2, C4 NVARCHAR, C5 NVARCHAR2, C6 VARCHAR_CASESENSITIVE, + C7 LONGVARCHAR, C8 TID, C9 CHAR VARYING, C10 NCHAR VARYING, C11 NATIONAL CHARACTER VARYING, C12 NATIONAL CHAR VARYING); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE +> ----------- ----------------- +> C1 CHARACTER VARYING +> C2 CHARACTER VARYING +> C3 CHARACTER VARYING +> C4 CHARACTER VARYING +> C5 CHARACTER VARYING +> C6 CHARACTER VARYING +> C7 CHARACTER VARYING +> C8 CHARACTER VARYING +> C9 CHARACTER VARYING +> C10 CHARACTER VARYING +> C11 CHARACTER VARYING +> C12 CHARACTER VARYING +> rows (ordered): 12 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(C VARCHAR(0)); +> exception INVALID_VALUE_2 + +CREATE TABLE T(C VARCHAR(1K)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE T(C1 VARCHAR(1 CHARACTERS), C2 VARCHAR(1 OCTETS)); +> ok + +DROP TABLE T; +> ok + + +CREATE TABLE T1(A CHARACTER VARYING(1048576)); +> ok + +CREATE TABLE T2(A CHARACTER VARYING(1048577)); +> exception INVALID_VALUE_PRECISION + +SET TRUNCATE_LARGE_LENGTH TRUE; +> ok + +CREATE TABLE T2(A CHARACTER VARYING(1048577)); +> ok + +SELECT TABLE_NAME, CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE 
TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> T1 1048576 +> T2 1048576 +> rows: 2 + +SET TRUNCATE_LARGE_LENGTH FALSE; +> ok + +DROP TABLE T1, T2; +> ok + +SELECT U&'a\0030a\+000025a'; +>> a0a%a + +SELECT U&'az0030az+000025a' UESCAPE 'z'; +>> a0a%a + +EXPLAIN SELECT U&'\fffd\+100000'; +>> SELECT U&'\fffd\+100000' + +SELECT U&'\'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\0'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\00'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\003'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\0030'; +>> 0 + +SELECT U&'\zzzz'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+0'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+00'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+000'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+0000'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+00003'; +> exception STRING_FORMAT_ERROR_1 + +SELECT U&'\+000030'; +>> 0 + +SELECT U&'\+zzzzzz'; +> exception STRING_FORMAT_ERROR_1 + +EXPLAIN SELECT U&'''\\', U&'''\\\fffd'; +>> SELECT '''\', U&'''\\\fffd' diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql b/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql new file mode 100644 index 0000000000..94bc2ae007 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterDomain.sql @@ -0,0 +1,346 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE DOMAIN D1 INT DEFAULT 1; +> ok + +CREATE DOMAIN D2 D1 DEFAULT 2; +> ok + +CREATE DOMAIN D3 D1; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, S1 D1, S2 D2, S3 D3, C1 D1 DEFAULT 4, C2 D2 DEFAULT 5, C3 D3 DEFAULT 6); +> ok + +INSERT INTO TEST(ID) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID S1 S2 S3 C1 C2 C3 +> -- -- -- -- -- -- -- +> 1 1 2 1 4 5 6 +> rows: 1 + +ALTER DOMAIN D1 SET DEFAULT 3; +> ok + +INSERT INTO TEST(ID) VALUES 2; +> update count: 1 + +SELECT * FROM TEST WHERE ID = 2; +> ID S1 S2 S3 C1 C2 C3 +> -- -- -- -- -- -- -- +> 2 3 2 3 4 5 6 +> rows: 1 + +ALTER DOMAIN D1 DROP DEFAULT; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D1 null +> D2 2 +> D3 3 +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +ALTER DOMAIN D1 SET DEFAULT 3; +> ok + +ALTER DOMAIN D3 DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN S1 DROP DEFAULT; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D1 3 +> D2 2 +> D3 null +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 null +> S2 null +> S3 null +> rows: 7 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_DEFAULT FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_DEFAULT +> ----------- -------------- +> D2 2 +> D3 3 +> rows: 2 + +SELECT COLUMN_NAME, COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> 
COLUMN_NAME COLUMN_DEFAULT +> ----------- -------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 INT ON UPDATE 1; +> ok + +CREATE DOMAIN D2 D1 ON UPDATE 2; +> ok + +CREATE DOMAIN D3 D1; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, S1 D1, S2 D2, S3 D3, C1 D1 ON UPDATE 4, C2 D2 ON UPDATE 5, C3 D3 ON UPDATE 6); +> ok + +ALTER DOMAIN D1 SET ON UPDATE 3; +> ok + +ALTER DOMAIN D1 DROP ON UPDATE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D1 null +> D2 2 +> D3 3 +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +ALTER DOMAIN D1 SET ON UPDATE 3; +> ok + +ALTER DOMAIN D3 DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN S1 DROP ON UPDATE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D1 3 +> D2 2 +> D3 null +> rows: 3 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID null +> S1 null +> S2 null +> S3 null +> rows: 7 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT DOMAIN_NAME, DOMAIN_ON_UPDATE FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME DOMAIN_ON_UPDATE +> ----------- ---------------- +> D2 2 +> D3 3 +> rows: 2 + +SELECT COLUMN_NAME, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME COLUMN_ON_UPDATE +> ----------- ---------------- +> C1 4 +> C2 5 +> C3 6 +> ID 
null +> S1 3 +> S2 null +> S3 null +> rows: 7 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 AS INT; +> ok + +CREATE DOMAIN D2 AS D1; +> ok + +CREATE TABLE T(C1 D1, C2 D2, L BIGINT); +> ok + +ALTER DOMAIN D1 RENAME TO D3; +> ok + +SELECT DOMAIN_NAME, DATA_TYPE, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_NAME DATA_TYPE PARENT_DOMAIN_NAME +> ----------- --------- ------------------ +> D2 INTEGER D3 +> D3 INTEGER null +> rows: 2 + +SELECT COLUMN_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T' AND COLUMN_NAME LIKE 'C_'; +> COLUMN_NAME DOMAIN_NAME +> ----------- ----------- +> C1 D3 +> C2 D2 +> rows: 2 + +@reconnect + +SELECT DOMAIN_NAME, DATA_TYPE, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_NAME DATA_TYPE PARENT_DOMAIN_NAME +> ----------- --------- ------------------ +> D2 INTEGER D3 +> D3 INTEGER null +> rows: 2 + +SELECT COLUMN_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T' AND COLUMN_NAME LIKE 'C_'; +> COLUMN_NAME DOMAIN_NAME +> ----------- ----------- +> C1 D3 +> C2 D2 +> rows: 2 + +DROP TABLE T; +> ok + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +CREATE DOMAIN D1 AS INT; +> ok + +CREATE DOMAIN D2 AS D1; +> ok + +CREATE TABLE TEST(A INT, C D2) AS VALUES (1, 1); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +CREATE UNIQUE INDEX TEST_A_IDX ON TEST(A); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +CREATE INDEX TEST_C_IDX ON TEST(C); +> ok + +ALTER TABLE TEST ADD CHECK (C > 1); +> exception 
CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D2 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D1 ADD CHECK (VALUE > 1) NOCHECK; +> ok + +DROP TABLE TEST; +> ok + +ALTER DOMAIN D1 ADD CONSTRAINT T CHECK (VALUE < 100); +> ok + +ALTER DOMAIN D3 RENAME CONSTRAINT T TO T1; +> exception DOMAIN_NOT_FOUND_1 + +ALTER DOMAIN IF EXISTS D3 RENAME CONSTRAINT T TO T1; +> ok + +ALTER DOMAIN D2 RENAME CONSTRAINT T TO T2; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER DOMAIN D1 RENAME CONSTRAINT T TO T3; +> ok + +SELECT CONSTRAINT_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS WHERE CONSTRAINT_NAME LIKE 'T%'; +> CONSTRAINT_NAME DOMAIN_NAME +> --------------- ----------- +> T3 D1 +> rows: 1 + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql new file mode 100644 index 0000000000..9f00abb42f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableAdd.sql @@ -0,0 +1,395 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(B INT); +> ok + +ALTER TABLE TEST ADD C INT; +> ok + +ALTER TABLE TEST ADD COLUMN D INT; +> ok + +ALTER TABLE TEST ADD IF NOT EXISTS B INT; +> ok + +ALTER TABLE TEST ADD IF NOT EXISTS E INT; +> ok + +ALTER TABLE IF EXISTS TEST2 ADD COLUMN B INT; +> ok + +ALTER TABLE TEST ADD B1 INT AFTER B; +> ok + +ALTER TABLE TEST ADD B2 INT BEFORE C; +> ok + +ALTER TABLE TEST ADD (C1 INT, C2 INT) AFTER C; +> ok + +ALTER TABLE TEST ADD (C3 INT, C4 INT) BEFORE D; +> ok + +ALTER TABLE TEST ADD A2 INT FIRST; +> ok + +ALTER TABLE TEST ADD (A INT, A1 INT) FIRST; +> ok + +SELECT * FROM TEST; +> A A1 A2 B B1 B2 C C1 C2 C3 C4 D E +> - -- -- - -- -- - -- -- -- -- - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT NOT NULL, B INT); +> ok + +-- column B may be null +ALTER TABLE TEST ADD (CONSTRAINT PK_B PRIMARY KEY (B)); +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +ALTER TABLE TEST ADD (CONSTRAINT PK_A PRIMARY KEY (A)); +> ok + +ALTER TABLE TEST ADD (C INT AUTO_INCREMENT UNIQUE, CONSTRAINT U_B UNIQUE (B), D INT UNIQUE); +> ok + +INSERT INTO TEST(A, B, D) VALUES (11, 12, 14); +> update count: 1 + +SELECT * FROM TEST; +> A B C D +> -- -- - -- +> 11 12 1 14 +> rows: 1 + +INSERT INTO TEST VALUES (11, 20, 30, 40); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES (10, 12, 30, 40); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES (10, 20, 1, 40); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES (10, 20, 30, 14); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST VALUES (10, 20, 30, 40); +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(); +> ok + +ALTER TABLE TEST ADD A INT CONSTRAINT PK_1 PRIMARY KEY; +> ok + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME CONSTRAINT_TYPE +> --------------- --------------- +> PK_1 PRIMARY KEY +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE PARENT(ID INT); +> ok + +CREATE INDEX PARENT_ID_IDX ON 
PARENT(ID); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT); +> ok + +ALTER TABLE CHILD ADD CONSTRAINT CHILD_P_FK FOREIGN KEY (P) REFERENCES PARENT(ID); +> exception CONSTRAINT_NOT_FOUND_1 + +SET MODE MySQL; +> ok + +ALTER TABLE CHILD ADD CONSTRAINT CHILD_P_FK FOREIGN KEY (P) REFERENCES PARENT(ID); +> ok + +SET MODE Regular; +> ok + +INSERT INTO PARENT VALUES 1, 1; +> exception DUPLICATE_KEY_1 + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE PARENT(ID INT CONSTRAINT P1 PRIMARY KEY); +> ok + +CREATE TABLE CHILD(ID INT CONSTRAINT P2 PRIMARY KEY, CHILD INT CONSTRAINT C REFERENCES PARENT); +> ok + +ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT; +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 + +ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT; +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 + +ALTER TABLE PARENT DROP CONSTRAINT P1 CASCADE; +> ok + +DROP TABLE PARENT, CHILD; +> ok + +CREATE TABLE A(A TIMESTAMP PRIMARY KEY, B INT ARRAY UNIQUE, C TIME ARRAY UNIQUE); +> ok + +CREATE TABLE B(A TIMESTAMP WITH TIME ZONE, B DATE, C INT ARRAY, D TIME ARRAY, E TIME WITH TIME ZONE ARRAY); +> ok + +ALTER TABLE B ADD FOREIGN KEY(A) REFERENCES A(A); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(B) REFERENCES A(A); +> ok + +ALTER TABLE B ADD FOREIGN KEY(C) REFERENCES A(B); +> ok + +ALTER TABLE B ADD FOREIGN KEY(C) REFERENCES A(C); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +ALTER TABLE B ADD FOREIGN KEY(D) REFERENCES A(B); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(D) REFERENCES A(C); +> ok + +ALTER TABLE B ADD FOREIGN KEY(E) REFERENCES A(B); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +ALTER TABLE B ADD FOREIGN KEY(E) REFERENCES A(C); +> exception UNCOMPARABLE_REFERENCED_COLUMN_2 + +DROP TABLE B, A; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, K INT UNIQUE); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT GENERATED ALWAYS AS (ID)); +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES 
PARENT(K) ON DELETE CASCADE; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE RESTRICT; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE NO ACTION; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET DEFAULT; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET NULL; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE CASCADE; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE RESTRICT; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE NO ACTION; +> ok + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE SET DEFAULT; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON UPDATE SET NULL; +> exception GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE T1(B INT, G INT GENERATED ALWAYS AS (B + 1) UNIQUE); +> ok + +CREATE TABLE T2(A INT, G INT REFERENCES T1(G) ON UPDATE CASCADE); +> ok + +INSERT INTO T1(B) VALUES 1; +> update count: 1 + +INSERT INTO T2 VALUES (1, 2); +> update count: 1 + +TABLE T2; +> A G +> - - +> 1 2 +> rows: 1 + +UPDATE T1 SET B = 2; +> update count: 1 + +TABLE T2; +> A G +> - - +> 1 3 +> rows: 1 + +DROP TABLE T2, T1; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE TABLE S1.T1(ID INT PRIMARY KEY); +> ok + +CREATE SCHEMA S2; +> ok + +CREATE TABLE S2.T2(ID INT, FK INT REFERENCES S1.T1(ID)); +> ok + +SELECT CONSTRAINT_SCHEMA, CONSTRAINT_TYPE, TABLE_SCHEMA, TABLE_NAME, INDEX_SCHEMA + FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_SCHEMA LIKE 'S%'; +> CONSTRAINT_SCHEMA CONSTRAINT_TYPE TABLE_SCHEMA TABLE_NAME INDEX_SCHEMA +> ----------------- 
--------------- ------------ ---------- ------------ +> S1 PRIMARY KEY S1 T1 S1 +> S2 FOREIGN KEY S2 T2 S2 +> rows: 2 + +SELECT INDEX_SCHEMA, TABLE_SCHEMA, TABLE_NAME, INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES + WHERE TABLE_SCHEMA LIKE 'S%'; +> INDEX_SCHEMA TABLE_SCHEMA TABLE_NAME INDEX_TYPE_NAME IS_GENERATED +> ------------ ------------ ---------- --------------- ------------ +> S1 S1 T1 PRIMARY KEY TRUE +> S2 S2 T2 INDEX TRUE +> rows: 2 + +SELECT INDEX_SCHEMA, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE TABLE_SCHEMA LIKE 'S%'; +> INDEX_SCHEMA TABLE_SCHEMA TABLE_NAME COLUMN_NAME +> ------------ ------------ ---------- ----------- +> S1 S1 T1 ID +> S2 S2 T2 FK +> rows: 2 + +@reconnect + +DROP SCHEMA S2 CASCADE; +> ok + +DROP SCHEMA S1 CASCADE; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +ALTER TABLE TEST ADD COLUMN(X INTEGER); +> exception TOO_MANY_COLUMNS_1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(ID BIGINT NOT NULL); +> ok + +ALTER TABLE TEST ADD PRIMARY KEY(ID); +> ok + +SELECT INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME = 'TEST'; +> INDEX_TYPE_NAME IS_GENERATED +> --------------- ------------ +> PRIMARY KEY TRUE +> rows: 1 + +CALL DB_OBJECT_SQL('INDEX', 'PUBLIC', 'PRIMARY_KEY_2'); +>> CREATE PRIMARY KEY "PUBLIC"."PRIMARY_KEY_2" ON "PUBLIC"."TEST"("ID") + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 + +@reconnect + +SELECT INDEX_TYPE_NAME, IS_GENERATED FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_NAME 
= 'TEST'; +> INDEX_TYPE_NAME IS_GENERATED +> --------------- ------------ +> PRIMARY KEY TRUE +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT INVISIBLE, CONSTRAINT TEST_UNIQUE_2 UNIQUE(VALUE)); +> ok + +ALTER TABLE TEST ADD COLUMN D INT; +> ok + +ALTER TABLE TEST ADD CONSTRAINT TEST_UNIQUE_3 UNIQUE(VALUE); +> ok + +SELECT CONSTRAINT_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE + WHERE TABLE_NAME = 'TEST'; +> CONSTRAINT_NAME COLUMN_NAME ORDINAL_POSITION +> --------------- ----------- ---------------- +> TEST_UNIQUE_2 A 1 +> TEST_UNIQUE_2 B 2 +> TEST_UNIQUE_3 A 1 +> TEST_UNIQUE_3 B 2 +> TEST_UNIQUE_3 D 3 +> rows: 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(); +> ok + +ALTER TABLE TEST ADD UNIQUE (VALUE); +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (3, 4); +> ok + +ALTER TABLE TEST ADD G INT GENERATED ALWAYS AS (A + B); +> ok + +ALTER TABLE TEST ADD ID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY FIRST; +> ok + +ALTER TABLE TEST ADD C INT AFTER B; +> ok + +INSERT INTO TEST(A, B) VALUES (5, 6); +> update count: 1 + +TABLE TEST; +> ID A B C G +> -- - - ---- -- +> 1 3 4 null 7 +> 2 5 6 null 11 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql new file mode 100644 index 0000000000..cda63ed105 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableAlterColumn.sql @@ -0,0 +1,816 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(T INT); +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> INTEGER + +-- SET DEFAULT +ALTER TABLE TEST ALTER COLUMN T SET DEFAULT 1; +> ok + +SELECT COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> 1 + +-- DROP DEFAULT +ALTER TABLE TEST ALTER COLUMN T DROP DEFAULT; +> ok + +SELECT COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> null + +-- SET NOT NULL +ALTER TABLE TEST ALTER COLUMN T SET NOT NULL; +> ok + +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> NO + +-- DROP NOT NULL +ALTER TABLE TEST ALTER COLUMN T DROP NOT NULL; +> ok + +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> YES + +ALTER TABLE TEST ALTER COLUMN T SET NOT NULL; +> ok + +-- SET NULL +ALTER TABLE TEST ALTER COLUMN T SET NULL; +> ok + +SELECT IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> YES + +-- SET DATA TYPE +ALTER TABLE TEST ALTER COLUMN T SET DATA TYPE BIGINT; +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +>> BIGINT + +ALTER TABLE TEST ALTER COLUMN T INT INVISIBLE DEFAULT 1 ON UPDATE 2 NOT NULL COMMENT 'C'; +> ok + +SELECT DATA_TYPE, IS_VISIBLE, COLUMN_DEFAULT, COLUMN_ON_UPDATE, REMARKS, IS_NULLABLE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +> DATA_TYPE IS_VISIBLE COLUMN_DEFAULT COLUMN_ON_UPDATE REMARKS IS_NULLABLE +> --------- ---------- -------------- ---------------- ------- ----------- +> INTEGER FALSE 1 2 C NO +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN T SET DATA TYPE BIGINT; +> ok + +SELECT DATA_TYPE, IS_VISIBLE, COLUMN_DEFAULT, COLUMN_ON_UPDATE, REMARKS, IS_NULLABLE + FROM INFORMATION_SCHEMA.COLUMNS WHERE COLUMN_NAME = 'T'; +> DATA_TYPE IS_VISIBLE COLUMN_DEFAULT COLUMN_ON_UPDATE REMARKS IS_NULLABLE +> --------- ---------- -------------- ---------------- ------- ----------- +> BIGINT FALSE 1 2 C NO 
+> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT AUTO_INCREMENT PRIMARY KEY, V INT NOT NULL); +> ok + +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 100; +> ok + +INSERT INTO TEST(V) VALUES (1); +> update count: 1 + +ALTER TABLE TEST AUTO_INCREMENT = 200; +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 200; +> ok + +INSERT INTO TEST(V) VALUES (2); +> update count: 1 + +ALTER TABLE TEST AUTO_INCREMENT 300; +> ok + +INSERT INTO TEST(V) VALUES (3); +> update count: 1 + +SELECT * FROM TEST ORDER BY ID; +> ID V +> --- - +> 100 1 +> 200 2 +> 300 3 +> rows (ordered): 3 + +ALTER TABLE TEST DROP PRIMARY KEY; +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 400; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST ADD PRIMARY KEY(V); +> ok + +ALTER TABLE TEST AUTO_INCREMENT = 400; +> exception COLUMN_NOT_FOUND_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +-- Compatibility syntax + +SET MODE MySQL; +> ok + +create table test(id int primary key, name varchar); +> ok + +insert into test(id) values(1); +> update count: 1 + +alter table test change column id id2 int; +> ok + +select id2 from test; +> ID2 +> --- +> 1 +> rows: 1 + +drop table test; +> ok + +SET MODE Oracle; +> ok + +CREATE MEMORY TABLE TEST(V INT NOT NULL); +> ok + +ALTER TABLE TEST MODIFY COLUMN V BIGINT; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" BIGINT NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +SET MODE MySQL; +> ok + +ALTER TABLE TEST MODIFY COLUMN V INT; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> --------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" INTEGER ); +> -- 0 +/- SELECT COUNT(*) 
FROM PUBLIC.TEST; +> rows (ordered): 3 + +ALTER TABLE TEST MODIFY COLUMN V BIGINT NOT NULL; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "V" BIGINT NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +create table test(id int, name varchar); +> ok + +alter table test alter column id int as id+1; +> exception COLUMN_NOT_FOUND_1 + +drop table test; +> ok + +create table t(x varchar) as select 'x'; +> ok + +alter table t alter column x int; +> exception DATA_CONVERSION_ERROR_1 + +drop table t; +> ok + +create table t(id identity default on null, x varchar) as select null, 'x'; +> ok + +alter table t alter column x int; +> exception DATA_CONVERSION_ERROR_1 + +drop table t; +> ok + +-- ensure that increasing a VARCHAR columns length takes effect because we optimize this case +create table t(x varchar(2)) as select 'x'; +> ok + +alter table t alter column x varchar(20); +> ok + +insert into t values 'Hello'; +> update count: 1 + +drop table t; +> ok + +SET MODE MySQL; +> ok + +create table t(x int); +> ok + +alter table t modify column x varchar(20); +> ok + +insert into t values('Hello'); +> update count: 1 + +drop table t; +> ok + +-- This worked in v1.4.196 +create table T (C varchar not null); +> ok + +alter table T modify C int null; +> ok + +insert into T values(null); +> update count: 1 + +drop table T; +> ok + +-- This failed in v1.4.196 +create table T (C int not null); +> ok + +-- Silently corrupted column C +alter table T modify C null; +> ok + +insert into T values(null); +> update count: 1 + +drop table T; +> ok + +SET MODE Oracle; +> ok + +create table foo (bar varchar(255)); +> ok + +alter table foo modify (bar varchar(255) not null); +> ok + +insert into foo values(null); +> exception 
NULL_NOT_ALLOWED + +DROP TABLE FOO; +> ok + +SET MODE Regular; +> ok + +-- Tests a bug we used to have where altering the name of a column that had +-- a check constraint that referenced itself would result in not being able +-- to re-open the DB. +create table test(id int check(id in (1,2)) ); +> ok + +alter table test alter id rename to id2; +> ok + +@reconnect + +insert into test values 1; +> update count: 1 + +insert into test values 3; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +drop table test; +> ok + +CREATE MEMORY TABLE TEST(C INT); +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D RENAME TO E; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS C RENAME TO D; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET DEFAULT 1; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET DEFAULT 1; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET ON UPDATE 2; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET ON UPDATE 2; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET DATA TYPE BIGINT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET DATA TYPE BIGINT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET INVISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET INVISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SELECTIVITY 3; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SELECTIVITY 3; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E RESTART WITH 4; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D RESTART WITH 4 SET MAXVALUE 1000; +> ok + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, + IDENTITY_MINIMUM, IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM 
IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- ----------- ------------------- -------------- ------------------ ---------------- ---------------- -------------- ------------- -------------- +> D YES BY DEFAULT 1 1 1000 1 NO 4 32 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN D SET CYCLE; +> ok + +SELECT IDENTITY_CYCLE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> YES + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +SELECT IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> NO + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN E DROP IDENTITY; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST ALTER COLUMN D SET GENERATED BY DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN D SET DEFAULT (1); +> ok + +SELECT COLUMN_DEFAULT, IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_DEFAULT IS_IDENTITY +> -------------- ----------- +> null YES +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN D SET GENERATED ALWAYS; +> ok + +SELECT IS_IDENTITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> YES + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP NOT NULL; +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E INT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D INT; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS E SET VISIBLE; +> ok + +ALTER TABLE TEST ALTER COLUMN IF EXISTS D SET VISIBLE; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> 
SCRIPT +> ------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "D" INTEGER NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT GENERATED ALWAYS AS IDENTITY (MINVALUE 1 MAXVALUE 10 INCREMENT BY -1), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> rows: 1 + +DELETE FROM TEST; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN ID RESTART; +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH 5; +> ok + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 5 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 1, 2, 3; +> ok + +ALTER TABLE TEST ALTER COLUMN A SET DATA TYPE BIGINT USING A * 10; +> ok + +TABLE TEST; +> A +> -- +> 10 +> 20 +> 30 +> rows: 3 + +ALTER TABLE TEST ADD COLUMN B INT NOT NULL USING A + 1; +> ok + +TABLE TEST; +> A B +> -- -- +> 10 11 +> 20 21 +> 30 31 +> rows: 3 + +ALTER TABLE TEST ADD COLUMN C VARCHAR(2) USING A; +> ok + +TABLE TEST; +> A B C +> -- -- -- +> 10 11 10 +> 20 21 20 +> 30 31 30 +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN C SET DATA TYPE VARCHAR(3) USING C || '*'; +> ok + +TABLE TEST; +> A B C +> -- -- --- +> 10 11 10* +> 20 21 20* +> 30 31 30* +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(B BINARY) AS VALUES X'00'; +> ok + +ALTER TABLE TEST ALTER COLUMN B SET DATA TYPE BINARY(2); +> ok + +TABLE TEST; +>> X'0000' + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(D INT DEFAULT 8, G INT GENERATED ALWAYS AS (D + 1), S INT GENERATED ALWAYS AS IDENTITY); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME 
COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D 8 NO NEVER null +> G null NO ALWAYS "D" + 1 +> S null YES NEVER null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN D SET ON UPDATE 1; +> ok + +ALTER TABLE TEST ALTER COLUMN G SET ON UPDATE 1; +> ok + +ALTER TABLE TEST ALTER COLUMN S SET ON UPDATE 1; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION COLUMN_ON_UPDATE +> ----------- -------------- ----------- ------------ --------------------- ---------------- +> D 8 NO NEVER null 1 +> G null NO ALWAYS "D" + 1 null +> S null YES NEVER null null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN D DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP ON UPDATE; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP ON UPDATE; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION COLUMN_ON_UPDATE +> ----------- -------------- ----------- ------------ --------------------- ---------------- +> D 8 NO NEVER null null +> G null NO ALWAYS "D" + 1 null +> S null YES NEVER null null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN G DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN D DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN D DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP IDENTITY; +> ok + +ALTER TABLE TEST ALTER COLUMN G SET DEFAULT ("D" + 2); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS 
WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D 8 NO NEVER null +> G null NO ALWAYS "D" + 1 +> S null YES NEVER null +> rows: 3 + +ALTER TABLE TEST ALTER COLUMN D DROP DEFAULT; +> ok + +ALTER TABLE TEST ALTER COLUMN G DROP EXPRESSION; +> ok + +ALTER TABLE TEST ALTER COLUMN S DROP IDENTITY; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, IS_IDENTITY, IS_GENERATED, GENERATION_EXPRESSION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT IS_IDENTITY IS_GENERATED GENERATION_EXPRESSION +> ----------- -------------- ----------- ------------ --------------------- +> D null NO NEVER null +> G null NO NEVER null +> S null NO NEVER null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY(START WITH 10 MINVALUE 3 INCREMENT BY 2 CYCLE CACHE 16), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +DELETE FROM TEST WHERE V = 2; +> update count: 1 + +SELECT COLUMN_NAME, DATA_TYPE, IS_IDENTITY, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, IDENTITY_MINIMUM, + IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME DATA_TYPE IS_IDENTITY IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- --------- ----------- -------------- ------------------ ------------------- ---------------- -------------- ------------- -------------- +> ID BIGINT YES 10 2 9223372036854775807 3 YES 14 16 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID SET DATA TYPE INTEGER; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, IS_IDENTITY, IDENTITY_START, IDENTITY_INCREMENT, IDENTITY_MAXIMUM, IDENTITY_MINIMUM, + IDENTITY_CYCLE, IDENTITY_BASE, IDENTITY_CACHE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' 
AND COLUMN_NAME = 'ID'; +> COLUMN_NAME DATA_TYPE IS_IDENTITY IDENTITY_START IDENTITY_INCREMENT IDENTITY_MAXIMUM IDENTITY_MINIMUM IDENTITY_CYCLE IDENTITY_BASE IDENTITY_CACHE +> ----------- --------- ----------- -------------- ------------------ ---------------- ---------------- -------------- ------------- -------------- +> ID INTEGER YES 10 2 2147483647 3 YES 14 16 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION +> ----------- ----------- ------------------- +> ID YES ALWAYS +> rows: 1 + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +INSERT INTO TEST(ID, V) VALUES (2, 20); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +UPDATE TEST SET ID = ID + 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST(ID, V) KEY(V) VALUES (2, 10); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST USING (VALUES (2, 20)) S(ID, V) ON TEST.ID = S.ID + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +@reconnect + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION +> ----------- ----------- ------------------- +> ID YES ALWAYS +> rows: 1 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT GENERATED ALWAYS AS IDENTITY(START WITH 1 RESTART WITH 2) NOT NULL, "V" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> 
ok + +CREATE TABLE TEST(ID BIGINT, V INT); +> ok + +ALTER TABLE TEST ALTER COLUMN ID SET GENERATED ALWAYS; +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_BASE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_BASE +> ----------- ----------- ------------------- ------------- +> ID YES ALWAYS 2 +> rows: 1 + +ALTER TABLE TEST ALTER COLUMN ID SET GENERATED BY DEFAULT; +> ok + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +SELECT COLUMN_NAME, IS_IDENTITY, IDENTITY_GENERATION, IDENTITY_BASE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +> COLUMN_NAME IS_IDENTITY IDENTITY_GENERATION IDENTITY_BASE +> ----------- ----------- ------------------- ------------- +> ID YES BY DEFAULT 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT DEFAULT 1, B INT DEFAULT 2 DEFAULT ON NULL); +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 FALSE +> B 2 TRUE +> rows: 2 + +ALTER TABLE TEST ALTER COLUMN A SET DEFAULT ON NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN B DROP DEFAULT ON NULL; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 TRUE +> B 2 FALSE +> rows: 2 + +ALTER TABLE TEST ALTER COLUMN A SET DEFAULT ON NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN B DROP DEFAULT ON NULL; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME COLUMN_DEFAULT DEFAULT_ON_NULL +> ----------- -------------- --------------- +> A 1 TRUE +> B 2 FALSE +> rows: 2 + +DROP TABLE 
TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql new file mode 100644 index 0000000000..a7825a5e18 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropColumn.sql @@ -0,0 +1,98 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR AS LOWER(A)); +> ok + +ALTER TABLE TEST DROP COLUMN B; +> ok + +DROP TABLE TEST; +> ok + +ALTER TABLE IF EXISTS TEST DROP COLUMN A; +> ok + +ALTER TABLE TEST DROP COLUMN A; +> exception TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 + +CREATE TABLE TEST(A INT, B INT, C INT, D INT, E INT, F INT, G INT, H INT, I INT, J INT); +> ok + +ALTER TABLE TEST DROP COLUMN IF EXISTS J; +> ok + +ALTER TABLE TEST DROP COLUMN J; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST DROP COLUMN B; +> ok + +ALTER TABLE TEST DROP COLUMN IF EXISTS C; +> ok + +SELECT * FROM TEST; +> A D E F G H I +> - - - - - - - +> rows: 0 + +ALTER TABLE TEST DROP COLUMN B, D; +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST DROP COLUMN IF EXISTS B, D; +> ok + +SELECT * FROM TEST; +> A E F G H I +> - - - - - - +> rows: 0 + +ALTER TABLE TEST DROP COLUMN E, F; +> ok + +SELECT * FROM TEST; +> A G H I +> - - - - +> rows: 0 + +ALTER TABLE TEST DROP COLUMN (B, H); +> exception COLUMN_NOT_FOUND_1 + +ALTER TABLE TEST DROP COLUMN IF EXISTS (B, H); +> ok + +SELECT * FROM TEST; +> A G I +> - - - +> rows: 0 + +ALTER TABLE TEST DROP COLUMN (G, I); +> ok + +SELECT * FROM TEST; +> A +> - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE T1(ID INT PRIMARY KEY, C INT); +> ok + +CREATE VIEW V1 AS SELECT C FROM T1; +> ok + +ALTER TABLE T1 DROP COLUMN C; +> exception COLUMN_IS_REFERENCED_1 + +DROP VIEW V1; +> ok + +ALTER TABLE T1 DROP COLUMN C; +> ok + +DROP TABLE T1; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql new file mode 100644 index 0000000000..2be6935581 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableDropConstraint.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE A(A INT PRIMARY KEY); +> ok + +CREATE TABLE B(B INT PRIMARY KEY, A INT CONSTRAINT C REFERENCES A(A)); +> ok + +ALTER TABLE A DROP CONSTRAINT C; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE B DROP CONSTRAINT C; +> ok + +DROP TABLE B, A; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql new file mode 100644 index 0000000000..53683cb754 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableRename.sql @@ -0,0 +1,61 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- Test for ALTER TABLE RENAME and ALTER VIEW RENAME + +CREATE TABLE TABLE1A(ID INT); +> ok + +INSERT INTO TABLE1A VALUES (1); +> update count: 1 + +-- ALTER TABLE RENAME + +ALTER TABLE TABLE1A RENAME TO TABLE1B; +> ok + +SELECT * FROM TABLE1B; +>> 1 + +ALTER TABLE IF EXISTS TABLE1B RENAME TO TABLE1C; +> ok + +SELECT * FROM TABLE1C; +>> 1 + +ALTER TABLE BAD RENAME TO SMTH; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +ALTER TABLE IF EXISTS BAD RENAME TO SMTH; +> ok + +-- ALTER VIEW RENAME + +CREATE VIEW VIEW1A AS SELECT * FROM TABLE1C; +> ok + +ALTER VIEW VIEW1A RENAME TO VIEW1B; +> ok + +SELECT * FROM VIEW1B; +>> 1 + +ALTER TABLE IF EXISTS VIEW1B RENAME TO VIEW1C; +> ok + +SELECT * FROM VIEW1C; +>> 1 + +ALTER VIEW BAD RENAME TO SMTH; +> exception VIEW_NOT_FOUND_1 + +ALTER VIEW IF EXISTS BAD RENAME TO SMTH; +> ok + +SELECT * FROM VIEW1C; +>> 1 + +DROP TABLE TABLE1C CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql b/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql new file mode 100644 index 0000000000..6c1dbdc4a1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/alterTableRenameConstraint.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE A(A INT PRIMARY KEY); +> ok + +CREATE TABLE B(B INT PRIMARY KEY, A INT CONSTRAINT C REFERENCES A(A)); +> ok + +ALTER TABLE A RENAME CONSTRAINT C TO C1; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER TABLE B RENAME CONSTRAINT C TO C1; +> ok + +DROP TABLE B, A; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/analyze.sql b/h2/src/test/org/h2/test/scripts/ddl/analyze.sql new file mode 100644 index 0000000000..706fe121f9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/analyze.sql @@ -0,0 +1,67 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(X INT, B BLOB(1)); +> ok + +INSERT INTO TEST(X) VALUES 1, 2, 3, 3, NULL, NULL; +> update count: 6 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 66 + +INSERT INTO TEST(X) VALUES 6, 7, 8, 9; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 80 + +TRUNCATE TABLE TEST; +> update count: 10 + +INSERT INTO TEST(X) VALUES 1, 2, 3; +> update count: 3 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 100 + +TRUNCATE TABLE TEST; +> update count: 3 + +INSERT INTO TEST(X) VALUES 1, 1, 1, 1; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 25 + +ANALYZE TABLE TEST SAMPLE_SIZE 3; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 33 + +TRUNCATE TABLE TEST; +> update count: 4 + +ANALYZE TABLE TEST; +> ok + +SELECT SELECTIVITY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'X'; +>> 50 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql b/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql new file mode 100644 index 0000000000..ea9d89b0a8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/commentOn.sql @@ -0,0 +1,66 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT COMMENT NULL, B INT COMMENT '', C INT COMMENT 'comment 1', D INT COMMENT 'comment 2'); +> ok + +SELECT COLUMN_NAME, REMARKS FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME REMARKS +> ----------- --------- +> A null +> B null +> C comment 1 +> D comment 2 +> rows: 4 + +COMMENT ON COLUMN TEST.A IS 'comment 3'; +> ok + +COMMENT ON COLUMN TEST.B IS 'comment 4'; +> ok + +COMMENT ON COLUMN TEST.C IS NULL; +> ok + +COMMENT ON COLUMN TEST.D IS ''; +> ok + +SELECT COLUMN_NAME, REMARKS FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME REMARKS +> ----------- --------- +> A comment 3 +> B comment 4 +> C null +> D null +> rows: 4 + +DROP TABLE TEST; +> ok + +CREATE USER U1 COMMENT NULL PASSWORD '1'; +> ok + +CREATE USER U2 COMMENT '' PASSWORD '1'; +> ok + +CREATE USER U3 COMMENT 'comment' PASSWORD '1'; +> ok + +SELECT USER_NAME, REMARKS FROM INFORMATION_SCHEMA.USERS WHERE USER_NAME LIKE 'U_'; +> USER_NAME REMARKS +> --------- ------- +> U1 null +> U2 null +> U3 comment +> rows: 3 + +DROP USER U1; +> ok + +DROP USER U2; +> ok + +DROP USER U3; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql b/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql new file mode 100644 index 0000000000..3a4234e1f3 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createAlias.sql @@ -0,0 +1,142 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create alias "MIN" for 'java.lang.Integer.parseInt(java.lang.String)'; +> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 + +create alias "CAST" for 'java.lang.Integer.parseInt(java.lang.String)'; +> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 + +@reconnect off + +--- function alias --------------------------------------------------------------------------------------------- +CREATE ALIAS MY_SQRT FOR 'java.lang.Math.sqrt'; +> ok + +SELECT MY_SQRT(2.0) MS, SQRT(2.0); +> MS 1.4142135623730951 +> ------------------ ------------------ +> 1.4142135623730951 1.4142135623730951 +> rows: 1 + +SELECT MY_SQRT(SUM(X)), SUM(X), MY_SQRT(55) FROM SYSTEM_RANGE(1, 10); +> PUBLIC.MY_SQRT(SUM(X)) SUM(X) PUBLIC.MY_SQRT(55) +> ---------------------- ------ ------------------ +> 7.416198487095663 55 7.416198487095663 +> rows: 1 + +SELECT MY_SQRT(-1.0) MS, SQRT(NULL) S; +> MS S +> --- ---- +> NaN null +> rows: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ---------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE FORCE ALIAS "PUBLIC"."MY_SQRT" FOR 'java.lang.Math.sqrt'; +> rows (ordered): 2 + +SELECT SPECIFIC_NAME, ROUTINE_NAME, ROUTINE_TYPE, DATA_TYPE, ROUTINE_BODY, EXTERNAL_NAME, EXTERNAL_LANGUAGE, + IS_DETERMINISTIC, REMARKS FROM INFORMATION_SCHEMA.ROUTINES; +> SPECIFIC_NAME ROUTINE_NAME ROUTINE_TYPE DATA_TYPE ROUTINE_BODY EXTERNAL_NAME EXTERNAL_LANGUAGE IS_DETERMINISTIC REMARKS +> ------------- ------------ ------------ ---------------- ------------ ------------------- ----------------- ---------------- ------- +> MY_SQRT_1 MY_SQRT FUNCTION DOUBLE PRECISION EXTERNAL java.lang.Math.sqrt JAVA NO null +> rows: 1 + +SELECT SPECIFIC_NAME, ORDINAL_POSITION, PARAMETER_MODE, IS_RESULT, AS_LOCATOR, PARAMETER_NAME, DATA_TYPE, + PARAMETER_DEFAULT FROM INFORMATION_SCHEMA.PARAMETERS; +> SPECIFIC_NAME ORDINAL_POSITION PARAMETER_MODE IS_RESULT AS_LOCATOR PARAMETER_NAME DATA_TYPE 
PARAMETER_DEFAULT +> ------------- ---------------- -------------- --------- ---------- -------------- ---------------- ----------------- +> MY_SQRT_1 1 IN NO NO P1 DOUBLE PRECISION null +> rows: 1 + +DROP ALIAS MY_SQRT; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE ALIAS TRUNC FOR 'java.lang.Math.floor(double)'; +> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 + +CREATE ALIAS PUBLIC.TRUNC FOR 'java.lang.Math.floor(double)'; +> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 + +CREATE ALIAS TEST_SCHEMA.TRUNC FOR 'java.lang.Math.round(double)'; +> exception FUNCTION_ALIAS_ALREADY_EXISTS_1 + +SET BUILTIN_ALIAS_OVERRIDE=1; +> ok + +CREATE ALIAS TRUNC FOR 'java.lang.Math.floor(double)'; +> ok + +SELECT TRUNC(1.5); +>> 1.0 + +SELECT TRUNC(-1.5); +>> -2.0 + +DROP ALIAS TRUNC; +> ok + +-- Compatibility syntax with identifier +CREATE ALIAS TRUNC FOR "java.lang.Math.floor(double)"; +> ok + +SELECT TRUNC(-1.5); +>> -2.0 + +DROP ALIAS TRUNC; +> ok + +CREATE ALIAS PUBLIC.TRUNC FOR 'java.lang.Math.floor(double)'; +> ok + +CREATE ALIAS TEST_SCHEMA.TRUNC FOR 'java.lang.Math.round(double)'; +> ok + +SELECT PUBLIC.TRUNC(1.5); +>> 1.0 + +SELECT PUBLIC.TRUNC(-1.5); +>> -2.0 + +SELECT TEST_SCHEMA.TRUNC(1.5); +>> 2 + +SELECT TEST_SCHEMA.TRUNC(-1.5); +>> -1 + +DROP ALIAS PUBLIC.TRUNC; +> ok + +DROP ALIAS TEST_SCHEMA.TRUNC; +> ok + +SET BUILTIN_ALIAS_OVERRIDE=0; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> ok + +-- test for issue #1531 +CREATE TABLE TEST (ID BIGINT, VAL VARCHAR2(10)) AS SELECT x,'val'||x FROM SYSTEM_RANGE(1,2); +> ok + +CREATE ALIAS FTBL AS $$ ResultSet t(Connection c) throws SQLException {return c.prepareStatement("SELECT ID, VAL FROM TEST").executeQuery();} $$; +> ok + +CREATE OR REPLACE VIEW V_TEST (ID, VAL) AS (SELECT * FROM FTBL()); +> ok + +SELECT * FROM V_TEST; +> ID VAL +> -- ---- +> 1 val1 +> 2 val2 +> rows: 2 diff --git a/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql b/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql new file mode 100644 index 
0000000000..a2b941ae7a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createConstant.sql @@ -0,0 +1,82 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SCHEMA CONST; +> ok + +CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; +> ok + +COMMENT ON CONSTANT ONE IS 'Eins'; +> ok + +CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; +> ok + +CREATE CONSTANT CONST.ONE VALUE 1; +> ok + +SELECT CONSTANT_SCHEMA, CONSTANT_NAME, VALUE_DEFINITION, DATA_TYPE, NUMERIC_PRECISION, REMARKS FROM INFORMATION_SCHEMA.CONSTANTS; +> CONSTANT_SCHEMA CONSTANT_NAME VALUE_DEFINITION DATA_TYPE NUMERIC_PRECISION REMARKS +> --------------- ------------- ---------------- --------- ----------------- ------- +> CONST ONE 1 INTEGER 32 null +> PUBLIC ONE 1 INTEGER 32 Eins +> rows: 2 + +SELECT ONE, CONST.ONE; +> 1 1 +> - - +> 1 1 +> rows: 1 + +COMMENT ON CONSTANT ONE IS NULL; +> ok + +DROP SCHEMA CONST CASCADE; +> ok + +SELECT CONSTANT_SCHEMA, CONSTANT_NAME, VALUE_DEFINITION, DATA_TYPE, REMARKS FROM INFORMATION_SCHEMA.CONSTANTS; +> CONSTANT_SCHEMA CONSTANT_NAME VALUE_DEFINITION DATA_TYPE REMARKS +> --------------- ------------- ---------------- --------- ------- +> PUBLIC ONE 1 INTEGER null +> rows: 1 + +DROP CONSTANT ONE; +> ok + +DROP CONSTANT IF EXISTS ONE; +> ok + +create constant abc value 1; +> ok + +call abc; +> 1 +> - +> 1 +> rows: 1 + +drop all objects; +> ok + +call abc; +> exception COLUMN_NOT_FOUND_1 + +create constant abc value 1; +> ok + +comment on constant abc is 'One'; +> ok + +select remarks from information_schema.constants where constant_name = 'ABC'; +>> One + +@reconnect + +select remarks from information_schema.constants where constant_name = 'ABC'; +>> One + +drop constant abc; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql b/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql new file mode 100644 index 0000000000..e0936e3b21 
--- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createDomain.sql @@ -0,0 +1,259 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE DOMAIN S1.D1 AS INT DEFAULT 1; +> ok + +CREATE DOMAIN S2.D2 AS TIMESTAMP WITH TIME ZONE ON UPDATE CURRENT_TIMESTAMP; +> ok + +CREATE TABLE TEST(C1 S1.D1, C2 S2.D2); +> ok + +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- -------------- ------------- ----------- -------------- ---------------- +> C1 SCRIPT S1 D1 null null +> C2 SCRIPT S2 D2 null null +> rows (ordered): 2 + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE FROM INFORMATION_SCHEMA.DOMAINS; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE +> -------------- ------------- ----------- -------------- ----------------- ------------------------ +> SCRIPT S1 D1 1 null INTEGER +> SCRIPT S2 D2 null CURRENT_TIMESTAMP TIMESTAMP WITH TIME ZONE +> rows: 2 + +DROP TABLE TEST; +> ok + +DROP DOMAIN S1.D1; +> ok + +DROP SCHEMA S1 RESTRICT; +> ok + +DROP SCHEMA S2 RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA S2 CASCADE; +> ok + +CREATE DOMAIN D INT; +> ok + +CREATE MEMORY TABLE TEST(C D); +> ok + +ALTER DOMAIN D ADD CHECK (VALUE <> 0); +> ok + +ALTER DOMAIN D ADD CONSTRAINT D1 CHECK (VALUE > 0); +> ok + +ALTER DOMAIN D ADD CONSTRAINT D1 CHECK (VALUE > 0); +> exception CONSTRAINT_ALREADY_EXISTS_1 + +ALTER DOMAIN D ADD CONSTRAINT IF NOT EXISTS D1 CHECK (VALUE > 0); +> ok + +ALTER DOMAIN X ADD CHECK (VALUE > 0); +> exception DOMAIN_NOT_FOUND_1 + +ALTER DOMAIN IF EXISTS X ADD 
CHECK (VALUE > 0); +> ok + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +ALTER DOMAIN D DROP CONSTRAINT D1; +> ok + +ALTER DOMAIN D DROP CONSTRAINT D1; +> exception CONSTRAINT_NOT_FOUND_1 + +ALTER DOMAIN D DROP CONSTRAINT IF EXISTS D1; +> ok + +ALTER DOMAIN IF EXISTS X DROP CONSTRAINT D1; +> ok + +ALTER DOMAIN X DROP CONSTRAINT IF EXISTS D1; +> exception DOMAIN_NOT_FOUND_1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."D" AS INTEGER; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" "PUBLIC"."D" ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER DOMAIN "PUBLIC"."D" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" CHECK(VALUE <> 0) NOCHECK; +> rows (ordered): 5 + +SELECT CONSTRAINT_NAME, DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS; +> CONSTRAINT_NAME DOMAIN_NAME +> --------------- ----------- +> CONSTRAINT_4 D +> rows: 1 + +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- ------------ +> SCRIPT PUBLIC CONSTRAINT_4 VALUE <> 0 +> rows: 1 + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +>> 0 + +INSERT INTO TEST VALUES -1; +> update count: 1 + +INSERT INTO TEST VALUES 0; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +DROP DOMAIN D RESTRICT; +> exception CANNOT_DROP_2 + +DROP DOMAIN D CASCADE; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (-1); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" CHECK("C" <> 0) 
NOCHECK; +> rows (ordered): 5 + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME +> --------------- --------------- ---------- +> CONSTRAINT_2 CHECK TEST +> rows: 1 + +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- ------------ +> SCRIPT PUBLIC CONSTRAINT_2 "C" <> 0 +> rows: 1 + +TABLE INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME +> ------------- ------------ ---------- ----------- ------------------ ----------------- --------------- +> SCRIPT PUBLIC TEST C SCRIPT PUBLIC CONSTRAINT_2 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D1 AS INT DEFAULT 1 CHECK (VALUE >= 1); +> ok + +CREATE DOMAIN D2 AS D1 DEFAULT 2; +> ok + +CREATE DOMAIN D3 AS D1 CHECK (VALUE >= 3); +> ok + +CREATE DOMAIN D4 AS D1 DEFAULT 4 CHECK (VALUE >= 4); +> ok + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE, NUMERIC_PRECISION, + PARENT_DOMAIN_CATALOG, PARENT_DOMAIN_SCHEMA, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE NUMERIC_PRECISION PARENT_DOMAIN_CATALOG PARENT_DOMAIN_SCHEMA PARENT_DOMAIN_NAME +> -------------- ------------- ----------- -------------- ---------------- --------- ----------------- --------------------- -------------------- ------------------ +> SCRIPT PUBLIC D1 1 null INTEGER 32 null null null +> SCRIPT PUBLIC D2 2 null INTEGER 32 SCRIPT PUBLIC D1 +> SCRIPT PUBLIC D3 null null INTEGER 32 SCRIPT PUBLIC D1 +> SCRIPT PUBLIC D4 4 null INTEGER 32 SCRIPT PUBLIC D1 +> rows: 4 + +SELECT DOMAIN_NAME, CHECK_CLAUSE FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS D JOIN 
INFORMATION_SCHEMA.CHECK_CONSTRAINTS C + ON D.CONSTRAINT_CATALOG = C.CONSTRAINT_CATALOG AND D.CONSTRAINT_SCHEMA = C.CONSTRAINT_SCHEMA AND D.CONSTRAINT_NAME = C.CONSTRAINT_NAME + WHERE C.CONSTRAINT_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME CHECK_CLAUSE +> ----------- ------------ +> D1 VALUE >= 1 +> D3 VALUE >= 3 +> D4 VALUE >= 4 +> rows: 3 + +VALUES CAST(0 AS D2); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +DROP DOMAIN D1; +> exception CANNOT_DROP_2 + +DROP DOMAIN D1 CASCADE; +> ok + +SELECT DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, DOMAIN_DEFAULT, DOMAIN_ON_UPDATE, DATA_TYPE, NUMERIC_PRECISION, + PARENT_DOMAIN_CATALOG, PARENT_DOMAIN_SCHEMA, PARENT_DOMAIN_NAME FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +> DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME DOMAIN_DEFAULT DOMAIN_ON_UPDATE DATA_TYPE NUMERIC_PRECISION PARENT_DOMAIN_CATALOG PARENT_DOMAIN_SCHEMA PARENT_DOMAIN_NAME +> -------------- ------------- ----------- -------------- ---------------- --------- ----------------- --------------------- -------------------- ------------------ +> SCRIPT PUBLIC D2 2 null INTEGER 32 null null null +> SCRIPT PUBLIC D3 1 null INTEGER 32 null null null +> SCRIPT PUBLIC D4 4 null INTEGER 32 null null null +> rows: 3 + +SELECT DOMAIN_NAME, CHECK_CLAUSE FROM INFORMATION_SCHEMA.DOMAIN_CONSTRAINTS D JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS C + ON D.CONSTRAINT_CATALOG = C.CONSTRAINT_CATALOG AND D.CONSTRAINT_SCHEMA = C.CONSTRAINT_SCHEMA AND D.CONSTRAINT_NAME = C.CONSTRAINT_NAME + WHERE C.CONSTRAINT_SCHEMA = 'PUBLIC'; +> DOMAIN_NAME CHECK_CLAUSE +> ----------- ------------ +> D2 VALUE >= 1 +> D3 VALUE >= 1 +> D3 VALUE >= 3 +> D4 VALUE >= 1 +> D4 VALUE >= 4 +> rows: 5 + +DROP DOMAIN D2; +> ok + +DROP DOMAIN D3; +> ok + +DROP DOMAIN D4; +> ok + +CREATE DOMAIN D1 INT; +> ok + +CREATE DOMAIN D2 INT; +> ok + +DROP DOMAIN D1; +> ok + +CREATE DOMAIN D3 D2; +> ok + +@reconnect + +DROP DOMAIN D3; +> ok + +DROP DOMAIN D2; +> ok + +CREATE DOMAIN D AS CHARACTER VARYING CHECK (VALUE LIKE 
'%1%'); +> ok + +ALTER DOMAIN D ADD CHECK (VALUE ILIKE '%2%'); +> ok + +DROP DOMAIN D; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql b/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql new file mode 100644 index 0000000000..4f99d98afe --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createIndex.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(G GEOMETRY); +> ok + +CREATE UNIQUE SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +CREATE HASH SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +CREATE UNIQUE HASH SPATIAL INDEX IDX ON TEST(G); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> exception SYNTAX_ERROR_1 + +CREATE UNIQUE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> ok + +DROP INDEX TEST_IDX; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql b/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql new file mode 100644 index 0000000000..e48583182e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createSchema.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE USER TEST_USER PASSWORD 'test'; +> ok + +CREATE ROLE TEST_ROLE; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2 AUTHORIZATION TEST_USER; +> ok + +CREATE SCHEMA S3 AUTHORIZATION TEST_ROLE; +> ok + +CREATE SCHEMA AUTHORIZATION TEST_USER; +> ok + +CREATE SCHEMA AUTHORIZATION TEST_ROLE; +> ok + +TABLE INFORMATION_SCHEMA.SCHEMATA; +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- -------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S1 SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S2 TEST_USER SCRIPT PUBLIC Unicode null OFF null +> SCRIPT S3 TEST_ROLE SCRIPT PUBLIC Unicode null OFF null +> SCRIPT TEST_ROLE TEST_ROLE SCRIPT PUBLIC Unicode null OFF null +> SCRIPT TEST_USER TEST_USER SCRIPT PUBLIC Unicode null OFF null +> rows: 7 + +DROP SCHEMA S1; +> ok + +DROP SCHEMA S2; +> ok + +DROP SCHEMA S3; +> ok + +DROP USER TEST_USER; +> exception CANNOT_DROP_2 + +DROP ROLE TEST_ROLE; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_USER; +> ok + +DROP SCHEMA TEST_ROLE; +> ok + +DROP USER TEST_USER; +> ok + +DROP ROLE TEST_ROLE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql b/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql new file mode 100644 index 0000000000..e6f3cb8d29 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createSequence.sql @@ -0,0 +1,196 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 1 MINVALUE 0 MAXVALUE 1; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 1 MINVALUE 0 MAXVALUE 0; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 1 INCREMENT BY 1 MINVALUE 1 MAXVALUE 0; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 0 MINVALUE 0 MAXVALUE 1; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 1 INCREMENT BY 1 MINVALUE 2 MAXVALUE 10; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 20 INCREMENT BY 1 MINVALUE 1 MAXVALUE 10; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 9223372036854775807 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY 9223372036854775807 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -9223372036854775808 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -1 MAXVALUE 9223372036854775807 NO CACHE; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE 0 MAXVALUE 9223372036854775807 NO CACHE; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 INCREMENT BY -9223372036854775808 MINVALUE -1 MAXVALUE 9223372036854775807 CACHE 2; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ CACHE -1; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ 
MINVALUE 10 START WITH 9 RESTART WITH 10; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ MAXVALUE 10 START WITH 11 RESTART WITH 1; +> exception SEQUENCE_ATTRIBUTES_INVALID_7 + +CREATE SEQUENCE SEQ START WITH 0 MINVALUE -10 MAXVALUE 10; +> ok + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 0 -10 10 1 NO 0 21 +> rows: 1 + +ALTER SEQUENCE SEQ NO MINVALUE NO MAXVALUE; +> ok + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------------- --------- ------------ ---------- ----- +> SEQ 0 0 9223372036854775807 1 NO 0 21 +> rows: 1 + +ALTER SEQUENCE SEQ MINVALUE -100 MAXVALUE 100; +> ok + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 0 -100 100 1 NO 0 21 +> rows: 1 + +VALUES NEXT VALUE FOR SEQ; +>> 0 + +ALTER SEQUENCE SEQ START WITH 10; +> ok + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 10 -100 100 1 NO 1 21 +> rows: 1 + 
+VALUES NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ RESTART; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 10 + +ALTER SEQUENCE SEQ START WITH 5 RESTART WITH 20; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 20 + +@reconnect + +SELECT SEQUENCE_NAME, START_VALUE, MINIMUM_VALUE, MAXIMUM_VALUE, INCREMENT, CYCLE_OPTION, BASE_VALUE, CACHE + FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME START_VALUE MINIMUM_VALUE MAXIMUM_VALUE INCREMENT CYCLE_OPTION BASE_VALUE CACHE +> ------------- ----------- ------------- ------------- --------- ------------ ---------- ----- +> SEQ 5 -100 100 1 NO 21 21 +> rows: 1 + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ START WITH 10 RESTART WITH 20; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 20 + +DROP SEQUENCE SEQ; +> ok + +SET AUTOCOMMIT OFF; +> ok + +CREATE SEQUENCE SEQ; +> ok + +ALTER SEQUENCE SEQ RESTART WITH 1; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +DROP SEQUENCE SEQ; +> ok + +COMMIT; +> ok + +SET AUTOCOMMIT ON; +> ok + +CREATE SEQUENCE SEQ MINVALUE 1 MAXVALUE 10 INCREMENT BY -1; +> ok + +VALUES NEXT VALUE FOR SEQ, NEXT VALUE FOR SEQ; +> C1 +> -- +> 10 +> 9 +> rows: 2 + +ALTER SEQUENCE SEQ RESTART; +> ok + +VALUES NEXT VALUE FOR SEQ, NEXT VALUE FOR SEQ; +> C1 +> -- +> 10 +> 9 +> rows: 2 + +ALTER SEQUENCE SEQ RESTART WITH 1; +> ok + +VALUES NEXT VALUE FOR SEQ; +>> 1 + +VALUES NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql b/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql new file mode 100644 index 0000000000..b359f386a7 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createSynonym.sql @@ -0,0 +1,52 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE SCHEMA SCHEMA1; +> ok + +CREATE SCHEMA SCHEMA2; +> ok + +CREATE TABLE SCHEMA1.T1(K BIGINT PRIMARY KEY, V VARCHAR); +> ok + +CREATE SYNONYM SCHEMA1.T1 FOR SCHEMA1.T1; +> exception TABLE_OR_VIEW_ALREADY_EXISTS_1 + +CREATE SYNONYM SCHEMA2.T1 FOR SCHEMA1.T1; +> ok + +DROP SYNONYM SCHEMA2.T1; +> ok + +SET SCHEMA SCHEMA2; +> ok + +CREATE SYNONYM T1 FOR SCHEMA1.T1; +> ok + +DROP SYNONYM T1; +> ok + +SET SCHEMA SCHEMA1; +> ok + +CREATE SYNONYM T1 FOR T1; +> exception TABLE_OR_VIEW_ALREADY_EXISTS_1 + +CREATE SYNONYM SCHEMA2.T1 FOR T1; +> ok + +DROP SYNONYM SCHEMA2.T1; +> ok + +SET SCHEMA PUBLIC; +> ok + +DROP SCHEMA SCHEMA2 CASCADE; +> ok + +DROP SCHEMA SCHEMA1 CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createTable.sql b/h2/src/test/org/h2/test/scripts/ddl/createTable.sql new file mode 100644 index 0000000000..01d94e367a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createTable.sql @@ -0,0 +1,267 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT CONSTRAINT PK_1 PRIMARY KEY); +> ok + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME CONSTRAINT_TYPE +> --------------- --------------- +> PK_1 PRIMARY KEY +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID IDENTITY, CONSTRAINT PK_1 PRIMARY KEY(ID)); +> ok + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME CONSTRAINT_TYPE +> --------------- --------------- +> PK_1 PRIMARY KEY +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE T1(ID INT PRIMARY KEY, COL2 INT); +> ok + +INSERT INTO T1 VALUES (1, 2), (11, 22); +> update count: 2 + +CREATE TABLE T2 AS SELECT * FROM T1; +> ok + +SELECT * FROM T2 ORDER BY ID; +> ID COL2 +> -- ---- +> 1 2 +> 11 22 +> rows (ordered): 2 + +DROP TABLE T2; +> ok + +CREATE TABLE T2 AS SELECT * FROM T1 WITH DATA; +> ok + +SELECT * FROM T2 ORDER BY ID; +> ID COL2 +> -- ---- +> 1 2 +> 11 22 +> rows (ordered): 2 + +DROP TABLE T2; +> ok + +CREATE TABLE T2 AS SELECT * FROM T1 WITH NO DATA; +> ok + +SELECT * FROM T2 ORDER BY ID; +> ID COL2 +> -- ---- +> rows (ordered): 0 + +DROP TABLE T2; +> ok + +DROP TABLE T1; +> ok + +CREATE TABLE TEST(A INT, B INT INVISIBLE); +> ok + +SELECT * FROM TEST; +> A +> - +> rows: 0 + +SELECT A, B FROM TEST; +> A B +> - - +> rows: 0 + +SELECT COLUMN_NAME, IS_VISIBLE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME IS_VISIBLE +> ----------- ---------- +> A TRUE +> B FALSE +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(ID IDENTITY); +> ok + +CREATE TABLE TEST2(ID BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SELECT CONSTRAINT_TYPE, TABLE_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_SCHEMA = 'PUBLIC'; +> CONSTRAINT_TYPE TABLE_NAME +> --------------- ---------- +> PRIMARY KEY TEST1 +> rows: 1 + +DROP TABLE TEST1, TEST2; +> ok + +CREATE TABLE TEST(A); +> 
exception UNKNOWN_DATA_TYPE_1 + +CREATE TABLE TEST(A, B, C) AS SELECT 1, 2, CAST ('A' AS VARCHAR); +> ok + +SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +> COLUMN_NAME DATA_TYPE +> ----------- ----------------- +> A INTEGER +> B INTEGER +> C CHARACTER VARYING +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(A INT, B INT GENERATED ALWAYS AS (1), C INT GENERATED ALWAYS AS (B + 1)); +> exception COLUMN_NOT_FOUND_1 + +CREATE MEMORY TABLE TEST(A INT, B INT GENERATED ALWAYS AS (1), C INT GENERATED ALWAYS AS (A + 1)); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" INTEGER, "B" INTEGER GENERATED ALWAYS AS (1), "C" INTEGER GENERATED ALWAYS AS ("A" + 1) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT GENERATED BY DEFAULT AS (1)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A IDENTITY GENERATED ALWAYS AS (1)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A IDENTITY AS (1)); +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST1(ID BIGINT GENERATED ALWAYS AS IDENTITY); +> ok + +CREATE TABLE TEST2(ID BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +CREATE TABLE TEST3(ID BIGINT NULL GENERATED ALWAYS AS IDENTITY); +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +CREATE TABLE TEST3(ID BIGINT GENERATED BY DEFAULT AS IDENTITY NULL); +> exception COLUMN_MUST_NOT_BE_NULLABLE_1 + +SELECT COLUMN_NAME, IDENTITY_GENERATION, IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME IDENTITY_GENERATION IS_NULLABLE +> ----------- ------------------- ----------- +> ID ALWAYS NO +> ID BY DEFAULT NO +> rows: 2 + +DROP TABLE TEST1, TEST2; +> ok + +CREATE TABLE TEST(ID BIGINT 
GENERATED ALWAYS AS IDENTITY(MINVALUE 1 MAXVALUE 2), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +SELECT IDENTITY_BASE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +>> 2 + +INSERT INTO TEST(V) VALUES 2; +> update count: 1 + +SELECT IDENTITY_BASE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME = 'ID'; +>> null + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES (2, 2); +> update count: 1 + +INSERT INTO TEST(V) VALUES 3; +> exception DUPLICATE_KEY_1 + +TABLE TEST; +> ID V +> -- - +> 1 1 +> 2 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(R BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE HSQLDB; +> ok + +CREATE TABLE TEST2(M BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE MySQL; +> ok + +CREATE TABLE TEST3(H BIGINT GENERATED BY DEFAULT AS IDENTITY); +> ok + +SET MODE Regular; +> ok + +SELECT COLUMN_NAME, DEFAULT_ON_NULL FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'PUBLIC'; +> COLUMN_NAME DEFAULT_ON_NULL +> ----------- --------------- +> H TRUE +> M TRUE +> R FALSE +> rows: 3 + +DROP TABLE TEST1, TEST2, TEST3; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST(' || (SELECT LISTAGG('C' || X || ' INT') FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception TOO_MANY_COLUMNS_1 + +CREATE TABLE TEST AS (SELECT REPEAT('A', 300)); +> ok + +TABLE TEST; +> C1 +> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> 
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +> rows: 1 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql b/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql new file mode 100644 index 0000000000..672263520a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createTrigger.sql @@ -0,0 +1,230 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE COUNT(X INT); +> ok + +CREATE FORCE TRIGGER T_COUNT BEFORE INSERT ON COUNT CALL 'com.Unknown'; +> ok + +INSERT INTO COUNT VALUES(NULL); +> exception ERROR_CREATING_TRIGGER_OBJECT_3 + +DROP TRIGGER T_COUNT; +> ok + +CREATE TABLE ITEMS(ID INT CHECK ID < SELECT MAX(ID) FROM COUNT); +> ok + +insert into items values(DEFAULT); +> update count: 1 + +DROP TABLE COUNT; +> exception CANNOT_DROP_2 + +insert into items values(DEFAULT); +> update count: 1 + +drop table items, count; +> ok + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('a', 'b', 'c'); +> exception ERROR_EXECUTING_TRIGGER_3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C INT); +> ok + +CREATE TRIGGER T1 BEFORE INSERT ON TEST FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('1', 'a', 1); +> update count: 1 + +DROP TRIGGER T1; +> ok + +CREATE TRIGGER T1 BEFORE INSERT ON TEST FOR EACH STATEMENT CALL 'org.h2.test.scripts.Trigger1'; +> ok + +INSERT INTO TEST VALUES ('2', 'b', 2); +> update count: 1 + +DROP TRIGGER T1; +> ok + +TABLE TEST; 
+> A B C +> - - -- +> 1 a 10 +> 2 b 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +-- --------------------------------------------------------------------------- +-- Checking multiple classes in trigger source +-- --------------------------------------------------------------------------- + +CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + if (newRow != null) { + newRow[2] = newRow[2] + "1"\u003B + } + } + }\u003B +}'); +> ok + +INSERT INTO TEST VALUES ('a', 'b', 'c'); +> update count: 1 + +TABLE TEST; +> A B C +> - - -- +> a b c1 +> rows: 1 + +DROP TABLE TEST; +> ok + +-- --------------------------------------------------------------------------- +-- PostgreSQL syntax tests +-- --------------------------------------------------------------------------- + +set mode postgresql; +> ok + +CREATE TABLE COUNT(X INT); +> ok + +INSERT INTO COUNT VALUES(1); +> update count: 1 + +CREATE FORCE TRIGGER T_COUNT BEFORE INSERT OR UPDATE ON COUNT CALL 'com.Unknown'; +> ok + +INSERT INTO COUNT VALUES(NULL); +> exception ERROR_CREATING_TRIGGER_OBJECT_3 + +UPDATE COUNT SET X=2 WHERE X=1; +> exception ERROR_CREATING_TRIGGER_OBJECT_3 + +DROP TABLE COUNT; +> ok + +SET MODE Regular; +> ok + +CREATE MEMORY TABLE T(ID INT PRIMARY KEY, V INT); +> ok + +CREATE VIEW V1 AS TABLE T; +> ok + +CREATE VIEW V2 AS TABLE T; +> ok + +CREATE VIEW V3 AS TABLE T; +> ok + +CREATE TRIGGER T1 INSTEAD OF INSERT ON V1 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +CREATE TRIGGER T2 INSTEAD OF UPDATE ON V2 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, 
Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +CREATE TRIGGER T3 INSTEAD OF DELETE ON V3 FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> ok + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, IS_INSERTABLE_INTO, COMMIT_ACTION + FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE IS_INSERTABLE_INTO COMMIT_ACTION +> ------------- ------------ ---------- ---------- ------------------ ------------- +> SCRIPT PUBLIC T BASE TABLE YES null +> SCRIPT PUBLIC V1 VIEW NO null +> SCRIPT PUBLIC V2 VIEW NO null +> SCRIPT PUBLIC V3 VIEW NO null +> rows: 4 + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, VIEW_DEFINITION, CHECK_OPTION, IS_UPDATABLE, INSERTABLE_INTO, + IS_TRIGGER_UPDATABLE, IS_TRIGGER_DELETABLE, IS_TRIGGER_INSERTABLE_INTO + FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE INSERTABLE_INTO IS_TRIGGER_UPDATABLE IS_TRIGGER_DELETABLE IS_TRIGGER_INSERTABLE_INTO +> ------------- ------------ ---------- ------------------ ------------ ------------ --------------- -------------------- -------------------- -------------------------- +> SCRIPT PUBLIC V1 TABLE "PUBLIC"."T" NONE NO NO NO NO YES +> SCRIPT PUBLIC V2 TABLE "PUBLIC"."T" NONE NO NO YES NO NO +> SCRIPT PUBLIC V3 TABLE "PUBLIC"."T" NONE NO NO NO YES NO +> rows: 3 + +SELECT * FROM INFORMATION_SCHEMA.TRIGGERS; +> TRIGGER_CATALOG TRIGGER_SCHEMA TRIGGER_NAME EVENT_MANIPULATION EVENT_OBJECT_CATALOG EVENT_OBJECT_SCHEMA EVENT_OBJECT_TABLE ACTION_ORIENTATION ACTION_TIMING IS_ROLLBACK JAVA_CLASS QUEUE_SIZE NO_WAIT REMARKS +> --------------- -------------- ------------ ------------------ -------------------- ------------------- ------------------ ------------------ ------------- ----------- ---------- 
---------- ------- ------- +> SCRIPT PUBLIC T1 INSERT SCRIPT PUBLIC V1 ROW INSTEAD OF FALSE null 1024 FALSE null +> SCRIPT PUBLIC T2 UPDATE SCRIPT PUBLIC V2 ROW INSTEAD OF FALSE null 1024 FALSE null +> SCRIPT PUBLIC T3 DELETE SCRIPT PUBLIC V3 ROW INSTEAD OF FALSE null 1024 FALSE null +> rows: 3 + +CREATE TRIGGER T4 BEFORE ROLLBACK ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +CREATE TRIGGER T4 BEFORE SELECT ON TEST FOR EACH ROW AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +CREATE TRIGGER T4 BEFORE SELECT, ROLLBACK ON TEST FOR EACH STATEMENT AS STRINGDECODE( +'org.h2.api.Trigger create() { + return new org.h2.api.Trigger() { + public void fire(Connection conn, Object[] oldRow, Object[] newRow) { + } + }\u003B +}'); +> exception INVALID_TRIGGER_FLAGS_1 + +DROP TABLE T CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/createView.sql b/h2/src/test/org/h2/test/scripts/ddl/createView.sql new file mode 100644 index 0000000000..b049555439 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/createView.sql @@ -0,0 +1,54 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE VIEW TEST_VIEW(A) AS SELECT 'a'; +> ok + +CREATE OR REPLACE VIEW TEST_VIEW(B, C) AS SELECT 'b', 'c'; +> ok + +SELECT * FROM TEST_VIEW; +> B C +> - - +> b c +> rows: 1 + +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, VIEW_DEFINITION, CHECK_OPTION, IS_UPDATABLE, STATUS, REMARKS + FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_NAME = 'TEST_VIEW'; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE STATUS REMARKS +> ------------- ------------ ---------- --------------- ------------ ------------ ------ ------- +> SCRIPT PUBLIC TEST_VIEW SELECT 'b', 'c' NONE NO VALID null +> rows: 1 + +DROP VIEW TEST_VIEW; +> ok + +CREATE TABLE TEST(C1 INT) AS (VALUES 1, 2); +> ok + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C1 AS A FROM TEST); +> ok + +ALTER TABLE TEST ADD COLUMN C2 INT; +> ok + +UPDATE TEST SET C2 = C1 + 1; +> update count: 2 + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C1 AS A, C2 AS B FROM TEST); +> ok + +CREATE OR REPLACE VIEW TEST_VIEW AS (SELECT C2 AS B, C1 AS A FROM TEST); +> ok + +SELECT * FROM TEST_VIEW; +> B A +> - - +> 2 1 +> 3 2 +> rows: 2 + +DROP TABLE TEST CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql b/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql new file mode 100644 index 0000000000..2d570e5934 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropAllObjects.sql @@ -0,0 +1,61 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +@reconnect off + +-- Test table depends on view + +create table a(x int); +> ok + +create view b as select * from a; +> ok + +create table c(y int check (select count(*) from b) = 0); +> ok + +drop all objects; +> ok + +-- Test inter-schema dependency + +create schema table_view; +> ok + +set schema table_view; +> ok + +create table test1 (id int, name varchar(20)); +> ok + +create view test_view_1 as (select * from test1); +> ok + +set schema public; +> ok + +create schema test_run; +> ok + +set schema test_run; +> ok + +create table test2 (id int, address varchar(20), constraint a_cons check (id in (select id from table_view.test1))); +> ok + +set schema public; +> ok + +drop all objects; +> ok + +CREATE DOMAIN D INT; +> ok + +DROP ALL OBJECTS; +> ok + +SELECT COUNT(*) FROM INFORMATION_SCHEMA.DOMAINS WHERE DOMAIN_SCHEMA = 'PUBLIC'; +>> 0 diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql b/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql new file mode 100644 index 0000000000..2fc644b3c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropDomain.sql @@ -0,0 +1,76 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE DOMAIN E AS ENUM('A', 'B'); +> ok + +CREATE TABLE TEST(I INT PRIMARY KEY, E1 E, E2 E NOT NULL); +> ok + +INSERT INTO TEST VALUES (1, 'A', 'B'); +> update count: 1 + +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, IS_NULLABLE, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME IS_NULLABLE DATA_TYPE +> ----------- -------------- ------------- ----------- ----------- --------- +> I null null null NO INTEGER +> E1 SCRIPT PUBLIC E YES ENUM +> E2 SCRIPT PUBLIC E NO ENUM +> rows (ordered): 3 + +DROP DOMAIN E RESTRICT; +> exception CANNOT_DROP_2 + +DROP DOMAIN E CASCADE; +> ok + +SELECT COLUMN_NAME, DOMAIN_CATALOG, DOMAIN_SCHEMA, DOMAIN_NAME, IS_NULLABLE, DATA_TYPE + FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DOMAIN_CATALOG DOMAIN_SCHEMA DOMAIN_NAME IS_NULLABLE DATA_TYPE +> ----------- -------------- ------------- ----------- ----------- --------- +> I null null null NO INTEGER +> E1 null null null YES ENUM +> E2 null null null NO ENUM +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D INT CHECK (VALUE > 0); +> ok + +CREATE MEMORY TABLE TEST(C D); +> ok + +DROP DOMAIN D CASCADE; +> ok + +INSERT INTO TEST VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +@reconnect + +INSERT INTO TEST VALUES 1; +> update count: 1 + +INSERT INTO TEST VALUES -1; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C" INTEGER ); +> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1), (1); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT 
"PUBLIC"."CONSTRAINT_2" CHECK("C" > 0) NOCHECK; +> rows (ordered): 5 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql b/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql new file mode 100644 index 0000000000..a933bb56bf --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropIndex.sql @@ -0,0 +1,75 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SCHEMA TEST; +> ok + +CREATE TABLE TEST.TBL ( + NAME VARCHAR +); +> ok + +CREATE UNIQUE INDEX NAME_INDEX ON TEST.TBL(NAME); +> ok + +SET MODE MySQL; +> ok + +-- MySQL compatibility syntax +ALTER TABLE TEST.TBL DROP INDEX NAME_INDEX; +> ok + +CREATE UNIQUE INDEX NAME_INDEX ON TEST.TBL(NAME); +> ok + +-- MySQL compatibility syntax +ALTER TABLE TEST.TBL DROP INDEX TEST.NAME_INDEX; +> ok + +ALTER TABLE TEST.TBL ADD CONSTRAINT NAME_INDEX UNIQUE (NAME); +> ok + +-- MySQL compatibility syntax +ALTER TABLE TEST.TBL DROP INDEX NAME_INDEX; +> ok + +ALTER TABLE TEST.TBL ADD CONSTRAINT NAME_INDEX UNIQUE (NAME); +> ok + +-- MySQL compatibility syntax +ALTER TABLE TEST.TBL DROP INDEX TEST.NAME_INDEX; +> ok + +DROP SCHEMA TEST CASCADE; +> ok + +create table test(id int primary key, name varchar); +> ok + +alter table test alter column id int auto_increment; +> ok + +create table otherTest(id int primary key, name varchar); +> ok + +alter table otherTest add constraint fk foreign key(id) references test(id); +> ok + +-- MySQL compatibility syntax +alter table otherTest drop foreign key fk; +> ok + +create unique index idx on otherTest(name); +> ok + +-- MySQL compatibility syntax +alter table otherTest drop index idx; +> ok + +drop table test, otherTest; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql b/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql new file mode 100644 index 0000000000..4285f88c5f --- 
/dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropSchema.sql @@ -0,0 +1,149 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +@reconnect off + +CREATE SCHEMA TEST_SCHEMA; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE TABLE TEST_SCHEMA.TEST(); +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE VIEW TEST_SCHEMA.TEST AS SELECT 1; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE TABLE PUBLIC.SRC(); +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE SYNONYM TEST_SCHEMA.TEST FOR PUBLIC.SRC; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +DROP TABLE PUBLIC.SRC; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE SEQUENCE TEST_SCHEMA.TEST; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE CONSTANT TEST_SCHEMA.TEST VALUE 1; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE ALIAS TEST_SCHEMA.TEST FOR "java.lang.System.currentTimeMillis"; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> exception CANNOT_DROP_2 + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +-- Test computed column dependency + +CREATE TABLE A (A INT); +> ok + +CREATE TABLE B (B INT AS SELECT A FROM A); +> ok + +DROP ALL OBJECTS; +> ok + +CREATE SCHEMA TEST_SCHEMA; +> ok + +CREATE TABLE TEST_SCHEMA.A (A INT); +> ok + +CREATE TABLE TEST_SCHEMA.B (B INT AS SELECT A FROM TEST_SCHEMA.A); +> ok + +DROP SCHEMA TEST_SCHEMA CASCADE; +> ok + +CREATE SCHEMA A; +> ok + +CREATE TABLE A.A1(ID 
INT); +> ok + +CREATE SCHEMA B; +> ok + +CREATE TABLE B.B1(ID INT, X INT DEFAULT (SELECT MAX(ID) FROM A.A1)); +> ok + +DROP SCHEMA A CASCADE; +> exception CANNOT_DROP_2 + +DROP SCHEMA B CASCADE; +> ok + +DROP SCHEMA A CASCADE; +> ok + +CREATE SCHEMA A; +> ok + +CREATE TABLE A.A1(ID INT, X INT); +> ok + +CREATE TABLE A.A2(ID INT, X INT DEFAULT (SELECT MAX(ID) FROM A.A1)); +> ok + +ALTER TABLE A.A1 ALTER COLUMN X SET DEFAULT (SELECT MAX(ID) FROM A.A2); +> ok + +DROP SCHEMA A CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql b/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql new file mode 100644 index 0000000000..05a606a0a0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/dropTable.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +CREATE TABLE T2(ID2 INT, ID1 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +DROP TABLE T1 RESTRICT; +> exception CANNOT_DROP_2 + +DROP TABLE T1 CASCADE; +> ok + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +DROP TABLE T2 RESTRICT; +> ok + +CREATE VIEW V1 AS SELECT * FROM T1; +> ok + +DROP TABLE T1 RESTRICT; +> exception CANNOT_DROP_2 + +DROP TABLE T1 CASCADE; +> ok + +SELECT * FROM V1; +> exception TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 + +CREATE TABLE T1(ID1 INT); +> ok + +ALTER TABLE T1 ADD CONSTRAINT C1 CHECK ID1 > 0; +> ok + +DROP TABLE T1 RESTRICT; +> ok + +CREATE TABLE T1(ID1 INT PRIMARY KEY, ID2 INT); +> ok + +CREATE TABLE T2(ID2 INT PRIMARY KEY, ID1 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT C1 FOREIGN KEY(ID1) REFERENCES T1(ID1); +> ok + +ALTER TABLE T1 ADD CONSTRAINT C2 FOREIGN KEY(ID2) REFERENCES T2(ID2); +> ok + +DROP TABLE T1, T2 RESTRICT; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/ddl/grant.sql b/h2/src/test/org/h2/test/scripts/ddl/grant.sql new file mode 100644 index 0000000000..e3b7e159e9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/grant.sql @@ -0,0 +1,57 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST1(ID BIGINT PRIMARY KEY); +> ok + +CREATE MEMORY TABLE TEST2(ID BIGINT PRIMARY KEY); +> ok + +CREATE USER TEST_USER PASSWORD 'test'; +> ok + +GRANT SELECT, INSERT ON TEST1, TEST2 TO TEST_USER; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_USER" PASSWORD ''; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4C" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> GRANT SELECT, INSERT ON "PUBLIC"."TEST1" TO "TEST_USER"; +> GRANT SELECT, INSERT ON "PUBLIC"."TEST2" TO "TEST_USER"; +> rows (ordered): 10 + +REVOKE INSERT ON TEST1 FROM TEST_USER; +> ok + +REVOKE ALL ON TEST2 FROM TEST_USER; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_USER" PASSWORD ''; +> CREATE MEMORY TABLE "PUBLIC"."TEST1"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST1; +> CREATE 
MEMORY TABLE "PUBLIC"."TEST2"( "ID" BIGINT NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4C" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> GRANT SELECT ON "PUBLIC"."TEST1" TO "TEST_USER"; +> rows (ordered): 9 + +DROP USER TEST_USER; +> ok + +DROP TABLE TEST1, TEST2; +> ok diff --git a/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql b/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql new file mode 100644 index 0000000000..0ac0093f66 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/ddl/truncateTable.sql @@ -0,0 +1,189 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +create table FOO(id integer primary key); +> ok + +create table BAR(fooId integer); +> ok + +alter table bar add foreign key (fooId) references foo (id); +> ok + +truncate table bar; +> ok + +truncate table foo; +> exception CANNOT_TRUNCATE_1 + +drop table bar, foo; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'); +> update count: 2 + +TRUNCATE TABLE TEST; +> update count: 2 + +SELECT * FROM TEST; +> ID NAME +> -- ---- +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +CREATE TABLE CHILD(PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES PARENT(ID), NAME VARCHAR); +> ok + +TRUNCATE TABLE CHILD; +> ok + +TRUNCATE TABLE PARENT; +> exception CANNOT_TRUNCATE_1 + +DROP TABLE CHILD; +> ok + +DROP TABLE PARENT; +> ok + +CREATE SEQUENCE SEQ2; +> ok + +CREATE SEQUENCE SEQ3; +> ok + +CREATE TABLE TEST( + ID1 BIGINT AUTO_INCREMENT NOT NULL, + ID2 BIGINT NOT NULL DEFAULT NEXT VALUE FOR SEQ2 NULL_TO_DEFAULT SEQUENCE SEQ2, + ID3 BIGINT NOT NULL DEFAULT NEXT VALUE FOR SEQ3 NULL_TO_DEFAULT, + "VALUE" INT NOT NULL); +> ok + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST 
ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 1 1 +> 2 2 2 2 +> rows (ordered): 2 + +TRUNCATE TABLE TEST; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 3 3 3 1 +> 4 4 4 2 +> rows (ordered): 2 + +TRUNCATE TABLE TEST CONTINUE IDENTITY; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 5 5 5 1 +> 6 6 6 2 +> rows (ordered): 2 + +TRUNCATE TABLE TEST RESTART IDENTITY; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 7 1 +> 2 2 8 2 +> rows (ordered): 2 + +SET MODE MSSQLServer; +> ok + +TRUNCATE TABLE TEST; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 9 1 +> 2 2 10 2 +> rows (ordered): 2 + +SET MODE MySQL; +> ok + +TRUNCATE TABLE TEST; +> update count: 2 + +INSERT INTO TEST("VALUE") VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY "VALUE"; +> ID1 ID2 ID3 VALUE +> --- --- --- ----- +> 1 1 11 1 +> 2 2 12 2 +> rows (ordered): 2 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +DROP SEQUENCE SEQ3; +> ok + +CREATE TABLE TEST(ID INT GENERATED BY DEFAULT AS IDENTITY(MINVALUE 1 MAXVALUE 10 INCREMENT BY -1), V INT); +> ok + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 9 2 +> rows: 2 + +TRUNCATE TABLE TEST RESTART IDENTITY; +> update count: 2 + +INSERT INTO TEST(V) VALUES 1, 2; +> update count: 2 + +TABLE TEST; +> ID V +> -- - +> 10 1 +> 9 2 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/default-and-on_update.sql 
b/h2/src/test/org/h2/test/scripts/default-and-on_update.sql new file mode 100644 index 0000000000..aeb273792e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/default-and-on_update.sql @@ -0,0 +1,111 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE SEQUENCE SEQ; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT DEFAULT NEXT VALUE FOR SEQ ON UPDATE 1000 * NEXT VALUE FOR SEQ); +> ok + +INSERT INTO TEST(ID) VALUES (1), (2); +> update count: 2 + +SELECT * FROM TEST ORDER BY ID; +> ID V +> -- - +> 1 1 +> 2 2 +> rows (ordered): 2 + +UPDATE TEST SET ID = 3 WHERE ID = 2; +> update count: 1 + +SELECT * FROM TEST ORDER BY ID; +> ID V +> -- ---- +> 1 1 +> 3 3000 +> rows (ordered): 2 + +UPDATE TEST SET V = 3 WHERE ID = 3; +> update count: 1 + +SELECT * FROM TEST ORDER BY ID; +> ID V +> -- - +> 1 1 +> 3 3 +> rows (ordered): 2 + +ALTER TABLE TEST ADD V2 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP; +> ok + +UPDATE TEST SET V = 4 WHERE ID = 3; +> update count: 1 + +SELECT ID, V, LENGTH(V2) > 18 AS L FROM TEST ORDER BY ID; +> ID V L +> -- - ---- +> 1 1 null +> 3 4 TRUE +> rows (ordered): 2 + +UPDATE TEST SET V = 1 WHERE V = 1; +> update count: 1 + +SELECT ID, V, LENGTH(V2) > 18 AS L FROM TEST ORDER BY ID; +> ID V L +> -- - ---- +> 1 1 null +> 3 4 TRUE +> rows (ordered): 2 + +MERGE INTO TEST(ID, V) KEY(ID) VALUES (1, 1); +> update count: 1 + +SELECT ID, V, LENGTH(V2) > 18 AS L FROM TEST ORDER BY ID; +> ID V L +> -- - ---- +> 1 1 null +> 3 4 TRUE +> rows (ordered): 2 + +MERGE INTO TEST(ID, V) KEY(ID) VALUES (1, 2); +> update count: 1 + +SELECT ID, V, LENGTH(V2) > 18 AS L FROM TEST ORDER BY ID; +> ID V L +> -- - ---- +> 1 2 TRUE +> 3 4 TRUE +> rows (ordered): 2 + +ALTER TABLE TEST ALTER COLUMN V SET ON UPDATE NULL; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY 
COLUMN_NAME; +> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- ----------------------------- ----------------- +> ID null null +> V NEXT VALUE FOR "PUBLIC"."SEQ" NULL +> V2 null CURRENT_TIMESTAMP +> rows (ordered): 3 + +ALTER TABLE TEST ALTER COLUMN V DROP ON UPDATE; +> ok + +SELECT COLUMN_NAME, COLUMN_DEFAULT, COLUMN_ON_UPDATE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY COLUMN_NAME; +> COLUMN_NAME COLUMN_DEFAULT COLUMN_ON_UPDATE +> ----------- ----------------------------- ----------------- +> ID null null +> V NEXT VALUE FOR "PUBLIC"."SEQ" null +> V2 null CURRENT_TIMESTAMP +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +DROP SEQUENCE SEQ; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/delete.sql b/h2/src/test/org/h2/test/scripts/dml/delete.sql new file mode 100644 index 0000000000..60a7f792f0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/delete.sql @@ -0,0 +1,101 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT); +> ok + +INSERT INTO TEST VALUES (1), (2), (3); +> update count: 3 + +DELETE FROM TEST WHERE EXISTS (SELECT X FROM SYSTEM_RANGE(1, 3) WHERE X = ID) AND ROWNUM() = 1; +> update count: 1 + +SELECT ID FROM TEST; +> ID +> -- +> 2 +> 3 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY) AS SELECT * FROM SYSTEM_RANGE(1, 13); +> ok + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST ROW ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST ROWS ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT ROW ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT ROWS ONLY; +> update count: 1 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROW ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT 2 ROW ONLY; +> update count: 2 + +DELETE FROM TEST WHERE ID <= 12 FETCH NEXT 2 ROWS ONLY; +> update count: 2 + +EXPLAIN DELETE FROM TEST WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID <= 12 */ WHERE "ID" <= 12 FETCH FIRST 2 ROWS ONLY + +EXPLAIN DELETE FROM TEST FETCH FIRST 1 ROW ONLY; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY + +EXPLAIN DELETE FROM TEST; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +TABLE TEST; +>> 13 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(id int) AS SELECT x FROM system_range(1, 100); +> ok + +SET MODE MSSQLServer; +> ok + +DELETE TOP 10 FROM TEST; +> update count: 10 + +SET MODE Regular; +> ok + +SELECT COUNT(*) FROM TEST; +>> 90 + +DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10); +> update count: 9 + +SELECT COUNT(*) FROM TEST; +>> 81 + +EXPLAIN DELETE FROM TEST LIMIT ((SELECT COUNT(*) FROM TEST) / 10); +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST (SELECT COUNT(*) FROM "PUBLIC"."TEST" 
/* PUBLIC.TEST.tableScan */ /* direct lookup */) / 10 ROWS ONLY + +DELETE FROM TEST LIMIT ?; +{ +10 +}; +> update count: 10 + +SELECT COUNT(*) FROM TEST; +>> 71 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql b/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql new file mode 100644 index 0000000000..9da42977b0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/error_reporting.sql @@ -0,0 +1,43 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT 0x; +> exception SYNTAX_ERROR_2 + +SELECT 0xZ; +> exception SYNTAX_ERROR_2 + +SELECT 0xAAZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1LZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1234567890abZ; +> exception SYNTAX_ERROR_2 + +SELECT 0x1234567890abLZ; +> exception SYNTAX_ERROR_2 + +CREATE TABLE test (id INT NOT NULL, name VARCHAR); +> ok + +select * from test where id = ARRAY [1, 2]; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +insert into test values (1, 't'); +> update count: 1 + +select * from test where id = (1, 2); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +drop table test; +> ok + +SELECT 1 + 2 NOT; +> exception SYNTAX_ERROR_2 + +SELECT 1 NOT > 2; +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql b/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql new file mode 100644 index 0000000000..b3aa0057aa --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/execute_immediate.sql @@ -0,0 +1,33 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE MEMORY TABLE TEST(ID INT UNIQUE); +> ok + +EXECUTE IMMEDIATE 'INSERT INTO TEST VALUES ' || 1; +> update count: 1 + +EXECUTE IMMEDIATE 'INSERT INTO TEST2 VALUES 1'; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +EXECUTE IMMEDIATE 'SELECT 1'; +> exception SYNTAX_ERROR_2 + +EXECUTE IMMEDIATE 'ALTER TABLE TEST DROP CONSTRAINT ' || + QUOTE_IDENT((SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS + WHERE TABLE_SCHEMA = 'PUBLIC' AND TABLE_NAME = 'TEST' AND CONSTRAINT_TYPE = 'UNIQUE')); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ---------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/insert.sql b/h2/src/test/org/h2/test/scripts/dml/insert.sql new file mode 100644 index 0000000000..804fca813a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/insert.sql @@ -0,0 +1,150 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES ROW (1, 2), (3, 4), ROW (5, 6); +> update count: 3 + +INSERT INTO TEST(a) VALUES 7; +> update count: 1 + +INSERT INTO TEST(a) VALUES 8, 9; +> update count: 2 + +TABLE TEST; +> A B +> - ---- +> 1 2 +> 3 4 +> 5 6 +> 7 null +> 8 null +> 9 null +> rows: 6 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT); +> ok + +-- TODO Do we need _ROWID_ support here? 
+INSERT INTO TEST(_ROWID_, ID) VALUES (2, 3); +> update count: 1 + +SELECT _ROWID_, ID FROM TEST; +> _ROWID_ ID +> ------- -- +> 2 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT DEFAULT 5); +> ok + +INSERT INTO TEST VALUES (1, DEFAULT); +> update count: 1 + +INSERT INTO TEST SET A = 2, B = DEFAULT; +> update count: 1 + +TABLE TEST; +> A B +> - - +> 1 5 +> 2 5 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST(B) VALUES 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST VALUES (1, DEFAULT); +> update count: 1 + +INSERT INTO TEST DEFAULT VALUES; +> update count: 1 + +TABLE TEST; +> A B +> ---- ---- +> 1 2 +> null null +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID NUMERIC(20) GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (12345678901234567890, 1); +> update count: 1 + +TABLE TEST; +> ID V +> -------------------- - +> 12345678901234567890 1 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (10, 20); +> update count: 1 + +INSERT INTO TEST OVERRIDING USER VALUE VALUES (20, 30); +> update count: 1 + +INSERT INTO TEST OVERRIDING SYSTEM VALUE VALUES (30, 40); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 10 20 +> 30 40 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +INSERT INTO TEST VALUES (10, 20); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +INSERT INTO TEST OVERRIDING USER VALUE VALUES (20, 30); +> update count: 1 + +INSERT INTO TEST OVERRIDING SYSTEM VALUE VALUES (30, 40); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 30 40 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql b/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql new file mode 100644 index 0000000000..bdbf726a69 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/insertIgnore.sql @@ -0,0 +1,127 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SET MODE MySQL; +> ok + +CREATE TABLE TEST(ID BIGINT PRIMARY KEY, `VALUE` INT NOT NULL); +> ok + +INSERT INTO TEST VALUES (1, 10), (2, 20), (3, 30), (4, 40); +> update count: 4 + +INSERT INTO TEST VALUES (3, 31), (5, 51); +> exception DUPLICATE_KEY_1 + +SELECT * FROM TEST ORDER BY ID; +> ID VALUE +> -- ----- +> 1 10 +> 2 20 +> 3 30 +> 4 40 +> rows (ordered): 4 + +INSERT IGNORE INTO TEST VALUES (3, 32), (5, 52); +> update count: 1 + +INSERT IGNORE INTO TEST VALUES (4, 43); +> ok + +SELECT * FROM TEST ORDER BY ID; +> ID VALUE +> -- ----- +> 1 10 +> 2 20 +> 3 30 +> 4 40 +> 5 52 +> rows (ordered): 5 + +CREATE TABLE TESTREF(ID BIGINT PRIMARY KEY, `VALUE` INT NOT NULL); +> ok + +INSERT INTO TESTREF VALUES (1, 11), (2, 21), (6, 61), (7, 71); +> update count: 4 + +INSERT INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; +> exception DUPLICATE_KEY_1 + +SELECT * FROM TEST ORDER BY ID; +> ID VALUE +> -- ----- +> 1 10 +> 2 20 +> 3 30 +> 4 40 +> 5 52 +> rows (ordered): 5 + +INSERT IGNORE INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; +> update count: 2 + +INSERT IGNORE INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF; +> ok + +SELECT * FROM TEST ORDER BY ID; +> ID VALUE +> -- ----- +> 1 10 +> 2 20 +> 3 30 +> 4 40 +> 5 52 +> 6 61 +> 7 71 +> rows (ordered): 7 + +INSERT INTO TESTREF VALUES (8, 81), (9, 91); +> update count: 2 + +INSERT INTO TEST (ID, `VALUE`) SELECT ID, `VALUE` FROM TESTREF ON DUPLICATE KEY UPDATE `VALUE`=83; +> update count: 10 + +SELECT * FROM TEST ORDER BY ID; +> ID VALUE +> -- ----- +> 1 83 +> 2 83 +> 3 30 +> 4 40 +> 5 
52 +> 6 83 +> 7 83 +> 8 81 +> 9 91 +> rows (ordered): 9 + +SET MODE Regular; +> ok + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100) ON CONFLICT DO NOTHING; +> exception SYNTAX_ERROR_1 + +SET MODE PostgreSQL; +> ok + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100); +> exception DUPLICATE_KEY_1 + +INSERT INTO TEST (ID, `VALUE`) VALUES (9, 90), (10, 100) ON CONFLICT DO NOTHING; +> update count: 1 + +SELECT * FROM TEST WHERE ID >= 8 ORDER BY ID; +> ID VALUE +> -- ----- +> 8 81 +> 9 91 +> 10 100 +> rows (ordered): 3 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/merge.sql b/h2/src/test/org/h2/test/scripts/dml/merge.sql new file mode 100644 index 0000000000..93509d46a4 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/merge.sql @@ -0,0 +1,161 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table test(a int primary key, b int references(a)); +> ok + +merge into test values(1, 2); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +EXPLAIN SELECT * FROM TEST WHERE ID=1; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 + +EXPLAIN MERGE INTO TEST VALUES(1, 'Hello'); +>> MERGE INTO "PUBLIC"."TEST"("ID", "NAME") KEY("ID") VALUES (1, 'Hello') + +MERGE INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +MERGE INTO TEST VALUES(1, 'Hi'); +> update count: 1 + +MERGE INTO TEST VALUES(2, 'World'); +> update count: 1 + +MERGE INTO TEST VALUES(2, 'World!'); +> update count: 1 + +MERGE INTO TEST(ID, NAME) VALUES(3, 'How are you'); +> update count: 1 + +EXPLAIN MERGE INTO TEST(ID, NAME) VALUES(3, 'How are you'); +>> MERGE INTO "PUBLIC"."TEST"("ID", "NAME") KEY("ID") VALUES (3, 'How are you') + +MERGE INTO TEST(ID, NAME) KEY(ID) VALUES(3, 'How do you do'); +> update count: 1 + +EXPLAIN MERGE INTO TEST(ID, NAME) KEY(ID) VALUES(3, 'How do you do'); +>> MERGE INTO "PUBLIC"."TEST"("ID", "NAME") KEY("ID") VALUES (3, 'How do you do') + +MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(3, 'Fine'); +> exception DUPLICATE_KEY_1 + +MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(4, 'Fine!'); +> update count: 1 + +MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(4, 'Fine! And you'); +> exception DUPLICATE_KEY_1 + +MERGE INTO TEST(ID, NAME) KEY(NAME, ID) VALUES(5, 'I''m ok'); +> update count: 1 + +MERGE INTO TEST(ID, NAME) KEY(NAME, ID) VALUES(5, 'Oh, fine'); +> exception DUPLICATE_KEY_1 + +MERGE INTO TEST(ID, NAME) VALUES(6, 'Oh, fine.'); +> update count: 1 + +SELECT * FROM TEST; +> ID NAME +> -- ------------- +> 1 Hi +> 2 World! +> 3 How do you do +> 4 Fine! +> 5 I'm ok +> 6 Oh, fine. 
+> rows: 6 + +MERGE INTO TEST SELECT ID+4, NAME FROM TEST; +> update count: 6 + +SELECT * FROM TEST; +> ID NAME +> -- ------------- +> 1 Hi +> 10 Oh, fine. +> 2 World! +> 3 How do you do +> 4 Fine! +> 5 Hi +> 6 World! +> 7 How do you do +> 8 Fine! +> 9 I'm ok +> rows: 10 + +DROP TABLE TEST; +> ok + +-- Test for the index matching logic in org.h2.command.dml.Merge + +CREATE TABLE TEST(ID INT PRIMARY KEY, VALUE1 INT, VALUE2 INT, UNIQUE(VALUE1, VALUE2)); +> ok + +MERGE INTO TEST KEY (ID) VALUES (1, 2, 3), (2, 2, 3); +> exception DUPLICATE_KEY_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT DEFAULT 5); +> ok + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +TABLE TEST; +> A B +> - - +> 1 5 +> rows: 1 + +UPDATE TEST SET B = 1 WHERE A = 1; +> update count: 1 + +SELECT B FROM TEST WHERE A = 1; +>> 1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +SELECT B FROM TEST WHERE A = 1; +>> 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +MERGE INTO TEST KEY(A) VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +MERGE INTO TEST KEY(A) VALUES (1, 1); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST KEY(A) VALUES (1, DEFAULT); +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, G INT GENERATED ALWAYS AS (ID + 1)); +> ok + +MERGE INTO TEST(G) KEY(ID) VALUES (1); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql b/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql new file mode 100644 index 0000000000..051241645c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/mergeUsing.sql @@ -0,0 +1,541 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- +CREATE TABLE PARENT(ID INT, NAME VARCHAR, PRIMARY KEY(ID) ); +> ok + +MERGE INTO PARENT AS P + USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S + ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) + WHEN MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2; +> exception SYNTAX_ERROR_1 + +SET MODE Oracle; +> ok + +MERGE INTO PARENT AS P + USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S + ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) + WHEN MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2 + WHEN NOT MATCHED THEN + INSERT (ID, NAME) VALUES (S.ID, S.NAME); +> update count: 2 + +SELECT * FROM PARENT; +> ID NAME +> -- ----- +> 1 Coco1 +> 2 Coco2 +> rows: 2 + +EXPLAIN PLAN + MERGE INTO PARENT AS P + USING (SELECT X AS ID, 'Coco'||X AS NAME FROM SYSTEM_RANGE(1,2) ) AS S + ON (P.ID = S.ID AND 1=1 AND S.ID = P.ID) + WHEN MATCHED THEN + UPDATE SET P.NAME = S.NAME WHERE 2 = 2 + WHEN NOT MATCHED THEN + INSERT (ID, NAME) VALUES (S.ID, S.NAME); +>> MERGE INTO "PUBLIC"."PARENT" "P" /* PUBLIC.PRIMARY_KEY_8: ID = S.ID AND ID = S.ID */ USING ( SELECT "X" AS "ID", CONCAT('Coco', "X") AS "NAME" FROM SYSTEM_RANGE(1, 2) ) "S" /* SELECT X AS ID, CONCAT('Coco', X) AS NAME FROM SYSTEM_RANGE(1, 2) /* range index */ */ WHEN MATCHED THEN UPDATE SET "NAME" = "S"."NAME" WHEN NOT MATCHED THEN INSERT ("ID", "NAME") VALUES ("S"."ID", "S"."NAME") + +SET MODE Regular; +> ok + +DROP TABLE PARENT; +> ok + +CREATE SCHEMA SOURCESCHEMA; +> ok + +CREATE TABLE SOURCESCHEMA.SOURCE(ID INT PRIMARY KEY, "VALUE" INT); +> ok + +INSERT INTO SOURCESCHEMA.SOURCE VALUES (1, 10), (3, 30), (5, 50); +> update count: 3 + +CREATE SCHEMA DESTSCHEMA; +> ok + +CREATE TABLE DESTSCHEMA.DESTINATION(ID INT PRIMARY KEY, "VALUE" INT); +> ok + +INSERT INTO DESTSCHEMA.DESTINATION VALUES (3, 300), (6, 600); +> update count: 2 + +MERGE INTO DESTSCHEMA.DESTINATION USING SOURCESCHEMA.SOURCE ON (DESTSCHEMA.DESTINATION.ID = SOURCESCHEMA.SOURCE.ID) + WHEN MATCHED THEN 
UPDATE SET "VALUE" = SOURCESCHEMA.SOURCE."VALUE" + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (SOURCESCHEMA.SOURCE.ID, SOURCESCHEMA.SOURCE."VALUE"); +> update count: 3 + +SELECT * FROM DESTSCHEMA.DESTINATION; +> ID VALUE +> -- ----- +> 1 10 +> 3 30 +> 5 50 +> 6 600 +> rows: 4 + +DROP SCHEMA SOURCESCHEMA CASCADE; +> ok + +DROP SCHEMA DESTSCHEMA CASCADE; +> ok + +CREATE TABLE SOURCE_TABLE(ID BIGINT PRIMARY KEY, C1 INT NOT NULL); +> ok + +INSERT INTO SOURCE_TABLE VALUES (1, 10), (2, 20), (3, 30); +> update count: 3 + +CREATE TABLE DEST_TABLE(ID BIGINT PRIMARY KEY, C1 INT NOT NULL, C2 INT NOT NULL); +> ok + +INSERT INTO DEST_TABLE VALUES (2, 200, 2000), (4, 400, 4000); +> update count: 2 + +MERGE INTO DEST_TABLE USING SOURCE_TABLE ON (DEST_TABLE.ID = SOURCE_TABLE.ID) + WHEN MATCHED THEN UPDATE SET DEST_TABLE.C1 = SOURCE_TABLE.C1, DEST_TABLE.C2 = 100; +> update count: 1 + +SELECT * FROM DEST_TABLE; +> ID C1 C2 +> -- --- ---- +> 2 20 100 +> 4 400 4000 +> rows: 2 + +MERGE INTO DEST_TABLE D USING SOURCE_TABLE S ON (D.ID = S.ID) + WHEN MATCHED THEN UPDATE SET D.C1 = S.C1, D.C2 = 100 + WHEN NOT MATCHED THEN INSERT (ID, C1, C2) VALUES (S.ID, S.C1, 1000); +> update count: 3 + +SELECT * FROM DEST_TABLE; +> ID C1 C2 +> -- --- ---- +> 1 10 1000 +> 2 20 100 +> 3 30 1000 +> 4 400 4000 +> rows: 4 + +DROP TABLE SOURCE_TABLE; +> ok + +DROP TABLE DEST_TABLE; +> ok + +CREATE TABLE TEST(C1 INT, C2 INT, C3 INT); +> ok + +MERGE INTO TEST USING DUAL ON C1 = 11 AND C2 = 21 + WHEN NOT MATCHED THEN INSERT (C1, C2, C3) VALUES (11, 21, 31) + WHEN MATCHED THEN UPDATE SET C3 = 31; +> update count: 1 + +MERGE INTO TEST USING DUAL ON (C1 = 11 AND C2 = 22) + WHEN NOT MATCHED THEN INSERT (C1, C2, C3) VALUES (11, 22, 32) + WHEN MATCHED THEN UPDATE SET C3 = 32; +> update count: 1 + +SELECT * FROM TEST ORDER BY C1, C2; +> C1 C2 C3 +> -- -- -- +> 11 21 31 +> 11 22 32 +> rows (ordered): 2 + +MERGE INTO TEST USING DUAL ON C1 = 11 AND C2 = 21 + WHEN NOT MATCHED THEN INSERT (C1, C2, C3) VALUES (11, 21, 
33) + WHEN MATCHED THEN UPDATE SET C3 = 33; +> update count: 1 + +SELECT * FROM TEST ORDER BY C1, C2; +> C1 C2 C3 +> -- -- -- +> 11 21 33 +> 11 22 32 +> rows (ordered): 2 + +MERGE INTO TEST USING (SELECT 1 FROM DUAL) ON (C1 = 11 AND C2 = 21) + WHEN NOT MATCHED THEN INSERT (C1, C2, C3) VALUES (11, 21, 33) + WHEN MATCHED THEN UPDATE SET C3 = 34; +> update count: 1 + +SELECT * FROM TEST ORDER BY C1, C2; +> C1 C2 C3 +> -- -- -- +> 11 21 34 +> 11 22 32 +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (ID INT, "VALUE" INT); +> ok + +MERGE INTO TEST USING DUAL ON (ID = 1) + WHEN MATCHED THEN UPDATE SET "VALUE" = 1 + WHEN; +> exception SYNTAX_ERROR_2 + +MERGE INTO TEST USING DUAL ON (ID = 1) + WHEN MATCHED THEN UPDATE SET "VALUE" = 1 + WHEN NOT MATCHED THEN; +> exception SYNTAX_ERROR_2 + +MERGE INTO TEST USING DUAL ON (ID = 1) + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (1, 1) + WHEN; +> exception SYNTAX_ERROR_2 + +MERGE INTO TEST USING DUAL ON (ID = 1) + WHEN NOT MATCHED THEN INSERT (ID, "VALUE") VALUES (1, 1) + WHEN MATCHED THEN; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY); +> ok + +MERGE INTO TEST USING (SELECT CAST(? 
AS INT) ID FROM DUAL) S ON (TEST.ID = S.ID) + WHEN NOT MATCHED THEN INSERT (ID) VALUES (S.ID); +{ +10 +20 +30 +}; +> update count: 3 + +SELECT * FROM TEST; +> ID +> -- +> 10 +> 20 +> 30 +> rows: 3 + +MERGE INTO TEST USING (SELECT 40) ON UNKNOWN_COLUMN = 1 WHEN NOT MATCHED THEN INSERT (ID) VALUES (40); +> exception COLUMN_NOT_FOUND_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES (1, 10), (2, 20); +> update count: 2 + +MERGE INTO TEST USING (SELECT 1) ON (ID < 0) + WHEN MATCHED THEN UPDATE SET "VALUE" = 30 + WHEN NOT MATCHED THEN INSERT VALUES (3, 30); +> update count: 1 + +SELECT * FROM TEST; +> ID VALUE +> -- ----- +> 1 10 +> 2 20 +> 3 30 +> rows: 3 + +MERGE INTO TEST USING (SELECT 1) ON (ID = ID) + WHEN MATCHED THEN UPDATE SET "VALUE" = 40 + WHEN NOT MATCHED THEN INSERT VALUES (4, 40); +> update count: 3 + +SELECT * FROM TEST; +> ID VALUE +> -- ----- +> 1 40 +> 2 40 +> 3 40 +> rows: 3 + +MERGE INTO TEST USING (SELECT 1) ON (1 = 1) + WHEN MATCHED THEN UPDATE SET "VALUE" = 50 + WHEN NOT MATCHED THEN INSERT VALUES (5, 50); +> update count: 3 + +SELECT * FROM TEST; +> ID VALUE +> -- ----- +> 1 50 +> 2 50 +> 3 50 +> rows: 3 + +MERGE INTO TEST USING (SELECT 1) ON 1 = 1 + WHEN MATCHED THEN UPDATE SET "VALUE" = 60 WHERE ID = 3 DELETE WHERE ID = 2; +> exception SYNTAX_ERROR_1 + +MERGE INTO TEST USING (SELECT 1 A) ON 1 = 1 + WHEN MATCHED THEN DELETE WHERE ID = 2; +> exception SYNTAX_ERROR_1 + +SET MODE Oracle; +> ok + +MERGE INTO TEST USING (SELECT 1 A) ON 1 = 1 + WHEN MATCHED THEN DELETE WHERE ID = 2; +> update count: 1 + +SET MODE Regular; +> ok + +SELECT * FROM TEST; +> ID VALUE +> -- ----- +> 1 50 +> 3 50 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(ID INT, F BOOLEAN, "VALUE" INT); +> ok + +INSERT INTO T VALUES (1, FALSE, 10), (2, TRUE, 20); +> update count: 2 + +CREATE TABLE S(S_ID INT, S_F BOOLEAN, S_VALUE INT); +> ok + +INSERT INTO S VALUES (1, FALSE, 100), (2, TRUE, 200), (3, FALSE, 300), 
(4, TRUE, 400); +> update count: 4 + +MERGE INTO T USING S ON ID = S_ID + WHEN MATCHED AND F THEN UPDATE SET "VALUE" = S_VALUE + WHEN MATCHED AND NOT F THEN DELETE + WHEN NOT MATCHED AND S_F THEN INSERT VALUES (S_ID, S_F, S_VALUE); +> update count: 3 + +SELECT * FROM T; +> ID F VALUE +> -- ---- ----- +> 2 TRUE 200 +> 4 TRUE 400 +> rows: 2 + +DROP TABLE T, S; +> ok + +CREATE TABLE T(ID INT, A INT, B INT) AS VALUES (1, 1, 1), (2, 1, 2); +> ok + +CREATE TABLE S(ID INT, A INT, B INT) AS VALUES (1, 1, 3), (2, 1, 4); +> ok + +MERGE INTO T USING S ON T.A = S.A WHEN MATCHED THEN UPDATE SET B = S.B; +> exception DUPLICATE_KEY_1 + +CREATE TABLE S2(ID INT, A INT, B INT) AS VALUES (3, 3, 3); +> ok + +MERGE INTO T USING (SELECT * FROM S UNION SELECT * FROM S2) S ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET A = S.A, B = S.B + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B); +> update count: 3 + +TABLE T; +> ID A B +> -- - - +> 1 1 3 +> 2 1 4 +> 3 3 3 +> rows: 3 + +MERGE INTO T USING (S) ON T.ID = S.ID + WHEN MATCHED THEN UPDATE SET B = S.B + 1; +> update count: 2 + +TABLE T; +> ID A B +> -- - - +> 1 1 4 +> 2 1 5 +> 3 3 3 +> rows: 3 + +DROP TABLE T, S, S2 CASCADE; +> ok + +CREATE TABLE TEST(ID INT, V INT); +> ok + +MERGE INTO TEST USING VALUES (1, 2) S ON TEST.ID = S.C1 WHEN NOT MATCHED THEN INSERT VALUES (1, 2), (3, 4); +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(A INT); +> ok + +MERGE INTO T USING (SELECT 1 A) S ON (TRUE) +WHEN NOT MATCHED AND S.X THEN INSERT VALUES (1); +> exception COLUMN_NOT_FOUND_1 + +DROP TABLE T; +> ok + +CREATE TABLE A(ID INT, V INT) AS VALUES (1, 1), (2, 2); +> ok + +CREATE TABLE B(ID INT, V INT) AS VALUES (2, 4), (3, 6); +> ok + +MERGE INTO A USING (SELECT * FROM B) S + ON A.ID = S.ID + WHEN MATCHED THEN UPDATE SET V = S.V; +> update count: 1 + +TABLE A; +> ID V +> -- - +> 1 1 +> 2 4 +> rows: 2 + +DROP TABLE A, B; +> ok + +CREATE TABLE TARGET(ID INT, V INT); +> ok + +MERGE INTO TARGET T USING (VALUES (1, 2)) S(ID, V) + 
ON T.ID = S.ID + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> update count: 1 + +CREATE TABLE SOURCE(ID INT, V INT) AS VALUES (3, 4); +> ok + +MERGE INTO TARGET T USING SOURCE S(ID, V) + ON T.ID = S.ID + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V); +> update count: 1 + +TABLE TARGET; +> ID V +> -- - +> 1 2 +> 3 4 +> rows: 2 + +DROP TABLE SOURCE, TARGET; +> ok + +CREATE TABLE T(ID INT, V INT) AS VALUES (1, 1), (2, 2); +> ok + +MERGE INTO T USING (SELECT 1) ON (TRUE) + WHEN MATCHED THEN UPDATE SET V = 2 + WHEN MATCHED AND ID = 2 THEN UPDATE SET V = 3; +> update count: 2 + +TABLE T; +> ID V +> -- - +> 1 2 +> 2 2 +> rows: 2 + +TRUNCATE TABLE T; +> update count: 2 + +INSERT INTO T VALUES (1, 1); +> update count: 1 + +MERGE INTO T USING (SELECT 1) ON (ID = 1) + WHEN MATCHED THEN UPDATE SET V = 2 + WHEN MATCHED THEN UPDATE SET V = 3; +> update count: 1 + +TABLE T; +> ID V +> -- - +> 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (MERGE INTO T USING (SELECT 1) ON (ID = 1) + WHEN MATCHED THEN UPDATE SET V = 4 + WHEN MATCHED THEN UPDATE SET V = 5); +> ID V +> -- - +> 1 4 +> rows: 1 + +EXPLAIN MERGE INTO T USING (VALUES (1, 2)) S(ID, V) ON T.ID = S.ID + WHEN NOT MATCHED AND T.ID = 1 THEN INSERT VALUES (S.ID, S.V) + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.V + 1) + WHEN MATCHED AND T.ID = 2 THEN UPDATE SET V = S.ID + 2 + WHEN MATCHED THEN UPDATE SET V = S.ID + 3; +>> MERGE INTO "PUBLIC"."T" /* PUBLIC.T.tableScan */ USING (VALUES (1, 2)) "S"("ID", "V") /* table scan */ WHEN NOT MATCHED AND "T"."ID" = 1 THEN INSERT ("ID", "V") VALUES ("S"."ID", "S"."V") WHEN NOT MATCHED THEN INSERT ("ID", "V") VALUES ("S"."ID", "S"."V" + 1) WHEN MATCHED AND "T"."ID" = 2 THEN UPDATE SET "V" = "S"."ID" + 2 WHEN MATCHED THEN UPDATE SET "V" = "S"."ID" + 3 + +EXPLAIN MERGE INTO T USING (VALUES (1, 2)) S(ID, V) ON T.ID = S.ID + WHEN MATCHED AND T.ID = 1 THEN DELETE + WHEN MATCHED THEN DELETE; +>> MERGE INTO "PUBLIC"."T" /* PUBLIC.T.tableScan */ USING (VALUES (1, 2)) "S"("ID", "V") /* table 
scan */ WHEN MATCHED AND "T"."ID" = 1 THEN DELETE WHEN MATCHED THEN DELETE + +DROP TABLE T; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +MERGE INTO TEST USING (VALUES (10, 20)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (20, 30)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING USER VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (30, 40)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING SYSTEM VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 10 20 +> 30 40 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT); +> ok + +MERGE INTO TEST USING (VALUES (10, 20)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT VALUES(SOURCE.ID, SOURCE.V); +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +MERGE INTO TEST USING (VALUES (20, 30)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING USER VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +MERGE INTO TEST USING (VALUES (30, 40)) SOURCE(ID, V) ON TEST.ID = SOURCE.ID + WHEN NOT MATCHED THEN INSERT OVERRIDING SYSTEM VALUE VALUES(SOURCE.ID, SOURCE.V); +> update count: 1 + +TABLE TEST; +> ID V +> -- -- +> 1 30 +> 30 40 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/replace.sql b/h2/src/test/org/h2/test/scripts/dml/replace.sql new file mode 100644 index 0000000000..cad90d682b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/replace.sql @@ -0,0 +1,53 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SET MODE MySQL; +> ok + +CREATE TABLE TABLE_WORD ( + WORD_ID int(11) NOT NULL AUTO_INCREMENT, + WORD varchar(128) NOT NULL, + PRIMARY KEY (WORD_ID) +); +> ok + +REPLACE INTO TABLE_WORD(WORD) VALUES ('aaaaaaaaaa'); +> update count: 1 + +REPLACE INTO TABLE_WORD(WORD) VALUES ('bbbbbbbbbb'); +> update count: 1 + +REPLACE INTO TABLE_WORD(WORD_ID, WORD) VALUES (3, 'cccccccccc'); +> update count: 1 + +SELECT WORD FROM TABLE_WORD where WORD_ID = 1; +>> aaaaaaaaaa + +REPLACE INTO TABLE_WORD(WORD_ID, WORD) VALUES (1, 'REPLACED'); +> update count: 2 + +SELECT WORD FROM TABLE_WORD where WORD_ID = 1; +>> REPLACED + +REPLACE INTO TABLE_WORD(WORD) SELECT 'dddddddddd'; +> update count: 1 + +SELECT WORD FROM TABLE_WORD where WORD_ID = 4; +>> dddddddddd + +REPLACE INTO TABLE_WORD(WORD_ID, WORD) SELECT 1, 'REPLACED2'; +> update count: 2 + +SELECT WORD FROM TABLE_WORD where WORD_ID = 1; +>> REPLACED2 + +SET MODE Regular; +> ok + +REPLACE INTO TABLE_WORD(WORD) VALUES ('aaaaaaaaaa'); +> exception SYNTAX_ERROR_2 + +DROP TABLE TABLE_WORD; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/script.sql b/h2/src/test/org/h2/test/scripts/dml/script.sql new file mode 100644 index 0000000000..b0289136d9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/script.sql @@ -0,0 +1,142 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create memory table test(id int primary key, name varchar(255)); +> ok + +INSERT INTO TEST VALUES(2, STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); +> update count: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (2, U&'abcsond\00e4rzeich\00e4 \56ce \00f6\00e4\00fc\00d6\00c4\00dc\00e9\00e8\00e0\00f1!'); +> rows (ordered): 5 + +SCRIPT COLUMNS NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (2, U&'abcsond\00e4rzeich\00e4 \56ce \00f6\00e4\00fc\00d6\00c4\00dc\00e9\00e8\00e0\00f1!'); +> rows (ordered): 5 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, V INT, G INT GENERATED ALWAYS AS (V + 1)); +> ok + +INSERT INTO TEST(V) VALUES 5; +> update count: 1 + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT 
EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" BIGINT GENERATED ALWAYS AS IDENTITY(START WITH 1 RESTART WITH 2) NOT NULL, "V" INTEGER, "G" INTEGER GENERATED ALWAYS AS ("V" + 1) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST"("ID", "V") OVERRIDING SYSTEM VALUE VALUES (1, 5); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN C AS INT; +> ok + +CREATE DOMAIN B AS C; +> ok + +CREATE DOMAIN A AS B; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."C" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."B" AS "PUBLIC"."C"; +> CREATE DOMAIN "PUBLIC"."A" AS "PUBLIC"."B"; +> rows (ordered): 4 + +DROP DOMAIN A; +> ok + +DROP DOMAIN B; +> ok + +DROP DOMAIN C; +> ok + +CREATE DOMAIN A AS INT; +> ok + +CREATE DOMAIN B AS A; +> ok + +CREATE DOMAIN X AS INT; +> ok + +CREATE DOMAIN Y AS X; +> ok + +CREATE DOMAIN Z AS Y; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."A" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."X" AS INTEGER; +> CREATE DOMAIN "PUBLIC"."B" AS "PUBLIC"."A"; +> CREATE DOMAIN "PUBLIC"."Y" AS "PUBLIC"."X"; +> CREATE DOMAIN "PUBLIC"."Z" AS "PUBLIC"."Y"; +> rows (ordered): 6 + +DROP ALL OBJECTS; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE SCHEMA S3; +> ok + +CREATE DOMAIN S1.D1 AS INTEGER; +> ok + +CREATE DOMAIN S2.D2 AS S1.D1; +> ok + +CREATE DOMAIN S3.D3 AS S2.D2; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION SCHEMA S3; +> SCRIPT +> ---------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SCHEMA IF NOT EXISTS "S3" AUTHORIZATION "SA"; +> CREATE DOMAIN "S3"."D3" AS "S2"."D2"; +> rows (ordered): 3 + +DROP SCHEMA S3 CASCADE; +> ok + +DROP SCHEMA 
S2 CASCADE; +> ok + +DROP SCHEMA S1 CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/show.sql b/h2/src/test/org/h2/test/scripts/dml/show.sql new file mode 100644 index 0000000000..a6c2c13ef3 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/show.sql @@ -0,0 +1,113 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +------------------------------ +-- PostgreSQL compatibility -- +------------------------------ + +SHOW CLIENT_ENCODING; +> CLIENT_ENCODING +> --------------- +> UNICODE +> rows: 1 + +SHOW DEFAULT_TRANSACTION_ISOLATION; +> DEFAULT_TRANSACTION_ISOLATION +> ----------------------------- +> read committed +> rows: 1 + +SHOW TRANSACTION ISOLATION LEVEL; +> TRANSACTION_ISOLATION +> --------------------- +> read committed +> rows: 1 + +SHOW DATESTYLE; +> DATESTYLE +> --------- +> ISO +> rows: 1 + +SHOW SERVER_VERSION; +> SERVER_VERSION +> -------------- +> 8.2.23 +> rows: 1 + +SHOW SERVER_ENCODING; +> SERVER_ENCODING +> --------------- +> UTF8 +> rows: 1 + +------------------------- +-- MySQL compatibility -- +------------------------- + +CREATE TABLE TEST_P(ID_P INT PRIMARY KEY, U_P VARCHAR(255) UNIQUE, N_P INT DEFAULT 1); +> ok + +CREATE SCHEMA SCH; +> ok + +CREATE TABLE SCH.TEST_S(ID_S INT PRIMARY KEY, U_S VARCHAR(255) UNIQUE, N_S INT DEFAULT 1); +> ok + +SHOW TABLES; +> TABLE_NAME TABLE_SCHEMA +> ---------- ------------ +> TEST_P PUBLIC +> rows (ordered): 1 + +SHOW TABLES FROM PUBLIC; +> TABLE_NAME TABLE_SCHEMA +> ---------- ------------ +> TEST_P PUBLIC +> rows (ordered): 1 + +SHOW TABLES FROM SCH; +> TABLE_NAME TABLE_SCHEMA +> ---------- ------------ +> TEST_S SCH +> rows (ordered): 1 + +SHOW COLUMNS FROM TEST_P; +> FIELD TYPE NULL KEY DEFAULT +> ----- ---------------------- ---- --- ------- +> ID_P INTEGER NO PRI NULL +> U_P CHARACTER VARYING(255) YES UNI NULL +> N_P INTEGER YES 1 +> rows (ordered): 3 + +SHOW 
COLUMNS FROM TEST_S FROM SCH; +> FIELD TYPE NULL KEY DEFAULT +> ----- ---------------------- ---- --- ------- +> ID_S INTEGER NO PRI NULL +> U_S CHARACTER VARYING(255) YES UNI NULL +> N_S INTEGER YES 1 +> rows (ordered): 3 + +SHOW DATABASES; +> SCHEMA_NAME +> ------------------ +> INFORMATION_SCHEMA +> PUBLIC +> SCH +> rows: 3 + +SHOW SCHEMAS; +> SCHEMA_NAME +> ------------------ +> INFORMATION_SCHEMA +> PUBLIC +> SCH +> rows: 3 + +DROP TABLE TEST_P; +> ok + +DROP SCHEMA SCH CASCADE; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/update.sql b/h2/src/test/org/h2/test/scripts/dml/update.sql new file mode 100644 index 0000000000..7f67503625 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/update.sql @@ -0,0 +1,345 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 2); +> update count: 1 + +UPDATE TEST SET (A, B) = (3, 4); +> update count: 1 + +SELECT * FROM TEST; +> A B +> - - +> 3 4 +> rows: 1 + +UPDATE TEST SET (B) = 5; +> update count: 1 + +SELECT B FROM TEST; +>> 5 + +UPDATE TEST SET (B) = ROW (6); +> update count: 1 + +SELECT B FROM TEST; +>> 6 + +UPDATE TEST SET (B) = (7); +> update count: 1 + +SELECT B FROM TEST; +>> 7 + +UPDATE TEST SET (B) = (2, 3); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +-- TODO +-- UPDATE TEST SET (A, B) = ARRAY[3, 4]; +-- > exception COLUMN_COUNT_DOES_NOT_MATCH + +EXPLAIN UPDATE TEST SET (A) = ROW(3), B = 4; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "A" = 3, "B" = 4 + +EXPLAIN UPDATE TEST SET A = 3, (B) = 4; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "A" = 3, "B" = 4 + +UPDATE TEST SET (A, B) = (1, 2), (B, A) = (2, 1); +> exception DUPLICATE_COLUMN_NAME_1 + +UPDATE TEST SET (A) = A * 3; +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT) AS VALUES 100; +> ok + +-- _ROWID_ 
modifications are not allowed +UPDATE TEST SET _ROWID_ = 2 WHERE ID = 100; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST(A) VALUES 1; +> update count: 1 + +UPDATE TEST SET A = 2, B = DEFAULT; +> update count: 1 + +TABLE TEST; +> A B +> - - +> 2 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1)); +> ok + +INSERT INTO TEST(A) VALUES 1; +> update count: 1 + +UPDATE TEST SET B = 1; +> exception GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 + +UPDATE TEST SET B = DEFAULT; +> update count: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, A INT, B INT, C INT, D INT, E INT, F INT) AS VALUES (1, 1, 1, 1, 1, 1, 1); +> ok + +EXPLAIN UPDATE TEST SET + (F, C, A) = (SELECT 2, 3, 4 FROM TEST FETCH FIRST ROW ONLY), + (B, E) = (SELECT 5, 6 FROM TEST FETCH FIRST ROW ONLY) + WHERE ID = 1; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ SET ("F", "C", "A") = (SELECT 2, 3, 4 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY), ("B", "E") = (SELECT 5, 6 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY) WHERE "ID" = 1 + +UPDATE TEST SET + (F, C, A) = (SELECT 2, 3, 4 FROM TEST FETCH FIRST ROW ONLY), + (B, E) = (SELECT 5, 6 FROM TEST FETCH FIRST ROW ONLY) + WHERE ID = 1; +> update count: 1 + +TABLE TEST; +> ID A B C D E F +> -- - - - - - - +> 1 4 5 3 1 6 2 +> rows: 1 + +UPDATE TEST SET (C, C) = (SELECT 1, 2 FROM TEST); +> exception DUPLICATE_COLUMN_NAME_1 + +UPDATE TEST SET (A, B) = (SELECT 1, 2, 3 FROM TEST); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +UPDATE TEST SET (D, E) = NULL; +> exception DATA_CONVERSION_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT GENERATED ALWAYS AS IDENTITY, ID2 BIGINT GENERATED ALWAYS AS (ID + 1), + V INT, U INT ON UPDATE (5)); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - ---- 
+> 1 2 1 null +> rows: 1 + +UPDATE TEST SET V = V + 1; +> update count: 1 + +UPDATE TEST SET V = V + 1, ID = DEFAULT, ID2 = DEFAULT; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 3 5 +> rows: 1 + +MERGE INTO TEST USING (VALUES 1) T(X) ON TRUE WHEN MATCHED THEN UPDATE SET V = V + 1; +> update count: 1 + +MERGE INTO TEST USING (VALUES 1) T(X) ON TRUE WHEN MATCHED THEN UPDATE SET V = V + 1, ID = DEFAULT, ID2 = DEFAULT; +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 5 5 +> rows: 1 + +MERGE INTO TEST KEY(V) VALUES (DEFAULT, DEFAULT, 5, 1); +> update count: 1 + +TABLE TEST; +> ID ID2 V U +> -- --- - - +> 1 2 5 1 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE DOMAIN D AS BIGINT DEFAULT 100 ON UPDATE 200; +> ok + +CREATE TABLE TEST(ID D GENERATED BY DEFAULT AS IDENTITY, V INT, G D GENERATED ALWAYS AS (V + 1)); +> ok + +INSERT INTO TEST(V) VALUES 1; +> update count: 1 + +TABLE TEST; +> ID V G +> -- - - +> 1 1 2 +> rows: 1 + +UPDATE TEST SET V = 2; +> update count: 1 + +TABLE TEST; +> ID V G +> -- - - +> 1 2 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +DROP DOMAIN D; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT) AS VALUES (0, 0, 1), (0, 0, 3); +> ok + +CREATE TABLE S1(A INT, B INT) AS VALUES (1, 2); +> ok + +CREATE TABLE S2(A INT, B INT) AS VALUES (3, 4); +> ok + +UPDATE TEST SET (A, B) = (SELECT * FROM S1 WHERE C = A UNION SELECT * FROM S2 WHERE C = A); +> update count: 2 + +TABLE TEST; +> A B C +> - - - +> 1 2 1 +> 3 4 3 +> rows: 2 + +DROP TABLE TEST, S1, S2; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS SELECT X, X FROM SYSTEM_RANGE(1, 13); +> ok + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST ROW ONLY; +> update count: 1 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST ROWS ONLY; +> update count: 1 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT ROW ONLY; +> update count: 1 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT ROWS ONLY; +> update count: 1 + +UPDATE TEST SET V = V + 1 WHERE ID 
<= 12 FETCH FIRST 2 ROW ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT 2 ROW ONLY; +> update count: 2 + +UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH NEXT 2 ROWS ONLY; +> update count: 2 + +EXPLAIN UPDATE TEST SET V = V + 1 WHERE ID <= 12 FETCH FIRST 2 ROWS ONLY; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID <= 12 */ SET "V" = "V" + 1 WHERE "ID" <= 12 FETCH FIRST 2 ROWS ONLY + +EXPLAIN UPDATE TEST SET V = V + 1 FETCH FIRST 1 ROW ONLY; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 FETCH FIRST ROW ONLY + +EXPLAIN UPDATE TEST SET V = V + 1; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 + +SELECT SUM(V) FROM TEST; +>> 103 + +UPDATE TEST SET V = V + 1 FETCH FIRST 100 ROWS ONLY; +> update count: 13 + +SELECT SUM(V) FROM TEST; +>> 116 + +-- legacy syntax +EXPLAIN UPDATE TEST SET V = V + 1 LIMIT 2; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "V" = "V" + 1 FETCH FIRST 2 ROWS ONLY + +UPDATE TEST SET V = V + 1 LIMIT 2; +> update count: 2 + +SELECT SUM(V) FROM TEST; +>> 118 + +DROP TABLE TEST; +> ok + +CREATE TABLE FOO (ID INT, VAL VARCHAR) AS VALUES(1, 'foo1'), (2, 'foo2'), (3, 'foo3'); +> ok + +CREATE TABLE BAR (ID INT, VAL VARCHAR) AS VALUES(1, 'bar1'), (3, 'bar3'), (4, 'bar4'); +> ok + +SET MODE PostgreSQL; +> ok + +UPDATE FOO SET VAL = BAR.VAL FROM BAR WHERE FOO.ID = BAR.ID; +> update count: 2 + +TABLE FOO; +> ID VAL +> -- ---- +> 1 bar1 +> 2 foo2 +> 3 bar3 +> rows: 3 + +UPDATE FOO SET BAR.VAL = FOO.VAL FROM BAR WHERE FOO.ID = BAR.ID; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +SET MODE Regular; +> ok + +CREATE TABLE DEST(ID INT, X INT, Y INT); +> ok + +INSERT INTO DEST VALUES (1, 10, 11), (2, 20, 21); +> update count: 2 + +CREATE TABLE SRC(ID INT, X INT, Y INT); +> ok + +INSERT INTO SRC VALUES (1, 100, 101); +> update count: 1 + +UPDATE DEST SET (X, Y) = (SELECT X, Y FROM SRC 
WHERE SRC.ID = DEST.ID); +> update count: 2 + +TABLE DEST; +> ID X Y +> -- ---- ---- +> 1 100 101 +> 2 null null +> rows: 2 + +DROP TABLE SRC, DEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/dml/with.sql b/h2/src/test/org/h2/test/scripts/dml/with.sql new file mode 100644 index 0000000000..758127e770 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dml/with.sql @@ -0,0 +1,245 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +create table folder(id int primary key, name varchar(255), parent int); +> ok + +insert into folder values(1, null, null), (2, 'bin', 1), (3, 'docs', 1), (4, 'html', 3), (5, 'javadoc', 3), (6, 'ext', 1), (7, 'service', 1), (8, 'src', 1), (9, 'docsrc', 8), (10, 'installer', 8), (11, 'main', 8), (12, 'META-INF', 11), (13, 'org', 11), (14, 'h2', 13), (15, 'test', 8), (16, 'tools', 8); +> update count: 16 + +with link(id, name, level) as (select id, name, 0 from folder where parent is null union all select folder.id, ifnull(link.name || '/', '') || folder.name, level + 1 from link inner join folder on link.id = folder.parent) select name from link where name is not null order by cast(id as int); +> NAME +> ----------------- +> bin +> docs +> docs/html +> docs/javadoc +> ext +> service +> src +> src/docsrc +> src/installer +> src/main +> src/main/META-INF +> src/main/org +> src/main/org/h2 +> src/test +> src/tools +> rows (ordered): 15 + +drop table folder; +> ok + +explain with recursive r(n) as ( + (select 1) union all (select n+1 from r where n < 3) +) +select n from r; +>> WITH RECURSIVE "PUBLIC"."R"("N") AS ( (SELECT 1) UNION ALL (SELECT "N" + 1 FROM "PUBLIC"."R" /* PUBLIC.R.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."R" "R" /* null */ + +explain with recursive "r"(n) as ( + (select 1) union all (select n+1 from "r" where n < 3) +) +select n from "r"; +>> WITH RECURSIVE "PUBLIC"."r"("N") AS ( 
(SELECT 1) UNION ALL (SELECT "N" + 1 FROM "PUBLIC"."r" /* PUBLIC.r.tableScan */ WHERE "N" < 3) ) SELECT "N" FROM "PUBLIC"."r" "r" /* null */ + +select sum(n) from ( + with recursive r(n) as ( + (select 1) union all (select n+1 from r where n < 3) + ) + select n from r +); +>> 6 + +select sum(n) from ( + with recursive "r"(n) as ( + (select 1) union all (select n+1 from "r" where n < 3) + ) + select n from "r" +); +>> 6 + +select sum(n) from (select 0) join ( + with recursive r(n) as ( + (select 1) union all (select n+1 from r where n < 3) + ) + select n from r +) on 1=1; +>> 6 + +select 0 from ( + select 0 where 0 in ( + with recursive r(n) as ( + (select 1) union all (select n+1 from r where n < 3) + ) + select n from r + ) +); +> 0 +> - +> rows: 0 + +with + r0(n,k) as (select -1, 0), + r1(n,k) as ((select 1, 0) union all (select n+1,k+1 from r1 where n <= 3)), + r2(n,k) as ((select 10,0) union all (select n+1,k+1 from r2 where n <= 13)) + select r1.k, r0.n as N0, r1.n AS N1, r2.n AS n2 from r0 inner join r1 ON r1.k= r0.k inner join r2 ON r1.k= r2.k; +> K N0 N1 N2 +> - -- -- -- +> 0 -1 1 10 +> rows: 1 + +CREATE SCHEMA SCH; +> ok + +CREATE FORCE VIEW TABLE_EXPRESSION SCH.R1(N) AS +(SELECT 1) +UNION ALL +(SELECT (N + 1) FROM SCH.R1 WHERE N < 3); +> ok + +CREATE VIEW SCH.R2(N) AS +(SELECT 1) +UNION ALL +(SELECT (N + 1) FROM SCH.R1 WHERE N < 3); +> ok + +SELECT * FROM SCH.R2; +> N +> - +> 1 +> 2 +> 3 +> rows: 3 + +WITH CTE_TEST AS (SELECT 1, 2) SELECT * FROM CTE_TEST; +> 1 2 +> - - +> 1 2 +> rows: 1 + +WITH CTE_TEST AS (SELECT 1, 2) (SELECT * FROM CTE_TEST); +> 1 2 +> - - +> 1 2 +> rows: 1 + +WITH CTE_TEST AS (SELECT 1, 2) ((SELECT * FROM CTE_TEST)); +> 1 2 +> - - +> 1 2 +> rows: 1 + +CREATE TABLE TEST(A INT, B INT) AS SELECT 1, 2; +> ok + +WITH CTE_TEST AS (TABLE TEST) ((SELECT * FROM CTE_TEST)); +> A B +> - - +> 1 2 +> rows: 1 + +WITH CTE_TEST AS (TABLE TEST) ((TABLE CTE_TEST)); +> A B +> - - +> 1 2 +> rows: 1 + +WITH CTE_TEST AS (VALUES (1, 2)) ((SELECT * FROM 
CTE_TEST)); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +WITH CTE_TEST AS (TABLE TEST) ((SELECT A, B FROM CTE_TEST2)); +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +WITH CTE_TEST AS (TABLE TEST) ((SELECT A, B, C FROM CTE_TEST)); +> exception COLUMN_NOT_FOUND_1 + +DROP TABLE TEST; +> ok + +WITH RECURSIVE V(V1, V2) AS ( + SELECT 0 V1, 1 V2 + UNION ALL + SELECT V1 + 1, V2 + 1 FROM V WHERE V2 < 4 +) +SELECT V1, V2, COUNT(*) FROM V +LEFT JOIN (SELECT T1 / T2 R FROM (VALUES (10, 0)) T(T1, T2) WHERE T2*T2*T2*T2*T2*T2 <> 0) X ON X.R > V.V1 AND X.R < V.V2 +GROUP BY V1, V2; +> V1 V2 COUNT(*) +> -- -- -------- +> 0 1 1 +> 1 2 1 +> 2 3 1 +> 3 4 1 +> rows: 4 + +EXPLAIN WITH RECURSIVE V(V1, V2) AS ( + SELECT 0 V1, 1 V2 + UNION ALL + SELECT V1 + 1, V2 + 1 FROM V WHERE V2 < 10 +) +SELECT V1, V2, COUNT(*) FROM V +LEFT JOIN (SELECT T1 / T2 R FROM (VALUES (10, 0)) T(T1, T2) WHERE T2*T2*T2*T2*T2*T2 <> 0) X ON X.R > V.V1 AND X.R < V.V2 +GROUP BY V1, V2; +>> WITH RECURSIVE "PUBLIC"."V"("V1", "V2") AS ( (SELECT 0 AS "V1", 1 AS "V2") UNION ALL (SELECT "V1" + 1, "V2" + 1 FROM "PUBLIC"."V" /* PUBLIC.V.tableScan */ WHERE "V2" < 10) ) SELECT "V1", "V2", COUNT(*) FROM "PUBLIC"."V" "V" /* null */ LEFT OUTER JOIN ( SELECT "T1" / "T2" AS "R" FROM (VALUES (10, 0)) "T"("T1", "T2") WHERE ((((("T2" * "T2") * "T2") * "T2") * "T2") * "T2") <> 0 ) "X" /* SELECT T1 / T2 AS R FROM (VALUES (10, 0)) T(T1, T2) /* table scan */ WHERE ((((((T2 * T2) * T2) * T2) * T2) * T2) <> 0) _LOCAL_AND_GLOBAL_ (((T1 / T2) >= ?1) AND ((T1 / T2) <= ?2)): R > V.V1 AND R < V.V2 */ ON ("X"."R" > "V"."V1") AND ("X"."R" < "V"."V2") GROUP BY "V1", "V2" + +-- Data change delta tables in WITH +CREATE TABLE TEST("VALUE" INT NOT NULL PRIMARY KEY); +> ok + +WITH W AS (SELECT NULL FROM FINAL TABLE (INSERT INTO TEST VALUES 1, 2)) +SELECT COUNT (*) FROM W; +>> 2 + +WITH W AS (SELECT NULL FROM FINAL TABLE (UPDATE TEST SET "VALUE" = 3 WHERE "VALUE" = 2)) +SELECT COUNT (*) FROM W; +>> 1 + +WITH W AS (SELECT NULL FROM FINAL TABLE (MERGE INTO TEST 
VALUES 4, 5)) +SELECT COUNT (*) FROM W; +>> 2 + +WITH W AS (SELECT NULL FROM OLD TABLE (DELETE FROM TEST WHERE "VALUE" = 4)) +SELECT COUNT (*) FROM W; +>> 1 + +SET MODE MySQL; +> ok + +WITH W AS (SELECT NULL FROM FINAL TABLE (REPLACE INTO TEST VALUES 4, 5)) +SELECT COUNT (*) FROM W; +>> 2 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE T(C INT); +> ok + +INSERT INTO T WITH W(C) AS (VALUES 1) SELECT C FROM W; +> update count: 1 + +TABLE W; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +TABLE T; +>> 1 + +DROP TABLE T; +> ok + +WITH T(X) AS (SELECT 1) +(SELECT 2 Y) UNION (SELECT 3 Z) UNION (SELECT * FROM T); +> Y +> - +> 1 +> 2 +> 3 +> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/dual.sql b/h2/src/test/org/h2/test/scripts/dual.sql new file mode 100644 index 0000000000..9df679a474 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/dual.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT * FROM DUAL; +> +> +> +> rows: 1 + +CREATE TABLE DUAL(A INT); +> ok + +INSERT INTO DUAL VALUES (2); +> update count: 1 + +SELECT A FROM DUAL; +>> 2 + +SELECT * FROM SYS.DUAL; +> +> +> +> rows: 1 + +DROP TABLE DUAL; +> ok + +SET MODE DB2; +> ok + +SELECT * FROM SYSDUMMY1; +> +> +> +> rows: 1 + +CREATE TABLE SYSDUMMY1(A INT); +> ok + +INSERT INTO SYSDUMMY1 VALUES (2); +> update count: 1 + +SELECT A FROM SYSDUMMY1; +>> 2 + +SELECT * FROM SYSIBM.SYSDUMMY1; +> +> +> +> rows: 1 + +DROP TABLE SYSDUMMY1; +> ok + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql new file mode 100644 index 0000000000..41b27d5731 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/any.sql @@ -0,0 +1,33 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 3), (2, 1), (2, 5), (3, 4); +> update count: 5 + +SELECT A, ANY(B < 2), SOME(B > 3), BOOL_OR(B = 1), ANY(B = 1) FILTER (WHERE A = 1) FROM TEST GROUP BY A; +> A ANY(B < 2) ANY(B > 3) ANY(B = 1) ANY(B = 1) FILTER (WHERE A = 1) +> - ---------- ---------- ---------- ------------------------------- +> 1 TRUE FALSE TRUE TRUE +> 2 TRUE TRUE TRUE null +> 3 FALSE TRUE FALSE null +> rows: 3 + +DROP TABLE TEST; +> ok + +SELECT TRUE = (ANY((SELECT X > 0 FROM SYSTEM_RANGE(1, 1)))); +> TRUE = (ANY((SELECT X > 0 FROM SYSTEM_RANGE(1, 1)))) +> ---------------------------------------------------- +> TRUE +> rows: 1 + +SELECT TRUE = (ANY((SELECT X < 0 FROM SYSTEM_RANGE(1, 1)))); +> TRUE = (ANY((SELECT X < 0 FROM SYSTEM_RANGE(1, 1)))) +> ---------------------------------------------------- +> FALSE +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql new file mode 100644 index 0000000000..ab39ce4b3e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/array_agg.sql @@ -0,0 +1,678 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: Alex Nordlund +-- + +-- with filter condition + +create table test(v varchar); +> ok + +insert into test values ('1'), ('2'), ('3'), ('4'), ('5'), ('6'), ('7'), ('8'), ('9'); +> update count: 9 + +select array_agg(v order by v asc), + array_agg(v order by v desc) filter (where v >= '4') + from test where v >= '2'; +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> ------------------------ ---------------------------------------------------- +> [2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] +> rows: 1 + +create index test_idx on test(v); +> ok + +select ARRAY_AGG(v order by v asc), + ARRAY_AGG(v order by v desc) filter (where v >= '4') + from test where v >= '2'; +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> ------------------------ ---------------------------------------------------- +> [2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] +> rows: 1 + +select ARRAY_AGG(v order by v asc), + ARRAY_AGG(v order by v desc) filter (where v >= '4') + from test; +> ARRAY_AGG(V ORDER BY V) ARRAY_AGG(V ORDER BY V DESC) FILTER (WHERE V >= '4') +> --------------------------- ---------------------------------------------------- +> [1, 2, 3, 4, 5, 6, 7, 8, 9] [9, 8, 7, 6, 5, 4] +> rows: 1 + +drop table test; +> ok + +create table test (id int auto_increment primary key, v int); +> ok + +insert into test(v) values (7), (2), (8), (3), (7), (3), (9), (-1); +> update count: 8 + +select array_agg(v) from test; +> ARRAY_AGG(V) +> ------------------------- +> [7, 2, 8, 3, 7, 3, 9, -1] +> rows: 1 + +select array_agg(distinct v) from test; +> ARRAY_AGG(DISTINCT V) +> --------------------- +> [-1, 2, 3, 7, 8, 9] +> rows: 1 + +select array_agg(distinct v order by v desc) from test; +> ARRAY_AGG(DISTINCT V ORDER BY V DESC) +> ------------------------------------- +> [9, 8, 7, 3, 2, -1] +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST (ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES 
(1, 'a'), (2, 'a'), (3, 'b'), (4, 'c'), (5, 'c'), (6, 'c'); +> update count: 6 + +SELECT ARRAY_AGG(ID), NAME FROM TEST; +> exception MUST_GROUP_BY_COLUMN_1 + +SELECT ARRAY_AGG(ID ORDER BY ID), NAME FROM TEST GROUP BY NAME; +> ARRAY_AGG(ID ORDER BY ID) NAME +> ------------------------- ---- +> [1, 2] a +> [3] b +> [4, 5, 6] c +> rows: 3 + +SELECT ARRAY_AGG(ID ORDER BY ID) OVER (), NAME FROM TEST; +> ARRAY_AGG(ID ORDER BY ID) OVER () NAME +> --------------------------------- ---- +> [1, 2, 3, 4, 5, 6] a +> [1, 2, 3, 4, 5, 6] a +> [1, 2, 3, 4, 5, 6] b +> [1, 2, 3, 4, 5, 6] c +> [1, 2, 3, 4, 5, 6] c +> [1, 2, 3, 4, 5, 6] c +> rows: 6 + +SELECT ARRAY_AGG(ID ORDER BY ID) OVER (PARTITION BY NAME), NAME FROM TEST; +> ARRAY_AGG(ID ORDER BY ID) OVER (PARTITION BY NAME) NAME +> -------------------------------------------------- ---- +> [1, 2] a +> [1, 2] a +> [3] b +> [4, 5, 6] c +> [4, 5, 6] c +> [4, 5, 6] c +> rows: 6 + +SELECT + ARRAY_AGG(ID ORDER BY ID) FILTER (WHERE ID < 3 OR ID > 4) OVER (PARTITION BY NAME) A, + ARRAY_AGG(ID ORDER BY ID) FILTER (WHERE ID < 3 OR ID > 4) OVER (PARTITION BY NAME ORDER BY ID) AO, + ID, NAME FROM TEST ORDER BY ID; +> A AO ID NAME +> ------ ------ -- ---- +> [1, 2] [1] 1 a +> [1, 2] [1, 2] 2 a +> null null 3 b +> [5, 6] null 4 c +> [5, 6] [5] 5 c +> [5, 6] [5, 6] 6 c +> rows (ordered): 6 + +SELECT + ARRAY_AGG(ID ORDER BY ID) FILTER (WHERE ID < 3 OR ID > 4) + OVER (ORDER BY ID ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) A, + ID FROM TEST ORDER BY ID; +> A ID +> ------ -- +> [1, 2] 1 +> [1, 2] 2 +> [2] 3 +> [5] 4 +> [5, 6] 5 +> [5, 6] 6 +> rows (ordered): 6 + +SELECT ARRAY_AGG(SUM(ID)) OVER () FROM TEST; +> ARRAY_AGG(SUM(ID)) OVER () +> -------------------------- +> [21] +> rows: 1 + +SELECT ARRAY_AGG(ID ORDER BY ID) OVER() FROM TEST GROUP BY ID ORDER BY ID; +> ARRAY_AGG(ID ORDER BY ID) OVER () +> --------------------------------- +> [1, 2, 3, 4, 5, 6] +> [1, 2, 3, 4, 5, 6] +> [1, 2, 3, 4, 5, 6] +> [1, 2, 3, 4, 5, 6] +> [1, 2, 3, 4, 5, 6] +> [1, 
2, 3, 4, 5, 6] +> rows (ordered): 6 + +SELECT ARRAY_AGG(NAME) OVER(PARTITION BY NAME) FROM TEST GROUP BY NAME; +> ARRAY_AGG(NAME) OVER (PARTITION BY NAME) +> ---------------------------------------- +> [a] +> [b] +> [c] +> rows: 3 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM TEST GROUP BY NAME; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) NAME +> ------------------------------------------------------------- ---- +> [[1, 2]] a +> [[3]] b +> [[4, 5, 6]] c +> rows: 3 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM TEST + WHERE ID <> 5 + GROUP BY NAME HAVING ARRAY_AGG(ID ORDER BY ID)[1] > 1 + QUALIFY ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) <> ARRAY[ARRAY[3]]; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) NAME +> ------------------------------------------------------------- ---- +> [[4, 6]] c +> rows: 1 + +EXPLAIN + SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM TEST + WHERE ID <> 5 + GROUP BY NAME HAVING ARRAY_AGG(ID ORDER BY ID)[1] > 1 + QUALIFY ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) <> ARRAY[ARRAY[3]]; +>> SELECT ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME"), "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "ID" <> 5 GROUP BY "NAME" HAVING ARRAY_AGG("ID" ORDER BY "ID")[1] > 1 QUALIFY ARRAY_AGG(ARRAY_AGG("ID" ORDER BY "ID")) OVER (PARTITION BY "NAME") <> ARRAY [ARRAY [3]] + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME), NAME FROM TEST + GROUP BY NAME ORDER BY NAME OFFSET 1 ROW; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) OVER (PARTITION BY NAME) NAME +> ------------------------------------------------------------- ---- +> [[3]] b +> [[4, 5, 6]] c +> rows (ordered): 2 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER (PARTITION BY NAME), NAME FROM TEST + GROUP BY NAME ORDER BY NAME; +> 
ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER (PARTITION BY NAME) NAME +> --------------------------------------------------------------------------------------- ---- +> null a +> null b +> [[4, 5, 6]] c +> rows (ordered): 3 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER (PARTITION BY NAME), NAME FROM TEST + GROUP BY NAME ORDER BY NAME; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER (PARTITION BY NAME) NAME +> --------------------------------------------------------------------------------------- ---- +> null a +> null b +> null c +> rows (ordered): 3 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER () FROM TEST GROUP BY NAME ORDER BY NAME; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'b') OVER () +> ---------------------------------------------------------------------- +> [[4, 5, 6]] +> [[4, 5, 6]] +> [[4, 5, 6]] +> rows (ordered): 3 + +SELECT ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER () FROM TEST GROUP BY NAME ORDER BY NAME; +> ARRAY_AGG(ARRAY_AGG(ID ORDER BY ID)) FILTER (WHERE NAME > 'c') OVER () +> ---------------------------------------------------------------------- +> null +> null +> null +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER() FROM TEST GROUP BY NAME; +> exception MUST_GROUP_BY_COLUMN_1 + +SELECT ARRAY_AGG(ID) OVER(PARTITION BY NAME ORDER BY ID), NAME FROM TEST; +> ARRAY_AGG(ID) OVER (PARTITION BY NAME ORDER BY ID) NAME +> -------------------------------------------------- ---- +> [1, 2] a +> [1] a +> [3] b +> [4, 5, 6] c +> [4, 5] c +> [4] c +> rows: 6 + +SELECT ARRAY_AGG(ID) OVER(PARTITION BY NAME ORDER BY ID DESC), NAME FROM TEST; +> ARRAY_AGG(ID) OVER (PARTITION BY NAME ORDER BY ID DESC) NAME +> ------------------------------------------------------- ---- +> [2, 1] a +> [2] a +> [3] b +> [6, 5, 4] c +> [6, 5] c +> [6] c +> rows: 6 + +SELECT + ARRAY_AGG(ID ORDER BY ID) OVER(PARTITION BY NAME ORDER 
BY ID DESC) A, + ARRAY_AGG(ID) OVER(PARTITION BY NAME ORDER BY ID DESC) D, + NAME FROM TEST; +> A D NAME +> --------- --------- ---- +> [1, 2] [2, 1] a +> [2] [2] a +> [3] [3] b +> [4, 5, 6] [6, 5, 4] c +> [5, 6] [6, 5] c +> [6] [6] c +> rows: 6 + +SELECT ARRAY_AGG(SUM(ID)) OVER(ORDER BY ID) FROM TEST GROUP BY ID; +> ARRAY_AGG(SUM(ID)) OVER (ORDER BY ID) +> ------------------------------------- +> [1, 2, 3, 4, 5, 6] +> [1, 2, 3, 4, 5] +> [1, 2, 3, 4] +> [1, 2, 3] +> [1, 2] +> [1] +> rows: 6 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, G INT); +> ok + +INSERT INTO TEST VALUES + (1, 1), + (2, 2), + (3, 2), + (4, 2), + (5, 3); +> update count: 5 + +SELECT + ARRAY_AGG(ID) OVER (ORDER BY G RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) D, + ARRAY_AGG(ID) OVER (ORDER BY G RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) R, + ARRAY_AGG(ID) OVER (ORDER BY G RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) G, + ARRAY_AGG(ID) OVER (ORDER BY G RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) T, + ARRAY_AGG(ID) OVER (ORDER BY G RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE NO OTHERS) N + FROM TEST; +> D R G T N +> --------------- ------------ ------------ --------------- --------------- +> [1, 2, 3, 4, 5] [1, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4, 5] [1, 2, 3, 4, 5] +> [1, 2, 3, 4, 5] [1, 2, 3, 5] [1, 5] [1, 4, 5] [1, 2, 3, 4, 5] +> [1, 2, 3, 4, 5] [1, 2, 4, 5] [1, 5] [1, 3, 5] [1, 2, 3, 4, 5] +> [1, 2, 3, 4, 5] [1, 3, 4, 5] [1, 5] [1, 2, 5] [1, 2, 3, 4, 5] +> [1, 2, 3, 4, 5] [2, 3, 4, 5] [2, 3, 4, 5] [1, 2, 3, 4, 5] [1, 2, 3, 4, 5] +> rows: 5 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES + (1, 1), + (2, 1), + (3, 5), + (4, 8), + (5, 8), + (6, 8), + (7, 9), + (8, 9); +> update count: 8 + +SELECT *, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_ID, + ARRAY_AGG("VALUE") OVER 
(ORDER BY "VALUE" ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) R_V, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_ID, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" DESC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) V_V_R, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_ID, + ARRAY_AGG("VALUE") OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) G_V + FROM TEST; +> ID VALUE R_ID R_V V_ID V_V V_V_R G_ID G_V +> -- ----- --------- --------- --------------- --------------- --------------- ------------------ ------------------ +> 1 1 [1, 2] [1, 1] [1, 2] [1, 1] [1, 1] [1, 2, 3] [1, 1, 5] +> 2 1 [1, 2, 3] [1, 1, 5] [1, 2] [1, 1] [1, 1] [1, 2, 3] [1, 1, 5] +> 3 5 [2, 3, 4] [1, 5, 8] [3] [5] [5] [1, 2, 3, 4, 5, 6] [1, 1, 5, 8, 8, 8] +> 4 8 [3, 4, 5] [5, 8, 8] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] [9, 9, 8, 8, 8] [3, 4, 5, 6, 7, 8] [5, 8, 8, 8, 9, 9] +> 5 8 [4, 5, 6] [8, 8, 8] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] [9, 9, 8, 8, 8] [3, 4, 5, 6, 7, 8] [5, 8, 8, 8, 9, 9] +> 6 8 [5, 6, 7] [8, 8, 9] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] [9, 9, 8, 8, 8] [3, 4, 5, 6, 7, 8] [5, 8, 8, 8, 9, 9] +> 7 9 [6, 7, 8] [8, 9, 9] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] [9, 9, 8, 8, 8] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] +> 8 9 [7, 8] [9, 9] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] [9, 9, 8, 8, 8] [4, 5, 6, 7, 8] [8, 8, 8, 9, 9] +> rows: 8 + +SELECT *, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) A1, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) A2 + FROM TEST; +> ID VALUE A1 A2 +> -- ----- ------------------------ ------------------------ +> 1 1 [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] +> 2 1 [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] +> 3 5 [3, 4, 5, 6, 7, 8] [1, 2, 3] +> 4 8 [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] +> 5 8 [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] +> 6 8 [4, 5, 6, 
7, 8] [1, 2, 3, 4, 5, 6, 7, 8] +> 7 9 [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] +> 8 9 [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] +> rows: 8 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS -1 PRECEDING) FROM TEST; +> exception INVALID_PRECEDING_OR_FOLLOWING_1 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM TEST FETCH FIRST 4 ROWS ONLY; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY ID ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) +> -- ----- ------------------------------------------------------------------------- +> 1 1 null +> 2 1 [1] +> 3 5 [1, 2] +> 4 8 [2, 3] +> rows: 4 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM TEST OFFSET 4 ROWS; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY ID ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) +> -- ----- ------------------------------------------------------------------------- +> 5 8 [6, 7] +> 6 8 [7, 8] +> 7 9 [8] +> 8 9 null +> rows: 4 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM TEST FETCH FIRST 4 ROWS ONLY; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) +> -- ----- -------------------------------------------------------------------------- +> 1 1 null +> 2 1 [1] +> 3 5 [1, 2] +> 4 8 [2, 3] +> rows: 4 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM TEST OFFSET 4 ROWS; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) +> -- ----- -------------------------------------------------------------------------- +> 5 8 [6, 7] +> 6 8 [7, 8] +> 7 9 [8] +> 8 9 null +> rows: 4 + +SELECT *, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING) N, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 0 FOLLOWING EXCLUDE TIES) T1 + FROM TEST; +> ID VALUE N T 
T1 +> -- ----- --------- --- ------------ +> 1 1 [1, 2] [1] [1] +> 2 1 [1, 2] [2] [2] +> 3 5 [3] [3] [1, 2, 3] +> 4 8 [4, 5, 6] [4] [3, 4] +> 5 8 [4, 5, 6] [5] [3, 5] +> 6 8 [4, 5, 6] [6] [3, 6] +> 7 9 [7, 8] [7] [4, 5, 6, 7] +> 8 9 [7, 8] [8] [4, 5, 6, 8] +> rows: 8 + +SELECT *, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) U_P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 2 PRECEDING AND 1 PRECEDING) P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) F, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) U_F + FROM TEST; +> ID VALUE U_P P F U_F +> -- ----- ------------------ ------------ --------------- ------------------ +> 1 1 null null [3, 4, 5, 6] [3, 4, 5, 6, 7, 8] +> 2 1 null null [3, 4, 5, 6] [3, 4, 5, 6, 7, 8] +> 3 5 [1, 2] [1, 2] [4, 5, 6, 7, 8] [4, 5, 6, 7, 8] +> 4 8 [1, 2, 3] [1, 2, 3] [7, 8] [7, 8] +> 5 8 [1, 2, 3] [1, 2, 3] [7, 8] [7, 8] +> 6 8 [1, 2, 3] [1, 2, 3] [7, 8] [7, 8] +> 7 9 [1, 2, 3, 4, 5, 6] [3, 4, 5, 6] null null +> 8 9 [1, 2, 3, 4, 5, 6] [3, 4, 5, 6] null null +> rows: 8 + +SELECT *, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 1 PRECEDING AND 0 PRECEDING) P, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN 0 FOLLOWING AND 1 FOLLOWING) F + FROM TEST; +> ID VALUE P F +> -- ----- --------------- --------------- +> 1 1 [1, 2] [1, 2, 3] +> 2 1 [1, 2] [1, 2, 3] +> 3 5 [1, 2, 3] [3, 4, 5, 6] +> 4 8 [3, 4, 5, 6] [4, 5, 6, 7, 8] +> 5 8 [3, 4, 5, 6] [4, 5, 6, 7, 8] +> 6 8 [3, 4, 5, 6] [4, 5, 6, 7, 8] +> 7 9 [4, 5, 6, 7, 8] [7, 8] +> 8 9 [4, 5, 6, 7, 8] [7, 8] +> rows: 8 + +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE GROUP) G, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE TIES) T + FROM TEST; +> ID VALUE G T +> -- ----- ------------ --------------- +> 1 1 [3] [1, 3] +> 2 1 [3, 4] [2, 3, 4] 
+> 3 5 [1, 2, 4, 5] [1, 2, 3, 4, 5] +> 4 8 [2, 3] [2, 3, 4] +> 5 8 [3, 7] [3, 5, 7] +> 6 8 [7, 8] [6, 7, 8] +> 7 9 [5, 6] [5, 6, 7] +> 8 9 [6] [6, 8] +> rows: 8 + +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER(ORDER BY "VALUE" ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING EXCLUDE GROUP) G + FROM TEST ORDER BY ID FETCH FIRST 3 ROWS ONLY; +> ID VALUE G +> -- ----- ------ +> 1 1 [3] +> 2 1 [3, 4] +> 3 5 [4, 5] +> rows (ordered): 3 + +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER(ORDER BY "VALUE" ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE GROUP) G + FROM TEST ORDER BY ID FETCH FIRST 3 ROWS ONLY; +> ID VALUE G +> -- ----- ------ +> 1 1 null +> 2 1 null +> 3 5 [1, 2] +> rows (ordered): 3 + +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) A + FROM TEST; +> ID VALUE A +> -- ----- --------- +> 1 1 null +> 2 1 null +> 3 5 null +> 4 8 null +> 5 8 null +> 6 8 null +> 7 9 [4, 5, 6] +> 8 9 [4, 5, 6] +> rows: 8 + +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) CP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF + FROM TEST; +> ID VALUE CP CF RP RF GP GF +> -- ----- ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ +> 1 1 [1] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] +> 2 1 [1, 2] [2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] +> 3 5 [1, 2, 3] [3, 4, 
5, 6, 7, 8] [1, 2, 3] [3, 4, 5, 6, 7, 8] [1, 2, 3] [3, 4, 5, 6, 7, 8] +> 4 8 [1, 2, 3, 4] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] +> 5 8 [1, 2, 3, 4, 5] [5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] +> 6 8 [1, 2, 3, 4, 5, 6] [6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [4, 5, 6, 7, 8] +> 7 9 [1, 2, 3, 4, 5, 6, 7] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] +> 8 9 [1, 2, 3, 4, 5, 6, 7, 8] [8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] +> rows: 8 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY ID RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) FROM TEST; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (ID INT, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES + (1, 1), + (2, 1), + (3, 2), + (4, 2), + (5, 3), + (6, 3), + (7, 4), + (8, 4); +> update count: 8 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM TEST; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING) +> -- ----- ------------------------------------------------------------------------------- +> 1 1 null +> 2 1 null +> 3 2 [1, 2] +> 4 2 [1, 2] +> 5 3 [1, 2, 3, 4] +> 6 3 [1, 2, 3, 4] +> 7 4 [3, 4, 5, 6] +> 8 4 [3, 4, 5, 6] +> rows: 8 + +SELECT *, ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM TEST; +> ID VALUE ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) +> -- ----- ------------------------------------------------------------------------------- +> 1 1 [3, 4, 5, 6] +> 2 1 [3, 4, 5, 6] +> 3 2 [5, 6, 7, 8] +> 4 2 [5, 6, 7, 8] +> 5 3 [7, 8] +> 6 3 [7, 8] +> 7 4 null +> 8 4 null +> rows: 8 + +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT ROW) A + FROM TEST; +> ID VALUE A +> -- ----- ------------ +> 1 1 null +> 2 1 null +> 3 2 
[1, 2] +> 4 2 [1, 2] +> 5 3 [1, 2, 3, 4] +> 6 3 [1, 2, 3, 4] +> 7 4 [3, 4, 5, 6] +> 8 4 [3, 4, 5, 6] +> rows: 8 + +SELECT ID, "VALUE", ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN 1 FOLLOWING AND 1 FOLLOWING EXCLUDE CURRENT ROW) A + FROM TEST; +> ID VALUE A +> -- ----- ------ +> 1 1 [3, 4] +> 2 1 [3, 4] +> 3 2 [5, 6] +> 4 2 [5, 6] +> 5 3 [7, 8] +> 6 3 [7, 8] +> 7 4 null +> 8 4 null +> rows: 8 + +SELECT ID, "VALUE", + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) CP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) CF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) RP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) RF, + ARRAY_AGG(ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) GP, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY "VALUE" GROUPS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) GF + FROM TEST; +> ID VALUE CP CF RP RF GP GF +> -- ----- ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ ------------------------ +> 1 1 [1] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] +> 2 1 [1, 2] [2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] +> 3 2 [1, 2, 3] [3, 4, 5, 6, 7, 8] [1, 2, 3, 4] [3, 4, 5, 6, 7, 8] [1, 2, 3, 4] [3, 4, 5, 6, 7, 8] +> 4 2 [1, 2, 3, 4] [4, 5, 6, 7, 8] [1, 2, 3, 4] [3, 4, 5, 6, 7, 8] [1, 2, 3, 4] [3, 4, 5, 6, 7, 8] +> 5 3 [1, 2, 3, 4, 5] [5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [5, 6, 7, 8] +> 6 3 [1, 2, 3, 4, 5, 6] [6, 7, 8] [1, 2, 3, 4, 5, 6] [5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [5, 6, 7, 8] +> 7 4 [1, 2, 3, 4, 5, 6, 7] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] +> 8 4 [1, 2, 3, 4, 5, 6, 7, 8] [8] [1, 2, 3, 4, 5, 6, 7, 8] [7, 8] 
[1, 2, 3, 4, 5, 6, 7, 8] [7, 8] +> rows: 8 + +SELECT ID, "VALUE", + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND "VALUE" FOLLOWING) RG, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID RANGE BETWEEN "VALUE" PRECEDING AND UNBOUNDED FOLLOWING) RGR, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND "VALUE" FOLLOWING) R, + ARRAY_AGG(ID ORDER BY ID) OVER (ORDER BY ID ROWS BETWEEN "VALUE" PRECEDING AND UNBOUNDED FOLLOWING) RR + FROM TEST; +> ID VALUE RG RGR R RR +> -- ----- ------------------------ ------------------------ ------------------------ ------------------------ +> 1 1 [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2] [1, 2, 3, 4, 5, 6, 7, 8] +> 2 1 [1, 2, 3] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2, 3] [1, 2, 3, 4, 5, 6, 7, 8] +> 3 2 [1, 2, 3, 4, 5] [1, 2, 3, 4, 5, 6, 7, 8] [1, 2, 3, 4, 5] [1, 2, 3, 4, 5, 6, 7, 8] +> 4 2 [1, 2, 3, 4, 5, 6] [2, 3, 4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6] [2, 3, 4, 5, 6, 7, 8] +> 5 3 [1, 2, 3, 4, 5, 6, 7, 8] [2, 3, 4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [2, 3, 4, 5, 6, 7, 8] +> 6 3 [1, 2, 3, 4, 5, 6, 7, 8] [3, 4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [3, 4, 5, 6, 7, 8] +> 7 4 [1, 2, 3, 4, 5, 6, 7, 8] [3, 4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [3, 4, 5, 6, 7, 8] +> 8 4 [1, 2, 3, 4, 5, 6, 7, 8] [4, 5, 6, 7, 8] [1, 2, 3, 4, 5, 6, 7, 8] [4, 5, 6, 7, 8] +> rows: 8 + +SELECT ID, "VALUE", + ARRAY_AGG(ID ORDER BY ID) OVER + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN "VALUE" / 3 PRECEDING AND "VALUE" / 3 FOLLOWING) A, + ARRAY_AGG(ID ORDER BY ID) OVER + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND "VALUE" / 3 FOLLOWING) AP, + ARRAY_AGG(ID ORDER BY ID) OVER + (PARTITION BY "VALUE" ORDER BY ID ROWS BETWEEN "VALUE" / 3 PRECEDING AND UNBOUNDED FOLLOWING) AF + FROM TEST; +> ID VALUE A AP AF +> -- ----- ------ ------ ------ +> 1 1 [1] [1] [1, 2] +> 2 1 [2] [1, 2] [2] +> 3 2 [3] [3] [3, 4] +> 4 2 [4] [3, 4] [4] +> 5 3 [5, 6] [5, 6] [5, 6] +> 6 3 [5, 6] [5, 6] [5, 6] +> 7 4 [7, 
8] [7, 8] [7, 8] +> 8 4 [7, 8] [7, 8] [7, 8] +> rows: 8 + +INSERT INTO TEST VALUES (9, NULL); +> update count: 1 + +SELECT ARRAY_AGG("VALUE") FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + +SELECT ARRAY_AGG("VALUE" ORDER BY ID) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + +SELECT ARRAY_AGG("VALUE" ORDER BY ID) FILTER (WHERE "VALUE" IS NOT NULL) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4] + +SELECT ARRAY_AGG("VALUE" ORDER BY "VALUE") FROM TEST; +>> [null, 1, 1, 2, 2, 3, 3, 4, 4] + +SELECT ARRAY_AGG("VALUE" ORDER BY "VALUE" NULLS LAST) FROM TEST; +>> [1, 1, 2, 2, 3, 3, 4, 4, null] + +DROP TABLE TEST; +> ok + +SELECT ARRAY_AGG(DISTINCT A ORDER BY B) FROM (VALUES (4, 3), (5, 1), (5, 2)) T(A, B); +>> [5, 4] + +EXPLAIN SELECT ARRAY_AGG(A ORDER BY 'a') FROM (VALUES 1, 2) T(A); +>> SELECT ARRAY_AGG("A") FROM (VALUES (1), (2)) "T"("A") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql new file mode 100644 index 0000000000..1b70b6e58e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/avg.sql @@ -0,0 +1,136 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select avg(cast(x as int)) from system_range(2147483547, 2147483637); +>> 2.147483592E9 + +select avg(x) from system_range(9223372036854775707, 9223372036854775797); +>> 9223372036854775752.0000000000 + +select avg(cast(100 as tinyint)) from system_range(1, 1000); +>> 100.0 + +select avg(cast(100 as smallint)) from system_range(1, 1000); +>> 100.0 + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (10), (20), (30), (40), (50), (60), (70), (80), (90), (100), (110), (120); +> update count: 12 + +select avg(v), avg(v) filter (where v >= 40) from test where v <= 100; +> AVG(V) AVG(V) FILTER (WHERE V >= 40) +> ------ ----------------------------- +> 55.0 70.0 +> rows: 1 + +create index test_idx on test(v); +> ok + +select avg(v), avg(v) filter (where v >= 40) from test where v <= 100; +> AVG(V) AVG(V) FILTER (WHERE V >= 40) +> ------ ----------------------------- +> 55.0 70.0 +> rows: 1 + +drop table test; +> ok + +CREATE TABLE S( + N1 TINYINT, + N2 SMALLINT, + N4 INTEGER, + N8 BIGINT, + N NUMERIC(10, 2), + F4 REAL, + F8 DOUBLE PRECISION, + D DECFLOAT(10), + I1 INTERVAL YEAR(3), + I2 INTERVAL MONTH(3), + I3 INTERVAL DAY(3), + I4 INTERVAL HOUR(3), + I5 INTERVAL MINUTE(3), + I6 INTERVAL SECOND(2), + I7 INTERVAL YEAR(3) TO MONTH, + I8 INTERVAL DAY(3) TO HOUR, + I9 INTERVAL DAY(3) TO MINUTE, + I10 INTERVAL DAY(3) TO SECOND(2), + I11 INTERVAL HOUR(3) TO MINUTE, + I12 INTERVAL HOUR(3) TO SECOND(2), + I13 INTERVAL MINUTE(3) TO SECOND(2)); +> ok + +CREATE TABLE A AS SELECT + AVG(N1) N1, + AVG(N2) N2, + AVG(N4) N4, + AVG(N8) N8, + AVG(N) N, + AVG(F4) F4, + AVG(F8) F8, + AVG(D) D, + AVG(I1) I1, + AVG(I2) I2, + AVG(I3) I3, + AVG(I4) I4, + AVG(I5) I5, + AVG(I6) I6, + AVG(I7) I7, + AVG(I8) I8, + AVG(I9) I9, + AVG(I10) I10, + AVG(I11) I11, + AVG(I12) I12, + AVG(I13) I13 + FROM S; +> ok + +SELECT COLUMN_NAME, DATA_TYPE_SQL('PUBLIC', 'A', 'TABLE', DTD_IDENTIFIER) TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE 
TABLE_NAME = 'A' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME TYPE +> ----------- ------------------------------- +> N1 DOUBLE PRECISION +> N2 DOUBLE PRECISION +> N4 DOUBLE PRECISION +> N8 NUMERIC(29, 10) +> N NUMERIC(20, 12) +> F4 DOUBLE PRECISION +> F8 DECFLOAT(27) +> D DECFLOAT(20) +> I1 INTERVAL YEAR(3) TO MONTH +> I2 INTERVAL MONTH(3) +> I3 INTERVAL DAY(3) TO SECOND(9) +> I4 INTERVAL HOUR(3) TO SECOND(9) +> I5 INTERVAL MINUTE(3) TO SECOND(9) +> I6 INTERVAL SECOND(2, 9) +> I7 INTERVAL YEAR(3) TO MONTH +> I8 INTERVAL DAY(3) TO SECOND(9) +> I9 INTERVAL DAY(3) TO SECOND(9) +> I10 INTERVAL DAY(3) TO SECOND(9) +> I11 INTERVAL HOUR(3) TO SECOND(9) +> I12 INTERVAL HOUR(3) TO SECOND(9) +> I13 INTERVAL MINUTE(3) TO SECOND(9) +> rows (ordered): 21 + +DROP TABLE S, A; +> ok + +SELECT AVG(X) FROM (VALUES INTERVAL '1' DAY, INTERVAL '2' DAY) T(X); +>> INTERVAL '1 12:00:00' DAY TO SECOND + +SELECT AVG(X) FROM (VALUES CAST(1 AS NUMERIC(1)), CAST(2 AS NUMERIC(1))) T(X); +>> 1.5000000000 + +SELECT AVG(I) FROM (VALUES 9e99999 - 1, 1e99999 + 1) T(I); +>> 5E+99999 + +SELECT AVG(I) = 5E99999 FROM (VALUES CAST(9e99999 - 1 AS NUMERIC(100000)), CAST(1e99999 + 1 AS NUMERIC(100000))) T(I); +>> TRUE + +SELECT AVG(I) FROM (VALUES INTERVAL '999999999999999999' SECOND, INTERVAL '1' SECOND) T(I); +>> INTERVAL '500000000000000000' SECOND diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql new file mode 100644 index 0000000000..52212634ed --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_and_agg.sql @@ -0,0 +1,48 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v bigint); +> ok + +insert into test values + (0xfffffffffff0), (0xffffffffff0f), (0xfffffffff0ff), (0xffffffff0fff), + (0xfffffff0ffff), (0xffffff0fffff), (0xfffff0ffffff), (0xffff0fffffff), + (0xfff0ffffffff), (0xff0fffffffff), (0xf0ffffffffff), (0x0fffffffffff); +> update count: 12 + +select BIT_AND_AGG(v), BIT_AND_AGG(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; +> BIT_AND_AGG(V) BIT_AND_AGG(V) FILTER (WHERE V <= 281474976649215) +> --------------- -------------------------------------------------- +> 280375465082880 280375465086975 +> rows: 1 + +SELECT BIT_NAND_AGG(V), BIT_NAND_AGG(V) FILTER (WHERE V <= 0xffffffff0fff) FROM TEST WHERE V >= 0xff0fffffffff; +> BIT_NAND_AGG(V) BIT_NAND_AGG(V) FILTER (WHERE V <= 281474976649215) +> ---------------- --------------------------------------------------- +> -280375465082881 -280375465086976 +> rows: 1 + +create index test_idx on test(v); +> ok + +select BIT_AND_AGG(v), BIT_AND_AGG(v) filter (where v <= 0xffffffff0fff) from test where v >= 0xff0fffffffff; +> BIT_AND_AGG(V) BIT_AND_AGG(V) FILTER (WHERE V <= 281474976649215) +> --------------- -------------------------------------------------- +> 280375465082880 280375465086975 +> rows: 1 + +SELECT BIT_NAND_AGG(V), BIT_NAND_AGG(V) FILTER (WHERE V <= 0xffffffff0fff) FROM TEST WHERE V >= 0xff0fffffffff; +> BIT_NAND_AGG(V) BIT_NAND_AGG(V) FILTER (WHERE V <= 281474976649215) +> ---------------- --------------------------------------------------- +> -280375465082881 -280375465086976 +> rows: 1 + +EXPLAIN SELECT BITNOT(BIT_AND_AGG(V)), BITNOT(BIT_NAND_AGG(V)) FROM TEST; +>> SELECT BIT_NAND_AGG("V"), BIT_AND_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql new file mode 100644 index 
0000000000..ba91746c04 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_or_agg.sql @@ -0,0 +1,45 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v bigint); +> ok + +insert into test values (1), (2), (4), (8), (16), (32), (64), (128), (256), (512), (1024), (2048); +> update count: 12 + +select BIT_OR_AGG(v), BIT_OR_AGG(v) filter (where v >= 8) from test where v <= 512; +> BIT_OR_AGG(V) BIT_OR_AGG(V) FILTER (WHERE V >= 8) +> ------------- ----------------------------------- +> 1023 1016 +> rows: 1 + +SELECT BIT_NOR_AGG(V), BIT_NOR_AGG(V) FILTER (WHERE V >= 8) FROM TEST WHERE V <= 512; +> BIT_NOR_AGG(V) BIT_NOR_AGG(V) FILTER (WHERE V >= 8) +> -------------- ------------------------------------ +> -1024 -1017 +> rows: 1 + +create index test_idx on test(v); +> ok + +select BIT_OR_AGG(v), BIT_OR_AGG(v) filter (where v >= 8) from test where v <= 512; +> BIT_OR_AGG(V) BIT_OR_AGG(V) FILTER (WHERE V >= 8) +> ------------- ----------------------------------- +> 1023 1016 +> rows: 1 + +SELECT BIT_NOR_AGG(V), BIT_NOR_AGG(V) FILTER (WHERE V >= 8) FROM TEST WHERE V <= 512; +> BIT_NOR_AGG(V) BIT_NOR_AGG(V) FILTER (WHERE V >= 8) +> -------------- ------------------------------------ +> -1024 -1017 +> rows: 1 + +EXPLAIN SELECT BITNOT(BIT_OR_AGG(V)), BITNOT(BIT_NOR_AGG(V)) FROM TEST; +>> SELECT BIT_NOR_AGG("V"), BIT_OR_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql new file mode 100644 index 0000000000..1092a4d00a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/bit_xor_agg.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT BIT_XOR_AGG(V), BIT_XOR_AGG(DISTINCT V), BIT_XOR_AGG(V) FILTER (WHERE V <> 1) FROM (VALUES 1, 1, 2, 3, 4) T(V); +> BIT_XOR_AGG(V) BIT_XOR_AGG(DISTINCT V) BIT_XOR_AGG(V) FILTER (WHERE V <> 1) +> -------------- ----------------------- ------------------------------------ +> 5 4 5 +> rows: 1 + +SELECT BIT_XNOR_AGG(V), BIT_XNOR_AGG(DISTINCT V), BIT_XNOR_AGG(V) FILTER (WHERE V <> 1) FROM (VALUES 1, 1, 2, 3, 4) T(V); +> BIT_XNOR_AGG(V) BIT_XNOR_AGG(DISTINCT V) BIT_XNOR_AGG(V) FILTER (WHERE V <> 1) +> --------------- ------------------------ ------------------------------------- +> -6 -5 -6 +> rows: 1 + +CREATE TABLE TEST(V BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BIT_XOR_AGG(V)), BITNOT(BIT_XNOR_AGG(V)) FROM TEST; +>> SELECT BIT_XNOR_AGG("V"), BIT_XOR_AGG("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql new file mode 100644 index 0000000000..45a9fb38d0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/corr.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CORR(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> CORR(Y, X) OVER (ORDER BY R) +> ---------------------------- +> null +> null +> null +> null +> null +> 0.9966158955401239 +> 0.9958932064677037 +> 0.9922153572367626 +> 0.9582302043304856 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql new file mode 100644 index 0000000000..1d151de2ba --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/count.sql @@ -0,0 +1,235 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (null); +> update count: 13 + +select count(v), count(v) filter (where v >= 4) from test where v <= 10; +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- ------------------------------ +> 10 7 +> rows: 1 + +select count(*), count(*) filter (where v >= 4) from test; +> COUNT(*) COUNT(*) FILTER (WHERE V >= 4) +> -------- ------------------------------ +> 13 9 +> rows: 1 + +select count(*), count(*) filter (where v >= 4) from test where v <= 10; +> COUNT(*) COUNT(*) FILTER (WHERE V >= 4) +> -------- ------------------------------ +> 10 7 +> rows: 1 + +create index test_idx on test(v); +> ok + +select count(v), count(v) filter (where v >= 4) from test where v <= 10; +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- ------------------------------ +> 10 7 +> rows: 1 + +select count(v), count(v) filter (where v >= 4) from test; +> COUNT(V) COUNT(V) FILTER (WHERE V >= 4) +> -------- 
------------------------------ +> 12 9 +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST (ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES (1, 'b'), (3, 'a'); +> update count: 2 + +SELECT COUNT(ID) OVER (ORDER BY NAME) AS NR, + A.ID AS ID FROM (SELECT ID, NAME FROM TEST ORDER BY NAME) AS A; +> NR ID +> -- -- +> 1 3 +> 2 1 +> rows: 2 + +SELECT NR FROM (SELECT COUNT(ID) OVER (ORDER BY NAME) AS NR, + A.ID AS ID FROM (SELECT ID, NAME FROM TEST ORDER BY NAME) AS A) + AS B WHERE B.ID = 1; +>> 2 + +DROP TABLE TEST; +> ok + +SELECT I, V, COUNT(V) OVER W C, COUNT(DISTINCT V) OVER W D FROM + VALUES (1, 1), (2, 1), (3, 1), (4, 1), (5, 2), (6, 2), (7, 3) T(I, V) + WINDOW W AS (ORDER BY I); +> I V C D +> - - - - +> 1 1 1 1 +> 2 1 2 1 +> 3 1 3 1 +> 4 1 4 1 +> 5 2 5 2 +> 6 2 6 2 +> 7 3 7 3 +> rows: 7 + +SELECT I, C, COUNT(I) OVER (PARTITION BY C) CNT FROM + VALUES (1, 1), (2, 1), (3, 2), (4, 2), (5, 2) T(I, C); +> I C CNT +> - - --- +> 1 1 2 +> 2 1 2 +> 3 2 3 +> 4 2 3 +> 5 2 3 +> rows: 5 + +SELECT X, COUNT(*) OVER (ORDER BY X) C FROM VALUES (1), (1), (2), (2), (3) V(X); +> X C +> - - +> 1 2 +> 1 2 +> 2 4 +> 2 4 +> 3 5 +> rows: 5 + +CREATE TABLE TEST (N NUMERIC) AS VALUES (0), (0.0), (NULL); +> ok + +SELECT COUNT(*) FROM TEST; +>> 3 + +SELECT COUNT(N) FROM TEST; +>> 2 + +SELECT COUNT(DISTINCT N) FROM TEST; +>> 1 + +SELECT COUNT(*) FROM TEST GROUP BY N; +> COUNT(*) +> -------- +> 1 +> 2 +> rows: 2 + +SELECT COUNT(N) OVER (PARTITION BY N) C FROM TEST; +> C +> - +> 0 +> 2 +> 2 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS (VALUES (1, NULL), (1, NULL), (2, NULL)); +> ok + +SELECT COUNT((A, B)) C, COUNT(DISTINCT (A, B)) CD FROM TEST; +> C CD +> - -- +> 3 2 +> rows: 1 + +SELECT COUNT(*) OVER (PARTITION BY A, B) C1, COUNT(*) OVER (PARTITION BY (A, B)) C2 FROM TEST; +> C1 C2 +> -- -- +> 1 1 +> 2 2 +> 2 2 +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(X INT) AS (VALUES 1, 2, NULL); +> ok + +SELECT COUNT(*) FROM TEST; +>> 3 
+ +SELECT COUNT(1) FROM TEST; +>> 3 + +SELECT COUNT(DISTINCT 1) FROM TEST; +>> 1 + +SELECT COUNT(1) FROM TEST FILTER WHERE X <> 1; +>> 1 + +SELECT COUNT(1) OVER(PARTITION BY X IS NULL) FROM TEST; +> COUNT(*) OVER (PARTITION BY X IS NULL) +> -------------------------------------- +> 1 +> 2 +> 2 +> rows: 3 + +SELECT COUNT(NULL) FROM TEST; +>> 0 + +SELECT COUNT(DISTINCT NULL) FROM TEST; +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(*) FILTER (WHERE TRUE) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(1) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +EXPLAIN SELECT COUNT(DISTINCT 1) FROM TEST; +>> SELECT COUNT(DISTINCT 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT COUNT(1) FROM TEST FILTER WHERE X <> 1; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" "FILTER" /* PUBLIC.TEST.tableScan */ WHERE "X" <> 1 + +EXPLAIN SELECT COUNT(1) OVER(PARTITION BY X IS NULL) FROM TEST; +>> SELECT COUNT(*) OVER (PARTITION BY "X" IS NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT COUNT(NULL) FROM TEST; +>> SELECT CAST(0 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +EXPLAIN SELECT COUNT(DISTINCT NULL) FROM TEST; +>> SELECT CAST(0 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +SELECT COUNT(X) FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(X) FROM TEST; +>> SELECT COUNT("X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DELETE FROM TEST WHERE X IS NULL; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN X SET NOT NULL; +> ok + +SELECT COUNT(X) FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(X) FROM TEST; +>> SELECT COUNT("X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(DISTINCT X) 
FROM TEST; +>> 2 + +EXPLAIN SELECT COUNT(DISTINCT X) FROM TEST; +>> SELECT COUNT(DISTINCT "X") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql new file mode 100644 index 0000000000..2db80694cd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_pop.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT COVAR_POP(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> COVAR_POP(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 30.333333333333332 +> 35.75 +> 35.88 +> 31.277777777777775 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql new file mode 100644 index 0000000000..8b09c45d1d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/covar_samp.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT COVAR_SAMP(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> COVAR_SAMP(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> null +> null +> null +> null +> 0.0 +> 45.5 +> 47.666666666666664 +> 44.85 +> 37.53333333333333 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql new file mode 100644 index 0000000000..9879b92ad8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/envelope.sql @@ -0,0 +1,132 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(V GEOMETRY); +> ok + +SELECT ENVELOPE(V) FROM TEST; +>> null + +INSERT INTO TEST VALUES ('POINT(1 1)'); +> update count: 1 + +SELECT ENVELOPE(V) FROM TEST; +>> POINT (1 1) + +INSERT INTO TEST VALUES ('POINT(1 2)'), (NULL), ('POINT(3 1)'); +> update count: 3 + +SELECT ENVELOPE(V), ENVELOPE(V) FILTER (WHERE V <> 'POINT(3 1)') FILTERED1, + ENVELOPE(V) FILTER (WHERE V <> 'POINT(1 2)') FILTERED2 FROM TEST; +> ENVELOPE(V) FILTERED1 FILTERED2 +> ----------------------------------- --------------------- --------------------- +> POLYGON ((1 1, 1 2, 3 2, 3 1, 1 1)) LINESTRING (1 1, 1 2) LINESTRING (1 1, 3 1) +> rows: 1 + +CREATE SPATIAL INDEX IDX ON TEST(V); +> ok + +-- Without index +SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); +>> POLYGON ((1 1, 1 2, 3 2, 3 1, 1 1)) + +-- With index +SELECT ENVELOPE(V) FROM TEST; +>> POLYGON ((1 1, 1 2, 3 2, 3 1, 1 1)) + +-- Without index +SELECT ENVELOPE(V) FILTER (WHERE V <> 'POINT(3 1)') FILTERED FROM TEST; +>> LINESTRING (1 1, 1 2) + +-- Without index +SELECT ENVELOPE(V) FROM TEST WHERE V <> 'POINT(3 
1)'; +>> LINESTRING (1 1, 1 2) + +INSERT INTO TEST VALUES ('POINT(-1.0000000001 1)'); +> update count: 1 + +-- Without index +SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); +>> POLYGON ((-1.0000000001 1, -1.0000000001 2, 3 2, 3 1, -1.0000000001 1)) + +-- With index +SELECT ENVELOPE(V) FROM TEST; +>> POLYGON ((-1.0000000001 1, -1.0000000001 2, 3 2, 3 1, -1.0000000001 1)) + +TRUNCATE TABLE TEST; +> update count: 5 + +-- Without index +SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); +>> null + +-- With index +SELECT ENVELOPE(V) FROM TEST; +>> null + +SELECT ESTIMATED_ENVELOPE('TEST', 'V'); +>> null + +@reconnect off + +SELECT RAND(1000) * 0; +>> 0.0 + +INSERT INTO TEST SELECT CAST('POINT(' || CAST(RAND() * 100000 AS INT) || ' ' || CAST(RAND() * 100000 AS INT) || ')' AS GEOMETRY) FROM SYSTEM_RANGE(1, 1000); +> update count: 1000 + +@reconnect on + +-- Without index +SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); +>> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) + +-- With index +SELECT ENVELOPE(V) FROM TEST; +>> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) + +SELECT ESTIMATED_ENVELOPE('TEST', 'V'); +>> POLYGON ((68 78, 68 99951, 99903 99951, 99903 78, 68 78)) + +TRUNCATE TABLE TEST; +> update count: 1000 + +@reconnect off + +SELECT RAND(1000) * 0; +>> 0.0 + +INSERT INTO TEST SELECT CAST('POINT(' || (CAST(RAND() * 100000 AS INT) * 0.000000001 + 1) || ' ' + || (CAST(RAND() * 100000 AS INT) * 0.000000001 + 1) || ')' AS GEOMETRY) FROM SYSTEM_RANGE(1, 1000); +> update count: 1000 + +@reconnect on + +-- Without index +SELECT ENVELOPE(N) FROM (SELECT V AS N FROM TEST); +>> POLYGON ((1.000000068 1.000000078, 1.000000068 1.000099951, 1.000099903 1.000099951, 1.000099903 1.000000078, 1.000000068 1.000000078)) + +-- With index +SELECT ENVELOPE(V) FROM TEST; +>> POLYGON ((1.000000068 1.000000078, 1.000000068 1.000099951, 1.000099903 1.000099951, 1.000099903 1.000000078, 1.000000068 1.000000078)) + +DROP TABLE TEST; +> ok + +-- Test for index 
selection +CREATE TABLE TEST(G1 GEOMETRY, G2 GEOMETRY) AS (SELECT NULL, 'POINT (1 1)'::GEOMETRY); +> ok + +CREATE SPATIAL INDEX G1IDX ON TEST(G1); +> ok + +CREATE SPATIAL INDEX G2IDX ON TEST(G2); +> ok + +SELECT ENVELOPE(G2) FROM TEST; +>> POINT (1 1) + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql new file mode 100644 index 0000000000..e603f5c624 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/every.sql @@ -0,0 +1,21 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 3), (2, 1), (2, 5), (3, 4); +> update count: 5 + +SELECT A, EVERY(B < 5), BOOL_AND(B > 1), EVERY(B >= 1) FILTER (WHERE A = 1) FROM TEST GROUP BY A; +> A EVERY(B < 5) EVERY(B > 1) EVERY(B >= 1) FILTER (WHERE A = 1) +> - ------------ ------------ ---------------------------------- +> 1 TRUE FALSE TRUE +> 2 FALSE FALSE null +> 3 TRUE TRUE null +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql new file mode 100644 index 0000000000..396daabd5b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/histogram.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT HISTOGRAM(X), FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); +>> [ROW (null, 1), ROW (1, 2), ROW (2, 2), ROW (3, 1), ROW (5, 1)] + +SELECT HISTOGRAM(X) FILTER (WHERE X > 1) FROM VALUES (1), (2), (3), (1), (2), (NULL), (5) T(X); +>> [ROW (2, 2), ROW (3, 1), ROW (5, 1)] + +SELECT HISTOGRAM(X) FILTER (WHERE X > 0) FROM VALUES (0) T(X); +>> [] + +SELECT HISTOGRAM(DISTINCT X) FROM VALUES (0) T(X); +> exception SYNTAX_ERROR_2 + +SELECT HISTOGRAM(ALL X) FROM VALUES (0) T(X); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql new file mode 100644 index 0000000000..12429ec0af --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_arrayagg.sql @@ -0,0 +1,71 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, N VARCHAR, J JSON) AS VALUES + (1, 'Ten', JSON '10'), + (2, 'Null', NULL), + (3, 'False', JSON 'false'), + (4, 'False', JSON 'false'); +> ok + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [10,null,false,false] + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(ALL J) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(DISTINCT J) FROM TEST; +>> [10,false] + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [10,null,false,false] + +SELECT JSON_ARRAYAGG(J ABSENT ON NULL) FROM TEST; +>> [10,false,false] + +SELECT JSON_ARRAYAGG(J ORDER BY ID DESC NULL ON NULL) FROM TEST; +>> [false,false,null,10] + +SELECT JSON_ARRAY(NULL NULL ON NULL); +>> [null] + +EXPLAIN SELECT JSON_ARRAYAGG(J) FROM TEST; +>> SELECT JSON_ARRAYAGG("J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J" NULL ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(J FORMAT JSON ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG("J" FORMAT JSON) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAYAGG(DISTINCT J FORMAT JSON ORDER BY ID DESC ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAYAGG(DISTINCT "J" FORMAT JSON ORDER BY "ID" DESC) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DELETE FROM TEST WHERE J IS NOT NULL; +> update count: 3 + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> [] + +SELECT JSON_ARRAYAGG(J NULL ON NULL) FROM TEST; +>> [null] + +DELETE FROM TEST; +> update count: 1 + +SELECT JSON_ARRAYAGG(J) FROM TEST; +>> null + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT JSON_ARRAYAGG(A ORDER BY 'a') FROM (VALUES 1, 2) T(A); +>> SELECT JSON_ARRAYAGG("A") FROM (VALUES (1), (2)) "T"("A") /* table 
scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql new file mode 100644 index 0000000000..de61a64361 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/json_objectagg.sql @@ -0,0 +1,73 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, N VARCHAR, J JSON) AS VALUES + (1, 'Ten', '10' FORMAT JSON), + (2, 'Null', NULL), + (3, 'False', 'false' FORMAT JSON); +> ok + +SELECT JSON_OBJECTAGG(KEY N VALUE J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N VALUE J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false} + +SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FROM TEST; +>> {"Ten":10,"False":false} + +SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FILTER (WHERE J IS NULL) FROM TEST; +>> {} + +SELECT JSON_OBJECTAGG(N: J) FILTER (WHERE FALSE) FROM TEST; +>> null + +SELECT JSON_OBJECTAGG(NULL: J) FROM TEST; +> exception INVALID_VALUE_2 + +INSERT INTO TEST VALUES (4, 'Ten', '-10' FORMAT JSON); +> update count: 1 + +SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> {"Ten":10,"Null":null,"False":false,"Ten":-10} + +SELECT JSON_OBJECTAGG(N: J WITHOUT UNIQUE KEYS) FROM TEST; +>> {"Ten":10,"Null":null,"False":false,"Ten":-10} + +SELECT JSON_OBJECTAGG(N: J WITH UNIQUE KEYS) FROM TEST; +> exception INVALID_VALUE_2 + +EXPLAIN SELECT JSON_OBJECTAGG(N: J) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" 
ABSENT ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" ABSENT ON NULL WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J NULL ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECTAGG(N: J ABSENT ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECTAGG("N": "J" ABSENT ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql new file mode 100644 index 0000000000..1a0d91f1a9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/listagg.sql @@ -0,0 +1,255 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v varchar); +> ok + +insert into test values ('1'), ('2'), ('3'), ('4'), ('5'), ('6'), ('7'), ('8'), ('9'); +> update count: 9 + +select listagg(v, '-') within group (order by v asc), + listagg(v, '-') within group (order by v desc) filter (where v >= '4') + from test where v >= '2'; +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- +> 2-3-4-5-6-7-8-9 9-8-7-6-5-4 +> rows: 1 + +select group_concat(v order by v asc separator '-'), + group_concat(v order by v desc separator '-') filter (where v >= '4') + from test where v >= '2'; +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- +> 2-3-4-5-6-7-8-9 9-8-7-6-5-4 +> rows: 1 + +create index test_idx on test(v); +> ok + +select group_concat(v order by v asc separator '-'), + group_concat(v order by v desc separator '-') filter (where v >= '4') + from test where v >= '2'; +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- +> 2-3-4-5-6-7-8-9 9-8-7-6-5-4 +> rows: 1 + +select group_concat(v order by v asc separator '-'), + group_concat(v order by v desc separator '-') filter (where v >= '4') + from test; +> LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) LISTAGG(V, '-') WITHIN GROUP (ORDER BY V DESC) FILTER (WHERE V >= '4') +> ----------------------------------------- ---------------------------------------------------------------------- +> 1-2-3-4-5-6-7-8-9 9-8-7-6-5-4 +> rows: 1 + +drop table test; +> ok + +create 
table test (id int auto_increment primary key, v int); +> ok + +insert into test(v) values (7), (2), (8), (3), (7), (3), (9), (-1); +> update count: 8 + +select group_concat(v) from test; +> LISTAGG(V) WITHIN GROUP (ORDER BY NULL) +> --------------------------------------- +> 7,2,8,3,7,3,9,-1 +> rows: 1 + +select group_concat(distinct v) from test; +> LISTAGG(DISTINCT V) WITHIN GROUP (ORDER BY NULL) +> ------------------------------------------------ +> -1,2,3,7,8,9 +> rows: 1 + +select group_concat(distinct v order by v desc) from test; +> LISTAGG(DISTINCT V) WITHIN GROUP (ORDER BY V DESC) +> -------------------------------------------------- +> 9,8,7,3,2,-1 +> rows: 1 + +INSERT INTO TEST(V) VALUES NULL; +> update count: 1 + +SELECT LISTAGG(V, ',') WITHIN GROUP (ORDER BY ID) FROM TEST; +>> 7,2,8,3,7,3,9,-1 + +SELECT LISTAGG(COALESCE(CAST(V AS VARCHAR), 'null'), ',') WITHIN GROUP (ORDER BY ID) FROM TEST; +>> 7,2,8,3,7,3,9,-1,null + +SELECT LISTAGG(V, ',') WITHIN GROUP (ORDER BY V) FROM TEST; +>> -1,2,3,3,7,7,8,9 + +drop table test; +> ok + +create table test(g int, v int) as values (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6), (3, null); +> ok + +select g, listagg(v, '-') from test group by g; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY NULL) +> - -------------------------------------------- +> 1 1-2-3 +> 2 4-5-6 +> 3 null +> rows: 3 + +select g, listagg(v, '-') over (partition by g) from test order by v; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY NULL) OVER (PARTITION BY G) +> - ------------------------------------------------------------------ +> 3 null +> 1 1-2-3 +> 1 1-2-3 +> 1 1-2-3 +> 2 4-5-6 +> 2 4-5-6 +> 2 4-5-6 +> rows (ordered): 7 + +select g, listagg(v, '-' on overflow error) within group (order by v) filter (where v <> 2) over (partition by g) from test order by v; +> G LISTAGG(V, '-') WITHIN GROUP (ORDER BY V) FILTER (WHERE V <> 2) OVER (PARTITION BY G) +> - ------------------------------------------------------------------------------------- +> 3 
null +> 1 1-3 +> 1 1-3 +> 1 1-3 +> 2 4-5-6 +> 2 4-5-6 +> 2 4-5-6 +> rows (ordered): 7 + +select listagg(distinct v, '-') from test; +> LISTAGG(DISTINCT V, '-') WITHIN GROUP (ORDER BY NULL) +> ----------------------------------------------------- +> 1-2-3-4-5-6 +> rows: 1 + +select g, group_concat(v separator v) from test group by g; +> exception SYNTAX_ERROR_2 + +drop table test; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES + (1, NULL, NULL), + (2, NULL, 1), + (3, 1, NULL), + (4, 1, 1), + (5, NULL, 2), + (6, 2, NULL), + (7, 2, 2); +> update count: 7 + +SELECT LISTAGG(A) WITHIN GROUP (ORDER BY B ASC NULLS FIRST, C ASC NULLS FIRST) FROM TEST; +>> 1,2,5,3,4,6,7 + +SELECT LISTAGG(A) WITHIN GROUP (ORDER BY B ASC NULLS LAST, C ASC NULLS LAST) FROM TEST; +>> 4,3,7,6,2,5,1 + +DROP TABLE TEST; +> ok + +SELECT LISTAGG(DISTINCT A, ' ') WITHIN GROUP (ORDER BY B) FROM (VALUES ('a', 2), ('a', 3), ('b', 1)) T(A, B); +>> b a + +CREATE TABLE TEST(A INT NOT NULL, B VARCHAR(50) NOT NULL) AS VALUES (1, '1'), (1, '2'), (1, '3'); +> ok + +SELECT STRING_AGG(B, ', ') FROM TEST GROUP BY A; +>> 1, 2, 3 + +SELECT STRING_AGG(B, ', ' ORDER BY B DESC) FROM TEST GROUP BY A; +>> 3, 2, 1 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT LISTAGG(A) WITHIN GROUP (ORDER BY 'a') FROM (VALUES 'a', 'b') T(A); +>> SELECT LISTAGG("A") WITHIN GROUP (ORDER BY NULL) FROM (VALUES ('a'), ('b')) "T"("A") /* table scan */ + +SET MODE Oracle; +> ok + +SELECT LISTAGG(V, '') WITHIN GROUP(ORDER BY V) FROM (VALUES 'a', 'b') T(V); +>> ab + +SET MODE Regular; +> ok + +CREATE TABLE TEST(ID INT, V VARCHAR) AS VALUES (1, 'b'), (2, 'a'); +> ok + +EXPLAIN SELECT LISTAGG(V) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V, ';') WITHIN 
GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V", ';') WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW ERROR) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V") WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V, ';' ON OVERFLOW ERROR) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V", ';') WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE '..' WITH COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE '..' WITH COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT LISTAGG(V ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP (ORDER BY ID) FROM TEST; +>> SELECT LISTAGG("V" ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP (ORDER BY "ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(V VARCHAR) AS SELECT 'ABCD_EFGH_' || X FROM SYSTEM_RANGE(1, 70000); +> ok + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> BCD_EFGH_69391,ABCD_EFGH_69392,...(4007) + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITHOUT COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> 9391,ABCD_EFGH_69392,ABCD_EFGH_69393,... 
+ +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE '~~~~~~~~~~~~~~~' WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM TEST; +>> 90,ABCD_EFGH_69391,~~~~~~~~~~~~~~~(4008) + +TRUNCATE TABLE TEST; +> update count: 70000 + +INSERT INTO TEST VALUES REPEAT('A', 1048573); +> update count: 1 + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BB'); +>> AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,BB + +SELECT RIGHT(LISTAGG(V ON OVERFLOW ERROR) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +> exception VALUE_TOO_LONG_2 + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE WITH COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +>> ...(2) + +SELECT RIGHT(LISTAGG(V ON OVERFLOW TRUNCATE '..' WITHOUT COUNT) WITHIN GROUP(ORDER BY V), 40) FROM + (TABLE TEST UNION VALUES 'BBB'); +>> AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,.. + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql new file mode 100644 index 0000000000..dfdf0c99ba --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/max.sql @@ -0,0 +1,69 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12); +> update count: 12 + +select max(v), max(v) filter (where v <= 8) from test where v <= 10; +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- +> 10 8 +> rows: 1 + +create index test_idx on test(v); +> ok + +select max(v), max(v) filter (where v <= 8) from test where v <= 10; +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- +> 10 8 +> rows: 1 + +select max(v), max(v) filter (where v <= 8) from test; +> MAX(V) MAX(V) FILTER (WHERE V <= 8) +> ------ ---------------------------- +> 12 8 +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS VALUES (1, 1), (2, NULL), (3, 5); +> ok + +CREATE INDEX TEST_IDX ON TEST(V NULLS LAST); +> ok + +EXPLAIN SELECT MAX(V) FROM TEST; +>> SELECT MAX("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ /* direct lookup */ + +SELECT MAX(V) FROM TEST; +>> 5 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT MAX(X) FROM SYSTEM_RANGE(1, 2); +>> SELECT MAX("X") FROM SYSTEM_RANGE(1, 2) /* range index */ /* direct lookup */ + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2); +>> 2 + +SELECT MAX(X) FROM SYSTEM_RANGE(2, 1); +>> null + +SELECT MAX(X) FROM SYSTEM_RANGE(1, 2, -1); +>> null + +SELECT MAX(X) FROM SYSTEM_RANGE(2, 1, -1); +>> 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql new file mode 100644 index 0000000000..e8b4b50504 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/min.sql @@ -0,0 +1,75 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12); +> update count: 12 + +select min(v), min(v) filter (where v >= 4) from test where v >= 2; +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- +> 2 4 +> rows: 1 + +create index test_idx on test(v); +> ok + +select min(v), min(v) filter (where v >= 4) from test where v >= 2; +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- +> 2 4 +> rows: 1 + +select min(v), min(v) filter (where v >= 4) from test; +> MIN(V) MIN(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- +> 1 4 +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(V NULLS FIRST); +> ok + +EXPLAIN SELECT MIN(V) FROM TEST; +>> SELECT MIN("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ /* direct lookup */ + +SELECT MIN(V) FROM TEST; +>> null + +INSERT INTO TEST VALUES (1, 1), (2, NULL), (3, 5); +> update count: 3 + +SELECT MIN(V) FROM TEST; +>> 1 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT MIN(X) FROM SYSTEM_RANGE(1, 2); +>> SELECT MIN("X") FROM SYSTEM_RANGE(1, 2) /* range index */ /* direct lookup */ + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2); +>> 1 + +SELECT MIN(X) FROM SYSTEM_RANGE(2, 1); +>> null + +SELECT MIN(X) FROM SYSTEM_RANGE(1, 2, -1); +>> null + +SELECT MIN(X) FROM SYSTEM_RANGE(2, 1, -1); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql new file mode 100644 index 0000000000..54b0dd7314 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/mode.sql @@ -0,0 +1,94 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(V INT); +> ok + +SELECT MODE(V) FROM TEST; +>> null + +SELECT MODE(DISTINCT V) FROM TEST; +> exception SYNTAX_ERROR_2 + +INSERT INTO TEST VALUES (NULL); +> update count: 1 + +SELECT MODE(V) FROM TEST; +>> null + +INSERT INTO TEST VALUES (1), (2), (3), (1), (2), (1); +> update count: 6 + +SELECT MODE(V), MODE() WITHIN GROUP (ORDER BY V DESC) FROM TEST; +> MODE() WITHIN GROUP (ORDER BY V) MODE() WITHIN GROUP (ORDER BY V DESC) +> -------------------------------- ------------------------------------- +> 1 1 +> rows: 1 + +SELECT MODE(V) FILTER (WHERE (V > 1)), MODE(V) FILTER (WHERE (V < 0)) FROM TEST; +> MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE V > 1) MODE() WITHIN GROUP (ORDER BY V) FILTER (WHERE V < 0) +> ----------------------------------------------------- ----------------------------------------------------- +> 2 null +> rows: 1 + +-- Oracle compatibility +SELECT STATS_MODE(V) FROM TEST; +>> 1 + +INSERT INTO TEST VALUES (2), (3), (3); +> update count: 3 + +SELECT MODE(V ORDER BY V) FROM TEST; +>> 1 + +SELECT MODE(V ORDER BY V ASC) FROM TEST; +>> 1 + +SELECT MODE(V ORDER BY V DESC) FROM TEST; +>> 3 + +SELECT MODE(V ORDER BY V + 1) FROM TEST; +> exception IDENTICAL_EXPRESSIONS_SHOULD_BE_USED + +SELECT MODE() WITHIN GROUP (ORDER BY V) FROM TEST; +>> 1 + +SELECT MODE() WITHIN GROUP (ORDER BY V ASC) FROM TEST; +>> 1 + +SELECT MODE() WITHIN GROUP (ORDER BY V DESC) FROM TEST; +>> 3 + +SELECT + MODE() WITHIN GROUP (ORDER BY V) OVER () MA, + MODE() WITHIN GROUP (ORDER BY V DESC) OVER () MD, + MODE() WITHIN GROUP (ORDER BY V) OVER (ORDER BY V) MWA, + MODE() WITHIN GROUP (ORDER BY V DESC) OVER (ORDER BY V) MWD, + V FROM TEST; +> MA MD MWA MWD V +> -- -- ---- ---- ---- +> 1 3 1 1 1 +> 1 3 1 1 1 +> 1 3 1 1 1 +> 1 3 1 2 2 +> 1 3 1 2 2 +> 1 3 1 2 2 +> 1 3 1 3 3 +> 1 3 1 3 3 +> 1 3 1 3 3 +> 1 3 null null null +> rows: 
10 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (N NUMERIC) AS VALUES (0), (0.0), (NULL); +> ok + +SELECT MODE(N) FROM TEST; +>> 0 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql new file mode 100644 index 0000000000..5ac0bed4ad --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/percentile.sql @@ -0,0 +1,916 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- ASC +create table test(v tinyint); +> ok + +create index test_idx on test(v asc); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test values (null); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +-- ASC NULLS FIRST +create table test(v tinyint); +> ok + +create index test_idx on test(v asc nulls first); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test 
values (null); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +-- ASC NULLS LAST +create table test(v tinyint); +> ok + +create index test_idx on test(v asc nulls last); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test values (null); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +-- DESC +create table test(v tinyint); +> ok + +create index test_idx on test(v desc); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test values (null); +> update count: 1 + +select + 
percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +-- DESC NULLS FIRST +create table test(v tinyint); +> ok + +create index test_idx on test(v desc nulls first); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test values (null); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +-- DESC NULLS LAST +create table test(v tinyint); +> ok + +create index test_idx on test(v desc nulls last); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +insert into test values (null); +> update count: 1 + +select + percentile_disc(0.5) within group 
(order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- -- +> 20 20 20 +> rows: 1 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select + percentile_disc(0.5) within group (order by v) d50a, + percentile_disc(0.5) within group (order by v desc) d50d, + median(v) m from test; +> D50A D50D M +> ---- ---- ---- +> 10 20 15.0 +> rows: 1 + +drop table test; +> ok + +create table test(v tinyint); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select median(v) from test; +>> 20 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 20 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select median(v) from test; +>> 15.0 + +drop table test; +> ok + +create table test(v smallint); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select median(v) from test; +>> 20 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 20 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select median(v) from test; +>> 15.0 + +drop table test; +> ok + +create table test(v int); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select median(v) from test; +>> 20 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 20 + +select median(distinct v) from test; +>> 15.0 + +insert into test values (10); +> update count: 1 + +select median(v) from test; +>> 15.0 + +drop table test; +> ok + +create table test(v bigint); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select median(v) from test; +>> 20 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 20 + +select median(distinct v) from test; +>> 15.0 + +insert into test values 
(10); +> update count: 1 + +select median(v) from test; +>> 15.0 + +drop table test; +> ok + +create table test(v real); +> ok + +insert into test values (2), (2), (1); +> update count: 3 + +select median(v) from test; +>> 2.0 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 2.0 + +select median(distinct v) from test; +>> 1.50 + +insert into test values (1); +> update count: 1 + +select median(v) from test; +>> 1.50 + +drop table test; +> ok + +create table test(v double); +> ok + +insert into test values (2), (2), (1); +> update count: 3 + +select median(v) from test; +>> 2.0 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 2.0 + +select median(distinct v) from test; +>> 1.50 + +insert into test values (1); +> update count: 1 + +select median(v) from test; +>> 1.50 + +drop table test; +> ok + +create table test(v numeric(1)); +> ok + +insert into test values (2), (2), (1); +> update count: 3 + +select median(v) from test; +>> 2 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 2 + +select median(distinct v) from test; +>> 1.5 + +insert into test values (1); +> update count: 1 + +select median(v) from test; +>> 1.5 + +drop table test; +> ok + +create table test(v time); +> ok + +insert into test values ('20:00:00'), ('20:00:00'), ('10:00:00'); +> update count: 3 + +select median(v) from test; +>> 20:00:00 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 20:00:00 + +select median(distinct v) from test; +>> 15:00:00 + +insert into test values ('10:00:00'); +> update count: 1 + +select median(v) from test; +>> 15:00:00 + +drop table test; +> ok + +create table test(v date); +> ok + +insert into test values ('2000-01-20'), ('2000-01-20'), ('2000-01-10'); +> update count: 3 + +select median(v) from test; +>> 2000-01-20 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 
2000-01-20 + +select median(distinct v) from test; +>> 2000-01-15 + +insert into test values ('2000-01-10'); +> update count: 1 + +select median(v) from test; +>> 2000-01-15 + +drop table test; +> ok + +create table test(v timestamp); +> ok + +insert into test values ('2000-01-20 20:00:00'), ('2000-01-20 20:00:00'), ('2000-01-10 10:00:00'); +> update count: 3 + +select median(v) from test; +>> 2000-01-20 20:00:00 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 2000-01-20 20:00:00 + +select median(distinct v) from test; +>> 2000-01-15 15:00:00 + +insert into test values ('2000-01-10 10:00:00'); +> update count: 1 + +select median(v) from test; +>> 2000-01-15 15:00:00 + +delete from test; +> update count: 5 + +insert into test values ('2000-01-20 20:00:00'), ('2000-01-21 20:00:00'); +> update count: 2 + +select median(v) from test; +>> 2000-01-21 08:00:00 + +insert into test values ('-2000-01-10 10:00:00'), ('-2000-01-10 10:00:01'); +> update count: 2 + +select percentile_cont(0.16) within group (order by v) from test; +>> -2000-01-10 10:00:00.48 + +drop table test; +> ok + +create table test(v timestamp with time zone); +> ok + +insert into test values ('2000-01-20 20:00:00+04'), ('2000-01-20 20:00:00+04'), ('2000-01-10 10:00:00+02'); +> update count: 3 + +select median(v) from test; +>> 2000-01-20 20:00:00+04 + +insert into test values (null); +> update count: 1 + +select median(v) from test; +>> 2000-01-20 20:00:00+04 + +select median(distinct v) from test; +>> 2000-01-15 15:00:00+03 + +insert into test values ('2000-01-10 10:00:00+02'); +> update count: 1 + +select median(v) from test; +>> 2000-01-15 15:00:00+03 + +delete from test; +> update count: 5 + +insert into test values ('2000-01-20 20:00:00+10:15:15'), ('2000-01-21 20:00:00-09'); +> update count: 2 + +select median(v) from test; +>> 2000-01-21 08:00:00.5+00:37:37 + +delete from test; +> update count: 2 + +insert into test values ('-2000-01-20 20:00:00+10:15:15'), 
('-2000-01-21 20:00:00-09'); +> update count: 2 + +select median(v) from test; +>> -2000-01-21 08:00:00.5+00:37:37 + +drop table test; +> ok + +create table test(v interval day to second); +> ok + +insert into test values ('0 1'), ('0 2'), ('0 2'), ('0 2'), ('-0 1'), ('-0 1'); +> update count: 6 + +select median (v) from test; +>> INTERVAL '0 01:30:00' DAY TO SECOND + +drop table test; +> ok + +-- with group by +create table test(name varchar, "VALUE" int); +> ok + +insert into test values ('Group 2A', 10), ('Group 2A', 10), ('Group 2A', 20), + ('Group 1X', 40), ('Group 1X', 50), ('Group 3B', null); +> update count: 6 + +select name, median("VALUE") from test group by name order by name; +> NAME MEDIAN("VALUE") +> -------- --------------- +> Group 1X 45.0 +> Group 2A 10 +> Group 3B null +> rows (ordered): 3 + +drop table test; +> ok + +-- with filter +create table test(v int); +> ok + +insert into test values (20), (20), (10); +> update count: 3 + +select median(v) from test where v <> 20; +>> 10 + +create index test_idx on test(v asc); +> ok + +select median(v) from test where v <> 20; +>> 10 + +drop table test; +> ok + +-- two-column index +create table test(v int, v2 int); +> ok + +create index test_idx on test(v, v2); +> ok + +insert into test values (20, 1), (10, 2), (20, 3); +> update count: 3 + +select median(v) from test; +>> 20 + +drop table test; +> ok + +-- not null column +create table test (v int not null); +> ok + +create index test_idx on test(v desc); +> ok + +select median(v) from test; +>> null + +insert into test values (10), (20); +> update count: 2 + +select median(v) from test; +>> 15.0 + +insert into test values (20), (10), (20); +> update count: 3 + +select median(v) from test; +>> 20 + +drop table test; +> ok + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (10), (20), (30), (40), (50), (60), (70), (80), (90), (100), (110), (120); +> update count: 12 + +select median(v), median(v) filter (where v >= 
40) from test where v <= 100; +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- +> 55.0 70 +> rows: 1 + +create index test_idx on test(v); +> ok + +select median(v), median(v) filter (where v >= 40) from test where v <= 100; +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- +> 55.0 70 +> rows: 1 + +select median(v), median(v) filter (where v >= 40) from test; +> MEDIAN(V) MEDIAN(V) FILTER (WHERE V >= 40) +> --------- -------------------------------- +> 65.0 80 +> rows: 1 + +drop table test; +> ok + +-- with filter and group by + +create table test(dept varchar, amount int); +> ok + +insert into test values + ('First', 10), ('First', 10), ('First', 20), ('First', 30), ('First', 30), + ('Second', 5), ('Second', 4), ('Second', 20), ('Second', 22), ('Second', 300), + ('Third', 3), ('Third', 100), ('Third', 150), ('Third', 170), ('Third', 400); +> update count: 15 + +select dept, median(amount) from test group by dept order by dept; +> DEPT MEDIAN(AMOUNT) +> ------ -------------- +> First 20 +> Second 20 +> Third 150 +> rows (ordered): 3 + +select dept, median(amount) filter (where amount >= 20) from test group by dept order by dept; +> DEPT MEDIAN(AMOUNT) FILTER (WHERE AMOUNT >= 20) +> ------ ------------------------------------------ +> First 30 +> Second 22 +> Third 160.0 +> rows (ordered): 3 + +select dept, median(amount) filter (where amount >= 20) from test + where (amount < 200) group by dept order by dept; +> DEPT MEDIAN(AMOUNT) FILTER (WHERE AMOUNT >= 20) +> ------ ------------------------------------------ +> First 30 +> Second 21.0 +> Third 150 +> rows (ordered): 3 + +drop table test; +> ok + +create table test(g int, v int); +> ok + +insert into test values (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (1, 10), + (2, 10), (2, 20), (2, 30), (2, 100); +> update count: 14 + +select + percentile_cont(0.05) within group (order by v) c05a, + 
percentile_cont(0.05) within group (order by v desc) c05d, + percentile_cont(0.5) within group (order by v) c50, + percentile_cont(0.5) within group (order by v desc) c50d, + percentile_cont(0.95) within group (order by v) c95a, + percentile_cont(0.95) within group (order by v desc) c95d, + g from test group by g; +> C05A C05D C50 C50D C95A C95D G +> ----- ----- ---- ---- ----- ----- - +> 1.45 9.55 5.5 5.5 9.55 1.45 1 +> 11.50 89.50 25.0 25.0 89.50 11.50 2 +> rows: 2 + +select + percentile_disc(0.05) within group (order by v) d05a, + percentile_disc(0.05) within group (order by v desc) d05d, + percentile_disc(0.5) within group (order by v) d50, + percentile_disc(0.5) within group (order by v desc) d50d, + percentile_disc(0.95) within group (order by v) d95a, + percentile_disc(0.95) within group (order by v desc) d95d, + g from test group by g; +> D05A D05D D50 D50D D95A D95D G +> ---- ---- --- ---- ---- ---- - +> 1 10 5 6 10 1 1 +> 10 100 20 30 100 10 2 +> rows: 2 + +select + percentile_disc(0.05) within group (order by v) over (partition by g order by v) d05a, + percentile_disc(0.05) within group (order by v desc) over (partition by g order by v) d05d, + percentile_disc(0.5) within group (order by v) over (partition by g order by v) d50, + percentile_disc(0.5) within group (order by v desc) over (partition by g order by v) d50d, + percentile_disc(0.95) within group (order by v) over (partition by g order by v) d95a, + percentile_disc(0.95) within group (order by v desc) over (partition by g order by v) d95d, + g, v from test order by g, v; +> D05A D05D D50 D50D D95A D95D G V +> ---- ---- --- ---- ---- ---- - --- +> 1 1 1 1 1 1 1 1 +> 1 2 1 2 2 1 1 2 +> 1 3 2 2 3 1 1 3 +> 1 4 2 3 4 1 1 4 +> 1 5 3 3 5 1 1 5 +> 1 6 3 4 6 1 1 6 +> 1 7 4 4 7 1 1 7 +> 1 8 4 5 8 1 1 8 +> 1 9 5 5 9 1 1 9 +> 1 10 5 6 10 1 1 10 +> 10 10 10 10 10 10 2 10 +> 10 20 10 20 20 10 2 20 +> 10 30 20 20 30 10 2 30 +> 10 100 20 30 100 10 2 100 +> rows (ordered): 14 + +delete from test where g <> 1; +> 
update count: 4 + +create index test_idx on test(v); +> ok + +select + percentile_disc(0.05) within group (order by v) d05a, + percentile_disc(0.05) within group (order by v desc) d05d, + percentile_disc(0.5) within group (order by v) d50, + percentile_disc(0.5) within group (order by v desc) d50d, + percentile_disc(0.95) within group (order by v) d95a, + percentile_disc(0.95) within group (order by v desc) d95d + from test; +> D05A D05D D50 D50D D95A D95D +> ---- ---- --- ---- ---- ---- +> 1 10 5 6 10 1 +> rows: 1 + +SELECT percentile_disc(null) within group (order by v) from test; +>> null + +SELECT percentile_disc(-0.01) within group (order by v) from test; +> exception INVALID_VALUE_2 + +SELECT percentile_disc(1.01) within group (order by v) from test; +> exception INVALID_VALUE_2 + +SELECT percentile_disc(v) within group (order by v) from test; +> exception INVALID_VALUE_2 + +drop index test_idx; +> ok + +SELECT percentile_disc(null) within group (order by v) from test; +>> null + +SELECT percentile_disc(-0.01) within group (order by v) from test; +> exception INVALID_VALUE_2 + +SELECT percentile_disc(1.01) within group (order by v) from test; +> exception INVALID_VALUE_2 + +SELECT percentile_disc(v) within group (order by v) from test; +> exception INVALID_VALUE_2 + +drop table test; +> ok + +SELECT PERCENTILE_CONT(0.1) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '10:30:00Z', TIME WITH TIME ZONE '15:30:00+10') T(V); +>> 15:00:00+09 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '10:00:00Z', TIME WITH TIME ZONE '12:00:00+00:00:01') T(V); +>> 11:24:00.7+00 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '23:59:59.999999999Z', TIME WITH TIME ZONE '23:59:59.999999999+00:00:01') T(V); +>> 23:59:59.299999999-00:00:01 + +SELECT PERCENTILE_CONT(0.7) WITHIN GROUP (ORDER BY V) FROM (VALUES TIME WITH TIME ZONE '00:00:00Z', TIME WITH TIME ZONE '00:00:00-00:00:01') T(V); 
+>> 00:00:00.3+00:00:01 + +-- null ordering has no effect, but must be allowed +SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY V NULLS LAST) FROM (VALUES NULL, 1, 3) T(V); +>> 2.0 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql new file mode 100644 index 0000000000..739f1b0772 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/rank.sql @@ -0,0 +1,150 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(V INT) AS VALUES 1, 2, 3, 3, 4, 5, 6; +> ok + +SELECT + RANK(1) WITHIN GROUP (ORDER BY V) R1, + RANK(3) WITHIN GROUP (ORDER BY V) R3, + RANK(7) WITHIN GROUP (ORDER BY V) R7 + FROM TEST; +> R1 R3 R7 +> -- -- -- +> 1 3 8 +> rows: 1 + +SELECT + DENSE_RANK(1) WITHIN GROUP (ORDER BY V) R1, + DENSE_RANK(3) WITHIN GROUP (ORDER BY V) R3, + DENSE_RANK(7) WITHIN GROUP (ORDER BY V) R7 + FROM TEST; +> R1 R3 R7 +> -- -- -- +> 1 3 7 +> rows: 1 + +SELECT + ROUND(PERCENT_RANK(1) WITHIN GROUP (ORDER BY V), 2) R1, + ROUND(PERCENT_RANK(3) WITHIN GROUP (ORDER BY V), 2) R3, + ROUND(PERCENT_RANK(7) WITHIN GROUP (ORDER BY V), 2) R7 + FROM TEST; +> R1 R3 R7 +> --- ---- --- +> 0.0 0.29 1.0 +> rows: 1 + +SELECT + ROUND(CUME_DIST(1) WITHIN GROUP (ORDER BY V), 2) R1, + ROUND(CUME_DIST(3) WITHIN GROUP (ORDER BY V), 2) R3, + ROUND(CUME_DIST(7) WITHIN GROUP (ORDER BY V), 2) R7 + FROM TEST; +> R1 R3 R7 +> ---- ---- --- +> 0.25 0.63 1.0 +> rows: 1 + +SELECT + RANK(1, 1) WITHIN GROUP (ORDER BY V, V + 1) R11, + RANK(1, 2) WITHIN GROUP (ORDER BY V, V + 1) R12, + RANK(1, 3) WITHIN GROUP (ORDER BY V, V + 1) R13 + FROM TEST; +> R11 R12 R13 +> --- --- --- +> 1 1 2 +> rows: 1 + +SELECT + RANK(1, 1) WITHIN GROUP (ORDER BY V, V + 1 DESC) R11, + RANK(1, 2) WITHIN GROUP (ORDER BY V, V + 1 DESC) R12, + RANK(1, 3) WITHIN GROUP (ORDER BY V, V + 1 DESC) R13 + FROM 
TEST; +> R11 R12 R13 +> --- --- --- +> 2 1 1 +> rows: 1 + +SELECT RANK(3) WITHIN GROUP (ORDER BY V) FILTER (WHERE V <> 2) FROM TEST; +>> 2 + +SELECT + RANK(1) WITHIN GROUP (ORDER BY V) OVER () R1, + RANK(3) WITHIN GROUP (ORDER BY V) OVER () R3, + RANK(7) WITHIN GROUP (ORDER BY V) OVER () R7, + V + FROM TEST ORDER BY V; +> R1 R3 R7 V +> -- -- -- - +> 1 3 8 1 +> 1 3 8 2 +> 1 3 8 3 +> 1 3 8 3 +> 1 3 8 4 +> 1 3 8 5 +> 1 3 8 6 +> rows (ordered): 7 + +SELECT + RANK(1) WITHIN GROUP (ORDER BY V) OVER (ORDER BY V) R1, + RANK(3) WITHIN GROUP (ORDER BY V) OVER (ORDER BY V) R3, + RANK(7) WITHIN GROUP (ORDER BY V) OVER (ORDER BY V) R7, + RANK(7) WITHIN GROUP (ORDER BY V) FILTER (WHERE V <> 2) OVER (ORDER BY V) F7, + V + FROM TEST ORDER BY V; +> R1 R3 R7 F7 V +> -- -- -- -- - +> 1 2 2 2 1 +> 1 3 3 2 2 +> 1 3 5 4 3 +> 1 3 5 4 3 +> 1 3 6 5 4 +> 1 3 7 6 5 +> 1 3 8 7 6 +> rows (ordered): 7 + +SELECT + RANK(1) WITHIN GROUP (ORDER BY V) FILTER (WHERE FALSE) R, + DENSE_RANK(1) WITHIN GROUP (ORDER BY V) FILTER (WHERE FALSE) D, + PERCENT_RANK(1) WITHIN GROUP (ORDER BY V) FILTER (WHERE FALSE) P, + CUME_DIST(1) WITHIN GROUP (ORDER BY V) FILTER (WHERE FALSE) C + FROM VALUES (1) T(V); +> R D P C +> - - --- --- +> 1 1 0.0 1.0 +> rows: 1 + +SELECT RANK(1) WITHIN GROUP (ORDER BY V, V) FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT RANK(1, 2) WITHIN GROUP (ORDER BY V) FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT RANK(V) WITHIN GROUP (ORDER BY V) FROM TEST; +> exception INVALID_VALUE_2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES + (1, NULL, NULL), + (2, NULL, 1), + (3, 1, NULL), + (4, 1, 1), + (5, NULL, 3), + (6, 3, NULL), + (7, 3, 3); +> update count: 7 + +SELECT RANK(2, 2) WITHIN GROUP (ORDER BY B ASC NULLS FIRST, C ASC NULLS FIRST) FROM TEST; +>> 6 + +SELECT RANK(2, 2) WITHIN GROUP (ORDER BY B ASC NULLS LAST, C ASC NULLS LAST) FROM TEST; +>> 3 + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql new file mode 100644 index 0000000000..421136363b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgx.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_AVGX(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_AVGX(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> -2.0 +> -1.5 +> 2.0 +> 4.0 +> 5.4 +> 5.666666666666667 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql new file mode 100644 index 0000000000..377e441846 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_avgy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_AVGY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_AVGY(Y, X) OVER (ORDER BY R) +> --------------------------------- +> null +> null +> null +> -3.0 +> -3.0 +> 1.3333333333333333 +> 3.5 +> 4.8 +> 5.833333333333333 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql new file mode 100644 index 0000000000..e8e72f1d46 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_count.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_COUNT(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_COUNT(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> 0 +> 0 +> 0 +> 1 +> 2 +> 3 +> 4 +> 5 +> 6 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql new file mode 100644 index 0000000000..f1c22e3704 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_intercept.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_INTERCEPT(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_INTERCEPT(Y, X) OVER (ORDER BY R) +> -------------------------------------- +> null +> null +> null +> null +> -3.0 +> -1.1261261261261266 +> -1.1885245901639347 +> -1.2096774193548399 +> -0.6775510204081643 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql new file mode 100644 index 0000000000..67517a2099 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_r2.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_R2(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_R2(Y, X) OVER (ORDER BY R) +> ------------------------------- +> null +> null +> null +> null +> 1.0 +> 0.9932432432432432 +> 0.9918032786885245 +> 0.9844913151364764 +> 0.9182051244912443 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql new file mode 100644 index 0000000000..3f2c4688b0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_slope.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_SLOPE(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SLOPE(Y, X) OVER (ORDER BY R) +> ---------------------------------- +> null +> null +> null +> null +> 0.0 +> 1.2297297297297298 +> 1.1721311475409837 +> 1.1129032258064517 +> 1.1489795918367347 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql new file mode 100644 index 0000000000..963dfa560f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxx.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_SXX(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SXX(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.5 +> 74.0 +> 122.0 +> 161.2 +> 163.33333333333331 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql new file mode 100644 index 0000000000..9d6aeca260 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_sxy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT REGR_SXY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SXY(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 91.0 +> 143.0 +> 179.4 +> 187.66666666666666 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql new file mode 100644 index 0000000000..9478b4f483 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/regr_syy.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT REGR_SYY(Y, X) OVER (ORDER BY R) FROM (VALUES + (1, NULL, 1), + (2, 1, NULL), + (3, NULL, NULL), + (4, -3, -2), + (5, -3, -1), + (6, 10, 9), + (7, 10, 10), + (8, 10, 11), + (9, 11, 7) +) T(R, Y, X) ORDER BY R; +> REGR_SYY(Y, X) OVER (ORDER BY R) +> -------------------------------- +> null +> null +> null +> 0.0 +> 0.0 +> 112.66666666666669 +> 169.00000000000003 +> 202.80000000000004 +> 234.83333333333337 +> rows (ordered): 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_pop.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/stddev_samp.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql new file mode 100644 index 0000000000..f2d794076f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/sum.sql @@ -0,0 +1,232 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select sum(cast(x as int)) from system_range(2147483547, 2147483637); +>> 195421006872 + +select sum(x) from system_range(9223372036854775707, 9223372036854775797); +>> 839326855353784593432 + +select sum(cast(100 as tinyint)) from system_range(1, 1000); +>> 100000 + +select sum(cast(100 as smallint)) from system_range(1, 1000); +>> 100000 + +-- with filter condition + +create table test(v int); +> ok + +insert into test values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12); +> update count: 12 + +select sum(v), sum(v) filter (where v >= 4) from test where v <= 10; +> SUM(V) SUM(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- +> 55 49 +> rows: 1 + +create index test_idx on test(v); +> ok + +select sum(v), sum(v) filter (where v >= 4) from test where v <= 10; +> SUM(V) SUM(V) FILTER (WHERE V >= 4) +> ------ ---------------------------- +> 55 49 +> rows: 1 + +insert into test values (1), (2), (8); +> update count: 3 + +select sum(v), sum(all v), sum(distinct v) from test; +> SUM(V) 
SUM(V) SUM(DISTINCT V) +> ------ ------ --------------- +> 89 89 78 +> rows: 1 + +drop table test; +> ok + +create table test(v interval day to second); +> ok + +insert into test values ('0 1'), ('0 2'), ('0 2'), ('0 2'), ('-0 1'), ('-0 1'); +> update count: 6 + +select sum(v) from test; +>> INTERVAL '0 05:00:00' DAY TO SECOND + +drop table test; +> ok + +SELECT X, COUNT(*), SUM(COUNT(*)) OVER() FROM VALUES (1), (1), (1), (1), (2), (2), (3) T(X) GROUP BY X; +> X COUNT(*) SUM(COUNT(*)) OVER () +> - -------- --------------------- +> 1 4 7 +> 2 2 7 +> 3 1 7 +> rows: 3 + +CREATE TABLE TEST(ID INT); +> ok + +SELECT SUM(ID) FROM TEST; +>> null + +SELECT SUM(ID) OVER () FROM TEST; +> SUM(ID) OVER () +> --------------- +> rows: 0 + +DROP TABLE TEST; +> ok + +SELECT + ID, + SUM(ID) OVER (ORDER BY ID) S, + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) S_U_C, + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) S_C_U, + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) S_U_U + FROM (SELECT X ID FROM SYSTEM_RANGE(1, 8)); +> ID S S_U_C S_C_U S_U_U +> -- -- ----- ----- ----- +> 1 1 1 36 36 +> 2 3 3 35 36 +> 3 6 6 33 36 +> 4 10 10 30 36 +> 5 15 15 26 36 +> 6 21 21 21 36 +> 7 28 28 15 36 +> 8 36 36 8 36 +> rows: 8 + +SELECT I, V, SUM(V) OVER W S, SUM(DISTINCT V) OVER W D FROM + VALUES (1, 1), (2, 1), (3, 1), (4, 1), (5, 2), (6, 2), (7, 3) T(I, V) + WINDOW W AS (ORDER BY I); +> I V S D +> - - -- - +> 1 1 1 1 +> 2 1 2 1 +> 3 1 3 1 +> 4 1 4 1 +> 5 2 6 3 +> 6 2 8 3 +> 7 3 11 6 +> rows: 7 + +SELECT * FROM (SELECT SUM(V) OVER (ORDER BY V ROWS BETWEEN CURRENT ROW AND CURRENT ROW) S FROM (VALUES 1, 2, 2) T(V)); +> S +> - +> 1 +> 2 +> 2 +> rows: 3 + +SELECT V, SUM(V) FILTER (WHERE V <> 1) OVER (ROWS CURRENT ROW) S FROM (VALUES 1, 2, 2) T(V); +> V S +> - ---- +> 1 null +> 2 2 +> 2 2 +> rows: 3 + +SELECT V, + SUM(V) FILTER (WHERE V <> 1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) 
S, + SUM(V) FILTER (WHERE V <> 1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) T + FROM (VALUES 1, 2, 2) T(V); +> V S T +> - - - +> 1 4 2 +> 2 4 4 +> 2 4 4 +> rows: 3 + + + +CREATE TABLE S( + B BOOLEAN, + N1 TINYINT, + N2 SMALLINT, + N4 INTEGER, + N8 BIGINT, + N NUMERIC(10, 2), + F4 REAL, + F8 DOUBLE PRECISION, + D DECFLOAT(10), + I1 INTERVAL YEAR(3), + I2 INTERVAL MONTH(3), + I3 INTERVAL DAY(3), + I4 INTERVAL HOUR(3), + I5 INTERVAL MINUTE(3), + I6 INTERVAL SECOND(2), + I7 INTERVAL YEAR(3) TO MONTH, + I8 INTERVAL DAY(3) TO HOUR, + I9 INTERVAL DAY(3) TO MINUTE, + I10 INTERVAL DAY(3) TO SECOND(2), + I11 INTERVAL HOUR(3) TO MINUTE, + I12 INTERVAL HOUR(3) TO SECOND(2), + I13 INTERVAL MINUTE(3) TO SECOND(2)); +> ok + +CREATE TABLE A AS SELECT + SUM(B) B, + SUM(N1) N1, + SUM(N2) N2, + SUM(N4) N4, + SUM(N8) N8, + SUM(N) N, + SUM(F4) F4, + SUM(F8) F8, + SUM(D) D, + SUM(I1) I1, + SUM(I2) I2, + SUM(I3) I3, + SUM(I4) I4, + SUM(I5) I5, + SUM(I6) I6, + SUM(I7) I7, + SUM(I8) I8, + SUM(I9) I9, + SUM(I10) I10, + SUM(I11) I11, + SUM(I12) I12, + SUM(I13) I13 + FROM S; +> ok + +SELECT COLUMN_NAME, DATA_TYPE_SQL('PUBLIC', 'A', 'TABLE', DTD_IDENTIFIER) TYPE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'A' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME TYPE +> ----------- -------------------------------- +> B BIGINT +> N1 BIGINT +> N2 BIGINT +> N4 BIGINT +> N8 NUMERIC(29) +> N NUMERIC(20, 2) +> F4 DOUBLE PRECISION +> F8 DECFLOAT(27) +> D DECFLOAT(20) +> I1 INTERVAL YEAR(18) +> I2 INTERVAL MONTH(18) +> I3 INTERVAL DAY(18) +> I4 INTERVAL HOUR(18) +> I5 INTERVAL MINUTE(18) +> I6 INTERVAL SECOND(18) +> I7 INTERVAL YEAR(18) TO MONTH +> I8 INTERVAL DAY(18) TO HOUR +> I9 INTERVAL DAY(18) TO MINUTE +> I10 INTERVAL DAY(18) TO SECOND(2) +> I11 INTERVAL HOUR(18) TO MINUTE +> I12 INTERVAL HOUR(18) TO SECOND(2) +> I13 INTERVAL MINUTE(18) TO SECOND(2) +> rows (ordered): 22 + +DROP TABLE S, A; +> ok + +SELECT SUM(I) FROM (VALUES INTERVAL '999999999999999999' SECOND, INTERVAL '1' SECOND) 
T(I); +> exception NUMERIC_VALUE_OUT_OF_RANGE_1 diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_pop.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/aggregate/var_samp.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql b/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql new file mode 100644 index 0000000000..58d0c52988 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/json/json_array.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON); +>> [10,true,"str",[1,2,3]] + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON ABSENT ON NULL); +>> [10,true,"str",[1,2,3]] + +SELECT JSON_ARRAY(10, TRUE, 'str', NULL, '[1,2,3]' FORMAT JSON NULL ON NULL); +>> [10,true,"str",null,[1,2,3]] + +SELECT JSON_ARRAY(); +>> [] + +SELECT JSON_ARRAY(NULL ON NULL); +>> [] + +SELECT JSON_ARRAY(NULL ABSENT ON NULL); +>> [] + +SELECT JSON_ARRAY(NULL NULL ON NULL); +>> [null] + +CREATE TABLE TEST(ID INT, V VARCHAR); +> ok + +EXPLAIN SELECT JSON_ARRAY(V) FROM TEST; +>> SELECT JSON_ARRAY("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V NULL ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V" NULL ON NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_ARRAY(V FORMAT JSON ABSENT ON NULL) FROM TEST; +>> SELECT JSON_ARRAY("V" FORMAT JSON) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +INSERT INTO TEST VALUES (1, 'null'), (2, '1'), (3, null); +> update count: 3 + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID)); +>> ["null","1"] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) ABSENT ON NULL); +>> ["null","1"] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) NULL ON NULL); +>> ["null","1",null] + +SELECT JSON_ARRAY((SELECT V FROM TEST ORDER BY ID) FORMAT JSON); +>> [null,1,null] + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql b/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql new file mode 100644 index 0000000000..d295f37244 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/json/json_object.sql @@ -0,0 +1,58 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT JSON_OBJECT('key1' : 10, 'key2' VALUE TRUE, KEY 'key3' VALUE 'str', 'key4' : NULL, 'key5' : '[1,2,3]' FORMAT JSON); +>> {"key1":10,"key2":true,"key3":"str","key4":null,"key5":[1,2,3]} + +SELECT JSON_OBJECT('key1' : NULL ABSENT ON NULL); +>> {} + +SELECT JSON_OBJECT('key1' : NULL NULL ON NULL); +>> {"key1":null} + +SELECT JSON_OBJECT(); +>> {} + +SELECT JSON_OBJECT(NULL ON NULL); +>> {} + +SELECT JSON_OBJECT(WITHOUT UNIQUE KEYS); +>> {} + +SELECT JSON_OBJECT('key1' : NULL, 'key1' : 2 NULL ON NULL WITHOUT UNIQUE KEYS); +>> {"key1":null,"key1":2} + +SELECT JSON_OBJECT('key1' : 1, 'key1' : 2 WITH UNIQUE KEYS); +> exception INVALID_VALUE_2 + +SELECT JSON_OBJECT('key1' : 1, 'key1' : 2 NULL ON NULL WITH UNIQUE KEYS); +> exception INVALID_VALUE_2 + +SELECT JSON_OBJECT('key1' : TRUE WITH UNIQUE KEYS); +>> {"key1":true} + +SELECT JSON_OBJECT(NULL : 1); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(V VARCHAR, ABSENT VARCHAR, WITHOUT VARCHAR); +> ok + +EXPLAIN SELECT JSON_OBJECT('name' : V NULL ON NULL WITHOUT UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECT('name': "V") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT('name' : V ABSENT ON NULL WITH UNIQUE KEYS) FROM TEST; +>> SELECT JSON_OBJECT('name': "V" ABSENT ON NULL WITH UNIQUE KEYS) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT(ABSENT : 1) FROM TEST; +>> SELECT JSON_OBJECT("ABSENT": 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT JSON_OBJECT(WITHOUT : 1) FROM TEST; +>> SELECT JSON_OBJECT("WITHOUT": 1) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT JSON_OBJECT(NULL ON NULL WITHOUT); +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql new file mode 
100644 index 0000000000..1e49b93f5a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/abs.sql @@ -0,0 +1,23 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select abs(-1) r1, abs(1) r1b; +> R1 R1B +> -- --- +> 1 1 +> rows: 1 + +select abs(null) vn, abs(-1) r1, abs(1) r2, abs(0) r3, abs(-0.1) r4, abs(0.1) r5; +> VN R1 R2 R3 R4 R5 +> ---- -- -- -- --- --- +> null 1 1 0 0.1 0.1 +> rows: 1 + +select * from table(id int=(1, 2), name varchar=('Hello', 'World')) x order by id; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql new file mode 100644 index 0000000000..d0f493db45 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/acos.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select acos(null) vn, acos(-1) r1; +> VN R1 +> ---- ----------------- +> null 3.141592653589793 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql new file mode 100644 index 0000000000..d7fead3bf5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/asin.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select asin(null) vn, asin(-1) r1; +> VN R1 +> ---- ------------------- +> null -1.5707963267948966 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql new file mode 100644 index 0000000000..e8612f1280 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/atan.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select atan(null) vn, atan(-1) r1; +> VN R1 +> ---- ------------------- +> null -0.7853981633974483 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql new file mode 100644 index 0000000000..b0b117270c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/atan2.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select atan2(null, null) vn, atan2(10, 1) r1; +> VN R1 +> ---- ------------------ +> null 1.4711276743037347 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql new file mode 100644 index 0000000000..da953e9f36 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitand.sql @@ -0,0 +1,79 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select bitand(null, 1) vn, bitand(1, null) vn1, bitand(null, null) vn2, bitand(3, 6) e2; +> VN VN1 VN2 E2 +> ---- ---- ---- -- +> null null null 2 +> rows: 1 + +SELECT BITAND(10, 12); +>> 8 + +SELECT BITNAND(10, 12); +>> -9 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITAND(A, B)), BITNOT(BITNAND(A, B)) FROM TEST; +>> SELECT BITNAND("A", "B"), BITAND("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITAND(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITAND(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITAND(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITAND(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITAND(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITAND(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(65 AS TINYINT), CAST(65 AS SMALLINT), 65, CAST(65 AS BIGINT), X'41', CAST(X'41' AS BINARY(1)) + +EXPLAIN SELECT + BITAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITAND(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITAND(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITAND(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'4100', X'4100', CAST(X'4100' AS BINARY(2)), CAST(X'4100' AS BINARY(2)) + +EXPLAIN SELECT + BITAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITAND(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'41' AS BINARY(1)), CAST(X'41' AS BINARY(1)) + +EXPLAIN SELECT + BITNAND(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITNAND(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITNAND(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITNAND(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITNAND(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNAND(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-66 AS TINYINT), CAST(-66 AS SMALLINT), -66, CAST(-66 AS BIGINT), 
X'be', CAST(X'be' AS BINARY(1)) + +EXPLAIN SELECT + BITNAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNAND(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITNAND(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITNAND(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'beff', X'beff', CAST(X'beff' AS BINARY(2)), CAST(X'beff' AS BINARY(2)) + +EXPLAIN SELECT + BITNAND(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITNAND(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'be' AS BINARY(1)), CAST(X'be' AS BINARY(1)) + +SELECT BITAND('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITAND(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITNAND('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITNAND(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql new file mode 100644 index 0000000000..235b43338d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitcount.sql @@ -0,0 +1,27 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT V, BITCOUNT(V) C FROM (VALUES 0, 10, -1) T(V); +> V C +> -- -- +> -1 32 +> 0 0 +> 10 2 +> rows: 3 + +EXPLAIN SELECT + BITCOUNT(CAST((0xC5 - 0x100) AS TINYINT)), + BITCOUNT(CAST(0xC5 AS SMALLINT)), + BITCOUNT(CAST(0xC5 AS INTEGER)), + BITCOUNT(CAST(0xC5 AS BIGINT)), + BITCOUNT(CAST(X'C5' AS VARBINARY)), + BITCOUNT(CAST(X'C5' AS BINARY)); +>> SELECT CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT), CAST(4 AS BIGINT) + +SELECT BITCOUNT(X'0123456789ABCDEF'); +>> 32 + +SELECT BITCOUNT(X'0123456789ABCDEF33'); +>> 36 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql new file mode 100644 index 0000000000..acea82167c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitget.sql @@ -0,0 +1,30 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT I, + BITGET(CAST((0xC5 - 0x100) AS TINYINT), I), + BITGET(CAST(0xC5 AS SMALLINT), I), + BITGET(CAST(0xC5 AS INTEGER), I), + BITGET(CAST(0xC5 AS BIGINT), I), + BITGET(CAST(X'C5' AS VARBINARY), I), + BITGET(CAST(X'C5' AS BINARY), I) + FROM (VALUES -1, 0, 1, 4, 9, 99) T(I); +> I BITGET(-59, I) BITGET(197, I) BITGET(197, I) BITGET(197, I) BITGET(CAST(X'c5' AS BINARY VARYING), I) BITGET(X'c5', I) +> -- -------------- -------------- -------------- -------------- ---------------------------------------- ---------------- +> -1 FALSE FALSE FALSE FALSE FALSE FALSE +> 0 TRUE TRUE TRUE TRUE TRUE TRUE +> 1 FALSE FALSE FALSE FALSE FALSE FALSE +> 4 FALSE FALSE FALSE FALSE FALSE FALSE +> 9 FALSE FALSE FALSE FALSE FALSE FALSE +> 99 FALSE FALSE FALSE FALSE FALSE FALSE +> rows: 6 + +SELECT X, BITGET(X'1001', X) FROM SYSTEM_RANGE(7, 9); +> X BITGET(X'1001', X) +> - ------------------ +> 7 FALSE +> 8 TRUE +> 9 FALSE +> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql new file mode 100644 index 0000000000..d4c80c244d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitnot.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: Joe Littlejohn +-- + +select bitnot(null) vn, bitnot(0) v1, bitnot(10) v2, bitnot(-10) v3; +> VN V1 V2 V3 +> ---- -- --- -- +> null -1 -11 9 +> rows: 1 + +CREATE TABLE TEST(A BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITNOT(A)), BITNOT(LSHIFT(A, 1)) FROM TEST; +>> SELECT "A", BITNOT(LSHIFT("A", 1)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITNOT(CAST((0xC5 - 0x100) AS TINYINT)), + BITNOT(CAST(0xC5 AS SMALLINT)), + BITNOT(CAST(0xC5 AS INTEGER)), + BITNOT(CAST(0xC5 AS BIGINT)), + BITNOT(CAST(X'C5' AS VARBINARY)), + BITNOT(CAST(X'C5' AS BINARY)); +>> SELECT CAST(58 AS TINYINT), CAST(-198 AS SMALLINT), -198, CAST(-198 AS BIGINT), X'3a', CAST(X'3a' AS BINARY(1)) + +SELECT BITNOT('AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql new file mode 100644 index 0000000000..919484846b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitor.sql @@ -0,0 +1,79 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select bitor(null, 1) vn, bitor(1, null) vn1, bitor(null, null) vn2, bitor(3, 6) e7; +> VN VN1 VN2 E7 +> ---- ---- ---- -- +> null null null 7 +> rows: 1 + +SELECT BITOR(10, 12); +>> 14 + +SELECT BITNOR(10, 12); +>> -15 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITOR(A, B)), BITNOT(BITNOR(A, B)) FROM TEST; +>> SELECT BITNOR("A", "B"), BITOR("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-25 AS TINYINT), CAST(231 AS SMALLINT), 231, CAST(231 AS BIGINT), X'e7', CAST(X'e7' AS BINARY(1)) + +EXPLAIN SELECT + BITOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'e701', X'e701', CAST(X'e701' AS BINARY(2)), CAST(X'e701' AS BINARY(2)) + +EXPLAIN SELECT + BITOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'e7' AS BINARY(1)), CAST(X'e7' AS BINARY(1)) + +EXPLAIN SELECT + BITNOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITNOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITNOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITNOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITNOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(24 AS TINYINT), CAST(-232 AS SMALLINT), -232, CAST(-232 AS BIGINT), X'18', CAST(X'18' AS 
BINARY(1)) + +EXPLAIN SELECT + BITNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITNOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITNOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'18fe', X'18fe', CAST(X'18fe' AS BINARY(2)), CAST(X'18fe' AS BINARY(2)) + +EXPLAIN SELECT + BITNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'18' AS BINARY(1)), CAST(X'18' AS BINARY(1)) + +SELECT BITOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITOR(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITNOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITNOR(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql new file mode 100644 index 0000000000..a26692f7a3 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/bitxor.sql @@ -0,0 +1,79 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select bitxor(null, 1) vn, bitxor(1, null) vn1, bitxor(null, null) vn2, bitxor(3, 6) e5; +> VN VN1 VN2 E5 +> ---- ---- ---- -- +> null null null 5 +> rows: 1 + +SELECT BITXOR(10, 12); +>> 6 + +SELECT BITXNOR(10, 12); +>> -7 + +CREATE TABLE TEST(A BIGINT, B BIGINT); +> ok + +EXPLAIN SELECT BITNOT(BITXOR(A, B)), BITNOT(BITXNOR(A, B)) FROM TEST; +>> SELECT BITXNOR("A", "B"), BITXOR("A", "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + BITXOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITXOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITXOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITXOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITXOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(-90 AS TINYINT), CAST(166 AS SMALLINT), 166, CAST(166 AS BIGINT), X'a6', CAST(X'a6' AS BINARY(1)) + +EXPLAIN SELECT + BITXOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITXOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITXOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'a601', X'a601', CAST(X'a601' AS BINARY(2)), CAST(X'a601' AS BINARY(2)) + +EXPLAIN SELECT + BITXOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITXOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'a6' AS BINARY(1)), CAST(X'a6' AS BINARY(1)) + +EXPLAIN SELECT + BITXNOR(CAST((0xC5 - 0x100) AS TINYINT), CAST(0x63 AS TINYINT)), + BITXNOR(CAST(0xC5 AS SMALLINT), CAST(0x63 AS SMALLINT)), + BITXNOR(CAST(0xC5 AS INTEGER), CAST(0x63 AS INTEGER)), + BITXNOR(CAST(0xC5 AS BIGINT), CAST(0x63 AS BIGINT)), + BITXNOR(CAST(X'C5' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXNOR(CAST(X'C5' AS BINARY), CAST(X'63' AS BINARY)); +>> SELECT CAST(89 AS TINYINT), CAST(-167 AS SMALLINT), -167, CAST(-167 AS 
BIGINT), X'59', CAST(X'59' AS BINARY(1)) + +EXPLAIN SELECT + BITXNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS VARBINARY)), + BITXNOR(CAST(X'63' AS VARBINARY), CAST(X'C501' AS VARBINARY)), + BITXNOR(CAST(X'C501' AS BINARY(2)), CAST(X'63' AS BINARY)), + BITXNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS BINARY(2))); +>> SELECT X'59fe', X'59fe', CAST(X'59fe' AS BINARY(2)), CAST(X'59fe' AS BINARY(2)) + +EXPLAIN SELECT + BITXNOR(CAST(X'C501' AS VARBINARY), CAST(X'63' AS BINARY)), + BITXNOR(CAST(X'63' AS BINARY), CAST(X'C501' AS VARBINARY)); +>> SELECT CAST(X'59' AS BINARY(1)), CAST(X'59' AS BINARY(1)) + +SELECT BITXOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITXOR(1, X'AA'); +> exception INVALID_VALUE_2 + +SELECT BITXNOR('AA', 'BB'); +> exception INVALID_VALUE_2 + +SELECT BITXNOR(1, X'AA'); +> exception INVALID_VALUE_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql new file mode 100644 index 0000000000..7bcb48fa03 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/ceil.sql @@ -0,0 +1,46 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select ceil(null) vn, ceil(1) v1, ceiling(1.1) v2, ceil(-1.1) v3, ceiling(1.9) v4, ceiling(-1.9) v5; +> VN V1 V2 V3 V4 V5 +> ---- -- -- -- -- -- +> null 1 2 -1 2 -1 +> rows: 1 + +SELECT CEIL(1.5), CEIL(-1.5), CEIL(1.5) IS OF (NUMERIC); +> 2 -1 TRUE +> - -- ---- +> 2 -1 TRUE +> rows: 1 + +SELECT CEIL(1.5::DOUBLE), CEIL(-1.5::DOUBLE), CEIL(1.5::DOUBLE) IS OF (DOUBLE); +> 2.0 -1.0 TRUE +> --- ---- ---- +> 2.0 -1.0 TRUE +> rows: 1 + +SELECT CEIL(1.5::REAL), CEIL(-1.5::REAL), CEIL(1.5::REAL) IS OF (REAL); +> 2.0 -1.0 TRUE +> --- ---- ---- +> 2.0 -1.0 TRUE +> rows: 1 + +SELECT CEIL('a'); +> exception INVALID_VALUE_2 + +CREATE TABLE S(N NUMERIC(5, 2)); +> ok + +CREATE TABLE T AS SELECT CEIL(N) C FROM S; +> ok + +SELECT DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +> DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> --------- ----------------- ------------- +> NUMERIC 4 0 +> rows: 1 + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql new file mode 100644 index 0000000000..7b0ef7bff1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/compress.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CALL COMPRESS(X'000000000000000000000000'); +>> X'010c010000c000010000' + +CALL COMPRESS(X'000000000000000000000000', 'NO'); +>> X'000c000000000000000000000000' + +CALL COMPRESS(X'000000000000000000000000', 'LZF'); +>> X'010c010000c000010000' + +CALL COMPRESS(X'000000000000000000000000', 'DEFLATE'); +>> X'020c789c6360400000000c0001' + +CALL COMPRESS(X'000000000000000000000000', 'UNKNOWN'); +> exception UNSUPPORTED_COMPRESSION_ALGORITHM_1 + +CALL COMPRESS(NULL); +>> null + +CALL COMPRESS(X'00', NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql new file mode 100644 index 0000000000..fe649580c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cos.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select cos(null) vn, cos(-1) r1; +> VN R1 +> ---- ------------------ +> null 0.5403023058681398 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql new file mode 100644 index 0000000000..0b7b614aab --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cosh.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL COSH(1); +>> 1.543080634815244 + +CALL COSH(50); +>> 2.592352764293536E21 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql new file mode 100644 index 0000000000..74963e24b5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/cot.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select cot(null) vn, cot(-1) r1; +> VN R1 +> ---- ------------------- +> null -0.6420926159343306 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql new file mode 100644 index 0000000000..b9eeb8fef9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/decrypt.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +call utf8tostring(decrypt('AES', X'00000000000000000000000000000000', X'dbd42d55d4b923c4b03eba0396fac98e')); +>> Hello World Test + +call utf8tostring(decrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), encrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), stringtoutf8('Hello World Test')))); +>> Hello World Test diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql new file mode 100644 index 0000000000..4b4a130769 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/degrees.sql @@ -0,0 +1,14 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- Truncate least significant digits because implementations returns slightly +-- different results depending on Java version +select degrees(null) vn, truncate(degrees(1), 10) v1, truncate(degrees(1.1), 10) v2, + truncate(degrees(-1.1), 10) v3, truncate(degrees(1.9), 10) v4, + truncate(degrees(-1.9), 10) v5; +> VN V1 V2 V3 V4 V5 +> ---- ------------ ------------- -------------- -------------- --------------- +> null 57.295779513 63.0253574643 -63.0253574643 108.8619810748 -108.8619810748 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql new file mode 100644 index 0000000000..00dff40c67 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/encrypt.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +call encrypt('AES', X'00000000000000000000000000000000', stringtoutf8('Hello World Test')); +>> X'dbd42d55d4b923c4b03eba0396fac98e' + +CALL ENCRYPT('XTEA', X'00', STRINGTOUTF8('Test')); +>> X'8bc9a4601b3062692a72a5941072425f' + +call encrypt('XTEA', X'000102030405060708090a0b0c0d0e0f', X'4142434445464748'); +>> X'dea0b0b40966b0669fbae58ab503765f' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql new file mode 100644 index 0000000000..365c31828d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/exp.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select exp(null) vn, left(exp(1), 4) v1, left(exp(1.1), 4) v2, left(exp(-1.1), 4) v3, left(exp(1.9), 4) v4, left(exp(-1.9), 4) v5; +> VN V1 V2 V3 V4 V5 +> ---- ---- ---- ---- ---- ---- +> null 2.71 3.00 0.33 6.68 0.14 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql new file mode 100644 index 0000000000..2b8416c2a6 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/expand.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL EXPAND(X'000c000000000000000000000000'); +>> X'000000000000000000000000' + +CALL EXPAND(X'010c010000c000010000'); +>> X'000000000000000000000000' + +CALL EXPAND(X'020c789c6360400000000c0001'); +>> X'000000000000000000000000' + +CALL EXPAND(X''); +> exception COMPRESSION_ERROR + +CALL EXPAND(NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql new file mode 100644 index 0000000000..c9e17ef349 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/floor.sql @@ -0,0 +1,43 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select floor(null) vn, floor(1) v1, floor(1.1) v2, floor(-1.1) v3, floor(1.9) v4, floor(-1.9) v5; +> VN V1 V2 V3 V4 V5 +> ---- -- -- -- -- -- +> null 1 1 -2 1 -2 +> rows: 1 + +SELECT FLOOR(1.5), FLOOR(-1.5), FLOOR(1.5) IS OF (NUMERIC); +> 1 -2 TRUE +> - -- ---- +> 1 -2 TRUE +> rows: 1 + +SELECT FLOOR(1.5::DOUBLE), FLOOR(-1.5::DOUBLE), FLOOR(1.5::DOUBLE) IS OF (DOUBLE); +> 1.0 -2.0 TRUE +> --- ---- ---- +> 1.0 -2.0 TRUE +> rows: 1 + +SELECT FLOOR(1.5::REAL), FLOOR(-1.5::REAL), FLOOR(1.5::REAL) IS OF (REAL); +> 1.0 -2.0 TRUE +> --- ---- ---- +> 1.0 -2.0 TRUE +> rows: 1 + +CREATE TABLE S(N NUMERIC(5, 2)); +> ok + +CREATE TABLE T AS SELECT FLOOR(N) F FROM S; +> ok + +SELECT DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +> DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> --------- ----------------- ------------- +> NUMERIC 4 0 +> rows: 1 + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql new file mode 100644 index 0000000000..466d38225e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/hash.sql @@ -0,0 +1,85 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +call hash('SHA256', 'Hello', 0); +> exception INVALID_VALUE_2 + +call hash('SHA256', 'Hello'); +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' + +call hash('SHA256', 'Hello', 1); +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' + +call hash('SHA256', stringtoutf8('Hello'), 1); +>> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' + +CALL HASH('SHA256', 'Password', 1000); +>> X'c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0' + +CALL HASH('SHA256', STRINGTOUTF8('Password'), 1000); +>> X'c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0' + +call hash('unknown', 'Hello', 1); +> exception INVALID_VALUE_2 + +CALL HASH('MD5', '****** Message digest test ******', 1); +>> X'ccd7ee53b52575b5b04fcadf1637fd30' + +CALL HASH('MD5', '****** Message digest test ******', 10); +>> X'b9e4b74ee3c41f646ee0ba42335efe20' + +CALL HASH('SHA-1', '****** Message digest test ******', 1); +>> X'b9f28134b8c9aef59e1257eca89e3e5101234694' + +CALL HASH('SHA-1', '****** Message digest test ******', 10); +>> X'e69a31beb996b59700aed3e6fbf9c29791efbc15' + +CALL HASH('SHA-224', '****** Message digest test ******', 1); +>> X'7bd9bf319961cfdb7fc9351debbcc8a80143d5d0909e8cbccd8b5f0f' + +CALL HASH('SHA-224', '****** Message digest test ******', 10); +>> X'6685a394158763e754332f0adec3ed43866dd0ba8f47624d0521fd1e' + +CALL HASH('SHA-256', '****** Message digest test ******', 1); +>> X'4e732bc9788b0958022403dbe42b4b79bfa270f05fbe914b4ecca074635f3f5c' + +CALL HASH('SHA-256', '****** Message digest test ******', 10); +>> X'93731025337904f6bc117ca5d3adc960ee2070c7a9666a5499af28546520da85' + +CALL HASH('SHA-384', '****** Message digest test ******', 1); +>> X'a37baa07c0cd5bc8dbb510b3fc3fa6f5ca539c847d8ee382d1d045b405a3d43dc4a898fcc31930cf7a80e2a79af82d4e' + +CALL HASH('SHA-384', '****** Message digest test ******', 10); +>> 
X'03cc3a769871ab13a64c387c44853efafe016180ab6ea70565924ccabe62c8884b2f2e1a53c1a79db184c112c9082bc2' + +CALL HASH('SHA-512', '****** Message digest test ******', 1); +>> X'88eb2488557eaf7e4da394b6f4ba08d4c781b9f2b9c9d150195ac7f7fbee7819923476b5139abc98f252b07649ade2471be46e2625b8003d0af5a8a50ca2915f' + +CALL HASH('SHA-512', '****** Message digest test ******', 10); +>> X'ab3bb7d9447f87a07379e9219c79da2e05122ff87bf25a5e553a7e44af7ac724ed91fb1fe5730d4bb584c367fc2232680f5c45b3863c6550fcf27b4473d05695' + +CALL HASH('SHA3-224', '****** Message digest test ******', 1); +>> X'cb91fec022d97ed63622d382e36e336b65a806888416a549fb4db390' + +CALL HASH('SHA3-224', '****** Message digest test ******', 10); +>> X'0d4dd581ed9b188341ec413988cb7c6bf15d178b151b543c91031ae6' + +CALL HASH('SHA3-256', '****** Message digest test ******', 1); +>> X'91db71f65f3c5b19370e0d9fd947da52695b28c9b440a1324d11e8076643c21f' + +CALL HASH('SHA3-256', '****** Message digest test ******', 10); +>> X'ed62484d8ac54550292241698dd5480de061fc23ab12e3e941a96ec7d3afd70f' + +CALL HASH('SHA3-384', '****** Message digest test ******', 1); +>> X'c2d5e516ea10a82a3d3a8c5fe8838ca77d402490f33ef813be9af168fd2cdf8f6daa7e9cf79565f3987f897d4087ce26' + +CALL HASH('SHA3-384', '****** Message digest test ******', 10); +>> X'9f5ac0eae232746826ea59196b455267e3aaa492047d5a2616c4a8aa325216f706dc7203fcbe71ee7e3357e0f3d93ee3' + +CALL HASH('SHA3-512', '****** Message digest test ******', 1); +>> X'08811cf7409957b59bb5ba090edbef9a35c3b7a4db5d5760f15f2b14453f9cacba30b9744d4248c742aa47f3d9943cf99e7d78d1700d4ccf5bc88b394bc00603' + +CALL HASH('SHA3-512', '****** Message digest test ******', 10); +>> X'37f2a9dbc6cd7a5122cc84383843566dd7195ed8d868b1c10aca2b706667c7bb0b4f00eab81d9e87b6f355e3afe0bccd57ba04aa121d0ef0c0bdea2ff8f95513' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql new file mode 100644 index 0000000000..67b65727dc --- /dev/null +++ 
b/h2/src/test/org/h2/test/scripts/functions/numeric/length.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select bit_length(null) en, bit_length('') e0, bit_length('ab') e32; +> EN E0 E32 +> ---- -- --- +> null 0 16 +> rows: 1 + +select length(null) en, length('') e0, length('ab') e2; +> EN E0 E2 +> ---- -- -- +> null 0 2 +> rows: 1 + +select char_length(null) en, char_length('') e0, char_length('ab') e2; +> EN E0 E2 +> ---- -- -- +> null 0 2 +> rows: 1 + +select character_length(null) en, character_length('') e0, character_length('ab') e2; +> EN E0 E2 +> ---- -- -- +> null 0 2 +> rows: 1 + +select octet_length(null) en, octet_length('') e0, octet_length('ab') e4; +> EN E0 E4 +> ---- -- -- +> null 0 2 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql new file mode 100644 index 0000000000..baf60a6c76 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/log.sql @@ -0,0 +1,100 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT LN(NULL), LOG(NULL, NULL), LOG(NULL, 2); +> CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) +> ------------------------------ ------------------------------ ------------------------------ +> null null null +> rows: 1 + +SELECT LOG(2, NULL), LOG10(NULL), LOG(NULL); +> CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) CAST(NULL AS DOUBLE PRECISION) +> ------------------------------ ------------------------------ ------------------------------ +> null null null +> rows: 1 + +SELECT LN(0); +> exception INVALID_VALUE_2 + +SELECT LN(-1); +> exception INVALID_VALUE_2 + +SELECT LOG(0, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(-1, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(1, 2); +> exception INVALID_VALUE_2 + +SELECT LOG(2, 0); +> exception INVALID_VALUE_2 + +SELECT LOG(2, -1); +> exception INVALID_VALUE_2 + +SELECT LOG(0); +> exception INVALID_VALUE_2 + +SELECT LOG(-1); +> exception INVALID_VALUE_2 + +SELECT LOG10(0); +> exception INVALID_VALUE_2 + +SELECT LOG10(-1); +> exception INVALID_VALUE_2 + +SELECT LN(0.5) VH, LN(1) V1, LN(2) V2, LN(3) V3, LN(10) V10; +> VH V1 V2 V3 V10 +> ------------------- --- ------------------ ------------------ ----------------- +> -0.6931471805599453 0.0 0.6931471805599453 1.0986122886681098 2.302585092994046 +> rows: 1 + +SELECT LOG(2, 0.5) VH, LOG(2, 1) V1, LOG(2, 2) V2, LOG(2, 3) V3, LOG(2, 10) V10, LOG(2, 64) V64; +> VH V1 V2 V3 V10 V64 +> ---- --- --- ------------------ ------------------ --- +> -1.0 0.0 1.0 1.5849625007211563 3.3219280948873626 6.0 +> rows: 1 + +SELECT LOG(2.7182818284590452, 10); +>> 2.302585092994046 + +SELECT LOG(10, 3); +>> 0.47712125471966244 + +SELECT LOG(0.5) VH, LOG(1) V1, LOG(2) V2, LOG(3) V3, LOG(10) V10; +> VH V1 V2 V3 V10 +> ------------------- --- ------------------ ------------------ ----------------- +> -0.6931471805599453 0.0 0.6931471805599453 1.0986122886681098 2.302585092994046 +> rows: 1 + 
+SELECT LOG10(0.5) VH, LOG10(1) V1, LOG10(2) V2, LOG10(3) V3, LOG10(10) V10, LOG10(100) V100; +> VH V1 V2 V3 V10 V100 +> ------------------- --- ------------------ ------------------- --- ---- +> -0.3010299956639812 0.0 0.3010299956639812 0.47712125471966244 1.0 2.0 +> rows: 1 + +SET MODE PostgreSQL; +> ok + +SELECT LOG(0.5) VH, LOG(1) V1, LOG(2) V2, LOG(3) V3, LOG(10) V10, LOG(100) V100; +> VH V1 V2 V3 V10 V100 +> ------------------- --- ------------------ ------------------- --- ---- +> -0.3010299956639812 0.0 0.3010299956639812 0.47712125471966244 1.0 2.0 +> rows: 1 + +SET MODE MSSQLServer; +> ok + +SELECT LOG(0.5, 2) VH, LOG(1, 2) V1, LOG(2, 2) V2, LOG(3, 2) V3, LOG(10, 2) V10, LOG(64, 2) V64; +> VH V1 V2 V3 V10 V64 +> ---- --- --- ------------------ ------------------ --- +> -1.0 0.0 1.0 1.5849625007211563 3.3219280948873626 6.0 +> rows: 1 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql new file mode 100644 index 0000000000..7bb7e44e06 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/lshift.sql @@ -0,0 +1,109 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select lshift(null, 1) vn, lshift(1, null) vn1, lshift(null, null) vn2, lshift(3, 6) v1, lshift(3,0) v2; +> VN VN1 VN2 V1 V2 +> ---- ---- ---- --- -- +> null null null 192 3 +> rows: 1 + +SELECT I, + LSHIFT(CAST(-128 AS TINYINT), I), LSHIFT(CAST(1 AS TINYINT), I), + ULSHIFT(CAST(-128 AS TINYINT), I), ULSHIFT(CAST(1 AS TINYINT), I) + FROM + (VALUES -111, -8, -7, -1, 0, 1, 7, 8, 111) T(I) ORDER BY I; +> I LSHIFT(-128, I) LSHIFT(1, I) ULSHIFT(-128, I) ULSHIFT(1, I) +> ---- --------------- ------------ ---------------- ------------- +> -111 -1 0 0 0 +> -8 -1 0 0 0 +> -7 -1 0 1 0 +> -1 -64 0 64 0 +> 0 -128 1 -128 1 +> 1 0 2 0 2 +> 7 0 -128 0 -128 +> 8 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-32768 AS SMALLINT), I), LSHIFT(CAST(1 AS SMALLINT), I), + ULSHIFT(CAST(-32768 AS SMALLINT), I), ULSHIFT(CAST(1 AS SMALLINT), I) + FROM + (VALUES -111, -16, -15, -1, 0, 1, 15, 16, 111) T(I) ORDER BY I; +> I LSHIFT(-32768, I) LSHIFT(1, I) ULSHIFT(-32768, I) ULSHIFT(1, I) +> ---- ----------------- ------------ ------------------ ------------- +> -111 -1 0 0 0 +> -16 -1 0 0 0 +> -15 -1 0 1 0 +> -1 -16384 0 16384 0 +> 0 -32768 1 -32768 1 +> 1 0 2 0 2 +> 15 0 -32768 0 -32768 +> 16 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-2147483648 AS INTEGER), I), LSHIFT(CAST(1 AS INTEGER), I), + ULSHIFT(CAST(-2147483648 AS INTEGER), I), ULSHIFT(CAST(1 AS INTEGER), I) + FROM + (VALUES -111, -32, -31, -1, 0, 1, 31, 32, 111) T(I) ORDER BY I; +> I LSHIFT(-2147483648, I) LSHIFT(1, I) ULSHIFT(-2147483648, I) ULSHIFT(1, I) +> ---- ---------------------- ------------ ----------------------- ------------- +> -111 -1 0 0 0 +> -32 -1 0 0 0 +> -31 -1 0 1 0 +> -1 -1073741824 0 1073741824 0 +> 0 -2147483648 1 -2147483648 1 +> 1 0 2 0 2 +> 31 0 -2147483648 0 -2147483648 +> 32 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT I, + LSHIFT(CAST(-9223372036854775808 AS BIGINT), I), LSHIFT(CAST(1 AS BIGINT), I), + 
ULSHIFT(CAST(-9223372036854775808 AS BIGINT), I), ULSHIFT(CAST(1 AS BIGINT), I) + FROM + (VALUES -111, -64, -63, -1, 0, 1, 63, 64, 111) T(I) ORDER BY I; +> I LSHIFT(-9223372036854775808, I) LSHIFT(1, I) ULSHIFT(-9223372036854775808, I) ULSHIFT(1, I) +> ---- ------------------------------- -------------------- -------------------------------- -------------------- +> -111 -1 0 0 0 +> -64 -1 0 0 0 +> -63 -1 0 1 0 +> -1 -4611686018427387904 0 4611686018427387904 0 +> 0 -9223372036854775808 1 -9223372036854775808 1 +> 1 0 2 0 2 +> 63 0 -9223372036854775808 0 -9223372036854775808 +> 64 0 0 0 0 +> 111 0 0 0 0 +> rows (ordered): 9 + +SELECT LSHIFT(X'', 1); +>> X'' + +SELECT LSHIFT(CAST(X'02' AS BINARY), 1); +>> X'04' + +SELECT I, LSHIFT(X'80ABCD09', I) FROM + (VALUES -33, -32, -31, -17, -16, -15, -1, 0, 1, 15, 16, 17, 31, 32, 33) T(I) ORDER BY I; +> I LSHIFT(X'80abcd09', I) +> --- ---------------------- +> -33 X'00000000' +> -32 X'00000000' +> -31 X'00000001' +> -17 X'00004055' +> -16 X'000080ab' +> -15 X'00010157' +> -1 X'4055e684' +> 0 X'80abcd09' +> 1 X'01579a12' +> 15 X'e6848000' +> 16 X'cd090000' +> 17 X'9a120000' +> 31 X'80000000' +> 32 X'00000000' +> 33 X'00000000' +> rows (ordered): 15 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql new file mode 100644 index 0000000000..5d0b3e7312 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/mod.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select mod(null, 1) vn, mod(1, null) vn1, mod(null, null) vn2, mod(10, 2) e1; +> VN VN1 VN2 E1 +> ---- ---- ---- -- +> null null null 0 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql new file mode 100644 index 0000000000..6df772c987 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/ora-hash.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT ORA_HASH(NULL); +>> null + +SELECT ORA_HASH(NULL, 0); +>> null + +SELECT ORA_HASH(NULL, 0, 0); +>> null + +SELECT ORA_HASH(1); +>> 3509391659 + +SELECT ORA_HASH(1, -1); +> exception INVALID_VALUE_2 + +SELECT ORA_HASH(1, 0); +>> 0 + +SELECT ORA_HASH(1, 4294967295); +>> 3509391659 + +SELECT ORA_HASH(1, 4294967296); +> exception INVALID_VALUE_2 + +SELECT ORA_HASH(1, 4294967295, -1); +> exception INVALID_VALUE_2 + +SELECT ORA_HASH(1, 4294967295, 0); +>> 3509391659 + +SELECT ORA_HASH(1, 4294967295, 10); +>> 2441322222 + +SELECT ORA_HASH(1, 4294967295, 4294967295); +>> 3501171530 + +SELECT ORA_HASH(1, 4294967295, 4294967296); +> exception INVALID_VALUE_2 + +CREATE TABLE TEST(I BINARY(3), B BLOB, S VARCHAR, C CLOB); +> ok + +INSERT INTO TEST VALUES (X'010203', X'010203', 'abc', 'abc'); +> update count: 1 + +SELECT ORA_HASH(I) FROM TEST; +>> 2562861693 + +SELECT ORA_HASH(B) FROM TEST; +>> 2562861693 + +SELECT ORA_HASH(S) FROM TEST; +>> 1191608682 + +SELECT ORA_HASH(C) FROM TEST; +>> 1191608682 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql new file mode 100644 index 0000000000..0c283cbb3b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/pi.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select pi(); +>> 3.141592653589793 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql new file mode 100644 index 0000000000..3dd455f940 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/power.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select power(null, null) en, power(2, 3) e8, power(16, 0.5) e4; +> EN E8 E4 +> ---- --- --- +> null 8.0 4.0 +> rows: 1 + +SELECT POWER(10, 2) IS OF (DOUBLE); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql new file mode 100644 index 0000000000..f22f4933bd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/radians.sql @@ -0,0 +1,17 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +-- Truncate least significant digits because implementations returns slightly +-- different results depending on Java version +select radians(null) vn, truncate(radians(1), 10) v1, truncate(radians(1.1), 10) v2, + truncate(radians(-1.1), 10) v3, truncate(radians(1.9), 10) v4, + truncate(radians(-1.9), 10) v5; +> VN V1 V2 V3 V4 V5 +> ---- ------------ ------------ ------------- ------------ ------------- +> null 0.0174532925 0.0191986217 -0.0191986217 0.0331612557 -0.0331612557 +> rows: 1 + +SELECT RADIANS(0) IS OF (DOUBLE); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql new file mode 100644 index 0000000000..1d6c29b6d6 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rand.sql @@ -0,0 +1,15 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +@reconnect off + +select rand(1) e, random() f; +> E F +> ------------------ ------------------- +> 0.7308781907032909 0.41008081149220166 +> rows: 1 + +select rand(); +>> 0.20771484130971707 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql new file mode 100644 index 0000000000..33a8bbe6aa --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/random-uuid.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CHAR_LENGTH(CAST(RANDOM_UUID() AS VARCHAR)); +>> 36 + +SELECT RANDOM_UUID() = RANDOM_UUID(); +>> FALSE + +SELECT NEWID(); +> exception FUNCTION_NOT_FOUND_1 + +SELECT SYS_GUID(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +SELECT CHAR_LENGTH(CAST(NEWID() AS VARCHAR)); +>> 36 + +SET MODE Oracle; +> ok + +SELECT SYS_GUID() IS OF (RAW); +>> TRUE + +SELECT OCTET_LENGTH(SYS_GUID()); +>> 16 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql new file mode 100644 index 0000000000..5a205870e5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rotate.sql @@ -0,0 +1,103 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT I, ROTATELEFT(CAST(0x7d AS TINYINT), I) L, ROTATERIGHT(CAST(0x7d AS TINYINT), I) R + FROM (VALUES -8, -7, -2, -1, 0, 1, 2, 7, 8) T(I) ORDER BY I; +> I L R +> -- --- --- +> -8 125 125 +> -7 -6 -66 +> -2 95 -11 +> -1 -66 -6 +> 0 125 125 +> 1 -6 -66 +> 2 -11 95 +> 7 -66 -6 +> 8 125 125 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x6d3f AS SMALLINT), I) L, ROTATERIGHT(CAST(0x6d3f AS SMALLINT), I) R + FROM (VALUES -16, -15, -2, -1, 0, 1, 2, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------ ------ +> -16 27967 27967 +> -15 -9602 -18785 +> -2 -9393 -19203 +> -1 -18785 -9602 +> 0 27967 27967 +> 1 -9602 -18785 +> 2 -19203 -9393 +> 15 -18785 -9602 +> 16 27967 27967 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x7d12e43c AS INTEGER), I) L, ROTATERIGHT(CAST(0x7d12e43c AS INTEGER), I) R + FROM (VALUES -32, -31, -2, -1, 0, 1, 2, 31, 32) T(I) ORDER BY I; +> I L R +> --- ---------- ---------- +> -32 2098390076 2098390076 +> -31 -98187144 1049195038 +> -2 524597519 -196374287 +> -1 1049195038 -98187144 +> 0 2098390076 2098390076 +> 1 
-98187144 1049195038 +> 2 -196374287 524597519 +> 31 1049195038 -98187144 +> 32 2098390076 2098390076 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(0x7302abe53d12e45f AS BIGINT), I) L, ROTATERIGHT(CAST(0x7302abe53d12e45f AS BIGINT), I) R + FROM (VALUES -64, -63, -2, -1, 0, 1, 2, 63, 64) T(I) ORDER BY I; +> I L R +> --- -------------------- -------------------- +> -64 8287375265375642719 8287375265375642719 +> -63 -1871993542958266178 -5079684404166954449 +> -2 -2539842202083477225 -3743987085916532355 +> -1 -5079684404166954449 -1871993542958266178 +> 0 8287375265375642719 8287375265375642719 +> 1 -1871993542958266178 -5079684404166954449 +> 2 -3743987085916532355 -2539842202083477225 +> 63 -5079684404166954449 -1871993542958266178 +> 64 8287375265375642719 8287375265375642719 +> rows (ordered): 9 + +SELECT I, ROTATELEFT(X'ABCD', I) L, ROTATERIGHT(X'ABCD', I) R + FROM (VALUES -16, -15, -8, -1, 0, 1, 8, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------- ------- +> -16 X'abcd' X'abcd' +> -15 X'579b' X'd5e6' +> -8 X'cdab' X'cdab' +> -1 X'd5e6' X'579b' +> 0 X'abcd' X'abcd' +> 1 X'579b' X'd5e6' +> 8 X'cdab' X'cdab' +> 15 X'd5e6' X'579b' +> 16 X'abcd' X'abcd' +> rows (ordered): 9 + +SELECT I, ROTATELEFT(CAST(X'ABCD' AS BINARY(2)), I) L, ROTATERIGHT(CAST(X'ABCD' AS BINARY(2)), I) R + FROM (VALUES -16, -15, -8, -1, 0, 1, 8, 15, 16) T(I) ORDER BY I; +> I L R +> --- ------- ------- +> -16 X'abcd' X'abcd' +> -15 X'579b' X'd5e6' +> -8 X'cdab' X'cdab' +> -1 X'd5e6' X'579b' +> 0 X'abcd' X'abcd' +> 1 X'579b' X'd5e6' +> 8 X'cdab' X'cdab' +> 15 X'd5e6' X'579b' +> 16 X'abcd' X'abcd' +> rows (ordered): 9 + +SELECT ROTATELEFT(X'8000', 1); +>> X'0001' + +SELECT ROTATERIGHT(X'0001', 1); +>> X'8000' + +SELECT ROTATELEFT(X'', 1); +>> X'' diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql new file mode 100644 index 0000000000..e925aa307e --- /dev/null +++ 
b/h2/src/test/org/h2/test/scripts/functions/numeric/round.sql @@ -0,0 +1,111 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT ROUND(-1.2), ROUND(-1.5), ROUND(-1.6), ROUND(2), ROUND(1.5), ROUND(1.8), ROUND(1.1); +> -1 -2 -2 2 2 2 1 +> -- -- -- - - - - +> -1 -2 -2 2 2 2 1 +> rows: 1 + +select round(null, null) en, round(10.49, 0) e10, round(10.05, 1) e101; +> EN E10 E101 +> ---- --- ---- +> null 10 10.1 +> rows: 1 + +select round(null) en, round(0.6, null) en2, round(1.05) e1, round(-1.51) em2; +> EN EN2 E1 EM2 +> ---- ---- -- --- +> null null 1 -2 +> rows: 1 + +CALL ROUND(998.5::DOUBLE); +>> 999.0 + +CALL ROUND(998.5::REAL); +>> 999.0 + +SELECT + ROUND(4503599627370495.0::DOUBLE), ROUND(4503599627370495.5::DOUBLE), + ROUND(4503599627370496.0::DOUBLE), ROUND(4503599627370497.0::DOUBLE); +> 4.503599627370495E15 4.503599627370496E15 4.503599627370496E15 4.503599627370497E15 +> -------------------- -------------------- -------------------- -------------------- +> 4.503599627370495E15 4.503599627370496E15 4.503599627370496E15 4.503599627370497E15 +> rows: 1 + +SELECT + ROUND(450359962737049.50::DOUBLE, 1), ROUND(450359962737049.55::DOUBLE, 1), + ROUND(450359962737049.60::DOUBLE, 1), ROUND(450359962737049.70::DOUBLE, 1); +> 4.503599627370495E14 4.503599627370496E14 4.503599627370496E14 4.503599627370497E14 +> -------------------- -------------------- -------------------- -------------------- +> 4.503599627370495E14 4.503599627370496E14 4.503599627370496E14 4.503599627370497E14 +> rows: 1 + +CALL ROUND(0.285, 2); +>> 0.29 + +CALL ROUND(0.285::DOUBLE, 2); +>> 0.29 + +CALL ROUND(0.285::REAL, 2); +>> 0.29 + +CALL ROUND(1.285, 2); +>> 1.29 + +CALL ROUND(1.285::DOUBLE, 2); +>> 1.29 + +CALL ROUND(1.285::REAL, 2); +>> 1.29 + +CALL ROUND(1, 1) IS OF (INTEGER); +>> TRUE + +CALL ROUND(1::DOUBLE, 1) IS OF (DOUBLE); +>> TRUE + +CALL ROUND(1::REAL, 1) 
IS OF (REAL); +>> TRUE + +SELECT ROUND(1, 10000000); +>> 1 + +CREATE TABLE T1(N NUMERIC(10, 2), D DECFLOAT(10), I INTEGER) AS VALUES (99999999.99, 99999999.99, 10); +> ok + +SELECT ROUND(N, -1) NN, ROUND(N) N0, ROUND(N, 1) N1, ROUND(N, 2) N2, ROUND(N, 3) N3, ROUND(N, 10000000) NL, + ROUND(D) D0, ROUND(D, 2) D2, ROUND(D, 3) D3, + ROUND(I) I0, ROUND(I, 1) I1, ROUND(I, I) II FROM T1; +> NN N0 N1 N2 N3 NL D0 D2 D3 I0 I1 II +> --------- --------- ----------- ----------- ----------- ----------- ---- ----------- ----------- -- -- -- +> 100000000 100000000 100000000.0 99999999.99 99999999.99 99999999.99 1E+8 99999999.99 99999999.99 10 10 10 +> rows: 1 + +CREATE TABLE T2 AS SELECT ROUND(N, -1) NN, ROUND(N) N0, ROUND(N, 1) N1, ROUND(N, 2) N2, ROUND(N, 3) N3, ROUND(N, 10000000) NL, + ROUND(D) D0, ROUND(D, 2) D2, ROUND(D, 3) D3, + ROUND(I) I0, ROUND(I, 1) I1, ROUND(I, I) II FROM T1; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T2' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> ----------- --------- ----------------- ------------- +> NN NUMERIC 9 0 +> N0 NUMERIC 9 0 +> N1 NUMERIC 10 1 +> N2 NUMERIC 10 2 +> N3 NUMERIC 10 2 +> NL NUMERIC 10 2 +> D0 DECFLOAT 10 null +> D2 DECFLOAT 10 null +> D3 DECFLOAT 10 null +> I0 INTEGER 32 0 +> I1 INTEGER 32 0 +> II INTEGER 32 0 +> rows (ordered): 12 + +DROP TABLE T1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql new file mode 100644 index 0000000000..5e42f1852b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/roundmagic.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select roundmagic(null) en, roundmagic(cast(3.11 as double) - 3.1) e001, roundmagic(3.11-3.1-0.01) e000, roundmagic(2000000000000) e20x; +> EN E001 E000 E20X +> ---- ---- ---- ------ +> null 0.01 0.0 2.0E12 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql new file mode 100644 index 0000000000..47acc0169b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/rshift.sql @@ -0,0 +1,115 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select rshift(null, 1) vn, rshift(1, null) vn1, rshift(null, null) vn2, rshift(3, 6) v1, rshift(1024,3) v2; +> VN VN1 VN2 V1 V2 +> ---- ---- ---- -- --- +> null null null 0 128 +> rows: 1 + +SELECT I, + RSHIFT(CAST(-128 AS TINYINT), I), RSHIFT(CAST(1 AS TINYINT), I), + URSHIFT(CAST(-128 AS TINYINT), I), URSHIFT(CAST(1 AS TINYINT), I) + FROM + (VALUES -111, -8, -7, -1, 0, 1, 7, 8, 111) T(I) ORDER BY I; +> I RSHIFT(-128, I) RSHIFT(1, I) URSHIFT(-128, I) URSHIFT(1, I) +> ---- --------------- ------------ ---------------- ------------- +> -111 0 0 0 0 +> -8 0 0 0 0 +> -7 0 -128 0 -128 +> -1 0 2 0 2 +> 0 -128 1 -128 1 +> 1 -64 0 64 0 +> 7 -1 0 1 0 +> 8 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + RSHIFT(CAST(-32768 AS SMALLINT), I), RSHIFT(CAST(1 AS SMALLINT), I), + URSHIFT(CAST(-32768 AS SMALLINT), I), URSHIFT(CAST(1 AS SMALLINT), I) + FROM + (VALUES -111, -16, -15, -1, 0, 1, 15, 16, 111) T(I) ORDER BY I; +> I RSHIFT(-32768, I) RSHIFT(1, I) URSHIFT(-32768, I) URSHIFT(1, I) +> ---- ----------------- ------------ ------------------ ------------- +> -111 0 0 0 0 +> -16 0 0 0 0 +> -15 0 -32768 0 -32768 +> -1 0 2 0 2 +> 0 -32768 1 -32768 1 +> 1 -16384 0 16384 0 +> 15 -1 0 1 0 +> 16 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + 
RSHIFT(CAST(-2147483648 AS INTEGER), I), RSHIFT(CAST(1 AS INTEGER), I), + URSHIFT(CAST(-2147483648 AS INTEGER), I), URSHIFT(CAST(1 AS INTEGER), I) + FROM + (VALUES -111, -32, -31, -1, 0, 1, 31, 32, 111) T(I) ORDER BY I; +> I RSHIFT(-2147483648, I) RSHIFT(1, I) URSHIFT(-2147483648, I) URSHIFT(1, I) +> ---- ---------------------- ------------ ----------------------- ------------- +> -111 0 0 0 0 +> -32 0 0 0 0 +> -31 0 -2147483648 0 -2147483648 +> -1 0 2 0 2 +> 0 -2147483648 1 -2147483648 1 +> 1 -1073741824 0 1073741824 0 +> 31 -1 0 1 0 +> 32 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT I, + RSHIFT(CAST(-9223372036854775808 AS BIGINT), I), RSHIFT(CAST(1 AS BIGINT), I), + URSHIFT(CAST(-9223372036854775808 AS BIGINT), I), URSHIFT(CAST(1 AS BIGINT), I) + FROM + (VALUES -111, -64, -63, -1, 0, 1, 63, 64, 111) T(I) ORDER BY I; +> I RSHIFT(-9223372036854775808, I) RSHIFT(1, I) URSHIFT(-9223372036854775808, I) URSHIFT(1, I) +> ---- ------------------------------- -------------------- -------------------------------- -------------------- +> -111 0 0 0 0 +> -64 0 0 0 0 +> -63 0 -9223372036854775808 0 -9223372036854775808 +> -1 0 2 0 2 +> 0 -9223372036854775808 1 -9223372036854775808 1 +> 1 -4611686018427387904 0 4611686018427387904 0 +> 63 -1 0 1 0 +> 64 -1 0 0 0 +> 111 -1 0 0 0 +> rows (ordered): 9 + +SELECT RSHIFT(X'', 1); +>> X'' + +SELECT RSHIFT(CAST(X'02' AS BINARY), 1); +>> X'01' + +SELECT I, RSHIFT(X'80ABCD09', I) FROM + (VALUES -33, -32, -31, -17, -16, -15, -1, 0, 1, 15, 16, 17, 31, 32, 33) T(I) ORDER BY I; +> I RSHIFT(X'80abcd09', I) +> --- ---------------------- +> -33 X'00000000' +> -32 X'00000000' +> -31 X'80000000' +> -17 X'9a120000' +> -16 X'cd090000' +> -15 X'e6848000' +> -1 X'01579a12' +> 0 X'80abcd09' +> 1 X'4055e684' +> 15 X'00010157' +> 16 X'000080ab' +> 17 X'00004055' +> 31 X'00000001' +> 32 X'00000000' +> 33 X'00000000' +> rows (ordered): 15 + +SELECT RSHIFT(-1, -9223372036854775808); +>> 0 + +SELECT URSHIFT(-1, -9223372036854775808); +>> 0 diff 
--git a/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql new file mode 100644 index 0000000000..a083f92c9e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/secure-rand.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT SECURE_RAND(NULL); +>> null + +SELECT OCTET_LENGTH(SECURE_RAND(0)); +>> 1 + +SELECT OCTET_LENGTH(SECURE_RAND(2)); +>> 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql new file mode 100644 index 0000000000..2138f8f2be --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sign.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select sign(null) en, sign(10) e1, sign(0) e0, sign(-0.1) em1; +> EN E1 E0 EM1 +> ---- -- -- --- +> null 1 0 -1 +> rows: 1 + +SELECT SIGN(INTERVAL '-0-1' YEAR TO MONTH) A, SIGN(INTERVAL '0' DAY) B, SIGN(INTERVAL '1' HOUR) C; +> A B C +> -- - - +> -1 0 1 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql new file mode 100644 index 0000000000..f2f1146407 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sin.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select sin(null) vn, sin(-1) r1; +> VN R1 +> ---- ------------------- +> null -0.8414709848078965 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql new file mode 100644 index 0000000000..2186ea8d20 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sinh.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL SINH(1); +>> 1.1752011936438014 + +CALL SINH(50); +>> 2.592352764293536E21 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql new file mode 100644 index 0000000000..4a96f3a0a5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/sqrt.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select sqrt(null) vn, sqrt(0) e0, sqrt(1) e1, sqrt(4) e2, sqrt(100) e10, sqrt(0.25) e05; +> VN E0 E1 E2 E10 E05 +> ---- --- --- --- ---- --- +> null 0.0 1.0 2.0 10.0 0.5 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql new file mode 100644 index 0000000000..13bcd44e32 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/tan.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select tan(null) vn, tan(-1) r1; +> VN R1 +> ---- ------------------- +> null -1.5574077246549023 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql new file mode 100644 index 0000000000..b6765cc3dc --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/tanh.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL TANH(1); +>> 0.7615941559557649 + +CALL TANH(50); +>> 1.0 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql new file mode 100644 index 0000000000..0dbe8c9d3c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/truncate.sql @@ -0,0 +1,131 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT TRUNCATE(1.234, 2); +>> 1.23 + +SELECT TRUNCATE(DATE '2011-03-05'); +>> 2011-03-05 00:00:00 + +SELECT TRUNCATE(TIMESTAMP '2011-03-05 02:03:04'); +>> 2011-03-05 00:00:00 + +SELECT TRUNCATE(TIMESTAMP WITH TIME ZONE '2011-03-05 02:03:04+07'); +>> 2011-03-05 00:00:00+07 + +SELECT TRUNCATE(CURRENT_DATE, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE(LOCALTIMESTAMP, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE(CURRENT_TIMESTAMP, 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE('2011-03-05 02:03:04', 1); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT TRUNCATE('bad'); +> exception INVALID_DATETIME_CONSTANT_2 + +SELECT TRUNCATE(1, 2, 3); +> exception SYNTAX_ERROR_2 + +select truncate(null, null) en, truncate(1.99, 0) e1, truncate(-10.9, 0) em10; +> EN E1 EM10 +> ---- -- ---- +> null 1 -10 +> rows: 1 + +select trunc(null, null) en, trunc(1.99, 0) e1, trunc(-10.9, 0) em10; +> EN E1 EM10 +> ---- -- ---- +> null 1 -10 +> rows: 1 + +select trunc(1.3); +>> 1 + +SELECT TRUNCATE(1.3) IS OF (NUMERIC); +>> TRUE + +SELECT TRUNCATE(CAST(1.3 AS DOUBLE)) IS OF (DOUBLE); +>> TRUE + +SELECT TRUNCATE(CAST(1.3 AS REAL)) IS OF (REAL); +>> TRUE + +SELECT TRUNCATE(1.99, 0), TRUNCATE(1.99, 1), TRUNCATE(-1.99, 0), TRUNCATE(-1.99, 1); +> 1 1.9 -1 -1.9 +> - --- -- ---- +> 1 1.9 -1 -1.9 +> rows: 1 + +SELECT TRUNCATE(1.99::DOUBLE, 0), TRUNCATE(1.99::DOUBLE, 1), TRUNCATE(-1.99::DOUBLE, 0), TRUNCATE(-1.99::DOUBLE, 1); +> 1.0 1.9 -1.0 -1.9 +> --- --- ---- ---- +> 1.0 1.9 -1.0 -1.9 +> rows: 1 + +SELECT TRUNCATE(1.99::REAL, 0), TRUNCATE(1.99::REAL, 1), TRUNCATE(-1.99::REAL, 0), TRUNCATE(-1.99::REAL, 1); +> 1.0 1.9 -1.0 -1.9 +> --- --- ---- ---- +> 1.0 1.9 -1.0 -1.9 +> rows: 1 + +SELECT TRUNCATE(V, S) FROM (VALUES (1.111, 1)) T(V, S); +>> 1.100 + +SELECT TRUNC(1, 10000000); +>> 1 + +CREATE TABLE T1(N NUMERIC(10, 2), D DECFLOAT(10), I INTEGER) AS VALUES (99999999.99, 99999999.99, 10); +> ok + +SELECT TRUNC(N, -1) 
NN, TRUNC(N) N0, TRUNC(N, 1) N1, TRUNC(N, 2) N2, TRUNC(N, 3) N3, TRUNC(N, 10000000) NL, + TRUNC(D) D0, TRUNC(D, 2) D2, TRUNC(D, 3) D3, + TRUNC(I) I0, TRUNC(I, 1) I1, TRUNC(I, I) II FROM T1; +> NN N0 N1 N2 N3 NL D0 D2 D3 I0 I1 II +> -------- -------- ---------- ----------- ----------- ----------- -------- ----------- ----------- -- -- -- +> 99999990 99999999 99999999.9 99999999.99 99999999.99 99999999.99 99999999 99999999.99 99999999.99 10 10 10 +> rows: 1 + +CREATE TABLE T2 AS SELECT TRUNC(N, -1) NN, TRUNC(N) N0, TRUNC(N, 1) N1, TRUNC(N, 2) N2, TRUNC(N, 3) N3, TRUNC(N, 10000000) NL, + TRUNC(D) D0, TRUNC(D, 2) D2, TRUNC(D, 3) D3, + TRUNC(I) I0, TRUNC(I, 1) I1, TRUNC(I, I) II FROM T1; +> ok + +SELECT COLUMN_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_SCALE FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_NAME = 'T2' ORDER BY ORDINAL_POSITION; +> COLUMN_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_SCALE +> ----------- --------- ----------------- ------------- +> NN NUMERIC 8 0 +> N0 NUMERIC 8 0 +> N1 NUMERIC 9 1 +> N2 NUMERIC 10 2 +> N3 NUMERIC 10 2 +> NL NUMERIC 10 2 +> D0 DECFLOAT 10 null +> D2 DECFLOAT 10 null +> D3 DECFLOAT 10 null +> I0 INTEGER 32 0 +> I1 INTEGER 32 0 +> II INTEGER 32 0 +> rows (ordered): 12 + +DROP TABLE T1; +> ok + +SELECT TRUNC(11, -1) I, TRUNC(CAST(11 AS NUMERIC(2)), -1) N; +> I N +> -- -- +> 10 10 +> rows: 1 + +SELECT TRUNC(11, -2) I, TRUNC(CAST(11 AS NUMERIC(2)), -2) N; +> I N +> - - +> 0 0 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql b/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/numeric/zero.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql b/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql new file mode 100644 index 0000000000..7ca0767798 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/array-to-string.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +set mode PostgreSQL; +> ok + +select array_to_string(array[null, 0, 1, null, 2], ','); +>> 0,1,2 + +select array_to_string(array['a', null, '', 'b', null], ',', null); +>> a,,b + +select array_to_string(array[null, 0, 1, null, 2], ',', '*'); +>> *,0,1,*,2 + +select array_to_string(array['a', null, '', 'b', null], ',', '*'); +>> a,*,,b,* + +select array_to_string(array[1, null, 3], 0, 2); +>> 10203 + +select array_to_string(null, 0, 2); +>> null + +select array_to_string(array[1, null, 3], null, 2); +>> null + +select array_to_string(0, ','); +> exception INVALID_VALUE_2 + +set mode Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql b/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql new file mode 100644 index 0000000000..17fa38db98 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/ascii.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select ascii(null) en, ascii('') en, ascii('Abc') e65; +> EN EN E65 +> ---- ---- --- +> null null 65 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql b/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/bit-length.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/char.sql b/h2/src/test/org/h2/test/scripts/functions/string/char.sql new file mode 100644 index 0000000000..53bb3c5e93 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/char.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select char(null) en, char(65) ea; +> EN EA +> ---- -- +> null A +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql b/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql new file mode 100644 index 0000000000..ec647763a6 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/concat-ws.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CONCAT_WS(NULL, NULL, 'a', NULL, 'b', NULL); +>> ab + +SELECT CONCAT_WS('*', NULL, 'a', NULL, 'b', NULL); +>> a*b + +SELECT CONCAT_WS('*', '', 'a', NULL, 'b', NULL); +>> *a*b + +SELECT '[' || CONCAT_WS('a', NULL, NULL) || ']'; +>> [] diff --git a/h2/src/test/org/h2/test/scripts/functions/string/concat.sql b/h2/src/test/org/h2/test/scripts/functions/string/concat.sql new file mode 100644 index 0000000000..4b1b73562d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/concat.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select concat(null, null) en, concat(null, 'a') ea, concat('b', null) eb, concat('ab', 'c') abc; +> EN EA EB ABC +> -- -- -- --- +> a b abc +> rows: 1 + +SELECT CONCAT('a', 'b', 'c', 'd'); +>> abcd diff --git a/h2/src/test/org/h2/test/scripts/functions/string/difference.sql b/h2/src/test/org/h2/test/scripts/functions/string/difference.sql new file mode 100644 index 0000000000..4853dfe1f0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/difference.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select difference(null, null) en, difference('a', null) en1, difference(null, 'a') en2; +> EN EN1 EN2 +> ---- ---- ---- +> null null null +> rows: 1 + +select difference('abc', 'abc') e0, difference('Thomas', 'Tom') e1; +> E0 E1 +> -- -- +> 4 3 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql b/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql new file mode 100644 index 0000000000..95ea6902d5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/hextoraw.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select hextoraw(null) en, rawtohex(null) en1, hextoraw(rawtohex('abc')) abc; +> EN EN1 ABC +> ---- ---- --- +> null null abc +> rows: 1 + +SELECT HEXTORAW('0049'); +>> I + +SET MODE Oracle; +> ok + +SELECT HEXTORAW('0049'); +>> X'0049' + +SELECT HEXTORAW('0049') IS OF (RAW); +>> TRUE + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/insert.sql b/h2/src/test/org/h2/test/scripts/functions/string/insert.sql new file mode 100644 index 0000000000..d24cb58e4e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/insert.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select insert(null, null, null, null) en, insert('Rund', 1, 0, 'o') e_round, insert(null, 1, 1, 'a') ea; +> EN E_ROUND EA +> ---- ------- -- +> null Rund a +> rows: 1 + +select insert('World', 2, 4, 'e') welt, insert('Hello', 2, 1, 'a') hallo; +> WELT HALLO +> ---- ----- +> We Hallo +> rows: 1 + +SELECT INSERT(NULL, 0, 0, NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/string/left.sql b/h2/src/test/org/h2/test/scripts/functions/string/left.sql new file mode 100644 index 0000000000..fcf92c16ac --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/left.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select left(null, 10) en, left('abc', null) en2, left('boat', 2) e_bo, left('', 1) ee, left('a', -1) ee2; +> EN EN2 E_BO EE EE2 +> ---- ---- ---- -- --- +> null null bo +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/length.sql b/h2/src/test/org/h2/test/scripts/functions/string/length.sql new file mode 100644 index 0000000000..ebf2bae84d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/length.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select length(null) en, length('This has 17 chars') e_17; +> EN E_17 +> ---- ---- +> null 17 +> rows: 1 + +SELECT LEN(NULL); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +select len(null) en, len('MSSQLServer uses the len keyword') e_32; +> EN E_32 +> ---- ---- +> null 32 +> rows: 1 + +SELECT LEN('A '); +>> 2 + +SELECT LEN(CAST('A ' AS CHAR(2))); +>> 1 + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/locate.sql b/h2/src/test/org/h2/test/scripts/functions/string/locate.sql new file mode 100644 index 0000000000..fe1bf6dd12 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/locate.sql @@ -0,0 +1,49 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select locate(null, null) en, locate(null, null, null) en1; +> EN EN1 +> ---- ---- +> null null +> rows: 1 + +select locate('World', 'Hello World') e7, locate('hi', 'abchihihi', 2) e3; +> E7 E3 +> -- -- +> 7 4 +> rows: 1 + +SELECT CHARINDEX('test', 'test'); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +select charindex('World', 'Hello World') e7, charindex('hi', 'abchihihi', 2) e3; +> E7 E3 +> -- -- +> 7 4 +> rows: 1 + +SET MODE Regular; +> ok + +select instr('Hello World', 'World') e7, instr('abchihihi', 'hi', 2) e3, instr('abcooo', 'o') e2; +> E7 E3 E2 +> -- -- -- +> 7 4 4 +> rows: 1 + +EXPLAIN SELECT INSTR(A, B) FROM (VALUES ('A', 'B')) T(A, B); +>> SELECT LOCATE("B", "A") FROM (VALUES ('A', 'B')) "T"("A", "B") /* table scan */ + +select position(null, null) en, position(null, 'abc') en1, position('World', 'Hello World') e7, position('hi', 'abchihihi') e1; +> EN EN1 E7 E1 +> ---- ---- -- -- +> null null 7 4 +> rows: 1 + +EXPLAIN SELECT POSITION((A > B), C) FROM (VALUES (1, 2, 3)) T(A, B, C); +>> SELECT LOCATE("A" > "B", "C") FROM (VALUES (1, 2, 3)) "T"("A", "B", "C") /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/functions/string/lower.sql b/h2/src/test/org/h2/test/scripts/functions/string/lower.sql new file mode 100644 index 0000000000..73138cf357 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/lower.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select lower(null) en, lower('Hello') hello, lower('ABC') abc; +> EN HELLO ABC +> ---- ----- --- +> null hello abc +> rows: 1 + +select lcase(null) en, lcase('Hello') hello, lcase('ABC') abc; +> EN HELLO ABC +> ---- ----- --- +> null hello abc +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql b/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql new file mode 100644 index 0000000000..41c69ebb20 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/lpad.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select lpad('string', 10, '+'); +>> ++++string diff --git a/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql b/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql new file mode 100644 index 0000000000..daf8e3e101 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/ltrim.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select ltrim(null) en, '>' || ltrim('a') || '<' ea, '>' || ltrim(' a ') || '<' e_as; +> EN EA E_AS +> ---- --- ---- +> null >a< >a < +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql b/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/octet-length.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql b/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql new file mode 100644 index 0000000000..8c8b946308 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/quote_ident.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT QUOTE_IDENT(NULL); +>> null + +SELECT QUOTE_IDENT(''); +>> "" + +SELECT QUOTE_IDENT('a'); +>> "a" + +SELECT QUOTE_IDENT('"a""A"'); +>> """a""""A""" diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql b/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql new file mode 100644 index 0000000000..05e418b045 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/rawtohex.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT RAWTOHEX('A'); +>> 0041 + +SELECT RAWTOHEX('Az'); +>> 0041007a + +SET MODE Oracle; +> ok + +SELECT RAWTOHEX('A'); +>> 41 + +SELECT RAWTOHEX('Az'); +>> 417a + +SET MODE Regular; +> ok + +SELECT RAWTOHEX(X'12fe'); +>> 12fe + +SELECT RAWTOHEX('12345678-9abc-def0-0123-456789abcdef'::UUID); +>> 123456789abcdef00123456789abcdef diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql b/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql new file mode 100644 index 0000000000..24a51ec6c7 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/regex-replace.sql @@ -0,0 +1,76 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +call regexp_replace('x', 'x', '\'); +> exception LIKE_ESCAPE_ERROR_1 + +CALL REGEXP_REPLACE('abckaboooom', 'o+', 'o'); +>> abckabom + +select regexp_replace('Sylvain', 'S..', 'TOTO', 'mni'); +>> TOTOvain + +set mode oracle; +> ok + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 0); +>> 1234 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 1); +>> 1.2.3.4 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 1, 2); +>> .12.3.4 + +select regexp_replace('.1.2.3.4', '[^0-9]', '', 3, 2); +>> .1.23.4 + +select regexp_replace('', '[^0-9]', '', 3, 2); +>> null + +select regexp_replace('ababab', '', '', 3, 2); +>> ababab + +select regexp_replace('ababab', '', '', 3, 2, ''); +>> ababab + +select regexp_replace('first last', '(\w+) (\w+)', '\2 \1'); +>> last first + +select regexp_replace('first last', '(\w+) (\w+)', '\\2 \1'); +>> \2 first + +select regexp_replace('first last', '(\w+) (\w+)', '\$2 \1'); +>> $2 first + +select regexp_replace('first last', '(\w+) (\w+)', '$2 $1'); +>> $2 $1 + +set mode regular; +> ok + +select regexp_replace('first last', '(\w+) (\w+)', '\2 \1'); +>> 2 1 + +select regexp_replace('first last', '(\w+) (\w+)', '$2 $1'); +>> last first + +select regexp_replace('AbcDef', '[^a-z]', '', 'g'); +> exception INVALID_VALUE_2 + +select regexp_replace('First and Second', '[A-Z]', ''); +>> irst and econd + +set mode PostgreSQL; +> ok + +select regexp_replace('AbcDef', '[^a-z]', '', 'g'); +>> bcef + +select regexp_replace('AbcDef123', '[a-z]', '!', 'gi'); +>> !!!!!!123 + +select regexp_replace('First Only', '[A-Z]', ''); +>> irst Only diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql b/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql new file mode 100644 index 0000000000..5f86d7f67d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/regexp-like.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +call select 1 from dual where regexp_like('x', 'x', '\'); +> exception INVALID_VALUE_2 + +CALL REGEXP_LIKE('A', '[a-z]', 'i'); +>> TRUE + +CALL REGEXP_LIKE('A', '[a-z]', 'c'); +>> FALSE diff --git a/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql b/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql new file mode 100644 index 0000000000..b7c984a423 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/regexp-substr.sql @@ -0,0 +1,83 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- case insensitive matches upper case +CALL REGEXP_SUBSTR('A', '[a-z]', 1, 1, 'i'); +>> A + +-- case sensitive does not match upper case +CALL REGEXP_SUBSTR('A', '[a-z]', 1, 1, 'c'); +>> null + +-- match string from position at string index 3 +CALL REGEXP_SUBSTR('help helpful', 'help.*', 3); +>> helpful + +-- match string from position at string index 6 +CALL REGEXP_SUBSTR('help helpful helping', 'help.*', 7); +>> helping + +-- should return first occurrence +CALL REGEXP_SUBSTR('helpful helping', 'help\w*', 1, 1); +>> helpful + +-- should return second occurrence +CALL REGEXP_SUBSTR('helpful helping', 'help\w*', 1, 2); +>> helping + +-- should return third occurrence +CALL REGEXP_SUBSTR('help helpful helping', 'help\w*', 1, 3); +>> helping + +-- should return first occurrence, after string at index 3 +CALL REGEXP_SUBSTR('help helpful helping', 'help\w*', 3, 1); +>> helpful + +-- should first matching group +CALL REGEXP_SUBSTR('help helpful helping', '(help\w*)', 1, 1, NULL, 1); +>> help + +-- should second occurrence of first group +CALL REGEXP_SUBSTR('help helpful helping', '(help\w*)', 1, 2, NULL, 1); +>> helpful + +-- should second group +CALL REGEXP_SUBSTR('2020-10-01', 
'(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 2); +>> 10 + +-- should third group +CALL REGEXP_SUBSTR('2020-10-01', '(\d{4})-(\d{2})-(\d{2})', 1, 1, NULL, 3); +>> 01 + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}'); +>> 2020 + +-- Test variants of passing NULL, which should always result in NULL result +CALL REGEXP_SUBSTR('2020-10-01', NULL); +>> null + +CALL REGEXP_SUBSTR(NULL, '\d{4}'); +>> null + +CALL REGEXP_SUBSTR(NULL, NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', 1, NULL); +>> null + +CALL REGEXP_SUBSTR('2020-10-01', '\d{4}', 1, 1, NULL, NULL); +>> null + +-- Index out of bounds +CALL REGEXP_SUBSTR('2020-10-01', '(\d{4})', 1, 1, NULL, 10); +>> null + +-- Illegal regexp pattern +CALL REGEXP_SUBSTR('2020-10-01', '\d{a}'); +> exception LIKE_ESCAPE_ERROR_1 + diff --git a/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql b/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql new file mode 100644 index 0000000000..68b06222e0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/repeat.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select repeat(null, null) en, repeat('Ho', 2) abcehoho , repeat('abc', 0) ee; +> EN ABCEHOHO EE +> ---- -------- -- +> null HoHo +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/replace.sql b/h2/src/test/org/h2/test/scripts/functions/string/replace.sql new file mode 100644 index 0000000000..19966c332c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/replace.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select replace(null, null) en, replace(null, null, null) en1; +> EN EN1 +> ---- ---- +> null null +> rows: 1 + +select replace('abchihihi', 'i', 'o') abcehohoho, replace('that is tom', 'i') abcethstom; +> ABCEHOHOHO ABCETHSTOM +> ---------- ---------- +> abchohoho that s tom +> rows: 1 + +set mode oracle; +> ok + +select replace('white space', ' ', '') x, replace('white space', ' ', null) y from dual; +> X Y +> ---------- ---------- +> whitespace whitespace +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/right.sql b/h2/src/test/org/h2/test/scripts/functions/string/right.sql new file mode 100644 index 0000000000..c56fdca00c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/right.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select right(null, 10) en, right('abc', null) en2, right('boat-trip', 2) e_ip, right('', 1) ee, right('a', -1) ee2; +> EN EN2 E_IP EE EE2 +> ---- ---- ---- -- --- +> null null ip +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql b/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql new file mode 100644 index 0000000000..0d7e635657 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/rpad.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select rpad('string', 10, '+'); +>> string++++ diff --git a/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql b/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql new file mode 100644 index 0000000000..a216fd6805 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/rtrim.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select rtrim(null) en, '>' || rtrim('a') || '<' ea, '>' || rtrim(' a ') || '<' es; +> EN EA ES +> ---- --- ---- +> null >a< > a< +> rows: 1 + +select rtrim() from dual; +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql b/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql new file mode 100644 index 0000000000..fec64ae3c5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/soundex.sql @@ -0,0 +1,20 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select soundex(null) en, soundex('tom') et; +> EN ET +> ---- ---- +> null t500 +> rows: 1 + +select +soundex('Washington') W252, soundex('Lee') L000, +soundex('Gutierrez') G362, soundex('Pfister') P236, +soundex('Jackson') J250, soundex('Tymczak') T522, +soundex('VanDeusen') V532, soundex('Ashcraft') A261; +> W252 L000 G362 P236 J250 T522 V532 A261 +> ---- ---- ---- ---- ---- ---- ---- ---- +> W252 L000 G362 P236 J250 T522 V532 A261 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/space.sql b/h2/src/test/org/h2/test/scripts/functions/string/space.sql new file mode 100644 index 0000000000..867bd74657 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/space.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select space(null) en, '>' || space(1) || '<' es, '>' || space(3) || '<' e2; +> EN ES E2 +> ---- --- --- +> null > < > < +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql new file mode 100644 index 0000000000..3a2b439aec --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringdecode.sql @@ -0,0 +1,22 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT STRINGDECODE('\7'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\17'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\117'); +>> O + +SELECT STRINGDECODE('\178'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\u111'); +> exception STRING_FORMAT_ERROR_1 + +SELECT STRINGDECODE('\u0057'); +>> W diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql new file mode 100644 index 0000000000..72274a9474 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringencode.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT STRINGENCODE(STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); +>> abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1! + +call STRINGENCODE(STRINGDECODE('abcsond\344rzeich\344 \u56ce \366\344\374\326\304\334\351\350\340\361!')); +>> abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1! 
+ +CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')); +>> Lines 1\nLine 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql b/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/stringtoutf8.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/substring.sql b/h2/src/test/org/h2/test/scripts/functions/string/substring.sql new file mode 100644 index 0000000000..624fc9643b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/substring.sql @@ -0,0 +1,82 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select substr(null, null) en, substr(null, null, null) e1, substr('bob', 2) e_ob, substr('bob', 2, 1) eo; +> EN E1 E_OB EO +> ---- ---- ---- -- +> null null ob o +> rows: 1 + +select substring(null, null) en, substring(null, null, null) e1, substring('bob', 2) e_ob, substring('bob', 2, 1) eo; +> EN E1 E_OB EO +> ---- ---- ---- -- +> null null ob o +> rows: 1 + +select substring(null from null) en, substring(null from null for null) e1, substring('bob' from 2) e_ob, substring('bob' from 2 for 1) eo; +> EN E1 E_OB EO +> ---- ---- ---- -- +> null null ob o +> rows: 1 + +select substr('[Hello]', 2, 5); +>> Hello + +-- Compatibility syntax +select substr('Hello World', -5); +>> World + +-- Compatibility +SELECT SUBSTRING('X', 0, 1); +>> X + +CREATE TABLE TEST(STR VARCHAR, START INT, LEN INT); +> ok + +EXPLAIN SELECT SUBSTRING(STR FROM START), SUBSTRING(STR FROM START FOR LEN) FROM TEST; +>> SELECT SUBSTRING("STR" FROM "START"), SUBSTRING("STR" FROM "START" FOR "LEN") FROM 
"PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT SUBSTRING('AAA' FROM 4 FOR 1); +> '' +> -- +> +> rows: 1 + +SELECT SUBSTRING(X'001122' FROM 1 FOR 3); +>> X'001122' + +SELECT SUBSTRING(X'001122' FROM 1 FOR 2); +>> X'0011' + +SELECT SUBSTRING(X'001122' FROM 2 FOR 2); +>> X'1122' + +SELECT SUBSTRING(X'001122' FROM 4 FOR 1); +>> X'' + +SELECT SUBSTRING(X'001122' FROM 2 FOR 1); +>> X'11' + +CREATE MEMORY TABLE TEST AS (VALUES SUBSTRING(X'0011' FROM 2)); +> ok + +-- Compatibility +SELECT SUBSTRING(X'00', 0, 1); +>> X'00' + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "C1" BINARY VARYING(1) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (X'11'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql b/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/to-char.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/translate.sql b/h2/src/test/org/h2/test/scripts/functions/string/translate.sql new file mode 100644 index 0000000000..4e9207a0fd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/translate.sql @@ -0,0 +1,37 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE testTranslate(id BIGINT, txt1 VARCHAR); +> ok + +INSERT INTO testTranslate(id, txt1) values(1, 'test1'), (2, NULL), (3, ''), (4, 'caps'); +> update count: 4 + +SELECT TRANSLATE(txt1, 'p', 'r') FROM testTranslate ORDER BY id; +> TRANSLATE(TXT1, 'p', 'r') +> ------------------------- +> test1 +> null +> +> cars +> rows (ordered): 4 + +SET MODE DB2; +> ok + +SELECT TRANSLATE(txt1, 'p', 'r') FROM testTranslate WHERE txt1 = 'caps'; +>> caps + +SELECT TRANSLATE(txt1, 'r', 'p') FROM testTranslate WHERE txt1 = 'caps'; +>> cars + +SET MODE Regular; +> ok + +SELECT TRANSLATE(NULL, NULL, NULL); +>> null + +DROP TABLE testTranslate; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/string/trim.sql b/h2/src/test/org/h2/test/scripts/functions/string/trim.sql new file mode 100644 index 0000000000..c4d1f535c0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/trim.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, A VARCHAR, B VARCHAR, C VARCHAR) AS VALUES (1, '__A__', ' B ', 'xAx'); +> ok + +SELECT TRIM(BOTH '_' FROM A), '|' || TRIM(LEADING FROM B) || '|', TRIM(TRAILING 'x' FROM C) FROM TEST; +> TRIM('_' FROM A) '|' || TRIM(LEADING FROM B) || '|' TRIM(TRAILING 'x' FROM C) +> ---------------- ---------------------------------- ------------------------- +> A |B | xA +> rows: 1 + +SELECT LENGTH(TRIM(B)), LENGTH(TRIM(FROM B)) FROM TEST; +> CHAR_LENGTH(TRIM(B)) CHAR_LENGTH(TRIM(B)) +> -------------------- -------------------- +> 1 1 +> rows: 1 + +SELECT TRIM(BOTH B) FROM TEST; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +select TRIM(' ' FROM ' abc ') from dual; +> 'abc' +> ----- +> abc +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/upper.sql b/h2/src/test/org/h2/test/scripts/functions/string/upper.sql new file mode 100644 index 0000000000..cbdaa1f69c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/upper.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select ucase(null) en, ucase('Hello') hello, ucase('ABC') abc; +> EN HELLO ABC +> ---- ----- --- +> null HELLO ABC +> rows: 1 + +select upper(null) en, upper('Hello') hello, upper('ABC') abc; +> EN HELLO ABC +> ---- ----- --- +> null HELLO ABC +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql b/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql new file mode 100644 index 0000000000..16a45622d8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/utf8tostring.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')); +>> This is a test diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlattr.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql new file mode 100644 index 0000000000..278816047c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlcdata.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL XMLCDATA(''); +>> ]]> + +CALL XMLCDATA('special text ]]>'); +>> special text ]]> diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql new file mode 100644 index 0000000000..9e7721a861 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlcomment.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL XMLCOMMENT('Test'); +>> + +CALL XMLCOMMENT('--- test ---'); +>> diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql new file mode 100644 index 0000000000..280b762d15 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlnode.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL XMLNODE('a', XMLATTR('href', 'https://h2database.com')); +>> + +CALL XMLNODE('br'); +>>
    + +CALL XMLNODE('p', null, 'Hello World'); +>>

    Hello World

    + +SELECT XMLNODE('p', null, 'Hello' || chr(10) || 'World'); +>>

    Hello World

    + +SELECT XMLNODE('p', null, 'Hello' || chr(10) || 'World', false); +>>

    Hello World

    diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql new file mode 100644 index 0000000000..4f7d8df35f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmlstartdoc.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL XMLSTARTDOC(); +>> diff --git a/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql b/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql new file mode 100644 index 0000000000..9e2b422849 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/string/xmltext.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL XMLTEXT('test'); +>> test + +CALL XMLTEXT(''); +>> <test> + +SELECT XMLTEXT('hello' || chr(10) || 'world'); +>> hello world + +CALL XMLTEXT('hello' || chr(10) || 'world', true); +>> hello world diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql new file mode 100644 index 0000000000..b979da1343 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-cat.sql @@ -0,0 +1,22 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select array_cat(ARRAY[1, 2], ARRAY[3, 4]) = ARRAY[1, 2, 3, 4]; +>> TRUE + +select array_cat(ARRAY[1, 2], null) is null; +>> TRUE + +select array_cat(null, ARRAY[1, 2]) is null; +>> TRUE + +select array_append(ARRAY[1, 2], 3) = ARRAY[1, 2, 3]; +>> TRUE + +select array_append(ARRAY[1, 2], null) is null; +>> TRUE + +select array_append(null, 3) is null; +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql new file mode 100644 index 0000000000..897c24290b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-contains.sql @@ -0,0 +1,56 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select array_contains(ARRAY[4.0, 2.0, 2.0], 2.0); +>> TRUE + +select array_contains(ARRAY[4.0, 2.0, 2.0], 5.0); +>> FALSE + +select array_contains(ARRAY['one', 'two'], 'one'); +>> TRUE + +select array_contains(ARRAY['one', 'two'], 'xxx'); +>> FALSE + +select array_contains(ARRAY['one', 'two'], null); +>> FALSE + +select array_contains(ARRAY[null, 'two'], null); +>> TRUE + +select array_contains(null, 'one'); +>> null + +select array_contains(ARRAY[ARRAY[1, 2], ARRAY[3, 4]], ARRAY[1, 2]); +>> TRUE + +select array_contains(ARRAY[ARRAY[1, 2], ARRAY[3, 4]], ARRAY[5, 6]); +>> FALSE + +CREATE TABLE TEST (ID INT PRIMARY KEY AUTO_INCREMENT, A INT ARRAY); +> ok + +INSERT INTO TEST (A) VALUES (ARRAY[1L, 2L]), (ARRAY[3L, 4L]); +> update count: 2 + +SELECT ID, ARRAY_CONTAINS(A, 1L), ARRAY_CONTAINS(A, 2L), ARRAY_CONTAINS(A, 3L), ARRAY_CONTAINS(A, 4L) FROM TEST; +> ID ARRAY_CONTAINS(A, 1) ARRAY_CONTAINS(A, 2) ARRAY_CONTAINS(A, 3) ARRAY_CONTAINS(A, 4) +> -- -------------------- -------------------- -------------------- -------------------- +> 1 TRUE TRUE FALSE FALSE +> 2 FALSE FALSE TRUE TRUE +> rows: 2 + +SELECT * 
FROM ( + SELECT ID, ARRAY_CONTAINS(A, 1L), ARRAY_CONTAINS(A, 2L), ARRAY_CONTAINS(A, 3L), ARRAY_CONTAINS(A, 4L) FROM TEST +); +> ID ARRAY_CONTAINS(A, 1) ARRAY_CONTAINS(A, 2) ARRAY_CONTAINS(A, 3) ARRAY_CONTAINS(A, 4) +> -- -------------------- -------------------- -------------------- -------------------- +> 1 TRUE TRUE FALSE FALSE +> 2 FALSE FALSE TRUE TRUE +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql new file mode 100644 index 0000000000..fe9e4b4e8a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-get.sql @@ -0,0 +1,17 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INTEGER ARRAY) AS VALUES ARRAY[NULL], ARRAY[1]; +> ok + +SELECT A, ARRAY_GET(A, 1), ARRAY_GET(A, 1) IS OF (INTEGER) FROM TEST; +> A A[1] A[1] IS OF (INTEGER) +> ------ ---- -------------------- +> [1] 1 TRUE +> [null] null null +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql b/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql new file mode 100644 index 0000000000..09e0d76d02 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/array-slice.sql @@ -0,0 +1,46 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select array_slice(ARRAY[1, 2, 3, 4], 1, 1) = ARRAY[1]; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 1, 3) = ARRAY[1, 2, 3]; +>> TRUE + +-- test invalid indexes +select array_slice(ARRAY[1, 2, 3, 4], 3, 1) is null; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 0, 3) is null; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 1, 5) is null; +>> TRUE + +-- in PostgreSQL, indexes are corrected +SET MODE PostgreSQL; +> ok + +select array_slice(ARRAY[1, 2, 3, 4], 3, 1) = ARRAY[]; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 0, 3) = ARRAY[1, 2, 3]; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 1, 5) = ARRAY[1, 2, 3, 4]; +>> TRUE + +SET MODE Regular; +> ok + +-- null parameters +select array_slice(null, 1, 3) is null; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], null, 3) is null; +>> TRUE + +select array_slice(ARRAY[1, 2, 3, 4], 1, null) is null; +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql b/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql new file mode 100644 index 0000000000..8065d08a50 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/autocommit.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select autocommit(); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql b/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/cancel-session.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql b/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql new file mode 100644 index 0000000000..1d73e7fa08 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/cardinality.sql @@ -0,0 +1,41 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT CARDINALITY(NULL); +>> null + +SELECT CARDINALITY(ARRAY[]); +>> 0 + +SELECT CARDINALITY(ARRAY[1, 2, 5]); +>> 3 + +SELECT ARRAY_LENGTH(ARRAY[1, 2, 5]); +>> 3 + +CREATE TABLE TEST(ID INT, A INT ARRAY, B INT ARRAY[2]) AS VALUES (1, NULL, NULL), (2, ARRAY[1], ARRAY[1]); +> ok + +SELECT ID, ARRAY_MAX_CARDINALITY(A), ARRAY_MAX_CARDINALITY(B) FROM TEST; +> ID ARRAY_MAX_CARDINALITY(A) ARRAY_MAX_CARDINALITY(B) +> -- ------------------------ ------------------------ +> 1 65536 2 +> 2 65536 2 +> rows: 2 + +SELECT ARRAY_MAX_CARDINALITY(ARRAY_AGG(ID)) FROM TEST; +>> 65536 + +DROP TABLE TEST; +> ok + +SELECT ARRAY_MAX_CARDINALITY(ARRAY['a', 'b']); +>> 2 + +SELECT ARRAY_MAX_CARDINALITY(NULL); +> exception INVALID_VALUE_2 + +SELECT ARRAY_MAX_CARDINALITY(CAST(NULL AS INT ARRAY)); +>> 65536 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql b/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql new file mode 100644 index 0000000000..f56f2b1ccb --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/casewhen.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select casewhen(null, '1', '2') xn, casewhen(1>0, 'n', 'y') xy, casewhen(0<1, 'a', 'b') xa; +> XN XY XA +> -- -- -- +> 2 n a +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/cast.sql b/h2/src/test/org/h2/test/scripts/functions/system/cast.sql new file mode 100644 index 0000000000..4a343d320e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/cast.sql @@ -0,0 +1,203 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select cast(null as varchar(255)) xn, cast(' 10' as int) x10, cast(' 20 ' as int) x20; +> XN X10 X20 +> ---- --- --- +> null 10 20 +> rows: 1 + +select cast(128 as varbinary); +>> X'00000080' + +select cast(65535 as varbinary); +>> X'0000ffff' + +select cast(X'ff' as tinyint); +>> -1 + +select cast(X'7f' as tinyint); +>> 127 + +select cast(X'00ff' as smallint); +>> 255 + +select cast(X'000000ff' as int); +>> 255 + +select cast(X'000000000000ffff' as long); +>> 65535 + +select cast(cast(65535 as long) as varbinary); +>> X'000000000000ffff' + +select cast(cast(-1 as tinyint) as varbinary); +>> X'ff' + +select cast(cast(-1 as smallint) as varbinary); +>> X'ffff' + +select cast(cast(-1 as int) as varbinary); +>> X'ffffffff' + +select cast(cast(-1 as long) as varbinary); +>> X'ffffffffffffffff' + +select cast(cast(1 as tinyint) as varbinary); +>> X'01' + +select cast(cast(1 as smallint) as varbinary); +>> X'0001' + +select cast(cast(1 as int) as varbinary); +>> X'00000001' + +select cast(cast(1 as long) as varbinary); +>> X'0000000000000001' + +select cast(X'ff' as tinyint); +>> -1 + +select cast(X'ffff' as smallint); +>> -1 + +select cast(X'ffffffff' as int); +>> -1 + +select cast(X'ffffffffffffffff' as long); +>> -1 + +select cast(' 011 ' as int); +>> 11 + +select cast(cast(0.1 as real) as decimal(1, 1)); +>> 0.1 + +select cast(cast(95605327.73 as float) 
as decimal(10, 8)); +> exception VALUE_TOO_LONG_2 + +select cast(cast('01020304-0506-0708-090a-0b0c0d0e0f00' as uuid) as varbinary); +>> X'0102030405060708090a0b0c0d0e0f00' + +call cast('null' as uuid); +> exception DATA_CONVERSION_ERROR_1 + +select cast('12345678123456781234567812345678' as uuid); +>> 12345678-1234-5678-1234-567812345678 + +select cast('000102030405060708090a0b0c0d0e0f' as uuid); +>> 00010203-0405-0607-0809-0a0b0c0d0e0f + +select -cast(0 as double); +>> 0.0 + +SELECT * FROM (SELECT CAST('11:11:11.123456789' AS TIME)); +>> 11:11:11 + +SELECT * FROM (SELECT CAST('11:11:11.123456789' AS TIME(0))); +>> 11:11:11 + +SELECT * FROM (SELECT CAST('11:11:11.123456789' AS TIME(9))); +>> 11:11:11.123456789 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789' AS TIMESTAMP)); +>> 2000-01-01 11:11:11.123457 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789' AS TIMESTAMP(0))); +>> 2000-01-01 11:11:11 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789' AS TIMESTAMP(9))); +>> 2000-01-01 11:11:11.123456789 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789Z' AS TIMESTAMP WITH TIME ZONE)); +>> 2000-01-01 11:11:11.123457+00 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789Z' AS TIMESTAMP(0) WITH TIME ZONE)); +>> 2000-01-01 11:11:11+00 + +SELECT * FROM (SELECT CAST('2000-01-01 11:11:11.123456789Z' AS TIMESTAMP(9) WITH TIME ZONE)); +>> 2000-01-01 11:11:11.123456789+00 + +EXPLAIN SELECT CAST('A' AS VARCHAR(10)), CAST(NULL AS BOOLEAN), CAST(NULL AS VARCHAR), CAST(1 AS INT); +>> SELECT CAST('A' AS CHARACTER VARYING(10)), UNKNOWN, CAST(NULL AS CHARACTER VARYING), 1 + +SELECT CURRENT_TIMESTAMP(9) = CAST(CURRENT_TIME(9) AS TIMESTAMP(9) WITH TIME ZONE); +>> TRUE + +SELECT LOCALTIMESTAMP(9) = CAST(LOCALTIME(9) AS TIMESTAMP(9)); +>> TRUE + +CREATE TABLE TEST(I INTERVAL DAY TO SECOND(9), T TIME(9) WITH TIME ZONE); +> ok + +EXPLAIN SELECT CAST(I AS INTERVAL HOUR(4) TO SECOND), CAST(I AS INTERVAL HOUR(4) TO SECOND(6)), + CAST(I AS 
INTERVAL HOUR TO SECOND(9)), CAST(I AS INTERVAL HOUR(2) TO SECOND(9)) FROM TEST; +>> SELECT CAST("I" AS INTERVAL HOUR(4) TO SECOND), CAST("I" AS INTERVAL HOUR(4) TO SECOND(6)), CAST("I" AS INTERVAL HOUR TO SECOND(9)), CAST("I" AS INTERVAL HOUR(2) TO SECOND(9)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT CAST(T AS TIME WITH TIME ZONE), CAST(T AS TIME(0) WITH TIME ZONE), CAST(T AS TIME(3) WITH TIME ZONE) FROM TEST; +>> SELECT CAST("T" AS TIME WITH TIME ZONE), CAST("T" AS TIME(0) WITH TIME ZONE), CAST("T" AS TIME(3) WITH TIME ZONE) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT + CAST(TIME '10:00:00' AS TIME(9)), + CAST(TIME '10:00:00' AS TIME(9) WITH TIME ZONE), + CAST(TIME '10:00:00' AS TIMESTAMP(9)), + CAST(TIME '10:00:00' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT TIME '10:00:00', CAST(TIME '10:00:00' AS TIME(9) WITH TIME ZONE), CAST(TIME '10:00:00' AS TIMESTAMP(9)), CAST(TIME '10:00:00' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9)), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9) WITH TIME ZONE), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9)), + CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIME(9)), TIME WITH TIME ZONE '10:00:00+10', CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9)), CAST(TIME WITH TIME ZONE '10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(DATE '2000-01-01' AS DATE), + CAST(DATE '2000-01-01' AS TIMESTAMP(9)), + CAST(DATE '2000-01-01' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT DATE '2000-01-01', TIMESTAMP '2000-01-01 00:00:00', CAST(DATE '2000-01-01' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9)), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9) WITH TIME ZONE), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS DATE), + CAST(TIMESTAMP '2000-01-01 
10:00:00' AS TIMESTAMP(9)), + CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT TIME '10:00:00', CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIME(9) WITH TIME ZONE), DATE '2000-01-01', TIMESTAMP '2000-01-01 10:00:00', CAST(TIMESTAMP '2000-01-01 10:00:00' AS TIMESTAMP(9) WITH TIME ZONE) + +EXPLAIN SELECT + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIME(9)), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIME(9) WITH TIME ZONE), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS DATE), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9)), + CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9) WITH TIME ZONE); +>> SELECT CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIME(9)), TIME WITH TIME ZONE '10:00:00+10', CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS DATE), CAST(TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' AS TIMESTAMP(9)), TIMESTAMP WITH TIME ZONE '2000-01-01 10:00:00+10' + +CREATE DOMAIN D INT CHECK (VALUE > 10); +> ok + +VALUES CAST(11 AS D); +>> 11 + +VALUES CAST(10 AS D); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +EXPLAIN SELECT CAST(X AS D) FROM SYSTEM_RANGE(20, 30); +>> SELECT CAST("X" AS "PUBLIC"."D") FROM SYSTEM_RANGE(20, 30) /* range index */ + +DROP DOMAIN D; +> ok + +EXPLAIN VALUES CAST('a' AS VARCHAR_IGNORECASE(10)); +>> VALUES (CAST('a' AS VARCHAR_IGNORECASE(10))) + +SELECT CAST('true ' AS BOOLEAN) V, CAST(CAST('true' AS CHAR(10)) AS BOOLEAN) F; +> V F +> ---- ---- +> TRUE TRUE +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql b/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql new file mode 100644 index 0000000000..c5fabf149b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/coalesce.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select coalesce(null, null) xn, coalesce(null, 'a') xa, coalesce('1', '2') x1; +> XN XA X1 +> ---- -- -- +> null a 1 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/convert.sql b/h2/src/test/org/h2/test/scripts/functions/system/convert.sql new file mode 100644 index 0000000000..da1a5fa5c3 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/convert.sql @@ -0,0 +1,10 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select convert(null, varchar(255)) xn, convert(' 10', int) x10, convert(' 20 ', int) x20; +> XN X10 X20 +> ---- --- --- +> null 10 20 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql b/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/csvread.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql b/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/csvwrite.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql new file mode 100644 index 0000000000..fbbce1f79b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_catalog.sql @@ -0,0 +1,37 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL CURRENT_CATALOG; +>> SCRIPT + +CALL DATABASE(); +>> SCRIPT + +SET CATALOG SCRIPT; +> ok + +SET CATALOG 'SCRIPT'; +> ok + +SET CATALOG 'SCR' || 'IPT'; +> ok + +SET CATALOG UNKNOWN_CATALOG; +> exception DATABASE_NOT_FOUND_1 + +SET CATALOG NULL; +> exception DATABASE_NOT_FOUND_1 + +CALL CURRENT_DATABASE(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE PostgreSQL; +> ok + +CALL CURRENT_DATABASE(); +>> SCRIPT + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql new file mode 100644 index 0000000000..d2f21bf1b2 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_schema.sql @@ -0,0 +1,40 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT CURRENT_SCHEMA, SCHEMA(); +> CURRENT_SCHEMA CURRENT_SCHEMA +> -------------- -------------- +> PUBLIC PUBLIC +> rows: 1 + +CREATE SCHEMA S1; +> ok + +SET SCHEMA S1; +> ok + +CALL CURRENT_SCHEMA; +>> S1 + +SET SCHEMA 'PUBLIC'; +> ok + +CALL CURRENT_SCHEMA; +>> PUBLIC + +SET SCHEMA 'S' || 1; +> ok + +CALL CURRENT_SCHEMA; +>> S1 + +SET SCHEMA PUBLIC; +> ok + +SET SCHEMA NULL; +> exception SCHEMA_NOT_FOUND_1 + +DROP SCHEMA S1; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql b/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql new file mode 100644 index 0000000000..2881250ae8 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/current_user.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select user() x_sa, current_user() x_sa2; +> X_SA X_SA2 +> ---- ----- +> SA SA +> rows: 1 + +SELECT CURRENT_USER; +>> SA + +SELECT SESSION_USER; +>> SA + +SELECT SYSTEM_USER; +>> SA + +SELECT CURRENT_ROLE; +>> PUBLIC + +EXPLAIN SELECT CURRENT_USER, SESSION_USER, SYSTEM_USER, USER, CURRENT_ROLE; +>> SELECT CURRENT_USER, SESSION_USER, SYSTEM_USER, CURRENT_USER, CURRENT_ROLE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/currval.sql b/h2/src/test/org/h2/test/scripts/functions/system/currval.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/currval.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql b/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql new file mode 100644 index 0000000000..0f24fa4586 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/data_type_sql.sql @@ -0,0 +1,121 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- +CREATE CONSTANT C VALUE 12; +> ok + +CREATE DOMAIN D AS CHAR(3); +> ok + +CREATE TABLE T (C VARCHAR(10)); +> ok + +CREATE ALIAS R FOR "java.lang.Math.max(long,long)"; +> ok + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', ID) FROM (VALUES NULL, 'TYPE', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'C', 'CONSTANT', ID) +> ---- -------------------------------------------- +> TYPE INTEGER +> X null +> null null +> rows: 3 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', ID) FROM (VALUES NULL, 'TYPE', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'D', 'DOMAIN', ID) +> ---- ------------------------------------------ +> TYPE CHARACTER(3) +> X null +> null null +> rows: 3 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', ID) FROM (VALUES NULL, '0', '1', '2', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'T', 'TABLE', ID) +> ---- ----------------------------------------- +> 0 null +> 1 CHARACTER VARYING(10) +> 2 null +> X null +> null null +> rows: 5 + +SELECT ID, DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', ID) FROM (VALUES NULL, 'RESULT', '0', '1', '2', '3', 'X') T(ID); +> ID DATA_TYPE_SQL('PUBLIC', 'R_1', 'ROUTINE', ID) +> ------ --------------------------------------------- +> 0 null +> 1 BIGINT +> 2 BIGINT +> 3 null +> RESULT BIGINT +> X null +> null null +> rows: 7 + +SELECT DATA_TYPE_SQL(S, O, T, I) FROM (VALUES + (NULL, 'C', 'CONSTANT', 'TYPE'), + ('X', 'C', 'CONSTANT', 'TYPE'), + ('PUBLIC', NULL, 'CONSTANT', 'TYPE'), + ('PUBLIC', 'X', 'CONSTANT', 'TYPE'), + 
('PUBLIC', 'C', NULL, 'TYPE'), + (NULL, 'D', 'DOMAIN', 'TYPE'), + ('X', 'D', 'DOMAIN', 'TYPE'), + ('PUBLIC', NULL, 'DOMAIN', 'TYPE'), + ('PUBLIC', 'X', 'DOMAIN', 'TYPE'), + ('PUBLIC', 'D', NULL, 'TYPE'), + (NULL, 'T', 'TABLE', '1'), + ('X', 'T', 'TABLE', '1'), + ('PUBLIC', NULL, 'TABLE', '1'), + ('PUBLIC', 'X', 'TABLE', '1'), + ('PUBLIC', 'T', NULL, '1'), + (NULL, 'R_1', 'ROUTINE', '1'), + ('X', 'R_1', 'ROUTINE', '1'), + ('PUBLIC', NULL, 'ROUTINE', '1'), + ('PUBLIC', 'R_0', 'ROUTINE', '1'), + ('PUBLIC', 'R_2', 'ROUTINE', '1'), + ('PUBLIC', 'R_Z', 'ROUTINE', '1'), + ('PUBLIC', 'X', 'ROUTINE', '1'), + ('PUBLIC', 'X_1', 'ROUTINE', '1'), + ('PUBLIC', 'R_1', NULL, '1'), + ('PUBLIC', 'T', 'X', '1') + ) T(S, O, T, I); +> DATA_TYPE_SQL(S, O, T, I) +> ------------------------- +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> null +> rows: 25 + +DROP CONSTANT C; +> ok + +DROP DOMAIN D; +> ok + +DROP TABLE T; +> ok + +DROP ALIAS R; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql b/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/database-path.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql b/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql new file mode 100644 index 0000000000..d44d0fa5ee --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/db_object.sql @@ -0,0 +1,284 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE ROLE A; +> ok + +CREATE ROLE B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('ROLE', 'A'), + DB_OBJECT_ID('ROLE', 'B'), + DB_OBJECT_SQL('ROLE', 'A'), + DB_OBJECT_SQL('ROLE', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------- --------------- +> TRUE CREATE ROLE "A" CREATE ROLE "B" +> rows: 1 + +DROP ROLE A; +> ok + +DROP ROLE B; +> ok + +CALL DB_OBJECT_ID('SETTING', 'CREATE_BUILD') IS NOT NULL; +>> TRUE + +CALL DB_OBJECT_SQL('SETTING', 'CREATE_BUILD') IS NOT NULL; +>> TRUE + +CREATE SCHEMA A; +> ok + +CREATE SCHEMA B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('SCHEMA', 'A'), + DB_OBJECT_ID('SCHEMA', 'B'), + DB_OBJECT_SQL('SCHEMA', 'A'), + DB_OBJECT_SQL('SCHEMA', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ -------------------------------------------------- -------------------------------------------------- +> TRUE CREATE SCHEMA IF NOT EXISTS "A" AUTHORIZATION "SA" CREATE SCHEMA IF NOT EXISTS "B" AUTHORIZATION "SA" +> rows: 1 + +DROP SCHEMA A; +> ok + +DROP SCHEMA B; +> ok + +CREATE USER A SALT X'00' HASH X'00'; +> ok + +CREATE USER B SALT X'00' HASH X'00'; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('USER', 'A'), + DB_OBJECT_ID('USER', 'B'), + DB_OBJECT_SQL('USER', 'A'), + DB_OBJECT_SQL('USER', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ------------------------------------------------- ------------------------------------------------- +> TRUE CREATE USER IF NOT EXISTS "A" SALT '00' HASH '00' CREATE USER IF NOT EXISTS "B" SALT '00' HASH '00' +> rows: 1 + +DROP USER A; +> ok + +DROP USER B; +> ok + +CREATE CONSTANT A VALUE 1; +> ok + +CREATE CONSTANT B VALUE 2; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('CONSTANT', 'PUBLIC', 'A'), + DB_OBJECT_ID('CONSTANT', 'PUBLIC', 'B'), + 
DB_OBJECT_SQL('CONSTANT', 'PUBLIC', 'A'), + DB_OBJECT_SQL('CONSTANT', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ------------------------------------ ------------------------------------ +> TRUE CREATE CONSTANT "PUBLIC"."A" VALUE 1 CREATE CONSTANT "PUBLIC"."B" VALUE 2 +> rows: 1 + +DROP CONSTANT A; +> ok + +DROP CONSTANT B; +> ok + +CREATE DOMAIN A AS CHAR; +> ok + +CREATE DOMAIN B AS CHAR; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('DOMAIN', 'PUBLIC', 'A'), + DB_OBJECT_ID('DOMAIN', 'PUBLIC', 'B'), + DB_OBJECT_SQL('DOMAIN', 'PUBLIC', 'A'), + DB_OBJECT_SQL('DOMAIN', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------------------------------- --------------------------------------- +> TRUE CREATE DOMAIN "PUBLIC"."A" AS CHARACTER CREATE DOMAIN "PUBLIC"."B" AS CHARACTER +> rows: 1 + +DROP DOMAIN A; +> ok + +DROP DOMAIN B; +> ok + +CREATE ALIAS A FOR 'java.lang.Math.sqrt'; +> ok + +CREATE AGGREGATE B FOR 'org.h2.test.scripts.Aggregate1'; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('ROUTINE', 'PUBLIC', 'A'), + DB_OBJECT_ID('ROUTINE', 'PUBLIC', 'B'), + DB_OBJECT_SQL('ROUTINE', 'PUBLIC', 'A'), + DB_OBJECT_SQL('ROUTINE', 'PUBLIC', 'B') +)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ --------------------------------------------------------- ------------------------------------------------------------------------ +> TRUE CREATE FORCE ALIAS "PUBLIC"."A" FOR 'java.lang.Math.sqrt' CREATE FORCE AGGREGATE "PUBLIC"."B" FOR 'org.h2.test.scripts.Aggregate1' +> rows: 1 + +DROP ALIAS A; +> ok + +DROP AGGREGATE B; +> ok + +CREATE SEQUENCE A; +> ok + +CREATE SEQUENCE B; +> ok + +SELECT ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES ( + DB_OBJECT_ID('SEQUENCE', 'PUBLIC', 'A'), + DB_OBJECT_ID('SEQUENCE', 'PUBLIC', 'B'), + DB_OBJECT_SQL('SEQUENCE', 'PUBLIC', 'A'), + DB_OBJECT_SQL('SEQUENCE', 'PUBLIC', 'B') 
+)) T(ID_A, ID_B, SQL_A, SQL_B); +> ID_A <> ID_B SQL_A SQL_B +> ------------ ----------------------------------------- ----------------------------------------- +> TRUE CREATE SEQUENCE "PUBLIC"."A" START WITH 1 CREATE SEQUENCE "PUBLIC"."B" START WITH 1 +> rows: 1 + +DROP SEQUENCE A; +> ok + +DROP SEQUENCE B; +> ok + +CREATE MEMORY TABLE T_A(ID INT); +> ok + +CREATE UNIQUE INDEX I_A ON T_A(ID); +> ok + +ALTER TABLE T_A ADD CONSTRAINT C_A UNIQUE(ID); +> ok + +CREATE SYNONYM S_A FOR T_A; +> ok + +CREATE TRIGGER G_A BEFORE INSERT ON T_A FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +CREATE MEMORY TABLE T_B(ID INT); +> ok + +CREATE UNIQUE INDEX I_B ON T_B(ID); +> ok + +ALTER TABLE T_B ADD CONSTRAINT C_B UNIQUE(ID); +> ok + +CREATE SYNONYM S_B FOR T_B; +> ok + +CREATE TRIGGER G_B BEFORE INSERT ON T_B FOR EACH ROW CALL 'org.h2.test.scripts.Trigger1'; +> ok + +SELECT T, ID_A <> ID_B, SQL_A, SQL_B FROM (VALUES +( + 'CONSTRAINT', + DB_OBJECT_ID('CONSTRAINT', 'PUBLIC', 'C_A'), + DB_OBJECT_ID('CONSTRAINT', 'PUBLIC', 'C_B'), + DB_OBJECT_SQL('CONSTRAINT', 'PUBLIC', 'C_A'), + DB_OBJECT_SQL('CONSTRAINT', 'PUBLIC', 'C_B') +), ( + 'INDEX', + DB_OBJECT_ID('INDEX', 'PUBLIC', 'I_A'), + DB_OBJECT_ID('INDEX', 'PUBLIC', 'I_B'), + DB_OBJECT_SQL('INDEX', 'PUBLIC', 'I_A'), + DB_OBJECT_SQL('INDEX', 'PUBLIC', 'I_B') +), ( + 'SYNONYM', + DB_OBJECT_ID('SYNONYM', 'PUBLIC', 'S_A'), + DB_OBJECT_ID('SYNONYM', 'PUBLIC', 'S_B'), + DB_OBJECT_SQL('SYNONYM', 'PUBLIC', 'S_A'), + DB_OBJECT_SQL('SYNONYM', 'PUBLIC', 'S_B') +), ( + 'TABLE', + DB_OBJECT_ID('TABLE', 'PUBLIC', 'T_A'), + DB_OBJECT_ID('TABLE', 'PUBLIC', 'T_B'), + DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T_A'), + DB_OBJECT_SQL('TABLE', 'PUBLIC', 'T_B') +), ( + 'TRIGGER', + DB_OBJECT_ID('TRIGGER', 'PUBLIC', 'G_A'), + DB_OBJECT_ID('TRIGGER', 'PUBLIC', 'G_B'), + DB_OBJECT_SQL('TRIGGER', 'PUBLIC', 'G_A'), + DB_OBJECT_SQL('TRIGGER', 'PUBLIC', 'G_B') +)) T(T, ID_A, ID_B, SQL_A, SQL_B); +> T ID_A <> ID_B SQL_A SQL_B +> ---------- ------------ 
------------------------------------------------------------------------------------------------------------------------------- ------------------------------------------------------------------------------------------------------------------------------- +> CONSTRAINT TRUE ALTER TABLE "PUBLIC"."T_A" ADD CONSTRAINT "PUBLIC"."C_A" UNIQUE("ID") ALTER TABLE "PUBLIC"."T_B" ADD CONSTRAINT "PUBLIC"."C_B" UNIQUE("ID") +> INDEX TRUE CREATE UNIQUE INDEX "PUBLIC"."I_A" ON "PUBLIC"."T_A"("ID" NULLS FIRST) CREATE UNIQUE INDEX "PUBLIC"."I_B" ON "PUBLIC"."T_B"("ID" NULLS FIRST) +> SYNONYM TRUE CREATE SYNONYM "PUBLIC"."S_A" FOR "PUBLIC"."T_A" CREATE SYNONYM "PUBLIC"."S_B" FOR "PUBLIC"."T_B" +> TABLE TRUE CREATE MEMORY TABLE "PUBLIC"."T_A"( "ID" INTEGER ) CREATE MEMORY TABLE "PUBLIC"."T_B"( "ID" INTEGER ) +> TRIGGER TRUE CREATE FORCE TRIGGER "PUBLIC"."G_A" BEFORE INSERT ON "PUBLIC"."T_A" FOR EACH ROW QUEUE 1024 CALL 'org.h2.test.scripts.Trigger1' CREATE FORCE TRIGGER "PUBLIC"."G_B" BEFORE INSERT ON "PUBLIC"."T_B" FOR EACH ROW QUEUE 1024 CALL 'org.h2.test.scripts.Trigger1' +> rows: 5 + +DROP SYNONYM S_A; +> ok + +DROP SYNONYM S_B; +> ok + +DROP TABLE T_B, T_A; +> ok + +CALL DB_OBJECT_ID(NULL, NULL); +>> null + +CALL DB_OBJECT_ID(NULL, NULL, NULL); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', NULL); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'PUBLIC', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('UNKNOWN', 'UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'UNKNOWN', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', 'UNKNOWN'); +>> null + +CALL DB_OBJECT_ID('TABLE', 'PUBLIC', NULL); +>> null + +CALL DB_OBJECT_ID('TABLE', 'INFORMATION_SCHEMA', 'TABLES') IS NOT NULL; +>> TRUE + +CALL DB_OBJECT_SQL('TABLE', 'INFORMATION_SCHEMA', 'TABLES'); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/system/decode.sql b/h2/src/test/org/h2/test/scripts/functions/system/decode.sql new file mode 100644 index 
0000000000..7c7c3ec536 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/decode.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select decode(null, null, 'a'); +>> a + +select decode(1, 1, 'a'); +>> a + +select decode(1, 2, 'a'); +>> null + +select decode(1, 1, 'a', 'else'); +>> a + +select decode(1, 2, 'a', 'else'); +>> else + +select decode(4.0, 2.0, 2.0, 3.0, 3.0); +>> null + +select decode('3', 2.0, 2.0, 3, 3.0); +>> 3.0 + +select decode(4.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 9.0); +>> 4.0 + +select decode(1, 1, '1', 1, '11') from dual; +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql b/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/disk-space-used.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql b/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/file-read.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql b/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/file-write.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql b/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/greatest.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql b/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql new file mode 100644 index 0000000000..ff8a311fd1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/h2version.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +EXPLAIN VALUES H2VERSION(); +>> VALUES (H2VERSION()) diff --git a/h2/src/test/org/h2/test/scripts/functions/system/identity.sql b/h2/src/test/org/h2/test/scripts/functions/system/identity.sql new file mode 100644 index 0000000000..4d692e68d5 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/identity.sql @@ -0,0 +1,34 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY, V INT); +> ok + +INSERT INTO TEST(V) VALUES 10; +> update count: 1 + +VALUES IDENTITY(); +> exception FUNCTION_NOT_FOUND_1 + +VALUES SCOPE_IDENTITY(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE LEGACY; +> ok + +INSERT INTO TEST(V) VALUES 20; +> update count: 1 + +VALUES IDENTITY(); +>> 2 + +VALUES SCOPE_IDENTITY(); +>> 2 + +SET MODE REGULAR; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql b/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql new file mode 100644 index 0000000000..5aa7665740 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/ifnull.sql @@ -0,0 +1,37 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select ifnull(null, '1') x1, ifnull(null, null) xn, ifnull('a', 'b') xa; +> X1 XN XA +> -- ---- -- +> 1 null a +> rows: 1 + +SELECT ISNULL(NULL, '1'); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +select isnull(null, '1') x1, isnull(null, null) xn, isnull('a', 'b') xa; +> X1 XN XA +> -- ---- -- +> 1 null a +> rows: 1 + +SET MODE Regular; +> ok + +CREATE MEMORY TABLE S(D DOUBLE) AS VALUES NULL; +> ok + +CREATE MEMORY TABLE T AS SELECT IFNULL(D, D) FROM S; +> ok + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'T'; +>> DOUBLE PRECISION + +DROP TABLE S, T; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql new file mode 100644 index 0000000000..b51d5cf5d9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/last-insert-id.sql @@ -0,0 +1,43 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- +SET MODE MySQL; +> ok + +create memory table sequence (id INT NOT NULL AUTO_INCREMENT, title varchar(255)); +> ok + +INSERT INTO sequence (title) VALUES ('test'); +> update count: 1 + +INSERT INTO sequence (title) VALUES ('test1'); +> update count: 1 + +SELECT LAST_INSERT_ID() AS L; +>> 2 + +SELECT LAST_INSERT_ID(100) AS L; +>> 100 + +SELECT LAST_INSERT_ID() AS L; +>> 100 + +INSERT INTO sequence (title) VALUES ('test2'); +> update count: 1 + +SELECT MAX(id) AS M FROM sequence; +>> 3 + +SELECT LAST_INSERT_ID() AS L; +>> 3 + +SELECT LAST_INSERT_ID(NULL) AS L; +>> null + +SELECT LAST_INSERT_ID() AS L; +>> 0 + + +DROP TABLE sequence; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/system/least.sql b/h2/src/test/org/h2/test/scripts/functions/system/least.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/least.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql b/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/link-schema.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql b/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/lock-mode.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql b/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/lock-timeout.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql b/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/memory-free.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql b/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/memory-used.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql b/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/nextval.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql b/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql new file mode 100644 index 0000000000..6042a0bc00 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/nullif.sql @@ -0,0 +1,27 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select nullif(null, null) xn, nullif('a', 'a') xn, nullif('1', '2') x1; +> XN XN X1 +> ---- ---- -- +> null null 1 +> rows: 1 + +SELECT + A = B, + NULLIF(A, B), CASE WHEN A = B THEN NULL ELSE A END + FROM (VALUES + (1, (1, NULL), (1, NULL)), + (2, (1, NULL), (2, NULL)), + (3, (2, NULL), (1, NULL)), + (4, (1, 1), (1, 2)) + ) T(N, A, B) ORDER BY N; +> A = B NULLIF(A, B) CASE WHEN A = B THEN NULL ELSE A END +> ----- ------------- ------------------------------------ +> null ROW (1, null) ROW (1, null) +> FALSE ROW (1, null) ROW (1, null) +> FALSE ROW (2, null) ROW (2, null) +> FALSE ROW (1, 1) ROW (1, 1) +> rows (ordered): 4 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql b/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/nvl2.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql b/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql new file mode 100644 index 0000000000..14d9568289 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/readonly.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select readonly(); +>> FALSE diff --git a/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql b/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql new file mode 100644 index 0000000000..0893274095 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/rownum.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +----- Issue#600 ----- +create table test as (select char(x) as str from system_range(48,90)); +> ok + +select rownum() as rnum, str from test where str = 'A'; +> RNUM STR +> ---- --- +> 1 A +> rows: 1 + +----- Issue#3353 ----- +SELECT str FROM FINAL TABLE (UPDATE test SET str = char(rownum + 48) WHERE str = '0'); +> STR +> --- +> 1 +> rows: 1 + +drop table test; +> ok + +SELECT * FROM (VALUES 1, 2) AS T1(X), (VALUES 1, 2) AS T2(X) WHERE ROWNUM = 1; +> X X +> - - +> 1 1 +> rows: 1 + diff --git a/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/session-id.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/table.sql b/h2/src/test/org/h2/test/scripts/functions/system/table.sql new file mode 100644 index 0000000000..4df052af6a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/table.sql @@ -0,0 +1,65 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select * from table(a int=(1)), table(b int=2), table(c int=row(3)); +> A B C +> - - - +> 1 2 3 +> rows: 1 + +create table test as select * from table(id int=(1, 2, 3)); +> ok + +SELECT * FROM (SELECT * FROM TEST) ORDER BY id; +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT * FROM (SELECT * FROM TEST) x ORDER BY id; +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +drop table test; +> ok + +select * from table(id int = (1)); +> ID +> -- +> 1 +> rows: 1 + +-- compatibility syntax +call table(id int = (1)); +> ID +> -- +> 1 +> rows: 1 + +explain select * from table(id int = (1, 2), name varchar=('Hello', 'World')); +>> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INTEGER=ROW (1, 2), "NAME" CHARACTER VARYING=ROW ('Hello', 'World')) /* function */ + +explain select * from table(id int = ARRAY[1, 2], name varchar=ARRAY['Hello', 'World']); +>> SELECT "TABLE"."ID", "TABLE"."NAME" FROM TABLE("ID" INTEGER=ARRAY [1, 2], "NAME" CHARACTER VARYING=ARRAY ['Hello', 'World']) /* function */ + +select * from table(id int=(1, 2), name varchar=('Hello', 'World')) x order by id; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 + +SELECT * FROM (TABLE(ID INT = (1, 2))); +> ID +> -- +> 1 +> 2 +> rows: 2 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql b/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql new file mode 100644 index 0000000000..e04598e7c1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/transaction-id.sql @@ -0,0 +1,4 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- diff --git a/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql b/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql new file mode 100644 index 0000000000..ba5c743a21 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/trim_array.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT TRIM_ARRAY(ARRAY[1, 2], -1); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(ARRAY[1, 2], 0); +>> [1, 2] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 1); +>> [1] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 2); +>> [] + +SELECT TRIM_ARRAY(ARRAY[1, 2], 3); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(NULL, 1); +>> null + +SELECT TRIM_ARRAY(NULL, -1); +> exception ARRAY_ELEMENT_ERROR_2 + +SELECT TRIM_ARRAY(ARRAY[1], NULL); +>> null diff --git a/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql b/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql new file mode 100644 index 0000000000..5bca7ee491 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/truncate-value.sql @@ -0,0 +1,19 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CALL TRUNCATE_VALUE('Test 123', 4, FALSE); +>> Test + +CALL TRUNCATE_VALUE(1234567890.123456789, 4, FALSE); +>> 1235000000 + +CALL TRUNCATE_VALUE(1234567890.123456789, 4, TRUE); +>> 1235000000 + +CALL TRUNCATE_VALUE(CAST(1234567890.123456789 AS DOUBLE PRECISION), 4, FALSE); +>> 1.2345678901234567E9 + +CALL TRUNCATE_VALUE(CAST(1234567890.123456789 AS DOUBLE PRECISION), 4, TRUE); +>> 1.235E9 diff --git a/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql b/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql new file mode 100644 index 0000000000..a5a52b0197 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/system/unnest.sql @@ -0,0 +1,67 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT * FROM UNNEST(); +> exception INVALID_PARAMETER_COUNT_2 + +SELECT * FROM UNNEST(ARRAY[]); +> C1 +> -- +> rows: 0 + +SELECT * FROM UNNEST(ARRAY[1, 2, 3]); +> C1 +> -- +> 1 +> 2 +> 3 +> rows: 3 + +-- compatibility syntax +CALL UNNEST(ARRAY[1, 2, 3]); +> C1 +> -- +> 1 +> 2 +> 3 +> rows: 3 + +SELECT * FROM UNNEST(ARRAY[1], ARRAY[2, 3, 4], ARRAY[5, 6]); +> C1 C2 C3 +> ---- -- ---- +> 1 2 5 +> null 3 6 +> null 4 null +> rows: 3 + +SELECT * FROM UNNEST(ARRAY[1], ARRAY[2, 3, 4], ARRAY[5, 6]) WITH ORDINALITY; +> C1 C2 C3 NORD +> ---- -- ---- ---- +> 1 2 5 1 +> null 3 6 2 +> null 4 null 3 +> rows: 3 + +EXPLAIN SELECT * FROM UNNEST(ARRAY[1]); +>> SELECT "UNNEST"."C1" FROM UNNEST(ARRAY [1]) /* function */ + +EXPLAIN SELECT * FROM UNNEST(ARRAY[1]) WITH ORDINALITY; +>> SELECT "UNNEST"."C1", "UNNEST"."NORD" FROM UNNEST(ARRAY [1]) WITH ORDINALITY /* function */ + +SELECT 1 IN(SELECT * FROM UNNEST(ARRAY[1, 2, 3])); +>> TRUE + +SELECT 4 IN(SELECT * FROM UNNEST(ARRAY[1, 2, 3])); +>> FALSE + +SELECT X, X IN(SELECT * FROM UNNEST(ARRAY[2, 4])) FROM SYSTEM_RANGE(1, 5); +> X X IN( SELECT DISTINCT UNNEST.C1 FROM 
UNNEST(ARRAY [2, 4])) +> - ---------------------------------------------------------- +> 1 FALSE +> 2 TRUE +> 3 FALSE +> 4 TRUE +> 5 FALSE +> rows: 5 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql new file mode 100644 index 0000000000..1d558baf58 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current-time.sql @@ -0,0 +1,43 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SET TIME ZONE '-8:00'; +> ok + +SELECT CAST(CURRENT_TIME AS TIME(9)) = LOCALTIME; +>> TRUE + +SELECT CAST(CURRENT_TIME(0) AS TIME(9)) = LOCALTIME(0); +>> TRUE + +SELECT CAST(CURRENT_TIME(9) AS TIME(9)) = LOCALTIME(9); +>> TRUE + +SET TIME ZONE LOCAL; +> ok + +select length(curtime())>=8 c1, length(current_time())>=8 c2, substring(curtime(), 3, 1) c3; +> C1 C2 C3 +> ---- ---- -- +> TRUE TRUE : +> rows: 1 + +select length(now())>18 c1, length(current_timestamp())>18 c2, length(now(0))>18 c3, length(now(2))>18 c4; +> C1 C2 C3 C4 +> ---- ---- ---- ---- +> TRUE TRUE TRUE TRUE +> rows: 1 + +SELECT CAST(CURRENT_TIME AS TIME(9)) = LOCALTIME; +>> TRUE + +SELECT CAST(CURRENT_TIME(0) AS TIME(9)) = LOCALTIME(0); +>> TRUE + +SELECT CAST(CURRENT_TIME(9) AS TIME(9)) = LOCALTIME(9); +>> TRUE + +EXPLAIN SELECT CURRENT_TIME, LOCALTIME, CURRENT_TIME(9), LOCALTIME(9); +>> SELECT CURRENT_TIME, LOCALTIME, CURRENT_TIME(9), LOCALTIME(9) diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql new file mode 100644 index 0000000000..c5fe931913 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_date.sql @@ -0,0 +1,13 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select length(curdate()) c1, length(current_date()) c2, substring(curdate(), 5, 1) c3; +> C1 C2 C3 +> -- -- -- +> 10 10 - +> rows: 1 + +SELECT CURRENT_DATE IS OF (DATE); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql new file mode 100644 index 0000000000..38e6ef835b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/current_timestamp.sql @@ -0,0 +1,137 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SET TIME ZONE '-8:00'; +> ok + +SELECT CAST(CURRENT_TIMESTAMP AS TIMESTAMP(9)) = LOCALTIMESTAMP; +>> TRUE + +SELECT CAST(CURRENT_TIMESTAMP(0) AS TIMESTAMP(9)) = LOCALTIMESTAMP(0); +>> TRUE + +SELECT CAST(CURRENT_TIMESTAMP(9) AS TIMESTAMP(9)) = LOCALTIMESTAMP(9); +>> TRUE + +VALUES EXTRACT(TIMEZONE_HOUR FROM CURRENT_TIMESTAMP); +>> -8 + +SET TIME ZONE '5:00'; +> ok + +VALUES EXTRACT(TIMEZONE_HOUR FROM CURRENT_TIMESTAMP); +>> 5 + +SET TIME ZONE LOCAL; +> ok + +@reconnect off + +SET AUTOCOMMIT OFF; +> ok + +CREATE ALIAS SLEEP FOR "java.lang.Thread.sleep(long)"; +> ok + +CREATE TABLE TEST(I IDENTITY PRIMARY KEY, T TIMESTAMP(9) WITH TIME ZONE); +> ok + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)), (CURRENT_TIMESTAMP(9)); +> update count: 2 + +CALL SLEEP(10); +>> null + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)); +> update count: 1 + +CALL SLEEP(10); +>> null + +COMMIT; +> ok + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)); +> update count: 1 + +CALL SLEEP(10); +>> null + +COMMIT; +> ok + +-- same statement +SELECT (SELECT T FROM TEST WHERE I = 1) = (SELECT T FROM TEST WHERE I = 2); +>> TRUE + +-- same transaction +SELECT (SELECT T FROM TEST 
WHERE I = 2) = (SELECT T FROM TEST WHERE I = 3); +>> TRUE + +-- another transaction +SELECT (SELECT T FROM TEST WHERE I = 3) = (SELECT T FROM TEST WHERE I = 4); +>> FALSE + +SET MODE MySQL; +> ok + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)), (CURRENT_TIMESTAMP(9)); +> update count: 2 + +CALL SLEEP(10); +>> null + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)); +> update count: 1 + +CALL SLEEP(10); +>> null + +COMMIT; +> ok + +INSERT INTO TEST(T) VALUES (CURRENT_TIMESTAMP(9)); +> update count: 1 + +COMMIT; +> ok + +-- same statement +SELECT (SELECT T FROM TEST WHERE I = 5) = (SELECT T FROM TEST WHERE I = 6); +>> TRUE + +-- same transaction +SELECT (SELECT T FROM TEST WHERE I = 6) = (SELECT T FROM TEST WHERE I = 7); +>> FALSE + +-- another transaction +SELECT (SELECT T FROM TEST WHERE I = 7) = (SELECT T FROM TEST WHERE I = 8); +>> FALSE + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +DROP ALIAS SLEEP; +> ok + +SET AUTOCOMMIT ON; +> ok + +@reconnect on + +SELECT GETDATE(); +> exception FUNCTION_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +SELECT LOCALTIMESTAMP(3) = GETDATE(); +>> TRUE + +SET MODE Regular; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql new file mode 100644 index 0000000000..7d72d289d4 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/date_trunc.sql @@ -0,0 +1,925 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +@reconnect off + +SET TIME ZONE '01:00'; +> ok + +-- +-- Test time unit in 'MICROSECONDS' +-- +SELECT DATE_TRUNC('MICROSECONDS', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('microseconds', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC(microseconds, time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('MICROSECONDS', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('microseconds', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('MICROSECONDS', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('microseconds', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('MICROSECONDS', time '15:14:13.123456789'); +>> 15:14:13.123456 + +SELECT DATE_TRUNC('microseconds', time '15:14:13.123456789'); +>> 15:14:13.123456 + +SELECT DATE_TRUNC('MICROSECONDS', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('microseconds', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('MICROSECONDS', date '1970-01-01'); +>> 1970-01-01 + +SELECT DATE_TRUNC('microseconds', date '1970-01-01'); +>> 1970-01-01 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13.123456789+00'); +>> 2015-05-29 15:14:13.123456+00 + +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13.123456789+00'); +>> 2015-05-29 15:14:13.123456+00 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13.123456789-06'); +>> 2015-05-29 15:14:13.123456-06 + +select DATE_TRUNC('microseconds', timestamp with time zone 
'2015-05-29 15:14:13.123456789-06'); +>> 2015-05-29 15:14:13.123456-06 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('MICROSECONDS', timestamp with time zone '2015-05-29 15:14:13.123456789+10'); +>> 2015-05-29 15:14:13.123456+10 + +select DATE_TRUNC('microseconds', timestamp with time zone '2015-05-29 15:14:13.123456789+10'); +>> 2015-05-29 15:14:13.123456+10 + +SELECT DATE_TRUNC('microseconds', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('MICROSECONDS', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('microseconds', timestamp '2015-05-29 15:14:13.123456789'); +>> 2015-05-29 15:14:13.123456 + +SELECT DATE_TRUNC('MICROSECONDS', timestamp '2015-05-29 15:14:13.123456789'); +>> 2015-05-29 15:14:13.123456 + +SELECT DATE_TRUNC('microseconds', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('MICROSECONDS', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('microseconds', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +SELECT DATE_TRUNC('MICROSECONDS', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +-- +-- Test time unit in 'MILLISECONDS' +-- +SELECT DATE_TRUNC('MILLISECONDS', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('milliseconds', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('MILLISECONDS', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('milliseconds', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('MILLISECONDS', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('milliseconds', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('MILLISECONDS', time '15:14:13.123456'); +>> 15:14:13.123 + +SELECT DATE_TRUNC('milliseconds', time '15:14:13.123456'); +>> 15:14:13.123 + +SELECT 
DATE_TRUNC('MILLISECONDS', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('milliseconds', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('MILLISECONDS', date '1970-01-01'); +>> 1970-01-01 + +SELECT DATE_TRUNC('milliseconds', date '1970-01-01'); +>> 1970-01-01 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13.123456+00'); +>> 2015-05-29 15:14:13.123+00 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13.123456+00'); +>> 2015-05-29 15:14:13.123+00 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13.123456-06'); +>> 2015-05-29 15:14:13.123-06 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13.123456-06'); +>> 2015-05-29 15:14:13.123-06 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('MILLISECONDS', timestamp with time zone '2015-05-29 15:14:13.123456+10'); +>> 2015-05-29 15:14:13.123+10 + +select DATE_TRUNC('milliseconds', timestamp with time zone '2015-05-29 15:14:13.123456+10'); +>> 2015-05-29 15:14:13.123+10 + +SELECT DATE_TRUNC('milliseconds', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('MILLISECONDS', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('milliseconds', timestamp '2015-05-29 
15:14:13.123456'); +>> 2015-05-29 15:14:13.123 + +SELECT DATE_TRUNC('MILLISECONDS', timestamp '2015-05-29 15:14:13.123456'); +>> 2015-05-29 15:14:13.123 + +SELECT DATE_TRUNC('milliseconds', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('MILLISECONDS', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('milliseconds', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +SELECT DATE_TRUNC('MILLISECONDS', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +-- +-- Test time unit 'SECOND' +-- +SELECT DATE_TRUNC('SECOND', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('second', time '00:00:00.000'); +>> 00:00:00 + +SELECT DATE_TRUNC('SECOND', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('second', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('SECOND', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('second', time '15:14:13'); +>> 15:14:13 + +SELECT DATE_TRUNC('SECOND', time '15:14:13.123456'); +>> 15:14:13 + +SELECT DATE_TRUNC('second', time '15:14:13.123456'); +>> 15:14:13 + +SELECT DATE_TRUNC('SECOND', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('second', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('SECOND', date '1970-01-01'); +>> 1970-01-01 + +SELECT DATE_TRUNC('second', date '1970-01-01'); +>> 1970-01-01 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13.123456+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13.123456+00'); +>> 2015-05-29 15:14:13+00 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13-06'); 
+>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13.123456-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13.123456-06'); +>> 2015-05-29 15:14:13-06 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('SECOND', timestamp with time zone '2015-05-29 15:14:13.123456+10'); +>> 2015-05-29 15:14:13+10 + +select DATE_TRUNC('second', timestamp with time zone '2015-05-29 15:14:13.123456+10'); +>> 2015-05-29 15:14:13+10 + +SELECT DATE_TRUNC('second', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('SECOND', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('second', timestamp '2015-05-29 15:14:13.123456'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('SECOND', timestamp '2015-05-29 15:14:13.123456'); +>> 2015-05-29 15:14:13 + +SELECT DATE_TRUNC('second', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('SECOND', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('second', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +SELECT DATE_TRUNC('SECOND', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +-- +-- Test time unit 'MINUTE' +-- +SELECT DATE_TRUNC('MINUTE', time '00:00:00'); +>> 00:00:00 + +SELECT DATE_TRUNC('minute', time '00:00:00'); +>> 00:00:00 + +SELECT DATE_TRUNC('MINUTE', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('minute', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('MINUTE', time '15:14:13'); +>> 15:14:00 + +SELECT DATE_TRUNC('minute', time '15:14:13'); +>> 15:14:00 + +SELECT DATE_TRUNC('MINUTE', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('minute', date '2015-05-29'); +>> 2015-05-29 + +SELECT 
DATE_TRUNC('MINUTE', date '1970-01-01'); +>> 1970-01-01 + +SELECT DATE_TRUNC('minute', date '1970-01-01'); +>> 1970-01-01 + +select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:00+00 + +select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:14:00+00 + +select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:00-06 + +select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:14:00-06 + +select DATE_TRUNC('MINUTE', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:00+10 + +select DATE_TRUNC('minute', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:14:00+10 + +SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:14:00 + +SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('MINUTE', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +-- +-- Test time unit 'HOUR' +-- +SELECT DATE_TRUNC('HOUR', time '00:00:00'); +>> 00:00:00 + +SELECT DATE_TRUNC('hour', time '00:00:00'); +>> 00:00:00 + +SELECT DATE_TRUNC('HOUR', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('hour', time '15:00:00'); +>> 15:00:00 + +SELECT DATE_TRUNC('HOUR', time '15:14:13'); +>> 15:00:00 + +SELECT DATE_TRUNC('hour', time '15:14:13'); +>> 15:00:00 + +SELECT DATE_TRUNC('HOUR', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('hour', date '2015-05-29'); +>> 2015-05-29 + +SELECT DATE_TRUNC('HOUR', date '1970-01-01'); +>> 1970-01-01 + +SELECT DATE_TRUNC('hour', date '1970-01-01'); +>> 1970-01-01 + +select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:00:00+00 + +select DATE_TRUNC('hour', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 15:00:00+00 + +select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 
2015-05-29 15:00:00-06 + +select DATE_TRUNC('hour', timestamp with time zone '2015-05-29 15:14:13-06'); +>> 2015-05-29 15:00:00-06 + +select DATE_TRUNC('HOUR', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:00:00+10 + +select DATE_TRUNC('hour', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 15:00:00+10 + +SELECT DATE_TRUNC('hour', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('HOUR', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('hour', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('HOUR', timestamp '2015-05-29 15:00:00'); +>> 2015-05-29 15:00:00 + +SELECT DATE_TRUNC('hour', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +SELECT DATE_TRUNC('HOUR', timestamp '2015-05-29 00:00:00'); +>> 2015-05-29 00:00:00 + +-- +-- Test time unit 'DAY' +-- +select DATE_TRUNC('day', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('DAY', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('day', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('DAY', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('day', date '2015-05-29'); +>> 2015-05-29 + +select DATE_TRUNC('DAY', date '2015-05-29'); +>> 2015-05-29 + +select DATE_TRUNC('day', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 00:00:00 + +select DATE_TRUNC('DAY', timestamp '2015-05-29 15:14:13'); +>> 2015-05-29 00:00:00 + +select DATE_TRUNC('day', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 00:00:00+00 + +select DATE_TRUNC('DAY', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-29 00:00:00+00 + +select DATE_TRUNC('day', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-05-29 00:00:00-06 + +select DATE_TRUNC('DAY', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-05-29 00:00:00-06 + +select DATE_TRUNC('day', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 00:00:00+10 + +select DATE_TRUNC('DAY', 
timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-29 00:00:00+10 + +-- +-- Test time unit 'WEEK' +-- +select DATE_TRUNC('week', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('WEEK', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('week', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('WEEK', time '15:14:13'); +>> 00:00:00 + +-- ISO_WEEK + +SELECT DATE_TRUNC(ISO_WEEK, TIME '00:00:00'); +>> 00:00:00 + +SELECT DATE_TRUNC(ISO_WEEK, TIME '15:14:13'); +>> 00:00:00 + +SELECT DATE_TRUNC(ISO_WEEK, DATE '2015-05-28'); +>> 2015-05-25 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP '2015-05-29 15:14:13'); +>> 2015-05-25 00:00:00 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP '2018-03-14 00:00:00.000'); +>> 2018-03-12 00:00:00 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 15:14:13+00'); +>> 2015-05-25 00:00:00+00 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 05:14:13-06'); +>> 2015-05-25 00:00:00-06 + +SELECT DATE_TRUNC(ISO_WEEK, TIMESTAMP WITH TIME ZONE '2015-05-29 15:14:13+10'); +>> 2015-05-25 00:00:00+10 + +-- +-- Test time unit 'MONTH' +-- +select DATE_TRUNC('month', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('MONTH', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC(MONTH, time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('month', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('MONTH', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('month', date '2015-05-28'); +>> 2015-05-01 + +select DATE_TRUNC('MONTH', date '2015-05-28'); +>> 2015-05-01 + +select DATE_TRUNC('month', timestamp '2015-05-29 15:14:13'); +>> 2015-05-01 00:00:00 + +select DATE_TRUNC('MONTH', timestamp '2015-05-29 15:14:13'); +>> 2015-05-01 00:00:00 + +SELECT DATE_TRUNC('MONTH', timestamp '2018-03-14 00:00:00.000'); +>> 2018-03-01 00:00:00 + +select DATE_TRUNC('month', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-05-01 00:00:00+00 + +select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 15:14:13+00'); 
+>> 2015-05-01 00:00:00+00 + +select DATE_TRUNC('month', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-05-01 00:00:00-06 + +select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-05-01 00:00:00-06 + +select DATE_TRUNC('month', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-01 00:00:00+10 + +select DATE_TRUNC('MONTH', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-05-01 00:00:00+10 + +-- +-- Test time unit 'QUARTER' +-- +select DATE_TRUNC('quarter', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('QUARTER', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('quarter', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('QUARTER', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('quarter', date '2015-05-28'); +>> 2015-04-01 + +select DATE_TRUNC('QUARTER', date '2015-05-28'); +>> 2015-04-01 + +select DATE_TRUNC('quarter', timestamp '2015-05-29 15:14:13'); +>> 2015-04-01 00:00:00 + +select DATE_TRUNC('QUARTER', timestamp '2015-05-29 15:14:13'); +>> 2015-04-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2018-03-14 00:00:00.000'); +>> 2018-01-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-05-29 15:14:13'); +>> 2015-04-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-05-01 15:14:13'); +>> 2015-04-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-07-29 15:14:13'); +>> 2015-07-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-09-29 15:14:13'); +>> 2015-07-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-10-29 15:14:13'); +>> 2015-10-01 00:00:00 + +SELECT DATE_TRUNC('QUARTER', timestamp '2015-12-29 15:14:13'); +>> 2015-10-01 00:00:00 + +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-04-01 00:00:00+00 + +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-04-01 00:00:00+00 + +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 
05:14:13-06'); +>> 2015-04-01 00:00:00-06 + +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-04-01 00:00:00-06 + +select DATE_TRUNC('quarter', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-04-01 00:00:00+10 + +select DATE_TRUNC('QUARTER', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-04-01 00:00:00+10 + +-- +-- Test time unit 'YEAR' +-- +select DATE_TRUNC('year', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('YEAR', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('year', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('YEAR', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('year', date '2015-05-28'); +>> 2015-01-01 + +select DATE_TRUNC('YEAR', date '2015-05-28'); +>> 2015-01-01 + +select DATE_TRUNC('year', timestamp '2015-05-29 15:14:13'); +>> 2015-01-01 00:00:00 + +select DATE_TRUNC('YEAR', timestamp '2015-05-29 15:14:13'); +>> 2015-01-01 00:00:00 + +select DATE_TRUNC('year', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-01-01 00:00:00+00 + +select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2015-01-01 00:00:00+00 + +select DATE_TRUNC('year', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-01-01 00:00:00-06 + +select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2015-01-01 00:00:00-06 + +select DATE_TRUNC('year', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-01-01 00:00:00+10 + +select DATE_TRUNC('YEAR', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2015-01-01 00:00:00+10 + +-- +-- Test time unit 'DECADE' +-- +select DATE_TRUNC('decade', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('DECADE', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('decade', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('DECADE', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('decade', date '2015-05-28'); +>> 2010-01-01 + +select DATE_TRUNC('DECADE', date '2015-05-28'); 
+>> 2010-01-01 + +select DATE_TRUNC('decade', timestamp '2015-05-29 15:14:13'); +>> 2010-01-01 00:00:00 + +select DATE_TRUNC('DECADE', timestamp '2015-05-29 15:14:13'); +>> 2010-01-01 00:00:00 + +SELECT DATE_TRUNC('decade', timestamp '2010-05-29 15:14:13'); +>> 2010-01-01 00:00:00 + +select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2010-01-01 00:00:00+00 + +select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2010-01-01 00:00:00+00 + +select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2010-01-01 00:00:00-06 + +select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2010-01-01 00:00:00-06 + +select DATE_TRUNC('decade', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2010-01-01 00:00:00+10 + +select DATE_TRUNC('DECADE', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2010-01-01 00:00:00+10 + +-- +-- Test time unit 'CENTURY' +-- +select DATE_TRUNC('century', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('CENTURY', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('century', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('CENTURY', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('century', date '2015-05-28'); +>> 2001-01-01 + +select DATE_TRUNC('CENTURY', date '2015-05-28'); +>> 2001-01-01 + +select DATE_TRUNC('century', timestamp '2015-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +select DATE_TRUNC('CENTURY', timestamp '2015-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +SELECT DATE_TRUNC('century', timestamp '2199-05-29 15:14:13'); +>> 2101-01-01 00:00:00 + +SELECT DATE_TRUNC('CENTURY', timestamp '2000-05-29 15:14:13'); +>> 1901-01-01 00:00:00 + +SELECT DATE_TRUNC('century', timestamp '2001-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +select DATE_TRUNC('century', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2001-01-01 00:00:00+00 + +select DATE_TRUNC('CENTURY', timestamp with time zone '2015-05-29 
15:14:13+00'); +>> 2001-01-01 00:00:00+00 + +select DATE_TRUNC('century', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2001-01-01 00:00:00-06 + +select DATE_TRUNC('CENTURY', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2001-01-01 00:00:00-06 + +select DATE_TRUNC('century', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2001-01-01 00:00:00+10 + +select DATE_TRUNC('CENTURY', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2001-01-01 00:00:00+10 + +-- +-- Test time unit 'MILLENNIUM' +-- +select DATE_TRUNC('millennium', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('MILLENNIUM', time '00:00:00'); +>> 00:00:00 + +select DATE_TRUNC('millennium', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('MILLENNIUM', time '15:14:13'); +>> 00:00:00 + +select DATE_TRUNC('millennium', date '2015-05-28'); +>> 2001-01-01 + +select DATE_TRUNC('MILLENNIUM', date '2015-05-28'); +>> 2001-01-01 + +select DATE_TRUNC('millennium', timestamp '2015-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +select DATE_TRUNC('MILLENNIUM', timestamp '2015-05-29 15:14:13'); +>> 2001-01-01 00:00:00 + +SELECT DATE_TRUNC('millennium', timestamp '2000-05-29 15:14:13'); +>> 1001-01-01 00:00:00 + +select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2001-01-01 00:00:00+00 + +select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 15:14:13+00'); +>> 2001-01-01 00:00:00+00 + +select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2001-01-01 00:00:00-06 + +select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 05:14:13-06'); +>> 2001-01-01 00:00:00-06 + +select DATE_TRUNC('millennium', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2001-01-01 00:00:00+10 + +select DATE_TRUNC('MILLENNIUM', timestamp with time zone '2015-05-29 15:14:13+10'); +>> 2001-01-01 00:00:00+10 + +-- +-- Test unhandled time unit and bad date +-- +SELECT DATE_TRUNC('---', '2015-05-29 15:14:13'); +> 
exception INVALID_VALUE_2 + +SELECT DATE_TRUNC('', '2015-05-29 15:14:13'); +> exception INVALID_VALUE_2 + +SELECT DATE_TRUNC('', ''); +> exception INVALID_VALUE_2 + +SELECT DATE_TRUNC('YEAR', ''); +> exception INVALID_VALUE_2 + +SELECT DATE_TRUNC('microseconds', '2015-05-29 15:14:13'); +> exception INVALID_VALUE_2 + +SET MODE PostgreSQL; +> ok + +select DATE_TRUNC('YEAR', DATE '2015-05-28'); +>> 2015-01-01 00:00:00+01 + +SET MODE Regular; +> ok + +SELECT DATE_TRUNC(DECADE, DATE '0000-01-20'); +>> 0000-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-1-12-31'); +>> -0010-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-10-01-01'); +>> -0010-01-01 + +SELECT DATE_TRUNC(DECADE, DATE '-11-12-31'); +>> -0020-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '0001-01-20'); +>> 0001-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '0000-12-31'); +>> -0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-1-12-31'); +>> -0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-99-01-01'); +>> -0099-01-01 + +SELECT DATE_TRUNC(CENTURY, DATE '-100-12-31'); +>> -0199-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '0001-01-20'); +>> 0001-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '0000-12-31'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-1-12-31'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-999-01-01'); +>> -0999-01-01 + +SELECT DATE_TRUNC(MILLENNIUM, DATE '-1000-12-31'); +>> -1999-01-01 + +-- ISO_WEEK_YEAR + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2019-12-30'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-01-01'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-12-01'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2020-12-31'); +>> 2019-12-30 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2017-01-01'); +>> 2016-01-04 + +SELECT DATE_TRUNC(ISO_WEEK_YEAR, DATE '2017-01-02'); +>> 2017-01-02 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql new file mode 100644 index 
0000000000..6ce6d4d43e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dateadd.sql @@ -0,0 +1,142 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select dateadd('month', 1, timestamp '2003-01-31 10:20:30.012345678'); +>> 2003-02-28 10:20:30.012345678 + +select dateadd('year', -1, timestamp '2000-02-29 10:20:30.012345678'); +>> 1999-02-28 10:20:30.012345678 + +create table test(d date, t time, ts timestamp); +> ok + +insert into test values(date '2001-01-01', time '01:00:00', timestamp '2010-01-01 00:00:00'); +> update count: 1 + +select ts + t from test; +>> 2010-01-01 01:00:00 + +select ts + t * 0.5 x from test; +>> 2010-01-01 00:30:00 + +select ts + 0.5 x from test; +>> 2010-01-01 12:00:00 + +select ts - 1.5 x from test; +>> 2009-12-30 12:00:00 + +select ts + t / 0.5 x from test; +>> 2010-01-01 02:00:00 + +VALUES TIME '04:00:00' + TIME '20:03:30.123'; +>> 00:03:30.123 + +VALUES TIME '04:00:00' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 00:03:30.123+05 + +VALUES TIME WITH TIME ZONE '04:00:00+08' + TIME '20:03:30.123'; +>> 00:03:30.123+08 + +VALUES TIME WITH TIME ZONE '04:00:00+08' + TIME WITH TIME ZONE '20:03:30.123+05'; +> exception FEATURE_NOT_SUPPORTED_1 + +VALUES DATE '2005-03-04' + TIME '20:03:30.123'; +>> 2005-03-04 20:03:30.123 + +VALUES DATE '2005-03-04' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 2005-03-04 20:03:30.123+05 + +VALUES TIMESTAMP '2005-03-04 04:00:00' + TIME '20:03:30.123'; +>> 2005-03-05 00:03:30.123 + +VALUES TIMESTAMP '2005-03-04 04:00:00' + TIME WITH TIME ZONE '20:03:30.123+05'; +>> 2005-03-05 00:03:30.123+05 + +VALUES TIMESTAMP WITH TIME ZONE '2005-03-04 04:00:00+08' + TIME '20:03:30.123'; +>> 2005-03-05 00:03:30.123+08 + +VALUES TIMESTAMP WITH TIME ZONE '2005-03-04 04:00:00+08' + TIME WITH TIME ZONE '20:03:30.123+05'; +> exception FEATURE_NOT_SUPPORTED_1 + +select 1 + d + 1, d - 1, 
2 + ts + 2, ts - 2 from test; +> DATEADD(DAY, 1, DATEADD(DAY, 1, D)) DATEADD(DAY, -1, D) DATEADD(DAY, 2, DATEADD(DAY, 2, TS)) DATEADD(DAY, -2, TS) +> ----------------------------------- ------------------- ------------------------------------ -------------------- +> 2001-01-03 2000-12-31 2010-01-05 00:00:00 2009-12-30 00:00:00 +> rows: 1 + +select 1 + d + t + 1 from test; +>> 2001-01-03 01:00:00 + +drop table test; +> ok + +call dateadd('MS', 1, TIMESTAMP '2001-02-03 04:05:06.789001'); +>> 2001-02-03 04:05:06.790001 + +SELECT DATEADD('MICROSECOND', 1, TIME '10:00:01'), DATEADD('MCS', 1, TIMESTAMP '2010-10-20 10:00:01.1'); +> TIME '10:00:01.000001' TIMESTAMP '2010-10-20 10:00:01.100001' +> ---------------------- -------------------------------------- +> 10:00:01.000001 2010-10-20 10:00:01.100001 +> rows: 1 + +SELECT DATEADD('NANOSECOND', 1, TIME '10:00:01'), DATEADD('NS', 1, TIMESTAMP '2010-10-20 10:00:01.1'); +> TIME '10:00:01.000000001' TIMESTAMP '2010-10-20 10:00:01.100000001' +> ------------------------- ----------------------------------------- +> 10:00:01.000000001 2010-10-20 10:00:01.100000001 +> rows: 1 + +SELECT DATEADD('HOUR', 1, DATE '2010-01-20'); +>> 2010-01-20 01:00:00 + +SELECT DATEADD('MINUTE', 30, TIME '12:30:55'); +>> 13:00:55 + +SELECT DATEADD('DAY', 1, TIME '12:30:55'); +> exception INVALID_VALUE_2 + +SELECT DATEADD('QUARTER', 1, DATE '2010-11-16'); +>> 2011-02-16 + +SELECT DATEADD('DAY', 10, TIMESTAMP WITH TIME ZONE '2000-01-05 15:00:30.123456789-10'); +>> 2000-01-15 15:00:30.123456789-10 + +SELECT TIMESTAMPADD('DAY', 10, TIMESTAMP '2000-01-05 15:00:30.123456789'); +>> 2000-01-15 15:00:30.123456789 + +SELECT TIMESTAMPADD('TIMEZONE_HOUR', 1, TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+07:30'); +>> 2010-01-01 10:00:00+08:30 + +SELECT TIMESTAMPADD('TIMEZONE_MINUTE', -45, TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+07:30'); +>> 2010-01-01 10:00:00+06:45 + +SELECT TIMESTAMPADD('TIMEZONE_SECOND', -45, TIMESTAMP WITH TIME ZONE '2010-01-01 
10:00:00+07:30'); +>> 2010-01-01 10:00:00+07:29:15 + +SELECT TIMESTAMPADD('TIMEZONE_HOUR', 1, TIME WITH TIME ZONE '10:00:00+07:30'); +>> 10:00:00+08:30 + +SELECT TIMESTAMPADD('TIMEZONE_MINUTE', -45, TIME WITH TIME ZONE '10:00:00+07:30'); +>> 10:00:00+06:45 + +SELECT DATEADD(HOUR, 1, TIME '23:00:00'); +>> 00:00:00 + +SELECT DATEADD(HOUR, 1, TIME WITH TIME ZONE '21:00:00+01'); +>> 22:00:00+01 + +SELECT DATEADD(HOUR, 1, TIME WITH TIME ZONE '23:00:00+01'); +>> 00:00:00+01 + +SELECT D FROM (SELECT '2010-01-01' D) WHERE D IN (SELECT D1 - 1 FROM (SELECT DATE '2010-01-02' D1)); +>> 2010-01-01 + +SELECT DATEADD(MILLENNIUM, 1, DATE '2000-02-29'); +>> 3000-02-28 + +SELECT DATEADD(CENTURY, 1, DATE '2000-02-29'); +>> 2100-02-28 + +SELECT DATEADD(DECADE, 1, DATE '2000-02-29'); +>> 2010-02-28 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql new file mode 100644 index 0000000000..15b60523ba --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/datediff.sql @@ -0,0 +1,229 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select datediff('yy', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> 1 + +select datediff('year', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> 1 + +select datediff('mm', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> 2 + +select datediff('month', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> 2 + +select datediff('dd', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0'); +>> 4 + +select datediff('day', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0'); +>> 4 + +select datediff('hh', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0'); +>> 24 + +select datediff('hour', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0'); +>> 24 + +select datediff('mi', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> -20 + +select datediff('minute', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0'); +>> -20 + +select datediff('ss', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); +>> 1 + +select datediff('second', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); +>> 1 + +select datediff('ms', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); +>> 500 + +select datediff('millisecond', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0'); +>> 500 + +SELECT DATEDIFF('SECOND', '1900-01-01 00:00:00.001', '1900-01-01 00:00:00.002'), DATEDIFF('SECOND', '2000-01-01 00:00:00.001', '2000-01-01 00:00:00.002'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('SECOND', '1900-01-01 00:00:00.000', '1900-01-01 00:00:00.001'), DATEDIFF('SECOND', '2000-01-01 00:00:00.000', '2000-01-01 00:00:00.001'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('MINUTE', '1900-01-01 00:00:00.000', '1900-01-01 00:00:01.000'), DATEDIFF('MINUTE', '2000-01-01 
00:00:00.000', '2000-01-01 00:00:01.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('MINUTE', '1900-01-01 00:00:01.000', '1900-01-01 00:00:02.000'), DATEDIFF('MINUTE', '2000-01-01 00:00:01.000', '2000-01-01 00:00:02.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('HOUR', '1900-01-01 00:00:00.000', '1900-01-01 00:00:01.000'), DATEDIFF('HOUR', '2000-01-01 00:00:00.000', '2000-01-01 00:00:01.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('HOUR', '1900-01-01 00:00:00.001', '1900-01-01 00:00:01.000'), DATEDIFF('HOUR', '2000-01-01 00:00:00.001', '2000-01-01 00:00:01.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('HOUR', '1900-01-01 01:00:00.000', '1900-01-01 01:00:01.000'), DATEDIFF('HOUR', '2000-01-01 01:00:00.000', '2000-01-01 01:00:01.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +SELECT DATEDIFF('HOUR', '1900-01-01 01:00:00.001', '1900-01-01 01:00:01.000'), DATEDIFF('HOUR', '2000-01-01 01:00:00.001', '2000-01-01 01:00:01.000'); +> 0 0 +> - - +> 0 0 +> rows: 1 + +select datediff(day, '2015-12-09 23:59:00.0', '2016-01-16 23:59:00.0'), datediff(wk, '2015-12-09 23:59:00.0', '2016-01-16 23:59:00.0'); +> 38 5 +> -- - +> 38 5 +> rows: 1 + +call datediff('MS', TIMESTAMP '2001-02-03 04:05:06.789001', TIMESTAMP '2001-02-03 04:05:06.789002'); +> 0 +> - +> 0 +> rows: 1 + +call datediff('MS', TIMESTAMP '1900-01-01 00:00:01.000', TIMESTAMP '2008-01-01 00:00:00.000'); +>> 3408134399000 + +SELECT DATEDIFF('MICROSECOND', '2006-01-01 00:00:00.0000000', '2006-01-01 00:00:00.123456789'), + DATEDIFF('MCS', '2006-01-01 00:00:00.0000000', '2006-01-01 00:00:00.123456789'), + DATEDIFF('MCS', '2006-01-01 00:00:00.0000000', '2006-01-02 00:00:00.123456789'); +> 123456 123456 86400123456 +> ------ ------ ----------- +> 123456 123456 86400123456 +> rows: 1 + +SELECT DATEDIFF('NANOSECOND', '2006-01-01 00:00:00.0000000', '2006-01-01 00:00:00.123456789'), + DATEDIFF('NS', '2006-01-01 00:00:00.0000000', '2006-01-01 00:00:00.123456789'), + DATEDIFF('NS', 
'2006-01-01 00:00:00.0000000', '2006-01-02 00:00:00.123456789'); +> 123456789 123456789 86400123456789 +> --------- --------- -------------- +> 123456789 123456789 86400123456789 +> rows: 1 + +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-02', DATE '2018-02-03'); +>> 0 + +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-03', DATE '2018-02-04'); +>> 0 + +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-04', DATE '2018-02-05'); +>> 1 + +SELECT DATEDIFF('ISO_WEEK', DATE '2018-02-05', DATE '2018-02-06'); +>> 0 + +SELECT DATEDIFF('ISO_WEEK', DATE '1969-12-27', DATE '1969-12-28'); +>> 0 + +SELECT DATEDIFF('ISO_WEEK', DATE '1969-12-28', DATE '1969-12-29'); +>> 1 + +SELECT DATEDIFF('QUARTER', DATE '2009-12-30', DATE '2009-12-31'); +>> 0 + +SELECT DATEDIFF('QUARTER', DATE '2010-01-01', DATE '2009-12-31'); +>> -1 + +SELECT DATEDIFF('QUARTER', DATE '2010-01-01', DATE '2010-01-02'); +>> 0 + +SELECT DATEDIFF('QUARTER', DATE '2010-01-01', DATE '2010-03-31'); +>> 0 + +SELECT DATEDIFF('QUARTER', DATE '-1000-01-01', DATE '2000-01-01'); +>> 12000 + +SELECT DATEDIFF('TIMEZONE_HOUR', TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01', + TIMESTAMP WITH TIME ZONE '2012-02-02 12:00:00+02'); +>> 1 + +SELECT DATEDIFF('TIMEZONE_MINUTE', TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00+01:15', + TIMESTAMP WITH TIME ZONE '2012-02-02 12:00:00+02'); +>> 45 + +SELECT DATEDIFF('TIMEZONE_SECOND', TIMESTAMP WITH TIME ZONE '1880-01-01 10:00:00-07:52:58', + TIMESTAMP WITH TIME ZONE '1890-02-02 12:00:00-08'); +>> -422 + +SELECT DATEDIFF('TIMEZONE_HOUR', TIME WITH TIME ZONE '10:00:00+01', + TIME WITH TIME ZONE '12:00:00+02'); +>> 1 + +SELECT DATEDIFF('TIMEZONE_MINUTE', TIME WITH TIME ZONE '10:00:00+01:15', + TIME WITH TIME ZONE '12:00:00+02'); +>> 45 + +select datediff('HOUR', timestamp '2007-01-06 10:00:00Z', '2007-01-06 10:00:00Z'); +>> 0 + +select datediff('HOUR', timestamp '1234-05-06 10:00:00+01:00', '1234-05-06 10:00:00+02:00'); +>> -1 + +select datediff('HOUR', timestamp '1234-05-06 10:00:00+01:00', '1234-05-06 
10:00:00-02:00'); +>> 3 + +select timestampdiff(month, '2003-02-01','2003-05-01'); +>> 3 + +select timestampdiff(YEAR,'2002-05-01','2001-01-01'); +>> -1 + +select timestampdiff(YEAR,'2017-01-01','2016-12-31 23:59:59'); +>> -1 + +select timestampdiff(YEAR,'2017-01-01','2017-12-31 23:59:59'); +>> 0 + +select timestampdiff(MINUTE,'2003-02-01','2003-05-01 12:05:55'); +>> 128885 + +SELECT DATEDIFF(MILLENNIUM, DATE '2000-12-31', DATE '2001-01-01'); +>> 1 + +SELECT DATEDIFF(MILLENNIUM, DATE '2001-01-01', DATE '3000-12-31'); +>> 0 + +SELECT DATEDIFF(MILLENNIUM, DATE '2001-01-01', DATE '3001-01-01'); +>> 1 + +SELECT DATEDIFF(CENTURY, DATE '2000-12-31', DATE '2001-01-01'); +>> 1 + +SELECT DATEDIFF(CENTURY, DATE '2001-01-01', DATE '2100-12-31'); +>> 0 + +SELECT DATEDIFF(CENTURY, DATE '2001-01-01', DATE '2101-01-01'); +>> 1 + +SELECT DATEDIFF(DECADE, DATE '2009-12-31', DATE '2010-01-01'); +>> 1 + +SELECT DATEDIFF(DECADE, DATE '2010-01-01', DATE '2019-12-31'); +>> 0 + +SELECT DATEDIFF(DECADE, DATE '2010-01-01', DATE '2020-01-01'); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql new file mode 100644 index 0000000000..609770c248 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-month.sql @@ -0,0 +1,23 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select dayofmonth(date '2005-09-12'); +>> 12 + +create table test(ts timestamp with time zone); +> ok + +insert into test(ts) values ('2010-05-11 00:00:00+10:00'), ('2010-05-11 00:00:00-10:00'); +> update count: 2 + +select dayofmonth(ts) d from test; +> D +> -- +> 11 +> 11 +> rows: 2 + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql new file mode 100644 index 0000000000..6e71c05740 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-week.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT DAYOFWEEK(DATE '2005-09-12') = EXTRACT(DAY_OF_WEEK FROM DATE '2005-09-12'); +>> TRUE diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql new file mode 100644 index 0000000000..3d7c68e3c9 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/day-of-year.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select dayofyear(date '2005-01-01') d1; +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql new file mode 100644 index 0000000000..743867d2dc --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/dayname.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select dayname(date '2005-09-12'); +>> Monday diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql new file mode 100644 index 0000000000..33918e95ea --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/extract.sql @@ -0,0 +1,275 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT EXTRACT(NANOSECOND FROM TIME '10:00:00.123456789') IS OF (INTEGER); +>> TRUE + +SELECT EXTRACT(EPOCH FROM TIME '01:00:00') IS OF (NUMERIC); +>> TRUE + +SELECT EXTRACT (MICROSECOND FROM TIME '10:00:00.123456789'), + EXTRACT (MCS FROM TIMESTAMP '2015-01-01 11:22:33.987654321'); +> 123456 987654 +> ------ ------ +> 123456 987654 +> rows: 1 + +SELECT EXTRACT (NANOSECOND FROM TIME '10:00:00.123456789'), + EXTRACT (NS FROM TIMESTAMP '2015-01-01 11:22:33.987654321'); +> 123456789 987654321 +> --------- --------- +> 123456789 987654321 +> rows: 1 + +select EXTRACT (EPOCH from time '00:00:00'); +>> 0 + +select EXTRACT (EPOCH from time '10:00:00'); +>> 36000 + +select EXTRACT (EPOCH from time '10:00:00.123456'); +>> 36000.123456 + +select EXTRACT (EPOCH from date '1970-01-01'); +>> 0 + +select EXTRACT (EPOCH from date '2000-01-03'); +>> 946857600 + +select EXTRACT (EPOCH from timestamp '1970-01-01 00:00:00'); +>> 0 + +select EXTRACT (EPOCH from timestamp '1970-01-03 12:00:00.123456'); +>> 216000.123456 + +select EXTRACT (EPOCH from timestamp '2000-01-03 12:00:00.123456'); +>> 946900800.123456 + +select EXTRACT (EPOCH from timestamp '2500-01-03 12:00:00.654321'); +>> 16725441600.654321 + +select EXTRACT (EPOCH from timestamp with time zone '1970-01-01 00:00:00+05'); +>> -18000 + +select EXTRACT (EPOCH from timestamp with time zone '1970-01-03 12:00:00.123456+05'); +>> 198000.123456 + +select EXTRACT (EPOCH from 
timestamp with time zone '2000-01-03 12:00:00.123456+05'); +>> 946882800.123456 + +select extract(EPOCH from '2001-02-03 14:15:16'); +>> 981209716 + +SELECT EXTRACT(EPOCH FROM INTERVAL '10.1' SECOND); +>> 10.1 + +SELECT EXTRACT(EPOCH FROM INTERVAL -'0.000001' SECOND); +>> -0.000001 + +SELECT EXTRACT(EPOCH FROM INTERVAL '0-1' YEAR TO MONTH); +>> 2592000 + +SELECT EXTRACT(EPOCH FROM INTERVAL '-0-1' YEAR TO MONTH); +>> -2592000 + +SELECT EXTRACT(EPOCH FROM INTERVAL '1-0' YEAR TO MONTH); +>> 31557600 + +SELECT EXTRACT(EPOCH FROM INTERVAL '-1-0' YEAR TO MONTH); +>> -31557600 + +SELECT EXTRACT(TIMEZONE_HOUR FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00+07:15'); +>> 7 + +SELECT EXTRACT(TIMEZONE_HOUR FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00-08:30'); +>> -8 + +SELECT EXTRACT(TIMEZONE_MINUTE FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00+07:15'); +>> 15 + +SELECT EXTRACT(TIMEZONE_MINUTE FROM TIMESTAMP WITH TIME ZONE '2010-01-02 5:00:00-08:30'); +>> -30 + +SELECT EXTRACT(TIMEZONE_SECOND FROM TIMESTAMP WITH TIME ZONE '1880-01-01 10:00:00-07:52:58'); +>> -58 + +SELECT EXTRACT(TIMEZONE_HOUR FROM TIME WITH TIME ZONE '5:00:00+07:15'); +>> 7 + +SELECT EXTRACT(TIMEZONE_MINUTE FROM TIME WITH TIME ZONE '5:00:00+07:15'); +>> 15 + +select extract(hour from timestamp '2001-02-03 14:15:16'); +>> 14 + +select extract(hour from '2001-02-03 14:15:16'); +>> 14 + +SELECT EXTRACT(YEAR FROM INTERVAL '-1' YEAR); +>> -1 + +SELECT EXTRACT(YEAR FROM INTERVAL '1-2' YEAR TO MONTH); +>> 1 + +SELECT EXTRACT(MONTH FROM INTERVAL '-1-3' YEAR TO MONTH); +>> -3 + +SELECT EXTRACT(MONTH FROM INTERVAL '3' MONTH); +>> 3 + +SELECT EXTRACT(DAY FROM INTERVAL '1100' DAY); +>> 1100 + +SELECT EXTRACT(DAY FROM INTERVAL '10 23' DAY TO HOUR); +>> 10 + +SELECT EXTRACT(DAY FROM INTERVAL '10 23:15' DAY TO MINUTE); +>> 10 + +SELECT EXTRACT(DAY FROM INTERVAL '10 23:15:30' DAY TO SECOND); +>> 10 + +SELECT EXTRACT(HOUR FROM INTERVAL '15' HOUR); +>> 15 + +SELECT EXTRACT(HOUR FROM INTERVAL '2 15' DAY TO HOUR); +>> 
15 + +SELECT EXTRACT(HOUR FROM INTERVAL '2 10:30' DAY TO MINUTE); +>> 10 + +SELECT EXTRACT(HOUR FROM INTERVAL '2 10:30:15' DAY TO SECOND); +>> 10 + +SELECT EXTRACT(HOUR FROM INTERVAL '20:10' HOUR TO MINUTE); +>> 20 + +SELECT EXTRACT(HOUR FROM INTERVAL '20:10:22' HOUR TO SECOND); +>> 20 + +SELECT EXTRACT(MINUTE FROM INTERVAL '-35' MINUTE); +>> -35 + +SELECT EXTRACT(MINUTE FROM INTERVAL '1 20:33' DAY TO MINUTE); +>> 33 + +SELECT EXTRACT(MINUTE FROM INTERVAL '1 20:33:10' DAY TO SECOND); +>> 33 + +SELECT EXTRACT(MINUTE FROM INTERVAL '20:34' HOUR TO MINUTE); +>> 34 + +SELECT EXTRACT(MINUTE FROM INTERVAL '20:34:10' HOUR TO SECOND); +>> 34 + +SELECT EXTRACT(MINUTE FROM INTERVAL '-34:10' MINUTE TO SECOND); +>> -34 + +SELECT EXTRACT(SECOND FROM INTERVAL '-100' SECOND); +>> -100 + +SELECT EXTRACT(SECOND FROM INTERVAL '10 11:22:33' DAY TO SECOND); +>> 33 + +SELECT EXTRACT(SECOND FROM INTERVAL '1:2:3' HOUR TO SECOND); +>> 3 + +SELECT EXTRACT(SECOND FROM INTERVAL '-2:43' MINUTE TO SECOND); +>> -43 + +SELECT EXTRACT(SECOND FROM INTERVAL '11.123456789' SECOND); +>> 11 + +SELECT EXTRACT(MILLISECOND FROM INTERVAL '11.123456789' SECOND); +>> 123 + +SELECT EXTRACT(MICROSECOND FROM INTERVAL '11.123456789' SECOND); +>> 123456 + +SELECT EXTRACT(NANOSECOND FROM INTERVAL '11.123456789' SECOND); +>> 123456789 + +SELECT D, ISO_YEAR(D) Y1, EXTRACT(ISO_WEEK_YEAR FROM D) Y2, EXTRACT(ISO_YEAR FROM D) Y3, EXTRACT(ISOYEAR FROM D) Y4 + FROM (VALUES DATE '2017-01-01', DATE '2017-01-02') V(D); +> D Y1 Y2 Y3 Y4 +> ---------- ---- ---- ---- ---- +> 2017-01-01 2016 2016 2016 2016 +> 2017-01-02 2017 2017 2017 2017 +> rows: 2 + +SELECT D, EXTRACT(ISO_DAY_OF_WEEK FROM D) D1, EXTRACT(ISODOW FROM D) D2 + FROM (VALUES DATE '2019-02-03', DATE '2019-02-04') V(D); +> D D1 D2 +> ---------- -- -- +> 2019-02-03 7 7 +> 2019-02-04 1 1 +> rows: 2 + +SET MODE PostgreSQL; +> ok + +SELECT D, EXTRACT(DOW FROM D) D3 FROM (VALUES DATE '2019-02-02', DATE '2019-02-03') V(D); +> D D3 +> ---------- -- +> 2019-02-02 6 +> 
2019-02-03 0 +> rows: 2 + +SET MODE Regular; +> ok + +SELECT EXTRACT(MILLENNIUM FROM DATE '-1000-12-31'); +>> -1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '-999-01-01'); +>> 0 + +SELECT EXTRACT(MILLENNIUM FROM DATE '0000-12-31'); +>> 0 + +SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01'); +>> 1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); +>> 1 + +SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); +>> 2 + +SELECT EXTRACT(CENTURY FROM DATE '-100-12-31'); +>> -1 + +SELECT EXTRACT(CENTURY FROM DATE '-99-01-01'); +>> 0 + +SELECT EXTRACT(CENTURY FROM DATE '0000-12-31'); +>> 0 + +SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); +>> 1 + +SELECT EXTRACT(CENTURY FROM DATE '0100-12-31'); +>> 1 + +SELECT EXTRACT(CENTURY FROM DATE '0101-01-01'); +>> 2 + +SELECT EXTRACT(DECADE FROM DATE '-11-12-31'); +>> -2 + +SELECT EXTRACT(DECADE FROM DATE '-10-01-01'); +>> -1 + +SELECT EXTRACT(DECADE FROM DATE '-1-12-31'); +>> -1 + +SELECT EXTRACT(DECADE FROM DATE '0000-01-01'); +>> 0 + +SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); +>> 0 + +SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql new file mode 100644 index 0000000000..dd3e270714 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/formatdatetime.sql @@ -0,0 +1,25 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CALL FORMATDATETIME(PARSEDATETIME('2001-02-03 04:05:06 GMT', 'yyyy-MM-dd HH:mm:ss z', 'en', 'GMT'), 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT'); +>> Sat, 3 Feb 2001 04:05:06 GMT + +CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'yyyy-MM-dd HH:mm:ss'); +>> 2001-02-03 04:05:06 + +CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'MM/dd/yyyy HH:mm:ss'); +>> 02/03/2001 04:05:06 + +CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'd. MMMM yyyy', 'de'); +>> 3. Februar 2001 + +CALL FORMATDATETIME(PARSEDATETIME('Sat, 3 Feb 2001 04:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT'), 'yyyy-MM-dd HH:mm:ss', 'en', 'GMT'); +>> 2001-02-03 04:05:06 + +SELECT FORMATDATETIME(TIMESTAMP WITH TIME ZONE '2010-05-06 07:08:09.123Z', 'yyyy-MM-dd HH:mm:ss.SSS z'); +>> 2010-05-06 07:08:09.123 UTC + +SELECT FORMATDATETIME(TIMESTAMP WITH TIME ZONE '2010-05-06 07:08:09.123+13:30', 'yyyy-MM-dd HH:mm:ss.SSS z'); +>> 2010-05-06 07:08:09.123 GMT+13:30 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql new file mode 100644 index 0000000000..b00828275f --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/hour.sql @@ -0,0 +1,26 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select hour(time '23:10:59'); +>> 23 + +create table test(ts timestamp with time zone); +> ok + +insert into test(ts) values ('2010-05-11 05:15:10+10:00'), ('2010-05-11 05:15:10-10:00'); +> update count: 2 + +select hour(ts) h from test; +> H +> - +> 5 +> 5 +> rows: 2 + +drop table test; +> ok + +select hour('2001-02-03 14:15:16'); +>> 14 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql new file mode 100644 index 0000000000..8cf533ce83 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/minute.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select minute(timestamp '2005-01-01 23:10:59'); +>> 10 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql new file mode 100644 index 0000000000..e85be36a08 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/month.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select month(date '2005-09-25'); +>> 9 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql new file mode 100644 index 0000000000..a8e6637432 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/monthname.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select monthname(date '2005-09-12'); +>> September diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql new file mode 100644 index 0000000000..4c31dc58f0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/parsedatetime.sql @@ -0,0 +1,22 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SET TIME ZONE '01:00'; +> ok + +CALL PARSEDATETIME('3. Februar 2001', 'd. MMMM yyyy', 'de'); +>> 2001-02-03 00:00:00+01 + +CALL PARSEDATETIME('02/03/2001 04:05:06', 'MM/dd/yyyy HH:mm:ss'); +>> 2001-02-03 04:05:06+01 + +CALL CAST(PARSEDATETIME('10:11:12', 'HH:mm:ss', 'en') AS TIME); +>> 10:11:12 + +CALL CAST(PARSEDATETIME('10:11:12', 'HH:mm:ss', 'en', 'GMT+2') AS TIME WITH TIME ZONE); +>> 10:11:12+02 + +SET TIME ZONE LOCAL; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql new file mode 100644 index 0000000000..b19ae40a73 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/quarter.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select quarter(date '2005-09-01'); +>> 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql new file mode 100644 index 0000000000..01243bae11 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/second.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select second(timestamp '2005-01-01 23:10:59'); +>> 59 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql new file mode 100644 index 0000000000..3a28b9b174 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/truncate.sql @@ -0,0 +1,16 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +select trunc('2015-05-29 15:00:00'); +>> 2015-05-29 00:00:00 + +select trunc('2015-05-29'); +>> 2015-05-29 00:00:00 + +select trunc(timestamp '2000-01-01 10:20:30.0'); +>> 2000-01-01 00:00:00 + +select trunc(timestamp '2001-01-01 14:00:00.0'); +>> 2001-01-01 00:00:00 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql new file mode 100644 index 0000000000..3d902ea56d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/week.sql @@ -0,0 +1,12 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- ISO_WEEK + +select iso_week('2006-12-31') w, iso_year('2007-12-31') y, iso_day_of_week('2007-12-31') w; +> W Y W +> -- ---- - +> 52 2008 1 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql b/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql new file mode 100644 index 0000000000..25dea91c9d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/timeanddate/year.sql @@ -0,0 +1,7 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select year(date '2005-01-01'); +>> 2005 diff --git a/h2/src/test/org/h2/test/scripts/functions/window/lead.sql b/h2/src/test/org/h2/test/scripts/functions/window/lead.sql new file mode 100644 index 0000000000..947849a66c --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/window/lead.sql @@ -0,0 +1,181 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST (ID INT PRIMARY KEY, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES + (1, NULL), + (2, 12), + (3, NULL), + (4, 13), + (5, NULL), + (6, 21), + (7, 22), + (8, 33), + (9, NULL); +> update count: 9 + +SELECT *, + LEAD("VALUE") OVER (ORDER BY ID) LD, + LEAD("VALUE") RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE") IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE") OVER (ORDER BY ID) LG, + LAG("VALUE") RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE") IGNORE NULLS OVER (ORDER BY ID) LG_NN + FROM TEST; +> ID VALUE LD LD_N LD_NN LG LG_N LG_NN +> -- ----- ---- ---- ----- ---- ---- ----- +> 1 null 12 12 12 null null null +> 2 12 null null 13 null null null +> 3 null 13 13 13 12 12 12 +> 4 13 null null 21 null null 12 +> 5 null 21 21 21 13 13 13 +> 6 21 22 22 22 null null 13 +> 7 22 33 33 33 21 21 21 +> 8 33 null null null 22 22 22 +> 9 null null null null 33 33 33 +> rows: 9 + +SELECT *, + LEAD("VALUE", 1) OVER (ORDER BY ID) LD, + LEAD("VALUE", 1) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 1) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 1) OVER (ORDER BY ID) LG, + LAG("VALUE", 1) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 1) IGNORE NULLS OVER (ORDER BY ID) LG_NN + FROM TEST; +> ID VALUE LD LD_N LD_NN LG LG_N LG_NN +> -- ----- ---- ---- ----- ---- ---- ----- +> 1 null 12 12 12 null null null +> 2 12 null null 13 null null null +> 3 null 13 13 13 12 12 12 +> 4 13 null null 21 null null 12 +> 5 
null 21 21 21 13 13 13 +> 6 21 22 22 22 null null 13 +> 7 22 33 33 33 21 21 21 +> 8 33 null null null 22 22 22 +> 9 null null null null 33 33 33 +> rows: 9 + +SELECT *, + LEAD("VALUE", 0) OVER (ORDER BY ID) LD, + LEAD("VALUE", 0) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 0) OVER (ORDER BY ID) LG, + LAG("VALUE", 0) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 0) IGNORE NULLS OVER (ORDER BY ID) LG_NN + FROM TEST; +> ID VALUE LD LD_N LD_NN LG LG_N LG_NN +> -- ----- ---- ---- ----- ---- ---- ----- +> 1 null null null null null null null +> 2 12 12 12 12 12 12 12 +> 3 null null null null null null null +> 4 13 13 13 13 13 13 13 +> 5 null null null null null null null +> 6 21 21 21 21 21 21 21 +> 7 22 22 22 22 22 22 22 +> 8 33 33 33 33 33 33 33 +> 9 null null null null null null null +> rows: 9 + +SELECT *, + LEAD("VALUE", 2) OVER (ORDER BY ID) LD, + LEAD("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 2) OVER (ORDER BY ID) LG, + LAG("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) LG_NN + FROM TEST; +> ID VALUE LD LD_N LD_NN LG LG_N LG_NN +> -- ----- ---- ---- ----- ---- ---- ----- +> 1 null null null 13 null null null +> 2 12 13 13 21 null null null +> 3 null null null 21 null null null +> 4 13 21 21 22 12 12 null +> 5 null 22 22 22 null null 12 +> 6 21 33 33 33 13 13 12 +> 7 22 null null null null null 13 +> 8 33 null null null 21 21 21 +> 9 null null null null 22 22 22 +> rows: 9 + +SELECT *, + LEAD("VALUE", 2, 1111.0) OVER (ORDER BY ID) LD, + LEAD("VALUE", 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LD_N, + LEAD("VALUE", 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LD_NN, + LAG("VALUE", 2, 1111.0) OVER (ORDER BY ID) LG, + LAG("VALUE", 2, 1111.0) RESPECT NULLS OVER (ORDER BY ID) LG_N, + LAG("VALUE", 2, 1111.0) IGNORE NULLS OVER (ORDER BY ID) LG_NN + FROM TEST; +> ID 
VALUE LD LD_N LD_NN LG LG_N LG_NN +> -- ----- ---- ---- ----- ---- ---- ----- +> 1 null null null 13 1111 1111 1111 +> 2 12 13 13 21 1111 1111 1111 +> 3 null null null 21 null null 1111 +> 4 13 21 21 22 12 12 1111 +> 5 null 22 22 22 null null 12 +> 6 21 33 33 33 13 13 12 +> 7 22 null null 1111 null null 13 +> 8 33 1111 1111 1111 21 21 21 +> 9 null 1111 1111 1111 22 22 22 +> rows: 9 + +SELECT LEAD("VALUE", -1) OVER (ORDER BY ID) FROM TEST; +> exception INVALID_VALUE_2 + +SELECT LAG("VALUE", -1) OVER (ORDER BY ID) FROM TEST; +> exception INVALID_VALUE_2 + +SELECT LEAD("VALUE") OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT LAG("VALUE") OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT LEAD("VALUE") OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +SELECT LAG("VALUE") OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +SELECT C, SUM(I) S, LEAD(SUM(I)) OVER (ORDER BY SUM(I)) L FROM + VALUES (1, 1), (2, 1), (4, 2), (8, 2) T(I, C) GROUP BY C; +> C S L +> - -- ---- +> 1 3 12 +> 2 12 null +> rows: 2 + +CREATE TABLE TEST(X INT) AS VALUES 1, 2, 3; +> ok + +EXPLAIN SELECT LEAD(X) OVER (ORDER BY 'a') FROM TEST; +>> SELECT LEAD("X") OVER (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT LEAD(X) OVER (ORDER BY 'a') FROM TEST; +> LEAD(X) OVER (ORDER BY NULL) +> ---------------------------- +> 2 +> 3 +> null +> rows: 3 + +EXPLAIN SELECT LAG(X) OVER (ORDER BY 'a') FROM TEST; +>> SELECT LAG("X") OVER (ORDER BY NULL) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT LAG(X) OVER (ORDER BY 'a') FROM TEST; +> LAG(X) OVER (ORDER BY NULL) +> --------------------------- +> 1 +> 2 +> null +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql b/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql new file mode 100644 index 0000000000..57fea994cd --- /dev/null +++ 
b/h2/src/test/org/h2/test/scripts/functions/window/nth_value.sql @@ -0,0 +1,263 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT FIRST_VALUE(1) OVER (PARTITION BY ID); +> exception COLUMN_NOT_FOUND_1 + +SELECT FIRST_VALUE(1) OVER (ORDER BY ID); +> exception COLUMN_NOT_FOUND_1 + +CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES + (1, 1, NULL), + (2, 1, 12), + (3, 1, NULL), + (4, 1, 13), + (5, 1, NULL), + (6, 1, 13), + (7, 2, 21), + (8, 2, 22), + (9, 3, 31), + (10, 3, 32), + (11, 3, 33), + (12, 4, 41), + (13, 4, NULL); +> update count: 13 + +SELECT *, + FIRST_VALUE("VALUE") OVER (ORDER BY ID) FIRST, + FIRST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) FIRST_N, + FIRST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, + LAST_VALUE("VALUE") OVER (ORDER BY ID) LAST, + LAST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) LAST_N, + LAST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) LAST_NN + FROM TEST FETCH FIRST 6 ROWS ONLY; +> ID CATEGORY VALUE FIRST FIRST_N FIRST_NN LAST LAST_N LAST_NN +> -- -------- ----- ----- ------- -------- ---- ------ ------- +> 1 1 null null null null null null null +> 2 1 12 null null 12 12 12 12 +> 3 1 null null null 12 null null 12 +> 4 1 13 null null 12 13 13 13 +> 5 1 null null null 12 null null 13 +> 6 1 13 null null 12 13 13 13 +> rows: 6 + +SELECT *, + FIRST_VALUE("VALUE") OVER (ORDER BY ID) FIRST, + FIRST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) FIRST_N, + FIRST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) FIRST_NN, + LAST_VALUE("VALUE") OVER (ORDER BY ID) LAST, + LAST_VALUE("VALUE") RESPECT NULLS OVER (ORDER BY ID) LAST_N, + LAST_VALUE("VALUE") IGNORE NULLS OVER (ORDER BY ID) LAST_NN + FROM TEST WHERE ID > 1 FETCH FIRST 3 ROWS ONLY; +> ID CATEGORY VALUE FIRST FIRST_N FIRST_NN LAST LAST_N LAST_NN +> -- -------- ----- ----- ------- 
-------- ---- ------ ------- +> 2 1 12 12 12 12 12 12 12 +> 3 1 null 12 12 12 null null 12 +> 4 1 13 12 12 12 13 13 13 +> rows: 3 + +SELECT *, + NTH_VALUE("VALUE", 2) OVER (ORDER BY ID) NTH, + NTH_VALUE("VALUE", 2) FROM FIRST OVER (ORDER BY ID) NTH_FF, + NTH_VALUE("VALUE", 2) FROM LAST OVER (ORDER BY ID) NTH_FL, + NTH_VALUE("VALUE", 2) RESPECT NULLS OVER (ORDER BY ID) NTH_N, + NTH_VALUE("VALUE", 2) FROM FIRST RESPECT NULLS OVER (ORDER BY ID) NTH_FF_N, + NTH_VALUE("VALUE", 2) FROM LAST RESPECT NULLS OVER (ORDER BY ID) NTH_FL_N, + NTH_VALUE("VALUE", 2) IGNORE NULLS OVER (ORDER BY ID) NTH_NN, + NTH_VALUE("VALUE", 2) FROM FIRST IGNORE NULLS OVER (ORDER BY ID) NTH_FF_NN, + NTH_VALUE("VALUE", 2) FROM LAST IGNORE NULLS OVER (ORDER BY ID) NTH_FL_NN + FROM TEST FETCH FIRST 6 ROWS ONLY; +> ID CATEGORY VALUE NTH NTH_FF NTH_FL NTH_N NTH_FF_N NTH_FL_N NTH_NN NTH_FF_NN NTH_FL_NN +> -- -------- ----- ---- ------ ------ ----- -------- -------- ------ --------- --------- +> 1 1 null null null null null null null null null null +> 2 1 12 12 12 null 12 12 null null null null +> 3 1 null 12 12 12 12 12 12 null null null +> 4 1 13 12 12 null 12 12 null 13 13 12 +> 5 1 null 12 12 13 12 12 13 13 13 12 +> 6 1 13 12 12 null 12 12 null 13 13 13 +> rows: 6 + +SELECT *, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID) F, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) F_U_C, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) F_C_U, + NTH_VALUE("VALUE", 2) OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) F_U_U, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID) L, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) L_U_C, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) L_C_U, + NTH_VALUE("VALUE", 2) FROM LAST OVER(ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) 
L_U_U + FROM TEST ORDER BY ID; +> ID CATEGORY VALUE F F_U_C F_C_U F_U_U L L_U_C L_C_U L_U_U +> -- -------- ----- ---- ----- ----- ----- ---- ----- ----- ----- +> 1 1 null null null 12 12 null null 41 41 +> 2 1 12 12 12 null 12 null null 41 41 +> 3 1 null 12 12 13 12 12 12 41 41 +> 4 1 13 12 12 null 12 null null 41 41 +> 5 1 null 12 12 13 12 13 13 41 41 +> 6 1 13 12 12 21 12 null null 41 41 +> 7 2 21 12 12 22 12 13 13 41 41 +> 8 2 22 12 12 31 12 21 21 41 41 +> 9 3 31 12 12 32 12 22 22 41 41 +> 10 3 32 12 12 33 12 31 31 41 41 +> 11 3 33 12 12 41 12 32 32 41 41 +> 12 4 41 12 12 null 12 33 33 41 41 +> 13 4 null 12 12 null 12 41 41 null 41 +> rows (ordered): 13 + +SELECT NTH_VALUE("VALUE", 0) OVER (ORDER BY ID) FROM TEST; +> exception INVALID_VALUE_2 + +SELECT *, + FIRST_VALUE("VALUE") OVER (PARTITION BY CATEGORY ORDER BY ID) FIRST, + LAST_VALUE("VALUE") OVER (PARTITION BY CATEGORY ORDER BY ID) LAST, + NTH_VALUE("VALUE", 2) OVER (PARTITION BY CATEGORY ORDER BY ID) NTH + FROM TEST ORDER BY ID; +> ID CATEGORY VALUE FIRST LAST NTH +> -- -------- ----- ----- ---- ---- +> 1 1 null null null null +> 2 1 12 null 12 12 +> 3 1 null null null 12 +> 4 1 13 null 13 12 +> 5 1 null null null 12 +> 6 1 13 null 13 12 +> 7 2 21 21 21 null +> 8 2 22 21 22 22 +> 9 3 31 31 31 null +> 10 3 32 31 32 32 +> 11 3 33 31 33 32 +> 12 4 41 41 41 null +> 13 4 null 41 null null +> rows (ordered): 13 + +SELECT ID, CATEGORY, + NTH_VALUE(CATEGORY, 2) OVER (ORDER BY CATEGORY ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) C, + NTH_VALUE(CATEGORY, 2) OVER (ORDER BY CATEGORY ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE CURRENT ROW) + FROM TEST FETCH FIRST 3 ROWS ONLY; +> ID CATEGORY C NTH_VALUE(CATEGORY, 2) OVER (ORDER BY CATEGORY ROWS UNBOUNDED PRECEDING EXCLUDE CURRENT ROW) +> -- -------- ---- -------------------------------------------------------------------------------------------- +> 1 1 null null +> 2 1 1 null +> 3 1 1 1 +> rows: 3 + +SELECT ID, CATEGORY, + NTH_VALUE(CATEGORY, 2) FROM 
LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) C2, + NTH_VALUE(CATEGORY, 3) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) C3, + NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) + FROM TEST OFFSET 10 ROWS; +> ID CATEGORY C2 C3 NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) +> -- -------- -- ---- ------------------------------------------------------------------------------------------------------------------------------- +> 11 3 4 3 4 +> 12 4 4 null null +> 13 4 4 null null +> rows: 3 + +SELECT ID, CATEGORY, + NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) C + FROM TEST OFFSET 10 ROWS; +> ID CATEGORY C +> -- -------- - +> 11 3 4 +> 12 4 3 +> 13 4 3 +> rows: 3 + +SELECT ID, CATEGORY, + NTH_VALUE(CATEGORY, 1) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) F1, + NTH_VALUE(CATEGORY, 2) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) F2, + NTH_VALUE(CATEGORY, 5) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) F5, + NTH_VALUE(CATEGORY, 5) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) L5, + NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) L2, + NTH_VALUE(CATEGORY, 1) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE GROUP) L1 + FROM TEST ORDER BY ID; +> ID CATEGORY F1 F2 F5 L5 L2 L1 +> -- -------- -- -- -- -- -- -- +> 1 1 2 2 3 3 4 4 +> 2 1 2 2 3 3 4 4 +> 3 1 2 2 3 3 4 4 +> 4 1 2 2 3 3 4 4 +> 5 1 2 2 3 3 4 4 +> 6 1 2 2 3 3 4 
4 +> 7 2 1 1 1 3 4 4 +> 8 2 1 1 1 3 4 4 +> 9 3 1 1 1 1 4 4 +> 10 3 1 1 1 1 4 4 +> 11 3 1 1 1 1 4 4 +> 12 4 1 1 1 2 3 3 +> 13 4 1 1 1 2 3 3 +> rows (ordered): 13 + +SELECT ID, CATEGORY, + NTH_VALUE(CATEGORY, 1) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) F1, + NTH_VALUE(CATEGORY, 2) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) F2, + NTH_VALUE(CATEGORY, 5) OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) F5, + NTH_VALUE(CATEGORY, 5) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) L5, + NTH_VALUE(CATEGORY, 2) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) L2, + NTH_VALUE(CATEGORY, 1) FROM LAST OVER (ORDER BY CATEGORY RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE TIES) L1 + FROM TEST ORDER BY ID; +> ID CATEGORY F1 F2 F5 L5 L2 L1 +> -- -------- -- -- -- -- -- -- +> 1 1 1 2 3 3 4 4 +> 2 1 1 2 3 3 4 4 +> 3 1 1 2 3 3 4 4 +> 4 1 1 2 3 3 4 4 +> 5 1 1 2 3 3 4 4 +> 6 1 1 2 3 3 4 4 +> 7 2 1 1 1 3 4 4 +> 8 2 1 1 1 3 4 4 +> 9 3 1 1 1 2 4 4 +> 10 3 1 1 1 2 4 4 +> 11 3 1 1 1 2 4 4 +> 12 4 1 1 1 2 3 4 +> 13 4 1 1 1 2 3 4 +> rows (ordered): 13 + +SELECT ID, CATEGORY, + FIRST_VALUE(ID) OVER (ORDER BY ID ROWS BETWEEN CATEGORY FOLLOWING AND UNBOUNDED FOLLOWING) F, + LAST_VALUE(ID) OVER (ORDER BY ID ROWS BETWEEN CURRENT ROW AND CATEGORY FOLLOWING) L, + NTH_VALUE(ID, 2) OVER (ORDER BY ID ROWS BETWEEN CATEGORY FOLLOWING AND UNBOUNDED FOLLOWING) N + FROM TEST ORDER BY ID; +> ID CATEGORY F L N +> -- -------- ---- -- ---- +> 1 1 2 2 3 +> 2 1 3 3 4 +> 3 1 4 4 5 +> 4 1 5 5 6 +> 5 1 6 6 7 +> 6 1 7 7 8 +> 7 2 9 9 10 +> 8 2 10 10 11 +> 9 3 12 12 13 +> 10 3 13 13 null +> 11 3 null 13 null +> 12 4 null 13 null +> 13 4 null 13 null +> rows (ordered): 13 + +DROP TABLE TEST; +> ok + +SELECT I, X, LAST_VALUE(I) OVER 
(ORDER BY X) L FROM VALUES (1, 1), (2, 1), (3, 2), (4, 2), (5, 3) V(I, X); +> I X L +> - - - +> 1 1 2 +> 2 1 2 +> 3 2 4 +> 4 2 4 +> 5 3 5 +> rows: 5 + +SELECT A, MAX(B) M, FIRST_VALUE(A) OVER (ORDER BY A ROWS BETWEEN MAX(B) - 1 FOLLOWING AND UNBOUNDED FOLLOWING) F + FROM VALUES (1, 1), (1, 1), (2, 1), (2, 2), (3, 1) V(A, B) + GROUP BY A; +> A M F +> - - - +> 1 1 1 +> 2 2 3 +> 3 1 3 +> rows: 3 diff --git a/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql b/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql new file mode 100644 index 0000000000..6367c2d5e2 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/window/ntile.sql @@ -0,0 +1,129 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT NTILE(1) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +>> 1 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +>> 1 + +SELECT NTILE(3) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +>> 1 + +SELECT NTILE(1) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 2)); +> NTILE(1) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> rows: 2 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 2)) ORDER BY X; +> NTILE(2) OVER (ORDER BY X) +> -------------------------- +> 1 +> 2 +> rows (ordered): 2 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 3)) ORDER BY X; +> NTILE(2) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> 2 +> rows (ordered): 3 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 4)) ORDER BY X; +> NTILE(2) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> 2 +> 2 +> rows (ordered): 4 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 5)) ORDER BY X; +> NTILE(2) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> 1 +> 2 +> 2 +> rows 
(ordered): 5 + +SELECT NTILE(2) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 6)) ORDER BY X; +> NTILE(2) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> 1 +> 2 +> 2 +> 2 +> rows (ordered): 6 + +SELECT NTILE(10) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 3)) ORDER BY X; +> NTILE(10) OVER (ORDER BY X) +> --------------------------- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT NTILE(10) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 22)) ORDER BY X; +> NTILE(10) OVER (ORDER BY X) +> --------------------------- +> 1 +> 1 +> 1 +> 2 +> 2 +> 2 +> 3 +> 3 +> 4 +> 4 +> 5 +> 5 +> 6 +> 6 +> 7 +> 7 +> 8 +> 8 +> 9 +> 9 +> 10 +> 10 +> rows (ordered): 22 + +SELECT NTILE(0) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +> exception INVALID_VALUE_2 + +SELECT NTILE(X) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 6)) ORDER BY X; +> NTILE(X) OVER (ORDER BY X) +> -------------------------- +> 1 +> 1 +> 2 +> 2 +> 4 +> 6 +> rows (ordered): 6 + +SELECT NTILE(X) OVER () FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +> exception SYNTAX_ERROR_2 + +SELECT NTILE(X) OVER (ORDER BY X RANGE CURRENT ROW) FROM (SELECT * FROM SYSTEM_RANGE(1, 1)); +> exception SYNTAX_ERROR_1 + +SELECT NTILE(100000000000) OVER (ORDER BY X) FROM (SELECT * FROM SYSTEM_RANGE(1, 4)); +> NTILE(100000000000) OVER (ORDER BY X) +> ------------------------------------- +> 1 +> 2 +> 3 +> 4 +> rows: 4 diff --git a/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql b/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql new file mode 100644 index 0000000000..6760ad7076 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/window/ratio_to_report.sql @@ -0,0 +1,38 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, N NUMERIC); +> ok + +INSERT INTO TEST VALUES(1, 1), (2, 2), (3, NULL), (4, 5); +> update count: 4 + +SELECT ID, N, RATIO_TO_REPORT(N) OVER() R2R FROM TEST; +> ID N R2R +> -- ---- ----- +> 1 1 0.125 +> 2 2 0.25 +> 3 null null +> 4 5 0.625 +> rows: 4 + +INSERT INTO TEST VALUES (5, -8); +> update count: 1 + +SELECT ID, N, RATIO_TO_REPORT(N) OVER() R2R FROM TEST; +> ID N R2R +> -- ---- ---- +> 1 1 null +> 2 2 null +> 3 null null +> 4 5 null +> 5 -8 null +> rows: 5 + +SELECT RATIO_TO_REPORT(N) OVER (ORDER BY N) FROM TEST; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql b/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql new file mode 100644 index 0000000000..90b99c3628 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/functions/window/row_number.sql @@ -0,0 +1,245 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST (ID INT PRIMARY KEY, CATEGORY INT, "VALUE" INT); +> ok + +INSERT INTO TEST VALUES + (1, 1, 11), + (2, 1, 12), + (3, 1, 13), + (4, 2, 21), + (5, 2, 22), + (6, 3, 31), + (7, 3, 32), + (8, 3, 33), + (9, 4, 41); +> update count: 9 + +SELECT *, + ROW_NUMBER() OVER () RN, + ROUND(PERCENT_RANK() OVER (), 2) PR, + ROUND(CUME_DIST() OVER (), 2) CD, + ROW_NUMBER() OVER (ORDER BY ID) RNO, + RANK() OVER (ORDER BY ID) RKO, + DENSE_RANK() OVER (ORDER BY ID) DRO, + ROUND(PERCENT_RANK() OVER (ORDER BY ID), 2) PRO, + ROUND(CUME_DIST() OVER (ORDER BY ID), 2) CDO + FROM TEST; +> ID CATEGORY VALUE RN PR CD RNO RKO DRO PRO CDO +> -- -------- ----- -- --- --- --- --- --- ---- ---- +> 1 1 11 1 0.0 1.0 1 1 1 0.0 0.11 +> 2 1 12 2 0.0 1.0 2 2 2 0.13 0.22 +> 3 1 13 3 0.0 1.0 3 3 3 0.25 0.33 +> 4 2 21 4 0.0 1.0 4 4 4 0.38 0.44 +> 5 2 22 5 0.0 1.0 5 5 5 0.5 0.56 +> 6 3 31 6 0.0 1.0 6 6 6 0.63 0.67 +> 7 3 32 7 0.0 1.0 7 7 7 0.75 0.78 +> 8 3 33 8 0.0 1.0 8 8 8 0.88 0.89 +> 9 4 41 9 0.0 1.0 9 9 9 1.0 1.0 +> rows: 9 + +SELECT *, + ROW_NUMBER() OVER (ORDER BY CATEGORY) RN, + RANK() OVER (ORDER BY CATEGORY) RK, + DENSE_RANK() OVER (ORDER BY CATEGORY) DR, + ROUND(PERCENT_RANK() OVER (ORDER BY CATEGORY), 2) PR, + ROUND(CUME_DIST() OVER (ORDER BY CATEGORY), 2) CD + FROM TEST; +> ID CATEGORY VALUE RN RK DR PR CD +> -- -------- ----- -- -- -- ---- ---- +> 1 1 11 1 1 1 0.0 0.33 +> 2 1 12 2 1 1 0.0 0.33 +> 3 1 13 3 1 1 0.0 0.33 +> 4 2 21 4 4 2 0.38 0.56 +> 5 2 22 5 4 2 0.38 0.56 +> 6 3 31 6 6 3 0.63 0.89 +> 7 3 32 7 6 3 0.63 0.89 +> 8 3 33 8 6 3 0.63 0.89 +> 9 4 41 9 9 4 1.0 1.0 +> rows: 9 + +SELECT *, + ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID) RN, + RANK() OVER (PARTITION BY CATEGORY ORDER BY ID) RK, + DENSE_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID) DR, + ROUND(PERCENT_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), 2) PR, + ROUND(CUME_DIST() OVER (PARTITION BY CATEGORY ORDER BY ID), 2) CD + FROM TEST; +> ID CATEGORY VALUE RN RK 
DR PR CD +> -- -------- ----- -- -- -- --- ---- +> 1 1 11 1 1 1 0.0 0.33 +> 2 1 12 2 2 2 0.5 0.67 +> 3 1 13 3 3 3 1.0 1.0 +> 4 2 21 1 1 1 0.0 0.5 +> 5 2 22 2 2 2 1.0 1.0 +> 6 3 31 1 1 1 0.0 0.33 +> 7 3 32 2 2 2 0.5 0.67 +> 8 3 33 3 3 3 1.0 1.0 +> 9 4 41 1 1 1 0.0 1.0 +> rows: 9 + +SELECT *, + ROW_NUMBER() OVER W RN, + RANK() OVER W RK, + DENSE_RANK() OVER W DR, + ROUND(PERCENT_RANK() OVER W, 2) PR, + ROUND(CUME_DIST() OVER W, 2) CD + FROM TEST WINDOW W AS (PARTITION BY CATEGORY ORDER BY ID) QUALIFY ROW_NUMBER() OVER W = 2; +> ID CATEGORY VALUE RN RK DR PR CD +> -- -------- ----- -- -- -- --- ---- +> 2 1 12 2 2 2 0.5 0.67 +> 5 2 22 2 2 2 1.0 1.0 +> 7 3 32 2 2 2 0.5 0.67 +> rows: 3 + +SELECT *, + ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID) RN, + RANK() OVER (PARTITION BY CATEGORY ORDER BY ID) RK, + DENSE_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID) DR, + ROUND(PERCENT_RANK() OVER (PARTITION BY CATEGORY ORDER BY ID), 2) PR, + ROUND(CUME_DIST() OVER (PARTITION BY CATEGORY ORDER BY ID), 2) CD + FROM TEST QUALIFY RN = 3; +> ID CATEGORY VALUE RN RK DR PR CD +> -- -------- ----- -- -- -- --- --- +> 3 1 13 3 3 3 1.0 1.0 +> 8 3 33 3 3 3 1.0 1.0 +> rows: 2 + +SELECT + ROW_NUMBER() OVER (ORDER BY CATEGORY) RN, + RANK() OVER (ORDER BY CATEGORY) RK, + DENSE_RANK() OVER (ORDER BY CATEGORY) DR, + PERCENT_RANK() OVER () PR, + CUME_DIST() OVER () CD, + CATEGORY C + FROM TEST GROUP BY CATEGORY ORDER BY RN; +> RN RK DR PR CD C +> -- -- -- --- --- - +> 1 1 1 0.0 1.0 1 +> 2 2 2 0.0 1.0 2 +> 3 3 3 0.0 1.0 3 +> 4 4 4 0.0 1.0 4 +> rows (ordered): 4 + +SELECT RANK() OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT DENSE_RANK() OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT ROW_NUMBER() OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +SELECT RANK() OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +SELECT DENSE_RANK() OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +SELECT 
PERCENT_RANK() OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +SELECT CUME_DIST() OVER (ORDER BY ID RANGE CURRENT ROW) FROM TEST; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST (ID INT PRIMARY KEY, TYPE VARCHAR, CNT INT); +> ok + +INSERT INTO TEST VALUES + (1, 'a', 1), + (2, 'b', 2), + (3, 'c', 4), + (4, 'b', 8); +> update count: 4 + +SELECT ROW_NUMBER() OVER (ORDER BY TYPE) RN, TYPE, SUM(CNT) SUM FROM TEST GROUP BY TYPE; +> RN TYPE SUM +> -- ---- --- +> 1 a 1 +> 2 b 10 +> 3 c 4 +> rows: 3 + +SELECT A, B, C, ROW_NUMBER() OVER (PARTITION BY A, B) N FROM + VALUES (1, 1, 1), (1, 1, 2), (1, 2, 3), (2, 1, 4) T(A, B, C); +> A B C N +> - - - - +> 1 1 1 1 +> 1 1 2 2 +> 1 2 3 1 +> 2 1 4 1 +> rows: 4 + +SELECT RANK () OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +SELECT DENSE_RANK () OVER () FROM TEST; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST; +> ok + +SELECT ROW_NUMBER() OVER () FROM VALUES (1); +> ROW_NUMBER() OVER () +> -------------------- +> 1 +> rows: 1 + +CREATE TABLE TEST(X INT) AS VALUES 1, 2, 3; +> ok + +EXPLAIN SELECT ROW_NUMBER() OVER (ORDER BY 'a') FROM TEST; +>> SELECT ROW_NUMBER() OVER () FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(1 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT RANK() OVER (ORDER BY 'a') FROM TEST; +> 1 +> - +> 1 +> 1 +> 1 +> rows: 3 + +EXPLAIN SELECT DENSE_RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(1 AS BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT DENSE_RANK() OVER (ORDER BY 'a') FROM TEST; +> 1 +> - +> 1 +> 1 +> 1 +> rows: 3 + +EXPLAIN SELECT PERCENT_RANK() OVER (ORDER BY 'a') FROM TEST; +>> SELECT CAST(0.0 AS DOUBLE PRECISION) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT PERCENT_RANK() OVER (ORDER BY 'a') FROM TEST; +> 0.0 +> --- +> 0.0 +> 0.0 +> 0.0 +> rows: 3 + +EXPLAIN SELECT CUME_DIST() OVER (ORDER BY 'a') FROM TEST; +>> 
SELECT CAST(1.0 AS DOUBLE PRECISION) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT CUME_DIST() OVER (ORDER BY 'a') FROM TEST; +> 1.0 +> --- +> 1.0 +> 1.0 +> 1.0 +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/indexes.sql b/h2/src/test/org/h2/test/scripts/indexes.sql new file mode 100644 index 0000000000..4400a63a76 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/indexes.sql @@ -0,0 +1,412 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Test all possible order modes + +CREATE TABLE TEST(A INT); +> ok + +INSERT INTO TEST VALUES (NULL), (0), (1); +> update count: 3 + +-- default + +SELECT A FROM TEST ORDER BY A; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A); +> ok + +SELECT A FROM TEST ORDER BY A; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- ASC + +SELECT A FROM TEST ORDER BY A ASC; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A ASC); +> ok + +SELECT A FROM TEST ORDER BY A ASC; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A ASC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- ASC NULLS FIRST + +SELECT A FROM TEST ORDER BY A ASC NULLS FIRST; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A ASC NULLS FIRST); +> ok + +SELECT A FROM TEST ORDER BY A ASC NULLS FIRST; +> A +> ---- +> null +> 0 +> 1 +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A ASC NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 NULLS FIRST /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- ASC 
NULLS LAST + +SELECT A FROM TEST ORDER BY A ASC NULLS LAST; +> A +> ---- +> 0 +> 1 +> null +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A ASC NULLS LAST); +> ok + +SELECT A FROM TEST ORDER BY A ASC NULLS LAST; +> A +> ---- +> 0 +> 1 +> null +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A ASC NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 NULLS LAST /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- DESC + +SELECT A FROM TEST ORDER BY A DESC; +> A +> ---- +> 1 +> 0 +> null +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A DESC); +> ok + +SELECT A FROM TEST ORDER BY A DESC; +> A +> ---- +> 1 +> 0 +> null +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 DESC /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- DESC NULLS FIRST + +SELECT A FROM TEST ORDER BY A DESC NULLS FIRST; +> A +> ---- +> null +> 1 +> 0 +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A DESC NULLS FIRST); +> ok + +SELECT A FROM TEST ORDER BY A DESC NULLS FIRST; +> A +> ---- +> null +> 1 +> 0 +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 DESC NULLS FIRST /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- DESC NULLS LAST + +SELECT A FROM TEST ORDER BY A DESC NULLS LAST; +> A +> ---- +> 1 +> 0 +> null +> rows (ordered): 3 + +CREATE INDEX A_IDX ON TEST(A DESC NULLS LAST); +> ok + +SELECT A FROM TEST ORDER BY A DESC NULLS LAST; +> A +> ---- +> 1 +> 0 +> null +> rows (ordered): 3 + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX */ ORDER BY 1 DESC NULLS LAST /* index sorted */ + +DROP INDEX A_IDX; +> ok + +-- Index selection + +CREATE INDEX A_IDX_ASC ON TEST(A ASC); +> ok + +CREATE INDEX A_IDX_ASC_NL ON TEST(A ASC NULLS LAST); +> ok + +CREATE INDEX A_IDX_DESC ON TEST(A DESC); +> ok + +CREATE INDEX 
A_IDX_DESC_NF ON TEST(A DESC NULLS FIRST); +> ok + +EXPLAIN SELECT A FROM TEST ORDER BY A; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC */ ORDER BY 1 /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A ASC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC */ ORDER BY 1 /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC */ ORDER BY 1 NULLS FIRST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC_NL */ ORDER BY 1 NULLS LAST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC */ ORDER BY 1 DESC /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC_NF */ ORDER BY 1 DESC NULLS FIRST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC */ ORDER BY 1 DESC NULLS LAST /* index sorted */ + +DROP INDEX A_IDX_ASC; +> ok + +DROP INDEX A_IDX_DESC; +> ok + +CREATE INDEX A_IDX_ASC_NF ON TEST(A ASC NULLS FIRST); +> ok + +CREATE INDEX A_IDX_DESC_NL ON TEST(A DESC NULLS LAST); +> ok + +EXPLAIN SELECT A FROM TEST ORDER BY A; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC_NF */ ORDER BY 1 /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A ASC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC_NF */ ORDER BY 1 /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC_NF */ ORDER BY 1 NULLS FIRST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_ASC_NL */ ORDER BY 1 NULLS LAST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC_NL */ ORDER BY 1 
DESC /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS FIRST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC_NF */ ORDER BY 1 DESC NULLS FIRST /* index sorted */ + +EXPLAIN SELECT A FROM TEST ORDER BY A DESC NULLS LAST; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.A_IDX_DESC_NL */ ORDER BY 1 DESC NULLS LAST /* index sorted */ + +DROP TABLE TEST; +> ok + +-- Other tests + +create table test(a int, b int); +> ok + +insert into test values(1, 1); +> update count: 1 + +create index on test(a, b desc); +> ok + +select * from test where a = 1; +> A B +> - - +> 1 1 +> rows: 1 + +drop table test; +> ok + +create table test(x int); +> ok + +create hash index on test(x); +> ok + +select 1 from test group by x; +> 1 +> - +> rows: 0 + +drop table test; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE INDEX T_A1 ON TEST(A); +> ok + +CREATE INDEX T_A_B ON TEST(A, B); +> ok + +CREATE INDEX T_A_C ON TEST(A, C); +> ok + +EXPLAIN SELECT * FROM TEST WHERE A = 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 + +EXPLAIN SELECT * FROM TEST WHERE A = 0 AND B >= 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) + +EXPLAIN SELECT * FROM TEST WHERE A > 0 AND B >= 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) + +INSERT INTO TEST (SELECT X / 100, X, X FROM SYSTEM_RANGE(1, 3000)); +> update count: 3000 + +EXPLAIN SELECT * FROM TEST WHERE A = 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A1: A = 0 */ WHERE "A" = 0 + +EXPLAIN SELECT * FROM TEST WHERE A = 0 AND B >= 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* 
PUBLIC.T_A_B: A = 0 AND B >= 0 */ WHERE ("A" = 0) AND ("B" >= 0) + +EXPLAIN SELECT * FROM TEST WHERE A > 0 AND B >= 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A_B: A > 0 AND B >= 0 */ WHERE ("A" > 0) AND ("B" >= 0) + +-- Test that creation order of indexes has no effect +CREATE INDEX T_A2 ON TEST(A); +> ok + +DROP INDEX T_A1; +> ok + +EXPLAIN SELECT * FROM TEST WHERE A = 0; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.T_A2: A = 0 */ WHERE "A" = 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(A INT, B INT, C INT); +> ok + +CREATE INDEX T_B_IDX ON T(B); +> ok + +EXPLAIN SELECT * FROM T WHERE A = 1 AND B = A; +>> SELECT "PUBLIC"."T"."A", "PUBLIC"."T"."B", "PUBLIC"."T"."C" FROM "PUBLIC"."T" /* PUBLIC.T_B_IDX: B = 1 */ WHERE ("A" = 1) AND ("B" = "A") + +DROP TABLE T; +> ok + +-- _ROWID_ tests + +CREATE TABLE TEST(ID INT PRIMARY KEY); +> ok + +INSERT INTO TEST VALUES 1, 2, 3, 4; +> update count: 4 + +SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3; +> ID +> -- +> 2 +> 3 +> rows: 2 + +SELECT * FROM TEST WHERE _ROWID_ >= 2 AND _ROWID_ <= 3; +> ID +> -- +> 2 +> 3 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID FLOAT PRIMARY KEY); +> ok + +INSERT INTO TEST VALUES 1.0, 2.0, 3.0, 4.0; +> update count: 4 + +SELECT * FROM TEST WHERE ID >= 2.0 AND ID <= 3.0; +> ID +> --- +> 2.0 +> 3.0 +> rows: 2 + +SELECT * FROM TEST WHERE _ROWID_ >= 2 AND _ROWID_ <= 3; +> ID +> --- +> 2.0 +> 3.0 +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/information_schema.sql b/h2/src/test/org/h2/test/scripts/information_schema.sql new file mode 100644 index 0000000000..aca6341a63 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/information_schema.sql @@ -0,0 +1,193 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +TABLE INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME; +> CATALOG_NAME +> ------------ +> SCRIPT +> rows: 1 + +CREATE TABLE T1(C1 INT NOT NULL, C2 INT NOT NULL, C3 INT, C4 INT); +> ok + +ALTER TABLE T1 ADD CONSTRAINT PK_1 PRIMARY KEY(C1, C2); +> ok + +ALTER TABLE T1 ADD CONSTRAINT U_1 UNIQUE(C3, C4); +> ok + +CREATE TABLE T2(C1 INT, C2 INT, C3 INT, C4 INT); +> ok + +ALTER TABLE T2 ADD CONSTRAINT FK_1 FOREIGN KEY (C3, C4) REFERENCES T1(C1, C3) ON DELETE SET NULL; +> exception CONSTRAINT_NOT_FOUND_1 + +SET MODE MySQL; +> ok + +ALTER TABLE T2 ADD CONSTRAINT FK_1 FOREIGN KEY (C3, C4) REFERENCES T1(C1, C3) ON DELETE SET NULL; +> ok + +ALTER TABLE T2 ADD CONSTRAINT FK_2 FOREIGN KEY (C3, C4) REFERENCES T1(C4, C3) ON UPDATE CASCADE ON DELETE SET DEFAULT; +> ok + +SET MODE Regular; +> ok + +ALTER TABLE T2 ADD CONSTRAINT CH_1 CHECK (C4 > 0 AND NOT EXISTS(SELECT 1 FROM T1 WHERE T1.C1 + T1.C2 = T2.C4)); +> ok + +SELECT * FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS LIMIT 0; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME IS_DEFERRABLE INITIALLY_DEFERRED ENFORCED INDEX_CATALOG INDEX_SCHEMA INDEX_NAME REMARKS +> ------------------ ----------------- --------------- --------------- ------------- ------------ ---------- ------------- ------------------ -------- ------------- ------------ ---------- ------- +> rows: 0 + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, IS_DEFERRABLE, INITIALLY_DEFERRED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS + WHERE CONSTRAINT_CATALOG = DATABASE() AND CONSTRAINT_SCHEMA = SCHEMA() AND TABLE_CATALOG = DATABASE() AND TABLE_SCHEMA = SCHEMA() + ORDER BY TABLE_NAME, CONSTRAINT_NAME; +> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME IS_DEFERRABLE INITIALLY_DEFERRED +> --------------- --------------- ---------- ------------- ------------------ +> CONSTRAINT_A UNIQUE T1 NO NO +> PK_1 PRIMARY KEY T1 NO NO +> U_1 UNIQUE T1 NO NO +> CH_1 CHECK T2 NO NO +> FK_1 
FOREIGN KEY T2 NO NO +> FK_2 FOREIGN KEY T2 NO NO +> rows (ordered): 6 + +SELECT * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE LIMIT 0; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT +> ------------------ ----------------- --------------- ------------- ------------ ---------- ----------- ---------------- ----------------------------- +> rows: 0 + +SELECT CONSTRAINT_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, POSITION_IN_UNIQUE_CONSTRAINT FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE + WHERE CONSTRAINT_CATALOG = DATABASE() AND CONSTRAINT_SCHEMA = SCHEMA() AND TABLE_CATALOG = DATABASE() AND TABLE_SCHEMA = SCHEMA() + ORDER BY TABLE_NAME, CONSTRAINT_NAME, ORDINAL_POSITION; +> CONSTRAINT_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT +> --------------- ---------- ----------- ---------------- ----------------------------- +> CONSTRAINT_A T1 C1 1 null +> CONSTRAINT_A T1 C3 2 null +> PK_1 T1 C1 1 null +> PK_1 T1 C2 2 null +> U_1 T1 C3 1 null +> U_1 T1 C4 2 null +> FK_1 T2 C3 1 1 +> FK_1 T2 C4 2 2 +> FK_2 T2 C3 1 2 +> FK_2 T2 C4 2 1 +> rows (ordered): 10 + +SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS LIMIT 0; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> rows: 0 + +SELECT CONSTRAINT_NAME, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS + WHERE CONSTRAINT_CATALOG = DATABASE() AND CONSTRAINT_SCHEMA = SCHEMA() AND UNIQUE_CONSTRAINT_CATALOG = DATABASE() AND UNIQUE_CONSTRAINT_SCHEMA = SCHEMA() + ORDER BY CONSTRAINT_NAME, UNIQUE_CONSTRAINT_NAME; +> CONSTRAINT_NAME UNIQUE_CONSTRAINT_NAME 
MATCH_OPTION UPDATE_RULE DELETE_RULE +> --------------- ---------------------- ------------ ----------- ----------- +> FK_1 CONSTRAINT_A NONE RESTRICT SET NULL +> FK_2 U_1 NONE CASCADE SET DEFAULT +> rows (ordered): 2 + +SELECT U1.TABLE_NAME T1, U1.COLUMN_NAME C1, U2.TABLE_NAME T2, U2.COLUMN_NAME C2 + FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE U1 JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC ON U1.CONSTRAINT_NAME = RC.CONSTRAINT_NAME + JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE U2 ON RC.UNIQUE_CONSTRAINT_NAME = U2.CONSTRAINT_NAME AND U1.POSITION_IN_UNIQUE_CONSTRAINT = U2.ORDINAL_POSITION + WHERE U1.CONSTRAINT_NAME = 'FK_2' ORDER BY U1.COLUMN_NAME; +> T1 C1 T2 C2 +> -- -- -- -- +> T2 C3 T1 C4 +> T2 C4 T1 C3 +> rows (ordered): 2 + +TABLE INFORMATION_SCHEMA.CHECK_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME CHECK_CLAUSE +> ------------------ ----------------- --------------- --------------------------------------------------------------------------------------------------- +> SCRIPT PUBLIC CH_1 ("C4" > 0) AND (NOT EXISTS( SELECT 1 FROM "PUBLIC"."T1" WHERE ("T1"."C1" + "T1"."C2") = "T2"."C4")) +> rows: 1 + +TABLE INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE; +> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME +> ------------- ------------ ---------- ----------- ------------------ ----------------- --------------- +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC CONSTRAINT_A +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T1 C1 SCRIPT PUBLIC PK_1 +> SCRIPT PUBLIC T1 C2 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T1 C2 SCRIPT PUBLIC PK_1 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC CONSTRAINT_A +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC FK_2 +> SCRIPT PUBLIC T1 C3 SCRIPT PUBLIC U_1 +> SCRIPT PUBLIC T1 C4 SCRIPT PUBLIC FK_2 +> SCRIPT PUBLIC T1 C4 SCRIPT PUBLIC U_1 +> SCRIPT PUBLIC T2 C3 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T2 C3 SCRIPT 
PUBLIC FK_2 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC CH_1 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC FK_1 +> SCRIPT PUBLIC T2 C4 SCRIPT PUBLIC FK_2 +> rows: 17 + +DROP TABLE T2; +> ok + +DROP TABLE T1; +> ok + +@reconnect off + +CREATE TABLE T1(C1 INT PRIMARY KEY); +> ok + +CREATE TABLE T2(C2 INT PRIMARY KEY REFERENCES T1); +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> YES + +SET REFERENTIAL_INTEGRITY FALSE; +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +SET REFERENTIAL_INTEGRITY TRUE; +> ok + +ALTER TABLE T1 SET REFERENTIAL_INTEGRITY FALSE; +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +ALTER TABLE T1 SET REFERENTIAL_INTEGRITY TRUE; +> ok + +ALTER TABLE T2 SET REFERENTIAL_INTEGRITY FALSE; +> ok + +SELECT ENFORCED FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'; +>> NO + +DROP TABLE T2, T1; +> ok + +@reconnect on + +SELECT TABLE_NAME, ROW_COUNT_ESTIMATE FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' + AND TABLE_NAME IN ('INFORMATION_SCHEMA_CATALOG_NAME', 'SCHEMATA', 'ROLES', 'SESSIONS', 'IN_DOUBT', 'USERS'); +> TABLE_NAME ROW_COUNT_ESTIMATE +> ------------------------------- ------------------ +> INFORMATION_SCHEMA_CATALOG_NAME 1 +> IN_DOUBT 0 +> ROLES 1 +> SCHEMATA 2 +> SESSIONS 1 +> USERS 1 +> rows: 6 + +EXPLAIN SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLLATIONS; +>> SELECT COUNT(*) FROM "INFORMATION_SCHEMA"."COLLATIONS" /* meta */ /* direct lookup */ diff --git a/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql b/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql new file mode 100644 index 0000000000..c66ed8e378 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/at-time-zone.sql @@ -0,0 +1,134 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10'; +>> 2010-01-01 15:00:01.123456789+10 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10:00:30'; +>> 2010-01-01 15:00:31.123456789+10:00:30 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '10:00:30.1'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00' HOUR TO MINUTE; +>> 2010-01-01 15:00:01.123456789+10 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00:30' HOUR TO SECOND; +>> 2010-01-01 15:00:31.123456789+10:00:30 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE INTERVAL '10:00:30.1' HOUR TO SECOND; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 20:00:01.123456789+05' AT TIME ZONE '18:00'; +>> 2010-01-02 09:00:01.123456789+18 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '-18:00'; +>> 2009-12-31 11:00:01.123456789-18 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '-18:01'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '+18:01'; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:01.123456789+05' AT TIME ZONE '19:00'; +> exception INVALID_VALUE_2 + +CALL RIGHT(CAST(CURRENT_TIMESTAMP AT TIME ZONE '00:00' AS VARCHAR), 3); +>> +00 + +CALL CAST(CURRENT_TIMESTAMP AS VARCHAR) = CAST(CURRENT_TIMESTAMP AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(CURRENT_TIMESTAMP AS VARCHAR) = CAST(LOCALTIMESTAMP AT LOCAL AS VARCHAR); +>> TRUE + +CALL TIME WITH TIME ZONE '10:00:01.123456789+05' AT TIME ZONE '10'; +>> 15:00:01.123456789+10 + +CALL 
RIGHT(CAST(CURRENT_TIME AT TIME ZONE '00:00' AS VARCHAR), 3); +>> +00 + +CALL CAST(CURRENT_TIME AS VARCHAR) = CAST(CURRENT_TIME AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(CURRENT_TIME AS VARCHAR) = CAST(LOCALTIME AT LOCAL AS VARCHAR); +>> TRUE + +CALL CAST(NULL AS TIMESTAMP) AT LOCAL; +>> null + +CALL TIMESTAMP WITH TIME ZONE '2010-01-01 10:00:00Z' AT TIME ZONE NULL; +>> null + +CALL 1 AT LOCAL; +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST(A TIMESTAMP WITH TIME ZONE, B INTERVAL HOUR TO MINUTE) AS + (VALUES ('2010-01-01 10:00:00Z', '10:00')); +> ok + +EXPLAIN SELECT A AT TIME ZONE B, A AT LOCAL FROM TEST; +>> SELECT "A" AT TIME ZONE "B", "A" AT LOCAL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'Europe/London'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-07-01 01:00:00+02' AT TIME ZONE 'Europe/London'; +>> 2000-07-01 00:00:00+01 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'Z'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'UTC'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'GMT'; +>> 1999-12-31 23:00:00+00 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE ''; +> exception INVALID_VALUE_2 + +CALL TIMESTAMP WITH TIME ZONE '2000-01-01 01:00:00+02' AT TIME ZONE 'GMT0'; +> exception INVALID_VALUE_2 + +CALL TIME WITH TIME ZONE '01:00:00+02' AT TIME ZONE 'Europe/London'; +> exception INVALID_VALUE_2 + +SET TIME ZONE '5'; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'; +>> GMT+05:00 + +SET TIME ZONE INTERVAL '4:00' HOUR TO MINUTE; +> ok + +SET TIME ZONE NULL; +> exception INVALID_VALUE_2 + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'TIME ZONE'; +>> GMT+04:00 + +CREATE TABLE TEST(T TIMESTAMP) AS (VALUES '2010-01-01 
10:00:00'); +> ok + +SELECT CAST(T AS TIMESTAMP WITH TIME ZONE) FROM TEST; +>> 2010-01-01 10:00:00+04 + +SELECT T AT LOCAL FROM TEST; +>> 2010-01-01 10:00:00+04 + +SELECT T AT TIME ZONE '8:00' FROM TEST; +>> 2010-01-01 14:00:00+08 + +SET TIME ZONE LOCAL; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/boolean-test.sql b/h2/src/test/org/h2/test/scripts/other/boolean-test.sql new file mode 100644 index 0000000000..37383d30f0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/boolean-test.sql @@ -0,0 +1,135 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT + NULL IS UNKNOWN, FALSE IS UNKNOWN, TRUE IS UNKNOWN, + NULL IS FALSE, FALSE IS FALSE, TRUE IS FALSE, + NULL IS TRUE, FALSE IS TRUE, TRUE IS TRUE; +> TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE +> ---- ----- ----- ----- ---- ----- ----- ----- ---- +> TRUE FALSE FALSE FALSE TRUE FALSE FALSE FALSE TRUE +> rows: 1 + +SELECT + NULL IS NOT UNKNOWN, FALSE IS NOT UNKNOWN, TRUE IS NOT UNKNOWN, + NULL IS NOT FALSE, FALSE IS NOT FALSE, TRUE IS NOT FALSE, + NULL IS NOT TRUE, FALSE IS NOT TRUE, TRUE IS NOT TRUE; +> FALSE TRUE TRUE TRUE FALSE TRUE TRUE TRUE FALSE +> ----- ---- ---- ---- ----- ---- ---- ---- ----- +> FALSE TRUE TRUE TRUE FALSE TRUE TRUE TRUE FALSE +> rows: 1 + +CREATE TABLE TEST(B BOOLEAN, N INT) AS VALUES (NULL, NULL), (FALSE, 0), (TRUE, 1); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok + +CREATE INDEX TEST_N_IDX ON TEST(N); +> ok + +SELECT B, B IS UNKNOWN, N IS UNKNOWN, B IS FALSE, N IS FALSE, B IS TRUE, N IS TRUE FROM TEST; +> B B IS UNKNOWN N IS UNKNOWN B IS FALSE N IS FALSE B IS TRUE N IS TRUE +> ----- ------------ ------------ ---------- ---------- --------- --------- +> FALSE FALSE FALSE TRUE TRUE FALSE FALSE +> TRUE FALSE FALSE FALSE FALSE TRUE TRUE +> null TRUE TRUE FALSE FALSE FALSE FALSE +> rows: 3 + +SELECT B, B IS 
NOT UNKNOWN, N IS NOT UNKNOWN, B IS NOT FALSE, N IS NOT FALSE, B IS NOT TRUE, N IS NOT TRUE FROM TEST; +> B B IS NOT UNKNOWN N IS NOT UNKNOWN B IS NOT FALSE N IS NOT FALSE B IS NOT TRUE N IS NOT TRUE +> ----- ---------------- ---------------- -------------- -------------- ------------- ------------- +> FALSE TRUE TRUE FALSE FALSE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> null FALSE FALSE TRUE TRUE TRUE TRUE +> rows: 3 + +SELECT B, NOT B IS NOT UNKNOWN, NOT N IS NOT UNKNOWN, NOT B IS NOT FALSE, NOT N IS NOT FALSE, + NOT B IS NOT TRUE, NOT N IS NOT TRUE FROM TEST; +> B B IS UNKNOWN N IS UNKNOWN B IS FALSE N IS FALSE B IS TRUE N IS TRUE +> ----- ------------ ------------ ---------- ---------- --------- --------- +> FALSE FALSE FALSE TRUE TRUE FALSE FALSE +> TRUE FALSE FALSE FALSE FALSE TRUE TRUE +> null TRUE TRUE FALSE FALSE FALSE FALSE +> rows: 3 + +EXPLAIN SELECT B FROM TEST WHERE B IS UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS UNKNOWN */ WHERE "B" IS UNKNOWN + +SELECT B FROM TEST WHERE B IS UNKNOWN; +>> null + +EXPLAIN SELECT N FROM TEST WHERE N IS UNKNOWN; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS UNKNOWN + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IN(FALSE, TRUE) */ WHERE "B" IS NOT UNKNOWN + +SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +> B +> ----- +> FALSE +> TRUE +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT UNKNOWN; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT UNKNOWN + +EXPLAIN SELECT B FROM TEST WHERE B IS FALSE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS FALSE */ WHERE "B" IS FALSE + +SELECT B FROM TEST WHERE B IS FALSE; +>> FALSE + +EXPLAIN SELECT N FROM TEST WHERE N IS FALSE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS FALSE + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT FALSE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* 
PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT FALSE + +SELECT B FROM TEST WHERE B IS NOT FALSE; +> B +> ---- +> TRUE +> null +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT FALSE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT FALSE + +EXPLAIN SELECT B FROM TEST WHERE B IS TRUE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX: B IS TRUE */ WHERE "B" IS TRUE + +SELECT B FROM TEST WHERE B IS TRUE; +>> TRUE + +EXPLAIN SELECT N FROM TEST WHERE N IS TRUE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS TRUE + +EXPLAIN SELECT B FROM TEST WHERE B IS NOT TRUE; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT TRUE + +SELECT B FROM TEST WHERE B IS NOT TRUE; +> B +> ----- +> FALSE +> null +> rows: 2 + +EXPLAIN SELECT N FROM TEST WHERE N IS NOT TRUE; +>> SELECT "N" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_N_IDX */ WHERE "N" IS NOT TRUE + +DELETE FROM TEST WHERE B IS NULL; +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN B SET NOT NULL; +> ok + +-- If column is NOT NULL index condition for IS NOT UNKNOWN shouldn't exist +EXPLAIN SELECT B FROM TEST WHERE B IS NOT UNKNOWN; +>> SELECT "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_B_IDX */ WHERE "B" IS NOT UNKNOWN + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/case.sql b/h2/src/test/org/h2/test/scripts/other/case.sql new file mode 100644 index 0000000000..f2fdc6c499 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/case.sql @@ -0,0 +1,133 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +select case when 1=null then 1 else 2 end; +>> 2 + +select case (1) when 1 then 1 else 2 end; +>> 1 + +select x, case when x=0 then 'zero' else 'not zero' end y from system_range(0, 2); +> X Y +> - -------- +> 0 zero +> 1 not zero +> 2 not zero +> rows: 3 + +select x, case when x=0 then 'zero' end y from system_range(0, 1); +> X Y +> - ---- +> 0 zero +> 1 null +> rows: 2 + +select x, case x when 0 then 'zero' else 'not zero' end y from system_range(0, 1); +> X Y +> - -------- +> 0 zero +> 1 not zero +> rows: 2 + +select x, case x when 0 then 'zero' when 1 then 'one' end y from system_range(0, 2); +> X Y +> - ---- +> 0 zero +> 1 one +> 2 null +> rows: 3 + +SELECT X, CASE X WHEN 1 THEN 10 WHEN 2, 3 THEN 25 WHEN 4, 5, 6 THEN 50 ELSE 90 END C FROM SYSTEM_RANGE(1, 7); +> X C +> - -- +> 1 10 +> 2 25 +> 3 25 +> 4 50 +> 5 50 +> 6 50 +> 7 90 +> rows: 7 + +SELECT CASE WHEN TRUE THEN 1 END CASE; +> exception SYNTAX_ERROR_1 + +SELECT S, CASE S + WHEN IS NULL THEN 1 + WHEN LOWER('A') THEN 2 + WHEN LIKE '%b' THEN 3 + WHEN ILIKE 'C' THEN 4 + WHEN REGEXP '[dQ]' THEN 5 + WHEN IS NOT DISTINCT FROM 'e' THEN 6 + WHEN IN ('x', 'f') THEN 7 + WHEN IN (VALUES 'g', 'z') THEN 8 + WHEN BETWEEN 'h' AND 'i' THEN 9 + WHEN = 'j' THEN 10 + WHEN < ANY(VALUES 'j', 'l') THEN 11 + WHEN NOT LIKE '%m%' THEN 12 + WHEN IS OF (VARCHAR) THEN 13 + ELSE 13 + END FROM (VALUES NULL, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm') T(S); +> S C2 +> ---- -- +> a 2 +> b 3 +> c 4 +> d 5 +> e 6 +> f 7 +> g 8 +> h 9 +> i 9 +> j 10 +> k 11 +> l 12 +> m 13 +> null 1 +> rows: 14 + +SELECT B, CASE B WHEN IS TRUE THEN 1 WHEN IS FALSE THEN 0 WHEN IS UNKNOWN THEN -1 END + FROM (VALUES TRUE, FALSE, UNKNOWN) T(B); +> B CASE B WHEN IS TRUE THEN 1 WHEN IS FALSE THEN 0 WHEN IS UNKNOWN THEN -1 END +> ----- --------------------------------------------------------------------------- +> FALSE 0 +> TRUE 1 +> null -1 +> rows: 3 + +SELECT J, CASE J WHEN IS JSON ARRAY THEN 1 WHEN 
IS NOT JSON OBJECT THEN 2 ELSE 3 END + FROM (VALUES JSON '[]', JSON 'true', JSON '{}') T(J); +> J CASE J WHEN IS JSON ARRAY THEN 1 WHEN IS NOT JSON OBJECT THEN 2 ELSE 3 END +> ---- -------------------------------------------------------------------------- +> [] 1 +> true 2 +> {} 3 +> rows: 3 + +SELECT V, CASE V + WHEN IN(CURRENT_DATE, DATE '2010-01-01') THEN 1 + ELSE 2 + END FROM (VALUES DATE '2000-01-01', DATE '2010-01-01', DATE '2020-02-01') T(V); +> V CASE V WHEN IN(CURRENT_DATE, DATE '2010-01-01') THEN 1 ELSE 2 END +> ---------- ----------------------------------------------------------------- +> 2000-01-01 2 +> 2010-01-01 1 +> 2020-02-01 2 +> rows: 3 + +SELECT CASE NULL WHEN IS NOT DISTINCT FROM NULL THEN TRUE ELSE FALSE END; +>> TRUE + +SELECT CASE TRUE WHEN CURRENT_DATE THEN 1 END; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT * FROM (VALUES 0) D(X) JOIN (VALUES TRUE) T(C) WHERE (CASE C WHEN C THEN C END); +> X C +> - ---- +> 0 TRUE +> rows: 1 + +SELECT CASE TRUE WHEN NOT FALSE THEN 1 ELSE 0 END; +>> 1 diff --git a/h2/src/test/org/h2/test/scripts/other/concatenation.sql b/h2/src/test/org/h2/test/scripts/other/concatenation.sql new file mode 100644 index 0000000000..f61452a147 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/concatenation.sql @@ -0,0 +1,50 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(S VARCHAR(10), B VARBINARY(10), A VARCHAR(10) ARRAY) AS VALUES + ('a', X'49', ARRAY['b']), ('', X'', ARRAY[]), (NULL, NULL, NULL); +> ok + +EXPLAIN SELECT S || 'v' || '' || 'x' || S || (S || S), S || '', S || (B || X'50'), B || B || B FROM TEST; +>> SELECT "S" || 'vx' || "S" || "S" || "S", "S", "S" || ("B" || X'50'), "B" || "B" || "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT S || 'v' || '' || 'x' || S || (S || S), S || '', S || (B || X'50'), B || B || B FROM TEST; +> S || 'vx' || S || S || S S S || (B || X'50') B || B || B +> ------------------------ ---- ----------------- ----------- +> avxaaa a aIP X'494949' +> null null null null +> vx P X'' +> rows: 3 + +EXPLAIN SELECT S || A, ARRAY[] || A, S || CAST(ARRAY[] AS VARCHAR ARRAY), A || A || A FROM TEST; +>> SELECT "S" || "A", "A", CAST("S" AS CHARACTER VARYING ARRAY), "A" || "A" || "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT S || A, ARRAY[] || A, S || CAST(ARRAY[] AS VARCHAR ARRAY), A || A || A FROM TEST; +> S || A A CAST(S AS CHARACTER VARYING ARRAY) A || A || A +> ------ ---- ---------------------------------- ----------- +> [] [] [] [] +> [a, b] [b] [a] [b, b, b] +> null null null null +> rows: 3 + +EXPLAIN SELECT B || NULL, B || X'22' || NULL FROM TEST; +>> SELECT CAST(NULL AS BINARY VARYING(10)), CAST(NULL AS BINARY VARYING(11)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT B || NULL, B || X'22' || NULL FROM TEST; +> CAST(NULL AS BINARY VARYING(10)) CAST(NULL AS BINARY VARYING(11)) +> -------------------------------- -------------------------------- +> null null +> null null +> null null +> rows: 3 + +EXPLAIN SELECT B || X'', A || ARRAY['a'] FROM TEST; +>> SELECT "B", "A" || ARRAY ['a'] FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT (S || S) || (B || B) FROM TEST; +>> SELECT "S" || "S" || ("B" || "B") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git 
a/h2/src/test/org/h2/test/scripts/other/conditions.sql b/h2/src/test/org/h2/test/scripts/other/conditions.sql new file mode 100644 index 0000000000..ae1444f1bd --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/conditions.sql @@ -0,0 +1,168 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT + NULL AND NULL, NULL AND FALSE, NULL AND TRUE, + FALSE AND NULL, FALSE AND FALSE, FALSE AND TRUE, + TRUE AND NULL, TRUE AND FALSE, TRUE AND TRUE; +> UNKNOWN FALSE UNKNOWN FALSE FALSE FALSE UNKNOWN FALSE TRUE +> ------- ----- ------- ----- ----- ----- ------- ----- ---- +> null FALSE null FALSE FALSE FALSE null FALSE TRUE +> rows: 1 + +SELECT + NULL OR NULL, NULL OR FALSE, NULL OR TRUE, + FALSE OR NULL, FALSE OR FALSE, FALSE OR TRUE, + TRUE OR NULL, TRUE OR FALSE, TRUE OR TRUE; +> UNKNOWN UNKNOWN TRUE UNKNOWN FALSE TRUE TRUE TRUE TRUE +> ------- ------- ---- ------- ----- ---- ---- ---- ---- +> null null TRUE null FALSE TRUE TRUE TRUE TRUE +> rows: 1 + +SELECT NOT NULL, NOT FALSE, NOT TRUE; +> UNKNOWN TRUE FALSE +> ------- ---- ----- +> null TRUE FALSE +> rows: 1 + +SELECT 0 AND TRUE; +>> FALSE + +SELECT TRUE AND 0; +>> FALSE + +SELECT 1 OR FALSE; +>> TRUE + +SELECT FALSE OR 1; +>> TRUE + +SELECT NOT 0; +>> TRUE + +SELECT NOT 1; +>> FALSE + +CREATE TABLE TEST(B BOOLEAN, Z INT) AS VALUES (NULL, 0); +> ok + +EXPLAIN SELECT NOT NOT B, NOT NOT Z FROM TEST; +>> SELECT "B", CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT TRUE AND B, B AND TRUE, TRUE AND Z, Z AND TRUE FROM TEST; +>> SELECT "B", "B", CAST("Z" AS BOOLEAN), CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT FALSE OR B, B OR FALSE, FALSE OR Z, Z OR FALSE FROM TEST; +>> SELECT "B", "B", CAST("Z" AS BOOLEAN), CAST("Z" AS BOOLEAN) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + 
+CREATE TABLE TEST(A INT, B INT); +> ok + +EXPLAIN SELECT A FROM TEST WHERE (A, B) IS NOT DISTINCT FROM NULL; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE ROW ("A", "B") IS NOT DISTINCT FROM NULL + +EXPLAIN SELECT A FROM TEST WHERE (A, B) IS DISTINCT FROM NULL; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE ROW ("A", "B") IS DISTINCT FROM NULL + +EXPLAIN SELECT A IS DISTINCT FROM NULL, NULL IS DISTINCT FROM A FROM TEST; +>> SELECT "A" IS NOT NULL, "A" IS NOT NULL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A IS NOT DISTINCT FROM NULL, NULL IS NOT DISTINCT FROM A FROM TEST; +>> SELECT "A" IS NULL, "A" IS NULL FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A NULL); +> ok + +SELECT 1 IN (SELECT A FROM TEST); +>> FALSE + +INSERT INTO TEST VALUES NULL; +> update count: 1 + +SELECT 1 IN (SELECT A FROM TEST); +>> null + +DROP TABLE TEST; +> ok + +SELECT 1 IN (NULL); +>> null + +SELECT 1 IN (SELECT NULL); +>> null + +SELECT 1 IN (VALUES NULL); +>> null + +SELECT 1 IN (SELECT * FROM TABLE(X NULL=())); +>> FALSE + +SELECT (1, 1) IN (VALUES (1, NULL)); +>> null + +SELECT (1, 1) IN (VALUES (NULL, 1)); +>> null + +SELECT (1, 1) IN (SELECT * FROM TABLE(X INT=(), Y INT=())); +>> FALSE + +VALUES FALSE OR NULL OR FALSE; +>> null + +VALUES FALSE OR NULL OR TRUE; +>> TRUE + +VALUES TRUE AND NULL AND TRUE; +>> null + +VALUES TRUE AND NULL AND FALSE; +>> FALSE + +SELECT * FROM (VALUES 1) T(C) WHERE NOT NOT CASE C WHEN 1 THEN TRUE WHEN 2 THEN FALSE ELSE NULL END; +>> 1 + +SELECT C AND C, NOT(C AND C) FROM (VALUES 'F') T(C); +> C AND C (NOT C) OR (NOT C) +> ------- ------------------ +> FALSE TRUE +> rows: 1 + +SELECT C != 2 AND C, NOT (C != 2 AND C) FROM (VALUES TRUE) T(C); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT ROW(1) = ROW(ROW(1)); +>> TRUE + +SELECT ROW(1) = ROW(ROW(2)); +>> FALSE + +SELECT ROW(1) = ROW(ROW(1, 2)); +> exception TYPES_ARE_NOT_COMPARABLE_2 
+ +SELECT ROW(1) = ROW(ROW(TIME '00:00:00')); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(C1 BOOLEAN GENERATED ALWAYS AS (NOT C2), C2 BOOLEAN GENERATED ALWAYS AS (C1)); +> exception COLUMN_NOT_FOUND_1 + +CREATE TABLE TEST(A INTEGER, B INTEGER, C INTEGER, D INTEGER) AS VALUES (1, 2, 3, 4); +> ok + +EXPLAIN SELECT A = B OR A = C C1, B = A OR A = C C2, A = B OR C = A C3, B = A OR C = A C4 FROM TEST; +>> SELECT "A" IN("B", "C") AS "C1", "A" IN("B", "C") AS "C2", "A" IN("B", "C") AS "C3", "A" IN("B", "C") AS "C4" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A = B OR A = C OR A = D C1, B = A OR A = C OR A = D C2, A = B OR C = A OR A = D C3, + B = A OR C = A OR A = D C4, A = B OR A = C OR D = A C5, B = A OR A = C OR D = A C6, A = B OR C = A OR D = A C7, + B = A OR C = A OR D = A C8 FROM TEST; +>> SELECT "A" IN("B", "C", "D") AS "C1", "A" IN("B", "C", "D") AS "C2", "A" IN("B", "C", "D") AS "C3", "A" IN("B", "C", "D") AS "C4", "A" IN("B", "C", "D") AS "C5", "A" IN("B", "C", "D") AS "C6", "A" IN("B", "C", "D") AS "C7", "A" IN("B", "C", "D") AS "C8" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql b/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql new file mode 100644 index 0000000000..f8040387ee --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/data-change-delta-table.sql @@ -0,0 +1,417 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT PRIMARY KEY, A INT, B INT); +> ok + +CREATE TRIGGER T1 BEFORE INSERT, UPDATE ON TEST FOR EACH ROW CALL "org.h2.test.scripts.Trigger1"; +> ok + +-- INSERT + +SELECT * FROM OLD TABLE (INSERT INTO TEST(A, B) VALUES (100, 100)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM NEW TABLE (INSERT INTO TEST(A, B) VALUES (1, 2)); +> ID A B +> -- - - +> 1 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST(A, B) VALUES (2, 3)); +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 + +-- INSERT from SELECT + +SELECT * FROM NEW TABLE (INSERT INTO TEST(A, B) SELECT * FROM VALUES (3, 4), (4, 5)); +> ID A B +> -- - - +> 3 3 4 +> 4 4 5 +> rows: 2 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST(A, B) SELECT * FROM VALUES (5, 6), (6, 7)); +> ID A B +> -- - -- +> 5 5 60 +> 6 6 70 +> rows: 2 + +-- UPDATE + +SELECT * FROM OLD TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 20 +> rows: 1 + +SELECT * FROM NEW TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - - +> 1 1 3 +> rows: 1 + +SELECT * FROM FINAL TABLE (UPDATE TEST SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +-- DELETE + +SELECT * FROM OLD TABLE (DELETE FROM TEST WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +SELECT * FROM OLD TABLE (DELETE FROM TEST WHERE ID = ?); +{ +2 +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 +100 +> ID A B +> -- - - +> rows: 0 +}; +> update count: 0 + +SELECT * FROM NEW TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +SELECT * FROM FINAL TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +SELECT * FROM TEST TABLE (DELETE FROM TEST); +> exception SYNTAX_ERROR_2 + +-- MERGE INTO + +SELECT * FROM OLD TABLE (MERGE INTO TEST KEY(ID) VALUES (3, 3, 5), (7, 7, 8)); +> ID A B +> -- - -- +> 3 3 40 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST KEY(ID) VALUES (4, 4, 6), (8, 8, 9)); +> ID A B +> -- - - +> 4 4 6 +> 8 8 9 +> rows: 2 + +SELECT * FROM FINAL 
TABLE (MERGE INTO TEST KEY(ID) VALUES (5, 5, 7), (9, 9, 10)); +> ID A B +> -- - --- +> 5 5 70 +> 9 9 100 +> rows: 2 + +-- MERGE INTO from SELECT + +SELECT * FROM OLD TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (3, 3, 6), (10, 10, 11)); +> ID A B +> -- - -- +> 3 3 50 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (4, 4, 7), (11, 11, 12)); +> ID A B +> -- -- -- +> 11 11 12 +> 4 4 7 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST KEY(ID) SELECT * FROM VALUES (5, 5, 8), (12, 12, 13)); +> ID A B +> -- -- --- +> 12 12 130 +> 5 5 80 +> rows: 2 + +-- MERGE USING + +SELECT * FROM OLD TABLE (MERGE INTO TEST USING + (VALUES (3, 3, 7), (10, 10, 12), (13, 13, 14)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 3 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 3 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 10 10 110 +> 3 3 60 +> rows: 2 + +SELECT * FROM NEW TABLE (MERGE INTO TEST USING + (VALUES (4, 4, 8), (11, 11, 13), (14, 14, 15)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 4 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 4 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- -- +> 14 14 15 +> 4 4 8 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST USING + (VALUES (5, 5, 9), (12, 12, 15), (15, 15, 16)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 5 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 5 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 15 15 160 +> 5 5 90 +> rows: 2 + +-- REPLACE + +SELECT * FROM OLD TABLE (REPLACE INTO TEST VALUES (3, 3, 8), (16, 16, 17)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST VALUES (4, 4, 9), (17, 17, 18)); +> exception SYNTAX_ERROR_2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST VALUES (5, 5, 10), (18, 18, 19)); +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; 
+> ok + +SELECT * FROM OLD TABLE (REPLACE INTO TEST VALUES (3, 3, 8), (16, 16, 17)); +> ID A B +> -- - -- +> 3 3 70 +> rows: 1 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST VALUES (4, 4, 9), (17, 17, 18)); +> ID A B +> -- -- -- +> 17 17 18 +> 4 4 9 +> rows: 2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST VALUES (5, 5, 10), (18, 18, 19)); +> ID A B +> -- -- --- +> 18 18 190 +> 5 5 100 +> rows: 2 + +-- REPLACE from SELECT + +SELECT * FROM OLD TABLE (REPLACE INTO TEST SELECT * FROM VALUES (3, 3, 9), (19, 19, 20)); +> ID A B +> -- - -- +> 3 3 80 +> rows: 1 + +SELECT * FROM NEW TABLE (REPLACE INTO TEST SELECT * FROM VALUES (4, 4, 10), (20, 20, 21)); +> ID A B +> -- -- -- +> 20 20 21 +> 4 4 10 +> rows: 2 + +SELECT * FROM FINAL TABLE (REPLACE INTO TEST SELECT * FROM VALUES (5, 5, 11), (21, 21, 22)); +> ID A B +> -- -- --- +> 21 21 220 +> 5 5 110 +> rows: 2 + +SET MODE Regular; +> ok + +TRUNCATE TABLE TEST RESTART IDENTITY; +> update count: 16 + +CREATE VIEW TEST_VIEW AS SELECT * FROM TEST; +> ok + +CREATE TRIGGER T2 INSTEAD OF INSERT, UPDATE, DELETE ON TEST_VIEW FOR EACH ROW CALL "org.h2.test.scripts.Trigger2"; +> ok + +-- INSERT + +SELECT * FROM NEW TABLE (INSERT INTO TEST_VIEW(A, B) VALUES (1, 2)); +> ID A B +> ---- - - +> null 1 2 +> rows: 1 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST_VIEW(A, B) VALUES (2, 3)); +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 + +-- INSERT from SELECT + +SELECT * FROM NEW TABLE (INSERT INTO TEST_VIEW(A, B) SELECT * FROM VALUES (3, 4), (4, 5)); +> ID A B +> ---- - - +> null 3 4 +> null 4 5 +> rows: 2 + +SELECT * FROM FINAL TABLE (INSERT INTO TEST_VIEW(A, B) SELECT * FROM VALUES (5, 6), (6, 7)); +> ID A B +> -- - -- +> 5 5 60 +> 6 6 70 +> rows: 2 + +-- UPDATE + +SELECT * FROM OLD TABLE (UPDATE TEST_VIEW SET B = 3 WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 20 +> rows: 1 + +SELECT * FROM NEW TABLE (UPDATE TEST_VIEW SET B = 3 WHERE ID = 1); +> ID A B +> -- - - +> 1 1 3 +> rows: 1 + +SELECT * FROM FINAL TABLE (UPDATE TEST_VIEW SET B = 3 WHERE 
ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +-- DELETE + +SELECT * FROM OLD TABLE (DELETE FROM TEST_VIEW WHERE ID = 1); +> ID A B +> -- - -- +> 1 1 30 +> rows: 1 + +SELECT * FROM OLD TABLE (DELETE FROM TEST_VIEW WHERE ID = ?); +{ +2 +> ID A B +> -- - -- +> 2 2 30 +> rows: 1 +100 +> ID A B +> -- - - +> rows: 0 +}; +> update count: 0 + +-- MERGE INTO + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (3, 3, 5), (7, 7, 8)); +> ID A B +> -- - -- +> 3 3 40 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (4, 4, 6), (8, 8, 9)); +> ID A B +> -- - - +> 4 4 6 +> 8 8 9 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST_VIEW KEY(ID) VALUES (5, 5, 7), (9, 9, 10)); +> ID A B +> -- - --- +> 5 5 70 +> 9 9 100 +> rows: 2 + +-- MERGE INTO from SELECT + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (3, 3, 6), (10, 10, 11)); +> ID A B +> -- - -- +> 3 3 50 +> rows: 1 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (4, 4, 7), (11, 11, 12)); +> ID A B +> -- -- -- +> 11 11 12 +> 4 4 7 +> rows: 2 + +SELECT * FROM FINAL TABLE (MERGE INTO TEST_VIEW KEY(ID) SELECT * FROM VALUES (5, 5, 8), (12, 12, 13)); +> ID A B +> -- -- --- +> 12 12 130 +> 5 5 80 +> rows: 2 + +-- MERGE USING + +SELECT * FROM OLD TABLE (MERGE INTO TEST_VIEW TEST USING + (VALUES (3, 3, 7), (10, 10, 12), (13, 13, 14)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 3 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 3 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- --- +> 10 10 110 +> 3 3 60 +> rows: 2 + +SELECT * FROM NEW TABLE (MERGE INTO TEST_VIEW TEST USING + (VALUES (4, 4, 8), (11, 11, 13), (14, 14, 15)) S(ID, A, B) + ON TEST.ID = S.ID + WHEN MATCHED AND S.ID = 4 THEN UPDATE SET TEST.B = S.B + WHEN MATCHED AND S.ID <> 4 THEN DELETE + WHEN NOT MATCHED THEN INSERT VALUES (S.ID, S.A, S.B)); +> ID A B +> -- -- -- +> 14 14 15 +> 4 4 8 +> rows: 2 + +DROP TABLE 
TEST CASCADE; +> ok + +CREATE TABLE TEST(ID BIGINT, DATA CHARACTER LARGE OBJECT); +> ok + +INSERT INTO TEST VALUES (1, REPEAT('A', 1000)); +> update count: 1 + +SELECT ID FROM FINAL TABLE (INSERT INTO TEST VALUES (2, REPEAT('B', 1000))); +>> 2 + +SELECT ID, SUBSTRING(DATA FROM 1 FOR 2) FROM TEST; +> ID SUBSTRING(DATA FROM 1 FOR 2) +> -- ---------------------------- +> 1 AA +> 2 BB +> rows: 2 + +@reconnect + +SELECT ID, SUBSTRING(DATA FROM 1 FOR 2) FROM TEST; +> ID SUBSTRING(DATA FROM 1 FOR 2) +> -- ---------------------------- +> 1 AA +> 2 BB +> rows: 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/field-reference.sql b/h2/src/test/org/h2/test/scripts/other/field-reference.sql new file mode 100644 index 0000000000..203ea53b0d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/field-reference.sql @@ -0,0 +1,31 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT (R).A, (R).B FROM (VALUES CAST((1, 2) AS ROW(A INT, B INT))) T(R); +> (R).A (R).B +> ----- ----- +> 1 2 +> rows: 1 + +SELECT (R).C FROM (VALUES CAST((1, 2) AS ROW(A INT, B INT))) T(R); +> exception COLUMN_NOT_FOUND_1 + +SELECT (R).C1, (R).C2 FROM (VALUES ((1, 2))) T(R); +> (R).C1 (R).C2 +> ------ ------ +> 1 2 +> rows: 1 + +SELECT (1, 2).C2; +>> 2 + +SELECT (1, 2).C0; +> exception COLUMN_NOT_FOUND_1 + +SELECT (1, 2).C; +> exception COLUMN_NOT_FOUND_1 + +SELECT (1, 2).CX; +> exception COLUMN_NOT_FOUND_1 diff --git a/h2/src/test/org/h2/test/scripts/other/help.sql b/h2/src/test/org/h2/test/scripts/other/help.sql new file mode 100644 index 0000000000..efd05de9c6 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/help.sql @@ -0,0 +1,26 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +help abc; +> SECTION TOPIC SYNTAX TEXT +> ------- ----- ------ ---- +> rows: 0 + +HELP ABCDE EF_GH; +> SECTION TOPIC SYNTAX TEXT +> ------- ----- ------ ---- +> rows: 0 + +HELP HELP; +> SECTION TOPIC SYNTAX TEXT +> ---------------- ----- ----------------------- ---------------------------------------------------- +> Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. +> rows: 1 + +HELP he lp; +> SECTION TOPIC SYNTAX TEXT +> ---------------- ----- ----------------------- ---------------------------------------------------- +> Commands (Other) HELP HELP [ anything [...] ] Displays the help pages of SQL commands or keywords. +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/other/sequence.sql b/h2/src/test/org/h2/test/scripts/other/sequence.sql new file mode 100644 index 0000000000..16c2e25f9e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/sequence.sql @@ -0,0 +1,481 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE SEQUENCE SEQ NO CACHE; +> ok + +CREATE TABLE TEST(NEXT INT, CURRENT INT) AS (VALUES (10, 11), (20, 21)); +> ok + +SELECT NEXT "VALUE", NEXT VALUE FOR SEQ, CURRENT "VALUE", CURRENT VALUE FOR SEQ FROM TEST; +> VALUE NEXT VALUE FOR PUBLIC.SEQ VALUE CURRENT VALUE FOR PUBLIC.SEQ +> ----- ------------------------- ----- ---------------------------- +> 10 1 11 1 +> 20 2 21 2 +> rows: 2 + +EXPLAIN SELECT NEXT "VALUE", NEXT VALUE FOR SEQ, CURRENT "VALUE", CURRENT VALUE FOR SEQ FROM TEST; +>> SELECT "NEXT" AS "VALUE", NEXT VALUE FOR "PUBLIC"."SEQ", "CURRENT" AS "VALUE", CURRENT VALUE FOR "PUBLIC"."SEQ" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE S1 START WITH 11; +> ok + +CREATE SEQUENCE S2 START WITH 61; +> ok + +SELECT NEXT VALUE FOR S1 A, NEXT VALUE FOR S2 B, NEXT VALUE FOR S1 C, NEXT VALUE FOR S2 D FROM SYSTEM_RANGE(1, 2); +> A B C D +> -- -- -- -- +> 11 61 11 61 +> 12 62 12 62 +> rows: 2 + +CREATE TABLE TEST(A BIGINT, B BIGINT, C BIGINT, D BIGINT, V INT) AS + SELECT NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, X FROM SYSTEM_RANGE(1, 2); +> ok + +INSERT INTO TEST + SELECT NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, X FROM SYSTEM_RANGE(3, 4); +> update count: 2 + +INSERT INTO TEST VALUES + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, 5), + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, 6); +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 15 65 15 65 3 +> 16 66 16 66 4 +> 17 67 17 67 5 +> 18 68 18 68 6 +> rows: 6 + +UPDATE TEST SET A = NEXT VALUE FOR S1, B = NEXT VALUE FOR S2, C = NEXT VALUE FOR S1, D = NEXT VALUE FOR S2 + WHERE V BETWEEN 3 AND 4; +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 
+> 19 69 19 69 3 +> 20 70 20 70 4 +> rows: 6 + +MERGE INTO TEST D USING (VALUES 7, 8) S ON D.V = S.C1 + WHEN NOT MATCHED THEN INSERT VALUES + (NEXT VALUE FOR S1, NEXT VALUE FOR S2, NEXT VALUE FOR S1, NEXT VALUE FOR S2, S.C1); +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 +> 19 69 19 69 3 +> 20 70 20 70 4 +> 21 71 21 71 7 +> 22 72 22 72 8 +> rows: 8 + +MERGE INTO TEST D USING (VALUES 7, 8) S ON D.V = S.C1 + WHEN MATCHED THEN UPDATE + SET A = NEXT VALUE FOR S1, B = NEXT VALUE FOR S2, C = NEXT VALUE FOR S1, D = NEXT VALUE FOR S2; +> update count: 2 + +TABLE TEST; +> A B C D V +> -- -- -- -- - +> 13 63 13 63 1 +> 14 64 14 64 2 +> 17 67 17 67 5 +> 18 68 18 68 6 +> 19 69 19 69 3 +> 20 70 20 70 4 +> 23 73 23 73 7 +> 24 74 24 74 8 +> rows: 8 + +DROP TABLE TEST; +> ok + +SET MODE MariaDB; +> ok + +SELECT NEXT VALUE FOR S1 A, NEXT VALUE FOR S2 B, NEXT VALUE FOR S1 C, NEXT VALUE FOR S2 D FROM SYSTEM_RANGE(1, 2); +> A B C D +> -- -- -- -- +> 25 75 26 76 +> 27 77 28 78 +> rows: 2 + +SET MODE Regular; +> ok + +DROP SEQUENCE S1; +> ok + +DROP SEQUENCE S2; +> ok + +CREATE SEQUENCE SEQ; +> ok + +SELECT SEQ.NEXTVAL; +> exception COLUMN_NOT_FOUND_1 + +SELECT SEQ.CURRVAL; +> exception COLUMN_NOT_FOUND_1 + +DROP SEQUENCE SEQ; +> ok + +SET MODE Oracle; +> ok + +create sequence seq; +> ok + +select case seq.nextval when 2 then 'two' when 3 then 'three' when 1 then 'one' else 'other' end result from dual; +> RESULT +> ------ +> one +> rows: 1 + +drop sequence seq; +> ok + +create schema s authorization sa; +> ok + +alter sequence if exists s.seq restart with 10; +> ok + +create sequence s.seq cache 0; +> ok + +alter sequence if exists s.seq restart with 3; +> ok + +select s.seq.nextval as x; +> X +> - +> 3 +> rows: 1 + +drop sequence s.seq; +> ok + +create sequence s.seq cache 0; +> ok + +alter sequence s.seq restart with 10; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> 
---------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SCHEMA IF NOT EXISTS "S" AUTHORIZATION "SA"; +> DROP SEQUENCE IF EXISTS "S"."SEQ"; +> CREATE SEQUENCE "S"."SEQ" AS NUMERIC(19, 0) START WITH 1 RESTART WITH 10 NO CACHE; +> rows (ordered): 4 + +drop schema s cascade; +> ok + +create schema TEST_SCHEMA; +> ok + +create sequence TEST_SCHEMA.TEST_SEQ; +> ok + +select TEST_SCHEMA.TEST_SEQ.CURRVAL; +> exception CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +select TEST_SCHEMA.TEST_SEQ.nextval; +>> 1 + +select TEST_SCHEMA.TEST_SEQ.CURRVAL; +>> 1 + +drop schema TEST_SCHEMA cascade; +> ok + +CREATE TABLE TEST(CURRVAL INT, NEXTVAL INT); +> ok + +INSERT INTO TEST VALUES (3, 4); +> update count: 1 + +SELECT TEST.CURRVAL, TEST.NEXTVAL FROM TEST; +> CURRVAL NEXTVAL +> ------- ------- +> 3 4 +> rows: 1 + +DROP TABLE TEST; +> ok + +SET MODE Regular; +> ok + +CREATE SEQUENCE SEQ01 AS TINYINT; +> ok + +CREATE SEQUENCE SEQ02 AS SMALLINT; +> ok + +CREATE SEQUENCE SEQ03 AS INTEGER; +> ok + +CREATE SEQUENCE SEQ04 AS BIGINT; +> ok + +CREATE SEQUENCE SEQ05 AS REAL; +> ok + +CREATE SEQUENCE SEQ06 AS DOUBLE PRECISION; +> ok + +CREATE SEQUENCE SEQ AS NUMERIC(10, 2); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ AS NUMERIC(100, 20); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ07 AS DECIMAL; +> ok + +CREATE SEQUENCE SEQ08 AS DECIMAL(10); +> ok + +CREATE SEQUENCE SEQ11 AS DECIMAL(10, 2); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ09 AS FLOAT; +> ok + +CREATE SEQUENCE SEQ10 AS FLOAT(20); +> ok + +CREATE SEQUENCE SEQ11 AS DECFLOAT; +> ok + +CREATE SEQUENCE SEQ12 AS DECFLOAT(10); +> ok + +CREATE SEQUENCE SEQ13 AS DECFLOAT(20); +> ok + +SELECT SEQUENCE_NAME, DATA_TYPE, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, MAXIMUM_VALUE, + DECLARED_DATA_TYPE, DECLARED_NUMERIC_PRECISION, DECLARED_NUMERIC_SCALE FROM INFORMATION_SCHEMA.SEQUENCES; +> 
SEQUENCE_NAME DATA_TYPE NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE MAXIMUM_VALUE DECLARED_DATA_TYPE DECLARED_NUMERIC_PRECISION DECLARED_NUMERIC_SCALE +> ------------- ---------------- ----------------- ----------------------- ------------- ------------------- ------------------ -------------------------- ---------------------- +> SEQ01 TINYINT 8 2 0 127 TINYINT null null +> SEQ02 SMALLINT 16 2 0 32767 SMALLINT null null +> SEQ03 INTEGER 32 2 0 2147483647 INTEGER null null +> SEQ04 BIGINT 64 2 0 9223372036854775807 BIGINT null null +> SEQ05 REAL 24 2 null 16777216 REAL null null +> SEQ06 DOUBLE PRECISION 53 2 null 9007199254740992 DOUBLE PRECISION null null +> SEQ07 NUMERIC 19 10 0 9223372036854775807 DECIMAL null null +> SEQ08 NUMERIC 10 10 0 9999999999 DECIMAL 10 null +> SEQ09 DOUBLE PRECISION 53 2 null 9007199254740992 FLOAT null null +> SEQ10 REAL 24 2 null 16777216 FLOAT 20 null +> SEQ11 DECFLOAT 19 10 null 9223372036854775807 DECFLOAT null null +> SEQ12 DECFLOAT 10 10 null 10000000000 DECFLOAT 10 null +> SEQ13 DECFLOAT 19 10 null 9223372036854775807 DECFLOAT 20 null +> rows: 13 + +SELECT NEXT VALUE FOR SEQ01 IS OF (TINYINT); +>> TRUE + +DROP ALL OBJECTS; +> ok + +CREATE SEQUENCE SEQ AS NUMERIC(10, 20); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ AS VARCHAR(10); +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE SEQUENCE SEQ NO; +> exception SYNTAX_ERROR_2 + +CREATE TABLE TEST( + A BIGINT GENERATED ALWAYS AS (C + 1), + B BIGINT GENERATED ALWAYS AS (D + 1), + C BIGINT GENERATED ALWAYS AS IDENTITY, + D BIGINT DEFAULT 3, + E BIGINT); +> ok + +INSERT INTO TEST(E) VALUES 10; +> update count: 1 + +TABLE TEST; +> A B C D E +> - - - - -- +> 2 4 1 3 10 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE SEQUENCE SEQ MINVALUE 1 MAXVALUE 2; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT CACHE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> 2 + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION; 
+> SCRIPT +> ----------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SEQUENCE "PUBLIC"."SEQ" START WITH 1 MAXVALUE 2 EXHAUSTED; +> rows (ordered): 2 + +@reconnect + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ CYCLE; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +ALTER SEQUENCE SEQ INCREMENT BY -1; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 2 + +SELECT NEXT VALUE FOR SEQ; +>> 1 + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ MINVALUE 9223372036854775806; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ NO CACHE RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ CACHE 2 MINVALUE 9223372036854775805 RESTART WITH 9223372036854775805; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775805 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> 9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok + +CREATE SEQUENCE SEQ INCREMENT BY -1 MAXVALUE -9223372036854775807; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ NO CACHE RESTART; +> ok + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +ALTER SEQUENCE SEQ CACHE 2 MAXVALUE -9223372036854775806 RESTART WITH -9223372036854775806; +> ok + +SELECT 
NEXT VALUE FOR SEQ; +>> -9223372036854775806 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775807 + +SELECT BASE_VALUE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> -9223372036854775808 + +SELECT NEXT VALUE FOR SEQ; +>> -9223372036854775808 + +SELECT BASE_VALUE FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_NAME = 'SEQ'; +>> null + +SELECT NEXT VALUE FOR SEQ; +> exception SEQUENCE_EXHAUSTED + +DROP SEQUENCE SEQ; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/set.sql b/h2/src/test/org/h2/test/scripts/other/set.sql new file mode 100644 index 0000000000..35296158fa --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/set.sql @@ -0,0 +1,244 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +@reconnect off + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ UNCOMMITTED + +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> REPEATABLE READ + +SET TRANSACTION ISOLATION LEVEL SNAPSHOT; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SNAPSHOT + +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SERIALIZABLE + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> 
READ UNCOMMITTED + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> READ COMMITTED + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> REPEATABLE READ + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SNAPSHOT + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE; +> ok + +SELECT ISOLATION_LEVEL FROM INFORMATION_SCHEMA.SESSIONS WHERE SESSION_ID = SESSION_ID(); +>> SERIALIZABLE + +SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'VARIABLE_BINARY'; +>> FALSE + +CREATE MEMORY TABLE TEST(B BINARY); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> -------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "B" BINARY ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +SET VARIABLE_BINARY TRUE; +> ok + +SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'VARIABLE_BINARY'; +>> TRUE + +CREATE MEMORY TABLE TEST(B BINARY); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION TABLE TEST; +> SCRIPT +> ---------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "B" BINARY VARYING ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 3 + +DROP TABLE TEST; +> ok + +SET VARIABLE_BINARY FALSE; +> ok + +SET LOCK_MODE 0; +> ok + +CALL LOCK_MODE(); +>> 0 + +SET LOCK_MODE 1; +> ok + +CALL 
LOCK_MODE(); +>> 3 + +SET LOCK_MODE 2; +> ok + +CALL LOCK_MODE(); +>> 3 + +SET LOCK_MODE 3; +> ok + +CALL LOCK_MODE(); +>> 3 + +@reconnect on + +SELECT CURRENT_PATH; +> CURRENT_PATH +> ------------ +> +> rows: 1 + +SET SCHEMA_SEARCH_PATH PUBLIC, INFORMATION_SCHEMA; +> ok + +SELECT CURRENT_PATH; +>> "PUBLIC","INFORMATION_SCHEMA" + +SET SCHEMA_SEARCH_PATH PUBLIC; +> ok + +CREATE TABLE TEST(C1 INT, C2 INT); +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LOW; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING HIGH; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC LAST +> C2 DESC FIRST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING FIRST; +> ok + +CREATE INDEX IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC FIRST +> C2 DESC FIRST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LAST; +> ok + +CREATE INDEX 
IDX ON TEST(C1 ASC, C2 DESC); +> ok + +SELECT COLUMN_NAME, ORDERING_SPECIFICATION, NULL_ORDERING FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'IDX'; +> COLUMN_NAME ORDERING_SPECIFICATION NULL_ORDERING +> ----------- ---------------------- ------------- +> C1 ASC LAST +> C2 DESC LAST +> rows: 2 + +DROP INDEX IDX; +> ok + +SET DEFAULT_NULL_ORDERING LOW; +> ok + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql b/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql new file mode 100644 index 0000000000..2cb8a7a17d --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/two_phase_commit.sql @@ -0,0 +1,28 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- issue #3033 +CREATE TABLE TEST(A BIGINT PRIMARY KEY, B BLOB); +> ok + +INSERT INTO TEST VALUES(1, REPEAT('010203040506070809101112',11)); +> update count: 1 + +@autocommit off + +DELETE FROM TEST WHERE A = 1; +> update count: 1 + +PREPARE COMMIT commit1; +> ok + +@reconnect + +ROLLBACK TRANSACTION commit1; +> ok + +SELECT B FROM TEST WHERE A = 1; +>> X'303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132303130323033303430353036303730383039313031313132' + diff --git a/h2/src/test/org/h2/test/scripts/other/unique_include.sql b/h2/src/test/org/h2/test/scripts/other/unique_include.sql new file mode 100644 index 0000000000..9f5428045a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/other/unique_include.sql 
@@ -0,0 +1,76 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE UNIQUE INDEX TEST_IDX ON TEST(C) INCLUDE(B); +> ok + +INSERT INTO TEST VALUES (10, 20, 1), (11, 20, 2), (12, 21, 3); +> update count: 3 + +INSERT INTO TEST VALUES (13, 22, 1); +> exception DUPLICATE_KEY_1 + +SELECT INDEX_NAME, TABLE_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE INDEX_NAME = 'TEST_IDX'; +> INDEX_NAME TABLE_NAME INDEX_TYPE_NAME +> ---------- ---------- --------------- +> TEST_IDX TEST UNIQUE INDEX +> rows: 1 + +SELECT INDEX_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, IS_UNIQUE FROM INFORMATION_SCHEMA.INDEX_COLUMNS + WHERE INDEX_NAME = 'TEST_IDX' ORDER BY ORDINAL_POSITION; +> INDEX_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION IS_UNIQUE +> ---------- ---------- ----------- ---------------- --------- +> TEST_IDX TEST C 1 TRUE +> TEST_IDX TEST B 2 FALSE +> rows (ordered): 2 + +SELECT DB_OBJECT_SQL('INDEX', 'PUBLIC', 'TEST_IDX'); +>> CREATE UNIQUE INDEX "PUBLIC"."TEST_IDX" ON "PUBLIC"."TEST"("C" NULLS FIRST) INCLUDE("B" NULLS FIRST) + +ALTER TABLE TEST ADD CONSTRAINT TEST_UNI_C UNIQUE(C); +> ok + +SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE, TABLE_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'TEST'; +> CONSTRAINT_NAME CONSTRAINT_TYPE TABLE_NAME INDEX_NAME +> --------------- --------------- ---------- ---------- +> TEST_UNI_C UNIQUE TEST TEST_IDX +> rows: 1 + +SELECT CONSTRAINT_NAME, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE + WHERE CONSTRAINT_NAME = 'TEST_UNI_C'; +> CONSTRAINT_NAME TABLE_NAME COLUMN_NAME ORDINAL_POSITION +> --------------- ---------- ----------- ---------------- +> TEST_UNI_C TEST C 1 +> rows: 1 + +EXPLAIN SELECT B, C FROM TEST ORDER BY C, B; +>> SELECT "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ 
ORDER BY 2, 1 /* index sorted */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A_B ON TEST(A) INCLUDE (B); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A ON TEST(A); +> ok + +CREATE UNIQUE INDEX TEST_IDX_A_B_C ON TEST(A) INCLUDE (B, C); +> ok + +ALTER TABLE TEST ADD CONSTRAINT UNI_TEST_A UNIQUE(A); +> ok + +SELECT INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_NAME = 'UNI_TEST_A'; +>> TEST_IDX_A + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/package.html b/h2/src/test/org/h2/test/scripts/package.html new file mode 100644 index 0000000000..cf8c836c51 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/package.html @@ -0,0 +1,14 @@ + + + + +Javadoc package documentation +

    + +Script test files. + +

    \ No newline at end of file diff --git a/h2/src/test/org/h2/test/scripts/parser/comments.sql b/h2/src/test/org/h2/test/scripts/parser/comments.sql new file mode 100644 index 0000000000..aa4f6e635a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/parser/comments.sql @@ -0,0 +1,50 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CALL 1 /* comment */ ;; +>> 1 + +CALL 1 /* comment */ ; +>> 1 + +call /* remark * / * /* ** // end */*/ 1; +>> 1 + +call /*/*/ */*/ 1; +>> 1 + +call /*1/*1*/1*/1; +>> 1 + +--- remarks/comments/syntax ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST( +ID INT PRIMARY KEY, -- this is the primary key, type {integer} +NAME VARCHAR(255) -- this is a string +); +> ok + +INSERT INTO TEST VALUES( +1 /* ID */, +'Hello' // NAME +); +> update count: 1 + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +DROP_ TABLE_ TEST_T; +> exception SYNTAX_ERROR_2 + +DROP TABLE TEST /*; +> exception SYNTAX_ERROR_1 + +call /* remark * / * /* ** // end */ 1; +> exception SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/parser/identifiers.sql b/h2/src/test/org/h2/test/scripts/parser/identifiers.sql new file mode 100644 index 0000000000..6d8bb4957a --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/parser/identifiers.sql @@ -0,0 +1,52 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT 1 "A""B""""C"""; +> A"B""C" +> ------- +> 1 +> rows: 1 + +SELECT 1 ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 1 +> rows: 1 + +SELECT 1 ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
+> 1 +> rows: 1 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456"; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234""5ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> exception NAME_TOO_LONG_2 + +SELECT 1 "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345""ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"; +> exception NAME_TOO_LONG_2 + +SELECT 3 U&"\0031", 4 U&"/0032" UESCAPE '/'; +> 1 2 +> - - +> 3 4 +> rows: 1 + +EXPLAIN SELECT 1 U&"!2030" UESCAPE '!'; +>> SELECT 1 AS U&"\2030" + +SELECT 1 U&"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ01234\0035"; +> ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 1 +> rows: 1 + +SELECT 1 
U&"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ012345ABCDEFGHIJKLMNOPQRSTUVWXYZ01234\00356"; +> exception NAME_TOO_LONG_2 diff --git a/h2/src/test/org/h2/test/scripts/predicates/between.sql b/h2/src/test/org/h2/test/scripts/predicates/between.sql new file mode 100644 index 0000000000..0d4594f089 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/between.sql @@ -0,0 +1,107 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, X INT, A INT, B INT) AS VALUES + (1, NULL, NULL, NULL), + (2, NULL, NULL, 1), + (3, NULL, 1, NULL), + (4, 1, NULL, NULL), + (5, NULL, 1, 1), + (6, NULL, 1, 2), + (7, NULL, 2, 1), + (8, 1, NULL, 1), + (9, 1, NULL, 2), + (10, 2, NULL, 1), + (11, 1, 1, NULL), + (12, 1, 2, NULL), + (13, 2, 1, NULL), + (14, 1, 1, 1), + (15, 1, 1, 2), + (16, 1, 2, 1), + (17, 2, 1, 1), + (18, 1, 2, 2), + (19, 2, 1, 2), + (20, 2, 2, 1), + (21, 1, 2, 3), + (22, 1, 3, 2), + (23, 2, 1, 3), + (24, 2, 3, 1), + (25, 3, 1, 2), + (26, 3, 2, 1); +> ok + +EXPLAIN SELECT X BETWEEN A AND B A1, X BETWEEN ASYMMETRIC A AND B A2 FROM TEST; +>> SELECT "X" BETWEEN "A" AND "B" AS "A1", "X" BETWEEN "A" AND "B" AS "A2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN SYMMETRIC A AND B S1 FROM TEST; +>> SELECT "X" BETWEEN SYMMETRIC "A" AND "B" AS "S1" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X NOT BETWEEN A AND B NA1, X NOT BETWEEN ASYMMETRIC A AND B NA2 FROM TEST; +>> SELECT "X" NOT BETWEEN "A" AND "B" AS "NA1", "X" NOT BETWEEN "A" AND "B" AS "NA2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X NOT BETWEEN SYMMETRIC A AND B NS1 FROM TEST; +>> SELECT "X" NOT BETWEEN SYMMETRIC 
"A" AND "B" AS "NS1" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT X BETWEEN A AND B A1, X BETWEEN ASYMMETRIC A AND B A2, A <= X AND X <= B A3, + X BETWEEN SYMMETRIC A AND B S1, A <= X AND X <= B OR A >= X AND X >= B S2, + X NOT BETWEEN A AND B NA1, X NOT BETWEEN ASYMMETRIC A AND B NA2, NOT (A <= X AND X <= B) NA3, + X NOT BETWEEN SYMMETRIC A AND B NS1, NOT (A <= X AND X <= B OR A >= X AND X >= B) NS2 + FROM TEST ORDER BY ID; +> A1 A2 A3 S1 S2 NA1 NA2 NA3 NS1 NS2 +> ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> null null null null null null null null null null +> FALSE FALSE FALSE null null TRUE TRUE TRUE null null +> null null null null null null null null null null +> FALSE FALSE FALSE null null TRUE TRUE TRUE null null +> null null null null null null null null null null +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> TRUE TRUE TRUE TRUE TRUE FALSE FALSE FALSE FALSE FALSE +> FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE FALSE FALSE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> FALSE FALSE FALSE FALSE FALSE TRUE TRUE TRUE TRUE TRUE +> 
rows (ordered): 26 + +EXPLAIN SELECT * FROM TEST WHERE ID BETWEEN 1 AND 2; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."X", "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID >= 1 AND ID <= 2 */ WHERE "ID" BETWEEN 1 AND 2 + +EXPLAIN SELECT * FROM TEST WHERE ID NOT BETWEEN 1 AND 2; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."X", "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "ID" NOT BETWEEN 1 AND 2 + +EXPLAIN SELECT NULL BETWEEN A AND B, X BETWEEN NULL AND NULL, X BETWEEN SYMMETRIC A AND NULL, X BETWEEN SYMMETRIC NULL AND B, X BETWEEN SYMMETRIC NULL AND NULL FROM TEST; +>> SELECT UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN 1 AND 1, X NOT BETWEEN 1 AND 1, 2 BETWEEN SYMMETRIC 3 AND 1 FROM TEST; +>> SELECT "X" = 1, "X" <> 1, TRUE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT 2 BETWEEN 1 AND B, 2 BETWEEN A AND 3, 2 BETWEEN A AND B FROM TEST; +>> SELECT 2 BETWEEN 1 AND "B", 2 BETWEEN "A" AND 3, 2 BETWEEN "A" AND "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT X BETWEEN 1 AND NULL, X BETWEEN NULL AND 3 FROM TEST; +>> SELECT "X" BETWEEN 1 AND NULL, "X" BETWEEN NULL AND 3 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT NOT (X BETWEEN A AND B), NOT (X NOT BETWEEN A AND B) FROM TEST; +>> SELECT "X" NOT BETWEEN "A" AND "B", "X" BETWEEN "A" AND "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok + +SELECT CURRENT_TIME BETWEEN CURRENT_DATE AND (CURRENT_DATE + INTERVAL '1' DAY); +> exception TYPES_ARE_NOT_COMPARABLE_2 diff --git a/h2/src/test/org/h2/test/scripts/predicates/distinct.sql b/h2/src/test/org/h2/test/scripts/predicates/distinct.sql new file mode 100644 index 0000000000..6fcd2e2d40 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/distinct.sql @@ -0,0 +1,66 @@ +-- Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +-- Quantified distinct predicate + +SELECT 1 IS DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT 1 IS DISTINCT FROM ALL(VALUES NULL, 2); +>> TRUE + +SELECT NULL IS DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT NULL IS DISTINCT FROM ALL(VALUES 1, 2); +>> TRUE + +SELECT 1 IS NOT DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT 1 IS NOT DISTINCT FROM ALL(VALUES 1, 1); +>> TRUE + +SELECT NULL IS NOT DISTINCT FROM ALL(VALUES 1, NULL, 2); +>> FALSE + +SELECT NULL IS NOT DISTINCT FROM ALL(VALUES NULL, NULL); +>> TRUE + +SELECT 1 IS DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT 1 IS DISTINCT FROM ANY(VALUES 1, 1); +>> FALSE + +SELECT NULL IS DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT NULL IS DISTINCT FROM ANY(VALUES NULL, NULL); +>> FALSE + +SELECT 1 IS NOT DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT 1 IS NOT DISTINCT FROM ANY(VALUES NULL, 2); +>> FALSE + +SELECT NULL IS NOT DISTINCT FROM ANY(VALUES 1, NULL, 2); +>> TRUE + +SELECT NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2); +>> FALSE + +SELECT NOT (NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2)); +>> TRUE + +EXPLAIN SELECT NOT (NULL IS NOT DISTINCT FROM ANY(VALUES 1, 2)); +>> SELECT NOT (NULL IS NOT DISTINCT FROM ANY( VALUES (1), (2))) + +SELECT (1, NULL) IS NOT DISTINCT FROM ANY(VALUES (1, NULL), (2, NULL)); +>> TRUE + +SELECT (1, NULL) IS NOT DISTINCT FROM ANY(VALUES (2, NULL), (3, NULL)); +>> FALSE diff --git a/h2/src/test/org/h2/test/scripts/predicates/in.sql b/h2/src/test/org/h2/test/scripts/predicates/in.sql new file mode 100644 index 0000000000..a57b38c1ef --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/in.sql @@ -0,0 +1,428 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table test(id int) as select 1; +> ok + +select * from test where id in (select id from test order by 'x'); +> ID +> -- +> 1 +> rows: 1 + +drop table test; +> ok + +select x, x in(2, 3) i from system_range(1, 2) group by x; +> X I +> - ----- +> 1 FALSE +> 2 TRUE +> rows: 2 + +select * from system_range(1, 1) where x = x + 1 or x in(2, 0); +> X +> - +> rows: 0 + +select * from system_range(1, 1) where cast('a' || x as varchar_ignorecase) in ('A1', 'B1'); +> X +> - +> 1 +> rows: 1 + +create table test(x int) as select x from system_range(1, 2); +> ok + +select * from (select rownum r from test) where r in (1, 2); +> R +> - +> 1 +> 2 +> rows: 2 + +select * from (select rownum r from test) where r = 1 or r = 2; +> R +> - +> 1 +> 2 +> rows: 2 + +drop table test; +> ok + +select x from system_range(1, 1) where x in (select x from system_range(1, 1) group by x order by max(x)); +> X +> - +> 1 +> rows: 1 + +create table test(id int) as (values 1, 2, 4); +> ok + +select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1); +> ID X +> -- ----- +> 1 FALSE +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 4 + +select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; +> ID X +> -- ----- +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 3 + +select a.id, 4 in(select a.id) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; +> ID X +> -- ----- +> 1 FALSE +> 2 FALSE +> 4 TRUE +> rows: 3 + +drop table test; +> ok + +create table test(id int primary key, d int) as (values (1, 1), (2, 1)); +> ok + +select id from test where id in (1, 2) and d = 1; +> ID +> -- +> 1 +> 2 +> rows: 2 + +drop table test; +> ok + +create table test(id int) as (values null, 1); +> ok + +select * from test where id not in (select id from test where 1=0); +> ID +> ---- +> 1 +> null +> rows: 2 + +select * from test where null not in (select id from test where 1=0); +> ID +> ---- +> 1 +> null +> rows: 2 
+ +select * from test where not (id in (select id from test where 1=0)); +> ID +> ---- +> 1 +> null +> rows: 2 + +select * from test where not (null in (select id from test where 1=0)); +> ID +> ---- +> 1 +> null +> rows: 2 + +drop table test; +> ok + +create table t1 (id int primary key) as (select x from system_range(1, 1000)); +> ok + +create table t2 (id int primary key) as (select x from system_range(1, 1000)); +> ok + +explain select count(*) from t1 where t1.id in ( select t2.id from t2 ); +>> SELECT COUNT(*) FROM "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: ID IN(SELECT DISTINCT T2.ID FROM PUBLIC.T2 /* PUBLIC.T2.tableScan */) */ WHERE "T1"."ID" IN( SELECT DISTINCT "T2"."ID" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */) + +select count(*) from t1 where t1.id in ( select t2.id from t2 ); +> COUNT(*) +> -------- +> 1000 +> rows: 1 + +drop table t1, t2; +> ok + +select count(*) from system_range(1, 2) where x in(1, 1, 1); +> COUNT(*) +> -------- +> 1 +> rows: 1 + +create table test(id int primary key) as (values 1, 2, 3); +> ok + +explain select * from test where id in(1, 2, null); +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2, NULL) */ WHERE "ID" IN(1, 2, NULL) + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) AS (VALUES (1, 'Hello'), (2, 'World')); +> ok + +select * from test where id in (select id from test); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +select * from test where id in ((select id from test)); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +select * from test where id in (((select id from test))); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +DROP TABLE TEST; +> ok + +create table test(v boolean) as (values unknown, true, false); +> ok + +SELECT CASE WHEN NOT (false IN (null)) THEN false END; +> NULL +> ---- +> null +> rows: 1 + +select a.v as av, b.v as bv, a.v IN (b.v), not a.v IN (b.v) from test a, test b; +> AV BV A.V = B.V A.V <> 
B.V +> ----- ----- --------- ---------- +> FALSE FALSE TRUE FALSE +> FALSE TRUE FALSE TRUE +> FALSE null null null +> TRUE FALSE FALSE TRUE +> TRUE TRUE TRUE FALSE +> TRUE null null null +> null FALSE null null +> null TRUE null null +> null null null null +> rows: 9 + +select a.v as av, b.v as bv, a.v IN (b.v, null), not a.v IN (b.v, null) from test a, test b; +> AV BV A.V IN(B.V, NULL) A.V NOT IN(B.V, NULL) +> ----- ----- ----------------- --------------------- +> FALSE FALSE TRUE FALSE +> FALSE TRUE null null +> FALSE null null null +> TRUE FALSE null null +> TRUE TRUE TRUE FALSE +> TRUE null null null +> null FALSE null null +> null TRUE null null +> null null null null +> rows: 9 + +drop table test; +> ok + +SELECT CASE WHEN NOT (false IN (null)) THEN false END; +> NULL +> ---- +> null +> rows: 1 + +create table test(a int, b int) as select 2, 0; +> ok + +create index idx on test(b, a); +> ok + +select count(*) from test where a in(2, 10) and b in(0, null); +>> 1 + +drop table test; +> ok + +create table test(a int, b int) as select 1, 0; +> ok + +create index idx on test(b, a); +> ok + +select count(*) from test where b in(null, 0) and a in(1, null); +>> 1 + +drop table test; +> ok + +create table test(a int, b int, unique(a, b)); +> ok + +insert into test values(1,1), (1,2); +> update count: 2 + +select count(*) from test where a in(1,2) and b in(1,2); +>> 2 + +drop table test; +> ok + +SELECT * FROM SYSTEM_RANGE(1, 10) WHERE X IN ((SELECT 1), (SELECT 2)); +> X +> - +> 1 +> 2 +> rows: 2 + +EXPLAIN SELECT * FROM SYSTEM_RANGE(1, 10) WHERE X IN ((SELECT X FROM SYSTEM_RANGE(1, 1)), (SELECT X FROM SYSTEM_RANGE(2, 2))); +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 10) /* range index: X IN((SELECT X FROM SYSTEM_RANGE(1, 1) /* range index */), (SELECT X FROM SYSTEM_RANGE(2, 2) /* range index */)) */ WHERE "X" IN((SELECT "X" FROM SYSTEM_RANGE(1, 1) /* range index */), (SELECT "X" FROM SYSTEM_RANGE(2, 2) /* range index */)) + +-- Tests for IN predicate with an 
empty list + +SELECT 1 WHERE 1 IN (); +> 1 +> - +> rows: 0 + +SELECT 1 WHERE 1 NOT IN (); +>> 1 + +SELECT CASE 1 WHEN IN() THEN 1 ELSE 2 END; +> exception SYNTAX_ERROR_2 + +SET MODE DB2; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Derby; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE MSSQLServer; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE HSQLDB; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE MySQL; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Oracle; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE PostgreSQL; +> ok + +SELECT 1 WHERE 1 IN (); +> exception SYNTAX_ERROR_2 + +SET MODE Regular; +> ok + +CREATE TABLE TEST(A INT, B INT) AS (VALUES (1, 1), (1, 2), (2, 1), (2, NULL)); +> ok + +SELECT * FROM TEST WHERE (A, B) IN ((1, 1), (2, 1), (2, 2), (2, NULL)); +> A B +> - - +> 1 1 +> 2 1 +> rows: 2 + +DROP TABLE TEST; +> ok + +SELECT LOCALTIME IN(DATE '2000-01-01', DATE '2010-01-01'); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +SELECT LOCALTIME IN ((VALUES DATE '2000-01-01', DATE '2010-01-01')); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +CREATE TABLE TEST(V INT) AS VALUES 1, 2; +> ok + +SELECT V, V IN (1, 1000000000000) FROM TEST; +> V V IN(1, 1000000000000) +> - ---------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (1, 1000000000000) FROM TEST; +>> SELECT "V", "V" IN(1, 1000000000000) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +CREATE UNIQUE INDEX TEST_IDX ON TEST(V); +> ok + +SELECT V, V IN (1, 1000000000000) FROM TEST; +> V V IN(1, 1000000000000) +> - ---------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (1, 1000000000000) FROM TEST; +>> SELECT "V", "V" IN(1, 1000000000000) FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C BIGINT PRIMARY KEY) AS VALUES 1, 1000000000000; +> ok + +SELECT V, V IN (SELECT * 
FROM TEST) FROM (VALUES 1, 2) T(V); +> V V IN( SELECT DISTINCT PUBLIC.TEST.C FROM PUBLIC.TEST) +> - ----------------------------------------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 2) T(V); +>> SELECT "V", "V" IN( SELECT DISTINCT "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) FROM (VALUES (1), (2)) "T"("V") /* table scan */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(C INTEGER PRIMARY KEY) AS VALUES 1, 2; +> ok + +SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 1000000000000) T(V); +> V V IN( SELECT DISTINCT PUBLIC.TEST.C FROM PUBLIC.TEST) +> ------------- ----------------------------------------------------- +> 1 TRUE +> 1000000000000 FALSE +> rows: 2 + +EXPLAIN SELECT V, V IN (SELECT * FROM TEST) FROM (VALUES 1, 1000000000000) T(V); +>> SELECT "V", "V" IN( SELECT DISTINCT "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) FROM (VALUES (1), (1000000000000)) "T"("V") /* table scan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/like.sql b/h2/src/test/org/h2/test/scripts/predicates/like.sql new file mode 100644 index 0000000000..de01420418 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/like.sql @@ -0,0 +1,214 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table Foo (A varchar(20), B integer); +> ok + +insert into Foo (A, B) values ('abcd', 1), ('abcd', 2); +> update count: 2 + +select * from Foo where A like 'abc%' escape '\' AND B=1; +> A B +> ---- - +> abcd 1 +> rows: 1 + +drop table Foo; +> ok + +--- test case for number like string --------------------------------------------------------------------------------------------- +CREATE TABLE test (one bigint primary key, two bigint, three bigint); +> ok + +CREATE INDEX two ON test(two); +> ok + +INSERT INTO TEST VALUES(1, 2, 3), (10, 20, 30), (100, 200, 300); +> update count: 3 + +INSERT INTO TEST VALUES(2, 6, 9), (20, 60, 90), (200, 600, 900); +> update count: 3 + +SELECT * FROM test WHERE one LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> 2 6 9 +> 20 60 90 +> 200 600 900 +> rows: 3 + +SELECT * FROM test WHERE two LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> 1 2 3 +> 10 20 30 +> 100 200 300 +> rows: 3 + +SELECT * FROM test WHERE three LIKE '2%'; +> ONE TWO THREE +> --- --- ----- +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, NULL), (1, 'Hello'), (2, 'World'), (3, 'Word'), (4, 'Wo%'); +> update count: 5 + +SELECT * FROM TEST WHERE NAME IS NULL; +> ID NAME +> -- ---- +> 0 null +> rows: 1 + +SELECT * FROM TEST WHERE NAME IS NOT NULL; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> 3 Word +> 4 Wo% +> rows: 4 + +SELECT * FROM TEST WHERE NAME BETWEEN 'H' AND 'Word'; +> ID NAME +> -- ----- +> 1 Hello +> 3 Word +> 4 Wo% +> rows: 3 + +SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3 AND ID <> 2; +> ID NAME +> -- ---- +> 3 Word +> rows: 1 + +SELECT * FROM TEST WHERE ID>0 AND ID<4 AND ID!=2; +> ID NAME +> -- ----- +> 1 Hello +> 3 Word +> rows: 2 + +SELECT * FROM TEST WHERE 'Hello' LIKE '_el%'; +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> 2 World +> 3 Word +> 4 Wo% +> rows: 5 + +SELECT * FROM TEST WHERE NAME LIKE 'Hello%'; +> ID NAME +> -- 
----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE NAME ILIKE 'hello%'; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE NAME ILIKE 'xxx%'; +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM TEST WHERE NAME LIKE 'Wo%'; +> ID NAME +> -- ----- +> 2 World +> 3 Word +> 4 Wo% +> rows: 3 + +SELECT * FROM TEST WHERE NAME LIKE 'Wo\%'; +> ID NAME +> -- ---- +> 4 Wo% +> rows: 1 + +SELECT * FROM TEST WHERE NAME LIKE 'WoX%' ESCAPE 'X'; +> ID NAME +> -- ---- +> 4 Wo% +> rows: 1 + +SELECT * FROM TEST WHERE NAME LIKE 'Word_'; +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM TEST WHERE NAME LIKE '%Hello%'; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE 'Hello' LIKE NAME; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT T1.*, T2.* FROM TEST AS T1, TEST AS T2 WHERE T1.ID = T2.ID AND T1.NAME LIKE T2.NAME || '%'; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 Hello +> 2 World 2 World +> 3 Word 3 Word +> 4 Wo% 4 Wo% +> rows: 4 + +SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) = 'World'; +> ID MAX(NAME) +> -- --------- +> 2 World +> rows: 1 + +SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) LIKE 'World%'; +> ID MAX(NAME) +> -- --------- +> 2 World +> rows: 1 + +EXPLAIN SELECT ID FROM TEST WHERE NAME ILIKE 'w%'; +>> SELECT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE "NAME" ILIKE 'w%' + +DROP TABLE TEST; +> ok + +SELECT S, S LIKE '%', S ILIKE '%', S REGEXP '%' FROM (VALUES NULL, '', '1') T(S); +> S CASE WHEN S IS NOT NULL THEN TRUE ELSE UNKNOWN END CASE WHEN S IS NOT NULL THEN TRUE ELSE UNKNOWN END S REGEXP '%' +> ---- -------------------------------------------------- -------------------------------------------------- ------------ +> TRUE TRUE FALSE +> 1 TRUE TRUE FALSE +> null null null null +> rows: 3 + +SELECT S, S NOT LIKE '%', S NOT ILIKE '%', S NOT REGEXP '%' FROM (VALUES NULL, '', '1') T(S); +> S CASE WHEN S IS NOT NULL THEN FALSE ELSE UNKNOWN END CASE WHEN S IS NOT NULL 
THEN FALSE ELSE UNKNOWN END S NOT REGEXP '%' +> ---- --------------------------------------------------- --------------------------------------------------- ---------------- +> FALSE FALSE TRUE +> 1 FALSE FALSE TRUE +> null null null null +> rows: 3 + +CREATE TABLE TEST(ID BIGINT PRIMARY KEY, V VARCHAR UNIQUE) AS VALUES (1, 'aa'), (2, 'bb'); +> ok + +SELECT ID FROM (SELECT * FROM TEST) WHERE V NOT LIKE 'a%'; +>> 2 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/null.sql b/h2/src/test/org/h2/test/scripts/predicates/null.sql new file mode 100644 index 0000000000..68ed9603d0 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/null.sql @@ -0,0 +1,200 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +SELECT NULL IS NULL; +>> TRUE + +SELECT NULL IS NOT NULL; +>> FALSE + +SELECT NOT NULL IS NULL; +>> FALSE + +SELECT NOT NULL IS NOT NULL; +>> TRUE + +SELECT 1 IS NULL; +>> FALSE + +SELECT 1 IS NOT NULL; +>> TRUE + +SELECT NOT 1 IS NULL; +>> TRUE + +SELECT NOT 1 IS NOT NULL; +>> FALSE + +SELECT () IS NULL; +>> TRUE + +SELECT () IS NOT NULL; +>> TRUE + +SELECT NOT () IS NULL; +>> FALSE + +SELECT NOT () IS NOT NULL; +>> FALSE + +SELECT (NULL, NULL) IS NULL; +>> TRUE + +SELECT (NULL, NULL) IS NOT NULL; +>> FALSE + +SELECT NOT (NULL, NULL) IS NULL; +>> FALSE + +SELECT NOT (NULL, NULL) IS NOT NULL; +>> TRUE + +SELECT (NULL, 1) IS NULL; +>> FALSE + +SELECT (NULL, 1) IS NOT NULL; +>> FALSE + +SELECT NOT (NULL, 1) IS NULL; +>> TRUE + +SELECT NOT (NULL, 1) IS NOT NULL; +>> TRUE + +SELECT (1, 2) IS NULL; +>> FALSE + +SELECT (1, 2) IS NOT NULL; +>> TRUE + +SELECT NOT (1, 2) IS NULL; +>> TRUE + +SELECT NOT (1, 2) IS NOT NULL; +>> FALSE + +CREATE TABLE TEST(A INT, B INT) AS VALUES (NULL, NULL), (1, NULL), (NULL, 2), (1, 2); +> ok + +CREATE INDEX TEST_A_IDX ON TEST(A); +> ok + +CREATE INDEX TEST_B_IDX ON TEST(B); +> ok 
+ +CREATE INDEX TEST_A_B_IDX ON TEST(A, B); +> ok + +SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +> A B A B +> - - - - +> rows: 0 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A IS NULL */ /* WHERE T2.A IS NULL */ INNER JOIN "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX: A = T2.A */ ON 1=1 WHERE ("T2"."A" IS NULL) AND ("T1"."A" = "T2"."A") + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +> A B A B +> ---- ---- ---- ---- +> null 2 null null +> null null null null +> rows: 2 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON "T1"."A" = "T2"."A" WHERE "T2"."A" IS NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +> A B A B +> - ---- - ---- +> 1 2 1 2 +> 1 2 1 null +> 1 null 1 2 +> 1 null 1 null +> rows: 4 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON 1=1 WHERE ("T2"."A" IS NOT NULL) AND ("T1"."A" = "T2"."A") + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +> A B A B +> - ---- - ---- +> 1 2 1 2 +> 1 2 1 null +> 1 null 1 2 +> 1 null 1 null +> rows: 4 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON T1.A = T2.A WHERE T2.A IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A = T1.A */ ON "T1"."A" = "T2"."A" WHERE "T2"."A" IS NOT NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON 
(T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +> A B A B +> - - - - +> rows: 0 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ /* WHERE ROW (T2.A, T2.B) IS NULL */ INNER JOIN "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ ON 1=1 WHERE (ROW ("T2"."A", "T2"."B") IS NULL) AND (ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B")) + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +> A B A B +> ---- ---- ---- ---- +> 1 null null null +> null 2 null null +> null null null null +> rows: 3 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B") WHERE ROW ("T2"."A", "T2"."B") IS NULL + +SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +> A B A B +> - - - - +> 1 2 1 2 +> rows: 1 + +EXPLAIN SELECT * FROM TEST T1 JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON 1=1 WHERE (ROW ("T2"."A", "T2"."B") IS NOT NULL) AND (ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B")) + +SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +> A B A B +> - - - - +> 1 2 1 2 +> rows: 1 + +EXPLAIN SELECT * FROM TEST T1 LEFT JOIN TEST T2 ON (T1.A, T1.B) = (T2.A, T2.B) WHERE (T2.A, T2.B) IS NOT NULL; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST_A_B_IDX */ LEFT 
OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST_A_B_IDX */ ON ROW ("T1"."A", "T1"."B") = ROW ("T2"."A", "T2"."B") WHERE ROW ("T2"."A", "T2"."B") IS NOT NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL */ WHERE "A" IS NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL) IS NOT NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +EXPLAIN SELECT A, B FROM TEST WHERE NOT (A, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ WHERE "A" IS NOT NULL + +EXPLAIN SELECT A, B FROM TEST WHERE NOT (A, NULL) IS NOT NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL, B) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ WHERE ROW ("A", "B") IS NULL + +EXPLAIN SELECT A, B FROM TEST WHERE (A, NULL, B, NULL) IS NULL; +>> SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX: A IS NULL AND B IS NULL */ WHERE ROW ("A", "B") IS NULL + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I INTEGER) AS VALUES 1; +> ok + + +SELECT I FROM TEST WHERE _ROWID_ IS NULL; +> I +> - +> rows: 0 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/type.sql b/h2/src/test/org/h2/test/scripts/predicates/type.sql new file mode 100644 index 0000000000..d555c803f1 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/type.sql @@ -0,0 +1,49 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT 1 IS OF (INT); +>> TRUE + +SELECT 1 IS NOT OF (INT); +>> FALSE + +SELECT NULL IS OF (INT); +>> null + +SELECT NULL IS NOT OF (INT); +>> null + +SELECT 1 IS OF (INT, BIGINT); +>> TRUE + +SELECT 1 IS NOT OF (INT, BIGINT); +>> FALSE + +SELECT TRUE IS OF (VARCHAR, TIME); +>> FALSE + +SELECT TRUE IS NOT OF (VARCHAR, TIME); +>> TRUE + +CREATE TABLE TEST(A INT NOT NULL, B INT); +> ok + +EXPLAIN SELECT + 'Test' IS OF (VARCHAR), 'Test' IS NOT OF (VARCHAR), + 10 IS OF (VARCHAR), 10 IS NOT OF (VARCHAR), + NULL IS OF (VARCHAR), NULL IS NOT OF (VARCHAR); +>> SELECT TRUE, FALSE, FALSE, TRUE, UNKNOWN, UNKNOWN + +EXPLAIN SELECT A IS OF (INT), A IS OF (BIGINT), A IS NOT OF (INT), NOT A IS OF (BIGINT) FROM TEST; +>> SELECT "A" IS OF (INTEGER), "A" IS OF (BIGINT), "A" IS NOT OF (INTEGER), "A" IS NOT OF (BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT B IS OF (INT), B IS OF (BIGINT), B IS NOT OF (INT), NOT B IS OF (BIGINT) FROM TEST; +>> SELECT "B" IS OF (INTEGER), "B" IS OF (BIGINT), "B" IS NOT OF (INTEGER), "B" IS NOT OF (BIGINT) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT A IS NOT OF(INT) OR B IS OF (INT) FROM TEST; +>> SELECT ("A" IS NOT OF (INTEGER)) OR ("B" IS OF (INTEGER)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/predicates/unique.sql b/h2/src/test/org/h2/test/scripts/predicates/unique.sql new file mode 100644 index 0000000000..ffc26ea555 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/predicates/unique.sql @@ -0,0 +1,54 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT PRIMARY KEY, GR INT, A INT, B INT, C INT) AS VALUES + (1, 1, NULL, NULL, NULL), + (2, 1, NULL, NULL, NULL), + (3, 1, NULL, 1, 1), + (4, 1, NULL, 1, 1), + (5, 1, 1, 1, 1), + (6, 1, 1, 1, 2), + (7, 2, 1, 2, 1); +> ok + +SELECT UNIQUE(SELECT A, B FROM TEST); +>> FALSE + +SELECT UNIQUE(TABLE TEST); +>> TRUE + +SELECT UNIQUE(SELECT A, B, C FROM TEST); +>> TRUE + +EXPLAIN SELECT UNIQUE(SELECT A, B FROM TEST); +>> SELECT UNIQUE( SELECT "A", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) + +SELECT UNIQUE(SELECT A, B FROM TEST); +>> FALSE + +EXPLAIN SELECT UNIQUE(SELECT DISTINCT A, B FROM TEST); +>> SELECT TRUE + +SELECT UNIQUE(SELECT DISTINCT A, B FROM TEST); +>> TRUE + +SELECT G, UNIQUE(SELECT A, B, C FROM TEST WHERE GR = G) FROM (VALUES 1, 2, 3) V(G); +> G UNIQUE( SELECT A, B, C FROM PUBLIC.TEST WHERE GR = G) +> - ----------------------------------------------------- +> 1 TRUE +> 2 TRUE +> 3 TRUE +> rows: 3 + +SELECT G, UNIQUE(SELECT A, B FROM TEST WHERE GR = G ORDER BY A + B) FROM (VALUES 1, 2, 3) V(G); +> G UNIQUE( SELECT A, B FROM PUBLIC.TEST WHERE GR = G ORDER BY A + B) +> - ----------------------------------------------------------------- +> 1 FALSE +> 2 TRUE +> 3 TRUE +> rows: 3 + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql b/h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql new file mode 100644 index 0000000000..1b36b3f9bb --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/derived-column-names.sql @@ -0,0 +1,88 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +SELECT * FROM (VALUES(1, 2)); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES(1, 2)) AS T; +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES(1, 2)) AS T(A, B); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT A AS A1, B AS B1 FROM (VALUES(1, 2)) AS T(A, B); +> A1 B1 +> -- -- +> 1 2 +> rows: 1 + +SELECT A AS A1, B AS B1 FROM (VALUES(1, 2)) AS T(A, B) WHERE A <> B; +> A1 B1 +> -- -- +> 1 2 +> rows: 1 + +SELECT A AS A1, B AS B1 FROM (VALUES(1, 2)) AS T(A, B) WHERE A1 <> B1; +> exception COLUMN_NOT_FOUND_1 + +SELECT * FROM (VALUES(1, 2)) AS T(A); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +SELECT * FROM (VALUES(1, 2)) AS T(A, a); +> exception DUPLICATE_COLUMN_NAME_1 + +SELECT * FROM (VALUES(1, 2)) AS T(A, B, C); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +SELECT V AS V1, A AS A1, B AS B1 FROM (VALUES (1)) T1(V) INNER JOIN (VALUES(1, 2)) T2(A, B) ON V = A; +> V1 A1 B1 +> -- -- -- +> 1 1 2 +> rows: 1 + +CREATE TABLE TEST(I INT, J INT); +> ok + +CREATE INDEX TEST_I_IDX ON TEST(I); +> ok + +INSERT INTO TEST VALUES (1, 2); +> update count: 1 + +SELECT * FROM (TEST) AS T(A, B); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM TEST AS T(A, B); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM TEST AS T(A, B) USE INDEX (TEST_I_IDX); +> A B +> - - +> 1 2 +> rows: 1 + +DROP TABLE TEST; +> ok + +SELECT * FROM (SELECT 1 A, 2 A) T(B, C); +> B C +> - - +> 1 2 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/queries/distinct.sql b/h2/src/test/org/h2/test/scripts/queries/distinct.sql new file mode 100644 index 0000000000..7da7c9ad95 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/distinct.sql @@ -0,0 +1,190 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID BIGINT, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES (1, 'a'), (2, 'B'), (3, 'c'), (1, 'a'); +> update count: 4 + +CREATE TABLE TEST2(ID2 BIGINT); +> ok + +INSERT INTO TEST2 VALUES (1), (2); +> update count: 2 + +SELECT DISTINCT NAME FROM TEST ORDER BY NAME; +> NAME +> ---- +> B +> a +> c +> rows (ordered): 3 + +SELECT DISTINCT NAME FROM TEST ORDER BY LOWER(NAME); +> NAME +> ---- +> a +> B +> c +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST ORDER BY ID; +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST ORDER BY -ID - 1; +> ID +> -- +> 3 +> 2 +> 1 +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST ORDER BY (-ID + 10) > 0 AND NOT (ID = 0), ID; +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT DISTINCT NAME, ID + 1 FROM TEST ORDER BY UPPER(NAME) || (ID + 1); +> NAME ID + 1 +> ---- ------ +> a 2 +> B 3 +> c 4 +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST ORDER BY NAME; +> exception ORDER_BY_NOT_IN_RESULT + +SELECT DISTINCT ID FROM TEST ORDER BY UPPER(NAME); +> exception ORDER_BY_NOT_IN_RESULT + +SELECT DISTINCT ID FROM TEST ORDER BY CURRENT_TIMESTAMP; +> exception ORDER_BY_NOT_IN_RESULT + +SET MODE MySQL; +> ok + +SELECT DISTINCT ID FROM TEST ORDER BY NAME; +> ID +> -- +> 2 +> 1 +> 3 +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST ORDER BY LOWER(NAME); +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT DISTINCT ID FROM TEST JOIN TEST2 ON ID = ID2 ORDER BY LOWER(NAME); +> ID +> -- +> 1 +> 2 +> rows (ordered): 2 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +DROP TABLE TEST2; +> ok + +CREATE TABLE TEST(C1 INT, C2 INT, C3 INT, C4 INT, C5 INT); +> ok + +INSERT INTO TEST VALUES(1, 2, 3, 4, 5), (1, 2, 3, 6, 7), (2, 1, 4, 8, 9), (3, 4, 5, 1, 1); +> update count: 4 + +SELECT DISTINCT ON(C1, C2) C1, C2, C3, C4, C5 FROM TEST; +> C1 C2 C3 C4 C5 +> -- -- -- -- -- +> 1 2 3 4 5 +> 2 1 4 8 9 +> 3 4 5 1 1 +> rows: 3 + +SELECT DISTINCT ON(C1 + C2) C1, 
C2, C3, C4, C5 FROM TEST; +> C1 C2 C3 C4 C5 +> -- -- -- -- -- +> 1 2 3 4 5 +> 3 4 5 1 1 +> rows: 2 + +SELECT DISTINCT ON(C1 + C2, C3) C1, C2, C3, C4, C5 FROM TEST; +> C1 C2 C3 C4 C5 +> -- -- -- -- -- +> 1 2 3 4 5 +> 2 1 4 8 9 +> 3 4 5 1 1 +> rows: 3 + +SELECT DISTINCT ON(C1) C2 FROM TEST ORDER BY C1; +> C2 +> -- +> 2 +> 1 +> 4 +> rows (ordered): 3 + +SELECT DISTINCT ON(C1) C1, C4, C5 FROM TEST ORDER BY C1, C5; +> C1 C4 C5 +> -- -- -- +> 1 4 5 +> 2 8 9 +> 3 1 1 +> rows (ordered): 3 + +SELECT DISTINCT ON(C1) C1, C4, C5 FROM TEST ORDER BY C1, C5 DESC; +> C1 C4 C5 +> -- -- -- +> 1 6 7 +> 2 8 9 +> 3 1 1 +> rows (ordered): 3 + +SELECT T1.C1, T2.C5 FROM TEST T1 JOIN ( + SELECT DISTINCT ON(C1) C1, C4, C5 FROM TEST ORDER BY C1, C5 +) T2 ON T1.C4 = T2.C4 ORDER BY T1.C1; +> C1 C5 +> -- -- +> 1 5 +> 2 9 +> 3 1 +> rows (ordered): 3 + +SELECT T1.C1, T2.C5 FROM TEST T1 JOIN ( + SELECT DISTINCT ON(C1) C1, C4, C5 FROM TEST ORDER BY C1, C5 DESC +) T2 ON T1.C4 = T2.C4 ORDER BY T1.C1; +> C1 C5 +> -- -- +> 1 7 +> 2 9 +> 3 1 +> rows (ordered): 3 + +EXPLAIN SELECT DISTINCT ON(C1) C2 FROM TEST ORDER BY C1; +>> SELECT DISTINCT ON("C1") "C2" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY "C1" + +SELECT DISTINCT ON(C1) C2 FROM TEST ORDER BY C3; +> exception ORDER_BY_NOT_IN_RESULT + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/queries/joins.sql b/h2/src/test/org/h2/test/scripts/queries/joins.sql new file mode 100644 index 0000000000..57ccf2acd6 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/joins.sql @@ -0,0 +1,1046 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +create table a(a int) as select 1; +> ok + +create table b(b int) as select 1; +> ok + +create table c(c int) as select x from system_range(1, 2); +> ok + +select * from a inner join b on a=b right outer join c on c=a; +> A B C +> ---- ---- - +> 1 1 1 +> null null 2 +> rows: 2 + +select * from c left outer join (a inner join b on b=a) on c=a; +> C A B +> - ---- ---- +> 1 1 1 +> 2 null null +> rows: 2 + +select * from c left outer join a on c=a inner join b on b=a; +> C A B +> - - - +> 1 1 1 +> rows: 1 + +drop table a, b, c; +> ok + +create table test(a int, b int) as select x, x from system_range(1, 100); +> ok + +-- the table t1 should be processed first +explain select * from test t2, test t1 where t1.a=1 and t1.b = t2.b; +>> SELECT "T2"."A", "T2"."B", "T1"."A", "T1"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ /* WHERE T1.A = 1 */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE ("T1"."A" = 1) AND ("T1"."B" = "T2"."B") + +explain select * from test t1, test t2 where t1.a=1 and t1.b = t2.b; +>> SELECT "T1"."A", "T1"."B", "T2"."A", "T2"."B" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ /* WHERE T1.A = 1 */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE ("T1"."A" = 1) AND ("T1"."B" = "T2"."B") + +drop table test; +> ok + +create table test(id identity) as select x from system_range(1, 4); +> ok + +select a.id from test a inner join test b on a.id > b.id and b.id < 3 group by a.id; +> ID +> -- +> 2 +> 3 +> 4 +> rows: 3 + +drop table test; +> ok + +select * from system_range(1, 3) t1 inner join system_range(2, 3) t2 inner join system_range(1, 2) t3 on t3.x=t2.x on t1.x=t2.x; +> X X X +> - - - +> 2 2 2 +> rows: 1 + +CREATE TABLE PARENT(ID INT PRIMARY KEY); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY); +> ok + +INSERT INTO PARENT VALUES(1); +> update count: 1 + +SELECT * FROM PARENT P LEFT OUTER JOIN CHILD C ON C.PARENTID=P.ID; +> exception 
COLUMN_NOT_FOUND_1 + +DROP TABLE PARENT, CHILD; +> ok + +create table t1 (i int); +> ok + +create table t2 (i int); +> ok + +create table t3 (i int); +> ok + +select a.i from t1 a inner join (select a.i from t2 a inner join (select i from t3) b on a.i=b.i) b on a.i=b.i; +> I +> - +> rows: 0 + +insert into t1 values (1); +> update count: 1 + +insert into t2 values (1); +> update count: 1 + +insert into t3 values (1); +> update count: 1 + +select a.i from t1 a inner join (select a.i from t2 a inner join (select i from t3) b on a.i=b.i) b on a.i=b.i; +> I +> - +> 1 +> rows: 1 + +drop table t1, t2, t3; +> ok + +CREATE TABLE TESTA(ID IDENTITY); +> ok + +CREATE TABLE TESTB(ID IDENTITY); +> ok + +explain SELECT TESTA.ID A, TESTB.ID B FROM TESTA, TESTB ORDER BY TESTA.ID, TESTB.ID; +>> SELECT "TESTA"."ID" AS "A", "TESTB"."ID" AS "B" FROM "PUBLIC"."TESTA" /* PUBLIC.TESTA.tableScan */ INNER JOIN "PUBLIC"."TESTB" /* PUBLIC.TESTB.tableScan */ ON 1=1 ORDER BY 1, 2 + +DROP TABLE IF EXISTS TESTA, TESTB; +> ok + +create table one (id int primary key); +> ok + +create table two (id int primary key, val date); +> ok + +insert into one values(0); +> update count: 1 + +insert into one values(1); +> update count: 1 + +insert into one values(2); +> update count: 1 + +insert into two values(0, null); +> update count: 1 + +insert into two values(1, DATE'2006-01-01'); +> update count: 1 + +insert into two values(2, DATE'2006-07-01'); +> update count: 1 + +insert into two values(3, null); +> update count: 1 + +select * from one; +> ID +> -- +> 0 +> 1 +> 2 +> rows: 3 + +select * from two; +> ID VAL +> -- ---------- +> 0 null +> 1 2006-01-01 +> 2 2006-07-01 +> 3 null +> rows: 4 + +-- Query #1: should return one row +-- okay +select * from one natural join two left join two three on +one.id=three.id left join one four on two.id=four.id where three.val +is null; +> ID VAL ID VAL ID +> -- ---- -- ---- -- +> 0 null 0 null 0 +> rows: 1 + +-- Query #2: should return one row +-- okay +select * from 
one natural join two left join two three on +one.id=three.id left join one four on two.id=four.id where +three.val>=DATE'2006-07-01'; +> ID VAL ID VAL ID +> -- ---------- -- ---------- -- +> 2 2006-07-01 2 2006-07-01 2 +> rows: 1 + +-- Query #3: should return the union of #1 and #2 +select * from one natural join two left join two three on +one.id=three.id left join one four on two.id=four.id where three.val +is null or three.val>=DATE'2006-07-01'; +> ID VAL ID VAL ID +> -- ---------- -- ---------- -- +> 0 null 0 null 0 +> 2 2006-07-01 2 2006-07-01 2 +> rows: 2 + +explain select * from one natural join two left join two three on +one.id=three.id left join one four on two.id=four.id where three.val +is null or three.val>=DATE'2006-07-01'; +>> SELECT "PUBLIC"."ONE"."ID", "PUBLIC"."TWO"."VAL", "THREE"."ID", "THREE"."VAL", "FOUR"."ID" FROM "PUBLIC"."ONE" /* PUBLIC.ONE.tableScan */ INNER JOIN "PUBLIC"."TWO" /* PUBLIC.PRIMARY_KEY_14: ID = PUBLIC.ONE.ID */ ON 1=1 /* WHERE PUBLIC.ONE.ID = PUBLIC.TWO.ID */ LEFT OUTER JOIN "PUBLIC"."TWO" "THREE" /* PUBLIC.PRIMARY_KEY_14: ID = ONE.ID */ ON "ONE"."ID" = "THREE"."ID" LEFT OUTER JOIN "PUBLIC"."ONE" "FOUR" /* PUBLIC.PRIMARY_KEY_1: ID = TWO.ID */ ON "TWO"."ID" = "FOUR"."ID" WHERE ("PUBLIC"."ONE"."ID" = "PUBLIC"."TWO"."ID") AND (("THREE"."VAL" IS NULL) OR ("THREE"."VAL" >= DATE '2006-07-01')) + +-- Query #4: same as #3, but the joins have been manually re-ordered +-- Correct result set, same as expected for #3. 
+select * from one natural join two left join one four on +two.id=four.id left join two three on one.id=three.id where three.val +is null or three.val>=DATE'2006-07-01'; +> ID VAL ID ID VAL +> -- ---------- -- -- ---------- +> 0 null 0 0 null +> 2 2006-07-01 2 2 2006-07-01 +> rows: 2 + +drop table one; +> ok + +drop table two; +> ok + +create table test1 (id int primary key); +> ok + +create table test2 (id int primary key); +> ok + +create table test3 (id int primary key); +> ok + +insert into test1 values(1); +> update count: 1 + +insert into test2 values(1); +> update count: 1 + +insert into test3 values(1); +> update count: 1 + +select * from test1 +inner join test2 on test1.id=test2.id left +outer join test3 on test2.id=test3.id +where test3.id is null; +> ID ID ID +> -- -- -- +> rows: 0 + +explain select * from test1 +inner join test2 on test1.id=test2.id left +outer join test3 on test2.id=test3.id +where test3.id is null; +>> SELECT "PUBLIC"."TEST1"."ID", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST3"."ID" FROM "PUBLIC"."TEST1" /* PUBLIC.TEST1.tableScan */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.PRIMARY_KEY_4C: ID = TEST1.ID */ ON 1=1 /* WHERE TEST1.ID = TEST2.ID */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") + +insert into test1 select x from system_range(2, 1000); +> update count: 999 + +select * from test1 +inner join test2 on test1.id=test2.id +left outer join test3 on test2.id=test3.id +where test3.id is null; +> ID ID ID +> -- -- -- +> rows: 0 + +explain select * from test1 +inner join test2 on test1.id=test2.id +left outer join test3 on test2.id=test3.id +where test3.id is null; +>> SELECT "PUBLIC"."TEST1"."ID", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST3"."ID" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST3" /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON "TEST2"."ID" = "TEST3"."ID" INNER JOIN 
"PUBLIC"."TEST1" /* PUBLIC.PRIMARY_KEY_4: ID = TEST2.ID */ ON 1=1 WHERE ("TEST3"."ID" IS NULL) AND ("TEST1"."ID" = "TEST2"."ID") + +SELECT TEST1.ID, TEST2.ID, TEST3.ID +FROM TEST2 +LEFT OUTER JOIN TEST3 ON TEST2.ID = TEST3.ID +INNER JOIN TEST1 +WHERE TEST3.ID IS NULL AND TEST1.ID = TEST2.ID; +> ID ID ID +> -- -- -- +> rows: 0 + +drop table test1; +> ok + +drop table test2; +> ok + +drop table test3; +> ok + +create table left_hand (id int primary key); +> ok + +create table right_hand (id int primary key); +> ok + +insert into left_hand values(0); +> update count: 1 + +insert into left_hand values(1); +> update count: 1 + +insert into right_hand values(0); +> update count: 1 + +-- h2, postgresql, mysql, derby, hsqldb: 2 +select * from left_hand left outer join right_hand on left_hand.id=right_hand.id; +> ID ID +> -- ---- +> 0 0 +> 1 null +> rows: 2 + +-- h2, postgresql, mysql, derby, hsqldb: 2 +select * from left_hand left join right_hand on left_hand.id=right_hand.id; +> ID ID +> -- ---- +> 0 0 +> 1 null +> rows: 2 + +-- h2: 1 (2 cols); postgresql, mysql: 1 (1 col); derby, hsqldb: no natural join +select * from left_hand natural join right_hand; +> ID +> -- +> 0 +> rows: 1 + +-- h2, postgresql, mysql, derby, hsqldb: 1 +select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1; +> ID ID +> -- ---- +> 1 null +> rows: 1 + +-- h2, postgresql, mysql, derby, hsqldb: 1 +select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1; +> ID ID +> -- ---- +> 1 null +> rows: 1 + +-- h2: 0 (2 cols); postgresql, mysql: 0 (1 col); derby, hsqldb: no natural join +select * from left_hand natural join right_hand where left_hand.id=1; +> ID +> -- +> rows: 0 + +-- !!! h2: 1; postgresql, mysql, hsqldb: 0; derby: exception +select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1 having right_hand.id=2; +> ID ID +> -- -- +> rows: 0 + +-- !!! 
h2: 1; postgresql, mysql, hsqldb: 0; derby: exception +select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1 having right_hand.id=2; +> ID ID +> -- -- +> rows: 0 + +-- h2: 0 (2 cols); postgresql: 0 (1 col), mysql: exception; derby, hsqldb: no natural join +select * from left_hand natural join right_hand where left_hand.id=1 having right_hand.id=2; +> exception MUST_GROUP_BY_COLUMN_1 + +-- h2, mysql, hsqldb: 0 rows; postgresql, derby: exception +select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1 group by left_hand.id having right_hand.id=2; +> ID ID +> -- -- +> rows: 0 + +-- h2, mysql, hsqldb: 0 rows; postgresql, derby: exception +select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1 group by left_hand.id having right_hand.id=2; +> ID ID +> -- -- +> rows: 0 + +-- h2: 0 rows; postgresql, mysql: exception; derby, hsqldb: no natural join +select * from left_hand natural join right_hand where left_hand.id=1 group by left_hand.id having right_hand.id=2; +> ID +> -- +> rows: 0 + +drop table right_hand; +> ok + +drop table left_hand; +> ok + +--- complex join --------------------------------------------------------------------------------------------- +CREATE TABLE T1(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE TABLE T2(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE TABLE T3(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO T1 VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO T1 VALUES(2, 'World'); +> update count: 1 + +INSERT INTO T1 VALUES(3, 'Peace'); +> update count: 1 + +INSERT INTO T2 VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO T2 VALUES(2, 'World'); +> update count: 1 + +INSERT INTO T3 VALUES(1, 'Hello'); +> update count: 1 + +SELECT * FROM t1 left outer join t2 on t1.id=t2.id; +> ID NAME ID NAME +> -- ----- ---- ----- +> 1 Hello 1 Hello +> 2 World 2 World +> 3 Peace null null +> rows: 3 
+ +SELECT * FROM t1 left outer join t2 on t1.id=t2.id left outer join t3 on t1.id=t3.id; +> ID NAME ID NAME ID NAME +> -- ----- ---- ----- ---- ----- +> 1 Hello 1 Hello 1 Hello +> 2 World 2 World null null +> 3 Peace null null null null +> rows: 3 + +SELECT * FROM t1 left outer join t2 on t1.id=t2.id inner join t3 on t1.id=t3.id; +> ID NAME ID NAME ID NAME +> -- ----- -- ----- -- ----- +> 1 Hello 1 Hello 1 Hello +> rows: 1 + +drop table t1; +> ok + +drop table t2; +> ok + +drop table t3; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, parent int, sid int); +> ok + +create index idx_p on test(sid); +> ok + +insert into test select x, x, x from system_range(0,20); +> update count: 21 + +select * from test l0 inner join test l1 on l0.sid=l1.sid, test l3 where l0.sid=l3.parent; +> ID PARENT SID ID PARENT SID ID PARENT SID +> -- ------ --- -- ------ --- -- ------ --- +> 0 0 0 0 0 0 0 0 0 +> 1 1 1 1 1 1 1 1 1 +> 10 10 10 10 10 10 10 10 10 +> 11 11 11 11 11 11 11 11 11 +> 12 12 12 12 12 12 12 12 12 +> 13 13 13 13 13 13 13 13 13 +> 14 14 14 14 14 14 14 14 14 +> 15 15 15 15 15 15 15 15 15 +> 16 16 16 16 16 16 16 16 16 +> 17 17 17 17 17 17 17 17 17 +> 18 18 18 18 18 18 18 18 18 +> 19 19 19 19 19 19 19 19 19 +> 2 2 2 2 2 2 2 2 2 +> 20 20 20 20 20 20 20 20 20 +> 3 3 3 3 3 3 3 3 3 +> 4 4 4 4 4 4 4 4 4 +> 5 5 5 5 5 5 5 5 5 +> 6 6 6 6 6 6 6 6 6 +> 7 7 7 7 7 7 7 7 7 +> 8 8 8 8 8 8 8 8 8 +> 9 9 9 9 9 9 9 9 9 +> rows: 21 + +select * from +test l0 +inner join test l1 on l0.sid=l1.sid +inner join test l2 on l0.sid=l2.id, +test l5 +inner join test l3 on l5.sid=l3.sid +inner join test l4 on l5.sid=l4.id +where l2.id is not null +and l0.sid=l5.parent; +> ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID +> -- ------ --- -- ------ --- -- ------ --- -- ------ --- -- ------ --- -- ------ --- +> 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +> 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +> 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 +> 11 11 11 11 11 11 11 11 11 11 
11 11 11 11 11 11 11 11 +> 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 +> 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 +> 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 +> 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 +> 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 +> 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 +> 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 +> 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 +> 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 +> 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 +> 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 +> 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 +> 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 +> 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 +> 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 +> 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 +> 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 +> rows: 21 + +DROP TABLE IF EXISTS TEST; +> ok + +--- joins ---------------------------------------------------------------------------------------------------- +create table t1(id int, name varchar); +> ok + +insert into t1 values(1, 'hi'), (2, 'world'); +> update count: 2 + +create table t2(id int, name varchar); +> ok + +insert into t2 values(1, 'Hallo'), (3, 'Welt'); +> update count: 2 + +select * from t1 join t2 on t1.id=t2.id; +> ID NAME ID NAME +> -- ---- -- ----- +> 1 hi 1 Hallo +> rows: 1 + +select * from t1 left join t2 on t1.id=t2.id; +> ID NAME ID NAME +> -- ----- ---- ----- +> 1 hi 1 Hallo +> 2 world null null +> rows: 2 + +select * from t1 right join t2 on t1.id=t2.id; +> ID NAME ID NAME +> ---- ---- -- ----- +> 1 hi 1 Hallo +> null null 3 Welt +> rows: 2 + +select * from t1 cross join t2; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 hi 1 Hallo +> 1 hi 3 Welt +> 2 world 1 Hallo +> 2 world 3 Welt +> rows: 4 + +select * from t1 natural join t2; +> ID NAME +> -- ---- +> rows: 0 + +explain select * from t1 natural join t2; +>> SELECT "PUBLIC"."T1"."ID", "PUBLIC"."T1"."NAME" FROM "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ 
INNER JOIN "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ ON 1=1 WHERE ("PUBLIC"."T1"."ID" = "PUBLIC"."T2"."ID") AND ("PUBLIC"."T1"."NAME" = "PUBLIC"."T2"."NAME") + +drop table t1; +> ok + +drop table t2; +> ok + +create table customer(customerid int, customer_name varchar); +> ok + +insert into customer values(0, 'Acme'); +> update count: 1 + +create table invoice(customerid int, invoiceid int, invoice_text varchar); +> ok + +insert into invoice values(0, 1, 'Soap'), (0, 2, 'More Soap'); +> update count: 2 + +create table INVOICE_LINE(line_id int, invoiceid int, customerid int, line_text varchar); +> ok + +insert into INVOICE_LINE values(10, 1, 0, 'Super Soap'), (20, 1, 0, 'Regular Soap'); +> update count: 2 + +select * from customer c natural join invoice i natural join INVOICE_LINE l; +> CUSTOMERID CUSTOMER_NAME INVOICEID INVOICE_TEXT LINE_ID LINE_TEXT +> ---------- ------------- --------- ------------ ------- ------------ +> 0 Acme 1 Soap 10 Super Soap +> 0 Acme 1 Soap 20 Regular Soap +> rows: 2 + +explain select * from customer c natural join invoice i natural join INVOICE_LINE l; +>> SELECT "C"."CUSTOMERID", "C"."CUSTOMER_NAME", "I"."INVOICEID", "I"."INVOICE_TEXT", "L"."LINE_ID", "L"."LINE_TEXT" FROM "PUBLIC"."INVOICE" "I" /* PUBLIC.INVOICE.tableScan */ INNER JOIN "PUBLIC"."INVOICE_LINE" "L" /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (I.CUSTOMERID = L.CUSTOMERID) AND (I.INVOICEID = L.INVOICEID) */ INNER JOIN "PUBLIC"."CUSTOMER" "C" /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE ("C"."CUSTOMERID" = "I"."CUSTOMERID") AND ("I"."CUSTOMERID" = "L"."CUSTOMERID") AND ("I"."INVOICEID" = "L"."INVOICEID") + +select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; +> CUSTOMERID CUSTOMER_NAME CUSTOMERID INVOICEID INVOICE_TEXT LINE_ID INVOICEID CUSTOMERID LINE_TEXT +> ---------- ------------- ---------- --------- ------------ ------- --------- ---------- ------------ +> 0 Acme 0 1 Soap 10 1 0 Super Soap +> 0 Acme 0 1 Soap 20 1 0 Regular 
Soap +> rows: 2 + +explain select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; +>> SELECT "C"."CUSTOMERID", "C"."CUSTOMER_NAME", "I"."CUSTOMERID", "I"."INVOICEID", "I"."INVOICE_TEXT", "L"."LINE_ID", "L"."INVOICEID", "L"."CUSTOMERID", "L"."LINE_TEXT" FROM "PUBLIC"."INVOICE" "I" /* PUBLIC.INVOICE.tableScan */ INNER JOIN "PUBLIC"."INVOICE_LINE" "L" /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (I.CUSTOMERID = L.CUSTOMERID) AND (I.INVOICEID = L.INVOICEID) */ INNER JOIN "PUBLIC"."CUSTOMER" "C" /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE ("C"."CUSTOMERID" = "I"."CUSTOMERID") AND ("I"."CUSTOMERID" = "L"."CUSTOMERID") AND ("I"."INVOICEID" = "L"."INVOICEID") + +drop table customer; +> ok + +drop table invoice; +> ok + +drop table INVOICE_LINE; +> ok + +--- outer joins ---------------------------------------------------------------------------------------------- +CREATE TABLE PARENT(ID INT, NAME VARCHAR(20)); +> ok + +CREATE TABLE CHILD(ID INT, PARENTID INT, NAME VARCHAR(20)); +> ok + +INSERT INTO PARENT VALUES(1, 'Sue'); +> update count: 1 + +INSERT INTO PARENT VALUES(2, 'Joe'); +> update count: 1 + +INSERT INTO CHILD VALUES(100, 1, 'Simon'); +> update count: 1 + +INSERT INTO CHILD VALUES(101, 1, 'Sabine'); +> update count: 1 + +SELECT * FROM PARENT P INNER JOIN CHILD C ON P.ID = C.PARENTID; +> ID NAME ID PARENTID NAME +> -- ---- --- -------- ------ +> 1 Sue 100 1 Simon +> 1 Sue 101 1 Sabine +> rows: 2 + +SELECT * FROM PARENT P LEFT OUTER JOIN CHILD C ON P.ID = C.PARENTID; +> ID NAME ID PARENTID NAME +> -- ---- ---- -------- ------ +> 1 Sue 100 1 Simon +> 1 Sue 101 1 Sabine +> 2 Joe null null null +> rows: 3 + +SELECT * FROM CHILD C RIGHT OUTER JOIN PARENT P ON P.ID = C.PARENTID; +> ID PARENTID NAME ID NAME +> ---- -------- ------ -- ---- +> 100 1 Simon 1 Sue +> 101 1 Sabine 1 Sue +> null null null 2 Joe +> rows: 3 + +DROP TABLE PARENT; +> ok + +DROP TABLE CHILD; +> ok + +CREATE TABLE A(A1 INT, A2 INT); +> ok + +INSERT INTO A 
VALUES (1, 2); +> update count: 1 + +CREATE TABLE B(B1 INT, B2 INT); +> ok + +INSERT INTO B VALUES (1, 2); +> update count: 1 + +CREATE TABLE C(B1 INT, C1 INT); +> ok + +INSERT INTO C VALUES (1, 2); +> update count: 1 + +SELECT * FROM A LEFT JOIN B ON TRUE; +> A1 A2 B1 B2 +> -- -- -- -- +> 1 2 1 2 +> rows: 1 + +SELECT A.A1, A.A2, B.B1, B.B2 FROM A RIGHT JOIN B ON TRUE; +> A1 A2 B1 B2 +> -- -- -- -- +> 1 2 1 2 +> rows: 1 + +-- this syntax without ON or USING in not standard +SELECT * FROM A LEFT JOIN B; +> A1 A2 B1 B2 +> -- -- -- -- +> 1 2 1 2 +> rows: 1 + +-- this syntax without ON or USING in not standard +SELECT A.A1, A.A2, B.B1, B.B2 FROM A RIGHT JOIN B; +> A1 A2 B1 B2 +> -- -- -- -- +> 1 2 1 2 +> rows: 1 + +SELECT * FROM A LEFT JOIN B ON TRUE NATURAL JOIN C; +> A1 A2 B1 B2 C1 +> -- -- -- -- -- +> 1 2 1 2 2 +> rows: 1 + +SELECT A.A1, A.A2, B.B1, B.B2, C.C1 FROM A RIGHT JOIN B ON TRUE NATURAL JOIN C; +> A1 A2 B1 B2 C1 +> -- -- -- -- -- +> 1 2 1 2 2 +> rows: 1 + +-- this syntax without ON or USING in not standard +SELECT * FROM A LEFT JOIN B NATURAL JOIN C; +> A1 A2 B1 B2 C1 +> -- -- -- -- -- +> 1 2 1 2 2 +> rows: 1 + +-- this syntax without ON or USING in not standard +SELECT A.A1, A.A2, B.B1, B.B2, C.C1 FROM A RIGHT JOIN B NATURAL JOIN C; +> A1 A2 B1 B2 C1 +> -- -- -- -- -- +> 1 2 1 2 2 +> rows: 1 + +DROP TABLE A; +> ok + +DROP TABLE B; +> ok + +DROP TABLE C; +> ok + +CREATE TABLE T1(X1 INT); +> ok + +CREATE TABLE T2(X2 INT); +> ok + +CREATE TABLE T3(X3 INT); +> ok + +CREATE TABLE T4(X4 INT); +> ok + +CREATE TABLE T5(X5 INT); +> ok + +INSERT INTO T1 VALUES (1); +> update count: 1 + +INSERT INTO T1 VALUES (NULL); +> update count: 1 + +INSERT INTO T2 VALUES (1); +> update count: 1 + +INSERT INTO T2 VALUES (NULL); +> update count: 1 + +INSERT INTO T3 VALUES (1); +> update count: 1 + +INSERT INTO T3 VALUES (NULL); +> update count: 1 + +INSERT INTO T4 VALUES (1); +> update count: 1 + +INSERT INTO T4 VALUES (NULL); +> update count: 1 + +INSERT INTO T5 VALUES (1); +> 
update count: 1 + +INSERT INTO T5 VALUES (NULL); +> update count: 1 + +SELECT T1.X1, T2.X2, T3.X3, T4.X4, T5.X5 FROM ( + T1 INNER JOIN ( + T2 LEFT OUTER JOIN ( + T3 INNER JOIN T4 ON T3.X3 = T4.X4 + ) ON T2.X2 = T4.X4 + ) ON T1.X1 = T2.X2 +) INNER JOIN T5 ON T2.X2 = T5.X5; +> X1 X2 X3 X4 X5 +> -- -- -- -- -- +> 1 1 1 1 1 +> rows: 1 + +DROP TABLE T1, T2, T3, T4, T5; +> ok + +CREATE TABLE A(X INT); +> ok + +CREATE TABLE B(Y INT); +> ok + +CREATE TABLE C(Z INT); +> ok + +SELECT A.X FROM A JOIN B ON A.X = B.Y AND B.Y >= COALESCE((SELECT Z FROM C FETCH FIRST ROW ONLY), 0); +> X +> - +> rows: 0 + +DROP TABLE A, B, C; +> ok + +CREATE TABLE TEST(A INT PRIMARY KEY); +> ok + +SELECT * FROM TEST X LEFT OUTER JOIN TEST Y ON Y.A = X.A || '1'; +> A A +> - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE T1(A INT, B INT) AS VALUES (1, 10), (2, 20), (4, 40), (6, 6), (7, 7); +> ok + +CREATE TABLE T2(A INT, B INT) AS VALUES (1, 100), (2, 200), (5, 500), (6, 6), (8, 7); +> ok + +SELECT T1.B, T2.B FROM T1 INNER JOIN T2 USING (A); +> B B +> -- --- +> 10 100 +> 20 200 +> 6 6 +> rows: 3 + +SELECT * FROM T1 INNER JOIN T2 USING (A); +> A B B +> - -- --- +> 1 10 100 +> 2 20 200 +> 6 6 6 +> rows: 3 + +SELECT * FROM T1 INNER JOIN T2 USING (B); +> B A A +> - - - +> 6 6 6 +> 7 7 8 +> rows: 2 + +SELECT T1.B, T2.B FROM T1 INNER JOIN T2 USING (A, B); +> B B +> - - +> 6 6 +> rows: 1 + +SELECT * FROM T1 INNER JOIN T2 USING (B, A); +> B A +> - - +> 6 6 +> rows: 1 + +DROP TABLE T1, T2; +> ok + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - - - +> 2 B C +> rows: 1 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + LEFT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - - ---- +> 1 A null +> 2 B C +> rows: 2 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + RIGHT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B C +> - ---- - +> 2 B C +> 3 null D +> rows: 2 + +SELECT T1.*, T2.* + FROM 
(VALUES(1, 'A'), (2, 'B')) T1(A, B) + RIGHT JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C) USING (A); +> A B A C +> ---- ---- - - +> 2 B 2 C +> null null 3 D +> rows: 2 + +SELECT * + FROM (VALUES(1, 'A'), (2, 'B')) T1(A, B) + NATURAL JOIN (VALUES(2, 'C'), (3, 'D')) T2(A, C); +> A B C +> - - - +> 2 B C +> rows: 1 + +CREATE TABLE T1(A VARCHAR_IGNORECASE PRIMARY KEY, B VARCHAR) AS (VALUES ('a', 'A'), ('b', 'B')); +> ok + +CREATE TABLE T2(A VARCHAR_IGNORECASE PRIMARY KEY, C VARCHAR) AS (VALUES ('B', 'C'), ('C', 'D')); +> ok + +SELECT * FROM T1 RIGHT JOIN T2 USING (A); +> A B C +> - ---- - +> C null D +> b B C +> rows: 2 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 USING (A); +>> SELECT COALESCE("PUBLIC"."T1"."A", "PUBLIC"."T2"."A") AS "A", "PUBLIC"."T1"."B", "PUBLIC"."T2"."C" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: A = PUBLIC.T2.A */ ON "PUBLIC"."T1"."A" = "PUBLIC"."T2"."A" + +DROP TABLE T1, T2; +> ok + +CREATE TABLE T1(A INT PRIMARY KEY, B VARCHAR) AS (VALUES (1, 'A'), (2, 'B')); +> ok + +CREATE TABLE T2(A INT PRIMARY KEY, C VARCHAR) AS (VALUES (2, 'C'), (3, 'D')); +> ok + +SELECT * FROM T1 RIGHT JOIN T2 USING (A); +> A B C +> - ---- - +> 2 B C +> 3 null D +> rows: 2 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 USING (A); +>> SELECT "PUBLIC"."T2"."A", "PUBLIC"."T1"."B", "PUBLIC"."T2"."C" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T1" /* PUBLIC.PRIMARY_KEY_A: A = PUBLIC.T2.A */ ON "PUBLIC"."T1"."A" = "PUBLIC"."T2"."A" + +SELECT * EXCEPT (T1.A) FROM T1 RIGHT JOIN T2 USING (A); +> B C +> ---- - +> B C +> null D +> rows: 2 + +SELECT * EXCEPT (T2.A) FROM T1 RIGHT JOIN T2 USING (A); +> B C +> ---- - +> B C +> null D +> rows: 2 + +DROP TABLE T1, T2; +> ok + +CREATE SCHEMA S1; +> ok + +CREATE SCHEMA S2; +> ok + +CREATE TABLE S1.T(A VARCHAR_IGNORECASE, B INT) AS (VALUES ('a', 2)); +> ok + +CREATE TABLE S2.T(A VARCHAR_IGNORECASE, B INT) AS (VALUES ('A', 3)); +> ok + +SELECT * FROM S1.T RIGHT JOIN S2.T 
USING(A); +> A B B +> - - - +> a 2 3 +> rows: 1 + +EXPLAIN SELECT * FROM S1.T RIGHT JOIN S2.T USING(A); +>> SELECT COALESCE("S1"."T"."A", "S2"."T"."A") AS "A", "S1"."T"."B", "S2"."T"."B" FROM "S2"."T" /* S2.T.tableScan */ LEFT OUTER JOIN "S1"."T" /* S1.T.tableScan */ ON "S1"."T"."A" = "S2"."T"."A" + +DROP SCHEMA S1 CASCADE; +> ok + +DROP SCHEMA S2 CASCADE; +> ok + +CREATE TABLE T1(C1 INTEGER) AS VALUES 1, 2, 4; +> ok + +CREATE TABLE T2(C2 INTEGER) AS VALUES 1, 3, 4; +> ok + +CREATE TABLE T3(C3 INTEGER) AS VALUES 2, 3, 4; +> ok + +SELECT * FROM T1 JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +> C1 C2 C3 +> -- -- ---- +> 1 1 null +> 4 4 4 +> rows: 2 + +EXPLAIN SELECT * FROM T1 JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +>> SELECT "PUBLIC"."T1"."C1", "PUBLIC"."T2"."C2", "PUBLIC"."T3"."C3" FROM ( "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T3" /* PUBLIC.T3.tableScan */ ON "T2"."C2" = "T3"."C3" ) INNER JOIN "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ ON 1=1 WHERE "T1"."C1" = "T2"."C2" + +SELECT * FROM T1 RIGHT JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +> C1 C2 C3 +> ---- -- ---- +> 1 1 null +> 4 4 4 +> null 3 3 +> rows: 3 + +EXPLAIN SELECT * FROM T1 RIGHT JOIN T2 LEFT JOIN T3 ON T2.C2 = T3.C3 ON T1.C1 = T2.C2; +>> SELECT "PUBLIC"."T1"."C1", "PUBLIC"."T2"."C2", "PUBLIC"."T3"."C3" FROM "PUBLIC"."T2" /* PUBLIC.T2.tableScan */ LEFT OUTER JOIN "PUBLIC"."T3" /* PUBLIC.T3.tableScan */ ON "T2"."C2" = "T3"."C3" LEFT OUTER JOIN "PUBLIC"."T1" /* PUBLIC.T1.tableScan */ ON "T1"."C1" = "T2"."C2" + +DROP TABLE T1, T2, T3; +> ok + +SELECT X.A, Y.B, Z.C +FROM (SELECT 1 A) X JOIN ( + (SELECT 1 B) Y JOIN (SELECT 1 C) Z ON Z.C = Y.B +) ON Y.B = X.A; +> A B C +> - - - +> 1 1 1 +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql b/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql new file mode 100644 index 0000000000..16f09f0479 --- /dev/null +++ 
b/h2/src/test/org/h2/test/scripts/queries/query-optimisations.sql @@ -0,0 +1,210 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +create table person(firstname varchar, lastname varchar); +> ok + +create index person_1 on person(firstname, lastname); +> ok + +insert into person select convert(x,varchar) as firstname, (convert(x,varchar) || ' last') as lastname from system_range(1,100); +> update count: 100 + +-- Issue #643: verify that when using an index, we use the IN part of the query, if that part of the query +-- can directly use the index. +-- +explain analyze SELECT * FROM person WHERE firstname IN ('FirstName1', 'FirstName2') AND lastname='LastName1'; +>> SELECT "PUBLIC"."PERSON"."FIRSTNAME", "PUBLIC"."PERSON"."LASTNAME" FROM "PUBLIC"."PERSON" /* PUBLIC.PERSON_1: FIRSTNAME IN('FirstName1', 'FirstName2') AND LASTNAME = 'LastName1' */ /* scanCount: 1 */ WHERE ("FIRSTNAME" IN('FirstName1', 'FirstName2')) AND ("LASTNAME" = 'LastName1') + +CREATE TABLE TEST(A SMALLINT PRIMARY KEY, B SMALLINT); +> ok + +CREATE INDEX TEST_IDX_1 ON TEST(B); +> ok + +CREATE INDEX TEST_IDX_2 ON TEST(B, A); +> ok + +INSERT INTO TEST VALUES (1, 2), (3, 4); +> update count: 2 + +EXPLAIN SELECT _ROWID_ FROM TEST WHERE B = 4; +>> SELECT _ROWID_ FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT _ROWID_, A FROM TEST WHERE B = 4; +>> SELECT _ROWID_, "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT A FROM TEST WHERE B = 4; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +SELECT _ROWID_, A FROM TEST WHERE B = 4; +> _ROWID_ A +> ------- - +> 3 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A TINYINT PRIMARY KEY, B TINYINT); +> ok + +CREATE INDEX TEST_IDX_1 ON TEST(B); +> ok + +CREATE INDEX TEST_IDX_2 ON TEST(B, A); +> ok + +INSERT INTO TEST VALUES 
(1, 2), (3, 4); +> update count: 2 + +EXPLAIN SELECT _ROWID_ FROM TEST WHERE B = 4; +>> SELECT _ROWID_ FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT _ROWID_, A FROM TEST WHERE B = 4; +>> SELECT _ROWID_, "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +EXPLAIN SELECT A FROM TEST WHERE B = 4; +>> SELECT "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_IDX_1: B = 4 */ WHERE "B" = 4 + +SELECT _ROWID_, A FROM TEST WHERE B = 4; +> _ROWID_ A +> ------- - +> 3 3 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(V VARCHAR(2)) AS VALUES -1, -2; +> ok + +CREATE INDEX TEST_INDEX ON TEST(V); +> ok + +SELECT * FROM TEST WHERE V >= -1; +>> -1 + +-- H2 may use the index for a table scan, but may not create index conditions due to incompatible type +EXPLAIN SELECT * FROM TEST WHERE V >= -1; +>> SELECT "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_INDEX */ WHERE "V" >= -1 + +EXPLAIN SELECT * FROM TEST WHERE V IN (-1, -3); +>> SELECT "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_INDEX */ WHERE "V" IN(-1, -3) + +SELECT * FROM TEST WHERE V < -1; +>> -2 + +DROP TABLE TEST; +> ok + +CREATE TABLE T(ID INT, V INT) AS VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> ok + +SELECT T1.ID, T2.V AS LV FROM (SELECT ID, MAX(V) AS LV FROM T GROUP BY ID) AS T1 + INNER JOIN T AS T2 ON T2.ID = T1.ID AND T2.V = T1.LV + WHERE T1.ID IN (1, 2) ORDER BY ID; +> ID LV +> -- -- +> 1 2 +> 2 2 +> rows (ordered): 2 + +EXPLAIN SELECT T1.ID, T2.V AS LV FROM (SELECT ID, MAX(V) AS LV FROM T GROUP BY ID) AS T1 + INNER JOIN T AS T2 ON T2.ID = T1.ID AND T2.V = T1.LV + WHERE T1.ID IN (1, 2) ORDER BY ID; +>> SELECT "T1"."ID", "T2"."V" AS "LV" FROM "PUBLIC"."T" "T2" /* PUBLIC.T.tableScan */ INNER JOIN ( SELECT "ID", MAX("V") AS "LV" FROM "PUBLIC"."T" GROUP BY "ID" ) "T1" /* SELECT ID, MAX(V) AS LV FROM PUBLIC.T /* PUBLIC.T.tableScan */ WHERE ID IS NOT DISTINCT FROM ?1 GROUP BY ID HAVING MAX(V) IS NOT DISTINCT FROM ?2: ID = T2.ID AND LV = T2.V */ ON 
1=1 WHERE ("T1"."ID" IN(1, 2)) AND ("T2"."ID" = "T1"."ID") AND ("T2"."V" = "T1"."LV") ORDER BY 1 + +DROP TABLE T; +> ok + +SELECT (SELECT ROWNUM) R FROM VALUES 1, 2, 3; +> R +> - +> 1 +> 1 +> 1 +> rows: 3 + +CREATE TABLE TEST(A INT, B INT, C INT) AS VALUES (1, 1, 1); +> ok + +SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C) IS NOT NULL ORDER BY T1.A; +>> 1 + +EXPLAIN SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C) IS NOT NULL ORDER BY T1.A; +>> SELECT "T1"."A" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON "T1"."B" = "T2"."A" WHERE "T2"."C" IS NOT NULL ORDER BY 1 + +SELECT X, (SELECT X IN (SELECT B FROM TEST)) FROM SYSTEM_RANGE(1, 2); +> X X IN( SELECT DISTINCT B FROM PUBLIC.TEST) +> - ----------------------------------------- +> 1 TRUE +> 2 FALSE +> rows: 2 + +SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C + ROWNUM) IS NOT NULL ORDER BY T1.A; +>> 1 + +EXPLAIN SELECT T1.A FROM TEST T1 LEFT OUTER JOIN TEST T2 ON T1.B = T2.A WHERE (SELECT T2.C + ROWNUM) IS NOT NULL ORDER BY T1.A; +>> SELECT "T1"."A" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.TEST.tableScan */ ON "T1"."B" = "T2"."A" WHERE ("T2"."C" + CAST(1 AS BIGINT)) IS NOT NULL ORDER BY 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE A(T TIMESTAMP WITH TIME ZONE UNIQUE) AS VALUES + TIMESTAMP WITH TIME ZONE '2020-01-01 00:01:02+02', + TIMESTAMP WITH TIME ZONE '2020-01-01 00:01:02+01'; +> ok + +CREATE TABLE B(D DATE) AS VALUES DATE '2020-01-01'; +> ok + +SET TIME ZONE '01:00'; +> ok + +SELECT T FROM A JOIN B ON T >= D; +>> 2020-01-01 00:01:02+01 + +EXPLAIN SELECT T FROM A JOIN B ON T >= D; +>> SELECT "T" FROM "PUBLIC"."B" /* PUBLIC.B.tableScan */ INNER JOIN "PUBLIC"."A" /* PUBLIC.CONSTRAINT_INDEX_4: T >= D */ ON 1=1 WHERE "T" >= "D" + +SET TIME ZONE LOCAL; +> ok + +DROP TABLE A, B; +> 
ok + +CREATE TABLE TEST(T TIMESTAMP WITH TIME ZONE) AS VALUES + NULL, + TIMESTAMP WITH TIME ZONE '2020-01-01 00:00:00+00', + TIMESTAMP WITH TIME ZONE '2020-01-01 01:00:00+01', + TIMESTAMP WITH TIME ZONE '2020-01-01 02:00:00+01', + NULL; +> ok + +SELECT T AT TIME ZONE 'UTC' FROM TEST GROUP BY T; +> T AT TIME ZONE 'UTC' +> ---------------------- +> 2020-01-01 00:00:00+00 +> 2020-01-01 01:00:00+00 +> null +> rows: 3 + +CREATE INDEX TEST_T_IDX ON TEST(T); +> ok + +SELECT T AT TIME ZONE 'UTC' FROM TEST GROUP BY T; +> T AT TIME ZONE 'UTC' +> ---------------------- +> 2020-01-01 00:00:00+00 +> 2020-01-01 01:00:00+00 +> null +> rows: 3 + +EXPLAIN SELECT T AT TIME ZONE 'UTC' FROM TEST GROUP BY T; +>> SELECT "T" AT TIME ZONE 'UTC' FROM "PUBLIC"."TEST" /* PUBLIC.TEST_T_IDX */ GROUP BY "T" /* group sorted */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/queries/select.sql b/h2/src/test/org/h2/test/scripts/queries/select.sql new file mode 100644 index 0000000000..02c4d8e352 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/select.sql @@ -0,0 +1,1186 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES (1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 2, 1), (1, 2, 2), (1, 2, 3), + (2, 1, 1), (2, 1, 2), (2, 1, 3), (2, 2, 1), (2, 2, 2), (2, 2, 3); +> update count: 12 + +SELECT * FROM TEST ORDER BY A, B; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> 2 2 1 +> 2 2 2 +> 2 2 3 +> rows (partially ordered): 12 + +SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> rows (ordered): 4 + +SELECT * FROM TEST ORDER BY A, B, C FETCH FIRST 4 ROWS WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> rows (ordered): 4 + +SELECT * FROM TEST ORDER BY A, B FETCH FIRST 4 ROWS WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT * FROM TEST ORDER BY A FETCH FIRST ROW WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP (1) WITH TIES * FROM TEST ORDER BY A; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP 1 PERCENT WITH TIES * FROM TEST ORDER BY A; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT TOP 51 PERCENT WITH TIES * FROM TEST ORDER BY A, B; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 9 + +SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 3 + +SELECT * FROM TEST FETCH NEXT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> rows: 1 + +SELECT * FROM TEST FETCH FIRST 101 PERCENT ROWS ONLY; +> exception INVALID_VALUE_2 + +SELECT * FROM TEST FETCH FIRST -1 PERCENT ROWS ONLY; +> exception INVALID_VALUE_2 + 
+SELECT * FROM TEST FETCH FIRST 0 PERCENT ROWS ONLY; +> A B C +> - - - +> rows: 0 + +SELECT * FROM TEST FETCH FIRST 1 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> rows: 1 + +SELECT * FROM TEST FETCH FIRST 10 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> rows: 2 + +SELECT * FROM TEST OFFSET 2 ROWS FETCH NEXT 10 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 1 3 +> 1 2 1 +> rows: 2 + +CREATE INDEX TEST_A_IDX ON TEST(A); +> ok + +CREATE INDEX TEST_A_B_IDX ON TEST(A, B); +> ok + +SELECT * FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 6 + +SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> rows (partially ordered): 3 + +SELECT * FROM TEST FETCH FIRST 1 ROW WITH TIES; +> exception WITH_TIES_WITHOUT_ORDER_BY + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 1 2 4 +> rows (partially ordered): 4 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 50 PERCENT ROWS ONLY; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 1 2 4 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 7 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 40 PERCENT ROWS WITH TIES; +> A B C +> - - - +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 1 2 4 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> rows (partially ordered): 7 + +(SELECT * FROM TEST) UNION (SELECT 1, 2, 4) FETCH NEXT 1 ROW WITH TIES; +> exception WITH_TIES_WITHOUT_ORDER_BY + +EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 ROW WITH TIES; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT ROW WITH TIES /* index sorted */ + +EXPLAIN SELECT * FROM TEST ORDER BY A, B OFFSET 3 ROWS FETCH NEXT 1 
PERCENT ROWS WITH TIES; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B", "PUBLIC"."TEST"."C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST_A_B_IDX */ ORDER BY 1, 2 OFFSET 3 ROWS FETCH NEXT 1 PERCENT ROWS WITH TIES /* index sorted */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A VARCHAR_IGNORECASE, B VARCHAR_IGNORECASE); +> ok + +INSERT INTO TEST VALUES ('A', 1), ('a', 2), ('A', 3), ('B', 4); +> update count: 4 + +SELECT A, B FROM TEST ORDER BY A FETCH FIRST 1 ROW WITH TIES; +> A B +> - - +> A 1 +> A 3 +> a 2 +> rows (partially ordered): 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2), (2, 3); +> update count: 5 + +SELECT A, COUNT(B) FROM TEST GROUP BY A ORDER BY A OFFSET 1; +> A COUNT(B) +> - -------- +> 2 3 +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" VARCHAR) AS VALUES (1, 'A'), (2, 'B'), (3, 'C'); +> ok + +SELECT * FROM TEST ORDER BY ID DESC OFFSET 2 ROWS FETCH FIRST 2147483646 ROWS ONLY; +> ID VALUE +> -- ----- +> 1 A +> rows (ordered): 1 + +SELECT * FROM TEST ORDER BY ID DESC OFFSET 2 ROWS FETCH FIRST 2147483647 ROWS ONLY; +> ID VALUE +> -- ----- +> 1 A +> rows (ordered): 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; +> ok + +CREATE TABLE TEST2(A INT, B INT, C INT) AS SELECT 4, 5, 6; +> ok + +SELECT A, B FROM TEST1 UNION SELECT A, B FROM TEST2 ORDER BY TEST1.C; +> exception ORDER_BY_NOT_IN_RESULT + +DROP TABLE TEST1; +> ok + +DROP TABLE TEST2; +> ok + +-- Disallowed mixed OFFSET/FETCH/LIMIT/TOP clauses +CREATE TABLE TEST (ID BIGINT); +> ok + +SELECT TOP 1 ID FROM TEST OFFSET 1 ROW; +> exception SYNTAX_ERROR_1 + +SELECT TOP 1 ID FROM TEST FETCH NEXT ROW ONLY; +> exception SYNTAX_ERROR_1 + +SELECT TOP 1 ID FROM TEST LIMIT 1; +> exception SYNTAX_ERROR_1 + +SELECT ID FROM TEST OFFSET 1 ROW LIMIT 1; +> exception SYNTAX_ERROR_1 + +SELECT ID FROM TEST FETCH NEXT ROW ONLY LIMIT 1; +> exception 
SYNTAX_ERROR_1 + +DROP TABLE TEST; +> ok + +-- ORDER BY with parameter +CREATE TABLE TEST(A INT, B INT); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> update count: 4 + +SELECT * FROM TEST ORDER BY ?, ? FETCH FIRST ROW ONLY; +{ +1, 2 +> A B +> - - +> 1 1 +> rows (ordered): 1 +-1, 2 +> A B +> - - +> 2 1 +> rows (ordered): 1 +1, -2 +> A B +> - - +> 1 2 +> rows (ordered): 1 +-1, -2 +> A B +> - - +> 2 2 +> rows (ordered): 1 +2, -1 +> A B +> - - +> 2 1 +> rows (ordered): 1 +} +> update count: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST1(A INT, B INT, C INT) AS SELECT 1, 2, 3; +> ok + +CREATE TABLE TEST2(A INT, D INT) AS SELECT 4, 5; +> ok + +SELECT * FROM TEST1, TEST2; +> A B C A D +> - - - - - +> 1 2 3 4 5 +> rows: 1 + +SELECT * EXCEPT (A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (PUBLIC.TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (SCRIPT.PUBLIC.TEST1.A) FROM TEST1; +> B C +> - - +> 2 3 +> rows: 1 + +SELECT * EXCEPT (Z) FROM TEST1; +> exception COLUMN_NOT_FOUND_1 + +SELECT * EXCEPT (B, TEST1.B) FROM TEST1; +> exception DUPLICATE_COLUMN_NAME_1 + +SELECT * EXCEPT (A) FROM TEST1, TEST2; +> exception AMBIGUOUS_COLUMN_NAME_1 + +SELECT * EXCEPT (TEST1.A, B, TEST2.D) FROM TEST1, TEST2; +> C A +> - - +> 3 4 +> rows: 1 + +SELECT TEST1.*, TEST2.* FROM TEST1, TEST2; +> A B C A D +> - - - - - +> 1 2 3 4 5 +> rows: 1 + +SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (A) FROM TEST1, TEST2; +> B C D +> - - - +> 2 3 5 +> rows: 1 + +SELECT TEST1.* EXCEPT (A), TEST2.* EXCEPT (D) FROM TEST1, TEST2; +> B C A +> - - - +> 2 3 4 +> rows: 1 + +SELECT * EXCEPT (T1.A, T2.D) FROM TEST1 T1, TEST2 T2; +> B C A +> - - - +> 2 3 4 +> rows: 1 + +DROP TABLE TEST1, TEST2; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, "VALUE" INT NOT NULL); +> ok + +INSERT INTO TEST VALUES (1, 1), (2, 1), (3, 2); +> update count: 3 + +SELECT ID, "VALUE" FROM TEST FOR UPDATE; +> 
ID VALUE +> -- ----- +> 1 1 +> 2 1 +> 3 2 +> rows: 3 + +-- Check that NULL row is returned from SELECT FOR UPDATE +CREATE TABLE T1(A INT PRIMARY KEY) AS VALUES 1, 2; +> ok + +CREATE TABLE T2(B INT PRIMARY KEY) AS VALUES 1; +> ok + +SELECT * FROM T1 LEFT JOIN T2 ON A = B FOR UPDATE; +> A B +> - ---- +> 1 1 +> 2 null +> rows: 2 + +DROP TABLE T1, T2; +> ok + +SELECT DISTINCT "VALUE" FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT DISTINCT ON("VALUE") ID, "VALUE" FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT SUM("VALUE") FROM TEST FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT ID FROM TEST GROUP BY "VALUE" FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +SELECT 1 FROM TEST HAVING TRUE FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT) AS SELECT X, X + 1 FROM SYSTEM_RANGE(1, 3); +> ok + +SELECT ID FROM TEST WHERE ID != ALL (SELECT ID FROM TEST WHERE ID IN(1, 3)); +> ID +> -- +> 2 +> rows: 1 + +SELECT (1, 3) > ANY (SELECT ID, V FROM TEST); +>> TRUE + +SELECT (1, 2) > ANY (SELECT ID, V FROM TEST); +>> FALSE + +SELECT (2, 3) = ANY (SELECT ID, V FROM TEST); +>> TRUE + +SELECT (3, 4) > ALL (SELECT ID, V FROM TEST); +>> FALSE + +DROP TABLE TEST; +> ok + +SELECT 1 = ALL (SELECT * FROM VALUES (NULL), (1), (2), (NULL) ORDER BY 1); +>> FALSE + +CREATE TABLE TEST(G INT, V INT); +> ok + +INSERT INTO TEST VALUES (10, 1), (11, 2), (20, 4); +> update count: 3 + +SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G / 10, G / 10; +> G1 G2 S +> -- -- - +> 1 1 3 +> 2 2 4 +> rows: 2 + +SELECT G / 10 G1, G / 10 G2, SUM(T.V) S FROM TEST T GROUP BY G2; +> G1 G2 S +> -- -- - +> 1 1 3 +> 2 2 4 +> rows: 2 + +DROP TABLE TEST; +> ok + +@reconnect off + +CALL RAND(0); +>> 0.730967787376657 + +SELECT 
RAND(), RAND() + 1, RAND() + 1, RAND() GROUP BY RAND() + 1; +> RAND() RAND() + 1 RAND() + 1 RAND() +> ------------------ ------------------ ------------------ ------------------ +> 0.6374174253501083 1.2405364156714858 1.2405364156714858 0.5504370051176339 +> rows: 1 + +SELECT RAND() A, RAND() + 1 B, RAND() + 1 C, RAND() D, RAND() + 2 E, RAND() + 3 F GROUP BY B, C, E, F; +> A B C D E F +> ------------------ ------------------ ------------------ ------------------ ------------------ ------------------ +> 0.8791825178724801 1.3332183994766498 1.3332183994766498 0.9412491794821144 2.3851891847407183 3.9848415401998087 +> rows: 1 + +@reconnect on + +CREATE TABLE TEST (A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES (11, 12, 13), (21, 22, 23), (31, 32, 33); +> update count: 3 + +SELECT * FROM TEST WHERE (A, B) IN (VALUES (11, 12), (21, 22), (41, 42)); +> A B C +> -- -- -- +> 11 12 13 +> 21 22 23 +> rows: 2 + +SELECT * FROM TEST WHERE (A, B) = (VALUES (11, 12)); +> A B C +> -- -- -- +> 11 12 13 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1::BIGINT, 2); +> ok + +SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); +> A B +> - - +> 1 2 +> rows: 1 + +UPDATE TEST SET A = 1000000000000; +> update count: 1 + +SELECT * FROM TEST WHERE (A, B) IN ((1, 2), (3, 4)); +> A B +> - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A BIGINT, B INT) AS VALUES (1, 2); +> ok + +SELECT * FROM TEST WHERE (A, B) IN ((1::BIGINT, 2), (3, 4)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM TEST WHERE (A, B) IN ((1000000000000, 2), (3, 4)); +> A B +> - - +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(I) AS VALUES 1, 2, 3; +> ok + +SELECT COUNT(*) C FROM TEST HAVING C < 1; +> C +> - +> rows: 0 + +SELECT COUNT(*) C FROM TEST QUALIFY C < 1; +> C +> - +> rows: 0 + +DROP TABLE TEST; +> ok + +SELECT A, ROW_NUMBER() OVER (ORDER BY B) R +FROM (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); +> A R +> - - +> 1 2 +> 2 1 +> 3 3 +> rows: 3 + 
+SELECT X, A, ROW_NUMBER() OVER (ORDER BY B) R +FROM (SELECT 1 X), (VALUES (1, 2), (2, 1), (3, 3)) T(A, B); +> X A R +> - - - +> 1 1 2 +> 1 2 1 +> 1 3 3 +> rows: 3 + +SELECT A, SUM(S) OVER (ORDER BY S) FROM + (SELECT A, SUM(B) FROM (VALUES (1, 2), (1, 3), (3, 5), (3, 10)) V(A, B) GROUP BY A) S(A, S); +> A SUM(S) OVER (ORDER BY S) +> - ------------------------ +> 1 5 +> 3 20 +> rows: 2 + +SELECT A, SUM(A) OVER W SUM FROM (VALUES 1, 2) T(A) WINDOW W AS (ORDER BY A); +> A SUM +> - --- +> 1 1 +> 2 3 +> rows: 2 + +SELECT A, B, C FROM (SELECT A, B, C FROM (VALUES (1, 2, 3)) V(A, B, C)); +> A B C +> - - - +> 1 2 3 +> rows: 1 + +SELECT * FROM (SELECT * FROM (VALUES (1, 2, 3)) V(A, B, C)); +> A B C +> - - - +> 1 2 3 +> rows: 1 + +SELECT * FROM + (SELECT X * X, Y FROM + (SELECT A + 5, B FROM + (VALUES (1, 2)) V(A, B) + ) T(X, Y) + ); +> X * X Y +> ----- - +> 36 2 +> rows: 1 + +CREATE TABLE TEST("_ROWID_" INT) AS VALUES 2; +> ok + +SELECT _ROWID_ S1, TEST._ROWID_ S2, PUBLIC.TEST._ROWID_ S3, SCRIPT.PUBLIC.TEST._ROWID_ S4, + "_ROWID_" U1, TEST."_ROWID_" U2, PUBLIC.TEST."_ROWID_" U3, SCRIPT.PUBLIC.TEST."_ROWID_" U4 + FROM TEST; +> S1 S2 S3 S4 U1 U2 U3 U4 +> -- -- -- -- -- -- -- -- +> 1 1 1 1 2 2 2 2 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID BIGINT PRIMARY KEY); +> ok + +SELECT X.ID FROM TEST X JOIN TEST Y ON Y.ID IN (SELECT 1); +> ID +> -- +> rows: 0 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 10), (2, 20), (4, 40); +> ok + +SELECT T1.A, T2.ARR FROM TEST T1 JOIN ( + SELECT A, ARRAY_AGG(B) OVER (ORDER BY B ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) ARR FROM TEST +) T2 ON T1.A = T2.A; +> A ARR +> - -------- +> 1 [20, 40] +> 2 [40] +> 4 null +> rows: 3 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, V INT UNIQUE); +> ok + +EXPLAIN SELECT * FROM TEST ORDER BY ID FOR UPDATE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2 */ ORDER BY 1 FOR UPDATE /* index sorted */ + 
+EXPLAIN SELECT * FROM TEST ORDER BY V; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 /* index sorted */ + +EXPLAIN SELECT * FROM TEST ORDER BY V FOR UPDATE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."V" FROM "PUBLIC"."TEST" /* PUBLIC.CONSTRAINT_INDEX_2 */ ORDER BY 2 FOR UPDATE + +DROP TABLE TEST; +> ok + +-- The next tests should be at the end of this file + +SET MAX_MEMORY_ROWS = 1; +> ok + +CREATE TABLE TEST(I INT) AS SELECT * FROM SYSTEM_RANGE(1, 10); +> ok + +SELECT COUNT(*) FROM (SELECT I, SUM(I) S, COUNT(I) C FROM TEST GROUP BY I HAVING S + C <= 9 ORDER BY I); +>> 8 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT); +> ok + +EXPLAIN SELECT * FROM TEST WHERE A = 1 AND B = 1 OR A = 2 AND B = 2; +>> SELECT "PUBLIC"."TEST"."A", "PUBLIC"."TEST"."B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE (("A" = 1) AND ("B" = 1)) OR (("A" = 2) AND ("B" = 2)) + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (1, 3), (5, 5); +> ok + +SELECT (SELECT A, B FROM TEST ORDER BY A + B FETCH FIRST ROW ONLY); +>> ROW (1, 2) + +SELECT * FROM TEST UNION ALL SELECT * FROM TEST OFFSET 2 ROWS; +> A B +> - - +> 1 2 +> 1 3 +> 5 5 +> 5 5 +> rows: 4 + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST OFFSET 2 ROWS); +>> TRUE + +SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS; +> A B +> - - +> 1 3 +> 1 3 +> 1 2 +> 1 2 +> rows (ordered): 4 + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS); +>> TRUE + +SELECT (1, 2) IN (SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY A DESC, B DESC OFFSET 2 ROWS FETCH NEXT 1 ROW ONLY); +>> FALSE + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, NAME VARCHAR, DATA VARCHAR); +> ok + +-- This ORDER BY condition is currently forbidden +SELECT DISTINCT DATA FROM TEST ORDER BY (CASE WHEN EXISTS(SELECT * FROM TEST T WHERE 
T.NAME = 'A') THEN 1 ELSE 2 END); +> exception ORDER_BY_NOT_IN_RESULT + +SELECT DISTINCT DATA FROM TEST X ORDER BY (CASE WHEN EXISTS(SELECT * FROM TEST T WHERE T.ID = X.ID + 1) THEN 1 ELSE 2 END); +> exception ORDER_BY_NOT_IN_RESULT + +DROP TABLE TEST; +> ok + +-- Additional GROUP BY tests + +CREATE TABLE TEST(A INT, B INT, C INT) AS (VALUES + (NULL, NULL, NULL), (NULL, NULL, 1), (NULL, NULL, 2), + (NULL, 1, NULL), (NULL, 1, 1), (NULL, 1, 2), + (NULL, 2, NULL), (NULL, 2, 1), (NULL, 2, 2), + (1, NULL, NULL), (1, NULL, 1), (1, NULL, 2), + (1, 1, NULL), (1, 1, 1), (1, 1, 2), + (1, 2, NULL), (1, 2, 1), (1, 2, 2), + (2, NULL, NULL), (2, NULL, 1), (2, NULL, 2), + (2, 1, NULL), (2, 1, 1), (2, 1, 2), + (2, 2, NULL), (2, 2, 1), (2, 2, 2)); +> ok + +SELECT SUM(A), B, C FROM TEST GROUP BY B, C; +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY B, C; +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT SUM(A), B, C FROM TEST GROUP BY (B), C, (); +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY (B), C, (); +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT SUM(A), B, C FROM TEST GROUP BY (B, C); +> SUM(A) B C +> ------ ---- ---- +> 3 1 1 +> 3 1 2 +> 3 1 null +> 3 2 1 +> 3 2 2 +> 3 2 null +> 3 null 1 +> 3 null 2 +> 3 null null +> rows: 9 + +EXPLAIN SELECT SUM(A), B, C FROM TEST GROUP BY (B, C); +>> SELECT SUM("A"), "B", "C" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "B", "C" + +SELECT COUNT(*) FROM TEST; +>> 27 + +EXPLAIN SELECT COUNT(*) FROM TEST; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(*) FROM TEST GROUP BY 
(); +>> 27 + +EXPLAIN SELECT COUNT(*) FROM TEST GROUP BY (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */ + +SELECT COUNT(*) FROM TEST WHERE FALSE; +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE; +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (); +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (), (); +>> 0 + +EXPLAIN SELECT COUNT(*) FROM TEST WHERE FALSE GROUP BY (), (); +>> SELECT COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +SELECT 1 FROM TEST GROUP BY (); +>> 1 + +EXPLAIN SELECT 1 FROM TEST GROUP BY (); +>> SELECT 1 FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +EXPLAIN SELECT FALSE AND MAX(A) > 0 FROM TEST; +>> SELECT FALSE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY () /* direct lookup */ + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT PRIMARY KEY) AS (VALUES 1, 2, 3); +> ok + +SELECT A AS A1, A AS A2 FROM TEST GROUP BY A; +> A1 A2 +> -- -- +> 1 1 +> 2 2 +> 3 3 +> rows: 3 + +DROP TABLE TEST; +> ok + +-- Tests for SELECT without columns + +EXPLAIN SELECT *; +>> SELECT + +SELECT; +> +> +> +> rows: 1 + +SELECT FROM DUAL; +> +> +> +> rows: 1 + +SELECT * FROM DUAL JOIN (SELECT * FROM DUAL) ON 1 = 1; +> +> +> +> rows: 1 + +EXPLAIN SELECT * FROM DUAL JOIN (SELECT * FROM DUAL) ON 1 = 1; +>> SELECT FROM DUAL /* dual index */ INNER JOIN ( SELECT ) "_7" /* SELECT */ ON 1=1 + +SELECT WHERE FALSE; +> +> +> rows: 0 + +SELECT GROUP BY (); +> +> +> +> rows: 1 + +SELECT HAVING FALSE; +> +> +> rows: 0 + +SELECT QUALIFY FALSE; +> +> +> rows: 0 + +SELECT ORDER BY (SELECT 1); +> +> +> +> rows: 1 + +SELECT OFFSET 0 ROWS; +> +> +> +> rows: 1 + +SELECT FETCH FIRST 0 ROWS ONLY; +> +> +> rows: 0 + 
+CREATE TABLE TEST(A INT, B INT, C INT, D INT); +> ok + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) + C; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY ("A" + "B") + "C" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" + +EXPLAIN SELECT 1 FROM (SELECT SUM(D) FROM TEST GROUP BY (A + B)) T; +>> SELECT 1 FROM ( SELECT SUM("D") FROM "PUBLIC"."TEST" GROUP BY "A" + "B" ) "T" /* SELECT SUM(D) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ GROUP BY A + B */ + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B), C; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B", "C" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) HAVING TRUE; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" HAVING TRUE + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) WINDOW W AS (); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) QUALIFY TRUE; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" QUALIFY TRUE + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) UNION VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") UNION (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) EXCEPT VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") EXCEPT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) MINUS VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") EXCEPT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) INTERSECT VALUES 1; +>> (SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B") INTERSECT (VALUES (1)) + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) ORDER BY 
SUM(D); +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" ORDER BY 1 + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) OFFSET 0 ROWS; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" OFFSET 0 ROWS + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) FETCH FIRST ROW ONLY; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" FETCH FIRST ROW ONLY + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) LIMIT 1; +>> SELECT SUM("D") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "A" + "B" FETCH FIRST ROW ONLY + +EXPLAIN SELECT SUM(D) FROM TEST GROUP BY (A + B) FOR UPDATE; +> exception FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(A INT) AS VALUES 1, 2; +> ok + +SELECT A, A FROM TEST GROUP BY A HAVING SUM(A) > 0; +> A A +> - - +> 1 1 +> 2 2 +> rows: 2 + +DROP TABLE TEST; +> ok + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) A ORDER BY (SELECT X FROM SYSTEM_RANGE(1, 20) B WHERE A.X = B.X); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) "A" /* range index */ ORDER BY (SELECT "X" FROM SYSTEM_RANGE(1, 20) "B" /* range index: X = A.X */ WHERE "A"."X" = "B"."X") + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY 'a'; +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ + +EXPLAIN SELECT (SELECT 1); +>> SELECT 1 + +EXPLAIN SELECT (SELECT DISTINCT 1); +>> SELECT 1 + +EXPLAIN SELECT (SELECT DISTINCT ON(RAND()) 1); +>> SELECT 1 + +EXPLAIN SELECT (SELECT 1 WHERE TRUE); +>> SELECT 1 + +EXPLAIN SELECT (SELECT 1 HAVING TRUE); +>> SELECT (SELECT 1 HAVING TRUE) + +EXPLAIN SELECT (SELECT 1 QUALIFY TRUE); +>> SELECT (SELECT 1 QUALIFY TRUE) + +EXPLAIN SELECT (VALUES 1, 2 OFFSET 1 ROW); +>> SELECT 2 + +EXPLAIN SELECT (VALUES 1, 2 OFFSET RAND() ROWS); +>> SELECT (VALUES (1), (2) OFFSET RAND() ROWS) + +EXPLAIN SELECT (VALUES 1 FETCH FIRST 2 ROWS ONLY); +>> SELECT 1 + +EXPLAIN SELECT (VALUES 1, 2 FETCH FIRST RAND() ROWS 
ONLY); +>> SELECT (VALUES (1), (2) FETCH FIRST RAND() ROWS ONLY) + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY (SELECT 1); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY (SELECT RAND()); +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ ORDER BY RAND() + +EXPLAIN SELECT (SELECT 1, RAND()); +>> SELECT ROW (1, RAND()) + +EXPLAIN SELECT (VALUES (1, RAND())); +>> SELECT ROW (1, RAND()) + +EXPLAIN SELECT (VALUES 1, RAND()); +>> SELECT (VALUES (1), (RAND())) + +EXPLAIN SELECT X FROM SYSTEM_RANGE(1, 10) ORDER BY X, (1+1), -X; +>> SELECT "X" FROM SYSTEM_RANGE(1, 10) /* range index */ ORDER BY 1, - "X" + + +CREATE TABLE T1 ( + T1_ID BIGINT PRIMARY KEY +); +> ok + +INSERT INTO T1 VALUES 1, 2, 3; +> update count: 3 + +CREATE TABLE T2 ( + T2_ID BIGINT PRIMARY KEY, + T1_ID BIGINT NOT NULL REFERENCES T1 +); +> ok + +INSERT INTO T2 VALUES (1, 1), (2, 1), (3, 2), (4, 3); +> update count: 4 + +SELECT * FROM (SELECT * FROM T1 FETCH FIRST 2 ROWS ONLY) T1 JOIN T2 USING (T1_ID); +> T1_ID T2_ID +> ----- ----- +> 1 1 +> 1 2 +> 2 3 +> rows: 3 + + +DROP TABLE T2, T1; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (SELECT ' || (SELECT LISTAGG('1 C' || X) FROM SYSTEM_RANGE(1, 16384)) || ')'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (SELECT ' || (SELECT LISTAGG('1 C' || X) FROM SYSTEM_RANGE(1, 16385)) || ')'; +> exception TOO_MANY_COLUMNS_1 + +CREATE TABLE TEST(A INT, B INT); +> ok + +CREATE INDEX TEST_IDX ON TEST(A, B); +> ok + +INSERT INTO TEST VALUES (1, 1), (1, 2), (2, 1), (2, 2); +> update count: 4 + +SELECT A, 1 AS X, B FROM TEST ORDER BY A, X, B DESC; +> A X B +> - - - +> 1 1 2 +> 1 1 1 +> 2 1 2 +> 2 1 1 +> rows (ordered): 4 + +EXPLAIN SELECT A, 1 AS X, B FROM TEST ORDER BY A, X, B DESC; +>> SELECT "A", 1 AS "X", "B" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY 1, 2, 3 DESC + +DROP TABLE TEST; +> ok + +SELECT X FROM 
SYSTEM_RANGE(1, 2) ORDER BY X DESC FETCH FIRST 0xFFFFFFFF ROWS ONLY; +> X +> - +> 2 +> 1 +> rows (ordered): 2 + +SELECT ((SELECT 1 X) EXCEPT (SELECT 1 Y)) T; +> T +> ---- +> null +> rows: 1 diff --git a/h2/src/test/org/h2/test/scripts/queries/table.sql b/h2/src/test/org/h2/test/scripts/queries/table.sql new file mode 100644 index 0000000000..a4d234739b --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/table.sql @@ -0,0 +1,64 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(A INT, B INT, C INT); +> ok + +INSERT INTO TEST VALUES (1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 2, 1), (1, 2, 2), (1, 2, 3), + (2, 1, 1), (2, 1, 2), (2, 1, 3), (2, 2, 1), (2, 2, 2), (2, 2, 3); +> update count: 12 + +TABLE TEST ORDER BY A, B; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> 1 2 2 +> 1 2 3 +> 2 1 1 +> 2 1 2 +> 2 1 3 +> 2 2 1 +> 2 2 2 +> 2 2 3 +> rows (partially ordered): 12 + +TABLE TEST ORDER BY A, B, C FETCH FIRST 4 ROWS ONLY; +> A B C +> - - - +> 1 1 1 +> 1 1 2 +> 1 1 3 +> 1 2 1 +> rows (ordered): 4 + +SELECT * FROM (TABLE TEST) ORDER BY A, B, C FETCH FIRST ROW ONLY; +> A B C +> - - - +> 1 1 1 +> rows (ordered): 1 + +SELECT (1, 2, 3) IN (TABLE TEST); +>> TRUE + +SELECT (TABLE TEST FETCH FIRST ROW ONLY) "ROW"; +> ROW +> ------------- +> ROW (1, 1, 1) +> rows: 1 + +EXPLAIN TABLE TEST ORDER BY A; +>> TABLE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ ORDER BY 1 + +CREATE INDEX TEST_A_INDEX ON TEST(A); +> ok + +EXPLAIN TABLE TEST ORDER BY A; +>> TABLE "PUBLIC"."TEST" /* PUBLIC.TEST_A_INDEX */ ORDER BY 1 /* index sorted */ + +DROP TABLE TEST; +> ok diff --git a/h2/src/test/org/h2/test/scripts/queries/values.sql b/h2/src/test/org/h2/test/scripts/queries/values.sql new file mode 100644 index 0000000000..410945e759 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/values.sql @@ -0,0 +1,115 @@ +-- Copyright 2004-2022 H2 
Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- + +VALUES (1, 2); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +VALUES ROW (1, 2); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +VALUES 1, 2; +> C1 +> -- +> 1 +> 2 +> rows: 2 + +VALUES 4, 3, 1, 2 ORDER BY 1 FETCH FIRST 75 PERCENT ROWS ONLY; +> C1 +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +SELECT * FROM (VALUES (1::BIGINT, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES (1000000000000, 2)) T (A, B) WHERE (A, B) IN (VALUES(1, 2)); +> A B +> - - +> rows: 0 + +SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1::BIGINT, 2)); +> A B +> - - +> 1 2 +> rows: 1 + +SELECT * FROM (VALUES (1, 2)) T (A, B) WHERE (A, B) IN (VALUES(1000000000000, 2)); +> A B +> - - +> rows: 0 + +EXPLAIN VALUES 1, (2), ROW(3); +>> VALUES (1), (2), (3) + +EXPLAIN VALUES (1, 2), (3, 4); +>> VALUES (1, 2), (3, 4) + +EXPLAIN SELECT * FROM (VALUES 1, 2) T(V); +>> SELECT "T"."V" FROM (VALUES (1), (2)) "T"("V") /* table scan */ + +EXPLAIN SELECT * FROM (VALUES 1, 2); +>> SELECT "_0"."C1" FROM (VALUES (1), (2)) "_0" /* table scan */ + +EXPLAIN SELECT * FROM (VALUES 1, 2 ORDER BY 1 DESC); +>> SELECT "_1"."C1" FROM ( VALUES (1), (2) ORDER BY 1 DESC ) "_1" /* VALUES (1), (2) ORDER BY 1 DESC */ + +-- Non-standard syntax +EXPLAIN SELECT * FROM VALUES 1, 2; +>> SELECT "_2"."C1" FROM (VALUES (1), (2)) "_2" /* table scan */ + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2; +> C1 C2 +> -- -- +> 1 2 +> 5 1 +> 3 4 +> rows (ordered): 3 + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2; +> C1 C2 +> -- -- +> 1 2 +> 5 1 +> 3 4 +> rows (ordered): 3 + +VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2 OFFSET 1 ROW FETCH FIRST 1 ROW ONLY; +> C1 C2 +> -- -- +> 5 1 +> rows (ordered): 1 + +EXPLAIN VALUES (1, 2), (3, 4), (5, 1) ORDER BY C1 + C2, C1 * C2 OFFSET 1 ROW FETCH FIRST 1 ROW ONLY; +>> VALUES (1, 2), (3, 4), 
(5, 1) ORDER BY "C1" + "C2", "C1" * "C2" OFFSET 1 ROW FETCH NEXT ROW ONLY + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (VALUES (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16384)) || '))'; +> ok + +DROP TABLE TEST; +> ok + +EXECUTE IMMEDIATE 'CREATE TABLE TEST AS SELECT C1 FROM (VALUES (' || (SELECT LISTAGG('1') FROM SYSTEM_RANGE(1, 16385)) || '))'; +> exception TOO_MANY_COLUMNS_1 + +VALUES (1), (1, 2); +> exception COLUMN_COUNT_DOES_NOT_MATCH + +EXPLAIN SELECT C1, 2 FROM (VALUES 1, 2, 3) T ORDER BY 1; +>> SELECT "C1", 2 FROM (VALUES (1), (2), (3)) "T" /* table scan */ ORDER BY 1 + +EXPLAIN SELECT C1, 2 FROM (VALUES 1, 2, 3) T ORDER BY (1); +>> SELECT "C1", 2 FROM (VALUES (1), (2), (3)) "T" /* table scan */ diff --git a/h2/src/test/org/h2/test/scripts/queries/window.sql b/h2/src/test/org/h2/test/scripts/queries/window.sql new file mode 100644 index 0000000000..7e1e8560ac --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/queries/window.sql @@ -0,0 +1,232 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +CREATE TABLE TEST(ID INT, R INT, CATEGORY INT); +> ok + +INSERT INTO TEST VALUES + (1, 4, 1), + (2, 3, 1), + (3, 2, 2), + (4, 1, 2); +> update count: 4 + +SELECT *, ROW_NUMBER() OVER W FROM TEST; +> exception WINDOW_NOT_FOUND_1 + +SELECT * FROM TEST WINDOW W AS W1, W1 AS (); +> exception SYNTAX_ERROR_2 + +SELECT *, ROW_NUMBER() OVER W1, ROW_NUMBER() OVER W2 FROM TEST + WINDOW W1 AS (W2 ORDER BY ID), W2 AS (PARTITION BY CATEGORY ORDER BY ID DESC); +> ID R CATEGORY ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID) ROW_NUMBER() OVER (PARTITION BY CATEGORY ORDER BY ID DESC) +> -- - -------- ----------------------------------------------------- ---------------------------------------------------------- +> 1 4 1 1 2 +> 2 3 1 2 1 +> 3 2 2 1 2 +> 4 1 2 2 1 +> rows: 4 + +SELECT *, LAST_VALUE(ID) OVER W FROM TEST + WINDOW W AS (PARTITION BY CATEGORY ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW); +> ID R CATEGORY LAST_VALUE(ID) OVER (PARTITION BY CATEGORY ORDER BY ID RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING EXCLUDE CURRENT ROW) +> -- - -------- ------------------------------------------------------------------------------------------------------------------------------------- +> 1 4 1 2 +> 2 3 1 1 +> 3 2 2 4 +> 4 1 2 3 +> rows: 4 + +DROP TABLE TEST; +> ok + +SELECT MAX(MAX(X) OVER ()) OVER () FROM VALUES (1); +> exception INVALID_USE_OF_AGGREGATE_FUNCTION_1 + +SELECT MAX(MAX(X) OVER ()) FROM VALUES (1); +> exception INVALID_USE_OF_AGGREGATE_FUNCTION_1 + +SELECT MAX(MAX(X)) FROM VALUES (1); +> exception INVALID_USE_OF_AGGREGATE_FUNCTION_1 + +CREATE TABLE TEST(ID INT, CATEGORY INT); +> ok + +INSERT INTO TEST VALUES + (1, 1), + (2, 1), + (4, 2), + (8, 2), + (16, 3), + (32, 3); +> update count: 6 + +SELECT ROW_NUMBER() OVER (ORDER /**/ BY CATEGORY), SUM(ID) FROM TEST GROUP BY CATEGORY HAVING SUM(ID) = 12; +> ROW_NUMBER() OVER (ORDER BY CATEGORY) SUM(ID) +> 
------------------------------------- ------- +> 1 12 +> rows: 1 + +SELECT ROW_NUMBER() OVER (ORDER /**/ BY CATEGORY), SUM(ID) FROM TEST GROUP BY CATEGORY HAVING CATEGORY = 2; +> ROW_NUMBER() OVER (ORDER BY CATEGORY) SUM(ID) +> ------------------------------------- ------- +> 1 12 +> rows: 1 + +SELECT ROW_NUMBER() OVER (ORDER BY CATEGORY), SUM(ID) FROM TEST GROUP BY CATEGORY HAVING CATEGORY > 1; +> ROW_NUMBER() OVER (ORDER BY CATEGORY) SUM(ID) +> ------------------------------------- ------- +> 1 12 +> 2 48 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, CATEGORY BOOLEAN); +> ok + +INSERT INTO TEST VALUES + (1, FALSE), + (2, FALSE), + (4, TRUE), + (8, TRUE), + (16, FALSE), + (32, FALSE); +> update count: 6 + +SELECT ROW_NUMBER() OVER (ORDER BY CATEGORY), SUM(ID) FROM TEST GROUP BY CATEGORY HAVING SUM(ID) = 12; +> ROW_NUMBER() OVER (ORDER BY CATEGORY) SUM(ID) +> ------------------------------------- ------- +> 1 12 +> rows: 1 + +SELECT ROW_NUMBER() OVER (ORDER BY CATEGORY), SUM(ID) FROM TEST GROUP BY CATEGORY HAVING CATEGORY; +> ROW_NUMBER() OVER (ORDER BY CATEGORY) SUM(ID) +> ------------------------------------- ------- +> 1 12 +> rows: 1 + +SELECT SUM(ID) OVER (ORDER BY ID ROWS NULL PRECEDING) P FROM TEST; +> exception INVALID_PRECEDING_OR_FOLLOWING_1 + +SELECT SUM(ID) OVER (ORDER BY ID RANGE NULL PRECEDING) P FROM TEST; +> exception INVALID_PRECEDING_OR_FOLLOWING_1 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS FIRST RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS FIRST; +> A ID V +> --------- -- ---- +> [3, 1, 2] 2 null +> [3, 1] 1 1 +> [3, 1] 3 2 +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS LAST RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS LAST; +> A ID V +> --------- -- ---- +> [2, 3, 1] 1 1 +> [2, 3, 1] 3 2 +> [2] 2 null +> rows (ordered): 3 + +SELECT 
ARRAY_AGG(ID) OVER (ORDER BY V NULLS FIRST RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS FIRST; +> A ID V +> --------- -- ---- +> [3, 1, 2] 2 null +> [3] 1 1 +> null 3 2 +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS LAST RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS LAST; +> A ID V +> ------ -- ---- +> [2, 3] 1 1 +> [2] 3 2 +> [2] 2 null +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS FIRST; +> A ID V +> --------- -- ---- +> [2] 2 null +> [2, 1, 3] 1 1 +> [2, 1, 3] 3 2 +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS LAST RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS LAST; +> A ID V +> --------- -- ---- +> [1, 3] 1 1 +> [1, 3] 3 2 +> [1, 3, 2] 2 null +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS FIRST; +> A ID V +> ------ -- ---- +> [2] 2 null +> [2] 1 1 +> [2, 1] 3 2 +> rows (ordered): 3 + +SELECT ARRAY_AGG(ID) OVER (ORDER BY V NULLS LAST RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) A, + ID, V FROM VALUES (1, 1), (2, NULL), (3, 2) T(ID, V) ORDER BY V NULLS LAST; +> A ID V +> --------- -- ---- +> null 1 1 +> [1] 3 2 +> [1, 3, 2] 2 null +> rows (ordered): 3 + +SELECT SUM(V) OVER (ORDER BY V RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM VALUES (TRUE) T(V); +> exception INVALID_VALUE_2 + +SELECT + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN 10000000000 PRECEDING AND CURRENT ROW) P, + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN 10000000001 PRECEDING AND 10000000000 PRECEDING) P2, + SUM(ID) 
OVER (ORDER BY ID RANGE BETWEEN CURRENT ROW AND 2147483647 FOLLOWING) F, + SUM(ID) OVER (ORDER BY ID RANGE BETWEEN 2147483647 FOLLOWING AND 2147483648 FOLLOWING) F2, + ID FROM TEST ORDER BY ID; +> P P2 F F2 ID +> -- ---- -- ---- -- +> 1 null 63 null 1 +> 3 null 62 null 2 +> 7 null 60 null 4 +> 15 null 56 null 8 +> 31 null 48 null 16 +> 63 null 32 null 32 +> rows (ordered): 6 + +DROP TABLE TEST; +> ok + +SELECT + ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL '1' DAY PRECEDING AND CURRENT ROW) C, + ARRAY_AGG(T) OVER (ORDER BY T RANGE BETWEEN INTERVAL '2' HOUR PRECEDING AND INTERVAL '1' HOUR PRECEDING) P, + T FROM VALUES (TIME '00:00:00'), (TIME '01:30:00') TEST(T) ORDER BY T; +> C P T +> -------------------- ---------- -------- +> [00:00:00] null 00:00:00 +> [00:00:00, 01:30:00] [00:00:00] 01:30:00 +> rows (ordered): 2 + +SELECT SUM(A) OVER (ORDER BY A, B RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) S FROM VALUES (1, 2) T(A, B); +>> 1 + +SELECT SUM(A) OVER (ORDER BY A, B RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) S FROM VALUES (1, 2) T(A, B); +> exception SYNTAX_ERROR_2 + +SELECT SUM(A) OVER (ORDER BY A, B RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) S FROM VALUES (1, 2) T(A, B); +> exception SYNTAX_ERROR_2 + +SELECT SUM(A) OVER (GROUPS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) S FROM VALUES (1, 2) T(A, B); +> exception SYNTAX_ERROR_2 diff --git a/h2/src/test/org/h2/test/scripts/range_table.sql b/h2/src/test/org/h2/test/scripts/range_table.sql new file mode 100644 index 0000000000..b3b758b2e4 --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/range_table.sql @@ -0,0 +1,235 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- + +explain select * from system_range(1, 2) where x=x+1 and x=1; +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index: X = CAST(1 AS BIGINT) */ WHERE ("X" = CAST(1 AS BIGINT)) AND ("X" = ("X" + 1)) + +explain select * from system_range(1, 2) where not (x = 1 and x*2 = 2); +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */ WHERE ("X" <> CAST(1 AS BIGINT)) OR (("X" * 2) <> 2) + +explain select * from system_range(1, 10) where (NOT x >= 5); +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 10) /* range index: X < CAST(5 AS BIGINT) */ WHERE "X" < CAST(5 AS BIGINT) + +select (select t1.x from system_range(1,1) t2) from system_range(1,1) t1; +> (SELECT T1.X FROM SYSTEM_RANGE(1, 1) T2) +> ---------------------------------------- +> 1 +> rows: 1 + +EXPLAIN PLAN FOR SELECT * FROM SYSTEM_RANGE(1, 20); +>> SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 20) /* range index */ + +select sum(x) from system_range(2, 1000) r where +not exists(select * from system_range(2, 32) r2 where r.x>r2.x and mod(r.x, r2.x)=0); +>> 76127 + +SELECT COUNT(*) FROM SYSTEM_RANGE(0, 2111222333); +>> 2111222334 + +select * from system_range(2, 100) r where +not exists(select * from system_range(2, 11) r2 where r.x>r2.x and mod(r.x, r2.x)=0); +> X +> -- +> 11 +> 13 +> 17 +> 19 +> 2 +> 23 +> 29 +> 3 +> 31 +> 37 +> 41 +> 43 +> 47 +> 5 +> 53 +> 59 +> 61 +> 67 +> 7 +> 71 +> 73 +> 79 +> 83 +> 89 +> 97 +> rows: 25 + +SELECT * FROM SYSTEM_RANGE(1, 10) ORDER BY 1; +> X +> -- +> 1 +> 2 +> 3 +> 4 +> 5 +> 6 +> 7 +> 8 +> 9 +> 10 +> rows (ordered): 10 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 10); +>> 10 + +SELECT * FROM SYSTEM_RANGE(1, 10, 2) ORDER BY 1; +> X +> - +> 1 +> 3 +> 5 +> 7 +> 9 +> rows (ordered): 5 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 10, 2); +>> 5 + +SELECT * FROM SYSTEM_RANGE(1, 9, 2) ORDER BY 1; +> X +> - +> 1 +> 3 +> 5 +> 7 +> 9 +> rows (ordered): 5 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 9, 2); +>> 5 + +SELECT * FROM 
SYSTEM_RANGE(10, 1, -2) ORDER BY 1 DESC; +> X +> -- +> 10 +> 8 +> 6 +> 4 +> 2 +> rows (ordered): 5 + +SELECT COUNT(*) FROM SYSTEM_RANGE(10, 1, -2); +>> 5 + +SELECT * FROM SYSTEM_RANGE(10, 2, -2) ORDER BY 1 DESC; +> X +> -- +> 10 +> 8 +> 6 +> 4 +> 2 +> rows (ordered): 5 + +SELECT COUNT(*) FROM SYSTEM_RANGE(10, 2, -2); +>> 5 + +SELECT * FROM SYSTEM_RANGE(1, 1); +>> 1 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 1); +>> 1 + +SELECT * FROM SYSTEM_RANGE(1, 1, -1); +>> 1 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 1, -1); +>> 1 + +SELECT * FROM SYSTEM_RANGE(2, 1); +> X +> - +> rows: 0 + +SELECT COUNT(*) FROM SYSTEM_RANGE(2, 1); +>> 0 + +SELECT * FROM SYSTEM_RANGE(2, 1, 2); +> X +> - +> rows: 0 + +SELECT COUNT(*) FROM SYSTEM_RANGE(2, 1, 2); +>> 0 + +SELECT * FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 2, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT * FROM SYSTEM_RANGE(2, 1, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT COUNT(*) FROM SYSTEM_RANGE(2, 1, 0); +> exception STEP_SIZE_MUST_NOT_BE_ZERO + +SELECT * FROM SYSTEM_RANGE(1, 8, 2); +> X +> - +> 1 +> 3 +> 5 +> 7 +> rows: 4 + +SELECT * FROM SYSTEM_RANGE(1, 8, 2) WHERE X = 2; +> X +> - +> rows: 0 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 8, 2) WHERE X = 2; +>> 0 + +SELECT * FROM SYSTEM_RANGE(1, 8, 2) WHERE X BETWEEN 2 AND 6; +> X +> - +> 3 +> 5 +> rows: 2 + +SELECT COUNT(*) FROM SYSTEM_RANGE(1, 8, 2) WHERE X BETWEEN 2 AND 6; +>> 2 + +SELECT * FROM SYSTEM_RANGE(8, 1, -2) ORDER BY X DESC; +> X +> - +> 8 +> 6 +> 4 +> 2 +> rows (ordered): 4 + +SELECT * FROM SYSTEM_RANGE(8, 1, -2) WHERE X = 3; +> X +> - +> rows: 0 + +SELECT COUNT(*) FROM SYSTEM_RANGE(8, 1, -2) WHERE X = 3; +>> 0 + +SELECT * FROM SYSTEM_RANGE(8, 1, -2) WHERE X BETWEEN 3 AND 7 ORDER BY 1 DESC; +> X +> - +> 6 +> 4 +> rows (ordered): 2 + +SELECT COUNT(*) FROM SYSTEM_RANGE(8, 1, -2) WHERE X BETWEEN 3 AND 7; +>> 2 + +SELECT X FROM SYSTEM_RANGE(1, 2, ?); +{ +1 +> X +> - +> 1 +> 2 +> rows: 2 +}; +> 
update count: 0 diff --git a/h2/src/test/org/h2/test/scripts/testScript.sql b/h2/src/test/org/h2/test/scripts/testScript.sql new file mode 100644 index 0000000000..dd74558e9e --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/testScript.sql @@ -0,0 +1,7076 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). +-- Initial Developer: H2 Group +-- +CREATE TABLE TEST(A INT, B INT) AS VALUES (1, 2), (3, 4), (5, 6); +> ok + +UPDATE TOP (1) TEST SET B = 10; +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +SET MODE MSSQLServer; +> ok + +UPDATE TOP (1) TEST SET B = 10; +> update count: 1 + +SELECT COUNT(*) FILTER (WHERE B = 10) N, COUNT(*) FILTER (WHERE B <> 10) O FROM TEST; +> N O +> - - +> 1 2 +> rows: 1 + +UPDATE TEST SET B = 10 WHERE B <> 10; +> update count: 2 + +UPDATE TOP (1) TEST SET B = 10 LIMIT 1; +> exception SYNTAX_ERROR_1 + +SET MODE Regular; +> ok + +DROP TABLE TEST; +> ok + +--- special grammar and test cases --------------------------------------------------------------------------------------------- +select 0 as x from system_range(1, 2) d group by d.x; +> X +> - +> 0 +> 0 +> rows: 2 + +select 1 "a", count(*) from dual group by "a" order by "a"; +> a COUNT(*) +> - -------- +> 1 1 +> rows (ordered): 1 + +create table results(eventId int, points int, studentId int); +> ok + +insert into results values(1, 10, 1), (2, 20, 1), (3, 5, 1); +> update count: 3 + +insert into results values(1, 10, 2), (2, 20, 2), (3, 5, 2); +> update count: 3 + +insert into results values(1, 10, 3), (2, 20, 3), (3, 5, 3); +> update count: 3 + +SELECT SUM(points) FROM RESULTS +WHERE eventID IN +(SELECT eventID FROM RESULTS +WHERE studentID = 2 +ORDER BY points DESC +LIMIT 2 ) +AND studentID = 2; +> SUM(POINTS) +> ----------- +> 30 +> rows: 1 + +SELECT eventID X FROM RESULTS +WHERE studentID = 2 +ORDER BY points DESC +LIMIT 2; +> X +> - +> 2 +> 1 +> rows (ordered): 2 + +SELECT SUM(r.points) FROM RESULTS r, 
+(SELECT eventID FROM RESULTS +WHERE studentID = 2 +ORDER BY points DESC +LIMIT 2 ) r2 +WHERE r2.eventID = r.eventId +AND studentID = 2; +> SUM(R.POINTS) +> ------------- +> 30 +> rows: 1 + +drop table results; +> ok + +create table test(id int, name varchar) as select 1, 'a'; +> ok + +(select id from test order by id) union (select id from test order by name); +> ID +> -- +> 1 +> rows: 1 + +drop table test; +> ok + +select * from system_range(1,1) order by x limit 3 offset 3; +> X +> - +> rows (ordered): 0 + +create sequence seq start with 65 increment by 1; +> ok + +select char(nextval('seq')) as x; +> X +> - +> A +> rows: 1 + +select char(nextval('seq')) as x; +> X +> - +> B +> rows: 1 + +drop sequence seq; +> ok + +create table test(id int, name varchar); +> ok + +insert into test values(5, 'b'), (5, 'b'), (20, 'a'); +> update count: 3 + +select id from test where name in(null, null); +> ID +> -- +> rows: 0 + +select * from (select * from test order by name limit 1) where id < 10; +> ID NAME +> -- ---- +> rows: 0 + +drop table test; +> ok + +create table test (id int primary key, pid int); +> ok + +alter table test add constraint fk_test foreign key (pid) +references test (id) index idx_test_pid; +> ok + +insert into test values (2, null); +> update count: 1 + +update test set pid = 1 where id = 2; +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +drop table test; +> ok + +create table test(name varchar(255)); +> ok + +select * from test union select * from test order by test.name; +> exception ORDER_BY_NOT_IN_RESULT + +insert into test values('a'), ('b'), ('c'); +> update count: 3 + +select name from test where name > all(select name from test where name<'b'); +> NAME +> ---- +> b +> c +> rows: 2 + +select count(*) from (select name from test where name > all(select name from test where name<'b')) x; +> COUNT(*) +> -------- +> 2 +> rows: 1 + +drop table test; +> ok + +create table test(id int) as select 1; +> ok + +select * from test where id >= 
all(select id from test where 1=0); +> ID +> -- +> 1 +> rows: 1 + +select * from test where id = all(select id from test where 1=0); +> ID +> -- +> 1 +> rows: 1 + +select * from test where id = all(select id from test union all select id from test); +> ID +> -- +> 1 +> rows: 1 + +select * from test where null >= all(select id from test where 1=0); +> ID +> -- +> 1 +> rows: 1 + +select * from test where null = all(select id from test where 1=0); +> ID +> -- +> 1 +> rows: 1 + +select * from test where null = all(select id from test union all select id from test); +> ID +> -- +> rows: 0 + +select * from test where id >= all(select cast(null as int) from test); +> ID +> -- +> rows: 0 + +select * from test where id = all(select null from test union all select id from test); +> ID +> -- +> rows: 0 + +select * from test where null >= all(select cast(null as int) from test); +> ID +> -- +> rows: 0 + +select * from test where null = all(select null from test union all select id from test); +> ID +> -- +> rows: 0 + +drop table test; +> ok + +select x from dual order by y.x; +> exception COLUMN_NOT_FOUND_1 + +create table test(id int primary key, name varchar(255), row_number int); +> ok + +insert into test values(1, 'hello', 10), (2, 'world', 20); +> update count: 2 + +select rownum(), id, name from test order by id; +> ROWNUM() ID NAME +> -------- -- ----- +> 1 1 hello +> 2 2 world +> rows (ordered): 2 + +select rownum(), id, name from test order by name; +> ROWNUM() ID NAME +> -------- -- ----- +> 1 1 hello +> 2 2 world +> rows (ordered): 2 + +select rownum(), id, name from test order by name desc; +> ROWNUM() ID NAME +> -------- -- ----- +> 2 2 world +> 1 1 hello +> rows (ordered): 2 + +update test set (id)=(id); +> update count: 2 + +drop table test; +> ok + +select 2^2; +> exception SYNTAX_ERROR_1 + +select * from dual where cast('xx' as varchar_ignorecase(1)) = 'X' and cast('x x ' as char(2)) = 'x'; +> +> +> +> rows: 1 + +explain select -cast(0 as real), -cast(0 as 
double); +>> SELECT CAST(0.0 AS REAL), CAST(0.0 AS DOUBLE PRECISION) + +select (1) one; +> ONE +> --- +> 1 +> rows: 1 + +create table test(id int); +> ok + +insert into test values(1), (2), (4); +> update count: 3 + +select * from test order by id limit -1; +> exception INVALID_VALUE_2 + +select * from test order by id limit 0; +> ID +> -- +> rows (ordered): 0 + +select * from test order by id limit 1; +> ID +> -- +> 1 +> rows (ordered): 1 + +select * from test order by id limit 1+1; +> ID +> -- +> 1 +> 2 +> rows (ordered): 2 + +select * from test order by id limit null; +> exception INVALID_VALUE_2 + +delete from test limit 0; +> ok + +delete from test limit 1; +> update count: 1 + +delete from test limit -1; +> exception INVALID_VALUE_2 + +drop table test; +> ok + +create table test(id int primary key); +> ok + +insert into test(id) direct sorted select x from system_range(1, 100); +> update count: 100 + +explain insert into test(id) direct sorted select x from system_range(1, 100); +>> INSERT INTO "PUBLIC"."TEST"("ID") DIRECT SELECT "X" FROM SYSTEM_RANGE(1, 100) /* range index */ + +explain select * from test limit 10; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST 10 ROWS ONLY + +drop table test; +> ok + +create table test(id int primary key); +> ok + +insert into test values(1), (2), (3), (4); +> update count: 4 + +explain analyze select * from test where id is null; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID IS NULL */ /* scanCount: 1 */ WHERE "ID" IS NULL + +drop table test; +> ok + +explain analyze select 1; +>> SELECT 1 + +create table test(id int); +> ok + +create view x as select * from test; +> ok + +drop table test restrict; +> exception CANNOT_DROP_2 + +drop table test cascade; +> ok + +select 1, 2 from (select * from dual) union all select 3, 4 from dual; +> 1 2 +> - - +> 1 2 +> 3 4 +> rows: 2 + +select 3 from (select * from dual) union all select 2 from dual; +> 3 +> 
- +> 2 +> 3 +> rows: 2 + +create table a(x int, y int); +> ok + +alter table a add constraint a_xy unique(x, y); +> ok + +create table b(x int, y int, foreign key(x, y) references a(x, y)); +> ok + +insert into a values(null, null), (null, 0), (0, null), (0, 0); +> update count: 4 + +insert into b values(null, null), (null, 0), (0, null), (0, 0); +> update count: 4 + +delete from a where x is null and y is null; +> update count: 1 + +delete from a where x is null and y = 0; +> update count: 1 + +delete from a where x = 0 and y is null; +> update count: 1 + +delete from a where x = 0 and y = 0; +> exception REFERENTIAL_INTEGRITY_VIOLATED_CHILD_EXISTS_1 + +drop table b; +> ok + +drop table a; +> ok + +select * from (select null as x) where x=1; +> X +> - +> rows: 0 + +create table test(id decimal(10, 2) primary key) as select 0; +> ok + +select * from test where id = 0.00; +> ID +> ---- +> 0.00 +> rows: 1 + +select * from test where id = 0.0; +> ID +> ---- +> 0.00 +> rows: 1 + +drop table test; +> ok + +select count(*) from (select 1 union (select 2 intersect select 2)) x; +> COUNT(*) +> -------- +> 2 +> rows: 1 + +create table test(id varchar(1) primary key) as select 'X'; +> ok + +select count(*) from (select 1 from dual where 1 in ((select 1 union select 1))) a; +> COUNT(*) +> -------- +> 1 +> rows: 1 + +insert into test ((select 1 union select 2) union select 3); +> update count: 3 + +select count(*) from test where id = 'X1'; +> COUNT(*) +> -------- +> 0 +> rows: 1 + +drop table test; +> ok + +create table test(id int, constraint pk primary key(id), constraint x unique(id)); +> ok + +SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'TEST'; +> CONSTRAINT_NAME +> --------------- +> PK +> X +> rows: 2 + +drop table test; +> ok + +create table parent(id int primary key); +> ok + +create table child(id int, parent_id int, constraint child_parent foreign key (parent_id) references parent(id)); +> ok + +SELECT CONSTRAINT_NAME FROM 
INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'CHILD'; +> CONSTRAINT_NAME +> --------------- +> CHILD_PARENT +> rows: 1 + +drop table parent, child; +> ok + +create table test(id int, name varchar(max)); +> ok + +alter table test alter column id identity; +> ok + +drop table test; +> ok + +create table test(id identity); +> ok + +set password test; +> exception COLUMN_NOT_FOUND_1 + +alter user sa set password test; +> exception COLUMN_NOT_FOUND_1 + +comment on table test is test; +> exception COLUMN_NOT_FOUND_1 + +select 1 from test a where 1 in(select 1 from test b where b.id in(select 1 from test c where c.id=a.id)); +> 1 +> - +> rows: 0 + +drop table test; +> ok + +select @n := case when x = 1 then 1 else @n * x end f from system_range(1, 4); +> F +> -- +> 1 +> 2 +> 24 +> 6 +> rows: 4 + +select * from (select "x" from dual); +> exception COLUMN_NOT_FOUND_1 + +select * from(select 1 from system_range(1, 2) group by sin(x) order by sin(x)); +> 1 +> - +> 1 +> 1 +> rows: 2 + +create table parent(id int primary key, x int) as select 1 id, 2 x; +> ok + +create table child(id int references parent(id)) as select 1; +> ok + +delete from parent; +> exception REFERENTIAL_INTEGRITY_VIOLATED_CHILD_EXISTS_1 + +drop table parent, child; +> ok + +create domain integer as varchar; +> exception DOMAIN_ALREADY_EXISTS_1 + +create domain int as varchar; +> ok + +create memory table test(id int); +> ok + +script nodata nopasswords nosettings noversion; +> SCRIPT +> ----------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."INT" AS CHARACTER VARYING; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" "PUBLIC"."INT" ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 + +SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST'; +>> CHARACTER VARYING + +drop table test; +> ok + +drop domain int; +> ok + +create table test(id identity, parent bigint, foreign 
key(parent) references(id)); +> ok + +insert into test values(0, 0), (1, NULL), (2, 1), (3, 3), (4, 3); +> update count: 5 + +delete from test where id = 3; +> exception REFERENTIAL_INTEGRITY_VIOLATED_CHILD_EXISTS_1 + +delete from test where id = 0; +> update count: 1 + +delete from test where id = 1; +> exception REFERENTIAL_INTEGRITY_VIOLATED_CHILD_EXISTS_1 + +drop table test; +> ok + +create schema a; +> ok + +set autocommit false; +> ok + +set schema a; +> ok + +create table t1 ( k int, v varchar(10) ); +> ok + +insert into t1 values ( 1, 't1' ); +> update count: 1 + +create table t2 ( k int, v varchar(10) ); +> ok + +insert into t2 values ( 2, 't2' ); +> update count: 1 + +create view v_test(a, b, c, d) as select t1.*, t2.* from t1 join t2 on ( t1.k = t2.k ); +> ok + +select * from v_test; +> A B C D +> - - - - +> rows: 0 + +set schema public; +> ok + +drop schema a cascade; +> ok + +set autocommit true; +> ok + +select x/3 as a, count(*) c from system_range(1, 10) group by a having c>2; +> A C +> - - +> 1 3 +> 2 3 +> rows: 2 + +create table test(id int); +> ok + +insert into test values(1), (2); +> update count: 2 + +select id+1 as x, count(*) from test group by x; +> X COUNT(*) +> - -------- +> 2 1 +> 3 1 +> rows: 2 + +select 1 as id, id as b, count(*) from test group by id; +> ID B COUNT(*) +> -- - -------- +> 1 1 1 +> 1 2 1 +> rows: 2 + +select id+1 as x, count(*) from test group by -x; +> exception COLUMN_NOT_FOUND_1 + +select id+1 as x, count(*) from test group by x having x>2; +> exception MUST_GROUP_BY_COLUMN_1 + +select id+1 as x, count(*) from test group by 1; +> exception MUST_GROUP_BY_COLUMN_1 + +drop table test; +> ok + +create table test(t0 timestamp(0), t1 timestamp(1), t4 timestamp(4)); +> ok + +select column_name, datetime_precision from information_schema.columns c where c.table_name = 'TEST' order by column_name; +> COLUMN_NAME DATETIME_PRECISION +> ----------- ------------------ +> T0 0 +> T1 1 +> T4 4 +> rows (ordered): 3 + +drop table 
test; +> ok + +create table test(a int); +> ok + +insert into test values(1), (2); +> update count: 2 + +select -test.a a from test order by test.a; +> A +> -- +> -1 +> -2 +> rows (ordered): 2 + +select -test.a from test order by test.a; +> - TEST.A +> -------- +> -1 +> -2 +> rows (ordered): 2 + +select -test.a aa from test order by a; +> AA +> -- +> -1 +> -2 +> rows (ordered): 2 + +select -test.a aa from test order by aa; +> AA +> -- +> -2 +> -1 +> rows (ordered): 2 + +select -test.a a from test order by a; +> A +> -- +> -2 +> -1 +> rows (ordered): 2 + +drop table test; +> ok + +CREATE TABLE table_a(a_id INT PRIMARY KEY, left_id INT, right_id INT); +> ok + +CREATE TABLE table_b(b_id INT PRIMARY KEY, a_id INT); +> ok + +CREATE TABLE table_c(left_id INT, right_id INT, center_id INT); +> ok + +CREATE VIEW view_a AS +SELECT table_c.center_id, table_a.a_id, table_b.b_id +FROM table_c +INNER JOIN table_a ON table_c.left_id = table_a.left_id +AND table_c.right_id = table_a.right_id +LEFT JOIN table_b ON table_b.a_id = table_a.a_id; +> ok + +SELECT * FROM table_c INNER JOIN view_a +ON table_c.center_id = view_a.center_id; +> LEFT_ID RIGHT_ID CENTER_ID CENTER_ID A_ID B_ID +> ------- -------- --------- --------- ---- ---- +> rows: 0 + +drop view view_a; +> ok + +drop table table_a, table_b, table_c; +> ok + +create table t (pk int primary key, attr int); +> ok + +insert into t values (1, 5), (5, 1); +> update count: 2 + +select t1.pk from t t1, t t2 where t1.pk = t2.attr order by t1.pk; +> PK +> -- +> 1 +> 5 +> rows (ordered): 2 + +drop table t; +> ok + +CREATE ROLE TEST_A; +> ok + +GRANT TEST_A TO TEST_A; +> exception ROLE_ALREADY_GRANTED_1 + +CREATE ROLE TEST_B; +> ok + +GRANT TEST_A TO TEST_B; +> ok + +GRANT TEST_B TO TEST_A; +> exception ROLE_ALREADY_GRANTED_1 + +DROP ROLE TEST_A; +> ok + +DROP ROLE TEST_B; +> ok + +CREATE ROLE PUBLIC2; +> ok + +GRANT PUBLIC2 TO SA; +> ok + +GRANT PUBLIC2 TO SA; +> ok + +REVOKE PUBLIC2 FROM SA; +> ok + +REVOKE PUBLIC2 FROM SA; +> ok + 
+DROP ROLE PUBLIC2; +> ok + +create table test(id int primary key, lastname varchar, firstname varchar, parent int references(id)); +> ok + +alter table test add constraint name unique (lastname, firstname); +> ok + +SELECT CONSTRAINT_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS; +> CONSTRAINT_NAME INDEX_NAME +> --------------- ------------------ +> CONSTRAINT_2 PRIMARY_KEY_2 +> CONSTRAINT_27 CONSTRAINT_INDEX_2 +> NAME NAME_INDEX_2 +> rows: 3 + +SELECT CONSTRAINT_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE; +> CONSTRAINT_NAME COLUMN_NAME +> --------------- ----------- +> CONSTRAINT_2 ID +> CONSTRAINT_27 PARENT +> NAME FIRSTNAME +> NAME LASTNAME +> rows: 4 + +drop table test; +> ok + +ALTER TABLE INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME RENAME TO INFORMATION_SCHEMA.CAT; +> exception FEATURE_NOT_SUPPORTED_1 + +CREATE TABLE test (id bigserial NOT NULL primary key); +> ok + +drop table test; +> ok + +CREATE TABLE test (id serial NOT NULL primary key); +> ok + +drop table test; +> ok + +CREATE MEMORY TABLE TEST(ID INT, D DOUBLE, F FLOAT); +> ok + +insert into test values(0, POWER(0, -1), POWER(0, -1)), (1, -POWER(0, -1), -POWER(0, -1)), (2, SQRT(-1), SQRT(-1)); +> update count: 3 + +select * from test order by id; +> ID D F +> -- --------- --------- +> 0 Infinity Infinity +> 1 -Infinity -Infinity +> 2 NaN NaN +> rows (ordered): 3 + +script nopasswords nosettings noversion; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER, "D" DOUBLE PRECISION, "F" FLOAT ); +> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (0, 'Infinity', 'Infinity'), (1, '-Infinity', '-Infinity'), (2, 'NaN', 'NaN'); +> rows (ordered): 4 + +DROP TABLE TEST; +> ok + +create schema a; +> ok + +create table a.x(ax int); +> ok + +create schema b; +> ok + 
+create table b.x(bx int); +> ok + +select * from a.x, b.x; +> AX BX +> -- -- +> rows: 0 + +drop schema a cascade; +> ok + +drop schema b cascade; +> ok + +CREATE TABLE p(d date); +> ok + +INSERT INTO p VALUES('-1-01-01'), ('0-01-01'), ('0001-01-01'); +> update count: 3 + +select d, year(d), extract(year from d), cast(d as timestamp) from p; +> D EXTRACT(YEAR FROM D) EXTRACT(YEAR FROM D) CAST(D AS TIMESTAMP) +> ----------- -------------------- -------------------- -------------------- +> -0001-01-01 -1 -1 -0001-01-01 00:00:00 +> 0000-01-01 0 0 0000-01-01 00:00:00 +> 0001-01-01 1 1 0001-01-01 00:00:00 +> rows: 3 + +drop table p; +> ok + +create table test(a int, b int default 1); +> ok + +insert into test values(1, default), (2, 2), (3, null); +> update count: 3 + +select * from test; +> A B +> - ---- +> 1 1 +> 2 2 +> 3 null +> rows: 3 + +update test set b = default where a = 2; +> update count: 1 + +explain update test set b = default where a = 2; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "B" = DEFAULT WHERE "A" = 2 + +select * from test; +> A B +> - ---- +> 1 1 +> 2 1 +> 3 null +> rows: 3 + +update test set a=default; +> update count: 3 + +drop table test; +> ok + +CREATE ROLE X; +> ok + +GRANT X TO X; +> exception ROLE_ALREADY_GRANTED_1 + +CREATE ROLE Y; +> ok + +GRANT Y TO X; +> ok + +DROP ROLE Y; +> ok + +DROP ROLE X; +> ok + +select top sum(1) 0 from dual; +> exception SYNTAX_ERROR_1 + +create table test(id int primary key, name varchar) as select 1, 'Hello World'; +> ok + +select * from test; +> ID NAME +> -- ----------- +> 1 Hello World +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, LABEL CHAR(20), LOOKUP CHAR(30)); +> ok + +INSERT INTO TEST VALUES (1, 'Mouse', 'MOUSE'), (2, 'MOUSE', 'Mouse'); +> update count: 2 + +SELECT * FROM TEST; +> ID LABEL LOOKUP +> -- ------ ------ +> 1 Mouse MOUSE +> 2 MOUSE Mouse +> rows: 2 + +DROP TABLE TEST; +> ok + +call 'a' regexp 'Ho.*\'; +> exception LIKE_ESCAPE_ERROR_1 + +set 
@t = 0; +> ok + +call set(1, 2); +> exception CAN_ONLY_ASSIGN_TO_VARIABLE_1 + +select x, set(@t, ifnull(@t, 0) + x) from system_range(1, 3); +> X SET(@T, COALESCE(@T, 0) + X) +> - ---------------------------- +> 1 1 +> 2 3 +> 3 6 +> rows: 3 + +select * from system_range(1, 2) a, +(select * from system_range(1, 2) union select * from system_range(1, 2) +union select * from system_range(1, 1)) v where a.x = v.x; +> X X +> - - +> 1 1 +> 2 2 +> rows: 2 + +create table test(id int); +> ok + +select * from ((select * from test) union (select * from test)) where id = 0; +> ID +> -- +> rows: 0 + +select * from ((test d1 inner join test d2 on d1.id = d2.id) inner join test d3 on d1.id = d3.id) inner join test d4 on d4.id = d1.id; +> ID ID ID ID +> -- -- -- -- +> rows: 0 + +drop table test; +> ok + +create table person(id bigint auto_increment, name varchar(100)); +> ok + +insert into person(name) values ('a'), ('b'), ('c'); +> update count: 3 + +select * from person order by id; +> ID NAME +> -- ---- +> 1 a +> 2 b +> 3 c +> rows (ordered): 3 + +select * from person order by id limit 2; +> ID NAME +> -- ---- +> 1 a +> 2 b +> rows (ordered): 2 + +select * from person order by id limit 2 offset 1; +> ID NAME +> -- ---- +> 2 b +> 3 c +> rows (ordered): 2 + +select * from person order by id limit 2147483647 offset 1; +> ID NAME +> -- ---- +> 2 b +> 3 c +> rows (ordered): 2 + +select * from person order by id limit 2147483647-1 offset 1; +> ID NAME +> -- ---- +> 2 b +> 3 c +> rows (ordered): 2 + +select * from person order by id limit 2147483647-1 offset 2; +> ID NAME +> -- ---- +> 3 c +> rows (ordered): 1 + +select * from person order by id limit 2147483647-2 offset 2; +> ID NAME +> -- ---- +> 3 c +> rows (ordered): 1 + +drop table person; +> ok + +CREATE TABLE TEST(ID INTEGER NOT NULL, ID2 INTEGER DEFAULT 0); +> ok + +ALTER TABLE test ALTER COLUMN ID2 RENAME TO ID; +> exception DUPLICATE_COLUMN_NAME_1 + +drop table test; +> ok + +CREATE TABLE FOO (A CHAR(10)); +> ok + +CREATE 
TABLE BAR AS SELECT * FROM FOO; +> ok + +select table_name, character_maximum_length from information_schema.columns where column_name = 'A'; +> TABLE_NAME CHARACTER_MAXIMUM_LENGTH +> ---------- ------------------------ +> BAR 10 +> FOO 10 +> rows: 2 + +DROP TABLE FOO, BAR; +> ok + +create table multi_pages(dir_num int, bh_id int); +> ok + +insert into multi_pages values(1, 1), (2, 2), (3, 3); +> update count: 3 + +create table b_holding(id int primary key, site varchar(255)); +> ok + +insert into b_holding values(1, 'Hello'), (2, 'Hello'), (3, 'Hello'); +> update count: 3 + +select * from (select dir_num, count(*) as cnt from multi_pages t, b_holding bh +where t.bh_id=bh.id and bh.site='Hello' group by dir_num) as x +where cnt < 1000 order by dir_num asc; +> DIR_NUM CNT +> ------- --- +> 1 1 +> 2 1 +> 3 1 +> rows (ordered): 3 + +explain select * from (select dir_num, count(*) as cnt from multi_pages t, b_holding bh +where t.bh_id=bh.id and bh.site='Hello' group by dir_num) as x +where cnt < 1000 order by dir_num asc; +>> SELECT "X"."DIR_NUM", "X"."CNT" FROM ( SELECT "DIR_NUM", COUNT(*) AS "CNT" FROM "PUBLIC"."MULTI_PAGES" "T" INNER JOIN "PUBLIC"."B_HOLDING" "BH" ON 1=1 WHERE ("BH"."SITE" = 'Hello') AND ("T"."BH_ID" = "BH"."ID") GROUP BY "DIR_NUM" ) "X" /* SELECT DIR_NUM, COUNT(*) AS CNT FROM PUBLIC.MULTI_PAGES T /* PUBLIC.MULTI_PAGES.tableScan */ INNER JOIN PUBLIC.B_HOLDING BH /* PUBLIC.PRIMARY_KEY_3: ID = T.BH_ID */ ON 1=1 WHERE (BH.SITE = 'Hello') AND (T.BH_ID = BH.ID) GROUP BY DIR_NUM HAVING COUNT(*) <= ?1: CNT < CAST(1000 AS BIGINT) */ WHERE "CNT" < CAST(1000 AS BIGINT) ORDER BY 1 + +select dir_num, count(*) as cnt from multi_pages t, b_holding bh +where t.bh_id=bh.id and bh.site='Hello' group by dir_num +having count(*) < 1000 order by dir_num asc; +> DIR_NUM CNT +> ------- --- +> 1 1 +> 2 1 +> 3 1 +> rows (ordered): 3 + +drop table multi_pages, b_holding; +> ok + +create table test(id smallint primary key); +> ok + +insert into test values(1), (2), (3); +> 
update count: 3 + +explain select * from test where id = 1; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 + +EXPLAIN SELECT * FROM TEST WHERE ID = (SELECT MAX(ID) FROM TEST); +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */) */ WHERE "ID" = (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */) + +drop table test; +> ok + +create table test(id tinyint primary key); +> ok + +insert into test values(1), (2), (3); +> update count: 3 + +explain select * from test where id = 3; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 + +explain select * from test where id = 255; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 255 */ WHERE "ID" = 255 + +drop table test; +> ok + +CREATE TABLE PARENT(A INT, B INT, PRIMARY KEY(A, B)); +> ok + +CREATE TABLE CHILD(A INT, B INT, CONSTRAINT CP FOREIGN KEY(A, B) REFERENCES PARENT(A, B)); +> ok + +INSERT INTO PARENT VALUES(1, 2); +> update count: 1 + +INSERT INTO CHILD VALUES(2, NULL), (NULL, 3), (NULL, NULL), (1, 2); +> update count: 4 + +set autocommit false; +> ok + +ALTER TABLE CHILD SET REFERENTIAL_INTEGRITY FALSE; +> ok + +ALTER TABLE CHILD SET REFERENTIAL_INTEGRITY TRUE CHECK; +> ok + +set autocommit true; +> ok + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE TEST(BIRTH TIMESTAMP); +> ok + +INSERT INTO TEST VALUES('2006-04-03 10:20:30'), ('2006-04-03 10:20:31'), ('2006-05-05 00:00:00'), ('2006-07-03 22:30:00'), ('2006-07-03 22:31:00'); +> update count: 5 + +SELECT * FROM (SELECT CAST(BIRTH AS DATE) B +FROM TEST GROUP BY CAST(BIRTH AS DATE)) A +WHERE A.B >= '2006-05-05'; +> B +> ---------- +> 2006-05-05 +> 2006-07-03 +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE TABLE Parent(ID INT PRIMARY KEY, Name VARCHAR); +> ok + +CREATE TABLE Child(ID 
INT); +> ok + +ALTER TABLE Child ADD FOREIGN KEY(ID) REFERENCES Parent(ID); +> ok + +INSERT INTO Parent VALUES(1, '0'), (2, '0'), (3, '0'); +> update count: 3 + +INSERT INTO Child VALUES(1); +> update count: 1 + +ALTER TABLE Parent ALTER COLUMN Name BOOLEAN NULL; +> ok + +DELETE FROM Parent WHERE ID=3; +> update count: 1 + +DROP TABLE Parent, Child; +> ok + +set autocommit false; +> ok + +CREATE TABLE A(ID INT PRIMARY KEY, SK INT); +> ok + +ALTER TABLE A ADD CONSTRAINT AC FOREIGN KEY(SK) REFERENCES A(ID); +> ok + +INSERT INTO A VALUES(1, 1); +> update count: 1 + +INSERT INTO A VALUES(-2, NULL); +> update count: 1 + +ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; +> ok + +ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE CHECK; +> ok + +ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; +> ok + +INSERT INTO A VALUES(2, 3); +> update count: 1 + +ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE; +> ok + +ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; +> ok + +ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE CHECK; +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +DROP TABLE A; +> ok + +set autocommit true; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY); +> ok + +CREATE TABLE CHILD(PID INT); +> ok + +INSERT INTO PARENT VALUES(1); +> update count: 1 + +INSERT INTO CHILD VALUES(2); +> update count: 1 + +ALTER TABLE CHILD ADD CONSTRAINT CP FOREIGN KEY(PID) REFERENCES PARENT(ID); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +UPDATE CHILD SET PID=1; +> update count: 1 + +ALTER TABLE CHILD ADD CONSTRAINT CP FOREIGN KEY(PID) REFERENCES PARENT(ID); +> ok + +DROP TABLE CHILD, PARENT; +> ok + +CREATE TABLE A(ID INT PRIMARY KEY, SK INT); +> ok + +INSERT INTO A VALUES(1, 2); +> update count: 1 + +ALTER TABLE A ADD CONSTRAINT AC FOREIGN KEY(SK) REFERENCES A(ID); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +DROP TABLE A; +> ok + +CREATE TABLE TEST(ID INT); +> ok + +INSERT INTO TEST VALUES(0), (1), (100); +> update count: 3 + +ALTER TABLE TEST ADD 
CONSTRAINT T CHECK ID<100; +> exception CHECK_CONSTRAINT_VIOLATED_1 + +UPDATE TEST SET ID=20 WHERE ID=100; +> update count: 1 + +ALTER TABLE TEST ADD CONSTRAINT T CHECK ID<100; +> ok + +DROP TABLE TEST; +> ok + +create table test(id int); +> ok + +set autocommit false; +> ok + +insert into test values(1); +> update count: 1 + +prepare commit tx1; +> ok + +commit transaction tx1; +> ok + +rollback; +> ok + +select * from test; +> ID +> -- +> 1 +> rows: 1 + +drop table test; +> ok + +set autocommit true; +> ok + +SELECT 'Hello' ~ 'He.*' T1, 'HELLO' ~ 'He.*' F2, CAST('HELLO' AS VARCHAR_IGNORECASE) ~ 'He.*' T3; +> T1 F2 T3 +> ---- ----- ---- +> TRUE FALSE TRUE +> rows: 1 + +SELECT 'Hello' ~* 'He.*' T1, 'HELLO' ~* 'He.*' T2, 'hallo' ~* 'He.*' F3; +> T1 T2 F3 +> ---- ---- ----- +> TRUE TRUE FALSE +> rows: 1 + +SELECT 'Hello' !~* 'Ho.*' T1, 'HELLO' !~* 'He.*' F2, 'hallo' !~* 'Ha.*' F3; +> T1 F2 F3 +> ---- ----- ----- +> TRUE FALSE FALSE +> rows: 1 + +create table test(parent int primary key, child int, foreign key(child) references (parent)); +> ok + +insert into test values(1, 1); +> update count: 1 + +insert into test values(2, 3); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +set autocommit false; +> ok + +set referential_integrity false; +> ok + +insert into test values(4, 4); +> update count: 1 + +insert into test values(5, 6); +> update count: 1 + +set referential_integrity true; +> ok + +insert into test values(7, 7), (8, 9); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +set autocommit true; +> ok + +drop table test; +> ok + +create table test as select 1, space(10) from dual where 1=0 union all select x, cast(space(100) as varchar(101)) d from system_range(1, 100); +> ok + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (-1, '-1'); +> update count: 2 + +select * from test where name = -1 and name = id; +> ID NAME +> -- ---- +> -1 -1 +> rows: 1 + 
+explain select * from test where name = -1 and name = id; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = -1 */ WHERE ("NAME" = -1) AND ("NAME" = "ID") + +DROP TABLE TEST; +> ok + +select * from system_range(1, 2) where x=x+1 and x=1; +> X +> - +> rows: 0 + +CREATE TABLE A as select 6 a; +> ok + +CREATE TABLE B(B INT PRIMARY KEY); +> ok + +CREATE VIEW V(V) AS (SELECT A FROM A UNION SELECT B FROM B); +> ok + +create table C as select * from table(c int = (0,6)); +> ok + +select * from V, C where V.V = C.C; +> V C +> - - +> 6 6 +> rows: 1 + +drop table A, B, C, V cascade; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, FLAG BOOLEAN, NAME VARCHAR); +> ok + +CREATE INDEX IDX_FLAG ON TEST(FLAG, NAME); +> ok + +INSERT INTO TEST VALUES(1, TRUE, 'Hello'), (2, FALSE, 'World'); +> update count: 2 + +EXPLAIN SELECT * FROM TEST WHERE FLAG; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FLAG", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE */ WHERE "FLAG" + +EXPLAIN SELECT * FROM TEST WHERE FLAG AND NAME>'I'; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FLAG", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FLAG: FLAG = TRUE AND NAME > 'I' */ WHERE "FLAG" AND ("NAME" > 'I') + +DROP TABLE TEST; +> ok + +CREATE TABLE test_table (first_col varchar(20), second_col integer); +> ok + +insert into test_table values('a', 10), ('a', 4), ('b', 30), ('b', 3); +> update count: 4 + +CREATE VIEW test_view AS SELECT first_col AS renamed_col, MIN(second_col) AS also_renamed FROM test_table GROUP BY first_col; +> ok + +SELECT * FROM test_view WHERE renamed_col = 'a'; +> RENAMED_COL ALSO_RENAMED +> ----------- ------------ +> a 4 +> rows: 1 + +drop view test_view; +> ok + +drop table test_table; +> ok + +create table test(id int); +> ok + +explain select id+1 a from test group by id+1; +>> SELECT "ID" + 1 AS "A" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "ID" + 1 + +drop table 
test; +> ok + +set autocommit off; +> ok + +set schema_search_path = public, information_schema; +> ok + +select table_name from tables where 1=0; +> TABLE_NAME +> ---------- +> rows: 0 + +set schema_search_path = public; +> ok + +set autocommit on; +> ok + +create table script.public.x(a int); +> ok + +select * from script.PUBLIC.x; +> A +> - +> rows: 0 + +create index script.public.idx on script.public.x(a); +> ok + +drop table script.public.x; +> ok + +create table d(d double, r real); +> ok + +insert into d(d, d, r) values(1.1234567890123456789, 1.1234567890123456789, 3); +> exception DUPLICATE_COLUMN_NAME_1 + +insert into d values(1.1234567890123456789, 1.1234567890123456789); +> update count: 1 + +select r+d, r+r, d+d from d; +> R + D R + R D + D +> ----------------- --------- ------------------ +> 2.246913624759111 2.2469137 2.2469135780246914 +> rows: 1 + +drop table d; +> ok + +create table test(id int, c char(5), v varchar(5)); +> ok + +insert into test set id = 1, c = 'a', v = 'a'; +> update count: 1 + +insert into test set id = 2, c = 'a ', v = 'a '; +> update count: 1 + +insert into test set id = 3, c = 'abcde ', v = 'abcde'; +> update count: 1 + +select distinct length(c) from test order by length(c); +> CHAR_LENGTH(C) +> -------------- +> 5 +> rows (ordered): 1 + +select id, c, v, length(c), length(v) from test order by id; +> ID C V CHAR_LENGTH(C) CHAR_LENGTH(V) +> -- ----- ----- -------------- -------------- +> 1 a a 5 1 +> 2 a a 5 2 +> 3 abcde abcde 5 5 +> rows (ordered): 3 + +select id from test where c='a' order by id; +> ID +> -- +> 1 +> 2 +> rows (ordered): 2 + +select id from test where c='a ' order by id; +> ID +> -- +> 1 +> 2 +> rows (ordered): 2 + +select id from test where c=v order by id; +> ID +> -- +> 1 +> 2 +> 3 +> rows (ordered): 3 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), C INT); +> ok + +INSERT INTO TEST VALUES(1, '10', NULL), (2, '0', NULL); +> update count: 2 + +SELECT LEAST(ID, C, 
NAME), GREATEST(ID, C, NAME), LEAST(NULL, C), GREATEST(NULL, NULL), ID FROM TEST ORDER BY ID; +> LEAST(ID, C, NAME) GREATEST(ID, C, NAME) LEAST(NULL, C) CAST(NULL AS CHARACTER VARYING) ID +> ------------------ --------------------- -------------- ------------------------------- -- +> 1 10 null null 1 +> 0 2 null null 2 +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +create table people (family varchar(1) not null, person varchar(1) not null); +> ok + +create table cars (family varchar(1) not null, car varchar(1) not null); +> ok + +insert into people values(1, 1), (2, 1), (2, 2), (3, 1), (5, 1); +> update count: 5 + +insert into cars values(2, 1), (2, 2), (3, 1), (3, 2), (3, 3), (4, 1); +> update count: 6 + +select family, (select count(car) from cars where cars.family = people.family) as x +from people group by family order by family; +> FAMILY X +> ------ - +> 1 0 +> 2 2 +> 3 3 +> 5 0 +> rows (ordered): 4 + +drop table people, cars; +> ok + +select (1, 2); +> ROW (1, 2) +> ---------- +> ROW (1, 2) +> rows: 1 + +select * from (select 1), (select 2); +> 1 2 +> - - +> 1 2 +> rows: 1 + +create table t1(c1 int, c2 int); +> ok + +create table t2(c1 int, c2 int); +> ok + +insert into t1 values(1, null), (2, 2), (3, 3); +> update count: 3 + +insert into t2 values(1, 1), (1, 2), (2, null), (3, 3); +> update count: 4 + +select * from t2 where c1 not in(select c2 from t1); +> C1 C2 +> -- -- +> rows: 0 + +select * from t2 where c1 not in(null, 2, 3); +> C1 C2 +> -- -- +> rows: 0 + +select * from t1 where c2 not in(select c1 from t2); +> C1 C2 +> -- -- +> rows: 0 + +select * from t1 where not exists(select * from t2 where t1.c2=t2.c1); +> C1 C2 +> -- ---- +> 1 null +> rows: 1 + +drop table t1; +> ok + +drop table t2; +> ok + +CREATE TABLE test (family_name VARCHAR_IGNORECASE(63) NOT NULL); +> ok + +INSERT INTO test VALUES('Smith'), ('de Smith'), ('el Smith'), ('von Smith'); +> update count: 4 + +SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith'); +> FAMILY_NAME 
+> ----------- +> Smith +> de Smith +> rows: 2 + +SELECT * FROM test WHERE family_name BETWEEN 'D' AND 'T'; +> FAMILY_NAME +> ----------- +> Smith +> de Smith +> el Smith +> rows: 3 + +CREATE INDEX family_name ON test(family_name); +> ok + +SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith'); +> FAMILY_NAME +> ----------- +> Smith +> de Smith +> rows: 2 + +drop table test; +> ok + +create memory table test(id int primary key, data clob); +> ok + +insert into test values(1, 'abc' || space(20)); +> update count: 1 + +script nopasswords nosettings noversion blocksize 10; +> SCRIPT +> ---------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "DATA" CHARACTER LARGE OBJECT ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE CACHED LOCAL TEMPORARY TABLE IF NOT EXISTS SYSTEM_LOB_STREAM(ID INT NOT NULL, PART INT NOT NULL, CDATA VARCHAR, BDATA VARBINARY); +> ALTER TABLE SYSTEM_LOB_STREAM ADD CONSTRAINT SYSTEM_LOB_STREAM_PRIMARY_KEY PRIMARY KEY(ID, PART); +> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_CLOB FOR 'org.h2.command.dml.ScriptCommand.combineClob'; +> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_BLOB FOR 'org.h2.command.dml.ScriptCommand.combineBlob'; +> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 0, 'abc ', NULL); +> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 1, ' ', NULL); +> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 2, ' ', NULL); +> INSERT INTO "PUBLIC"."TEST" VALUES (1, SYSTEM_COMBINE_CLOB(0)); +> DROP TABLE IF EXISTS SYSTEM_LOB_STREAM; +> DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB; +> DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB; +> rows (ordered): 15 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (2, 
'World'); +> update count: 2 + +SELECT DISTINCT * FROM TEST ORDER BY ID; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 + +DROP TABLE TEST; +> ok + +create sequence main_seq; +> ok + +create schema "TestSchema"; +> ok + +create sequence "TestSchema"."TestSeq"; +> ok + +create sequence "TestSchema"."ABC"; +> ok + +select currval('main_seq'), currval('TestSchema', 'TestSeq'); +> exception CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 + +select nextval('TestSchema', 'ABC'); +>> 1 + +set autocommit off; +> ok + +set schema "TestSchema"; +> ok + +select nextval('abc'), currval('Abc'), nextval('TestSchema', 'ABC'); +> NEXTVAL('abc') CURRVAL('Abc') NEXTVAL('TestSchema', 'ABC') +> -------------- -------------- ---------------------------- +> 2 2 3 +> rows: 1 + +set schema public; +> ok + +drop schema "TestSchema" cascade; +> ok + +drop sequence main_seq; +> ok + +create sequence "test"; +> ok + +select nextval('test'); +> NEXTVAL('test') +> --------------- +> 1 +> rows: 1 + +drop sequence "test"; +> ok + +set autocommit on; +> ok + +CREATE TABLE parent(id int PRIMARY KEY); +> ok + +CREATE TABLE child(parentid int REFERENCES parent); +> ok + +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT +> rows: 1 + +ALTER TABLE parent ADD COLUMN name varchar; +> ok + +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- 
------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT +> rows: 1 + +drop table parent, child; +> ok + +create table test(id int); +> ok + +create schema TEST_SCHEMA; +> ok + +set autocommit false; +> ok + +set schema TEST_SCHEMA; +> ok + +create table test(id int, name varchar); +> ok + +explain select * from test; +>> SELECT "TEST_SCHEMA"."TEST"."ID", "TEST_SCHEMA"."TEST"."NAME" FROM "TEST_SCHEMA"."TEST" /* TEST_SCHEMA.TEST.tableScan */ + +explain select * from public.test; +>> SELECT "PUBLIC"."TEST"."ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +drop schema TEST_SCHEMA cascade; +> ok + +set autocommit true; +> ok + +set schema public; +> ok + +select * from test; +> ID +> -- +> rows: 0 + +drop table test; +> ok + +create table content(thread_id int, parent_id int); +> ok + +alter table content add constraint content_parent_id check (parent_id = thread_id) or (parent_id is null) or ( parent_id in (select thread_id from content)); +> ok + +create index content_thread_id ON content(thread_id); +> ok + +insert into content values(0, 0), (0, 0); +> update count: 2 + +insert into content values(0, 1); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +insert into content values(1, 1), (2, 2); +> update count: 2 + +insert into content values(2, 1); +> update count: 1 + +insert into content values(2, 3); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +drop table content; +> ok + +select x/10 y from system_range(1, 100) group by x/10; +> Y +> -- +> 0 +> 1 +> 10 +> 2 +> 3 +> 4 +> 5 +> 6 +> 7 +> 8 +> 9 +> rows: 11 + +select timestamp '2001-02-03T10:30:33'; +> TIMESTAMP '2001-02-03 10:30:33' +> ------------------------------- +> 2001-02-03 10:30:33 +> rows: 1 + +create table test(id int); +> ok + +insert into test (select x from system_range(1, 100)); +> update count: 100 + +select id/1000 from test group by id/1000; +> ID / 1000 +> 
--------- +> 0 +> rows: 1 + +select id/(10*100) from test group by id/(10*100); +> ID / 1000 +> --------- +> 0 +> rows: 1 + +select id/1000 from test group by id/100; +> exception MUST_GROUP_BY_COLUMN_1 + +drop table test; +> ok + +select (x/10000) from system_range(10, 20) group by (x/10000); +> X / 10000 +> --------- +> 0 +> rows: 1 + +select sum(x), (x/10) from system_range(10, 100) group by (x/10); +> SUM(X) X / 10 +> ------ ------ +> 100 10 +> 145 1 +> 245 2 +> 345 3 +> 445 4 +> 545 5 +> 645 6 +> 745 7 +> 845 8 +> 945 9 +> rows: 10 + +CREATE FORCE VIEW ADDRESS_VIEW AS SELECT * FROM ADDRESS; +> ok + +CREATE memory TABLE ADDRESS(ID INT); +> ok + +alter view address_view recompile; +> ok + +alter view if exists address_view recompile; +> ok + +alter view if exists does_not_exist recompile; +> ok + +select * from ADDRESS_VIEW; +> ID +> -- +> rows: 0 + +drop view address_view; +> ok + +drop table address; +> ok + +CREATE ALIAS PARSE_INT2 FOR "java.lang.Integer.parseInt(java.lang.String, int)"; +> ok + +select min(SUBSTRING(random_uuid(), 15,1)='4') from system_range(1, 10); +> MIN(SUBSTRING(RANDOM_UUID() FROM 15 FOR 1) = '4') +> ------------------------------------------------- +> TRUE +> rows: 1 + +select min(8=bitand(12, PARSE_INT2(SUBSTRING(random_uuid(), 20,1), 16))) from system_range(1, 10); +> MIN(8 = BITAND(12, PUBLIC.PARSE_INT2(SUBSTRING(RANDOM_UUID() FROM 20 FOR 1), 16))) +> ---------------------------------------------------------------------------------- +> TRUE +> rows: 1 + +select BITGET(x, 0) AS IS_SET from system_range(1, 2); +> IS_SET +> ------ +> FALSE +> TRUE +> rows: 2 + +drop alias PARSE_INT2; +> ok + +create memory table test(name varchar check(name = upper(name))); +> ok + +insert into test values(null); +> update count: 1 + +insert into test values('aa'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +insert into test values('AA'); +> update count: 1 + +script nodata nopasswords nosettings noversion; +> SCRIPT +> 
--------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "NAME" CHARACTER VARYING ); +> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" CHECK("NAME" = UPPER("NAME")) NOCHECK; +> rows (ordered): 4 + +drop table test; +> ok + +create domain email as varchar(200) check (position('@' in value) > 1); +> ok + +create domain gmail as email default '@gmail.com' check (position('gmail' in value) > 1); +> ok + +create memory table address(id int primary key, name email, name2 gmail); +> ok + +insert into address(id, name, name2) values(1, 'test@abc', 'test@gmail.com'); +> update count: 1 + +insert into address(id, name, name2) values(2, 'test@abc', 'test@acme'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +@reconnect + +insert into address(id, name, name2) values(3, 'test_abc', 'test@gmail'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +insert into address2(name) values('test@abc'); +> exception TABLE_OR_VIEW_NOT_FOUND_1 + +CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT ''; +> ok + +CREATE DOMAIN IF NOT EXISTS STRING AS VARCHAR(255) DEFAULT ''; +> ok + +CREATE DOMAIN STRING1 AS VARCHAR; +> ok + +CREATE DOMAIN STRING2 AS VARCHAR DEFAULT ''; +> ok + +create domain string_x as string2; +> ok + +create memory table test(a string, b string1, c string2); +> ok + +insert into test(b) values('x'); +> update count: 1 + +select * from test; +> A B C +> - - ------- +> x +> rows: 1 + +select DOMAIN_NAME, DOMAIN_DEFAULT, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, PARENT_DOMAIN_NAME, REMARKS from information_schema.domains; +> DOMAIN_NAME DOMAIN_DEFAULT DATA_TYPE CHARACTER_MAXIMUM_LENGTH PARENT_DOMAIN_NAME REMARKS +> ----------- -------------- ----------------- ------------------------ ------------------ ------- +> EMAIL null CHARACTER VARYING 200 null null +> GMAIL '@gmail.com' CHARACTER 
VARYING 200 EMAIL null +> STRING '' CHARACTER VARYING 255 null null +> STRING1 null CHARACTER VARYING 1048576 null null +> STRING2 '' CHARACTER VARYING 1048576 null null +> STRING_X null CHARACTER VARYING 1048576 STRING2 null +> rows: 6 + +script nodata nopasswords nosettings noversion; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE DOMAIN "PUBLIC"."EMAIL" AS CHARACTER VARYING(200); +> CREATE DOMAIN "PUBLIC"."STRING" AS CHARACTER VARYING(255) DEFAULT ''; +> CREATE DOMAIN "PUBLIC"."STRING1" AS CHARACTER VARYING; +> CREATE DOMAIN "PUBLIC"."STRING2" AS CHARACTER VARYING DEFAULT ''; +> CREATE DOMAIN "PUBLIC"."GMAIL" AS "PUBLIC"."EMAIL" DEFAULT '@gmail.com'; +> CREATE DOMAIN "PUBLIC"."STRING_X" AS "PUBLIC"."STRING2"; +> CREATE MEMORY TABLE "PUBLIC"."ADDRESS"( "ID" INTEGER NOT NULL, "NAME" "PUBLIC"."EMAIL", "NAME2" "PUBLIC"."GMAIL" ); +> ALTER TABLE "PUBLIC"."ADDRESS" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_E" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.ADDRESS; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" "PUBLIC"."STRING", "B" "PUBLIC"."STRING1", "C" "PUBLIC"."STRING2" ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER DOMAIN "PUBLIC"."EMAIL" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" CHECK(LOCATE('@', VALUE) > 1) NOCHECK; +> ALTER DOMAIN "PUBLIC"."GMAIL" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_4" CHECK(LOCATE('gmail', VALUE) > 1) NOCHECK; +> rows (ordered): 14 + +drop table test; +> ok + +drop domain string; +> ok + +drop domain string1; +> ok + +drop domain string2 cascade; +> ok + +drop domain string_x; +> ok + +drop table address; +> ok + +drop domain email cascade; +> ok + +drop domain gmail; +> ok + +create force view address_view as select * from address; +> ok + +create table address(id identity, name varchar check instr(value, '@') > 1); +> exception SYNTAX_ERROR_2 + +create table address(id identity, name 
varchar check instr(name, '@') > 1); +> ok + +drop view if exists address_view; +> ok + +drop table address; +> ok + +create memory table a(k10 blob(10k), m20 blob(20m), g30 clob(30g)); +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> DROP TABLE IF EXISTS "PUBLIC"."A" CASCADE; +> CREATE MEMORY TABLE "PUBLIC"."A"( "K10" BINARY LARGE OBJECT(10240), "M20" BINARY LARGE OBJECT(20971520), "G30" CHARACTER LARGE OBJECT(32212254720) ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A; +> rows (ordered): 4 + +create table b(); +> ok + +create table c(); +> ok + +drop table information_schema.columns; +> exception CANNOT_DROP_TABLE_1 + +create table columns as select * from information_schema.columns; +> ok + +create table tables as select * from information_schema.tables where false; +> ok + +create table dual2 as select 1 from dual; +> ok + +select * from dual2; +> 1 +> - +> 1 +> rows: 1 + +drop table dual2, columns, tables; +> ok + +drop table a, a; +> ok + +drop table b, c; +> ok + +CREATE TABLE A (ID_A int primary key); +> ok + +CREATE TABLE B (ID_B int primary key); +> ok + +CREATE TABLE C (ID_C int primary key); +> ok + +insert into A values (1); +> update count: 1 + +insert into A values (2); +> update count: 1 + +insert into B values (1); +> update count: 1 + +insert into C values (1); +> update count: 1 + +SELECT * FROM C WHERE NOT EXISTS ((SELECT ID_A FROM A) EXCEPT (SELECT ID_B FROM B)); +> ID_C +> ---- +> rows: 0 + +(SELECT ID_A FROM A) EXCEPT (SELECT ID_B FROM B); +> ID_A +> ---- +> 2 +> rows: 1 + +drop table a; +> ok + +drop table b; +> ok + +drop table c; +> ok + +CREATE TABLE X (ID INTEGER PRIMARY KEY); +> ok + +insert into x values(0), (1), (10); +> update count: 3 + +SELECT t1.ID, (SELECT t1.id || ':' || AVG(t2.ID) FROM X t2) AS col2 
FROM X t1; +> ID COL2 +> -- --------------------- +> 0 0:3.6666666666666665 +> 1 1:3.6666666666666665 +> 10 10:3.6666666666666665 +> rows: 3 + +drop table x; +> ok + +create table test(id int primary key, name varchar); +> ok + +insert into test values(rownum, '11'), (rownum, '22'), (rownum, '33'); +> update count: 3 + +select * from test order by id; +> ID NAME +> -- ---- +> 1 11 +> 2 22 +> 3 33 +> rows (ordered): 3 + +select rownum, (select count(*) from test) as col2, rownum from test; +> ROWNUM() COL2 ROWNUM() +> -------- ---- -------- +> 1 3 1 +> 2 3 2 +> 3 3 3 +> rows: 3 + +delete from test t0 where rownum<2; +> update count: 1 + +select rownum, * from (select * from test where id>1 order by id desc); +> ROWNUM() ID NAME +> -------- -- ---- +> 1 3 33 +> 2 2 22 +> rows: 2 + +update test set name='x' where rownum<2; +> update count: 1 + +select * from test; +> ID NAME +> -- ---- +> 2 x +> 3 33 +> rows: 2 + +merge into test values(2, 'r' || rownum), (10, rownum), (11, rownum); +> update count: 3 + +select * from test; +> ID NAME +> -- ---- +> 10 2 +> 11 3 +> 2 r1 +> 3 33 +> rows: 4 + +call rownum; +> ROWNUM() +> -------- +> 1 +> rows: 1 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +create index idx_test_name on test(name); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +set ignorecase true; +> ok + +CREATE TABLE TEST2(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +create unique index idx_test2_name on test2(name); +> ok + +INSERT INTO TEST2 VALUES(1, 'hElLo'); +> update count: 1 + +INSERT INTO TEST2 VALUES(2, 'World'); +> update count: 1 + +INSERT INTO TEST2 VALUES(3, 'WoRlD'); +> exception DUPLICATE_KEY_1 + +drop index idx_test2_name; +> ok + +select * from test where name='HELLO'; +> ID NAME +> -- ---- +> rows: 0 + +select * from test2 where name='HELLO'; +> ID NAME +> -- ----- +> 1 hElLo +> rows: 1 + +select * from test where name 
like 'HELLO'; +> ID NAME +> -- ---- +> rows: 0 + +select * from test2 where name like 'HELLO'; +> ID NAME +> -- ----- +> 1 hElLo +> rows: 1 + +explain plan for select * from test2, test where test2.name = test.name; +>> SELECT "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME", "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" + +select * from test2, test where test2.name = test.name; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 hElLo 1 Hello +> 2 World 2 World +> rows: 2 + +explain plan for select * from test, test2 where test2.name = test.name; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME" FROM "PUBLIC"."TEST2" /* PUBLIC.TEST2.tableScan */ INNER JOIN "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" + +select * from test, test2 where test2.name = test.name; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 hElLo +> 2 World 2 World +> rows: 2 + +create index idx_test2_name on test2(name); +> ok + +explain plan for select * from test2, test where test2.name = test.name; +>> SELECT "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME", "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" + +select * from test2, test where test2.name = test.name; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 hElLo 1 Hello +> 2 World 2 World +> rows: 2 + +explain plan for select * from test, test2 where test2.name = test.name; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST2"."ID", "PUBLIC"."TEST2"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_TEST_NAME */ INNER JOIN "PUBLIC"."TEST2" /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE "TEST2"."NAME" = "TEST"."NAME" + +select * 
from test, test2 where test2.name = test.name; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 hElLo +> 2 World 2 World +> rows: 2 + +DROP TABLE IF EXISTS TEST; +> ok + +DROP TABLE IF EXISTS TEST2; +> ok + +set ignorecase false; +> ok + +create table test(f1 varchar, f2 varchar); +> ok + +insert into test values('abc','222'); +> update count: 1 + +insert into test values('abc','111'); +> update count: 1 + +insert into test values('abc','333'); +> update count: 1 + +SELECT t.f1, t.f2 FROM test t ORDER BY t.f2; +> F1 F2 +> --- --- +> abc 111 +> abc 222 +> abc 333 +> rows (ordered): 3 + +SELECT t1.f1, t1.f2, t2.f1, t2.f2 FROM test t1, test t2 ORDER BY t2.f2, t1.f2; +> F1 F2 F1 F2 +> --- --- --- --- +> abc 111 abc 111 +> abc 222 abc 111 +> abc 333 abc 111 +> abc 111 abc 222 +> abc 222 abc 222 +> abc 333 abc 222 +> abc 111 abc 333 +> abc 222 abc 333 +> abc 333 abc 333 +> rows (ordered): 9 + +drop table if exists test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +explain select t0.id, t1.id from test t0, test t1 order by t0.id, t1.id; +>> SELECT "T0"."ID", "T1"."ID" FROM "PUBLIC"."TEST" "T0" /* PUBLIC.TEST.tableScan */ INNER JOIN "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ ON 1=1 ORDER BY 1, 2 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +SELECT id, sum(id) FROM test GROUP BY id ORDER BY id*sum(id); +> ID SUM(ID) +> -- ------- +> 1 1 +> 2 2 +> rows (ordered): 2 + +select * +from test t1 +inner join test t2 on t2.id=t1.id +inner join test t3 on t3.id=t2.id +where exists (select 1 from test t4 where t2.id=t4.id); +> ID NAME ID NAME ID NAME +> -- ----- -- ----- -- ----- +> 1 Hello 1 Hello 1 Hello +> 2 World 2 World 2 World +> rows: 2 + +explain select * from test t1 where id in(select id from test t2 where t1.id=t2.id); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM 
"PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE "T1"."ID" = "T2"."ID") + +select * from test t1 where id in(select id from test t2 where t1.id=t2.id); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +explain select * from test t1 where id in(id, id+1); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN("ID", "ID" + 1) + +select * from test t1 where id in(id, id+1); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +explain select * from test t1 where id in(id); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" = "ID" + +select * from test t1 where id in(id); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +explain select * from test t1 where id in(select id from test); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT DISTINCT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) + +select * from test t1 where id in(select id from test); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +explain select * from test t1 where id in(1, select max(id) from test); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */)) */ WHERE "ID" IN(1, (SELECT MAX("ID") FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ /* direct lookup */)) + +select * from test t1 where id in(1, select max(id) from test); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +explain select * from test t1 where id in(1, select max(id) from test t2 where t1.id=t2.id); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" IN(1, (SELECT MAX("ID") FROM "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE "T1"."ID" = "T2"."ID")) + +select * from 
test t1 where id in(1, select max(id) from test t2 where t1.id=t2.id); +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows: 2 + +DROP TABLE TEST; +> ok + +create force view t1 as select * from t1; +> ok + +select * from t1; +> exception VIEW_IS_INVALID_2 + +drop table t1; +> ok + +CREATE TABLE TEST(id INT PRIMARY KEY, foo BIGINT); +> ok + +INSERT INTO TEST VALUES(1, 100); +> update count: 1 + +INSERT INTO TEST VALUES(2, 123456789012345678); +> update count: 1 + +SELECT * FROM TEST WHERE foo = 123456789014567; +> ID FOO +> -- --- +> rows: 0 + +DROP TABLE IF EXISTS TEST; +> ok + +create table test(id int); +> ok + +insert into test values(1), (2), (3), (4); +> update count: 4 + +(select * from test a, test b) minus (select * from test a, test b); +> ID ID +> -- -- +> rows: 0 + +drop table test; +> ok + +call select 1.0/3.0*3.0, 100.0/2.0, -25.0/100.0, 0.0/3.0, 6.9/2.0, 0.72179425150347250912311550800000 / 5314251955.21; +> ROW (0.99990, 50.0000, -0.25000000, 0.0000, 3.4500, 0.000000000135822361752313607260107721120531135706133162) +> ------------------------------------------------------------------------------------------------------------- +> ROW (0.99990, 50.0000, -0.25000000, 0.0000, 3.4500, 0.000000000135822361752313607260107721120531135706133162) +> rows: 1 + +create sequence test_seq; +> ok + +create table test(id int primary key, parent int); +> ok + +create index ni on test(parent); +> ok + +alter table test add constraint nu unique(parent); +> ok + +alter table test add constraint fk foreign key(parent) references(id); +> ok + +SELECT TABLE_NAME, INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES; +> TABLE_NAME INDEX_NAME INDEX_TYPE_NAME +> ---------- ------------- --------------- +> TEST NI INDEX +> TEST NU_INDEX_2 UNIQUE INDEX +> TEST PRIMARY_KEY_2 PRIMARY KEY +> rows: 3 + +SELECT TABLE_NAME, INDEX_NAME, ORDINAL_POSITION, COLUMN_NAME FROM INFORMATION_SCHEMA.INDEX_COLUMNS; +> TABLE_NAME INDEX_NAME ORDINAL_POSITION COLUMN_NAME +> ---------- 
------------- ---------------- ----------- +> TEST NI 1 PARENT +> TEST NU_INDEX_2 1 PARENT +> TEST PRIMARY_KEY_2 1 ID +> rows: 3 + +select SEQUENCE_NAME, BASE_VALUE, INCREMENT, REMARKS from INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME BASE_VALUE INCREMENT REMARKS +> ------------- ---------- --------- ------- +> TEST_SEQ 1 1 null +> rows: 1 + +drop table test; +> ok + +drop sequence test_seq; +> ok + +create table test(id int); +> ok + +insert into test values(1), (2); +> update count: 2 + +select count(*) from test where id in ((select id from test where 1=0)); +> COUNT(*) +> -------- +> 0 +> rows: 1 + +select count(*) from test where id = ((select id from test where 1=0)+1); +> COUNT(*) +> -------- +> 0 +> rows: 1 + +select count(*) from test where id = (select id from test where 1=0); +> COUNT(*) +> -------- +> 0 +> rows: 1 + +select count(*) from test where id in ((select id from test)); +> COUNT(*) +> -------- +> 2 +> rows: 1 + +select count(*) from test where id = ((select id from test)); +> exception SCALAR_SUBQUERY_CONTAINS_MORE_THAN_ONE_ROW + +select count(*) from test where id = ARRAY [(select id from test), 1]; +> exception TYPES_ARE_NOT_COMPARABLE_2 + +select count(*) from test where id = ((select id from test fetch first row only), 1); +> exception TYPES_ARE_NOT_COMPARABLE_2 + +select (select id from test where 1=0) from test; +> (SELECT ID FROM PUBLIC.TEST WHERE FALSE) +> ---------------------------------------- +> null +> null +> rows: 2 + +drop table test; +> ok + +create table test(id int primary key, a boolean); +> ok + +insert into test values(1, 'Y'); +> update count: 1 + +call select a from test order by id; +> (SELECT A FROM PUBLIC.TEST ORDER BY ID) +> --------------------------------------- +> TRUE +> rows (ordered): 1 + +select select a from test order by id; +> (SELECT A FROM PUBLIC.TEST ORDER BY ID) +> --------------------------------------- +> TRUE +> rows: 1 + +insert into test values(2, 'N'); +> update count: 1 + +insert into test 
values(3, '1'); +> update count: 1 + +insert into test values(4, '0'); +> update count: 1 + +insert into test values(5, 'T'); +> update count: 1 + +insert into test values(6, 'F'); +> update count: 1 + +select max(id) from test where id = max(id) group by id; +> exception INVALID_USE_OF_AGGREGATE_FUNCTION_1 + +select * from test where a=TRUE=a; +> ID A +> -- ----- +> 1 TRUE +> 2 FALSE +> 3 TRUE +> 4 FALSE +> 5 TRUE +> 6 FALSE +> rows: 6 + +drop table test; +> ok + +CREATE memory TABLE TEST(ID INT PRIMARY KEY, PARENT INT REFERENCES TEST); +> ok + +CREATE memory TABLE s(S_NO VARCHAR(5) PRIMARY KEY, name VARCHAR(16), city VARCHAR(16)); +> ok + +CREATE memory TABLE p(p_no VARCHAR(5) PRIMARY KEY, descr VARCHAR(16), color VARCHAR(8)); +> ok + +CREATE memory TABLE sp1(S_NO VARCHAR(5) REFERENCES s, p_no VARCHAR(5) REFERENCES p, qty INT, PRIMARY KEY (S_NO, p_no)); +> ok + +CREATE memory TABLE sp2(S_NO VARCHAR(5), p_no VARCHAR(5), qty INT, constraint c1 FOREIGN KEY (S_NO) references s, PRIMARY KEY (S_NO, p_no)); +> ok + +script NOPASSWORDS NOSETTINGS noversion; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "PARENT" INTEGER ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE MEMORY TABLE "PUBLIC"."S"( "S_NO" CHARACTER VARYING(5) NOT NULL, "NAME" CHARACTER VARYING(16), "CITY" CHARACTER VARYING(16) ); +> ALTER TABLE "PUBLIC"."S" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_5" PRIMARY KEY("S_NO"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.S; +> CREATE MEMORY TABLE "PUBLIC"."P"( "P_NO" CHARACTER VARYING(5) NOT NULL, "DESCR" CHARACTER VARYING(16), "COLOR" CHARACTER VARYING(8) ); +> ALTER TABLE "PUBLIC"."P" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_50" PRIMARY KEY("P_NO"); +> -- 0 +/- 
SELECT COUNT(*) FROM PUBLIC.P; +> CREATE MEMORY TABLE "PUBLIC"."SP1"( "S_NO" CHARACTER VARYING(5) NOT NULL, "P_NO" CHARACTER VARYING(5) NOT NULL, "QTY" INTEGER ); +> ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_141" PRIMARY KEY("S_NO", "P_NO"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP1; +> CREATE MEMORY TABLE "PUBLIC"."SP2"( "S_NO" CHARACTER VARYING(5) NOT NULL, "P_NO" CHARACTER VARYING(5) NOT NULL, "QTY" INTEGER ); +> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_1417" PRIMARY KEY("S_NO", "P_NO"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP2; +> ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_1" FOREIGN KEY("S_NO") REFERENCES "PUBLIC"."S"("S_NO") NOCHECK; +> ALTER TABLE "PUBLIC"."SP1" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_14" FOREIGN KEY("P_NO") REFERENCES "PUBLIC"."P"("P_NO") NOCHECK; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_27" FOREIGN KEY("PARENT") REFERENCES "PUBLIC"."TEST"("ID") NOCHECK; +> ALTER TABLE "PUBLIC"."SP2" ADD CONSTRAINT "PUBLIC"."C1" FOREIGN KEY("S_NO") REFERENCES "PUBLIC"."S"("S_NO") NOCHECK; +> rows (ordered): 20 + +drop table test; +> ok + +drop table sp1; +> ok + +drop table sp2; +> ok + +drop table s; +> ok + +drop table p; +> ok + +create table test (id identity, "VALUE" int not null); +> ok + +alter table test add primary key(id); +> exception SECOND_PRIMARY_KEY + +alter table test drop primary key; +> ok + +alter table test drop primary key; +> exception INDEX_NOT_FOUND_1 + +alter table test add primary key(id, id, id); +> ok + +alter table test drop primary key; +> ok + +drop table test; +> ok + +set autocommit off; +> ok + +create local temporary table test (id identity, b int, foreign key(b) references(id)); +> ok + +drop table test; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> ------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> rows (ordered): 1 + +create local temporary table test1 (id 
identity); +> ok + +create local temporary table test2 (id identity); +> ok + +alter table test2 add constraint test2_test1 foreign key (id) references test1; +> ok + +drop table test1, test2; +> ok + +create local temporary table test1 (id identity); +> ok + +create local temporary table test2 (id identity); +> ok + +alter table test2 add constraint test2_test1 foreign key (id) references test1; +> ok + +drop table test1, test2; +> ok + +set autocommit on; +> ok + +create table test(id int primary key, ref int, foreign key(ref) references(id)); +> ok + +insert into test values(1, 1), (2, 2); +> update count: 2 + +update test set ref=3-ref; +> update count: 2 + +alter table test add column dummy int; +> ok + +insert into test values(4, 4, null); +> update count: 1 + +drop table test; +> ok + +create table test(id int primary key); +> ok + +-- Column A.ID cannot be referenced here +explain select * from test a inner join test b left outer join test c on c.id = a.id; +> exception COLUMN_NOT_FOUND_1 + +SELECT T.ID FROM TEST "T"; +> ID +> -- +> rows: 0 + +SELECT T."ID" FROM TEST "T"; +> ID +> -- +> rows: 0 + +SELECT "T".ID FROM TEST "T"; +> ID +> -- +> rows: 0 + +SELECT "T"."ID" FROM TEST "T"; +> ID +> -- +> rows: 0 + +SELECT T.ID FROM "TEST" T; +> ID +> -- +> rows: 0 + +SELECT T."ID" FROM "TEST" T; +> ID +> -- +> rows: 0 + +SELECT "T".ID FROM "TEST" T; +> ID +> -- +> rows: 0 + +SELECT "T"."ID" FROM "TEST" T; +> ID +> -- +> rows: 0 + +SELECT T.ID FROM "TEST" "T"; +> ID +> -- +> rows: 0 + +SELECT T."ID" FROM "TEST" "T"; +> ID +> -- +> rows: 0 + +SELECT "T".ID FROM "TEST" "T"; +> ID +> -- +> rows: 0 + +SELECT "T"."ID" FROM "TEST" "T"; +> ID +> -- +> rows: 0 + +select "TEST".id from test; +> ID +> -- +> rows: 0 + +select test."ID" from test; +> ID +> -- +> rows: 0 + +select test."id" from test; +> exception COLUMN_NOT_FOUND_1 + +select "TEST"."ID" from test; +> ID +> -- +> rows: 0 + +select "test"."ID" from test; +> exception COLUMN_NOT_FOUND_1 + +select public."TEST".id 
from test; +> ID +> -- +> rows: 0 + +select public.test."ID" from test; +> ID +> -- +> rows: 0 + +select public."TEST"."ID" from test; +> ID +> -- +> rows: 0 + +select public."test"."ID" from test; +> exception COLUMN_NOT_FOUND_1 + +select "PUBLIC"."TEST".id from test; +> ID +> -- +> rows: 0 + +select "PUBLIC".test."ID" from test; +> ID +> -- +> rows: 0 + +select public."TEST"."ID" from test; +> ID +> -- +> rows: 0 + +select "public"."TEST"."ID" from test; +> exception COLUMN_NOT_FOUND_1 + +drop table test; +> ok + +create schema s authorization sa; +> ok + +create memory table s.test(id int); +> ok + +create index if not exists idx_id on s.test(id); +> ok + +create index if not exists idx_id on s.test(id); +> ok + +alter index s.idx_id rename to s.x; +> ok + +alter index if exists s.idx_id rename to s.x; +> ok + +alter index if exists s.x rename to s.index_id; +> ok + +alter table s.test add constraint cu_id unique(id); +> ok + +alter table s.test add name varchar; +> ok + +alter table s.test drop column name; +> ok + +alter table s.test drop constraint cu_id; +> ok + +alter table s.test rename to testtab; +> ok + +alter table s.testtab rename to test; +> ok + +create trigger test_trigger before insert on s.test call 'org.h2.test.db.TestTriggersConstraints'; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE SCHEMA IF NOT EXISTS "S" AUTHORIZATION "SA"; +> DROP TABLE IF EXISTS "S"."TEST" CASCADE; +> CREATE MEMORY TABLE "S"."TEST"( "ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM S.TEST; +> CREATE INDEX "S"."INDEX_ID" ON "S"."TEST"("ID" NULLS FIRST); +> CREATE FORCE TRIGGER "S"."TEST_TRIGGER" BEFORE INSERT ON "S"."TEST" QUEUE 1024 CALL 'org.h2.test.db.TestTriggersConstraints'; +> rows (ordered): 7 + +drop trigger s.test_trigger; +> ok + +drop schema s cascade; +> 
ok + +CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), y int as id+1); +> ok + +INSERT INTO TEST(id, name) VALUES(1, 'Hello'); +> update count: 1 + +create index idx_n_id on test(name, id); +> ok + +alter table test add constraint abc foreign key(id) references (id); +> ok + +alter table test rename column id to i; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> DROP TABLE IF EXISTS "PUBLIC"."TEST" CASCADE; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "I" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255), "Y" INTEGER GENERATED ALWAYS AS ("I" + 1) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("I"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST"("I", "NAME") VALUES (1, 'Hello'); +> CREATE INDEX "PUBLIC"."IDX_N_ID" ON "PUBLIC"."TEST"("NAME" NULLS FIRST, "I" NULLS FIRST); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."ABC" FOREIGN KEY("I") REFERENCES "PUBLIC"."TEST"("I") NOCHECK; +> rows (ordered): 8 + +INSERT INTO TEST(i, name) VALUES(2, 'World'); +> update count: 1 + +SELECT * FROM TEST ORDER BY I; +> I NAME Y +> - ----- - +> 1 Hello 2 +> 2 World 3 +> rows (ordered): 2 + +UPDATE TEST SET NAME='Hi' WHERE I=1; +> update count: 1 + +DELETE FROM TEST t0 WHERE t0.I=2; +> update count: 1 + +drop table test; +> ok + +create table test(current int); +> ok + +select current from test; +> CURRENT +> ------- +> rows: 0 + +drop table test; +> ok + +CREATE table my_table(my_int integer, my_char varchar); +> ok + +INSERT INTO my_table VALUES(1, 'Testing'); +> update count: 1 + +ALTER TABLE my_table ALTER COLUMN my_int RENAME to my_new_int; +> ok + +SELECT my_new_int FROM my_table; +> MY_NEW_INT +> ---------- +> 1 +> rows: 1 + +UPDATE my_table SET my_new_int = 33; +> update count: 1 + 
+SELECT * FROM my_table; +> MY_NEW_INT MY_CHAR +> ---------- ------- +> 33 Testing +> rows: 1 + +DROP TABLE my_table; +> ok + +create sequence seq1; +> ok + +create table test(ID INT default next value for seq1); +> ok + +drop sequence seq1; +> exception CANNOT_DROP_2 + +alter table test add column name varchar; +> ok + +insert into test(name) values('Hello'); +> update count: 1 + +select * from test; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +drop table test; +> ok + +drop sequence seq1; +> ok + +create table test(a int primary key, b int, c int); +> ok + +alter table test add constraint unique_ba unique(b, a); +> ok + +alter table test add constraint abc foreign key(c, a) references test(b, a); +> ok + +insert into test values(1, 1, null); +> update count: 1 + +drop table test; +> ok + +create table ADDRESS (ADDRESS_ID int primary key, ADDRESS_TYPE int not null, SERVER_ID int not null); +> ok + +alter table address add constraint unique_a unique(ADDRESS_TYPE, SERVER_ID); +> ok + +create table SERVER (SERVER_ID int primary key, SERVER_TYPE int not null, ADDRESS_TYPE int); +> ok + +alter table ADDRESS add constraint addr foreign key (SERVER_ID) references SERVER; +> ok + +alter table SERVER add constraint server_const foreign key (ADDRESS_TYPE, SERVER_ID) references ADDRESS (ADDRESS_TYPE, SERVER_ID); +> ok + +insert into SERVER (SERVER_ID, SERVER_TYPE) values (1, 1); +> update count: 1 + +drop table address, server; +> ok + +CREATE TABLE PlanElements(id int primary key, name varchar, parent_id int, foreign key(parent_id) references(id) on delete cascade); +> ok + +INSERT INTO PlanElements(id,name,parent_id) VALUES(1, '#1', null), (2, '#1-A', 1), (3, '#1-A-1', 2), (4, '#1-A-2', 2); +> update count: 4 + +INSERT INTO PlanElements(id,name,parent_id) VALUES(5, '#1-B', 1), (6, '#1-B-1', 5), (7, '#1-B-2', 5); +> update count: 3 + +INSERT INTO PlanElements(id,name,parent_id) VALUES(8, '#1-C', 1), (9, '#1-C-1', 8), (10, '#1-C-2', 8); +> update count: 3 + +INSERT INTO 
PlanElements(id,name,parent_id) VALUES(11, '#1-D', 1), (12, '#1-D-1', 11), (13, '#1-D-2', 11), (14, '#1-D-3', 11); +> update count: 4 + +INSERT INTO PlanElements(id,name,parent_id) VALUES(15, '#1-E', 1), (16, '#1-E-1', 15), (17, '#1-E-2', 15), (18, '#1-E-3', 15), (19, '#1-E-4', 15); +> update count: 5 + +DELETE FROM PlanElements WHERE id = 1; +> update count: 1 + +SELECT * FROM PlanElements; +> ID NAME PARENT_ID +> -- ---- --------- +> rows: 0 + +DROP TABLE PlanElements; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, NAME VARCHAR(255), FOREIGN KEY(NAME) REFERENCES PARENT(ID)); +> ok + +INSERT INTO PARENT VALUES(1, '1'); +> update count: 1 + +INSERT INTO CHILD VALUES(1, '1'); +> update count: 1 + +INSERT INTO CHILD VALUES(2, 'Hello'); +> exception DATA_CONVERSION_ERROR_1 + +DROP TABLE IF EXISTS CHILD; +> ok + +DROP TABLE IF EXISTS PARENT; +> ok + +DECLARE GLOBAL TEMPORARY TABLE TEST(ID INT PRIMARY KEY); +> ok + +SELECT * FROM TEST; +> ID +> -- +> rows: 0 + +SELECT GROUP_CONCAT(ID) FROM TEST; +> LISTAGG(ID) WITHIN GROUP (ORDER BY NULL) +> ---------------------------------------- +> null +> rows: 1 + +SELECT * FROM SESSION.TEST; +> ID +> -- +> rows: 0 + +DROP TABLE TEST; +> ok + +VALUES(1, 2); +> C1 C2 +> -- -- +> 1 2 +> rows: 1 + +DROP TABLE IF EXISTS TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +SELECT group_concat(name) FROM TEST group by id; +> LISTAGG(NAME) WITHIN GROUP (ORDER BY NULL) +> ------------------------------------------ +> Hello +> World +> rows: 2 + +drop table test; +> ok + +create table test(a int primary key, b int invisible, c int); +> ok + +select * from test; +> A C +> - - +> rows: 0 + +select a, b, c from test; +> A B C +> - - - +> rows: 0 + +drop table test; +> ok + +--- script drop 
--------------------------------------------------------------------------------------------- +create memory table test (id int primary key, im_ie varchar(10)); +> ok + +create sequence test_seq; +> ok + +SCRIPT NODATA NOPASSWORDS NOSETTINGS NOVERSION DROP; +> SCRIPT +> -------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> DROP TABLE IF EXISTS "PUBLIC"."TEST" CASCADE; +> DROP SEQUENCE IF EXISTS "PUBLIC"."TEST_SEQ"; +> CREATE SEQUENCE "PUBLIC"."TEST_SEQ" START WITH 1; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "IM_IE" CHARACTER VARYING(10) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 7 + +drop sequence test_seq; +> ok + +drop table test; +> ok + +--- constraints --------------------------------------------------------------------------------------------- +CREATE MEMORY TABLE TEST(ID IDENTITY(100, 10), NAME VARCHAR); +> ok + +INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); +> update count: 2 + +SELECT * FROM TEST; +> ID NAME +> --- ----- +> 100 Hello +> 110 World +> rows: 2 + +DROP TABLE TEST; +> ok + +CREATE CACHED TABLE account( +id INTEGER GENERATED BY DEFAULT AS IDENTITY, +name VARCHAR NOT NULL, +mail_address VARCHAR NOT NULL, +UNIQUE(name), +PRIMARY KEY(id) +); +> ok + +CREATE CACHED TABLE label( +id INTEGER GENERATED BY DEFAULT AS IDENTITY, +parent_id INTEGER NOT NULL, +account_id INTEGER NOT NULL, +name VARCHAR NOT NULL, +PRIMARY KEY(id), +UNIQUE(parent_id, name), +UNIQUE(id, account_id), +FOREIGN KEY(account_id) REFERENCES account (id), +FOREIGN KEY(parent_id, account_id) REFERENCES label (id, account_id) +); +> ok + +INSERT INTO account VALUES (0, 'example', 'example@example.com'); +> update count: 1 + +INSERT INTO label VALUES ( 0, 0, 0, 'TEST'); +> update count: 1 + +INSERT INTO label VALUES ( 1, 0, 0, 'TEST'); +> exception 
DUPLICATE_KEY_1 + +INSERT INTO label VALUES ( 1, 0, 0, 'TEST1'); +> update count: 1 + +INSERT INTO label VALUES ( 2, 2, 1, 'TEST'); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +drop table label; +> ok + +drop table account; +> ok + +--- constraints and alter table add column --------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES(ID)); +> ok + +INSERT INTO TEST VALUES(0, 0); +> update count: 1 + +ALTER TABLE TEST ADD COLUMN CHILD_ID INT; +> ok + +ALTER TABLE TEST ALTER COLUMN CHILD_ID VARCHAR; +> ok + +ALTER TABLE TEST ALTER COLUMN PARENTID VARCHAR; +> ok + +ALTER TABLE TEST DROP COLUMN PARENTID; +> ok + +ALTER TABLE TEST DROP COLUMN CHILD_ID; +> ok + +SELECT * FROM TEST; +> ID +> -- +> 0 +> rows: 1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE A(X INT PRIMARY KEY); +> ok + +CREATE MEMORY TABLE B(XX INT, CONSTRAINT B2A FOREIGN KEY(XX) REFERENCES A(X)); +> ok + +CREATE MEMORY TABLE C(X_MASTER INT PRIMARY KEY); +> ok + +ALTER TABLE A ADD CONSTRAINT A2C FOREIGN KEY(X) REFERENCES C(X_MASTER); +> ok + +insert into c values(1); +> update count: 1 + +insert into a values(1); +> update count: 1 + +insert into b values(1); +> update count: 1 + +ALTER TABLE A ADD COLUMN Y INT; +> ok + +insert into c values(2); +> update count: 1 + +insert into a values(2, 2); +> update count: 1 + +insert into b values(2); +> update count: 1 + +DROP TABLE IF EXISTS A, B, C; +> ok + +--- quoted keywords --------------------------------------------------------------------------------------------- +CREATE TABLE "CREATE"("SELECT" INT, "PRIMARY" INT, "KEY" INT, "INDEX" INT, "ROWNUM" INT, "NEXTVAL" INT, "FROM" INT); +> ok + +INSERT INTO "CREATE" default values; +> update count: 1 + +INSERT INTO "CREATE" default values; +> update count: 1 + +SELECT "ROWNUM", ROWNUM, "SELECT" "AS", "PRIMARY" AS "X", "KEY", "NEXTVAL", "INDEX", "SELECT" "FROM" FROM "CREATE"; 
+> ROWNUM ROWNUM() AS X KEY NEXTVAL INDEX FROM +> ------ -------- ---- ---- ---- ------- ----- ---- +> null 1 null null null null null null +> null 2 null null null null null null +> rows: 2 + +DROP TABLE "CREATE"; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +CREATE TABLE CHILD(ID INT, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES PARENT(ID)); +> ok + +INSERT INTO PARENT VALUES(1, 'Mary'), (2, 'John'); +> update count: 2 + +INSERT INTO CHILD VALUES(10, 1), (11, 1), (20, 2), (21, 2); +> update count: 4 + +MERGE INTO PARENT KEY(ID) VALUES(1, 'Marcy'); +> update count: 1 + +SELECT * FROM PARENT; +> ID NAME +> -- ----- +> 1 Marcy +> 2 John +> rows: 2 + +SELECT * FROM CHILD; +> ID PARENTID +> -- -------- +> 10 1 +> 11 1 +> 20 2 +> 21 2 +> rows: 4 + +DROP TABLE PARENT, CHILD; +> ok + +--- +create table STRING_TEST(label varchar(31), label2 varchar(255)); +> ok + +create table STRING_TEST_ic(label varchar_ignorecase(31), label2 +varchar_ignorecase(255)); +> ok + +insert into STRING_TEST values('HELLO','Bye'); +> update count: 1 + +insert into STRING_TEST values('HELLO','Hello'); +> update count: 1 + +insert into STRING_TEST_ic select * from STRING_TEST; +> update count: 2 + +-- Expect rows of STRING_TEST_ic and STRING_TEST to be identical +select * from STRING_TEST; +> LABEL LABEL2 +> ----- ------ +> HELLO Bye +> HELLO Hello +> rows: 2 + +-- correct +select * from STRING_TEST_ic; +> LABEL LABEL2 +> ----- ------ +> HELLO Bye +> HELLO Hello +> rows: 2 + +drop table STRING_TEST; +> ok + +drop table STRING_TEST_ic; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR_IGNORECASE); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'), (3, 'hallo'), (4, 'hoi'); +> update count: 4 + +SELECT * FROM TEST WHERE NAME = 'HELLO'; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +SELECT * FROM TEST WHERE NAME = 'HE11O'; +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM TEST ORDER BY NAME; +> ID NAME +> -- ----- +> 3 hallo +> 1 Hello +> 4 hoi +> 2 
World +> rows (ordered): 4 + +DROP TABLE IF EXISTS TEST; +> ok + +--- update with list --------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +SELECT * FROM TEST ORDER BY ID; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 + +UPDATE TEST t0 SET t0.NAME='Hi' WHERE t0.ID=1; +> update count: 1 + +update test set (id, name)=(id+1, name || 'Hi'); +> update count: 2 + +update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id=t1.id); +> update count: 2 + +explain update test set (id, name)=(id+1, name || 'Hi'); +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = "ID" + 1, "NAME" = "NAME" || 'Hi' + +explain update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id=t1.id); +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET ("ID", "NAME") = (SELECT "ID" + 1, "NAME" || 'Ho' FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE "TEST"."ID" = "T1"."ID") + +select * from test; +> ID NAME +> -- --------- +> 3 HiHiHo +> 4 WorldHiHo +> rows: 2 + +DROP TABLE IF EXISTS TEST; +> ok + +--- script --------------------------------------------------------------------------------------------- +create memory table test(id int primary key, c clob, b blob); +> ok + +insert into test values(0, null, null); +> update count: 1 + +insert into test values(1, '', ''); +> update count: 1 + +insert into test values(2, 'Cafe', X'cafe'); +> update count: 1 + +script simple nopasswords nosettings noversion; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "C" CHARACTER LARGE OBJECT, "B" 
BINARY LARGE OBJECT ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES(0, NULL, NULL); +> INSERT INTO "PUBLIC"."TEST" VALUES(1, '', X''); +> INSERT INTO "PUBLIC"."TEST" VALUES(2, 'Cafe', X'cafe'); +> rows (ordered): 7 + +drop table test; +> ok + +--- optimizer --------------------------------------------------------------------------------------------- +create table b(id int primary key, p int); +> ok + +create index bp on b(p); +> ok + +insert into b values(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9); +> update count: 10 + +insert into b select id+10, p+10 from b; +> update count: 10 + +explain select * from b b0, b b1, b b2 where b1.p = b0.id and b2.p = b1.id and b0.id=10; +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") + +explain select * from b b0, b b1, b b2, b b3 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b0.id=10; +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") + +explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; 
+>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B4"."P" = "B3"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") + +analyze; +> ok + +explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; +>> SELECT "B0"."ID", "B0"."P", "B1"."ID", "B1"."P", "B2"."ID", "B2"."P", "B3"."ID", "B3"."P", "B4"."ID", "B4"."P" FROM "PUBLIC"."B" "B0" /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN "PUBLIC"."B" "B1" /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN "PUBLIC"."B" "B2" /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN "PUBLIC"."B" "B3" /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN "PUBLIC"."B" "B4" /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE ("B0"."ID" = 10) AND ("B3"."P" = "B2"."ID") AND ("B4"."P" = "B3"."ID") AND ("B1"."P" = "B0"."ID") AND ("B2"."P" = "B1"."ID") + +drop table if exists b; +> ok + +create table test(id int primary key, first_name varchar, name varchar, state int); +> ok + +create index idx_first_name on test(first_name); +> ok + +create index idx_name on test(name); +> ok + +create index idx_state on test(state); +> ok + +insert into test values +(0, 'Anne', 'Smith', 0), (1, 'Tom', 'Smith', 0), +(2, 'Tom', 'Jones', 0), (3, 'Steve', 'Johnson', 0), +(4, 'Steve', 'Martin', 0), (5, 'Jon', 'Jones', 0), +(6, 'Marc', 'Scott', 0), (7, 'Marc', 'Miller', 0), +(8, 'Susan', 'Wood', 
0), (9, 'Jon', 'Bennet', 0); +> update count: 10 + +EXPLAIN SELECT * FROM TEST WHERE ID = 3; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE "ID" = 3 + +explain select * from test where name='Smith' and first_name='Tom' and state=0; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_FIRST_NAME: FIRST_NAME = 'Tom' */ WHERE ("STATE" = 0) AND ("NAME" = 'Smith') AND ("FIRST_NAME" = 'Tom') + +alter table test alter column name selectivity 100; +> ok + +explain select * from test where name='Smith' and first_name='Tom' and state=0; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."FIRST_NAME", "PUBLIC"."TEST"."NAME", "PUBLIC"."TEST"."STATE" FROM "PUBLIC"."TEST" /* PUBLIC.IDX_NAME: NAME = 'Smith' */ WHERE ("STATE" = 0) AND ("NAME" = 'Smith') AND ("FIRST_NAME" = 'Tom') + +drop table test; +> ok + +CREATE TABLE O(X INT PRIMARY KEY, Y INT); +> ok + +INSERT INTO O SELECT X, X+1 FROM SYSTEM_RANGE(1, 1000); +> update count: 1000 + +EXPLAIN SELECT A.X FROM O B, O A, O F, O D, O C, O E, O G, O H, O I, O J +WHERE 1=J.X and J.Y=I.X AND I.Y=H.X AND H.Y=G.X AND G.Y=F.X AND F.Y=E.X +AND E.Y=D.X AND D.Y=C.X AND C.Y=B.X AND B.Y=A.X; +>> SELECT "A"."X" FROM "PUBLIC"."O" "J" /* PUBLIC.PRIMARY_KEY_4: X = 1 */ /* WHERE J.X = 1 */ INNER JOIN "PUBLIC"."O" "I" /* PUBLIC.PRIMARY_KEY_4: X = J.Y */ ON 1=1 /* WHERE J.Y = I.X */ INNER JOIN "PUBLIC"."O" "H" /* PUBLIC.PRIMARY_KEY_4: X = I.Y */ ON 1=1 /* WHERE I.Y = H.X */ INNER JOIN "PUBLIC"."O" "G" /* PUBLIC.PRIMARY_KEY_4: X = H.Y */ ON 1=1 /* WHERE H.Y = G.X */ INNER JOIN "PUBLIC"."O" "F" /* PUBLIC.PRIMARY_KEY_4: X = G.Y */ ON 1=1 /* WHERE G.Y = F.X */ INNER JOIN "PUBLIC"."O" "E" /* PUBLIC.PRIMARY_KEY_4: X = F.Y */ ON 1=1 /* WHERE F.Y = E.X */ INNER JOIN "PUBLIC"."O" "D" /* PUBLIC.PRIMARY_KEY_4: X = E.Y */ ON 1=1 /* WHERE E.Y = D.X 
*/ INNER JOIN "PUBLIC"."O" "C" /* PUBLIC.PRIMARY_KEY_4: X = D.Y */ ON 1=1 /* WHERE D.Y = C.X */ INNER JOIN "PUBLIC"."O" "B" /* PUBLIC.PRIMARY_KEY_4: X = C.Y */ ON 1=1 /* WHERE C.Y = B.X */ INNER JOIN "PUBLIC"."O" "A" /* PUBLIC.PRIMARY_KEY_4: X = B.Y */ ON 1=1 WHERE ("J"."X" = 1) AND ("I"."Y" = "H"."X") AND ("H"."Y" = "G"."X") AND ("G"."Y" = "F"."X") AND ("F"."Y" = "E"."X") AND ("E"."Y" = "D"."X") AND ("D"."Y" = "C"."X") AND ("C"."Y" = "B"."X") AND ("B"."Y" = "A"."X") AND ("J"."Y" = "I"."X") + +DROP TABLE O; +> ok + +CREATE TABLE PARENT(ID INT PRIMARY KEY, AID INT, BID INT, CID INT, DID INT, EID INT, FID INT, GID INT, HID INT); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY); +> ok + +INSERT INTO PARENT SELECT X, 1, 2, 1, 2, 1, 2, 1, 2 FROM SYSTEM_RANGE(0, 1000); +> update count: 1001 + +INSERT INTO CHILD SELECT X FROM SYSTEM_RANGE(0, 1000); +> update count: 1001 + +SELECT COUNT(*) FROM PARENT, CHILD A, CHILD B, CHILD C, CHILD D, CHILD E, CHILD F, CHILD G, CHILD H +WHERE AID=A.ID AND BID=B.ID AND CID=C.ID +AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; +> COUNT(*) +> -------- +> 1001 +> rows: 1 + +EXPLAIN SELECT COUNT(*) FROM PARENT, CHILD A, CHILD B, CHILD C, CHILD D, CHILD E, CHILD F, CHILD G, CHILD H +WHERE AID=A.ID AND BID=B.ID AND CID=C.ID +AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; +>> SELECT COUNT(*) FROM "PUBLIC"."PARENT" /* PUBLIC.PARENT.tableScan */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* 
WHERE FID = F.ID */ INNER JOIN "PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 /* WHERE GID = G.ID */ INNER JOIN "PUBLIC"."CHILD" "H" /* PUBLIC.PRIMARY_KEY_3: ID = HID */ ON 1=1 WHERE ("CID" = "C"."ID") AND ("DID" = "D"."ID") AND ("EID" = "E"."ID") AND ("FID" = "F"."ID") AND ("GID" = "G"."ID") AND ("HID" = "H"."ID") AND ("AID" = "A"."ID") AND ("BID" = "B"."ID") + +CREATE TABLE FAMILY(ID INT PRIMARY KEY, PARENTID INT); +> ok + +INSERT INTO FAMILY SELECT X, X-1 FROM SYSTEM_RANGE(0, 1000); +> update count: 1001 + +EXPLAIN SELECT COUNT(*) FROM CHILD A, CHILD B, FAMILY, CHILD C, CHILD D, PARENT, CHILD E, CHILD F, CHILD G +WHERE FAMILY.ID=1 AND FAMILY.PARENTID=PARENT.ID +AND AID=A.ID AND BID=B.ID AND CID=C.ID AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID; +>> SELECT COUNT(*) FROM "PUBLIC"."FAMILY" /* PUBLIC.PRIMARY_KEY_7: ID = 1 */ /* WHERE FAMILY.ID = 1 */ INNER JOIN "PUBLIC"."PARENT" /* PUBLIC.PRIMARY_KEY_8: ID = FAMILY.PARENTID */ ON 1=1 /* WHERE FAMILY.PARENTID = PARENT.ID */ INNER JOIN "PUBLIC"."CHILD" "A" /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN "PUBLIC"."CHILD" "B" /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN "PUBLIC"."CHILD" "C" /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN "PUBLIC"."CHILD" "D" /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN "PUBLIC"."CHILD" "E" /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN "PUBLIC"."CHILD" "F" /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN "PUBLIC"."CHILD" "G" /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 WHERE ("FAMILY"."ID" = 1) AND ("AID" = "A"."ID") AND ("BID" = "B"."ID") AND ("CID" = "C"."ID") AND ("DID" = "D"."ID") AND ("EID" = "E"."ID") AND ("FID" = "F"."ID") AND ("GID" = "G"."ID") AND ("FAMILY"."PARENTID" = "PARENT"."ID") + +DROP TABLE FAMILY; +> ok + +DROP TABLE PARENT; +> ok + +DROP TABLE CHILD; +> ok + 
+--- is null / not is null --------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT UNIQUE, NAME VARCHAR CHECK LENGTH(NAME)>3); +> ok + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, NAME VARCHAR(255), B INT); +> ok + +CREATE UNIQUE INDEX IDXNAME ON TEST(NAME); +> ok + +CREATE UNIQUE INDEX IDX_NAME_B ON TEST(NAME, B); +> ok + +INSERT INTO TEST(ID, NAME, B) VALUES (0, NULL, NULL); +> update count: 1 + +INSERT INTO TEST(ID, NAME, B) VALUES (1, 'Hello', NULL); +> update count: 1 + +INSERT INTO TEST(ID, NAME, B) VALUES (2, NULL, NULL); +> update count: 1 + +INSERT INTO TEST(ID, NAME, B) VALUES (3, 'World', NULL); +> update count: 1 + +select * from test; +> ID NAME B +> -- ----- ---- +> 0 null null +> 1 Hello null +> 2 null null +> 3 World null +> rows: 4 + +UPDATE test SET name='Hi'; +> exception DUPLICATE_KEY_1 + +select * from test; +> ID NAME B +> -- ----- ---- +> 0 null null +> 1 Hello null +> 2 null null +> 3 World null +> rows: 4 + +UPDATE test SET name=NULL; +> update count: 4 + +UPDATE test SET B=1; +> update count: 4 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES(NULL, NULL), (0, 'Hello'), (1, 'World'); +> update count: 3 + +SELECT * FROM TEST WHERE NOT (1=1); +> ID NAME +> -- ---- +> rows: 0 + +DROP TABLE TEST; +> ok + +create table test_null(a int, b int); +> ok + +insert into test_null values(0, 0); +> update count: 1 + +insert into test_null values(0, null); +> update count: 1 + +insert into test_null values(null, null); +> update count: 1 + +insert into test_null values(null, 0); +> update count: 1 + +select * from test_null where a=0; +> A B +> - ---- +> 0 0 +> 0 null +> rows: 2 + +select * from test_null where not a=0; +> A B +> - - +> rows: 0 + +select * from test_null where (a=0 or b=0); +> A B +> ---- ---- +> 0 0 +> 0 null +> null 0 +> rows: 3 + +select * from test_null where not (a=0 or b=0); +> A B +> - - +> rows: 0 + 
+select * from test_null where (a=1 or b=0); +> A B +> ---- - +> 0 0 +> null 0 +> rows: 2 + +select * from test_null where not( a=1 or b=0); +> A B +> - - +> rows: 0 + +select * from test_null where not(not( a=1 or b=0)); +> A B +> ---- - +> 0 0 +> null 0 +> rows: 2 + +select * from test_null where a=0 or b=0; +> A B +> ---- ---- +> 0 0 +> 0 null +> null 0 +> rows: 3 + +SELECT count(*) FROM test_null WHERE not ('X'=null and 1=0); +> COUNT(*) +> -------- +> 4 +> rows: 1 + +drop table if exists test_null; +> ok + +--- schema ---------------------------------------------------------------------------------------------- +SELECT DISTINCT TABLE_SCHEMA, TABLE_CATALOG FROM INFORMATION_SCHEMA.TABLES ORDER BY TABLE_SCHEMA; +> TABLE_SCHEMA TABLE_CATALOG +> ------------------ ------------- +> INFORMATION_SCHEMA SCRIPT +> rows (ordered): 1 + +SELECT * FROM INFORMATION_SCHEMA.SCHEMATA; +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- -------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null +> rows: 2 + +SELECT * FROM INFORMATION_SCHEMA.INFORMATION_SCHEMA_CATALOG_NAME; +> CATALOG_NAME +> ------------ +> SCRIPT +> rows: 1 + +SELECT INFORMATION_SCHEMA.SCHEMATA.SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA; +> SCHEMA_NAME +> ------------------ +> INFORMATION_SCHEMA +> PUBLIC +> rows: 2 + +SELECT INFORMATION_SCHEMA.SCHEMATA.* FROM INFORMATION_SCHEMA.SCHEMATA; +> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_CATALOG DEFAULT_CHARACTER_SET_SCHEMA DEFAULT_CHARACTER_SET_NAME SQL_PATH DEFAULT_COLLATION_NAME REMARKS +> ------------ ------------------ ------------ ----------------------------- ---------------------------- 
-------------------------- -------- ---------------------- ------- +> SCRIPT INFORMATION_SCHEMA SA SCRIPT PUBLIC Unicode null OFF null +> SCRIPT PUBLIC SA SCRIPT PUBLIC Unicode null OFF null +> rows: 2 + +CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA; +> ok + +DROP SCHEMA TEST_SCHEMA RESTRICT; +> ok + +create schema Contact_Schema AUTHORIZATION SA; +> ok + +CREATE TABLE Contact_Schema.Address ( +address_id BIGINT NOT NULL +CONSTRAINT address_id_check +CHECK (address_id > 0), +address_type VARCHAR(20) NOT NULL +CONSTRAINT address_type +CHECK (address_type in ('postal','email','web')), +CONSTRAINT X_PKAddress +PRIMARY KEY (address_id) +); +> ok + +create schema ClientServer_Schema AUTHORIZATION SA; +> ok + +CREATE TABLE ClientServer_Schema.PrimaryKey_Seq ( +sequence_name VARCHAR(100) NOT NULL, +seq_number BIGINT NOT NULL UNIQUE, +CONSTRAINT X_PKPrimaryKey_Seq +PRIMARY KEY (sequence_name) +); +> ok + +alter table Contact_Schema.Address add constraint abc foreign key(address_id) +references ClientServer_Schema.PrimaryKey_Seq(seq_number); +> ok + +drop table ClientServer_Schema.PrimaryKey_Seq, Contact_Schema.Address; +> ok + +drop schema Contact_Schema restrict; +> ok + +drop schema ClientServer_Schema restrict; +> ok + +--- alter table add / drop / rename column ---------------------------------------------------------------------------------------------- +CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 + +ALTER TABLE TEST ADD CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL; +> ok + +ALTER TABLE TEST ADD NAME VARCHAR(255) NULL BEFORE CREATEDATE; +> ok + 
+CREATE INDEX IDXNAME ON TEST(NAME); +> ok + +INSERT INTO TEST(ID, NAME) VALUES(1, 'Hi'); +> update count: 1 + +ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN NAME SET NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN NAME SET NULL; +> ok + +ALTER TABLE TEST ALTER COLUMN NAME SET DEFAULT 1; +> ok + +SELECT * FROM TEST; +> ID NAME CREATEDATE +> -- ---- ---------- +> 1 Hi 2001-01-01 +> rows: 1 + +ALTER TABLE TEST ADD MODIFY_DATE TIMESTAMP; +> ok + +CREATE MEMORY TABLE TEST_SEQ(ID INT, NAME VARCHAR); +> ok + +INSERT INTO TEST_SEQ VALUES(-1, '-1'); +> update count: 1 + +ALTER TABLE TEST_SEQ ALTER COLUMN ID IDENTITY; +> ok + +INSERT INTO TEST_SEQ VALUES(NULL, '1'); +> exception NULL_NOT_ALLOWED + +INSERT INTO TEST_SEQ VALUES(DEFAULT, '1'); +> update count: 1 + +ALTER TABLE TEST_SEQ ALTER COLUMN ID RESTART WITH 10; +> ok + +INSERT INTO TEST_SEQ VALUES(DEFAULT, '10'); +> update count: 1 + +alter table test_seq drop primary key; +> ok + +ALTER TABLE TEST_SEQ ALTER COLUMN ID INT DEFAULT 20; +> ok + +INSERT INTO TEST_SEQ VALUES(DEFAULT, '20'); +> update count: 1 + +ALTER TABLE TEST_SEQ ALTER COLUMN NAME RENAME TO DATA; +> ok + +SELECT * FROM TEST_SEQ ORDER BY ID; +> ID DATA +> -- ---- +> -1 -1 +> 1 1 +> 10 10 +> 20 20 +> rows (ordered): 4 + +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST_SEQ"( "ID" INTEGER DEFAULT 20 NOT NULL, "DATA" CHARACTER VARYING ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST_SEQ; +> INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(-1, '-1'); +> INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(1, '1'); +> INSERT INTO "PUBLIC"."TEST_SEQ" VALUES(10, '10'); +> INSERT INTO 
"PUBLIC"."TEST_SEQ" VALUES(20, '20'); +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) DEFAULT 1, "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES(1, 'Hi', '2001-01-01', NULL); +> CREATE INDEX "PUBLIC"."IDXNAME" ON "PUBLIC"."TEST"("NAME" NULLS FIRST); +> rows (ordered): 12 + +CREATE UNIQUE INDEX IDX_NAME_ID ON TEST(ID, NAME); +> ok + +ALTER TABLE TEST DROP COLUMN NAME; +> exception COLUMN_IS_REFERENCED_1 + +DROP INDEX IDX_NAME_ID; +> ok + +DROP INDEX IDX_NAME_ID IF EXISTS; +> ok + +ALTER TABLE TEST DROP NAME; +> ok + +DROP TABLE TEST_SEQ; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1, '2001-01-01', NULL); +> rows (ordered): 5 + +ALTER TABLE TEST ADD NAME VARCHAR(255) NULL BEFORE CREATEDATE; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255), "CREATEDATE" CHARACTER VARYING(255) DEFAULT '2001-01-01' NOT NULL, "MODIFY_DATE" 
TIMESTAMP ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES (1, NULL, '2001-01-01', NULL); +> rows (ordered): 5 + +UPDATE TEST SET NAME = 'Hi'; +> update count: 1 + +INSERT INTO TEST VALUES(2, 'Hello', DEFAULT, DEFAULT); +> update count: 1 + +SELECT * FROM TEST; +> ID NAME CREATEDATE MODIFY_DATE +> -- ----- ---------- ----------- +> 1 Hi 2001-01-01 null +> 2 Hello 2001-01-01 null +> rows: 2 + +DROP TABLE TEST; +> ok + +create table test(id int, name varchar invisible); +> ok + +select * from test; +> ID +> -- +> rows: 0 + +alter table test alter column name set visible; +> ok + +select * from test; +> ID NAME +> -- ---- +> rows: 0 + +alter table test add modify_date timestamp invisible before name; +> ok + +select * from test; +> ID NAME +> -- ---- +> rows: 0 + +alter table test alter column modify_date timestamp visible; +> ok + +select * from test; +> ID MODIFY_DATE NAME +> -- ----------- ---- +> rows: 0 + +alter table test alter column modify_date set invisible; +> ok + +select * from test; +> ID NAME +> -- ---- +> rows: 0 + +drop table test; +> ok + +CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> rows (ordered): 4 + +INSERT INTO TEST(ID, NAME) VALUES(1, 'Hi'), (2, 'World'); +> update count: 2 + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 1 Hi +> 2 World +> rows: 2 + +SELECT * FROM TEST WHERE ? 
IS NULL; +{ +Hello +> ID NAME +> -- ---- +> rows: 0 +}; +> update count: 0 + +DROP TABLE TEST; +> ok + +--- limit/offset ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'), (3, 'with'), (4, 'limited'), (5, 'resources'); +> update count: 5 + +SELECT TOP 2 * FROM TEST ORDER BY ID; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 + +SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; +> ID NAME +> -- ----- +> 2 World +> 3 with +> rows (ordered): 2 + +SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> rows (ordered): 2 + +SELECT * FROM TEST ORDER BY ID OFFSET 4; +> ID NAME +> -- --------- +> 5 resources +> rows (ordered): 1 + +SELECT ID FROM TEST GROUP BY ID UNION ALL SELECT ID FROM TEST GROUP BY ID; +> ID +> -- +> 1 +> 1 +> 2 +> 2 +> 3 +> 3 +> 4 +> 4 +> 5 +> 5 +> rows: 10 + +SELECT * FROM (SELECT ID FROM TEST GROUP BY ID); +> ID +> -- +> 1 +> 2 +> 3 +> 4 +> 5 +> rows: 5 + +EXPLAIN SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; +>> (SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) UNION ALL (SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) ORDER BY 1 OFFSET 1 ROW FETCH NEXT 2 ROWS ONLY + +EXPLAIN DELETE FROM TEST WHERE ID=1; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST2COL(A INT, B INT, C VARCHAR(255), PRIMARY KEY(A, B)); +> ok + +INSERT INTO TEST2COL VALUES(0, 0, 'Hallo'), (0, 1, 'Welt'), (1, 0, 'Hello'), (1, 1, 'World'); +> update count: 4 + +SELECT * FROM TEST2COL WHERE A=0 AND B=0; +> A B C +> - - ----- +> 0 0 Hallo +> rows: 1 + +EXPLAIN SELECT * FROM TEST2COL WHERE A=0 AND B=0; +>> SELECT 
"PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 AND B = 0 */ WHERE ("A" = 0) AND ("B" = 0) + +SELECT * FROM TEST2COL WHERE A=0; +> A B C +> - - ----- +> 0 0 Hallo +> 0 1 Welt +> rows: 2 + +EXPLAIN SELECT * FROM TEST2COL WHERE A=0; +>> SELECT "PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.PRIMARY_KEY_E: A = 0 */ WHERE "A" = 0 + +SELECT * FROM TEST2COL WHERE B=0; +> A B C +> - - ----- +> 0 0 Hallo +> 1 0 Hello +> rows: 2 + +EXPLAIN SELECT * FROM TEST2COL WHERE B=0; +>> SELECT "PUBLIC"."TEST2COL"."A", "PUBLIC"."TEST2COL"."B", "PUBLIC"."TEST2COL"."C" FROM "PUBLIC"."TEST2COL" /* PUBLIC.TEST2COL.tableScan */ WHERE "B" = 0 + +DROP TABLE TEST2COL; +> ok + +--- testCases ---------------------------------------------------------------------------------------------- +CREATE TABLE t_1 (ch CHARACTER(10), dec DECIMAL(10,2), do DOUBLE, lo BIGINT, "IN" INTEGER, sm SMALLINT, ty TINYINT, +da DATE DEFAULT CURRENT_DATE, ti TIME DEFAULT CURRENT_TIME, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); +> ok + +INSERT INTO T_1 (ch, dec, do) VALUES ('name', 10.23, 0); +> update count: 1 + +SELECT COUNT(*) FROM T_1; +> COUNT(*) +> -------- +> 1 +> rows: 1 + +DROP TABLE T_1; +> ok + +--- rights ---------------------------------------------------------------------------------------------- +CREATE USER TEST_USER PASSWORD '123'; +> ok + +CREATE TABLE TEST(ID INT); +> ok + +CREATE ROLE TEST_ROLE; +> ok + +CREATE ROLE IF NOT EXISTS TEST_ROLE; +> ok + +GRANT SELECT, INSERT ON TEST TO TEST_USER; +> ok + +GRANT UPDATE ON TEST TO TEST_ROLE; +> ok + +GRANT TEST_ROLE TO TEST_USER; +> ok + +SELECT ROLE_NAME FROM INFORMATION_SCHEMA.ROLES; +> ROLE_NAME +> --------- +> PUBLIC +> TEST_ROLE +> rows: 2 + +SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_SCHEMA, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; +> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME 
+> --------- ----------- ----------- -------------- ------------ ---------- +> TEST_ROLE ROLE null UPDATE PUBLIC TEST +> TEST_USER USER TEST_ROLE null null null +> TEST_USER USER null SELECT, INSERT PUBLIC TEST +> rows: 3 + +SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; +> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE WITH_HIERARCHY +> ------- --------- ------------- ------------ ---------- -------------- ------------ -------------- +> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO NO +> null TEST_USER SCRIPT PUBLIC TEST INSERT NO NO +> null TEST_USER SCRIPT PUBLIC TEST SELECT NO NO +> rows: 3 + +SELECT * FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES; +> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE +> ------- --------- ------------- ------------ ---------- ----------- -------------- ------------ +> null TEST_ROLE SCRIPT PUBLIC TEST ID UPDATE NO +> null TEST_USER SCRIPT PUBLIC TEST ID INSERT NO +> null TEST_USER SCRIPT PUBLIC TEST ID SELECT NO +> rows: 3 + +REVOKE INSERT ON TEST FROM TEST_USER; +> ok + +REVOKE TEST_ROLE FROM TEST_USER; +> ok + +SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; +> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_NAME +> --------- ----------- ----------- ------ ---------- +> TEST_ROLE ROLE null UPDATE TEST +> TEST_USER USER null SELECT TEST +> rows: 2 + +SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; +> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE WITH_HIERARCHY +> ------- --------- ------------- ------------ ---------- -------------- ------------ -------------- +> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO NO +> null TEST_USER SCRIPT PUBLIC TEST SELECT NO NO +> rows: 2 + +DROP USER TEST_USER; +> ok + +DROP TABLE TEST; +> ok + +DROP ROLE TEST_ROLE; +> ok + +SELECT * FROM INFORMATION_SCHEMA.ROLES; +> ROLE_NAME REMARKS +> --------- ------- +> PUBLIC null +> rows: 1 + +SELECT 
* FROM INFORMATION_SCHEMA.RIGHTS; +> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME +> ------- ----------- ----------- ------ ------------ ---------- +> rows: 0 + +--- plan ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(?, ?); +{ +1, Hello +2, World +3, Peace +}; +> update count: 3 + +EXPLAIN INSERT INTO TEST VALUES(1, 'Test'); +>> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (1, 'Test') + +EXPLAIN INSERT INTO TEST VALUES(1, 'Test'), (2, 'World'); +>> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") VALUES (1, 'Test'), (2, 'World') + +EXPLAIN INSERT INTO TEST SELECT DISTINCT ID+1, NAME FROM TEST; +>> INSERT INTO "PUBLIC"."TEST"("ID", "NAME") SELECT DISTINCT "ID" + 1, "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT DISTINCT ID + 1, NAME FROM TEST; +>> SELECT DISTINCT "ID" + 1, "NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN SELECT * FROM TEST WHERE 1=0; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE + +EXPLAIN SELECT TOP 1 * FROM TEST FOR UPDATE; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ FETCH FIRST ROW ONLY FOR UPDATE + +EXPLAIN SELECT COUNT(NAME) FROM TEST WHERE ID=1; +>> SELECT COUNT("NAME") FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 + +EXPLAIN SELECT * FROM TEST WHERE (ID>=1 AND ID<=2) OR (ID>0 AND ID<3) AND (ID<>6) ORDER BY NAME NULLS FIRST, 1 NULLS LAST, (1+1) DESC; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ WHERE (("ID" >= 1) AND ("ID" <= 2)) OR (("ID" <> 6) AND ("ID" > 0) AND ("ID" < 3)) ORDER BY 2 NULLS FIRST, 1 NULLS LAST + +EXPLAIN SELECT * FROM TEST WHERE ID=1 GROUP BY NAME, ID; +>> SELECT "PUBLIC"."TEST"."ID", "PUBLIC"."TEST"."NAME" 
FROM "PUBLIC"."TEST" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE "ID" = 1 GROUP BY "NAME", "ID" + +EXPLAIN PLAN FOR UPDATE TEST SET NAME='Hello', ID=1 WHERE NAME LIKE 'T%' ESCAPE 'x'; +>> UPDATE "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ SET "ID" = 1, "NAME" = 'Hello' WHERE "NAME" LIKE 'T%' ESCAPE 'x' + +EXPLAIN PLAN FOR DELETE FROM TEST; +>> DELETE FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN PLAN FOR SELECT NAME, COUNT(*) FROM TEST GROUP BY NAME HAVING COUNT(*) > 1; +>> SELECT "NAME", COUNT(*) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ GROUP BY "NAME" HAVING COUNT(*) > 1 + +EXPLAIN PLAN FOR SELECT * FROM test t1 inner join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; +>> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ INNER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON 1=1 WHERE ("T1"."ID" = 1) AND ("T2"."NAME" IS NOT NULL) AND ("T1"."ID" = "T2"."ID") + +EXPLAIN PLAN FOR SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; +>> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON ("T2"."NAME" IS NOT NULL) AND ("T1"."ID" = "T2"."ID") WHERE "T1"."ID" = 1 + +EXPLAIN PLAN FOR SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is null where t1.id=1; +>> SELECT "T1"."ID", "T1"."NAME", "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ LEFT OUTER JOIN "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON ("T2"."NAME" IS NULL) AND ("T1"."ID" = "T2"."ID") WHERE "T1"."ID" = 1 + +EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE EXISTS(SELECT * FROM TEST T2 WHERE T1.ID-1 = T2.ID); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* 
PUBLIC.TEST.tableScan */ WHERE EXISTS( SELECT "T2"."ID", "T2"."NAME" FROM "PUBLIC"."TEST" "T2" /* PUBLIC.PRIMARY_KEY_2: ID = (T1.ID - 1) */ WHERE ("T1"."ID" - 1) = "T2"."ID") + +EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(1, 2); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2) */ WHERE "ID" IN(1, 2) + +EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(SELECT ID FROM TEST); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT DISTINCT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) */ WHERE "ID" IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) + +EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID NOT IN(SELECT ID FROM TEST); +>> SELECT "T1"."ID", "T1"."NAME" FROM "PUBLIC"."TEST" "T1" /* PUBLIC.TEST.tableScan */ WHERE "ID" NOT IN( SELECT DISTINCT "ID" FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */) + +EXPLAIN PLAN FOR SELECT CAST(ID AS VARCHAR(255)) FROM TEST; +>> SELECT CAST("ID" AS CHARACTER VARYING(255)) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +EXPLAIN PLAN FOR SELECT LEFT(NAME, 2) FROM TEST; +>> SELECT LEFT("NAME", 2) FROM "PUBLIC"."TEST" /* PUBLIC.TEST.tableScan */ + +SELECT * FROM test t1 inner join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 Hello +> rows: 1 + +SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; +> ID NAME ID NAME +> -- ----- -- ----- +> 1 Hello 1 Hello +> rows: 1 + +SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is null where t1.id=1; +> ID NAME ID NAME +> -- ----- ---- ---- +> 1 Hello null null +> rows: 1 + +DROP TABLE TEST; +> ok + +--- union ---------------------------------------------------------------------------------------------- +SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER BY 1; +> X +> - +> 1 +> 1 +> 2 +> 2 +> rows 
(ordered): 4 + +EXPLAIN (SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER BY 1); +>> (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */) UNION ALL (SELECT "SYSTEM_RANGE"."X" FROM SYSTEM_RANGE(1, 2) /* range index */) ORDER BY 1 + +CREATE TABLE CHILDREN(ID INT PRIMARY KEY, NAME VARCHAR(255), CLASS INT); +> ok + +CREATE TABLE CLASSES(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO CHILDREN VALUES(?, ?, ?); +{ +0, Joe, 0 +1, Anne, 1 +2, Joerg, 1 +3, Petra, 2 +}; +> update count: 4 + +INSERT INTO CLASSES VALUES(?, ?); +{ +0, Kindergarden +1, Class 1 +2, Class 2 +3, Class 3 +4, Class 4 +}; +> update count: 5 + +SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UPDATE; +> ID NAME CLASS +> -- ----- ----- +> 0 Joe 0 +> 0 Joe 0 +> 1 Anne 1 +> 1 Anne 1 +> 2 Joerg 1 +> 2 Joerg 1 +> 3 Petra 2 +> 3 Petra 2 +> rows (ordered): 8 + +EXPLAIN SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UPDATE; +>> (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) UNION ALL (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) ORDER BY 1, 2 FOR UPDATE + +SELECT 'Child', ID, NAME FROM CHILDREN UNION SELECT 'Class', ID, NAME FROM CLASSES; +> 'Child' ID NAME +> ------- -- ------------ +> Child 0 Joe +> Child 1 Anne +> Child 2 Joerg +> Child 3 Petra +> Class 0 Kindergarden +> Class 1 Class1 +> Class 2 Class2 +> Class 3 Class3 +> Class 4 Class4 +> rows: 9 + +EXPLAIN SELECT 'Child', ID, NAME FROM CHILDREN UNION SELECT 'Class', ID, NAME FROM CLASSES; +>> (SELECT 'Child', "ID", "NAME" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) UNION (SELECT 'Class', "ID", "NAME" FROM "PUBLIC"."CLASSES" /* PUBLIC.CLASSES.tableScan */) + +SELECT * FROM CHILDREN EXCEPT SELECT * 
FROM CHILDREN WHERE CLASS=0; +> ID NAME CLASS +> -- ----- ----- +> 1 Anne 1 +> 2 Joerg 1 +> 3 Petra 2 +> rows: 3 + +EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; +>> (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) + +EXPLAIN SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; +>> (SELECT "CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) INTERSECT (SELECT "ID" FROM "PUBLIC"."CLASSES" /* PUBLIC.CLASSES.tableScan */) + +SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; +> CLASS +> ----- +> 0 +> 1 +> 2 +> rows: 3 + +EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; +>> (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT "PUBLIC"."CHILDREN"."ID", "PUBLIC"."CHILDREN"."NAME", "PUBLIC"."CHILDREN"."CLASS" FROM "PUBLIC"."CHILDREN" /* PUBLIC.CHILDREN.tableScan */ WHERE "CLASS" = 0) + +SELECT * FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; +> ID NAME CLASS ID NAME +> -- ----- ----- -- ------------ +> 0 Joe 0 0 Kindergarden +> 1 Anne 1 1 Class1 +> 2 Joerg 1 1 Class1 +> 3 Petra 2 2 Class2 +> rows: 4 + +SELECT CH.ID CH_ID, CH.NAME CH_NAME, CL.ID CL_ID, CL.NAME CL_NAME FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; +> CH_ID CH_NAME CL_ID CL_NAME +> ----- ------- ----- ------------ +> 0 Joe 0 Kindergarden +> 1 Anne 1 Class1 +> 2 Joerg 1 Class1 +> 3 Petra 2 Class2 +> rows: 4 + +CREATE VIEW CHILDREN_CLASSES(CH_ID, CH_NAME, CL_ID, CL_NAME) AS +SELECT CH.ID CH_ID1, CH.NAME CH_NAME2, CL.ID CL_ID3, CL.NAME CL_NAME4 +FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; +> ok + +SELECT * FROM CHILDREN_CLASSES 
WHERE CH_NAME <> 'X'; +> CH_ID CH_NAME CL_ID CL_NAME +> ----- ------- ----- ------------ +> 0 Joe 0 Kindergarden +> 1 Anne 1 Class1 +> 2 Joerg 1 Class1 +> 3 Petra 2 Class2 +> rows: 4 + +CREATE VIEW CHILDREN_CLASS1 AS SELECT * FROM CHILDREN_CLASSES WHERE CL_ID=1; +> ok + +SELECT * FROM CHILDREN_CLASS1; +> CH_ID CH_NAME CL_ID CL_NAME +> ----- ------- ----- ------- +> 1 Anne 1 Class1 +> 2 Joerg 1 Class1 +> rows: 2 + +CREATE VIEW CHILDREN_CLASS2 AS SELECT * FROM CHILDREN_CLASSES WHERE CL_ID=2; +> ok + +SELECT * FROM CHILDREN_CLASS2; +> CH_ID CH_NAME CL_ID CL_NAME +> ----- ------- ----- ------- +> 3 Petra 2 Class2 +> rows: 1 + +CREATE VIEW CHILDREN_CLASS12 AS SELECT * FROM CHILDREN_CLASS1 UNION ALL SELECT * FROM CHILDREN_CLASS1; +> ok + +SELECT * FROM CHILDREN_CLASS12; +> CH_ID CH_NAME CL_ID CL_NAME +> ----- ------- ----- ------- +> 1 Anne 1 Class1 +> 1 Anne 1 Class1 +> 2 Joerg 1 Class1 +> 2 Joerg 1 Class1 +> rows: 4 + +DROP VIEW CHILDREN_CLASS2; +> ok + +DROP VIEW CHILDREN_CLASS1 cascade; +> ok + +DROP VIEW CHILDREN_CLASSES; +> ok + +DROP VIEW CHILDREN_CLASS12; +> exception VIEW_NOT_FOUND_1 + +CREATE VIEW V_UNION AS SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN; +> ok + +SELECT * FROM V_UNION WHERE ID=1; +> ID NAME CLASS +> -- ---- ----- +> 1 Anne 1 +> 1 Anne 1 +> rows: 2 + +EXPLAIN SELECT * FROM V_UNION WHERE ID=1; +>> SELECT "PUBLIC"."V_UNION"."ID", "PUBLIC"."V_UNION"."NAME", "PUBLIC"."V_UNION"."CLASS" FROM "PUBLIC"."V_UNION" /* (SELECT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1) UNION ALL (SELECT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1): ID = 1 */ WHERE "ID" = 1 + +CREATE VIEW V_EXCEPT AS SELECT * FROM CHILDREN EXCEPT SELECT * 
FROM CHILDREN WHERE ID=2; +> ok + +SELECT * FROM V_EXCEPT WHERE ID=1; +> ID NAME CLASS +> -- ---- ----- +> 1 Anne 1 +> rows: 1 + +EXPLAIN SELECT * FROM V_EXCEPT WHERE ID=1; +>> SELECT "PUBLIC"."V_EXCEPT"."ID", "PUBLIC"."V_EXCEPT"."NAME", "PUBLIC"."V_EXCEPT"."CLASS" FROM "PUBLIC"."V_EXCEPT" /* (SELECT DISTINCT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CHILDREN.ID IS NOT DISTINCT FROM ?1) EXCEPT (SELECT DISTINCT PUBLIC.CHILDREN.ID, PUBLIC.CHILDREN.NAME, PUBLIC.CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID = 2 */ /* scanCount: 2 */ WHERE ID = 2): ID = 1 */ WHERE "ID" = 1 + +CREATE VIEW V_INTERSECT AS SELECT ID, NAME FROM CHILDREN INTERSECT SELECT * FROM CLASSES; +> ok + +SELECT * FROM V_INTERSECT WHERE ID=1; +> ID NAME +> -- ---- +> rows: 0 + +EXPLAIN SELECT * FROM V_INTERSECT WHERE ID=1; +>> SELECT "PUBLIC"."V_INTERSECT"."ID", "PUBLIC"."V_INTERSECT"."NAME" FROM "PUBLIC"."V_INTERSECT" /* (SELECT DISTINCT ID, NAME FROM PUBLIC.CHILDREN /* PUBLIC.PRIMARY_KEY_9: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE ID IS NOT DISTINCT FROM ?1) INTERSECT (SELECT DISTINCT PUBLIC.CLASSES.ID, PUBLIC.CLASSES.NAME FROM PUBLIC.CLASSES /* PUBLIC.PRIMARY_KEY_5: ID IS NOT DISTINCT FROM ?1 */ /* scanCount: 2 */ WHERE PUBLIC.CLASSES.ID IS NOT DISTINCT FROM ?1): ID = 1 */ WHERE "ID" = 1 + +DROP VIEW V_UNION; +> ok + +DROP VIEW V_EXCEPT; +> ok + +DROP VIEW V_INTERSECT; +> ok + +DROP TABLE CHILDREN; +> ok + +DROP TABLE CLASSES; +> ok + +--- view ---------------------------------------------------------------------------------------------- +CREATE CACHED TABLE TEST_A(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE CACHED TABLE TEST_B(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +SELECT A.ID AID, A.NAME A_NAME, B.ID BID, B.NAME B_NAME FROM TEST_A A INNER JOIN TEST_B B WHERE A.ID = B.ID; +> AID A_NAME BID B_NAME +> --- ------ --- 
------ +> rows: 0 + +INSERT INTO TEST_B VALUES(1, 'Hallo'), (2, 'Welt'), (3, 'Rekord'); +> update count: 3 + +CREATE VIEW IF NOT EXISTS TEST_ALL AS SELECT A.ID AID, A.NAME A_NAME, B.ID BID, B.NAME B_NAME FROM TEST_A A, TEST_B B WHERE A.ID = B.ID; +> ok + +SELECT COUNT(*) FROM TEST_ALL; +> COUNT(*) +> -------- +> 0 +> rows: 1 + +CREATE VIEW IF NOT EXISTS TEST_ALL AS +SELECT * FROM TEST_A; +> ok + +INSERT INTO TEST_A VALUES(1, 'Hello'), (2, 'World'), (3, 'Record'); +> update count: 3 + +SELECT * FROM TEST_ALL; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 1 Hello 1 Hallo +> 2 World 2 Welt +> 3 Record 3 Rekord +> rows: 3 + +SELECT * FROM TEST_ALL WHERE AID=1; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 1 Hello 1 Hallo +> rows: 1 + +SELECT * FROM TEST_ALL WHERE AID>0; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 1 Hello 1 Hallo +> 2 World 2 Welt +> 3 Record 3 Rekord +> rows: 3 + +SELECT * FROM TEST_ALL WHERE AID<2; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 1 Hello 1 Hallo +> rows: 1 + +SELECT * FROM TEST_ALL WHERE AID<=2; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 1 Hello 1 Hallo +> 2 World 2 Welt +> rows: 2 + +SELECT * FROM TEST_ALL WHERE AID>=2; +> AID A_NAME BID B_NAME +> --- ------ --- ------ +> 2 World 2 Welt +> 3 Record 3 Rekord +> rows: 2 + +CREATE VIEW TEST_A_SUB AS SELECT * FROM TEST_A WHERE ID < 2; +> ok + +SELECT TABLE_NAME, VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = 'PUBLIC'; +> TABLE_NAME VIEW_DEFINITION +> ---------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> TEST_ALL SELECT "A"."ID" AS "AID", "A"."NAME" AS "A_NAME", "B"."ID" AS "BID", "B"."NAME" AS "B_NAME" FROM "PUBLIC"."TEST_A" "A" INNER JOIN "PUBLIC"."TEST_B" "B" ON 1=1 WHERE "A"."ID" = "B"."ID" +> TEST_A_SUB SELECT "PUBLIC"."TEST_A"."ID", "PUBLIC"."TEST_A"."NAME" FROM "PUBLIC"."TEST_A" 
WHERE "ID" < 2 +> rows: 2 + +SELECT * FROM TEST_A_SUB WHERE NAME IS NOT NULL; +> ID NAME +> -- ----- +> 1 Hello +> rows: 1 + +DROP VIEW TEST_A_SUB; +> ok + +DROP TABLE TEST_A cascade; +> ok + +DROP TABLE TEST_B cascade; +> ok + +DROP VIEW TEST_ALL; +> exception VIEW_NOT_FOUND_1 + +DROP VIEW IF EXISTS TEST_ALL; +> ok + +--- commit/rollback ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +SET AUTOCOMMIT FALSE; +> ok + +INSERT INTO TEST VALUES(1, 'Test'); +> update count: 1 + +ROLLBACK; +> ok + +SELECT * FROM TEST; +> ID NAME +> -- ---- +> rows: 0 + +INSERT INTO TEST VALUES(1, 'Test2'); +> update count: 1 + +SAVEPOINT TEST; +> ok + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +ROLLBACK TO SAVEPOINT NOT_EXISTING; +> exception SAVEPOINT_IS_INVALID_1 + +ROLLBACK TO SAVEPOINT TEST; +> ok + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 1 Test2 +> rows: 1 + +ROLLBACK WORK; +> ok + +SELECT * FROM TEST; +> ID NAME +> -- ---- +> rows: 0 + +INSERT INTO TEST VALUES(1, 'Test3'); +> update count: 1 + +SAVEPOINT TEST3; +> ok + +INSERT INTO TEST VALUES(2, 'World2'); +> update count: 1 + +ROLLBACK TO SAVEPOINT TEST3; +> ok + +COMMIT WORK; +> ok + +SELECT * FROM TEST; +> ID NAME +> -- ----- +> 1 Test3 +> rows: 1 + +SET AUTOCOMMIT TRUE; +> ok + +DROP TABLE TEST; +> ok + +--- insert..select ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, 'Hello'); +> update count: 1 + +INSERT INTO TEST SELECT ID+1, NAME||'+' FROM TEST; +> update count: 1 + +INSERT INTO TEST SELECT ID+2, NAME||'+' FROM TEST; +> update count: 2 + +INSERT INTO TEST SELECT ID+4, NAME||'+' FROM TEST; +> update count: 4 + +SELECT * FROM TEST; +> ID NAME +> -- -------- +> 0 Hello +> 1 Hello+ +> 2 Hello+ +> 3 Hello++ +> 4 Hello+ +> 5 Hello++ +> 6 Hello++ +> 7 
Hello+++ +> rows: 8 + +DROP TABLE TEST; +> ok + +--- syntax errors ---------------------------------------------------------------------------------------------- +CREATE SOMETHING STRANGE; +> exception SYNTAX_ERROR_2 + +SELECT T1.* T2; +> exception SYNTAX_ERROR_1 + +select replace('abchihihi', 'i', 'o') abcehohoho, replace('this is tom', 'i') 1e_th_st_om from test; +> exception SYNTAX_ERROR_1 + +select monthname(date )'005-0E9-12') d_set fm test; +> exception SYNTAX_ERROR_1 + +call substring('bob', 2, -1); +> '' +> -- +> +> rows: 1 + +--- exists ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, NULL); +> update count: 1 + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +SELECT * FROM TEST T WHERE NOT EXISTS( +SELECT * FROM TEST T2 WHERE T.ID > T2.ID); +> ID NAME +> -- ---- +> 0 null +> rows: 1 + +DROP TABLE TEST; +> ok + +--- subquery ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(0, NULL); +> update count: 1 + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +select * from test where (select max(t1.id) from test t1) between 0 and 100; +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> rows: 2 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +SELECT * FROM TEST T WHERE T.ID = (SELECT T2.ID FROM TEST T2 WHERE T2.ID=T.ID); +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> 2 World +> rows: 3 + +SELECT (SELECT T2.NAME FROM TEST T2 WHERE T2.ID=T.ID), T.NAME FROM TEST T; +> (SELECT T2.NAME FROM PUBLIC.TEST T2 WHERE T2.ID = T.ID) NAME +> ------------------------------------------------------- ----- +> Hello Hello +> World World +> null null +> rows: 3 + +SELECT (SELECT SUM(T2.ID) FROM TEST T2 WHERE T2.ID>T.ID), 
T.ID FROM TEST T; +> (SELECT SUM(T2.ID) FROM PUBLIC.TEST T2 WHERE T2.ID > T.ID) ID +> ---------------------------------------------------------- -- +> 2 1 +> 3 0 +> null 2 +> rows: 3 + +select * from test t where t.id+1 in (select id from test); +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> rows: 2 + +select * from test t where t.id in (select id from test where id=t.id); +> ID NAME +> -- ----- +> 0 null +> 1 Hello +> 2 World +> rows: 3 + +select 1 from test, test where 1 in (select 1 from test where id=1); +> 1 +> - +> 1 +> 1 +> 1 +> 1 +> 1 +> 1 +> 1 +> 1 +> 1 +> rows: 9 + +select * from test, test where id=id; +> exception AMBIGUOUS_COLUMN_NAME_1 + +select 1 from test, test where id=id; +> exception AMBIGUOUS_COLUMN_NAME_1 + +select 1 from test where id in (select id from test, test); +> exception AMBIGUOUS_COLUMN_NAME_1 + +DROP TABLE TEST; +> ok + +--- group by ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(A INT, B INT, "VALUE" INT, UNIQUE(A, B)); +> ok + +INSERT INTO TEST VALUES(?, ?, ?); +{ +NULL, NULL, NULL +NULL, 0, 0 +NULL, 1, 10 +0, 0, -1 +0, 1, 100 +1, 0, 200 +1, 1, 300 +}; +> update count: 7 + +SELECT A, B, COUNT(*) CAL, COUNT(A) CA, COUNT(B) CB, MIN("VALUE") MI, MAX("VALUE") MA, SUM("VALUE") S FROM TEST GROUP BY A, B; +> A B CAL CA CB MI MA S +> ---- ---- --- -- -- ---- ---- ---- +> 0 0 1 1 1 -1 -1 -1 +> 0 1 1 1 1 100 100 100 +> 1 0 1 1 1 200 200 200 +> 1 1 1 1 1 300 300 300 +> null 0 1 0 1 0 0 0 +> null 1 1 0 1 10 10 10 +> null null 1 0 0 null null null +> rows: 7 + +DROP TABLE TEST; +> ok + +--- data types (blob, clob, varchar_ignorecase) ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT, XB BINARY(3), XBL BLOB, XO OTHER, XCL CLOB, XVI VARCHAR_IGNORECASE); +> ok + +INSERT INTO TEST VALUES(0, X'', X'', X'', '', ''); +> update count: 1 + +INSERT INTO TEST VALUES(1, X'0101', X'0101', X'0101', 'abc', 'aa'); +> update 
count: 1 + +INSERT INTO TEST VALUES(2, X'0AFF', X'08FE', X'F0F1', 'AbCdEfG', 'ZzAaBb'); +> update count: 1 + +INSERT INTO TEST VALUES(3, + X'112233', + X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', + X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', + 'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz', + 'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz'); +> update count: 1 + +INSERT INTO TEST VALUES(4, NULL, NULL, NULL, NULL, NULL); +> update count: 1 + +SELECT ID, XB, XBL, XO, XCL, XVI FROM TEST; +> ID XB XBL XO XCL XVI +> -- 
--------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> 0 X'000000' X'' X'' +> 1 X'010100' X'0101' X'0101' abc aa +> 2 X'0aff00' X'08fe' X'f0f1' AbCdEfG ZzAaBb +> 3 X'112233' 
X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff' X'112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff' AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz +> 4 null null null null null +> rows: 5 + +SELECT ID FROM TEST WHERE XCL = XCL; +> ID +> -- +> 0 +> 1 +> 2 +> 3 +> rows: 4 + +SELECT ID FROM TEST WHERE XCL LIKE 'abc%'; +> ID +> -- +> 1 +> rows: 1 + +SELECT ID FROM TEST WHERE XVI LIKE 'abc%'; +> ID +> -- +> 3 +> rows: 1 + +SELECT 'abc', 'Papa Joe''s', CAST(-1 AS SMALLINT), CAST(2 AS BIGINT), 
CAST(0 AS DOUBLE), CAST('0a0f' AS BINARY(4)) B, CAST(125 AS TINYINT), TRUE, FALSE FROM TEST WHERE ID=1; +> 'abc' 'Papa Joe''s' -1 2 0.0 B 125 TRUE FALSE +> ----- ------------- -- - --- ----------- --- ---- ----- +> abc Papa Joe's -1 2 0.0 X'30613066' 125 TRUE FALSE +> rows: 1 + +-- ' This apostrophe is here to fix syntax highlighting in the text editors. + +SELECT CAST('abcd' AS VARCHAR(255)) C1, CAST('ef_gh' AS VARCHAR(3)) C2; +> C1 C2 +> ---- --- +> abcd ef_ +> rows: 1 + +DROP TABLE TEST; +> ok + +--- data types (date and time) ---------------------------------------------------------------------------------------------- +CREATE MEMORY TABLE TEST(ID INT, XT TIME, XD DATE, XTS TIMESTAMP(9)); +> ok + +INSERT INTO TEST VALUES(0, '0:0:0','1-2-3','2-3-4 0:0:0'); +> update count: 1 + +INSERT INTO TEST VALUES(1, '01:02:03','2001-02-03','2001-02-29 0:0:0'); +> exception INVALID_DATETIME_CONSTANT_2 + +INSERT INTO TEST VALUES(1, '24:62:03','2001-02-03','2001-02-01 0:0:0'); +> exception INVALID_DATETIME_CONSTANT_2 + +INSERT INTO TEST VALUES(1, '23:02:03','2001-04-31','2001-02-01 0:0:0'); +> exception INVALID_DATETIME_CONSTANT_2 + +INSERT INTO TEST VALUES(1,'1:2:3','4-5-6','7-8-9 0:1:2'); +> update count: 1 + +INSERT INTO TEST VALUES(2,'23:59:59','1999-12-31','1999-12-31 23:59:59.123456789'); +> update count: 1 + +INSERT INTO TEST VALUES(NULL,NULL,NULL,NULL); +> update count: 1 + +SELECT * FROM TEST; +> ID XT XD XTS +> ---- -------- ---------- ----------------------------- +> 0 00:00:00 0001-02-03 0002-03-04 00:00:00 +> 1 01:02:03 0004-05-06 0007-08-09 00:01:02 +> 2 23:59:59 1999-12-31 1999-12-31 23:59:59.123456789 +> null null null null +> rows: 4 + +SELECT XD+1, XD-1, XD-XD FROM TEST; +> DATEADD(DAY, 1, XD) DATEADD(DAY, -1, XD) XD - XD +> ------------------- -------------------- ---------------- +> 0001-02-04 0001-02-02 INTERVAL '0' DAY +> 0004-05-07 0004-05-05 INTERVAL '0' DAY +> 2000-01-01 1999-12-30 INTERVAL '0' DAY +> null null null +> rows: 4 + +SELECT ID, CAST(XTS AS 
DATE) TS2D, +CAST(XTS AS TIME(9)) TS2T, +CAST(XD AS TIMESTAMP) D2TS FROM TEST; +> ID TS2D TS2T D2TS +> ---- ---------- ------------------ ------------------- +> 0 0002-03-04 00:00:00 0001-02-03 00:00:00 +> 1 0007-08-09 00:01:02 0004-05-06 00:00:00 +> 2 1999-12-31 23:59:59.123456789 1999-12-31 00:00:00 +> null null null null +> rows: 4 + +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> --------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER, "XT" TIME, "XD" DATE, "XTS" TIMESTAMP(9) ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> INSERT INTO "PUBLIC"."TEST" VALUES(0, TIME '00:00:00', DATE '0001-02-03', TIMESTAMP '0002-03-04 00:00:00'); +> INSERT INTO "PUBLIC"."TEST" VALUES(1, TIME '01:02:03', DATE '0004-05-06', TIMESTAMP '0007-08-09 00:01:02'); +> INSERT INTO "PUBLIC"."TEST" VALUES(2, TIME '23:59:59', DATE '1999-12-31', TIMESTAMP '1999-12-31 23:59:59.123456789'); +> INSERT INTO "PUBLIC"."TEST" VALUES(NULL, NULL, NULL, NULL); +> rows (ordered): 7 + +DROP TABLE TEST; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, t0 timestamp(23, 0), t1 timestamp(23, 1), t2 timestamp(23, 2), t5 timestamp(23, 5)); +> ok + +INSERT INTO TEST VALUES(1, '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123'); +> update count: 1 + +select * from test; +> ID T0 T1 T2 T5 +> -- ------------------- --------------------- ---------------------- ------------------------- +> 1 2001-01-01 12:34:57 2001-01-01 12:34:56.8 2001-01-01 12:34:56.79 2001-01-01 12:34:56.78912 +> rows: 1 + +DROP TABLE IF EXISTS TEST; +> ok + +--- data types (decimal) ---------------------------------------------------------------------------------------------- +CALL 1.2E10+1; +> 12000000001 +> ----------- +> 12000000001 +> rows: 1 + +CALL -1.2E-10-1; +> -1.00000000012 +> 
-------------- +> -1.00000000012 +> rows: 1 + +CALL 1E-1; +> 0.1 +> --- +> 0.1 +> rows: 1 + +CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); +> ok + +INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); +{ +0,FALSE,0,0,0,0.0,0.0,0.0 +1,TRUE,1,1,1,1.0,1.0,1.0 +4,TRUE,4,4,4,4.0,4.0,4.0 +-1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 +NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL +}; +> update count: 5 + +SELECT *, 0xFF, -0x1234567890abcd FROM TEST; +> ID X1 XT X_SM XB XD XD2 XR 255 -5124095575370701 +> ---- ----- ---- ---- ---- ----- ---- ---- --- ----------------- +> -1 FALSE -1 -1 -1 -1.00 -1.0 -1.0 255 -5124095575370701 +> 0 FALSE 0 0 0 0.00 0.0 0.0 255 -5124095575370701 +> 1 TRUE 1 1 1 1.00 1.0 1.0 255 -5124095575370701 +> 4 TRUE 4 4 4 4.00 4.0 4.0 255 -5124095575370701 +> null null null null null null null null 255 -5124095575370701 +> rows: 5 + +SELECT XD, CAST(XD AS DECIMAL(10,1)) D2DE, CAST(XD2 AS DECIMAL(4, 3)) DO2DE, CAST(XR AS DECIMAL(20,3)) R2DE FROM TEST; +> XD D2DE DO2DE R2DE +> ----- ---- ------ ------ +> -1.00 -1.0 -1.000 -1.000 +> 0.00 0.0 0.000 0.000 +> 1.00 1.0 1.000 1.000 +> 4.00 4.0 4.000 4.000 +> null null null null +> rows: 5 + +SELECT ID, CAST(XB AS DOUBLE) L2D, CAST(X_SM AS DOUBLE) S2D, CAST(XT AS DOUBLE) X2D FROM TEST; +> ID L2D S2D X2D +> ---- ---- ---- ---- +> -1 -1.0 -1.0 -1.0 +> 0 0.0 0.0 0.0 +> 1 1.0 1.0 1.0 +> 4 4.0 4.0 4.0 +> null null null null +> rows: 5 + +SELECT ID, CAST(XB AS REAL) L2D, CAST(X_SM AS REAL) S2D, CAST(XT AS REAL) T2R FROM TEST; +> ID L2D S2D T2R +> ---- ---- ---- ---- +> -1 -1.0 -1.0 -1.0 +> 0 0.0 0.0 0.0 +> 1 1.0 1.0 1.0 +> 4 4.0 4.0 4.0 +> null null null null +> rows: 5 + +SELECT ID, CAST(X_SM AS BIGINT) S2L, CAST(XT AS BIGINT) B2L, CAST(XD2 AS BIGINT) D2L, CAST(XR AS BIGINT) R2L FROM TEST; +> ID S2L B2L D2L R2L +> ---- ---- ---- ---- ---- +> -1 -1 -1 -1 -1 +> 0 0 0 0 0 +> 1 1 1 1 1 +> 4 4 4 4 4 +> null null null null null +> rows: 5 + +SELECT ID, CAST(XB AS INT) 
L2I, CAST(XD2 AS INT) D2I, CAST(XD2 AS SMALLINT) DO2I, CAST(XR AS SMALLINT) R2I FROM TEST; +> ID L2I D2I DO2I R2I +> ---- ---- ---- ---- ---- +> -1 -1 -1 -1 -1 +> 0 0 0 0 0 +> 1 1 1 1 1 +> 4 4 4 4 4 +> null null null null null +> rows: 5 + +SELECT ID, CAST(XD AS SMALLINT) D2S, CAST(XB AS SMALLINT) L2S, CAST(XT AS SMALLINT) B2S FROM TEST; +> ID D2S L2S B2S +> ---- ---- ---- ---- +> -1 -1 -1 -1 +> 0 0 0 0 +> 1 1 1 1 +> 4 4 4 4 +> null null null null +> rows: 5 + +SELECT ID, CAST(XD2 AS TINYINT) D2B, CAST(XD AS TINYINT) DE2B, CAST(XB AS TINYINT) L2B, CAST(X_SM AS TINYINT) S2B FROM TEST; +> ID D2B DE2B L2B S2B +> ---- ---- ---- ---- ---- +> -1 -1 -1 -1 -1 +> 0 0 0 0 0 +> 1 1 1 1 1 +> 4 4 4 4 4 +> null null null null null +> rows: 5 + +SELECT ID, CAST(XD2 AS BIT) D2B, CAST(XD AS BIT) DE2B, CAST(XB AS BIT) L2B, CAST(X_SM AS BIT) S2B FROM TEST; +> ID D2B DE2B L2B S2B +> ---- ----- ----- ----- ----- +> -1 TRUE TRUE TRUE TRUE +> 0 FALSE FALSE FALSE FALSE +> 1 TRUE TRUE TRUE TRUE +> 4 TRUE TRUE TRUE TRUE +> null null null null null +> rows: 5 + +SELECT CAST('TRUE' AS BIT) NT, CAST('1.0' AS BIT) N1, CAST('0.0' AS BIT) N0; +> NT N1 N0 +> ---- ---- ----- +> TRUE TRUE FALSE +> rows: 1 + +SELECT ID, ID+X1, ID+XT, ID+X_SM, ID+XB, ID+XD, ID+XD2, ID+XR FROM TEST; +> ID ID + X1 ID + XT ID + X_SM ID + XB ID + XD ID + XD2 ID + XR +> ---- ------- ------- --------- ------- ------- -------- ------- +> -1 -1 -2 -2 -2 -2.00 -2.0 -2.0 +> 0 0 0 0 0 0.00 0.0 0.0 +> 1 2 2 2 2 2.00 2.0 2.0 +> 4 5 8 8 8 8.00 8.0 8.0 +> null null null null null null null null +> rows: 5 + +SELECT ID, 10-X1, 10-XT, 10-X_SM, 10-XB, 10-XD, 10-XD2, 10-XR FROM TEST; +> ID 10 - X1 10 - XT 10 - X_SM 10 - XB 10 - XD 10 - XD2 10 - XR +> ---- ------- ------- --------- ------- ------- -------- ------- +> -1 10 11 11 11 11.00 11.0 11.0 +> 0 10 10 10 10 10.00 10.0 10.0 +> 1 9 9 9 9 9.00 9.0 9.0 +> 4 9 6 6 6 6.00 6.0 6.0 +> null null null null null null null null +> rows: 5 + +SELECT ID, 10*X1, 10*XT, 10*X_SM, 10*XB, 10*XD, 
10*XD2, 10*XR FROM TEST; +> ID 10 * X1 10 * XT 10 * X_SM 10 * XB 10 * XD 10 * XD2 10 * XR +> ---- ------- ------- --------- ------- ------- -------- ------- +> -1 0 -10 -10 -10 -10.00 -10.0 -10.0 +> 0 0 0 0 0 0.00 0.0 0.0 +> 1 10 10 10 10 10.00 10.0 10.0 +> 4 10 40 40 40 40.00 40.0 40.0 +> null null null null null null null null +> rows: 5 + +SELECT ID, SIGN(XT), SIGN(X_SM), SIGN(XB), SIGN(XD), SIGN(XD2), SIGN(XR) FROM TEST; +> ID SIGN(XT) SIGN(X_SM) SIGN(XB) SIGN(XD) SIGN(XD2) SIGN(XR) +> ---- -------- ---------- -------- -------- --------- -------- +> -1 -1 -1 -1 -1 -1 -1 +> 0 0 0 0 0 0 0 +> 1 1 1 1 1 1 1 +> 4 1 1 1 1 1 1 +> null null null null null null null +> rows: 5 + +SELECT ID, XT-XT-XT, X_SM-X_SM-X_SM, XB-XB-XB, XD-XD-XD, XD2-XD2-XD2, XR-XR-XR FROM TEST; +> ID (XT - XT) - XT (X_SM - X_SM) - X_SM (XB - XB) - XB (XD - XD) - XD (XD2 - XD2) - XD2 (XR - XR) - XR +> ---- -------------- -------------------- -------------- -------------- ----------------- -------------- +> -1 1 1 1 1.00 1.0 1.0 +> 0 0 0 0 0.00 0.0 0.0 +> 1 -1 -1 -1 -1.00 -1.0 -1.0 +> 4 -4 -4 -4 -4.00 -4.0 -4.0 +> null null null null null null null +> rows: 5 + +SELECT ID, XT+XT, X_SM+X_SM, XB+XB, XD+XD, XD2+XD2, XR+XR FROM TEST; +> ID XT + XT X_SM + X_SM XB + XB XD + XD XD2 + XD2 XR + XR +> ---- ------- ----------- ------- ------- --------- ------- +> -1 -2 -2 -2 -2.00 -2.0 -2.0 +> 0 0 0 0 0.00 0.0 0.0 +> 1 2 2 2 2.00 2.0 2.0 +> 4 8 8 8 8.00 8.0 8.0 +> null null null null null null null +> rows: 5 + +SELECT ID, XT*XT, X_SM*X_SM, XB*XB, XD*XD, XD2*XD2, XR*XR FROM TEST; +> ID XT * XT X_SM * X_SM XB * XB XD * XD XD2 * XD2 XR * XR +> ---- ------- ----------- ------- ------- --------- ------- +> -1 1 1 1 1.0000 1.0 1.0 +> 0 0 0 0 0.0000 0.0 0.0 +> 1 1 1 1 1.0000 1.0 1.0 +> 4 16 16 16 16.0000 16.0 16.0 +> null null null null null null null +> rows: 5 + +SELECT 2/3 FROM TEST WHERE ID=1; +> 0 +> - +> 0 +> rows: 1 + +SELECT ID/ID FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT XT/XT FROM TEST; +> 
exception DIVISION_BY_ZERO_1 + +SELECT X_SM/X_SM FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT XB/XB FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT XD/XD FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT XD2/XD2 FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT XR/XR FROM TEST; +> exception DIVISION_BY_ZERO_1 + +SELECT ID++0, -X1, -XT, -X_SM, -XB, -XD, -XD2, -XR FROM TEST; +> ID + 0 - X1 - XT - X_SM - XB - XD - XD2 - XR +> ------ ----- ---- ------ ---- ----- ----- ---- +> -1 TRUE 1 1 1 1.00 1.0 1.0 +> 0 TRUE 0 0 0 0.00 0.0 0.0 +> 1 FALSE -1 -1 -1 -1.00 -1.0 -1.0 +> 4 FALSE -4 -4 -4 -4.00 -4.0 -4.0 +> null null null null null null null null +> rows: 5 + +SELECT ID, X1||'!', XT||'!', X_SM||'!', XB||'!', XD||'!', XD2||'!', XR||'!' FROM TEST; +> ID X1 || '!' XT || '!' X_SM || '!' XB || '!' XD || '!' XD2 || '!' XR || '!' +> ---- --------- --------- ----------- --------- --------- ---------- --------- +> -1 FALSE! -1! -1! -1! -1.00! -1.0! -1.0! +> 0 FALSE! 0! 0! 0! 0.00! 0.0! 0.0! +> 1 TRUE! 1! 1! 1! 1.00! 1.0! 1.0! +> 4 TRUE! 4! 4! 4! 4.00! 4.0! 4.0! 
+> null null null null null null null null +> rows: 5 + +DROP TABLE TEST; +> ok + +--- in ---------------------------------------------------------------------------------------------- +CREATE TABLE CUSTOMER(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE TABLE INVOICE(ID INT, CUSTOMER_ID INT, PRIMARY KEY(CUSTOMER_ID, ID), "VALUE" DECIMAL(10,2)); +> ok + +INSERT INTO CUSTOMER VALUES(?, ?); +{ +1,Lehmann +2,Meier +3,Scott +4,NULL +}; +> update count: 4 + +INSERT INTO INVOICE VALUES(?, ?, ?); +{ +10,1,100.10 +11,1,10.01 +12,1,1.001 +20,2,22.2 +21,2,200.02 +}; +> update count: 5 + +SELECT * FROM CUSTOMER WHERE ID IN(1,2,4,-1); +> ID NAME +> -- ------- +> 1 Lehmann +> 2 Meier +> 4 null +> rows: 3 + +SELECT * FROM CUSTOMER WHERE ID NOT IN(3,4,5,'1'); +> ID NAME +> -- ----- +> 2 Meier +> rows: 1 + +SELECT * FROM CUSTOMER WHERE ID NOT IN(SELECT CUSTOMER_ID FROM INVOICE); +> ID NAME +> -- ----- +> 3 Scott +> 4 null +> rows: 2 + +SELECT * FROM INVOICE WHERE CUSTOMER_ID IN(SELECT C.ID FROM CUSTOMER C); +> ID CUSTOMER_ID VALUE +> -- ----------- ------ +> 10 1 100.10 +> 11 1 10.01 +> 12 1 1.00 +> 20 2 22.20 +> 21 2 200.02 +> rows: 5 + +SELECT * FROM CUSTOMER WHERE NAME IN('Lehmann', 20); +> exception DATA_CONVERSION_ERROR_1 + +SELECT * FROM CUSTOMER WHERE NAME NOT IN('Scott'); +> ID NAME +> -- ------- +> 1 Lehmann +> 2 Meier +> rows: 2 + +SELECT * FROM CUSTOMER WHERE NAME IN(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ------- +> 1 Lehmann +> 2 Meier +> 3 Scott +> rows: 3 + +SELECT * FROM CUSTOMER WHERE NAME NOT IN(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM CUSTOMER WHERE NAME = ANY(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ------- +> 1 Lehmann +> 2 Meier +> 3 Scott +> rows: 3 + +SELECT * FROM CUSTOMER WHERE NAME = ALL(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM CUSTOMER WHERE NAME > ALL(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ---- +> rows: 0 + +SELECT * FROM CUSTOMER WHERE NAME > ANY(SELECT 
NAME FROM CUSTOMER); +> ID NAME +> -- ----- +> 2 Meier +> 3 Scott +> rows: 2 + +SELECT * FROM CUSTOMER WHERE NAME < ANY(SELECT NAME FROM CUSTOMER); +> ID NAME +> -- ------- +> 1 Lehmann +> 2 Meier +> rows: 2 + +DROP TABLE INVOICE; +> ok + +DROP TABLE CUSTOMER; +> ok + +--- aggregates ---------------------------------------------------------------------------------------------- +drop table if exists t; +> ok + +create table t(x double precision, y double precision); +> ok + +create view s as +select stddev_pop(x) s_px, stddev_samp(x) s_sx, var_pop(x) v_px, var_samp(x) v_sx, +stddev_pop(y) s_py, stddev_samp(y) s_sy, var_pop(y) v_py, var_samp(y) v_sy from t; +> ok + +select var(100000000.1) z from system_range(1, 1000000); +> Z +> --- +> 0.0 +> rows: 1 + +select * from s; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ---- ---- ---- ---- ---- ---- ---- ---- +> null null null null null null null null +> rows: 1 + +select some(y>10), every(y>10), min(y), max(y) from t; +> ANY(Y > 10.0) EVERY(Y > 10.0) MIN(Y) MAX(Y) +> ------------- --------------- ------ ------ +> null null null null +> rows: 1 + +insert into t values(1000000004, 4); +> update count: 1 + +select * from s; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ---- ---- ---- ---- ---- ---- ---- ---- +> 0.0 null 0.0 null 0.0 null 0.0 null +> rows: 1 + +insert into t values(1000000007, 7); +> update count: 1 + +select * from s; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ---- ------------------ ---- ---- ---- ------------------ ---- ---- +> 1.5 2.1213203435596424 2.25 4.5 1.5 2.1213203435596424 2.25 4.5 +> rows: 1 + +insert into t values(1000000013, 13); +> update count: 1 + +select * from s; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ------------------ ---------------- ---- ---- ------------------ ---------------- ---- ---- +> 3.7416573867739413 4.58257569495584 14.0 21.0 3.7416573867739413 4.58257569495584 14.0 21.0 +> rows: 1 + +insert into t values(1000000016, 16); +> update count: 1 + +select * from s; +> 
S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ----------------- ----------------- ---- ---- ----------------- ----------------- ---- ---- +> 4.743416490252569 5.477225575051661 22.5 30.0 4.743416490252569 5.477225575051661 22.5 30.0 +> rows: 1 + +insert into t values(1000000016, 16); +> update count: 1 + +select * from s; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ----------------- ----------------- ----------------- ------------------ ----------------- ----------------- ----- ------------------ +> 4.874423036912116 5.449770630813229 23.75999994277954 29.699999928474426 4.874423042781577 5.449770637375485 23.76 29.700000000000003 +> rows: 1 + +select stddev_pop(distinct x) s_px, stddev_samp(distinct x) s_sx, var_pop(distinct x) v_px, var_samp(distinct x) v_sx, +stddev_pop(distinct y) s_py, stddev_samp(distinct y) s_sy, var_pop(distinct y) v_py, var_samp(distinct y) V_SY from t; +> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY +> ----------------- ----------------- ---- ---- ----------------- ----------------- ---- ---- +> 4.743416490252569 5.477225575051661 22.5 30.0 4.743416490252569 5.477225575051661 22.5 30.0 +> rows: 1 + +select some(y>10), every(y>10), min(y), max(y) from t; +> ANY(Y > 10.0) EVERY(Y > 10.0) MIN(Y) MAX(Y) +> ------------- --------------- ------ ------ +> TRUE FALSE 4.0 16.0 +> rows: 1 + +drop view s; +> ok + +drop table t; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), "VALUE" DECIMAL(10,2)); +> ok + +INSERT INTO TEST VALUES(?, ?, ?); +{ +1,Apples,1.20 +2,Oranges,2.05 +3,Cherries,5.10 +4,Apples,1.50 +5,Apples,1.10 +6,Oranges,1.80 +7,Bananas,2.50 +8,NULL,3.10 +9,NULL,-10.0 +}; +> update count: 9 + +SELECT IFNULL(NAME, '') || ': ' || GROUP_CONCAT("VALUE" ORDER BY NAME, "VALUE" DESC SEPARATOR ', ') FROM TEST GROUP BY NAME ORDER BY 1; +> COALESCE(NAME, '') || ': ' || LISTAGG("VALUE", ', ') WITHIN GROUP (ORDER BY NAME, "VALUE" DESC) +> ----------------------------------------------------------------------------------------------- +> : 
3.10, -10.00 +> Apples: 1.50, 1.20, 1.10 +> Bananas: 2.50 +> Cherries: 5.10 +> Oranges: 2.05, 1.80 +> rows (ordered): 5 + +SELECT GROUP_CONCAT(ID ORDER BY ID) FROM TEST; +> LISTAGG(ID) WITHIN GROUP (ORDER BY ID) +> -------------------------------------- +> 1,2,3,4,5,6,7,8,9 +> rows: 1 + +SELECT STRING_AGG(ID,';') FROM TEST; +> LISTAGG(ID, ';') WITHIN GROUP (ORDER BY NULL) +> --------------------------------------------- +> 1;2;3;4;5;6;7;8;9 +> rows: 1 + +SELECT DISTINCT NAME FROM TEST; +> NAME +> -------- +> Apples +> Bananas +> Cherries +> Oranges +> null +> rows: 5 + +SELECT DISTINCT NAME FROM TEST ORDER BY NAME DESC NULLS LAST; +> NAME +> -------- +> Oranges +> Cherries +> Bananas +> Apples +> null +> rows (ordered): 5 + +SELECT DISTINCT NAME FROM TEST ORDER BY NAME DESC NULLS LAST LIMIT 2 OFFSET 1; +> NAME +> -------- +> Cherries +> Bananas +> rows (ordered): 2 + +SELECT NAME, COUNT(*), SUM("VALUE"), MAX("VALUE"), MIN("VALUE"), AVG("VALUE"), COUNT(DISTINCT "VALUE") FROM TEST GROUP BY NAME; +> NAME COUNT(*) SUM("VALUE") MAX("VALUE") MIN("VALUE") AVG("VALUE") COUNT(DISTINCT "VALUE") +> -------- -------- ------------ ------------ ------------ --------------- ----------------------- +> Apples 3 3.80 1.50 1.10 1.266666666667 3 +> Bananas 1 2.50 2.50 2.50 2.500000000000 1 +> Cherries 1 5.10 5.10 5.10 5.100000000000 1 +> Oranges 2 3.85 2.05 1.80 1.925000000000 2 +> null 2 -6.90 3.10 -10.00 -3.450000000000 2 +> rows: 5 + +SELECT NAME, MAX("VALUE"), MIN("VALUE"), MAX("VALUE"+1)*MIN("VALUE"+1) FROM TEST GROUP BY NAME; +> NAME MAX("VALUE") MIN("VALUE") MAX("VALUE" + 1) * MIN("VALUE" + 1) +> -------- ------------ ------------ ----------------------------------- +> Apples 1.50 1.10 5.2500 +> Bananas 2.50 2.50 12.2500 +> Cherries 5.10 5.10 37.2100 +> Oranges 2.05 1.80 8.5400 +> null 3.10 -10.00 -36.9000 +> rows: 5 + +DROP TABLE TEST; +> ok + +--- order by ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT 
PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE UNIQUE INDEX IDXNAME ON TEST(NAME); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'World'); +> update count: 1 + +INSERT INTO TEST VALUES(3, NULL); +> update count: 1 + +SELECT * FROM TEST ORDER BY NAME; +> ID NAME +> -- ----- +> 3 null +> 1 Hello +> 2 World +> rows (ordered): 3 + +SELECT * FROM TEST ORDER BY NAME DESC; +> ID NAME +> -- ----- +> 2 World +> 1 Hello +> 3 null +> rows (ordered): 3 + +SELECT * FROM TEST ORDER BY NAME NULLS FIRST; +> ID NAME +> -- ----- +> 3 null +> 1 Hello +> 2 World +> rows (ordered): 3 + +SELECT * FROM TEST ORDER BY NAME DESC NULLS FIRST; +> ID NAME +> -- ----- +> 3 null +> 2 World +> 1 Hello +> rows (ordered): 3 + +SELECT * FROM TEST ORDER BY NAME NULLS LAST; +> ID NAME +> -- ----- +> 1 Hello +> 2 World +> 3 null +> rows (ordered): 3 + +SELECT * FROM TEST ORDER BY NAME DESC NULLS LAST; +> ID NAME +> -- ----- +> 2 World +> 1 Hello +> 3 null +> rows (ordered): 3 + +SELECT ID, '=', NAME FROM TEST ORDER BY 2 FOR UPDATE; +> ID '=' NAME +> -- --- ----- +> 1 = Hello +> 2 = World +> 3 = null +> rows: 3 + +DROP TABLE TEST; +> ok + +--- having ---------------------------------------------------------------------------------------------- +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE INDEX IDXNAME ON TEST(NAME); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'Hello'); +> update count: 1 + +INSERT INTO TEST VALUES(3, 'World'); +> update count: 1 + +INSERT INTO TEST VALUES(4, 'World'); +> update count: 1 + +INSERT INTO TEST VALUES(5, 'Orange'); +> update count: 1 + +SELECT NAME, SUM(ID) FROM TEST GROUP BY NAME HAVING COUNT(*)>1 ORDER BY NAME; +> NAME SUM(ID) +> ----- ------- +> Hello 3 +> World 7 +> rows (ordered): 2 + +DROP INDEX IF EXISTS IDXNAME; +> ok + +DROP TABLE TEST; +> ok + +--- sequence 
---------------------------------------------------------------------------------------------- +CREATE CACHED TABLE TEST(ID INT PRIMARY KEY); +> ok + +CREATE CACHED TABLE IF NOT EXISTS TEST(ID INT PRIMARY KEY); +> ok + +CREATE SEQUENCE IF NOT EXISTS TEST_SEQ START WITH 10; +> ok + +CREATE SEQUENCE IF NOT EXISTS TEST_SEQ START WITH 20; +> ok + +INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); +> update count: 1 + +CALL CURRVAL('test_seq'); +> CURRVAL('test_seq') +> ------------------- +> 10 +> rows: 1 + +INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); +> update count: 1 + +CALL NEXT VALUE FOR TEST_SEQ; +> NEXT VALUE FOR PUBLIC.TEST_SEQ +> ------------------------------ +> 12 +> rows: 1 + +INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); +> update count: 1 + +SELECT * FROM TEST; +> ID +> -- +> 10 +> 11 +> 13 +> rows: 3 + +SELECT TOP 2 * FROM TEST; +> ID +> -- +> 10 +> 11 +> rows: 2 + +SELECT TOP 2 * FROM TEST ORDER BY ID DESC; +> ID +> -- +> 13 +> 11 +> rows (ordered): 2 + +ALTER SEQUENCE TEST_SEQ RESTART WITH 20 INCREMENT BY -1; +> ok + +INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); +> update count: 1 + +INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); +> update count: 1 + +SELECT * FROM TEST ORDER BY ID ASC; +> ID +> -- +> 10 +> 11 +> 13 +> 19 +> 20 +> rows (ordered): 5 + +CALL NEXTVAL('test_seq'); +> NEXTVAL('test_seq') +> ------------------- +> 18 +> rows: 1 + +DROP SEQUENCE IF EXISTS TEST_SEQ; +> ok + +DROP SEQUENCE IF EXISTS TEST_SEQ; +> ok + +CREATE SEQUENCE TEST_LONG START WITH 90123456789012345 MAXVALUE 90123456789012345 INCREMENT BY -1; +> ok + +SET AUTOCOMMIT FALSE; +> ok + +CALL NEXT VALUE FOR TEST_LONG; +> NEXT VALUE FOR PUBLIC.TEST_LONG +> ------------------------------- +> 90123456789012345 +> rows: 1 + +SELECT SEQUENCE_NAME, BASE_VALUE, INCREMENT FROM INFORMATION_SCHEMA.SEQUENCES; +> SEQUENCE_NAME BASE_VALUE INCREMENT +> ------------- ----------------- --------- +> TEST_LONG 90123456789012344 -1 +> rows: 1 + +SET AUTOCOMMIT TRUE; +> ok + 
+DROP SEQUENCE TEST_LONG; +> ok + +DROP TABLE TEST; +> ok + +--- call ---------------------------------------------------------------------------------------------- +CALL PI(); +> 3.141592653589793 +> ----------------- +> 3.141592653589793 +> rows: 1 + +CALL 1+1; +> 2 +> - +> 2 +> rows: 1 + +--- constraints ---------------------------------------------------------------------------------------------- +CREATE TABLE PARENT(A INT, B INT, PRIMARY KEY(A, B)); +> ok + +CREATE TABLE CHILD(ID INT PRIMARY KEY, PA INT, PB INT, CONSTRAINT AB FOREIGN KEY(PA, PB) REFERENCES PARENT(A, B)); +> ok + +TABLE INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME UNIQUE_CONSTRAINT_CATALOG UNIQUE_CONSTRAINT_SCHEMA UNIQUE_CONSTRAINT_NAME MATCH_OPTION UPDATE_RULE DELETE_RULE +> ------------------ ----------------- --------------- ------------------------- ------------------------ ---------------------- ------------ ----------- ----------- +> SCRIPT PUBLIC AB SCRIPT PUBLIC CONSTRAINT_8 NONE RESTRICT RESTRICT +> rows: 1 + +TABLE INFORMATION_SCHEMA.KEY_COLUMN_USAGE; +> CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT +> ------------------ ----------------- --------------- ------------- ------------ ---------- ----------- ---------------- ----------------------------- +> SCRIPT PUBLIC AB SCRIPT PUBLIC CHILD PA 1 1 +> SCRIPT PUBLIC AB SCRIPT PUBLIC CHILD PB 2 2 +> SCRIPT PUBLIC CONSTRAINT_3 SCRIPT PUBLIC CHILD ID 1 null +> SCRIPT PUBLIC CONSTRAINT_8 SCRIPT PUBLIC PARENT A 1 null +> SCRIPT PUBLIC CONSTRAINT_8 SCRIPT PUBLIC PARENT B 2 null +> rows: 5 + +DROP TABLE PARENT, CHILD; +> ok + +drop table if exists test; +> ok + +create table test(id int primary key, parent int unique, foreign key(id) references test(parent)); +> ok + +insert into test values(1, 1); +> update count: 1 + +delete from test; +> update count: 1 + +drop table test; +> ok + +drop 
table if exists child; +> ok + +drop table if exists parent; +> ok + +create table child(a int, id int); +> ok + +create table parent(id int primary key); +> ok + +alter table child add foreign key(id) references parent; +> ok + +insert into parent values(1); +> update count: 1 + +delete from parent; +> update count: 1 + +drop table if exists child; +> ok + +drop table if exists parent; +> ok + +CREATE MEMORY TABLE PARENT(ID INT PRIMARY KEY); +> ok + +CREATE MEMORY TABLE CHILD(ID INT, PARENT_ID INT, FOREIGN KEY(PARENT_ID) REFERENCES PARENT); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ---------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER NOT NULL ); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENT_ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" FOREIGN KEY("PARENT_ID") REFERENCES "PUBLIC"."PARENT"("ID") NOCHECK; +> rows (ordered): 7 + +DROP TABLE PARENT, CHILD; +> ok + +CREATE TABLE TEST(ID INT, CONSTRAINT PK PRIMARY KEY(ID), NAME VARCHAR, PARENT INT, CONSTRAINT P FOREIGN KEY(PARENT) REFERENCES(ID)); +> ok + +ALTER TABLE TEST DROP PRIMARY KEY; +> exception INDEX_BELONGS_TO_CONSTRAINT_2 + +ALTER TABLE TEST DROP CONSTRAINT PK; +> exception CONSTRAINT_IS_USED_BY_CONSTRAINT_2 + +INSERT INTO TEST VALUES(1, 'Frank', 1); +> update count: 1 + +INSERT INTO TEST VALUES(2, 'Sue', 1); +> update count: 1 + +INSERT INTO TEST VALUES(3, 'Karin', 2); +> update count: 1 + +INSERT INTO TEST VALUES(4, 'Joe', 5); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +INSERT INTO TEST VALUES(4, 'Joe', 3); +> update count: 1 + +DROP TABLE 
TEST; +> ok + +CREATE MEMORY TABLE TEST(A_INT INT NOT NULL, B_INT INT NOT NULL, PRIMARY KEY(A_INT, B_INT), CONSTRAINT U_B UNIQUE(B_INT)); +> ok + +ALTER TABLE TEST ADD CONSTRAINT A_UNIQUE UNIQUE(A_INT); +> ok + +ALTER TABLE TEST DROP PRIMARY KEY; +> ok + +ALTER TABLE TEST DROP PRIMARY KEY; +> exception INDEX_NOT_FOUND_1 + +ALTER TABLE TEST DROP CONSTRAINT A_UNIQUE; +> ok + +ALTER TABLE TEST ADD CONSTRAINT C1 FOREIGN KEY(A_INT) REFERENCES TEST(B_INT); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A_INT" INTEGER NOT NULL, "B_INT" INTEGER NOT NULL ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."U_B" UNIQUE("B_INT"); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."C1" FOREIGN KEY("A_INT") REFERENCES "PUBLIC"."TEST"("B_INT") NOCHECK; +> rows (ordered): 5 + +ALTER TABLE TEST DROP CONSTRAINT C1; +> ok + +ALTER TABLE TEST DROP CONSTRAINT C1; +> exception CONSTRAINT_NOT_FOUND_1 + +DROP TABLE TEST; +> ok + +CREATE MEMORY TABLE A_TEST(A_INT INT NOT NULL, A_VARCHAR VARCHAR(255) DEFAULT 'x', A_DATE DATE, A_DECIMAL DECIMAL(10,2)); +> ok + +ALTER TABLE A_TEST ADD PRIMARY KEY(A_INT); +> ok + +ALTER TABLE A_TEST ADD CONSTRAINT MIN_LENGTH CHECK LENGTH(A_VARCHAR)>1; +> ok + +ALTER TABLE A_TEST ADD CONSTRAINT DATE_UNIQUE UNIQUE(A_DATE); +> ok + +ALTER TABLE A_TEST ADD CONSTRAINT DATE_UNIQUE_2 UNIQUE(A_DATE); +> ok + +INSERT INTO A_TEST VALUES(NULL, NULL, NULL, NULL); +> exception NULL_NOT_ALLOWED + +INSERT INTO A_TEST VALUES(1, 'A', NULL, NULL); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +INSERT INTO A_TEST VALUES(1, 'AB', NULL, NULL); +> update count: 1 + +INSERT INTO A_TEST VALUES(1, 'AB', NULL, NULL); +> exception DUPLICATE_KEY_1 + +INSERT INTO A_TEST VALUES(2, 'AB', NULL, NULL); +> 
update count: 1 + +INSERT INTO A_TEST VALUES(3, 'AB', '2004-01-01', NULL); +> update count: 1 + +INSERT INTO A_TEST VALUES(4, 'AB', '2004-01-01', NULL); +> exception DUPLICATE_KEY_1 + +INSERT INTO A_TEST VALUES(5, 'ABC', '2004-01-02', NULL); +> update count: 1 + +CREATE MEMORY TABLE B_TEST(B_INT INT DEFAULT -1 NOT NULL , B_VARCHAR VARCHAR(255) DEFAULT NULL NULL, CONSTRAINT B_UNIQUE UNIQUE(B_INT)); +> ok + +ALTER TABLE B_TEST ADD CHECK LENGTH(B_VARCHAR)>1; +> ok + +ALTER TABLE B_TEST ADD CONSTRAINT C1 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE CASCADE ON UPDATE CASCADE; +> ok + +ALTER TABLE B_TEST ADD PRIMARY KEY(B_INT); +> ok + +INSERT INTO B_TEST VALUES(10, 'X'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +INSERT INTO B_TEST VALUES(1, 'X'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +INSERT INTO B_TEST VALUES(1, 'XX'); +> update count: 1 + +SELECT * FROM B_TEST; +> B_INT B_VARCHAR +> ----- --------- +> 1 XX +> rows: 1 + +UPDATE A_TEST SET A_INT = A_INT*10; +> update count: 4 + +SELECT * FROM B_TEST; +> B_INT B_VARCHAR +> ----- --------- +> 10 XX +> rows: 1 + +ALTER TABLE B_TEST DROP CONSTRAINT C1; +> ok + +ALTER TABLE B_TEST ADD CONSTRAINT C2 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE SET NULL ON UPDATE SET NULL; +> ok + +UPDATE A_TEST SET A_INT = A_INT*10; +> exception NULL_NOT_ALLOWED + +SELECT * FROM B_TEST; +> B_INT B_VARCHAR +> ----- --------- +> 10 XX +> rows: 1 + +ALTER TABLE B_TEST DROP CONSTRAINT C2; +> ok + +UPDATE B_TEST SET B_INT = 20; +> update count: 1 + +SELECT A_INT FROM A_TEST; +> A_INT +> ----- +> 10 +> 20 +> 30 +> 50 +> rows: 4 + +ALTER TABLE B_TEST ADD CONSTRAINT C3 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; +> ok + +UPDATE A_TEST SET A_INT = A_INT*10; +> update count: 4 + +SELECT * FROM B_TEST; +> B_INT B_VARCHAR +> ----- --------- +> -1 XX +> rows: 1 + +DELETE FROM A_TEST; +> update count: 4 + +SELECT * FROM B_TEST; +> B_INT B_VARCHAR +> ----- --------- +> -1 XX +> rows: 1 + 
+SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."A_TEST"( "A_INT" INTEGER NOT NULL, "A_VARCHAR" CHARACTER VARYING(255) DEFAULT 'x', "A_DATE" DATE, "A_DECIMAL" DECIMAL(10, 2) ); +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("A_INT"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A_TEST; +> CREATE MEMORY TABLE "PUBLIC"."B_TEST"( "B_INT" INTEGER DEFAULT -1 NOT NULL, "B_VARCHAR" CHARACTER VARYING(255) DEFAULT NULL ); +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_760" PRIMARY KEY("B_INT"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.B_TEST; +> INSERT INTO "PUBLIC"."B_TEST" VALUES (-1, 'XX'); +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."MIN_LENGTH" CHECK(CHAR_LENGTH("A_VARCHAR") > 1) NOCHECK; +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_76" CHECK(CHAR_LENGTH("B_VARCHAR") > 1) NOCHECK; +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."DATE_UNIQUE" UNIQUE("A_DATE"); +> ALTER TABLE "PUBLIC"."A_TEST" ADD CONSTRAINT "PUBLIC"."DATE_UNIQUE_2" UNIQUE("A_DATE"); +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."B_UNIQUE" UNIQUE("B_INT"); +> ALTER TABLE "PUBLIC"."B_TEST" ADD CONSTRAINT "PUBLIC"."C3" FOREIGN KEY("B_INT") REFERENCES "PUBLIC"."A_TEST"("A_INT") ON DELETE SET DEFAULT ON UPDATE SET DEFAULT NOCHECK; +> rows (ordered): 14 + +DROP TABLE A_TEST, B_TEST; +> ok + +CREATE MEMORY TABLE FAMILY(ID INT PRIMARY KEY, NAME VARCHAR(20)); +> ok + +CREATE INDEX FAMILY_ID_NAME ON FAMILY(ID, NAME); +> ok + +CREATE MEMORY TABLE PARENT(ID INT, FAMILY_ID INT, NAME VARCHAR(20), UNIQUE(ID, FAMILY_ID)); +> ok + +ALTER TABLE PARENT ADD CONSTRAINT PARENT_FAMILY FOREIGN KEY(FAMILY_ID) +REFERENCES FAMILY(ID); +> ok + +CREATE MEMORY TABLE 
CHILD( +ID INT, +PARENTID INT, +FAMILY_ID INT, +UNIQUE(ID, PARENTID), +CONSTRAINT PARENT_CHILD FOREIGN KEY(PARENTID, FAMILY_ID) +REFERENCES PARENT(ID, FAMILY_ID) +ON UPDATE CASCADE +ON DELETE SET NULL, +NAME VARCHAR(20)); +> ok + +INSERT INTO FAMILY VALUES(1, 'Capone'); +> update count: 1 + +INSERT INTO CHILD VALUES(100, 1, 1, 'early'); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +INSERT INTO PARENT VALUES(1, 1, 'Sue'); +> update count: 1 + +INSERT INTO PARENT VALUES(2, 1, 'Joe'); +> update count: 1 + +INSERT INTO CHILD VALUES(100, 1, 1, 'Simon'); +> update count: 1 + +INSERT INTO CHILD VALUES(101, 1, 1, 'Sabine'); +> update count: 1 + +INSERT INTO CHILD VALUES(200, 2, 1, 'Jim'); +> update count: 1 + +INSERT INTO CHILD VALUES(201, 2, 1, 'Johann'); +> update count: 1 + +UPDATE PARENT SET ID=3 WHERE ID=1; +> update count: 1 + +SELECT * FROM CHILD; +> ID PARENTID FAMILY_ID NAME +> --- -------- --------- ------ +> 100 3 1 Simon +> 101 3 1 Sabine +> 200 2 1 Jim +> 201 2 1 Johann +> rows: 4 + +UPDATE CHILD SET PARENTID=-1 WHERE PARENTID IS NOT NULL; +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +DELETE FROM PARENT WHERE ID=2; +> update count: 1 + +SELECT * FROM CHILD; +> ID PARENTID FAMILY_ID NAME +> --- -------- --------- ------ +> 100 3 1 Simon +> 101 3 1 Sabine +> 200 null null Jim +> 201 null null Johann +> rows: 4 + +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(20) ); +> ALTER TABLE "PUBLIC"."FAMILY" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; +> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); +> CREATE INDEX 
"PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID" NULLS FIRST, "NAME" NULLS FIRST); +> CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENTID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; +> INSERT INTO "PUBLIC"."CHILD" VALUES(100, 3, 1, 'Simon'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(101, 3, 1, 'Sabine'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(200, NULL, NULL, 'Jim'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(201, NULL, NULL, 'Johann'); +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" UNIQUE("ID", "FAMILY_ID"); +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."PARENT_CHILD" FOREIGN KEY("PARENTID", "FAMILY_ID") REFERENCES "PUBLIC"."PARENT"("ID", "FAMILY_ID") ON DELETE SET NULL ON UPDATE CASCADE NOCHECK; +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; +> rows (ordered): 19 + +ALTER TABLE CHILD DROP CONSTRAINT PARENT_CHILD; +> ok + +SCRIPT SIMPLE NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------------------------------------------------------------------------------ +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."FAMILY"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(20) ); +> ALTER TABLE "PUBLIC"."FAMILY" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_7" PRIMARY KEY("ID"); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; +> INSERT INTO "PUBLIC"."FAMILY" VALUES(1, 'Capone'); +> CREATE INDEX "PUBLIC"."FAMILY_ID_NAME" ON "PUBLIC"."FAMILY"("ID" NULLS FIRST, "NAME" NULLS FIRST); +> 
CREATE MEMORY TABLE "PUBLIC"."PARENT"( "ID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); +> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; +> INSERT INTO "PUBLIC"."PARENT" VALUES(3, 1, 'Sue'); +> CREATE MEMORY TABLE "PUBLIC"."CHILD"( "ID" INTEGER, "PARENTID" INTEGER, "FAMILY_ID" INTEGER, "NAME" CHARACTER VARYING(20) ); +> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; +> INSERT INTO "PUBLIC"."CHILD" VALUES(100, 3, 1, 'Simon'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(101, 3, 1, 'Sabine'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(200, NULL, NULL, 'Jim'); +> INSERT INTO "PUBLIC"."CHILD" VALUES(201, NULL, NULL, 'Johann'); +> ALTER TABLE "PUBLIC"."CHILD" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_3" UNIQUE("ID", "PARENTID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_8" UNIQUE("ID", "FAMILY_ID"); +> ALTER TABLE "PUBLIC"."PARENT" ADD CONSTRAINT "PUBLIC"."PARENT_FAMILY" FOREIGN KEY("FAMILY_ID") REFERENCES "PUBLIC"."FAMILY"("ID") NOCHECK; +> rows (ordered): 18 + +DELETE FROM PARENT; +> update count: 1 + +SELECT * FROM CHILD; +> ID PARENTID FAMILY_ID NAME +> --- -------- --------- ------ +> 100 3 1 Simon +> 101 3 1 Sabine +> 200 null null Jim +> 201 null null Johann +> rows: 4 + +DROP TABLE PARENT; +> ok + +DROP TABLE CHILD; +> ok + +DROP TABLE FAMILY; +> ok + +CREATE TABLE INVOICE(CUSTOMER_ID INT, ID INT, TOTAL_AMOUNT DECIMAL(10,2), PRIMARY KEY(CUSTOMER_ID, ID)); +> ok + +CREATE TABLE INVOICE_LINE(CUSTOMER_ID INT, INVOICE_ID INT, LINE_ID INT, TEXT VARCHAR, AMOUNT DECIMAL(10,2)); +> ok + +CREATE INDEX ON INVOICE_LINE(CUSTOMER_ID); +> ok + +ALTER TABLE INVOICE_LINE ADD FOREIGN KEY(CUSTOMER_ID, INVOICE_ID) REFERENCES INVOICE(CUSTOMER_ID, ID) ON DELETE CASCADE; +> ok + +INSERT INTO INVOICE VALUES(1, 100, NULL), (1, 101, NULL); +> update count: 2 + +INSERT INTO INVOICE_LINE VALUES(1, 100, 10, 'Apples', 20.35), (1, 100, 20, 'Paper', 10.05), (1, 101, 10, 'Pencil', 1.10), (1, 101, 20, 'Chair', 540.40); +> update count: 4 + +INSERT INTO INVOICE_LINE 
VALUES(1, 102, 20, 'Nothing', 30.00); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +DELETE FROM INVOICE WHERE ID = 100; +> update count: 1 + +SELECT * FROM INVOICE_LINE; +> CUSTOMER_ID INVOICE_ID LINE_ID TEXT AMOUNT +> ----------- ---------- ------- ------ ------ +> 1 101 10 Pencil 1.10 +> 1 101 20 Chair 540.40 +> rows: 2 + +DROP TABLE INVOICE, INVOICE_LINE; +> ok + +CREATE MEMORY TABLE TEST(A INT PRIMARY KEY, B INT, FOREIGN KEY (B) REFERENCES(A) ON UPDATE RESTRICT ON DELETE NO ACTION); +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> ----------------------------------------------------------------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "A" INTEGER NOT NULL, "B" INTEGER ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("A"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_27" FOREIGN KEY("B") REFERENCES "PUBLIC"."TEST"("A") NOCHECK; +> rows (ordered): 5 + +DROP TABLE TEST; +> ok + +--- users ---------------------------------------------------------------------------------------------- +CREATE USER TEST PASSWORD 'abc'; +> ok + +CREATE USER TEST_ADMIN_X PASSWORD 'def' ADMIN; +> ok + +ALTER USER TEST_ADMIN_X RENAME TO TEST_ADMIN; +> ok + +ALTER USER TEST_ADMIN ADMIN TRUE; +> ok + +CREATE USER TEST2 PASSWORD '123' ADMIN; +> ok + +ALTER USER TEST2 SET PASSWORD 'abc'; +> ok + +ALTER USER TEST2 ADMIN FALSE; +> ok + +CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +CREATE MEMORY TABLE TEST2_X(ID INT); +> ok + +CREATE INDEX IDX_ID ON TEST2_X(ID); +> ok + +ALTER TABLE TEST2_X RENAME TO TEST2; +> ok + +ALTER INDEX IDX_ID RENAME TO IDX_ID2; +> ok + +SCRIPT NOPASSWORDS NOSETTINGS NOVERSION; +> SCRIPT +> -------------------------------------------------------------------------------------------- +> CREATE USER 
IF NOT EXISTS "SA" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST_ADMIN" PASSWORD '' ADMIN; +> CREATE USER IF NOT EXISTS "TEST" PASSWORD ''; +> CREATE USER IF NOT EXISTS "TEST2" PASSWORD ''; +> CREATE MEMORY TABLE "PUBLIC"."TEST"( "ID" INTEGER NOT NULL, "NAME" CHARACTER VARYING(255) ); +> ALTER TABLE "PUBLIC"."TEST" ADD CONSTRAINT "PUBLIC"."CONSTRAINT_2" PRIMARY KEY("ID"); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; +> CREATE MEMORY TABLE "PUBLIC"."TEST2"( "ID" INTEGER ); +> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; +> CREATE INDEX "PUBLIC"."IDX_ID2" ON "PUBLIC"."TEST2"("ID" NULLS FIRST); +> rows (ordered): 10 + +SELECT USER_NAME, IS_ADMIN FROM INFORMATION_SCHEMA.USERS; +> USER_NAME IS_ADMIN +> ---------- -------- +> SA TRUE +> TEST FALSE +> TEST2 FALSE +> TEST_ADMIN TRUE +> rows: 4 + +DROP TABLE TEST2; +> ok + +DROP TABLE TEST; +> ok + +DROP USER TEST; +> ok + +DROP USER IF EXISTS TEST; +> ok + +DROP USER IF EXISTS TEST2; +> ok + +DROP USER TEST_ADMIN; +> ok + +SET AUTOCOMMIT FALSE; +> ok + +SET SALT '' HASH ''; +> ok + +CREATE USER SECURE SALT '001122' HASH '1122334455'; +> ok + +ALTER USER SECURE SET SALT '112233' HASH '2233445566'; +> ok + +SCRIPT NOSETTINGS NOVERSION; +> SCRIPT +> ------------------------------------------------------------------- +> CREATE USER IF NOT EXISTS "SA" SALT '' HASH '' ADMIN; +> CREATE USER IF NOT EXISTS "SECURE" SALT '112233' HASH '2233445566'; +> rows (ordered): 2 + +SET PASSWORD '123'; +> ok + +SET AUTOCOMMIT TRUE; +> ok + +DROP USER SECURE; +> ok + +--- test cases --------------------------------------------------------------------------------------------- +create table test(id int, name varchar); +> ok + +insert into test values(5, 'b'), (5, 'b'), (20, 'a'); +> update count: 3 + +drop table test; +> ok + +select 0 from (( +select 0 as f from dual u1 where null in (?, ?, ?, ?, ?) +) union all ( +select u2.f from ( +select 0 as f from ( +select 0 from dual u2f1f1 where now() = ? 
+) u2f1 +) u2 +)) where f = 12345; +{ +11, 22, 33, 44, 55, null +> 0 +> - +> rows: 0 +}; +> update count: 0 + +create table x(id int not null); +> ok + +alter table if exists y add column a varchar; +> ok + +alter table if exists x add column a varchar; +> ok + +alter table if exists x add column a varchar; +> exception DUPLICATE_COLUMN_NAME_1 + +alter table if exists y alter column a rename to b; +> ok + +alter table if exists x alter column a rename to b; +> ok + +alter table if exists x alter column a rename to b; +> exception COLUMN_NOT_FOUND_1 + +alter table if exists y alter column b set default 'a'; +> ok + +alter table if exists x alter column b set default 'a'; +> ok + +insert into x(id) values(1); +> update count: 1 + +select b from x; +>> a + +delete from x; +> update count: 1 + +alter table if exists y alter column b drop default; +> ok + +alter table if exists x alter column b drop default; +> ok + +alter table if exists y alter column b set not null; +> ok + +alter table if exists x alter column b set not null; +> ok + +insert into x(id) values(1); +> exception NULL_NOT_ALLOWED + +alter table if exists y alter column b drop not null; +> ok + +alter table if exists x alter column b drop not null; +> ok + +insert into x(id) values(1); +> update count: 1 + +select b from x; +>> null + +delete from x; +> update count: 1 + +alter table if exists y add constraint x_pk primary key (id); +> ok + +alter table if exists x add constraint x_pk primary key (id); +> ok + +alter table if exists x add constraint x_pk primary key (id); +> exception CONSTRAINT_ALREADY_EXISTS_1 + +insert into x(id) values(1); +> update count: 1 + +insert into x(id) values(1); +> exception DUPLICATE_KEY_1 + +delete from x; +> update count: 1 + +alter table if exists y add constraint x_check check (b = 'a'); +> ok + +alter table if exists x add constraint x_check check (b = 'a'); +> ok + +alter table if exists x add constraint x_check check (b = 'a'); +> exception 
CONSTRAINT_ALREADY_EXISTS_1 + +insert into x(id, b) values(1, 'b'); +> exception CHECK_CONSTRAINT_VIOLATED_1 + +alter table if exists y rename constraint x_check to x_check1; +> ok + +alter table if exists x rename constraint x_check to x_check1; +> ok + +alter table if exists x rename constraint x_check to x_check1; +> exception CONSTRAINT_NOT_FOUND_1 + +alter table if exists y drop constraint x_check1; +> ok + +alter table if exists x drop constraint x_check1; +> ok + +alter table if exists y rename to z; +> ok + +alter table if exists x rename to z; +> ok + +alter table if exists x rename to z; +> ok + +insert into z(id, b) values(1, 'b'); +> update count: 1 + +delete from z; +> update count: 1 + +alter table if exists y add constraint z_uk unique (b); +> ok + +alter table if exists z add constraint z_uk unique (b); +> ok + +alter table if exists z add constraint z_uk unique (b); +> exception CONSTRAINT_ALREADY_EXISTS_1 + +insert into z(id, b) values(1, 'b'); +> update count: 1 + +insert into z(id, b) values(1, 'b'); +> exception DUPLICATE_KEY_1 + +delete from z; +> update count: 1 + +alter table if exists y drop column b; +> ok + +alter table if exists z drop column b; +> ok + +alter table if exists z drop column b; +> exception COLUMN_NOT_FOUND_1 + +alter table if exists y drop primary key; +> ok + +alter table if exists z drop primary key; +> ok + +alter table if exists z drop primary key; +> exception INDEX_NOT_FOUND_1 + +create table x (id int not null primary key); +> ok + +alter table if exists y add constraint z_fk foreign key (id) references x (id); +> ok + +alter table if exists z add constraint z_fk foreign key (id) references x (id); +> ok + +alter table if exists z add constraint z_fk foreign key (id) references x (id); +> exception CONSTRAINT_ALREADY_EXISTS_1 + +insert into z (id) values (1); +> exception REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1 + +SET MODE MySQL; +> ok + +alter table if exists y drop foreign key z_fk; +> ok + +alter table 
if exists z drop foreign key z_fk; +> ok + +alter table if exists z drop foreign key z_fk; +> exception CONSTRAINT_NOT_FOUND_1 + +SET MODE Regular; +> ok + +insert into z (id) values (1); +> update count: 1 + +delete from z; +> update count: 1 + +drop table x; +> ok + +drop table z; +> ok + +create schema x; +> ok + +alter schema if exists y rename to z; +> ok + +alter schema if exists x rename to z; +> ok + +alter schema if exists x rename to z; +> ok + +create table z.z (id int); +> ok + +drop schema z cascade; +> ok + +----- Issue#493 ----- +create table test ("YEAR" int, action varchar(10)); +> ok + +insert into test values (2015, 'order'), (2016, 'order'), (2014, 'order'); +> update count: 3 + +insert into test values (2014, 'execution'), (2015, 'execution'), (2016, 'execution'); +> update count: 3 + +select * from test where "YEAR" in (select distinct "YEAR" from test order by "YEAR" desc limit 1 offset 0); +> YEAR ACTION +> ---- --------- +> 2016 execution +> 2016 order +> rows: 2 + +drop table test; +> ok diff --git a/h2/src/test/org/h2/test/scripts/testSimple.sql b/h2/src/test/org/h2/test/scripts/testSimple.sql new file mode 100644 index 0000000000..ae5fc89bbc --- /dev/null +++ b/h2/src/test/org/h2/test/scripts/testSimple.sql @@ -0,0 +1,1259 @@ +-- Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +-- and the EPL 1.0 (https://h2database.com/html/license.html). 
+-- Initial Developer: H2 Group +-- +select 1000L / 10; +>> 100 + +select * from (select 1 as y from dual order by y); +>> 1 + +select 1 from(select 2 from(select 1) a right join dual b) c; +>> 1 + +select 1.00 / 3 * 0.00; +>> 0.000000000000000000000000 + +select 1.00000 / 3 * 0.0000; +>> 0.00000000000000000000000000000 + +select 1.0000000 / 3 * 0.00000; +>> 0.00000000000000000000000000000000 + +select 1.0000000 / 3 * 0.000000; +>> 0.000000000000000000000000000000000 + +create table test(id null); +> ok + +drop table test; +> ok + +select * from (select group_concat(distinct 1) from system_range(1, 3)); +>> 1 + +select sum(mod(x, 2) = 1) from system_range(1, 10); +>> 5 + +create table a(x int); +> ok + +create table b(x int); +> ok + +select count(*) from (select b.x from a left join b); +>> 0 + +drop table a, b; +> ok + +select count(distinct now()) c from system_range(1, 100), system_range(1, 1000); +>> 1 + +select {fn TIMESTAMPADD(SQL_TSI_DAY, 1, {ts '2011-10-20 20:30:40.001'})}; +>> 2011-10-21 20:30:40.001 + +select {fn TIMESTAMPADD(SQL_TSI_SECOND, 1, cast('2011-10-20 20:30:40.001' as timestamp))}; +>> 2011-10-20 20:30:41.001 + +select N'test'; +>> test + +select E'test\\test'; +>> test\test + +create table a(id int unique) as select null; +> ok + +create table b(id int references a(id)) as select null; +> ok + +delete from a; +> update count: 1 + +drop table a, b; +> ok + +create cached temp table test(id identity) not persistent; +> ok + +drop table test; +> ok + +create table test(id int); +> ok + +alter table test alter column id set default 'x'; +> ok + +select column_default from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> 'x' + +alter table test alter column id set not null; +> ok + +select is_nullable from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> NO + +alter table test alter column id set data type varchar; +> ok + +select data_type from information_schema.columns c 
where c.table_name = 'TEST' and c.column_name = 'ID'; +>> CHARACTER VARYING + +alter table test alter column id type int; +> ok + +select data_type from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> INTEGER + +alter table test alter column id drop default; +> ok + +select column_default from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> null + +alter table test alter column id drop not null; +> ok + +select is_nullable from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; +>> YES + +drop table test; +> ok + +select x from (select *, rownum as r from system_range(1, 3)) where r=2; +>> 2 + +create table test(name varchar(255)) as select 'Hello+World+'; +> ok + +select count(*) from test where name like 'Hello++World++' escape '+'; +>> 1 + +select count(*) from test where name like '+H+e+l+l+o++World++' escape '+'; +>> 1 + +select count(*) from test where name like 'Hello+World++' escape '+'; +>> 0 + +select count(*) from test where name like 'Hello++World+' escape '+'; +>> 0 + +drop table test; +> ok + +select count(*) from system_range(1, 1); +>> 1 + +select count(*) from system_range(1, -1); +>> 0 + +select 1 from dual where '\' like '\' escape ''; +>> 1 + +select left(timestamp '2001-02-03 08:20:31+04', 4); +>> 2001 + +create table t1$2(id int); +> ok + +drop table t1$2; +> ok + +create table test(id int primary key) as select x from system_range(1, 200); +> ok + +delete from test; +> update count: 200 + +insert into test(id) values(1); +> update count: 1 + +select * from test order by id; +>> 1 + +drop table test; +> ok + +create memory table test(id int) not persistent as select 1 from dual; +> ok + +insert into test values(1); +> update count: 1 + +select count(1) from test; +>> 2 + +@reconnect + +select count(1) from test; +>> 0 + +drop table test; +> ok + +create table test(t clob) as select 1; +> ok + +select distinct t from test; +>> 1 + +drop 
table test; +> ok + +create table test(id int unique not null); +> ok + +drop table test; +> ok + +create table test(id int not null unique); +> ok + +drop table test; +> ok + +select count(*)from((select 1 from dual limit 1)union(select 2 from dual limit 1)); +>> 2 + +select datediff(yyyy, now(), now()); +>> 0 + +create table t(d date) as select '2008-11-01' union select '2008-11-02'; +> ok + +select 1 from t group by year(d) order by year(d); +>> 1 + +drop table t; +> ok + +create table t(d int) as select 2001 union select 2002; +> ok + +select 1 from t group by d/10 order by d/10; +>> 1 + +drop table t; +> ok + +create schema test; +> ok + +create sequence test.report_id_seq; +> ok + +select nextval('"test".REPORT_ID_SEQ'); +>> 1 + +select nextval('"test"."report_id_seq"'); +>> 2 + +select nextval('test.report_id_seq'); +>> 3 + +drop schema test cascade; +> ok + +create table master(id int primary key); +> ok + +create table detail(id int primary key, x bigint, foreign key(x) references master(id) on delete cascade); +> ok + +alter table detail alter column x bigint; +> ok + +insert into master values(0); +> update count: 1 + +insert into detail values(0,0); +> update count: 1 + +delete from master; +> update count: 1 + +drop table master, detail; +> ok + +drop all objects; +> ok + +create table test(id int primary key, parent int references test(id) on delete cascade); +> ok + +insert into test values(0, 0); +> update count: 1 + +alter table test rename to test2; +> ok + +delete from test2; +> update count: 1 + +drop table test2; +> ok + +create view test_view(id) as select * from dual; +> ok + +drop view test_view; +> ok + +SET MODE DB2; +> ok + +SELECT * FROM SYSTEM_RANGE(1, 100) OFFSET 99 ROWS; +>> 100 + +SELECT * FROM SYSTEM_RANGE(1, 100) OFFSET 50 ROWS FETCH FIRST 1 ROW ONLY; +>> 51 + +SELECT * FROM SYSTEM_RANGE(1, 100) FETCH FIRST 1 ROWS ONLY; +>> 1 + +SELECT * FROM SYSTEM_RANGE(1, 100) FETCH FIRST ROW ONLY; +>> 1 + +SET MODE REGULAR; +> ok + +create 
domain email as varchar comment 'e-mail'; +> ok + +create table test(e email); +> ok + +select remarks from INFORMATION_SCHEMA.COLUMNS where table_name='TEST'; +>> e-mail + +drop table test; +> ok + +drop domain email; +> ok + +create table test$test(id int); +> ok + +drop table test$test; +> ok + +create table test$$test(id int); +> ok + +drop table test$$test; +> ok + +create table test (id varchar(36) as random_uuid() primary key); +> ok + +insert into test() values(); +> update count: 1 + +delete from test where id = select id from test; +> update count: 1 + +drop table test; +> ok + +create table test (id varchar(36) as now() primary key); +> ok + +insert into test() values(); +> update count: 1 + +delete from test where id = select id from test; +> update count: 1 + +drop table test; +> ok + +SELECT SOME(X>4) FROM SYSTEM_RANGE(1,6); +>> TRUE + +SELECT EVERY(X>4) FROM SYSTEM_RANGE(1,6); +>> FALSE + +SELECT BOOL_OR(X>4) FROM SYSTEM_RANGE(1,6); +>> TRUE + +SELECT BOOL_AND(X>4) FROM SYSTEM_RANGE(1,6); +>> FALSE + +SELECT BIT_OR(X) FROM SYSTEM_RANGE(1,6); +>> 7 + +SELECT BIT_AND(X) FROM SYSTEM_RANGE(1,6); +>> 0 + +SELECT BIT_AND(X) FROM SYSTEM_RANGE(1,1); +>> 1 + +CREATE TABLE TEST(ID IDENTITY); +> ok + +ALTER TABLE TEST ALTER COLUMN ID RESTART WITH ?; +{ +10 +}; +> update count: 0 + +INSERT INTO TEST VALUES(DEFAULT); +> update count: 1 + +SELECT * FROM TEST; +>> 10 + +DROP TABLE TEST; +> ok + +CREATE SEQUENCE TEST_SEQ; +> ok + +ALTER SEQUENCE TEST_SEQ RESTART WITH ? 
INCREMENT BY ?; +{ +20, 3 +}; +> update count: 0 + +SELECT NEXT VALUE FOR TEST_SEQ; +>> 20 + +SELECT NEXT VALUE FOR TEST_SEQ; +>> 23 + +DROP SEQUENCE TEST_SEQ; +> ok + +create schema Contact; +> ok + +CREATE TABLE Account (id BIGINT PRIMARY KEY); +> ok + +CREATE TABLE Person (id BIGINT PRIMARY KEY, FOREIGN KEY (id) REFERENCES Account(id)); +> ok + +CREATE TABLE Contact.Contact (id BIGINT, FOREIGN KEY (id) REFERENCES public.Person(id)); +> ok + +drop schema contact cascade; +> ok + +drop table account, person; +> ok + +create schema Contact; +> ok + +CREATE TABLE Account (id BIGINT primary key); +> ok + +CREATE TABLE Person (id BIGINT primary key, FOREIGN KEY (id) REFERENCES Account); +> ok + +CREATE TABLE Contact.Contact (id BIGINT primary key, FOREIGN KEY (id) REFERENCES public.Person); +> ok + +drop schema contact cascade; +> ok + +drop table account, person; +> ok + +CREATE TABLE TEST(A int NOT NULL, B int NOT NULL, C int) ; +> ok + +ALTER TABLE TEST ADD CONSTRAINT CON UNIQUE(A,B); +> ok + +ALTER TABLE TEST DROP C; +> ok + +ALTER TABLE TEST DROP CONSTRAINT CON; +> ok + +ALTER TABLE TEST DROP B; +> ok + +DROP TABLE TEST; +> ok + +create table test(id int); +> ok + +select count(*) from (select * from ((select * from test) union (select * from test)) a) b where id = 0; +>> 0 + +select count(*) from (select * from ((select * from test) union select * from test) a) b where id = 0; +>> 0 + +select count(*) from (select * from (select * from test union select * from test) a) b where id = 0; +>> 0 + +select 1 from ((test d1 inner join test d2 on d1.id = d2.id) inner join test d3 on d1.id = d3.id) inner join test d4 on d4.id = d1.id; +> 1 +> - +> rows: 0 + +drop table test; +> ok + +select replace(lpad('string', 10), ' ', '*'); +>> ****string + +select instr('abcisj','s', -1) from dual; +>> 5 + +CREATE TABLE TEST(ID INT); +> ok + +INSERT INTO TEST VALUES(1), (2), (3); +> update count: 3 + +create index idx_desc on test(id desc); +> ok + +select * from test where id 
between 0 and 1; +>> 1 + +select * from test where id between 3 and 4; +>> 3 + +drop table test; +> ok + +CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); +> ok + +INSERT INTO TEST VALUES(1, 'Hello'), (2, 'HelloWorld'), (3, 'HelloWorldWorld'); +> update count: 3 + +SELECT COUNT(*) FROM TEST WHERE NAME REGEXP 'World'; +>> 2 + +SELECT NAME FROM TEST WHERE NAME REGEXP 'WorldW'; +>> HelloWorldWorld + +drop table test; +> ok + +create table test(id int); +> ok + +insert into script.public.test(id) values(1), (2); +> update count: 2 + +update test t set t.id=t.id+1; +> update count: 2 + +update public.test set public.test.id=1; +> update count: 2 + +select count(script.public.test.id) from script.public.test; +>> 2 + +update script.public.test set script.public.test.id=1; +> update count: 2 + +drop table script.public.test; +> ok + +select year(timestamp '2007-07-26T18:44:26.109000+02:00'); +>> 2007 + +create table test(id int primary key); +> ok + +begin; +> ok + +insert into test values(1); +> update count: 1 + +rollback; +> ok + +insert into test values(2); +> update count: 1 + +rollback; +> ok + +begin; +> ok + +insert into test values(3); +> update count: 1 + +commit; +> ok + +insert into test values(4); +> update count: 1 + +rollback; +> ok + +select group_concat(id order by id) from test; +>> 2,3,4 + +drop table test; +> ok + +create table test(); +> ok + +insert into test values(); +> update count: 1 + +ALTER TABLE TEST ADD ID INTEGER; +> ok + +select count(*) from test; +>> 1 + +drop table test; +> ok + +select * from dual where 'a_z' like '%=_%' escape '='; +> +> +> +> rows: 1 + +create table test as select 1 from dual union all select 2 from dual; +> ok + +drop table test; +> ok + +create table test_table(column_a integer); +> ok + +insert into test_table values(1); +> update count: 1 + +create view test_view AS SELECT * FROM (SELECT DISTINCT * FROM test_table) AS subquery; +> ok + +select * FROM test_view; +>> 1 + +drop view test_view; +> ok + +drop 
table test_table; +> ok + +CREATE TABLE TEST(ID INT); +> ok + +INSERT INTO TEST VALUES(1); +> update count: 1 + +CREATE VIEW TEST_VIEW AS SELECT COUNT(ID) X FROM TEST; +> ok + +explain SELECT * FROM TEST_VIEW WHERE X>1; +>> SELECT "PUBLIC"."TEST_VIEW"."X" FROM "PUBLIC"."TEST_VIEW" /* SELECT COUNT(ID) AS X FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ HAVING COUNT(ID) >= ?1: X > CAST(1 AS BIGINT) */ WHERE "X" > CAST(1 AS BIGINT) + +DROP VIEW TEST_VIEW; +> ok + +DROP TABLE TEST; +> ok + +create table test1(id int); +> ok + +insert into test1 values(1), (1), (2), (3); +> update count: 4 + +select sum(C0) from (select count(*) AS C0 from (select distinct * from test1) as temp); +>> 3 + +drop table test1; +> ok + +create table test(id int primary key check id>1); +> ok + +drop table test; +> ok + +create table table1(f1 int not null primary key); +> ok + +create table table2(f2 int not null references table1(f1) on delete cascade); +> ok + +drop table table2; +> ok + +drop table table1; +> ok + +create table table1(f1 int not null primary key); +> ok + +create table table2(f2 int not null primary key references table1(f1)); +> ok + +drop table table1, table2; +> ok + +create table test(id int); +> ok + +insert into test values(1); +> update count: 1 + +select distinct id from test a order by a.id; +>> 1 + +drop table test; +> ok + +create table FOO (ID int, A number(18, 2)); +> ok + +insert into FOO (ID, A) values (1, 10.0), (2, 20.0); +> update count: 2 + +select SUM (CASE when ID=1 then 0 ELSE A END) col0 from Foo; +>> 20.00 + +drop table FOO; +> ok + +select (SELECT true)+1 GROUP BY 1; +>> 2 + +create table FOO (ID int, A number(18, 2)); +> ok + +insert into FOO (ID, A) values (1, 10.0), (2, 20.0); +> update count: 2 + +select SUM (CASE when ID=1 then A ELSE 0 END) col0 from Foo; +>> 10.00 + +drop table FOO; +> ok + +create table A ( ID integer, a1 varchar(20) ); +> ok + +create table B ( ID integer, AID integer, b1 varchar(20)); +> ok + +create table C ( ID integer, 
BId integer, c1 varchar(20)); +> ok + +insert into A (ID, a1) values (1, 'a1'); +> update count: 1 + +insert into A (ID, a1) values (2, 'a2'); +> update count: 1 + +select count(*) from A left outer join (B inner join C on C.BID=B.ID ) on B.AID=A.ID where A.id=1; +>> 1 + +select count(*) from A left outer join (B left join C on C.BID=B.ID ) on B.AID=A.ID where A.id=1; +>> 1 + +select count(*) from A left outer join B on B.AID=A.ID inner join C on C.BID=B.ID where A.id=1; +>> 0 + +select count(*) from (A left outer join B on B.AID=A.ID) inner join C on C.BID=B.ID where A.id=1; +>> 0 + +drop table a, b, c; +> ok + +create schema a; +> ok + +create table a.test(id int); +> ok + +insert into a.test values(1); +> update count: 1 + +create schema b; +> ok + +create table b.test(id int); +> ok + +insert into b.test values(2); +> update count: 1 + +select a.test.id + b.test.id from a.test, b.test; +>> 3 + +drop schema a cascade; +> ok + +drop schema b cascade; +> ok + +select date '+0011-01-01'; +>> 0011-01-01 + +select date'-0010-01-01'; +>> -0010-01-01 + +create table test(id int); +> ok + +create trigger TEST_TRIGGER before insert on test call "org.h2.test.db.TestTriggersConstraints"; +> ok + +comment on trigger TEST_TRIGGER is 'just testing'; +> ok + +select remarks from information_schema.triggers where trigger_name = 'TEST_TRIGGER'; +>> just testing + +@reconnect + +select remarks from information_schema.triggers where trigger_name = 'TEST_TRIGGER'; +>> just testing + +drop trigger TEST_TRIGGER; +> ok + +@reconnect + +create alias parse_long for "java.lang.Long.parseLong(java.lang.String)"; +> ok + +comment on alias parse_long is 'Parse a long with base'; +> ok + +select remarks from information_schema.routines where routine_name = 'PARSE_LONG'; +>> Parse a long with base + +@reconnect + +select remarks from information_schema.routines where routine_name = 'PARSE_LONG'; +>> Parse a long with base + +drop alias parse_long; +> ok + +@reconnect + +create role hr; +> ok 
+ +comment on role hr is 'Human Resources'; +> ok + +select remarks from information_schema.roles where role_name = 'HR'; +>> Human Resources + +@reconnect + +select remarks from information_schema.roles where role_name = 'HR'; +>> Human Resources + +create user abc password 'x'; +> ok + +grant hr to abc; +> ok + +drop role hr; +> ok + +@reconnect + +drop user abc; +> ok + +create domain email as varchar(100) check instr(value, '@') > 0; +> ok + +comment on domain email is 'must contain @'; +> ok + +select remarks from information_schema.domains where domain_name = 'EMAIL'; +>> must contain @ + +@reconnect + +select remarks from information_schema.domains where domain_name = 'EMAIL'; +>> must contain @ + +drop domain email; +> ok + +@reconnect + +create schema tests; +> ok + +set schema tests; +> ok + +create sequence walk; +> ok + +comment on schema tests is 'Test Schema'; +> ok + +comment on sequence walk is 'Walker'; +> ok + +select remarks from information_schema.schemata where schema_name = 'TESTS'; +>> Test Schema + +select remarks from information_schema.sequences where sequence_name = 'WALK'; +>> Walker + +@reconnect + +select remarks from information_schema.schemata where schema_name = 'TESTS'; +>> Test Schema + +select remarks from information_schema.sequences where sequence_name = 'WALK'; +>> Walker + +drop schema tests cascade; +> ok + +@reconnect + +drop table test; +> ok + +@reconnect + +create table test(id int); +> ok + +alter table test add constraint const1 unique(id); +> ok + +create index IDX_ID on test(id); +> ok + +comment on constraint const1 is 'unique id'; +> ok + +comment on index IDX_ID is 'id_index'; +> ok + +select remarks from information_schema.table_constraints where constraint_name = 'CONST1'; +>> unique id + +select remarks from information_schema.indexes where index_name = 'IDX_ID'; +>> id_index + +@reconnect + +select remarks from information_schema.table_constraints where constraint_name = 'CONST1'; +>> unique id + +select 
remarks from information_schema.indexes where index_name = 'IDX_ID'; +>> id_index + +drop table test; +> ok + +@reconnect + +create user sales password '1'; +> ok + +comment on user sales is 'mr. money'; +> ok + +select remarks from information_schema.users where user_name = 'SALES'; +>> mr. money + +@reconnect + +select remarks from information_schema.users where user_name = 'SALES'; +>> mr. money + +alter user sales rename to SALES_USER; +> ok + +select remarks from information_schema.users where user_name = 'SALES_USER'; +>> mr. money + +@reconnect + +select remarks from information_schema.users where user_name = 'SALES_USER'; +>> mr. money + +create table test(id int); +> ok + +create linked table test_link('org.h2.Driver', 'jdbc:h2:mem:', 'sa', 'sa', 'DUAL'); +> ok + +comment on table test_link is '123'; +> ok + +select remarks from information_schema.tables where table_name = 'TEST_LINK'; +>> 123 + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST_LINK'; +>> 123 + +comment on table test_link is 'xyz'; +> ok + +select remarks from information_schema.tables where table_name = 'TEST_LINK'; +>> xyz + +alter table test_link rename to test_l; +> ok + +select remarks from information_schema.tables where table_name = 'TEST_L'; +>> xyz + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST_L'; +>> xyz + +drop table test; +> ok + +@reconnect + +create table test(id int); +> ok + +create view test_v as select * from test; +> ok + +comment on table test_v is 'abc'; +> ok + +select remarks from information_schema.tables where table_name = 'TEST_V'; +>> abc + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST_V'; +>> abc + +alter table test_v rename to TEST_VIEW; +> ok + +select remarks from information_schema.tables where table_name = 'TEST_VIEW'; +>> abc + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST_VIEW'; +>> abc + +drop table 
test cascade; +> ok + +@reconnect + +create table test(a int); +> ok + +comment on table test is 'hi'; +> ok + +select remarks from information_schema.tables where table_name = 'TEST'; +>> hi + +alter table test add column b int; +> ok + +select remarks from information_schema.tables where table_name = 'TEST'; +>> hi + +alter table test rename to test1; +> ok + +select remarks from information_schema.tables where table_name = 'TEST1'; +>> hi + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST1'; +>> hi + +comment on table test1 is 'ho'; +> ok + +@reconnect + +select remarks from information_schema.tables where table_name = 'TEST1'; +>> ho + +drop table test1; +> ok + +create table test(a int, b int); +> ok + +comment on column test.b is 'test'; +> ok + +select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'B'; +>> test + +@reconnect + +select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'B'; +>> test + +alter table test drop column b; +> ok + +@reconnect + +comment on column test.a is 'ho'; +> ok + +select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'A'; +>> ho + +@reconnect + +select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'A'; +>> ho + +drop table test; +> ok + +@reconnect + +create table test(a int); +> ok + +comment on column test.a is 'test'; +> ok + +alter table test rename to test2; +> ok + +@reconnect + +select remarks from information_schema.columns where table_name = 'TEST2'; +>> test + +@reconnect + +select remarks from information_schema.columns where table_name = 'TEST2'; +>> test + +drop table test2; +> ok + +@reconnect + +create table test1 (a varchar(10)); +> ok + +create hash index x1 on test1(a); +> ok + +insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); +> update count: 4 + +insert into test1 values 
('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); +> update count: 4 + +insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); +> update count: 4 + +insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); +> update count: 4 + +select count(*) from test1 where a='abcaaaa'; +>> 4 + +select count(*) from test1 where a='abcbbbb'; +>> 4 + +@reconnect + +select count(*) from test1 where a='abccccc'; +>> 4 + +select count(*) from test1 where a='abcdddd'; +>> 4 + +update test1 set a='abccccc' where a='abcdddd'; +> update count: 4 + +select count(*) from test1 where a='abccccc'; +>> 8 + +select count(*) from test1 where a='abcdddd'; +>> 0 + +delete from test1 where a='abccccc'; +> update count: 8 + +select count(*) from test1 where a='abccccc'; +>> 0 + +truncate table test1; +> update count: 8 + +insert into test1 values ('abcaaaa'); +> update count: 1 + +insert into test1 values ('abcaaaa'); +> update count: 1 + +delete from test1; +> update count: 2 + +drop table test1; +> ok + +@reconnect + +drop table if exists test; +> ok + +create table if not exists test(col1 int primary key); +> ok + +insert into test values(1); +> update count: 1 + +insert into test values(2); +> update count: 1 + +insert into test values(3); +> update count: 1 + +select count(*) from test; +>> 3 + +select max(col1) from test; +>> 3 + +update test set col1 = col1 + 1 order by col1 asc limit 100; +> update count: 3 + +select count(*) from test; +>> 3 + +select max(col1) from test; +>> 4 + +drop table if exists test; +> ok diff --git a/h2/src/test/org/h2/test/server/TestAutoServer.java b/h2/src/test/org/h2/test/server/TestAutoServer.java index 0317c9810b..72090a0130 100644 --- a/h2/src/test/org/h2/test/server/TestAutoServer.java +++ b/h2/src/test/org/h2/test/server/TestAutoServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.server; @@ -9,13 +9,15 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.SortedProperties; /** * Tests automatic embedded/server mode. */ -public class TestAutoServer extends TestBase { +public class TestAutoServer extends TestDb { /** * The number of iterations. @@ -28,28 +30,29 @@ public class TestAutoServer extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { testUnsupportedCombinations(); testAutoServer(false); + testSocketReadTimeout(false); if (!config.big) { testAutoServer(true); } testLinkedLocalTablesWithAutoServerReconnect(); } - private void testUnsupportedCombinations() throws SQLException { + private void testUnsupportedCombinations() { String[] urls = { - "jdbc:h2:test;file_lock=no;auto_server=true", - "jdbc:h2:test;file_lock=serialized;auto_server=true", - "jdbc:h2:test;access_mode_data=r;auto_server=true", - "jdbc:h2:mem:test;auto_server=true" + "jdbc:h2:" + getTestName() + ";file_lock=no;auto_server=true", + "jdbc:h2:" + getTestName() + ";file_lock=serialized;auto_server=true", + "jdbc:h2:" + getTestName() + ";access_mode_data=r;auto_server=true", + "jdbc:h2:mem:" + getTestName() + ";auto_server=true" }; for (String url : urls) { - assertThrows(SQLException.class, this).getConnection(url); + assertThrows(SQLException.class, () -> getConnection(url)); try { getConnection(url); fail(url); @@ -63,49 +66,108 @@ private void testAutoServer(boolean port) throws Exception { if (config.memory || config.networked) { return; } + 
deleteDb(getTestName()); + String url = getURL(getTestName() + ";AUTO_SERVER=TRUE", true); + if (port) { + url += ";AUTO_SERVER_PORT=11111"; + } + String user = getUser(), password = getPassword(); + try (Connection connServer = getConnection(url + ";OPEN_NEW=TRUE", user, password)) { + int i = ITERATIONS; + for (; i > 0; i--) { + Thread.sleep(100); + SortedProperties prop = SortedProperties.loadProperties( + getBaseDir() + "/" + getTestName() + ".lock.db"); + String key = prop.getProperty("id"); + String server = prop.getProperty("server"); + if (server != null) { + String u2 = url.substring(url.indexOf(';')); + u2 = "jdbc:h2:tcp://" + server + "/" + key + u2; + Connection conn = DriverManager.getConnection(u2, user, password); + conn.close(); + int gotPort = Integer.parseInt(server.substring(server.lastIndexOf(':') + 1)); + if (port) { + assertEquals(11111, gotPort); + } + break; + } + } + if (i <= 0) { + fail(); + } + try (Connection conn = getConnection(url + ";OPEN_NEW=TRUE")) { + Statement stat = conn.createStatement(); + if (config.big) { + try { + stat.execute("SHUTDOWN"); + } catch (SQLException e) { + assertKnownException(e); + // the connection is closed + } + } + } + } deleteDb("autoServer"); - String url = getURL("autoServer;AUTO_SERVER=TRUE", true); + } + + + private void testSocketReadTimeout(boolean port) throws Exception { + if (config.memory || config.networked) { + return; + } + deleteDb(getTestName()); + String url = getURL(getTestName() + ";AUTO_SERVER=TRUE", true); if (port) { url += ";AUTO_SERVER_PORT=11111"; } String user = getUser(), password = getPassword(); Connection connServer = getConnection(url + ";OPEN_NEW=TRUE", - user, password); - - int i = ITERATIONS; - for (; i > 0; i--) { - Thread.sleep(100); + user, password); + try { SortedProperties prop = SortedProperties.loadProperties( - getBaseDir() + "/autoServer.lock.db"); + getBaseDir() + "/" + getTestName() + ".lock.db"); String key = prop.getProperty("id"); String server = 
prop.getProperty("server"); if (server != null) { - String u2 = url.substring(url.indexOf(";")); - u2 = "jdbc:h2:tcp://" + server + "/" + key + u2; + String u2 = url.substring(url.indexOf(';')); + //todo java.net.SocketTimeoutException: Read timed out + u2 = "jdbc:h2:tcp://" + server + "/" + key + u2 + ";NETWORK_TIMEOUT=100"; Connection conn = DriverManager.getConnection(u2, user, password); + Statement stat = conn.createStatement(); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, stat). + executeQuery("SELECT MAX(RAND()) FROM SYSTEM_RANGE(1, 100000000)"); conn.close(); int gotPort = Integer.parseInt(server.substring(server.lastIndexOf(':') + 1)); if (port) { assertEquals(11111, gotPort); } - break; } - } - if (i <= 0) { - fail(); - } - Connection conn = getConnection(url + ";OPEN_NEW=TRUE"); - Statement stat = conn.createStatement(); - if (config.big) { + Connection conn = getConnection(url + ";OPEN_NEW=TRUE"); + Statement stat = conn.createStatement(); + if (config.big) { + try { + stat.execute("SHUTDOWN"); + } catch (SQLException e) { + assertKnownException(e); + // the connection is closed + } + } + conn.close(); + } finally { try { - stat.execute("SHUTDOWN"); + connServer.createStatement().execute("SHUTDOWN"); + if (config.big) { + fail("server should be down already"); + } } catch (SQLException e) { + assertTrue(config.big); assertKnownException(e); - // the connection is closed } + try { + connServer.close(); + } catch (SQLException ignore) {} } - conn.close(); - connServer.close(); + deleteDb("autoServer"); } @@ -117,10 +179,10 @@ private void testLinkedLocalTablesWithAutoServerReconnect() if (config.memory || config.networked) { return; } - deleteDb("autoServerLinkedTable1"); - deleteDb("autoServerLinkedTable2"); - String url = getURL("autoServerLinkedTable1;AUTO_SERVER=TRUE", true); - String urlLinked = getURL("autoServerLinkedTable2", true); + deleteDb(getTestName() + "1"); + deleteDb(getTestName() + "2"); + String url = getURL(getTestName() + 
"1;AUTO_SERVER=TRUE", true); + String urlLinked = getURL(getTestName() + "2", true); String user = getUser(), password = getPassword(); Connection connLinked = getConnection(urlLinked, user, password); @@ -163,8 +225,8 @@ private void testLinkedLocalTablesWithAutoServerReconnect() // ignore } - deleteDb("autoServerLinkedTable1"); - deleteDb("autoServerLinkedTable2"); + deleteDb(getTestName() + "1"); + deleteDb(getTestName() + "2"); } /** diff --git a/h2/src/test/org/h2/test/server/TestInit.java b/h2/src/test/org/h2/test/server/TestInit.java index 596bf0a0ea..49a90f0ac3 100644 --- a/h2/src/test/org/h2/test/server/TestInit.java +++ b/h2/src/test/org/h2/test/server/TestInit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.server; @@ -13,11 +13,12 @@ import java.sql.Statement; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests INIT command within embedded/server mode. */ -public class TestInit extends TestBase { +public class TestInit extends TestDb { /** * Run just this test. 
@@ -25,7 +26,7 @@ public class TestInit extends TestBase { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -40,7 +41,7 @@ public void test() throws Exception { Writer w = new OutputStreamWriter(FileUtils.newOutputStream(init1, false)); PrintWriter writer = new PrintWriter(w); - writer.println("create table test(id int identity, name varchar);"); + writer.println("create table test(id int generated by default as identity, name varchar);"); writer.println("insert into test(name) values('cat');"); writer.close(); diff --git a/h2/src/test/org/h2/test/server/TestJakartaWeb.java b/h2/src/test/org/h2/test/server/TestJakartaWeb.java new file mode 100644 index 0000000000..7d24757915 --- /dev/null +++ b/h2/src/test/org/h2/test/server/TestJakartaWeb.java @@ -0,0 +1,698 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.server; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.security.Principal; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Vector; + +import jakarta.servlet.AsyncContext; +import jakarta.servlet.DispatcherType; +import jakarta.servlet.RequestDispatcher; +import jakarta.servlet.ServletConfig; +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletException; +import jakarta.servlet.ServletInputStream; +import jakarta.servlet.ServletOutputStream; +import jakarta.servlet.ServletRequest; +import jakarta.servlet.ServletResponse; +import jakarta.servlet.WriteListener; +import jakarta.servlet.http.Cookie; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import jakarta.servlet.http.HttpSession; +import jakarta.servlet.http.HttpUpgradeHandler; +import jakarta.servlet.http.Part; + +import org.h2.server.web.JakartaWebServlet; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Utils10; + +/** + * Tests the Jakarta Web Servlet for the H2 Console. + */ +public class TestJakartaWeb extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testServlet(); + } + + private void testServlet() throws Exception { + JakartaWebServlet servlet = new JakartaWebServlet(); + final HashMap configMap = new HashMap<>(); + configMap.put("ifExists", ""); + configMap.put("", ""); + ServletConfig config = new ServletConfig() { + + @Override + public String getServletName() { + return "H2Console"; + } + + @Override + public Enumeration getInitParameterNames() { + return new Vector<>(configMap.keySet()).elements(); + } + + @Override + public String getInitParameter(String name) { + return configMap.get(name); + } + + @Override + public ServletContext getServletContext() { + return null; + } + + }; + servlet.init(config); + + + TestHttpServletRequest request = new TestHttpServletRequest(); + request.setPathInfo("/"); + TestHttpServletResponse response = new TestHttpServletResponse(); + TestServletOutputStream out = new TestServletOutputStream(); + response.setServletOutputStream(out); + servlet.doGet(request, response); + assertContains(out.toString(), "location.href = 'login.jsp"); + servlet.destroy(); + } + + /** + * A HTTP servlet request for testing. 
+ */ + static class TestHttpServletRequest implements HttpServletRequest { + + private String pathInfo; + + void setPathInfo(String pathInfo) { + this.pathInfo = pathInfo; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public Enumeration getAttributeNames() { + return new Vector().elements(); + } + + @Override + public String getCharacterEncoding() { + return null; + } + + @Override + public int getContentLength() { + return 0; + } + + @Override + public String getContentType() { + return null; + } + + @Override + public ServletInputStream getInputStream() throws IOException { + return null; + } + + @Override + public String getLocalAddr() { + return null; + } + + @Override + public String getLocalName() { + return null; + } + + @Override + public int getLocalPort() { + return 0; + } + + @Override + public Locale getLocale() { + return null; + } + + @Override + public Enumeration getLocales() { + return null; + } + + @Override + public String getParameter(String name) { + return null; + } + + @Override + public Map getParameterMap() { + return null; + } + + @Override + public Enumeration getParameterNames() { + return new Vector().elements(); + } + + @Override + public String[] getParameterValues(String name) { + return null; + } + + @Override + public String getProtocol() { + return null; + } + + @Override + public BufferedReader getReader() throws IOException { + return null; + } + + @Override + @Deprecated + public String getRealPath(String path) { + return null; + } + + @Override + public String getRemoteAddr() { + return null; + } + + @Override + public String getRemoteHost() { + return null; + } + + @Override + public int getRemotePort() { + return 0; + } + + @Override + public RequestDispatcher getRequestDispatcher(String name) { + return null; + } + + @Override + public String getScheme() { + return "http"; + } + + @Override + public String getServerName() { + return null; + } + + @Override + public int 
getServerPort() { + return 80; + } + + @Override + public boolean isSecure() { + return false; + } + + @Override + public void removeAttribute(String name) { + // ignore + } + + @Override + public void setAttribute(String name, Object value) { + // ignore + } + + @Override + public void setCharacterEncoding(String encoding) + throws UnsupportedEncodingException { + // ignore + } + + @Override + public String getAuthType() { + return null; + } + + @Override + public String getContextPath() { + return null; + } + + @Override + public Cookie[] getCookies() { + return null; + } + + @Override + public long getDateHeader(String x) { + return 0; + } + + @Override + public String getHeader(String name) { + return null; + } + + @Override + public Enumeration getHeaderNames() { + return null; + } + + @Override + public Enumeration getHeaders(String name) { + return null; + } + + @Override + public int getIntHeader(String name) { + return 0; + } + + @Override + public String getMethod() { + return null; + } + + @Override + public String getPathInfo() { + return pathInfo; + } + + @Override + public String getPathTranslated() { + return null; + } + + @Override + public String getQueryString() { + return null; + } + + @Override + public String getRemoteUser() { + return null; + } + + @Override + public String getRequestURI() { + return null; + } + + @Override + public StringBuffer getRequestURL() { + return null; + } + + @Override + public String getRequestedSessionId() { + return null; + } + + @Override + public String getServletPath() { + return null; + } + + @Override + public HttpSession getSession() { + return null; + } + + @Override + public HttpSession getSession(boolean x) { + return null; + } + + @Override + public Principal getUserPrincipal() { + return null; + } + + @Override + public boolean isRequestedSessionIdFromCookie() { + return false; + } + + @Override + public boolean isRequestedSessionIdFromURL() { + return false; + } + + @Override + @Deprecated + public 
boolean isRequestedSessionIdFromUrl() { + return false; + } + + @Override + public boolean isRequestedSessionIdValid() { + return false; + } + + @Override + public boolean isUserInRole(String x) { + return false; + } + + @Override + public java.util.Collection getParts() { + return null; + } + + @Override + public Part getPart(String name) { + return null; + } + + @Override + public boolean authenticate(HttpServletResponse response) { + return false; + } + + @Override + public void login(String username, String password) { + // ignore + } + + @Override + public void logout() { + // ignore + } + + @Override + public ServletContext getServletContext() { + return null; + } + + @Override + public AsyncContext startAsync() { + return null; + } + + @Override + public AsyncContext startAsync( + ServletRequest servletRequest, + ServletResponse servletResponse) { + return null; + } + + @Override + public boolean isAsyncStarted() { + return false; + } + + @Override + public boolean isAsyncSupported() { + return false; + } + + @Override + public AsyncContext getAsyncContext() { + return null; + } + + @Override + public DispatcherType getDispatcherType() { + return null; + } + + @Override + public long getContentLengthLong() { + return 0; + } + + @Override + public String changeSessionId() { + return null; + } + + @Override + public T upgrade(Class handlerClass) + throws IOException, ServletException { + return null; + } + + } + + /** + * A HTTP servlet response for testing. 
+ */ + static class TestHttpServletResponse implements HttpServletResponse { + + ServletOutputStream servletOutputStream; + + void setServletOutputStream(ServletOutputStream servletOutputStream) { + this.servletOutputStream = servletOutputStream; + } + + @Override + public void flushBuffer() throws IOException { + // ignore + } + + @Override + public int getBufferSize() { + return 0; + } + + @Override + public String getCharacterEncoding() { + return null; + } + + @Override + public String getContentType() { + return null; + } + + @Override + public Locale getLocale() { + return null; + } + + @Override + public ServletOutputStream getOutputStream() throws IOException { + return servletOutputStream; + } + + @Override + public PrintWriter getWriter() throws IOException { + return null; + } + + @Override + public boolean isCommitted() { + return false; + } + + @Override + public void reset() { + // ignore + } + + @Override + public void resetBuffer() { + // ignore + } + + @Override + public void setBufferSize(int arg0) { + // ignore + } + + @Override + public void setCharacterEncoding(String arg0) { + // ignore + } + + @Override + public void setContentLength(int arg0) { + // ignore + } + + @Override + public void setContentLengthLong(long arg0) { + // ignore + } + + @Override + public void setContentType(String arg0) { + // ignore + } + + @Override + public void setLocale(Locale arg0) { + // ignore + } + + @Override + public void addCookie(Cookie arg0) { + // ignore + } + + @Override + public void addDateHeader(String arg0, long arg1) { + // ignore + } + + @Override + public void addHeader(String arg0, String arg1) { + // ignore + } + + @Override + public void addIntHeader(String arg0, int arg1) { + // ignore + } + + @Override + public boolean containsHeader(String arg0) { + return false; + } + + @Override + public String encodeRedirectURL(String arg0) { + return null; + } + + @Override + @Deprecated + public String encodeRedirectUrl(String arg0) { + return null; + } 
+ + @Override + public String encodeURL(String arg0) { + return null; + } + + @Override + @Deprecated + public String encodeUrl(String arg0) { + return null; + } + + @Override + public void sendError(int arg0) throws IOException { + // ignore + } + + @Override + public void sendError(int arg0, String arg1) throws IOException { + // ignore + } + + @Override + public void sendRedirect(String arg0) throws IOException { + // ignore + } + + @Override + public void setDateHeader(String arg0, long arg1) { + // ignore + } + + @Override + public void setHeader(String arg0, String arg1) { + // ignore + } + + @Override + public void setIntHeader(String arg0, int arg1) { + // ignore + } + + @Override + public void setStatus(int arg0) { + // ignore + } + + @Override + @Deprecated + public void setStatus(int arg0, String arg1) { + // ignore + } + + @Override + public int getStatus() { + return 0; + } + + @Override + public String getHeader(String name) { + return null; + } + + @Override + public java.util.Collection getHeaders(String name) { + return null; + } + + @Override + public java.util.Collection getHeaderNames() { + return null; + } + + } + + /** + * A servlet output stream for testing. 
+ */ + static class TestServletOutputStream extends ServletOutputStream { + + private final ByteArrayOutputStream buff = new ByteArrayOutputStream(); + + @Override + public void write(int b) throws IOException { + buff.write(b); + } + + @Override + public String toString() { + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + } + + @Override + public boolean isReady() { + return true; + } + + @Override + public void setWriteListener(WriteListener writeListener) { + // ignore + } + + } + +} diff --git a/h2/src/test/org/h2/test/server/TestNestedLoop.java b/h2/src/test/org/h2/test/server/TestNestedLoop.java index 0d18126b44..e085efed57 100644 --- a/h2/src/test/org/h2/test/server/TestNestedLoop.java +++ b/h2/src/test/org/h2/test/server/TestNestedLoop.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.server; @@ -12,12 +12,13 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests remote JDBC access with nested loops. * This is not allowed in some databases. */ -public class TestNestedLoop extends TestBase { +public class TestNestedLoop extends TestDb { /** * Run just this test. @@ -25,7 +26,7 @@ public class TestNestedLoop extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -33,7 +34,7 @@ public void test() throws SQLException { deleteDb("nestedLoop"); Connection conn = getConnection("nestedLoop"); Statement stat = conn.createStatement(); - stat.execute("create table test(id int identity, name varchar)"); + stat.execute("create table test(id int generated by default as identity, name varchar)"); int len = getSize(1010, 10000); for (int i = 0; i < len; i++) { stat.execute("insert into test(name) values('Hello World')"); diff --git a/h2/src/test/org/h2/test/server/TestWeb.java b/h2/src/test/org/h2/test/server/TestWeb.java index 8bf56c6ffa..f7cac62797 100644 --- a/h2/src/test/org/h2/test/server/TestWeb.java +++ b/h2/src/test/org/h2/test/server/TestWeb.java @@ -1,23 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.server; import java.io.BufferedReader; -import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; +import java.net.ConnectException; +import java.nio.charset.StandardCharsets; import java.security.Principal; import java.sql.Connection; -import java.sql.SQLException; import java.util.Enumeration; import java.util.HashMap; import java.util.Locale; @@ -28,16 +25,19 @@ import javax.servlet.DispatcherType; import javax.servlet.RequestDispatcher; import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; import javax.servlet.ServletInputStream; import javax.servlet.ServletOutputStream; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; +import javax.servlet.WriteListener; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; +import javax.servlet.http.HttpUpgradeHandler; import javax.servlet.http.Part; -import javax.servlet.ServletContext; import org.h2.api.ErrorCode; import org.h2.engine.Constants; @@ -45,16 +45,16 @@ import org.h2.server.web.WebServlet; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; +import org.h2.test.TestDb; import org.h2.tools.Server; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; import org.h2.util.Task; +import org.h2.util.Utils10; /** * Tests the H2 Console application. */ -public class TestWeb extends TestBase { +public class TestWeb extends TestDb { private static volatile String lastUrl; @@ -64,7 +64,7 @@ public class TestWeb extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -72,7 +72,6 @@ public void test() throws Exception { testServlet(); testWrongParameters(); testTools(); - testTransfer(); testAlreadyRunning(); testStartWebServerWithConnection(); testServer(); @@ -82,11 +81,9 @@ public void test() throws Exception { private void testServlet() throws Exception { WebServlet servlet = new WebServlet(); - final HashMap configMap = new HashMap(); + final HashMap configMap = new HashMap<>(); configMap.put("ifExists", ""); configMap.put("", ""); - configMap.put("", ""); - configMap.put("", ""); ServletConfig config = new ServletConfig() { @Override @@ -96,7 +93,7 @@ public String getServletName() { @Override public Enumeration getInitParameterNames() { - return new Vector(configMap.keySet()).elements(); + return new Vector<>(configMap.keySet()).elements(); } @Override @@ -123,29 +120,17 @@ public ServletContext getServletContext() { servlet.destroy(); } - private static void testWrongParameters() { - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createPgServer("-pgPort 8182"); - }}; - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createTcpServer("-tcpPort 8182"); - }}; - new AssertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1) { - @Override - public void test() throws SQLException { - Server.createWebServer("-webPort=8182"); - }}; + private void testWrongParameters() { + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createPgServer("-pgPort 8182")); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createTcpServer("-tcpPort 8182")); + assertThrows(ErrorCode.FEATURE_NOT_SUPPORTED_1, () -> Server.createWebServer("-webPort=8182")); } private void testAlreadyRunning() throws Exception { Server server = Server.createWebServer( "-webPort", "8182", 
"-properties", "null"); server.start(); - assertTrue(server.getStatus().contains("server running")); + assertContains(server.getStatus(), "server running"); Server server2 = Server.createWebServer( "-webPort", "8182", "-properties", "null"); assertEquals("Not started", server2.getStatus()); @@ -153,9 +138,9 @@ private void testAlreadyRunning() throws Exception { server2.start(); fail(); } catch (Exception e) { - assertTrue(e.toString().contains("port may be in use")); - assertTrue(server2.getStatus().contains( - "could not be started")); + assertContains(e.toString(), "port may be in use"); + assertContains(server2.getStatus(), + "could not be started"); } server.stop(); } @@ -164,15 +149,15 @@ private void testTools() throws Exception { if (config.memory || config.cipher != null) { return; } - deleteDb("web"); - Connection conn = getConnection("web"); + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); conn.createStatement().execute( "create table test(id int) as select 1"); conn.close(); Server server = new Server(); server.setOut(new PrintStream(new ByteArrayOutputStream())); server.runTool("-web", "-webPort", "8182", - "-properties", "null", "-tcp", "-tcpPort", "9101"); + "-properties", "null", "-tcp", "-tcpPort", "9101", "-webAdminPassword", "123"); try { String url = "http://localhost:8182"; WebClient client; @@ -180,71 +165,38 @@ private void testTools() throws Exception { client = new WebClient(); result = client.get(url); client.readSessionId(result); + result = client.get(url, "adminLogin.do?password=123"); result = client.get(url, "tools.jsp"); FileUtils.delete(getBaseDir() + "/backup.zip"); result = client.get(url, "tools.do?tool=Backup&args=-dir," + - getBaseDir() + ",-db,web,-file," + + getBaseDir() + ",-db," + getTestName() + ",-file," + getBaseDir() + "/backup.zip"); - deleteDb("web"); + deleteDb(getTestName()); assertTrue(FileUtils.exists(getBaseDir() + "/backup.zip")); result = client.get(url, 
"tools.do?tool=DeleteDbFiles&args=-dir," + - getBaseDir() + ",-db,web"); - String fn = getBaseDir() + "/web"; - if (config.mvStore) { - fn += Constants.SUFFIX_MV_FILE; - } else { - fn += Constants.SUFFIX_PAGE_FILE; - } + getBaseDir() + ",-db," + getTestName()); + String fn = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; assertFalse(FileUtils.exists(fn)); result = client.get(url, "tools.do?tool=Restore&args=-dir," + - getBaseDir() + ",-db,web,-file," + getBaseDir() + + getBaseDir() + ",-db," + getTestName() +",-file," + getBaseDir() + "/backup.zip"); assertTrue(FileUtils.exists(fn)); FileUtils.delete(getBaseDir() + "/web.h2.sql"); FileUtils.delete(getBaseDir() + "/backup.zip"); result = client.get(url, "tools.do?tool=Recover&args=-dir," + - getBaseDir() + ",-db,web"); - assertTrue(FileUtils.exists(getBaseDir() + "/web.h2.sql")); + getBaseDir() + ",-db," + getTestName()); + assertTrue(FileUtils.exists(getBaseDir() + "/" + getTestName() + ".h2.sql")); FileUtils.delete(getBaseDir() + "/web.h2.sql"); result = client.get(url, "tools.do?tool=RunScript&args=-script," + - getBaseDir() + "/web.h2.sql,-url," + getURL("web", true) + + getBaseDir() + "/" + getTestName() + ".h2.sql,-url," + + getURL(getTestName(), true) + ",-user," + getUser() + ",-password," + getPassword()); - FileUtils.delete(getBaseDir() + "/web.h2.sql"); + FileUtils.delete(getBaseDir() + "/" + getTestName() + ".h2.sql"); assertTrue(FileUtils.exists(fn)); - deleteDb("web"); - } finally { - server.shutdown(); - } - } - - private void testTransfer() throws Exception { - Server server = new Server(); - server.setOut(new PrintStream(new ByteArrayOutputStream())); - server.runTool("-web", "-webPort", "8182", "-properties", "null"); - File transfer = new File("transfer"); - transfer.mkdirs(); - try { - FileOutputStream f = new FileOutputStream("transfer/test.txt"); - f.write("Hello World".getBytes()); - f.close(); - WebClient client = new WebClient(); - String url = "http://localhost:8182"; - 
String result = client.get(url); - client.readSessionId(result); - String test = client.get(url, "transfer/test.txt"); - assertEquals("Hello World", test); - new File("transfer/testUpload.txt").delete(); - client.upload(url + "/transfer/testUpload.txt", - "testUpload.txt", new ByteArrayInputStream( - "Hallo Welt".getBytes())); - byte[] d = IOUtils.readBytesAndClose( - new FileInputStream("transfer/testUpload.txt"), -1); - assertEquals("Hallo Welt", new String(d)); - new File("transfer/testUpload.txt").delete(); + deleteDb(getTestName()); } finally { server.shutdown(); - FileUtils.deleteRecursive("transfer", true); } } @@ -289,11 +241,12 @@ private void testServer() throws Exception { } private void testIfExists() throws Exception { - Connection conn = getConnection("jdbc:h2:mem:webExists", + Connection conn = getConnection("jdbc:h2:mem:" + getTestName(), getUser(), getPassword()); Server server = new Server(); server.setOut(new PrintStream(new ByteArrayOutputStream())); - server.runTool("-ifExists", "-web", "-webPort", "8182", + // -ifExists is the default + server.runTool("-web", "-webPort", "8182", "-properties", "null", "-tcp", "-tcpPort", "9101"); try { String url = "http://localhost:8182"; @@ -304,12 +257,12 @@ private void testIfExists() throws Exception { client.readSessionId(result); result = client.get(url, "login.jsp"); result = client.get(url, "test.do?driver=org.h2.Driver" + - "&url=jdbc:h2:mem:webExists" + + "&url=jdbc:h2:mem:" + getTestName() + "&user=" + getUser() + "&password=" + getPassword() + "&name=_test_"); assertTrue(result.indexOf("Exception") < 0); result = client.get(url, "test.do?driver=org.h2.Driver" + - "&url=jdbc:h2:mem:web" + + "&url=jdbc:h2:mem:" + getTestName() + "Wrong" + "&user=" + getUser() + "&password=" + getPassword() + "&name=_test_"); assertContains(result, "Exception"); @@ -317,12 +270,13 @@ private void testIfExists() throws Exception { server.shutdown(); conn.close(); } + } private void testWebApp() throws Exception { 
Server server = new Server(); server.setOut(new PrintStream(new ByteArrayOutputStream())); - server.runTool("-web", "-webPort", "8182", + server.runTool("-ifNotExists", "-web", "-webPort", "8182", "-properties", "null", "-tcp", "-tcpPort", "9101"); try { String url = "http://localhost:8182"; @@ -340,13 +294,16 @@ private void testWebApp() throws Exception { result = client.get(url, "login.jsp"); assertTrue(result.indexOf("Einstellung") < 0); result = client.get(url, "test.do?driver=abc" + - "&url=jdbc:abc:mem:web&user=sa&password=sa&name=_test_"); + "&url=jdbc:abc:mem: " + getTestName() + + "&user=sa&password=sa&name=_test_"); assertContains(result, "Exception"); result = client.get(url, "test.do?driver=org.h2.Driver" + - "&url=jdbc:h2:mem:web&user=sa&password=sa&name=_test_"); + "&url=jdbc:h2:mem:" + getTestName() + + "&user=sa&password=sa&name=_test_"); assertTrue(result.indexOf("Exception") < 0); result = client.get(url, "login.do?driver=org.h2.Driver" + - "&url=jdbc:h2:mem:web&user=sa&password=sa&name=_test_"); + "&url=jdbc:h2:mem:" + getTestName() + + "&user=sa&password=sa&name=_test_"); result = client.get(url, "header.jsp"); result = client.get(url, "query.do?sql=" + "create table test(id int primary key, name varchar);" + @@ -473,8 +430,23 @@ private void testWebApp() throws Exception { result = client.get(url, "query.do?sql=@cancel"); assertContains(result, "There is currently no running statement"); result = client.get(url, - "query.do?sql=@generated insert into test(id) values(test_sequence.nextval)"); - assertContains(result, "SCOPE_IDENTITY()"); + "query.do?sql=@generated insert into test(id) values(next value for test_sequence)"); + assertContains(result, "ID1"); + result = client.get(url, + "query.do?sql=@generated(1) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "ID2"); + result = client.get(url, + "query.do?sql=@generated(1, 1) insert into test(id) values(next value for test_sequence)"); + 
assertContains(result, "IDID33"); + result = client.get(url, + "query.do?sql=@generated(id) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "ID4"); + result = client.get(url, + "query.do?sql=@generated(id, id) insert into test(id) values(next value for test_sequence)"); + assertContains(result, "IDID55"); + result = client.get(url, + "query.do?sql=@generated() insert into test(id) values(next value for test_sequence)"); + assertContains(result, "
    "); result = client.get(url, "query.do?sql=@maxrows 2000"); assertContains(result, "Max rowcount is set"); result = client.get(url, "query.do?sql=@password_hash user password"); @@ -484,20 +456,15 @@ private void testWebApp() throws Exception { assertContains(result, "Ok"); result = client.get(url, "query.do?sql=@catalogs"); assertContains(result, "PUBLIC"); - result = client.get(url, - "query.do?sql=@column_privileges null null null TEST null"); + result = client.get(url, "query.do?sql=@column_privileges null null TEST null"); assertContains(result, "PRIVILEGE"); - result = client.get(url, - "query.do?sql=@cross_references null null null TEST"); + result = client.get(url, "query.do?sql=@cross_references null null TEST null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@exported_keys null null null TEST"); + result = client.get(url, "query.do?sql=@exported_keys null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@imported_keys null null null TEST"); + result = client.get(url, "query.do?sql=@imported_keys null null TEST"); assertContains(result, "PKTABLE_NAME"); - result = client.get(url, - "query.do?sql=@primary_keys null null null TEST"); + result = client.get(url, "query.do?sql=@primary_keys null null TEST"); assertContains(result, "PK_NAME"); result = client.get(url, "query.do?sql=@procedures null null null"); assertContains(result, "PROCEDURE_NAME"); @@ -508,23 +475,22 @@ private void testWebApp() throws Exception { result = client.get(url, "query.do?sql=@table_privileges"); assertContains(result, "PRIVILEGE"); result = client.get(url, "query.do?sql=@table_types"); - assertContains(result, "SYSTEM TABLE"); + assertContains(result, "BASE TABLE"); result = client.get(url, "query.do?sql=@type_info"); - assertContains(result, "CLOB"); + assertContains(result, "CHARACTER LARGE OBJECT"); result = client.get(url, "query.do?sql=@version_columns"); 
assertContains(result, "PSEUDO_COLUMN"); result = client.get(url, "query.do?sql=@attributes"); - assertContains(result, "Feature not supported: "attributes""); + assertContains(result, "ATTR_NAME"); result = client.get(url, "query.do?sql=@super_tables"); assertContains(result, "SUPERTABLE_NAME"); result = client.get(url, "query.do?sql=@super_types"); - assertContains(result, "Feature not supported: "superTypes""); + assertContains(result, "SUPERTYPE_NAME"); result = client.get(url, "query.do?sql=@prof_start"); assertContains(result, "Ok"); result = client.get(url, "query.do?sql=@prof_stop"); assertContains(result, "Top Stack Trace(s)"); - result = client.get(url, - "query.do?sql=@best_row_identifier null null TEST"); + result = client.get(url, "query.do?sql=@best_row_identifier null null TEST"); assertContains(result, "SCOPE"); assertContains(result, "COLUMN_NAME"); assertContains(result, "ID"); @@ -561,7 +527,8 @@ private void testWebApp() throws Exception { result = client.get(url, "logout.do"); result = client.get(url, "login.do?driver=org.h2.Driver&" + - "url=jdbc:h2:mem:web&user=sa&password=sa&name=_test_"); + "url=jdbc:h2:mem:" + getTestName() + + "&user=sa&password=sa&name=_test_"); result = client.get(url, "logout.do"); result = client.get(url, "settingRemove.do?name=_test_"); @@ -580,11 +547,11 @@ private void testStartWebServerWithConnection() throws Exception { Server.openBrowser("testUrl"); assertEquals("testUrl", lastUrl); String oldUrl = lastUrl; - final Connection conn = getConnection("testWeb"); + final Connection conn = getConnection(getTestName()); Task t = new Task() { @Override public void call() throws Exception { - Server.startWebServer(conn); + Server.startWebServer(conn, true); } }; t.execute(); @@ -600,7 +567,7 @@ public void call() throws Exception { url = client.getBaseUrl(url); try { client.get(url, "logout.do"); - } catch (Exception e) { + } catch (ConnectException e) { // the server stops on logout } t.get(); @@ -611,7 +578,6 @@ public 
void call() throws Exception { } else { System.clearProperty(SysProperties.H2_BROWSER); } - deleteDb("testWeb"); } } @@ -748,7 +714,7 @@ public RequestDispatcher getRequestDispatcher(String name) { @Override public String getScheme() { - return null; + return "http"; } @Override @@ -758,7 +724,7 @@ public String getServerName() { @Override public int getServerPort() { - return 0; + return 80; } @Override @@ -970,6 +936,22 @@ public DispatcherType getDispatcherType() { return null; } + @Override + public long getContentLengthLong() { + return 0; + } + + @Override + public String changeSessionId() { + return null; + } + + @Override + public T upgrade(Class handlerClass) + throws IOException, ServletException { + return null; + } + } /** @@ -1048,6 +1030,11 @@ public void setContentLength(int arg0) { // ignore } + @Override + public void setContentLengthLong(long arg0) { + // ignore + } + @Override public void setContentType(String arg0) { // ignore @@ -1182,11 +1169,17 @@ public void write(int b) throws IOException { @Override public String toString() { - try { - return new String(buff.toByteArray(), "UTF-8"); - } catch (UnsupportedEncodingException e) { - return e.toString(); - } + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + } + + @Override + public boolean isReady() { + return true; + } + + @Override + public void setWriteListener(WriteListener writeListener) { + // ignore } } diff --git a/h2/src/test/org/h2/test/server/WebClient.java b/h2/src/test/org/h2/test/server/WebClient.java index 56eee28f57..a24d10a587 100644 --- a/h2/src/test/org/h2/test/server/WebClient.java +++ b/h2/src/test/org/h2/test/server/WebClient.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.server; @@ -24,7 +24,7 @@ public class WebClient { private String contentType; /** - * Open an URL and get the HTML data. + * Open a URL and get the HTML data. * * @param url the HTTP URL * @return the HTML as a string @@ -143,7 +143,7 @@ String get(String url, String page) throws IOException { */ String getBaseUrl(String url) { int idx = url.indexOf("//"); - idx = url.indexOf("/", idx + 2); + idx = url.indexOf('/', idx + 2); if (idx >= 0) { return url.substring(0, idx); } diff --git a/h2/src/test/org/h2/test/server/package.html b/h2/src/test/org/h2/test/server/package.html index 7b8c261dc6..75974b6522 100644 --- a/h2/src/test/org/h2/test/server/package.html +++ b/h2/src/test/org/h2/test/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/store/CalculateHashConstant.java b/h2/src/test/org/h2/test/store/CalculateHashConstant.java index b80d40fc5b..9399768d00 100644 --- a/h2/src/test/org/h2/test/store/CalculateHashConstant.java +++ b/h2/src/test/org/h2/test/store/CalculateHashConstant.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -14,12 +14,13 @@ import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.h2.security.AES; /** - * Calculate the constant for the secondary hash function, so that the hash - * function mixes the input bits as much as possible. + * Calculate the constant for the secondary / supplemental hash function, so + * that the hash function mixes the input bits as much as possible. 
*/ public class CalculateHashConstant implements Runnable { @@ -227,6 +228,30 @@ public int hash(int x) { System.out.println("Collisions: " + collisions); } + /** + * Calculate the multiplicative inverse of a value (int). + * + * @param a the value + * @return the multiplicative inverse + */ + static long calcMultiplicativeInverse(long a) { + return BigInteger.valueOf(a).modPow( + BigInteger.valueOf((1 << 31) - 1), BigInteger.valueOf(1L << 32)).longValue(); + } + + /** + * Calculate the multiplicative inverse of a value (long). + * + * @param a the value + * @return the multiplicative inverse + */ + static long calcMultiplicativeInverseLong(long a) { + BigInteger oneShift64 = BigInteger.valueOf(1).shiftLeft(64); + BigInteger oneShift63 = BigInteger.valueOf(1).shiftLeft(63); + return BigInteger.valueOf(a).modPow( + oneShift63.subtract(BigInteger.ONE), + oneShift64).longValue(); + } /** * Store a random file to be analyzed by the Diehard test. */ @@ -315,7 +340,7 @@ long getCollisionCount() { BitSet set = new BitSet(); BitSet neg = new BitSet(); long collisions = 0; - long t = System.currentTimeMillis(); + long t = System.nanoTime(); for (int i = Integer.MIN_VALUE; i != Integer.MAX_VALUE; i++) { int x = hash(i); if (x >= 0) { @@ -333,8 +358,8 @@ long getCollisionCount() { } } if ((i & 0xfffff) == 0) { - long n = System.currentTimeMillis(); - if (n - t > 5000) { + long n = System.nanoTime(); + if (n - t > TimeUnit.SECONDS.toNanos(5)) { System.out.println(Integer.toHexString(constant) + " " + Integer.toHexString(i) + " collisions: " + collisions); t = n; diff --git a/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java b/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java new file mode 100644 index 0000000000..6dd2aba472 --- /dev/null +++ b/h2/src/test/org/h2/test/store/CalculateHashConstantLong.java @@ -0,0 +1,435 @@ +/* + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.store; + +import java.io.File; +import java.io.FileOutputStream; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import org.h2.security.AES; + +/** + * Calculate the constant for the secondary hash function, so that the hash + * function mixes the input bits as much as possible. + */ +public class CalculateHashConstantLong implements Runnable { + + private static BitSet primeNumbers = new BitSet(); + private static long[] randomValues; + private static AtomicInteger high = new AtomicInteger(0x20); + private static Set candidates = + Collections.synchronizedSet(new HashSet()); + + private long constant; + private int[] fromTo = new int[64 * 64]; + + private final AES aes = new AES(); + { + aes.setKey("Hello World Hallo Welt".getBytes()); + } + private final byte[] data = new byte[16]; + + /** + * Run just this test. + * + * @param args ignored + */ + public static void main(String... 
args) throws Exception { + for (int i = 0x0; i < 0x10000; i++) { + if (BigInteger.valueOf(i).isProbablePrime(20)) { + primeNumbers.set(i); + } + } + randomValues = getRandomValues(1000, 1); + Random r = new Random(1); + for (int i = 0; i < randomValues.length; i++) { + randomValues[i] = r.nextInt(); + } + printQuality(new CalculateHashConstantLong() { + @Override + public long hash(long x) { + return secureHash(x); + } + @Override + public String toString() { + return "AES"; + } + }, randomValues); + // Quality of AES + // Dependencies: 15715..16364 + // Avalanche: 31998 + // AvalancheSum: 3199841 + // Effect: 49456..50584 + + printQuality(new CalculateHashConstantLong() { + @Override + public long hash(long x) { + x = (x ^ (x >>> 30)) * 0xbf58476d1ce4e5b9L; + x = (x ^ (x >>> 27)) * 0x94d049bb133111ebL; + return x ^ (x >>> 31); + } + @Override + public String toString() { + return "Test"; + } + }, randomValues); + // Quality of Test + // Dependencies: 14693..16502 + // Avalanche: 31996 + // AvalancheSum: 3199679 + // Effect: 49437..50537 + + Thread[] threads = new Thread[8]; + for (int i = 0; i < 8; i++) { + threads[i] = new Thread(new CalculateHashConstantLong()); + threads[i].start(); + } + for (int i = 0; i < 8; i++) { + threads[i].join(); + } + + int finalCount = 10000; + long[] randomValues = getRandomValues(finalCount, 10); + + CalculateHashConstantLong test; + int[] minMax; + test = new CalculateHashConstantLong(); + long best = 0; + int dist = Integer.MAX_VALUE; + for (long i : candidates) { + test.constant = i; + System.out.println(); + System.out.println("Constant: 0x" + Long.toHexString(i)); + minMax = test.getDependencies(test, randomValues); + System.out.println("Dependencies: " + minMax[0] + ".." 
+ minMax[1]); + int d = minMax[1] - minMax[0]; + int av = 0; + for (int j = 0; j < 100; j++) { + av += test.getAvalanche(test, randomValues[j]); + } + System.out.println("AvalancheSum: " + av); + minMax = test.getEffect(test, finalCount * 10, 11); + System.out.println("Effect: " + minMax[0] + ".." + minMax[1]); + d += minMax[1] - minMax[0]; + if (d < dist) { + dist = d; + best = i; + } + } + System.out.println(); + System.out.println("Best constant: 0x" + Long.toHexString(best)); + test.constant = best; + long collisions = test.getCollisionCount(); + System.out.println("Collisions: " + collisions); + } + + private static void printQuality(CalculateHashConstantLong test, long[] randomValues) { + int finalCount = randomValues.length * 10; + System.out.println("Quality of " + test); + int[] minMax; + int av = 0; + minMax = test.getDependencies(test, randomValues); + System.out.println("Dependencies: " + minMax[0] + ".." + minMax[1]); + av = 0; + for (int j = 0; j < 100; j++) { + av += test.getAvalanche(test, randomValues[j]); + } + System.out.println("Avalanche: " + (av / 100)); + System.out.println("AvalancheSum: " + av); + minMax = test.getEffect(test, finalCount * 10, 11); + System.out.println("Effect: " + minMax[0] + ".." + minMax[1]); + System.out.println("ok=" + test.testCandidate()); + } + + /** + * Store a random file to be analyzed by the Diehard test. 
+ */ + void storeRandomFile() throws Exception { + File f = new File(System.getProperty("user.home") + "/temp/rand.txt"); + FileOutputStream out = new FileOutputStream(f); + CalculateHashConstantLong test = new CalculateHashConstantLong(); + // Random r = new Random(1); + byte[] buff = new byte[8]; + // tt.constant = 0x29a907; + for (int i = 0; i < 10000000 / 8; i++) { + long y = test.hash(i); + // int y = r.nextInt(); + writeLong(buff, 0, y); + out.write(buff); + } + out.close(); + } + + private static long[] getRandomValues(int count, int seed) { + long[] values = new long[count]; + Random r = new Random(seed); + for (int i = 0; i < count; i++) { + values[i] = r.nextLong(); + } + return values; + } + + @Override + public void run() { + while (true) { + int currentHigh = high.getAndIncrement(); + // if (currentHigh > 0x2d) { + if (currentHigh > 0xffff) { + break; + } + System.out.println("testing " + Integer.toHexString(currentHigh) + "...."); + addCandidates(currentHigh); + } + } + + private void addCandidates(long currentHigh) { + for (int low = 0; low <= 0xffff; low++) { + // the lower 16 bits don't have to be a prime number + // but it seems that's a good restriction + if (!primeNumbers.get(low)) { + continue; + } + long i = (currentHigh << 48) | ((long) low << 32) | (currentHigh << 16) | low; + constant = i; + if (!testCandidate()) { + continue; + } + System.out.println(Long.toHexString(i) + + " hit " + i); + candidates.add(i); + } + } + + private boolean testCandidate() { + // after one bit changes in the input, + // on average 32 bits of the output change + int av = getAvalanche(this, 0); + if (Math.abs(av - 32000) > 1000) { + return false; + } + av = getAvalanche(this, 0xffffffffffffffffL); + if (Math.abs(av - 32000) > 1000) { + return false; + } + long es = getEffectSquare(this, randomValues); + if (es > 1100000) { + System.out.println("fail at a " + es); + return false; + } + int[] minMax = getEffect(this, 10000, 1); + if (!isWithin(4700, 5300, minMax)) 
{ + System.out.println("fail at b " + minMax[0] + " " + minMax[1]); + return false; + } + minMax = getDependencies(this, randomValues); + if (!isWithin(14500, 17000, minMax)) { + System.out.println("fail at c " + minMax[0] + " " + minMax[1]); + return false; + } + return true; + } + + long getCollisionCount() { + // TODO need a way to check this + return 0; + } + + private static boolean isWithin(int min, int max, int[] range) { + return range[0] >= min && range[1] <= max; + } + + /** + * Calculate how much the bit changes (output bits that change if an input + * bit is changed) are independent of each other. + * + * @param h the hash object + * @param values the values to test with + * @return the minimum and maximum number of output bits that are changed in + * combination with another output bit + */ + int[] getDependencies(CalculateHashConstantLong h, long[] values) { + Arrays.fill(fromTo, 0); + for (long x : values) { + for (int shift = 0; shift < 64; shift++) { + long x1 = h.hash(x); + long x2 = h.hash(x ^ (1L << shift)); + long x3 = x1 ^ x2; + for (int s = 0; s < 64; s++) { + if ((x3 & (1L << s)) != 0) { + for (int s2 = 0; s2 < 64; s2++) { + if (s == s2) { + continue; + } + if ((x3 & (1L << s2)) != 0) { + fromTo[s * 64 + s2]++; + } + } + } + } + } + } + int a = Integer.MAX_VALUE, b = Integer.MIN_VALUE; + for (int x : fromTo) { + if (x == 0) { + continue; + } + if (x < a) { + a = x; + } + if (x > b) { + b = x; + } + } + return new int[] {a, b}; + } + + /** + * Calculate the number of bits that change if a single bit is changed + * multiplied by 1000 (expected: 16000 +/- 5%). 
+ * + * @param h the hash object + * @param value the base value + * @return the number of bit changes multiplied by 1000 + */ + int getAvalanche(CalculateHashConstantLong h, long value) { + int changedBitsSum = 0; + for (int i = 0; i < 64; i++) { + long x = value ^ (1L << i); + for (int shift = 0; shift < 64; shift++) { + long x1 = h.hash(x); + long x2 = h.hash(x ^ (1L << shift)); + long x3 = x1 ^ x2; + changedBitsSum += Long.bitCount(x3); + } + } + return changedBitsSum * 1000 / 64 / 64; + } + + /** + * Calculate the sum of the square of the distance to the expected + * probability that an output bit changes if an input bit is changed. The + * lower the value, the better. + * + * @param h the hash object + * @param values the values to test with + * @return sum(distance^2) + */ + long getEffectSquare(CalculateHashConstantLong h, long[] values) { + Arrays.fill(fromTo, 0); + int total = 0; + for (long x : values) { + for (int shift = 0; shift < 64; shift++) { + long x1 = h.hash(x); + long x2 = h.hash(x ^ (1L << shift)); + long x3 = x1 ^ x2; + for (int s = 0; s < 64; s++) { + if ((x3 & (1L << s)) != 0) { + fromTo[shift * 64 + s]++; + total++; + } + } + } + } + long sqDist = 0; + int expected = total / 64 / 64; + for (int x : fromTo) { + int dist = Math.abs(x - expected); + sqDist += dist * dist; + } + return sqDist; + } + + /** + * Calculate if the bit changes (that an output bit changes if an input + * bit is changed) are within a certain range. 
+ * + * @param h the hash object + * @param count the number of values to test + * @param seed the random seed + * @return the minimum and maximum value of all input-to-output bit changes + */ + int[] getEffect(CalculateHashConstantLong h, int count, int seed) { + Random r = new Random(); + r.setSeed(seed); + Arrays.fill(fromTo, 0); + for (int i = 0; i < count; i++) { + long x = r.nextLong(); + for (int shift = 0; shift < 64; shift++) { + long x1 = h.hash(x); + long x2 = h.hash(x ^ (1L << shift)); + long x3 = x1 ^ x2; + for (int s = 0; s < 64; s++) { + if ((x3 & (1L << s)) != 0) { + fromTo[shift * 64 + s]++; + } + } + } + } + int a = Integer.MAX_VALUE, b = Integer.MIN_VALUE; + for (int x : fromTo) { + if (x < a) { + a = x; + } + if (x > b) { + b = x; + } + } + return new int[] {a, b}; + } + + /** + * The hash method. + * + * @param x the input + * @return the output + */ + long hash(long x) { + x = ((x >>> 32) ^ x) * constant; + x = ((x >>> 32) ^ x) * constant; + x = (x >>> 32) ^ x; + return x; + } + + /** + * Calculate a hash using AES. 
+ * + * @param x the input + * @return the output + */ + long secureHash(long x) { + writeLong(data, 0, x); + aes.encrypt(data, 0, 16); + return readLong(data, 0); + } + + private static void writeLong(byte[] buff, int pos, long x) { + writeInt(buff, pos, (int) (x >>> 32)); + writeInt(buff, pos + 4, (int) x); + } + + private static void writeInt(byte[] buff, int pos, int x) { + buff[pos++] = (byte) (x >> 24); + buff[pos++] = (byte) (x >> 16); + buff[pos++] = (byte) (x >> 8); + buff[pos++] = (byte) x; + } + + private static long readLong(byte[] buff, int pos) { + return (((long) readInt(buff, pos)) << 32) | (readInt(buff, pos + 4) & 0xffffffffL); + } + + private static int readInt(byte[] buff, int pos) { + return (buff[pos++] << 24) + ((buff[pos++] & 0xff) << 16) + + ((buff[pos++] & 0xff) << 8) + (buff[pos] & 0xff); + } + +} diff --git a/h2/src/test/org/h2/test/store/FreeSpaceList.java b/h2/src/test/org/h2/test/store/FreeSpaceList.java index 57d197d69a..b6cb3e9031 100644 --- a/h2/src/test/org/h2/test/store/FreeSpaceList.java +++ b/h2/src/test/org/h2/test/store/FreeSpaceList.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -26,7 +26,7 @@ public class FreeSpaceList { */ private final int blockSize; - private List freeSpaceList = new ArrayList(); + private List freeSpaceList = new ArrayList<>(); public FreeSpaceList(int firstFreeBlock, int blockSize) { this.firstFreeBlock = firstFreeBlock; @@ -61,7 +61,7 @@ public synchronized long allocate(int length) { return result * blockSize; } } - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Could not find a free page to allocate"); } @@ -85,12 +85,12 @@ public synchronized void markUsed(long pos, int length) { i++; } if (found == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Cannot find spot to mark as used in free list"); } if (start + required > found.start + found.length) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Runs over edge of free space"); } @@ -136,7 +136,7 @@ public synchronized void free(long pos, int length) { i++; } if (found == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Cannot find spot to mark as unused in free list"); } @@ -172,7 +172,7 @@ public synchronized void free(long pos, int length) { private int getBlockCount(int length) { if (length <= 0) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space invalid length"); } return MathUtils.roundUpInt(length, blockSize) / blockSize; diff --git a/h2/src/test/org/h2/test/store/FreeSpaceTree.java b/h2/src/test/org/h2/test/store/FreeSpaceTree.java index 800a434f60..07931a9834 100644 --- a/h2/src/test/org/h2/test/store/FreeSpaceTree.java +++ b/h2/src/test/org/h2/test/store/FreeSpaceTree.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -28,7 +28,7 @@ public class FreeSpaceTree { /** * The list of free space. */ - private TreeSet freeSpace = new TreeSet(); + private TreeSet freeSpace = new TreeSet<>(); public FreeSpaceTree(int firstFreeBlock, int blockSize) { this.firstFreeBlock = firstFreeBlock; @@ -85,7 +85,7 @@ public synchronized void markUsed(long pos, int length) { BlockRange x = new BlockRange(start, blocks); BlockRange prev = freeSpace.floor(x); if (prev == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space already marked"); } if (prev.start == start) { @@ -121,7 +121,7 @@ public synchronized void free(long pos, int length) { BlockRange x = new BlockRange(start, blocks); BlockRange next = freeSpace.ceiling(x); if (next == null) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space sentinel is missing"); } BlockRange prev = freeSpace.lower(x); @@ -156,7 +156,7 @@ private int getBlock(long pos) { private int getBlockCount(int length) { if (length <= 0) { - throw DataUtils.newIllegalStateException( + throw DataUtils.newMVStoreException( DataUtils.ERROR_INTERNAL, "Free space invalid length"); } return MathUtils.roundUpInt(length, blockSize) / blockSize; @@ -189,7 +189,7 @@ public BlockRange(int start, int blocks) { @Override public int compareTo(BlockRange o) { - return start < o.start ? -1 : start > o.start ? 
1 : 0; + return Integer.compare(start, o.start); } @Override diff --git a/h2/src/test/org/h2/test/store/RowDataType.java b/h2/src/test/org/h2/test/store/RowDataType.java index 89489db402..ac4611f294 100644 --- a/h2/src/test/org/h2/test/store/RowDataType.java +++ b/h2/src/test/org/h2/test/store/RowDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -8,28 +8,31 @@ import java.nio.ByteBuffer; import org.h2.mvstore.DataUtils; import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; import org.h2.mvstore.type.DataType; /** * A row type. */ -public class RowDataType implements DataType { +public class RowDataType extends BasicDataType { - static final String PREFIX = "org.h2.test.store.row"; - - private final DataType[] types; + private final DataType[] types; + @SuppressWarnings("unchecked") RowDataType(DataType[] types) { this.types = types; } @Override - public int compare(Object a, Object b) { - if (a == b) { + public Object[][] createStorage(int size) { + return new Object[size][]; + } + + @Override + public int compare(Object[] ax, Object[] bx) { + if (ax == bx) { return 0; } - Object[] ax = (Object[]) a; - Object[] bx = (Object[]) b; int al = ax.length; int bl = bx.length; int len = Math.min(al, bl); @@ -48,8 +51,7 @@ public int compare(Object a, Object b) { } @Override - public int getMemory(Object obj) { - Object[] x = (Object[]) obj; + public int getMemory(Object[] x) { int len = x.length; int memory = 0; for (int i = 0; i < len; i++) { @@ -58,20 +60,6 @@ public int getMemory(Object obj) { return memory; } - @Override - public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - obj[i] 
= read(buff); - } - } - - @Override - public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { - for (int i = 0; i < len; i++) { - write(buff, obj[i]); - } - } - @Override public Object[] read(ByteBuffer buff) { int len = DataUtils.readVarInt(buff); @@ -83,13 +71,11 @@ public Object[] read(ByteBuffer buff) { } @Override - public void write(WriteBuffer buff, Object obj) { - Object[] x = (Object[]) obj; + public void write(WriteBuffer buff, Object[] x) { int len = x.length; buff.putVarInt(len); for (int i = 0; i < len; i++) { types[i].write(buff, x[i]); } } - } diff --git a/h2/src/test/org/h2/test/store/SequenceMap.java b/h2/src/test/org/h2/test/store/SequenceMap.java index f5f5293548..aa94a5f99c 100644 --- a/h2/src/test/org/h2/test/store/SequenceMap.java +++ b/h2/src/test/org/h2/test/store/SequenceMap.java @@ -1,19 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import java.util.AbstractSet; -import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Set; import org.h2.mvstore.MVMap; -import org.h2.mvstore.MVStore; +import org.h2.mvstore.type.DataType; /** - * A custom map returning the keys and values values 1 .. 10. + * A custom map returning the keys and values 1 .. 10. 
*/ public class SequenceMap extends MVMap { @@ -27,13 +27,8 @@ public class SequenceMap extends MVMap { */ int max = 10; - public SequenceMap() { - super(null, null); - } - - @Override - public void init(MVStore store, HashMap config) { - super.init(store, config); + public SequenceMap(Map config, DataType keyType, DataType valueType) { + super(config, keyType, valueType); } @Override @@ -56,11 +51,6 @@ public Long next() { return Long.valueOf(x++); } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; } @@ -74,20 +64,11 @@ public int size() { /** * A builder for this class. */ - public static class Builder implements MapBuilder { - - /** - * Create a new builder. - */ - public Builder() { - // ignore - } - + public static class Builder extends MVMap.Builder { @Override - public SequenceMap create() { - return new SequenceMap(); + public SequenceMap create(Map config) { + return new SequenceMap(config, getKeyType(), getValueType()); } } - } diff --git a/h2/src/test/org/h2/test/store/TestBenchmark.java b/h2/src/test/org/h2/test/store/TestBenchmark.java index 9f52bbeee7..1f720479d5 100644 --- a/h2/src/test/org/h2/test/store/TestBenchmark.java +++ b/h2/src/test/org/h2/test/store/TestBenchmark.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -9,15 +9,22 @@ import java.sql.PreparedStatement; import java.sql.Statement; import java.util.Random; - +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.Task; /** * Tests performance and helps analyze bottlenecks. */ -public class TestBenchmark extends TestBase { +public class TestBenchmark extends TestDb { /** * Run just this test. @@ -25,13 +32,13 @@ public class TestBenchmark extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testConcurrency(); - ; // TODO this test is currently disabled test(true); @@ -42,6 +49,77 @@ public void test() throws Exception { test(false); } + private void testConcurrency() throws Exception { + // String fileName = getBaseDir() + "/" + getTestName(); + String fileName = "nioMemFS:/" + getTestName(); + FileUtils.delete(fileName); + MVStore store = new MVStore.Builder().cacheSize(16). 
+ fileName(fileName).open(); + MVMap map = store.openMap("test"); + byte[] data = new byte[1024]; + int count = 1000000; + for (int i = 0; i < count; i++) { + map.put(i, data); + } + store.close(); + for (int concurrency = 1024; concurrency > 0; concurrency /= 2) { + testConcurrency(fileName, concurrency, count); + testConcurrency(fileName, concurrency, count); + testConcurrency(fileName, concurrency, count); + } + FileUtils.delete(fileName); + } + + private void testConcurrency(String fileName, + int concurrency, final int count) throws Exception { + Thread.sleep(1000); + final MVStore store = new MVStore.Builder().cacheSize(256). + cacheConcurrency(concurrency). + fileName(fileName).open(); + int threadCount = 128; + final CountDownLatch wait = new CountDownLatch(1); + final AtomicInteger counter = new AtomicInteger(); + final AtomicBoolean stopped = new AtomicBoolean(); + Task[] tasks = new Task[threadCount]; + // Profiler prof = new Profiler().startCollecting(); + for (int i = 0; i < threadCount; i++) { + final int x = i; + Task t = new Task() { + @Override + public void call() throws Exception { + MVMap map = store.openMap("test"); + Random random = new Random(x); + wait.await(); + while (!stopped.get()) { + int key = random.nextInt(count); + byte[] data = map.get(key); + if (data.length > 1) { + counter.incrementAndGet(); + } + } + } + }; + t.execute("t" + i); + tasks[i] = t; + } + wait.countDown(); + try { + Thread.sleep(3000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + stopped.set(true); + for (Task t : tasks) { + t.get(); + } + // System.out.println(prof.getTop(5)); + String msg = "concurrency " + concurrency + + " threads " + threadCount + " requests: " + counter; + System.out.println(msg); + trace(msg); + store.close(); + } + private void test(boolean mvStore) throws Exception { // testInsertSelect(mvStore); // testBinary(mvStore); @@ -54,7 +132,8 @@ private void testCreateIndex(boolean mvStore) throws Exception { Statement stat; 
String url = "mvstore"; if (mvStore) { - url += ";MV_STORE=TRUE"; // ;COMPRESS=TRUE"; + // ;COMPRESS=TRUE"; + url += ";MV_STORE=TRUE"; } url = getURL(url, true); @@ -80,12 +159,12 @@ private void testCreateIndex(boolean mvStore) throws Exception { } } - long start = System.currentTimeMillis(); + long start = System.nanoTime(); // Profiler prof = new Profiler().startCollecting(); stat.execute("create index on test(data)"); // System.out.println(prof.getTop(5)); - System.out.println((System.currentTimeMillis() - start) + " " + System.out.println(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " " + (mvStore ? "mvstore" : "default")); conn.createStatement().execute("shutdown compact"); conn.close(); @@ -115,7 +194,7 @@ private void testBinary(boolean mvStore) throws Exception { int rowCount = 100; int readCount = 20 * rowCount; - long start = System.currentTimeMillis(); + long start = System.nanoTime(); for (int i = 0; i < rowCount; i++) { prep.setInt(1, i); @@ -133,12 +212,12 @@ private void testBinary(boolean mvStore) throws Exception { prep.executeQuery(); } - System.out.println((System.currentTimeMillis() - start) + " " + System.out.println(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " " + (mvStore ? "mvstore" : "default")); conn.close(); } - private void randomize(byte[] data, int i) { + private static void randomize(byte[] data, int i) { Random r = new Random(i); r.nextBytes(data); } @@ -173,7 +252,7 @@ private void testInsertSelect(boolean mvStore) throws Exception { conn.commit(); } } - long start = System.currentTimeMillis(); + long start = System.nanoTime(); prep = conn.prepareStatement("select * from test where id = ?"); for (int i = 0; i < readCount; i++) { @@ -181,7 +260,7 @@ private void testInsertSelect(boolean mvStore) throws Exception { prep.executeQuery(); } - System.out.println((System.currentTimeMillis() - start) + " " + System.out.println(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " " + (mvStore ? 
"mvstore" : "default")); conn.createStatement().execute("shutdown compact"); conn.close(); diff --git a/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java b/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java index 3a9b0bc711..4c4f4093c1 100644 --- a/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheConcurrentLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -23,7 +23,7 @@ public class TestCacheConcurrentLIRS extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -32,7 +32,9 @@ public void test() throws Exception { } private void testConcurrent() { - final CacheLongKeyLIRS test = new CacheLongKeyLIRS(100); + CacheLongKeyLIRS.Config cc = new CacheLongKeyLIRS.Config(); + cc.maxMemory = 100; + final CacheLongKeyLIRS test = new CacheLongKeyLIRS<>(cc); int threadCount = 8; final CountDownLatch wait = new CountDownLatch(1); final AtomicBoolean stopped = new AtomicBoolean(); diff --git a/h2/src/test/org/h2/test/store/TestCacheLIRS.java b/h2/src/test/org/h2/test/store/TestCacheLIRS.java index be5aa7292e..95b9c167e0 100644 --- a/h2/src/test/org/h2/test/store/TestCacheLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -12,7 +12,6 @@ import java.util.Random; import org.h2.dev.cache.CacheLIRS; import org.h2.test.TestBase; -import org.h2.util.New; /** * Tests the cache algorithm. @@ -25,7 +24,7 @@ public class TestCacheLIRS extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -79,24 +78,9 @@ private void testEdgeCases() { CacheLIRS test = createCache(1); test.put(1, 10, 100); assertEquals(0, test.size()); - try { - test.put(null, 10, 100); - fail(); - } catch (NullPointerException e) { - // expected - } - try { - test.put(1, null, 100); - fail(); - } catch (NullPointerException e) { - // expected - } - try { - test.setMaxMemory(0); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(NullPointerException.class, () -> test.put(null, 10, 100)); + assertThrows(NullPointerException.class, () -> test.put(1, null, 100)); + assertThrows(IllegalArgumentException.class, () -> test.setMaxMemory(0)); } private void testSize() { @@ -276,7 +260,7 @@ private void testPruneStack() { verify(test, "mem: 4 stack: 2 3 4 6 cold: non-resident: 5 0"); test.put(0, 0); test.put(1, 10); - // the the stack was not pruned, the following will fail + // the stack was not pruned, the following will fail verify(test, "mem: 5 stack: 1 0 2 3 4 cold: 1 non-resident: 6 5"); } @@ -450,8 +434,8 @@ private void testScanResistance() { Integer x = test.get(i); Integer y = test.peek(i); if (i < size / 2) { - assertTrue("i: " + i, x != null); - assertTrue("i: " + i, y != null); + assertNotNull("i: " + i, x); + assertNotNull("i: " + i, y); assertEquals(i * 10, x.intValue()); assertEquals(i * 10, y.intValue()); } else { @@ -470,7 +454,7 @@ private void testScanResistance() { for (int i = 0; i < size; i++) { Integer x = test.get(i); if (i < size / 2 || i == size - 1) { - assertTrue("i: " 
+ i, x != null); + assertNotNull("i: " + i, x); assertEquals(i * 10, x.intValue()); } else { assertNull(x); @@ -485,7 +469,7 @@ private void testRandomOperations() { Random r = new Random(1); for (int j = 0; j < 100; j++) { CacheLIRS test = createCache(size / 2); - HashMap good = New.hashMap(); + HashMap good = new HashMap<>(); for (int i = 0; i < 10000; i++) { int key = r.nextInt(size); int value = r.nextInt(); @@ -557,7 +541,7 @@ private void verify(CacheLIRS cache, String expected) { List cold = cache.keys(true, false); List nonResident = cache.keys(true, true); assertEquals(nonResident.size(), cache.sizeNonResident()); - HashSet hot = new HashSet(stack); + HashSet hot = new HashSet<>(stack); hot.removeAll(cold); hot.removeAll(nonResident); assertEquals(hot.size(), cache.sizeHot()); @@ -569,7 +553,7 @@ private void verify(CacheLIRS cache, String expected) { } private static CacheLIRS createCache(int maxSize) { - return new CacheLIRS(maxSize, 1, 0); + return new CacheLIRS<>(maxSize, 1, 0); } } diff --git a/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java b/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java index cfdfa64ed9..487f0d6c47 100644 --- a/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java +++ b/h2/src/test/org/h2/test/store/TestCacheLongKeyLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -10,10 +10,8 @@ import java.util.List; import java.util.Map.Entry; import java.util.Random; - import org.h2.mvstore.cache.CacheLongKeyLIRS; import org.h2.test.TestBase; -import org.h2.util.New; /** * Tests the cache algorithm. 
@@ -26,7 +24,7 @@ public class TestCacheLongKeyLIRS extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -48,28 +46,37 @@ private void testCache() { testRandomOperations(); } - private static void testRandomSmallCache() { + private void testRandomSmallCache() { Random r = new Random(1); for (int i = 0; i < 10000; i++) { int j = 0; StringBuilder buff = new StringBuilder(); - CacheLongKeyLIRS test = createCache(1 + r.nextInt(10)); + int maxSize = 1 + r.nextInt(10); + buff.append("size:").append(maxSize).append('\n'); + CacheLongKeyLIRS test = createCache(maxSize); for (; j < 30; j++) { - int key = r.nextInt(5); - switch (r.nextInt(3)) { - case 0: - int memory = r.nextInt(5) + 1; - buff.append("add ").append(key).append(' '). - append(memory).append('\n'); - test.put(key, j, memory); - break; - case 1: - buff.append("remove ").append(key).append('\n'); - test.remove(key); - break; - case 2: - buff.append("get ").append(key).append('\n'); - test.get(key); + String lastState = toString(test); + try { + int key = r.nextInt(5); + switch (r.nextInt(3)) { + case 0: + int memory = r.nextInt(5) + 1; + buff.append("add ").append(key).append(' '). 
+ append(memory).append('\n'); + test.put(key, j, memory); + break; + case 1: + buff.append("remove ").append(key).append('\n'); + test.remove(key); + break; + case 2: + buff.append("get ").append(key).append('\n'); + test.get(key); + } + verify(test, null); + } catch (Throwable ex) { + println(i + "\n" + buff + "\n" + lastState + "\n" + toString(test)); + throw ex; } } } @@ -79,18 +86,8 @@ private void testEdgeCases() { CacheLongKeyLIRS test = createCache(1); test.put(1, 10, 100); assertEquals(0, test.size()); - try { - test.put(1, null, 100); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - try { - test.setMaxMemory(0); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> test.put(1, null, 100)); + assertThrows(IllegalArgumentException.class, () -> test.setMaxMemory(0)); } private void testSize() { @@ -110,11 +107,12 @@ private void testSize() { test.put(j, j); } // for a cache of size 1000, - // there are 62 cold entries (about 6.25%). - assertEquals(62, test.size() - test.sizeHot()); + // there are 32 cold entries (about 1/32). 
+ assertEquals(32, test.size() - test.sizeHot()); // at most as many non-resident elements // as there are entries in the stack - assertEquals(968, test.sizeNonResident()); + assertEquals(1000, test.size()); + assertEquals(1000, test.sizeNonResident()); } private void verifyMapSize(int elements, int expectedMapSize) { @@ -165,22 +163,22 @@ private void testGetPutPeekRemove() { assertEquals(1, test.getMemory(5)); assertEquals(0, test.getMemory(4)); assertEquals(0, test.getMemory(100)); - assertNull(test.peek(4)); - assertNull(test.get(4)); + assertNotNull(test.peek(4)); + assertNotNull(test.get(4)); assertEquals(10, test.get(1).intValue()); assertEquals(20, test.get(2).intValue()); assertEquals(30, test.get(3).intValue()); - verify(test, "mem: 4 stack: 3 2 1 cold: 5 non-resident: 4"); + verify(test, "mem: 5 stack: 3 2 1 cold: 4 5 non-resident:"); assertEquals(50, test.get(5).intValue()); - verify(test, "mem: 4 stack: 5 3 2 1 cold: 5 non-resident: 4"); + verify(test, "mem: 5 stack: 5 3 2 1 cold: 5 4 non-resident:"); assertEquals(50, test.get(5).intValue()); - verify(test, "mem: 4 stack: 5 3 2 cold: 1 non-resident: 4"); + verify(test, "mem: 5 stack: 5 3 2 cold: 1 4 non-resident:"); // remove assertEquals(50, test.remove(5).intValue()); assertNull(test.remove(5)); - verify(test, "mem: 3 stack: 3 2 1 cold: non-resident: 4"); - assertNull(test.remove(4)); + verify(test, "mem: 4 stack: 3 2 1 cold: 4 non-resident:"); + assertNotNull(test.remove(4)); verify(test, "mem: 3 stack: 3 2 1 cold: non-resident:"); assertNull(test.remove(4)); verify(test, "mem: 3 stack: 3 2 1 cold: non-resident:"); @@ -196,7 +194,7 @@ private void testGetPutPeekRemove() { verify(test, "mem: 3 stack: 4 3 2 cold: non-resident: 1"); assertEquals(20, test.remove(2).intValue()); assertFalse(test.containsKey(1)); - assertNull(test.remove(1)); + assertEquals(10, test.remove(1).intValue()); assertFalse(test.containsKey(1)); verify(test, "mem: 2 stack: 4 3 cold: non-resident:"); test.put(1, 10); @@ -224,9 
+222,10 @@ private void testGetPutPeekRemove() { test.put(5, 50); assertTrue(test.containsValue(50)); verify(test, "mem: 4 stack: 5 4 3 2 cold: 5 non-resident: 1"); + // 1 was non-resident, so this should make it hot test.put(1, 10); - verify(test, "mem: 4 stack: 1 5 4 3 2 cold: 1 non-resident: 5"); - assertFalse(test.containsValue(50)); + verify(test, "mem: 4 stack: 1 5 4 3 cold: 2 non-resident: 5"); + assertTrue(test.containsValue(50)); test.remove(2); test.remove(3); test.remove(4); @@ -270,7 +269,7 @@ private void testPruneStack() { verify(test, "mem: 4 stack: 2 3 4 6 cold: non-resident: 5 0"); test.put(0, 0); test.put(1, 10); - // the the stack was not pruned, the following will fail + // the stack was not pruned, the following will fail verify(test, "mem: 5 stack: 1 0 2 3 4 cold: 1 non-resident: 6 5"); } @@ -323,8 +322,8 @@ private void testLimitHot() { test.put(i, 10 * i); } assertEquals(100, test.size()); - assertEquals(99, test.sizeNonResident()); - assertEquals(93, test.sizeHot()); + assertEquals(200, test.sizeNonResident()); + assertEquals(96, test.sizeHot()); } private void testLimitNonResident() { @@ -332,8 +331,8 @@ private void testLimitNonResident() { for (int i = 0; i < 20; i++) { test.put(i, 10 * i); } - verify(test, "mem: 4 stack: 19 18 17 16 3 2 1 " + - "cold: 19 non-resident: 18 17 16"); + verify(test, "mem: 4 stack: 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 " + + "cold: 19 non-resident: 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 0"); } private void testLimitMemory() { @@ -344,10 +343,10 @@ private void testLimitMemory() { verify(test, "mem: 4 stack: 4 3 2 1 cold: 4 non-resident: 0"); assertTrue("" + test.getUsedMemory(), test.getUsedMemory() <= 4); test.put(6, 60, 3); - verify(test, "mem: 4 stack: 6 3 cold: 6 non-resident:"); + verify(test, "mem: 4 stack: 6 4 3 cold: 6 non-resident: 2 1 4 0"); assertTrue("" + test.getUsedMemory(), test.getUsedMemory() <= 4); test.put(7, 70, 3); - verify(test, "mem: 4 stack: 7 6 3 cold: 7 non-resident: 6"); + 
verify(test, "mem: 4 stack: 7 6 4 3 cold: 7 non-resident: 6 2 1 4 0"); assertTrue("" + test.getUsedMemory(), test.getUsedMemory() <= 4); test.put(8, 80, 4); verify(test, "mem: 4 stack: 8 cold: non-resident:"); @@ -357,8 +356,8 @@ private void testLimitMemory() { private void testScanResistance() { boolean log = false; int size = 20; - // cache size 11 (10 hot, 1 cold) - CacheLongKeyLIRS test = createCache(size / 2 + 1); + // cache size 11 (10 hot, 2 cold) + CacheLongKeyLIRS test = createCache(size / 2 + 2); // init the cache with some dummy entries for (int i = 0; i < size; i++) { test.put(-i, -i * 10); @@ -369,7 +368,7 @@ private void testScanResistance() { test.put(i, i * 10); test.get(i); if (log) { - System.out.println("get " + i + " -> " + test); + println("get " + i + " -> " + test); } } verify(test, null); @@ -378,8 +377,8 @@ private void testScanResistance() { Integer x = test.get(i); Integer y = test.peek(i); if (i < size / 2) { - assertTrue("i: " + i, x != null); - assertTrue("i: " + i, y != null); + assertNotNull("i: " + i, x); + assertNotNull("i: " + i, y); assertEquals(i * 10, x.intValue()); assertEquals(i * 10, y.intValue()); } else { @@ -394,14 +393,13 @@ private void testScanResistance() { } verify(test, null); } - // ensure 0..9 are hot, 10..18 are not resident, 19 is cold + + // ensure 0..9 are hot, 10..17 are not resident, 18..19 are cold for (int i = 0; i < size; i++) { Integer x = test.get(i); - if (i < size / 2 || i == size - 1) { - assertTrue("i: " + i, x != null); + if (i < size / 2 || i == size - 1 || i == size - 2) { + assertNotNull("i: " + i, x); assertEquals(i * 10, x.intValue()); - } else { - assertNull(x); } verify(test, null); } @@ -413,7 +411,7 @@ private void testRandomOperations() { Random r = new Random(1); for (int j = 0; j < 100; j++) { CacheLongKeyLIRS test = createCache(size / 2); - HashMap good = New.hashMap(); + HashMap good = new HashMap<>(); for (int i = 0; i < 10000; i++) { int key = r.nextInt(size); int value = 
r.nextInt(); @@ -453,7 +451,7 @@ private void testRandomOperations() { } } - private static String toString(CacheLongKeyLIRS cache) { + private static String toString(CacheLongKeyLIRS cache) { StringBuilder buff = new StringBuilder(); buff.append("mem: " + cache.getUsedMemory()); buff.append(" stack:"); @@ -471,7 +469,7 @@ private static String toString(CacheLongKeyLIRS cache) { return buff.toString(); } - private void verify(CacheLongKeyLIRS cache, String expected) { + private void verify(CacheLongKeyLIRS cache, String expected) { if (expected != null) { String got = toString(cache); assertEquals(expected, got); @@ -485,7 +483,7 @@ private void verify(CacheLongKeyLIRS cache, String expected) { List cold = cache.keys(true, false); List nonResident = cache.keys(true, true); assertEquals(nonResident.size(), cache.sizeNonResident()); - HashSet hot = new HashSet(stack); + HashSet hot = new HashSet<>(stack); hot.removeAll(cold); hot.removeAll(nonResident); assertEquals(hot.size(), cache.sizeHot()); @@ -497,7 +495,11 @@ private void verify(CacheLongKeyLIRS cache, String expected) { } private static CacheLongKeyLIRS createCache(int maxSize) { - return new CacheLongKeyLIRS(maxSize, 1, 0); + CacheLongKeyLIRS.Config cc = new CacheLongKeyLIRS.Config(); + cc.maxMemory = maxSize; + cc.segmentCount = 1; + cc.stackMoveDistance = 0; + return new CacheLongKeyLIRS<>(cc); } } diff --git a/h2/src/test/org/h2/test/store/TestConcurrent.java b/h2/src/test/org/h2/test/store/TestConcurrent.java deleted file mode 100644 index b9a431b9df..0000000000 --- a/h2/src/test/org/h2/test/store/TestConcurrent.java +++ /dev/null @@ -1,776 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.store; - -import java.io.BufferedInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileOutputStream; -import java.io.InputStream; -import java.nio.channels.FileChannel; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.ConcurrentModificationException; -import java.util.Iterator; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -import org.h2.mvstore.DataUtils; -import org.h2.mvstore.MVMap; -import org.h2.mvstore.MVStore; -import org.h2.mvstore.WriteBuffer; -import org.h2.mvstore.type.ObjectDataType; -import org.h2.store.fs.FileChannelInputStream; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.util.New; -import org.h2.util.Task; - -/** - * Tests concurrently accessing a tree map store. - */ -public class TestConcurrent extends TestMVStore { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - FileUtils.createDirectories(getBaseDir()); - FileUtils.deleteRecursive("memFS:", false); - - testInterruptReopen(); - testConcurrentSaveCompact(); - testConcurrentDataType(); - testConcurrentAutoCommitAndChange(); - testConcurrentReplaceAndRead(); - testConcurrentChangeAndCompact(); - testConcurrentChangeAndGetVersion(); - testConcurrentFree(); - testConcurrentStoreAndRemoveMap(); - testConcurrentStoreAndClose(); - testConcurrentOnlineBackup(); - testConcurrentMap(); - testConcurrentIterate(); - testConcurrentWrite(); - testConcurrentRead(); - } - - private void testInterruptReopen() throws Exception { - String fileName = "retry:nio:" + getBaseDir() + "/testInterruptReopen.h3"; - FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder(). 
- fileName(fileName). - cacheSize(0). - open(); - final Thread mainThread = Thread.currentThread(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - mainThread.interrupt(); - Thread.sleep(10); - } - } - }; - try { - MVMap map = s.openMap("data"); - task.execute(); - for (int i = 0; i < 1000 && !task.isFinished(); i++) { - map.get(i % 1000); - map.put(i % 1000, new byte[1024]); - s.commit(); - } - } finally { - task.get(); - s.close(); - } - } - - private void testConcurrentSaveCompact() throws Exception { - String fileName = "memFS:testConcurrentSaveCompact"; - FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder(). - fileName(fileName). - cacheSize(0). - open(); - try { - s.setRetentionTime(0); - final MVMap dataMap = s.openMap("data"); - Task task = new Task() { - @Override - public void call() throws Exception { - int i = 0; - while (!stop) { - s.compact(100, 1024 * 1024); - dataMap.put(i % 1000, i * 10); - s.commit(); - i++; - } - } - }; - task.execute(); - for (int i = 0; i < 1000 && !task.isFinished(); i++) { - s.compact(100, 1024 * 1024); - dataMap.put(i % 1000, i * 10); - s.commit(); - } - task.get(); - } finally { - s.close(); - } - } - - private void testConcurrentDataType() throws InterruptedException { - final ObjectDataType type = new ObjectDataType(); - final Object[] data = new Object[]{ - null, - -1, - 1, - 10, - "Hello", - new Object[]{ new byte[]{(byte) -1, (byte) 1}, null}, - new Object[]{ new byte[]{(byte) 1, (byte) -1}, 10}, - new Object[]{ new byte[]{(byte) -1, (byte) 1}, 20L}, - new Object[]{ new byte[]{(byte) 1, (byte) -1}, 5}, - }; - Arrays.sort(data, new Comparator() { - @Override - public int compare(Object o1, Object o2) { - return type.compare(o1, o2); - } - }); - Task[] tasks = new Task[2]; - for (int i = 0; i < tasks.length; i++) { - tasks[i] = new Task() { - @Override - public void call() throws Exception { - Random r = new Random(); - WriteBuffer buff = new 
WriteBuffer(); - while (!stop) { - int a = r.nextInt(data.length); - int b = r.nextInt(data.length); - int comp; - if (r.nextBoolean()) { - comp = type.compare(a, b); - } else { - comp = -type.compare(b, a); - } - buff.clear(); - type.write(buff, a); - buff.clear(); - type.write(buff, b); - if (a == b) { - assertEquals(0, comp); - } else { - assertEquals(a > b ? 1 : -1, comp); - } - } - } - }; - tasks[i].execute(); - } - Thread.sleep(100); - for (Task t : tasks) { - t.get(); - } - } - - private void testConcurrentAutoCommitAndChange() throws InterruptedException { - String fileName = "memFS:testConcurrentChangeAndBackgroundCompact"; - FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder(). - fileName(fileName).pageSplitSize(1000). - open(); - try { - s.setRetentionTime(1000); - s.setAutoCommitDelay(1); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - s.compact(100, 1024 * 1024); - } - } - }; - final MVMap dataMap = s.openMap("data"); - final MVMap dataSmallMap = s.openMap("dataSmall"); - s.openMap("emptyMap"); - final AtomicInteger counter = new AtomicInteger(); - Task task2 = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - int i = counter.getAndIncrement(); - dataMap.put(i, i * 10); - dataSmallMap.put(i % 100, i * 10); - if (i % 100 == 0) { - dataSmallMap.clear(); - } - } - } - }; - task.execute(); - task2.execute(); - Thread.sleep(1); - for (int i = 0; !task.isFinished() && !task2.isFinished() && i < 1000; i++) { - MVMap map = s.openMap("d" + (i % 3)); - map.put(0, i); - s.commit(); - } - task.get(); - task2.get(); - for (int i = 0; i < counter.get(); i++) { - assertEquals(10 * i, dataMap.get(i).intValue()); - } - } finally { - s.close(); - } - } - - private void testConcurrentReplaceAndRead() throws InterruptedException { - final MVStore s = new MVStore.Builder().open(); - final MVMap map = s.openMap("data"); - for (int i = 0; i < 100; i++) { - map.put(i, i % 
100); - } - Task task = new Task() { - @Override - public void call() throws Exception { - int i = 0; - while (!stop) { - map.put(i % 100, i % 100); - i++; - if (i % 1000 == 0) { - s.commit(); - } - } - } - }; - task.execute(); - Thread.sleep(1); - for (int i = 0; !task.isFinished() && i < 1000000; i++) { - assertEquals(i % 100, map.get(i % 100).intValue()); - } - task.get(); - s.close(); - } - - private void testConcurrentChangeAndCompact() throws InterruptedException { - String fileName = "memFS:testConcurrentChangeAndBackgroundCompact"; - FileUtils.delete(fileName); - final MVStore s = new MVStore.Builder().fileName( - fileName). - pageSplitSize(10). - autoCommitDisabled().open(); - s.setRetentionTime(10000); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - s.compact(100, 1024 * 1024); - } - } - }; - task.execute(); - Task task2 = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - s.compact(100, 1024 * 1024); - } - } - }; - task2.execute(); - Thread.sleep(1); - for (int i = 0; !task.isFinished() && !task2.isFinished() && i < 1000; i++) { - MVMap map = s.openMap("d" + (i % 3)); - // MVMap map = s.openMap("d" + (i % 3), - // new MVMapConcurrent.Builder()); - map.put(0, i); - map.get(0); - s.commit(); - } - task.get(); - task2.get(); - s.close(); - } - - private void testConcurrentChangeAndGetVersion() throws InterruptedException { - for (int test = 0; test < 10; test++) { - final MVStore s = new MVStore.Builder(). 
- autoCommitDisabled().open(); - s.setVersionsToKeep(10); - final MVMap m = s.openMap("data"); - m.put(1, 1); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - m.put(1, 1); - s.commit(); - } - } - }; - task.execute(); - Thread.sleep(1); - for (int i = 0; i < 10000; i++) { - if (task.isFinished()) { - break; - } - for (int j = 0; j < 20; j++) { - m.put(1, 1); - s.commit(); - } - s.setVersionsToKeep(15); - long version = s.getCurrentVersion() - 1; - try { - m.openVersion(version); - } catch (IllegalArgumentException e) { - // ignore - } - s.setVersionsToKeep(20); - } - task.get(); - s.commit(); - s.close(); - } - FileUtils.deleteRecursive("memFS:", false); - } - - private void testConcurrentFree() throws InterruptedException { - String fileName = "memFS:testConcurrentFree.h3"; - for (int test = 0; test < 10; test++) { - FileUtils.delete(fileName); - final MVStore s1 = new MVStore.Builder(). - fileName(fileName).autoCommitDisabled().open(); - s1.setRetentionTime(0); - final int count = 200; - for (int i = 0; i < count; i++) { - MVMap m = s1.openMap("d" + i); - m.put(1, 1); - if (i % 2 == 0) { - s1.commit(); - } - } - s1.close(); - final MVStore s = new MVStore.Builder(). 
- fileName(fileName).autoCommitDisabled().open(); - s.setRetentionTime(0); - final ArrayList> list = New.arrayList(); - for (int i = 0; i < count; i++) { - MVMap m = s.openMap("d" + i); - list.add(m); - } - - final AtomicInteger counter = new AtomicInteger(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - int x = counter.getAndIncrement(); - if (x >= count) { - break; - } - MVMap m = list.get(x); - m.clear(); - s.removeMap(m); - } - } - }; - task.execute(); - Thread.sleep(1); - while (true) { - int x = counter.getAndIncrement(); - if (x >= count) { - break; - } - MVMap m = list.get(x); - m.clear(); - s.removeMap(m); - if (x % 5 == 0) { - s.commit(); - } - } - task.get(); - // this will mark old chunks as unused, - // but not remove (and overwrite) them yet - s.commit(); - // this will remove them, so we end up with - // one unused one, and one active one - MVMap m = s.openMap("dummy"); - m.put(1, 1); - s.commit(); - m.put(2, 2); - s.commit(); - - MVMap meta = s.getMetaMap(); - int chunkCount = 0; - for (String k : meta.keyList()) { - if (k.startsWith("chunk.")) { - chunkCount++; - } - } - assertTrue("" + chunkCount, chunkCount < 3); - s.close(); - } - FileUtils.deleteRecursive("memFS:", false); - } - - private void testConcurrentStoreAndRemoveMap() throws InterruptedException { - String fileName = "memFS:testConcurrentStoreAndRemoveMap.h3"; - final MVStore s = openStore(fileName); - int count = 200; - for (int i = 0; i < count; i++) { - MVMap m = s.openMap("d" + i); - m.put(1, 1); - } - final AtomicInteger counter = new AtomicInteger(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - counter.incrementAndGet(); - s.commit(); - } - } - }; - task.execute(); - Thread.sleep(1); - for (int i = 0; i < count || counter.get() < count; i++) { - MVMap m = s.openMap("d" + i); - m.put(1, 10); - s.removeMap(m); - if (task.isFinished()) { - break; - } - } - task.get(); - 
s.close(); - FileUtils.deleteRecursive("memFS:", false); - } - - private void testConcurrentStoreAndClose() throws InterruptedException { - String fileName = "memFS:testConcurrentStoreAndClose"; - for (int i = 0; i < 10; i++) { - FileUtils.delete(fileName); - final MVStore s = openStore(fileName); - final AtomicInteger counter = new AtomicInteger(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - s.setStoreVersion(counter.incrementAndGet()); - s.commit(); - } - } - }; - task.execute(); - while (counter.get() < 5) { - Thread.sleep(1); - } - try { - s.close(); - // sometimes closing works, in which case - // storing must fail at some point (not necessarily - // immediately) - for (int x = counter.get(), y = x; x <= y + 2; x++) { - Thread.sleep(1); - } - Exception e = task.getException(); - assertEquals(DataUtils.ERROR_CLOSED, - DataUtils.getErrorCode(e.getMessage())); - } catch (IllegalStateException e) { - // sometimes storing works, in which case - // closing must fail - assertEquals(DataUtils.ERROR_WRITING_FAILED, - DataUtils.getErrorCode(e.getMessage())); - task.get(); - } - s.close(); - } - FileUtils.deleteRecursive("memFS:", false); - } - - /** - * Test the concurrent map implementation. 
- */ - private void testConcurrentMap() throws InterruptedException { - final MVStore s = openStore(null); - final MVMap m = s.openMap("data"); - final int size = 20; - final Random rand = new Random(1); - Task task = new Task() { - @Override - public void call() throws Exception { - try { - while (!stop) { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 1); - } else { - m.remove(rand.nextInt(size)); - } - m.get(rand.nextInt(size)); - m.firstKey(); - m.lastKey(); - m.ceilingKey(5); - m.floorKey(5); - m.higherKey(5); - m.lowerKey(5); - for (Iterator it = m.keyIterator(null); - it.hasNext();) { - it.next(); - } - } - } catch (Exception e) { - e.printStackTrace(); - } - } - }; - task.execute(); - Thread.sleep(1); - for (int j = 0; j < 100; j++) { - for (int i = 0; i < 100; i++) { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 2); - } else { - m.remove(rand.nextInt(size)); - } - m.get(rand.nextInt(size)); - } - s.commit(); - Thread.sleep(1); - } - task.get(); - s.close(); - } - - private void testConcurrentOnlineBackup() throws Exception { - String fileName = getBaseDir() + "/onlineBackup.h3"; - String fileNameRestore = getBaseDir() + "/onlineRestore.h3"; - final MVStore s = openStore(fileName); - final MVMap map = s.openMap("test"); - final Random r = new Random(); - Task t = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - for (int i = 0; i < 10; i++) { - map.put(i, new byte[100 * r.nextInt(100)]); - } - s.commit(); - map.clear(); - s.commit(); - long len = s.getFileStore().size(); - if (len > 1024 * 1024) { - // slow down writing a lot - Thread.sleep(200); - } else if (len > 20 * 1024) { - // slow down writing - Thread.sleep(20); - } - } - } - }; - t.execute(); - for (int i = 0; i < 10; i++) { - // System.out.println("test " + i); - s.setReuseSpace(false); - byte[] buff = readFileSlowly(s.getFileStore().getFile(), - s.getFileStore().size()); - s.setReuseSpace(true); - FileOutputStream out = new 
FileOutputStream(fileNameRestore); - out.write(buff); - out.close(); - MVStore s2 = openStore(fileNameRestore); - MVMap test = s2.openMap("test"); - for (Integer k : test.keySet()) { - test.get(k); - } - s2.close(); - // let it compact - Thread.sleep(10); - } - t.get(); - s.close(); - } - - private static byte[] readFileSlowly(FileChannel file, long length) - throws Exception { - file.position(0); - InputStream in = new BufferedInputStream(new FileChannelInputStream( - file, false)); - ByteArrayOutputStream buff = new ByteArrayOutputStream(); - for (int j = 0; j < length; j++) { - int x = in.read(); - if (x < 0) { - break; - } - buff.write(x); - } - in.close(); - return buff.toByteArray(); - } - - private void testConcurrentIterate() { - MVStore s = new MVStore.Builder().pageSplitSize(3).open(); - s.setVersionsToKeep(100); - final MVMap map = s.openMap("test"); - final int len = 10; - final Random r = new Random(); - Task t = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - int x = r.nextInt(len); - if (r.nextBoolean()) { - map.remove(x); - } else { - map.put(x, r.nextInt(100)); - } - } - } - }; - t.execute(); - for (int k = 0; k < 10000; k++) { - Iterator it = map.keyIterator(r.nextInt(len)); - long old = s.getCurrentVersion(); - s.commit(); - while (map.getVersion() == old) { - Thread.yield(); - } - while (it.hasNext()) { - it.next(); - } - } - t.get(); - s.close(); - } - - - /** - * Test what happens on concurrent write. Concurrent write may corrupt the - * map, so that keys and values may become null. 
- */ - private void testConcurrentWrite() throws InterruptedException { - final AtomicInteger detected = new AtomicInteger(); - final AtomicInteger notDetected = new AtomicInteger(); - for (int i = 0; i < 10; i++) { - testConcurrentWrite(detected, notDetected); - } - // in most cases, it should be detected - assertTrue(notDetected.get() * 10 <= detected.get()); - } - - private void testConcurrentWrite(final AtomicInteger detected, - final AtomicInteger notDetected) throws InterruptedException { - final MVStore s = openStore(null); - final MVMap m = s.openMap("data"); - final int size = 20; - final Random rand = new Random(1); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - try { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 1); - } else { - m.remove(rand.nextInt(size)); - } - m.get(rand.nextInt(size)); - } catch (ConcurrentModificationException e) { - detected.incrementAndGet(); - } catch (NegativeArraySizeException e) { - notDetected.incrementAndGet(); - } catch (ArrayIndexOutOfBoundsException e) { - notDetected.incrementAndGet(); - } catch (IllegalArgumentException e) { - notDetected.incrementAndGet(); - } catch (NullPointerException e) { - notDetected.incrementAndGet(); - } - } - } - }; - task.execute(); - Thread.sleep(1); - for (int j = 0; j < 10; j++) { - for (int i = 0; i < 10; i++) { - try { - if (rand.nextBoolean()) { - m.put(rand.nextInt(size), 2); - } else { - m.remove(rand.nextInt(size)); - } - m.get(rand.nextInt(size)); - } catch (ConcurrentModificationException e) { - detected.incrementAndGet(); - } catch (NegativeArraySizeException e) { - notDetected.incrementAndGet(); - } catch (ArrayIndexOutOfBoundsException e) { - notDetected.incrementAndGet(); - } catch (IllegalArgumentException e) { - notDetected.incrementAndGet(); - } catch (NullPointerException e) { - notDetected.incrementAndGet(); - } - } - s.commit(); - Thread.sleep(1); - } - task.get(); - s.close(); - } - - private void 
testConcurrentRead() throws InterruptedException { - final MVStore s = openStore(null); - final MVMap m = s.openMap("data"); - final int size = 3; - int x = (int) s.getCurrentVersion(); - for (int i = 0; i < size; i++) { - m.put(i, x); - } - s.commit(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - long v = s.getCurrentVersion() - 1; - Map old = m.openVersion(v); - for (int i = 0; i < size; i++) { - Integer x = old.get(i); - if (x == null || (int) v != x) { - Map old2 = m.openVersion(v); - throw new AssertionError(x + "<>" + v + " at " + i + " " + old2); - } - } - } - } - }; - task.execute(); - Thread.sleep(1); - for (int j = 0; j < 100; j++) { - x = (int) s.getCurrentVersion(); - for (int i = 0; i < size; i++) { - m.put(i, x); - } - s.commit(); - Thread.sleep(1); - } - task.get(); - s.close(); - } - -} diff --git a/h2/src/test/org/h2/test/store/TestConcurrentLinkedList.java b/h2/src/test/org/h2/test/store/TestConcurrentLinkedList.java deleted file mode 100644 index 0aa0599b20..0000000000 --- a/h2/src/test/org/h2/test/store/TestConcurrentLinkedList.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.store; - -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -import org.h2.mvstore.ConcurrentArrayList; -import org.h2.test.TestBase; -import org.h2.util.Task; - -/** - * Test the concurrent linked list. - */ -public class TestConcurrentLinkedList extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestConcurrentLinkedList test = (TestConcurrentLinkedList) TestBase.createCaller().init(); - test.test(); - test.testPerformance(); - } - - @Override - public void test() throws Exception { - testRandomized(); - testConcurrent(); - } - - private void testPerformance() { - testPerformance(true); - testPerformance(false); - testPerformance(true); - testPerformance(false); - testPerformance(true); - testPerformance(false); - } - - private void testPerformance(final boolean stock) { - System.out.print(stock ? "stock " : "custom "); - long start = System.currentTimeMillis(); - // final ConcurrentLinkedList test = - // new ConcurrentLinkedList(); - final ConcurrentArrayList test = - new ConcurrentArrayList(); - final LinkedList x = new LinkedList(); - final AtomicInteger counter = new AtomicInteger(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - if (stock) { - synchronized (x) { - Integer y = x.peekFirst(); - if (y == null) { - counter.incrementAndGet(); - } - } - } else { - Integer y = test.peekFirst(); - if (y == null) { - counter.incrementAndGet(); - } - } - } - } - }; - task.execute(); - test.add(-1); - x.add(-1); - for (int i = 0; i < 2000000; i++) { - Integer value = Integer.valueOf(i & 63); - if (stock) { - synchronized (x) { - Integer f = x.peekLast(); - if (f != value) { - x.add(i); - } - } - Math.sin(i); - synchronized (x) { - if (x.peekFirst() != x.peekLast()) { - x.removeFirst(); - } - } - } else { - Integer f = test.peekLast(); - if (f != value) { - test.add(i); - } - Math.sin(i); - f = test.peekFirst(); - if (f != test.peekLast()) { - test.removeFirst(f); - } - } - } - task.get(); - System.out.println(System.currentTimeMillis() - start); - } - - private void testConcurrent() { - final ConcurrentArrayList test = new ConcurrentArrayList(); - // final ConcurrentRing test = new ConcurrentRing(); - final AtomicInteger counter = new AtomicInteger(); - final AtomicInteger size = new 
AtomicInteger(); - Task task = new Task() { - @Override - public void call() { - while (!stop) { - if (size.get() < 10) { - test.add(counter.getAndIncrement()); - size.getAndIncrement(); - } - } - } - }; - task.execute(); - for (int i = 0; i < 1000000;) { - Integer x = test.peekFirst(); - if (x == null) { - continue; - } - assertEquals(i, x.intValue()); - if (test.removeFirst(x)) { - size.getAndDecrement(); - i++; - } - } - task.get(); - } - - private void testRandomized() { - Random r = new Random(0); - for (int i = 0; i < 100; i++) { - ConcurrentArrayList test = new ConcurrentArrayList(); - // ConcurrentRing test = new ConcurrentRing(); - LinkedList x = new LinkedList(); - StringBuilder buff = new StringBuilder(); - for (int j = 0; j < 10000; j++) { - buff.append("[" + j + "] "); - int opType = r.nextInt(3); - switch (opType) { - case 0: { - int value = r.nextInt(100); - buff.append("add " + value + "\n"); - test.add(value); - x.add(value); - break; - } - case 1: { - Integer value = x.peek(); - if (value != null && (x.size() > 5 || r.nextBoolean())) { - buff.append("removeFirst\n"); - x.removeFirst(); - test.removeFirst(value); - } else { - buff.append("removeFirst -1\n"); - test.removeFirst(-1); - } - break; - } - case 2: { - Integer value = x.peekLast(); - if (value != null && (x.size() > 5 || r.nextBoolean())) { - buff.append("removeLast\n"); - x.removeLast(); - test.removeLast(value); - } else { - buff.append("removeLast -1\n"); - test.removeLast(-1); - } - break; - } - } - assertEquals(toString(x.iterator()), toString(test.iterator())); - if (x.isEmpty()) { - assertNull(test.peekFirst()); - assertNull(test.peekLast()); - } else { - assertEquals(x.peekFirst().intValue(), test.peekFirst().intValue()); - assertEquals(x.peekLast().intValue(), test.peekLast().intValue()); - } - } - } - } - - private static String toString(Iterator it) { - StringBuilder buff = new StringBuilder(); - while (it.hasNext()) { - buff.append(' ').append(it.next()); - } - return 
buff.toString(); - } - -} diff --git a/h2/src/test/org/h2/test/store/TestDataUtils.java b/h2/src/test/org/h2/test/store/TestDataUtils.java index 0a28828993..e6b2c4acaf 100644 --- a/h2/src/test/org/h2/test/store/TestDataUtils.java +++ b/h2/src/test/org/h2/test/store/TestDataUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -11,9 +11,9 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Random; - import org.h2.mvstore.Chunk; import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.WriteBuffer; import org.h2.test.TestBase; @@ -28,7 +28,7 @@ public class TestDataUtils extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -54,24 +54,24 @@ private static void testWriteBuffer() { private void testFletcher() { byte[] data = new byte[10000]; for (int i = 0; i < 10000; i += 1000) { - assertEquals(-1, DataUtils.getFletcher32(data, i)); + assertEquals(-1, DataUtils.getFletcher32(data, 0, i)); } Arrays.fill(data, (byte) 255); for (int i = 0; i < 10000; i += 1000) { - assertEquals(-1, DataUtils.getFletcher32(data, i)); + assertEquals(-1, DataUtils.getFletcher32(data, 0, i)); } for (int i = 0; i < 1000; i++) { for (int j = 0; j < 255; j++) { Arrays.fill(data, 0, i, (byte) j); data[i] = 0; - int a = DataUtils.getFletcher32(data, i); + int a = DataUtils.getFletcher32(data, 0, i); if (i % 2 == 1) { // add length: same as appending a 0 - int b = DataUtils.getFletcher32(data, i + 1); + int b = DataUtils.getFletcher32(data, 0, i + 1); assertEquals(a, b); } data[i] = 10; - int c = DataUtils.getFletcher32(data, i); + int c = DataUtils.getFletcher32(data, 0, i); assertEquals(a, c); } } @@ -79,16 +79,18 @@ private void testFletcher() { for (int i = 1; i < 255; i++) { Arrays.fill(data, (byte) i); for (int j = 0; j < 10; j += 2) { - int x = DataUtils.getFletcher32(data, j); + int x = DataUtils.getFletcher32(data, 0, j); assertTrue(x != last); last = x; } } Arrays.fill(data, (byte) 10); assertEquals(0x1e1e1414, - DataUtils.getFletcher32(data, 10000)); + DataUtils.getFletcher32(data, 0, 10000)); + assertEquals(0x1e3fa7ed, + DataUtils.getFletcher32("Fletcher32".getBytes(), 0, 10)); assertEquals(0x1e3fa7ed, - DataUtils.getFletcher32("Fletcher32".getBytes(), 10)); + DataUtils.getFletcher32("XFletcher32".getBytes(), 1, 10)); } private void testMap() { @@ -99,16 +101,34 @@ private void testMap() { DataUtils.appendMap(buff, "c", "1,2"); DataUtils.appendMap(buff, "d", "\"test\""); DataUtils.appendMap(buff, "e", "}"); - assertEquals(":,a:1,b:\",\",c:\"1,2\",d:\"\\\"test\\\"\",e:}", 
buff.toString()); + DataUtils.appendMap(buff, "name", "1:1\","); + String encoded = buff.toString(); + assertEquals(":,a:1,b:\",\",c:\"1,2\",d:\"\\\"test\\\"\",e:},name:\"1:1\\\",\"", encoded); - HashMap m = DataUtils.parseMap(buff.toString()); - assertEquals(6, m.size()); + HashMap m = DataUtils.parseMap(encoded); + assertEquals(7, m.size()); assertEquals("", m.get("")); assertEquals("1", m.get("a")); assertEquals(",", m.get("b")); assertEquals("1,2", m.get("c")); assertEquals("\"test\"", m.get("d")); assertEquals("}", m.get("e")); + assertEquals("1:1\",", m.get("name")); + assertEquals("1:1\",", DataUtils.getMapName(encoded)); + + buff.setLength(0); + DataUtils.appendMap(buff, "1", "1"); + DataUtils.appendMap(buff, "name", "2"); + DataUtils.appendMap(buff, "3", "3"); + encoded = buff.toString(); + assertEquals("2", DataUtils.parseMap(encoded).get("name")); + assertEquals("2", DataUtils.getMapName(encoded)); + + buff.setLength(0); + DataUtils.appendMap(buff, "name", "xx"); + encoded = buff.toString(); + assertEquals("xx", DataUtils.parseMap(encoded).get("name")); + assertEquals("xx", DataUtils.getMapName(encoded)); } private void testMapRandomized() { @@ -121,9 +141,9 @@ private void testMapRandomized() { } try { HashMap map = DataUtils.parseMap(buff.toString()); - assertFalse(map == null); + assertNotNull(map); // ok - } catch (IllegalStateException e) { + } catch (MVStoreException e) { // ok - but not another exception } } diff --git a/h2/src/test/org/h2/test/store/TestDefrag.java b/h2/src/test/org/h2/test/store/TestDefrag.java new file mode 100644 index 0000000000..b78bab536d --- /dev/null +++ b/h2/src/test/org/h2/test/store/TestDefrag.java @@ -0,0 +1,79 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.store; + +import static org.h2.engine.Constants.SUFFIX_MV_FILE; + +import java.io.File; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.text.NumberFormat; + +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Test off-line compaction procedure used by SHUTDOWN DEFRAG command + * + * @author Andrei Tokar + */ +public class TestDefrag extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return !config.memory && config.big && !config.ci; + } + + @Override + public void test() throws Exception { + String dbName = getTestName(); + deleteDb(dbName); + File dbFile = new File(getBaseDir(), dbName + SUFFIX_MV_FILE); + NumberFormat nf = NumberFormat.getInstance(); + try (Connection c = getConnection(dbName)) { + try (Statement st = c.createStatement()) { + st.execute("CREATE TABLE IF NOT EXISTS test (id INT PRIMARY KEY, txt varchar)" + + " AS SELECT x, x || SPACE(200) FROM SYSTEM_RANGE(1,10000000)"); + st.execute("checkpoint"); + } + long origSize = dbFile.length(); + String message = "before defrag: " + nf.format(origSize); + trace(message); + assertTrue(message, origSize > 4_000_000_000L); + try (Statement st = c.createStatement()) { + st.execute("shutdown defrag"); + } + } + long compactedSize = dbFile.length(); + String message = "after defrag: " + nf.format(compactedSize); + trace(message); + assertTrue(message, compactedSize < 400_000_000L); + + try (Connection c = getConnection(dbName + ";LAZY_QUERY_EXECUTION=1")) { + try (Statement st = c.createStatement()) { + ResultSet rs = st.executeQuery("SELECT * FROM test"); + int count = 0; + while (rs.next()) { + ++count; + assertEquals(count, rs.getInt(1)); + assertTrue(rs.getString(2).startsWith(count + " ")); + } + 
assertEquals(10_000_000, count); + } + } + deleteDb(dbName); + } +} diff --git a/h2/src/test/org/h2/test/store/TestFreeSpace.java b/h2/src/test/org/h2/test/store/TestFreeSpace.java index 74f02f04f7..c4867a4eab 100644 --- a/h2/src/test/org/h2/test/store/TestFreeSpace.java +++ b/h2/src/test/org/h2/test/store/TestFreeSpace.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.mvstore.FreeSpaceBitSet; import org.h2.test.TestBase; @@ -22,7 +23,7 @@ public class TestFreeSpace extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); testMemoryUsage(); testPerformance(); } @@ -35,7 +36,7 @@ public void test() throws Exception { private static void testPerformance() { for (int i = 0; i < 10; i++) { - long t = System.currentTimeMillis(); + long t = System.nanoTime(); FreeSpaceBitSet f = new FreeSpaceBitSet(0, 4096); // 75 ms @@ -55,7 +56,7 @@ private static void testPerformance() { for (int j = 0; j < 100000; j++) { f.allocate(4096 * 2); } - System.out.println(System.currentTimeMillis() - t); + System.out.println(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t)); } } diff --git a/h2/src/test/org/h2/test/store/TestImmutableArray.java b/h2/src/test/org/h2/test/store/TestImmutableArray.java index e627c7963c..9b40fdf404 100644 --- a/h2/src/test/org/h2/test/store/TestImmutableArray.java +++ b/h2/src/test/org/h2/test/store/TestImmutableArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -8,6 +8,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.dev.util.ImmutableArray2; import org.h2.test.TestBase; @@ -48,7 +49,7 @@ private static void testPerformance(final boolean immutable) { // ArrayList time 361 dummy: 60000000 System.out.print(immutable ? "immutable" : "ArrayList"); - long start = System.currentTimeMillis(); + long start = System.nanoTime(); int count = 20000000; Integer x = 1; int sum = 0; @@ -77,7 +78,7 @@ private static void testPerformance(final boolean immutable) { } } } else { - ArrayList test = new ArrayList(); + ArrayList test = new ArrayList<>(); for (int i = 0; i < count; i++) { if (i % 10 != 0) { test.add(test.size(), x); @@ -101,7 +102,7 @@ private static void testPerformance(final boolean immutable) { } } } - System.out.println(" time " + (System.currentTimeMillis() - start) + + System.out.println(" time " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " dummy: " + sum); } @@ -110,7 +111,7 @@ private void testRandomized() { for (int i = 0; i < 100; i++) { ImmutableArray2 test = ImmutableArray2.empty(); // ConcurrentRing test = new ConcurrentRing(); - ArrayList x = new ArrayList(); + ArrayList x = new ArrayList<>(); StringBuilder buff = new StringBuilder(); for (int j = 0; j < 1000; j++) { buff.append("[" + j + "] "); diff --git a/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java b/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java index f5a710dd4e..802949a8dd 100644 --- a/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java +++ b/h2/src/test/org/h2/test/store/TestKillProcessWhileWriting.java @@ -1,12 
+1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import java.util.Random; - import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; import org.h2.store.fs.FileUtils; @@ -30,7 +29,7 @@ public class TestKillProcessWhileWriting extends TestBase { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.big = true; - test.test(); + test.testFromMain(); } @Override @@ -47,6 +46,7 @@ public void test() throws Exception { fs.setPartialWrites(false); } } + FileUtils.delete("unstable:memFS:killProcess.h3"); } private void test(String fileName) throws Exception { diff --git a/h2/src/test/org/h2/test/store/TestMVRTree.java b/h2/src/test/org/h2/test/store/TestMVRTree.java index 48db526b25..4af60017df 100644 --- a/h2/src/test/org/h2/test/store/TestMVRTree.java +++ b/h2/src/test/org/h2/test/store/TestMVRTree.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -16,17 +16,20 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.Objects; import java.util.Random; + import javax.imageio.ImageIO; import javax.imageio.ImageWriter; import javax.imageio.stream.FileImageOutputStream; + import org.h2.mvstore.MVStore; import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.rtree.Spatial; +import org.h2.mvstore.db.SpatialKey; import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.New; /** * Tests the r-tree. @@ -39,14 +42,11 @@ public class TestMVRTree extends TestMVStore { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() { - FileUtils.deleteRecursive(getBaseDir(), true); - FileUtils.createDirectories(getBaseDir()); - testRemoveAll(); testRandomInsert(); testSpatialKey(); @@ -58,52 +58,47 @@ public void test() { } private void testRemoveAll() { - String fileName = getBaseDir() + "/testRemoveAll.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = new MVStore.Builder().fileName(fileName). 
- pageSplitSize(100).open(); - MVRTreeMap map = s.openMap("data", - new MVRTreeMap.Builder()); - Random r = new Random(1); - for (int i = 0; i < 1000; i++) { - float x = r.nextFloat() * 50, y = r.nextFloat() * 50; - SpatialKey k = new SpatialKey(i % 100, x, x + 2, y, y + 1); - map.put(k, "i:" + i); + try (MVStore s = new MVStore.Builder().fileName(fileName).pageSplitSize(100).open()) { + MVRTreeMap map = s.openMap("data", new MVRTreeMap.Builder<>()); + Random r = new Random(1); + for (int i = 0; i < 1000; i++) { + float x = r.nextFloat() * 50, y = r.nextFloat() * 50; + Spatial k = new SpatialKey(i % 100, x, x + 2, y, y + 1); + map.put(k, "i:" + i); + } + s.commit(); + map.clear(); } - s.commit(); - map.clear(); - s.close(); } private void testRandomInsert() { - String fileName = getBaseDir() + "/testMany.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = new MVStore.Builder().fileName(fileName). - pageSplitSize(100).open(); - MVRTreeMap map = s.openMap("data", - new MVRTreeMap.Builder()); - Random r = new Random(1); - for (int i = 0; i < 1000; i++) { - if (i % 100 == 0) { - r.setSeed(1); - } - float x = r.nextFloat() * 50, y = r.nextFloat() * 50; - SpatialKey k = new SpatialKey(i % 100, x, x + 2, y, y + 1); - map.put(k, "i:" + i); - if (i % 10 == 0) { - s.commit(); + try (MVStore s = new MVStore.Builder().fileName(fileName). 
+ pageSplitSize(100).open()) { + MVRTreeMap map = s.openMap("data", new MVRTreeMap.Builder<>()); + Random r = new Random(1); + for (int i = 0; i < 1000; i++) { + if (i % 100 == 0) { + r.setSeed(1); + } + float x = r.nextFloat() * 50, y = r.nextFloat() * 50; + Spatial k = new SpatialKey(i % 100, x, x + 2, y, y + 1); + map.put(k, "i:" + i); + if (i % 10 == 0) { + s.commit(); + } } } - s.close(); } private void testSpatialKey() { - SpatialKey a0 = new SpatialKey(0, 1, 2, 3, 4); - SpatialKey a1 = new SpatialKey(0, 1, 2, 3, 4); - SpatialKey b0 = new SpatialKey(1, 1, 2, 3, 4); - SpatialKey c0 = new SpatialKey(1, 1.1f, 2.2f, 3.3f, 4.4f); + Spatial a0 = new SpatialKey(0, 1, 2, 3, 4); + Spatial a1 = new SpatialKey(0, 1, 2, 3, 4); + Spatial b0 = new SpatialKey(1, 1, 2, 3, 4); + Spatial c0 = new SpatialKey(1, 1.1f, 2.2f, 3.3f, 4.4f); assertEquals(0, a0.hashCode()); assertEquals(1, b0.hashCode()); assertTrue(a0.equals(a0)); @@ -119,163 +114,149 @@ private void testSpatialKey() { private void testExample() { // create an in-memory store - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { - // open an R-tree map - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder()); + // open an R-tree map + MVRTreeMap r = s.openMap("data", new MVRTreeMap.Builder<>()); - // add two key-value pairs - // the first value is the key id (to make the key unique) - // then the min x, max x, min y, max y - r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left"); - r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right"); + // add two key-value pairs + // the first value is the key id (to make the key unique) + // then the min x, max x, min y, max y + r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left"); + r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right"); - // iterate over the intersecting keys - Iterator it = r.findIntersectingKeys( - new SpatialKey(0, 0f, 9f, 3f, 6f)); - for (SpatialKey k; it.hasNext();) { - k = it.next(); - // System.out.println(k + ": " + r.get(k)); - assertTrue(k 
!= null); + // iterate over the intersecting keys + Iterator it = r.findIntersectingKeys( + new SpatialKey(0, 0f, 9f, 3f, 6f)); + for (Spatial k; it.hasNext(); ) { + k = it.next(); + // System.out.println(k + ": " + r.get(k)); + assertNotNull(k); + } } - s.close(); } private void testMany() { - String fileName = getBaseDir() + "/testMany.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - // s.setMaxPageSize(50); - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). - valueType(StringDataType.INSTANCE)); - // r.setQuadraticSplit(true); - Random rand = new Random(1); int len = 1000; - // long t = System.currentTimeMillis(); - // Profiler prof = new Profiler(); - // prof.startCollecting(); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - r.add(k, "" + i); - if (i > 0 && (i % len / 10) == 0) { - s.commit(); - } - if (i > 0 && (i % 10000) == 0) { - render(r, getBaseDir() + "/test.png"); + try (MVStore s = openStore(fileName)) { + // s.setMaxPageSize(50); + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). 
+ valueType(StringDataType.INSTANCE)); + // r.setQuadraticSplit(true); + Random rand = new Random(1); + // long t = System.nanoTime(); + // Profiler prof = new Profiler(); + // prof.startCollecting(); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + r.add(k, "" + i); + if (i > 0 && (i % len / 10) == 0) { + s.commit(); + } + if (i > 0 && (i % 10000) == 0) { + render(r, getBaseDir() + "/test.png"); + } } } - // System.out.println(prof.getTop(5)); - // System.out.println("add: " + (System.currentTimeMillis() - t)); - s.close(); - s = openStore(fileName); - r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). - valueType(StringDataType.INSTANCE)); - // t = System.currentTimeMillis(); - rand = new Random(1); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - assertEquals("" + i, r.get(k)); - } - // System.out.println("query: " + (System.currentTimeMillis() - t)); - assertEquals(len, r.size()); - int count = 0; - for (SpatialKey k : r.keySet()) { - assertTrue(r.get(k) != null); - count++; - } - assertEquals(len, count); - // t = System.currentTimeMillis(); - // Profiler prof = new Profiler(); - // prof.startCollecting(); - rand = new Random(1); - for (int i = 0; i < len; i++) { - float x = rand.nextFloat(), y = rand.nextFloat(); - float p = (float) (rand.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(i, x - p, x + p, y - p, y + p); - r.remove(k); + try (MVStore s = openStore(fileName)) { + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). 
+ valueType(StringDataType.INSTANCE)); + Random rand = new Random(1); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + assertEquals("" + i, r.get(k)); + } + assertEquals(len, r.size()); + int count = 0; + for (Spatial k : r.keySet()) { + assertNotNull(r.get(k)); + count++; + } + assertEquals(len, count); + rand = new Random(1); + for (int i = 0; i < len; i++) { + float x = rand.nextFloat(), y = rand.nextFloat(); + float p = (float) (rand.nextFloat() * 0.000001); + Spatial k = new SpatialKey(i, x - p, x + p, y - p, y + p); + r.remove(k); + } + assertEquals(0, r.size()); } - assertEquals(0, r.size()); - s.close(); - // System.out.println(prof.getTop(5)); - // System.out.println("remove: " + (System.currentTimeMillis() - t)); } private void testSimple() { - String fileName = getBaseDir() + "/testTree.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - MVRTreeMap r = s.openMap("data", - new MVRTreeMap.Builder().dimensions(2). - valueType(StringDataType.INSTANCE)); + try (MVStore s = openStore(fileName)) { + MVRTreeMap r = s.openMap("data", + new MVRTreeMap.Builder().dimensions(2). + valueType(StringDataType.INSTANCE)); - add(r, "Bern", key(0, 46.57, 7.27, 124381)); - add(r, "Basel", key(1, 47.34, 7.36, 170903)); - add(r, "Zurich", key(2, 47.22, 8.33, 376008)); - add(r, "Lucerne", key(3, 47.03, 8.18, 77491)); - add(r, "Geneva", key(4, 46.12, 6.09, 191803)); - add(r, "Lausanne", key(5, 46.31, 6.38, 127821)); - add(r, "Winterthur", key(6, 47.30, 8.45, 102966)); - add(r, "St. 
Gallen", key(7, 47.25, 9.22, 73500)); - add(r, "Biel/Bienne", key(8, 47.08, 7.15, 51203)); - add(r, "Lugano", key(9, 46.00, 8.57, 54667)); - add(r, "Thun", key(10, 46.46, 7.38, 42623)); - add(r, "Bellinzona", key(11, 46.12, 9.01, 17373)); - add(r, "Chur", key(12, 46.51, 9.32, 33756)); - // render(r, getBaseDir() + "/test.png"); - ArrayList list = New.arrayList(); - for (SpatialKey x : r.keySet()) { - list.add(r.get(x)); - } - Collections.sort(list); - assertEquals("[Basel, Bellinzona, Bern, Biel/Bienne, Chur, Geneva, " + - "Lausanne, Lucerne, Lugano, St. Gallen, Thun, Winterthur, Zurich]", - list.toString()); + add(r, "Bern", key(0, 46.57, 7.27, 124381)); + add(r, "Basel", key(1, 47.34, 7.36, 170903)); + add(r, "Zurich", key(2, 47.22, 8.33, 376008)); + add(r, "Lucerne", key(3, 47.03, 8.18, 77491)); + add(r, "Geneva", key(4, 46.12, 6.09, 191803)); + add(r, "Lausanne", key(5, 46.31, 6.38, 127821)); + add(r, "Winterthur", key(6, 47.30, 8.45, 102966)); + add(r, "St. Gallen", key(7, 47.25, 9.22, 73500)); + add(r, "Biel/Bienne", key(8, 47.08, 7.15, 51203)); + add(r, "Lugano", key(9, 46.00, 8.57, 54667)); + add(r, "Thun", key(10, 46.46, 7.38, 42623)); + add(r, "Bellinzona", key(11, 46.12, 9.01, 17373)); + add(r, "Chur", key(12, 46.51, 9.32, 33756)); + // render(r, getBaseDir() + "/test.png"); + ArrayList list = new ArrayList<>(r.size()); + for (Spatial x : r.keySet()) { + list.add(r.get(x)); + } + Collections.sort(list); + assertEquals("[Basel, Bellinzona, Bern, Biel/Bienne, Chur, Geneva, " + + "Lausanne, Lucerne, Lugano, St. 
Gallen, Thun, Winterthur, Zurich]", + list.toString()); - SpatialKey k; - // intersection - list.clear(); - k = key(0, 47.34, 7.36, 0); - for (Iterator it = r.findIntersectingKeys(k); it.hasNext();) { - list.add(r.get(it.next())); - } - Collections.sort(list); - assertEquals("[Basel]", list.toString()); + // intersection + list.clear(); + Spatial k = key(0, 47.34, 7.36, 0); + for (Iterator it = r.findIntersectingKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + Collections.sort(list); + assertEquals("[Basel]", list.toString()); - // contains - list.clear(); - k = key(0, 47.34, 7.36, 0); - for (Iterator it = r.findContainedKeys(k); it.hasNext();) { - list.add(r.get(it.next())); - } - assertEquals(0, list.size()); - k = key(0, 47.34, 7.36, 171000); - for (Iterator it = r.findContainedKeys(k); it.hasNext();) { - list.add(r.get(it.next())); + // contains + list.clear(); + k = key(0, 47.34, 7.36, 0); + for (Iterator it = r.findContainedKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + assertEquals(0, list.size()); + k = key(0, 47.34, 7.36, 171000); + for (Iterator it = r.findContainedKeys(k); it.hasNext(); ) { + list.add(r.get(it.next())); + } + assertEquals("[Basel]", list.toString()); } - assertEquals("[Basel]", list.toString()); - - s.close(); } - private static void add(MVRTreeMap r, String name, SpatialKey k) { + private static void add(MVRTreeMap r, String name, Spatial k) { r.put(k, name); } - private static SpatialKey key(int id, double y, double x, int population) { + private static Spatial key(int id, double y, double x, int population) { float a = (float) ((int) x + (x - (int) x) * 5 / 3); float b = 50 - (float) ((int) y + (y - (int) y) * 5 / 3); float s = (float) Math.sqrt(population / 10000000.); - SpatialKey k = new SpatialKey(id, a - s, a + s, b - s, b + s); + Spatial k = new SpatialKey(id, a - s, a + s, b - s, b + s); return k; } @@ -293,23 +274,23 @@ private static void render(MVRTreeMap r, String fileName) { 
g2d.setColor(Color.BLACK); SpatialKey b = new SpatialKey(0, Float.MAX_VALUE, Float.MIN_VALUE, Float.MAX_VALUE, Float.MIN_VALUE); - for (SpatialKey x : r.keySet()) { + for (Spatial x : r.keySet()) { b.setMin(0, Math.min(b.min(0), x.min(0))); b.setMin(1, Math.min(b.min(1), x.min(1))); b.setMax(0, Math.max(b.max(0), x.max(0))); b.setMax(1, Math.max(b.max(1), x.max(1))); } // System.out.println(b); - for (SpatialKey x : r.keySet()) { + for (Spatial x : r.keySet()) { int[] rect = scale(b, x, width, height); g2d.drawRect(rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]); String s = r.get(x); g2d.drawChars(s.toCharArray(), 0, s.length(), rect[0], rect[1] - 4); } g2d.setColor(Color.red); - ArrayList list = New.arrayList(); - r.addNodeKeys(list, r.getRoot()); - for (SpatialKey x : list) { + ArrayList list = new ArrayList<>(); + r.addNodeKeys(list, r.getRootPage()); + for (Spatial x : list) { int[] rect = scale(b, x, width, height); g2d.drawRect(rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]); } @@ -322,7 +303,7 @@ private static void render(MVRTreeMap r, String fileName) { } } - private static int[] scale(SpatialKey b, SpatialKey x, int width, int height) { + private static int[] scale(Spatial b, Spatial x, int width, int height) { int[] rect = { (int) ((x.min(0) - b.min(0)) * (width * 0.9) / (b.max(0) - b.min(0)) + width * 0.05), @@ -342,117 +323,111 @@ private void testRandom() { } private void testRandomFind() { - MVStore s = openStore(null); - MVRTreeMap m = s.openMap("data", - new MVRTreeMap.Builder()); - int max = 100; - for (int x = 0; x < max; x++) { - for (int y = 0; y < max; y++) { - int id = x * max + y; - SpatialKey k = new SpatialKey(id, x, x, y, y); - m.put(k, id); - } - } - Random rand = new Random(1); - int operationCount = 1000; - for (int i = 0; i < operationCount; i++) { - int x1 = rand.nextInt(max), y1 = rand.nextInt(10); - int x2 = rand.nextInt(10), y2 = rand.nextInt(10); - int intersecting = Math.max(0, x2 - x1 + 1) * Math.max(0, y2 - y1 
+ 1); - int contained = Math.max(0, x2 - x1 - 1) * Math.max(0, y2 - y1 - 1); - SpatialKey k = new SpatialKey(0, x1, x2, y1, y2); - Iterator it = m.findContainedKeys(k); - int count = 0; - while (it.hasNext()) { - SpatialKey t = it.next(); - assertTrue(t.min(0) > x1); - assertTrue(t.min(1) > y1); - assertTrue(t.max(0) < x2); - assertTrue(t.max(1) < y2); - count++; + try (MVStore s = openStore(null)) { + MVRTreeMap m = s.openMap("data", new MVRTreeMap.Builder<>()); + int max = 100; + for (int x = 0; x < max; x++) { + for (int y = 0; y < max; y++) { + int id = x * max + y; + Spatial k = new SpatialKey(id, x, x, y, y); + m.put(k, id); + } } - assertEquals(contained, count); - it = m.findIntersectingKeys(k); - count = 0; - while (it.hasNext()) { - SpatialKey t = it.next(); - assertTrue(t.min(0) >= x1); - assertTrue(t.min(1) >= y1); - assertTrue(t.max(0) <= x2); - assertTrue(t.max(1) <= y2); - count++; + Random rand = new Random(1); + int operationCount = 1000; + for (int i = 0; i < operationCount; i++) { + int x1 = rand.nextInt(max), y1 = rand.nextInt(10); + int x2 = rand.nextInt(10), y2 = rand.nextInt(10); + int intersecting = Math.max(0, x2 - x1 + 1) * Math.max(0, y2 - y1 + 1); + int contained = Math.max(0, x2 - x1 - 1) * Math.max(0, y2 - y1 - 1); + Spatial k = new SpatialKey(0, x1, x2, y1, y2); + Iterator it = m.findContainedKeys(k); + int count = 0; + while (it.hasNext()) { + Spatial t = it.next(); + assertTrue(t.min(0) > x1); + assertTrue(t.min(1) > y1); + assertTrue(t.max(0) < x2); + assertTrue(t.max(1) < y2); + count++; + } + assertEquals(contained, count); + it = m.findIntersectingKeys(k); + count = 0; + while (it.hasNext()) { + Spatial t = it.next(); + assertTrue(t.min(0) >= x1); + assertTrue(t.min(1) >= y1); + assertTrue(t.max(0) <= x2); + assertTrue(t.max(1) <= y2); + count++; + } + assertEquals(intersecting, count); } - assertEquals(intersecting, count); } } private void testRandom(boolean quadraticSplit) { - String fileName = getBaseDir() + 
"/testRandom.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - - MVRTreeMap m = s.openMap("data", - new MVRTreeMap.Builder()); + try (MVStore s = openStore(fileName)) { + MVRTreeMap m = s.openMap("data", + new MVRTreeMap.Builder<>()); - m.setQuadraticSplit(quadraticSplit); - HashMap map = new HashMap(); - Random rand = new Random(1); - int operationCount = 10000; - int maxValue = 300; - for (int i = 0; i < operationCount; i++) { - int key = rand.nextInt(maxValue); - Random rk = new Random(key); - float x = rk.nextFloat(), y = rk.nextFloat(); - float p = (float) (rk.nextFloat() * 0.000001); - SpatialKey k = new SpatialKey(key, x - p, x + p, y - p, y + p); - String v = "" + rand.nextInt(); - Iterator it; - switch (rand.nextInt(5)) { - case 0: - log(i + ": put " + k + " = " + v + " " + m.size()); - m.put(k, v); - map.put(k, v); - break; - case 1: - log(i + ": remove " + k + " " + m.size()); - m.remove(k); - map.remove(k); - break; - case 2: { - p = (float) (rk.nextFloat() * 0.01); - k = new SpatialKey(key, x - p, x + p, y - p, y + p); - it = m.findIntersectingKeys(k); - while (it.hasNext()) { - SpatialKey n = it.next(); - String a = map.get(n); - assertFalse(a == null); + m.setQuadraticSplit(quadraticSplit); + HashMap map = new HashMap<>(); + Random rand = new Random(1); + int operationCount = 10000; + int maxValue = 300; + for (int i = 0; i < operationCount; i++) { + int key = rand.nextInt(maxValue); + Random rk = new Random(key); + float x = rk.nextFloat(), y = rk.nextFloat(); + float p = (float) (rk.nextFloat() * 0.000001); + Spatial k = new SpatialKey(key, x - p, x + p, y - p, y + p); + String v = "" + rand.nextInt(); + Iterator it; + switch (rand.nextInt(5)) { + case 0: + log(i + ": put " + k + " = " + v + " " + m.size()); + m.put(k, v); + map.put(k, v); + break; + case 1: + log(i + ": remove " + k + " " + m.size()); + m.remove(k); + map.remove(k); + break; + case 2: { + p = (float) 
(rk.nextFloat() * 0.01); + k = new SpatialKey(key, x - p, x + p, y - p, y + p); + it = m.findIntersectingKeys(k); + while (it.hasNext()) { + Spatial n = it.next(); + String a = map.get(n); + assertNotNull(a); + } + break; + } + case 3: { + p = (float) (rk.nextFloat() * 0.01); + k = new SpatialKey(key, x - p, x + p, y - p, y + p); + it = m.findContainedKeys(k); + while (it.hasNext()) { + Spatial n = it.next(); + String a = map.get(n); + assertNotNull(a); + } + break; + } + default: + String a = map.get(k); + String b = m.get(k); + assertTrue(Objects.equals(a, b)); + break; } - break; + assertEquals(map.size(), m.size()); } - case 3: { - p = (float) (rk.nextFloat() * 0.01); - k = new SpatialKey(key, x - p, x + p, y - p, y + p); - it = m.findContainedKeys(k); - while (it.hasNext()) { - SpatialKey n = it.next(); - String a = map.get(n); - assertFalse(a == null); - } - break; - } - default: - String a = map.get(k); - String b = m.get(k); - if (a == null || b == null) { - assertTrue(a == b); - } else { - assertEquals(a, b); - } - break; - } - assertEquals(map.size(), m.size()); } - s.close(); } - } diff --git a/h2/src/test/org/h2/test/store/TestMVStore.java b/h2/src/test/org/h2/test/store/TestMVStore.java index 4927e6e1b7..3d5072b4b1 100644 --- a/h2/src/test/org/h2/test/store/TestMVStore.java +++ b/h2/src/test/org/h2/test/store/TestMVStore.java @@ -1,27 +1,29 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; -import java.lang.Thread.UncaughtExceptionHandler; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; +import java.util.NoSuchElementException; import java.util.Random; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - import org.h2.mvstore.Chunk; import org.h2.mvstore.Cursor; import org.h2.mvstore.DataUtils; import org.h2.mvstore.FileStore; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; import org.h2.mvstore.OffHeapStore; import org.h2.mvstore.type.DataType; import org.h2.mvstore.type.ObjectDataType; @@ -29,7 +31,7 @@ import org.h2.store.fs.FilePath; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; +import org.h2.util.Utils; /** * Tests the MVStore. @@ -45,13 +47,11 @@ public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; test.config.big = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - FileUtils.createDirectories(getBaseDir()); testRemoveMapRollback(); testProvidedFileStoreNotOpenedAndClosed(); testVolatileMap(); @@ -84,6 +84,7 @@ public void test() throws Exception { testFileHeader(); testFileHeaderCorruption(); testIndexSkip(); + testIndexSkipReverse(); testMinMaxNextKey(); testStoreVersion(); testIterateOldVersion(); @@ -106,42 +107,45 @@ public void test() throws Exception { testRandom(); testKeyValueClasses(); testIterate(); + testIterateReverse(); testCloseTwice(); testSimple(); + testInvalidSettings(); // longer running tests testLargerThan2G(); } private void testRemoveMapRollback() { - MVStore store = new MVStore.Builder(). - open(); - MVMap map = store.openMap("test"); - map.put("1", "Hello"); - store.commit(); - store.removeMap(map); - store.rollback(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - // TODO the data should get back alive - assertNull(map.get("1")); - store.close(); - - String fileName = getBaseDir() + "/testRemoveMapRollback.h3"; - store = new MVStore.Builder(). + try (MVStore store = new MVStore.Builder(). + open()) { + MVMap map = store.openMap("test"); + map.put("1", "Hello"); + store.commit(); + store.removeMap(map); + store.rollback(); + assertTrue(store.hasMap("test")); + map = store.openMap("test"); + assertEquals("Hello", map.get("1")); + } + + FileUtils.createDirectories(getTestDir("")); + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore store = new MVStore.Builder(). autoCommitDisabled(). fileName(fileName). 
- open(); - map = store.openMap("test"); - map.put("1", "Hello"); - store.commit(); - store.removeMap(map); - store.rollback(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - // TODO the data should get back alive - assertNull(map.get("1")); - store.close(); + open()) { + MVMap map = store.openMap("test"); + map.put("1", "Hello"); + store.commit(); + store.removeMap(map); + store.rollback(); + assertTrue(store.hasMap("test")); + map = store.openMap("test"); + // the data will get back alive + assertEquals("Hello", map.get("1")); + } } private void testProvidedFileStoreNotOpenedAndClosed() { @@ -168,44 +172,46 @@ public void close() { } private void testVolatileMap() { - String fileName = getBaseDir() + "/testVolatile.h3"; - MVStore store = new MVStore.Builder(). + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore store = new MVStore.Builder(). fileName(fileName). - open(); - MVMap map = store.openMap("test"); - assertFalse(map.isVolatile()); - map.setVolatile(true); - assertTrue(map.isVolatile()); - map.put("1", "Hello"); - assertEquals("Hello", map.get("1")); - assertEquals(1, map.size()); - store.close(); - store = new MVStore.Builder(). + open()) { + MVMap map = store.openMap("test"); + assertFalse(map.isVolatile()); + map.setVolatile(true); + assertTrue(map.isVolatile()); + map.put("1", "Hello"); + assertEquals("Hello", map.get("1")); + assertEquals(1, map.size()); + } + try (MVStore store = new MVStore.Builder(). fileName(fileName). 
- open(); - assertTrue(store.hasMap("test")); - map = store.openMap("test"); - assertEquals(0, map.size()); - store.close(); + open()) { + assertTrue(store.hasMap("test")); + MVMap map = store.openMap("test"); + assertEquals(0, map.size()); + } } private void testEntrySet() { - MVStore s = new MVStore.Builder().open(); - MVMap map; - map = s.openMap("data"); - for (int i = 0; i < 20; i++) { - map.put(i, i * 10); - } - int next = 0; - for (Entry e : map.entrySet()) { - assertEquals(next, e.getKey().intValue()); - assertEquals(next * 10, e.getValue().intValue()); - next++; + try (MVStore s = new MVStore.Builder().open()) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 20; i++) { + map.put(i, i * 10); + } + int next = 0; + for (Entry e : map.entrySet()) { + assertEquals(next, e.getKey().intValue()); + assertEquals(next * 10, e.getValue().intValue()); + next++; + } } } private void testCompressEmptyPage() { - String fileName = getBaseDir() + "/testDeletedMap.h3"; + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); MVStore store = new MVStore.Builder(). cacheSize(100).fileName(fileName). compress(). 
@@ -222,7 +228,9 @@ private void testCompressEmptyPage() { } private void testCompressed() { - String fileName = getBaseDir() + "/testCompressed.h3"; + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + String data = new String(new char[1000]).replace((char) 0, 'x'); long lastSize = 0; for (int level = 0; level <= 2; level++) { FileUtils.delete(fileName); @@ -232,193 +240,183 @@ private void testCompressed() { } else if (level == 2) { builder.compressHigh(); } - MVStore s = builder.open(); - MVMap map = s.openMap("data"); - String data = new String(new char[1000]).replace((char) 0, 'x'); - for (int i = 0; i < 400; i++) { - map.put(data + i, data); + try (MVStore s = builder.open()) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + map.put(data + i, data); + } } - s.close(); long size = FileUtils.size(fileName); if (level > 0) { assertTrue(size < lastSize); } lastSize = size; - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("data"); - for (int i = 0; i < 400; i++) { - assertEquals(data, map.get(data + i)); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + assertEquals(data, map.get(data + i)); + } } - s.close(); } } private void testFileFormatExample() { - String fileName = getBaseDir() + "/testFileFormatExample.h3"; - MVStore s = MVStore.open(fileName); - MVMap map = s.openMap("data"); - for (int i = 0; i < 400; i++) { - map.put(i, "Hello"); - } - s.commit(); - for (int i = 0; i < 100; i++) { - map.put(0, "Hi"); + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = MVStore.open(fileName)) { + MVMap map = s.openMap("data"); + for (int i = 0; i < 400; i++) { + map.put(i, "Hello"); + } + s.commit(); + for (int i = 0; i < 100; i++) { + map.put(0, "Hi"); + } + s.commit(); } - s.commit(); - s.close(); // ;MVStoreTool.dump(fileName); } private void 
testMaxChunkLength() { - String fileName = getBaseDir() + "/testMaxChunkLength.h3"; - MVStore s = new MVStore.Builder().fileName(fileName).open(); - MVMap map = s.openMap("data"); - map.put(0, new byte[2 * 1024 * 1024]); - s.commit(); - map.put(1, new byte[10 * 1024]); - s.commit(); - MVMap meta = s.getMetaMap(); - Chunk c = Chunk.fromString(meta.get("chunk.1")); - assertTrue(c.maxLen < Integer.MAX_VALUE); - assertTrue(c.maxLenLive < Integer.MAX_VALUE); - s.close(); + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + MVMap map = s.openMap("data"); + map.put(0, new byte[2 * 1024 * 1024]); + s.commit(); + map.put(1, new byte[10 * 1024]); + s.commit(); + MVMap layout = s.getLayoutMap(); + Chunk c = Chunk.fromString(layout.get(DataUtils.META_CHUNK + "1")); + assertTrue(c.maxLen < Integer.MAX_VALUE); + assertTrue(c.maxLenLive < Integer.MAX_VALUE); + } } private void testCacheInfo() { - String fileName = getBaseDir() + "/testCloseMap.h3"; - MVStore s = new MVStore.Builder().fileName(fileName).cacheSize(2).open(); - assertEquals(2, s.getCacheSize()); - MVMap map; - map = s.openMap("data"); - byte[] data = new byte[1024]; - for (int i = 0; i < 1000; i++) { - map.put(i, data); - s.commit(); - if (i < 50) { - assertEquals(0, s.getCacheSizeUsed()); - } else if (i > 300) { - assertTrue(s.getCacheSizeUsed() >= 1); + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = new MVStore.Builder().fileName(fileName).cacheSize(2).open()) { + assertEquals(2, s.getCacheSize()); + MVMap map; + map = s.openMap("data"); + byte[] data = new byte[1024]; + for (int i = 0; i < 1000; i++) { + map.put(i, data); + s.commit(); + if (i < 50) { + assertEquals(0, s.getCacheSizeUsed()); + } else if (i > 300) { + assertTrue(s.getCacheSizeUsed() >= 1); + } } } - s.close(); - s = new MVStore.Builder().open(); - assertEquals(0, s.getCacheSize()); - 
assertEquals(0, s.getCacheSizeUsed()); - s.close(); + try (MVStore s = new MVStore.Builder().open()) { + assertEquals(0, s.getCacheSize()); + assertEquals(0, s.getCacheSizeUsed()); + } } - private void testVersionsToKeep() throws Exception { - MVStore s = new MVStore.Builder().open(); - MVMap map; - map = s.openMap("data"); - for (int i = 0; i < 20; i++) { - long version = s.getCurrentVersion(); - map.put(i, i); - s.commit(); - if (version >= 6) { - map.openVersion(version - 5); - try { - map.openVersion(version - 6); - fail(); - } catch (IllegalArgumentException e) { - // expected + private void testVersionsToKeep() { + try (MVStore s = new MVStore.Builder().open()) { + assertEquals(5, s.getVersionsToKeep()); + MVMap map = s.openMap("data"); + for (int i = 0; i < 20; i++) { + map.put(i, i); + s.commit(); + long version = s.getCurrentVersion(); + if (version >= 6) { + map.openVersion(version - 5); + assertThrows(IllegalArgumentException.class, () -> map.openVersion(version - 6)); } } } } private void testVersionsToKeep2() { - MVStore s = new MVStore.Builder().autoCommitDisabled().open(); - s.setVersionsToKeep(2); - final MVMap m = s.openMap("data"); - s.commit(); - assertEquals(1, s.getCurrentVersion()); - m.put(1, "version 1"); - s.commit(); - assertEquals(2, s.getCurrentVersion()); - m.put(1, "version 2"); - s.commit(); - assertEquals(3, s.getCurrentVersion()); - m.put(1, "version 3"); - s.commit(); - m.put(1, "version 4"); - assertEquals("version 4", m.openVersion(4).get(1)); - assertEquals("version 3", m.openVersion(3).get(1)); - assertEquals("version 2", m.openVersion(2).get(1)); - new AssertThrows(IllegalArgumentException.class) { - @Override - public void test() throws Exception { - m.openVersion(1); - } - }; - s.close(); + try (MVStore s = new MVStore.Builder().autoCommitDisabled().open()) { + s.setVersionsToKeep(2); + final MVMap m = s.openMap("data"); + s.commit(); + assertEquals(1, s.getCurrentVersion()); + m.put(1, "version 1"); + s.commit(); + 
assertEquals(2, s.getCurrentVersion()); + m.put(1, "version 2"); + s.commit(); + assertEquals(3, s.getCurrentVersion()); + m.put(1, "version 3"); + s.commit(); + m.put(1, "version 4"); + assertEquals("version 4", m.openVersion(4).get(1)); + assertEquals("version 3", m.openVersion(3).get(1)); + assertEquals("version 2", m.openVersion(2).get(1)); + assertThrows(IllegalArgumentException.class, () -> m.openVersion(1)); + } } - private void testRemoveMap() throws Exception { - String fileName = getBaseDir() + "/testCloseMap.h3"; + private void testRemoveMap() { + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileName(fileName). - open(); - MVMap map; - - map = s.openMap("data"); - map.put(1, 1); - assertEquals(1, map.get(1).intValue()); - s.commit(); - - s.removeMap(map); - s.commit(); + open()) { + MVMap map = s.openMap("data"); + map.put(1, 1); + assertEquals(1, map.get(1).intValue()); + s.commit(); - map = s.openMap("data"); - assertTrue(map.isEmpty()); - map.put(2, 2); + s.removeMap(map); + s.commit(); - s.close(); + map = s.openMap("data"); + assertTrue(map.isEmpty()); + map.put(2, 2); + } } - private void testIsEmpty() throws Exception { - MVStore s = new MVStore.Builder(). + private void testIsEmpty() { + try (MVStore s = new MVStore.Builder(). pageSplitSize(50). 
- open(); - Map m = s.openMap("data"); - m.put(1, new byte[50]); - m.put(2, new byte[50]); - m.put(3, new byte[50]); - m.remove(1); - m.remove(2); - m.remove(3); - assertEquals(0, m.size()); - assertTrue(m.isEmpty()); - s.close(); + open()) { + Map m = s.openMap("data"); + m.put(1, new byte[50]); + m.put(2, new byte[50]); + m.put(3, new byte[50]); + m.remove(1); + m.remove(2); + m.remove(3); + assertEquals(0, m.size()); + assertTrue(m.isEmpty()); + } } - private void testOffHeapStorage() throws Exception { + private void testOffHeapStorage() { OffHeapStore offHeap = new OffHeapStore(); - MVStore s = new MVStore.Builder(). - fileStore(offHeap). - open(); int count = 1000; - Map map = s.openMap("data"); - for (int i = 0; i < count; i++) { - map.put(i, "Hello " + i); - s.commit(); + try (MVStore s = new MVStore.Builder(). + fileStore(offHeap). + open()) { + Map map = s.openMap("data"); + for (int i = 0; i < count; i++) { + map.put(i, "Hello " + i); + s.commit(); + } + assertTrue(offHeap.getWriteCount() > count); } - assertTrue(offHeap.getWriteCount() > count); - s.close(); - s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileStore(offHeap). - open(); - map = s.openMap("data"); - for (int i = 0; i < count; i++) { - assertEquals("Hello " + i, map.get(i)); + open()) { + Map map = s.openMap("data"); + for (int i = 0; i < count; i++) { + assertEquals("Hello " + i, map.get(i)); + } } - s.close(); } - private void testNewerWriteVersion() throws Exception { - String fileName = getBaseDir() + "/testNewerWriteVersion.h3"; + private void testNewerWriteVersion() { + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s = new MVStore.Builder(). encryptionKey("007".toCharArray()). 
@@ -426,19 +424,12 @@ private void testNewerWriteVersion() throws Exception { open(); s.setRetentionTime(Integer.MAX_VALUE); Map header = s.getStoreHeader(); - assertEquals("1", header.get("format").toString()); - header.put("formatRead", "1"); - header.put("format", "2"); + assertEquals("2", header.get("format").toString()); + header.put("formatRead", "2"); + header.put("format", "3"); + forceWriteStoreHeader(s); MVMap m = s.openMap("data"); - // this is to ensure the file header is overwritten - // the header is written at least every 20 commits - for (int i = 0; i < 30; i++) { - m.put(0, "Hello World " + i); - s.commit(); - if (i > 5) { - s.setRetentionTime(0); - } - } + forceWriteStoreHeader(s); m.put(0, "Hello World"); s.close(); try { @@ -448,9 +439,9 @@ private void testNewerWriteVersion() throws Exception { open(); header = s.getStoreHeader(); fail(header.toString()); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { assertEquals(DataUtils.ERROR_UNSUPPORTED_FORMAT, - DataUtils.getErrorCode(e.getMessage())); + e.getErrorCode()); } s = new MVStore.Builder(). encryptionKey("007".toCharArray()). @@ -474,13 +465,15 @@ private void testNewerWriteVersion() throws Exception { } - private void testCompactFully() throws Exception { - String fileName = getBaseDir() + "/testCompactFully.h3"; + private void testCompactFully() { + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s = new MVStore.Builder(). fileName(fileName). autoCommitDisabled(). 
open(); + s.setRetentionTime(0); + s.setVersionsToKeep(0); MVMap m; for (int i = 0; i < 100; i++) { m = s.openMap("data" + i); @@ -494,85 +487,76 @@ private void testCompactFully() throws Exception { } long sizeOld = s.getFileStore().size(); s.compactMoveChunks(); + s.close(); long sizeNew = s.getFileStore().size(); assertTrue("old: " + sizeOld + " new: " + sizeNew, sizeNew < sizeOld); - s.close(); } private void testBackgroundExceptionListener() throws Exception { - String fileName = getBaseDir() + "/testBackgroundExceptionListener.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - final AtomicReference exRef = - new AtomicReference(); - s = new MVStore.Builder(). + AtomicReference exRef = new AtomicReference<>(); + MVStore s = new MVStore.Builder(). fileName(fileName). - backgroundExceptionHandler(new UncaughtExceptionHandler() { - - @Override - public void uncaughtException(Thread t, Throwable e) { - exRef.set(e); - } - - }). + backgroundExceptionHandler((t, e) -> exRef.set(e)). 
open(); - s.setAutoCommitDelay(50); - MVMap m; - m = s.openMap("data"); + s.setAutoCommitDelay(10); + MVMap m = s.openMap("data"); s.getFileStore().getFile().close(); - m.put(1, "Hello"); - for (int i = 0; i < 200; i++) { - if (exRef.get() != null) { - break; + try { + m.put(1, "Hello"); + for (int i = 0; i < 200; i++) { + if (exRef.get() != null) { + break; + } + sleep(10); } - Thread.sleep(1); + Throwable e = exRef.get(); + assertNotNull(e); + checkErrorCode(DataUtils.ERROR_WRITING_FAILED, e); + } catch (MVStoreException e) { + // sometimes it is detected right away + assertEquals(DataUtils.ERROR_CLOSED, e.getErrorCode()); } - Throwable e = exRef.get(); - assertTrue(e != null); - assertEquals(DataUtils.ERROR_WRITING_FAILED, - DataUtils.getErrorCode(e.getMessage())); s.closeImmediately(); FileUtils.delete(fileName); } private void testAtomicOperations() { - String fileName = getBaseDir() + "/testAtomicOperations.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = new MVStore.Builder(). + try (MVStore s = new MVStore.Builder(). fileName(fileName). 
- open(); - m = s.openMap("data"); - - // putIfAbsent - assertNull(m.putIfAbsent(1, new byte[1])); - assertEquals(1, m.putIfAbsent(1, new byte[2]).length); - assertEquals(1, m.get(1).length); - - // replace - assertNull(m.replace(2, new byte[2])); - assertNull(m.get(2)); - assertEquals(1, m.replace(1, new byte[2]).length); - assertEquals(2, m.replace(1, new byte[3]).length); - assertEquals(3, m.replace(1, new byte[1]).length); - - // replace with oldValue - assertFalse(m.replace(1, new byte[2], new byte[10])); - assertTrue(m.replace(1, new byte[1], new byte[2])); - assertTrue(m.replace(1, new byte[2], new byte[1])); - - // remove - assertFalse(m.remove(1, new byte[2])); - assertTrue(m.remove(1, new byte[1])); - - s.close(); + open()) { + MVMap m = s.openMap("data"); + + // putIfAbsent + assertNull(m.putIfAbsent(1, new byte[1])); + assertEquals(1, m.putIfAbsent(1, new byte[2]).length); + assertEquals(1, m.get(1).length); + + // replace + assertNull(m.replace(2, new byte[2])); + assertNull(m.get(2)); + assertEquals(1, m.replace(1, new byte[2]).length); + assertEquals(2, m.replace(1, new byte[3]).length); + assertEquals(3, m.replace(1, new byte[1]).length); + + // replace with oldValue + assertFalse(m.replace(1, new byte[2], new byte[10])); + assertTrue(m.replace(1, new byte[1], new byte[2])); + assertTrue(m.replace(1, new byte[2], new byte[1])); + + // remove + assertFalse(m.remove(1, new byte[2])); + assertTrue(m.remove(1, new byte[1])); + } FileUtils.delete(fileName); } private void testWriteBuffer() { - String fileName = getBaseDir() + "/testWriteBuffer.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s; MVMap m; @@ -616,8 +600,9 @@ private void testWriteBuffer() { FileUtils.delete(fileName); } - private void testWriteDelay() throws InterruptedException { - String fileName = getBaseDir() + "/testWriteDelay.h3"; + private void testWriteDelay() { + String fileName = getBaseDir() + "/" + getTestName(); + 
FileUtils.delete(fileName); MVStore s; MVMap m; @@ -640,34 +625,33 @@ private void testWriteDelay() throws InterruptedException { s = new MVStore.Builder(). fileName(fileName). open(); - s.setAutoCommitDelay(1); m = s.openMap("data"); m.put(1, "Hello"); - s.commit(); - long v = s.getCurrentVersion(); m.put(2, "World."); - Thread.sleep(5); - // must not store, as nothing has been committed yet - s.closeImmediately(); + s.commit(); + s.close(); + s = new MVStore.Builder(). fileName(fileName). open(); - s.setAutoCommitDelay(1); + s.setAutoCommitDelay(2); m = s.openMap("data"); assertEquals("World.", m.get(2)); m.put(2, "World"); s.commit(); - v = s.getCurrentVersion(); + long v = s.getCurrentVersion(); + long time = System.nanoTime(); m.put(3, "!"); - for (int i = 100; i > 0; i--) { + for (int i = 200; i > 0; i--) { if (s.getCurrentVersion() > v) { break; } - if (i < 10) { - fail(); + long diff = System.nanoTime() - time; + if (diff > TimeUnit.SECONDS.toNanos(1)) { + fail("diff=" + TimeUnit.NANOSECONDS.toMillis(diff)); } - Thread.sleep(1); + sleep(10); } s.closeImmediately(); @@ -684,292 +668,291 @@ private void testWriteDelay() throws InterruptedException { } private void testEncryptedFile() { - String fileName = getBaseDir() + "/testEncryptedFile.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; char[] passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). - encryptionKey(passwordChars). - open(); - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); - assertTrue(FileUtils.exists(fileName)); - m = s.openMap("test"); - m.put(1, "Hello"); - assertEquals("Hello", m.get(1)); - s.close(); - - passwordChars = "008".toCharArray(); - try { - s = new MVStore.Builder(). - fileName(fileName). 
- encryptionKey(passwordChars).open(); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_FILE_CORRUPT, - DataUtils.getErrorCode(e.getMessage())); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertPasswordErased(passwordChars); + assertTrue(FileUtils.exists(fileName)); + MVMap m = s.openMap("test"); + m.put(1, "Hello"); + assertEquals("Hello", m.get(1)); } - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); + + char[] passwordChars2 = "008".toCharArray(); + assertThrows(DataUtils.ERROR_FILE_CORRUPT, + () -> new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars2).open()); + assertPasswordErased(passwordChars2); passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). - encryptionKey(passwordChars).open(); - assertEquals(0, passwordChars[0]); - assertEquals(0, passwordChars[1]); - assertEquals(0, passwordChars[2]); - m = s.openMap("test"); - assertEquals("Hello", m.get(1)); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertPasswordErased(passwordChars); + MVMap m = s.openMap("test"); + assertEquals("Hello", m.get(1)); + } FileUtils.setReadOnly(fileName); passwordChars = "007".toCharArray(); - s = new MVStore.Builder(). - fileName(fileName). 
- encryptionKey(passwordChars).open(); - assertTrue(s.getFileStore().isReadOnly()); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).encryptionKey(passwordChars).open()) { + assertTrue(s.getFileStore().isReadOnly()); + } FileUtils.delete(fileName); assertFalse(FileUtils.exists(fileName)); } + private void assertPasswordErased(char[] passwordChars) { + assertEquals(0, passwordChars[0]); + assertEquals(0, passwordChars[1]); + assertEquals(0, passwordChars[2]); + } + private void testFileFormatChange() { - String fileName = getBaseDir() + "/testFileFormatChange.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - m = s.openMap("test"); - m.put(1, 1); - Map header = s.getStoreHeader(); - int format = Integer.parseInt(header.get("format").toString()); - assertEquals(1, format); - header.put("format", Integer.toString(format + 1)); - // this is to ensure the file header is overwritten - // the header is written at least every 20 commits - for (int i = 0; i < 30; i++) { - if (i > 5) { - s.setRetentionTime(0); - } - m.put(10, 100 * i); - s.commit(); - } - s.close(); - try { - openStore(fileName).close(); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_UNSUPPORTED_FORMAT, - DataUtils.getErrorCode(e.getMessage())); - } + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.openMap("test"); + m.put(1, 1); + Map header = s.getStoreHeader(); + int format = Integer.parseInt(header.get("format").toString()); + assertEquals(2, format); + header.put("format", Integer.toString(format + 1)); + forceWriteStoreHeader(s); + } + assertThrows(DataUtils.ERROR_UNSUPPORTED_FORMAT, () -> openStore(fileName).close()); FileUtils.delete(fileName); } private void testRecreateMap() { - String fileName = getBaseDir() + "/testRecreateMap.h3"; + String fileName = getBaseDir() + 
"/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("test"); - m.put(1, 1); - s.commit(); - s.removeMap(m); - s.close(); - s = openStore(fileName); - m = s.openMap("test"); - assertNull(m.get(1)); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("test"); + m.put(1, 1); + s.commit(); + s.removeMap(m); + } + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("test"); + assertNull(m.get(1)); + } } private void testRenameMapRollback() { - MVStore s = openStore(null); - MVMap map; - map = s.openMap("hello"); - map.put(1, 10); - long old = s.commit(); - s.renameMap(map, "world"); - map.put(2, 20); - assertEquals("world", map.getName()); - s.rollbackTo(old); - assertEquals("hello", map.getName()); - s.rollbackTo(0); - assertTrue(map.isClosed()); - s.close(); + try (MVStore s = openStore(null)) { + MVMap map = s.openMap("hello"); + map.put(1, 10); + long old = s.commit(); + s.renameMap(map, "world"); + map.put(2, 20); + assertEquals("world", map.getName()); + s.rollbackTo(old); + assertEquals("hello", map.getName()); + s.rollbackTo(0); + assertTrue(map.isClosed()); + } } private void testCustomMapType() { - String fileName = getBaseDir() + "/testMapType.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - SequenceMap seq = s.openMap("data", new SequenceMap.Builder()); - StringBuilder buff = new StringBuilder(); - for (long x : seq.keySet()) { - buff.append(x).append(';'); + try (MVStore s = openStore(fileName)) { + Map seq = s.openMap("data", new SequenceMap.Builder()); + StringBuilder buff = new StringBuilder(); + for (long x : seq.keySet()) { + buff.append(x).append(';'); + } + assertEquals("1;2;3;4;5;6;7;8;9;10;", buff.toString()); } - assertEquals("1;2;3;4;5;6;7;8;9;10;", buff.toString()); - s.close(); } private void testCacheSize() { - String fileName = getBaseDir() + "/testCacheSize.h3"; - MVStore s; - 
MVMap map; - s = new MVStore.Builder(). + if (config.memory) { + return; + } + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = new MVStore.Builder(). fileName(fileName). autoCommitDisabled(). - compress().open(); - map = s.openMap("test"); - // add 10 MB of data - for (int i = 0; i < 1024; i++) { - map.put(i, new String(new char[10240])); + compress().open()) { + s.setReuseSpace(false); // disable free space scanning + MVMap map = s.openMap("test"); + // add 10 MB of data + for (int i = 0; i < 1024; i++) { + map.put(i, new String(new char[10240])); + } } - s.close(); int[] expectedReadsForCacheSize = { - 3407, 2590, 1924, 1440, 1111, 956, 918 + 1880, 490, 476, 501, 476, 476, 541 // compressed +// 1887, 1775, 1599, 1355, 1035, 732, 507 // uncompressed }; - for (int cacheSize = 0; cacheSize <= 6; cacheSize += 4) { + for (int cacheSize = 0; cacheSize <= 6; cacheSize += 1) { int cacheMB = 1 + 3 * cacheSize; - s = new MVStore.Builder(). + Utils.collectGarbage(); + try (MVStore s = new MVStore.Builder(). fileName(fileName). - cacheSize(cacheMB).open(); - assertEquals(cacheMB, s.getCacheSize()); - map = s.openMap("test"); - for (int i = 0; i < 1024; i += 128) { - for (int j = 0; j < i; j++) { - String x = map.get(j); - assertEquals(10240, x.length()); + autoCommitDisabled(). 
+ cacheSize(cacheMB).open()) { + assertEquals(cacheMB, s.getCacheSize()); + MVMap map = s.openMap("test"); + for (int i = 0; i < 1024; i += 128) { + for (int j = 0; j < i; j++) { + String x = map.get(j); + assertEquals(10240, x.length()); + } } + long readCount = s.getFileStore().getReadCount(); + int expected = expectedReadsForCacheSize[cacheSize]; + assertTrue("Cache " + cacheMB + "Mb, reads: " + readCount + " expected: " + expected + + " size: " + s.getFileStore().getReadBytes() + + " cache used: " + s.getCacheSizeUsed() + + " cache hits: " + s.getCache().getHits() + + " cache misses: " + s.getCache().getMisses() + + " cache requests: " + (s.getCache().getHits() + s.getCache().getMisses()) + + "", + Math.abs(100 - (100 * expected / readCount)) < 15); } - long readCount = s.getFileStore().getReadCount(); - int expected = expectedReadsForCacheSize[cacheSize]; - assertTrue("reads: " + readCount + " expected: " + expected, - Math.abs(100 - (100 * expected / readCount)) < 5); - s.close(); } - } private void testConcurrentOpen() { - String fileName = getBaseDir() + "/testConcurrentOpen.h3"; - MVStore s = new MVStore.Builder().fileName(fileName).open(); - try { - MVStore s1 = new MVStore.Builder().fileName(fileName).open(); - s1.close(); - fail(); - } catch (IllegalStateException e) { - // expected + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + assertThrows(MVStoreException.class, () -> new MVStore.Builder().fileName(fileName).open().close()); + assertThrows(MVStoreException.class, + () -> new MVStore.Builder().fileName(fileName).readOnly().open().close()); + assertFalse(s.getFileStore().isReadOnly()); } - try { - MVStore s1 = new MVStore.Builder().fileName(fileName).readOnly().open(); - s1.close(); - fail(); - } catch (IllegalStateException e) { - // expected + try (MVStore s = new MVStore.Builder().fileName(fileName).readOnly().open()) { + 
assertTrue(s.getFileStore().isReadOnly()); } - assertFalse(s.getFileStore().isReadOnly()); - s.close(); - s = new MVStore.Builder().fileName(fileName).readOnly().open(); - assertTrue(s.getFileStore().isReadOnly()); - s.close(); } private void testFileHeader() { - String fileName = getBaseDir() + "/testFileHeader.h3"; - MVStore s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - long time = System.currentTimeMillis(); - Map m = s.getStoreHeader(); - assertEquals("1", m.get("format").toString()); - long creationTime = (Long) m.get("created"); - assertTrue(Math.abs(time - creationTime) < 100); - m.put("test", "123"); - MVMap map = s.openMap("test"); + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(Integer.MAX_VALUE); + long time = System.currentTimeMillis(); + Map m = s.getStoreHeader(); + assertEquals("2", m.get("format").toString()); + long creationTime = (Long) m.get("created"); + assertTrue(Math.abs(time - creationTime) < 100); + m.put("test", "123"); + forceWriteStoreHeader(s); + } + + try (MVStore s = openStore(fileName)) { + Object test = s.getStoreHeader().get("test"); + assertNotNull(test); + assertEquals("123", test.toString()); + } + } + + private static void forceWriteStoreHeader(MVStore s) { + MVMap map = s.openMap("dummy"); map.put(10, 100); // this is to ensure the file header is overwritten // the header is written at least every 20 commits for (int i = 0; i < 30; i++) { if (i > 5) { s.setRetentionTime(0); + // ensure that the next save time is different, + // so that blocks can be reclaimed + // (on Windows, resolution is 10 ms) + sleep(1); } map.put(10, 110); s.commit(); } - s.close(); - s = openStore(fileName); - Object test = s.getStoreHeader().get("test"); - assertFalse(test == null); - assertEquals("123", test.toString()); - s.close(); + s.removeMap(map); + s.commit(); } - private void testFileHeaderCorruption() throws Exception { - 
String fileName = getBaseDir() + "/testFileHeader.h3"; - MVStore s = new MVStore.Builder(). - fileName(fileName).pageSplitSize(1000).autoCommitDisabled().open(); - s.setRetentionTime(0); - MVMap map; - map = s.openMap("test"); - map.put(0, new byte[100]); - for (int i = 0; i < 10; i++) { - map = s.openMap("test" + i); - map.put(0, new byte[1000]); - s.commit(); + private static void sleep(long ms) { + // on Windows, need to sleep in some cases, + // mainly because the milliseconds resolution of + // System.currentTimeMillis is 10 ms. + try { + Thread.sleep(ms); + } catch (InterruptedException e) { + // ignore } - FileStore fs = s.getFileStore(); - long size = fs.getFile().size(); - for (int i = 0; i < 10; i++) { - map = s.openMap("test" + i); - s.removeMap(map); - s.commit(); - s.compact(100, 1); - if (fs.getFile().size() <= size) { - break; + } + + private void testFileHeaderCorruption() throws Exception { + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + MVStore.Builder builder = new MVStore.Builder(). 
+ fileName(fileName).pageSplitSize(1000).autoCommitDisabled(); + try (MVStore s = builder.open()) { + s.setRetentionTime(0); + MVMap map = s.openMap("test"); + map.put(0, new byte[100]); + for (int i = 0; i < 10; i++) { + map = s.openMap("test" + i); + map.put(0, new byte[1000]); + s.commit(); } - } - s.close(); + FileStore fs = s.getFileStore(); + long size = fs.getFile().size(); + for (int i = 0; i < 100; i++) { + map = s.openMap("test" + i); + s.removeMap(map); + s.commit(); + s.compact(100, 1); + if (fs.getFile().size() <= size) { + break; + } + } + // the last chunk is at the end + s.setReuseSpace(false); + map = s.openMap("test2"); + map.put(1, new byte[1000]); + } + FilePath f = FilePath.get(fileName); int blockSize = 4 * 1024; // test corrupt file headers for (int i = 0; i <= blockSize; i += blockSize) { - FileChannel fc = f.open("rw"); - if (i == 0) { - // corrupt the last block (the end header) - fc.write(ByteBuffer.allocate(256), fc.size() - 256); - } - ByteBuffer buff = ByteBuffer.allocate(4 * 1024); - fc.read(buff, i); - String h = new String(buff.array(), "UTF-8").trim(); - int idx = h.indexOf("fletcher:"); - int old = Character.digit(h.charAt(idx + "fletcher:".length()), 16); - int bad = (old + 1) & 15; - buff.put(idx + "fletcher:".length(), - (byte) Character.forDigit(bad, 16)); - buff.rewind(); - fc.write(buff, i); - fc.close(); + try (FileChannel fc = f.open("rw")) { + if (i == 0) { + // corrupt the last block (the end header) + fc.write(ByteBuffer.allocate(256), fc.size() - 256); + } + ByteBuffer buff = ByteBuffer.allocate(4 * 1024); + fc.read(buff, i); + String h = new String(buff.array(), StandardCharsets.UTF_8).trim(); + int idx = h.indexOf("fletcher:"); + int old = Character.digit(h.charAt(idx + "fletcher:".length()), 16); + int bad = (old + 1) & 15; + buff.put(idx + "fletcher:".length(), + (byte) Character.forDigit(bad, 16)); + + // now intentionally corrupt first or both headers + // note that headers may be overwritten upon successfull 
opening + for (int b = 0; b <= i; b += blockSize) { + buff.rewind(); + fc.write(buff, b); + } + } if (i == 0) { // if the first header is corrupt, the second // header should be used - s = openStore(fileName); - map = s.openMap("test"); - assertEquals(100, map.get(0).length); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap map = s.openMap("test"); + assertEquals(100, map.get(0).length); + map = s.openMap("test2"); + assertFalse(map.containsKey(1)); + } } else { // both headers are corrupt - try { - s = openStore(fileName); - fail(); - } catch (Exception e) { - // expected - } + assertThrows(Exception.class, () -> openStore(fileName)); } } } @@ -1030,69 +1013,87 @@ private void testIndexSkip() { assertEquals(map.size(), map.keyList().size()); } - private void testMinMaxNextKey() { - MVStore s = openStore(null); + private void testIndexSkipReverse() { + MVStore s = openStore(null, 4); MVMap map = s.openMap("test"); - map.put(10, 100); - map.put(20, 200); - - assertEquals(10, map.firstKey().intValue()); - assertEquals(20, map.lastKey().intValue()); + for (int i = 0; i < 100; i += 2) { + map.put(i, 10 * i); + } - assertEquals(20, map.ceilingKey(15).intValue()); - assertEquals(20, map.ceilingKey(20).intValue()); - assertEquals(10, map.floorKey(15).intValue()); - assertEquals(10, map.floorKey(10).intValue()); - assertEquals(20, map.higherKey(10).intValue()); - assertEquals(10, map.lowerKey(20).intValue()); + Cursor c = map.cursor(50, null, true); + // skip must reset the root of the cursor + c.skip(10); + for (int i = 30; i >= 0; i -= 2) { + assertTrue(c.hasNext()); + assertEquals(i, c.next().intValue()); + } + assertFalse(c.hasNext()); + } - final MVMap m = map; - assertEquals(10, m.ceilingKey(null).intValue()); - assertEquals(10, m.higherKey(null).intValue()); - assertNull(m.lowerKey(null)); - assertNull(m.floorKey(null)); + private void testMinMaxNextKey() { + try (MVStore s = openStore(null)) { + MVMap map = s.openMap("test"); + map.put(10, 100); + 
map.put(20, 200); - for (int i = 3; i < 20; i++) { - s = openStore(null, 4); - map = s.openMap("test"); - for (int j = 3; j < i; j++) { - map.put(j * 2, j * 20); - } - if (i == 3) { - assertNull(map.firstKey()); - assertNull(map.lastKey()); - } else { - assertEquals(6, map.firstKey().intValue()); - int max = (i - 1) * 2; - assertEquals(max, map.lastKey().intValue()); - - for (int j = 0; j < i * 2 + 2; j++) { - if (j > max) { - assertNull(map.ceilingKey(j)); - } else { - int ceiling = Math.max((j + 1) / 2 * 2, 6); - assertEquals(ceiling, map.ceilingKey(j).intValue()); - } + assertEquals(10, map.firstKey().intValue()); + assertEquals(20, map.lastKey().intValue()); - int floor = Math.min(max, Math.max(j / 2 * 2, 4)); - if (floor < 6) { - assertNull(map.floorKey(j)); - } else { - map.floorKey(j); - } + assertEquals(20, map.ceilingKey(15).intValue()); + assertEquals(20, map.ceilingKey(20).intValue()); + assertEquals(10, map.floorKey(15).intValue()); + assertEquals(10, map.floorKey(10).intValue()); + assertEquals(20, map.higherKey(10).intValue()); + assertEquals(10, map.lowerKey(20).intValue()); - int lower = Math.min(max, Math.max((j - 1) / 2 * 2, 4)); - if (lower < 6) { - assertNull(map.lowerKey(j)); - } else { - assertEquals(lower, map.lowerKey(j).intValue()); - } + assertEquals(10, map.ceilingKey(null).intValue()); + assertEquals(10, map.higherKey(null).intValue()); + assertNull(map.lowerKey(null)); + assertNull(map.floorKey(null)); + } - int higher = Math.max((j + 2) / 2 * 2, 6); - if (higher > max) { - assertNull(map.higherKey(j)); - } else { - assertEquals(higher, map.higherKey(j).intValue()); + for (int i = 3; i < 20; i++) { + try (MVStore s = openStore(null, 4)) { + MVMap map = s.openMap("test"); + for (int j = 3; j < i; j++) { + map.put(j * 2, j * 20); + } + if (i == 3) { + assertNull(map.firstKey()); + assertNull(map.lastKey()); + } else { + assertEquals(6, map.firstKey().intValue()); + int max = (i - 1) * 2; + assertEquals(max, map.lastKey().intValue()); + + 
for (int j = 0; j < i * 2 + 2; j++) { + if (j > max) { + assertNull(map.ceilingKey(j)); + } else { + int ceiling = Math.max((j + 1) / 2 * 2, 6); + assertEquals(ceiling, map.ceilingKey(j).intValue()); + } + + int floor = Math.min(max, Math.max(j / 2 * 2, 4)); + if (floor < 6) { + assertNull(map.floorKey(j)); + } else { + map.floorKey(j); + } + + int lower = Math.min(max, Math.max((j - 1) / 2 * 2, 4)); + if (lower < 6) { + assertNull(map.lowerKey(j)); + } else { + assertEquals(lower, map.lowerKey(j).intValue()); + } + + int higher = Math.max((j + 2) / 2 * 2, 6); + if (higher > max) { + assertNull(map.higherKey(j)); + } else { + assertEquals(higher, map.higherKey(j).intValue()); + } } } } @@ -1100,685 +1101,663 @@ private void testMinMaxNextKey() { } private void testStoreVersion() { - String fileName = getBaseDir() + "/testStoreVersion.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = MVStore.open(fileName); - assertEquals(0, s.getCurrentVersion()); - assertEquals(0, s.getStoreVersion()); - s.setStoreVersion(0); - s.commit(); - s.setStoreVersion(1); - s.closeImmediately(); - s = MVStore.open(fileName); - assertEquals(1, s.getCurrentVersion()); - assertEquals(0, s.getStoreVersion()); - s.setStoreVersion(1); - s.close(); - s = MVStore.open(fileName); - assertEquals(2, s.getCurrentVersion()); - assertEquals(1, s.getStoreVersion()); - s.close(); - } + MVStore store = MVStore.open(fileName); + assertEquals(0, store.getCurrentVersion()); + assertEquals(0, store.getStoreVersion()); + store.setStoreVersion(0); + store.commit(); + store.setStoreVersion(1); + store.closeImmediately(); - private void testIterateOldVersion() { - MVStore s; - Map map; - s = new MVStore.Builder().open(); - map = s.openMap("test"); - int len = 100; - for (int i = 0; i < len; i++) { - map.put(i, 10 * i); + try (MVStore s = MVStore.open(fileName)) { + assertEquals(1, s.getCurrentVersion()); + assertEquals(0, s.getStoreVersion()); + 
s.setStoreVersion(1); } - Iterator it = map.keySet().iterator(); - s.commit(); - for (int i = 0; i < len; i += 2) { - map.remove(i); + + try (MVStore s = MVStore.open(fileName)) { + assertEquals(2, s.getCurrentVersion()); + assertEquals(1, s.getStoreVersion()); } - int count = 0; - while (it.hasNext()) { - it.next(); - count++; + } + + private void testIterateOldVersion() { + try (MVStore s = new MVStore.Builder().open()) { + Map map = s.openMap("test"); + int len = 100; + for (int i = 0; i < len; i++) { + map.put(i, 10 * i); + } + int count = 0; + MVStore.TxCounter txCounter = s.registerVersionUsage(); + try { + Iterator it = map.keySet().iterator(); + s.commit(); + for (int i = 0; i < len; i += 2) { + map.remove(i); + } + while (it.hasNext()) { + it.next(); + count++; + } + } finally { + s.deregisterVersionUsage(txCounter); + } + assertEquals(len, count); } - assertEquals(len, count); - s.close(); } private void testObjects() { - String fileName = getBaseDir() + "/testObjects.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - Map map; - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("test"); - map.put(1, "Hello"); - map.put("2", 200); - map.put(new Object[1], new Object[]{1, "2"}); - s.close(); - - s = new MVStore.Builder().fileName(fileName).open(); - map = s.openMap("test"); - assertEquals("Hello", map.get(1).toString()); - assertEquals(200, ((Integer) map.get("2")).intValue()); - Object[] x = (Object[]) map.get(new Object[1]); - assertEquals(2, x.length); - assertEquals(1, ((Integer) x[0]).intValue()); - assertEquals("2", (String) x[1]); - s.close(); + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + Map map = s.openMap("test"); + map.put(1, "Hello"); + map.put("2", 200); + map.put(new Object[1], new Object[]{1, "2"}); + } + + try (MVStore s = new MVStore.Builder().fileName(fileName).open()) { + Map map = s.openMap("test"); + assertEquals("Hello", 
map.get(1).toString()); + assertEquals(200, ((Integer) map.get("2")).intValue()); + Object[] x = (Object[]) map.get(new Object[1]); + assertEquals(2, x.length); + assertEquals(1, ((Integer) x[0]).intValue()); + assertEquals("2", (String) x[1]); + } } private void testExample() { - String fileName = getBaseDir() + "/testExample.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); // open the store (in-memory if fileName is null) - MVStore s = MVStore.open(fileName); + try (MVStore s = MVStore.open(fileName)) { - // create/get the map named "data" - MVMap map = s.openMap("data"); + // create/get the map named "data" + MVMap map = s.openMap("data"); - // add and read some data - map.put(1, "Hello World"); - // System.out.println(map.get(1)); - - // close the store (this will persist changes) - s.close(); - - s = MVStore.open(fileName); - map = s.openMap("data"); - assertEquals("Hello World", map.get(1)); - s.close(); + // add and read some data + map.put(1, "Hello World"); + // System.out.println(map.get(1)); + } + try (MVStore s = MVStore.open(fileName)) { + MVMap map = s.openMap("data"); + assertEquals("Hello World", map.get(1)); + } } private void testExampleMvcc() { - String fileName = getBaseDir() + "/testExampleMvcc.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); // open the store (in-memory if fileName is null) - MVStore s = MVStore.open(fileName); + try (MVStore s = MVStore.open(fileName)) { - // create/get the map named "data" - MVMap map = s.openMap("data"); + // create/get the map named "data" + MVMap map = s.openMap("data"); - // add some data - map.put(1, "Hello"); - map.put(2, "World"); + // add some data + map.put(1, "Hello"); + map.put(2, "World"); - // get the current version, for later use - long oldVersion = s.getCurrentVersion(); + // get the current version, for later use + long oldVersion = s.getCurrentVersion(); - // from now on, the old version is read-only - 
s.commit(); + // from now on, the old version is read-only + s.commit(); - // more changes, in the new version - // changes can be rolled back if required - // changes always go into "head" (the newest version) - map.put(1, "Hi"); - map.remove(2); - - // access the old data (before the commit) - MVMap oldMap = - map.openVersion(oldVersion); - - // print the old version (can be done - // concurrently with further modifications) - // this will print "Hello" and "World": - // System.out.println(oldMap.get(1)); - assertEquals("Hello", oldMap.get(1)); - // System.out.println(oldMap.get(2)); - assertEquals("World", oldMap.get(2)); - - // print the newest version ("Hi") - // System.out.println(map.get(1)); - assertEquals("Hi", map.get(1)); - - // close the store - s.close(); + // more changes, in the new version + // changes can be rolled back if required + // changes always go into "head" (the newest version) + map.put(1, "Hi"); + map.remove(2); + + // access the old data (before the commit) + MVMap oldMap = + map.openVersion(oldVersion); + + // print the old version (can be done + // concurrently with further modifications) + // this will print "Hello" and "World": + // System.out.println(oldMap.get(1)); + assertEquals("Hello", oldMap.get(1)); + // System.out.println(oldMap.get(2)); + assertEquals("World", oldMap.get(2)); + + // print the newest version ("Hi") + // System.out.println(map.get(1)); + assertEquals("Hi", map.get(1)); + } } private void testOpenStoreCloseLoop() { - String fileName = getBaseDir() + "/testOpenClose.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); for (int k = 0; k < 1; k++) { - // long t = System.currentTimeMillis(); + // long t = System.nanoTime(); for (int j = 0; j < 3; j++) { - MVStore s = openStore(fileName); - Map m = s.openMap("data"); - for (int i = 0; i < 3; i++) { - Integer x = m.get("value"); - m.put("value", x == null ? 
0 : x + 1); - s.commit(); + try (MVStore s = openStore(fileName)) { + Map m = s.openMap("data"); + for (int i = 0; i < 3; i++) { + Integer x = m.get("value"); + m.put("value", x == null ? 0 : x + 1); + s.commit(); + } } - s.close(); } // System.out.println("open/close: " + - // (System.currentTimeMillis() - t)); + // TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t)); // System.out.println("size: " + FileUtils.size(fileName)); } } private void testOldVersion() { - MVStore s; for (int op = 0; op <= 1; op++) { for (int i = 0; i < 5; i++) { - s = openStore(null); - s.setVersionsToKeep(Integer.MAX_VALUE); - MVMap m; - m = s.openMap("data"); - for (int j = 0; j < 5; j++) { - if (op == 1) { - m.put("1", "" + s.getCurrentVersion()); + try (MVStore s = openStore(null)) { + s.setVersionsToKeep(Integer.MAX_VALUE); + MVMap m; + m = s.openMap("data"); + for (int j = 0; j < 5; j++) { + if (op == 1) { + m.put("1", "" + s.getCurrentVersion()); + } + s.commit(); } - s.commit(); - } - for (int j = 0; j < s.getCurrentVersion(); j++) { - MVMap old = m.openVersion(j); - if (op == 1) { - assertEquals("" + j, old.get("1")); + for (int j = 0; j < s.getCurrentVersion(); j++) { + MVMap old = m.openVersion(j); + if (op == 1) { + assertEquals("" + j, old.get("1")); + } } } - s.close(); } } } private void testVersion() { - String fileName = getBaseDir() + "/testVersion.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - s.setVersionsToKeep(100); - s.setAutoCommitDelay(0); - s.setRetentionTime(Integer.MAX_VALUE); - MVMap m = s.openMap("data"); - s.commit(); - long first = s.getCurrentVersion(); - m.put("0", "test"); - s.commit(); - m.put("1", "Hello"); - m.put("2", "World"); - for (int i = 10; i < 20; i++) { - m.put("" + i, "data"); - } - long old = s.getCurrentVersion(); - s.commit(); - m.put("1", "Hallo"); - m.put("2", "Welt"); - MVMap mFirst; - mFirst = m.openVersion(first); - assertEquals(0, mFirst.size()); - 
MVMap mOld; - assertEquals("Hallo", m.get("1")); - assertEquals("Welt", m.get("2")); - mOld = m.openVersion(old); - assertEquals("Hello", mOld.get("1")); - assertEquals("World", mOld.get("2")); - assertTrue(mOld.isReadOnly()); - s.getCurrentVersion(); - long old3 = s.commit(); - - // the old version is still available - assertEquals("Hello", mOld.get("1")); - assertEquals("World", mOld.get("2")); - - mOld = m.openVersion(old3); - assertEquals("Hallo", mOld.get("1")); - assertEquals("Welt", mOld.get("2")); - - m.put("1", "Hi"); - assertEquals("Welt", m.remove("2")); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setVersionsToKeep(100); + s.setAutoCommitDelay(0); + s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.openMap("data"); + s.commit(); + long first = s.getCurrentVersion(); + assertEquals(1, first); + m.put("0", "test"); + s.commit(); + m.put("1", "Hello"); + m.put("2", "World"); + for (int i = 10; i < 20; i++) { + m.put("" + i, "data"); + } + long old = s.getCurrentVersion(); + s.commit(); + m.put("1", "Hallo"); + m.put("2", "Welt"); + MVMap mFirst; + mFirst = m.openVersion(first); + // openVersion() should restore map at last known state of the version specified + // not at the first known state, as it was before + assertEquals(1, mFirst.size()); + MVMap mOld; + assertEquals("Hallo", m.get("1")); + assertEquals("Welt", m.get("2")); + mOld = m.openVersion(old); + assertEquals("Hello", mOld.get("1")); + assertEquals("World", mOld.get("2")); + assertTrue(mOld.isReadOnly()); + long old3 = s.getCurrentVersion(); + assertEquals(3, old3); + s.commit(); - s = openStore(fileName); - m = s.openMap("data"); - assertEquals("Hi", m.get("1")); - assertEquals(null, m.get("2")); + // the old version is still available + assertEquals("Hello", mOld.get("1")); + assertEquals("World", mOld.get("2")); - mOld = m.openVersion(old3); - assertEquals("Hallo", mOld.get("1")); - assertEquals("Welt", mOld.get("2")); + mOld = m.openVersion(old3); + assertEquals("Hallo", 
mOld.get("1")); + assertEquals("Welt", mOld.get("2")); - try { - m.openVersion(-3); - fail(); - } catch (IllegalArgumentException e) { - // expected + m.put("1", "Hi"); + assertEquals("Welt", m.remove("2")); + } + + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + assertEquals("Hi", m.get("1")); + assertEquals(null, m.get("2")); + assertThrows(IllegalArgumentException.class, () -> m.openVersion(-3)); } - s.close(); } private void testTruncateFile() { - String fileName = getBaseDir() + "/testTruncate.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName); - m = s.openMap("data"); - String data = new String(new char[10000]).replace((char) 0, 'x'); - for (int i = 1; i < 10; i++) { - m.put(i, data); - s.commit(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + String data = new String(new char[10000]).replace((char) 0, 'x'); + for (int i = 1; i < 10; i++) { + m.put(i, data); + s.commit(); + } } - s.close(); long len = FileUtils.size(fileName); - s = openStore(fileName); - s.setRetentionTime(0); - // remove 75% - m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - if (i % 4 != 0) { - m.remove(i); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(0); + // remove 75% + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + if (i % 4 != 0) { + sleep(2); + m.remove(i); + s.commit(); + } } + assertTrue(s.compact(100, 50 * 1024)); + // compaction alone will not guarantee file size reduction + s.compactMoveChunks(); } - s.commit(); - assertTrue(s.compact(100, 50 * 1024)); - s.close(); long len2 = FileUtils.size(fileName); assertTrue("len2: " + len2 + " len: " + len, len2 < len); } private void testFastDelete() { - String fileName = getBaseDir() + "/testFastDelete.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s; - MVMap m; - s = openStore(fileName, 700); - m = 
s.openMap("data"); - for (int i = 0; i < 1000; i++) { - m.put(i, "Hello World"); - assertEquals(i + 1, m.size()); + try (MVStore s = openStore(fileName, 700)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 1000; i++) { + m.put(i, "Hello World"); + assertEquals(i + 1, m.size()); + } + assertEquals(1000, m.size()); + // memory calculations were adjusted, so as this out-of-the-thin-air number + assertEquals(93832, s.getUnsavedMemory()); + s.commit(); + assertEquals(2, s.getFileStore().getWriteCount()); } - assertEquals(1000, m.size()); - assertEquals(131896, s.getUnsavedMemory()); - s.commit(); - assertEquals(2, s.getFileStore().getWriteCount()); - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - m.clear(); - assertEquals(0, m.size()); - s.commit(); - // ensure only nodes are read, but not leaves - assertEquals(41, s.getFileStore().getReadCount()); - assertTrue(s.getFileStore().getWriteCount() < 5); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + m.clear(); + assertEquals(0, m.size()); + s.commit(); + // ensure only nodes are read, but not leaves + assertEquals(7, s.getFileStore().getReadCount()); + assertTrue(s.getFileStore().getWriteCount() < 5); + } } private void testRollback() { - MVStore s = MVStore.open(null); - MVMap m = s.openMap("m"); - m.put(1, -1); - s.commit(); - for (int i = 0; i < 10; i++) { - m.put(1, i); - s.rollback(); - assertEquals(i - 1, m.get(1).intValue()); - m.put(1, i); + try (MVStore s = MVStore.open(null)) { + MVMap m = s.openMap("m"); + m.put(1, -1); s.commit(); + for (int i = 0; i < 10; i++) { + m.put(1, i); + s.rollback(); + assertEquals(i - 1, m.get(1).intValue()); + m.put(1, i); + s.commit(); + } } } private void testRollbackStored() { - String fileName = getBaseDir() + "/testRollback.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVMap meta; - MVStore s = openStore(fileName); - assertEquals(45000, s.getRetentionTime()); - 
s.setRetentionTime(0); - assertEquals(0, s.getRetentionTime()); - s.setRetentionTime(45000); - assertEquals(45000, s.getRetentionTime()); - assertEquals(0, s.getCurrentVersion()); - assertFalse(s.hasUnsavedChanges()); - MVMap m = s.openMap("data"); - assertTrue(s.hasUnsavedChanges()); - MVMap m0 = s.openMap("data0"); - m.put("1", "Hello"); - assertEquals(1, s.commit()); - s.rollbackTo(1); - assertEquals(1, s.getCurrentVersion()); - assertEquals("Hello", m.get("1")); - // so a new version is created - m.put("1", "Hello"); - - long v2 = s.commit(); - assertEquals(2, v2); - assertEquals(2, s.getCurrentVersion()); - assertFalse(s.hasUnsavedChanges()); - assertEquals("Hello", m.get("1")); - s.close(); - - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(2, s.getCurrentVersion()); - meta = s.getMetaMap(); - m = s.openMap("data"); - assertFalse(s.hasUnsavedChanges()); - assertEquals("Hello", m.get("1")); - m0 = s.openMap("data0"); - MVMap m1 = s.openMap("data1"); - m.put("1", "Hallo"); - m0.put("1", "Hallo"); - m1.put("1", "Hallo"); - assertEquals("Hallo", m.get("1")); - assertEquals("Hallo", m1.get("1")); - assertTrue(s.hasUnsavedChanges()); - s.rollbackTo(v2); - assertFalse(s.hasUnsavedChanges()); - assertNull(meta.get("name.data1")); - assertNull(m0.get("1")); - assertEquals("Hello", m.get("1")); - assertEquals(2, s.commit()); - s.close(); - - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(2, s.getCurrentVersion()); - meta = s.getMetaMap(); - assertTrue(meta.get("name.data") != null); - assertTrue(meta.get("name.data0") != null); - assertNull(meta.get("name.data1")); - m = s.openMap("data"); - m0 = s.openMap("data0"); - assertNull(m0.get("1")); - assertEquals("Hello", m.get("1")); - assertFalse(m0.isReadOnly()); - m.put("1", "Hallo"); - s.commit(); - long v3 = s.getCurrentVersion(); - assertEquals(3, v3); - s.close(); + long v2; + try (MVStore s = openStore(fileName)) { + assertEquals(45000, s.getRetentionTime()); + 
s.setRetentionTime(0); + assertEquals(0, s.getRetentionTime()); + s.setRetentionTime(45000); + assertEquals(45000, s.getRetentionTime()); + assertEquals(0, s.getCurrentVersion()); + assertFalse(s.hasUnsavedChanges()); + MVMap m = s.openMap("data"); + assertTrue(s.hasUnsavedChanges()); + MVMap m0 = s.openMap("data0"); + m.put("1", "Hello"); + assertEquals(1, s.commit()); + s.rollbackTo(1); + assertEquals(1, s.getCurrentVersion()); + assertEquals("Hello", m.get("1")); + // so a new version is created + m.put("1", "Hello"); + + v2 = s.commit(); + assertEquals(2, v2); + assertEquals(2, s.getCurrentVersion()); + assertFalse(s.hasUnsavedChanges()); + assertEquals("Hello", m.get("1")); + } + + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(2, s.getCurrentVersion()); + MVMap meta = s.getMetaMap(); + MVMap m = s.openMap("data"); + assertFalse(s.hasUnsavedChanges()); + assertEquals("Hello", m.get("1")); + MVMap m0 = s.openMap("data0"); + MVMap m1 = s.openMap("data1"); + m.put("1", "Hallo"); + m0.put("1", "Hallo"); + m1.put("1", "Hallo"); + assertEquals("Hallo", m.get("1")); + assertEquals("Hallo", m1.get("1")); + assertTrue(s.hasUnsavedChanges()); + s.rollbackTo(v2); + assertFalse(s.hasUnsavedChanges()); + assertNull(meta.get(DataUtils.META_NAME + "data1")); + assertNull(m0.get("1")); + assertEquals("Hello", m.get("1")); + // no changes - no real commit here + assertEquals(2, s.commit()); + } + + long v3; + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(2, s.getCurrentVersion()); + MVMap meta = s.getMetaMap(); + assertNotNull(meta.get(DataUtils.META_NAME + "data")); + assertNotNull(meta.get(DataUtils.META_NAME + "data0")); + assertNull(meta.get(DataUtils.META_NAME + "data1")); + MVMap m = s.openMap("data"); + MVMap m0 = s.openMap("data0"); + assertNull(m0.get("1")); + assertEquals("Hello", m.get("1")); + assertFalse(m0.isReadOnly()); + m.put("1", "Hallo"); + s.commit(); + v3 = s.getCurrentVersion(); + 
assertEquals(3, v3); + } - s = openStore(fileName); - s.setRetentionTime(45000); - assertEquals(3, s.getCurrentVersion()); - m = s.openMap("data"); - m.put("1", "Hi"); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + assertEquals(3, s.getCurrentVersion()); + MVMap m = s.openMap("data"); + m.put("1", "Hi"); + } - s = openStore(fileName); - s.setRetentionTime(45000); - m = s.openMap("data"); - assertEquals("Hi", m.get("1")); - s.rollbackTo(v3); - assertEquals("Hallo", m.get("1")); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + MVMap m = s.openMap("data"); + assertEquals("Hi", m.get("1")); + s.rollbackTo(v3); + assertEquals("Hallo", m.get("1")); + } - s = openStore(fileName); - s.setRetentionTime(45000); - m = s.openMap("data"); - assertEquals("Hallo", m.get("1")); - s.close(); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(45000); + MVMap m = s.openMap("data"); + assertEquals("Hallo", m.get("1")); + } } private void testRollbackInMemory() { - String fileName = getBaseDir() + "/testRollback.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName, 5); - s.setAutoCommitDelay(0); - assertEquals(0, s.getCurrentVersion()); - MVMap m = s.openMap("data"); - s.rollbackTo(0); - assertTrue(m.isClosed()); - assertEquals(0, s.getCurrentVersion()); - m = s.openMap("data"); + try (MVStore s = openStore(fileName, 5)) { + s.setAutoCommitDelay(0); + assertEquals(0, s.getCurrentVersion()); + MVMap m = s.openMap("data"); + s.rollbackTo(0); + assertTrue(m.isClosed()); + assertEquals(0, s.getCurrentVersion()); + m = s.openMap("data"); - MVMap m0 = s.openMap("data0"); - MVMap m2 = s.openMap("data2"); - m.put("1", "Hello"); - for (int i = 0; i < 10; i++) { - m2.put("" + i, "Test"); - } - long v1 = s.commit(); - assertEquals(1, v1); - assertEquals(1, s.getCurrentVersion()); - MVMap m1 = s.openMap("data1"); - assertEquals("Test", 
m2.get("1")); - m.put("1", "Hallo"); - m0.put("1", "Hallo"); - m1.put("1", "Hallo"); - m2.clear(); - assertEquals("Hallo", m.get("1")); - assertEquals("Hallo", m1.get("1")); - s.rollbackTo(v1); - assertEquals(1, s.getCurrentVersion()); - for (int i = 0; i < 10; i++) { - assertEquals("Test", m2.get("" + i)); - } - assertEquals("Hello", m.get("1")); - assertNull(m0.get("1")); - assertTrue(m1.isClosed()); - assertFalse(m0.isReadOnly()); - s.close(); + MVMap m0 = s.openMap("data0"); + MVMap m2 = s.openMap("data2"); + m.put("1", "Hello"); + for (int i = 0; i < 10; i++) { + m2.put("" + i, "Test"); + } + long v1 = s.commit(); + assertEquals(1, v1); + assertEquals(1, s.getCurrentVersion()); + MVMap m1 = s.openMap("data1"); + assertEquals("Test", m2.get("1")); + m.put("1", "Hallo"); + m0.put("1", "Hallo"); + m1.put("1", "Hallo"); + m2.clear(); + assertEquals("Hallo", m.get("1")); + assertEquals("Hallo", m1.get("1")); + s.rollbackTo(v1); + assertEquals(1, s.getCurrentVersion()); + for (int i = 0; i < 10; i++) { + assertEquals("Test", m2.get("" + i)); + } + assertEquals("Hello", m.get("1")); + assertNull(m0.get("1")); + assertTrue(m1.isClosed()); + assertFalse(m0.isReadOnly()); + } } private void testMeta() { - String fileName = getBaseDir() + "/testMeta.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - s.setRetentionTime(Integer.MAX_VALUE); - MVMap m = s.getMetaMap(); - assertEquals("[]", s.getMapNames().toString()); - MVMap data = s.openMap("data"); - data.put("1", "Hello"); - data.put("2", "World"); - s.commit(); - assertEquals(1, s.getCurrentVersion()); - assertFalse(m.containsKey("chunk.2")); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(Integer.MAX_VALUE); + MVMap m = s.getMetaMap(); + assertEquals("[]", s.getMapNames().toString()); + MVMap data = s.openMap("data"); + data.put("1", "Hello"); + data.put("2", "World"); + s.commit(); + assertEquals(1, s.getCurrentVersion()); - 
assertEquals("[data]", s.getMapNames().toString()); - assertEquals("data", s.getMapName(data.getId())); - assertNull(s.getMapName(s.getMetaMap().getId())); - assertNull(s.getMapName(data.getId() + 1)); + assertEquals("[data]", s.getMapNames().toString()); + assertEquals("data", s.getMapName(data.getId())); + assertNull(s.getMapName(s.getMetaMap().getId())); + assertNull(s.getMapName(data.getId() + 1)); - String id = s.getMetaMap().get("name.data"); - assertEquals("name:data", m.get("map." + id)); - assertEquals("Hello", data.put("1", "Hallo")); - s.commit(); - assertEquals("name:data", m.get("map." + id)); - assertTrue(m.get("root.1").length() > 0); - assertTrue(m.containsKey("chunk.1")); - - assertEquals(2, s.getCurrentVersion()); + String id = s.getMetaMap().get(DataUtils.META_NAME + "data"); + assertEquals("name:data", m.get(DataUtils.META_MAP + id)); + assertEquals("Hello", data.put("1", "Hallo")); + s.commit(); + assertEquals("name:data", m.get(DataUtils.META_MAP + id)); + m = s.getLayoutMap(); + assertTrue(m.get(DataUtils.META_ROOT + id).length() > 0); + assertTrue(m.containsKey(DataUtils.META_CHUNK + "1")); - s.rollbackTo(1); - assertEquals("Hello", data.get("1")); - assertEquals("World", data.get("2")); - assertFalse(m.containsKey("chunk.1")); - assertFalse(m.containsKey("chunk.2")); + assertEquals(2, s.getCurrentVersion()); - s.close(); + s.rollbackTo(1); + assertEquals("Hello", data.get("1")); + assertEquals("World", data.get("2")); + } } private void testInMemory() { for (int j = 0; j < 1; j++) { - MVStore s = openStore(null); - // s.setMaxPageSize(10); - // long t; - int len = 100; - // TreeMap m = new TreeMap(); - // HashMap m = New.hashMap(); - MVMap m = s.openMap("data"); - // t = System.currentTimeMillis(); - for (int i = 0; i < len; i++) { - assertNull(m.put(i, "Hello World")); - } - // System.out.println("put: " + (System.currentTimeMillis() - t)); - // t = System.currentTimeMillis(); - for (int i = 0; i < len; i++) { - assertEquals("Hello World", 
m.get(i)); - } - // System.out.println("get: " + (System.currentTimeMillis() - t)); - // t = System.currentTimeMillis(); - for (int i = 0; i < len; i++) { - assertEquals("Hello World", m.remove(i)); + try (MVStore s = openStore(null)) { + // s.setMaxPageSize(10); + int len = 100; + // TreeMap m = new TreeMap(); + // HashMap m = New.hashMap(); + MVMap m = s.openMap("data"); + for (int i = 0; i < len; i++) { + assertNull(m.put(i, "Hello World")); + } + for (int i = 0; i < len; i++) { + assertEquals("Hello World", m.get(i)); + } + for (int i = 0; i < len; i++) { + assertEquals("Hello World", m.remove(i)); + } + assertEquals(null, m.get(0)); + assertEquals(0, m.size()); } - // System.out.println("remove: " + - // (System.currentTimeMillis() - t)); - // System.out.println(); - assertEquals(null, m.get(0)); - assertEquals(0, m.size()); - s.close(); } } private void testLargeImport() { - String fileName = getBaseDir() + "/testImport.h3"; + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); int len = 1000; for (int j = 0; j < 5; j++) { FileUtils.delete(fileName); - MVStore s = openStore(fileName, 40); - MVMap m = s.openMap("data", - new MVMap.Builder() - .valueType(new RowDataType(new DataType[] { - new ObjectDataType(), - StringDataType.INSTANCE, - StringDataType.INSTANCE }))); - - // Profiler prof = new Profiler(); - // prof.startCollecting(); - // long t = System.currentTimeMillis(); - for (int i = 0; i < len;) { - Object[] o = new Object[3]; - o[0] = i; - o[1] = "Hello World"; - o[2] = "World"; - m.put(i, o); - i++; - if (i % 10000 == 0) { - s.commit(); + try (MVStore s = openStore(fileName, 40)) { + MVMap m = s.openMap("data", + new MVMap.Builder() + .valueType(new RowDataType(new DataType[]{ + new ObjectDataType(), + StringDataType.INSTANCE, + StringDataType.INSTANCE}))); + + // Profiler prof = new Profiler(); + // prof.startCollecting(); + // long t = System.nanoTime(); + for (int i = 0; i < len; ) { + Object[] o = new Object[3]; + 
o[0] = i; + o[1] = "Hello World"; + o[2] = "World"; + m.put(i, o); + i++; + if (i % 10000 == 0) { + s.commit(); + } } } - s.close(); // System.out.println(prof.getTop(5)); // System.out.println("store time " + - // (System.currentTimeMillis() - t)); + // TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t)); // System.out.println("store size " + // FileUtils.size(fileName)); } } private void testBtreeStore() { - String fileName = getBaseDir() + "/testBtreeStore.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - s.close(); + MVStore store = openStore(fileName); + store.close(); - s = openStore(fileName); - MVMap m = s.openMap("data"); int count = 2000; - // Profiler p = new Profiler(); - // p.startCollecting(); - // long t = System.currentTimeMillis(); - for (int i = 0; i < count; i++) { - assertNull(m.put(i, "hello " + i)); - assertEquals("hello " + i, m.get(i)); - } - // System.out.println("put: " + (System.currentTimeMillis() - t)); - // System.out.println(p.getTop(5)); - // p = new Profiler(); - //p.startCollecting(); - // t = System.currentTimeMillis(); - s.commit(); - // System.out.println("store: " + (System.currentTimeMillis() - t)); - // System.out.println(p.getTop(5)); - assertEquals("hello 0", m.remove(0)); - assertNull(m.get(0)); - for (int i = 1; i < count; i++) { - assertEquals("hello " + i, m.get(i)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < count; i++) { + assertNull(m.put(i, "hello " + i)); + assertEquals("hello " + i, m.get(i)); + } + s.commit(); + assertEquals("hello 0", m.remove(0)); + assertNull(m.get(0)); + for (int i = 1; i < count; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - assertNull(m.get(0)); - for (int i = 1; i < count; i++) { - assertEquals("hello " + i, m.get(i)); - } - for (int i = 1; i < count; i++) { - m.remove(i); - } - 
s.commit(); - assertNull(m.get(0)); - for (int i = 0; i < count; i++) { - assertNull(m.get(i)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + assertNull(m.get(0)); + for (int i = 1; i < count; i++) { + assertEquals("hello " + i, m.get(i)); + } + for (int i = 1; i < count; i++) { + m.remove(i); + } + s.commit(); + assertNull(m.get(0)); + for (int i = 0; i < count; i++) { + assertNull(m.get(i)); + } } - s.close(); } private void testCompactMapNotOpen() { - String fileName = getBaseDir() + "/testCompactNotOpen.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName, 1000); - MVMap m = s.openMap("data"); int factor = 100; - for (int j = 0; j < 10; j++) { - for (int i = j * factor; i < 10 * factor; i++) { - m.put(i, "Hello" + j); + try (MVStore s = openStore(fileName, 1000)) { + s.setAutoCommitDelay(0); + MVMap m = s.openMap("data"); + for (int j = 0; j < 10; j++) { + for (int i = j * factor; i < 10 * factor; i++) { + m.put(i, "Hello" + j); + } + s.commit(); } - s.commit(); } - s.close(); - s = openStore(fileName); - s.setRetentionTime(0); + try (MVStore s = openStore(fileName)) { + s.setAutoCommitDelay(0); + s.setRetentionTime(0); - Map meta = s.getMetaMap(); - int chunkCount1 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount1++; - } - } - s.compact(80, 1); - s.compact(80, 1); + Map layout = s.getLayoutMap(); + int chunkCount1 = getChunkCount(layout); + s.compact(80, 1); + s.compact(80, 1); - int chunkCount2 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount2++; - } - } - assertTrue(chunkCount2 >= chunkCount1); + int chunkCount2 = getChunkCount(layout); + assertTrue(chunkCount2 >= chunkCount1); - m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - boolean result = s.compact(50, 50 * 1024); - if (!result) { - break; + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + sleep(1); + boolean 
result = s.compact(50, 50 * 1024); + s.commit(); + if (!result) { + break; + } } - } - assertFalse(s.compact(50, 1024)); + assertFalse(s.compact(50, 1024)); + + int chunkCount3 = getChunkCount(layout); + + assertTrue(chunkCount1 + ">" + chunkCount2 + ">" + chunkCount3, + chunkCount3 < chunkCount1); - int chunkCount3 = 0; - for (String k : meta.keySet()) { - if (k.startsWith("chunk.")) { - chunkCount3++; + for (int i = 0; i < 10 * factor; i++) { + assertEquals("x" + i, "Hello" + (i / factor), m.get(i)); } } + } - assertTrue(chunkCount1 + ">" + chunkCount2 + ">" + chunkCount3, - chunkCount3 < chunkCount1); - - for (int i = 0; i < 10 * factor; i++) { - assertEquals("x" + i, "Hello" + (i / factor), m.get(i)); + private static int getChunkCount(Map layout) { + int chunkCount = 0; + for (String k : layout.keySet()) { + if (k.startsWith(DataUtils.META_CHUNK)) { + chunkCount++; + } } - s.close(); + return chunkCount; } private void testCompact() { - String fileName = getBaseDir() + "/testCompact.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); long initialLength = 0; for (int j = 0; j < 20; j++) { - MVStore s = openStore(fileName); - s.setRetentionTime(0); - MVMap m = s.openMap("data"); - for (int i = 0; i < 100; i++) { - m.put(j + i, "Hello " + j); + sleep(2); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(0); + s.setVersionsToKeep(0); + MVMap m = s.openMap("data"); + for (int i = 0; i < 100; i++) { + m.put(j + i, "Hello " + j); + } + trace("Before - fill rate: " + s.getFillRate() + "%, chunks fill rate: " + + s.getChunksFillRate() + ", len: " + FileUtils.size(fileName)); + s.compact(80, 2048); + s.compactMoveChunks(); + trace("After - fill rate: " + s.getFillRate() + "%, chunks fill rate: " + + s.getChunksFillRate() + ", len: " + FileUtils.size(fileName)); } - s.compact(80, 1024); - s.close(); long len = FileUtils.size(fileName); // System.out.println(" len:" + len); if (initialLength == 0) { @@ -1790,173 +1769,209 
@@ private void testCompact() { } // long len = FileUtils.size(fileName); // System.out.println("len0: " + len); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - for (int i = 0; i < 100; i++) { - m.remove(i); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 100; i++) { + m.remove(i); + } + s.compact(80, 1024); } - s.compact(80, 1024); - s.close(); + // len = FileUtils.size(fileName); // System.out.println("len1: " + len); - s = openStore(fileName); - m = s.openMap("data"); - s.compact(80, 1024); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + s.compact(80, 1024); + } // len = FileUtils.size(fileName); // System.out.println("len2: " + len); } private void testReuseSpace() { - String fileName = getBaseDir() + "/testReuseSpace.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); long initialLength = 0; for (int j = 0; j < 20; j++) { - MVStore s = openStore(fileName); - s.setRetentionTime(0); - MVMap m = s.openMap("data"); - for (int i = 0; i < 10; i++) { - m.put(i, "Hello"); - } - s.commit(); - for (int i = 0; i < 10; i++) { - assertEquals("Hello", m.get(i)); - assertEquals("Hello", m.remove(i)); + sleep(2); + try (MVStore s = openStore(fileName)) { + s.setRetentionTime(0); + s.setVersionsToKeep(0); + MVMap m = s.openMap("data"); + for (int i = 0; i < 10; i++) { + m.put(i, "Hello"); + } + s.commit(); + for (int i = 0; i < 10; i++) { + assertEquals("Hello", m.get(i)); + assertEquals("Hello", m.remove(i)); + } } - s.close(); long len = FileUtils.size(fileName); if (initialLength == 0) { initialLength = len; } else { assertTrue("len: " + len + " initial: " + initialLength + " j: " + j, - len <= initialLength * 2); + len <= initialLength * 3); } } } private void testRandom() { - String fileName = getBaseDir() + "/testRandom.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = 
openStore(fileName); - MVMap m = s.openMap("data"); - TreeMap map = new TreeMap(); - Random r = new Random(1); - int operationCount = 1000; - int maxValue = 30; - Integer expected, got; - for (int i = 0; i < operationCount; i++) { - int k = r.nextInt(maxValue); - int v = r.nextInt(); - boolean compareAll; - switch (r.nextInt(3)) { - case 0: - log(i + ": put " + k + " = " + v); - expected = map.put(k, v); - got = m.put(k, v); - if (expected == null) { - assertNull(got); - } else { - assertEquals(expected, got); - } - compareAll = true; - break; - case 1: - log(i + ": remove " + k); - expected = map.remove(k); - got = m.remove(k); - if (expected == null) { - assertNull(got); - } else { - assertEquals(expected, got); - } - compareAll = true; - break; - default: - Integer a = map.get(k); - Integer b = m.get(k); - if (a == null || b == null) { - assertTrue(a == b); - } else { - assertEquals(a.intValue(), b.intValue()); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + TreeMap map = new TreeMap<>(); + Random r = new Random(1); + int operationCount = 1000; + int maxValue = 30; + Integer expected, got; + for (int i = 0; i < operationCount; i++) { + int k = r.nextInt(maxValue); + int v = r.nextInt(); + boolean compareAll; + switch (r.nextInt(3)) { + case 0: + log(i + ": put " + k + " = " + v); + expected = map.put(k, v); + got = m.put(k, v); + if (expected == null) { + assertNull(got); + } else { + assertEquals(expected, got); + } + compareAll = true; + break; + case 1: + log(i + ": remove " + k); + expected = map.remove(k); + got = m.remove(k); + if (expected == null) { + assertNull(got); + } else { + assertEquals(expected, got); + } + compareAll = true; + break; + default: + Integer a = map.get(k); + Integer b = m.get(k); + if (a == null || b == null) { + assertTrue(a == b); + } else { + assertEquals(a.intValue(), b.intValue()); + } + compareAll = false; + break; } - compareAll = false; - break; - } - if (compareAll) { - Iterator it = 
m.keyIterator(null); - Iterator itExpected = map.keySet().iterator(); - while (itExpected.hasNext()) { - assertTrue(it.hasNext()); - expected = itExpected.next(); - got = it.next(); - assertEquals(expected, got); + if (compareAll) { + Iterator it = m.keyIterator(null); + for (Integer integer : map.keySet()) { + assertTrue(it.hasNext()); + expected = integer; + got = it.next(); + assertEquals(expected, got); + } + assertFalse(it.hasNext()); } - assertFalse(it.hasNext()); } } - s.close(); } private void testKeyValueClasses() { - String fileName = getBaseDir() + "/testKeyValueClasses.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap is = s.openMap("intString"); - is.put(1, "Hello"); - MVMap ii = s.openMap("intInt"); - ii.put(1, 10); - MVMap si = s.openMap("stringInt"); - si.put("Test", 10); - MVMap ss = s.openMap("stringString"); - ss.put("Hello", "World"); - s.close(); - s = openStore(fileName); - is = s.openMap("intString"); - assertEquals("Hello", is.get(1)); - ii = s.openMap("intInt"); - assertEquals(10, ii.get(1).intValue()); - si = s.openMap("stringInt"); - assertEquals(10, si.get("Test").intValue()); - ss = s.openMap("stringString"); - assertEquals("World", ss.get("Hello")); - s.close(); + try (MVStore s = openStore(fileName)) { + MVMap is = s.openMap("intString"); + is.put(1, "Hello"); + MVMap ii = s.openMap("intInt"); + ii.put(1, 10); + MVMap si = s.openMap("stringInt"); + si.put("Test", 10); + MVMap ss = s.openMap("stringString"); + ss.put("Hello", "World"); + } + + try (MVStore s = openStore(fileName)) { + MVMap is = s.openMap("intString"); + assertEquals("Hello", is.get(1)); + MVMap ii = s.openMap("intInt"); + assertEquals(10, ii.get(1).intValue()); + MVMap si = s.openMap("stringInt"); + assertEquals(10, si.get("Test").intValue()); + MVMap ss = s.openMap("stringString"); + assertEquals("World", ss.get("Hello")); + } } private void testIterate() { - String fileName = 
getBaseDir() + "/testIterate.h3"; + int size = config.big ? 1000 : 10; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - Iterator it = m.keyIterator(null); - assertFalse(it.hasNext()); - for (int i = 0; i < 10; i++) { - m.put(i, "hello " + i); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + Iterator it = m.keyIterator(null); + assertFalse(it.hasNext()); + for (int i = 0; i < size; i++) { + m.put(i, "hello " + i); + } + s.commit(); + it = m.keyIterator(null); + it.next(); + assertThrows(UnsupportedOperationException.class, it).remove(); + + it = m.keyIterator(null); + for (int i = 0; i < size; i++) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + assertThrows(NoSuchElementException.class, it).next(); + for (int j = 0; j < size; j++) { + it = m.keyIterator(j); + for (int i = j; i < size; i++) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + } } - s.commit(); - it = m.keyIterator(null); - it.next(); - assertThrows(UnsupportedOperationException.class, it).remove(); - - it = m.keyIterator(null); - for (int i = 0; i < 10; i++) { - assertTrue(it.hasNext()); - assertEquals(i, it.next().intValue()); - } - assertFalse(it.hasNext()); - assertNull(it.next()); - for (int j = 0; j < 10; j++) { - it = m.keyIterator(j); - for (int i = j; i < 10; i++) { + } + + private void testIterateReverse() { + int size = config.big ? 
1000 : 10; + String fileName = getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < size; i++) { + m.put(i, "hello " + i); + } + s.commit(); + Iterator it = m.keyIteratorReverse(null); + it.next(); + assertThrows(UnsupportedOperationException.class, it).remove(); + + it = m.keyIteratorReverse(null); + for (int i = size - 1; i >= 0; i--) { assertTrue(it.hasNext()); assertEquals(i, it.next().intValue()); } assertFalse(it.hasNext()); + assertThrows(NoSuchElementException.class, it).next(); + for (int j = 0; j < size; j++) { + it = m.keyIteratorReverse(j); + for (int i = j; i >= 0; i--) { + assertTrue(it.hasNext()); + assertEquals(i, it.next().intValue()); + } + assertFalse(it.hasNext()); + } } - s.close(); } private void testCloseTwice() { - String fileName = getBaseDir() + "/testCloseTwice.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore s = openStore(fileName); MVMap m = s.openMap("data"); @@ -1969,59 +1984,68 @@ private void testCloseTwice() { } private void testSimple() { - String fileName = getBaseDir() + "/testSimple.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); - MVStore s = openStore(fileName); - MVMap m = s.openMap("data"); - for (int i = 0; i < 3; i++) { - m.put(i, "hello " + i); - } - s.commit(); - assertEquals("hello 0", m.remove(0)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + for (int i = 0; i < 3; i++) { + m.put(i, "hello " + i); + } + s.commit(); + assertEquals("hello 0", m.remove(0)); - assertNull(m.get(0)); - for (int i = 1; i < 3; i++) { - assertEquals("hello " + i, m.get(i)); + assertNull(m.get(0)); + for (int i = 1; i < 3; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); - s = openStore(fileName); - m = s.openMap("data"); - assertNull(m.get(0)); - for (int i = 1; i < 3; i++) { - assertEquals("hello " + i, 
m.get(i)); + try (MVStore s = openStore(fileName)) { + MVMap m = s.openMap("data"); + assertNull(m.get(0)); + for (int i = 1; i < 3; i++) { + assertEquals("hello " + i, m.get(i)); + } } - s.close(); + } + + private void testInvalidSettings() { + assertThrows(IllegalArgumentException.class, + () -> new MVStore.Builder().fileName("test").fileStore(new OffHeapStore()).open()); } private void testLargerThan2G() { if (!config.big) { return; } - String fileName = getBaseDir() + "/testLargerThan2G.h3"; + String fileName = getBaseDir() + "/" + getTestName(); FileUtils.delete(fileName); MVStore store = new MVStore.Builder().cacheSize(16). fileName(fileName).open(); - MVMap map = store.openMap("test"); - long last = System.currentTimeMillis(); - String data = new String(new char[2500]).replace((char) 0, 'x'); - for (int i = 0;; i++) { - map.put(i, data); - if (i % 10000 == 0) { - store.commit(); - long time = System.currentTimeMillis(); - if (time - last > 2000) { - long mb = store.getFileStore().size() / 1024 / 1024; - trace(mb + "/4500"); - if (mb > 4500) { - break; + try { + MVMap map = store.openMap("test"); + long last = System.nanoTime(); + String data = new String(new char[2500]).replace((char) 0, 'x'); + for (int i = 0;; i++) { + map.put(i, data); + if (i % 10000 == 0) { + store.commit(); + long time = System.nanoTime(); + if (time - last > TimeUnit.SECONDS.toNanos(2)) { + long mb = store.getFileStore().size() / 1024 / 1024; + trace(mb + "/4500"); + if (mb > 4500) { + break; + } + last = time; } - last = time; } } + store.commit(); + store.close(); + } finally { + store.closeImmediately(); } - store.commit(); - store.close(); FileUtils.delete(fileName); } @@ -2053,6 +2077,7 @@ protected static MVStore openStore(String fileName, int pageSplitSize) { * * @param msg the message */ + @SuppressWarnings("unused") protected static void log(String msg) { // System.out.println(msg); } diff --git a/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java 
b/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java index 29bc5360dd..fc587d290d 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java +++ b/h2/src/test/org/h2/test/store/TestMVStoreBenchmark.java @@ -1,21 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.TimeUnit; import org.h2.mvstore.MVStore; import org.h2.test.TestBase; -import org.h2.util.New; /** * Tests the performance and memory usage claims in the documentation. @@ -35,17 +34,21 @@ public static void main(String... 
a) throws Exception { } @Override - public void test() throws Exception { + public boolean isEnabled() { if (!config.big) { - return; + return false; } - if (config.coverage || config.codeCoverage) { + if (config.codeCoverage) { // run only when _not_ using a code coverage tool, // because the tool might instrument our code but not // java.util.* - return; + return false; } + return true; + } + @Override + public void test() throws Exception { testPerformanceComparison(); testMemoryUsageComparison(); } @@ -78,25 +81,25 @@ private long[] getMemoryUsed(int count, int size) { ArrayList> mapList; long mem; - mapList = New.arrayList(); + mapList = new ArrayList<>(count); mem = getMemory(); for (int i = 0; i < count; i++) { - mapList.add(new HashMap(size)); + mapList.add(new ConcurrentHashMap(size)); } addEntries(mapList, size); hash = getMemory() - mem; mapList.size(); - mapList = New.arrayList(); + mapList.clear(); mem = getMemory(); for (int i = 0; i < count; i++) { - mapList.add(new TreeMap()); + mapList.add(new ConcurrentSkipListMap()); } addEntries(mapList, size); tree = getMemory() - mem; mapList.size(); - mapList = New.arrayList(); + mapList.clear(); mem = getMemory(); MVStore store = MVStore.open(null); for (int i = 0; i < count; i++) { @@ -123,14 +126,6 @@ private static void addEntries(List> mapList, int size) { } static long getMemory() { - try { - LinkedList list = new LinkedList(); - while (true) { - list.add(new byte[1024]); - } - } catch (OutOfMemoryError e) { - // ok - } for (int i = 0; i < 16; i++) { System.gc(); try { @@ -155,11 +150,10 @@ private void testPerformanceComparison() { MVStore store = MVStore.open(null); map = store.openMap("test"); mv = testPerformance(map, size); - map = new HashMap(size); - // map = new ConcurrentHashMap(size); + store.close(); + map = new ConcurrentHashMap<>(size); hash = testPerformance(map, size); - map = new TreeMap(); - // map = new ConcurrentSkipListMap(); + map = new ConcurrentSkipListMap<>(); tree = 
testPerformance(map, size); if (hash < tree && mv < tree * 1.5) { break; @@ -175,7 +169,7 @@ private long testPerformance(Map map, int size) { System.gc(); long time = 0; for (int t = 0; t < 3; t++) { - time = System.currentTimeMillis(); + time = System.nanoTime(); for (int b = 0; b < 3; b++) { for (int i = 0; i < size; i++) { map.put(i, "Hello World"); @@ -183,7 +177,7 @@ private long testPerformance(Map map, int size) { for (int a = 0; a < 5; a++) { for (int i = 0; i < size; i++) { String x = map.get(i); - assertTrue(x != null); + assertNotNull(x); } } for (int i = 0; i < size; i++) { @@ -191,9 +185,9 @@ private long testPerformance(Map map, int size) { } assertEquals(0, map.size()); } - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; } - trace(map.getClass().getName() + ": " + time); + trace(map.getClass().getName() + ": " + TimeUnit.NANOSECONDS.toMillis(time)); return time; } diff --git a/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java b/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java new file mode 100644 index 0000000000..1576724447 --- /dev/null +++ b/h2/src/test/org/h2/test/store/TestMVStoreCachePerformance.java @@ -0,0 +1,97 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.store; + +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; +import org.h2.util.Task; + +/** + * Tests the MVStore cache. + */ +public class TestMVStoreCachePerformance extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.test(); + } + + @Override + public void test() throws Exception { + testCache(1, ""); + testCache(1, "cache:"); + testCache(10, ""); + testCache(10, "cache:"); + testCache(100, ""); + testCache(100, "cache:"); + } + + private void testCache(int threadCount, String fileNamePrefix) { + String fileName = getBaseDir() + "/" + getTestName(); + fileName = fileNamePrefix + fileName; + FileUtils.delete(fileName); + MVStore store = new MVStore.Builder(). + fileName(fileName). + // cacheSize(1024). + open(); + final MVMap map = store.openMap("test"); + final AtomicInteger counter = new AtomicInteger(); + byte[] data = new byte[8 * 1024]; + final int count = 10000; + for (int i = 0; i < count; i++) { + map.put(i, data); + store.commit(); + if (i % 1000 == 0) { + // System.out.println("add " + i); + } + } + Task[] tasks = new Task[threadCount]; + for (int i = 0; i < threadCount; i++) { + tasks[i] = new Task() { + + @Override + public void call() throws Exception { + Random r = new Random(); + do { + int id = r.nextInt(count); + map.get(id); + counter.incrementAndGet(); + } while (!stop); + } + + }; + tasks[i].execute(); + } + for (int i = 0; i < 4; i++) { + // Profiler prof = new Profiler().startCollecting(); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + // ignore + } + // System.out.println(prof.getTop(5)); + // System.out.println(" " + counter.get() / (i + 1) + " op/s"); + } + // long time = System.nanoTime(); + for (Task t : tasks) { + t.get(); + } + store.close(); + System.out.println(counter.get() / 10000 + " ops/ms; " + + threadCount + " thread(s); " + fileNamePrefix); + } + +} diff --git a/h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java b/h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java new file mode 100644 index 0000000000..e05fcb8bb4 --- /dev/null +++ b/h2/src/test/org/h2/test/store/TestMVStoreConcurrent.java @@ -0,0 +1,804 @@ +/* + * Copyright 
2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.store; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import org.h2.mvstore.Chunk; +import org.h2.mvstore.DataUtils; +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.ObjectDataType; +import org.h2.store.fs.FileChannelInputStream; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; +import org.h2.util.Task; + +/** + * Tests concurrently accessing a tree map store. + */ +public class TestMVStoreConcurrent extends TestMVStore { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + FileUtils.createDirectories(getBaseDir()); + testInterruptReopenAsync(); + testInterruptReopenRetryNIO(); + testConcurrentSaveCompact(); + testConcurrentDataType(); + testConcurrentAutoCommitAndChange(); + testConcurrentReplaceAndRead(); + testConcurrentChangeAndCompact(); + testConcurrentChangeAndGetVersion(); + testConcurrentFree(); + testConcurrentStoreAndRemoveMap(); + testConcurrentStoreAndClose(); + testConcurrentOnlineBackup(); + testConcurrentMap(); + testConcurrentIterate(); + testConcurrentWrite(); + testConcurrentRead(); + } + + private void testInterruptReopenAsync() { + testInterruptReopen("async:"); + } + + private void testInterruptReopenRetryNIO() { + testInterruptReopen("retry:"); + } + + private void testInterruptReopen(String prefix) { + String fileName = prefix + getBaseDir() + "/" + getTestName(); + FileUtils.delete(fileName); + final MVStore s = new MVStore.Builder(). + fileName(fileName). + cacheSize(0). + open(); + final Thread mainThread = Thread.currentThread(); + Task task = new Task() { + @Override + public void call() throws Exception { + while (!stop) { + mainThread.interrupt(); + Thread.sleep(10); + } + } + }; + try { + MVMap map = s.openMap("data"); + task.execute(); + for (int i = 0; i < 1000 && !task.isFinished(); i++) { + map.get(i % 1000); + map.put(i % 1000, new byte[1024]); + s.commit(); + } + } finally { + task.get(); + s.close(); + } + } + + private void testConcurrentSaveCompact() { + String fileName = "memFS:" + getTestName(); + FileUtils.delete(fileName); + MVStore.Builder builder = new MVStore.Builder(). + fileName(fileName). 
+ cacheSize(0); + try (final MVStore s = builder.open()) { + s.setRetentionTime(0); + final MVMap dataMap = s.openMap("data"); + Task task = new Task() { + @Override + public void call() { + int i = 0; + while (!stop) { + s.compact(100, 1024 * 1024); + MVStore.TxCounter token = s.registerVersionUsage(); + try { + dataMap.put(i % 1000, i * 10); + } finally { + s.deregisterVersionUsage(token); + } + s.commit(); + i++; + } + } + }; + task.execute(); + for (int i = 0; i < 1000 && !task.isFinished(); i++) { + s.compact(100, 1024 * 1024); + MVStore.TxCounter token = s.registerVersionUsage(); + try { + dataMap.put(i % 1000, i * 10); + } finally { + s.deregisterVersionUsage(token); + } + s.commit(); + } + task.get(); + } + } + + private void testConcurrentDataType() throws InterruptedException { + final ObjectDataType type = new ObjectDataType(); + final Object[] data = new Object[]{ + null, + -1, + 1, + 10, + "Hello", + new Object[]{ new byte[]{(byte) -1, (byte) 1}, null}, + new Object[]{ new byte[]{(byte) 1, (byte) -1}, 10}, + new Object[]{ new byte[]{(byte) -1, (byte) 1}, 20L}, + new Object[]{ new byte[]{(byte) 1, (byte) -1}, 5}, + }; + Arrays.sort(data, type::compare); + Task[] tasks = new Task[2]; + for (int i = 0; i < tasks.length; i++) { + tasks[i] = new Task() { + @Override + public void call() { + Random r = new Random(); + WriteBuffer buff = new WriteBuffer(); + while (!stop) { + int a = r.nextInt(data.length); + int b = r.nextInt(data.length); + int comp; + if (r.nextBoolean()) { + comp = type.compare(a, b); + } else { + comp = -type.compare(b, a); + } + buff.clear(); + type.write(buff, a); + buff.clear(); + type.write(buff, b); + if (a == b) { + assertEquals(0, comp); + } else { + assertEquals(a > b ? 
1 : -1, comp); + } + } + } + }; + tasks[i].execute(); + } + try { + Thread.sleep(100); + } finally { + for (Task t : tasks) { + t.get(); + } + } + } + + private void testConcurrentAutoCommitAndChange() throws InterruptedException { + String fileName = "memFS:" + getTestName(); + FileUtils.delete(fileName); + MVStore.Builder builder = new MVStore.Builder() + .fileName(fileName) + .pageSplitSize(1000); + try (MVStore s = builder.open()) { + s.setRetentionTime(1000); + s.setAutoCommitDelay(1); + final CountDownLatch latch = new CountDownLatch(2); + Task task = new Task() { + @Override + public void call() { + latch.countDown(); + while (!stop) { + s.compact(100, 1024 * 1024); + } + } + }; + final MVMap dataMap = s.openMap("data"); + final MVMap dataSmallMap = s.openMap("dataSmall"); + s.openMap("emptyMap"); + final AtomicInteger counter = new AtomicInteger(); + Task task2 = new Task() { + @Override + public void call() { + latch.countDown(); + while (!stop) { + int i = counter.getAndIncrement(); + dataMap.put(i, i * 10); + dataSmallMap.put(i % 100, i * 10); + if (i % 100 == 0) { + dataSmallMap.clear(); + } + } + } + }; + task.execute(); + task2.execute(); + latch.await(); + for (int i = 0; !task.isFinished() && !task2.isFinished() && i < 1000; i++) { + MVMap map = s.openMap("d" + (i % 3)); + map.put(0, i); + s.commit(); + } + task.get(); + task2.get(); + for (int i = 0; i < counter.get(); i++) { + assertEquals(10 * i, dataMap.get(i).intValue()); + } + } + } + + private void testConcurrentReplaceAndRead() throws InterruptedException { + final MVStore s = new MVStore.Builder().open(); + final MVMap map = s.openMap("data"); + for (int i = 0; i < 100; i++) { + map.put(i, i % 100); + } + Task task = new Task() { + @Override + public void call() { + int i = 0; + while (!stop) { + map.put(i % 100, i % 100); + i++; + if (i % 1000 == 0) { + s.commit(); + } + } + } + }; + task.execute(); + try { + Thread.sleep(1); + for (int i = 0; !task.isFinished() && i < 1000000; i++) { + 
assertEquals(i % 100, map.get(i % 100).intValue()); + } + } finally { + task.get(); + } + s.close(); + } + + private void testConcurrentChangeAndCompact() throws InterruptedException { + String fileName = "memFS:" + getTestName(); + FileUtils.delete(fileName); + final MVStore s = new MVStore.Builder().fileName( + fileName). + pageSplitSize(10). + autoCommitDisabled().open(); + s.setRetentionTime(10000); + try { + Task task = new Task() { + @Override + public void call() { + while (!stop) { + s.compact(100, 1024 * 1024); + } + } + }; + task.execute(); + Task task2 = new Task() { + @Override + public void call() { + while (!stop) { + s.compact(100, 1024 * 1024); + } + } + }; + task2.execute(); + Thread.sleep(1); + for (int i = 0; !task.isFinished() && !task2.isFinished() && i < 1000; i++) { + MVMap map = s.openMap("d" + (i % 3)); + // MVMap map = s.openMap("d" + (i % 3), + // new MVMapConcurrent.Builder()); + map.put(0, i); + map.get(0); + s.commit(); + } + task.get(); + task2.get(); + } finally { + s.close(); + } + } + + private static void testConcurrentChangeAndGetVersion() throws InterruptedException { + for (int test = 0; test < 10; test++) { + try (final MVStore s = new MVStore.Builder().autoCommitDisabled().open()) { + s.setVersionsToKeep(10); + final MVMap m = s.openMap("data"); + m.put(1, 1); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + m.put(1, 1); + s.commit(); + } + } + }; + task.execute(); + Thread.sleep(1); + for (int i = 0; i < 10000; i++) { + if (task.isFinished()) { + break; + } + for (int j = 0; j < 20; j++) { + m.put(1, 1); + s.commit(); + } + s.setVersionsToKeep(15); + long version = s.getCurrentVersion() - 1; + try { + m.openVersion(version); + } catch (IllegalArgumentException e) { + // ignore + } + s.setVersionsToKeep(20); + } + task.get(); + s.commit(); + } + } + } + + private void testConcurrentFree() throws InterruptedException { + String fileName = "memFS:" + getTestName(); + for (int test = 0; test < 
10; test++) { + FileUtils.delete(fileName); + final MVStore s1 = new MVStore.Builder(). + fileName(fileName).autoCommitDisabled().open(); + s1.setRetentionTime(0); + final int count = 200; + for (int i = 0; i < count; i++) { + MVMap m = s1.openMap("d" + i); + m.put(1, 1); + if (i % 2 == 0) { + s1.commit(); + } + } + s1.close(); + MVStore.Builder builder = new MVStore.Builder(). + fileName(fileName).autoCommitDisabled(); + try (final MVStore s = builder.open()) { + s.setRetentionTime(0); + s.setVersionsToKeep(0); + final ArrayList> list = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + MVMap m = s.openMap("d" + i); + list.add(m); + } + + final AtomicInteger counter = new AtomicInteger(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + int x = counter.getAndIncrement(); + if (x >= count) { + break; + } + MVMap m = list.get(x); + m.clear(); + s.removeMap(m); + } + } + }; + task.execute(); + Thread.sleep(1); + while (true) { + int x = counter.getAndIncrement(); + if (x >= count) { + break; + } + MVMap m = list.get(x); + m.clear(); + s.removeMap(m); + if (x % 5 == 0) { + s.commit(); + } + } + task.get(); + // this will mark old chunks as unused, + // but not remove (and overwrite) them yet + MVMap m = s.openMap("dummy"); + m.put(0, 0); + s.commit(); + // this will remove them, so we end up with + // one unused one, and one active one + m.put(1, 1); + s.commit(); + m.put(2, 2); + s.commit(); + + MVMap layoutMap = s.getLayoutMap(); + int chunkCount = 0; + for (String k : layoutMap.keyList()) { + if (k.startsWith(DataUtils.META_CHUNK)) { + // dead chunks may stay around for a little while + // discount them + Chunk chunk = Chunk.fromString(layoutMap.get(k)); + if (chunk.maxLenLive > 0) { + chunkCount++; + } + } + } + assertTrue("" + chunkCount, chunkCount < 3); + } + } + } + + private void testConcurrentStoreAndRemoveMap() throws InterruptedException { + String fileName = "memFS:" + getTestName(); + 
FileUtils.delete(fileName); + try (MVStore s = openStore(fileName)) { + int count = 200; + for (int i = 0; i < count; i++) { + MVMap m = s.openMap("d" + i); + m.put(1, 1); + } + final AtomicInteger counter = new AtomicInteger(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + counter.incrementAndGet(); + s.commit(); + } + } + }; + task.execute(); + Thread.sleep(1); + for (int i = 0; i < count || counter.get() < count; i++) { + MVMap m = s.openMap("d" + i); + m.put(1, 10); + s.removeMap(m); + if (task.isFinished()) { + break; + } + } + task.get(); + } + } + + private void testConcurrentStoreAndClose() throws InterruptedException { + String fileName = "memFS:" + getTestName(); + for (int i = 0; i < 10; i++) { + FileUtils.delete(fileName); + try (MVStore s = openStore(fileName)) { + final AtomicInteger counter = new AtomicInteger(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + s.setStoreVersion(counter.incrementAndGet()); + s.commit(); + } + } + }; + task.execute(); + while (counter.get() < 5) { + Thread.sleep(1); + } + try { + s.close(); + // sometimes closing works, in which case + // storing must fail at some point (not necessarily + // immediately) + for (int x = counter.get(), y = x + 2; x <= y; x++) { + Thread.sleep(1); + } + Exception e = task.getException(); + if (e != null) { + checkErrorCode(DataUtils.ERROR_CLOSED, e); + } + } catch (MVStoreException e) { + // sometimes storing works, in which case + // closing must fail + assertEquals(DataUtils.ERROR_WRITING_FAILED, e.getErrorCode()); + task.get(); + } + } + } + } + + /** + * Test the concurrent map implementation. 
+ */ + private static void testConcurrentMap() throws InterruptedException { + try (MVStore s = openStore(null)) { + final MVMap m = s.openMap("data"); + final int size = 20; + final Random rand = new Random(1); + Task task = new Task() { + @Override + public void call() { + try { + while (!stop) { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 1); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + m.firstKey(); + m.lastKey(); + m.ceilingKey(5); + m.floorKey(5); + m.higherKey(5); + m.lowerKey(5); + for (Iterator it = m.keyIterator(null); + it.hasNext();) { + it.next(); + } + } + } catch (Exception e) { + e.printStackTrace(); + } + } + }; + task.execute(); + Thread.sleep(1); + for (int j = 0; j < 100; j++) { + for (int i = 0; i < 100; i++) { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 2); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + } + s.commit(); + Thread.sleep(1); + } + task.get(); + } + } + + private void testConcurrentOnlineBackup() throws Exception { + String fileName = getBaseDir() + "/" + getTestName(); + String fileNameRestore = getBaseDir() + "/" + getTestName() + "2"; + try (final MVStore s = openStore(fileName)) { + final MVMap map = s.openMap("test"); + final Random r = new Random(); + Task task = new Task() { + @Override + public void call() throws Exception { + while (!stop) { + for (int i = 0; i < 10; i++) { + map.put(i, new byte[100 * r.nextInt(100)]); + } + s.commit(); + map.clear(); + s.commit(); + long len = s.getFileStore().size(); + if (len > 1024 * 1024) { + // slow down writing a lot + Thread.sleep(200); + } else if (len > 20 * 1024) { + // slow down writing + Thread.sleep(20); + } + } + } + }; + task.execute(); + try { + for (int i = 0; i < 10; i++) { + // System.out.println("test " + i); + s.setReuseSpace(false); + OutputStream out = new BufferedOutputStream( + new FileOutputStream(fileNameRestore)); + long len = s.getFileStore().size(); + 
copyFileSlowly(s.getFileStore().getFile(), + len, out); + out.close(); + s.setReuseSpace(true); + MVStore s2 = openStore(fileNameRestore); + MVMap test = s2.openMap("test"); + for (Integer k : test.keySet()) { + test.get(k); + } + s2.close(); + // let it compact + Thread.sleep(10); + } + } finally { + task.get(); + } + } + } + + private static void copyFileSlowly(FileChannel file, long length, OutputStream out) + throws Exception { + file.position(0); + try (InputStream in = new BufferedInputStream(new FileChannelInputStream( + file, false))) { + for (int j = 0; j < length; j++) { + int x = in.read(); + if (x < 0) { + break; + } + out.write(x); + } + } + } + + private static void testConcurrentIterate() { + try (MVStore s = new MVStore.Builder().pageSplitSize(3).open()) { + s.setVersionsToKeep(100); + final MVMap map = s.openMap("test"); + final int len = 10; + final Random r = new Random(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + int x = r.nextInt(len); + if (r.nextBoolean()) { + map.remove(x); + } else { + map.put(x, r.nextInt(100)); + } + } + } + }; + task.execute(); + try { + for (int k = 0; k < 10000; k++) { + Iterator it = map.keyIterator(r.nextInt(len)); + long old = map.getVersion(); + s.commit(); + while (map.getVersion() == old) { + Thread.yield(); + } + while (it.hasNext()) { + it.next(); + } + } + } finally { + task.get(); + } + } + } + + + /** + * Test what happens on concurrent write. Concurrent write may corrupt the + * map, so that keys and values may become null. 
+ */ + private void testConcurrentWrite() throws InterruptedException { + final AtomicInteger detected = new AtomicInteger(); + final AtomicInteger notDetected = new AtomicInteger(); + for (int i = 0; i < 10; i++) { + testConcurrentWrite(detected, notDetected); + } + // in most cases, it should be detected + assertTrue(notDetected.get() * 10 <= detected.get()); + } + + private static void testConcurrentWrite(final AtomicInteger detected, + final AtomicInteger notDetected) throws InterruptedException { + try (final MVStore s = openStore(null)) { + final MVMap m = s.openMap("data"); + final int size = 20; + final Random rand = new Random(1); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + try { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 1); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + } catch (ConcurrentModificationException e) { + detected.incrementAndGet(); + } catch (NegativeArraySizeException + | ArrayIndexOutOfBoundsException + | IllegalArgumentException + | NullPointerException e) { + notDetected.incrementAndGet(); + } + } + } + }; + task.execute(); + try { + Thread.sleep(1); + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + try { + if (rand.nextBoolean()) { + m.put(rand.nextInt(size), 2); + } else { + m.remove(rand.nextInt(size)); + } + m.get(rand.nextInt(size)); + } catch (ConcurrentModificationException e) { + detected.incrementAndGet(); + } catch (NegativeArraySizeException + | ArrayIndexOutOfBoundsException + | NullPointerException + | IllegalArgumentException e) { + notDetected.incrementAndGet(); + } + } + s.commit(); + Thread.sleep(1); + } + } finally { + task.get(); + } + } + } + + private static void testConcurrentRead() throws InterruptedException { + try (final MVStore s = openStore(null)) { + s.setVersionsToKeep(100); + final MVMap m = s.openMap("data"); + final int size = 3; + int x = (int) s.getCurrentVersion(); + for (int i = 0; i < size; i++) { + 
m.put(i, x); + } + s.commit(); + Task task = new Task() { + @Override + public void call() { + while (!stop) { + long v = s.getCurrentVersion() - 1; + Map old = m.openVersion(v); + for (int i = 0; i < size; i++) { + Integer x = old.get(i); + if (x == null || (int) v != x) { + Map old2 = m.openVersion(v); + throw new AssertionError(x + "<>" + v + " at " + i + " " + old2); + } + } + } + } + }; + task.execute(); + try { + Thread.sleep(1); + for (int j = 0; j < 100; j++) { + x = (int) s.getCurrentVersion(); + for (int i = 0; i < size; i++) { + m.put(i, x); + } + s.commit(); + Thread.sleep(1); + } + } finally { + task.get(); + } + } + } +} diff --git a/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java b/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java new file mode 100644 index 0000000000..b4c7a885f6 --- /dev/null +++ b/h2/src/test/org/h2/test/store/TestMVStoreStopCompact.java @@ -0,0 +1,80 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.store; + +import java.util.Random; + +import org.h2.mvstore.MVMap; +import org.h2.mvstore.MVStore; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; + +/** + * Test that the MVStore eventually stops optimizing (does not excessively opti + */ +public class TestMVStoreStopCompact extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.config.big = true; + test.test(); + } + + @Override + public void test() throws Exception { + for(int retentionTime = 10; retentionTime < 1000; retentionTime *= 10) { + for(int timeout = 100; timeout <= 1000; timeout *= 10) { + testStopCompact(retentionTime, timeout); + } + } + } + + private void testStopCompact(int retentionTime, int timeout) throws InterruptedException { + String fileName = getBaseDir() + "/testStopCompact.h3"; + FileUtils.createDirectories(getBaseDir()); + FileUtils.delete(fileName); + // store with a very small page size, to make sure + // there are many leaf pages + MVStore.Builder builder = new MVStore.Builder().fileName(fileName); + try (MVStore s = builder.open()) { + s.setRetentionTime(retentionTime); + s.setVersionsToKeep(0); + MVMap map = s.openMap("data"); + long start = System.currentTimeMillis(); + Random r = new Random(1); + for (int i = 0; i < 4_000_000; i++) { + long time = System.currentTimeMillis() - start; + if (time > timeout) { + break; + } + int x = r.nextInt(10_000_000); + map.put(x, "Hello World " + i * 10); + } + s.setAutoCommitDelay(100); + long oldWriteCount = s.getFileStore().getWriteCount(); + long totalWrites = 0; + // expect background write to stop after a few seconds + for (int i = 0; i < 50; i++) { + Thread.sleep(200); + long newWriteCount = s.getFileStore().getWriteCount(); + long delta = newWriteCount - oldWriteCount; + if (delta == 0) { + break; + } + totalWrites += delta; + oldWriteCount = newWriteCount; + } + // expect that compaction didn't cause many writes + assertTrue("writeCount diff: " + retentionTime + "/" + timeout + " " + totalWrites, + totalWrites < 90); + } + } +} diff --git a/h2/src/test/org/h2/test/store/TestMVStoreTool.java b/h2/src/test/org/h2/test/store/TestMVStoreTool.java index 564918edf6..a63d85a785 100644 --- a/h2/src/test/org/h2/test/store/TestMVStoreTool.java +++ 
b/h2/src/test/org/h2/test/store/TestMVStoreTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -12,7 +12,8 @@ import org.h2.mvstore.MVStore; import org.h2.mvstore.MVStoreTool; import org.h2.mvstore.rtree.MVRTreeMap; -import org.h2.mvstore.rtree.SpatialKey; +import org.h2.mvstore.rtree.Spatial; +import org.h2.mvstore.db.SpatialKey; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; @@ -30,7 +31,7 @@ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; test.config.big = true; - test.test(); + test.testFromMain(); } @Override @@ -40,6 +41,9 @@ public void test() throws Exception { private void testCompact() { String fileName = getBaseDir() + "/testCompact.h3"; + String fileNameNew = fileName + ".new"; + String fileNameCompressed = fileNameNew + ".compress"; + FileUtils.createDirectories(getBaseDir()); FileUtils.delete(fileName); // store with a very small page size, to make sure @@ -47,10 +51,19 @@ private void testCompact() { MVStore s = new MVStore.Builder(). pageSplitSize(1000). fileName(fileName).autoCommitDisabled().open(); + s.setRetentionTime(0); + long start = System.currentTimeMillis(); MVMap map = s.openMap("data"); - for (int i = 0; i < 10; i++) { + int size = config.big ? 
2_000_000 : 20_000; + for (int i = 0; i < size; i++) { map.put(i, "Hello World " + i * 10); - if (i % 3 == 0) { + if (i % 10000 == 0) { + s.commit(); + } + } + for (int i = 0; i < size; i += 2) { + map.remove(i); + if (i % 10000 == 0) { s.commit(); } } @@ -75,29 +88,38 @@ private void testCompact() { } } s.close(); + trace("Created in " + (System.currentTimeMillis() - start) + " ms."); - MVStoreTool.compact(fileName, fileName + ".new", false); - MVStoreTool.compact(fileName, fileName + ".new.compress", true); + start = System.currentTimeMillis(); + MVStoreTool.compact(fileName, fileNameNew, false); + MVStoreTool.compact(fileName, fileNameCompressed, true); + trace("Compacted in " + (System.currentTimeMillis() - start) + " ms."); + long size1 = FileUtils.size(fileName); + long size2 = FileUtils.size(fileNameNew); + long size3 = FileUtils.size(fileNameCompressed); + assertTrue("size1: " + size1 + " size2: " + size2 + " size3: " + size3, + size2 < size1 && size3 < size2); + + start = System.currentTimeMillis(); + MVStoreTool.compact(fileNameNew, false); + assertTrue(100L * Math.abs(size2 - FileUtils.size(fileNameNew)) / size2 < 1); + MVStoreTool.compact(fileNameCompressed, true); + assertEquals(size3, FileUtils.size(fileNameCompressed)); + trace("Re-compacted in " + (System.currentTimeMillis() - start) + " ms."); + + start = System.currentTimeMillis(); MVStore s1 = new MVStore.Builder(). fileName(fileName).readOnly().open(); MVStore s2 = new MVStore.Builder(). - fileName(fileName + ".new").readOnly().open(); + fileName(fileNameNew).readOnly().open(); MVStore s3 = new MVStore.Builder(). 
- fileName(fileName + ".new.compress").readOnly().open(); + fileName(fileNameCompressed).readOnly().open(); assertEquals(s1, s2); assertEquals(s1, s3); s1.close(); s2.close(); s3.close(); - long size1 = FileUtils.size(fileName); - long size2 = FileUtils.size(fileName + ".new"); - long size3 = FileUtils.size(fileName + ".new.compress"); - assertTrue("size1: " + size1 + " size2: " + size2 + " size3: " + size3, - size2 < size1 && size3 < size2); - MVStoreTool.compact(fileName, false); - assertEquals(size2, FileUtils.size(fileName)); - MVStoreTool.compact(fileName, true); - assertEquals(size3, FileUtils.size(fileName)); + trace("Verified in " + (System.currentTimeMillis() - start) + " ms."); } private void assertEquals(MVStore a, MVStore b) { @@ -109,9 +131,9 @@ private void assertEquals(MVStore a, MVStore b) { MVRTreeMap mb = b.openMap( mapName, new MVRTreeMap.Builder()); assertEquals(ma.sizeAsLong(), mb.sizeAsLong()); - for (Entry e : ma.entrySet()) { + for (Entry e : ma.entrySet()) { Object x = mb.get(e.getKey()); - assertEquals(e.getValue().toString(), x.toString()); + assertEquals(e.getValue(), x.toString()); } } else { diff --git a/h2/src/test/org/h2/test/store/TestMVTableEngine.java b/h2/src/test/org/h2/test/store/TestMVTableEngine.java index 88481481c4..3c2d421eba 100644 --- a/h2/src/test/org/h2/test/store/TestMVTableEngine.java +++ b/h2/src/test/org/h2/test/store/TestMVTableEngine.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; import java.io.ByteArrayInputStream; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; @@ -18,27 +19,31 @@ import java.sql.SQLException; import java.sql.Savepoint; import java.sql.Statement; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.engine.Database; import org.h2.jdbc.JdbcConnection; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.TransactionStore; +import org.h2.mvstore.db.LobStorageMap; +import org.h2.mvstore.tx.TransactionStore; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.tools.DeleteDbFiles; +import org.h2.test.TestDb; import org.h2.tools.Recover; import org.h2.tools.Restore; +import org.h2.util.IOUtils; import org.h2.util.JdbcUtils; import org.h2.util.Task; +import org.h2.value.Value; /** * Tests the MVStore in a database. */ -public class TestMVTableEngine extends TestBase { +public class TestMVTableEngine extends TestDb { /** * Run just this test. @@ -46,17 +51,24 @@ public class TestMVTableEngine extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + } + + @Override + public boolean isEnabled() { + return true; } @Override public void test() throws Exception { +/* + testLobCopy(); testLobReuse(); testShutdownDuringLobCreation(); testLobCreationThenShutdown(); testManyTransactions(); testAppendOnly(); - testLowRetentionTime(); + testNoRetentionTime(); testOldAndNew(); testTemporaryTables(); testUniqueIndex(); @@ -67,7 +79,9 @@ public void test() throws Exception { testMinMaxWithNull(); testTimeout(); testExplainAnalyze(); - testTransactionLogUsuallyNotStored(); + if (!config.memory) { + testTransactionLogEmptyAfterCommit(); + } testShrinkDatabaseFile(); testTwoPhaseCommit(); testRecover(); @@ -79,382 +93,392 @@ public void test() throws Exception { testAutoCommit(); testReopen(); testBlob(); - testExclusiveLock(); testEncryption(); testReadOnly(); testReuseDiskSpace(); +*/ testDataTypes(); - testLocking(); - testSimple(); +// testSimple(); +// if (!config.travis) { +// testReverseDeletePerformance(); +// } } - private void testLobReuse() throws Exception { - deleteDb("testLobReuse"); - Connection conn = getConnection("testLobReuse"); + private void testLobCopy() throws Exception { + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); Statement stat = conn.createStatement(); - stat.execute("create table test(id identity primary key, lob clob)"); + stat.execute("create table test(id int primary key, data clob)"); + stat = conn.createStatement(); + stat.execute("insert into test(id, data) values(2, space(300))"); + stat.execute("insert into test(id, data) values(1, space(300))"); + stat.execute("alter table test add column x int"); + if (!config.memory) { + conn.close(); + conn = getConnection(getTestName()); + } + stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select data from test"); + while (rs.next()) { + rs.getString(1); + } conn.close(); - byte[] buffer = 
new byte[8192]; - for (int i = 0; i < 20; i++) { - conn = getConnection("testLobReuse"); - stat = conn.createStatement(); - stat.execute("insert into test(lob) select space(1025) from system_range(1, 10)"); - stat.execute("delete from test where random() > 0.5"); - ResultSet rs = conn.createStatement().executeQuery( - "select lob from test"); - while (rs.next()) { - InputStream is = rs.getBinaryStream(1); - while (is.read(buffer) != -1) { - // ignore + } + + private void testLobReuse() throws Exception { + deleteDb(getTestName()); + try (Connection conn1 = getConnection(getTestName())) { + Statement stat = conn1.createStatement(); + stat.execute("create table test(id identity primary key, lob clob)"); + byte[] buffer = new byte[8192]; + for (int i = 0; i < 20; i++) { + try (Connection conn2 = getConnection(getTestName())) { + stat = conn2.createStatement(); + stat.execute("insert into test(lob) select space(1025) from system_range(1, 10)"); + stat.execute("delete from test where random() > 0.5"); + ResultSet rs = conn2.createStatement().executeQuery( + "select lob from test"); + while (rs.next()) { + InputStream is = rs.getBinaryStream(1); + while (is.read(buffer) != -1) { + // ignore + } + } } } - conn.close(); } } private void testShutdownDuringLobCreation() throws Exception { - deleteDb("testShutdownDuringLobCreation"); - Connection conn = getConnection("testShutdownDuringLobCreation"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data clob) as select space(10000)"); - final PreparedStatement prep = conn - .prepareStatement("set @lob = ?"); - final AtomicBoolean end = new AtomicBoolean(); - Task t = new Task() { - - @Override - public void call() throws Exception { - prep.setBinaryStream(1, new InputStream() { - - int len; - - @Override - public int read() throws IOException { - if (len++ < 1024 * 1024 * 4) { - return 0; - } - end.set(true); - while (!stop) { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - // 
ignore + if (config.memory) { + return; + } + deleteDb(getTestName()); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test(data clob) as select space(10000)"); + final PreparedStatement prep = conn + .prepareStatement("set @lob = ?"); + final AtomicBoolean end = new AtomicBoolean(); + Task t = new Task() { + + @Override + public void call() throws Exception { + prep.setBinaryStream(1, new InputStream() { + + int len; + + @Override + public int read() throws IOException { + if (len++ < 1024 * 1024 * 4) { + return 0; + } + end.set(true); + while (!stop) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + // ignore + } } + return -1; } - return -1; - } - } , -1); + }, -1); + } + }; + t.execute(); + while (!end.get()) { + Thread.sleep(1); } - }; - t.execute(); - while (!end.get()) { - Thread.sleep(1); + stat.execute("checkpoint"); + stat.execute("shutdown immediately"); + Exception ex = t.getException(); + assertNotNull(ex); + IOUtils.closeSilently(conn); } - stat.execute("checkpoint"); - stat.execute("shutdown immediately"); - Exception ex = t.getException(); - assertTrue(ex != null); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("shutdown defrag"); } - conn = getConnection("testShutdownDuringLobCreation"); - stat = conn.createStatement(); - stat.execute("shutdown defrag"); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select * " + + "from information_schema.settings " + + "where setting_name = 'info.PAGE_COUNT'"); + rs.next(); + int pages = rs.getInt(2); + // only one lob should remain (but it is small and compressed) + assertTrue("p:" + pages, pages <= 7); } - conn = 
getConnection("testShutdownDuringLobCreation"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * " + - "from information_schema.settings " + - "where name = 'info.PAGE_COUNT'"); - rs.next(); - int pages = rs.getInt(2); - // only one lob should remain (but it is small and compressed) - assertTrue("p:" + pages, pages < 4); - conn.close(); } private void testLobCreationThenShutdown() throws Exception { - deleteDb("testLobCreationThenShutdown"); - Connection conn = getConnection("testLobCreationThenShutdown"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id identity, data clob)"); - PreparedStatement prep = conn - .prepareStatement("insert into test values(?, ?)"); - for (int i = 0; i < 9; i++) { - prep.setInt(1, i); - int size = i * i * i * i * 1024; - prep.setCharacterStream(2, new StringReader(new String( - new char[size]))); - prep.execute(); + if (config.memory) { + return; } - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore + deleteDb(getTestName()); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id identity, data clob)"); + PreparedStatement prep = conn + .prepareStatement("insert into test values(?, ?)"); + for (int i = 0; i < 9; i++) { + prep.setInt(1, i); + int size = i * i * i * i * 1024; + prep.setCharacterStream(2, new StringReader(new String( + new char[size]))); + prep.execute(); + } + stat.execute("shutdown immediately"); + IOUtils.closeSilently(conn); } - conn = getConnection("testLobCreationThenShutdown"); - stat = conn.createStatement(); - stat.execute("drop all objects"); - stat.execute("shutdown defrag"); - try { - conn.close(); - } catch (Exception e) { - // ignore + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("drop all objects"); + stat.execute("shutdown defrag"); + } + try 
(Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select * " + + "from information_schema.settings " + + "where setting_name = 'info.PAGE_COUNT'"); + rs.next(); + int pages = rs.getInt(2); + // no lobs should remain + assertTrue("p:" + pages, pages < 4); } - conn = getConnection("testLobCreationThenShutdown"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * " + - "from information_schema.settings " + - "where name = 'info.PAGE_COUNT'"); - rs.next(); - int pages = rs.getInt(2); - // no lobs should remain - assertTrue("p:" + pages, pages < 4); - conn.close(); } private void testManyTransactions() throws Exception { - deleteDb("testManyTransactions"); - Connection conn = getConnection("testManyTransactions"); - Statement stat = conn.createStatement(); - stat.execute("create table test()"); - conn.setAutoCommit(false); - stat.execute("insert into test values()"); + deleteDb(getTestName()); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("create table test()"); + conn.setAutoCommit(false); + stat.execute("insert into test values()"); - Connection conn2 = getConnection("testManyTransactions"); - Statement stat2 = conn2.createStatement(); - for (long i = 0; i < 100000; i++) { - stat2.execute("insert into test values()"); + try (Connection conn2 = getConnection(getTestName())) { + Statement stat2 = conn2.createStatement(); + for (long i = 0; i < 100000; i++) { + stat2.execute("insert into test values()"); + } + } } - conn2.close(); - conn.close(); } private void testAppendOnly() throws Exception { - deleteDb("testAppendOnly"); - Connection conn = getConnection( - "testAppendOnly"); - Statement stat = conn.createStatement(); - stat.execute("set retention_time 0"); - for (int i = 0; i < 10; i++) { - stat.execute("create table dummy" + i + - " as select x, space(100) from system_range(1, 1000)"); - 
stat.execute("checkpoint"); + if (config.memory) { + return; } - stat.execute("create table test as select x from system_range(1, 1000)"); - conn.close(); - String fileName = getBaseDir() + "/testAppendOnly" + Constants.SUFFIX_MV_FILE; + deleteDb(getTestName()); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("set retention_time 0"); + for (int i = 0; i < 10; i++) { + stat.execute("create table dummy" + i + + " as select x, space(100) from system_range(1, 1000)"); + stat.execute("checkpoint"); + } + stat.execute("create table test as select x from system_range(1, 1000)"); + } + + String fileName = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; long fileSize = FileUtils.size(fileName); - conn = getConnection( - "testAppendOnly;reuse_space=false"); - stat = conn.createStatement(); - stat.execute("set retention_time 0"); - for (int i = 0; i < 10; i++) { - stat.execute("drop table dummy" + i); - stat.execute("checkpoint"); + try (Connection conn = getConnection(getTestName() + ";reuse_space=false")) { + Statement stat = conn.createStatement(); + stat.execute("set retention_time 0"); + for (int i = 0; i < 10; i++) { + stat.execute("drop table dummy" + i); + stat.execute("checkpoint"); + } + stat.execute("alter table test alter column x rename to y"); + stat.execute("select y from test where 1 = 0"); + stat.execute("create table test2 as select x from system_range(1, 1000)"); } - stat.execute("alter table test alter column x rename to y"); - stat.execute("select y from test where 1 = 0"); - stat.execute("create table test2 as select x from system_range(1, 1000)"); - conn.close(); - FileChannel fc = FileUtils.open(fileName, "rw"); - // undo all changes - fc.truncate(fileSize); + try (FileChannel fc = FileUtils.open(fileName, "rw")) { + // undo all changes + fc.truncate(fileSize); + } - conn = getConnection( - "testAppendOnly"); - stat = conn.createStatement(); - stat.execute("select * from 
dummy0 where 1 = 0"); - stat.execute("select * from dummy9 where 1 = 0"); - stat.execute("select x from test where 1 = 0"); - conn.close(); + try (Connection conn = getConnection(getTestName())) { + Statement stat = conn.createStatement(); + stat.execute("select * from dummy0 where 1 = 0"); + stat.execute("select * from dummy9 where 1 = 0"); + stat.execute("select x from test where 1 = 0"); + } } - private void testLowRetentionTime() throws SQLException { - deleteDb("testLowRetentionTime"); - Connection conn = getConnection( - "testLowRetentionTime;RETENTION_TIME=10;WRITE_DELAY=10"); - Statement stat = conn.createStatement(); - Connection conn2 = getConnection("testLowRetentionTime"); - Statement stat2 = conn2.createStatement(); - stat.execute("create alias sleep as " + - "$$void sleep(int ms) throws Exception { Thread.sleep(ms); }$$"); - stat.execute("create table test(id identity, name varchar) " + - "as select x, 'Init' from system_range(0, 1999)"); - for (int i = 0; i < 10; i++) { - stat.execute("insert into test values(null, 'Hello')"); - // create and delete a large table: this will force compaction - stat.execute("create table temp(id identity, name varchar) as " + - "select x, space(1000000) from system_range(0, 10)"); - stat.execute("drop table temp"); - } - ResultSet rs = stat2 - .executeQuery("select *, sleep(1) from test order by id"); - for (int i = 0; i < 2000 + 10; i++) { - assertTrue(rs.next()); - assertEquals(i, rs.getInt(1)); + private void testNoRetentionTime() throws SQLException { + deleteDb(getTestName()); + try (Connection conn = getConnection(getTestName() + ";RETENTION_TIME=0;WRITE_DELAY=10")) { + Statement stat = conn.createStatement(); + try (Connection conn2 = getConnection(getTestName())) { + Statement stat2 = conn2.createStatement(); + stat.execute("create alias sleep as " + + "$$void sleep(int ms) throws Exception { Thread.sleep(ms); }$$"); + stat.execute("create table test(id identity, name varchar) " + + "as select x, 'Init' from 
system_range(0, 1999)"); + for (int i = 0; i < 10; i++) { + stat.execute("insert into test values(null, 'Hello')"); + // create and delete a large table: this will force compaction + stat.execute("create table temp(id identity, name varchar) as " + + "select x, space(1000000) from system_range(0, 10)"); + stat.execute("drop table temp"); + } + ResultSet rs = stat2 + .executeQuery("select *, sleep(1) from test order by id"); + for (int i = 0; i < 2000 + 10; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } } - assertFalse(rs.next()); - conn2.close(); - conn.close(); } private void testOldAndNew() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - - String urlOld = getURL("mvstore;MV_STORE=FALSE", true); - String urlNew = getURL("mvstore;MV_STORE=TRUE", true); - String url = getURL("mvstore", true); + if (config.memory) { + return; + } + deleteDb(getTestName()); + String urlOld = getURL(getTestName() + ";MV_STORE=FALSE", true); + String urlNew = getURL(getTestName() + ";MV_STORE=TRUE", true); + String url = getURL(getTestName(), true); - conn = getConnection(urlOld); - conn.createStatement().execute("create table test_old(id int)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("select * from test_old"); - conn.close(); - conn = getConnection(urlNew); - conn.createStatement().execute("create table test_new(id int)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("select * from test_new"); - conn.close(); - conn = getConnection(urlOld); - conn.createStatement().execute("select * from test_old"); - conn.close(); - conn = getConnection(urlNew); - conn.createStatement().execute("select * from test_new"); - conn.close(); + try (Connection conn = getConnection(urlOld)) { + conn.createStatement().execute("create table test_old(id int)"); + } + try (Connection conn = getConnection(url)) { + 
conn.createStatement().execute("select * from test_old"); + } + try (Connection conn = getConnection(urlNew)) { + conn.createStatement().execute("create table test_new(id int)"); + } + try (Connection conn = getConnection(url)) { + conn.createStatement().execute("select * from test_new"); + } + try (Connection conn = getConnection(urlOld)) { + conn.createStatement().execute("select * from test_old"); + } + try (Connection conn = getConnection(urlNew)) { + conn.createStatement().execute("select * from test_new"); + } } private void testTemporaryTables() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("set max_memory_rows 100"); - stat.execute("create table t1 as select x from system_range(1, 200)"); - stat.execute("create table t2 as select x from system_range(1, 200)"); - for (int i = 0; i < 20; i++) { - // this will create temporary results that - // internally use temporary tables, which are not all closed - stat.execute("select count(*) from t1 where t1.x in (select t2.x from t2)"); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("set max_memory_rows 100"); + stat.execute("create table t1 as select x from system_range(1, 200)"); + stat.execute("create table t2 as select x from system_range(1, 200)"); + for (int i = 0; i < 20; i++) { + // this will create temporary results that + // internally use temporary tables, which are not all closed + stat.execute("select count(*) from t1 where t1.x in (select t2.x from t2)"); + } } - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - for (int i = 0; i < 20; i++) { - stat.execute("create table a" + i + "(id int primary key)"); - ResultSet rs = 
stat.executeQuery("select count(*) from a" + i); - rs.next(); - assertEquals(0, rs.getInt(1)); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + for (int i = 0; i < 20; i++) { + stat.execute("create table a" + i + "(id int primary key)"); + ResultSet rs = stat.executeQuery("select count(*) from a" + i); + rs.next(); + assertEquals(0, rs.getInt(1)); + } } - conn.close(); } private void testUniqueIndex() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test as select x, 0 from system_range(1, 5000)"); - stat.execute("create unique index on test(x)"); - ResultSet rs = stat.executeQuery("select * from test where x=1"); - assertTrue(rs.next()); - assertFalse(rs.next()); - conn.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test as select x, 0 from system_range(1, 5000)"); + stat.execute("create unique index on test(x)"); + ResultSet rs = stat.executeQuery("select * from test where x=1"); + assertTrue(rs.next()); + assertFalse(rs.next()); + } } private void testSecondaryIndex() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - int size = 8 * 1024; - stat.execute("insert into test select mod(x * 111, " + size + ") " + - "from system_range(1, " + size + ")"); - stat.execute("create index on test(id)"); - ResultSet rs = stat.executeQuery( - "select count(*) from 
test inner join " + - "system_range(1, " + size + ") where " + - "id = mod(x * 111, " + size + ")"); - rs.next(); - assertEquals(size, rs.getInt(1)); - conn.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int)"); + int size = 8 * 1024; + stat.execute("insert into test select mod(x * 111, " + size + ") " + + "from system_range(1, " + size + ")"); + stat.execute("create index on test(id)"); + ResultSet rs = stat.executeQuery( + "select count(*) from test inner join " + + "system_range(1, " + size + ") where " + + "id = mod(x * 111, " + size + ")"); + rs.next(); + assertEquals(size, rs.getInt(1)); + } } private void testGarbageCollectionForLOB() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + if (config.memory) { + return; + } + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int, data blob)"); - stat.execute("insert into test select x, repeat('0', 10000) " + - "from system_range(1, 10)"); - stat.execute("drop table test"); - stat.equals("call @temp := cast(repeat('0', 10000) as blob)"); - stat.execute("create table test2(id int, data blob)"); - PreparedStatement prep = conn.prepareStatement( - "insert into test2 values(?, ?)"); - prep.setInt(1, 1); - assertThrows(ErrorCode.IO_EXCEPTION_1, prep). - setBinaryStream(1, createFailingStream(new IOException())); - prep.setInt(1, 2); - assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
- setBinaryStream(1, createFailingStream(new IllegalStateException())); - conn.close(); - MVStore s = MVStore.open(getBaseDir()+ "/mvstore.mv.db"); - assertTrue(s.hasMap("lobData")); - MVMap lobData = s.openMap("lobData"); - assertEquals(0, lobData.sizeAsLong()); - assertTrue(s.hasMap("lobMap")); - MVMap lobMap = s.openMap("lobMap"); - assertEquals(0, lobMap.sizeAsLong()); - assertTrue(s.hasMap("lobRef")); - MVMap lobRef = s.openMap("lobRef"); - assertEquals(0, lobRef.sizeAsLong()); - s.close(); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int, data blob)"); + stat.execute("insert into test select x, repeat('0', 10000) " + + "from system_range(1, 10)"); + stat.execute("drop table test"); + stat.execute("create table test2(id int, data blob)"); + PreparedStatement prep = conn.prepareStatement( + "insert into test2 values(?, ?)"); + prep.setInt(1, 1); + assertThrows(ErrorCode.IO_EXCEPTION_1, prep). + setBinaryStream(1, createFailingStream(new IOException())); + prep.setInt(1, 2); + assertThrows(ErrorCode.IO_EXCEPTION_1, prep). 
+ setBinaryStream(1, createFailingStream(new IllegalStateException())); + } + try (MVStore s = MVStore.open(getBaseDir()+ "/" + getTestName() + ".mv.db")) { + assertTrue(s.hasMap("lobData")); + MVMap lobData = s.openMap("lobData"); + assertEquals(0, lobData.sizeAsLong()); + assertTrue(s.hasMap("lobMap")); + MVMap lobMap = s.openMap("lobMap"); + assertEquals(0, lobMap.sizeAsLong()); + assertTrue(s.hasMap("lobRef")); + MVMap lobRef = s.openMap("lobRef"); + assertEquals(0, lobRef.sizeAsLong()); + } } private void testSpatial() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("call rand(1)"); - stat.execute("create table coordinates as select rand()*50 x, " + - "rand()*50 y from system_range(1, 5000)"); - stat.execute("create table test(id identity, data geometry)"); - stat.execute("create spatial index on test(data)"); - stat.execute("insert into test(data) select 'polygon(('||" + - "(1+x)||' '||(1+y)||', '||(2+x)||' '||(2+y)||', "+ - "'||(3+x)||' '||(1+y)||', '||(1+x)||' '||(1+y)||'))' from coordinates;"); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("call rand(1)"); + stat.execute("create table coordinates as select rand()*50 x, " + + "rand()*50 y from system_range(1, 5000)"); + stat.execute("create table test(id identity, data geometry)"); + stat.execute("create spatial index on test(data)"); + stat.execute("insert into test(data) select 'polygon(('||" + + "(1+x)||' '||(1+y)||', '||(2+x)||' '||(2+y)||', " + + "'||(3+x)||' '||(1+y)||', '||(1+x)||' '||(1+y)||'))' from coordinates;"); + } } private void testCount() throws Exception { @@ -462,176 +486,175 @@ private void testCount() throws Exception { return; } - 
FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Connection conn2; Statement stat; Statement stat2; - String url = "mvstore;MV_STORE=TRUE;MVCC=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("create table test2(id int)"); - stat.execute("insert into test select x from system_range(1, 10000)"); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id int)"); + stat.execute("create table test2(id int)"); + stat.execute("insert into test select x from system_range(1, 10000)"); + } - ResultSet rs; String plan; - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - assertTrue(plan, plan.indexOf("reads:") < 0); - - conn = getConnection(url); - stat = conn.createStatement(); - conn.setAutoCommit(false); - stat.execute("insert into test select x from system_range(1, 1000)"); - rs = stat.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(11000, rs.getInt(1)); - - // not yet committed - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - // transaction log is small, so no need to read the table - assertTrue(plan, plan.indexOf("reads:") < 0); - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); - assertEquals(10000, rs.getInt(1)); - - stat.execute("insert into test2 select x from system_range(1, 11000)"); - rs = stat2.executeQuery("explain analyze select count(*) from test"); - rs.next(); - plan = rs.getString(1); - // transaction log is larger than the table, so read the table - assertTrue(plan, plan.contains("reads:")); - rs = stat2.executeQuery("select count(*) from test"); - rs.next(); 
- assertEquals(10000, rs.getInt(1)); + ResultSet rs; + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + plan = rs.getString(1); + assertTrue(plan, !plan.contains("reads:")); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + conn.setAutoCommit(false); + stat.execute("insert into test select x from system_range(1, 1000)"); + rs = stat.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(11000, rs.getInt(1)); + + // not yet committed + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + plan = rs.getString(1); + // transaction log is small, so no need to read the table + assertTrue(plan, !plan.contains("reads:")); + rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(10000, rs.getInt(1)); + + stat2.execute("set cache_size 1024"); // causes cache to be cleared, so reads will occur + + stat.execute("insert into test2 select x from system_range(1, 11000)"); + rs = stat2.executeQuery("explain analyze select count(*) from test"); + rs.next(); + plan = rs.getString(1); + // transaction log is larger than the table, so read the table + assertContains(plan, "reads:"); + rs = stat2.executeQuery("select count(*) from test"); + rs.next(); + assertEquals(10000, rs.getInt(1)); + } + } - conn2.close(); - conn.close(); } private void testMinMaxWithNull() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Connection conn2; Statement stat; Statement stat2; - String url = "mvstore;MV_STORE=TRUE;MVCC=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(data int)"); - stat.execute("create index on test(data)"); - stat.execute("insert into test values(null), (2)"); - conn2 = 
getConnection(url); - stat2 = conn2.createStatement(); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat.execute("insert into test values(1)"); - ResultSet rs; - rs = stat.executeQuery("select min(data) from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - rs = stat2.executeQuery("select min(data) from test"); - rs.next(); - // not yet committed - assertEquals(2, rs.getInt(1)); - conn2.close(); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(data int)"); + stat.execute("create index on test(data)"); + stat.execute("insert into test values(null), (2)"); + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + conn.setAutoCommit(false); + conn2.setAutoCommit(false); + stat.execute("insert into test values(1)"); + ResultSet rs; + rs = stat.executeQuery("select min(data) from test"); + rs.next(); + assertEquals(1, rs.getInt(1)); + rs = stat2.executeQuery("select min(data) from test"); + rs.next(); + // not yet committed + assertEquals(2, rs.getInt(1)); + } + } } private void testTimeout() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; - Connection conn2; Statement stat; Statement stat2; - String url = "mvstore;MV_STORE=TRUE;MVCC=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id identity, name varchar)"); - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - conn.setAutoCommit(false); - conn2.setAutoCommit(false); - stat.execute("insert into test values(1, 'Hello')"); - assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). 
- execute("insert into test values(1, 'Hello')"); - conn2.close(); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar)"); + try (Connection conn2 = getConnection(url)) { + stat2 = conn2.createStatement(); + conn.setAutoCommit(false); + conn2.setAutoCommit(false); + stat.execute("insert into test values(1, 'Hello')"); + assertThrows(ErrorCode.LOCK_TIMEOUT_1, stat2). + execute("insert into test values(1, 'Hello')"); + } + } } private void testExplainAnalyze() throws Exception { if (config.memory) { return; } - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id identity, name varchar) as " + - "select x, space(1000) from system_range(1, 1000)"); - ResultSet rs; - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - rs = stat.executeQuery("explain analyze select * from test"); - rs.next(); - String plan = rs.getString(1); - // expect about 1000 reads - String readCount = plan.substring(plan.indexOf("reads: ")); - readCount = readCount.substring("reads: ".length(), readCount.indexOf('\n')); - int rc = Integer.parseInt(readCount); - assertTrue(plan, rc >= 1000 && rc <= 1200); - conn.close(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar) as " + + "select x, space(1000) from system_range(1, 1000)"); + } + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("explain analyze select * from test"); + rs.next(); + String plan = rs.getString(1); + // expect about 1000 reads + String readCount = 
plan.substring(plan.indexOf("reads: ")); + readCount = readCount.substring("reads: ".length(), readCount.indexOf('\n')); + int rc = Integer.parseInt(readCount); + assertTrue(plan, rc >= 60 && rc <= 80); + } } - private void testTransactionLogUsuallyNotStored() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - Connection conn; + private void testTransactionLogEmptyAfterCommit() throws Exception { Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id identity, name varchar)"); - conn.setAutoCommit(false); - PreparedStatement prep = conn.prepareStatement( - "insert into test(name) values(space(10000))"); - for (int j = 0; j < 100; j++) { - for (int i = 0; i < 100; i++) { - prep.execute(); + try (Connection conn = getConnection(url)) { + stat = conn.createStatement(); + stat.execute("create table test(id identity, name varchar)"); + stat.execute("set write_delay 0"); + conn.setAutoCommit(false); + PreparedStatement prep = conn.prepareStatement( + "insert into test(name) values(space(10000))"); + for (int j = 0; j < 100; j++) { + for (int i = 0; i < 100; i++) { + prep.execute(); + } + conn.commit(); + } + stat.execute("shutdown immediately"); + } catch (Exception ignore) {/**/} + + String file = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; + assertTrue(new File(file).exists()); + try (MVStore store = MVStore.open(file)) { + TransactionStore t = new TransactionStore(store); + t.init(); + int openTransactions = t.getOpenTransactions().size(); + if (openTransactions != 0) { + fail("transaction log was not empty"); } - conn.commit(); } - stat.execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - - String file = getBaseDir() + "/mvstore" + Constants.SUFFIX_MV_FILE; - - MVStore store = MVStore.open(file); - TransactionStore t 
= new TransactionStore(store); - t.init(); - assertEquals(0, t.getOpenTransactions().size()); - store.close(); } private void testShrinkDatabaseFile() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore" + - ";MV_STORE=TRUE"; + if (config.memory) { + return; + } + deleteDb(getTestName()); + // set WRITE_DELAY=0 so the free-unused-space runs on commit + String dbName = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0"; Connection conn; Statement stat; long maxSize = 0; @@ -648,8 +671,8 @@ private void testShrinkDatabaseFile() throws Exception { retentionTime = 0; } ResultSet rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name='RETENTION_TIME'"); + "select setting_value from information_schema.settings " + + "where setting_name='RETENTION_TIME'"); assertTrue(rs.next()); assertEquals(retentionTime, rs.getInt(1)); stat.execute("create table test(id int primary key, data varchar)"); @@ -672,31 +695,34 @@ private void testShrinkDatabaseFile() throws Exception { } catch (Exception e) { // ignore } - String fileName = getBaseDir() + "/mvstore" + String fileName = getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE; long size = FileUtils.size(fileName); if (i < 10) { - maxSize = (int) (Math.max(size, maxSize) * 1.2); + maxSize = (int) Math.max(size * 1.2, maxSize); } else if (size > maxSize) { fail(i + " size: " + size + " max: " + maxSize); } } - long sizeOld = FileUtils.size(getBaseDir() + "/mvstore" + long sizeOld = FileUtils.size(getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE); conn = getConnection(dbName); stat = conn.createStatement(); stat.execute("shutdown compact"); conn.close(); - long sizeNew = FileUtils.size(getBaseDir() + "/mvstore" + long sizeNew = FileUtils.size(getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE); assertTrue("new: " + sizeNew + " old: " + sizeOld, sizeNew < sizeOld); } private void testTwoPhaseCommit() throws Exception { - 
FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } Connection conn; Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); conn = getConnection(url); stat = conn.createStatement(); @@ -720,10 +746,13 @@ private void testTwoPhaseCommit() throws Exception { } private void testRecover() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } Connection conn; Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; url = getURL(url, true); conn = getConnection(url); stat = conn.createStatement(); @@ -733,11 +762,11 @@ private void testRecover() throws Exception { stat.execute("insert into test2 values('Hello World')"); conn.close(); - Recover.execute(getBaseDir(), "mvstore"); - DeleteDbFiles.execute(getBaseDir(), "mvstore", true); + Recover.execute(getBaseDir(), getTestName()); + deleteDb(getTestName()); conn = getConnection(url); stat = conn.createStatement(); - stat.execute("runscript from '" + getBaseDir() + "/mvstore.h2.sql'"); + stat.execute("runscript from '" + getBaseDir() + "/" + getTestName()+ ".h2.sql'"); ResultSet rs; rs = stat.executeQuery("select * from test"); assertTrue(rs.next()); @@ -750,10 +779,10 @@ private void testRecover() throws Exception { } private void testRollback() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); Connection conn; Statement stat; - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; conn = getConnection(url); stat = conn.createStatement(); stat.execute("create table test(id identity)"); @@ -765,11 +794,13 @@ private void testRollback() throws Exception { } private void testSeparateKey() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } 
Connection conn; Statement stat; - - String url = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; conn = getConnection(url); stat = conn.createStatement(); @@ -791,12 +822,14 @@ private void testSeparateKey() throws Exception { } private void testRollbackAfterCrash() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } Connection conn; Statement stat; - - String url = "mvstore;MV_STORE=TRUE"; - String url2 = "mvstore2;MV_STORE=TRUE"; + deleteDb(getTestName()); + String url = getTestName() + ";MV_STORE=TRUE"; + String url2 = getTestName() + "2;MV_STORE=TRUE"; conn = getConnection(url); stat = conn.createStatement(); @@ -839,9 +872,10 @@ private void testRollbackAfterCrash() throws Exception { "from system_range(1, 10)"); conn.setAutoCommit(false); stat.execute("delete from test where id > 5"); - stat.execute("backup to '" + getBaseDir() + "/backup.zip'"); + stat.execute("backup to '" + getBaseDir() + "/" + getTestName() + ".zip'"); conn.rollback(); - Restore.execute(getBaseDir() + "/backup.zip", getBaseDir(), "mvstore2"); + Restore.execute(getBaseDir() + "/" +getTestName() + ".zip", + getBaseDir(), getTestName() + "2"); Connection conn2; conn2 = getConnection(url2); conn.close(); @@ -850,11 +884,10 @@ private void testRollbackAfterCrash() throws Exception { } private void testReferentialIntegrity() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); Connection conn; Statement stat; - - conn = getConnection("mvstore;MV_STORE=TRUE"); + deleteDb(getTestName()); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); stat = conn.createStatement(); stat.execute("create table test(id int, parent int " + @@ -882,15 +915,8 @@ private void testReferentialIntegrity() throws Exception { stat.execute("create table child(pid int)"); stat.execute("insert into parent values(1)"); stat.execute("insert into child values(2)"); - try { - stat.execute("alter table 
child add constraint cp " + - "foreign key(pid) references parent(id)"); - fail(); - } catch (SQLException e) { - assertEquals( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - e.getErrorCode()); - } + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + "alter table child add constraint cp foreign key(pid) references parent(id)"); stat.execute("update child set pid=1"); stat.execute("drop table child, parent"); @@ -898,15 +924,8 @@ private void testReferentialIntegrity() throws Exception { stat.execute("create table child(pid int)"); stat.execute("insert into parent values(1)"); stat.execute("insert into child values(2)"); - try { - stat.execute("alter table child add constraint cp " + - "foreign key(pid) references parent(id)"); - fail(); - } catch (SQLException e) { - assertEquals( - ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, - e.getErrorCode()); - } + assertThrows(ErrorCode.REFERENTIAL_INTEGRITY_VIOLATED_PARENT_MISSING_1, stat).execute( + "alter table child add constraint cp foreign key(pid) references parent(id)"); stat.execute("drop table child, parent"); stat.execute("create table test(id identity, parent bigint, " + @@ -923,11 +942,14 @@ private void testReferentialIntegrity() throws Exception { } private void testWriteDelay() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } Connection conn; Statement stat; ResultSet rs; - conn = getConnection("mvstore;MV_STORE=TRUE"); + deleteDb(getTestName()); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); stat = conn.createStatement(); stat.execute("create table test(id int)"); stat.execute("set write_delay 0"); @@ -938,7 +960,7 @@ private void testWriteDelay() throws Exception { } catch (Exception e) { // ignore } - conn = getConnection("mvstore;MV_STORE=TRUE"); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); stat = conn.createStatement(); rs = stat.executeQuery("select * from 
test"); assertTrue(rs.next()); @@ -946,11 +968,11 @@ private void testWriteDelay() throws Exception { } private void testAutoCommit() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); Connection conn; Statement stat; ResultSet rs; - conn = getConnection("mvstore;MV_STORE=TRUE"); + deleteDb(getTestName()); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); for (int i = 0; i < 2; i++) { stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); @@ -980,22 +1002,28 @@ private void testAutoCommit() throws SQLException { } private void testReopen() throws SQLException { - FileUtils.deleteRecursive(getBaseDir(), true); + if (config.memory) { + return; + } Connection conn; Statement stat; - conn = getConnection("mvstore;MV_STORE=TRUE"); + deleteDb(getTestName()); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); stat = conn.createStatement(); stat.execute("create table test(id int, name varchar)"); conn.close(); - conn = getConnection("mvstore;MV_STORE=TRUE"); + conn = getConnection(getTestName() + ";MV_STORE=TRUE"); stat = conn.createStatement(); stat.execute("drop table test"); conn.close(); } private void testBlob() throws SQLException, IOException { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + if (config.memory) { + return; + } + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; Connection conn; Statement stat; conn = getConnection(dbName); @@ -1018,12 +1046,14 @@ private void testBlob() throws SQLException, IOException { assertEquals(129, len); } conn.close(); - FileUtils.deleteRecursive(getBaseDir(), true); } private void testEncryption() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + if (config.memory) { + return; + } + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; Connection conn; Statement stat; String url = 
getURL(dbName + ";CIPHER=AES", true); @@ -1038,73 +1068,49 @@ private void testEncryption() throws Exception { stat.execute("select * from test"); stat.execute("drop table test"); conn.close(); - FileUtils.deleteRecursive(getBaseDir(), true); - } - - private void testExclusiveLock() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE;MVCC=FALSE"; - Connection conn, conn2; - Statement stat, stat2; - conn = getConnection(dbName); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("insert into test values(1)"); - conn.setAutoCommit(false); - // stat.execute("update test set id = 2"); - stat.executeQuery("select * from test for update"); - conn2 = getConnection(dbName); - stat2 = conn2.createStatement(); - ResultSet rs2 = stat2.executeQuery( - "select * from information_schema.locks"); - assertTrue(rs2.next()); - assertEquals("TEST", rs2.getString("table_name")); - assertEquals("WRITE", rs2.getString("lock_type")); - conn2.close(); - conn.close(); - FileUtils.deleteRecursive(getBaseDir(), true); } private void testReadOnly() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + if (config.memory) { + return; + } + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; Connection conn; Statement stat; conn = getConnection(dbName); stat = conn.createStatement(); stat.execute("create table test(id int)"); conn.close(); - FileUtils.setReadOnly(getBaseDir() + "/mvstore" + + FileUtils.setReadOnly(getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE); conn = getConnection(dbName); Database db = (Database) ((JdbcConnection) conn).getSession() .getDataHandler(); - assertTrue(db.getMvStore().getStore().getFileStore().isReadOnly()); + assertTrue(db.getStore().getMvStore().getFileStore().isReadOnly()); conn.close(); - FileUtils.deleteRecursive(getBaseDir(), true); } private void 
testReuseDiskSpace() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + // set WRITE_DELAY=0 so the free-unused-space runs on commit + String dbName = getTestName() + ";MV_STORE=TRUE;WRITE_DELAY=0;RETENTION_TIME=0"; Connection conn; Statement stat; long maxSize = 0; for (int i = 0; i < 20; i++) { conn = getConnection(dbName); - Database db = (Database) ((JdbcConnection) conn). - getSession().getDataHandler(); - db.getMvStore().getStore().setRetentionTime(0); stat = conn.createStatement(); stat.execute("create table test(id int primary key, data varchar)"); stat.execute("insert into test select x, space(1000) " + "from system_range(1, 1000)"); stat.execute("drop table test"); conn.close(); - long size = FileUtils.size(getBaseDir() + "/mvstore" + long size = FileUtils.size(getBaseDir() + "/" + getTestName() + Constants.SUFFIX_MV_FILE); +// trace("Pass #" + i + ": size=" + size); if (i < 10) { - maxSize = (int) (Math.max(size, maxSize) * 1.1); + maxSize = (int) (Math.max(size * 1.1, maxSize)); } else if (size > maxSize) { fail(i + " size: " + size + " max: " + maxSize); } @@ -1112,8 +1118,8 @@ private void testReuseDiskSpace() throws Exception { } private void testDataTypes() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; Connection conn = getConnection(dbName); Statement stat = conn.createStatement(); @@ -1124,30 +1130,30 @@ private void testDataTypes() throws Exception { "by tinyint," + "sm smallint," + "bi bigint," + - "de decimal," + + "de decimal(5, 2)," + "re real,"+ "do double," + "ti time," + "da date," + "ts timestamp," + - "bin binary," + + "bin varbinary," + "uu uuid," + "bl blob," + "cl clob)"); stat.execute("insert into test values(1000, '', '', null, 0, 0, 0, " + "9, 2, 3, '10:00:00', '2001-01-01', " - + "'2010-10-10 
10:10:10', x'00', 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', x'00', '01234567-89AB-CDEF-0123-456789ABCDEF', x'b1', 'clob')"); stat.execute("insert into test values(1, 'vc', 'ch', true, 8, 16, 64, " + "123.00, 64.0, 32.0, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', x'00', 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', x'00', '01234567-89AB-CDEF-0123-456789ABCDEF', x'b1', 'clob')"); stat.execute("insert into test values(-1, " + "'quite a long string \u1234 \u00ff', 'ch', false, -8, -16, -64, " + "0, 0, 0, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', SECURE_RAND(100), 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', SECURE_RAND(100), RANDOM_UUID(), x'b1', 'clob')"); stat.execute("insert into test values(-1000, space(1000), 'ch', " + "false, -8, -16, -64, " + "1, 1, 1, '10:00:00', '2001-01-01', " - + "'2010-10-10 10:10:10', SECURE_RAND(100), 0, x'b1', 'clob')"); + + "'2010-10-10 10:10:10', SECURE_RAND(100), RANDOM_UUID(), x'b1', 'clob')"); if (!config.memory) { conn.close(); conn = getConnection(dbName); @@ -1158,26 +1164,25 @@ private void testDataTypes() throws Exception { rs.next(); assertEquals(1000, rs.getInt(1)); assertEquals("", rs.getString(2)); - assertEquals("", rs.getString(3)); + assertEquals(" ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(0, rs.getByte(5)); assertEquals(0, rs.getShort(6)); assertEquals(0, rs.getLong(7)); - assertEquals("9", rs.getBigDecimal(8).toString()); + assertEquals("9.00", rs.getBigDecimal(8).toString()); assertEquals(2d, rs.getDouble(9)); assertEquals(3d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); - assertEquals("2010-10-10 10:10:10.0", rs.getString(13)); + assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(1, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(UUID.fromString("01234567-89AB-CDEF-0123-456789ABCDEF"), rs.getObject(15)); 
assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(1, rs.getInt(1)); assertEquals("vc", rs.getString(2)); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertTrue(rs.getBoolean(4)); assertEquals(8, rs.getByte(5)); assertEquals(16, rs.getShort(6)); @@ -1187,71 +1192,70 @@ private void testDataTypes() throws Exception { assertEquals(32d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); - assertEquals("2010-10-10 10:10:10.0", rs.getString(13)); + assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(1, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(UUID.fromString("01234567-89AB-CDEF-0123-456789ABCDEF"), rs.getObject(15)); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(-1, rs.getInt(1)); assertEquals("quite a long string \u1234 \u00ff", rs.getString(2)); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(-8, rs.getByte(5)); assertEquals(-16, rs.getShort(6)); assertEquals(-64, rs.getLong(7)); - assertEquals("0", rs.getBigDecimal(8).toString()); + assertEquals("0.00", rs.getBigDecimal(8).toString()); assertEquals(0.0d, rs.getDouble(9)); assertEquals(0.0d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); - assertEquals("2010-10-10 10:10:10.0", rs.getString(13)); + assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(100, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(2, rs.getObject(15, UUID.class).variant()); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); rs.next(); assertEquals(-1000, rs.getInt(1)); assertEquals(1000, 
rs.getString(2).length()); - assertEquals("ch", rs.getString(3)); + assertEquals("ch ", rs.getString(3)); assertFalse(rs.getBoolean(4)); assertEquals(-8, rs.getByte(5)); assertEquals(-16, rs.getShort(6)); assertEquals(-64, rs.getLong(7)); - assertEquals("1", rs.getBigDecimal(8).toString()); + assertEquals("1.00", rs.getBigDecimal(8).toString()); assertEquals(1.0d, rs.getDouble(9)); assertEquals(1.0d, rs.getFloat(10)); assertEquals("10:00:00", rs.getString(11)); assertEquals("2001-01-01", rs.getString(12)); - assertEquals("2010-10-10 10:10:10.0", rs.getString(13)); + assertEquals("2010-10-10 10:10:10", rs.getString(13)); assertEquals(100, rs.getBytes(14).length); - assertEquals("00000000-0000-0000-0000-000000000000", - rs.getString(15)); + assertEquals(2, rs.getObject(15, UUID.class).variant()); assertEquals(1, rs.getBytes(16).length); assertEquals("clob", rs.getString(17)); stat.execute("drop table test"); stat.execute("create table test(id int, obj object, " + - "rs result_set, arr array, ig varchar_ignorecase)"); + "rs row(a int), arr1 int array, arr2 numeric(1000) array, ig varchar_ignorecase)"); PreparedStatement prep = conn.prepareStatement( - "insert into test values(?, ?, ?, ?, ?)"); + "insert into test values(?, ?, ?, ?, ?, ?)"); prep.setInt(1, 1); prep.setObject(2, new java.lang.AssertionError()); prep.setObject(3, stat.executeQuery("select 1 from dual")); prep.setObject(4, new Object[]{1, 2}); - prep.setObject(5, "test"); + prep.setObject(5, new Object[0]); + prep.setObject(6, "test"); prep.execute(); prep.setInt(1, 1); prep.setObject(2, new java.lang.AssertionError()); prep.setObject(3, stat.executeQuery("select 1 from dual")); - prep.setObject(4, new Object[]{ + prep.setObject(4, new Object[0]); + prep.setObject(5, new Object[]{ new BigDecimal(new String( new char[1000]).replace((char) 0, '1'))}); - prep.setObject(5, "test"); + prep.setObject(6, "test"); prep.execute(); if (!config.memory) { conn.close(); @@ -1271,45 +1275,9 @@ private void 
testDataTypes() throws Exception { conn.close(); } - private void testLocking() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE;MVCC=FALSE"; - Connection conn = getConnection(dbName); - Statement stat = conn.createStatement(); - stat.execute("set lock_timeout 1000"); - - stat.execute("create table a(id int primary key, name varchar)"); - stat.execute("create table b(id int primary key, name varchar)"); - - Connection conn1 = getConnection(dbName); - final Statement stat1 = conn1.createStatement(); - stat1.execute("set lock_timeout 1000"); - - conn.setAutoCommit(false); - conn1.setAutoCommit(false); - stat.execute("insert into a values(1, 'Hello')"); - stat1.execute("insert into b values(1, 'Hello')"); - Task t = new Task() { - @Override - public void call() throws Exception { - stat1.execute("insert into a values(2, 'World')"); - } - }; - t.execute(); - try { - stat.execute("insert into b values(2, 'World')"); - throw t.getException(); - } catch (SQLException e) { - assertEquals(e.toString(), ErrorCode.DEADLOCK_1, e.getErrorCode()); - } - - conn1.close(); - conn.close(); - } - private void testSimple() throws Exception { - FileUtils.deleteRecursive(getBaseDir(), true); - String dbName = "mvstore;MV_STORE=TRUE"; + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; Connection conn = getConnection(dbName); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, name varchar)"); @@ -1345,12 +1313,7 @@ private void testSimple() throws Exception { assertEquals("Hello", rs.getString(2)); assertFalse(rs.next()); - try { - stat.execute("insert into test(id, name) values(10, 'Hello')"); - fail(); - } catch (SQLException e) { - assertEquals(e.toString(), ErrorCode.DUPLICATE_KEY_1, e.getErrorCode()); - } + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat).execute("insert into test(id, name) values(10, 'Hello')"); rs = stat.executeQuery("select min(id), 
max(id), " + "min(name), max(name) from test"); @@ -1398,12 +1361,7 @@ private void testSimple() throws Exception { rs = stat.executeQuery("select count(*) from test"); rs.next(); assertEquals(3000, rs.getInt(1)); - try { - stat.execute("insert into test(id) values(1)"); - fail(); - } catch (SQLException e) { - assertEquals(ErrorCode.DUPLICATE_KEY_1, e.getErrorCode()); - } + assertThrows(ErrorCode.DUPLICATE_KEY_1, stat).execute("insert into test(id) values(1)"); stat.execute("delete from test"); stat.execute("insert into test(id, name) values(-1, 'Hello')"); rs = stat.executeQuery("select count(*) from test where id = -1"); @@ -1415,4 +1373,34 @@ private void testSimple() throws Exception { conn.close(); } + private void testReverseDeletePerformance() throws Exception { + long direct = 0; + long reverse = 0; + for (int i = 0; i < 5; i++) { + reverse += testReverseDeletePerformance(true); + direct += testReverseDeletePerformance(false); + } + assertTrue("direct: " + direct + ", reverse: " + reverse, + 3 * Math.abs(reverse - direct) < 2 * (reverse + direct)); + } + + private long testReverseDeletePerformance(boolean reverse) throws Exception { + deleteDb(getTestName()); + String dbName = getTestName() + ";MV_STORE=TRUE"; + try (Connection conn = getConnection(dbName)) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE test(id INT PRIMARY KEY, name VARCHAR) AS " + + "SELECT x, x || space(1024) || x FROM system_range(1, 1000)"); + conn.setAutoCommit(false); + PreparedStatement prep = conn.prepareStatement("DELETE FROM test WHERE id = ?"); + long start = System.nanoTime(); + for (int i = 0; i < 1000; i++) { + prep.setInt(1, reverse ? 
1000 - i : i); + prep.execute(); + } + long end = System.nanoTime(); + conn.commit(); + return TimeUnit.NANOSECONDS.toMillis(end - start); + } + } } diff --git a/h2/src/test/org/h2/test/store/TestObjectDataType.java b/h2/src/test/org/h2/test/store/TestObjectDataType.java index 5ce22646be..8b4cc3adf1 100644 --- a/h2/src/test/org/h2/test/store/TestObjectDataType.java +++ b/h2/src/test/org/h2/test/store/TestObjectDataType.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -28,7 +28,7 @@ public class TestObjectDataType extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -106,7 +106,6 @@ private void testCommonValues() { if (last != null) { int comp = ot.compare(x, last); if (comp <= 0) { - ot.compare(x, last); fail(x.getClass().getSimpleName() + ": " + x.toString() + " " + comp); } diff --git a/h2/src/test/org/h2/test/store/TestRandomMapOps.java b/h2/src/test/org/h2/test/store/TestRandomMapOps.java index 69bf0bb47a..b3f75b45a9 100644 --- a/h2/src/test/org/h2/test/store/TestRandomMapOps.java +++ b/h2/src/test/org/h2/test/store/TestRandomMapOps.java @@ -1,18 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; +import java.text.MessageFormat; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.TreeMap; - +import org.h2.mvstore.Cursor; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; import org.h2.store.fs.FileUtils; +import org.h2.test.TestAll; import org.h2.test.TestBase; /** @@ -20,10 +25,11 @@ */ public class TestRandomMapOps extends TestBase { - private String fileName; - private int seed; + private static final boolean LOG = false; + private final Random r = new Random(); private int op; + /** * Run just this test. * @@ -31,60 +37,65 @@ public class TestRandomMapOps extends TestBase { */ public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); - test.config.big = true; - test.test(); + TestAll config = test.config; + config.big = true; +// config.memory = true; + + test.println(config.toString()); + for (int i = 0; i < 10; i++) { + test.testFromMain(); + test.println("Done pass #" + i); + } } @Override public void test() throws Exception { - testMap("memFS:randomOps.h3"); + if (config.memory) { + testMap(null); + } else { + String fileName = "memFS:" + getTestName(); + testMap(fileName); + } } - private void testMap(String fileName) throws Exception { - this.fileName = fileName; - int best = Integer.MAX_VALUE; - int bestSeed = 0; - Throwable failException = null; - int size = getSize(100, 1000); - for (seed = 0; seed < 100; seed++) { - FileUtils.delete(fileName); - Throwable ex = null; + private void testMap(String fileName) { + int size = getSize(500, 3000); + long seed = 0; +// seed = System.currentTimeMillis(); +// seed = -3407210256209708616L; + for (int cnt = 0; cnt < 100; cnt++) { try { - testOps(size); - continue; - } catch (Exception e) { - ex = e; - } catch (AssertionError e) { - ex = e; - } - if (op < best) { - 
trace(seed); - bestSeed = seed; - best = op; - size = best; - failException = ex; - // System.out.println("seed:" + seed + " op:" + op + " " + ex); + testOps(fileName, size, seed); + } catch (Exception | AssertionError ex) { + println("seed:" + seed + " op:" + op + " " + ex); + throw ex; + } finally { + if (fileName != null) { + FileUtils.delete(fileName); + } } - } - if (failException != null) { - throw (AssertionError) new AssertionError("seed = " + bestSeed - + " op = " + best).initCause(failException); + seed = r.nextLong(); } } - private void testOps(int size) throws Exception { - FileUtils.delete(fileName); - MVStore s; - s = openStore(fileName); - MVMap m; - m = s.openMap("data"); - Random r = new Random(seed); + private void testOps(String fileName, int loopCount, long seed) { + r.setSeed(seed); op = 0; - TreeMap map = new TreeMap(); - for (; op < size; op++) { - int k = r.nextInt(100); - byte[] v = new byte[r.nextInt(10) * 10]; - int type = r.nextInt(12); + MVStore s = openStore(fileName); + int keysPerPage = s.getKeysPerPage(); + int keyRange = 2000; + MVMap m = s.openMap("data"); + TreeMap map = new TreeMap<>(); + int[] recentKeys = new int[2 * keysPerPage]; + for (; op < loopCount; op++) { + int k = r.nextInt(3 * keyRange / 2); + if (k >= keyRange) { + k = recentKeys[k % recentKeys.length]; + } else { + recentKeys[op % recentKeys.length] = k; + } + String v = k + "_Value_" + op; + int type = r.nextInt(15); switch (type) { case 0: case 1: @@ -105,23 +116,27 @@ private void testOps(int size) throws Exception { s.compact(90, 1024); break; case 7: - log(op, k, v, "m.clear()"); - m.clear(); - map.clear(); + if (op % 64 == 0) { + log(op, k, v, "m.clear()"); + m.clear(); + map.clear(); + } break; case 8: log(op, k, v, "s.commit()"); s.commit(); break; case 9: - log(op, k, v, "s.commit()"); - s.commit(); - log(op, k, v, "s.close()"); - s.close(); - log(op, k, v, "s = openStore(fileName)"); - s = openStore(fileName); - log(op, k, v, "m = s.openMap(\"data\")"); - 
m = s.openMap("data"); + if (fileName != null) { + log(op, k, v, "s.commit()"); + s.commit(); + log(op, k, v, "s.close()"); + s.close(); + log(op, k, v, "s = openStore(fileName)"); + s = openStore(fileName); + log(op, k, v, "m = s.openMap(\"data\")"); + m = s.openMap("data"); + } break; case 10: log(op, k, v, "s.commit()"); @@ -129,9 +144,32 @@ private void testOps(int size) throws Exception { log(op, k, v, "s.compactMoveChunks()"); s.compactMoveChunks(); break; - case 11: + case 11: { + int rangeSize = r.nextInt(2 * keysPerPage); + int step = r.nextBoolean() ? 1 : -1; + for (int i = 0; i < rangeSize; i++) { + log(op, k, v, "m.put({0}, {1})"); + m.put(k, v); + map.put(k, v); + k += step; + v = k + "_Value_" + op; + } + break; + } + case 12: { + int rangeSize = r.nextInt(2 * keysPerPage); + int step = r.nextBoolean() ? 1 : -1; + for (int i = 0; i < rangeSize; i++) { + log(op, k, v, "m.remove({0})"); + m.remove(k); + map.remove(k); + k += step; + } + break; + } + default: log(op, k, v, "m.getKeyIndex({0})"); - ArrayList keyList = new ArrayList(map.keySet()); + ArrayList keyList = new ArrayList<>(map.keySet()); int index = Collections.binarySearch(keyList, k, null); int index2 = (int) m.getKeyIndex(k); assertEquals(index, index2); @@ -141,7 +179,7 @@ private void testOps(int size) throws Exception { } break; } - assertEqualsMapValues(map.get(k), m.get(k)); + assertEquals(map.get(k), m.get(k)); assertEquals(map.ceilingKey(k), m.ceilingKey(k)); assertEquals(map.floorKey(k), m.floorKey(k)); assertEquals(map.higherKey(k), m.higherKey(k)); @@ -152,25 +190,81 @@ private void testOps(int size) throws Exception { assertEquals(map.firstKey(), m.firstKey()); assertEquals(map.lastKey(), m.lastKey()); } + + int from = r.nextBoolean() ? r.nextInt(keyRange) : k + r.nextInt(2 * keysPerPage) - keysPerPage; + int to = r.nextBoolean() ? 
r.nextInt(keyRange) : from + r.nextInt(2 * keysPerPage) - keysPerPage; + + Cursor cursor; + Collection> entrySet; + String msg; + if (from <= to) { + msg = "(" + from + ", null)"; + cursor = m.cursor(from, null, false); + entrySet = map.tailMap(from).entrySet(); + assertEquals(msg, entrySet, cursor); + + msg = "(null, " + from + ")"; + cursor = m.cursor(null, from, false); + entrySet = map.headMap(from + 1).entrySet(); + assertEquals(msg, entrySet, cursor); + + msg = "(" + from + ", " + to + ")"; + cursor = m.cursor(from, to, false); + entrySet = map.subMap(from, to + 1).entrySet(); + assertEquals(msg, entrySet, cursor); + } + + if (from >= to) { + msg = "rev (" + from + ", null)"; + cursor = m.cursor(from, null, true); + entrySet = reverse(map.headMap(from + 1).entrySet()); + assertEquals(msg, entrySet, cursor); + + msg = "rev (null, "+from+")"; + cursor = m.cursor(null, from, true); + entrySet = reverse(map.tailMap(from).entrySet()); + assertEquals(msg, entrySet, cursor); + + msg = "rev (" + from + ", " + to + ")"; + cursor = m.cursor(from, to, true); + entrySet = reverse(map.subMap(to, from + 1).entrySet()); + assertEquals(msg, entrySet, cursor); + } } s.close(); } - private static MVStore openStore(String fileName) { - MVStore s = new MVStore.Builder().fileName(fileName). 
- pageSplitSize(50).autoCommitDisabled().open(); - s.setRetentionTime(1000); - return s; + private static Collection> reverse(Collection> entrySet) { + ArrayList> list = new ArrayList<>(entrySet); + Collections.reverse(list); + entrySet = list; + return entrySet; } - private void assertEqualsMapValues(byte[] x, byte[] y) { - if (x == null || y == null) { - if (x != y) { - assertTrue(x == y); - } - } else { - assertEquals(x.length, y.length); + private void assertEquals(String msg, Iterable> entrySet, Cursor cursor) { + int cnt = 0; + for (Map.Entry entry : entrySet) { + String message = msg + " " + cnt; + assertTrue(message, cursor.hasNext()); + assertEquals(message, entry.getKey(), cursor.next()); + assertEquals(message, entry.getKey(), cursor.getKey()); + assertEquals(message, entry.getValue(), cursor.getValue()); + ++cnt; } + assertFalse(msg, cursor.hasNext()); + } + + public void assertEquals(String message, Object expected, Object actual) { + if (!Objects.equals(expected, actual)) { + fail(message + " expected: " + expected + " actual: " + actual); + } + } + + private static MVStore openStore(String fileName) { + MVStore s = new MVStore.Builder().fileName(fileName) + .keysPerPage(7).autoCommitDisabled().open(); + s.setRetentionTime(1000); + return s; } /** @@ -181,10 +275,11 @@ private void assertEqualsMapValues(byte[] x, byte[] y) { * @param v the value * @param msg the message */ - private static void log(int op, int k, byte[] v, String msg) { - // msg = MessageFormat.format(msg, k, - // v == null ? 
null : "new byte[" + v.length + "]"); - // System.out.println(msg + "; // op " + op); + private static void log(int op, int k, String v, String msg) { + if (LOG) { + msg = MessageFormat.format(msg, k, v); + System.out.println(msg + "; // op " + op); + } } } diff --git a/h2/src/test/org/h2/test/store/TestShardedMap.java b/h2/src/test/org/h2/test/store/TestShardedMap.java index 2113004a00..69345601c3 100644 --- a/h2/src/test/org/h2/test/store/TestShardedMap.java +++ b/h2/src/test/org/h2/test/store/TestShardedMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -21,7 +21,7 @@ public class TestShardedMap extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -32,9 +32,9 @@ public void test() { } private void testLinearSplit() { - ShardedMap map = new ShardedMap(); - TreeMap a = new TreeMap(); - TreeMap b = new TreeMap(); + ShardedMap map = new ShardedMap<>(); + TreeMap a = new TreeMap<>(); + TreeMap b = new TreeMap<>(); map.addMap(a, null, 5); map.addMap(b, 5, null); for (int i = 0; i < 10; i++) { @@ -54,9 +54,9 @@ private void testLinearSplit() { } private void testReplication() { - ShardedMap map = new ShardedMap(); - TreeMap a = new TreeMap(); - TreeMap b = new TreeMap(); + ShardedMap map = new ShardedMap<>(); + TreeMap a = new TreeMap<>(); + TreeMap b = new TreeMap<>(); map.addMap(a, null, null); map.addMap(b, null, null); for (int i = 0; i < 10; i++) { @@ -76,9 +76,9 @@ private void testReplication() { } private void testOverlap() { - ShardedMap map = new ShardedMap(); - TreeMap a = new TreeMap(); - TreeMap b = new 
TreeMap(); + ShardedMap map = new ShardedMap<>(); + TreeMap a = new TreeMap<>(); + TreeMap b = new TreeMap<>(); map.addMap(a, null, 10); map.addMap(b, 5, null); for (int i = 0; i < 20; i++) { diff --git a/h2/src/test/org/h2/test/store/TestSpinLock.java b/h2/src/test/org/h2/test/store/TestSpinLock.java index cf0f465380..693d6ab53e 100644 --- a/h2/src/test/org/h2/test/store/TestSpinLock.java +++ b/h2/src/test/org/h2/test/store/TestSpinLock.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -29,7 +29,7 @@ public class TestSpinLock extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/store/TestStreamStore.java b/h2/src/test/org/h2/test/store/TestStreamStore.java index 8c19bda186..1704fdad71 100644 --- a/h2/src/test/org/h2/test/store/TestStreamStore.java +++ b/h2/src/test/org/h2/test/store/TestStreamStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.store; @@ -13,8 +13,8 @@ import java.util.HashMap; import java.util.Map; import java.util.Random; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; - import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; @@ -22,7 +22,6 @@ import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.util.IOUtils; -import org.h2.util.New; import org.h2.util.StringUtils; /** @@ -36,13 +35,13 @@ public class TestStreamStore extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws IOException { - FileUtils.deleteRecursive(getBaseDir(), true); FileUtils.createDirectories(getBaseDir()); + testMaxBlockKey(); testIOException(); testSaveCount(); testExceptionDuringStore(); @@ -56,8 +55,25 @@ public void test() throws IOException { testLoop(); } + private void testMaxBlockKey() throws IOException { + TreeMap map = new TreeMap<>(); + StreamStore s = new StreamStore(map); + s.setMaxBlockSize(128); + s.setMinBlockSize(64); + map.clear(); + for (int len = 1; len < 1024 * 1024; len *= 2) { + byte[] id = s.put(new ByteArrayInputStream(new byte[len])); + long max = s.getMaxBlockKey(id); + if (max == -1) { + assertTrue(map.isEmpty()); + } else { + assertEquals(map.lastKey(), (Long) max); + } + } + } + private void testIOException() throws IOException { - HashMap map = New.hashMap(); + HashMap map = new HashMap<>(); StreamStore s = new StreamStore(map); byte[] id = s.put(new ByteArrayInputStream(new byte[1024 * 1024])); InputStream in = s.get(id); @@ -70,8 +86,7 @@ private void testIOException() throws IOException { } fail(); } catch (IOException e) { - assertEquals(DataUtils.ERROR_BLOCK_NOT_FOUND, - DataUtils.getErrorCode(e.getMessage())); + checkErrorCode(DataUtils.ERROR_BLOCK_NOT_FOUND, e.getCause()); 
} } @@ -88,23 +103,21 @@ private void testSaveCount() throws IOException { for (int i = 0; i < 8 * 16; i++) { streamStore.put(new RandomStream(blockSize, i)); } - long writeCount = s.getFileStore().getWriteCount(); - assertTrue(writeCount > 2); s.close(); + long writeCount = s.getFileStore().getWriteCount(); + assertTrue(writeCount > 5); } private void testExceptionDuringStore() throws IOException { // test that if there is an IOException while storing // the data, the entries in the map are "rolled back" - HashMap map = New.hashMap(); + HashMap map = new HashMap<>(); StreamStore s = new StreamStore(map); s.setMaxBlockSize(1024); - assertThrows(IOException.class, s). - put(createFailingStream(new IOException())); + assertThrows(IOException.class, () -> s.put(createFailingStream(new IOException()))); assertEquals(0, map.size()); // the runtime exception is converted to an IOException - assertThrows(IOException.class, s). - put(createFailingStream(new IllegalStateException())); + assertThrows(IOException.class, () -> s.put(createFailingStream(new IllegalStateException()))); assertEquals(0, map.size()); } @@ -136,7 +149,7 @@ private void testReadCount() throws IOException { long readCount = s.getFileStore().getReadCount(); // the read count should be low because new blocks // are appended at the end (not between existing blocks) - assertTrue("rc: " + readCount, readCount < 15); + assertTrue("rc: " + readCount, readCount <= 20); map = s.openMap("data"); assertTrue("size: " + map.size(), map.sizeAsLong() >= 200); s.close(); @@ -216,29 +229,14 @@ public int read(byte[] b, int off, int len) { } - private void testDetectIllegalId() throws IOException { - Map map = New.hashMap(); + private void testDetectIllegalId() { + Map map = new HashMap<>(); StreamStore store = new StreamStore(map); - try { - store.length(new byte[]{3, 0, 0}); - fail(); - } catch (IllegalArgumentException e) { - // expected - } - try { - store.remove(new byte[]{3, 0, 0}); - fail(); - } catch 
(IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> store.length(new byte[]{3, 0, 0})); + assertThrows(IllegalArgumentException.class, () -> store.remove(new byte[]{3, 0, 0})); map.put(0L, new byte[]{3, 0, 0}); InputStream in = store.get(new byte[]{2, 1, 0}); - try { - in.read(); - fail(); - } catch (IllegalArgumentException e) { - // expected - } + assertThrows(IllegalArgumentException.class, () -> in.read()); } private void testTreeStructure() throws IOException { @@ -267,7 +265,7 @@ public byte[] get(Object k) { } private void testFormat() throws IOException { - Map map = New.hashMap(); + Map map = new HashMap<>(); StreamStore store = new StreamStore(map); store.setMinBlockSize(10); store.setMaxBlockSize(20); @@ -323,7 +321,7 @@ public boolean containsKey(Object k) { assertEquals(10, map.size()); assertEquals(10, tests.get()); for (int i = 0; i < 10; i++) { - map.containsKey(i); + map.containsKey((long)i); } assertEquals(20, tests.get()); store = new StreamStore(map); @@ -338,7 +336,7 @@ public boolean containsKey(Object k) { assertEquals(15, store.getNextKey()); assertEquals(15, map.size()); for (int i = 0; i < 15; i++) { - map.containsKey(i); + map.containsKey((long)i); } } @@ -370,7 +368,7 @@ public boolean containsKey(Object k) { } private void testLoop() throws IOException { - Map map = New.hashMap(); + Map map = new HashMap<>(); StreamStore store = new StreamStore(map); assertEquals(256 * 1024, store.getMaxBlockSize()); assertEquals(256, store.getMinBlockSize()); diff --git a/h2/src/test/org/h2/test/store/TestTransactionStore.java b/h2/src/test/org/h2/test/store/TestTransactionStore.java index 5ebb2c94b4..07fee7007d 100644 --- a/h2/src/test/org/h2/test/store/TestTransactionStore.java +++ b/h2/src/test/org/h2/test/store/TestTransactionStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.store; @@ -10,22 +10,27 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; - import org.h2.mvstore.DataUtils; -import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; -import org.h2.mvstore.db.TransactionStore; -import org.h2.mvstore.db.TransactionStore.Change; -import org.h2.mvstore.db.TransactionStore.Transaction; -import org.h2.mvstore.db.TransactionStore.TransactionMap; +import org.h2.mvstore.MVStoreException; +import org.h2.mvstore.tx.Transaction; +import org.h2.mvstore.tx.TransactionMap; +import org.h2.mvstore.tx.TransactionStore; +import org.h2.mvstore.tx.TransactionStore.Change; +import org.h2.mvstore.type.LongDataType; +import org.h2.mvstore.type.MetaType; +import org.h2.mvstore.type.ObjectDataType; +import org.h2.mvstore.type.StringDataType; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.New; import org.h2.util.Task; /** @@ -39,53 +44,161 @@ public class TestTransactionStore extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { FileUtils.createDirectories(getBaseDir()); + testHCLFKey(); testConcurrentAddRemove(); testConcurrentAdd(); testCountWithOpenTransactions(); testConcurrentUpdate(); testRepeatedChange(); testTransactionAge(); - testStopWhileCommitting(); testGetModifiedMaps(); testKeyIterator(); - testMultiStatement(); testTwoPhaseCommit(); testSavepoint(); testConcurrentTransactionsReadCommitted(); testSingleConnection(); testCompareWithPostgreSQL(); + testStoreMultiThreadedReads(); + testCommitAfterMapRemoval(); + testDeadLock(); } - private void testConcurrentAddRemove() throws InterruptedException { - MVStore s = MVStore.open(null); - int threadCount = 3; - final int keyCount = 2; - final TransactionStore ts = new TransactionStore(s); - ts.init(); + private void testHCLFKey() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + LongDataType keyType = LongDataType.INSTANCE; + TransactionMap map = t.openMap("test", keyType, keyType); + // firstEntry() & firstKey() + assertNull(map.firstEntry()); + assertNull(map.firstKey()); + // lastEntry() & lastKey() + assertNull(map.lastEntry()); + assertNull(map.lastKey()); + map.put(10L, 100L); + map.put(20L, 200L); + map.put(30L, 300L); + map.put(40L, 400L); + t.commit(); + t = ts.begin(); + map = t.openMap("test", keyType, keyType); + map.put(15L, 150L); + // The same transaction + assertEquals(new SimpleImmutableEntry<>(15L, 150L), map.higherEntry(10L)); + assertEquals((Object) 15L, map.higherKey(10L)); + t = ts.begin(); + map = t.openMap("test", keyType, keyType); + // Another transaction + // firstEntry() & firstKey() + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.firstEntry()); + assertEquals((Object) 10L, map.firstKey()); + // lastEntry() & lastKey() + assertEquals(new 
SimpleImmutableEntry<>(40L, 400L),map.lastEntry()); + assertEquals((Object) 40L, map.lastKey()); + // higherEntry() & higherKey() + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.higherEntry(10L)); + assertEquals((Object) 20L, map.higherKey(10L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.higherEntry(15L)); + assertEquals((Object) 20L, map.higherKey(15L)); + assertNull(map.higherEntry(40L)); + assertNull(map.higherKey(40L)); + // ceilingEntry() & ceilingKey() + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.ceilingEntry(10L)); + assertEquals((Object) 10L, map.ceilingKey(10L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.ceilingEntry(15L)); + assertEquals((Object) 20L, map.ceilingKey(15L)); + assertEquals(new SimpleImmutableEntry<>(40L, 400L), map.ceilingEntry(40L)); + assertEquals((Object) 40L, map.ceilingKey(40L)); + assertNull(map.higherEntry(45L)); + assertNull(map.higherKey(45L)); + // lowerEntry() & lowerKey() + assertNull(map.lowerEntry(10L)); + assertNull(map.lowerKey(10L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.lowerEntry(15L)); + assertEquals((Object) 10L, map.lowerKey(15L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.lowerEntry(20L)); + assertEquals((Object) 10L, map.lowerKey(20L)); + assertEquals(new SimpleImmutableEntry<>(20L, 200L), map.lowerEntry(25L)); + assertEquals((Object) 20L, map.lowerKey(25L)); + // floorEntry() & floorKey() + assertNull(map.floorEntry(5L)); + assertNull(map.floorKey(5L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.floorEntry(10L)); + assertEquals((Object) 10L, map.floorKey(10L)); + assertEquals(new SimpleImmutableEntry<>(10L, 100L), map.floorEntry(15L)); + assertEquals((Object) 10L, map.floorKey(15L)); + assertEquals(new SimpleImmutableEntry<>(30L, 300L), map.floorEntry(35L)); + assertEquals((Object) 30L, map.floorKey(35L)); + } + } + + private static void testConcurrentAddRemove() throws InterruptedException { + try (MVStore s = 
MVStore.open(null)) { + int threadCount = 3; + int keyCount = 2; + TransactionStore ts = new TransactionStore(s); + ts.init(); + + final Random r = new Random(1); + + Task[] tasks = new Task[threadCount]; + for (int i = 0; i < threadCount; i++) { + Task task = new Task() { + @Override + public void call() { + while (!stop) { + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + int k = r.nextInt(keyCount); + try { + map.remove(k); + map.put(k, r.nextInt()); + } catch (MVStoreException e) { + // ignore and retry + } + tx.commit(); + } + } + }; + task.execute(); + tasks[i] = task; + } + Thread.sleep(1000); + for (Task t : tasks) { + t.get(); + } + } + } + + private void testConcurrentAdd() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - final Random r = new Random(1); + Random r = new Random(1); + + AtomicInteger key = new AtomicInteger(); + AtomicInteger failCount = new AtomicInteger(); - Task[] tasks = new Task[threadCount]; - for (int i = 0; i < threadCount; i++) { Task task = new Task() { @Override - public void call() throws Exception { - TransactionMap map = null; + public void call() { while (!stop) { + int k = key.get(); Transaction tx = ts.begin(); - map = tx.openMap("data"); - int k = r.nextInt(keyCount); + TransactionMap map = tx.openMap("data"); try { - map.remove(k); map.put(k, r.nextInt()); - } catch (IllegalStateException e) { + } catch (MVStoreException e) { + failCount.incrementAndGet(); // ignore and retry } tx.commit(); @@ -94,166 +207,115 @@ public void call() throws Exception { }; task.execute(); - tasks[i] = task; - } - Thread.sleep(1000); - for (Task t : tasks) { - t.get(); - } - s.close(); - } - - private void testConcurrentAdd() { - MVStore s; - s = MVStore.open(null); - final TransactionStore ts = new TransactionStore(s); - ts.init(); - - final Random r = new Random(1); - - final AtomicInteger key = new AtomicInteger(); - final AtomicInteger failCount = new 
AtomicInteger(); - - Task task = new Task() { - - @Override - public void call() throws Exception { - Transaction tx = null; - TransactionMap map = null; - while (!stop) { - int k = key.get(); - tx = ts.begin(); - map = tx.openMap("data"); - try { - map.put(k, r.nextInt()); - } catch (IllegalStateException e) { - failCount.incrementAndGet(); - // ignore and retry - } - tx.commit(); + int count = 100000; + for (int i = 0; i < count; i++) { + key.set(i); + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + try { + map.put(i, r.nextInt()); + } catch (MVStoreException e) { + failCount.incrementAndGet(); + // ignore and retry + } + tx.commit(); + if (failCount.get() > 0 && i > 4000) { + // stop earlier, if possible + count = i; + break; } } - - }; - task.execute(); - Transaction tx = null; - int count = 10000; - TransactionMap map = null; - for (int i = 0; i < count; i++) { - int k = i; - key.set(k); - tx = ts.begin(); - map = tx.openMap("data"); - try { - map.put(k, r.nextInt()); - } catch (IllegalStateException e) { - failCount.incrementAndGet(); - // ignore and retry - } - tx.commit(); + task.get(); + // we expect at least 10% the operations were successful + assertTrue(failCount + " >= " + (count * 0.9), + failCount.get() < count * 0.9); + // we expect at least a few failures + assertTrue(failCount.toString(), failCount.get() > 0); } - // we expect at least half the operations were successful - assertTrue(failCount.toString(), failCount.get() < count / 2); - // we expect at least a few failures - assertTrue(failCount.toString(), failCount.get() > 0); - s.close(); } private void testCountWithOpenTransactions() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - Transaction tx1 = ts.begin(); - TransactionMap map1 = tx1.openMap("data"); - int size = 150; - for (int i = 0; i < size; i++) 
{ - map1.put(i, i * 10); - } - tx1.commit(); - tx1 = ts.begin(); - map1 = tx1.openMap("data"); - - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - - Random r = new Random(1); - for (int i = 0; i < size * 3; i++) { - assertEquals("op: " + i, size, (int) map1.sizeAsLong()); - // keep the first 10%, and add 10% - int k = size / 10 + r.nextInt(size); - if (r.nextBoolean()) { - map2.remove(k); - } else { - map2.put(k, i); + Transaction tx1 = ts.begin(); + TransactionMap map1 = tx1.openMap("data"); + int size = 150; + for (int i = 0; i < size; i++) { + map1.put(i, i * 10); + } + tx1.commit(); + tx1 = ts.begin(); + map1 = tx1.openMap("data"); + + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + + Random r = new Random(1); + for (int i = 0; i < size * 3; i++) { + assertEquals("op: " + i, size, map1.size()); + assertEquals("op: " + i, size, (int) map1.sizeAsLong()); + // keep the first 10%, and add 10% + int k = size / 10 + r.nextInt(size); + if (r.nextBoolean()) { + map2.remove(k); + } else { + map2.put(k, i); + } } } - s.close(); } private void testConcurrentUpdate() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - - Transaction tx1 = ts.begin(); - TransactionMap map1 = tx1.openMap("data"); - map1.put(1, 10); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - try { - map2.put(1, 20); - fail(); - } catch (IllegalStateException e) { - assertEquals(DataUtils.ERROR_TRANSACTION_LOCKED, - DataUtils.getErrorCode(e.getMessage())); + Transaction tx1 = ts.begin(); + TransactionMap map1 = tx1.openMap("data"); + map1.put(1, 10); + + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + assertThrows(DataUtils.ERROR_TRANSACTION_LOCKED, () -> map2.put(1, 20)); + assertEquals(10, map1.get(1).intValue()); + 
assertNull(map2.get(1)); + tx1.commit(); + assertEquals(10, map2.get(1).intValue()); } - assertEquals(10, map1.get(1).intValue()); - assertNull(map2.get(1)); - tx1.commit(); - assertEquals(10, map2.get(1).intValue()); - - s.close(); } private void testRepeatedChange() { - MVStore s; - TransactionStore ts; - s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - - Transaction tx0 = ts.begin(); - TransactionMap map0 = tx0.openMap("data"); - map0.put(1, -1); - tx0.commit(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - Transaction tx = ts.begin(); - TransactionMap map = tx.openMap("data"); - for (int i = 0; i < 2000; i++) { - map.put(1, i); - } + Transaction tx0 = ts.begin(); + TransactionMap map0 = tx0.openMap("data"); + map0.put(1, -1); + tx0.commit(); - Transaction tx2 = ts.begin(); - TransactionMap map2 = tx2.openMap("data"); - assertEquals(-1, map2.get(1).intValue()); + Transaction tx = ts.begin(); + TransactionMap map = tx.openMap("data"); + for (int i = 0; i < 2000; i++) { + map.put(1, i); + } - s.close(); + Transaction tx2 = ts.begin(); + TransactionMap map2 = tx2.openMap("data"); + assertEquals(-1, map2.get(1).intValue()); + } } - private void testTransactionAge() throws Exception { + private void testTransactionAge() { MVStore s; TransactionStore ts; s = MVStore.open(null); ts = new TransactionStore(s); ts.init(); ts.setMaxTransactionId(16); - ArrayList openList = new ArrayList(); + ArrayList openList = new ArrayList<>(); for (int i = 0, j = 1; i < 64; i++) { Transaction t = ts.begin(); openList.add(t); @@ -269,25 +331,19 @@ private void testTransactionAge() throws Exception { } s = MVStore.open(null); - ts = new TransactionStore(s); - ts.init(); - ts.setMaxTransactionId(16); - ArrayList fifo = New.arrayList(); + TransactionStore ts2 = new TransactionStore(s); + ts2.init(); + ts2.setMaxTransactionId(16); + ArrayList fifo = new ArrayList<>(); int open = 0; for (int i = 0; i < 64; i++) 
{ - Transaction t = null; if (open >= 16) { - try { - t = ts.begin(); - fail(); - } catch (IllegalStateException e) { - // expected - too many open - } + assertThrows(MVStoreException.class, () -> ts2.begin()); Transaction first = fifo.remove(0); first.commit(); open--; } - t = ts.begin(); + Transaction t = ts2.begin(); t.openMap("data").put(i, i); fifo.add(t); open++; @@ -295,405 +351,234 @@ private void testTransactionAge() throws Exception { s.close(); } - private void testStopWhileCommitting() throws Exception { - String fileName = getBaseDir() + "/testStopWhileCommitting.h3"; - FileUtils.delete(fileName); - Random r = new Random(0); + private void testGetModifiedMaps() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - for (int i = 0; i < 10;) { - MVStore s; - TransactionStore ts; - Transaction tx; - TransactionMap m; + Transaction tx = ts.begin(); + tx.openMap("m1"); + tx.openMap("m2"); + tx.openMap("m3"); + assertFalse(tx.getChanges(0).hasNext()); + tx.commit(); - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); tx = ts.begin(); - s.setReuseSpace(false); - m = tx.openMap("test"); - final String value = "x" + i; - for (int j = 0; j < 1000; j++) { - m.put(j, value); - } - final AtomicInteger state = new AtomicInteger(); - final MVStore store = s; - final MVMap other = s.openMap("other"); - Task task = new Task() { + TransactionMap m1 = tx.openMap("m1"); + TransactionMap m2 = tx.openMap("m2"); + TransactionMap m3 = tx.openMap("m3"); + m1.put("1", "100"); + long sp = tx.setSavepoint(); + m2.put("1", "100"); + m3.put("1", "100"); + Iterator it = tx.getChanges(sp); + assertTrue(it.hasNext()); + Change c = it.next(); + assertEquals("m3", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m2", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); + + it = 
tx.getChanges(0); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m3", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m2", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m1", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); + + tx.rollbackToSavepoint(sp); + + it = tx.getChanges(0); + assertTrue(it.hasNext()); + c = it.next(); + assertEquals("m1", c.mapName); + assertEquals("1", c.key.toString()); + assertNull(c.value); + assertFalse(it.hasNext()); - @Override - public void call() throws Exception { - for (int i = 0; !stop; i++) { - state.set(i); - other.put(i, value); - store.commit(); - } - } - }; - task.execute(); - // wait for the task to start - while (state.get() < 1) { - Thread.yield(); - } - // commit while writing in the task tx.commit(); - // wait for the task to stop - task.get(); - store.close(); - s = MVStore.open(fileName); - // roll back a bit, until we have some undo log entries - assertTrue(s.hasMap("undoLog")); - for (int back = 0; back < 100; back++) { - int minus = r.nextInt(10); - s.rollbackTo(Math.max(0, s.getCurrentVersion() - minus)); - MVMap undo = s.openMap("undoLog"); - if (undo.size() > 0) { - break; - } - } - // re-open the store, because we have opened - // the undoLog map with the wrong data type - s.close(); - s = MVStore.open(fileName); - ts = new TransactionStore(s); - List list = ts.getOpenTransactions(); - if (list.size() != 0) { - tx = list.get(0); - if (tx.getStatus() == Transaction.STATUS_COMMITTING) { - i++; - } - } - s.close(); - FileUtils.delete(fileName); - assertFalse(FileUtils.exists(fileName)); } } - private void testGetModifiedMaps() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx; - TransactionMap m1, m2, m3; - 
long sp; - - tx = ts.begin(); - m1 = tx.openMap("m1"); - m2 = tx.openMap("m2"); - m3 = tx.openMap("m3"); - assertFalse(tx.getChanges(0).hasNext()); - tx.commit(); - - tx = ts.begin(); - m1 = tx.openMap("m1"); - m2 = tx.openMap("m2"); - m3 = tx.openMap("m3"); - m1.put("1", "100"); - sp = tx.setSavepoint(); - m2.put("1", "100"); - m3.put("1", "100"); - Iterator it = tx.getChanges(sp); - assertTrue(it.hasNext()); - Change c; - c = it.next(); - assertEquals("m3", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m2", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - - it = tx.getChanges(0); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m3", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m2", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m1", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - - tx.rollbackToSavepoint(sp); - - it = tx.getChanges(0); - assertTrue(it.hasNext()); - c = it.next(); - assertEquals("m1", c.mapName); - assertEquals("1", c.key.toString()); - assertNull(c.value); - assertFalse(it.hasNext()); - - tx.commit(); - - s.close(); - } - private void testKeyIterator() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx, tx2; - TransactionMap m, m2; - Iterator it, it2; - - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - m.put("3", "."); - tx.commit(); - - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - m2.remove("2"); - m2.put("3", "!"); - m2.put("4", "?"); - - tx = ts.begin(); - m = tx.openMap("test"); - it = m.keyIterator(null); - assertTrue(it.hasNext()); - 
assertEquals("1", it.next()); - assertTrue(it.hasNext()); - assertEquals("2", it.next()); - assertTrue(it.hasNext()); - assertEquals("3", it.next()); - assertFalse(it.hasNext()); - - it2 = m2.keyIterator(null); - assertTrue(it2.hasNext()); - assertEquals("1", it2.next()); - assertTrue(it2.hasNext()); - assertEquals("3", it2.next()); - assertTrue(it2.hasNext()); - assertEquals("4", it2.next()); - assertFalse(it2.hasNext()); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - s.close(); - } + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + m.put("3", "."); + tx.commit(); - /** - * Tests behavior when used for a sequence of SQL statements. Each statement - * uses a savepoint. Within a statement, changes by the statement itself are - * not seen; the change is only seen when the statement finished. - *

    - * Update statements that change the key of multiple rows may use delete/add - * pairs to do so (they don't need to first delete all entries and then - * re-add them). Trying to add multiple values for the same key is not - * allowed (an update statement that would result in a duplicate key). - */ - private void testMultiStatement() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); + Transaction tx2 = ts.begin(); + TransactionMap m2 = tx2.openMap("test"); + m2.remove("2"); + m2.put("3", "!"); + m2.put("4", "?"); - Transaction tx; - TransactionMap m; - long startUpdate; - - tx = ts.begin(); - - // start of statement - // create table test - startUpdate = tx.setSavepoint(); - m = tx.openMap("test"); - m.setSavepoint(startUpdate); - - // start of statement - // insert into test(id, name) values(1, 'Hello'), (2, 'World') - startUpdate = tx.setSavepoint(); - m.setSavepoint(startUpdate); - assertTrue(m.trySet("1", "Hello", true)); - assertTrue(m.trySet("2", "World", true)); - // not seen yet (within the same statement) - assertNull(m.get("1")); - assertNull(m.get("2")); - - // start of statement - startUpdate = tx.setSavepoint(); - // now we see the newest version - m.setSavepoint(startUpdate); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - // update test set primaryKey = primaryKey + 1 - // (this is usually a tricky case) - assertEquals("Hello", m.get("1")); - assertTrue(m.trySet("1", null, true)); - assertTrue(m.trySet("2", "Hello", true)); - assertEquals("World", m.get("2")); - // already updated by this statement, so it has no effect - // but still returns true because it was changed by this transaction - assertTrue(m.trySet("2", null, true)); - - assertTrue(m.trySet("3", "World", true)); - // not seen within this statement - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - assertNull(m.get("3")); - - // start of statement - startUpdate = tx.setSavepoint(); - 
m.setSavepoint(startUpdate); - // select * from test - assertNull(m.get("1")); - assertEquals("Hello", m.get("2")); - assertEquals("World", m.get("3")); - - // start of statement - startUpdate = tx.setSavepoint(); - m.setSavepoint(startUpdate); - // update test set id = 1 - // should fail: duplicate key - assertTrue(m.trySet("2", null, true)); - assertTrue(m.trySet("1", "Hello", true)); - assertTrue(m.trySet("3", null, true)); - assertFalse(m.trySet("1", "World", true)); - tx.rollbackToSavepoint(startUpdate); - - startUpdate = tx.setSavepoint(); - m.setSavepoint(startUpdate); - assertNull(m.get("1")); - assertEquals("Hello", m.get("2")); - assertEquals("World", m.get("3")); - - tx.commit(); - - ts.close(); - s.close(); + tx = ts.begin(); + m = tx.openMap("test"); + Iterator it = m.keyIterator(null); + assertTrue(it.hasNext()); + assertEquals("1", it.next()); + assertTrue(it.hasNext()); + assertEquals("2", it.next()); + assertTrue(it.hasNext()); + assertEquals("3", it.next()); + assertFalse(it.hasNext()); + + Iterator> entryIt = m.entrySet().iterator(); + assertTrue(entryIt.hasNext()); + assertEquals("1", entryIt.next().getKey()); + assertTrue(entryIt.hasNext()); + assertEquals("2", entryIt.next().getKey()); + assertTrue(entryIt.hasNext()); + assertEquals("3", entryIt.next().getKey()); + assertFalse(entryIt.hasNext()); + + Iterator it2 = m2.keyIterator(null); + assertTrue(it2.hasNext()); + assertEquals("1", it2.next()); + assertTrue(it2.hasNext()); + assertEquals("3", it2.next()); + assertTrue(it2.hasNext()); + assertEquals("4", it2.next()); + assertFalse(it2.hasNext()); + } } private void testTwoPhaseCommit() { String fileName = getBaseDir() + "/testTwoPhaseCommit.h3"; FileUtils.delete(fileName); - MVStore s; - TransactionStore ts; - Transaction tx; - Transaction txOld; TransactionMap m; - List list; - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - assertEquals(null, tx.getName()); - tx.setName("first transaction"); - 
assertEquals("first transaction", tx.getName()); - assertEquals(1, tx.getId()); - assertEquals(Transaction.STATUS_OPEN, tx.getStatus()); - m = tx.openMap("test"); - m.put("1", "Hello"); - list = ts.getOpenTransactions(); - assertEquals(1, list.size()); - txOld = list.get(0); - assertTrue(tx.getId() == txOld.getId()); - assertEquals("first transaction", txOld.getName()); - s.commit(); - ts.close(); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + assertEquals(null, tx.getName()); + tx.setName("first transaction"); + assertEquals("first transaction", tx.getName()); + assertEquals(1, tx.getId()); + assertEquals(Transaction.STATUS_OPEN, tx.getStatus()); + m = tx.openMap("test"); + m.put("1", "Hello"); + List list = ts.getOpenTransactions(); + assertEquals(1, list.size()); + Transaction txOld = list.get(0); + assertTrue(tx.getId() == txOld.getId()); + assertEquals("first transaction", txOld.getName()); + s.commit(); + ts.close(); + } - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - assertEquals(2, tx.getId()); - m = tx.openMap("test"); - assertEquals(null, m.get("1")); - m.put("2", "Hello"); - list = ts.getOpenTransactions(); - assertEquals(2, list.size()); - txOld = list.get(0); - assertEquals(1, txOld.getId()); - assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); - assertEquals("first transaction", txOld.getName()); - txOld.prepare(); - assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); - txOld = list.get(1); - txOld.commit(); - s.commit(); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + assertEquals(2, tx.getId()); + m = tx.openMap("test"); + assertEquals(null, m.get("1")); + m.put("2", "Hello"); + List list = ts.getOpenTransactions(); + assertEquals(2, list.size()); + Transaction txOld = 
list.get(0); + assertEquals(1, txOld.getId()); + assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); + assertEquals("first transaction", txOld.getName()); + txOld.prepare(); + assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); + txOld = list.get(1); + txOld.commit(); + s.commit(); + } - s = MVStore.open(fileName); - ts = new TransactionStore(s); - ts.init(); - tx = ts.begin(); - m = tx.openMap("test"); - m.put("3", "Test"); - assertEquals(2, tx.getId()); - list = ts.getOpenTransactions(); - assertEquals(2, list.size()); - txOld = list.get(1); - assertEquals(2, txOld.getId()); - assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); - assertEquals(null, txOld.getName()); - txOld.rollback(); - txOld = list.get(0); - assertEquals(1, txOld.getId()); - assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); - assertEquals("first transaction", txOld.getName()); - txOld.commit(); - assertEquals("Hello", m.get("1")); - s.close(); + try (MVStore s = MVStore.open(fileName)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction tx = ts.begin(); + m = tx.openMap("test"); + m.put("3", "Test"); + assertEquals(2, tx.getId()); + List list = ts.getOpenTransactions(); + assertEquals(2, list.size()); + Transaction txOld = list.get(1); + assertEquals(2, txOld.getId()); + assertEquals(Transaction.STATUS_OPEN, txOld.getStatus()); + assertEquals(null, txOld.getName()); + txOld.rollback(); + txOld = list.get(0); + assertEquals(1, txOld.getId()); + assertEquals(Transaction.STATUS_PREPARED, txOld.getStatus()); + assertEquals("first transaction", txOld.getName()); + txOld.commit(); + assertEquals("Hello", m.get("1")); + } FileUtils.delete(fileName); } private void testSavepoint() { - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx; - TransactionMap m; + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - tx = ts.begin(); - m = 
tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - long logId = tx.setSavepoint(); - m.put("1", "Hi"); - m.put("2", "."); - m.remove("3"); - tx.rollbackToSavepoint(logId); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.rollback(); - - tx = ts.begin(); - m = tx.openMap("test"); - assertNull(m.get("1")); - assertNull(m.get("2")); - assertNull(m.get("3")); - - ts.close(); - s.close(); + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + long logId = tx.setSavepoint(); + m.put("1", "Hi"); + m.put("2", "."); + m.remove("3"); + tx.rollbackToSavepoint(logId); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + tx.rollback(); + + tx = ts.begin(); + m = tx.openMap("test"); + assertNull(m.get("1")); + assertNull(m.get("2")); + assertNull(m.get("3")); + + ts.close(); + } } private void testCompareWithPostgreSQL() throws Exception { - ArrayList statements = New.arrayList(); - ArrayList transactions = New.arrayList(); - ArrayList> maps = New.arrayList(); + ArrayList statements = new ArrayList<>(); + ArrayList transactions = new ArrayList<>(); + ArrayList> maps = new ArrayList<>(); int connectionCount = 3, opCount = 1000, rowCount = 10; try { Class.forName("org.postgresql.Driver"); for (int i = 0; i < connectionCount; i++) { Connection conn = DriverManager.getConnection( - "jdbc:postgresql:test", "sa", "sa"); + "jdbc:postgresql:test?loggerLevel=OFF", "sa", "sa"); statements.add(conn.createStatement()); } } catch (Exception e) { @@ -705,280 +590,420 @@ private void testCompareWithPostgreSQL() throws Exception { statements.get(0).execute( "create table test(id int primary key, name varchar(255))"); - MVStore s = MVStore.open(null); - TransactionStore ts = new TransactionStore(s); - 
ts.init(); - for (int i = 0; i < connectionCount; i++) { - Statement stat = statements.get(i); - // 100 ms to avoid blocking (the test is single threaded) - stat.execute("set statement_timeout to 100"); - Connection c = stat.getConnection(); - c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - c.setAutoCommit(false); - Transaction transaction = ts.begin(); - transactions.add(transaction); - TransactionMap map; - map = transaction.openMap("test"); - maps.add(map); - } - StringBuilder buff = new StringBuilder(); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + for (int i = 0; i < connectionCount; i++) { + Statement stat = statements.get(i); + // 100 ms to avoid blocking (the test is single threaded) + stat.execute("set statement_timeout to 100"); + Connection c = stat.getConnection(); + c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + c.setAutoCommit(false); + Transaction transaction = ts.begin(); + transactions.add(transaction); + TransactionMap map; + map = transaction.openMap("test"); + maps.add(map); + } + StringBuilder buff = new StringBuilder(); - Random r = new Random(1); - try { - for (int i = 0; i < opCount; i++) { - int connIndex = r.nextInt(connectionCount); - Statement stat = statements.get(connIndex); - Transaction transaction = transactions.get(connIndex); - TransactionMap map = maps.get(connIndex); - if (transaction == null) { - transaction = ts.begin(); - map = transaction.openMap("test"); - transactions.set(connIndex, transaction); - maps.set(connIndex, map); - - // read all data, to get a snapshot - ResultSet rs = stat.executeQuery( - "select * from test order by id"); - buff.append(i).append(": [" + connIndex + "]="); - int size = 0; - while (rs.next()) { - buff.append(' '); - int k = rs.getInt(1); - String v = rs.getString(2); - buff.append(k).append(':').append(v); - assertEquals(v, map.get(k)); - size++; - } - buff.append('\n'); - if (size != 
map.sizeAsLong()) { - assertEquals(size, map.sizeAsLong()); - } - } - int x = r.nextInt(rowCount); - int y = r.nextInt(rowCount); - buff.append(i).append(": [" + connIndex + "]: "); - ResultSet rs = null; - switch (r.nextInt(7)) { - case 0: - buff.append("commit"); - stat.getConnection().commit(); - transaction.commit(); - transactions.set(connIndex, null); - break; - case 1: - buff.append("rollback"); - stat.getConnection().rollback(); - transaction.rollback(); - transactions.set(connIndex, null); - break; - case 2: - // insert or update - String old = map.get(x); - if (old == null) { - buff.append("insert " + x + "=" + y); - if (map.tryPut(x, "" + y)) { - stat.execute("insert into test values(" + x + ", '" + y + "')"); - } else { - buff.append(" -> row was locked"); - // the statement would time out in PostgreSQL - // TODO test sometimes if timeout occurs + Random r = new Random(1); + try { + for (int i = 0; i < opCount; i++) { + int connIndex = r.nextInt(connectionCount); + Statement stat = statements.get(connIndex); + Transaction transaction = transactions.get(connIndex); + TransactionMap map = maps.get(connIndex); + if (transaction == null) { + transaction = ts.begin(); + map = transaction.openMap("test"); + transactions.set(connIndex, transaction); + maps.set(connIndex, map); + + // read all data, to get a snapshot + ResultSet rs = stat.executeQuery( + "select * from test order by id"); + buff.append(i).append(": [" + connIndex + "]="); + int size = 0; + while (rs.next()) { + buff.append(' '); + int k = rs.getInt(1); + String v = rs.getString(2); + buff.append(k).append(':').append(v); + assertEquals(v, map.get(k)); + size++; } - } else { - buff.append("update " + x + "=" + y + " (old:" + old + ")"); - if (map.tryPut(x, "" + y)) { - int c = stat.executeUpdate("update test set name = '" + y - + "' where id = " + x); - assertEquals(1, c); - } else { - buff.append(" -> row was locked"); - // the statement would time out in PostgreSQL - // TODO test sometimes if 
timeout occurs + buff.append('\n'); + if (size != map.sizeAsLong()) { + assertEquals(size, map.sizeAsLong()); } } - break; - case 3: - buff.append("delete " + x); - try { - int c = stat.executeUpdate("delete from test where id = " + x); - if (c == 1) { - map.remove(x); - } else { - assertNull(map.get(x)); - } - } catch (SQLException e) { - assertTrue(map.get(x) != null); - assertFalse(map.tryRemove(x)); - // PostgreSQL needs to rollback - buff.append(" -> rollback"); - stat.getConnection().rollback(); - transaction.rollback(); - transactions.set(connIndex, null); + int x = r.nextInt(rowCount); + int y = r.nextInt(rowCount); + buff.append(i).append(": [" + connIndex + "]: "); + ResultSet rs = null; + switch (r.nextInt(7)) { + case 0: + buff.append("commit"); + stat.getConnection().commit(); + transaction.commit(); + transactions.set(connIndex, null); + break; + case 1: + buff.append("rollback"); + stat.getConnection().rollback(); + transaction.rollback(); + transactions.set(connIndex, null); + break; + case 2: + // insert or update + String old = map.get(x); + if (old == null) { + buff.append("insert " + x + "=" + y); + if (map.tryPut(x, "" + y)) { + stat.execute("insert into test values(" + x + ", '" + y + "')"); + } else { + buff.append(" -> row was locked"); + // the statement would time out in PostgreSQL + // TODO test sometimes if timeout occurs + } + } else { + buff.append("update " + x + "=" + y + " (old:" + old + ")"); + if (map.tryPut(x, "" + y)) { + int c = stat.executeUpdate("update test set name = '" + y + + "' where id = " + x); + assertEquals(1, c); + } else { + buff.append(" -> row was locked"); + // the statement would time out in PostgreSQL + // TODO test sometimes if timeout occurs + } + } + break; + case 3: + buff.append("delete " + x); + try { + int c = stat.executeUpdate("delete from test where id = " + x); + if (c == 1) { + map.remove(x); + } else { + assertNull(map.get(x)); + } + } catch (SQLException e) { + assertNotNull(map.get(x)); + 
assertFalse(map.tryRemove(x)); + // PostgreSQL needs to rollback + buff.append(" -> rollback"); + stat.getConnection().rollback(); + transaction.rollback(); + transactions.set(connIndex, null); + } + break; + case 4: + case 5: + case 6: + rs = stat.executeQuery("select * from test where id = " + x); + String expected = rs.next() ? rs.getString(2) : null; + buff.append("select " + x + "=" + expected); + assertEquals("i:" + i, expected, map.get(x)); + break; } - break; - case 4: - case 5: - case 6: - rs = stat.executeQuery("select * from test where id = " + x); - String expected = rs.next() ? rs.getString(2) : null; - buff.append("select " + x + "=" + expected); - assertEquals("i:" + i, expected, map.get(x)); - break; + buff.append('\n'); } - buff.append('\n'); + } catch (Exception e) { + e.printStackTrace(); + fail(buff.toString()); } - } catch (Exception e) { - e.printStackTrace(); - fail(buff.toString()); - } - for (Statement stat : statements) { - stat.getConnection().close(); + for (Statement stat : statements) { + stat.getConnection().close(); + } + ts.close(); } - ts.close(); - s.close(); } private void testConcurrentTransactionsReadCommitted() { - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - TransactionStore ts = new TransactionStore(s); - ts.init(); - Transaction tx1, tx2; - TransactionMap m1, m2; - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hi"); - m1.put("3", "."); - tx1.commit(); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hello"); - m1.put("2", "World"); - m1.remove("3"); - tx1.commit(); - - // start new transaction to read old data - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - - // start transaction tx1, update/delete/add - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("1", "Hallo"); - m1.remove("2"); - m1.put("3", "!"); - - assertEquals("Hello", m2.get("1")); - assertEquals("World", m2.get("2")); - 
assertNull(m2.get("3")); - - tx1.commit(); - - assertEquals("Hallo", m2.get("1")); - assertNull(m2.get("2")); - assertEquals("!", m2.get("3")); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - m1.put("2", "World"); - - assertNull(m2.get("2")); - assertFalse(m2.tryRemove("2")); - assertFalse(m2.tryPut("2", "Welt")); - - tx2 = ts.begin(); - m2 = tx2.openMap("test"); - assertNull(m2.get("2")); - m1.remove("2"); - assertNull(m2.get("2")); - tx1.commit(); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - assertNull(m1.get("2")); - m1.put("2", "World"); - m1.put("2", "Welt"); - tx1.rollback(); - - tx1 = ts.begin(); - m1 = tx1.openMap("test"); - assertNull(m1.get("2")); - - ts.close(); - s.close(); + Transaction tx1 = ts.begin(); + TransactionMap m1 = tx1.openMap("test"); + m1.put("1", "Hi"); + m1.put("3", "."); + tx1.commit(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("1", "Hello"); + m1.put("2", "World"); + m1.remove("3"); + tx1.commit(); + + // start new transaction to read old data + Transaction tx2 = ts.begin(); + TransactionMap m2 = tx2.openMap("test"); + + // start transaction tx1, update/delete/add + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("1", "Hallo"); + m1.remove("2"); + m1.put("3", "!"); + + assertEquals("Hello", m2.get("1")); + assertEquals("World", m2.get("2")); + assertNull(m2.get("3")); + + tx1.commit(); + + assertEquals("Hallo", m2.get("1")); + assertNull(m2.get("2")); + assertEquals("!", m2.get("3")); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + m1.put("2", "World"); + + assertNull(m2.get("2")); + assertFalse(m2.tryRemove("2")); + assertFalse(m2.tryPut("2", "Welt")); + + tx2 = ts.begin(); + m2 = tx2.openMap("test"); + assertNull(m2.get("2")); + m1.remove("2"); + assertNull(m2.get("2")); + tx1.commit(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + assertNull(m1.get("2")); + m1.put("2", "World"); + m1.put("2", "Welt"); + tx1.rollback(); + + tx1 = ts.begin(); + m1 = tx1.openMap("test"); + 
assertNull(m1.get("2")); + + ts.close(); + } } private void testSingleConnection() { - MVStore s = MVStore.open(null); + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); - TransactionStore ts = new TransactionStore(s); - ts.init(); + // add, rollback + Transaction tx = ts.begin(); + TransactionMap m = tx.openMap("test"); + m.put("1", "Hello"); + assertEquals("Hello", m.get("1")); + m.put("2", "World"); + assertEquals("World", m.get("2")); + tx.rollback(); + tx = ts.begin(); + m = tx.openMap("test"); + assertNull(m.get("1")); + assertNull(m.get("2")); - Transaction tx; - TransactionMap m; + // add, commit + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hello"); + m.put("2", "World"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); + tx.commit(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); - // add, rollback - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - assertEquals("Hello", m.get("1")); - m.put("2", "World"); - assertEquals("World", m.get("2")); - tx.rollback(); - tx = ts.begin(); - m = tx.openMap("test"); - assertNull(m.get("1")); - assertNull(m.get("2")); - - // add, commit - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hello"); - m.put("2", "World"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - tx.commit(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - - // update+delete+insert, rollback - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.rollback(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hello", m.get("1")); - assertEquals("World", m.get("2")); - assertNull(m.get("3")); - - // 
update+delete+insert, commit - tx = ts.begin(); - m = tx.openMap("test"); - m.put("1", "Hallo"); - m.remove("2"); - m.put("3", "!"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - tx.commit(); - tx = ts.begin(); - m = tx.openMap("test"); - assertEquals("Hallo", m.get("1")); - assertNull(m.get("2")); - assertEquals("!", m.get("3")); - - ts.close(); - s.close(); + // update+delete+insert, rollback + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + tx.rollback(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hello", m.get("1")); + assertEquals("World", m.get("2")); + assertNull(m.get("3")); + + // update+delete+insert, commit + tx = ts.begin(); + m = tx.openMap("test"); + m.put("1", "Hallo"); + m.remove("2"); + m.put("3", "!"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + tx.commit(); + tx = ts.begin(); + m = tx.openMap("test"); + assertEquals("Hallo", m.get("1")); + assertNull(m.get("2")); + assertEquals("!", m.get("3")); + + ts.close(); + } + } + + private static void testStoreMultiThreadedReads() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + TransactionMap mapA = t.openMap("a"); + mapA.put(1, 0); + t.commit(); + + Task task = new Task() { + @Override + public void call() { + for (int i = 0; !stop; i++) { + Transaction tx = ts.begin(); + TransactionMap mapA = tx.openMap("a"); + while (!mapA.tryPut(1, i)) { + // repeat + } + tx.commit(); + + // map B transaction + // the other thread will get a map A uncommitted value, + // but by the time it tries to walk back to the committed + // value, the undoLog has changed + tx = ts.begin(); + TransactionMap mapB = tx.openMap("b"); + // put a new value to the map; this 
will cause a map B + // undoLog entry to be created with a null pre-image value + mapB.tryPut(i, -i); + // this is where the real race condition occurs: + // some other thread might get the B log entry + // for this transaction rather than the uncommitted A log + // entry it is expecting + tx.commit(); + } + } + }; + task.execute(); + try { + for (int i = 0; i < 10000; i++) { + Transaction tx = ts.begin(); + mapA = tx.openMap("a"); + if (mapA.get(1) == null) { + throw new AssertionError("key not found"); + } + tx.commit(); + } + } finally { + task.get(); + } + ts.close(); + } + } + + private void testCommitAfterMapRemoval() { + try (MVStore s = MVStore.open(null)) { + TransactionStore ts = new TransactionStore(s); + ts.init(); + Transaction t = ts.begin(); + TransactionMap map = t.openMap("test", LongDataType.INSTANCE, StringDataType.INSTANCE); + map.put(1L, "A"); + s.removeMap("test"); + try { + t.commit(); + } finally { + // commit should not fail, but even if it does + // transaction should be cleanly removed and store remains operational + assertTrue(ts.getOpenTransactions().isEmpty()); + assertFalse(ts.hasMap("test")); + t = ts.begin(); + map = t.openMap("test", LongDataType.INSTANCE, StringDataType.INSTANCE); + assertTrue(map.isEmpty()); + map.put(2L, "B"); + } + } } + private void testDeadLock() { + int threadCount = 2; + for (int i = 1; i < threadCount; i++) { + testDeadLock(threadCount, i); + } + } + + private void testDeadLock(int threadCount, int stepCount) { + try (MVStore s = MVStore.open(null)) { + s.setAutoCommitDelay(0); + TransactionStore ts = new TransactionStore(s, + new MetaType<>(null, s.backgroundExceptionHandler), new ObjectDataType(), 10000); + ts.init(); + Transaction t = ts.begin(); + TransactionMap m = t.openMap("test", LongDataType.INSTANCE, LongDataType.INSTANCE); + for (int i = 0; i < threadCount; i++) { + m.put((long)i, 0L); + } + t.commit(); + + CountDownLatch latch = new CountDownLatch(threadCount); + Task[] tasks = new 
Task[threadCount]; + for (int i = 0; i < threadCount; i++) { + long initialKey = i; + tasks[i] = new Task() { + @Override + public void call() throws Exception { + Transaction tx = ts.begin(); + try { + TransactionMap map = tx.openMap("test", LongDataType.INSTANCE, + LongDataType.INSTANCE); + long key = initialKey; + map.computeIfPresent(key, (k, v) -> v + 1); + latch.countDown(); + latch.await(); + for (int j = 0; j < stepCount; j++) { + key = (key + 1) % threadCount; + map.lock(key); + map.put(key, map.get(key) + 1); + } + tx.commit(); + } catch (Throwable e) { + tx.rollback(); + throw e; + } + } + }.execute(); + } + int failureCount = 0; + for (Task task : tasks) { + Exception exception = task.getException(); + if (exception != null) { + ++failureCount; + assertEquals(MVStoreException.class, exception.getClass()); + checkErrorCode(DataUtils.ERROR_TRANSACTIONS_DEADLOCK, exception); + } + } + assertEquals(" "+stepCount, stepCount, failureCount); + t = ts.begin(); + m = t.openMap("test", LongDataType.INSTANCE, LongDataType.INSTANCE); + int count = 0; + for (int i = 0; i < threadCount; i++) { + Long value = m.get((long) i); + assertNotNull("Key " + i, value); + count += value; + } + t.commit(); + assertEquals(" "+stepCount, (stepCount+1) * (threadCount - failureCount), count); + } + } } diff --git a/h2/src/test/org/h2/test/store/package.html b/h2/src/test/org/h2/test/store/package.html index 72f6e03918..f71790e7b3 100644 --- a/h2/src/test/org/h2/test/store/package.html +++ b/h2/src/test/org/h2/test/store/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/BnfRandom.java b/h2/src/test/org/h2/test/synth/BnfRandom.java index 244f24179d..cc35923947 100644 --- a/h2/src/test/org/h2/test/synth/BnfRandom.java +++ b/h2/src/test/org/h2/test/synth/BnfRandom.java @@ -1,18 +1,18 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; import java.util.ArrayList; import java.util.Random; + import org.h2.bnf.Bnf; import org.h2.bnf.BnfVisitor; import org.h2.bnf.Rule; import org.h2.bnf.RuleFixed; import org.h2.bnf.RuleHead; -import org.h2.util.New; /** * A BNF visitor that generates a random SQL statement. @@ -22,13 +22,14 @@ public class BnfRandom implements BnfVisitor { private static final boolean SHOW_SYNTAX = false; private final Random random = new Random(); - private final ArrayList statements = New.arrayList(); + private final ArrayList statements = new ArrayList<>(); private int level; private String sql; - BnfRandom() throws Exception { + public BnfRandom() throws Exception { Bnf config = Bnf.getInstance(null); + config.addAlias("procedure", "@func@"); config.linkStatements(); ArrayList all = config.getStatements(); @@ -151,18 +152,7 @@ private String getRandomFixed(int type) { @Override public void visitRuleList(boolean or, ArrayList list) { if (or) { - if (level > 10) { - if (level > 1000) { - // better than stack overflow - throw new AssertionError(); - } - list.get(0).accept(this); - return; - } - int idx = random.nextInt(list.size()); - level++; - list.get(idx).accept(this); - level--; + visitOr(list); return; } StringBuilder buff = new StringBuilder(); @@ -186,11 +176,42 @@ public void visitRuleOptional(Rule rule) { sql = ""; } + @Override + public void visitRuleOptional(ArrayList list) { + if (level > 10 ? 
random.nextInt(level) == 1 : random.nextInt(4) == 1) { + level++; + visitOr(list); + level--; + return; + } + sql = ""; + } + + private void visitOr(ArrayList list) throws AssertionError { + if (level > 10) { + if (level > 1000) { + // better than stack overflow + throw new AssertionError(); + } + list.get(0).accept(this); + return; + } + int idx = random.nextInt(list.size()); + level++; + list.get(idx).accept(this); + level--; + } + @Override public void visitRuleRepeat(boolean comma, Rule rule) { rule.accept(this); } + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + rule.accept(this); + } + public void setSeed(int seed) { random.setSeed(seed); } diff --git a/h2/src/test/org/h2/test/synth/OutputCatcher.java b/h2/src/test/org/h2/test/synth/OutputCatcher.java index 6a77de5532..2ab3413d44 100644 --- a/h2/src/test/org/h2/test/synth/OutputCatcher.java +++ b/h2/src/test/org/h2/test/synth/OutputCatcher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -8,7 +8,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.LinkedList; - +import java.util.concurrent.TimeUnit; import org.h2.util.IOUtils; /** @@ -16,7 +16,7 @@ */ public class OutputCatcher extends Thread { private final InputStream in; - private final LinkedList list = new LinkedList(); + private final LinkedList list = new LinkedList<>(); public OutputCatcher(InputStream in) { this.in = in; @@ -29,7 +29,7 @@ public OutputCatcher(InputStream in) { * @return the line */ public String readLine(long wait) { - long start = System.currentTimeMillis(); + long start = System.nanoTime(); while (true) { synchronized (list) { if (list.size() > 0) { @@ -40,8 +40,8 @@ public String readLine(long wait) { } catch (InterruptedException e) { // ignore } - long time = System.currentTimeMillis() - start; - if (time >= wait) { + long time = System.nanoTime() - start; + if (time >= TimeUnit.MILLISECONDS.toNanos(wait)) { return null; } } @@ -50,29 +50,39 @@ public String readLine(long wait) { @Override public void run() { - StringBuilder buff = new StringBuilder(); - while (true) { - try { - int x = in.read(); - if (x < 0) { - break; - } - if (x < ' ') { - if (buff.length() > 0) { - String s = buff.toString(); - buff.setLength(0); - synchronized (list) { - list.add(s); - list.notifyAll(); + final StringBuilder buff = new StringBuilder(); + try { + while (true) { + try { + int x = in.read(); + if (x < 0) { + break; + } + if (x < ' ') { + if (buff.length() > 0) { + String s = buff.toString(); + buff.setLength(0); + synchronized (list) { + list.add(s); + list.notifyAll(); + } } + } else { + buff.append((char) x); } - } else { - buff.append((char) x); + } catch (IOException e) { + break; + } + } + IOUtils.closeSilently(in); + } finally { + // just in case something goes wrong, make sure we store any partial output we got + if (buff.length() > 0) { + synchronized (list) { + 
list.add(buff.toString()); + list.notifyAll(); } - } catch (IOException e) { - break; } } - IOUtils.closeSilently(in); } } diff --git a/h2/src/test/org/h2/test/synth/TestBtreeIndex.java b/h2/src/test/org/h2/test/synth/TestBtreeIndex.java index 3e01f748c4..42dfae5ec1 100644 --- a/h2/src/test/org/h2/test/synth/TestBtreeIndex.java +++ b/h2/src/test/org/h2/test/synth/TestBtreeIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -12,12 +12,13 @@ import java.sql.Statement; import java.util.Random; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; /** * A b-tree index test. */ -public class TestBtreeIndex extends TestBase { +public class TestBtreeIndex extends TestDb { /** * Run just this test. 
@@ -43,8 +44,8 @@ public void test() throws SQLException { } private void testAddDelete() throws SQLException { - deleteDb("index"); - Connection conn = getConnection("index"); + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); try { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE TEST(ID bigint primary key)"); @@ -54,7 +55,7 @@ private void testAddDelete() throws SQLException { count + ")"); if (!config.memory) { conn.close(); - conn = getConnection("index"); + conn = getConnection(getTestName()); stat = conn.createStatement(); } for (int i = 1; i < count; i++) { @@ -68,17 +69,16 @@ private void testAddDelete() throws SQLException { } finally { conn.close(); } - deleteDb("index"); + deleteDb(getTestName()); } - @Override - public void testCase(int seed) throws SQLException { + private void testCase(int seed) throws SQLException { testOne(seed); } private void testOne(int seed) throws SQLException { org.h2.Driver.load(); - deleteDb("index"); + deleteDb(getTestName()); printTime("testIndex " + seed); Random random = new Random(seed); int distinct, prefixLength; @@ -101,9 +101,8 @@ private void testOne(int seed) throws SQLException { } } String prefix = buff.toString().substring(0, prefixLength); - DeleteDbFiles.execute(getBaseDir() + "/index", null, true); - Connection conn = getConnection("index"); - try { + DeleteDbFiles.execute(getBaseDir() + "/" + getTestName(), null, true); + try (Connection conn = getConnection(getTestName())) { Statement stat = conn.createStatement(); stat.execute("CREATE TABLE a(text VARCHAR PRIMARY KEY)"); PreparedStatement prepInsert = conn.prepareStatement( @@ -189,10 +188,8 @@ private void testOne(int seed) throws SQLException { if (rs.next()) { printError(seed, "testCount:" + testCount + " " + rs.getString(1)); } - } finally { - conn.close(); } - deleteDb("index"); + deleteDb(getTestName()); } private void printError(int seed, String message) { diff --git 
a/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java b/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java index c2e9b17b8d..072029b1a1 100644 --- a/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java +++ b/h2/src/test/org/h2/test/synth/TestConcurrentUpdate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -10,19 +10,21 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; import java.util.Random; - -import org.h2.api.ErrorCode; +import java.util.concurrent.CountDownLatch; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Task; /** * A concurrent test. */ -public class TestConcurrentUpdate extends TestBase { +public class TestConcurrentUpdate extends TestDb { - private static final int THREADS = 3; - private static final int ROW_COUNT = 10; + private static final int THREADS = 10; + private static final int ROW_COUNT = 3; /** * Run just this test. @@ -30,104 +32,143 @@ public class TestConcurrentUpdate extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase t = TestBase.createCaller().init(); - t.config.memory = true; - t.test(); + org.h2.test.TestAll config = new org.h2.test.TestAll(); +// config.memory = true; +// config.mvStore = false; + System.out.println(config); + TestBase test = createCaller().init(config); + for (int i = 0; i < 10; i++) { + System.out.println("Pass #" + i); + test.testFromMain(); + } } @Override public void test() throws Exception { + testConcurrent(); + testConcurrentShutdown(); + } + + private void testConcurrent() throws Exception { deleteDb("concurrent"); - final String url = getURL("concurrent;MULTI_THREADED=TRUE", true); - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name varchar)"); + final String url = getURL("concurrent;LOCK_TIMEOUT=2000", true); + try (Connection conn = getConnection(url)) { + Statement stat = conn.createStatement(); + stat.execute("create table test(id int primary key, name varchar)"); - Task[] tasks = new Task[THREADS]; - for (int i = 0; i < THREADS; i++) { - final int threadId = i; - Task t = new Task() { - @Override - public void call() throws Exception { - Random r = new Random(threadId); - Connection conn = getConnection(url); - PreparedStatement insert = conn.prepareStatement( - "insert into test values(?, ?)"); - PreparedStatement update = conn.prepareStatement( - "update test set name = ? 
where id = ?"); - PreparedStatement delete = conn.prepareStatement( - "delete from test where id = ?"); - PreparedStatement select = conn.prepareStatement( - "select * from test where id = ?"); - while (!stop) { - try { - int x = r.nextInt(ROW_COUNT); - String data = "x" + r.nextInt(ROW_COUNT); - switch (r.nextInt(3)) { - case 0: - insert.setInt(1, x); - insert.setString(2, data); - insert.execute(); - break; - case 1: - update.setString(1, data); - update.setInt(2, x); - update.execute(); - break; - case 2: - delete.setInt(1, x); - delete.execute(); - break; - case 4: - select.setInt(1, x); - ResultSet rs = select.executeQuery(); - while (rs.next()) { - rs.getString(2); + Task[] tasks = new Task[THREADS]; + for (int i = 0; i < THREADS; i++) { + final int threadId = i; + Task t = new Task() { + @Override + public void call() throws Exception { + Random r = new Random(threadId); + try (Connection conn = getConnection(url)) { + PreparedStatement insert = conn.prepareStatement( + "merge into test values(?, ?)"); + PreparedStatement update = conn.prepareStatement( + "update test set name = ? 
where id = ?"); + PreparedStatement delete = conn.prepareStatement( + "delete from test where id = ?"); + PreparedStatement select = conn.prepareStatement( + "select * from test where id = ?"); + while (!stop) { + int x = r.nextInt(ROW_COUNT); + String data = "x" + r.nextInt(ROW_COUNT); + switch (r.nextInt(4)) { + case 0: + insert.setInt(1, x); + insert.setString(2, data); + insert.execute(); + break; + case 1: + update.setString(1, data); + update.setInt(2, x); + update.execute(); + break; + case 2: + delete.setInt(1, x); + delete.execute(); + break; + case 3: + select.setInt(1, x); + ResultSet rs = select.executeQuery(); + while (rs.next()) { + rs.getString(2); + } + break; } - break; } - } catch (SQLException e) { - handleException(e); } } - conn.close(); + }; + tasks[i] = t; + t.execute(); + } + // test 2 seconds + Thread.sleep(2000); + boolean success = true; + for (Task t : tasks) { + t.join(); + Throwable exception = t.getException(); + if (exception != null) { + logError("", exception); + success = false; } + } + assert success; + } + } - }; - tasks[i] = t; - t.execute(); + private void testConcurrentShutdown() throws SQLException { + if (config.memory) { + return; } - // test 2 seconds - for (int i = 0; i < 200; i++) { - Thread.sleep(10); - for (Task t : tasks) { - if (t.isFinished()) { - i = 1000; - break; + deleteDb(getTestName()); + final String url = getURL(getTestName(), true); + try (Connection connection = getConnection(url)) { + connection.createStatement().execute("create table test(id int primary key, v int)"); + connection.createStatement().execute("insert into test values(0, 0)"); + } + int len = 2; + final CountDownLatch latch = new CountDownLatch(len + 1); + Collection tasks = new ArrayList<>(); + + tasks.add(new Task() { + @Override + public void call() throws Exception { + try (Connection c = getConnection(url)) { + c.setAutoCommit(false); + c.createStatement().execute("insert into test values(1, 1)"); + latch.countDown(); + latch.await(); 
} } + }); + + for (int i = 0; i < len; i++) { + tasks.add(new Task() { + @Override + public void call() throws Exception { + try (Connection c = getConnection(url)) { + Statement stmt = c.createStatement(); + latch.countDown(); + latch.await(); + stmt.execute("shutdown"); + } + } + }); } - for (Task t : tasks) { - t.get(); + for (Task task : tasks) { + task.execute(); } - conn.close(); - } - - /** - * Handle or ignore the exception. - * - * @param e the exception - */ - void handleException(SQLException e) throws SQLException { - switch (e.getErrorCode()) { - case ErrorCode.CONCURRENT_UPDATE_1: - case ErrorCode.DUPLICATE_KEY_1: - case ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1: - case ErrorCode.LOCK_TIMEOUT_1: - break; - default: - throw e; + for (Task task : tasks) { + task.getException(); + } + try (Connection connection = getConnection(getTestName())) { + ResultSet rs = connection.createStatement().executeQuery("select count(*) from test"); + rs.next(); + assertEquals(1, rs.getInt(1)); } } - } diff --git a/h2/src/test/org/h2/test/synth/TestCrashAPI.java b/h2/src/test/org/h2/test/synth/TestCrashAPI.java index 530e1eab25..f88c3841e9 100644 --- a/h2/src/test/org/h2/test/synth/TestCrashAPI.java +++ b/h2/src/test/org/h2/test/synth/TestCrashAPI.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -11,6 +11,7 @@ import java.lang.reflect.Array; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.lang.reflect.Modifier; import java.sql.BatchUpdateException; import java.sql.Blob; import java.sql.CallableStatement; @@ -22,34 +23,34 @@ import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.Savepoint; import java.sql.Statement; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Comparator; +import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Map; import org.h2.api.ErrorCode; -import org.h2.jdbc.JdbcConnection; import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; -import org.h2.test.TestAll; import org.h2.test.TestBase; -import org.h2.test.db.TestScript; +import org.h2.test.TestDb; +import org.h2.test.scripts.TestScript; import org.h2.test.synth.sql.RandomGen; import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Restore; import org.h2.util.MathUtils; -import org.h2.util.New; /** * A test that calls random methods with random parameters from JDBC objects. * This is sometimes called 'Fuzz Testing'. 
*/ -public class TestCrashAPI extends TestBase implements Runnable { +public class TestCrashAPI extends TestDb implements Runnable { private static final boolean RECOVER_ALL = false; @@ -60,11 +61,11 @@ public class TestCrashAPI extends TestBase implements Runnable { private static final String DIR = "synth"; - private final ArrayList objects = New.arrayList(); + private final ArrayList objects = new ArrayList<>(); private final HashMap, ArrayList> classMethods = - New.hashMap(); + new HashMap<>(); private RandomGen random = new RandomGen(); - private final ArrayList statements = New.arrayList(); + private ArrayList statements; private int openCount; private long callCount; private volatile long maxWait = 60; @@ -80,11 +81,10 @@ public class TestCrashAPI extends TestBase implements Runnable { public static void main(String... a) throws Exception { System.setProperty("h2.delayWrongPasswordMin", "0"); System.setProperty("h2.delayWrongPasswordMax", "0"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - @SuppressWarnings("deprecation") public void run() { while (--maxWait > 0) { try { @@ -101,11 +101,11 @@ public void run() { if (maxWait == 0 && running) { objects.clear(); if (running) { - println("stopping (force)..."); + println("stopping (trying to interrupt)..."); for (StackTraceElement e : mainThread.getStackTrace()) { System.out.println(e.toString()); } - mainThread.stop(new SQLException("stop")); + mainThread.interrupt(); } } } @@ -113,12 +113,7 @@ public void run() { private static void recoverAll() { org.h2.Driver.load(); File[] files = new File("temp/backup").listFiles(); - Arrays.sort(files, new Comparator() { - @Override - public int compare(File o1, File o2) { - return o1.getName().compareTo(o2.getName()); - } - }); + Arrays.sort(files, Comparator.comparing(File::getName)); for (File f : files) { if (!f.getName().startsWith("db-")) { continue; @@ -146,15 +141,29 @@ public int compare(File o1, File o2) 
{ } } + @Override + public boolean isEnabled() { + if (config.networked) { + return false; + } + return true; + } + @Override public void test() throws Exception { if (RECOVER_ALL) { recoverAll(); return; } - if (config.mvcc || config.networked) { + + if (config.networked) { return; } + + TestScript script = new TestScript(); + statements = script.getAllStatements(config); + initMethods(); + int len = getSize(2, 6); Thread t = new Thread(this); try { @@ -276,8 +285,7 @@ private Connection getConnection(int seed, boolean delete) throws SQLException { return conn; } - @Override - public void testCase(int seed) throws SQLException { + private void testCase(int seed) throws SQLException { printTime("seed: " + seed); callCount = 0; openCount = 0; @@ -308,12 +316,7 @@ public void testCase(int seed) throws SQLException { break; } try { -long start = System.currentTimeMillis(); conn = getConnection(seed, false); -long connectTime = System.currentTimeMillis() - start; -if (connectTime > 2000) { - System.out.println("??? 
connected2 in " + connectTime); -} } catch (Throwable t) { printIfBad(seed, -i, -1, t); } @@ -332,7 +335,7 @@ public void testCase(int seed) throws SQLException { continue; } if (random.getInt(2000) == 0 && conn != null) { - ((JdbcConnection) conn).setPowerOffCount(random.getInt(50)); + setPowerOffCount(conn, random.getInt(50)); } Object o = objects.get(objectId); if (o == null) { @@ -375,6 +378,11 @@ private void printError(int seed, int id, Throwable t) { } private Object callRandom(int seed, int id, int objectId, Object o, Method m) { + // TODO m.isDefault() can be used on Java 8 + boolean isDefault = + (m.getModifiers() & (Modifier.ABSTRACT | Modifier.PUBLIC | Modifier.STATIC)) == Modifier.PUBLIC + && m.getDeclaringClass().isInterface(); + boolean allowNPE = isDefault || o instanceof Blob && "setBytes".equals(m.getName()); Class[] paramClasses = m.getParameterTypes(); Object[] params = new Object[paramClasses.length]; for (int i = 0; i < params.length; i++) { @@ -384,13 +392,11 @@ private Object callRandom(int seed, int id, int objectId, Object o, Method m) { try { callCount++; result = m.invoke(o, params); - } catch (IllegalArgumentException e) { - TestBase.logError("error", e); - } catch (IllegalAccessException e) { + } catch (IllegalArgumentException | IllegalAccessException e) { TestBase.logError("error", e); } catch (InvocationTargetException e) { Throwable t = e.getTargetException(); - printIfBad(seed, id, objectId, t); + printIfBad(seed, id, objectId, t, allowNPE); } if (result == null) { return null; @@ -403,10 +409,18 @@ private Object callRandom(int seed, int id, int objectId, Object o, Method m) { } private void printIfBad(int seed, int id, int objectId, Throwable t) { + printIfBad(seed, id, objectId, t, false); + } + + private void printIfBad(int seed, int id, int objectId, Throwable t, boolean allowNPE) { if (t instanceof BatchUpdateException) { // do nothing } else if (t.getClass().getName().contains("SQLClientInfoException")) { // do nothing + } 
else if (t instanceof UnsupportedOperationException) { + // do nothing - new Java8/9 stuff + } else if (t instanceof SQLFeatureNotSupportedException) { + // do nothing } else if (t instanceof SQLException) { SQLException s = (SQLException) t; int errorCode = s.getErrorCode(); @@ -422,6 +436,8 @@ private void printIfBad(int seed, int id, int objectId, Throwable t) { // General error [HY000] printError(seed, id, s); } + } else if (allowNPE && t instanceof NullPointerException) { + // do nothing, this methods may throw this exception } else { printError(seed, id, t); } @@ -441,7 +457,7 @@ private Object getRandomParam(Class type) { } else if (type == boolean.class) { return random.nextBoolean(); } else if (type == double.class) { - return new Double(random.getRandomDouble()); + return random.getRandomDouble(); } else if (type == String.class) { if (random.getInt(10) == 0) { return null; @@ -486,7 +502,7 @@ private Object getRandomParam(Class type) { // TODO should use generated savepoints return null; } else if (type == Calendar.class) { - return Calendar.getInstance(); + return new GregorianCalendar(); } else if (type == java.net.URL.class) { return null; } else if (type == java.math.BigDecimal.class) { @@ -508,29 +524,12 @@ private Class getJdbcInterface(Object o) { private void initMethods() { for (Class inter : INTERFACES) { - classMethods.put(inter, new ArrayList()); - } - for (Class inter : INTERFACES) { - ArrayList list = classMethods.get(inter); + ArrayList list = new ArrayList<>(); for (Method m : inter.getMethods()) { list.add(m); } + classMethods.put(inter, list); } } - @Override - public TestBase init(TestAll conf) throws Exception { - super.init(conf); - if (config.mvcc || config.networked) { - return this; - } - startServerIfRequired(); - TestScript script = new TestScript(); - ArrayList add = script.getAllStatements(config); - initMethods(); - org.h2.Driver.load(); - statements.addAll(add); - return this; - } - } diff --git 
a/h2/src/test/org/h2/test/synth/TestDiskFull.java b/h2/src/test/org/h2/test/synth/TestDiskFull.java index 8fb3ba2e89..16e4a0a1c3 100644 --- a/h2/src/test/org/h2/test/synth/TestDiskFull.java +++ b/h2/src/test/org/h2/test/synth/TestDiskFull.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -9,15 +9,16 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; - import org.h2.api.ErrorCode; +import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.FilePathUnstable; /** * Test simulated disk full problems. */ -public class TestDiskFull extends TestBase { +public class TestDiskFull extends TestDb { private FilePathUnstable fs; @@ -27,17 +28,13 @@ public class TestDiskFull extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { fs = FilePathUnstable.register(); - if (config.mvStore) { - fs.setPartialWrites(true); - } else { - fs.setPartialWrites(false); - } + fs.setPartialWrites(true); try { test(Integer.MAX_VALUE); int max = Integer.MAX_VALUE - fs.getDiskFullCount() + 10; @@ -129,6 +126,9 @@ private boolean test(int x) throws SQLException { stat.execute("script to 'memFS:test.sql'"); conn.close(); + deleteDb("memFS:", null); + FileUtils.delete("memFS:test.sql"); + return false; } diff --git a/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java b/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java index 2a5c9951be..f029e1f361 100644 --- a/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java +++ b/h2/src/test/org/h2/test/synth/TestFuzzOptimizations.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -10,20 +10,21 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Random; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.db.Db; import org.h2.test.db.Db.Prepared; -import org.h2.util.New; /** * This test executes random SQL statements to test if optimizations are working * correctly. */ -public class TestFuzzOptimizations extends TestBase { +public class TestFuzzOptimizations extends TestDb { private Connection conn; @@ -33,20 +34,20 @@ public class TestFuzzOptimizations extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { - deleteDb("optimizations"); - conn = getConnection("optimizations"); + deleteDb(getTestName()); + conn = getConnection(getTestName()); if (!config.diskResult) { testIn(); } testGroupSorted(); testInSelect(); conn.close(); - deleteDb("optimizations"); + deleteDb(getTestName()); } /* @@ -102,38 +103,42 @@ private void testIn() throws SQLException { int size = getSize(100, 1000); for (int i = 0; i < size; i++) { long seed = seedGenerator.nextLong(); - println("seed: " + seed); + println("testIn() seed: " + seed); Random random = new Random(seed); - ArrayList params = New.arrayList(); + ArrayList params = new ArrayList<>(); String condition = getRandomCondition(random, params, columns, compares, values); - // System.out.println(condition + " " + params); - PreparedStatement prep0 = conn.prepareStatement( - "select * from test0 where " + condition - + " order by 1, 2, 3"); - PreparedStatement prep1 = conn.prepareStatement( - "select * from test1 where " + condition - + " order by 1, 2, 3"); - for (int j = 0; j < params.size(); j++) { - prep0.setString(j + 1, params.get(j)); - prep1.setString(j + 1, params.get(j)); - } - ResultSet rs0 = prep0.executeQuery(); - ResultSet rs1 = prep1.executeQuery(); - assertEquals("seed: " + seed + " " + condition, rs0, rs1); + String message = "testIn() seed: " + seed + " " + condition; + executeAndCompare(condition, params, message); if (params.size() > 0) { for (int j = 0; j < params.size(); j++) { String value = values[random.nextInt(values.length - 2)]; params.set(j, value); - prep0.setString(j + 1, value); - prep1.setString(j + 1, value); } - assertEquals("seed: " + seed + " " + condition, rs0, rs1); + executeAndCompare(condition, params, message); } } + executeAndCompare("a >=0 and b in(?, 2) and a in(1, ?, null)", Arrays.asList("10", "2"), + "testIn() 
seed=-6191135606105920350L"); db.execute("drop table test0, test1"); } + private void executeAndCompare(String condition, List params, String message) throws SQLException { + PreparedStatement prep0 = conn.prepareStatement( + "select * from test0 where " + condition + + " order by 1, 2, 3"); + PreparedStatement prep1 = conn.prepareStatement( + "select * from test1 where " + condition + + " order by 1, 2, 3"); + for (int j = 0; j < params.size(); j++) { + prep0.setString(j + 1, params.get(j)); + prep1.setString(j + 1, params.get(j)); + } + ResultSet rs0 = prep0.executeQuery(); + ResultSet rs1 = prep1.executeQuery(); + assertEquals(message, rs0, rs1); + } + private String getRandomCondition(Random random, ArrayList params, String[] columns, String[] compares, String[] values) { int comp = 1 + random.nextInt(4); @@ -188,7 +193,7 @@ private void testInSelect() { db.execute("UPDATE TEST SET B = NULL WHERE B = 0"); Random random = new Random(); long seed = random.nextLong(); - println("seed: " + seed); + println("testInSelect() seed: " + seed); for (int i = 0; i < 100; i++) { String column = random.nextBoolean() ? "A" : "B"; String value = new String[] { "NULL", "0", "A", "B" }[random.nextInt(4)]; @@ -202,7 +207,7 @@ private void testInSelect() { " FROM TEST I WHERE I." + compare + "=?) 
ORDER BY 1, 2"; List> a = db.prepare(sql1).set(x).query(); List> b = db.prepare(sql2).set(x).query(); - assertTrue("seed: " + seed + " sql: " + sql1 + + assertTrue("testInSelect() seed: " + seed + " sql: " + sql1 + " a: " + a + " b: " + b, a.equals(b)); } db.execute("DROP TABLE TEST"); @@ -213,7 +218,7 @@ private void testGroupSorted() { db.execute("CREATE TABLE TEST(A INT, B INT, C INT)"); Random random = new Random(); long seed = random.nextLong(); - println("seed: " + seed); + println("testGroupSorted() seed: " + seed); for (int i = 0; i < 100; i++) { Prepared p = db.prepare("INSERT INTO TEST VALUES(?, ?, ?)"); p.set(new String[] { null, "0", "1", "2" }[random.nextInt(4)]); diff --git a/h2/src/test/org/h2/test/synth/TestHalt.java b/h2/src/test/org/h2/test/synth/TestHalt.java index 2f479a74d3..f6fd68f5ee 100644 --- a/h2/src/test/org/h2/test/synth/TestHalt.java +++ b/h2/src/test/org/h2/test/synth/TestHalt.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -13,16 +13,14 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.Date; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.Random; - import org.h2.test.TestAll; import org.h2.test.TestBase; import org.h2.test.utils.SelfDestructor; import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -89,8 +87,7 @@ public abstract class TestHalt extends TestBase { */ protected Random random = new Random(); - private final SimpleDateFormat dateFormat = - new SimpleDateFormat("MM-dd HH:mm:ss "); + private final DateTimeFormatter dateFormat = DateTimeFormatter.ofPattern("MM-dd HH:mm:ss"); private int errorId; private int sequenceId; @@ -188,21 +185,17 @@ protected void traceOperation(String s) { * @param e the exception or null */ protected void traceOperation(String s, Exception e) { - FileWriter writer = null; - try { - File f = new File(getBaseDir() + "/" + TRACE_FILE_NAME); - f.getParentFile().mkdirs(); - writer = new FileWriter(f, true); + File f = new File(getBaseDir() + "/" + TRACE_FILE_NAME); + f.getParentFile().mkdirs(); + try (FileWriter writer = new FileWriter(f, true)) { PrintWriter w = new PrintWriter(writer); - s = dateFormat.format(new Date()) + ": " + s; + s = dateFormat.format(LocalDateTime.now()) + " : " + s; w.println(s); if (e != null) { e.printStackTrace(w); } } catch (IOException e2) { e2.printStackTrace(); - } finally { - IOUtils.closeSilently(writer); } } @@ -232,7 +225,7 @@ void controllerTest() throws Exception { // String classPath = "-cp // .;D:/data/java/hsqldb.jar;D:/data/java/derby.jar"; String selfDestruct = SelfDestructor.getPropertyString(60); - String[] procDef = { "java", selfDestruct, + String[] procDef = { getJVM(), selfDestruct, "-cp", getClassPath(), getClass().getName(), 
"" + operations, "" + flags, "" + testValue}; traceOperation("start: " + StringUtils.arrayCombine(procDef, ' ')); @@ -303,7 +296,7 @@ protected void disconnect() { // lock.delete(); // System.gc(); // } -// Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance(); +// Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver").newInstance(); // try { // return DriverManager.getConnection( // "jdbc:derby:test3;create=true", "sa", "sa"); @@ -329,7 +322,7 @@ protected void disconnect() { // void disconnectDerby() { // // super.disconnect(); // try { -// Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); +// Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); // DriverManager.getConnection( // "jdbc:derby:;shutdown=true", "sa", "sa"); // } catch (Exception e) { diff --git a/h2/src/test/org/h2/test/synth/TestHaltApp.java b/h2/src/test/org/h2/test/synth/TestHaltApp.java index c93e8ba178..22b5d902e9 100644 --- a/h2/src/test/org/h2/test/synth/TestHaltApp.java +++ b/h2/src/test/org/h2/test/synth/TestHaltApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -9,7 +9,6 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - import org.h2.test.utils.SelfDestructor; /** @@ -38,9 +37,10 @@ public static void main(String... 
args) throws Exception { } } - private void execute(Statement stat, String sql) throws SQLException { + @Override + protected void execute(Statement stat, String sql) throws SQLException { traceOperation("execute: " + sql); - stat.execute(sql); + super.execute(stat, sql); } /** diff --git a/h2/src/test/org/h2/test/synth/TestJoin.java b/h2/src/test/org/h2/test/synth/TestJoin.java index 1edcfaae9a..ca45c1aedf 100644 --- a/h2/src/test/org/h2/test/synth/TestJoin.java +++ b/h2/src/test/org/h2/test/synth/TestJoin.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -15,18 +15,19 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; import org.h2.util.StringUtils; /** * A test that runs random join statements against two databases and compares * the results. */ -public class TestJoin extends TestBase { +public class TestJoin extends TestDb { - private final ArrayList connections = New.arrayList(); + private final ArrayList connections = new ArrayList<>(); private Random random; private int paramCount; private StringBuilder buff; @@ -37,7 +38,7 @@ public class TestJoin extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -56,7 +57,7 @@ private void testJoin() throws Exception { Connection c2 = DriverManager.getConnection("jdbc:postgresql:test", "sa", "sa"); connections.add(c2); - // Class.forName("com.mysql.jdbc.Driver"); + // Class.forName("com.mysql.cj.jdbc.Driver"); // Connection c2 = // DriverManager.getConnection("jdbc:mysql://localhost/test", "sa", // "sa"); @@ -97,12 +98,12 @@ private void testJoin() throws Exception { execute("INSERT INTO TWO VALUES(3, 3)", null); execute("INSERT INTO TWO VALUES(4, NULL)", null); random = new Random(); - long startTime = System.currentTimeMillis(); + long startTime = System.nanoTime(); for (int i = 0;; i++) { paramCount = 0; buff = new StringBuilder(); - long time = System.currentTimeMillis(); - if (time - startTime > 5000) { + long time = System.nanoTime(); + if (time - startTime > TimeUnit.SECONDS.toNanos(5)) { printTime("i:" + i); startTime = time; } @@ -287,7 +288,7 @@ private static String readResult(ResultSet rs) throws SQLException { } b.append(":\n"); String result = b.toString(); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (rs.next()) { b = new StringBuilder(); for (int i = 0; i < columnCount; i++) { diff --git a/h2/src/test/org/h2/test/synth/TestKill.java b/h2/src/test/org/h2/test/synth/TestKill.java index bc636233d6..52baf41465 100644 --- a/h2/src/test/org/h2/test/synth/TestKill.java +++ b/h2/src/test/org/h2/test/synth/TestKill.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -13,6 +13,7 @@ import java.util.Random; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.SelfDestructor; /** @@ -20,7 +21,7 @@ * operations against a database, then kills this process. Afterwards recovery * is tested. */ -public class TestKill extends TestBase { +public class TestKill extends TestDb { private static final String DIR = TestBase.getTestDir("kill"); @@ -35,7 +36,7 @@ public class TestKill extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -49,7 +50,7 @@ public void test() throws Exception { String password = getPassword(); String selfDestruct = SelfDestructor.getPropertyString(60); String[] procDef = { - "java", selfDestruct, + getJVM(), selfDestruct, "-cp", getClassPath(), "org.h2.test.synth.TestKillProcess", url, user, password, getBaseDir(), "" + ACCOUNTS }; diff --git a/h2/src/test/org/h2/test/synth/TestKillProcess.java b/h2/src/test/org/h2/test/synth/TestKillProcess.java index 64208f7523..b432222552 100644 --- a/h2/src/test/org/h2/test/synth/TestKillProcess.java +++ b/h2/src/test/org/h2/test/synth/TestKillProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -10,6 +10,8 @@ import java.sql.PreparedStatement; import java.util.ArrayList; import java.util.Random; +import java.util.concurrent.TimeUnit; + import org.h2.store.FileLister; import org.h2.test.TestBase; import org.h2.test.utils.SelfDestructor; @@ -45,11 +47,11 @@ public static void main(String... 
args) { PreparedStatement prep1b = conn1.prepareStatement( "UPDATE ACCOUNT SET SUM=SUM+? WHERE ID=?"); conn1.setAutoCommit(false); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); String d = null; for (int i = 0;; i++) { - long t = System.currentTimeMillis(); - if (t > time + 1000) { + long t = System.nanoTime(); + if (t > time + TimeUnit.SECONDS.toNanos(1)) { ArrayList list = FileLister.getDatabaseFiles( baseDir, "kill", true); System.out.println("inserting... i:" + i + " d:" + d + diff --git a/h2/src/test/org/h2/test/synth/TestKillRestart.java b/h2/src/test/org/h2/test/synth/TestKillRestart.java index c37f45cfde..d9ed4920c5 100644 --- a/h2/src/test/org/h2/test/synth/TestKillRestart.java +++ b/h2/src/test/org/h2/test/synth/TestKillRestart.java @@ -1,48 +1,57 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; +import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.Field; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.Statement; import java.util.Random; -import org.h2.test.TestBase; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.h2.test.TestDb; import org.h2.test.utils.SelfDestructor; /** * Standalone recovery test. A new process is started and then killed while it * executes random statements. 
*/ -public class TestKillRestart extends TestBase { +public class TestKillRestart extends TestDb { @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.networked) { - return; + return false; } if (getBaseDir().indexOf(':') > 0) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { deleteDb("killRestart"); String url = getURL("killRestart", true); // String url = getURL( // "killRestart;CACHE_SIZE=2048;WRITE_DELAY=0", true); String user = getUser(), password = getPassword(); String selfDestruct = SelfDestructor.getPropertyString(60); - String[] procDef = { "java", selfDestruct, - "-cp", getClassPath(), + String[] procDef = { getJVM(), selfDestruct, + "-cp", getClassPath(), "-ea", getClass().getName(), "-url", url, "-user", user, "-password", password }; int len = getSize(2, 15); for (int i = 0; i < len; i++) { - Process p = Runtime.getRuntime().exec(procDef); + Process p = new ProcessBuilder().redirectErrorStream(true).command(procDef).start(); InputStream in = p.getInputStream(); OutputCatcher catcher = new OutputCatcher(in); catcher.start(); @@ -58,7 +67,7 @@ public void test() throws Exception { Thread.sleep(100); printTime("killing: " + i); p.destroy(); - p.waitFor(); + waitForTimeout(p); break; } else if (s.startsWith("#Fail")) { fail("Failed: " + s); @@ -68,6 +77,58 @@ public void test() throws Exception { deleteDb("killRestart"); } + /** + * Wait for a subprocess with timeout. 
+ */ + private static void waitForTimeout(final Process p) + throws InterruptedException, IOException { + final long pid = getPidOfProcess(p); + if (pid == -1) { + p.waitFor(); + } + // when we hit Java8 we can use the waitFor(1,TimeUnit.MINUTES) method + final CountDownLatch latch = new CountDownLatch(1); + new Thread("waitForTimeout") { + @Override + public void run() { + try { + p.waitFor(); + latch.countDown(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + }.start(); + if (!latch.await(2, TimeUnit.MINUTES)) { + String[] procDef = { "jstack", "-F", "-m", "-l", "" + pid }; + new ProcessBuilder().redirectErrorStream(true).command(procDef) + .start(); + OutputCatcher catcher = new OutputCatcher(p.getInputStream()); + catcher.start(); + Thread.sleep(500); + throw new IOException("timed out waiting for subprocess to die"); + } + } + + /** + * Get the PID of a subprocess. Only works on Linux and OSX. + */ + private static long getPidOfProcess(Process p) { + // When we hit Java9 we can call getPid() on Process. + long pid = -1; + try { + if (p.getClass().getName().equals("java.lang.UNIXProcess")) { + Field f = p.getClass().getDeclaredField("pid"); + f.setAccessible(true); + pid = f.getLong(p); + f.setAccessible(false); + } + } catch (Exception e) { + pid = -1; + } + return pid; + } + /** * This method is called when executing this application from the command * line. @@ -77,7 +138,7 @@ public void test() throws Exception { public static void main(String... 
args) { SelfDestructor.startCountdown(60); String driver = "org.h2.Driver"; - String url = "jdbc:h2:test", user = "sa", password = "sa"; + String url = "jdbc:h2:mem:test", user = "sa", password = "sa"; for (int i = 0; i < args.length; i++) { if ("-url".equals(args[i])) { url = args[++i]; diff --git a/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java b/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java index 8ae8cde397..a8858e19ab 100644 --- a/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java +++ b/h2/src/test/org/h2/test/synth/TestKillRestartMulti.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; import java.io.InputStream; +import java.lang.ProcessBuilder.Redirect; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -17,51 +18,88 @@ import org.h2.api.ErrorCode; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.SelfDestructor; import org.h2.tools.Backup; -import org.h2.util.New; /** * Standalone recovery test. A new process is started and then killed while it * executes random statements using multiple connection. */ -public class TestKillRestartMulti extends TestBase { +public class TestKillRestartMulti extends TestDb { + + /** + * We want self-destruct to occur before the read times out and we kill the + * child process. 
+ */ + private static final int CHILD_READ_TIMEOUT_MS = 7 * 60 * 1000; // 7 minutes + private static final int CHILD_SELFDESTRUCT_TIMEOUT_MINS = 5; private String driver = "org.h2.Driver"; private String url; private String user = "sa"; private String password = "sa"; - private final ArrayList connections = New.arrayList(); - private final ArrayList tables = New.arrayList(); + private final ArrayList connections = new ArrayList<>(); + private final ArrayList tables = new ArrayList<>(); private int openCount; + + /** + * This method is called when executing this application from the command + * line. + * + * Note that this entry can be used in two different ways, either + * (a) running just this test + * (b) or when this test invokes itself in a child process + * + * @param args the command line parameters + */ + public static void main(String... args) throws Exception { + if (args != null && args.length > 0) { + // the child process case + SelfDestructor.startCountdown(CHILD_SELFDESTRUCT_TIMEOUT_MINS); + new TestKillRestartMulti().test(args); + } else { + // the standalone test case + TestBase.createCaller().init().testFromMain(); + } + } + @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.networked) { - return; + return false; } if (getBaseDir().indexOf(':') > 0) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { deleteDb("killRestartMulti"); - url = getURL("killRestartMulti", true); + url = getURL("killRestartMulti;RETENTION_TIME=0", true); user = getUser(); password = getPassword(); String selfDestruct = SelfDestructor.getPropertyString(60); - String[] procDef = { "java", selfDestruct, - "-cp", getClassPath(), - getClass().getName(), "-url", url, "-user", user, - "-password", password }; + // Inherit error so that the stacktraces reported from SelfDestructor + // show up in our log. 
+ ProcessBuilder pb = new ProcessBuilder().redirectError(Redirect.INHERIT) + .command(getJVM(), selfDestruct, "-cp", getClassPath(), + "-ea", + getClass().getName(), "-url", url, "-user", user, + "-password", password); deleteDb("killRestartMulti"); int len = getSize(3, 10); Random random = new Random(); for (int i = 0; i < len; i++) { - Process p = Runtime.getRuntime().exec(procDef); + Process p = pb.start(); InputStream in = p.getInputStream(); OutputCatcher catcher = new OutputCatcher(in); catcher.start(); while (true) { - String s = catcher.readLine(5 * 60 * 1000); + String s = catcher.readLine(CHILD_READ_TIMEOUT_MS); // System.out.println("> " + s); if (s == null) { fail("No reply from process"); @@ -73,14 +111,16 @@ public void test() throws Exception { Thread.sleep(sleep); printTime("killing: " + i); p.destroy(); + printTime("killing, waiting for: " + i); p.waitFor(); + printTime("killing, dead: " + i); break; } else if (s.startsWith("#Info")) { // System.out.println("info: " + s); } else if (s.startsWith("#Fail")) { System.err.println(s); while (true) { - String a = catcher.readLine(5 * 60 * 1000); + String a = catcher.readLine(CHILD_READ_TIMEOUT_MS); if (a == null || "#End".endsWith(a)) { break; } @@ -122,17 +162,6 @@ public void test() throws Exception { deleteDb("killRestartMulti"); } - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) { - SelfDestructor.startCountdown(60); - new TestKillRestartMulti().test(args); - } - private void test(String... 
args) { for (int i = 0; i < args.length; i++) { if ("-url".equals(args[i])) { @@ -287,7 +316,10 @@ private static void testConsistent(Connection conn) throws SQLException { rs.getString("NAME"); } } catch (SQLException e) { - if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1) { + if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 + ) { // ok } else { throw e; diff --git a/h2/src/test/org/h2/test/synth/TestLimit.java b/h2/src/test/org/h2/test/synth/TestLimit.java index 461ce9a1a2..5a063b0329 100644 --- a/h2/src/test/org/h2/test/synth/TestLimit.java +++ b/h2/src/test/org/h2/test/synth/TestLimit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -8,12 +8,15 @@ import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; + +import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * The LIMIT, OFFSET, maxRows. */ -public class TestLimit extends TestBase { +public class TestLimit extends TestDb { private Statement stat; @@ -25,8 +28,7 @@ public class TestLimit extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.config.nestedJoins = true; - test.test(); + test.testFromMain(); } @Override @@ -38,8 +40,8 @@ public void test() throws Exception { "select x from system_range(1, 10)"); for (int maxRows = 0; maxRows < 12; maxRows++) { stat.setMaxRows(maxRows); - for (int limit = -2; limit < 12; limit++) { - for (int offset = -2; offset < 12; offset++) { + for (int limit = -1; limit < 12; limit++) { + for (int offset = -1; offset < 12; offset++) { int l = limit < 0 ? 10 : Math.min(10, limit); for (int d = 0; d < 2; d++) { int m = maxRows <= 0 ? 10 : Math.min(10, maxRows); @@ -47,9 +49,9 @@ public void test() throws Exception { if (offset > 0) { expected = Math.max(0, Math.min(10 - offset, expected)); } - String s = "select " + (d == 1 ? "distinct " : "") + - " * from test limit " + (limit == -2 ? "null" : limit) + - " offset " + (offset == -2 ? "null" : offset); + String s = "select " + (d == 1 ? "distinct " : "") + "* from test" + + (offset >= 0 ? " offset " + offset + " rows" : "") + + (limit >= 0 ? " fetch next " + limit + " rows only" : ""); assertRow(expected, s); String union = "(" + s + ") union (" + s + ")"; assertRow(expected, union); @@ -60,11 +62,13 @@ public void test() throws Exception { expected = Math.min(m, l * 2); union = "(" + s + ") union all (" + s + ")"; assertRow(expected, union); - for (int unionLimit = -2; unionLimit < 5; unionLimit++) { + for (int unionLimit = -1; unionLimit < 5; unionLimit++) { int e = unionLimit < 0 ? 20 : Math.min(20, unionLimit); e = Math.min(expected, e); - String u = union + " limit " + - (unionLimit == -2 ? 
"null" : unionLimit); + String u = union; + if (unionLimit >= 0) { + u += " fetch first " + unionLimit + " rows only"; + } assertRow(e, u); } } @@ -74,9 +78,7 @@ public void test() throws Exception { assertEquals(0, stat.executeUpdate("delete from test limit 0")); assertEquals(1, stat.executeUpdate("delete from test limit 1")); assertEquals(2, stat.executeUpdate("delete from test limit 2")); - assertEquals(7, stat.executeUpdate("delete from test limit null")); - stat.execute("insert into test select x from system_range(1, 10)"); - assertEquals(10, stat.executeUpdate("delete from test limit -1")); + assertThrows(ErrorCode.INVALID_VALUE_2, stat).executeUpdate("delete from test limit null"); conn.close(); deleteDb("limit"); } diff --git a/h2/src/test/org/h2/test/synth/TestMultiThreaded.java b/h2/src/test/org/h2/test/synth/TestMultiThreaded.java index beed9838b5..4e1fc87c9b 100644 --- a/h2/src/test/org/h2/test/synth/TestMultiThreaded.java +++ b/h2/src/test/org/h2/test/synth/TestMultiThreaded.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -12,11 +12,12 @@ import java.util.Random; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the multi-threaded mode. */ -public class TestMultiThreaded extends TestBase { +public class TestMultiThreaded extends TestDb { /** * Run just this test. @@ -24,7 +25,15 @@ public class TestMultiThreaded extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + org.h2.test.TestAll config = new org.h2.test.TestAll(); + config.memory = true; + config.big = true; + System.out.println(config); + TestBase test = createCaller().init(config); + for (int i = 0; i < 100; i++) { + System.out.println("Pass #" + i); + test.testFromMain(); + } } /** @@ -51,7 +60,7 @@ public void run() { ResultSet rs; try { while (!stop) { - switch(random.nextInt(6)) { + switch (random.nextInt(6)) { case 0: // insert a row for this connection traceThread("insert " + id + " count: " + count); @@ -123,15 +132,11 @@ public void stopNow() { @Override public void test() throws Exception { - if (config.mvcc) { - return; - } deleteDb("multiThreaded"); - int size = getSize(2, 4); + int size = getSize(2, 20); Connection[] connList = new Connection[size]; for (int i = 0; i < size; i++) { - connList[i] = getConnection("multiThreaded;MULTI_THREADED=1;" + - "TRACE_LEVEL_SYSTEM_OUT=1"); + connList[i] = getConnection("multiThreaded"); } Connection conn = connList[0]; Statement stat = conn.createStatement(); @@ -150,32 +155,35 @@ public void test() throws Exception { trace("started " + i); Thread.sleep(100); } - for (int t = 0; t < 2; t++) { - Thread.sleep(1000); + try { + Thread.sleep(2000); + } finally { + trace("stopping"); for (int i = 0; i < size; i++) { Processor p = processors[i]; - if (p.getException() != null) { - throw new Exception("" + i, p.getException()); - } + p.stopNow(); } + for (int i = 0; i < size; i++) { + Processor p = processors[i]; + p.join(1000); + } + trace("close"); + for (int i = 0; i < size; i++) { + connList[i].close(); + } + deleteDb("multiThreaded"); } - trace("stopping"); - for (int i = 0; i < size; i++) { - Processor p = processors[i]; - p.stopNow(); - } + + boolean success = true; for (int i = 0; i < size; i++) { Processor p = processors[i]; - p.join(100); - if (p.getException() != null) { - throw new Exception(p.getException()); + p.join(10000); + Throwable exception = 
p.getException(); + if (exception != null) { + logError("", exception); + success = false; } } - trace("close"); - for (int i = 0; i < size; i++) { - connList[i].close(); - } - deleteDb("multiThreaded"); + assert success; } - } diff --git a/h2/src/test/org/h2/test/synth/TestNestedJoins.java b/h2/src/test/org/h2/test/synth/TestNestedJoins.java index 7bf7da749d..d72fde3143 100644 --- a/h2/src/test/org/h2/test/synth/TestNestedJoins.java +++ b/h2/src/test/org/h2/test/synth/TestNestedJoins.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -19,15 +19,15 @@ import org.h2.api.ErrorCode; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; import org.h2.util.ScriptReader; /** * Tests nested joins and right outer joins. */ -public class TestNestedJoins extends TestBase { +public class TestNestedJoins extends TestDb { - private final ArrayList dbs = New.arrayList(); + private final ArrayList dbs = new ArrayList<>(); /** * Run just this test. @@ -37,16 +37,13 @@ public class TestNestedJoins extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.config.nestedJoins = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { - if (!config.nestedJoins) { - return; - } deleteDb("nestedJoins"); + // testCases2(); testCases(); testRandom(); deleteDb("nestedJoins"); @@ -58,23 +55,23 @@ private void testRandom() throws Exception { try { Class.forName("org.postgresql.Driver"); - Connection c2 = DriverManager.getConnection("jdbc:postgresql:test", "sa", "sa"); + Connection c2 = DriverManager.getConnection("jdbc:postgresql:test?loggerLevel=OFF", "sa", "sa"); dbs.add(c2.createStatement()); } catch (Exception e) { // database not installed - ok } // Derby doesn't work currently - // deleteDerby(); - // try { - // Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); - // Connection c2 = DriverManager.getConnection( - // "jdbc:derby:" + getBaseDir() + - // "/derby/test;create=true", "sa", "sa"); - // dbs.add(c2.createStatement()); - // } catch (Exception e) { - // // database not installed - ok - // } + deleteDerby(); + try { + Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); + Connection c2 = DriverManager.getConnection( + "jdbc:derby:" + getBaseDir() + + "/derby/test;create=true", "sa", "sa"); + dbs.add(c2.createStatement()); + } catch (Throwable e) { + // database not installed - ok + } String shortest = null; Throwable shortestEx = null; for (int i = 0; i < 10; i++) { @@ -222,7 +219,7 @@ private void execute(String sql) throws SQLException { } private static String getResult(ResultSet rs) throws SQLException { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (rs.next()) { StringBuilder buff = new StringBuilder(); for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { @@ -247,7 +244,7 @@ private void testCases() throws Exception { // issue 288 assertThrows(ErrorCode.COLUMN_NOT_FOUND_1, stat). 
execute("select 1 from dual a right outer join " + - "(select b.x from dual b) c on unknown.x = c.x, dual d"); + "(select b.x from dual b) c on unknown_table.x = c.x, dual d"); // issue 288 stat.execute("create table test(id int primary key)"); @@ -289,10 +286,9 @@ private void testCases() throws Exception { "inner join c on c.id = b.id on b.id = a.id"); assertTrue(rs.next()); sql = rs.getString(1); - assertTrue("nested", sql.contains("(")); + assertContains(sql, "("); stat.execute("drop table a, b, c"); - // see roadmap, tag: swapInnerJoinTables /* create table test(id int primary key, x int) as select x, x from system_range(1, 10); @@ -354,7 +350,7 @@ create table o(id int primary key) "left outer join (test c) on a.id = c.id"); assertTrue(rs.next()); sql = rs.getString(1); - assertTrue(sql.contains("PRIMARY_KEY")); + assertContains(sql, "PRIMARY_KEY"); stat.execute("drop table test"); /* @@ -383,21 +379,23 @@ create table o(id int primary key) "right outer join t3 on t1.b=t3.a right outer join t2 on t2.b=t1.a"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT DISTINCT T1.A, T2.A, T3.A FROM PUBLIC.T2 " + - "LEFT OUTER JOIN ( PUBLIC.T3 LEFT OUTER JOIN ( PUBLIC.T1 ) " + - "ON T1.B = T3.A ) ON T2.B = T1.A", sql); + assertEquals("SELECT DISTINCT \"T1\".\"A\", \"T2\".\"A\", \"T3\".\"A\" FROM \"PUBLIC\".\"T2\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"T3\" LEFT OUTER JOIN \"PUBLIC\".\"T1\" " + + "ON \"T1\".\"B\" = \"T3\".\"A\" ) ON \"T2\".\"B\" = \"T1\".\"A\"", sql); rs = stat.executeQuery("select distinct t1.a, t2.a, t3.a from t1 " + "right outer join t3 on t1.b=t3.a " + "right outer join t2 on t2.b=t1.a"); - // expected: 1 1 1; null 2 null - assertTrue(rs.next()); - assertEquals("1", rs.getString(1)); - assertEquals("1", rs.getString(2)); - assertEquals("1", rs.getString(3)); + // expected: + // null 2 null + // 1 1 1 assertTrue(rs.next()); assertEquals(null, rs.getString(1)); assertEquals("2", rs.getString(2)); assertEquals(null, 
rs.getString(3)); + assertTrue(rs.next()); + assertEquals("1", rs.getString(1)); + assertEquals("1", rs.getString(2)); + assertEquals("1", rs.getString(3)); assertFalse(rs.next()); stat.execute("drop table t1, t2, t3, t4"); @@ -422,8 +420,9 @@ create table o(id int primary key) "inner join b on a.x = b.x right outer join c on c.x = a.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X FROM PUBLIC.C LEFT OUTER JOIN " + - "( PUBLIC.A INNER JOIN PUBLIC.B ON A.X = B.X ) ON C.X = A.X", sql); + assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\" FROM \"PUBLIC\".\"C\" LEFT OUTER JOIN " + + "( \"PUBLIC\".\"A\" INNER JOIN \"PUBLIC\".\"B\" " + + "ON \"A\".\"X\" = \"B\".\"X\" ) ON \"C\".\"X\" = \"A\".\"X\"", sql); rs = stat.executeQuery("select a.x, b.x, c.x from a " + "inner join b on a.x = b.x " + "right outer join c on c.x = a.x"); @@ -467,11 +466,12 @@ create table o(id int primary key) "on a.x = c.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X, C.Y FROM PUBLIC.A " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "LEFT OUTER JOIN PUBLIC.C " + - "ON B.X = C.Y ) " + - "ON A.X = C.X", sql); + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", " + + "\"PUBLIC\".\"C\".\"X\", \"PUBLIC\".\"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "LEFT OUTER JOIN \"PUBLIC\".\"C\" " + + "ON \"B\".\"X\" = \"C\".\"Y\" ) " + + "ON \"A\".\"X\" = \"C\".\"X\"", sql); rs = stat.executeQuery("select * from a " + "left outer join (b " + "left outer join c " + @@ -548,9 +548,10 @@ create table o(id int primary key) "inner join c on c.x = 1) on a.x = b.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X FROM PUBLIC.A " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "INNER JOIN PUBLIC.C ON C.X = 1 ) ON A.X = B.X", sql); + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", \"PUBLIC\".\"C\".\"X\" 
" + + "FROM \"PUBLIC\".\"A\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "INNER JOIN \"PUBLIC\".\"C\" ON \"C\".\"X\" = 1 ) ON \"A\".\"X\" = \"B\".\"X\"", sql); stat.execute("drop table a, b, c"); stat.execute("create table test(id int primary key)"); @@ -596,13 +597,13 @@ create table o(id int primary key) "on b.pk = b_base.pk and b_base.deleted = 0) on 1=1"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.PK, A_BASE.PK, B.PK, B_BASE.PK " + - "FROM PUBLIC.BASE A_BASE " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "INNER JOIN PUBLIC.BASE B_BASE " + - "ON (B_BASE.DELETED = 0) AND (B.PK = B_BASE.PK) ) " + - "ON TRUE INNER JOIN PUBLIC.A ON 1=1 " + - "WHERE A.PK = A_BASE.PK", sql); + assertEquals("SELECT \"A\".\"PK\", \"A_BASE\".\"PK\", \"B\".\"PK\", \"B_BASE\".\"PK\" " + + "FROM \"PUBLIC\".\"BASE\" \"A_BASE\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "INNER JOIN \"PUBLIC\".\"BASE\" \"B_BASE\" " + + "ON (\"B_BASE\".\"DELETED\" = 0) AND (\"B\".\"PK\" = \"B_BASE\".\"PK\") ) " + + "ON 1=1 INNER JOIN \"PUBLIC\".\"A\" ON 1=1 " + + "WHERE \"A\".\"PK\" = \"A_BASE\".\"PK\"", sql); rs = stat.executeQuery( "select a.pk, a_base.pk, b.pk, b_base.pk from a " + "inner join base a_base on a.pk = a_base.pk " + @@ -639,4 +640,19 @@ private static String cleanRemarks(String sql) { return sql; } + private void testCases2() throws Exception { + Connection conn = getConnection("nestedJoins"); + Statement stat = conn.createStatement(); + stat.execute("create table a(id int primary key)"); + stat.execute("create table b(id int primary key)"); + stat.execute("create table c(id int primary key)"); + stat.execute("insert into a(id) values(1)"); + stat.execute("insert into c(id) values(1)"); + stat.execute("insert into b(id) values(1)"); + stat.executeQuery("select 1 from a left outer join " + + "(a t0 join b t1 on 1 = 1) on t1.id = 1, c"); + conn.close(); + deleteDb("nestedJoins"); + } + } diff --git a/h2/src/test/org/h2/test/synth/TestOuterJoins.java 
b/h2/src/test/org/h2/test/synth/TestOuterJoins.java index 9eca9b7435..41e97bbfd8 100644 --- a/h2/src/test/org/h2/test/synth/TestOuterJoins.java +++ b/h2/src/test/org/h2/test/synth/TestOuterJoins.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -15,17 +15,18 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Random; + import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; import org.h2.util.ScriptReader; /** * Tests nested joins and right outer joins. */ -public class TestOuterJoins extends TestBase { +public class TestOuterJoins extends TestDb { - private final ArrayList dbs = New.arrayList(); + private final ArrayList dbs = new ArrayList<>(); /** * Run just this test. @@ -35,15 +36,11 @@ public class TestOuterJoins extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.config.nestedJoins = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { - if (!config.nestedJoins) { - return; - } deleteDb("outerJoins"); testCases(); testRandom(); @@ -57,19 +54,19 @@ private void testRandom() throws Exception { try { Class.forName("org.postgresql.Driver"); Connection c2 = DriverManager.getConnection( - "jdbc:postgresql:test", "sa", "sa"); + "jdbc:postgresql:test?loggerLevel=OFF", "sa", "sa"); dbs.add(c2.createStatement()); } catch (Exception e) { // database not installed - ok } deleteDerby(); try { - Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); + Class.forName("org.apache.derby.iapi.jdbc.AutoloadedDriver"); Connection c2 = DriverManager.getConnection( "jdbc:derby:" + getBaseDir() + "/derby/test;create=true", "sa", "sa"); dbs.add(c2.createStatement()); - } catch (Exception e) { + } catch (Throwable e) { // database not installed - ok } String shortest = null; @@ -140,7 +137,7 @@ private void testRandom() throws Exception { s.getConnection().close(); } deleteDerby(); - deleteDb("nestedJoins"); + deleteDb("outerJoins"); } private void deleteDerby() { @@ -275,7 +272,7 @@ private void execute(String sql) throws SQLException { } private static String getResult(ResultSet rs) throws SQLException { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (rs.next()) { StringBuilder buff = new StringBuilder(); for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { @@ -293,7 +290,7 @@ private static String getResult(ResultSet rs) throws SQLException { private void testCases() throws Exception { - Connection conn = getConnection("nestedJoins"); + Connection conn = getConnection("outerJoins"); Statement stat = conn.createStatement(); ResultSet rs; String sql; @@ -308,7 +305,7 @@ private void testCases() throws Exception { "left outer join (test c) on a.id = c.id"); 
assertTrue(rs.next()); sql = rs.getString(1); - assertTrue(sql.contains("PRIMARY_KEY")); + assertContains(sql, "PRIMARY_KEY"); stat.execute("drop table test"); /* @@ -336,21 +333,23 @@ private void testCases() throws Exception { "right outer join t3 on t1.b=t3.a right outer join t2 on t2.b=t1.a"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT DISTINCT T1.A, T2.A, T3.A FROM PUBLIC.T2 " + - "LEFT OUTER JOIN ( PUBLIC.T3 " + - "LEFT OUTER JOIN ( PUBLIC.T1 ) ON T1.B = T3.A ) " + - "ON T2.B = T1.A", sql); + assertEquals("SELECT DISTINCT \"T1\".\"A\", \"T2\".\"A\", \"T3\".\"A\" FROM \"PUBLIC\".\"T2\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"T3\" " + + "LEFT OUTER JOIN \"PUBLIC\".\"T1\" ON \"T1\".\"B\" = \"T3\".\"A\" ) " + + "ON \"T2\".\"B\" = \"T1\".\"A\"", sql); rs = stat.executeQuery("select distinct t1.a, t2.a, t3.a from t1 " + "right outer join t3 on t1.b=t3.a right outer join t2 on t2.b=t1.a"); - // expected: 1 1 1; null 2 null - assertTrue(rs.next()); - assertEquals("1", rs.getString(1)); - assertEquals("1", rs.getString(2)); - assertEquals("1", rs.getString(3)); + // expected: + // null 2 null + // 1 1 1 assertTrue(rs.next()); assertEquals(null, rs.getString(1)); assertEquals("2", rs.getString(2)); assertEquals(null, rs.getString(3)); + assertTrue(rs.next()); + assertEquals("1", rs.getString(1)); + assertEquals("1", rs.getString(2)); + assertEquals("1", rs.getString(3)); assertFalse(rs.next()); stat.execute("drop table t1, t2, t3, t4"); @@ -375,8 +374,9 @@ private void testCases() throws Exception { "inner join b on a.x = b.x right outer join c on c.x = a.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X FROM PUBLIC.C LEFT OUTER JOIN " + - "( PUBLIC.A INNER JOIN PUBLIC.B ON A.X = B.X ) ON C.X = A.X", sql); + assertEquals("SELECT \"A\".\"X\", \"B\".\"X\", \"C\".\"X\" FROM \"PUBLIC\".\"C\" LEFT OUTER JOIN " + + "( \"PUBLIC\".\"A\" INNER JOIN \"PUBLIC\".\"B\" " + + "ON \"A\".\"X\" = 
\"B\".\"X\" ) ON \"C\".\"X\" = \"A\".\"X\"", sql); rs = stat.executeQuery("select a.x, b.x, c.x from a " + "inner join b on a.x = b.x " + "right outer join c on c.x = a.x"); @@ -420,11 +420,12 @@ private void testCases() throws Exception { "on a.x = c.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X, C.Y FROM PUBLIC.A " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "LEFT OUTER JOIN PUBLIC.C " + - "ON B.X = C.Y ) " + - "ON A.X = C.X", sql); + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", " + + "\"PUBLIC\".\"C\".\"X\", \"PUBLIC\".\"C\".\"Y\" FROM \"PUBLIC\".\"A\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "LEFT OUTER JOIN \"PUBLIC\".\"C\" " + + "ON \"B\".\"X\" = \"C\".\"Y\" ) " + + "ON \"A\".\"X\" = \"C\".\"X\"", sql); rs = stat.executeQuery("select * from a " + "left outer join (b " + "left outer join c " + @@ -501,9 +502,10 @@ private void testCases() throws Exception { "inner join c on c.x = 1) on a.x = b.x"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.X, B.X, C.X FROM PUBLIC.A " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "INNER JOIN PUBLIC.C ON C.X = 1 ) ON A.X = B.X", sql); + assertEquals("SELECT \"PUBLIC\".\"A\".\"X\", \"PUBLIC\".\"B\".\"X\", \"PUBLIC\".\"C\".\"X\" " + + "FROM \"PUBLIC\".\"A\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "INNER JOIN \"PUBLIC\".\"C\" ON \"C\".\"X\" = 1 ) ON \"A\".\"X\" = \"B\".\"X\"", sql); stat.execute("drop table a, b, c"); stat.execute("create table test(id int primary key)"); @@ -548,12 +550,12 @@ private void testCases() throws Exception { "on b.pk = b_base.pk and b_base.deleted = 0) on 1=1"); assertTrue(rs.next()); sql = cleanRemarks(rs.getString(1)); - assertEquals("SELECT A.PK, A_BASE.PK, B.PK, B_BASE.PK " + - "FROM PUBLIC.BASE A_BASE " + - "LEFT OUTER JOIN ( PUBLIC.B " + - "INNER JOIN PUBLIC.BASE B_BASE " + - "ON (B_BASE.DELETED = 0) AND (B.PK = B_BASE.PK) ) " + - "ON TRUE INNER JOIN PUBLIC.A ON 1=1 WHERE 
A.PK = A_BASE.PK", sql); + assertEquals("SELECT \"A\".\"PK\", \"A_BASE\".\"PK\", \"B\".\"PK\", \"B_BASE\".\"PK\" " + + "FROM \"PUBLIC\".\"BASE\" \"A_BASE\" " + + "LEFT OUTER JOIN ( \"PUBLIC\".\"B\" " + + "INNER JOIN \"PUBLIC\".\"BASE\" \"B_BASE\" " + + "ON (\"B_BASE\".\"DELETED\" = 0) AND (\"B\".\"PK\" = \"B_BASE\".\"PK\") ) " + + "ON 1=1 INNER JOIN \"PUBLIC\".\"A\" ON 1=1 WHERE \"A\".\"PK\" = \"A_BASE\".\"PK\"", sql); rs = stat.executeQuery("select a.pk, a_base.pk, b.pk, b_base.pk from a " + "inner join base a_base on a.pk = a_base.pk " + "left outer join (b inner join base b_base " + @@ -575,7 +577,7 @@ private void testCases() throws Exception { // } conn.close(); - deleteDb("nestedJoins"); + deleteDb("outerJoins"); } private static String cleanRemarks(String sql) { diff --git a/h2/src/test/org/h2/test/synth/TestPowerOffFs.java b/h2/src/test/org/h2/test/synth/TestPowerOffFs.java index 0b87ba8b7c..443b7844d1 100644 --- a/h2/src/test/org/h2/test/synth/TestPowerOffFs.java +++ b/h2/src/test/org/h2/test/synth/TestPowerOffFs.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -9,15 +9,16 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; - import org.h2.api.ErrorCode; +import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.FilePathDebug; /** * Tests that use the debug file system to simulate power failure. */ -public class TestPowerOffFs extends TestBase { +public class TestPowerOffFs extends TestDb { private FilePathDebug fs; @@ -27,7 +28,7 @@ public class TestPowerOffFs extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -42,6 +43,7 @@ public void test() throws Exception { break; } } + deleteDb("memFS:", null); } private boolean test(int x) throws SQLException { @@ -93,6 +95,7 @@ private boolean test(int x) throws SQLException { stat = conn.createStatement(); stat.execute("script to 'memFS:test.sql'"); conn.close(); + FileUtils.delete("memFS:test.sql"); return false; } diff --git a/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java b/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java index 84cebbf451..1799b86fde 100644 --- a/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java +++ b/h2/src/test/org/h2/test/synth/TestPowerOffFs2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -15,14 +15,14 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.FilePathDebug; -import org.h2.util.New; /** * Tests that use the debug file system to simulate power failure. * This test runs many random operations and stops after some time. */ -public class TestPowerOffFs2 extends TestBase { +public class TestPowerOffFs2 extends TestDb { private static final String USER = "sa"; private static final String PASSWORD = "sa"; @@ -30,8 +30,8 @@ public class TestPowerOffFs2 extends TestBase { private FilePathDebug fs; private String url; - private final ArrayList connections = New.arrayList(); - private final ArrayList tables = New.arrayList(); + private final ArrayList connections = new ArrayList<>(); + private final ArrayList tables = new ArrayList<>(); /** * Run just this test. 
@@ -39,7 +39,7 @@ public class TestPowerOffFs2 extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -217,7 +217,10 @@ private static void testConsistent(Connection conn) throws SQLException { rs.getString("NAME"); } } catch (SQLException e) { - if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1) { + if (e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 + ) { // ok } else { throw e; diff --git a/h2/src/test/org/h2/test/synth/TestRandomCompare.java b/h2/src/test/org/h2/test/synth/TestRandomCompare.java index 8150648ceb..7cf7657525 100644 --- a/h2/src/test/org/h2/test/synth/TestRandomCompare.java +++ b/h2/src/test/org/h2/test/synth/TestRandomCompare.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -13,15 +13,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Random; + import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Tests random compare operations. */ -public class TestRandomCompare extends TestBase { +public class TestRandomCompare extends TestDb { - private final ArrayList dbs = New.arrayList(); + private final ArrayList dbs = new ArrayList<>(); private int aliasId; /** @@ -32,7 +33,7 @@ public class TestRandomCompare extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -50,7 +51,7 @@ private void testRandom() throws Exception { try { Class.forName("org.postgresql.Driver"); Connection c2 = DriverManager.getConnection( - "jdbc:postgresql:test", "sa", "sa"); + "jdbc:postgresql:test?loggerLevel=OFF", "sa", "sa"); dbs.add(c2.createStatement()); } catch (Exception e) { // database not installed - ok @@ -246,7 +247,7 @@ private void execute(String sql) throws SQLException { } private static String getResult(ResultSet rs) throws SQLException { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (rs.next()) { StringBuilder buff = new StringBuilder(); for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { diff --git a/h2/src/test/org/h2/test/synth/TestRandomSQL.java b/h2/src/test/org/h2/test/synth/TestRandomSQL.java index 9d9677682e..9223e60d6c 100644 --- a/h2/src/test/org/h2/test/synth/TestRandomSQL.java +++ b/h2/src/test/org/h2/test/synth/TestRandomSQL.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -8,21 +8,17 @@ import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; - import org.h2.engine.SysProperties; import org.h2.store.fs.FileUtils; -import org.h2.test.TestAll; import org.h2.test.TestBase; -import org.h2.tools.DeleteDbFiles; +import org.h2.test.TestDb; import org.h2.util.MathUtils; /** * This test executes random SQL statements generated using the BNF tool. 
*/ -public class TestRandomSQL extends TestBase { +public class TestRandomSQL extends TestDb { - private int seed; - private boolean exitOnError = true; private int success, total; /** @@ -31,50 +27,36 @@ public class TestRandomSQL extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - private void processException(String sql, SQLException e) { - if (e.getSQLState().equals("HY000")) { - TestBase.logError("new TestRandomSQL().init(test).testCase(" + seed + "); " + - "// FAIL: " + e.toString() + " sql: " + sql, e); - if (exitOnError) { - System.exit(0); - } - } - } - - private String getDatabaseName() { - return "dataRandomSQL/randomSql" + seed; - } - - private Connection connect() throws SQLException { - return getConnection(getDatabaseName()); + TestBase.createCaller().init().testFromMain(); } - private void deleteDb() { - String name = getDatabaseName(); - if (name.startsWith("memFS:")) { - DeleteDbFiles.execute("memFS:/", name, true); - } else { - DeleteDbFiles.execute(getBaseDir() + "/dataRandomSQL", null, true); - FileUtils.delete(getBaseDir() + "/dataRandomSQL"); + @Override + public boolean isEnabled() { + if (config.networked) { + return false; } + return true; } @Override - public TestBase init(TestAll conf) throws Exception { - super.init(conf); - return this; + public void test() throws Exception { + int len = getSize(2, 6); + for (int a = 0; a < len; a++) { + int s = MathUtils.randomInt(Integer.MAX_VALUE); + testCase(s); + } } - private void testWithSeed() throws Exception { + private void testWithSeed(int seed) throws Exception { Connection conn = null; try { - conn = connect(); + conn = getConnection(getDatabaseName(seed)); } catch (SQLException e) { - processException("connect", e); - conn = connect(); + if (e.getSQLState().equals("HY000")) { + TestBase.logError("new TestRandomSQL().init(test).testCase(" + seed + "); " + + "// FAIL: " + e.toString() + " sql: " + 
"connect", e); + } + conn = getConnection(getDatabaseName(seed)); } Statement stat = conn.createStatement(); @@ -93,46 +75,48 @@ private void testWithSeed() throws Exception { stat.execute(sql); success++; } catch (SQLException e) { - processException(sql, e); + if (e.getSQLState().equals("HY000")) { + TestBase.logError( + "new TestRandomSQL().init(test).testCase(" + + seed + "); " + "// FAIL: " + + e.toString() + " sql: " + sql, e); + } } } } try { conn.close(); - conn = connect(); + conn = getConnection(getDatabaseName(seed)); conn.createStatement().execute("shutdown immediately"); conn.close(); } catch (SQLException e) { - processException("conn.close", e); + if (e.getSQLState().equals("HY000")) { + TestBase.logError("new TestRandomSQL().init(test).testCase(" + seed + "); " + + "// FAIL: " + e.toString() + " sql: " + "conn.close", e); + } } } - @Override - public void testCase(int i) throws Exception { + private void testCase(int seed) throws Exception { String old = SysProperties.getScriptDirectory(); try { - System.setProperty(SysProperties.H2_SCRIPT_DIRECTORY, "dataScript/"); - seed = i; + System.setProperty(SysProperties.H2_SCRIPT_DIRECTORY, + getBaseDir() + "/" + getTestName()); printTime("seed: " + seed); - deleteDb(); - testWithSeed(); + deleteDb(seed); + testWithSeed(seed); } finally { System.setProperty(SysProperties.H2_SCRIPT_DIRECTORY, old); } - deleteDb(); + deleteDb(seed); } - @Override - public void test() throws Exception { - if (config.networked) { - return; - } - int len = getSize(2, 6); - exitOnError = false; - for (int a = 0; a < len; a++) { - int s = MathUtils.randomInt(Integer.MAX_VALUE); - testCase(s); - } + private String getDatabaseName(int seed) { + return getTestName() + "/db" + seed; + } + + private void deleteDb(int seed) { + FileUtils.delete(getDatabaseName(seed)); } } diff --git a/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java b/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java new file mode 100644 index 
0000000000..42907fe467 --- /dev/null +++ b/h2/src/test/org/h2/test/synth/TestReleaseSelectLock.java @@ -0,0 +1,79 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.synth; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.concurrent.CountDownLatch; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests lock releasing for concurrent select statements + */ +public class TestReleaseSelectLock extends TestDb { + + private static final String TEST_DB_NAME = "releaseSelectLock"; + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase test = TestBase.createCaller().init(); + test.testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb(TEST_DB_NAME); + + Connection conn = getConnection(TEST_DB_NAME); + final Statement statement = conn.createStatement(); + statement.execute("create table test(id int primary key)"); + + runConcurrentSelects(); + + // check that all locks have been released by dropping the test table + statement.execute("drop table test"); + + statement.close(); + conn.close(); + deleteDb(TEST_DB_NAME); + } + + private void runConcurrentSelects() throws InterruptedException { + int tryCount = 500; + int threadsCount = getSize(2, 4); + for (int tryNumber = 0; tryNumber < tryCount; tryNumber++) { + CountDownLatch allFinished = new CountDownLatch(threadsCount); + + for (int i = 0; i < threadsCount; i++) { + new Thread(() -> { + try { + Connection conn = getConnection(TEST_DB_NAME); + PreparedStatement stmt = conn.prepareStatement("select id from test"); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + rs.getInt(1); + } + stmt.close(); + conn.close(); + } catch (Exception e) { + throw new 
RuntimeException(e); + } finally { + allFinished.countDown(); + } + }).start(); + } + + allFinished.await(); + } + } +} diff --git a/h2/src/test/org/h2/test/synth/TestSimpleIndex.java b/h2/src/test/org/h2/test/synth/TestSimpleIndex.java index a91812efd9..4a0337d45c 100644 --- a/h2/src/test/org/h2/test/synth/TestSimpleIndex.java +++ b/h2/src/test/org/h2/test/synth/TestSimpleIndex.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -11,13 +11,14 @@ import java.sql.Statement; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.synth.sql.RandomGen; /** * A test that runs random operations against a table to test the various index * implementations. */ -public class TestSimpleIndex extends TestBase { +public class TestSimpleIndex extends TestDb { private Connection conn; private Statement stat; @@ -29,7 +30,7 @@ public class TestSimpleIndex extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestThreads.java b/h2/src/test/org/h2/test/synth/TestThreads.java index e23cc951e1..f88049ebfd 100644 --- a/h2/src/test/org/h2/test/synth/TestThreads.java +++ b/h2/src/test/org/h2/test/synth/TestThreads.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth; @@ -13,12 +13,13 @@ import java.util.Random; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * This test starts multiple threads and executes random operations in each * thread. */ -public class TestThreads extends TestBase implements Runnable { +public class TestThreads extends TestDb implements Runnable { private static final int INSERT = 0, UPDATE = 1, DELETE = 2; private static final int SELECT_ONE = 3, SELECT_ALL = 4; @@ -49,7 +50,7 @@ public TestThreads() { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/TestTimer.java b/h2/src/test/org/h2/test/synth/TestTimer.java index dd2309496b..aae2e40ccc 100644 --- a/h2/src/test/org/h2/test/synth/TestTimer.java +++ b/h2/src/test/org/h2/test/synth/TestTimer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth; @@ -11,8 +11,10 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; @@ -21,7 +23,7 @@ * then deletes everything and runs in an endless loop executing random * operations. This loop is usually stopped by switching off the computer. */ -public class TestTimer extends TestBase { +public class TestTimer extends TestDb { /** * Run just this test. @@ -29,7 +31,7 @@ public class TestTimer extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -47,7 +49,7 @@ private void loop() throws SQLException { Random random = new Random(); int max = 0; int count = 0; - long startTime = System.currentTimeMillis(); + long startTime = System.nanoTime(); while (true) { int action = random.nextInt(10); int x = max == 0 ? 0 : random.nextInt(max); @@ -81,8 +83,8 @@ private void loop() throws SQLException { rs.next(); int c = rs.getInt(1); assertEquals(count, c); - long time = System.currentTimeMillis(); - if (time > startTime + 5000) { + long time = System.nanoTime(); + if (time > startTime + TimeUnit.SECONDS.toNanos(5)) { println("rows: " + count); startTime = time; } diff --git a/h2/src/test/org/h2/test/synth/package.html b/h2/src/test/org/h2/test/synth/package.html index 755a5c7693..31abc88978 100644 --- a/h2/src/test/org/h2/test/synth/package.html +++ b/h2/src/test/org/h2/test/synth/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/sql/Column.java b/h2/src/test/org/h2/test/synth/sql/Column.java index 00ac6359ba..e797507155 100644 --- a/h2/src/test/org/h2/test/synth/sql/Column.java +++ b/h2/src/test/org/h2/test/synth/sql/Column.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -173,10 +173,6 @@ Value getRandomValue() { return Value.getRandom(config, type, precision, scale, isNullable); } -// Value getRandomValueNotNull() { -// return Value.getRandom(config, type, precision, scale, false); -// } - /** * Generate a random column. 
* diff --git a/h2/src/test/org/h2/test/synth/sql/Command.java b/h2/src/test/org/h2/test/synth/sql/Command.java index d572ab4c2d..00997cc057 100644 --- a/h2/src/test/org/h2/test/synth/sql/Command.java +++ b/h2/src/test/org/h2/test/synth/sql/Command.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; import java.sql.SQLException; import java.util.HashMap; -import org.h2.util.New; -import org.h2.util.StatementBuilder; /** * Represents a statement. @@ -53,7 +51,7 @@ private Command(TestSynth config, int type, Table table, String alias) { this.config = config; this.type = type; this.table = table; - this.tables = New.hashMap(); + this.tables = new HashMap<>(); this.tables.put(alias, table); } @@ -289,20 +287,21 @@ private void prepareUpdate() { } private Result select(DbInterface db) throws SQLException { - StatementBuilder buff = new StatementBuilder("SELECT "); - for (String s : selectList) { - buff.appendExceptFirst(", "); - buff.append(s); + StringBuilder builder = new StringBuilder("SELECT "); + for (int i = 0, length = selectList.length; i < length; i++) { + if (i > 0) { + builder.append(", "); + } + builder.append(selectList[i]); } - buff.append(" FROM ").append(table.getName()).append(" M"). 
- append(' ').append(join); + builder.append(" FROM ").append(table.getName()).append(" M").append(' ').append(join); if (condition != null) { - buff.append(" WHERE ").append(condition); + builder.append(" WHERE ").append(condition); } if (order.trim().length() > 0) { - buff.append(" ORDER BY ").append(order); + builder.append(" ORDER BY ").append(order); } - return db.select(buff.toString()); + return db.select(builder.toString()); } /** @@ -383,10 +382,6 @@ Result run(DbInterface db) throws Exception { return result; } -// public String getNextTableAlias() { -// return "S" + nextAlias++; -// } - /** * Get a random table alias name. * diff --git a/h2/src/test/org/h2/test/synth/sql/DbConnection.java b/h2/src/test/org/h2/test/synth/sql/DbConnection.java index 03d2157f8b..803fc28b6b 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbConnection.java +++ b/h2/src/test/org/h2/test/synth/sql/DbConnection.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -12,7 +12,6 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; -import org.h2.util.New; /** * Represents a connection to a real database. 
@@ -45,7 +44,7 @@ public void reset() throws SQLException { log("reset;"); DatabaseMetaData meta = conn.getMetaData(); Statement stat = conn.createStatement(); - ArrayList tables = New.arrayList(); + ArrayList tables = new ArrayList<>(); ResultSet rs = meta.getTables(null, null, null, new String[] { "TABLE" }); while (rs.next()) { String schemaName = rs.getString("TABLE_SCHEM"); diff --git a/h2/src/test/org/h2/test/synth/sql/DbInterface.java b/h2/src/test/org/h2/test/synth/sql/DbInterface.java index 23ae268284..118b7030d3 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbInterface.java +++ b/h2/src/test/org/h2/test/synth/sql/DbInterface.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/DbState.java b/h2/src/test/org/h2/test/synth/sql/DbState.java index 26339e4fb0..0ecee56720 100644 --- a/h2/src/test/org/h2/test/synth/sql/DbState.java +++ b/h2/src/test/org/h2/test/synth/sql/DbState.java @@ -1,12 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; import java.util.ArrayList; -import org.h2.util.New; /** * Represents a connection to a simulated database. @@ -16,8 +15,8 @@ public class DbState implements DbInterface { private boolean connected; private boolean autoCommit; private final TestSynth config; - private ArrayList tables = New.arrayList(); - private ArrayList indexes = New.arrayList(); + private ArrayList
    tables = new ArrayList<>(); + private ArrayList indexes = new ArrayList<>(); DbState(TestSynth config) { this.config = config; @@ -25,8 +24,8 @@ public class DbState implements DbInterface { @Override public void reset() { - tables = New.arrayList(); - indexes = New.arrayList(); + tables = new ArrayList<>(); + indexes = new ArrayList<>(); } @Override diff --git a/h2/src/test/org/h2/test/synth/sql/Expression.java b/h2/src/test/org/h2/test/synth/sql/Expression.java index cec1c3292f..50d615425f 100644 --- a/h2/src/test/org/h2/test/synth/sql/Expression.java +++ b/h2/src/test/org/h2/test/synth/sql/Expression.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; import java.sql.Types; import java.util.ArrayList; -import org.h2.util.New; /** * Represents an expression. @@ -35,7 +34,7 @@ static String[] getRandomSelectList(TestSynth config, Command command) { if (config.random().getBoolean(30)) { return new String[] { "*" }; } - ArrayList exp = New.arrayList(); + ArrayList exp = new ArrayList<>(); String sql = ""; if (config.random().getBoolean(10)) { sql += "DISTINCT "; @@ -47,9 +46,7 @@ static String[] getRandomSelectList(TestSynth config, Command command) { exp.add(sql); sql = ""; } - String[] list = new String[exp.size()]; - exp.toArray(list); - return list; + return exp.toArray(new String[0]); } /** diff --git a/h2/src/test/org/h2/test/synth/sql/Index.java b/h2/src/test/org/h2/test/synth/sql/Index.java index 465af0e88d..544a847667 100644 --- a/h2/src/test/org/h2/test/synth/sql/Index.java +++ b/h2/src/test/org/h2/test/synth/sql/Index.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/RandomGen.java b/h2/src/test/org/h2/test/synth/sql/RandomGen.java index e3bce3fda5..50ce674372 100644 --- a/h2/src/test/org/h2/test/synth/sql/RandomGen.java +++ b/h2/src/test/org/h2/test/synth/sql/RandomGen.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Result.java b/h2/src/test/org/h2/test/synth/sql/Result.java index 1f6001df96..556bf8c34b 100644 --- a/h2/src/test/org/h2/test/synth/sql/Result.java +++ b/h2/src/test/org/h2/test/synth/sql/Result.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -14,7 +14,6 @@ import java.util.Collections; import org.h2.test.TestBase; -import org.h2.util.New; /** * Represents an in-memory result. 
@@ -59,8 +58,8 @@ class Result implements Comparable { this.sql = sql; type = RESULT_SET; try { - rows = New.arrayList(); - header = New.arrayList(); + rows = new ArrayList<>(); + header = new ArrayList<>(); ResultSetMetaData meta = rs.getMetaData(); int len = meta.getColumnCount(); Column[] cols = new Column[len]; diff --git a/h2/src/test/org/h2/test/synth/sql/Row.java b/h2/src/test/org/h2/test/synth/sql/Row.java index c431e495d1..e60988b1d3 100644 --- a/h2/src/test/org/h2/test/synth/sql/Row.java +++ b/h2/src/test/org/h2/test/synth/sql/Row.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/Table.java b/h2/src/test/org/h2/test/synth/sql/Table.java index 7178fe90cf..abf1092715 100644 --- a/h2/src/test/org/h2/test/synth/sql/Table.java +++ b/h2/src/test/org/h2/test/synth/sql/Table.java @@ -1,24 +1,24 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; import java.util.ArrayList; -import org.h2.util.New; /** * Represents a table. 
*/ class Table { + private final TestSynth config; private String name; private boolean temporary; private boolean globalTemporary; private Column[] columns; private Column[] primaryKeys; - private final ArrayList indexes = New.arrayList(); + private final ArrayList indexes = new ArrayList<>(); Table(TestSynth config) { this.config = config; @@ -178,7 +178,7 @@ String getName() { * @return the column */ Column getRandomConditionColumn() { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); for (Column col : columns) { if (Column.isConditionType(config, col.getType())) { list.add(col); @@ -205,7 +205,7 @@ int getColumnCount() { * @return the column or null if no such column was found */ Column getRandomColumnOfType(int type) { - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); for (Column col : columns) { if (col.getType() == type) { list.add(col); diff --git a/h2/src/test/org/h2/test/synth/sql/TestSynth.java b/h2/src/test/org/h2/test/synth/sql/TestSynth.java index 0d9ef6fd86..389a914f88 100644 --- a/h2/src/test/org/h2/test/synth/sql/TestSynth.java +++ b/h2/src/test/org/h2/test/synth/sql/TestSynth.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; @@ -9,14 +9,14 @@ import org.h2.test.TestAll; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.MathUtils; -import org.h2.util.New; /** * A test that generates random SQL statements against a number of databases * and compares the results. 
*/ -public class TestSynth extends TestBase { +public class TestSynth extends TestDb { // TODO hsqldb: call 1||null should return 1 but returns null // TODO hsqldb: call mod(1) should return invalid parameter count @@ -60,7 +60,7 @@ public class TestSynth extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -145,7 +145,7 @@ private void addRandomCommands() throws Exception { private void testRun(int seed) throws Exception { random.setSeed(seed); - commands = New.arrayList(); + commands = new ArrayList<>(); add(Command.getConnect(this)); add(Command.getReset(this)); @@ -202,8 +202,7 @@ private void testRun(int seed) throws Exception { private boolean process(int seed, int id, Command command) throws Exception { try { - - ArrayList results = New.arrayList(); + ArrayList results = new ArrayList<>(); for (int i = 0; i < databases.size(); i++) { DbInterface db = databases.get(i); Result result = command.run(db); @@ -279,7 +278,7 @@ private void addDatabase(String className, String url, String user, public TestBase init(TestAll conf) throws Exception { super.init(conf); deleteDb("synth/synth"); - databases = New.arrayList(); + databases = new ArrayList<>(); // mode = HSQLDB; // addDatabase("org.hsqldb.jdbcDriver", "jdbc:hsqldb:test", "sa", "" ); @@ -297,11 +296,11 @@ public TestBase init(TestAll conf) throws Exception { addDatabase("org.h2.Driver", "jdbc:h2:" + getBaseDir() + "/synth/synth", "sa", "", false); - // addDatabase("com.mysql.jdbc.Driver", "jdbc:mysql://localhost/test", + // addDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", // "sa", ""); // addDatabase("org.h2.Driver", "jdbc:h2:synth;mode=mysql", "sa", ""); - // addDatabase("com.mysql.jdbc.Driver", "jdbc:mysql://localhost/test", + // addDatabase("com.mysql.cj.jdbc.Driver", "jdbc:mysql://localhost/test", // "sa", ""); // 
addDatabase("org.ldbc.jdbc.jdbcDriver", // "jdbc:ldbc:mysql://localhost/test", "sa", ""); @@ -325,8 +324,7 @@ public TestBase init(TestAll conf) throws Exception { return this; } - @Override - public void testCase(int seed) throws Exception { + private void testCase(int seed) throws Exception { deleteDb("synth/synth"); try { printTime("TestSynth " + seed); diff --git a/h2/src/test/org/h2/test/synth/sql/Value.java b/h2/src/test/org/h2/test/synth/sql/Value.java index 34ae65e912..6707fee2f2 100644 --- a/h2/src/test/org/h2/test/synth/sql/Value.java +++ b/h2/src/test/org/h2/test/synth/sql/Value.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.sql; diff --git a/h2/src/test/org/h2/test/synth/sql/package.html b/h2/src/test/org/h2/test/synth/sql/package.html index 0454dca98e..6826f682db 100644 --- a/h2/src/test/org/h2/test/synth/sql/package.html +++ b/h2/src/test/org/h2/test/synth/sql/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/synth/thread/TestMulti.java b/h2/src/test/org/h2/test/synth/thread/TestMulti.java index 1427deb7ee..e7e16b7686 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMulti.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMulti.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.thread; @@ -10,11 +10,12 @@ import java.sql.SQLException; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Starts multiple threads and performs random operations on each thread. */ -public class TestMulti extends TestBase { +public class TestMulti extends TestDb { /** * If set, the test should stop. @@ -27,7 +28,7 @@ public class TestMulti extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java b/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java index 92db20169a..4c2921f6bc 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiNews.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java b/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java index b015b03ece..fc043105d8 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiNewsSimple.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java b/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java index 90ea33c734..c10fec4850 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiOrder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.synth.thread; @@ -134,7 +134,7 @@ void first() throws SQLException { c.createStatement().execute("create table customer(" + "id int primary key, name varchar, account decimal)"); c.createStatement().execute("create table orders(" + - "id int identity primary key, customer_id int, total decimal)"); + "id int generated by default as identity primary key, customer_id int, total decimal)"); c.createStatement().execute("create table orderLine(" + "order_id int, line_id int, text varchar, " + "amount decimal, primary key(order_id, line_id))"); diff --git a/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java b/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java index dd04035751..7ed64a1eb6 100644 --- a/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java +++ b/h2/src/test/org/h2/test/synth/thread/TestMultiThread.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.synth.thread; diff --git a/h2/src/test/org/h2/test/synth/thread/package.html b/h2/src/test/org/h2/test/synth/thread/package.html index 274561a1bb..6adf5e5236 100644 --- a/h2/src/test/org/h2/test/synth/thread/package.html +++ b/h2/src/test/org/h2/test/synth/thread/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/testScript.sql b/h2/src/test/org/h2/test/testScript.sql deleted file mode 100644 index eeaf4e080d..0000000000 --- a/h2/src/test/org/h2/test/testScript.sql +++ /dev/null @@ -1,10165 +0,0 @@ --- Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, --- and the EPL 1.0 (http://h2database.com/html/license.html). --- Initial Developer: H2 Group --- ---- special grammar and test cases --------------------------------------------------------------------------------------------- -select * from table(a int=(1)), table(b int=(2)); -> A B -> - - -> 1 2 -> rows: 1 - -select x, x in(2, 3) i from system_range(1, 2) group by x; -> X I -> - ----- -> 1 FALSE -> 2 TRUE -> rows: 2 - -select * from dual join(select x from dual) on 1=1; -> X X -> - - -> 1 1 -> rows: 1 - -select 0 as x from system_range(1, 2) d group by d.x; -> X -> - -> 0 -> 0 -> rows: 2 - -select 1 "a", count(*) from dual group by "a" order by "a"; -> a COUNT(*) -> - -------- -> 1 1 -> rows (ordered): 1 - -create table results(eventId int, points int, studentId int); -> ok - -insert into results values(1, 10, 1), (2, 20, 1), (3, 5, 1); -> update count: 3 - -insert into results values(1, 10, 2), (2, 20, 2), (3, 5, 2); -> update count: 3 - -insert into results values(1, 10, 3), (2, 20, 3), (3, 5, 3); -> update count: 3 - -SELECT SUM(points) FROM RESULTS -WHERE eventID IN -(SELECT eventID FROM RESULTS -WHERE studentID = 2 -ORDER BY points DESC -LIMIT 2 ) -AND studentID = 2; -> SUM(POINTS) -> ----------- -> null -> rows (ordered): 1 - -SELECT eventID X FROM RESULTS -WHERE studentID = 2 -ORDER BY points DESC -LIMIT 2; -> X -> - -> 2 
-> 1 -> rows (ordered): 2 - -SELECT SUM(r.points) FROM RESULTS r, -(SELECT eventID FROM RESULTS -WHERE studentID = 2 -ORDER BY points DESC -LIMIT 2 ) r2 -WHERE r2.eventID = r.eventId -AND studentID = 2; -> SUM(R.POINTS) -> ------------- -> 30 -> rows (ordered): 1 - -drop table results; -> ok - -create table test(a int, b int); -> ok - -insert into test values(1, 1); -> update count: 1 - -create index on test(a, b desc); -> ok - -select * from test where a = 1; -> A B -> - - -> 1 1 -> rows: 1 - -drop table test; -> ok - -create table test(id int, name varchar) as select 1, 'a'; -> ok - -(select id from test order by id) union (select id from test order by name); -> ID -> -- -> 1 -> rows (ordered): 1 - -drop table test; -> ok - -create sequence seq; -> ok - -select case seq.nextval when 2 then 'two' when 3 then 'three' when 1 then 'one' else 'other' end result from dual; -> RESULT -> ------ -> one -> rows: 1 - -drop sequence seq; -> ok - -select decode(1, 1, '1', 1, '11') r from dual; -> R -> - -> 1 -> rows: 1 - -create table test(x int); -> ok - -create hash index on test(x); -> ok - -select 1 from test group by x; -> 1 -> - -> rows: 0 - -drop table test; -> ok - -call regexp_replace('x', 'x', '\'); -> exception - -select * from dual where x = x + 1 or x in(2, 0); -> X -> - -> rows: 0 - -select * from system_range(1,1) order by x limit 3 offset 3; -> X -> - -> rows (ordered): 0 - -select * from dual where cast('a' || x as varchar_ignorecase) in ('A1', 'B1'); -> X -> - -> 1 -> rows: 1 - -create sequence seq start with 65 increment by 1; -> ok - -select char(nextval('seq')) as x; -> X -> - -> A -> rows: 1 - -select char(nextval('seq')) as x; -> X -> - -> B -> rows: 1 - -drop sequence seq; -> ok - -create table test(id int, name varchar); -> ok - -insert into test values(5, 'b'), (5, 'b'), (20, 'a'); -> update count: 3 - -select id from test where name in(null, null); -> ID -> -- -> rows: 0 - -select * from (select * from test order by name limit 1) where id < 10; -> 
ID NAME -> -- ---- -> rows (ordered): 0 - -drop table test; -> ok - -create table test (id int not null, pid int); -> ok - -create index idx_test_pid on test (pid); -> ok - -alter table test add constraint fk_test foreign key (pid) -references test (id) index idx_test_pid; -> ok - -insert into test values (2, null); -> update count: 1 - -update test set pid = 1 where id = 2; -> exception - -drop table test; -> ok - -call cast('null' as uuid); -> exception - -create table test(name varchar(255)); -> ok - -select * from test union select * from test order by test.name; -> exception - -insert into test values('a'), ('b'), ('c'); -> update count: 3 - -select name from test where name > all(select name from test where name<'b'); -> NAME -> ---- -> b -> c -> rows: 2 - -select count(*) from (select name from test where name > all(select name from test where name<'b')) x; -> COUNT(*) -> -------- -> 2 -> rows: 1 - -drop table test; -> ok - -create table test(id int) as select 1; -> ok - -select * from test where id >= all(select id from test where 1=0); -> ID -> -- -> 1 -> rows: 1 - -select * from test where id = all(select id from test where 1=0); -> ID -> -- -> 1 -> rows: 1 - -select * from test where id = all(select id from test union all select id from test); -> ID -> -- -> 1 -> rows: 1 - -select * from test where null >= all(select id from test where 1=0); -> ID -> -- -> 1 -> rows: 1 - -select * from test where null = all(select id from test where 1=0); -> ID -> -- -> 1 -> rows: 1 - -select * from test where null = all(select id from test union all select id from test); -> ID -> -- -> rows: 0 - -select * from test where id >= all(select cast(null as int) from test); -> ID -> -- -> rows: 0 - -select * from test where id = all(select null from test union all select id from test); -> ID -> -- -> rows: 0 - -select * from test where null >= all(select cast(null as int) from test); -> ID -> -- -> rows: 0 - -select * from test where null = all(select null from test union all 
select id from test); -> ID -> -- -> rows: 0 - -drop table test; -> ok - -select x from dual order by y.x; -> exception - -create table test(id int primary key, name varchar(255), row_number int); -> ok - -insert into test values(1, 'hello', 10), (2, 'world', 20); -> update count: 2 - -select row_number() over(), id, name from test order by id; -> ROWNUM() ID NAME -> -------- -- ----- -> 1 1 hello -> 2 2 world -> rows (ordered): 2 - -select row_number() over(), id, name from test order by name; -> ROWNUM() ID NAME -> -------- -- ----- -> 1 1 hello -> 2 2 world -> rows (ordered): 2 - -select row_number() over(), id, name from test order by name desc; -> ROWNUM() ID NAME -> -------- -- ----- -> 2 2 world -> 1 1 hello -> rows (ordered): 2 - -update test set (id)=(id); -> update count: 2 - -drop table test; -> ok - -create table test(x int) as select x from system_range(1, 2); -> ok - -select * from (select rownum r from test) where r in (1, 2); -> R -> - -> 1 -> 2 -> rows: 2 - -select * from (select rownum r from test) where r = 1 or r = 2; -> R -> - -> 1 -> 2 -> rows: 2 - -drop table test; -> ok - -select 2^2; -> exception - -select * from dual where x in (select x from dual group by x order by max(x)); -> X -> - -> 1 -> rows (ordered): 1 - -create table test(d decimal(1, 2)); -> exception - -call truncate_value('Test 123', 4, false); -> 'Test' -> ------ -> Test -> rows: 1 - -call truncate_value(1234567890.123456789, 4, false); -> exception - -call truncate_value(1234567890.123456789, 4, true); -> 1234567890.1234567 -> ------------------ -> 1234567890.1234567 -> rows: 1 - -select * from dual where cast('xx' as varchar_ignorecase(1)) = 'X' and cast('x x ' as char(2)) = 'x'; -> X -> - -> 1 -> rows: 1 - -explain select -cast(0 as real), -cast(0 as double); -> PLAN -> -------------------------------------------------------------------------------------------- -> SELECT -CAST(0 AS REAL), -CAST(0 AS DOUBLE) FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ -> rows: 1 - 
-select -cast(0 as double) nz; -> NZ -> ---- -> -0.0 -> rows: 1 - -select () empty; -> EMPTY -> ----- -> () -> rows: 1 - -select (1,) one_element; -> ONE_ELEMENT -> ----------- -> (1) -> rows: 1 - -select (1) one; -> ONE -> --- -> 1 -> rows: 1 - -create table test(id int); -> ok - -insert into test values(1), (2), (4); -> update count: 3 - -select * from test order by id limit -1; -> ID -> -- -> 1 -> 2 -> 4 -> rows (ordered): 3 - -select * from test order by id limit 0; -> ID -> -- -> rows (ordered): 0 - -select * from test order by id limit 1; -> ID -> -- -> 1 -> rows (ordered): 1 - -select * from test order by id limit 1+1; -> ID -> -- -> 1 -> 2 -> rows (ordered): 2 - -select * from test order by id limit null; -> ID -> -- -> 1 -> 2 -> 4 -> rows (ordered): 3 - -select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1); -> ID X -> -- ----- -> 1 FALSE -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 4 - -select a.id, a.id in(select 4) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; -> ID X -> -- ----- -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 3 - -select a.id, 4 in(select a.id) x from test a, test b where a.id in (b.id, b.id - 1) group by a.id; -> ID X -> -- ----- -> 1 FALSE -> 2 FALSE -> 4 TRUE -> rows: 3 - -delete from test limit 0; -> ok - -delete from test limit 1; -> update count: 1 - -delete from test limit -1; -> update count: 2 - -drop table test; -> ok - -create domain x as int not null; -> ok - -create table test(id x); -> ok - -insert into test values(null); -> exception - -drop table test; -> ok - -drop domain x; -> ok - -create table test(id int primary key); -> ok - -insert into test(id) direct sorted select x from system_range(1, 100); -> update count: 100 - -explain insert into test(id) direct sorted select x from system_range(1, 100); -> PLAN -> ----------------------------------------------------------------------------------------------------- -> INSERT INTO PUBLIC.TEST(ID) DIRECT SORTED SELECT X FROM 
SYSTEM_RANGE(1, 100) /* PUBLIC.RANGE_INDEX */ -> rows: 1 - -explain select * from test limit 10 sample_size 10; -> PLAN -> ----------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ LIMIT 10 SAMPLE_SIZE 10 -> rows: 1 - -drop table test; -> ok - -create table test(d date, t time, ts timestamp); -> ok - -insert into test values(date '2001-01-01', time '01:00:00', timestamp '2010-01-01 00:00:00'); -> update count: 1 - -select ts + t x from test; -> X -> --------------------- -> 2010-01-01 01:00:00.0 -> rows: 1 - -select ts + t + t - t x from test; -> X -> --------------------- -> 2010-01-01 01:00:00.0 -> rows: 1 - -select ts + t * 0.5 x from test; -> X -> --------------------- -> 2010-01-01 00:30:00.0 -> rows: 1 - -select ts + 0.5 x from test; -> X -> --------------------- -> 2010-01-01 12:00:00.0 -> rows: 1 - -select ts - 1.5 x from test; -> X -> --------------------- -> 2009-12-30 12:00:00.0 -> rows: 1 - -select ts + 0.5 * t + t - t x from test; -> X -> --------------------- -> 2010-01-01 00:30:00.0 -> rows: 1 - -select ts + t / 0.5 x from test; -> X -> --------------------- -> 2010-01-01 02:00:00.0 -> rows: 1 - -select d + t, t + d - t x from test; -> T + D X -> --------------------- --------------------- -> 2001-01-01 01:00:00.0 2001-01-01 00:00:00.0 -> rows: 1 - -select 1 + d + 1, d - 1, 2 + ts + 2, ts - 2 from test; -> DATEADD('DAY', 1, DATEADD('DAY', 1, D)) DATEADD('DAY', -1, D) DATEADD('DAY', 2, DATEADD('DAY', 2, TS)) DATEADD('DAY', -2, TS) -> --------------------------------------- --------------------- ---------------------------------------- ---------------------- -> 2001-01-03 00:00:00.0 2000-12-31 00:00:00.0 2010-01-05 00:00:00.0 2009-12-30 00:00:00.0 -> rows: 1 - -select 1 + d + t + 1 from test; -> DATEADD('DAY', 1, (T + DATEADD('DAY', 1, D))) -> --------------------------------------------- -> 2001-01-03 01:00:00.0 -> rows: 1 - -select ts - t - 2 from test; -> 
DATEADD('DAY', -2, (TS - T)) -> ---------------------------- -> 2009-12-29 23:00:00.0 -> rows: 1 - -drop table test; -> ok - -create table test(id int primary key); -> ok - -insert into test values(1), (2), (3), (4); -> update count: 4 - -explain analyze select * from test where id is null; -> PLAN -> ---------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID IS NULL */ /* scanCount: 1 */ WHERE ID IS NULL -> rows: 1 - -drop table test; -> ok - -explain analyze select 1; -> PLAN -> ---------------------------------------------------------------------------- -> SELECT 1 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ -> rows: 1 - -create table folder(id int primary key, name varchar(255), parent int); -> ok - -insert into folder values(1, null, null), (2, 'bin', 1), (3, 'docs', 1), (4, 'html', 3), (5, 'javadoc', 3), (6, 'ext', 1), (7, 'service', 1), (8, 'src', 1), (9, 'docsrc', 8), (10, 'installer', 8), (11, 'main', 8), (12, 'META-INF', 11), (13, 'org', 11), (14, 'h2', 13), (15, 'test', 8), (16, 'tools', 8); -> update count: 16 - -with link(id, name, level) as (select id, name, 0 from folder where parent is null union all select folder.id, ifnull(link.name || '/', '') || folder.name, level + 1 from link inner join folder on link.id = folder.parent) select name from link where name is not null order by cast(id as int); -> NAME -> ----------------- -> bin -> docs -> docs/html -> docs/javadoc -> ext -> service -> src -> src/docsrc -> src/installer -> src/main -> src/main/META-INF -> src/main/org -> src/main/org/h2 -> src/test -> src/tools -> rows (ordered): 15 - -drop table folder; -> ok - -create table test(id int); -> ok - -create view x as select * from test; -> ok - -drop table test restrict; -> exception - -drop table test cascade; -> ok - -select 1, 2 from (select * from dual) union all select 3, 4 from dual; -> 1 2 -> - - -> 1 2 -> 3 
4 -> rows: 2 - -select 3 from (select * from dual) union all select 2 from dual; -> 3 -> - -> 2 -> 3 -> rows: 2 - -create table a(x int, y int); -> ok - -create unique index a_xy on a(x, y); -> ok - -create table b(x int, y int, foreign key(x, y) references a(x, y)); -> ok - -insert into a values(null, null), (null, 0), (0, null), (0, 0); -> update count: 4 - -insert into b values(null, null), (null, 0), (0, null), (0, 0); -> update count: 4 - -delete from a where x is null and y is null; -> update count: 1 - -delete from a where x is null and y = 0; -> update count: 1 - -delete from a where x = 0 and y is null; -> update count: 1 - -delete from a where x = 0 and y = 0; -> exception - -drop table b; -> ok - -drop table a; -> ok - -select * from (select null as x) where x=1; -> X -> - -> rows: 0 - -create table test(a int primary key, b int references(a)); -> ok - -merge into test values(1, 2); -> exception - -drop table test; -> ok - -create table test(id int primary key, d int); -> ok - -insert into test values(1,1), (2, 1); -> update count: 2 - -select id from test where id in (1, 2) and d = 1; -> ID -> -- -> 1 -> 2 -> rows: 2 - -drop table test; -> ok - -create table test(id decimal(10, 2) primary key) as select 0; -> ok - -select * from test where id = 0.00; -> ID -> ---- -> 0.00 -> rows: 1 - -select * from test where id = 0.0; -> ID -> ---- -> 0.00 -> rows: 1 - -drop table test; -> ok - -select count(*) from (select 1 union (select 2 intersect select 2)) x; -> COUNT(*) -> -------- -> 2 -> rows: 1 - -create table test(id varchar(1) primary key) as select 'X'; -> ok - -select count(*) from (select 1 from dual where x in ((select 1 union select 1))) a; -> COUNT(*) -> -------- -> 1 -> rows: 1 - -insert into test ((select 1 union select 2) union select 3); -> update count: 3 - -select count(*) from test where id = 'X1'; -> COUNT(*) -> -------- -> 0 -> rows: 1 - -drop table test; -> ok - -create table test(id int primary key, name varchar(255), x int); -> ok - 
-create unique index idx_name1 on test(name); -> ok - -create unique index idx_name2 on test(name); -> ok - -show columns from test; -> FIELD TYPE NULL KEY DEFAULT -> ----- ------------ ---- --- ------- -> ID INTEGER(10) NO PRI NULL -> NAME VARCHAR(255) YES UNI NULL -> X INTEGER(10) YES NULL -> rows: 3 - -show columns from catalogs from information_schema; -> FIELD TYPE NULL KEY DEFAULT -> ------------ ------------------- ---- --- ------- -> CATALOG_NAME VARCHAR(2147483647) YES NULL -> rows: 1 - -show columns from information_schema.catalogs; -> FIELD TYPE NULL KEY DEFAULT -> ------------ ------------------- ---- --- ------- -> CATALOG_NAME VARCHAR(2147483647) YES NULL -> rows: 1 - -drop table test; -> ok - -create table a(a int) as select 1; -> ok - -create table b(b int) as select 1; -> ok - -create table c(c int) as select x from system_range(1, 2); -> ok - -select * from a inner join b on a=b right outer join c on c=a; -> C A B -> - ---- ---- -> 1 1 1 -> 2 null null -> rows: 2 - -select * from c left outer join (a inner join b on b=a) on c=a; -> C A B -> - ---- ---- -> 1 1 1 -> 2 null null -> rows: 2 - -select * from c left outer join a on c=a inner join b on b=a; -> C A B -> - - - -> 1 1 1 -> rows: 1 - -drop table a, b, c; -> ok - -create table test(a int, b int) as select x, x from system_range(1, 100); -> ok - --- the table t1 should be processed first -explain select * from test t2, test t1 where t1.a=1 and t1.b = t2.b; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T2.A, T2.B, T1.A, T1.B FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ /* WHERE T1.A = 1 */ INNER JOIN PUBLIC.TEST T2 /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE (T1.A = 1) AND (T1.B = T2.B) -> rows: 1 - -explain select * from test t1, test t2 where t1.a=1 and t1.b = t2.b; -> PLAN -> 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.A, T1.B, T2.A, T2.B FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ /* WHERE T1.A = 1 */ INNER JOIN PUBLIC.TEST T2 /* PUBLIC.TEST.tableScan */ ON 1=1 WHERE (T1.A = 1) AND (T1.B = T2.B) -> rows: 1 - -drop table test; -> ok - -create table test(id int, constraint pk primary key(id), constraint x unique(id)); -> ok - -select constraint_name from information_schema.indexes where table_name = 'TEST'; -> CONSTRAINT_NAME -> --------------- -> PK -> rows: 1 - -drop table test; -> ok - -create table parent(id int primary key); -> ok - -create table child(id int, parent_id int, constraint child_parent foreign key (parent_id) references parent(id)); -> ok - -select constraint_name from information_schema.indexes where table_name = 'CHILD'; -> CONSTRAINT_NAME -> --------------- -> CHILD_PARENT -> rows: 1 - -drop table parent, child; -> ok - -create table test(id int, name varchar(max)); -> ok - -alter table test alter column id identity; -> ok - -drop table test; -> ok - -create table test(id int primary key, name varchar); -> ok - -alter table test alter column id int auto_increment; -> ok - -create table otherTest(id int primary key, name varchar); -> ok - -alter table otherTest add constraint fk foreign key(id) references test(id); -> ok - -alter table otherTest drop foreign key fk; -> ok - -create unique index idx on otherTest(name); -> ok - -alter table otherTest drop index idx; -> ok - -drop table otherTest; -> ok - -insert into test(id) values(1); -> update count: 1 - -alter table test change column id id2 int; -> ok - -select id2 from test; -> ID2 -> --- -> 1 -> rows: 1 - -drop table test; -> ok - -create table test(id identity) as select x from system_range(1, 4); -> ok - -select a.id from test a inner join test b on a.id > b.id and b.id < 3 group by a.id; -> ID 
-> -- -> 2 -> 3 -> 4 -> rows: 3 - -drop table test; -> ok - -create table test(id identity); -> ok - -set password test; -> exception - -alter user sa set password test; -> exception - -comment on table test is test; -> exception - -select 1 from test a where 1 in(select 1 from test b where b.id in(select 1 from test c where c.id=a.id)); -> 1 -> - -> rows: 0 - -drop table test; -> ok - -select @n := case when x = 1 then 1 else @n * x end f from system_range(1, 4); -> F -> -- -> 1 -> 2 -> 24 -> 6 -> rows: 4 - -select * from (select "x" from dual); -> exception - -select * from(select 1 from system_range(1, 2) group by sin(x) order by sin(x)); -> 1 -> - -> 1 -> 1 -> rows (ordered): 2 - -create table parent as select 1 id, 2 x; -> ok - -create table child(id int references parent(id)) as select 1; -> ok - -delete from parent; -> exception - -drop table parent, child; -> ok - -create domain integer as varchar; -> exception - -create domain int as varchar; -> ok - -create memory table test(id int); -> ok - -script nodata nopasswords nosettings; -> SCRIPT -> ----------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE DOMAIN INT AS VARCHAR; -> CREATE MEMORY TABLE PUBLIC.TEST( ID VARCHAR ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 4 - -drop table test; -> ok - -drop domain int; -> ok - -create table test(id identity, parent bigint, foreign key(parent) references(id)); -> ok - -insert into test values(0, 0), (1, NULL), (2, 1), (3, 3), (4, 3); -> update count: 5 - -delete from test where id = 3; -> exception - -delete from test where id = 0; -> update count: 1 - -delete from test where id = 1; -> exception - -drop table test; -> ok - -select iso_week('2006-12-31') w, iso_year('2007-12-31') y, iso_day_of_week('2007-12-31') w; -> W Y W -> -- ---- - -> 52 2008 1 -> rows: 1 - -create schema a; -> ok - -set autocommit false; -> ok - -set schema a; -> ok - -create table t1 ( k int, v varchar(10) ); -> ok - -insert into 
t1 values ( 1, 't1' ); -> update count: 1 - -create table t2 ( k int, v varchar(10) ); -> ok - -insert into t2 values ( 2, 't2' ); -> update count: 1 - -create view v_test(a, b, c, d) as select t1.*, t2.* from t1 join t2 on ( t1.k = t2.k ); -> ok - -select * from v_test; -> A B C D -> - - - - -> rows: 0 - -set schema public; -> ok - -drop schema a; -> ok - -set autocommit true; -> ok - -select x/3 as a, count(*) c from system_range(1, 10) group by a having c>2; -> A C -> - - -> 1 3 -> 2 3 -> rows: 2 - -create table test(id int); -> ok - -insert into test values(1), (2); -> update count: 2 - -select id+1 as x, count(*) from test group by x; -> X COUNT(*) -> - -------- -> 2 1 -> 3 1 -> rows: 2 - -select 1 as id, id as b, count(*) from test group by id; -> ID B COUNT(*) -> -- - -------- -> 1 1 1 -> 1 2 1 -> rows: 2 - -select id+1 as x, count(*) from test group by -x; -> exception - -select id+1 as x, count(*) from test group by x having x>2; -> exception - -select id+1 as x, count(*) from test group by 1; -> exception - -drop table test; -> ok - -create table test(t0 timestamp(0), t1 timestamp(1), t4 timestamp(4)); -> ok - -select column_name, numeric_scale from information_schema.columns c where c.table_name = 'TEST' order by column_name; -> COLUMN_NAME NUMERIC_SCALE -> ----------- ------------- -> T0 0 -> T1 1 -> T4 4 -> rows (ordered): 3 - -drop table test; -> ok - -create table test(id int); -> ok - -insert into test values(null), (1); -> update count: 2 - -select * from test where id not in (select id from test where 1=0); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where null not in (select id from test where 1=0); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where not (id in (select id from test where 1=0)); -> ID -> ---- -> 1 -> null -> rows: 2 - -select * from test where not (null in (select id from test where 1=0)); -> ID -> ---- -> 1 -> null -> rows: 2 - -drop table test; -> ok - -create table test(a int); -> ok - -insert into 
test values(1), (2); -> update count: 2 - -select -test.a a from test order by test.a; -> A -> -- -> -1 -> -2 -> rows (ordered): 2 - -select -test.a from test order by test.a; -> - TEST.A -> -------- -> -1 -> -2 -> rows (ordered): 2 - -select -test.a aa from test order by a; -> AA -> -- -> -1 -> -2 -> rows (ordered): 2 - -select -test.a aa from test order by aa; -> AA -> -- -> -2 -> -1 -> rows (ordered): 2 - -select -test.a a from test order by a; -> A -> -- -> -2 -> -1 -> rows (ordered): 2 - -drop table test; -> ok - -CREATE TABLE table_a(a_id INT PRIMARY KEY, left_id INT, right_id INT); -> ok - -CREATE TABLE table_b(b_id INT PRIMARY KEY, a_id INT); -> ok - -CREATE TABLE table_c(left_id INT, right_id INT, center_id INT); -> ok - -CREATE VIEW view_a AS -SELECT table_c.center_id, table_a.a_id, table_b.b_id -FROM table_c -INNER JOIN table_a ON table_c.left_id = table_a.left_id -AND table_c.right_id = table_a.right_id -LEFT JOIN table_b ON table_b.a_id = table_a.a_id; -> ok - -SELECT * FROM table_c INNER JOIN view_a -ON table_c.center_id = view_a.center_id; -> LEFT_ID RIGHT_ID CENTER_ID CENTER_ID A_ID B_ID -> ------- -------- --------- --------- ---- ---- -> rows: 0 - -drop view view_a; -> ok - -drop table table_a, table_b, table_c; -> ok - -create table t (pk int primary key, attr int); -> ok - -insert into t values (1, 5), (5, 1); -> update count: 2 - -select t1.pk from t t1, t t2 where t1.pk = t2.attr order by t1.pk; -> PK -> -- -> 1 -> 5 -> rows (ordered): 2 - -drop table t; -> ok - -CREATE ROLE TEST_A; -> ok - -GRANT TEST_A TO TEST_A; -> exception - -CREATE ROLE TEST_B; -> ok - -GRANT TEST_A TO TEST_B; -> ok - -GRANT TEST_B TO TEST_A; -> exception - -DROP ROLE TEST_A; -> ok - -DROP ROLE TEST_B; -> ok - -CREATE ROLE PUBLIC2; -> ok - -GRANT PUBLIC2 TO SA; -> ok - -GRANT PUBLIC2 TO SA; -> ok - -REVOKE PUBLIC2 FROM SA; -> ok - -REVOKE PUBLIC2 FROM SA; -> ok - -DROP ROLE PUBLIC2; -> ok - -create table test(id int primary key, lastname varchar, firstname varchar, 
parent int references(id)); -> ok - -alter table test add constraint name unique (lastname, firstname); -> ok - -SELECT CONSTRAINT_NAME, UNIQUE_INDEX_NAME, COLUMN_LIST FROM INFORMATION_SCHEMA.CONSTRAINTS ; -> CONSTRAINT_NAME UNIQUE_INDEX_NAME COLUMN_LIST -> --------------- ----------------- ------------------ -> CONSTRAINT_2 PRIMARY_KEY_2 ID -> CONSTRAINT_27 PRIMARY_KEY_2 PARENT -> NAME NAME_INDEX_2 LASTNAME,FIRSTNAME -> rows: 3 - -drop table test; -> ok - -alter table information_schema.help rename to information_schema.help2; -> exception - -help abc; -> ID SECTION TOPIC SYNTAX TEXT -> -- ------- ----- ------ ---- -> rows: 0 - -CREATE TABLE test (id int(25) NOT NULL auto_increment, name varchar NOT NULL, PRIMARY KEY (id,name)); -> ok - -drop table test; -> ok - -CREATE TABLE test (id bigserial NOT NULL primary key); -> ok - -drop table test; -> ok - -CREATE TABLE test (id serial NOT NULL primary key); -> ok - -drop table test; -> ok - -CREATE MEMORY TABLE TEST(ID INT, D DOUBLE, F FLOAT); -> ok - -insert into test values(0, POWER(0, -1), POWER(0, -1)), (1, -POWER(0, -1), -POWER(0, -1)), (2, SQRT(-1), SQRT(-1)); -> update count: 3 - -select * from test order by id; -> ID D F -> -- --------- --------- -> 0 Infinity Infinity -> 1 -Infinity -Infinity -> 2 NaN NaN -> rows (ordered): 3 - -script nopasswords nosettings; -> SCRIPT -> ----------------------------------------------------------------------------------------------------------------------------------------- -> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT, D DOUBLE, F FLOAT ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, D, F) VALUES (0, POWER(0, -1), POWER(0, -1)), (1, (-POWER(0, -1)), (-POWER(0, -1))), (2, SQRT(-1), SQRT(-1)); -> rows: 4 - -DROP TABLE TEST; -> ok - -create schema a; -> ok - -create table a.x(ax int); -> ok - -create schema b; -> ok - -create table b.x(bx int); -> ok - -select * from a.x, b.x; -> AX BX -> -- -- -> 
rows: 0 - -drop schema a; -> ok - -drop schema b; -> ok - -create table t1 (id int primary key); -> ok - -create table t2 (id int primary key); -> ok - -insert into t1 select x from system_range(1, 1000); -> update count: 1000 - -insert into t2 select x from system_range(1, 1000); -> update count: 1000 - -explain select count(*) from t1 where t1.id in ( select t2.id from t2 ); -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT COUNT(*) FROM PUBLIC.T1 /* PUBLIC.PRIMARY_KEY_A: ID IN(SELECT T2.ID FROM PUBLIC.T2 /++ PUBLIC.T2.tableScan ++/) */ WHERE T1.ID IN( SELECT T2.ID FROM PUBLIC.T2 /* PUBLIC.T2.tableScan */) -> rows: 1 - -select count(*) from t1 where t1.id in ( select t2.id from t2 ); -> COUNT(*) -> -------- -> 1000 -> rows: 1 - -drop table t1, t2; -> ok - -select * from system_range(1, 3) t1 inner join system_range(2, 3) t2 inner join system_range(1, 2) t3 on t3.x=t2.x on t1.x=t2.x; -> X X X -> - - - -> 2 2 2 -> rows: 1 - -CREATE TABLE p(d date); -> ok - -INSERT INTO p VALUES('-1-01-01'), ('0-01-01'), ('0001-01-01'); -> update count: 3 - -select d, year(d), extract(year from d), cast(d as timestamp) from p; -> D YEAR(D) EXTRACT(YEAR FROM D) CAST(D AS TIMESTAMP) -> ---------- ------- -------------------- --------------------- -> -1-01-01 -1 -1 -1-01-01 00:00:00.0 -> 0-01-01 0 0 0-01-01 00:00:00.0 -> 0001-01-01 1 1 0001-01-01 00:00:00.0 -> rows: 3 - -drop table p; -> ok - -(SELECT X FROM DUAL ORDER BY X+2) UNION SELECT X FROM DUAL; -> X -> - -> 1 -> rows (ordered): 1 - -create table test(a int, b int default 1); -> ok - -insert into test values(1, default), (2, 2), (3, null); -> update count: 3 - -select * from test; -> A B -> - ---- -> 1 1 -> 2 2 -> 3 null -> rows: 3 - -update test set b = default where a = 2; -> update count: 1 - -explain update test set b = default where a = 2; -> PLAN -> 
-------------------------------------------------------------------------- -> UPDATE PUBLIC.TEST /* PUBLIC.TEST.tableScan */ SET B = DEFAULT WHERE A = 2 -> rows: 1 - -select * from test; -> A B -> - ---- -> 1 1 -> 2 1 -> 3 null -> rows: 3 - -update test set a=default; -> update count: 3 - -drop table test; -> ok - -CREATE ROLE X; -> ok - -GRANT X TO X; -> exception - -CREATE ROLE Y; -> ok - -GRANT Y TO X; -> ok - -DROP ROLE Y; -> ok - -DROP ROLE X; -> ok - -create table test as select * from table(id int=(1, 2, 3)); -> ok - -SELECT * FROM (SELECT * FROM TEST) ORDER BY id; -> ID -> -- -> 1 -> 2 -> 3 -> rows (ordered): 3 - -SELECT * FROM (SELECT * FROM TEST) x ORDER BY id; -> ID -> -- -> 1 -> 2 -> 3 -> rows (ordered): 3 - -drop table test; -> ok - -select top sum(1) 0 from dual; -> exception - -create table test(id int primary key, name varchar) as select 1, 'Hello World'; -> ok - -select * from test; -> ID NAME -> -- ----------- -> 1 Hello World -> rows: 1 - -drop table test; -> ok - -select rtrim() from dual; -> exception - -CREATE TABLE COUNT(X INT); -> ok - -CREATE FORCE TRIGGER T_COUNT BEFORE INSERT ON COUNT CALL "com.Unknown"; -> ok - -INSERT INTO COUNT VALUES(NULL); -> exception - -DROP TRIGGER T_COUNT; -> ok - -CREATE TABLE ITEMS(ID INT CHECK ID < SELECT MAX(ID) FROM COUNT); -> ok - -insert into items values(DEFAULT); -> update count: 1 - -DROP TABLE COUNT; -> exception - -insert into items values(DEFAULT); -> update count: 1 - -drop table items, count; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, LABEL CHAR(20), LOOKUP CHAR(30)); -> ok - -INSERT INTO TEST VALUES (1, 'Mouse', 'MOUSE'), (2, 'MOUSE', 'Mouse'); -> update count: 2 - -SELECT * FROM TEST; -> ID LABEL LOOKUP -> -- ----- ------ -> 1 Mouse MOUSE -> 2 MOUSE Mouse -> rows: 2 - -DROP TABLE TEST; -> ok - -call 'a' regexp '\Ho.*'; -> exception - -set @t = 0; -> ok - -call set(1, 2); -> exception - -select x, set(@t, ifnull(@t, 0) + x) from system_range(1, 3); -> X SET(@T, (IFNULL(@T, 0) + X)) -> - 
---------------------------- -> 1 1 -> 2 3 -> 3 6 -> rows: 3 - -select * from system_range(1, 2) a, -(select * from system_range(1, 2) union select * from system_range(1, 2) -union select * from system_range(1, 1)) v where a.x = v.x; -> X X -> - - -> 1 1 -> 2 2 -> rows: 2 - -create table test(id int); -> ok - -select * from ((select * from test) union (select * from test)) where id = 0; -> ID -> -- -> rows: 0 - -select * from ((test d1 inner join test d2 on d1.id = d2.id) inner join test d3 on d1.id = d3.id) inner join test d4 on d4.id = d1.id; -> ID ID ID ID -> -- -- -- -- -> rows: 0 - -drop table test; -> ok - -select count(*) from system_range(1, 2) where x in(1, 1, 1); -> COUNT(*) -> -------- -> 1 -> rows: 1 - -create table person(id bigint auto_increment, name varchar(100)); -> ok - -insert into person(name) values ('a'), ('b'), ('c'); -> update count: 3 - -select * from person order by id; -> ID NAME -> -- ---- -> 1 a -> 2 b -> 3 c -> rows (ordered): 3 - -select * from person order by id limit 2; -> ID NAME -> -- ---- -> 1 a -> 2 b -> rows (ordered): 2 - -select * from person order by id limit 2 offset 1; -> ID NAME -> -- ---- -> 2 b -> 3 c -> rows (ordered): 2 - -select * from person order by id limit 2147483647 offset 1; -> ID NAME -> -- ---- -> 2 b -> 3 c -> rows (ordered): 2 - -select * from person order by id limit 2147483647-1 offset 1; -> ID NAME -> -- ---- -> 2 b -> 3 c -> rows (ordered): 2 - -select * from person order by id limit 2147483647-1 offset 2; -> ID NAME -> -- ---- -> 3 c -> rows (ordered): 1 - -select * from person order by id limit 2147483647-2 offset 2; -> ID NAME -> -- ---- -> 3 c -> rows (ordered): 1 - -drop table person; -> ok - -CREATE TABLE TEST(ID INTEGER NOT NULL, ID2 INTEGER DEFAULT 0); -> ok - -ALTER TABLE test ALTER COLUMN ID2 RENAME TO ID; -> exception - -drop table test; -> ok - -create table test(id int primary key, data array); -> ok - -insert into test values(1, (1, 1)), (2, (1, 2)), (3, (1, 1, 1)); -> update count: 3 - 
-select * from test order by data; -> ID DATA -> -- --------- -> 1 (1, 1) -> 3 (1, 1, 1) -> 2 (1, 2) -> rows (ordered): 3 - -drop table test; -> ok - -CREATE TABLE FOO (A CHAR(10)); -> ok - -CREATE TABLE BAR AS SELECT * FROM FOO; -> ok - -select table_name, numeric_precision from information_schema.columns where column_name = 'A'; -> TABLE_NAME NUMERIC_PRECISION -> ---------- ----------------- -> BAR 10 -> FOO 10 -> rows: 2 - -DROP TABLE FOO, BAR; -> ok - -create table multi_pages(dir_num int, bh_id int); -> ok - -insert into multi_pages values(1, 1), (2, 2), (3, 3); -> update count: 3 - -create table b_holding(id int primary key, site varchar(255)); -> ok - -insert into b_holding values(1, 'Hello'), (2, 'Hello'), (3, 'Hello'); -> update count: 3 - -select * from (select dir_num, count(*) as cnt from multi_pages t, b_holding bh -where t.bh_id=bh.id and bh.site='Hello' group by dir_num) as x -where cnt < 1000 order by dir_num asc; -> DIR_NUM CNT -> ------- --- -> 1 1 -> 2 1 -> 3 1 -> rows (ordered): 3 - -explain select * from (select dir_num, count(*) as cnt from multi_pages t, b_holding bh -where t.bh_id=bh.id and bh.site='Hello' group by dir_num) as x -where cnt < 1000 order by dir_num asc; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT X.DIR_NUM, X.CNT FROM ( SELECT DIR_NUM, COUNT(*) AS CNT FROM PUBLIC.MULTI_PAGES T /* PUBLIC.MULTI_PAGES.tableScan */ INNER JOIN PUBLIC.B_HOLDING BH /* PUBLIC.PRIMARY_KEY_3: ID = T.BH_ID 
*/ ON 1=1 WHERE (BH.SITE = 'Hello') AND (T.BH_ID = BH.ID) GROUP BY DIR_NUM ) X /* SELECT DIR_NUM, COUNT(*) AS CNT FROM PUBLIC.MULTI_PAGES T /++ PUBLIC.MULTI_PAGES.tableScan ++/ INNER JOIN PUBLIC.B_HOLDING BH /++ PUBLIC.PRIMARY_KEY_3: ID = T.BH_ID ++/ ON 1=1 WHERE (BH.SITE = 'Hello') AND (T.BH_ID = BH.ID) GROUP BY DIR_NUM HAVING COUNT(*) <= ?1: CNT < 1000 */ WHERE CNT < 1000 ORDER BY 1 -> rows (ordered): 1 - -select dir_num, count(*) as cnt from multi_pages t, b_holding bh -where t.bh_id=bh.id and bh.site='Hello' group by dir_num -having count(*) < 1000 order by dir_num asc; -> DIR_NUM CNT -> ------- --- -> 1 1 -> 2 1 -> 3 1 -> rows (ordered): 3 - -drop table multi_pages, b_holding; -> ok - -select * from dual where x = 1000000000000000000000; -> X -> - -> rows: 0 - -select * from dual where x = 'Hello'; -> exception - -CREATE TABLE PARENT(ID INT PRIMARY KEY); -> ok - -CREATE TABLE CHILD(ID INT PRIMARY KEY); -> ok - -INSERT INTO PARENT VALUES(1); -> update count: 1 - -SELECT * FROM PARENT P LEFT OUTER JOIN CHILD C ON C.PARENTID=P.ID; -> exception - -DROP TABLE PARENT, CHILD; -> ok - -create table test(id smallint primary key); -> ok - -insert into test values(1), (2), (3); -> update count: 3 - -explain select * from test where id = 1; -> PLAN -> ------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1 -> rows: 1 - -EXPLAIN SELECT * FROM TEST WHERE ID = (SELECT MAX(ID) FROM TEST); -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/ /++ direct lookup ++/) */ WHERE ID = (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct 
lookup */) -> rows: 1 - -drop table test; -> ok - -create table test(id tinyint primary key); -> ok - -insert into test values(1), (2), (3); -> update count: 3 - -explain select * from test where id = 3; -> PLAN -> ------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE ID = 3 -> rows: 1 - -explain select * from test where id = 255; -> PLAN -> ----------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 255 */ WHERE ID = 255 -> rows: 1 - -drop table test; -> ok - -create table test(id int primary key); -> ok - -insert into test values(1), (2), (3); -> update count: 3 - -explain select * from test where id in(1, 2, null); -> PLAN -> ----------------------------------------------------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2, NULL) */ WHERE ID IN(1, 2, NULL) -> rows: 1 - -drop table test; -> ok - -create alias "SYSDATE" for "java.lang.Integer.parseInt(java.lang.String)"; -> exception - -create alias "MIN" for "java.lang.Integer.parseInt(java.lang.String)"; -> exception - -create alias "CAST" for "java.lang.Integer.parseInt(java.lang.String)"; -> exception - -CREATE TABLE PARENT(A INT, B INT, PRIMARY KEY(A, B)); -> ok - -CREATE TABLE CHILD(A INT, B INT, CONSTRAINT CP FOREIGN KEY(A, B) REFERENCES PARENT(A, B)); -> ok - -INSERT INTO PARENT VALUES(1, 2); -> update count: 1 - -INSERT INTO CHILD VALUES(2, NULL), (NULL, 3), (NULL, NULL), (1, 2); -> update count: 4 - -set autocommit false; -> ok - -ALTER TABLE CHILD SET REFERENTIAL_INTEGRITY FALSE; -> ok - -ALTER TABLE CHILD SET REFERENTIAL_INTEGRITY TRUE CHECK; -> ok - -set autocommit true; -> ok - -DROP TABLE CHILD, PARENT; -> ok - -CREATE TABLE TEST(BIRTH TIMESTAMP); -> ok - -INSERT INTO TEST VALUES('2006-04-03 10:20:30'), ('2006-04-03 10:20:31'), 
('2006-05-05 00:00:00'), ('2006-07-03 22:30:00'), ('2006-07-03 22:31:00'); -> update count: 5 - -SELECT * FROM (SELECT CAST(BIRTH AS DATE) B -FROM TEST GROUP BY CAST(BIRTH AS DATE)) A -WHERE A.B >= '2006-05-05'; -> B -> ---------- -> 2006-05-05 -> 2006-07-03 -> rows: 2 - -DROP TABLE TEST; -> ok - -CREATE TABLE Parent(ID INT PRIMARY KEY, Name VARCHAR); -> ok - -CREATE TABLE Child(ID INT); -> ok - -ALTER TABLE Child ADD FOREIGN KEY(ID) REFERENCES Parent(ID); -> ok - -INSERT INTO Parent VALUES(1, '0'), (2, '0'), (3, '0'); -> update count: 3 - -INSERT INTO Child VALUES(1); -> update count: 1 - -ALTER TABLE Parent ALTER COLUMN Name BOOLEAN NULL; -> ok - -DELETE FROM Parent WHERE ID=3; -> update count: 1 - -DROP TABLE Parent, Child; -> ok - -set autocommit false; -> ok - -CREATE TABLE A(ID INT PRIMARY KEY, SK INT); -> ok - -ALTER TABLE A ADD CONSTRAINT AC FOREIGN KEY(SK) REFERENCES A(ID); -> ok - -INSERT INTO A VALUES(1, 1); -> update count: 1 - -INSERT INTO A VALUES(-2, NULL); -> update count: 1 - -ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; -> ok - -ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE CHECK; -> ok - -ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; -> ok - -INSERT INTO A VALUES(2, 3); -> update count: 1 - -ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE; -> ok - -ALTER TABLE A SET REFERENTIAL_INTEGRITY FALSE; -> ok - -ALTER TABLE A SET REFERENTIAL_INTEGRITY TRUE CHECK; -> exception - -DROP TABLE A; -> ok - -set autocommit true; -> ok - -CREATE TABLE PARENT(ID INT); -> ok - -CREATE TABLE CHILD(PID INT); -> ok - -INSERT INTO PARENT VALUES(1); -> update count: 1 - -INSERT INTO CHILD VALUES(2); -> update count: 1 - -ALTER TABLE CHILD ADD CONSTRAINT CP FOREIGN KEY(PID) REFERENCES PARENT(ID); -> exception - -UPDATE CHILD SET PID=1; -> update count: 1 - -ALTER TABLE CHILD ADD CONSTRAINT CP FOREIGN KEY(PID) REFERENCES PARENT(ID); -> ok - -DROP TABLE CHILD, PARENT; -> ok - -CREATE TABLE A(ID INT PRIMARY KEY, SK INT); -> ok - -INSERT INTO A VALUES(1, 2); -> update count: 
1 - -ALTER TABLE A ADD CONSTRAINT AC FOREIGN KEY(SK) REFERENCES A(ID); -> exception - -DROP TABLE A; -> ok - -CREATE TABLE TEST(ID INT); -> ok - -INSERT INTO TEST VALUES(0), (1), (100); -> update count: 3 - -ALTER TABLE TEST ADD CONSTRAINT T CHECK ID<100; -> exception - -UPDATE TEST SET ID=20 WHERE ID=100; -> update count: 1 - -ALTER TABLE TEST ADD CONSTRAINT T CHECK ID<100; -> ok - -DROP TABLE TEST; -> ok - -create table test(id int); -> ok - -set autocommit false; -> ok - -insert into test values(1); -> update count: 1 - -prepare commit tx1; -> ok - -commit transaction tx1; -> ok - -rollback; -> ok - -select * from test; -> ID -> -- -> 1 -> rows: 1 - -drop table test; -> ok - -set autocommit true; -> ok - -CALL REGEXP_REPLACE('abckaboooom', 'o+', 'o'); -> 'abckabom' -> ---------- -> abckabom -> rows: 1 - -SELECT 'Hello' ~ 'He.*' T1, 'HELLO' ~ 'He.*' F2, CAST('HELLO' AS VARCHAR_IGNORECASE) ~ 'He.*' T3; -> T1 F2 T3 -> ---- ----- ---- -> TRUE FALSE TRUE -> rows: 1 - -SELECT 'Hello' ~* 'He.*' T1, 'HELLO' ~* 'He.*' T2, 'hallo' ~* 'He.*' F3; -> T1 T2 F3 -> ---- ---- ----- -> TRUE TRUE FALSE -> rows: 1 - -SELECT 'Hello' !~* 'Ho.*' T1, 'HELLO' !~* 'He.*' F2, 'hallo' !~* 'Ha.*' F3; -> T1 F2 F3 -> ---- ----- ----- -> TRUE FALSE FALSE -> rows: 1 - -create table test(parent int primary key, child int, foreign key(child) references (parent)); -> ok - -insert into test values(1, 1); -> update count: 1 - -insert into test values(2, 3); -> exception - -set autocommit false; -> ok - -set referential_integrity false; -> ok - -insert into test values(4, 4); -> update count: 1 - -insert into test values(5, 6); -> update count: 1 - -set referential_integrity true; -> ok - -insert into test values(7, 7), (8, 9); -> exception - -set autocommit true; -> ok - -drop table test; -> ok - -create table test as select 1, space(10) from dual where 1=0 union all select x, cast(space(100) as varchar(101)) d from system_range(1, 100); -> ok - -drop table test; -> ok - -explain select * from 
system_range(1, 2) where x=x+1 and x=1; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------- -> SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX: X = 1 */ WHERE ((X = 1) AND (X = (X + 1))) AND (1 = (X + 1)) -> rows: 1 - -explain select * from system_range(1, 2) where not (x = 1 and x*2 = 2); -> PLAN -> ------------------------------------------------------------------------------------------------------- -> SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */ WHERE (X <> 1) OR ((X * 2) <> 2) -> rows: 1 - -explain select * from system_range(1, 10) where (NOT x >= 5); -> PLAN -> ------------------------------------------------------------------------------------------ -> SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 10) /* PUBLIC.RANGE_INDEX: X < 5 */ WHERE X < 5 -> rows: 1 - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (-1, '-1'); -> update count: 2 - -select * from test where name = -1 and name = id; -> ID NAME -> -- ---- -> -1 -1 -> rows: 1 - -explain select * from test where name = -1 and name = id; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = -1 */ WHERE ((NAME = -1) AND (NAME = ID)) AND (ID = -1) -> rows: 1 - -DROP TABLE TEST; -> ok - -select * from system_range(1, 2) where x=x+1 and x=1; -> X -> - -> rows: 0 - -CREATE TABLE A as select 6 a; -> ok - -CREATE TABLE B(B INT PRIMARY KEY); -> ok - -CREATE VIEW V(V) AS (SELECT A FROM A UNION SELECT B FROM B); -> ok - -create table C as select * from table(c int = (0,6)); -> ok - -select * from V, C where V.V = C.C; -> V C -> - - -> 6 6 -> rows: 1 - -drop table A, B, C, V cascade; -> ok - -explain select * from table(id int = (1, 2), name varchar=('Hello', 'World')); -> 
PLAN -> ----------------------------------------------------------------------------------------------------- -> SELECT TABLE.ID, TABLE.NAME FROM TABLE(ID INT=(1, 2), NAME VARCHAR=('Hello', 'World')) /* function */ -> rows: 1 - -CREATE TABLE TEST(ID INT PRIMARY KEY, FLAG BOOLEAN, NAME VARCHAR); -> ok - -CREATE INDEX IDX_FLAG ON TEST(FLAG, NAME); -> ok - -INSERT INTO TEST VALUES(1, TRUE, 'Hello'), (2, FALSE, 'World'); -> update count: 2 - -EXPLAIN SELECT * FROM TEST WHERE FLAG; -> PLAN -> --------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.FLAG, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.IDX_FLAG: FLAG = TRUE */ WHERE FLAG -> rows: 1 - -EXPLAIN SELECT * FROM TEST WHERE FLAG AND NAME>'I'; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.FLAG, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.IDX_FLAG: FLAG = TRUE AND NAME > 'I' */ WHERE FLAG AND (NAME > 'I') -> rows: 1 - -DROP TABLE TEST; -> ok - -CREATE TABLE test_table (first_col varchar(20), second_col integer); -> ok - -insert into test_table values('a', 10), ('a', 4), ('b', 30), ('b', 3); -> update count: 4 - -CREATE VIEW test_view AS SELECT first_col AS renamed_col, MIN(second_col) AS also_renamed FROM test_table GROUP BY first_col; -> ok - -SELECT * FROM test_view WHERE renamed_col = 'a'; -> RENAMED_COL ALSO_RENAMED -> ----------- ------------ -> a 4 -> rows: 1 - -drop view test_view; -> ok - -drop table test_table; -> ok - -create table test(id int); -> ok - -explain select id+1 a from test group by id+1; -> PLAN -> --------------------------------------------------------------------------------- -> SELECT (ID + 1) AS A FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ GROUP BY ID + 1 -> rows: 1 - -drop table test; -> ok - -set autocommit off; -> ok - -set search_path = public, information_schema; -> ok - -select table_name from tables where 1=0; 
-> TABLE_NAME -> ---------- -> rows: 0 - -set search_path = public; -> ok - -set autocommit on; -> ok - -create table script.public.x(a int); -> ok - -select * from script.PUBLIC.x; -> A -> - -> rows: 0 - -create index script.public.idx on script.public.x(a); -> ok - -drop table script.public.x; -> ok - -create table t1 (i int); -> ok - -create table t2 (i int); -> ok - -create table t3 (i int); -> ok - -select a.i from t1 a inner join (select a.i from t2 a inner join (select i from t3) b on a.i=b.i) b on a.i=b.i; -> I -> - -> rows: 0 - -drop table t1, t2, t3; -> ok - -create table d(d double, r real); -> ok - -insert into d(d, d, r) values(1.1234567890123456789, 1.1234567890123456789, 3); -> exception - -insert into d values(1.1234567890123456789, 1.1234567890123456789); -> update count: 1 - -select r+d, r+r, d+d from d; -> R + D R + R D + D -> ----------------- --------- ------------------ -> 2.246913624759111 2.2469137 2.2469135780246914 -> rows: 1 - -drop table d; -> ok - -create table test(id int, c char(5), v varchar(5)); -> ok - -insert into test set id = 1, c = 'a', v = 'a'; -> update count: 1 - -insert into test set id = 2, c = 'a ', v = 'a '; -> update count: 1 - -insert into test set id = 3, c = 'abcde ', v = 'abcde'; -> update count: 1 - -select distinct length(c) from test order by length(c); -> LENGTH(C) -> --------- -> 1 -> 5 -> rows (ordered): 2 - -select id, c, v, length(c), length(v) from test order by id; -> ID C V LENGTH(C) LENGTH(V) -> -- ----- ----- --------- --------- -> 1 a a 1 1 -> 2 a a 1 2 -> 3 abcde abcde 5 5 -> rows (ordered): 3 - -select id from test where c='a' order by id; -> ID -> -- -> 1 -> 2 -> rows (ordered): 2 - -select id from test where c='a ' order by id; -> ID -> -- -> 1 -> 2 -> rows (ordered): 2 - -select id from test where c=v order by id; -> ID -> -- -> 1 -> 2 -> 3 -> rows (ordered): 3 - -drop table test; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), C INT); -> ok - -INSERT INTO TEST VALUES(1, '10', 
NULL), (2, '0', NULL); -> update count: 2 - -SELECT LEAST(ID, C, NAME), GREATEST(ID, C, NAME), LEAST(NULL, C), GREATEST(NULL, NULL), ID FROM TEST ORDER BY ID; -> LEAST(ID, C, NAME) GREATEST(ID, C, NAME) LEAST(NULL, C) NULL ID -> ------------------ --------------------- -------------- ---- -- -> 1 10 null null 1 -> 0 2 null null 2 -> rows (ordered): 2 - -DROP TABLE IF EXISTS TEST; -> ok - -create table people (family varchar(1) not null, person varchar(1) not null); -> ok - -create table cars (family varchar(1) not null, car varchar(1) not null); -> ok - -insert into people values(1, 1), (2, 1), (2, 2), (3, 1), (5, 1); -> update count: 5 - -insert into cars values(2, 1), (2, 2), (3, 1), (3, 2), (3, 3), (4, 1); -> update count: 6 - -select family, (select count(car) from cars where cars.family = people.family) as x -from people group by family order by family; -> FAMILY X -> ------ - -> 1 0 -> 2 2 -> 3 3 -> 5 0 -> rows (ordered): 4 - -drop table people, cars; -> ok - -select (1, 2); -> 1, 2 -> ------ -> (1, 2) -> rows: 1 - -select * from table(id int=(1, 2), name varchar=('Hello', 'World')) x order by id; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -create table array_test(x array); -> ok - -insert into array_test values((1, 2, 3)), ((2, 3, 4)); -> update count: 2 - -select * from array_test where x = (1, 2, 3); -> X -> --------- -> (1, 2, 3) -> rows: 1 - -drop table array_test; -> ok - -select * from (select 1), (select 2); -> 1 2 -> - - -> 1 2 -> rows: 1 - -CREATE TABLE TEST(A VARCHAR, B VARCHAR, C VARCHAR AS LOWER(A)); -> ok - -ALTER TABLE TEST DROP COLUMN B; -> ok - -DROP TABLE TEST; -> ok - -create table t1(c1 int, c2 int); -> ok - -create table t2(c1 int, c2 int); -> ok - -insert into t1 values(1, null), (2, 2), (3, 3); -> update count: 3 - -insert into t2 values(1, 1), (1, 2), (2, null), (3, 3); -> update count: 4 - -select * from t2 where c1 not in(select c2 from t1); -> C1 C2 -> -- -- -> rows: 0 - -select * from t2 where c1 not 
in(null, 2, 3); -> C1 C2 -> -- -- -> rows: 0 - -select * from t1 where c2 not in(select c1 from t2); -> C1 C2 -> -- -- -> rows: 0 - -select * from t1 where not exists(select * from t2 where t1.c2=t2.c1); -> C1 C2 -> -- ---- -> 1 null -> rows: 1 - -drop table t1; -> ok - -drop table t2; -> ok - -create constant abc value 1; -> ok - -call abc; -> 1 -> - -> 1 -> rows: 1 - -drop all objects; -> ok - -call abc; -> exception - -create table FOO(id integer primary key); -> ok - -create table BAR(fooId integer); -> ok - -alter table bar add foreign key (fooId) references foo (id); -> ok - -truncate table bar; -> ok - -truncate table foo; -> exception - -drop table bar, foo; -> ok - -CREATE TABLE TESTA(ID IDENTITY); -> ok - -CREATE TABLE TESTB(ID IDENTITY); -> ok - -explain SELECT TESTA.ID A, TESTB.ID B FROM TESTA, TESTB ORDER BY TESTA.ID, TESTB.ID; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT TESTA.ID AS A, TESTB.ID AS B FROM PUBLIC.TESTA /* PUBLIC.TESTA.tableScan */ INNER JOIN PUBLIC.TESTB /* PUBLIC.TESTB.tableScan */ ON 1=1 ORDER BY 1, 2 -> rows (ordered): 1 - -DROP TABLE IF EXISTS TESTA, TESTB; -> ok - -CREATE TABLE test (family_name VARCHAR_IGNORECASE(63) NOT NULL); -> ok - -INSERT INTO test VALUES('Smith'), ('de Smith'), ('el Smith'), ('von Smith'); -> update count: 4 - -SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith'); -> FAMILY_NAME -> ----------- -> Smith -> de Smith -> rows: 2 - -SELECT * FROM test WHERE family_name BETWEEN 'D' AND 'T'; -> FAMILY_NAME -> ----------- -> Smith -> de Smith -> el Smith -> rows: 3 - -CREATE INDEX family_name ON test(family_name); -> ok - -SELECT * FROM test WHERE family_name IN ('de Smith', 'Smith'); -> FAMILY_NAME -> ----------- -> Smith -> de Smith -> rows: 2 - -drop table test; -> ok - -create memory table test(id int primary key, data clob); -> ok - -insert into test values(1, 'abc' || 
space(20)); -> update count: 1 - -script nopasswords nosettings blocksize 10; -> SCRIPT -> -------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CALL SYSTEM_COMBINE_BLOB(-1); -> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_BLOB FOR "org.h2.command.dml.ScriptCommand.combineBlob"; -> CREATE ALIAS IF NOT EXISTS SYSTEM_COMBINE_CLOB FOR "org.h2.command.dml.ScriptCommand.combineClob"; -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, DATA CLOB ); -> CREATE PRIMARY KEY SYSTEM_LOB_STREAM_PRIMARY_KEY ON SYSTEM_LOB_STREAM(ID, PART); -> CREATE TABLE IF NOT EXISTS SYSTEM_LOB_STREAM(ID INT NOT NULL, PART INT NOT NULL, CDATA VARCHAR, BDATA BINARY); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> DROP ALIAS IF EXISTS SYSTEM_COMBINE_BLOB; -> DROP ALIAS IF EXISTS SYSTEM_COMBINE_CLOB; -> DROP TABLE IF EXISTS SYSTEM_LOB_STREAM; -> INSERT INTO PUBLIC.TEST(ID, DATA) VALUES (1, SYSTEM_COMBINE_CLOB(0)); -> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 0, 'abc ', NULL); -> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 1, ' ', NULL); -> INSERT INTO SYSTEM_LOB_STREAM VALUES(0, 2, ' ', NULL); -> rows: 16 - -drop table test; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'); -> update count: 2 - -SELECT DISTINCT * FROM TEST ORDER BY ID; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -DROP TABLE TEST; -> ok - -create table Foo (A varchar(20), B integer); -> ok - -insert into Foo (A, B) values ('abcd', 1), ('abcd', 2); -> update count: 2 - -select * from Foo where A like 'abc%' escape '\' AND B=1; -> A B -> ---- - -> abcd 1 -> rows: 1 - -drop table Foo; -> ok - -create memory table orders ( orderid varchar(10), name varchar(20), customer_id varchar(10), completed numeric(1) not null, verified numeric(1) ); -> ok - -select * 
from information_schema.columns where table_name = 'ORDERS'; -> TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_PRECISION_RADIX NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME TYPE_NAME NULLABLE IS_COMPUTED SELECTIVITY CHECK_CONSTRAINT SEQUENCE_NAME REMARKS SOURCE_DATA_TYPE -> ------------- ------------ ---------- ----------- ---------------- -------------- ----------- --------- ------------------------ ---------------------- ----------------- ----------------------- ------------- ------------------ -------------- --------- -------- ----------- ----------- ---------------- ------------- ------- ---------------- -> SCRIPT PUBLIC ORDERS COMPLETED 4 null NO 3 1 1 1 10 0 Unicode OFF DECIMAL 0 FALSE 50 null null -> SCRIPT PUBLIC ORDERS CUSTOMER_ID 3 null YES 12 10 10 10 10 0 Unicode OFF VARCHAR 1 FALSE 50 null null -> SCRIPT PUBLIC ORDERS NAME 2 null YES 12 20 20 20 10 0 Unicode OFF VARCHAR 1 FALSE 50 null null -> SCRIPT PUBLIC ORDERS ORDERID 1 null YES 12 10 10 10 10 0 Unicode OFF VARCHAR 1 FALSE 50 null null -> SCRIPT PUBLIC ORDERS VERIFIED 5 null YES 3 1 1 1 10 0 Unicode OFF DECIMAL 1 FALSE 50 null null -> rows: 5 - -drop table orders; -> ok - -create table test(id int, d timestamp); -> ok - -insert into test values(1, '2006-01-01 12:00:00.000'); -> update count: 1 - -insert into test values(1, '1999-12-01 23:59:00.000'); -> update count: 1 - -select * from test where d= '1999-12-01 23:59:00.000'; -> ID D -> -- --------------------- -> 1 1999-12-01 23:59:00.0 -> rows: 1 - -select * from test where d= timestamp '2006-01-01 12:00:00.000'; -> ID D -> -- --------------------- -> 1 2006-01-01 12:00:00.0 -> rows: 1 - -drop table test; -> ok - -create table test(id int, b binary); -> ok - -insert into test values(1, 'face'); -> update count: 1 - -select * from test where b = 'FaCe'; -> ID B -> -- ---- -> 1 face -> rows: 1 - -drop table test; -> 
ok - -create sequence main_seq; -> ok - -create schema "TestSchema"; -> ok - -create sequence "TestSchema"."TestSeq"; -> ok - -create sequence "TestSchema"."ABC"; -> ok - -select currval('main_seq'), currval('TestSchema', 'TestSeq'), nextval('TestSchema', 'ABC'); -> CURRVAL('main_seq') CURRVAL('TestSchema', 'TestSeq') NEXTVAL('TestSchema', 'ABC') -> ------------------- -------------------------------- ---------------------------- -> 0 0 1 -> rows: 1 - -set autocommit off; -> ok - -set schema "TestSchema"; -> ok - -select nextval('abc'), currval('Abc'), nextval('TestSchema', 'ABC'); -> NEXTVAL('abc') CURRVAL('Abc') NEXTVAL('TestSchema', 'ABC') -> -------------- -------------- ---------------------------- -> 2 2 3 -> rows: 1 - -set schema public; -> ok - -drop schema "TestSchema"; -> ok - -drop sequence main_seq; -> ok - -create sequence "test"; -> ok - -select nextval('test'); -> NEXTVAL('test') -> --------------- -> 1 -> rows: 1 - -drop sequence "test"; -> ok - -set autocommit on; -> ok - -CREATE TABLE parent(id int PRIMARY KEY); -> ok - -CREATE TABLE child(parentid int REFERENCES parent); -> ok - -select * from INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> --------------- -------------- ------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------------ ------------- ------------- -> SCRIPT PUBLIC PARENT ID SCRIPT PUBLIC CHILD PARENTID 1 1 1 CONSTRAINT_3 PRIMARY_KEY_8 7 -> rows: 1 - -ALTER TABLE parent ADD COLUMN name varchar; -> ok - -select * from INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> --------------- -------------- 
------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------------ -------------- ------------- -> SCRIPT PUBLIC PARENT ID SCRIPT PUBLIC CHILD PARENTID 1 1 1 CONSTRAINT_3 PRIMARY_KEY_82 7 -> rows: 1 - -drop table parent, child; -> ok - -create table test(id int); -> ok - -create schema TEST_SCHEMA; -> ok - -set autocommit false; -> ok - -set schema TEST_SCHEMA; -> ok - -create table test(id int, name varchar); -> ok - -explain select * from test; -> PLAN -> -------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM TEST_SCHEMA.TEST /* TEST_SCHEMA.TEST.tableScan */ -> rows: 1 - -explain select * from public.test; -> PLAN -> ----------------------------------------------------------- -> SELECT TEST.ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -drop schema TEST_SCHEMA; -> ok - -set autocommit true; -> ok - -set schema public; -> ok - -select * from test; -> ID -> -- -> rows: 0 - -drop table test; -> ok - -create table content(thread_id int, parent_id int); -> ok - -alter table content add constraint content_parent_id check (parent_id = thread_id) or (parent_id is null) or ( parent_id in (select thread_id from content)); -> ok - -create index content_thread_id ON content(thread_id); -> ok - -insert into content values(0, 0), (0, 0); -> update count: 2 - -insert into content values(0, 1); -> exception - -insert into content values(1, 1), (2, 2); -> update count: 2 - -insert into content values(2, 1); -> update count: 1 - -insert into content values(2, 3); -> exception - -drop table content; -> ok - -select x/10 y from system_range(1, 100) group by x/10; -> Y -> -- -> 0 -> 1 -> 10 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> rows: 11 - -select timestamp '2001-02-03T10:30:33'; -> TIMESTAMP '2001-02-03 10:30:33.0' -> --------------------------------- -> 2001-02-03 10:30:33.0 -> rows: 1 - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME 
VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'); -> update count: 2 - -select * from test where id in (select id from test); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -select * from test where id in ((select id from test)); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -select * from test where id in (((select id from test))); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -DROP TABLE TEST; -> ok - -create table test(id int); -> ok - -insert into test (select x from system_range(1, 100)); -> update count: 100 - -select id/1000 from test group by id/1000; -> ID / 1000 -> --------- -> 0 -> rows: 1 - -select id/(10*100) from test group by id/(10*100); -> ID / 1000 -> --------- -> 0 -> rows: 1 - -select id/1000 from test group by id/100; -> exception - -drop table test; -> ok - -select (x/10000) from system_range(10, 20) group by (x/10000); -> X / 10000 -> --------- -> 0 -> rows: 1 - -select sum(x), (x/10) from system_range(10, 100) group by (x/10); -> SUM(X) X / 10 -> ------ ------ -> 100 10 -> 145 1 -> 245 2 -> 345 3 -> 445 4 -> 545 5 -> 645 6 -> 745 7 -> 845 8 -> 945 9 -> rows: 10 - -CREATE FORCE VIEW ADDRESS_VIEW AS SELECT * FROM ADDRESS; -> ok - -CREATE memory TABLE ADDRESS(ID INT); -> ok - -alter view address_view recompile; -> ok - -select * from ADDRESS_VIEW; -> ID -> -- -> rows: 0 - -drop view address_view; -> ok - -drop table address; -> ok - -select cast('12345678123456781234567812345678' as uuid); -> '12345678-1234-5678-1234-567812345678' -> -------------------------------------- -> 12345678-1234-5678-1234-567812345678 -> rows: 1 - -select cast('000102030405060708090a0b0c0d0e0f' as uuid); -> '00010203-0405-0607-0809-0a0b0c0d0e0f' -> -------------------------------------- -> 00010203-0405-0607-0809-0a0b0c0d0e0f -> rows: 1 - -CREATE ALIAS PARSE_INT2 FOR "java.lang.Integer.parseInt(java.lang.String, int)"; -> ok - -select min(SUBSTRING(random_uuid(), 15,1)='4') from system_range(1, 10); -> 
MIN(SUBSTRING(RANDOM_UUID(), 15, 1) = '4') -> ------------------------------------------ -> TRUE -> rows: 1 - -select min(8=bitand(12, PARSE_INT2(SUBSTRING(random_uuid(), 20,1), 16))) from system_range(1, 10); -> MIN(8 = BITAND(12, PUBLIC.PARSE_INT2(SUBSTRING(RANDOM_UUID(), 20, 1), 16))) -> --------------------------------------------------------------------------- -> TRUE -> rows: 1 - -drop alias PARSE_INT2; -> ok - -create memory table test(name varchar check(name = upper(name))); -> ok - -insert into test values(null); -> update count: 1 - -insert into test values('aa'); -> exception - -insert into test values('AA'); -> update count: 1 - -script nodata nopasswords nosettings; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 2 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE PUBLIC.TEST( NAME VARCHAR CHECK (NAME = UPPER(NAME)) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 3 - -drop table test; -> ok - -create domain email as varchar(200) check (position('@' in value) > 1); -> ok - -create domain gmail as email default '@gmail.com' check (position('gmail' in value) > 1); -> ok - -create memory table address(id int primary key, name email, name2 gmail); -> ok - -insert into address(id, name, name2) values(1, 'test@abc', 'test@gmail.com'); -> update count: 1 - -insert into address(id, name, name2) values(2, 'test@abc', 'test@acme'); -> exception - -insert into address(id, name, name2) values(3, 'test_abc', 'test@gmail'); -> exception - -insert into address2(name) values('test@abc'); -> exception - -CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT '' NOT NULL; -> ok - -CREATE DOMAIN IF NOT EXISTS STRING AS VARCHAR(255) DEFAULT '' NOT NULL; -> ok - -CREATE DOMAIN STRING1 AS VARCHAR NULL; -> ok - -CREATE DOMAIN STRING2 AS VARCHAR NOT NULL; -> ok - -CREATE DOMAIN STRING3 AS VARCHAR DEFAULT ''; -> ok - -create domain string_x as string3; -> ok - -create memory table test(a string, b string1, c 
string2, d string3); -> ok - -insert into test(c) values('x'); -> update count: 1 - -select * from test; -> A B C D -> - ---- - ------- -> null x -> rows: 1 - -select DOMAIN_NAME, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, PRECISION, SCALE, TYPE_NAME, SELECTIVITY, CHECK_CONSTRAINT, REMARKS, SQL from information_schema.domains; -> DOMAIN_NAME COLUMN_DEFAULT IS_NULLABLE DATA_TYPE PRECISION SCALE TYPE_NAME SELECTIVITY CHECK_CONSTRAINT REMARKS SQL -> ----------- -------------- ----------- --------- ---------- ----- --------- ----------- --------------------------------------------------------------- ------- ------------------------------------------------------------------------------------------------------------------------------ -> EMAIL null YES 12 200 0 VARCHAR 50 (POSITION('@', VALUE) > 1) CREATE DOMAIN EMAIL AS VARCHAR(200) CHECK (POSITION('@', VALUE) > 1) -> GMAIL '@gmail.com' YES 12 200 0 VARCHAR 50 ((POSITION('@', VALUE) > 1) AND (POSITION('gmail', VALUE) > 1)) CREATE DOMAIN GMAIL AS VARCHAR(200) DEFAULT '@gmail.com' CHECK ((POSITION('@', VALUE) > 1) AND (POSITION('gmail', VALUE) > 1)) -> STRING '' NO 12 255 0 VARCHAR 50 CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT '' NOT NULL -> STRING1 null YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN STRING1 AS VARCHAR -> STRING2 null NO 12 2147483647 0 VARCHAR 50 CREATE DOMAIN STRING2 AS VARCHAR NOT NULL -> STRING3 '' YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN STRING3 AS VARCHAR DEFAULT '' -> STRING_X '' YES 12 2147483647 0 VARCHAR 50 CREATE DOMAIN STRING_X AS VARCHAR DEFAULT '' -> rows: 7 - -script nodata nopasswords nosettings; -> SCRIPT -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.ADDRESS; -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.ADDRESS ADD CONSTRAINT PUBLIC.CONSTRAINT_E PRIMARY 
KEY(ID); -> CREATE DOMAIN EMAIL AS VARCHAR(200) CHECK (POSITION('@', VALUE) > 1); -> CREATE DOMAIN GMAIL AS VARCHAR(200) DEFAULT '@gmail.com' CHECK ((POSITION('@', VALUE) > 1) AND (POSITION('gmail', VALUE) > 1)); -> CREATE DOMAIN STRING AS VARCHAR(255) DEFAULT '' NOT NULL; -> CREATE DOMAIN STRING1 AS VARCHAR; -> CREATE DOMAIN STRING2 AS VARCHAR NOT NULL; -> CREATE DOMAIN STRING3 AS VARCHAR DEFAULT ''; -> CREATE DOMAIN STRING_X AS VARCHAR DEFAULT ''; -> CREATE MEMORY TABLE PUBLIC.ADDRESS( ID INT NOT NULL, NAME VARCHAR(200) CHECK (POSITION('@', NAME) > 1), NAME2 VARCHAR(200) DEFAULT '@gmail.com' CHECK ((POSITION('@', NAME2) > 1) AND (POSITION('gmail', NAME2) > 1)) ); -> CREATE MEMORY TABLE PUBLIC.TEST( A VARCHAR(255) DEFAULT '' NOT NULL, B VARCHAR, C VARCHAR NOT NULL, D VARCHAR DEFAULT '' ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 13 - -drop table test; -> ok - -drop domain string; -> ok - -drop domain string1; -> ok - -drop domain string2; -> ok - -drop domain string3; -> ok - -drop domain string_x; -> ok - -drop table address; -> ok - -drop domain email; -> ok - -drop domain gmail; -> ok - -create force view address_view as select * from address; -> ok - -create table address(id identity, name varchar check instr(value, '@') > 1); -> exception - -create table address(id identity, name varchar check instr(name, '@') > 1); -> ok - -drop view if exists address_view; -> ok - -drop table address; -> ok - -create memory table a(k10 blob(10k), m20 blob(20m), g30 clob(30g)); -> ok - -script NODATA NOPASSWORDS NOSETTINGS drop; -> SCRIPT -> ------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A; -> CREATE MEMORY TABLE PUBLIC.A( K10 BLOB(10240), M20 BLOB(20971520), G30 CLOB(32212254720) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> DROP TABLE IF EXISTS PUBLIC.A CASCADE; -> rows: 4 - -create table b(); -> ok - -create table c(); -> ok - -drop table 
information_schema.columns; -> exception - -create table columns as select * from information_schema.columns; -> ok - -create table tables as select * from information_schema.tables where false; -> ok - -create table dual2 as select 1 from dual; -> ok - -select * from dual2; -> 1 -> - -> 1 -> rows: 1 - -drop table dual2, columns, tables; -> ok - -drop table a, a; -> ok - -drop table b, c; -> ok - -CREATE SCHEMA CONST; -> ok - -CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; -> ok - -COMMENT ON CONSTANT ONE IS 'Eins'; -> ok - -CREATE CONSTANT IF NOT EXISTS ONE VALUE 1; -> ok - -CREATE CONSTANT CONST.ONE VALUE 1; -> ok - -SELECT CONSTANT_SCHEMA, CONSTANT_NAME, DATA_TYPE, REMARKS, SQL FROM INFORMATION_SCHEMA.CONSTANTS; -> CONSTANT_SCHEMA CONSTANT_NAME DATA_TYPE REMARKS SQL -> --------------- ------------- --------- ------- --- -> CONST ONE 4 1 -> PUBLIC ONE 4 Eins 1 -> rows: 2 - -SELECT ONE, CONST.ONE FROM DUAL; -> 1 1 -> - - -> 1 1 -> rows: 1 - -COMMENT ON CONSTANT ONE IS NULL; -> ok - -DROP SCHEMA CONST; -> ok - -SELECT CONSTANT_SCHEMA, CONSTANT_NAME, DATA_TYPE, REMARKS, SQL FROM INFORMATION_SCHEMA.CONSTANTS; -> CONSTANT_SCHEMA CONSTANT_NAME DATA_TYPE REMARKS SQL -> --------------- ------------- --------- ------- --- -> PUBLIC ONE 4 1 -> rows: 1 - -DROP CONSTANT ONE; -> ok - -DROP CONSTANT IF EXISTS ONE; -> ok - -DROP CONSTANT IF EXISTS ONE; -> ok - -CREATE TABLE A (ID_A int primary key); -> ok - -CREATE TABLE B (ID_B int primary key); -> ok - -CREATE TABLE C (ID_C int primary key); -> ok - -insert into A values (1); -> update count: 1 - -insert into A values (2); -> update count: 1 - -insert into B values (1); -> update count: 1 - -insert into C values (1); -> update count: 1 - -SELECT * FROM C WHERE NOT EXISTS ((SELECT ID_A FROM A) EXCEPT (SELECT ID_B FROM B)); -> ID_C -> ---- -> rows: 0 - -(SELECT ID_A FROM A) EXCEPT (SELECT ID_B FROM B); -> ID_A -> ---- -> 2 -> rows: 1 - -drop table a; -> ok - -drop table b; -> ok - -drop table c; -> ok - -CREATE TABLE X (ID INTEGER 
PRIMARY KEY); -> ok - -insert into x values(0), (1), (10); -> update count: 3 - -SELECT t1.ID, (SELECT t1.id || ':' || AVG(t2.ID) FROM X t2) FROM X t1; -> ID SELECT ((T1.ID || ':') || AVG(T2.ID)) FROM PUBLIC.X T2 /* PUBLIC.X.tableScan */ /* scanCount: 4 */ -> -- -------------------------------------------------------------------------------------------------- -> 0 0:3 -> 1 1:3 -> 10 10:3 -> rows: 3 - -drop table x; -> ok - -select (select t1.x from system_range(1,1) t2) from system_range(1,1) t1; -> SELECT T1.X FROM SYSTEM_RANGE(1, 1) T2 /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ -> ---------------------------------------------------------------------------------- -> 1 -> rows: 1 - -create table test(id int primary key, name varchar); -> ok - -insert into test values(rownum, '11'), (rownum, '22'), (rownum, '33'); -> update count: 3 - -select * from test order by id; -> ID NAME -> -- ---- -> 1 11 -> 2 22 -> 3 33 -> rows (ordered): 3 - -select rownum, (select count(*) from test), rownum from test; -> ROWNUM() SELECT COUNT(*) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */ ROWNUM() -> -------- -------------------------------------------------------------------------------- -------- -> 1 3 1 -> 2 3 2 -> 3 3 3 -> rows: 3 - -delete from test t0 where rownum<2; -> update count: 1 - -select rownum, * from (select * from test where id>1 order by id desc); -> ROWNUM() ID NAME -> -------- -- ---- -> 1 3 33 -> 2 2 22 -> rows (ordered): 2 - -update test set name='x' where rownum<2; -> update count: 1 - -select * from test; -> ID NAME -> -- ---- -> 2 x -> 3 33 -> rows: 2 - -merge into test values(2, 'r' || rownum), (10, rownum), (11, rownum); -> update count: 3 - -select * from test; -> ID NAME -> -- ---- -> 10 2 -> 11 3 -> 2 r1 -> 3 33 -> rows: 4 - -call rownum; -> ROWNUM() -> -------- -> 1 -> rows: 1 - -drop table test; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -create index idx_test_name on test(name); -> ok - -INSERT INTO 
TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -set ignorecase true; -> ok - -CREATE TABLE TEST2(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -create unique index idx_test2_name on test2(name); -> ok - -INSERT INTO TEST2 VALUES(1, 'HElLo'); -> update count: 1 - -INSERT INTO TEST2 VALUES(2, 'World'); -> update count: 1 - -INSERT INTO TEST2 VALUES(3, 'WoRlD'); -> exception - -drop index idx_test2_name; -> ok - -select * from test where name='HELLO'; -> ID NAME -> -- ---- -> rows: 0 - -select * from test2 where name='HELLO'; -> ID NAME -> -- ----- -> 1 HElLo -> rows: 1 - -select * from test where name like 'HELLO'; -> ID NAME -> -- ---- -> rows: 0 - -select * from test2 where name like 'HELLO'; -> ID NAME -> -- ----- -> 1 HElLo -> rows: 1 - -explain plan for select * from test2, test where test2.name = test.name; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST2.ID, TEST2.NAME, TEST.ID, TEST.NAME FROM PUBLIC.TEST2 /* PUBLIC.TEST2.tableScan */ INNER JOIN PUBLIC.TEST /* PUBLIC.IDX_TEST_NAME: NAME = TEST2.NAME */ ON 1=1 WHERE TEST2.NAME = TEST.NAME -> rows: 1 - -select * from test2, test where test2.name = test.name; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 HElLo 1 Hello -> 2 World 2 World -> rows: 2 - -explain plan for select * from test, test2 where test2.name = test.name; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME, TEST2.ID, TEST2.NAME FROM PUBLIC.TEST2 /* PUBLIC.TEST2.tableScan */ INNER JOIN PUBLIC.TEST /* PUBLIC.IDX_TEST_NAME: NAME = TEST2.NAME */ ON 1=1 WHERE TEST2.NAME = TEST.NAME -> rows: 1 - -select * from test, test2 where 
test2.name = test.name; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 HElLo -> 2 World 2 World -> rows: 2 - -create index idx_test2_name on test2(name); -> ok - -explain plan for select * from test2, test where test2.name = test.name; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST2.ID, TEST2.NAME, TEST.ID, TEST.NAME FROM PUBLIC.TEST2 /* PUBLIC.TEST2.tableScan */ INNER JOIN PUBLIC.TEST /* PUBLIC.IDX_TEST_NAME: NAME = TEST2.NAME */ ON 1=1 WHERE TEST2.NAME = TEST.NAME -> rows: 1 - -select * from test2, test where test2.name = test.name; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 HElLo 1 Hello -> 2 World 2 World -> rows: 2 - -explain plan for select * from test, test2 where test2.name = test.name; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT TEST.ID, TEST.NAME, TEST2.ID, TEST2.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ INNER JOIN PUBLIC.TEST2 /* PUBLIC.IDX_TEST2_NAME: NAME = TEST.NAME */ ON 1=1 WHERE TEST2.NAME = TEST.NAME -> rows: 1 - -select * from test, test2 where test2.name = test.name; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 HElLo -> 2 World 2 World -> rows: 2 - -DROP TABLE IF EXISTS TEST; -> ok - -DROP TABLE IF EXISTS TEST2; -> ok - -set ignorecase false; -> ok - -create table test(f1 varchar, f2 varchar); -> ok - -insert into test values('abc','222'); -> update count: 1 - -insert into test values('abc','111'); -> update count: 1 - -insert into test values('abc','333'); -> update count: 1 - -SELECT t.f1, t.f2 FROM test t ORDER BY t.f2; -> F1 F2 -> --- --- -> abc 111 -> abc 222 -> abc 333 -> rows (ordered): 3 - -SELECT t1.f1, t1.f2, t2.f1, t2.f2 FROM test t1, test t2 ORDER BY t2.f2; -> 
F1 F2 F1 F2 -> --- --- --- --- -> abc 222 abc 111 -> abc 111 abc 111 -> abc 333 abc 111 -> abc 222 abc 222 -> abc 111 abc 222 -> abc 333 abc 222 -> abc 222 abc 333 -> abc 111 abc 333 -> abc 333 abc 333 -> rows (ordered): 9 - -drop table if exists test; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -explain select t0.id, t1.id from test t0, test t1 order by t0.id, t1.id; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T0.ID, T1.ID FROM PUBLIC.TEST T0 /* PUBLIC.TEST.tableScan */ INNER JOIN PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ ON 1=1 ORDER BY 1, 2 -> rows (ordered): 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -SELECT id, sum(id) FROM test GROUP BY id ORDER BY id*sum(id); -> ID SUM(ID) -> -- ------- -> 1 1 -> 2 2 -> rows (ordered): 2 - -select * -from test t1 -inner join test t2 on t2.id=t1.id -inner join test t3 on t3.id=t2.id -where exists (select 1 from test t4 where t2.id=t4.id); -> ID NAME ID NAME ID NAME -> -- ----- -- ----- -- ----- -> 1 Hello 1 Hello 1 Hello -> 2 World 2 World 2 World -> rows: 2 - -explain select * from test t1 where id in(select id from test t2 where t1.id=t2.id); -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE ID IN( SELECT ID FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE T1.ID = T2.ID) -> rows: 1 - -select * from test t1 where id in(select id from test t2 where t1.id=t2.id); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -explain select * from test t1 where id in(id, id+1); -> PLAN -> 
----------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE ID IN(ID, (ID + 1)) -> rows: 1 - -select * from test t1 where id in(id, id+1); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -explain select * from test t1 where id in(id); -> PLAN -> ----------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE ID = ID -> rows: 1 - -select * from test t1 where id in(id); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -explain select * from test t1 where id in(select id from test); -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/) */ WHERE ID IN( SELECT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) -> rows: 1 - -select * from test t1 where id in(select id from test); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -explain select * from test t1 where id in(1, select max(id) from test); -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/ /++ direct lookup ++/)) */ WHERE ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ /* direct lookup */)) -> rows: 1 - -select * from test t1 where id in(1, select max(id) from test); -> ID NAME -> -- ----- -> 1 Hello -> 2 
World -> rows: 2 - -explain select * from test t1 where id in(1, select max(id) from test t2 where t1.id=t2.id); -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE ID IN(1, (SELECT MAX(ID) FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ WHERE T1.ID = T2.ID)) -> rows: 1 - -select * from test t1 where id in(1, select max(id) from test t2 where t1.id=t2.id); -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows: 2 - -DROP TABLE TEST; -> ok - -create force view t1 as select * from t1; -> ok - -select * from t1; -> exception - -drop table t1; -> ok - -create table one (id int primary key); -> ok - -create table two (id int primary key, val date); -> ok - -insert into one values(0); -> update count: 1 - -insert into one values(1); -> update count: 1 - -insert into one values(2); -> update count: 1 - -insert into two values(0, null); -> update count: 1 - -insert into two values(1, DATE'2006-01-01'); -> update count: 1 - -insert into two values(2, DATE'2006-07-01'); -> update count: 1 - -insert into two values(3, null); -> update count: 1 - -select * from one; -> ID -> -- -> 0 -> 1 -> 2 -> rows: 3 - -select * from two; -> ID VAL -> -- ---------- -> 0 null -> 1 2006-01-01 -> 2 2006-07-01 -> 3 null -> rows: 4 - --- Query #1: should return one row --- okay -select * from one natural join two left join two three on -one.id=three.id left join one four on two.id=four.id where three.val -is null; -> ID VAL ID VAL ID -> -- ---- -- ---- -- -> 0 null 0 null 0 -> rows: 1 - --- Query #2: should return one row --- okay -select * from one natural join two left join two three on -one.id=three.id left join one four on two.id=four.id where -three.val>=DATE'2006-07-01'; -> ID VAL ID VAL ID -> -- ---------- -- ---------- -- -> 2 2006-07-01 2 2006-07-01 2 -> rows: 1 
- --- Query #3: should return the union of #1 and #2 -select * from one natural join two left join two three on -one.id=three.id left join one four on two.id=four.id where three.val -is null or three.val>=DATE'2006-07-01'; -> ID VAL ID VAL ID -> -- ---------- -- ---------- -- -> 0 null 0 null 0 -> 2 2006-07-01 2 2006-07-01 2 -> rows: 2 - -explain select * from one natural join two left join two three on -one.id=three.id left join one four on two.id=four.id where three.val -is null or three.val>=DATE'2006-07-01'; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT ONE.ID, TWO.VAL, THREE.ID, THREE.VAL, FOUR.ID FROM PUBLIC.ONE /* PUBLIC.ONE.tableScan */ INNER JOIN PUBLIC.TWO /* PUBLIC.PRIMARY_KEY_14: ID = PUBLIC.ONE.ID AND ID = PUBLIC.ONE.ID */ ON 1=1 /* WHERE PUBLIC.ONE.ID = PUBLIC.TWO.ID */ LEFT OUTER JOIN PUBLIC.TWO THREE /* PUBLIC.PRIMARY_KEY_14: ID = ONE.ID */ ON ONE.ID = THREE.ID LEFT OUTER JOIN PUBLIC.ONE FOUR /* PUBLIC.PRIMARY_KEY_1: ID = TWO.ID */ ON TWO.ID = FOUR.ID WHERE (PUBLIC.ONE.ID = PUBLIC.TWO.ID) AND ((THREE.VAL IS NULL) OR (THREE.VAL >= DATE '2006-07-01')) -> rows: 1 - --- Query #4: same as #3, but the joins have been manually re-ordered --- Correct result set, same as expected for #3. 
-select * from one natural join two left join one four on -two.id=four.id left join two three on one.id=three.id where three.val -is null or three.val>=DATE'2006-07-01'; -> ID VAL ID ID VAL -> -- ---------- -- -- ---------- -> 0 null 0 0 null -> 2 2006-07-01 2 2 2006-07-01 -> rows: 2 - -drop table one; -> ok - -drop table two; -> ok - -CREATE TABLE TEST(id INT PRIMARY KEY, foo BIGINT); -> ok - -INSERT INTO TEST VALUES(1, 100); -> update count: 1 - -INSERT INTO TEST VALUES(2, 123456789012345678); -> update count: 1 - -SELECT * FROM TEST WHERE foo = 123456789014567; -> ID FOO -> -- --- -> rows: 0 - -DROP TABLE IF EXISTS TEST; -> ok - -create table test1 (id int primary key); -> ok - -create table test2 (id int primary key); -> ok - -create table test3 (id int primary key); -> ok - -insert into test1 values(1); -> update count: 1 - -insert into test2 values(1); -> update count: 1 - -insert into test3 values(1); -> update count: 1 - -select * from test1 -inner join test2 on test1.id=test2.id left -outer join test3 on test2.id=test3.id -where test3.id is null; -> ID ID ID -> -- -- -- -> rows: 0 - -explain select * from test1 -inner join test2 on test1.id=test2.id left -outer join test3 on test2.id=test3.id -where test3.id is null; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST1.ID, TEST2.ID, TEST3.ID FROM PUBLIC.TEST1 /* PUBLIC.TEST1.tableScan */ INNER JOIN PUBLIC.TEST2 /* PUBLIC.PRIMARY_KEY_4C: ID = TEST1.ID AND ID = TEST1.ID */ ON 1=1 /* WHERE TEST1.ID = TEST2.ID */ LEFT OUTER JOIN PUBLIC.TEST3 /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON TEST2.ID = TEST3.ID WHERE (TEST3.ID IS NULL) AND (TEST1.ID = TEST2.ID) -> rows: 1 - -insert into test1 
select x from system_range(2, 1000); -> update count: 999 - -select * from test1 -inner join test2 on test1.id=test2.id -left outer join test3 on test2.id=test3.id -where test3.id is null; -> ID ID ID -> -- -- -- -> rows: 0 - -explain select * from test1 -inner join test2 on test1.id=test2.id -left outer join test3 on test2.id=test3.id -where test3.id is null; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST1.ID, TEST2.ID, TEST3.ID FROM PUBLIC.TEST2 /* PUBLIC.TEST2.tableScan */ LEFT OUTER JOIN PUBLIC.TEST3 /* PUBLIC.PRIMARY_KEY_4C0: ID = TEST2.ID */ ON TEST2.ID = TEST3.ID INNER JOIN PUBLIC.TEST1 /* PUBLIC.PRIMARY_KEY_4: ID = TEST2.ID */ ON 1=1 WHERE (TEST3.ID IS NULL) AND (TEST1.ID = TEST2.ID) -> rows: 1 - -SELECT TEST1.ID, TEST2.ID, TEST3.ID -FROM TEST2 -LEFT OUTER JOIN TEST3 ON TEST2.ID = TEST3.ID -INNER JOIN TEST1 -WHERE TEST3.ID IS NULL AND TEST1.ID = TEST2.ID; -> ID ID ID -> -- -- -- -> rows: 0 - -drop table test1; -> ok - -drop table test2; -> ok - -drop table test3; -> ok - -create table test(v boolean); -> ok - -insert into test values(null), (true), (false); -> update count: 3 - -SELECT CASE WHEN NOT (false IN (null)) THEN false END; -> NULL -> ---- -> null -> rows: 1 - -select a.v as av, b.v as bv, a.v IN (b.v), not a.v IN (b.v) from test a, test b; -> AV BV A.V = B.V NOT (A.V = B.V) -> ----- ----- --------- --------------- -> FALSE FALSE TRUE FALSE -> FALSE TRUE FALSE TRUE -> FALSE null null null -> TRUE FALSE FALSE TRUE -> TRUE TRUE TRUE FALSE -> TRUE null null null -> null FALSE null null -> null TRUE null null -> null null null null -> rows: 9 - -select a.v as av, b.v as bv, a.v IN (b.v, null), not a.v IN (b.v, null) from test a, test b; -> AV BV A.V IN(B.V, NULL) NOT (A.V 
IN(B.V, NULL)) -> ----- ----- ----------------- ----------------------- -> FALSE FALSE TRUE FALSE -> FALSE TRUE null null -> FALSE null null null -> TRUE FALSE null null -> TRUE TRUE TRUE FALSE -> TRUE null null null -> null FALSE null null -> null TRUE null null -> null null null null -> rows: 9 - -drop table test; -> ok - -SELECT CASE WHEN NOT (false IN (null)) THEN false END; -> NULL -> ---- -> null -> rows: 1 - -SELECT DATEDIFF('SECOND', '1900-01-01 00:00:00.001', '1900-01-01 00:00:00.002'), DATEDIFF('SECOND', '2000-01-01 00:00:00.001', '2000-01-01 00:00:00.002'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('SECOND', '1900-01-01 00:00:00.000', '1900-01-01 00:00:00.001'), DATEDIFF('SECOND', '2000-01-01 00:00:00.000', '2000-01-01 00:00:00.001'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('MINUTE', '1900-01-01 00:00:00.000', '1900-01-01 00:00:01.000'), DATEDIFF('MINUTE', '2000-01-01 00:00:00.000', '2000-01-01 00:00:01.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('MINUTE', '1900-01-01 00:00:01.000', '1900-01-01 00:00:02.000'), DATEDIFF('MINUTE', '2000-01-01 00:00:01.000', '2000-01-01 00:00:02.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('HOUR', '1900-01-01 00:00:00.000', '1900-01-01 00:00:01.000'), DATEDIFF('HOUR', '2000-01-01 00:00:00.000', '2000-01-01 00:00:01.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('HOUR', '1900-01-01 00:00:00.001', '1900-01-01 00:00:01.000'), DATEDIFF('HOUR', '2000-01-01 00:00:00.001', '2000-01-01 00:00:01.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('HOUR', '1900-01-01 01:00:00.000', '1900-01-01 01:00:01.000'), DATEDIFF('HOUR', '2000-01-01 01:00:00.000', '2000-01-01 01:00:01.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -SELECT DATEDIFF('HOUR', '1900-01-01 01:00:00.001', '1900-01-01 01:00:01.000'), DATEDIFF('HOUR', '2000-01-01 01:00:00.001', '2000-01-01 01:00:01.000'); -> 0 0 -> - - -> 0 0 -> rows: 1 - -create table test(id int); -> ok - -insert into test values(1), (2), (3), 
(4); -> update count: 4 - -(select * from test a, test b) minus (select * from test a, test b); -> ID ID -> -- -- -> rows: 0 - -drop table test; -> ok - -call datediff('MS', TIMESTAMP '2001-02-03 04:05:06.789001', TIMESTAMP '2001-02-03 04:05:06.789002'); -> 0 -> - -> 0 -> rows: 1 - -call datediff('MS', TIMESTAMP '1900-01-01 00:00:01.000', TIMESTAMP '2008-01-01 00:00:00.000'); -> 3408134399000 -> ------------- -> 3408134399000 -> rows: 1 - -call select 1.0/3.0*3.0, 100.0/2.0, -25.0/100.0, 0.0/3.0, 6.9/2.0, 0.72179425150347250912311550800000 / 5314251955.21; -> SELECT 0.999999999999999999999999990, 50, -0.25, 0, 3.45, 1.35822361752313607260107721120531135706133161972E-10 FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX */ /* scanCount: 2 */ -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (0.999999999999999999999999990, 50, -0.25, 0, 3.45, 1.35822361752313607260107721120531135706133161972E-10) -> rows: 1 - -call dateadd('MS', 1, TIMESTAMP '2001-02-03 04:05:06.789001'); -> TIMESTAMP '2001-02-03 04:05:06.790001' -> -------------------------------------- -> 2001-02-03 04:05:06.790001 -> rows: 1 - -CALL 1 /* comment */ ;; -> 1 -> - -> 1 -> rows: 1 - -CALL 1 /* comment */ ; -> 1 -> - -> 1 -> rows: 1 - -call /* remark * / * /* ** // end */ 1; -> 1 -> - -> 1 -> rows: 1 - -call (select x from dual where x is null); -> SELECT X FROM SYSTEM_RANGE(1, 1) /* PUBLIC.RANGE_INDEX: X IS NULL */ /* scanCount: 1 */ WHERE X IS NULL -> ------------------------------------------------------------------------------------------------------- -> null -> rows: 1 - -create sequence test_seq; -> ok - -create table test(id int primary key, parent int); -> ok - -create index ni on test(parent); -> ok - -alter table test add constraint nu unique(parent); -> ok - -alter table test add constraint fk foreign key(parent) references(id); -> ok - -select TABLE_NAME, 
NON_UNIQUE, INDEX_NAME, ORDINAL_POSITION, COLUMN_NAME, CARDINALITY, PRIMARY_KEY from INFORMATION_SCHEMA.INDEXES; -> TABLE_NAME NON_UNIQUE INDEX_NAME ORDINAL_POSITION COLUMN_NAME CARDINALITY PRIMARY_KEY -> ---------- ---------- ------------- ---------------- ----------- ----------- ----------- -> TEST FALSE NU_INDEX_2 1 PARENT 0 FALSE -> TEST FALSE PRIMARY_KEY_2 1 ID 0 TRUE -> TEST TRUE NI 1 PARENT 0 FALSE -> rows: 3 - -select SEQUENCE_NAME, CURRENT_VALUE, INCREMENT, IS_GENERATED, REMARKS from INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT IS_GENERATED REMARKS -> ------------- ------------- --------- ------------ ------- -> TEST_SEQ 0 1 FALSE -> rows: 1 - -drop table test; -> ok - -drop sequence test_seq; -> ok - -create table test(id int); -> ok - -insert into test values(1), (2); -> update count: 2 - -select count(*) from test where id in ((select id from test where 1=0)); -> COUNT(*) -> -------- -> 0 -> rows: 1 - -select count(*) from test where id = ((select id from test where 1=0)+1); -> COUNT(*) -> -------- -> 0 -> rows: 1 - -select count(*) from test where id = (select id from test where 1=0); -> COUNT(*) -> -------- -> 0 -> rows: 1 - -select count(*) from test where id in ((select id from test)); -> COUNT(*) -> -------- -> 2 -> rows: 1 - -select count(*) from test where id = ((select id from test)); -> exception - -select count(*) from test where id = ((select id from test), 1); -> exception - -select (select id from test where 1=0) from test; -> SELECT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE -> ------------------------------------------------------------------------- -> null -> null -> rows: 2 - -drop table test; -> ok - -select TRIM(' ' FROM ' abc ') from dual; -> 'abc' -> ----- -> abc -> rows: 1 - -create table test(id int primary key, a boolean); -> ok - -insert into test values(1, 'Y'); -> update count: 1 - -call select a from test order by id; -> SELECT A FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2 */ /* 
scanCount: 2 */ ORDER BY =ID /* index sorted */ -> ------------------------------------------------------------------------------------------------------- -> TRUE -> rows (ordered): 1 - -select select a from test order by id; -> SELECT A FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2 */ /* scanCount: 2 */ ORDER BY =ID /* index sorted */ -> ------------------------------------------------------------------------------------------------------- -> TRUE -> rows (ordered): 1 - -insert into test values(2, 'N'); -> update count: 1 - -insert into test values(3, '1'); -> update count: 1 - -insert into test values(4, '0'); -> update count: 1 - -insert into test values(5, 'T'); -> update count: 1 - -insert into test values(6, 'F'); -> update count: 1 - -select max(id) from test where id = max(id) group by id; -> exception - -select * from test where a=TRUE=a; -> ID A -> -- ----- -> 1 TRUE -> 2 FALSE -> 3 TRUE -> 4 FALSE -> 5 TRUE -> 6 FALSE -> rows: 6 - -drop table test; -> ok - -CREATE memory TABLE TEST(ID INT PRIMARY KEY, PARENT INT REFERENCES TEST); -> ok - -CREATE memory TABLE s(S_NO VARCHAR(5) PRIMARY KEY, name VARCHAR(16), city VARCHAR(16)); -> ok - -CREATE memory TABLE p(p_no VARCHAR(5) PRIMARY KEY, descr VARCHAR(16), color VARCHAR(8)); -> ok - -CREATE memory TABLE sp1(S_NO VARCHAR(5) REFERENCES s, p_no VARCHAR(5) REFERENCES p, qty INT, PRIMARY KEY (S_NO, p_no)); -> ok - -CREATE memory TABLE sp2(S_NO VARCHAR(5), p_no VARCHAR(5), qty INT, constraint c1 FOREIGN KEY (S_NO) references s, PRIMARY KEY (S_NO, p_no)); -> ok - -script NOPASSWORDS NOSETTINGS; -> SCRIPT -> ------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.P; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.S; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP1; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.SP2; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.P ADD CONSTRAINT PUBLIC.CONSTRAINT_50_0 PRIMARY KEY(P_NO); 
-> ALTER TABLE PUBLIC.S ADD CONSTRAINT PUBLIC.CONSTRAINT_5 PRIMARY KEY(S_NO); -> ALTER TABLE PUBLIC.SP1 ADD CONSTRAINT PUBLIC.CONSTRAINT_1 FOREIGN KEY(S_NO) REFERENCES PUBLIC.S(S_NO) NOCHECK; -> ALTER TABLE PUBLIC.SP1 ADD CONSTRAINT PUBLIC.CONSTRAINT_14 FOREIGN KEY(P_NO) REFERENCES PUBLIC.P(P_NO) NOCHECK; -> ALTER TABLE PUBLIC.SP1 ADD CONSTRAINT PUBLIC.CONSTRAINT_141 PRIMARY KEY(S_NO, P_NO); -> ALTER TABLE PUBLIC.SP2 ADD CONSTRAINT PUBLIC.C1 FOREIGN KEY(S_NO) REFERENCES PUBLIC.S(S_NO) NOCHECK; -> ALTER TABLE PUBLIC.SP2 ADD CONSTRAINT PUBLIC.CONSTRAINT_1417 PRIMARY KEY(S_NO, P_NO); -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_27 FOREIGN KEY(PARENT) REFERENCES PUBLIC.TEST(ID) NOCHECK; -> CREATE MEMORY TABLE PUBLIC.P( P_NO VARCHAR(5) NOT NULL, DESCR VARCHAR(16), COLOR VARCHAR(8) ); -> CREATE MEMORY TABLE PUBLIC.S( S_NO VARCHAR(5) NOT NULL, NAME VARCHAR(16), CITY VARCHAR(16) ); -> CREATE MEMORY TABLE PUBLIC.SP1( S_NO VARCHAR(5) NOT NULL, P_NO VARCHAR(5) NOT NULL, QTY INT ); -> CREATE MEMORY TABLE PUBLIC.SP2( S_NO VARCHAR(5) NOT NULL, P_NO VARCHAR(5) NOT NULL, QTY INT ); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, PARENT INT ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 20 - -drop table test; -> ok - -drop table sp1; -> ok - -drop table sp2; -> ok - -drop table s; -> ok - -drop table p; -> ok - -create table test (id identity, value int not null); -> ok - -create primary key on test(id); -> exception - -alter table test drop primary key; -> ok - -alter table test drop primary key; -> exception - -create primary key on test(id, id, id); -> ok - -alter table test drop primary key; -> ok - -drop table test; -> ok - -set autocommit off; -> ok - -create local temporary table test (id identity, b int, foreign key(b) references(id)); -> ok - -drop table test; -> ok - -script NOPASSWORDS NOSETTINGS drop; -> SCRIPT -> 
----------------------------------------------- -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 1 - -create local temporary table test1 (id identity); -> ok - -create local temporary table test2 (id identity); -> ok - -alter table test2 add constraint test2_test1 foreign key (id) references test1; -> ok - -drop table test1; -> ok - -drop table test2; -> ok - -create local temporary table test1 (id identity); -> ok - -create local temporary table test2 (id identity); -> ok - -alter table test2 add constraint test2_test1 foreign key (id) references test1; -> ok - -drop table test1; -> ok - -drop table test2; -> ok - -set autocommit on; -> ok - -create table test(id int primary key, ref int, foreign key(ref) references(id)); -> ok - -insert into test values(1, 1), (2, 2); -> update count: 2 - -update test set ref=3-ref; -> update count: 2 - -alter table test add column dummy int; -> ok - -insert into test values(4, 4, null); -> update count: 1 - -drop table test; -> ok - -create table test(id int primary key); -> ok - -explain select * from test a inner join test b left outer join test c on c.id = a.id; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT A.ID, C.ID, B.ID FROM PUBLIC.TEST A /* PUBLIC.TEST.tableScan */ LEFT OUTER JOIN PUBLIC.TEST C /* PUBLIC.PRIMARY_KEY_2: ID = A.ID */ ON C.ID = A.ID INNER JOIN PUBLIC.TEST B /* PUBLIC.TEST.tableScan */ ON 1=1 -> rows: 1 - -SELECT T.ID FROM TEST "T"; -> ID -> -- -> rows: 0 - -SELECT T."ID" FROM TEST "T"; -> ID -> -- -> rows: 0 - -SELECT "T".ID FROM TEST "T"; -> ID -> -- -> rows: 0 - -SELECT "T"."ID" FROM TEST "T"; -> ID -> -- -> rows: 0 - -SELECT T.ID FROM "TEST" T; -> ID -> -- -> rows: 0 - -SELECT T."ID" FROM "TEST" T; -> ID -> -- -> rows: 0 - -SELECT "T".ID FROM "TEST" T; -> ID -> -- -> rows: 0 - -SELECT "T"."ID" FROM "TEST" T; 
-> ID -> -- -> rows: 0 - -SELECT T.ID FROM "TEST" "T"; -> ID -> -- -> rows: 0 - -SELECT T."ID" FROM "TEST" "T"; -> ID -> -- -> rows: 0 - -SELECT "T".ID FROM "TEST" "T"; -> ID -> -- -> rows: 0 - -SELECT "T"."ID" FROM "TEST" "T"; -> ID -> -- -> rows: 0 - -select "TEST".id from test; -> ID -> -- -> rows: 0 - -select test."ID" from test; -> ID -> -- -> rows: 0 - -select test."id" from test; -> exception - -select "TEST"."ID" from test; -> ID -> -- -> rows: 0 - -select "test"."ID" from test; -> exception - -select public."TEST".id from test; -> ID -> -- -> rows: 0 - -select public.test."ID" from test; -> ID -> -- -> rows: 0 - -select public."TEST"."ID" from test; -> ID -> -- -> rows: 0 - -select public."test"."ID" from test; -> exception - -select "PUBLIC"."TEST".id from test; -> ID -> -- -> rows: 0 - -select "PUBLIC".test."ID" from test; -> ID -> -- -> rows: 0 - -select public."TEST"."ID" from test; -> ID -> -- -> rows: 0 - -select "public"."TEST"."ID" from test; -> exception - -drop table test; -> ok - -create schema s authorization sa; -> ok - -create memory table s.test(id int); -> ok - -create index if not exists idx_id on s.test(id); -> ok - -create index if not exists idx_id on s.test(id); -> ok - -alter index s.idx_id rename to s.index_id; -> ok - -create sequence s.seq cache 0; -> ok - -alter sequence s.seq restart with 10; -> ok - -alter table s.test add constraint cu_id unique(id); -> ok - -alter table s.test add name varchar; -> ok - -alter table s.test drop column name; -> ok - -alter table s.test drop constraint cu_id; -> ok - -alter table s.test rename to testtab; -> ok - -alter table s.testtab rename to test; -> ok - -create trigger test_trigger before insert on s.test call "org.h2.test.db.TestTriggersConstraints"; -> ok - -script NOPASSWORDS NOSETTINGS drop; -> SCRIPT -> --------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM S.TEST; -> CREATE FORCE TRIGGER 
S.TEST_TRIGGER BEFORE INSERT ON S.TEST QUEUE 1024 CALL "org.h2.test.db.TestTriggersConstraints"; -> CREATE INDEX S.INDEX_ID ON S.TEST(ID); -> CREATE MEMORY TABLE S.TEST( ID INT ); -> CREATE SCHEMA IF NOT EXISTS S AUTHORIZATION SA; -> CREATE SEQUENCE S.SEQ START WITH 10 CACHE 1; -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> DROP SEQUENCE IF EXISTS S.SEQ; -> DROP TABLE IF EXISTS S.TEST CASCADE; -> rows: 9 - -drop trigger s.test_trigger; -> ok - -drop schema s; -> ok - -CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), y int as id+1); -> ok - -INSERT INTO TEST(id, name) VALUES(1, 'Hello'); -> update count: 1 - -create index idx_n_id on test(name, id); -> ok - -alter table test add constraint abc foreign key(id) references (id); -> ok - -alter table test alter column id rename to i; -> ok - -script NOPASSWORDS NOSETTINGS drop; -> SCRIPT -> --------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.ABC FOREIGN KEY(I) REFERENCES PUBLIC.TEST(I) NOCHECK; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(I); -> CREATE INDEX PUBLIC.IDX_N_ID ON PUBLIC.TEST(NAME, I); -> CREATE MEMORY TABLE PUBLIC.TEST( I INT NOT NULL, NAME VARCHAR(255), Y INT AS (I + 1) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> DROP TABLE IF EXISTS PUBLIC.TEST CASCADE; -> INSERT INTO PUBLIC.TEST(I, NAME, Y) VALUES (1, 'Hello', 2); -> rows: 8 - -INSERT INTO TEST(i, name) VALUES(2, 'World'); -> update count: 1 - -SELECT * FROM TEST ORDER BY I; -> I NAME Y -> - ----- - -> 1 Hello 2 -> 2 World 3 -> rows (ordered): 2 - -UPDATE TEST SET NAME='Hi' WHERE I=1; -> update count: 1 - -DELETE FROM TEST t0 WHERE t0.I=2; -> update count: 1 - -drop table test; -> ok - -create table test(current int); -> ok - -select current from test; -> CURRENT -> ------- -> rows: 0 - -drop table test; -> ok - -CREATE table my_table(my_int integer, 
my_char varchar); -> ok - -INSERT INTO my_table VALUES(1, 'Testing'); -> update count: 1 - -ALTER TABLE my_table ALTER COLUMN my_int RENAME to my_new_int; -> ok - -SELECT my_new_int FROM my_table; -> MY_NEW_INT -> ---------- -> 1 -> rows: 1 - -UPDATE my_table SET my_new_int = 33; -> update count: 1 - -SELECT * FROM my_table; -> MY_NEW_INT MY_CHAR -> ---------- ------- -> 33 Testing -> rows: 1 - -DROP TABLE my_table; -> ok - -create sequence seq1; -> ok - -create table test(ID INT default next value for seq1); -> ok - -drop sequence seq1; -> exception - -alter table test add column name varchar; -> ok - -insert into test(name) values('Hello'); -> update count: 1 - -select * from test; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -drop table test; -> ok - -drop sequence seq1; -> ok - -create table test(a int primary key, b int, c int); -> ok - -create unique index idx_ba on test(b, a); -> ok - -alter table test add constraint abc foreign key(c, a) references test(b, a); -> ok - -insert into test values(1, 1, null); -> update count: 1 - -drop table test; -> ok - -create table ADDRESS (ADDRESS_ID int primary key, ADDRESS_TYPE int not null, SERVER_ID int not null); -> ok - -create unique index idx_a on address(ADDRESS_TYPE, SERVER_ID); -> ok - -create table SERVER (SERVER_ID int primary key, SERVER_TYPE int not null, ADDRESS_TYPE int); -> ok - -alter table ADDRESS add constraint addr foreign key (SERVER_ID) references SERVER; -> ok - -alter table SERVER add constraint server_const foreign key (ADDRESS_TYPE, SERVER_ID) references ADDRESS (ADDRESS_TYPE, SERVER_ID); -> ok - -insert into SERVER (SERVER_ID, SERVER_TYPE) values (1, 1); -> update count: 1 - -drop table address; -> ok - -drop table server; -> ok - -create table left_hand (id int primary key); -> ok - -create table right_hand (id int primary key); -> ok - -insert into left_hand values(0); -> update count: 1 - -insert into left_hand values(1); -> update count: 1 - -insert into right_hand values(0); -> update 
count: 1 - --- h2, postgresql, mysql, derby, hsqldb: 2 -select * from left_hand left outer join right_hand on left_hand.id=right_hand.id; -> ID ID -> -- ---- -> 0 0 -> 1 null -> rows: 2 - --- h2, postgresql, mysql, derby, hsqldb: 2 -select * from left_hand left join right_hand on left_hand.id=right_hand.id; -> ID ID -> -- ---- -> 0 0 -> 1 null -> rows: 2 - --- h2: 1 (2 cols); postgresql, mysql: 1 (1 col); derby, hsqldb: no natural join -select * from left_hand natural join right_hand; -> ID -> -- -> 0 -> rows: 1 - --- h2, postgresql, mysql, derby, hsqldb: 1 -select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1; -> ID ID -> -- ---- -> 1 null -> rows: 1 - --- h2, postgresql, mysql, derby, hsqldb: 1 -select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1; -> ID ID -> -- ---- -> 1 null -> rows: 1 - --- h2: 0 (2 cols); postgresql, mysql: 0 (1 col); derby, hsqldb: no natural join -select * from left_hand natural join right_hand where left_hand.id=1; -> ID -> -- -> rows: 0 - --- !!! h2: 1; postgresql, mysql, hsqldb: 0; derby: exception -select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1 having right_hand.id=2; -> ID ID -> -- -- -> rows: 0 - --- !!! 
h2: 1; postgresql, mysql, hsqldb: 0; derby: exception -select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1 having right_hand.id=2; -> ID ID -> -- -- -> rows: 0 - --- h2: 0 (2 cols); postgresql: 0 (1 col), mysql: exception; derby, hsqldb: no natural join -select * from left_hand natural join right_hand where left_hand.id=1 having right_hand.id=2; -> exception - --- h2, mysql, hsqldb: 0 rows; postgresql, derby: exception -select * from left_hand left outer join right_hand on left_hand.id=right_hand.id where left_hand.id=1 group by left_hand.id having right_hand.id=2; -> ID ID -> -- -- -> rows: 0 - --- h2, mysql, hsqldb: 0 rows; postgresql, derby: exception -select * from left_hand left join right_hand on left_hand.id=right_hand.id where left_hand.id=1 group by left_hand.id having right_hand.id=2; -> ID ID -> -- -- -> rows: 0 - --- h2: 0 rows; postgresql, mysql: exception; derby, hsqldb: no natural join -select * from left_hand natural join right_hand where left_hand.id=1 group by left_hand.id having right_hand.id=2; -> ID -> -- -> rows: 0 - -drop table right_hand; -> ok - -drop table left_hand; -> ok - -CREATE TABLE PlanElements(id int primary key, name varchar, parent_id int, foreign key(parent_id) references(id) on delete cascade); -> ok - -INSERT INTO PlanElements(id,name,parent_id) VALUES(1, '#1', null), (2, '#1-A', 1), (3, '#1-A-1', 2), (4, '#1-A-2', 2); -> update count: 4 - -INSERT INTO PlanElements(id,name,parent_id) VALUES(5, '#1-B', 1), (6, '#1-B-1', 5), (7, '#1-B-2', 5); -> update count: 3 - -INSERT INTO PlanElements(id,name,parent_id) VALUES(8, '#1-C', 1), (9, '#1-C-1', 8), (10, '#1-C-2', 8); -> update count: 3 - -INSERT INTO PlanElements(id,name,parent_id) VALUES(11, '#1-D', 1), (12, '#1-D-1', 11), (13, '#1-D-2', 11), (14, '#1-D-3', 11); -> update count: 4 - -INSERT INTO PlanElements(id,name,parent_id) VALUES(15, '#1-E', 1), (16, '#1-E-1', 15), (17, '#1-E-2', 15), (18, '#1-E-3', 15), (19, '#1-E-4', 15); -> 
update count: 5 - -DELETE FROM PlanElements WHERE id = 1; -> update count: 1 - -SELECT * FROM PlanElements; -> ID NAME PARENT_ID -> -- ---- --------- -> rows: 0 - -DROP TABLE PlanElements; -> ok - -CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE TABLE CHILD(ID INT PRIMARY KEY, NAME VARCHAR(255), FOREIGN KEY(NAME) REFERENCES PARENT(ID)); -> ok - -INSERT INTO PARENT VALUES(1, '1'); -> update count: 1 - -INSERT INTO CHILD VALUES(1, '1'); -> update count: 1 - -INSERT INTO CHILD VALUES(2, 'Hello'); -> exception - -DROP TABLE IF EXISTS CHILD; -> ok - -DROP TABLE IF EXISTS PARENT; -> ok - -(SELECT * FROM DUAL) UNION ALL (SELECT * FROM DUAL); -> X -> - -> 1 -> 1 -> rows: 2 - -DECLARE GLOBAL TEMPORARY TABLE TEST(ID INT PRIMARY KEY); -> ok - -SELECT * FROM TEST; -> ID -> -- -> rows: 0 - -SELECT GROUP_CONCAT(ID) FROM TEST; -> GROUP_CONCAT(ID) -> ---------------- -> null -> rows: 1 - -SELECT * FROM SESSION.TEST; -> ID -> -- -> rows: 0 - -DROP TABLE TEST; -> ok - -VALUES(1, 2); -> C1 C2 -> -- -- -> 1 2 -> rows: 1 - -DROP TABLE IF EXISTS TEST; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -SELECT group_concat(name) FROM TEST group by id; -> GROUP_CONCAT(NAME) -> ------------------ -> Hello -> World -> rows: 2 - -drop table test; -> ok - ---- script drop --------------------------------------------------------------------------------------------- -create memory table test (id int primary key, im_ie varchar(10)); -> ok - -create sequence test_seq; -> ok - -script NODATA NOPASSWORDS NOSETTINGS drop; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, IM_IE VARCHAR(10) ); -> CREATE SEQUENCE 
PUBLIC.TEST_SEQ START WITH 1; -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> DROP SEQUENCE IF EXISTS PUBLIC.TEST_SEQ; -> DROP TABLE IF EXISTS PUBLIC.TEST CASCADE; -> rows: 7 - -drop sequence test_seq; -> ok - -drop table test; -> ok - ---- constraints --------------------------------------------------------------------------------------------- -CREATE MEMORY TABLE TEST(ID IDENTITY(100, 10), NAME VARCHAR); -> ok - -INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); -> update count: 2 - -SELECT * FROM TEST; -> ID NAME -> --- ----- -> 100 Hello -> 110 World -> rows: 2 - -DROP TABLE TEST; -> ok - -CREATE MEMORY TABLE TEST(ID BIGINT NOT NULL IDENTITY(10, 5), NAME VARCHAR); -> ok - -INSERT INTO TEST(NAME) VALUES('Hello'), ('World'); -> update count: 2 - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 10 Hello -> 15 World -> rows: 2 - -DROP TABLE TEST; -> ok - -CREATE CACHED TABLE account( -id INTEGER NOT NULL IDENTITY, -name VARCHAR NOT NULL, -mail_address VARCHAR NOT NULL, -UNIQUE(name), -PRIMARY KEY(id) -); -> ok - -CREATE CACHED TABLE label( -id INTEGER NOT NULL IDENTITY, -parent_id INTEGER NOT NULL, -account_id INTEGER NOT NULL, -name VARCHAR NOT NULL, -PRIMARY KEY(id), -UNIQUE(parent_id, name), -UNIQUE(id, account_id), -FOREIGN KEY(account_id) REFERENCES account (id), -FOREIGN KEY(parent_id, account_id) REFERENCES label (id, account_id) -); -> ok - -INSERT INTO account VALUES (0, 'example', 'example@example.com'); -> update count: 1 - -INSERT INTO label VALUES ( 0, 0, 0, 'TEST'); -> update count: 1 - -INSERT INTO label VALUES ( 1, 0, 0, 'TEST'); -> exception - -INSERT INTO label VALUES ( 1, 0, 0, 'TEST1'); -> update count: 1 - -INSERT INTO label VALUES ( 2, 2, 1, 'TEST'); -> exception - -drop table label; -> ok - -drop table account; -> ok - ---- constraints and alter table add column --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES(ID)); -> 
ok - -INSERT INTO TEST VALUES(0, 0); -> update count: 1 - -ALTER TABLE TEST ADD COLUMN CHILD_ID INT; -> ok - -ALTER TABLE TEST ALTER COLUMN CHILD_ID VARCHAR; -> ok - -ALTER TABLE TEST ALTER COLUMN PARENTID VARCHAR; -> ok - -ALTER TABLE TEST DROP COLUMN PARENTID; -> ok - -ALTER TABLE TEST DROP COLUMN CHILD_ID; -> ok - -SELECT * FROM TEST; -> ID -> -- -> 0 -> rows: 1 - -DROP TABLE TEST; -> ok - -CREATE MEMORY TABLE A(X INT); -> ok - -CREATE MEMORY TABLE B(XX INT, CONSTRAINT B2A FOREIGN KEY(XX) REFERENCES A(X)); -> ok - -CREATE MEMORY TABLE C(X_MASTER INT); -> ok - -ALTER TABLE A ADD CONSTRAINT A2C FOREIGN KEY(X) REFERENCES C(X_MASTER); -> ok - -insert into c values(1); -> update count: 1 - -insert into a values(1); -> update count: 1 - -insert into b values(1); -> update count: 1 - -ALTER TABLE A ADD COLUMN Y INT; -> ok - -insert into c values(2); -> update count: 1 - -insert into a values(2, 2); -> update count: 1 - -insert into b values(2); -> update count: 1 - -DROP TABLE IF EXISTS A; -> ok - -DROP TABLE IF EXISTS B; -> ok - -DROP TABLE IF EXISTS C; -> ok - ---- quoted keywords --------------------------------------------------------------------------------------------- -CREATE TABLE "CREATE"("SELECT" INT, "PRIMARY" INT, "KEY" INT, "INDEX" INT, "ROWNUM" INT, "NEXTVAL" INT, "FROM" INT); -> ok - -INSERT INTO "CREATE" default values; -> update count: 1 - -INSERT INTO "CREATE" default values; -> update count: 1 - -SELECT "ROWNUM", ROWNUM, "SELECT" "AS", "PRIMARY" AS "X", "KEY", "NEXTVAL", "INDEX", "SELECT" "FROM" FROM "CREATE"; -> ROWNUM ROWNUM() AS X KEY NEXTVAL INDEX FROM -> ------ -------- ---- ---- ---- ------- ----- ---- -> null 1 null null null null null null -> null 2 null null null null null null -> rows: 2 - -DROP TABLE "CREATE"; -> ok - ---- truncate table --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 
'World'); -> update count: 2 - -TRUNCATE TABLE TEST; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ---- -> rows: 0 - -DROP TABLE TEST; -> ok - -CREATE TABLE PARENT(ID INT PRIMARY KEY, NAME VARCHAR); -> ok - -CREATE TABLE CHILD(PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES PARENT(ID), NAME VARCHAR); -> ok - -TRUNCATE TABLE CHILD; -> ok - -TRUNCATE TABLE PARENT; -> exception - -DROP TABLE CHILD; -> ok - -DROP TABLE PARENT; -> ok - ---- test case for number like string --------------------------------------------------------------------------------------------- -CREATE TABLE test (one bigint primary key, two bigint, three bigint); -> ok - -CREATE INDEX two ON test(two); -> ok - -INSERT INTO TEST VALUES(1, 2, 3), (10, 20, 30), (100, 200, 300); -> update count: 3 - -INSERT INTO TEST VALUES(2, 6, 9), (20, 60, 90), (200, 600, 900); -> update count: 3 - -SELECT * FROM test WHERE one LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> 2 6 9 -> 20 60 90 -> 200 600 900 -> rows: 3 - -SELECT * FROM test WHERE two LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> 1 2 3 -> 10 20 30 -> 100 200 300 -> rows: 3 - -SELECT * FROM test WHERE three LIKE '2%'; -> ONE TWO THREE -> --- --- ----- -> rows: 0 - -DROP TABLE TEST; -> ok - ---- merge (upsert) --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -EXPLAIN SELECT * FROM TEST WHERE ID=1; -> PLAN -> ------------------------------------------------------------------------------------------ -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1 -> rows: 1 - -EXPLAIN MERGE INTO TEST VALUES(1, 'Hello'); -> PLAN -> ------------------------------------------------------------ -> MERGE INTO PUBLIC.TEST(ID, NAME) KEY(ID) VALUES (1, 'Hello') -> rows: 1 - -MERGE INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -MERGE INTO TEST VALUES(1, 'Hi'); -> update count: 1 - -MERGE INTO TEST VALUES(2, 'World'); -> 
update count: 1 - -MERGE INTO TEST VALUES(2, 'World!'); -> update count: 1 - -MERGE INTO TEST(ID, NAME) VALUES(3, 'How are you'); -> update count: 1 - -EXPLAIN MERGE INTO TEST(ID, NAME) VALUES(3, 'How are you'); -> PLAN -> ------------------------------------------------------------------ -> MERGE INTO PUBLIC.TEST(ID, NAME) KEY(ID) VALUES (3, 'How are you') -> rows: 1 - -MERGE INTO TEST(ID, NAME) KEY(ID) VALUES(3, 'How do you do'); -> update count: 1 - -EXPLAIN MERGE INTO TEST(ID, NAME) KEY(ID) VALUES(3, 'How do you do'); -> PLAN -> -------------------------------------------------------------------- -> MERGE INTO PUBLIC.TEST(ID, NAME) KEY(ID) VALUES (3, 'How do you do') -> rows: 1 - -MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(3, 'Fine'); -> exception - -MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(4, 'Fine!'); -> update count: 1 - -MERGE INTO TEST(ID, NAME) KEY(NAME) VALUES(4, 'Fine! And you'); -> exception - -MERGE INTO TEST(ID, NAME) KEY(NAME, ID) VALUES(5, 'I''m ok'); -> update count: 1 - -MERGE INTO TEST(ID, NAME) KEY(NAME, ID) VALUES(5, 'Oh, fine'); -> exception - -MERGE INTO TEST(ID, NAME) VALUES(6, 'Oh, fine.'); -> update count: 1 - -SELECT * FROM TEST; -> ID NAME -> -- ------------- -> 1 Hi -> 2 World! -> 3 How do you do -> 4 Fine! -> 5 I'm ok -> 6 Oh, fine. -> rows: 6 - -MERGE INTO TEST SELECT ID+4, NAME FROM TEST; -> update count: 6 - -SELECT * FROM TEST; -> ID NAME -> -- ------------- -> 1 Hi -> 10 Oh, fine. -> 2 World! -> 3 How do you do -> 4 Fine! -> 5 Hi -> 6 World! -> 7 How do you do -> 8 Fine! 
-> 9 I'm ok -> rows: 10 - -DROP TABLE TEST; -> ok - -CREATE TABLE PARENT(ID INT, NAME VARCHAR); -> ok - -CREATE TABLE CHILD(ID INT, PARENTID INT, FOREIGN KEY(PARENTID) REFERENCES PARENT(ID)); -> ok - -INSERT INTO PARENT VALUES(1, 'Mary'), (2, 'John'); -> update count: 2 - -INSERT INTO CHILD VALUES(10, 1), (11, 1), (20, 2), (21, 2); -> update count: 4 - -MERGE INTO PARENT KEY(ID) VALUES(1, 'Marcy'); -> update count: 1 - -SELECT * FROM PARENT; -> ID NAME -> -- ----- -> 1 Marcy -> 2 John -> rows: 2 - -SELECT * FROM CHILD; -> ID PARENTID -> -- -------- -> 10 1 -> 11 1 -> 20 2 -> 21 2 -> rows: 4 - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - ---- -create table STRING_TEST(label varchar(31), label2 varchar(255)); -> ok - -create table STRING_TEST_ic(label varchar_ignorecase(31), label2 -varchar_ignorecase(255)); -> ok - -insert into STRING_TEST values('HELLO','Bye'); -> update count: 1 - -insert into STRING_TEST values('HELLO','Hello'); -> update count: 1 - -insert into STRING_TEST_ic select * from STRING_TEST; -> update count: 2 - --- Expect rows of STRING_TEST_ic and STRING_TEST to be identical -select * from STRING_TEST; -> LABEL LABEL2 -> ----- ------ -> HELLO Bye -> HELLO Hello -> rows: 2 - --- correct -select * from STRING_TEST_ic; -> LABEL LABEL2 -> ----- ------ -> HELLO Bye -> HELLO Hello -> rows: 2 - -drop table STRING_TEST; -> ok - -drop table STRING_TEST_ic; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR_IGNORECASE); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'), (3, 'hallo'), (4, 'hoi'); -> update count: 4 - -SELECT * FROM TEST WHERE NAME = 'HELLO'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT * FROM TEST WHERE NAME = 'HE11O'; -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM TEST ORDER BY NAME; -> ID NAME -> -- ----- -> 3 hallo -> 1 Hello -> 4 hoi -> 2 World -> rows (ordered): 4 - -DROP TABLE IF EXISTS TEST; -> ok - ---- complex join 
--------------------------------------------------------------------------------------------- -CREATE TABLE T1(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE TABLE T2(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE TABLE T3(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO T1 VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO T1 VALUES(2, 'World'); -> update count: 1 - -INSERT INTO T1 VALUES(3, 'Peace'); -> update count: 1 - -INSERT INTO T2 VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO T2 VALUES(2, 'World'); -> update count: 1 - -INSERT INTO T3 VALUES(1, 'Hello'); -> update count: 1 - -SELECT * FROM t1 left outer join t2 on t1.id=t2.id; -> ID NAME ID NAME -> -- ----- ---- ----- -> 1 Hello 1 Hello -> 2 World 2 World -> 3 Peace null null -> rows: 3 - -SELECT * FROM t1 left outer join t2 on t1.id=t2.id left outer join t3 on t1.id=t3.id; -> ID NAME ID NAME ID NAME -> -- ----- ---- ----- ---- ----- -> 1 Hello 1 Hello 1 Hello -> 2 World 2 World null null -> 3 Peace null null null null -> rows: 3 - -SELECT * FROM t1 left outer join t2 on t1.id=t2.id inner join t3 on t1.id=t3.id; -> ID NAME ID NAME ID NAME -> -- ----- -- ----- -- ----- -> 1 Hello 1 Hello 1 Hello -> rows: 1 - -drop table t1; -> ok - -drop table t2; -> ok - -drop table t3; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, parent int, sid int); -> ok - -create index idx_p on test(sid); -> ok - -insert into test select x, x, x from system_range(0,20); -> update count: 21 - -select * from test l0 inner join test l1 on l0.sid=l1.sid, test l3 where l0.sid=l3.parent; -> ID PARENT SID ID PARENT SID ID PARENT SID -> -- ------ --- -- ------ --- -- ------ --- -> 0 0 0 0 0 0 0 0 0 -> 1 1 1 1 1 1 1 1 1 -> 10 10 10 10 10 10 10 10 10 -> 11 11 11 11 11 11 11 11 11 -> 12 12 12 12 12 12 12 12 12 -> 13 13 13 13 13 13 13 13 13 -> 14 14 14 14 14 14 14 14 14 -> 15 15 15 15 15 15 15 15 15 -> 16 16 16 16 16 16 16 16 16 -> 17 17 17 17 17 17 17 17 17 -> 18 18 18 18 18 18 18 18 18 -> 19 19 19 19 19 
19 19 19 19 -> 2 2 2 2 2 2 2 2 2 -> 20 20 20 20 20 20 20 20 20 -> 3 3 3 3 3 3 3 3 3 -> 4 4 4 4 4 4 4 4 4 -> 5 5 5 5 5 5 5 5 5 -> 6 6 6 6 6 6 6 6 6 -> 7 7 7 7 7 7 7 7 7 -> 8 8 8 8 8 8 8 8 8 -> 9 9 9 9 9 9 9 9 9 -> rows: 21 - -select * from -test l0 -inner join test l1 on l0.sid=l1.sid -inner join test l2 on l0.sid=l2.id, -test l5 -inner join test l3 on l5.sid=l3.sid -inner join test l4 on l5.sid=l4.id -where l2.id is not null -and l0.sid=l5.parent; -> ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID ID PARENT SID -> -- ------ --- -- ------ --- -- ------ --- -- ------ --- -- ------ --- -- ------ --- -> 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -> 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -> 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 -> 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 -> 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 -> 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 -> 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 -> 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 -> 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 -> 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 -> 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 -> 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 -> 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 -> 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 -> 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 -> 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 -> 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 -> 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 -> 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 -> 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 -> 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 -> rows: 21 - -DROP TABLE IF EXISTS TEST; -> ok - ---- update with list --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - 
-SELECT * FROM TEST ORDER BY ID; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -UPDATE TEST t0 SET t0.NAME='Hi' WHERE t0.ID=1; -> update count: 1 - -update test set (id, name)=(id+1, name || 'Hi'); -> update count: 2 - -update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id=t1.id); -> update count: 2 - -explain update test set (id, name)=(id+1, name || 'Hi'); -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------- -> UPDATE PUBLIC.TEST /* PUBLIC.TEST.tableScan */ SET ID = ARRAY_GET(((ID + 1), (NAME || 'Hi')), 1), NAME = ARRAY_GET(((ID + 1), (NAME || 'Hi')), 2) -> rows: 1 - -explain update test set (id, name)=(select id+1, name || 'Ho' from test t1 where test.id=t1.id); -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> UPDATE PUBLIC.TEST /* PUBLIC.TEST.tableScan */ SET ID = ARRAY_GET((SELECT (ID + 1), (NAME || 'Ho') FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE TEST.ID = T1.ID), 1), NAME = ARRAY_GET((SELECT (ID + 1), (NAME || 'Ho') FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID = TEST.ID */ WHERE TEST.ID = T1.ID), 2) -> rows: 1 - -select * from test; -> ID NAME -> -- --------- -> 3 HiHiHo -> 4 WorldHiHo -> rows: 2 - -DROP TABLE IF EXISTS TEST; -> ok - ---- script --------------------------------------------------------------------------------------------- -create memory table test(id int primary key, c clob, b blob); -> ok - -insert into test values(0, null, null); -> update count: 1 - -insert into test values(1, '', ''); -> update count: 1 - -insert into test values(2, 'Cafe', X'cafe'); -> update count: 1 - 
-script simple nopasswords nosettings; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 3 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, C CLOB, B BLOB ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, C, B) VALUES(0, NULL, NULL); -> INSERT INTO PUBLIC.TEST(ID, C, B) VALUES(1, '', X''); -> INSERT INTO PUBLIC.TEST(ID, C, B) VALUES(2, 'Cafe', X'cafe'); -> rows: 7 - -drop table test; -> ok - ---- optimizer --------------------------------------------------------------------------------------------- -create table b(id int primary key, p int); -> ok - -create index bp on b(p); -> ok - -insert into b values(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9); -> update count: 10 - -insert into b select id+10, p+10 from b; -> update count: 10 - -explain select * from b b0, b b1, b b2 where b1.p = b0.id and b2.p = b1.id and b0.id=10; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT B0.ID, B0.P, B1.ID, B1.P, B2.ID, B2.P FROM PUBLIC.B B0 /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN PUBLIC.B B1 /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN PUBLIC.B B2 /* PUBLIC.BP: P = B1.ID */ ON 1=1 WHERE (B0.ID = 10) AND ((B1.P = B0.ID) AND (B2.P = B1.ID)) -> rows: 1 - -explain select * from b b0, b b1, b b2, b b3 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b0.id=10; -> PLAN -> 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT B0.ID, B0.P, B1.ID, B1.P, B2.ID, B2.P, B3.ID, B3.P FROM PUBLIC.B B0 /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN PUBLIC.B B1 /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN PUBLIC.B B2 /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN PUBLIC.B B3 /* PUBLIC.BP: P = B2.ID */ ON 1=1 WHERE (B0.ID = 10) AND ((B3.P = B2.ID) AND ((B1.P = B0.ID) AND (B2.P = B1.ID))) -> rows: 1 - -explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT B0.ID, B0.P, B1.ID, B1.P, B2.ID, B2.P, B3.ID, B3.P, B4.ID, B4.P FROM PUBLIC.B B0 /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN PUBLIC.B B1 /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN PUBLIC.B B2 /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN PUBLIC.B B3 /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN PUBLIC.B 
B4 /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE (B0.ID = 10) AND ((B4.P = B3.ID) AND ((B3.P = B2.ID) AND ((B1.P = B0.ID) AND (B2.P = B1.ID)))) -> rows: 1 - -analyze; -> ok - -explain select * from b b0, b b1, b b2, b b3, b b4 where b1.p = b0.id and b2.p = b1.id and b3.p = b2.id and b4.p = b3.id and b0.id=10; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT B0.ID, B0.P, B1.ID, B1.P, B2.ID, B2.P, B3.ID, B3.P, B4.ID, B4.P FROM PUBLIC.B B0 /* PUBLIC.PRIMARY_KEY_4: ID = 10 */ /* WHERE B0.ID = 10 */ INNER JOIN PUBLIC.B B1 /* PUBLIC.BP: P = B0.ID */ ON 1=1 /* WHERE B1.P = B0.ID */ INNER JOIN PUBLIC.B B2 /* PUBLIC.BP: P = B1.ID */ ON 1=1 /* WHERE B2.P = B1.ID */ INNER JOIN PUBLIC.B B3 /* PUBLIC.BP: P = B2.ID */ ON 1=1 /* WHERE B3.P = B2.ID */ INNER JOIN PUBLIC.B B4 /* PUBLIC.BP: P = B3.ID */ ON 1=1 WHERE (B0.ID = 10) AND ((B4.P = B3.ID) AND ((B3.P = B2.ID) AND ((B1.P = B0.ID) AND (B2.P = B1.ID)))) -> rows: 1 - -drop table if exists b; -> ok - -create table test(id int primary key, first_name varchar, name varchar, state int); -> ok - -create index idx_first_name on test(first_name); -> ok - -create index idx_name on test(name); -> ok - -create index idx_state on test(state); -> ok - -insert into test values -(0, 'Anne', 'Smith', 0), (1, 'Tom', 'Smith', 0), -(2, 'Tom', 'Jones', 0), (3, 'Steve', 'Johnson', 0), -(4, 'Steve', 'Martin', 0), (5, 'Jon', 'Jones', 0), -(6, 'Marc', 'Scott', 0), (7, 'Marc', 'Miller', 0), -(8, 'Susan', 'Wood', 0), (9, 'Jon', 'Bennet', 0); -> 
update count: 10 - -EXPLAIN SELECT * FROM TEST WHERE ID = 3; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.FIRST_NAME, TEST.NAME, TEST.STATE FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 3 */ WHERE ID = 3 -> rows: 1 - -SELECT SELECTIVITY(ID), SELECTIVITY(FIRST_NAME), -SELECTIVITY(NAME), SELECTIVITY(STATE) -FROM TEST WHERE ROWNUM()<100000; -> SELECTIVITY(ID) SELECTIVITY(FIRST_NAME) SELECTIVITY(NAME) SELECTIVITY(STATE) -> --------------- ----------------------- ----------------- ------------------ -> 100 60 80 10 -> rows: 1 - -explain select * from test where name='Smith' and first_name='Tom' and state=0; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.FIRST_NAME, TEST.NAME, TEST.STATE FROM PUBLIC.TEST /* PUBLIC.IDX_FIRST_NAME: FIRST_NAME = 'Tom' */ WHERE (STATE = 0) AND ((NAME = 'Smith') AND (FIRST_NAME = 'Tom')) -> rows: 1 - -alter table test alter column name selectivity 100; -> ok - -explain select * from test where name='Smith' and first_name='Tom' and state=0; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.FIRST_NAME, TEST.NAME, TEST.STATE FROM PUBLIC.TEST /* PUBLIC.IDX_NAME: NAME = 'Smith' */ WHERE (STATE = 0) AND ((NAME = 'Smith') AND (FIRST_NAME = 'Tom')) -> rows: 1 - -drop table test; -> ok - -CREATE TABLE O(X INT PRIMARY KEY, Y INT); -> ok - -INSERT INTO O SELECT X, X+1 FROM SYSTEM_RANGE(1, 1000); -> update count: 1000 - -EXPLAIN SELECT A.X FROM O B, O A, O F, O D, O C, O E, O G, O H, O I, O J -WHERE 1=J.X and J.Y=I.X AND I.Y=H.X AND H.Y=G.X AND G.Y=F.X AND F.Y=E.X -AND E.Y=D.X AND D.Y=C.X AND C.Y=B.X AND B.Y=A.X; -> 
PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT A.X FROM PUBLIC.O J /* PUBLIC.PRIMARY_KEY_4: X = 1 */ /* WHERE J.X = 1 */ INNER JOIN PUBLIC.O I /* PUBLIC.PRIMARY_KEY_4: X = J.Y */ ON 1=1 /* WHERE J.Y = I.X */ INNER JOIN PUBLIC.O H /* PUBLIC.PRIMARY_KEY_4: X = I.Y */ ON 1=1 /* WHERE I.Y = H.X */ INNER JOIN PUBLIC.O G /* PUBLIC.PRIMARY_KEY_4: X = H.Y */ ON 1=1 /* WHERE H.Y = G.X */ INNER JOIN PUBLIC.O F /* PUBLIC.PRIMARY_KEY_4: X = G.Y */ ON 1=1 /* WHERE G.Y = F.X */ INNER JOIN PUBLIC.O E /* PUBLIC.PRIMARY_KEY_4: X = F.Y */ ON 1=1 /* WHERE F.Y = E.X */ INNER JOIN PUBLIC.O D /* PUBLIC.PRIMARY_KEY_4: X = E.Y */ ON 1=1 /* WHERE E.Y = D.X */ INNER JOIN PUBLIC.O C /* PUBLIC.PRIMARY_KEY_4: X = D.Y */ ON 1=1 /* WHERE D.Y = C.X */ INNER JOIN PUBLIC.O B /* PUBLIC.PRIMARY_KEY_4: X = C.Y */ ON 1=1 /* WHERE C.Y = B.X */ INNER JOIN PUBLIC.O A /* PUBLIC.PRIMARY_KEY_4: X = B.Y */ ON 1=1 WHERE (B.Y = A.X) AND ((C.Y = B.X) AND ((D.Y = C.X) AND ((E.Y = D.X) AND ((F.Y = E.X) AND ((G.Y = F.X) AND ((H.Y = G.X) AND 
((I.Y = H.X) AND ((J.X = 1) AND (J.Y = I.X))))))))) -> rows: 1 - -DROP TABLE O; -> ok - -CREATE TABLE PARENT(ID INT PRIMARY KEY, AID INT, BID INT, CID INT, DID INT, EID INT, FID INT, GID INT, HID INT); -> ok - -CREATE TABLE CHILD(ID INT PRIMARY KEY); -> ok - -INSERT INTO PARENT SELECT X, 1, 2, 1, 2, 1, 2, 1, 2 FROM SYSTEM_RANGE(0, 1000); -> update count: 1001 - -INSERT INTO CHILD SELECT X FROM SYSTEM_RANGE(0, 1000); -> update count: 1001 - -SELECT COUNT(*) FROM PARENT, CHILD A, CHILD B, CHILD C, CHILD D, CHILD E, CHILD F, CHILD G, CHILD H -WHERE AID=A.ID AND BID=B.ID AND CID=C.ID -AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; -> COUNT(*) -> -------- -> 1001 -> rows: 1 - -EXPLAIN SELECT COUNT(*) FROM PARENT, CHILD A, CHILD B, CHILD C, CHILD D, CHILD E, CHILD F, CHILD G, CHILD H -WHERE AID=A.ID AND BID=B.ID AND CID=C.ID -AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID AND HID=H.ID; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT COUNT(*) FROM PUBLIC.PARENT /* PUBLIC.PARENT.tableScan */ INNER JOIN PUBLIC.CHILD A /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 
1=1 /* WHERE AID = A.ID */ INNER JOIN PUBLIC.CHILD B /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN PUBLIC.CHILD C /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN PUBLIC.CHILD D /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN PUBLIC.CHILD E /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN PUBLIC.CHILD F /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN PUBLIC.CHILD G /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 /* WHERE GID = G.ID */ INNER JOIN PUBLIC.CHILD H /* PUBLIC.PRIMARY_KEY_3: ID = HID */ ON 1=1 WHERE (HID = H.ID) AND ((GID = G.ID) AND ((FID = F.ID) AND ((EID = E.ID) AND ((DID = D.ID) AND ((CID = C.ID) AND ((AID = A.ID) AND (BID = B.ID))))))) -> rows: 1 - -CREATE TABLE FAMILY(ID INT PRIMARY KEY, PARENTID INT); -> ok - -INSERT INTO FAMILY SELECT X, X-1 FROM SYSTEM_RANGE(0, 1000); -> update count: 1001 - -EXPLAIN SELECT COUNT(*) FROM CHILD A, CHILD B, FAMILY, CHILD C, CHILD D, PARENT, CHILD E, CHILD F, CHILD G -WHERE FAMILY.ID=1 AND FAMILY.PARENTID=PARENT.ID -AND AID=A.ID AND BID=B.ID AND CID=C.ID AND DID=D.ID AND EID=E.ID AND FID=F.ID AND GID=G.ID; -> PLAN -> 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT COUNT(*) FROM PUBLIC.FAMILY /* PUBLIC.PRIMARY_KEY_7: ID = 1 */ /* WHERE FAMILY.ID = 1 */ INNER JOIN PUBLIC.PARENT /* PUBLIC.PRIMARY_KEY_8: ID = FAMILY.PARENTID */ ON 1=1 /* WHERE FAMILY.PARENTID = PARENT.ID */ INNER JOIN PUBLIC.CHILD A /* PUBLIC.PRIMARY_KEY_3: ID = AID */ ON 1=1 /* WHERE AID = A.ID */ INNER JOIN PUBLIC.CHILD B /* PUBLIC.PRIMARY_KEY_3: ID = BID */ ON 1=1 /* WHERE BID = B.ID */ INNER JOIN PUBLIC.CHILD C /* PUBLIC.PRIMARY_KEY_3: ID = CID */ ON 1=1 /* WHERE CID = C.ID */ INNER JOIN PUBLIC.CHILD D /* PUBLIC.PRIMARY_KEY_3: ID = DID */ ON 1=1 /* WHERE DID = D.ID */ INNER JOIN PUBLIC.CHILD E /* PUBLIC.PRIMARY_KEY_3: ID = EID */ ON 1=1 /* WHERE EID = E.ID */ INNER JOIN PUBLIC.CHILD F /* PUBLIC.PRIMARY_KEY_3: ID = FID */ ON 1=1 /* WHERE FID = F.ID */ INNER JOIN PUBLIC.CHILD G /* PUBLIC.PRIMARY_KEY_3: ID = GID */ ON 1=1 WHERE (GID = G.ID) AND ((FID = F.ID) AND ((EID = E.ID) AND ((DID = D.ID) AND ((CID = C.ID) AND ((BID = B.ID) AND 
((AID = A.ID) AND ((FAMILY.ID = 1) AND (FAMILY.PARENTID = PARENT.ID)))))))) -> rows: 1 - -DROP TABLE FAMILY; -> ok - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - ---- is null / not is null --------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT UNIQUE, NAME VARCHAR CHECK LENGTH(NAME)>3); -> ok - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID INT, NAME VARCHAR(255), B INT); -> ok - -CREATE UNIQUE INDEX IDXNAME ON TEST(NAME); -> ok - -CREATE UNIQUE INDEX IDX_NAME_B ON TEST(NAME, B); -> ok - -INSERT INTO TEST(ID, NAME, B) VALUES (0, NULL, NULL); -> update count: 1 - -INSERT INTO TEST(ID, NAME, B) VALUES (1, 'Hello', NULL); -> update count: 1 - -INSERT INTO TEST(ID, NAME, B) VALUES (2, NULL, NULL); -> update count: 1 - -INSERT INTO TEST(ID, NAME, B) VALUES (3, 'World', NULL); -> update count: 1 - -select * from test; -> ID NAME B -> -- ----- ---- -> 0 null null -> 1 Hello null -> 2 null null -> 3 World null -> rows: 4 - -UPDATE test SET name='Hi'; -> exception - -select * from test; -> ID NAME B -> -- ----- ---- -> 0 null null -> 1 Hello null -> 2 null null -> 3 World null -> rows: 4 - -UPDATE test SET name=NULL; -> update count: 4 - -UPDATE test SET B=1; -> update count: 4 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID INT, NAME VARCHAR); -> ok - -INSERT INTO TEST VALUES(NULL, NULL), (0, 'Hello'), (1, 'World'); -> update count: 3 - -SELECT * FROM TEST WHERE NOT (1=1); -> ID NAME -> -- ---- -> rows: 0 - -DROP TABLE TEST; -> ok - -create table test_null(a int, b int); -> ok - -insert into test_null values(0, 0); -> update count: 1 - -insert into test_null values(0, null); -> update count: 1 - -insert into test_null values(null, null); -> update count: 1 - -insert into test_null values(null, 0); -> update count: 1 - -select * from test_null where a=0; -> A B -> - ---- -> 0 0 -> 0 null -> rows: 2 - -select * from test_null where not a=0; -> A B -> - - -> rows: 0 - -select * from test_null where 
(a=0 or b=0); -> A B -> ---- ---- -> 0 0 -> 0 null -> null 0 -> rows: 3 - -select * from test_null where not (a=0 or b=0); -> A B -> - - -> rows: 0 - -select * from test_null where (a=1 or b=0); -> A B -> ---- - -> 0 0 -> null 0 -> rows: 2 - -select * from test_null where not( a=1 or b=0); -> A B -> - - -> rows: 0 - -select * from test_null where not(not( a=1 or b=0)); -> A B -> ---- - -> 0 0 -> null 0 -> rows: 2 - -select * from test_null where a=0 or b=0; -> A B -> ---- ---- -> 0 0 -> 0 null -> null 0 -> rows: 3 - -SELECT count(*) FROM test_null WHERE not ('X'=null and 1=0); -> COUNT(*) -> -------- -> 4 -> rows: 1 - -drop table if exists test_null; -> ok - ---- function alias --------------------------------------------------------------------------------------------- -CREATE ALIAS MY_SQRT FOR "java.lang.Math.sqrt"; -> ok - -SELECT MY_SQRT(2.0) MS, SQRT(2.0); -> MS 1.4142135623730951 -> ------------------ ------------------ -> 1.4142135623730951 1.4142135623730951 -> rows: 1 - -SELECT MY_SQRT(SUM(X)), SUM(X), MY_SQRT(55) FROM SYSTEM_RANGE(1, 10); -> PUBLIC.MY_SQRT(SUM(X)) SUM(X) PUBLIC.MY_SQRT(55) -> ---------------------- ------ ------------------ -> 7.416198487095663 55 7.416198487095663 -> rows: 1 - -SELECT MY_SQRT(-1.0) MS, SQRT(NULL) S; -> MS S -> --- ---- -> NaN null -> rows: 1 - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> ------------------------------------------------------------ -> CREATE FORCE ALIAS PUBLIC.MY_SQRT FOR "java.lang.Math.sqrt"; -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 2 - -SELECT ALIAS_NAME, JAVA_CLASS, JAVA_METHOD, DATA_TYPE, COLUMN_COUNT, RETURNS_RESULT, REMARKS FROM INFORMATION_SCHEMA.FUNCTION_ALIASES; -> ALIAS_NAME JAVA_CLASS JAVA_METHOD DATA_TYPE COLUMN_COUNT RETURNS_RESULT REMARKS -> ---------- -------------- ----------- --------- ------------ -------------- ------- -> MY_SQRT java.lang.Math sqrt 8 1 2 -> rows: 1 - -DROP ALIAS MY_SQRT; -> ok - ---- schema 
---------------------------------------------------------------------------------------------- -SELECT DISTINCT TABLE_SCHEMA, TABLE_CATALOG FROM INFORMATION_SCHEMA.TABLES ORDER BY TABLE_SCHEMA; -> TABLE_SCHEMA TABLE_CATALOG -> ------------------ ------------- -> INFORMATION_SCHEMA SCRIPT -> rows (ordered): 1 - -SELECT * FROM INFORMATION_SCHEMA.SCHEMATA; -> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME IS_DEFAULT REMARKS ID -> ------------ ------------------ ------------ -------------------------- ---------------------- ---------- ------- -- -> SCRIPT INFORMATION_SCHEMA SA Unicode OFF FALSE -1 -> SCRIPT PUBLIC SA Unicode OFF TRUE 0 -> rows: 2 - -SELECT * FROM INFORMATION_SCHEMA.CATALOGS; -> CATALOG_NAME -> ------------ -> SCRIPT -> rows: 1 - -SELECT INFORMATION_SCHEMA.SCHEMATA.SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA; -> SCHEMA_NAME -> ------------------ -> INFORMATION_SCHEMA -> PUBLIC -> rows: 2 - -SELECT INFORMATION_SCHEMA.SCHEMATA.* FROM INFORMATION_SCHEMA.SCHEMATA; -> CATALOG_NAME SCHEMA_NAME SCHEMA_OWNER DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME IS_DEFAULT REMARKS ID -> ------------ ------------------ ------------ -------------------------- ---------------------- ---------- ------- -- -> SCRIPT INFORMATION_SCHEMA SA Unicode OFF FALSE -1 -> SCRIPT PUBLIC SA Unicode OFF TRUE 0 -> rows: 2 - -CREATE SCHEMA TEST_SCHEMA AUTHORIZATION SA; -> ok - -DROP SCHEMA TEST_SCHEMA; -> ok - -create schema Contact_Schema AUTHORIZATION SA; -> ok - -CREATE TABLE Contact_Schema.Address ( -address_id BIGINT NOT NULL -CONSTRAINT address_id_check -CHECK (address_id > 0), -address_type VARCHAR(20) NOT NULL -CONSTRAINT address_type -CHECK (address_type in ('postal','email','web')), -CONSTRAINT X_PKAddress -PRIMARY KEY (address_id) -); -> ok - -create schema ClientServer_Schema AUTHORIZATION SA; -> ok - -CREATE TABLE ClientServer_Schema.PrimaryKey_Seq ( -sequence_name VARCHAR(100) NOT NULL, -seq_number BIGINT NOT NULL, -CONSTRAINT 
X_PKPrimaryKey_Seq -PRIMARY KEY (sequence_name) -); -> ok - -alter table Contact_Schema.Address add constraint abc foreign key(address_id) -references ClientServer_Schema.PrimaryKey_Seq(seq_number); -> ok - -drop table ClientServer_Schema.PrimaryKey_Seq; -> ok - -drop table Contact_Schema.Address; -> ok - -drop schema Contact_Schema; -> ok - -drop schema ClientServer_Schema; -> ok - ---- alter table add / drop / rename column ---------------------------------------------------------------------------------------------- -CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY); -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 4 - -ALTER TABLE TEST ADD CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL; -> ok - -ALTER TABLE TEST ADD NAME VARCHAR(255) NULL BEFORE CREATEDATE; -> ok - -CREATE INDEX IDXNAME ON TEST(NAME); -> ok - -INSERT INTO TEST(ID, NAME) VALUES(1, 'Hi'); -> update count: 1 - -ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; -> ok - -ALTER TABLE TEST ALTER COLUMN NAME SET NOT NULL; -> ok - -ALTER TABLE TEST ALTER COLUMN NAME SET NULL; -> ok - -ALTER TABLE TEST ALTER COLUMN NAME SET NULL; -> ok - -ALTER TABLE TEST ALTER COLUMN NAME SET DEFAULT 1; -> ok - -SELECT * FROM TEST; -> ID NAME CREATEDATE -> -- ---- ---------- -> 1 Hi 2001-01-01 -> rows: 1 - -ALTER TABLE TEST ADD MODIFY_DATE TIMESTAMP; -> ok - -CREATE MEMORY TABLE TEST_SEQ(ID INT, NAME VARCHAR); -> ok - -INSERT INTO TEST_SEQ VALUES(-1, '-1'); -> update count: 1 - -ALTER TABLE TEST_SEQ ALTER COLUMN ID IDENTITY; -> ok - -INSERT INTO TEST_SEQ VALUES(NULL, '1'); -> update count: 1 - -ALTER TABLE TEST_SEQ ALTER COLUMN ID RESTART WITH 10; -> ok - -INSERT INTO TEST_SEQ VALUES(NULL, '10'); -> 
update count: 1 - -alter table test_seq drop primary key; -> ok - -ALTER TABLE TEST_SEQ ALTER COLUMN ID INT DEFAULT 20; -> ok - -INSERT INTO TEST_SEQ VALUES(DEFAULT, '20'); -> update count: 1 - -ALTER TABLE TEST_SEQ ALTER COLUMN NAME RENAME TO DATA; -> ok - -SELECT * FROM TEST_SEQ ORDER BY ID; -> ID DATA -> -- ---- -> -1 -1 -> 1 1 -> 10 10 -> 20 20 -> rows (ordered): 4 - -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; -> SCRIPT -> -------------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST_SEQ; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE INDEX PUBLIC.IDXNAME ON PUBLIC.TEST(NAME); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, NAME VARCHAR(255) DEFAULT 1, CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, MODIFY_DATE TIMESTAMP ); -> CREATE MEMORY TABLE PUBLIC.TEST_SEQ( ID INT DEFAULT 20 NOT NULL, DATA VARCHAR ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, NAME, CREATEDATE, MODIFY_DATE) VALUES(1, 'Hi', '2001-01-01', NULL); -> INSERT INTO PUBLIC.TEST_SEQ(ID, DATA) VALUES(-1, '-1'); -> INSERT INTO PUBLIC.TEST_SEQ(ID, DATA) VALUES(1, '1'); -> INSERT INTO PUBLIC.TEST_SEQ(ID, DATA) VALUES(10, '10'); -> INSERT INTO PUBLIC.TEST_SEQ(ID, DATA) VALUES(20, '20'); -> rows: 12 - -CREATE UNIQUE INDEX IDX_NAME_ID ON TEST(ID, NAME); -> ok - -ALTER TABLE TEST DROP COLUMN NAME; -> exception - -DROP INDEX IDX_NAME_ID; -> ok - -DROP INDEX IDX_NAME_ID IF EXISTS; -> ok - -ALTER TABLE TEST DROP NAME; -> ok - -DROP TABLE TEST_SEQ; -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> --------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 
PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, MODIFY_DATE TIMESTAMP ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, CREATEDATE, MODIFY_DATE) VALUES (1, '2001-01-01', NULL); -> rows: 5 - -ALTER TABLE TEST ADD NAME VARCHAR(255) NULL BEFORE CREATEDATE; -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> ---------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, NAME VARCHAR(255), CREATEDATE VARCHAR(255) DEFAULT '2001-01-01' NOT NULL, MODIFY_DATE TIMESTAMP ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, NAME, CREATEDATE, MODIFY_DATE) VALUES (1, NULL, '2001-01-01', NULL); -> rows: 5 - -UPDATE TEST SET NAME = 'Hi'; -> update count: 1 - -INSERT INTO TEST VALUES(2, 'Hello', DEFAULT, DEFAULT); -> update count: 1 - -SELECT * FROM TEST; -> ID NAME CREATEDATE MODIFY_DATE -> -- ----- ---------- ----------- -> 1 Hi 2001-01-01 null -> 2 Hello 2001-01-01 null -> rows: 2 - -DROP TABLE TEST; -> ok - ---- autoIncrement ---------------------------------------------------------------------------------------------- -CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, NAME VARCHAR ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 4 - -INSERT INTO TEST(ID, NAME) VALUES(1, 'Hi'), (2, 'World'); -> update count: 2 - -SELECT * FROM TEST; -> ID NAME -> -- 
----- -> 1 Hi -> 2 World -> rows: 2 - -SELECT * FROM TEST WHERE ? IS NULL; -{ -Hello -> ID NAME -> -- ---- -> rows: 0 -}; -> update count: 0 - -DROP TABLE TEST; -> ok - ---- limit/offset ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'World'), (3, 'with'), (4, 'limited'), (5, 'resources'); -> update count: 5 - -SELECT TOP 2 * FROM TEST ORDER BY ID; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -SELECT LIMIT (0+0) (2+0) * FROM TEST ORDER BY ID; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -SELECT LIMIT (1+0) (2+0) NAME, -ID, ID _ID_ FROM TEST ORDER BY _ID_; -> NAME - ID _ID_ -> ----- ---- ---- -> World -2 2 -> with -3 3 -> rows (ordered): 2 - -EXPLAIN SELECT LIMIT (1+0) (2+0) * FROM TEST ORDER BY ID; -> PLAN -> -------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2 */ ORDER BY 1 LIMIT 2 OFFSET 1 /* index sorted */ -> rows (ordered): 1 - -SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; -> ID NAME -> -- ----- -> 2 World -> 3 with -> rows (ordered): 2 - -SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> rows (ordered): 2 - -SELECT ID FROM TEST GROUP BY ID UNION ALL SELECT ID FROM TEST GROUP BY ID; -> ID -> -- -> 1 -> 1 -> 2 -> 2 -> 3 -> 3 -> 4 -> 4 -> 5 -> 5 -> rows: 10 - -SELECT * FROM (SELECT ID FROM TEST GROUP BY ID); -> ID -> -- -> 1 -> 2 -> 3 -> 4 -> 5 -> rows: 5 - -EXPLAIN SELECT * FROM TEST UNION ALL SELECT * FROM TEST ORDER BY ID LIMIT 2+0 OFFSET 1+0; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT 
TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) UNION ALL (SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */) ORDER BY 1 LIMIT 2 OFFSET 1 -> rows (ordered): 1 - -EXPLAIN DELETE FROM TEST WHERE ID=1; -> PLAN -> ----------------------------------------------------------------------- -> DELETE FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1 -> rows: 1 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST2COL(A INT, B INT, C VARCHAR(255), PRIMARY KEY(A, B)); -> ok - -INSERT INTO TEST2COL VALUES(0, 0, 'Hallo'), (0, 1, 'Welt'), (1, 0, 'Hello'), (1, 1, 'World'); -> update count: 4 - -SELECT * FROM TEST2COL WHERE A=0 AND B=0; -> A B C -> - - ----- -> 0 0 Hallo -> rows: 1 - -EXPLAIN SELECT * FROM TEST2COL WHERE A=0 AND B=0; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST2COL.A, TEST2COL.B, TEST2COL.C FROM PUBLIC.TEST2COL /* PUBLIC.PRIMARY_KEY_E: A = 0 AND B = 0 */ WHERE ((A = 0) AND (B = 0)) AND (A = B) -> rows: 1 - -SELECT * FROM TEST2COL WHERE A=0; -> A B C -> - - ----- -> 0 0 Hallo -> 0 1 Welt -> rows: 2 - -EXPLAIN SELECT * FROM TEST2COL WHERE A=0; -> PLAN -> ------------------------------------------------------------------------------------------------------------ -> SELECT TEST2COL.A, TEST2COL.B, TEST2COL.C FROM PUBLIC.TEST2COL /* PUBLIC.PRIMARY_KEY_E: A = 0 */ WHERE A = 0 -> rows: 1 - -SELECT * FROM TEST2COL WHERE B=0; -> A B C -> - - ----- -> 0 0 Hallo -> 1 0 Hello -> rows: 2 - -EXPLAIN SELECT * FROM TEST2COL WHERE B=0; -> PLAN -> ---------------------------------------------------------------------------------------------------------- -> SELECT TEST2COL.A, TEST2COL.B, TEST2COL.C FROM PUBLIC.TEST2COL /* PUBLIC.TEST2COL.tableScan */ WHERE B = 0 -> rows: 1 - -DROP TABLE TEST2COL; -> ok - ---- testCases ---------------------------------------------------------------------------------------------- 
-CREATE TABLE t_1 (ch CHARACTER(10), dec DECIMAL(10,2), do DOUBLE, lo BIGINT, "IN" INTEGER, sm SMALLINT, ty TINYINT, -da DATE DEFAULT CURRENT_DATE, ti TIME DEFAULT CURRENT_TIME, ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); -> ok - -INSERT INTO T_1 (ch, dec, do) VALUES ('name', 10.23, 0); -> update count: 1 - -SELECT COUNT(*) FROM T_1; -> COUNT(*) -> -------- -> 1 -> rows: 1 - -DROP TABLE T_1; -> ok - ---- rights ---------------------------------------------------------------------------------------------- -CREATE USER TEST_USER PASSWORD '123'; -> ok - -CREATE TABLE TEST(ID INT); -> ok - -CREATE ROLE TEST_ROLE; -> ok - -CREATE ROLE IF NOT EXISTS TEST_ROLE; -> ok - -GRANT SELECT, INSERT ON TEST TO TEST_USER; -> ok - -GRANT UPDATE ON TEST TO TEST_ROLE; -> ok - -GRANT TEST_ROLE TO TEST_USER; -> ok - -SELECT NAME FROM INFORMATION_SCHEMA.ROLES; -> NAME -> --------- -> PUBLIC -> TEST_ROLE -> rows: 2 - -SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_SCHEMA, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; -> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME -> --------- ----------- ----------- -------------- ------------ ---------- -> TEST_ROLE ROLE UPDATE PUBLIC TEST -> TEST_USER USER SELECT, INSERT PUBLIC TEST -> TEST_USER USER TEST_ROLE -> rows: 3 - -SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; -> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE -> ------- --------- ------------- ------------ ---------- -------------- ------------ -> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO -> null TEST_USER SCRIPT PUBLIC TEST INSERT NO -> null TEST_USER SCRIPT PUBLIC TEST SELECT NO -> rows: 3 - -SELECT * FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES; -> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE -> ------- --------- ------------- ------------ ---------- ----------- -------------- ------------ -> null TEST_ROLE SCRIPT PUBLIC TEST ID UPDATE NO -> null TEST_USER SCRIPT PUBLIC TEST ID 
INSERT NO -> null TEST_USER SCRIPT PUBLIC TEST ID SELECT NO -> rows: 3 - -REVOKE INSERT ON TEST FROM TEST_USER; -> ok - -REVOKE TEST_ROLE FROM TEST_USER; -> ok - -SELECT GRANTEE, GRANTEETYPE, GRANTEDROLE, RIGHTS, TABLE_NAME FROM INFORMATION_SCHEMA.RIGHTS; -> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_NAME -> --------- ----------- ----------- ------ ---------- -> TEST_ROLE ROLE UPDATE TEST -> TEST_USER USER SELECT TEST -> rows: 2 - -SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES; -> GRANTOR GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE -> ------- --------- ------------- ------------ ---------- -------------- ------------ -> null TEST_ROLE SCRIPT PUBLIC TEST UPDATE NO -> null TEST_USER SCRIPT PUBLIC TEST SELECT NO -> rows: 2 - -DROP USER TEST_USER; -> ok - -DROP TABLE TEST; -> ok - -DROP ROLE TEST_ROLE; -> ok - -SELECT * FROM INFORMATION_SCHEMA.ROLES; -> NAME REMARKS ID -> ------ ------- -- -> PUBLIC 0 -> rows: 1 - -SELECT * FROM INFORMATION_SCHEMA.RIGHTS; -> GRANTEE GRANTEETYPE GRANTEDROLE RIGHTS TABLE_SCHEMA TABLE_NAME ID -> ------- ----------- ----------- ------ ------------ ---------- -- -> rows: 0 - ---- plan ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(?, ?); -{ -1, Hello -2, World -3, Peace -}; -> update count: 3 - -EXPLAIN INSERT INTO TEST VALUES(1, 'Test'); -> PLAN -> ---------------------------------------------------- -> INSERT INTO PUBLIC.TEST(ID, NAME) VALUES (1, 'Test') -> rows: 1 - -EXPLAIN INSERT INTO TEST VALUES(1, 'Test'), (2, 'World'); -> PLAN -> ------------------------------------------------------------------ -> INSERT INTO PUBLIC.TEST(ID, NAME) VALUES (1, 'Test'), (2, 'World') -> rows: 1 - -EXPLAIN INSERT INTO TEST SELECT DISTINCT ID+1, NAME FROM TEST; -> PLAN -> ------------------------------------------------------------------------------------------------------------- 
-> INSERT INTO PUBLIC.TEST(ID, NAME) SELECT DISTINCT (ID + 1), NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -EXPLAIN SELECT DISTINCT ID + 1, NAME FROM TEST; -> PLAN -> --------------------------------------------------------------------------- -> SELECT DISTINCT (ID + 1), NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -EXPLAIN SELECT * FROM TEST WHERE 1=0; -> PLAN -> ----------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan: FALSE */ WHERE FALSE -> rows: 1 - -EXPLAIN SELECT TOP 1 * FROM TEST FOR UPDATE; -> PLAN -> ----------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ LIMIT 1 FOR UPDATE -> rows: 1 - -EXPLAIN SELECT COUNT(NAME) FROM TEST WHERE ID=1; -> PLAN -> ----------------------------------------------------------------------------------- -> SELECT COUNT(NAME) FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1 -> rows: 1 - -EXPLAIN SELECT * FROM TEST WHERE (ID>=1 AND ID<=2) OR (ID>0 AND ID<3) AND (ID<>6) ORDER BY NAME NULLS FIRST, 1 NULLS LAST, (1+1) DESC; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ WHERE ((ID >= 1) AND (ID <= 2)) OR ((ID <> 6) AND ((ID > 0) AND (ID < 3))) ORDER BY 2 NULLS FIRST, 1 NULLS LAST, =2 DESC -> rows (ordered): 1 - -EXPLAIN SELECT * FROM TEST WHERE ID=1 GROUP BY NAME, ID; -> PLAN -> ------------------------------------------------------------------------------------------------------------ -> SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1 GROUP BY NAME, ID -> rows: 1 - -EXPLAIN PLAN FOR UPDATE TEST SET 
NAME='Hello', ID=1 WHERE NAME LIKE 'T%' ESCAPE 'x'; -> PLAN -> --------------------------------------------------------------------------------------------------------- -> UPDATE PUBLIC.TEST /* PUBLIC.TEST.tableScan */ SET NAME = 'Hello', ID = 1 WHERE NAME LIKE 'T%' ESCAPE 'x' -> rows: 1 - -EXPLAIN PLAN FOR DELETE FROM TEST; -> PLAN -> --------------------------------------------------- -> DELETE FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -EXPLAIN PLAN FOR SELECT NAME, COUNT(*) FROM TEST GROUP BY NAME HAVING COUNT(*) > 1; -> PLAN -> ---------------------------------------------------------------------------------------------------- -> SELECT NAME, COUNT(*) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ GROUP BY NAME HAVING COUNT(*) > 1 -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM test t1 inner join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME, T2.ID, T2.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ INNER JOIN PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID AND ID = T1.ID */ ON 1=1 WHERE (T1.ID = 1) AND ((T2.NAME IS NOT NULL) AND (T1.ID = T2.ID)) -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME, T2.ID, T2.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ LEFT OUTER JOIN PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID 
= T1.ID */ ON (T2.NAME IS NOT NULL) AND (T1.ID = T2.ID) WHERE T1.ID = 1 -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is null where t1.id=1; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME, T2.ID, T2.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ /* WHERE T1.ID = 1 */ LEFT OUTER JOIN PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T1.ID */ ON (T2.NAME IS NULL) AND (T1.ID = T2.ID) WHERE T1.ID = 1 -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE EXISTS(SELECT * FROM TEST T2 WHERE T1.ID-1 = T2.ID); -> PLAN -> ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE EXISTS( SELECT T2.ID, T2.NAME FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = (T1.ID - 1) */ WHERE (T1.ID - 1) = T2.ID) -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(1, 2); -> PLAN -> --------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID IN(1, 2) */ WHERE ID IN(1, 2) -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID IN(SELECT ID FROM TEST); -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.PRIMARY_KEY_2: ID IN(SELECT ID FROM PUBLIC.TEST /++ PUBLIC.TEST.tableScan ++/) */ WHERE ID IN( SELECT ID FROM PUBLIC.TEST /* 
PUBLIC.TEST.tableScan */) -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM TEST T1 WHERE ID NOT IN(SELECT ID FROM TEST); -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT T1.ID, T1.NAME FROM PUBLIC.TEST T1 /* PUBLIC.TEST.tableScan */ WHERE NOT (ID IN( SELECT ID FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */)) -> rows: 1 - -EXPLAIN PLAN FOR SELECT CAST(ID AS VARCHAR(255)) FROM TEST; -> PLAN -> ---------------------------------------------------------------------------- -> SELECT CAST(ID AS VARCHAR(255)) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -EXPLAIN PLAN FOR SELECT LEFT(NAME, 2) FROM TEST; -> PLAN -> ----------------------------------------------------------------- -> SELECT LEFT(NAME, 2) FROM PUBLIC.TEST /* PUBLIC.TEST.tableScan */ -> rows: 1 - -EXPLAIN PLAN FOR SELECT * FROM SYSTEM_RANGE(1, 20); -> PLAN -> ----------------------------------------------------------------------- -> SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 20) /* PUBLIC.RANGE_INDEX */ -> rows: 1 - -SELECT * FROM test t1 inner join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 Hello -> rows: 1 - -SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is not null where t1.id=1; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 Hello -> rows: 1 - -SELECT * FROM test t1 left outer join test t2 on t1.id=t2.id and t2.name is null where t1.id=1; -> ID NAME ID NAME -> -- ----- ---- ---- -> 1 Hello null null -> rows: 1 - -DROP TABLE TEST; -> ok - ---- union ---------------------------------------------------------------------------------------------- -SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER BY 1; -> X -> - -> 1 -> 1 -> 2 -> 2 -> rows (ordered): 4 - -EXPLAIN (SELECT * FROM SYSTEM_RANGE(1,2) UNION ALL SELECT * FROM SYSTEM_RANGE(1,2) ORDER BY 1); -> PLAN 
-> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */) UNION ALL (SELECT SYSTEM_RANGE.X FROM SYSTEM_RANGE(1, 2) /* PUBLIC.RANGE_INDEX */) ORDER BY 1 -> rows (ordered): 1 - -CREATE TABLE CHILDREN(ID INT PRIMARY KEY, NAME VARCHAR(255), CLASS INT); -> ok - -CREATE TABLE CLASSES(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO CHILDREN VALUES(?, ?, ?); -{ -0, Joe, 0 -1, Anne, 1 -2, Joerg, 1 -3, Petra, 2 -}; -> update count: 4 - -INSERT INTO CLASSES VALUES(?, ?); -{ -0, Kindergarden -1, Class 1 -2, Class 2 -3, Class 3 -4, Class 4 -}; -> update count: 5 - -SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UPDATE; -> ID NAME CLASS -> -- ----- ----- -> 0 Joe 0 -> 0 Joe 0 -> 1 Anne 1 -> 1 Anne 1 -> 2 Joerg 1 -> 2 Joerg 1 -> 3 Petra 2 -> 3 Petra 2 -> rows (ordered): 8 - -EXPLAIN SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN ORDER BY ID, NAME FOR UPDATE; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) UNION ALL (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */ FOR UPDATE) ORDER BY 1, 2 FOR UPDATE -> rows (ordered): 1 - -SELECT 'Child', ID, NAME FROM CHILDREN UNION SELECT 'Class', ID, NAME FROM CLASSES; -> 'Child' ID NAME -> ------- -- ------------ -> Child 0 Joe -> Child 1 Anne -> Child 2 Joerg -> Child 3 Petra -> Class 0 Kindergarden -> Class 1 Class1 -> Class 2 Class2 -> Class 3 Class3 -> Class 4 Class4 -> rows: 9 - -EXPLAIN SELECT 'Child', 
ID, NAME FROM CHILDREN UNION SELECT 'Class', ID, NAME FROM CLASSES; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT 'Child', ID, NAME FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */) UNION (SELECT 'Class', ID, NAME FROM PUBLIC.CLASSES /* PUBLIC.CLASSES.tableScan */) -> rows: 1 - -SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; -> ID NAME CLASS -> -- ----- ----- -> 1 Anne 1 -> 2 Joerg 1 -> 3 Petra 2 -> rows: 3 - -EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */ WHERE CLASS = 0) -> rows: 1 - -EXPLAIN SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; -> PLAN -> -------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */) INTERSECT (SELECT ID FROM PUBLIC.CLASSES /* PUBLIC.CLASSES.tableScan */) -> rows: 1 - -SELECT CLASS FROM CHILDREN INTERSECT SELECT ID FROM CLASSES; -> CLASS -> ----- -> 0 -> 1 -> 2 -> rows: 3 - -EXPLAIN SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE CLASS=0; -> PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> (SELECT CHILDREN.ID, CHILDREN.NAME, 
CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */) EXCEPT (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /* PUBLIC.CHILDREN.tableScan */ WHERE CLASS = 0) -> rows: 1 - -SELECT * FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; -> ID NAME CLASS ID NAME -> -- ----- ----- -- ------------ -> 0 Joe 0 0 Kindergarden -> 1 Anne 1 1 Class1 -> 2 Joerg 1 1 Class1 -> 3 Petra 2 2 Class2 -> rows: 4 - -SELECT CH.ID CH_ID, CH.NAME CH_NAME, CL.ID CL_ID, CL.NAME CL_NAME FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; -> CH_ID CH_NAME CL_ID CL_NAME -> ----- ------- ----- ------------ -> 0 Joe 0 Kindergarden -> 1 Anne 1 Class1 -> 2 Joerg 1 Class1 -> 3 Petra 2 Class2 -> rows: 4 - -CREATE VIEW CHILDREN_CLASSES(CH_ID, CH_NAME, CL_ID, CL_NAME) AS -SELECT CH.ID CH_ID1, CH.NAME CH_NAME2, CL.ID CL_ID3, CL.NAME CL_NAME4 -FROM CHILDREN CH, CLASSES CL WHERE CH.CLASS = CL.ID; -> ok - -SELECT * FROM CHILDREN_CLASSES WHERE CH_NAME <> 'X'; -> CH_ID CH_NAME CL_ID CL_NAME -> ----- ------- ----- ------------ -> 0 Joe 0 Kindergarden -> 1 Anne 1 Class1 -> 2 Joerg 1 Class1 -> 3 Petra 2 Class2 -> rows: 4 - -CREATE VIEW CHILDREN_CLASS1 AS SELECT * FROM CHILDREN_CLASSES WHERE CL_ID=1; -> ok - -SELECT * FROM CHILDREN_CLASS1; -> CH_ID CH_NAME CL_ID CL_NAME -> ----- ------- ----- ------- -> 1 Anne 1 Class1 -> 2 Joerg 1 Class1 -> rows: 2 - -CREATE VIEW CHILDREN_CLASS2 AS SELECT * FROM CHILDREN_CLASSES WHERE CL_ID=2; -> ok - -SELECT * FROM CHILDREN_CLASS2; -> CH_ID CH_NAME CL_ID CL_NAME -> ----- ------- ----- ------- -> 3 Petra 2 Class2 -> rows: 1 - -CREATE VIEW CHILDREN_CLASS12 AS SELECT * FROM CHILDREN_CLASS1 UNION ALL SELECT * FROM CHILDREN_CLASS1; -> ok - -SELECT * FROM CHILDREN_CLASS12; -> CH_ID CH_NAME CL_ID CL_NAME -> ----- ------- ----- ------- -> 1 Anne 1 Class1 -> 1 Anne 1 Class1 -> 2 Joerg 1 Class1 -> 2 Joerg 1 Class1 -> rows: 4 - -DROP VIEW CHILDREN_CLASS2; -> ok - -DROP VIEW CHILDREN_CLASS1 cascade; -> ok - -DROP VIEW CHILDREN_CLASSES; -> ok - -DROP 
VIEW CHILDREN_CLASS12; -> exception - -CREATE VIEW V_UNION AS SELECT * FROM CHILDREN UNION ALL SELECT * FROM CHILDREN; -> ok - -SELECT * FROM V_UNION WHERE ID=1; -> ID NAME CLASS -> -- ---- ----- -> 1 Anne 1 -> 1 Anne 1 -> rows: 2 - -EXPLAIN SELECT * FROM V_UNION WHERE ID=1; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT V_UNION.ID, V_UNION.NAME, V_UNION.CLASS FROM PUBLIC.V_UNION /* (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1) UNION ALL (SELECT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1): ID = 1 */ WHERE ID = 1 -> rows: 1 - -CREATE VIEW V_EXCEPT AS SELECT * FROM CHILDREN EXCEPT SELECT * FROM CHILDREN WHERE ID=2; -> ok - -SELECT * FROM V_EXCEPT WHERE ID=1; -> ID NAME CLASS -> -- ---- ----- -> 1 Anne 1 -> rows: 1 - -EXPLAIN SELECT * FROM V_EXCEPT WHERE ID=1; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT V_EXCEPT.ID, V_EXCEPT.NAME, V_EXCEPT.CLASS FROM PUBLIC.V_EXCEPT /* (SELECT DISTINCT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID 
IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CHILDREN.ID IS ?1) EXCEPT (SELECT DISTINCT CHILDREN.ID, CHILDREN.NAME, CHILDREN.CLASS FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID = 2 ++/ /++ scanCount: 2 ++/ WHERE ID = 2): ID = 1 */ WHERE ID = 1 -> rows: 1 - -CREATE VIEW V_INTERSECT AS SELECT ID, NAME FROM CHILDREN INTERSECT SELECT * FROM CLASSES; -> ok - -SELECT * FROM V_INTERSECT WHERE ID=1; -> ID NAME -> -- ---- -> rows: 0 - -EXPLAIN SELECT * FROM V_INTERSECT WHERE ID=1; -> PLAN -> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT V_INTERSECT.ID, V_INTERSECT.NAME FROM PUBLIC.V_INTERSECT /* (SELECT DISTINCT ID, NAME FROM PUBLIC.CHILDREN /++ PUBLIC.PRIMARY_KEY_9: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE ID IS ?1) INTERSECT (SELECT DISTINCT CLASSES.ID, CLASSES.NAME FROM PUBLIC.CLASSES /++ PUBLIC.PRIMARY_KEY_5: ID IS ?1 ++/ /++ scanCount: 2 ++/ WHERE CLASSES.ID IS ?1): ID = 1 */ WHERE ID = 1 -> rows: 1 - -DROP VIEW V_UNION; -> ok - -DROP VIEW V_EXCEPT; -> ok - -DROP VIEW V_INTERSECT; -> ok - -DROP TABLE CHILDREN; -> ok - -DROP TABLE CLASSES; -> ok - ---- view ---------------------------------------------------------------------------------------------- -CREATE CACHED TABLE TEST_A(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE CACHED TABLE TEST_B(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -SELECT A.ID AID, A.NAME A_NAME, B.ID BID, B.NAME B_NAME FROM TEST_A A INNER JOIN TEST_B B WHERE A.ID = B.ID; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> rows: 0 - -CREATE VIEW IF NOT EXISTS TEST_ALL AS SELECT A.ID AID, A.NAME A_NAME, B.ID BID, B.NAME B_NAME FROM TEST_A A, TEST_B B WHERE A.ID = B.ID; -> ok - -SELECT COUNT(*) FROM TEST_ALL; -> 
COUNT(*) -> -------- -> 0 -> rows: 1 - -CREATE VIEW IF NOT EXISTS TEST_ALL AS -SELECT * FROM TEST_A; -> ok - -INSERT INTO TEST_A VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST_B VALUES(1, 'Hallo'); -> update count: 1 - -INSERT INTO TEST_A VALUES(2, 'World'); -> update count: 1 - -INSERT INTO TEST_B VALUES(2, 'Welt'); -> update count: 1 - -INSERT INTO TEST_A VALUES(3, 'Record'); -> update count: 1 - -INSERT INTO TEST_B VALUES(3, 'Rekord'); -> update count: 1 - -SELECT * FROM TEST_ALL; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 1 Hello 1 Hallo -> 2 World 2 Welt -> 3 Record 3 Rekord -> rows: 3 - -SELECT * FROM TEST_ALL WHERE AID=1; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 1 Hello 1 Hallo -> rows: 1 - -SELECT * FROM TEST_ALL WHERE AID>0; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 1 Hello 1 Hallo -> 2 World 2 Welt -> 3 Record 3 Rekord -> rows: 3 - -SELECT * FROM TEST_ALL WHERE AID<2; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 1 Hello 1 Hallo -> rows: 1 - -SELECT * FROM TEST_ALL WHERE AID<=2; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 1 Hello 1 Hallo -> 2 World 2 Welt -> rows: 2 - -SELECT * FROM TEST_ALL WHERE AID>=2; -> AID A_NAME BID B_NAME -> --- ------ --- ------ -> 2 World 2 Welt -> 3 Record 3 Rekord -> rows: 2 - -CREATE VIEW TEST_A_SUB AS SELECT * FROM TEST_A WHERE ID < 2; -> ok - -SELECT TABLE_NAME, SQL FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; -> TABLE_NAME SQL -> ---------- -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> TEST_ALL CREATE FORCE VIEW PUBLIC.TEST_ALL(AID, A_NAME, BID, B_NAME) AS SELECT A.ID AS AID, A.NAME AS A_NAME, B.ID AS BID, B.NAME AS B_NAME FROM PUBLIC.TEST_A A /* PUBLIC.TEST_A.tableScan */ INNER JOIN PUBLIC.TEST_B B /* PUBLIC.PRIMARY_KEY_93: 
ID = A.ID */ ON 1=1 WHERE A.ID = B.ID -> TEST_A_SUB CREATE FORCE VIEW PUBLIC.TEST_A_SUB(ID, NAME) AS SELECT TEST_A.ID, TEST_A.NAME FROM PUBLIC.TEST_A /* PUBLIC.PRIMARY_KEY_9: ID < 2 */ WHERE ID < 2 -> rows: 2 - -SELECT * FROM TEST_A_SUB WHERE NAME IS NOT NULL; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -DROP VIEW TEST_A_SUB; -> ok - -DROP TABLE TEST_A cascade; -> ok - -DROP TABLE TEST_B cascade; -> ok - -DROP VIEW TEST_ALL; -> exception - -DROP VIEW IF EXISTS TEST_ALL; -> ok - ---- commit/rollback ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -SET AUTOCOMMIT FALSE; -> ok - -INSERT INTO TEST VALUES(1, 'Test'); -> update count: 1 - -ROLLBACK; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ---- -> rows: 0 - -INSERT INTO TEST VALUES(1, 'Test2'); -> update count: 1 - -SAVEPOINT TEST; -> ok - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -ROLLBACK TO SAVEPOINT NOT_EXISTING; -> exception - -ROLLBACK TO SAVEPOINT TEST; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 1 Test2 -> rows: 1 - -ROLLBACK WORK; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ---- -> rows: 0 - -INSERT INTO TEST VALUES(1, 'Test3'); -> update count: 1 - -SAVEPOINT TEST3; -> ok - -INSERT INTO TEST VALUES(2, 'World2'); -> update count: 1 - -ROLLBACK TO SAVEPOINT TEST3; -> ok - -COMMIT WORK; -> ok - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 1 Test3 -> rows: 1 - -SET AUTOCOMMIT TRUE; -> ok - -DROP TABLE TEST; -> ok - ---- insert..select ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, 'Hello'); -> update count: 1 - -INSERT INTO TEST SELECT ID+1, NAME||'+' FROM TEST; -> update count: 1 - -INSERT INTO TEST SELECT ID+2, NAME||'+' FROM TEST; -> update count: 2 - -INSERT INTO TEST SELECT ID+4, NAME||'+' FROM TEST; -> update count: 4 
- -SELECT * FROM TEST; -> ID NAME -> -- -------- -> 0 Hello -> 1 Hello+ -> 2 Hello+ -> 3 Hello++ -> 4 Hello+ -> 5 Hello++ -> 6 Hello++ -> 7 Hello+++ -> rows: 8 - -DROP TABLE TEST; -> ok - ---- range ---------------------------------------------------------------------------------------------- ---import java.math.*; ---int s=0;for(int i=2;i<=1000;i++) ---s+=BigInteger.valueOf(i).isProbablePrime(10000)?i:0;s; -select sum(x) from system_range(2, 1000) r where -not exists(select * from system_range(2, 32) r2 where r.x>r2.x and mod(r.x, r2.x)=0); -> SUM(X) -> ------ -> 76127 -> rows: 1 - -SELECT COUNT(*) FROM SYSTEM_RANGE(0, 2111222333); -> COUNT(*) -> ---------- -> 2111222334 -> rows: 1 - -select * from system_range(2, 100) r where -not exists(select * from system_range(2, 11) r2 where r.x>r2.x and mod(r.x, r2.x)=0); -> X -> -- -> 11 -> 13 -> 17 -> 19 -> 2 -> 23 -> 29 -> 3 -> 31 -> 37 -> 41 -> 43 -> 47 -> 5 -> 53 -> 59 -> 61 -> 67 -> 7 -> 71 -> 73 -> 79 -> 83 -> 89 -> 97 -> rows: 25 - ---- joins ---------------------------------------------------------------------------------------------------- -create table t1(id int, name varchar); -> ok - -insert into t1 values(1, 'hi'), (2, 'world'); -> update count: 2 - -create table t2(id int, name varchar); -> ok - -insert into t2 values(1, 'Hallo'), (3, 'Welt'); -> update count: 2 - -select * from t1 join t2 on t1.id=t2.id; -> ID NAME ID NAME -> -- ---- -- ----- -> 1 hi 1 Hallo -> rows: 1 - -select * from t1 left join t2 on t1.id=t2.id; -> ID NAME ID NAME -> -- ----- ---- ----- -> 1 hi 1 Hallo -> 2 world null null -> rows: 2 - -select * from t1 right join t2 on t1.id=t2.id; -> ID NAME ID NAME -> -- ----- ---- ---- -> 1 Hallo 1 hi -> 3 Welt null null -> rows: 2 - -select * from t1 cross join t2; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 hi 1 Hallo -> 1 hi 3 Welt -> 2 world 1 Hallo -> 2 world 3 Welt -> rows: 4 - -select * from t1 natural join t2; -> ID NAME -> -- ---- -> rows: 0 - -explain select * from t1 natural join t2; -> 
PLAN -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> SELECT T1.ID, T1.NAME FROM PUBLIC.T2 /* PUBLIC.T2.tableScan */ INNER JOIN PUBLIC.T1 /* PUBLIC.T1.tableScan */ ON 1=1 WHERE (PUBLIC.T1.ID = PUBLIC.T2.ID) AND (PUBLIC.T1.NAME = PUBLIC.T2.NAME) -> rows: 1 - -drop table t1; -> ok - -drop table t2; -> ok - -create table customer(customerid int, customer_name varchar); -> ok - -insert into customer values(0, 'Acme'); -> update count: 1 - -create table invoice(customerid int, invoiceid int, invoice_text varchar); -> ok - -insert into invoice values(0, 1, 'Soap'), (0, 2, 'More Soap'); -> update count: 2 - -create table INVOICE_LINE(line_id int, invoiceid int, customerid int, line_text varchar); -> ok - -insert into INVOICE_LINE values(10, 1, 0, 'Super Soap'), (20, 1, 0, 'Regular Soap'); -> update count: 2 - -select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; -> CUSTOMERID CUSTOMER_NAME INVOICEID INVOICE_TEXT LINE_ID LINE_TEXT -> ---------- ------------- --------- ------------ ------- ------------ -> 0 Acme 1 Soap 10 Super Soap -> 0 Acme 1 Soap 20 Regular Soap -> rows: 2 - -explain select c.*, i.*, l.* from customer c natural join invoice i natural join INVOICE_LINE l; -> PLAN -> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -> SELECT C.CUSTOMERID, C.CUSTOMER_NAME, I.INVOICEID, I.INVOICE_TEXT, L.LINE_ID, 
L.LINE_TEXT FROM PUBLIC.INVOICE I /* PUBLIC.INVOICE.tableScan */ INNER JOIN PUBLIC.INVOICE_LINE L /* PUBLIC.INVOICE_LINE.tableScan */ ON 1=1 /* WHERE (PUBLIC.I.CUSTOMERID = PUBLIC.L.CUSTOMERID) AND (PUBLIC.I.INVOICEID = PUBLIC.L.INVOICEID) */ INNER JOIN PUBLIC.CUSTOMER C /* PUBLIC.CUSTOMER.tableScan */ ON 1=1 WHERE (PUBLIC.C.CUSTOMERID = PUBLIC.I.CUSTOMERID) AND ((PUBLIC.I.CUSTOMERID = PUBLIC.L.CUSTOMERID) AND (PUBLIC.I.INVOICEID = PUBLIC.L.INVOICEID)) -> rows: 1 - -drop table customer; -> ok - -drop table invoice; -> ok - -drop table INVOICE_LINE; -> ok - ---- outer joins ---------------------------------------------------------------------------------------------- -CREATE TABLE PARENT(ID INT, NAME VARCHAR(20)); -> ok - -CREATE TABLE CHILD(ID INT, PARENTID INT, NAME VARCHAR(20)); -> ok - -INSERT INTO PARENT VALUES(1, 'Sue'); -> update count: 1 - -INSERT INTO PARENT VALUES(2, 'Joe'); -> update count: 1 - -INSERT INTO CHILD VALUES(100, 1, 'Simon'); -> update count: 1 - -INSERT INTO CHILD VALUES(101, 1, 'Sabine'); -> update count: 1 - -SELECT * FROM PARENT P INNER JOIN CHILD C ON P.ID = C.PARENTID; -> ID NAME ID PARENTID NAME -> -- ---- --- -------- ------ -> 1 Sue 100 1 Simon -> 1 Sue 101 1 Sabine -> rows: 2 - -SELECT * FROM PARENT P LEFT OUTER JOIN CHILD C ON P.ID = C.PARENTID; -> ID NAME ID PARENTID NAME -> -- ---- ---- -------- ------ -> 1 Sue 100 1 Simon -> 1 Sue 101 1 Sabine -> 2 Joe null null null -> rows: 3 - -SELECT * FROM CHILD C RIGHT OUTER JOIN PARENT P ON P.ID = C.PARENTID; -> ID NAME ID PARENTID NAME -> -- ---- ---- -------- ------ -> 1 Sue 100 1 Simon -> 1 Sue 101 1 Sabine -> 2 Joe null null null -> rows: 3 - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - ---- syntax errors ---------------------------------------------------------------------------------------------- -CREATE SOMETHING STRANGE; -> exception - -SELECT T1.* T2; -> exception - -select replace('abchihihi', 'i', 'o') abcehohoho, replace('this is tom', 'i') 1e_th_st_om from test; -> 
exception - -select monthname(date )'005-0E9-12') d_set fm test; -> exception - -call substring('bob', 2, -1); -> '' -> -- -> -> rows: 1 - ---- like ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, NULL); -> update count: 1 - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -INSERT INTO TEST VALUES(3, 'Word'); -> update count: 1 - -INSERT INTO TEST VALUES(4, 'Wo%'); -> update count: 1 - -SELECT * FROM TEST WHERE NAME IS NULL; -> ID NAME -> -- ---- -> 0 null -> rows: 1 - -SELECT * FROM TEST WHERE NAME IS NOT NULL; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> 3 Word -> 4 Wo% -> rows: 4 - -SELECT * FROM TEST WHERE NAME BETWEEN 'H' AND 'Word'; -> ID NAME -> -- ----- -> 1 Hello -> 3 Word -> 4 Wo% -> rows: 3 - -SELECT * FROM TEST WHERE ID >= 2 AND ID <= 3 AND ID <> 2; -> ID NAME -> -- ---- -> 3 Word -> rows: 1 - -SELECT * FROM TEST WHERE ID>0 AND ID<4 AND ID!=2; -> ID NAME -> -- ----- -> 1 Hello -> 3 Word -> rows: 2 - -SELECT * FROM TEST WHERE 'Hello' LIKE '_el%'; -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> 2 World -> 3 Word -> 4 Wo% -> rows: 5 - -SELECT * FROM TEST WHERE NAME LIKE 'Hello%'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT * FROM TEST WHERE NAME LIKE 'Wo%'; -> ID NAME -> -- ----- -> 2 World -> 3 Word -> 4 Wo% -> rows: 3 - -SELECT * FROM TEST WHERE NAME LIKE 'Wo\%'; -> ID NAME -> -- ---- -> 4 Wo% -> rows: 1 - -SELECT * FROM TEST WHERE NAME LIKE 'WoX%' ESCAPE 'X'; -> ID NAME -> -- ---- -> 4 Wo% -> rows: 1 - -SELECT * FROM TEST WHERE NAME LIKE 'Word_'; -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM TEST WHERE NAME LIKE '%Hello%'; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT * FROM TEST WHERE 'Hello' LIKE NAME; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -SELECT T1.*, T2.* FROM TEST AS T1, TEST AS T2 WHERE T1.ID = T2.ID AND 
T1.NAME LIKE T2.NAME || '%'; -> ID NAME ID NAME -> -- ----- -- ----- -> 1 Hello 1 Hello -> 2 World 2 World -> 3 Word 3 Word -> 4 Wo% 4 Wo% -> rows: 4 - -SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) = 'World'; -> ID MAX(NAME) -> -- --------- -> 2 World -> rows: 1 - -SELECT ID, MAX(NAME) FROM TEST GROUP BY ID HAVING MAX(NAME) LIKE 'World%'; -> ID MAX(NAME) -> -- --------- -> 2 World -> rows: 1 - -DROP TABLE TEST; -> ok - ---- remarks/comments/syntax ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST( -ID INT PRIMARY KEY, -- this is the primary key, type {integer} -NAME VARCHAR(255) -- this is a string -); -> ok - -INSERT INTO TEST VALUES( -1 /* ID */, -'Hello' // NAME -); -> update count: 1 - -SELECT * FROM TEST; -> ID NAME -> -- ----- -> 1 Hello -> rows: 1 - -DROP_ TABLE_ TEST_T; -> exception - -DROP TABLE TEST /*; -> exception - -DROP TABLE TEST; -> ok - ---- exists ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, NULL); -> update count: 1 - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -SELECT * FROM TEST T WHERE NOT EXISTS( -SELECT * FROM TEST T2 WHERE T.ID > T2.ID); -> ID NAME -> -- ---- -> 0 null -> rows: 1 - -DROP TABLE TEST; -> ok - ---- subquery ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -INSERT INTO TEST VALUES(0, NULL); -> update count: 1 - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -select * from test where (select max(t1.id) from test t1) between 0 and 100; -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> rows: 2 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -SELECT * FROM TEST T WHERE T.ID = (SELECT T2.ID FROM TEST T2 
WHERE T2.ID=T.ID); -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> 2 World -> rows: 3 - -SELECT (SELECT T2.NAME FROM TEST T2 WHERE T2.ID=T.ID), T.NAME FROM TEST T; -> SELECT T2.NAME FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID = T.ID */ /* scanCount: 2 */ WHERE T2.ID = T.ID NAME -> -------------------------------------------------------------------------------------------------------------- ----- -> Hello Hello -> World World -> null null -> rows: 3 - -SELECT (SELECT SUM(T2.ID) FROM TEST T2 WHERE T2.ID>T.ID), T.ID FROM TEST T; -> SELECT SUM(T2.ID) FROM PUBLIC.TEST T2 /* PUBLIC.PRIMARY_KEY_2: ID > T.ID */ /* scanCount: 2 */ WHERE T2.ID > T.ID ID -> ----------------------------------------------------------------------------------------------------------------- -- -> 2 1 -> 3 0 -> null 2 -> rows: 3 - -select * from test t where t.id+1 in (select id from test); -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> rows: 2 - -select * from test t where t.id in (select id from test where id=t.id); -> ID NAME -> -- ----- -> 0 null -> 1 Hello -> 2 World -> rows: 3 - -select 1 from test, test where 1 in (select 1 from test where id=1); -> 1 -> - -> 1 -> 1 -> 1 -> 1 -> 1 -> 1 -> 1 -> 1 -> 1 -> rows: 9 - -select * from test, test where id=id; -> exception - -select 1 from test, test where id=id; -> exception - -select 1 from test where id in (select id from test, test); -> exception - -DROP TABLE TEST; -> ok - ---- group by ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(A INT, B INT, VALUE INT, UNIQUE(A, B)); -> ok - -INSERT INTO TEST VALUES(?, ?, ?); -{ -NULL, NULL, NULL -NULL, 0, 0 -NULL, 1, 10 -0, 0, -1 -0, 1, 100 -1, 0, 200 -1, 1, 300 -}; -> update count: 7 - -SELECT A, B, COUNT(*) CAL, COUNT(A) CA, COUNT(B) CB, MIN(VALUE) MI, MAX(VALUE) MA, SUM(VALUE) S FROM TEST GROUP BY A, B; -> A B CAL CA CB MI MA S -> ---- ---- --- -- -- ---- ---- ---- -> 0 0 1 1 1 -1 -1 -1 -> 0 1 1 1 1 100 100 100 -> 1 0 1 1 1 200 200 
200 -> 1 1 1 1 1 300 300 300 -> null 0 1 0 1 0 0 0 -> null 1 1 0 1 10 10 10 -> null null 1 0 0 null null null -> rows: 7 - -DROP TABLE TEST; -> ok - ---- data types (blob, clob, varchar_ignorecase) ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT, XB BINARY, XBL BLOB, XO OTHER, XCL CLOB, XVI VARCHAR_IGNORECASE); -> ok - -INSERT INTO TEST VALUES(0, X '', '', '', '', ''); -> update count: 1 - -INSERT INTO TEST VALUES(1, X '0101', '0101', '0101', 'abc', 'aa'); -> update count: 1 - -INSERT INTO TEST VALUES(2, X '0AFF', '08FE', 'F0F1', 'AbCdEfG', 'ZzAaBb'); -> update count: 1 - -INSERT INTO TEST VALUES(3, X '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', '112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff', 
'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz', 'AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz'); -> update count: 1 - -INSERT INTO TEST VALUES(4, NULL, NULL, NULL, NULL, NULL); -> update count: 1 - -SELECT * FROM TEST; -> ID XB XBL XO XCL XVI -> -- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> 0 -> 1 0101 0101 0101 abc aa -> 2 0aff 08fe f0f1 AbCdEfG ZzAaBb -> 3 112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff 
112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff 112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff112233445566778899aabbccddeeff AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz AbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYzAbCdEfGhIjKlMnOpQrStUvWxYz -> 4 null null null null null -> rows: 5 - -SELECT ID FROM TEST WHERE XCL = XCL; -> ID -> -- -> 0 -> 1 -> 2 -> 3 -> rows: 4 - -SELECT ID FROM TEST WHERE XCL LIKE 'abc%'; -> ID -> -- -> 1 -> rows: 1 - -SELECT ID FROM TEST WHERE XVI LIKE 'abc%'; -> ID -> -- -> 3 -> rows: 1 - -SELECT 'abc', 'Papa Joe''s', CAST(-1 AS SMALLINT), CAST(2 AS BIGINT), CAST(0 AS 
DOUBLE), CAST('0a0f' AS BINARY), CAST(125 AS TINYINT), TRUE, FALSE FROM TEST WHERE ID=1; -> 'abc' 'Papa Joe''s' -1 2 0.0 X'0a0f' 125 TRUE FALSE -> ----- ------------- -- - --- ------- --- ---- ----- -> abc Papa Joe's -1 2 0.0 0a0f 125 TRUE FALSE -> rows: 1 - -SELECT CAST('abcd' AS VARCHAR(255)), CAST('ef_gh' AS VARCHAR(3)); -> 'abcd' 'ef_' -> ------ ----- -> abcd ef_ -> rows: 1 - -DROP TABLE TEST; -> ok - ---- data types (date and time) ---------------------------------------------------------------------------------------------- -CREATE MEMORY TABLE TEST(ID INT, XT TIME, XD DATE, XTS TIMESTAMP); -> ok - -INSERT INTO TEST VALUES(0, '0:0:0','1-2-3','2-3-4 0:0:0'); -> update count: 1 - -INSERT INTO TEST VALUES(1, '01:02:03','2001-02-03','2001-02-29 0:0:0'); -> exception - -INSERT INTO TEST VALUES(1, '24:62:03','2001-02-03','2001-02-01 0:0:0'); -> exception - -INSERT INTO TEST VALUES(1, '23:02:03','2001-04-31','2001-02-01 0:0:0'); -> exception - -INSERT INTO TEST VALUES(1,'1:2:3','4-5-6','7-8-9 0:1:2'); -> update count: 1 - -INSERT INTO TEST VALUES(2,'23:59:59','1999-12-31','1999-12-31 23:59:59.123456789'); -> update count: 1 - -INSERT INTO TEST VALUES(NULL,NULL,NULL,NULL); -> update count: 1 - -SELECT * FROM TEST; -> ID XT XD XTS -> ---- -------- ---------- ----------------------------- -> 0 00:00:00 0001-02-03 0002-03-04 00:00:00.0 -> 1 01:02:03 0004-05-06 0007-08-09 00:01:02.0 -> 2 23:59:59 1999-12-31 1999-12-31 23:59:59.123456789 -> null null null null -> rows: 4 - -SELECT XD+1, XD-1, XD-XD FROM TEST; -> DATEADD('DAY', 1, XD) DATEADD('DAY', -1, XD) DATEDIFF('DAY', XD, XD) -> --------------------- ---------------------- ----------------------- -> 0001-02-04 00:00:00.0 0001-02-02 00:00:00.0 0 -> 0004-05-07 00:00:00.0 0004-05-05 00:00:00.0 0 -> 2000-01-01 00:00:00.0 1999-12-30 00:00:00.0 0 -> null null null -> rows: 4 - -SELECT ID, CAST(XT AS DATE) T2D, CAST(XTS AS DATE) TS2D, -CAST(XD AS TIME) D2T, CAST(XTS AS TIME) TS2T, -CAST(XT AS TIMESTAMP) D2TS, CAST(XD AS 
TIMESTAMP) D2TS FROM TEST; -> ID T2D TS2D D2T TS2T D2TS D2TS -> ---- ---------- ---------- -------- ------------------ --------------------- --------------------- -> 0 1970-01-01 0002-03-04 00:00:00 00:00:00 1970-01-01 00:00:00.0 0001-02-03 00:00:00.0 -> 1 1970-01-01 0007-08-09 00:00:00 00:01:02 1970-01-01 01:02:03.0 0004-05-06 00:00:00.0 -> 2 1970-01-01 1999-12-31 00:00:00 23:59:59.123456789 1970-01-01 23:59:59.0 1999-12-31 00:00:00.0 -> null null null null null null null -> rows: 4 - -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; -> SCRIPT -> ---------------------------------------------------------------------------------------------------------------------------------- -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT, XT TIME, XD DATE, XTS TIMESTAMP ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, XT, XD, XTS) VALUES(0, TIME '00:00:00', DATE '0001-02-03', TIMESTAMP '0002-03-04 00:00:00.0'); -> INSERT INTO PUBLIC.TEST(ID, XT, XD, XTS) VALUES(1, TIME '01:02:03', DATE '0004-05-06', TIMESTAMP '0007-08-09 00:01:02.0'); -> INSERT INTO PUBLIC.TEST(ID, XT, XD, XTS) VALUES(2, TIME '23:59:59', DATE '1999-12-31', TIMESTAMP '1999-12-31 23:59:59.123456789'); -> INSERT INTO PUBLIC.TEST(ID, XT, XD, XTS) VALUES(NULL, NULL, NULL, NULL); -> rows: 7 - -DROP TABLE TEST; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, t0 timestamp(23, 0), t1 timestamp(23, 1), t2 timestamp(23, 2), t5 timestamp(23, 5)); -> ok - -INSERT INTO TEST VALUES(1, '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123', '2001-01-01 12:34:56.789123'); -> update count: 1 - -select * from test; -> ID T0 T1 T2 T5 -> -- --------------------- --------------------- ---------------------- ------------------------- -> 1 2001-01-01 12:34:57.0 2001-01-01 12:34:56.8 2001-01-01 12:34:56.79 2001-01-01 12:34:56.78912 -> rows: 1 - -DROP TABLE IF EXISTS TEST; -> ok - ---- data types (decimal) 
---------------------------------------------------------------------------------------------- -CALL 1.2E10+1; -> 12000000001 -> ----------- -> 12000000001 -> rows: 1 - -CALL -1.2E-10-1; -> -1.00000000012 -> -------------- -> -1.00000000012 -> rows: 1 - -CALL 1E-1; -> 0.1 -> --- -> 0.1 -> rows: 1 - -CREATE TABLE TEST(ID INT, X1 BIT, XT TINYINT, X_SM SMALLINT, XB BIGINT, XD DECIMAL(10,2), XD2 DOUBLE PRECISION, XR REAL); -> ok - -INSERT INTO TEST VALUES(?, ?, ?, ?, ?, ?, ?, ?); -{ -0,FALSE,0,0,0,0.0,0.0,0.0 -1,TRUE,1,1,1,1.0,1.0,1.0 -4,TRUE,4,4,4,4.0,4.0,4.0 --1,FALSE,-1,-1,-1,-1.0,-1.0,-1.0 -NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL -}; -> update count: 5 - -SELECT *, 0xFF, -0x1234567890abcd FROM TEST; -> ID X1 XT X_SM XB XD XD2 XR 255 -5124095575370701 -> ---- ----- ---- ---- ---- ----- ---- ---- --- ----------------- -> -1 FALSE -1 -1 -1 -1.00 -1.0 -1.0 255 -5124095575370701 -> 0 FALSE 0 0 0 0.00 0.0 0.0 255 -5124095575370701 -> 1 TRUE 1 1 1 1.00 1.0 1.0 255 -5124095575370701 -> 4 TRUE 4 4 4 4.00 4.0 4.0 255 -5124095575370701 -> null null null null null null null null 255 -5124095575370701 -> rows: 5 - -SELECT XD, CAST(XD AS DECIMAL(10,1)) D2DE, CAST(XD2 AS DECIMAL(4, 3)) DO2DE, CAST(XR AS DECIMAL(20,3)) R2DE FROM TEST; -> XD D2DE DO2DE R2DE -> ----- ---- ------ ------ -> -1.00 -1.0 -1.000 -1.000 -> 0.00 0.0 0.000 0.000 -> 1.00 1.0 1.000 1.000 -> 4.00 4.0 4.000 4.000 -> null null null null -> rows: 5 - -SELECT ID, CAST(XB AS DOUBLE) L2D, CAST(X_SM AS DOUBLE) S2D, CAST(XT AS DOUBLE) X2D FROM TEST; -> ID L2D S2D X2D -> ---- ---- ---- ---- -> -1 -1.0 -1.0 -1.0 -> 0 0.0 0.0 0.0 -> 1 1.0 1.0 1.0 -> 4 4.0 4.0 4.0 -> null null null null -> rows: 5 - -SELECT ID, CAST(XB AS REAL) L2D, CAST(X_SM AS REAL) S2D, CAST(XT AS REAL) T2R FROM TEST; -> ID L2D S2D T2R -> ---- ---- ---- ---- -> -1 -1.0 -1.0 -1.0 -> 0 0.0 0.0 0.0 -> 1 1.0 1.0 1.0 -> 4 4.0 4.0 4.0 -> null null null null -> rows: 5 - -SELECT ID, CAST(X_SM AS BIGINT) S2L, CAST(XT AS BIGINT) B2L, CAST(XD2 AS BIGINT) D2L, 
CAST(XR AS BIGINT) R2L FROM TEST; -> ID S2L B2L D2L R2L -> ---- ---- ---- ---- ---- -> -1 -1 -1 -1 -1 -> 0 0 0 0 0 -> 1 1 1 1 1 -> 4 4 4 4 4 -> null null null null null -> rows: 5 - -SELECT ID, CAST(XB AS INT) L2I, CAST(XD2 AS INT) D2I, CAST(XD2 AS SMALLINT) DO2I, CAST(XR AS SMALLINT) R2I FROM TEST; -> ID L2I D2I DO2I R2I -> ---- ---- ---- ---- ---- -> -1 -1 -1 -1 -1 -> 0 0 0 0 0 -> 1 1 1 1 1 -> 4 4 4 4 4 -> null null null null null -> rows: 5 - -SELECT ID, CAST(XD AS SMALLINT) D2S, CAST(XB AS SMALLINT) L2S, CAST(XT AS SMALLINT) B2S FROM TEST; -> ID D2S L2S B2S -> ---- ---- ---- ---- -> -1 -1 -1 -1 -> 0 0 0 0 -> 1 1 1 1 -> 4 4 4 4 -> null null null null -> rows: 5 - -SELECT ID, CAST(XD2 AS TINYINT) D2B, CAST(XD AS TINYINT) DE2B, CAST(XB AS TINYINT) L2B, CAST(X_SM AS TINYINT) S2B FROM TEST; -> ID D2B DE2B L2B S2B -> ---- ---- ---- ---- ---- -> -1 -1 -1 -1 -1 -> 0 0 0 0 0 -> 1 1 1 1 1 -> 4 4 4 4 4 -> null null null null null -> rows: 5 - -SELECT ID, CAST(XD2 AS BIT) D2B, CAST(XD AS BIT) DE2B, CAST(XB AS BIT) L2B, CAST(X_SM AS BIT) S2B FROM TEST; -> ID D2B DE2B L2B S2B -> ---- ----- ----- ----- ----- -> -1 TRUE TRUE TRUE TRUE -> 0 FALSE FALSE FALSE FALSE -> 1 TRUE TRUE TRUE TRUE -> 4 TRUE TRUE TRUE TRUE -> null null null null null -> rows: 5 - -SELECT CAST('TRUE' AS BIT) NT, CAST('1.0' AS BIT) N1, CAST('0.0' AS BIT) N0; -> NT N1 N0 -> ---- ---- ----- -> TRUE TRUE FALSE -> rows: 1 - -SELECT ID, ID+X1, ID+XT, ID+X_SM, ID+XB, ID+XD, ID+XD2, ID+XR FROM TEST; -> ID ID + X1 ID + XT ID + X_SM ID + XB ID + XD ID + XD2 ID + XR -> ---- ------- ------- --------- ------- ------- -------- ------- -> -1 -1 -2 -2 -2 -2.00 -2.0 -2.0 -> 0 0 0 0 0 0.00 0.0 0.0 -> 1 2 2 2 2 2.00 2.0 2.0 -> 4 5 8 8 8 8.00 8.0 8.0 -> null null null null null null null null -> rows: 5 - -SELECT ID, 10-X1, 10-XT, 10-X_SM, 10-XB, 10-XD, 10-XD2, 10-XR FROM TEST; -> ID 10 - X1 10 - XT 10 - X_SM 10 - XB 10 - XD 10 - XD2 10 - XR -> ---- ------- ------- --------- ------- ------- -------- ------- -> -1 10 11 11 11 
11.00 11.0 11.0 -> 0 10 10 10 10 10.00 10.0 10.0 -> 1 9 9 9 9 9.00 9.0 9.0 -> 4 9 6 6 6 6.00 6.0 6.0 -> null null null null null null null null -> rows: 5 - -SELECT ID, 10*X1, 10*XT, 10*X_SM, 10*XB, 10*XD, 10*XD2, 10*XR FROM TEST; -> ID 10 * X1 10 * XT 10 * X_SM 10 * XB 10 * XD 10 * XD2 10 * XR -> ---- ------- ------- --------- ------- ------- -------- ------- -> -1 0 -10 -10 -10 -10.00 -10.0 -10.0 -> 0 0 0 0 0 0.00 0.0 0.0 -> 1 10 10 10 10 10.00 10.0 10.0 -> 4 10 40 40 40 40.00 40.0 40.0 -> null null null null null null null null -> rows: 5 - -SELECT ID, CAST(XT AS NUMBER(10,1)), -CAST(X_SM AS NUMBER(10,1)), CAST(XB AS NUMBER(10,1)), CAST(XD AS NUMBER(10,1)), -CAST(XD2 AS NUMBER(10,1)), CAST(XR AS NUMBER(10,1)) FROM TEST; -> ID CAST(XT AS DECIMAL(10, 1)) CAST(X_SM AS DECIMAL(10, 1)) CAST(XB AS DECIMAL(10, 1)) CAST(XD AS DECIMAL(10, 1)) CAST(XD2 AS DECIMAL(10, 1)) CAST(XR AS DECIMAL(10, 1)) -> ---- -------------------------- ---------------------------- -------------------------- -------------------------- --------------------------- -------------------------- -> -1 -1.0 -1.0 -1.0 -1.0 -1.0 -1.0 -> 0 0.0 0.0 0.0 0.0 0.0 0.0 -> 1 1.0 1.0 1.0 1.0 1.0 1.0 -> 4 4.0 4.0 4.0 4.0 4.0 4.0 -> null null null null null null null -> rows: 5 - -SELECT ID, SIGN(XT), SIGN(X_SM), SIGN(XB), SIGN(XD), SIGN(XD2), SIGN(XR) FROM TEST; -> ID SIGN(XT) SIGN(X_SM) SIGN(XB) SIGN(XD) SIGN(XD2) SIGN(XR) -> ---- -------- ---------- -------- -------- --------- -------- -> -1 -1 -1 -1 -1 -1 -1 -> 0 0 0 0 0 0 0 -> 1 1 1 1 1 1 1 -> 4 1 1 1 1 1 1 -> null null null null null null null -> rows: 5 - -SELECT ID, XT-XT-XT, X_SM-X_SM-X_SM, XB-XB-XB, XD-XD-XD, XD2-XD2-XD2, XR-XR-XR FROM TEST; -> ID (XT - XT) - XT (X_SM - X_SM) - X_SM (XB - XB) - XB (XD - XD) - XD (XD2 - XD2) - XD2 (XR - XR) - XR -> ---- -------------- -------------------- -------------- -------------- ----------------- -------------- -> -1 1 1 1 1.00 1.0 1.0 -> 0 0 0 0 0.00 0.0 0.0 -> 1 -1 -1 -1 -1.00 -1.0 -1.0 -> 4 -4 -4 -4 -4.00 -4.0 
-4.0 -> null null null null null null null -> rows: 5 - -SELECT ID, XT+XT, X_SM+X_SM, XB+XB, XD+XD, XD2+XD2, XR+XR FROM TEST; -> ID XT + XT X_SM + X_SM XB + XB XD + XD XD2 + XD2 XR + XR -> ---- ------- ----------- ------- ------- --------- ------- -> -1 -2 -2 -2 -2.00 -2.0 -2.0 -> 0 0 0 0 0.00 0.0 0.0 -> 1 2 2 2 2.00 2.0 2.0 -> 4 8 8 8 8.00 8.0 8.0 -> null null null null null null null -> rows: 5 - -SELECT ID, XT*XT, X_SM*X_SM, XB*XB, XD*XD, XD2*XD2, XR*XR FROM TEST; -> ID XT * XT X_SM * X_SM XB * XB XD * XD XD2 * XD2 XR * XR -> ---- ------- ----------- ------- ------- --------- ------- -> -1 1 1 1 1.0000 1.0 1.0 -> 0 0 0 0 0.0000 0.0 0.0 -> 1 1 1 1 1.0000 1.0 1.0 -> 4 16 16 16 16.0000 16.0 16.0 -> null null null null null null null -> rows: 5 - -SELECT 2/3 FROM TEST WHERE ID=1; -> 0 -> - -> 0 -> rows: 1 - -SELECT ID/ID FROM TEST; -> exception - -SELECT XT/XT FROM TEST; -> exception - -SELECT X_SM/X_SM FROM TEST; -> exception - -SELECT XB/XB FROM TEST; -> exception - -SELECT XD/XD FROM TEST; -> exception - -SELECT XD2/XD2 FROM TEST; -> exception - -SELECT XR/XR FROM TEST; -> exception - -SELECT ID++0, -X1, -XT, -X_SM, -XB, -XD, -XD2, -XR FROM TEST; -> ID + 0 - X1 - XT - X_SM - XB - XD - XD2 - XR -> ------ ----- ---- ------ ---- ----- ----- ---- -> -1 TRUE 1 1 1 1.00 1.0 1.0 -> 0 TRUE 0 0 0 0.00 -0.0 -0.0 -> 1 FALSE -1 -1 -1 -1.00 -1.0 -1.0 -> 4 FALSE -4 -4 -4 -4.00 -4.0 -4.0 -> null null null null null null null null -> rows: 5 - -SELECT ID, X1||'!', XT||'!', X_SM||'!', XB||'!', XD||'!', XD2||'!', XR||'!' FROM TEST; -> ID X1 || '!' XT || '!' X_SM || '!' XB || '!' XD || '!' XD2 || '!' XR || '!' -> ---- --------- --------- ----------- --------- --------- ---------- --------- -> -1 FALSE! -1! -1! -1! -1.00! -1.0! -1.0! -> 0 FALSE! 0! 0! 0! 0.00! 0.0! 0.0! -> 1 TRUE! 1! 1! 1! 1.00! 1.0! 1.0! -> 4 TRUE! 4! 4! 4! 4.00! 4.0! 4.0! 
-> null null null null null null null null -> rows: 5 - -DROP TABLE TEST; -> ok - ---- in ---------------------------------------------------------------------------------------------- -CREATE TABLE CUSTOMER(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE TABLE INVOICE(ID INT, CUSTOMER_ID INT, PRIMARY KEY(CUSTOMER_ID, ID), VALUE DECIMAL(10,2)); -> ok - -INSERT INTO CUSTOMER VALUES(?, ?); -{ -1,Lehmann -2,Meier -3,Scott -4,NULL -}; -> update count: 4 - -INSERT INTO INVOICE VALUES(?, ?, ?); -{ -10,1,100.10 -11,1,10.01 -12,1,1.001 -20,2,22.2 -21,2,200.02 -}; -> update count: 5 - -SELECT * FROM CUSTOMER WHERE ID IN(1,2,4,-1); -> ID NAME -> -- ------- -> 1 Lehmann -> 2 Meier -> 4 null -> rows: 3 - -SELECT * FROM CUSTOMER WHERE ID NOT IN(3,4,5,'1'); -> ID NAME -> -- ----- -> 2 Meier -> rows: 1 - -SELECT * FROM CUSTOMER WHERE ID NOT IN(SELECT CUSTOMER_ID FROM INVOICE); -> ID NAME -> -- ----- -> 3 Scott -> 4 null -> rows: 2 - -SELECT * FROM INVOICE WHERE CUSTOMER_ID IN(SELECT C.ID FROM CUSTOMER C); -> ID CUSTOMER_ID VALUE -> -- ----------- ------ -> 10 1 100.10 -> 11 1 10.01 -> 12 1 1.00 -> 20 2 22.20 -> 21 2 200.02 -> rows: 5 - -SELECT * FROM CUSTOMER WHERE NAME IN('Lehmann', 20); -> ID NAME -> -- ------- -> 1 Lehmann -> rows: 1 - -SELECT * FROM CUSTOMER WHERE NAME NOT IN('Scott'); -> ID NAME -> -- ------- -> 1 Lehmann -> 2 Meier -> rows: 2 - -SELECT * FROM CUSTOMER WHERE NAME IN(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ------- -> 1 Lehmann -> 2 Meier -> 3 Scott -> rows: 3 - -SELECT * FROM CUSTOMER WHERE NAME NOT IN(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM CUSTOMER WHERE NAME = ANY(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ------- -> 1 Lehmann -> 2 Meier -> 3 Scott -> rows: 3 - -SELECT * FROM CUSTOMER WHERE NAME = ALL(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM CUSTOMER WHERE NAME > ALL(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ---- -> rows: 0 - -SELECT * FROM CUSTOMER WHERE NAME > 
ANY(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ----- -> 2 Meier -> 3 Scott -> rows: 2 - -SELECT * FROM CUSTOMER WHERE NAME < ANY(SELECT NAME FROM CUSTOMER); -> ID NAME -> -- ------- -> 1 Lehmann -> 2 Meier -> rows: 2 - -DROP TABLE INVOICE; -> ok - -DROP TABLE CUSTOMER; -> ok - ---- aggregates ---------------------------------------------------------------------------------------------- -drop table if exists t; -> ok - -create table t(x double precision, y double precision); -> ok - -create view s as -select stddev_pop(x) s_px, stddev_samp(x) s_sx, var_pop(x) v_px, var_samp(x) v_sx, -stddev_pop(y) s_py, stddev_samp(y) s_sy, var_pop(y) v_py, var_samp(y) v_sy from t; -> ok - -select var(100000000.1) z from system_range(1, 1000000); -> Z -> --- -> 0.0 -> rows: 1 - -select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ---- ---- ---- ---- ---- ---- ---- ---- -> null null null null null null null null -> rows: 1 - -select some(y>10), every(y>10), min(y), max(y) from t; -> BOOL_OR(Y > 10) BOOL_AND(Y > 10) MIN(Y) MAX(Y) -> --------------- ---------------- ------ ------ -> null null null null -> rows: 1 - -insert into t values(1000000004, 4); -> update count: 1 - -select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ---- ---- ---- ---- ---- ---- ---- ---- -> 0.0 null 0.0 null 0.0 null 0.0 null -> rows: 1 - -insert into t values(1000000007, 7); -> update count: 1 - -select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ---- ------------------ ---- ---- ---- ------------------ ---- ---- -> 1.5 2.1213203435596424 2.25 4.5 1.5 2.1213203435596424 2.25 4.5 -> rows: 1 - -insert into t values(1000000013, 13); -> update count: 1 - -select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ------------------ ---------------- ---- ---- ------------------ ---------------- ---- ---- -> 3.7416573867739413 4.58257569495584 14.0 21.0 3.7416573867739413 4.58257569495584 14.0 21.0 -> rows: 1 - -insert into t values(1000000016, 16); -> update count: 1 - 
-select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ----------------- ----------------- ---- ---- ----------------- ----------------- ---- ---- -> 4.743416490252569 5.477225575051661 22.5 30.0 4.743416490252569 5.477225575051661 22.5 30.0 -> rows: 1 - -insert into t values(1000000016, 16); -> update count: 1 - -select * from s; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ----------------- ----------------- ----------------- ------------------ ----------------- ----------------- ----- ------------------ -> 4.874423036912116 5.449770630813229 23.75999994277954 29.699999928474426 4.874423042781577 5.449770637375485 23.76 29.700000000000003 -> rows: 1 - -select stddev_pop(distinct x) s_px, stddev_samp(distinct x) s_sx, var_pop(distinct x) v_px, var_samp(distinct x) v_sx, -stddev_pop(distinct y) s_py, stddev_samp(distinct y) s_sy, var_pop(distinct y) v_py, var_samp(distinct y) V_SY from t; -> S_PX S_SX V_PX V_SX S_PY S_SY V_PY V_SY -> ----------------- ----------------- ---- ---- ----------------- ----------------- ---- ---- -> 4.743416490252569 5.477225575051661 22.5 30.0 4.743416490252569 5.477225575051661 22.5 30.0 -> rows: 1 - -select some(y>10), every(y>10), min(y), max(y) from t; -> BOOL_OR(Y > 10) BOOL_AND(Y > 10) MIN(Y) MAX(Y) -> --------------- ---------------- ------ ------ -> TRUE FALSE 4.0 16.0 -> rows: 1 - -drop view s; -> ok - -drop table t; -> ok - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255), VALUE DECIMAL(10,2)); -> ok - -INSERT INTO TEST VALUES(?, ?, ?); -{ -1,Apples,1.20 -2,Oranges,2.05 -3,Cherries,5.10 -4,Apples,1.50 -5,Apples,1.10 -6,Oranges,1.80 -7,Bananas,2.50 -8,NULL,3.10 -9,NULL,-10.0 -}; -> update count: 9 - -SELECT IFNULL(NAME, '') || ': ' || GROUP_CONCAT(VALUE ORDER BY NAME, VALUE DESC SEPARATOR ', ') FROM TEST GROUP BY NAME; -> (IFNULL(NAME, '') || ': ') || GROUP_CONCAT(VALUE ORDER BY NAME, VALUE DESC SEPARATOR ', ') -> ------------------------------------------------------------------------------------------ -> 
Apples: 1.50, 1.20, 1.10 -> Oranges: 2.05, 1.80 -> Bananas: 2.50 -> Cherries: 5.10 -> : 3.10, -10.00 -> rows (ordered): 5 - -SELECT GROUP_CONCAT(ID ORDER BY ID) FROM TEST; -> GROUP_CONCAT(ID ORDER BY ID) -> ---------------------------- -> 1,2,3,4,5,6,7,8,9 -> rows (ordered): 1 - -SELECT STRING_AGG(ID,';') FROM TEST; -> GROUP_CONCAT(ID SEPARATOR ';') -> ------------------------------ -> 1;2;3;4;5;6;7;8;9 -> rows: 1 - -SELECT DISTINCT NAME FROM TEST; -> NAME -> -------- -> Apples -> Bananas -> Cherries -> Oranges -> null -> rows: 5 - -SELECT DISTINCT NAME FROM TEST ORDER BY NAME DESC NULLS LAST; -> NAME -> -------- -> Oranges -> Cherries -> Bananas -> Apples -> null -> rows (ordered): 5 - -SELECT DISTINCT NAME FROM TEST ORDER BY NAME DESC NULLS LAST LIMIT 2 OFFSET 1; -> NAME -> -------- -> Cherries -> Bananas -> rows (ordered): 2 - -SELECT NAME, COUNT(*), SUM(VALUE), MAX(VALUE), MIN(VALUE), AVG(VALUE), COUNT(DISTINCT VALUE) FROM TEST GROUP BY NAME; -> NAME COUNT(*) SUM(VALUE) MAX(VALUE) MIN(VALUE) AVG(VALUE) COUNT(DISTINCT VALUE) -> -------- -------- ---------- ---------- ---------- ----------------------------- --------------------- -> Apples 3 3.80 1.50 1.10 1.266666666666666666666666667 3 -> Bananas 1 2.50 2.50 2.50 2.5 1 -> Cherries 1 5.10 5.10 5.10 5.1 1 -> Oranges 2 3.85 2.05 1.80 1.925 2 -> null 2 -6.90 3.10 -10.00 -3.45 2 -> rows: 5 - -SELECT NAME, MAX(VALUE), MIN(VALUE), MAX(VALUE+1)*MIN(VALUE+1) FROM TEST GROUP BY NAME; -> NAME MAX(VALUE) MIN(VALUE) MAX(VALUE + 1) * MIN(VALUE + 1) -> -------- ---------- ---------- ------------------------------- -> Apples 1.50 1.10 5.2500 -> Bananas 2.50 2.50 12.2500 -> Cherries 5.10 5.10 37.2100 -> Oranges 2.05 1.80 8.5400 -> null 3.10 -10.00 -36.9000 -> rows: 5 - -DROP TABLE TEST; -> ok - ---- order by ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE UNIQUE INDEX IDXNAME ON TEST(NAME); -> ok - -INSERT 
INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'World'); -> update count: 1 - -INSERT INTO TEST VALUES(3, NULL); -> update count: 1 - -SELECT * FROM TEST ORDER BY NAME; -> ID NAME -> -- ----- -> 3 null -> 1 Hello -> 2 World -> rows (ordered): 3 - -SELECT * FROM TEST ORDER BY NAME DESC; -> ID NAME -> -- ----- -> 2 World -> 1 Hello -> 3 null -> rows (ordered): 3 - -SELECT * FROM TEST ORDER BY NAME NULLS FIRST; -> ID NAME -> -- ----- -> 3 null -> 1 Hello -> 2 World -> rows (ordered): 3 - -SELECT * FROM TEST ORDER BY NAME DESC NULLS FIRST; -> ID NAME -> -- ----- -> 3 null -> 2 World -> 1 Hello -> rows (ordered): 3 - -SELECT * FROM TEST ORDER BY NAME NULLS LAST; -> ID NAME -> -- ----- -> 1 Hello -> 2 World -> 3 null -> rows (ordered): 3 - -SELECT * FROM TEST ORDER BY NAME DESC NULLS LAST; -> ID NAME -> -- ----- -> 2 World -> 1 Hello -> 3 null -> rows (ordered): 3 - -SELECT ID, '=', NAME FROM TEST ORDER BY 2 FOR UPDATE; -> ID '=' NAME -> -- --- ----- -> 1 = Hello -> 2 = World -> 3 = null -> rows (ordered): 3 - -DROP TABLE TEST; -> ok - ---- having ---------------------------------------------------------------------------------------------- -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE INDEX IDXNAME ON TEST(NAME); -> ok - -INSERT INTO TEST VALUES(1, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'Hello'); -> update count: 1 - -INSERT INTO TEST VALUES(3, 'World'); -> update count: 1 - -INSERT INTO TEST VALUES(4, 'World'); -> update count: 1 - -INSERT INTO TEST VALUES(5, 'Orange'); -> update count: 1 - -SELECT NAME, SUM(ID) FROM TEST GROUP BY NAME HAVING COUNT(*)>1 ORDER BY NAME; -> NAME SUM(ID) -> ----- ------- -> Hello 3 -> World 7 -> rows (ordered): 2 - -DROP INDEX IF EXISTS IDXNAME; -> ok - -DROP TABLE TEST; -> ok - ---- help ---------------------------------------------------------------------------------------------- -HELP ABCDE EF_GH; -> ID SECTION TOPIC SYNTAX TEXT -> -- ------- ----- ------ 
---- -> rows: 0 - ---- sequence ---------------------------------------------------------------------------------------------- -CREATE CACHED TABLE TEST(ID INT PRIMARY KEY); -> ok - -CREATE CACHED TABLE IF NOT EXISTS TEST(ID INT PRIMARY KEY); -> ok - -CREATE SEQUENCE IF NOT EXISTS TEST_SEQ START WITH 10; -> ok - -CREATE SEQUENCE IF NOT EXISTS TEST_SEQ START WITH 20; -> ok - -INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); -> update count: 1 - -CALL CURRVAL('test_seq'); -> CURRVAL('test_seq') -> ------------------- -> 10 -> rows: 1 - -INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); -> update count: 1 - -CALL NEXT VALUE FOR TEST_SEQ; -> NEXT VALUE FOR PUBLIC.TEST_SEQ -> ------------------------------ -> 12 -> rows: 1 - -INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); -> update count: 1 - -SELECT * FROM TEST; -> ID -> -- -> 10 -> 11 -> 13 -> rows: 3 - -SELECT TOP 2 * FROM TEST; -> ID -> -- -> 10 -> 11 -> rows: 2 - -SELECT TOP 2 * FROM TEST ORDER BY ID DESC; -> ID -> -- -> 13 -> 11 -> rows (ordered): 2 - -ALTER SEQUENCE TEST_SEQ RESTART WITH 20 INCREMENT BY -1; -> ok - -INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); -> update count: 1 - -INSERT INTO TEST VALUES(NEXT VALUE FOR TEST_SEQ); -> update count: 1 - -SELECT * FROM TEST ORDER BY ID ASC; -> ID -> -- -> 10 -> 11 -> 13 -> 19 -> 20 -> rows (ordered): 5 - -CALL NEXTVAL('test_seq'); -> NEXTVAL('test_seq') -> ------------------- -> 18 -> rows: 1 - -DROP SEQUENCE IF EXISTS TEST_SEQ; -> ok - -DROP SEQUENCE IF EXISTS TEST_SEQ; -> ok - -CREATE SEQUENCE TEST_LONG START WITH 90123456789012345 MAXVALUE 90123456789012345 INCREMENT BY -1; -> ok - -SET AUTOCOMMIT FALSE; -> ok - -CALL NEXT VALUE FOR TEST_LONG; -> NEXT VALUE FOR PUBLIC.TEST_LONG -> ------------------------------- -> 90123456789012345 -> rows: 1 - -CALL IDENTITY(); -> IDENTITY() -> ----------------- -> 90123456789012345 -> rows: 1 - -SELECT SEQUENCE_NAME, CURRENT_VALUE, INCREMENT FROM INFORMATION_SCHEMA.SEQUENCES; -> SEQUENCE_NAME CURRENT_VALUE INCREMENT 
-> ------------- ----------------- --------- -> TEST_LONG 90123456789012345 -1 -> rows: 1 - -SET AUTOCOMMIT TRUE; -> ok - -DROP SEQUENCE TEST_LONG; -> ok - -DROP TABLE TEST; -> ok - ---- call ---------------------------------------------------------------------------------------------- -CALL PI(); -> 3.141592653589793 -> ----------------- -> 3.141592653589793 -> rows: 1 - -CALL 1+1; -> 2 -> - -> 2 -> rows: 1 - ---- constraints ---------------------------------------------------------------------------------------------- -CREATE TABLE PARENT(A INT, B INT, PRIMARY KEY(A, B)); -> ok - -CREATE TABLE CHILD(ID INT PRIMARY KEY, PA INT, PB INT, CONSTRAINT AB FOREIGN KEY(PA, PB) REFERENCES PARENT(A, B)); -> ok - -SELECT * FROM INFORMATION_SCHEMA.CROSS_REFERENCES; -> PKTABLE_CATALOG PKTABLE_SCHEMA PKTABLE_NAME PKCOLUMN_NAME FKTABLE_CATALOG FKTABLE_SCHEMA FKTABLE_NAME FKCOLUMN_NAME ORDINAL_POSITION UPDATE_RULE DELETE_RULE FK_NAME PK_NAME DEFERRABILITY -> --------------- -------------- ------------ ------------- --------------- -------------- ------------ ------------- ---------------- ----------- ----------- ------- ------------- ------------- -> SCRIPT PUBLIC PARENT A SCRIPT PUBLIC CHILD PA 1 1 1 AB PRIMARY_KEY_8 7 -> SCRIPT PUBLIC PARENT B SCRIPT PUBLIC CHILD PB 2 1 1 AB PRIMARY_KEY_8 7 -> rows: 2 - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - -drop table if exists test; -> ok - -create table test(id int primary key, parent int, foreign key(id) references test(parent)); -> ok - -insert into test values(1, 1); -> update count: 1 - -delete from test; -> update count: 1 - -drop table test; -> ok - -drop table if exists child; -> ok - -drop table if exists parent; -> ok - -create table child(a int, id int); -> ok - -create table parent(id int primary key); -> ok - -alter table child add foreign key(id) references parent; -> ok - -insert into parent values(1); -> update count: 1 - -delete from parent; -> update count: 1 - -drop table if exists child; -> ok - -drop 
table if exists parent; -> ok - -CREATE MEMORY TABLE PARENT(ID INT PRIMARY KEY); -> ok - -CREATE MEMORY TABLE CHILD(ID INT, PARENT_ID INT, FOREIGN KEY(PARENT_ID) REFERENCES PARENT); -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> ------------------------------------------------------------------------------------------------------------------------ -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; -> ALTER TABLE PUBLIC.CHILD ADD CONSTRAINT PUBLIC.CONSTRAINT_3 FOREIGN KEY(PARENT_ID) REFERENCES PUBLIC.PARENT(ID) NOCHECK; -> ALTER TABLE PUBLIC.PARENT ADD CONSTRAINT PUBLIC.CONSTRAINT_8 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.CHILD( ID INT, PARENT_ID INT ); -> CREATE MEMORY TABLE PUBLIC.PARENT( ID INT NOT NULL ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 7 - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - -CREATE TABLE TEST(ID INT, CONSTRAINT PK PRIMARY KEY(ID), NAME VARCHAR, PARENT INT, CONSTRAINT P FOREIGN KEY(PARENT) REFERENCES(ID)); -> ok - -ALTER TABLE TEST DROP PRIMARY KEY; -> exception - -ALTER TABLE TEST DROP CONSTRAINT PK; -> ok - -INSERT INTO TEST VALUES(1, 'Frank', 1); -> update count: 1 - -INSERT INTO TEST VALUES(2, 'Sue', 1); -> update count: 1 - -INSERT INTO TEST VALUES(3, 'Karin', 2); -> update count: 1 - -INSERT INTO TEST VALUES(4, 'Joe', 5); -> exception - -INSERT INTO TEST VALUES(4, 'Joe', 3); -> update count: 1 - -DROP TABLE TEST; -> ok - -CREATE MEMORY TABLE TEST(A_INT INT NOT NULL, B_INT INT NOT NULL, PRIMARY KEY(A_INT, B_INT)); -> ok - -ALTER TABLE TEST ADD CONSTRAINT A_UNIQUE UNIQUE(A_INT); -> ok - -ALTER TABLE TEST DROP PRIMARY KEY; -> ok - -ALTER TABLE TEST DROP PRIMARY KEY; -> exception - -ALTER TABLE TEST DROP CONSTRAINT A_UNIQUE; -> ok - -ALTER TABLE TEST ADD CONSTRAINT C1 FOREIGN KEY(A_INT) REFERENCES TEST(B_INT); -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> 
---------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.C1 FOREIGN KEY(A_INT) REFERENCES PUBLIC.TEST(B_INT) NOCHECK; -> CREATE MEMORY TABLE PUBLIC.TEST( A_INT INT NOT NULL, B_INT INT NOT NULL ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 4 - -ALTER TABLE TEST DROP CONSTRAINT C1; -> ok - -ALTER TABLE TEST DROP CONSTRAINT C1; -> exception - -DROP TABLE TEST; -> ok - -CREATE MEMORY TABLE A_TEST(A_INT INT NOT NULL, A_VARCHAR VARCHAR(255) DEFAULT 'x', A_DATE DATE, A_DECIMAL DECIMAL(10,2)); -> ok - -ALTER TABLE A_TEST ADD PRIMARY KEY(A_INT); -> ok - -ALTER TABLE A_TEST ADD CONSTRAINT MIN_LENGTH CHECK LENGTH(A_VARCHAR)>1; -> ok - -ALTER TABLE A_TEST ADD CONSTRAINT DATE_UNIQUE UNIQUE(A_DATE); -> ok - -ALTER TABLE A_TEST ADD CONSTRAINT DATE_UNIQUE_2 UNIQUE(A_DATE); -> ok - -INSERT INTO A_TEST VALUES(NULL, NULL, NULL, NULL); -> exception - -INSERT INTO A_TEST VALUES(1, 'A', NULL, NULL); -> exception - -INSERT INTO A_TEST VALUES(1, 'AB', NULL, NULL); -> update count: 1 - -INSERT INTO A_TEST VALUES(1, 'AB', NULL, NULL); -> exception - -INSERT INTO A_TEST VALUES(2, 'AB', NULL, NULL); -> update count: 1 - -INSERT INTO A_TEST VALUES(3, 'AB', '2004-01-01', NULL); -> update count: 1 - -INSERT INTO A_TEST VALUES(4, 'AB', '2004-01-01', NULL); -> exception - -INSERT INTO A_TEST VALUES(5, 'ABC', '2004-01-02', NULL); -> update count: 1 - -CREATE MEMORY TABLE B_TEST(B_INT INT DEFAULT -1 NOT NULL , B_VARCHAR VARCHAR(255) DEFAULT NULL NULL, CONSTRAINT B_UNIQUE UNIQUE(B_INT)); -> ok - -ALTER TABLE B_TEST ADD CHECK LENGTH(B_VARCHAR)>1; -> ok - -ALTER TABLE B_TEST ADD CONSTRAINT C1 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE CASCADE ON UPDATE CASCADE; -> ok - -ALTER TABLE B_TEST ADD PRIMARY KEY(B_INT); -> ok - -INSERT INTO B_TEST VALUES(10, 'X'); -> exception - -INSERT INTO B_TEST VALUES(1, 'X'); -> exception - -INSERT 
INTO B_TEST VALUES(1, 'XX'); -> update count: 1 - -SELECT * FROM B_TEST; -> B_INT B_VARCHAR -> ----- --------- -> 1 XX -> rows: 1 - -UPDATE A_TEST SET A_INT = A_INT*10; -> update count: 4 - -SELECT * FROM B_TEST; -> B_INT B_VARCHAR -> ----- --------- -> 10 XX -> rows: 1 - -ALTER TABLE B_TEST DROP CONSTRAINT C1; -> ok - -ALTER TABLE B_TEST ADD CONSTRAINT C2 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE SET NULL ON UPDATE SET NULL; -> ok - -UPDATE A_TEST SET A_INT = A_INT*10; -> exception - -SELECT * FROM B_TEST; -> B_INT B_VARCHAR -> ----- --------- -> 10 XX -> rows: 1 - -ALTER TABLE B_TEST DROP CONSTRAINT C2; -> ok - -UPDATE B_TEST SET B_INT = 20; -> update count: 1 - -SELECT A_INT FROM A_TEST; -> A_INT -> ----- -> 10 -> 20 -> 30 -> 50 -> rows: 4 - -ALTER TABLE B_TEST ADD CONSTRAINT C3 FOREIGN KEY(B_INT) REFERENCES A_TEST(A_INT) ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; -> ok - -UPDATE A_TEST SET A_INT = A_INT*10; -> update count: 4 - -SELECT * FROM B_TEST; -> B_INT B_VARCHAR -> ----- --------- -> -1 XX -> rows: 1 - -DELETE FROM A_TEST; -> update count: 4 - -SELECT * FROM B_TEST; -> B_INT B_VARCHAR -> ----- --------- -> -1 XX -> rows: 1 - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> ---------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.A_TEST; -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.B_TEST; -> ALTER TABLE PUBLIC.A_TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_7 PRIMARY KEY(A_INT); -> ALTER TABLE PUBLIC.A_TEST ADD CONSTRAINT PUBLIC.DATE_UNIQUE UNIQUE(A_DATE); -> ALTER TABLE PUBLIC.A_TEST ADD CONSTRAINT PUBLIC.DATE_UNIQUE_2 UNIQUE(A_DATE); -> ALTER TABLE PUBLIC.A_TEST ADD CONSTRAINT PUBLIC.MIN_LENGTH CHECK(LENGTH(A_VARCHAR) > 1) NOCHECK; -> ALTER TABLE PUBLIC.B_TEST ADD CONSTRAINT PUBLIC.B_UNIQUE UNIQUE(B_INT); -> ALTER TABLE PUBLIC.B_TEST ADD CONSTRAINT PUBLIC.C3 FOREIGN KEY(B_INT) REFERENCES PUBLIC.A_TEST(A_INT) ON DELETE 
SET DEFAULT ON UPDATE SET DEFAULT NOCHECK; -> ALTER TABLE PUBLIC.B_TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_76 CHECK(LENGTH(B_VARCHAR) > 1) NOCHECK; -> ALTER TABLE PUBLIC.B_TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_760 PRIMARY KEY(B_INT); -> CREATE MEMORY TABLE PUBLIC.A_TEST( A_INT INT NOT NULL, A_VARCHAR VARCHAR(255) DEFAULT 'x', A_DATE DATE, A_DECIMAL DECIMAL(10, 2) ); -> CREATE MEMORY TABLE PUBLIC.B_TEST( B_INT INT DEFAULT -1 NOT NULL, B_VARCHAR VARCHAR(255) DEFAULT NULL ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.B_TEST(B_INT, B_VARCHAR) VALUES (-1, 'XX'); -> rows: 14 - -DROP TABLE A_TEST; -> ok - -DROP TABLE B_TEST; -> ok - -CREATE MEMORY TABLE FAMILY(ID INT, NAME VARCHAR(20)); -> ok - -CREATE INDEX FAMILY_ID_NAME ON FAMILY(ID, NAME); -> ok - -CREATE MEMORY TABLE PARENT(ID INT, FAMILY_ID INT, NAME VARCHAR(20)); -> ok - -ALTER TABLE PARENT ADD CONSTRAINT PARENT_FAMILY FOREIGN KEY(FAMILY_ID) -REFERENCES FAMILY(ID); -> ok - -CREATE MEMORY TABLE CHILD( -ID INT, -PARENTID INT, -FAMILY_ID INT, -UNIQUE(ID, PARENTID), -CONSTRAINT PARENT_CHILD FOREIGN KEY(PARENTID, FAMILY_ID) -REFERENCES PARENT(ID, FAMILY_ID) -ON UPDATE CASCADE -ON DELETE SET NULL, -NAME VARCHAR(20)); -> ok - -INSERT INTO FAMILY VALUES(1, 'Capone'); -> update count: 1 - -INSERT INTO CHILD VALUES(100, 1, 1, 'early'); -> exception - -INSERT INTO PARENT VALUES(1, 1, 'Sue'); -> update count: 1 - -INSERT INTO PARENT VALUES(2, 1, 'Joe'); -> update count: 1 - -INSERT INTO CHILD VALUES(100, 1, 1, 'Simon'); -> update count: 1 - -INSERT INTO CHILD VALUES(101, 1, 1, 'Sabine'); -> update count: 1 - -INSERT INTO CHILD VALUES(200, 2, 1, 'Jim'); -> update count: 1 - -INSERT INTO CHILD VALUES(201, 2, 1, 'Johann'); -> update count: 1 - -UPDATE PARENT SET ID=3 WHERE ID=1; -> update count: 1 - -SELECT * FROM CHILD; -> ID PARENTID FAMILY_ID NAME -> --- -------- --------- ------ -> 100 3 1 Simon -> 101 3 1 Sabine -> 200 2 1 Jim -> 201 2 1 Johann -> rows: 4 - -UPDATE CHILD SET PARENTID=-1 WHERE 
PARENTID IS NOT NULL; -> exception - -DELETE FROM PARENT WHERE ID=2; -> update count: 1 - -SELECT * FROM CHILD; -> ID PARENTID FAMILY_ID NAME -> --- -------- --------- ------ -> 100 3 1 Simon -> 101 3 1 Sabine -> 200 null null Jim -> 201 null null Johann -> rows: 4 - -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; -> SCRIPT -> ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; -> ALTER TABLE PUBLIC.CHILD ADD CONSTRAINT PUBLIC.CONSTRAINT_3 UNIQUE(ID, PARENTID); -> ALTER TABLE PUBLIC.CHILD ADD CONSTRAINT PUBLIC.PARENT_CHILD FOREIGN KEY(PARENTID, FAMILY_ID) REFERENCES PUBLIC.PARENT(ID, FAMILY_ID) ON DELETE SET NULL ON UPDATE CASCADE NOCHECK; -> ALTER TABLE PUBLIC.PARENT ADD CONSTRAINT PUBLIC.PARENT_FAMILY FOREIGN KEY(FAMILY_ID) REFERENCES PUBLIC.FAMILY(ID) NOCHECK; -> CREATE INDEX PUBLIC.FAMILY_ID_NAME ON PUBLIC.FAMILY(ID, NAME); -> CREATE MEMORY TABLE PUBLIC.CHILD( ID INT, PARENTID INT, FAMILY_ID INT, NAME VARCHAR(20) ); -> CREATE MEMORY TABLE PUBLIC.FAMILY( ID INT, NAME VARCHAR(20) ); -> CREATE MEMORY TABLE PUBLIC.PARENT( ID INT, FAMILY_ID INT, NAME VARCHAR(20) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(100, 3, 1, 'Simon'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(101, 3, 1, 'Sabine'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(200, NULL, NULL, 'Jim'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(201, NULL, NULL, 'Johann'); -> INSERT INTO PUBLIC.FAMILY(ID, NAME) VALUES(1, 'Capone'); -> INSERT INTO PUBLIC.PARENT(ID, FAMILY_ID, NAME) VALUES(3, 1, 'Sue'); -> rows: 17 - -ALTER TABLE CHILD DROP CONSTRAINT PARENT_CHILD; -> ok - -SCRIPT SIMPLE NOPASSWORDS NOSETTINGS; -> 
SCRIPT -> -------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.FAMILY; -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.PARENT; -> -- 4 +/- SELECT COUNT(*) FROM PUBLIC.CHILD; -> ALTER TABLE PUBLIC.CHILD ADD CONSTRAINT PUBLIC.CONSTRAINT_3 UNIQUE(ID, PARENTID); -> ALTER TABLE PUBLIC.PARENT ADD CONSTRAINT PUBLIC.PARENT_FAMILY FOREIGN KEY(FAMILY_ID) REFERENCES PUBLIC.FAMILY(ID) NOCHECK; -> CREATE INDEX PUBLIC.FAMILY_ID_NAME ON PUBLIC.FAMILY(ID, NAME); -> CREATE MEMORY TABLE PUBLIC.CHILD( ID INT, PARENTID INT, FAMILY_ID INT, NAME VARCHAR(20) ); -> CREATE MEMORY TABLE PUBLIC.FAMILY( ID INT, NAME VARCHAR(20) ); -> CREATE MEMORY TABLE PUBLIC.PARENT( ID INT, FAMILY_ID INT, NAME VARCHAR(20) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(100, 3, 1, 'Simon'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(101, 3, 1, 'Sabine'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(200, NULL, NULL, 'Jim'); -> INSERT INTO PUBLIC.CHILD(ID, PARENTID, FAMILY_ID, NAME) VALUES(201, NULL, NULL, 'Johann'); -> INSERT INTO PUBLIC.FAMILY(ID, NAME) VALUES(1, 'Capone'); -> INSERT INTO PUBLIC.PARENT(ID, FAMILY_ID, NAME) VALUES(3, 1, 'Sue'); -> rows: 16 - -DELETE FROM PARENT; -> update count: 1 - -SELECT * FROM CHILD; -> ID PARENTID FAMILY_ID NAME -> --- -------- --------- ------ -> 100 3 1 Simon -> 101 3 1 Sabine -> 200 null null Jim -> 201 null null Johann -> rows: 4 - -DROP TABLE PARENT; -> ok - -DROP TABLE CHILD; -> ok - -DROP TABLE FAMILY; -> ok - -CREATE TABLE INVOICE(CUSTOMER_ID INT, ID INT, TOTAL_AMOUNT DECIMAL(10,2), PRIMARY KEY(CUSTOMER_ID, ID)); -> ok - -CREATE TABLE INVOICE_LINE(CUSTOMER_ID INT, INVOICE_ID INT, LINE_ID INT, TEXT VARCHAR, AMOUNT DECIMAL(10,2)); -> ok - -CREATE INDEX ON INVOICE_LINE(CUSTOMER_ID); -> ok - -ALTER TABLE INVOICE_LINE ADD FOREIGN KEY(CUSTOMER_ID, 
INVOICE_ID) REFERENCES INVOICE(CUSTOMER_ID, ID) ON DELETE CASCADE; -> ok - -INSERT INTO INVOICE VALUES(1, 100, NULL), (1, 101, NULL); -> update count: 2 - -INSERT INTO INVOICE_LINE VALUES(1, 100, 10, 'Apples', 20.35), (1, 100, 20, 'Paper', 10.05), (1, 101, 10, 'Pencil', 1.10), (1, 101, 20, 'Chair', 540.40); -> update count: 4 - -INSERT INTO INVOICE_LINE VALUES(1, 102, 20, 'Nothing', 30.00); -> exception - -DELETE FROM INVOICE WHERE ID = 100; -> update count: 1 - -SELECT * FROM INVOICE_LINE; -> CUSTOMER_ID INVOICE_ID LINE_ID TEXT AMOUNT -> ----------- ---------- ------- ------ ------ -> 1 101 10 Pencil 1.10 -> 1 101 20 Chair 540.40 -> rows: 2 - -DROP TABLE INVOICE; -> ok - -DROP TABLE INVOICE_LINE; -> ok - -CREATE MEMORY TABLE TEST(A INT, B INT, FOREIGN KEY (B) REFERENCES(A) ON UPDATE RESTRICT ON DELETE NO ACTION); -> ok - -SCRIPT NOPASSWORDS NOSETTINGS; -> SCRIPT -> ------------------------------------------------------------------------------------------------------------ -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 FOREIGN KEY(B) REFERENCES PUBLIC.TEST(A) NOCHECK; -> CREATE MEMORY TABLE PUBLIC.TEST( A INT, B INT ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> rows: 4 - -DROP TABLE TEST; -> ok - ---- users ---------------------------------------------------------------------------------------------- -CREATE USER TEST PASSWORD 'abc'; -> ok - -CREATE USER TEST_ADMIN_X PASSWORD 'def' ADMIN; -> ok - -ALTER USER TEST_ADMIN_X RENAME TO TEST_ADMIN; -> ok - -ALTER USER TEST_ADMIN ADMIN TRUE; -> ok - -CREATE USER TEST2 PASSWORD '123' ADMIN; -> ok - -ALTER USER TEST2 SET PASSWORD 'abc'; -> ok - -ALTER USER TEST2 ADMIN FALSE; -> ok - -CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -> ok - -CREATE MEMORY TABLE TEST2_X(ID INT); -> ok - -CREATE INDEX IDX_ID ON TEST2_X(ID); -> ok - -ALTER TABLE TEST2_X RENAME TO TEST2; -> ok - -ALTER INDEX IDX_ID RENAME TO IDX_ID2; -> ok - -SCRIPT 
NOPASSWORDS NOSETTINGS; -> SCRIPT -> --------------------------------------------------------------------------- -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST2; -> -- 0 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE INDEX PUBLIC.IDX_ID2 ON PUBLIC.TEST2(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, NAME VARCHAR(255) ); -> CREATE MEMORY TABLE PUBLIC.TEST2( ID INT ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> CREATE USER IF NOT EXISTS TEST PASSWORD ''; -> CREATE USER IF NOT EXISTS TEST2 PASSWORD ''; -> CREATE USER IF NOT EXISTS TEST_ADMIN PASSWORD '' ADMIN; -> rows: 10 - -SELECT NAME, ADMIN FROM INFORMATION_SCHEMA.USERS; -> NAME ADMIN -> ---------- ----- -> SA true -> TEST false -> TEST2 false -> TEST_ADMIN true -> rows: 4 - -DROP TABLE TEST2; -> ok - -DROP TABLE TEST; -> ok - -DROP USER TEST; -> ok - -DROP USER IF EXISTS TEST; -> ok - -DROP USER IF EXISTS TEST2; -> ok - -DROP USER TEST_ADMIN; -> ok - -SET AUTOCOMMIT FALSE; -> ok - -SET SALT '' HASH ''; -> ok - -CREATE USER SECURE SALT '001122' HASH '1122334455'; -> ok - -ALTER USER SECURE SET SALT '112233' HASH '2233445566'; -> ok - -SCRIPT NOSETTINGS; -> SCRIPT -> ----------------------------------------------------------------- -> CREATE USER IF NOT EXISTS SA SALT '' HASH '' ADMIN; -> CREATE USER IF NOT EXISTS SECURE SALT '112233' HASH '2233445566'; -> rows: 2 - -SET PASSWORD '123'; -> ok - -SET AUTOCOMMIT TRUE; -> ok - -DROP USER SECURE; -> ok - ---- functions ---------------------------------------------------------------------------------------------- -CALL FORMATDATETIME(PARSEDATETIME('2001-02-03 04:05:06 GMT', 'yyyy-MM-dd HH:mm:ss z', 'en', 'GMT'), 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT'); -> 'Sat, 3 Feb 2001 04:05:06 GMT' -> ------------------------------ -> Sat, 3 Feb 2001 04:05:06 GMT -> rows: 1 - -CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'yyyy-MM-dd HH:mm:ss'); -> '2001-02-03 04:05:06' -> 
--------------------- -> 2001-02-03 04:05:06 -> rows: 1 - -CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'MM/dd/yyyy HH:mm:ss'); -> '02/03/2001 04:05:06' -> --------------------- -> 02/03/2001 04:05:06 -> rows: 1 - -CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'd. MMMM yyyy', 'de'); -> '3. Februar 2001' -> ----------------- -> 3. Februar 2001 -> rows: 1 - -CALL FORMATDATETIME(PARSEDATETIME('Sat, 3 Feb 2001 04:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'en', 'GMT'), 'yyyy-MM-dd HH:mm:ss', 'en', 'GMT'); -> '2001-02-03 04:05:06' -> --------------------- -> 2001-02-03 04:05:06 -> rows: 1 - -CALL PARSEDATETIME('3. Februar 2001', 'd. MMMM yyyy', 'de'); -> TIMESTAMP '2001-02-03 00:00:00.0' -> --------------------------------- -> 2001-02-03 00:00:00.0 -> rows: 1 - -CALL PARSEDATETIME('02/03/2001 04:05:06', 'MM/dd/yyyy HH:mm:ss'); -> TIMESTAMP '2001-02-03 04:05:06.0' -> --------------------------------- -> 2001-02-03 04:05:06.0 -> rows: 1 - -CALL XMLNODE('a', XMLATTR('href', 'http://h2database.com')); -> STRINGDECODE('\n') -> ----------------------------------------------------- -> -> rows: 1 - -CALL XMLNODE('br'); -> STRINGDECODE('
    \n') -> ----------------------- ->
    -> rows: 1 - -CALL XMLNODE('p', null, 'Hello World'); -> STRINGDECODE('

    Hello World

    \n') -> ------------------------------------ ->

    Hello World

    -> rows: 1 - -SELECT XMLNODE('p', null, 'Hello' || chr(10) || 'World') X; -> X -> --------------------- ->

    Hello World

    -> rows: 1 - -SELECT XMLNODE('p', null, 'Hello' || chr(10) || 'World', false) X; -> X -> ------------------- ->

    Hello World

    -> rows: 1 - -CALL XMLCOMMENT('Test'); -> STRINGDECODE('\n') -> ------------------------------- -> -> rows: 1 - -CALL XMLCOMMENT('--- test ---'); -> STRINGDECODE('\n') -> ------------------------------------------- -> -> rows: 1 - -CALL XMLCDATA(''); -> ']]>' -> -------------------------- -> ]]> -> rows: 1 - -CALL XMLCDATA('special text ]]>'); -> 'special text ]]>' -> --------------------- -> special text ]]> -> rows: 1 - -CALL XMLSTARTDOC(); -> STRINGDECODE('\n') -> ----------------------------------------- -> -> rows: 1 - -CALL XMLTEXT('test'); -> 'test' -> ------ -> test -> rows: 1 - -CALL XMLTEXT(''); -> '<test>' -> -------------- -> <test> -> rows: 1 - -SELECT XMLTEXT('hello' || chr(10) || 'world') X; -> X -> ----------- -> hello world -> rows: 1 - -CALL XMLTEXT('hello' || chr(10) || 'world', true); -> 'hello world' -> ----------------- -> hello world -> rows: 1 - -create memory table test(id int primary key, name varchar(255)); -> ok - -INSERT INTO TEST VALUES(2, STRINGDECODE('abcsond\344rzeich\344 ') || char(22222) || STRINGDECODE(' \366\344\374\326\304\334\351\350\340\361!')); -> update count: 1 - -script nopasswords nosettings; -> SCRIPT -> ------------------------------------------------------------------------------------------------------------------------------------------------------------- -> -- 1 +/- SELECT COUNT(*) FROM PUBLIC.TEST; -> ALTER TABLE PUBLIC.TEST ADD CONSTRAINT PUBLIC.CONSTRAINT_2 PRIMARY KEY(ID); -> CREATE MEMORY TABLE PUBLIC.TEST( ID INT NOT NULL, NAME VARCHAR(255) ); -> CREATE USER IF NOT EXISTS SA PASSWORD '' ADMIN; -> INSERT INTO PUBLIC.TEST(ID, NAME) VALUES (2, STRINGDECODE('abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1!')); -> rows: 5 - -call STRINGENCODE(STRINGDECODE('abcsond\344rzeich\344 \u56ce \366\344\374\326\304\334\351\350\340\361!')); -> 'abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1!' 
-> ------------------------------------------------------------------------------------------------ -> abcsond\u00e4rzeich\u00e4 \u56ce \u00f6\u00e4\u00fc\u00d6\u00c4\u00dc\u00e9\u00e8\u00e0\u00f1! -> rows: 1 - -delete from test; -> update count: 1 - -insert into test values(1, 'Hello'); -> update count: 1 - -select abs(-1) r1, abs(id) r1b from test; -> R1 R1B -> -- --- -> 1 1 -> rows: 1 - -select abs(sum(id)) r1 from test; -> R1 -> -- -> 1 -> rows: 1 - -select abs(null) vn, abs(-1) r1, abs(1) r2, abs(0) r3, abs(-0.1) r4, abs(0.1) r5 from test; -> VN R1 R2 R3 R4 R5 -> ---- -- -- -- --- --- -> null 1 1 0 0.1 0.1 -> rows: 1 - -select acos(null) vn, acos(-1) r1 from test; -> VN R1 -> ---- ----------------- -> null 3.141592653589793 -> rows: 1 - -select asin(null) vn, asin(-1) r1 from test; -> VN R1 -> ---- ------------------- -> null -1.5707963267948966 -> rows: 1 - -select atan(null) vn, atan(-1) r1 from test; -> VN R1 -> ---- ------------------- -> null -0.7853981633974483 -> rows: 1 - -select cos(null) vn, cos(-1) r1 from test; -> VN R1 -> ---- ------------------ -> null 0.5403023058681398 -> rows: 1 - -select cot(null) vn, cot(-1) r1 from test; -> VN R1 -> ---- ------------------- -> null -0.6420926159343306 -> rows: 1 - -select sin(null) vn, sin(-1) r1 from test; -> VN R1 -> ---- ------------------- -> null -0.8414709848078965 -> rows: 1 - -select tan(null) vn, tan(-1) r1 from test; -> VN R1 -> ---- ------------------- -> null -1.5574077246549023 -> rows: 1 - -select atan2(null, null) vn, atan2(10, 1) r1 from test; -> VN R1 -> ---- ------------------ -> null 1.4711276743037347 -> rows: 1 - -select bitand(null, 1) vn, bitand(1, null) vn1, bitand(null, null) vn2, bitand(3, 6) e2 from test; -> VN VN1 VN2 E2 -> ---- ---- ---- -- -> null null null 2 -> rows: 1 - -select bitor(null, 1) vn, bitor(1, null) vn1, bitor(null, null) vn2, bitor(3, 6) e7 from test; -> VN VN1 VN2 E7 -> ---- ---- ---- -- -> null null null 7 -> rows: 1 - -select bitxor(null, 1) vn, bitxor(1, 
null) vn1, bitxor(null, null) vn2, bitxor(3, 6) e5 from test; -> VN VN1 VN2 E5 -> ---- ---- ---- -- -> null null null 5 -> rows: 1 - -select mod(null, 1) vn, mod(1, null) vn1, mod(null, null) vn2, mod(10, 2) e1 from test; -> VN VN1 VN2 E1 -> ---- ---- ---- -- -> null null null 0 -> rows: 1 - -select ceil(null) vn, ceil(1) v1, ceiling(1.1) v2, ceil(-1.1) v3, ceiling(1.9) v4, ceiling(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- --- ---- --- ---- -> null 1.0 2.0 -1.0 2.0 -1.0 -> rows: 1 - -select floor(null) vn, floor(1) v1, floor(1.1) v2, floor(-1.1) v3, floor(1.9) v4, floor(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- --- ---- --- ---- -> null 1.0 1.0 -2.0 1.0 -2.0 -> rows: 1 - -select log(null) vn, log(1) v1, ln(1.1) v2, log(-1.1) v3, log(1.9) v4, log(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- ------------------- --- ------------------ --- -> null 0.0 0.09531017980432493 NaN 0.6418538861723947 NaN -> rows: 1 - -select log10(null) vn, log10(0) v1, log10(10) v2, log10(0.0001) v3, log10(1000000) v4, log10(1) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --------- --- ---- --- --- -> null -Infinity 1.0 -4.0 6.0 0.0 -> rows: 1 - -select log(null) vn, log(1) v1, log(1.1) v2, log(-1.1) v3, log(1.9) v4, log(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- --- ------------------- --- ------------------ --- -> null 0.0 0.09531017980432493 NaN 0.6418538861723947 NaN -> rows: 1 - -select degrees(null) vn, degrees(1) v1, degrees(1.1) v2, degrees(-1.1) v3, degrees(1.9) v4, degrees(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- ----------------- ----------------- ------------------ ------------------ ------------------- -> null 57.29577951308232 63.02535746439057 -63.02535746439057 108.86198107485642 -108.86198107485642 -> rows: 1 - -select exp(null) vn, exp(1) v1, exp(1.1) v2, exp(-1.1) v3, exp(1.9) v4, exp(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- ------------------ ------------------ ------------------ ------------------ ------------------- -> 
null 2.7182818284590455 3.0041660239464334 0.3328710836980795 6.6858944422792685 0.14956861922263506 -> rows: 1 - -select radians(null) vn, radians(1) v1, radians(1.1) v2, radians(-1.1) v3, radians(1.9) v4, radians(-1.9) v5 from test; -> VN V1 V2 V3 V4 V5 -> ---- -------------------- -------------------- --------------------- ------------------- -------------------- -> null 0.017453292519943295 0.019198621771937624 -0.019198621771937624 0.03316125578789226 -0.03316125578789226 -> rows: 1 - -select sqrt(null) vn, sqrt(0) e0, sqrt(1) e1, sqrt(4) e2, sqrt(100) e10, sqrt(0.25) e05 from test; -> VN E0 E1 E2 E10 E05 -> ---- --- --- --- ---- --- -> null 0.0 1.0 2.0 10.0 0.5 -> rows: 1 - -select pi() pi from test; -> PI -> ----------------- -> 3.141592653589793 -> rows: 1 - -select power(null, null) en, power(2, 3) e8, power(16, 0.5) e4 from test; -> EN E8 E4 -> ---- --- --- -> null 8.0 4.0 -> rows: 1 - -SET AUTOCOMMIT FALSE; -> ok - -select rand(1) e, random() f from test; -> E F -> ------------------ ------------------- -> 0.7308781907032909 0.41008081149220166 -> rows: 1 - -select rand() e from test; -> E -> ------------------- -> 0.20771484130971707 -> rows: 1 - -SET AUTOCOMMIT TRUE; -> ok - -select round(null, null) en, round(10.49, 0) e10, round(10.05, 1) e101 from test; -> EN E10 E101 -> ---- ---- ---- -> null 10.0 10.1 -> rows: 1 - -select round(null) en, round(0.6, null) en2, round(1.05) e1, round(-1.51) em2 from test; -> EN EN2 E1 EM2 -> ---- ---- --- ---- -> null null 1.0 -2.0 -> rows: 1 - -select roundmagic(null) en, roundmagic(cast(3.11 as double) - 3.1) e001, roundmagic(3.11-3.1-0.01) e000, roundmagic(2000000000000) e20x from test; -> EN E001 E000 E20X -> ---- ---- ---- ------ -> null 0.01 0.0 2.0E12 -> rows: 1 - -select sign(null) en, sign(10) e1, sign(0) e0, sign(-0.1) em1 from test; -> EN E1 E0 EM1 -> ---- -- -- --- -> null 1 0 -1 -> rows: 1 - -select truncate(null, null) en, truncate(1.99, 0) e1, truncate(-10.9, 0) em10 from test; -> EN E1 EM10 -> ---- 
--- ----- -> null 1.0 -10.0 -> rows: 1 - -select trunc(null, null) en, trunc(1.99, 0) e1, trunc(-10.9, 0) em10 from test; -> EN E1 EM10 -> ---- --- ----- -> null 1.0 -10.0 -> rows: 1 - -select ascii(null) en, ascii('') en, ascii('Abc') e65 from test; -> EN EN E65 -> ---- ---- --- -> null null 65 -> rows: 1 - -select bit_length(null) en, bit_length('') e0, bit_length('ab') e32 from test; -> EN E0 E32 -> ---- -- --- -> null 0 32 -> rows: 1 - -select length(null) en, length('') e0, length('ab') e2 from test; -> EN E0 E2 -> ---- -- -- -> null 0 2 -> rows: 1 - -select char_length(null) en, char_length('') e0, char_length('ab') e2 from test; -> EN E0 E2 -> ---- -- -- -> null 0 2 -> rows: 1 - -select character_length(null) en, character_length('') e0, character_length('ab') e2 from test; -> EN E0 E2 -> ---- -- -- -> null 0 2 -> rows: 1 - -select octet_length(null) en, octet_length('') e0, octet_length('ab') e4 from test; -> EN E0 E4 -> ---- -- -- -> null 0 4 -> rows: 1 - -select char(null) en, char(65) ea from test; -> EN EA -> ---- -- -> null A -> rows: 1 - -select concat(null, null) en, concat(null, 'a') ea, concat('b', null) eb, concat('ab', 'c') abc from test; -> EN EA EB ABC -> ---- -- -- --- -> null a b abc -> rows: 1 - -SELECT CONCAT('a', 'b', 'c', 'd') AS test; -> TEST -> ---- -> abcd -> rows: 1 - -select difference(null, null) en, difference('a', null) en1, difference(null, 'a') en2 from test; -> EN EN1 EN2 -> ---- ---- ---- -> null null null -> rows: 1 - -select difference('abc', 'abc') e0, difference('Thomas', 'Tom') e1 from test; -> E0 E1 -> -- -- -> 4 3 -> rows: 1 - -select hextoraw(null) en, rawtohex(null) en1, hextoraw(rawtohex('abc')) abc from test; -> EN EN1 ABC -> ---- ---- --- -> null null abc -> rows: 1 - -select insert(null, null, null, null) en, insert('Rund', 1, 0, 'o') e_round, insert(null, 1, 1, 'a') ea from test; -> EN E_ROUND EA -> ---- ------- -- -> null Rund a -> rows: 1 - -select insert('World', 2, 4, 'e') welt, insert('Hello', 2, 1, 'a') 
hallo from test; -> WELT HALLO -> ---- ----- -> We Hallo -> rows: 1 - -select lcase(null) en, lcase('Hello') hello, lcase('ABC') abc from test; -> EN HELLO ABC -> ---- ----- --- -> null hello abc -> rows: 1 - -select lower(null) en, lower('Hello') hello, lower('ABC') abc from test; -> EN HELLO ABC -> ---- ----- --- -> null hello abc -> rows: 1 - -select ucase(null) en, ucase('Hello') hello, ucase('ABC') abc from test; -> EN HELLO ABC -> ---- ----- --- -> null HELLO ABC -> rows: 1 - -select upper(null) en, upper('Hello') hello, upper('ABC') abc from test; -> EN HELLO ABC -> ---- ----- --- -> null HELLO ABC -> rows: 1 - -select left(null, 10) en, left('abc', null) en2, left('boat', 2) e_bo, left('', 1) ee, left('a', -1) ee2 from test; -> EN EN2 E_BO EE EE2 -> ---- ---- ---- -- --- -> null null bo -> rows: 1 - -select right(null, 10) en, right('abc', null) en2, right('boat-trip', 2) e_ip, right('', 1) ee, right('a', -1) ee2 from test; -> EN EN2 E_IP EE EE2 -> ---- ---- ---- -- --- -> null null ip -> rows: 1 - -select locate(null, null) en, locate(null, null, null) en1 from test; -> EN EN1 -> ---- ---- -> null null -> rows: 1 - -select locate('World', 'Hello World') e7, locate('hi', 'abchihihi', 2) e3 from test; -> E7 E3 -> -- -- -> 7 4 -> rows: 1 - -select instr('Hello World', 'World') e7, instr('abchihihi', 'hi', 2) e3, instr('abcooo', 'o') e2 from test; -> E7 E3 E2 -> -- -- -- -> 7 4 4 -> rows: 1 - -select position(null, null) en, position(null, 'abc') en1, position('World', 'Hello World') e7, position('hi', 'abchihihi') e1 from test; -> EN EN1 E7 E1 -> ---- ---- -- -- -> null null 7 4 -> rows: 1 - -select ltrim(null) en, '>' || ltrim('a') || '<' ea, '>' || ltrim(' a ') || '<' e_as from test; -> EN EA E_AS -> ---- --- ---- -> null >a< >a < -> rows: 1 - -select TRIM(BOTH '_' FROM '__A__') A, TRIM(LEADING FROM ' B ') BS, TRIM(TRAILING 'x' FROM 'xAx') XA from test; -> A BS XA -> - -- -- -> A B xA -> rows: 1 - -select rtrim(null) en, '>' || rtrim('a') || '<' ea, '>' || 
rtrim(' a ') || '<' es from test; -> EN EA ES -> ---- --- ---- -> null >a< > a< -> rows: 1 - -select repeat(null, null) en, repeat('Ho', 2) abcehoho , repeat('abc', 0) ee from test; -> EN ABCEHOHO EE -> ---- -------- -- -> null HoHo -> rows: 1 - -select replace(null, null) en, replace(null, null, null) en1 from test; -> EN EN1 -> ---- ---- -> null null -> rows: 1 - -select replace('abchihihi', 'i', 'o') abcehohoho, replace('that is tom', 'i') abcethstom from test; -> ABCEHOHOHO ABCETHSTOM -> ---------- ---------- -> abchohoho that s tom -> rows: 1 - -select soundex(null) en, soundex('tom') et from test; -> EN ET -> ---- ---- -> null t500 -> rows: 1 - -select -soundex('Washington') W252, soundex('Lee') L000, -soundex('Gutierrez') G362, soundex('Pfister') P236, -soundex('Jackson') J250, soundex('Tymczak') T522, -soundex('VanDeusen') V532, soundex('Ashcraft') A261 from test; -> W252 L000 G362 P236 J250 T522 V532 A261 -> ---- ---- ---- ---- ---- ---- ---- ---- -> W252 L000 G362 P236 J250 T522 V532 A261 -> rows: 1 - -select space(null) en, '>' || space(1) || '<' es, '>' || space(3) || '<' e2 from test; -> EN ES E2 -> ---- --- --- -> null > < > < -> rows: 1 - -select substr(null, null) en, substr(null, null, null) e1, substr('bob', 2) e_ob, substr('bob', 2, 1) eo from test; -> EN E1 E_OB EO -> ---- ---- ---- -- -> null null ob o -> rows: 1 - -select substring(null, null) en, substring(null, null, null) e1, substring('bob', 2) e_ob, substring('bob', 2, 1) eo from test; -> EN E1 E_OB EO -> ---- ---- ---- -- -> null null ob o -> rows: 1 - -select substring(null from null) en, substring(null from null for null) e1, substring('bob' from 2) e_ob, substring('bob' from 2 for 1) eo from test; -> EN E1 E_OB EO -> ---- ---- ---- -- -> null null ob o -> rows: 1 - -call hash('SHA256', stringtoutf8('Hello'), 1); -> X'185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969' -> ------------------------------------------------------------------- -> 
185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969 -> rows: 1 - -CALL HASH('SHA256', STRINGTOUTF8('Password'), 1000); -> X'c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0' -> ------------------------------------------------------------------- -> c644a176ce920bde361ac336089b06cc2f1514dfa95ba5aabfe33f9a22d577f0 -> rows: 1 - -CALL UTF8TOSTRING(STRINGTOUTF8('This is a test')); -> 'This is a test' -> ---------------- -> This is a test -> rows: 1 - -CALL STRINGENCODE(STRINGDECODE('Lines 1\nLine 2')); -> 'Lines 1\nLine 2' -> ----------------- -> Lines 1\nLine 2 -> rows: 1 - -call encrypt('AES', '00000000000000000000000000000000', stringtoutf8('Hello World Test')); -> X'dbd42d55d4b923c4b03eba0396fac98e' -> ----------------------------------- -> dbd42d55d4b923c4b03eba0396fac98e -> rows: 1 - -call utf8tostring(decrypt('AES', '00000000000000000000000000000000', 'dbd42d55d4b923c4b03eba0396fac98e')); -> 'Hello World Test' -> ------------------ -> Hello World Test -> rows: 1 - -CALL ENCRYPT('XTEA', '00', STRINGTOUTF8('Test')); -> X'8bc9a4601b3062692a72a5941072425f' -> ----------------------------------- -> 8bc9a4601b3062692a72a5941072425f -> rows: 1 - -call encrypt('XTEA', '000102030405060708090a0b0c0d0e0f', '4142434445464748'); -> X'dea0b0b40966b0669fbae58ab503765f' -> ----------------------------------- -> dea0b0b40966b0669fbae58ab503765f -> rows: 1 - -call utf8tostring(decrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), encrypt('AES', hash('sha256', stringtoutf8('Hello'), 1000), stringtoutf8('Hello World Test')))); -> 'Hello World Test' -> ------------------ -> Hello World Test -> rows: 1 - -select length(curdate()) c1, length(current_date()) c2, substring(curdate(), 5, 1) c3 from test; -> C1 C2 C3 -> -- -- -- -> 10 10 - -> rows: 1 - -select length(curtime())>=8 c1, length(current_time())>=8 c2, substring(curtime(), 3, 1) c3 from test; -> C1 C2 C3 -> ---- ---- -- -> TRUE TRUE : -> rows: 1 - -select length(now())>20 c1, 
length(current_timestamp())>20 c2, length(now(0))>20 c3, length(now(2))>20 c4, substring(now(5), 20, 1) c5 from test; -> C1 C2 C3 C4 C5 -> ---- ---- ---- ---- -- -> TRUE TRUE TRUE TRUE . -> rows: 1 - -select dateadd('month', 1, timestamp '2003-01-31 10:20:30.012345678') d1 from test; -> D1 -> ----------------------------- -> 2003-02-28 10:20:30.012345678 -> rows: 1 - -select dateadd('year', -1, timestamp '2000-02-29 10:20:30.012345678') d1 from test; -> D1 -> ----------------------------- -> 1999-02-28 10:20:30.012345678 -> rows: 1 - -select datediff('yy', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d1 from test; -> D1 -> -- -> 1 -> rows: 1 - -select datediff('year', timestamp '2003-12-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d1 from test; -> D1 -> -- -> 1 -> rows: 1 - -select datediff('mm', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d2 from test; -> D2 -> -- -> 2 -> rows: 1 - -select datediff('month', timestamp '2003-11-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d2 from test; -> D2 -> -- -> 2 -> rows: 1 - -select datediff('dd', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0') d4 from test; -> D4 -> -- -> 4 -> rows: 1 - -select datediff('day', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-05 10:00:00.0') d4 from test; -> D4 -> -- -> 4 -> rows: 1 - -select datediff('hh', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0') d24 from test; -> D24 -> --- -> 24 -> rows: 1 - -select datediff('hour', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-02 10:00:00.0') d24 from test; -> D24 -> --- -> 24 -> rows: 1 - -select datediff('mi', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d20 from test; -> D20 -> --- -> -20 -> rows: 1 - -select datediff('minute', timestamp '2004-01-01 10:20:30.0', timestamp '2004-01-01 10:00:00.0') d20 from test; -> D20 -> --- -> -20 -> rows: 1 - -select datediff('ss', timestamp '2004-01-01 10:00:00.5', 
timestamp '2004-01-01 10:00:01.0') d1 from test; -> D1 -> -- -> 1 -> rows: 1 - -select datediff('second', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') d1 from test; -> D1 -> -- -> 1 -> rows: 1 - -select datediff('ms', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') d50x from test; -> D50X -> ---- -> 500 -> rows: 1 - -select datediff('millisecond', timestamp '2004-01-01 10:00:00.5', timestamp '2004-01-01 10:00:01.0') d50x from test; -> D50X -> ---- -> 500 -> rows: 1 - -select dayname(date '2005-09-12') d_monday from test; -> D_MONDAY -> -------- -> Monday -> rows: 1 - -select monthname(date '2005-09-12') d_sept from test; -> D_SEPT -> --------- -> September -> rows: 1 - -select dayofmonth(date '2005-09-12') d12 from test; -> D12 -> --- -> 12 -> rows: 1 - -select dayofweek(date '2005-09-12') d2 from test; -> D2 -> -- -> 2 -> rows: 1 - -select dayofyear(date '2005-01-01') d1 from test; -> D1 -> -- -> 1 -> rows: 1 - -select year(date '2005-01-01') d2005 from test; -> D2005 -> ----- -> 2005 -> rows: 1 - -select quarter(date '2005-09-01') d3 from test; -> D3 -> -- -> 3 -> rows: 1 - -select month(date '2005-09-25') d9 from test; -> D9 -> -- -> 9 -> rows: 1 - -select week(date '2003-01-09') d1 from test; -> D1 -> -- -> 2 -> rows: 1 - -select hour(time '23:10:59') d23 from test; -> D23 -> --- -> 23 -> rows: 1 - -select minute(timestamp '2005-01-01 23:10:59') d10 from test; -> D10 -> --- -> 10 -> rows: 1 - -select second(timestamp '2005-01-01 23:10:59') d59 from test; -> D59 -> --- -> 59 -> rows: 1 - -select right(database(), 6) x_script from test; -> X_SCRIPT -> -------- -> SCRIPT -> rows: 1 - -select user() x_sa, current_user() x_sa2 from test; -> X_SA X_SA2 -> ---- ----- -> SA SA -> rows: 1 - -select current_user() x_sa from test; -> X_SA -> ---- -> SA -> rows: 1 - -select autocommit() x_true from test; -> X_TRUE -> ------ -> TRUE -> rows: 1 - -select readonly() x_false from test; -> X_FALSE -> ------- -> FALSE -> rows: 1 - 
-select ifnull(null, '1') x1, ifnull(null, null) xn, ifnull('a', 'b') xa from test; -> X1 XN XA -> -- ---- -- -> 1 null a -> rows: 1 - -select isnull(null, '1') x1, isnull(null, null) xn, isnull('a', 'b') xa from test; -> X1 XN XA -> -- ---- -- -> 1 null a -> rows: 1 - -select casewhen(null, '1', '2') xn, casewhen(1>0, 'n', 'y') xy, casewhen(0<1, 'a', 'b') xa from test; -> XN XY XA -> -- -- -- -> 2 n a -> rows: 1 - -select x, case when x=0 then 'zero' else 'not zero' end y from system_range(0, 2); -> X Y -> - -------- -> 0 zero -> 1 not zero -> 2 not zero -> rows: 3 - -select x, case when x=0 then 'zero' end y from system_range(0, 1); -> X Y -> - ---- -> 0 zero -> 1 null -> rows: 2 - -select x, case x when 0 then 'zero' else 'not zero' end y from system_range(0, 1); -> X Y -> - -------- -> 0 zero -> 1 not zero -> rows: 2 - -select x, case x when 0 then 'zero' when 1 then 'one' end y from system_range(0, 2); -> X Y -> - ---- -> 0 zero -> 1 one -> 2 null -> rows: 3 - -select convert(null, varchar(255)) xn, convert(' 10', int) x10, convert(' 20 ', int) x20 from test; -> XN X10 X20 -> ---- --- --- -> null 10 20 -> rows: 1 - -select cast(null as varchar(255)) xn, cast(' 10' as int) x10, cast(' 20 ' as int) x20 from test; -> XN X10 X20 -> ---- --- --- -> null 10 20 -> rows: 1 - -select coalesce(null, null) xn, coalesce(null, 'a') xa, coalesce('1', '2') x1 from test; -> XN XA X1 -> ---- -- -- -> null a 1 -> rows: 1 - -select nullif(null, null) xn, nullif('a', 'a') xn, nullif('1', '2') x1 from test; -> XN XN X1 -> ---- ---- -- -> null null 1 -> rows: 1 - -drop table test; -> ok - ---- sequence with manual value ------------------ -drop table if exists test; -> ok - -CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); -> ok - -SET AUTOCOMMIT FALSE; -> ok - -insert into test(name) values('Hello'); -> update count: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------- -> 2 -> 
rows: 1 - -insert into test(id, name) values(1234567890123456, 'World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------------- -> 1234567890123456 -> rows: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------------- -> 1234567890123457 -> rows: 1 - -select * from test order by id; -> ID NAME -> ---------------- ----- -> 1 Hello -> 2 World -> 1234567890123456 World -> 1234567890123457 World -> rows (ordered): 4 - -SET AUTOCOMMIT TRUE; -> ok - -drop table if exists test; -> ok - -CREATE TABLE TEST(ID bigint generated by default as identity (start with 1), name varchar); -> ok - -SET AUTOCOMMIT FALSE; -> ok - -insert into test(name) values('Hello'); -> update count: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------- -> 2 -> rows: 1 - -insert into test(id, name) values(1234567890123456, 'World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------------- -> 1234567890123456 -> rows: 1 - -insert into test(name) values('World'); -> update count: 1 - -call identity(); -> IDENTITY() -> ---------------- -> 1234567890123457 -> rows: 1 - -select * from test order by id; -> ID NAME -> ---------------- ----- -> 1 Hello -> 2 World -> 1234567890123456 World -> 1234567890123457 World -> rows (ordered): 4 - -SET AUTOCOMMIT TRUE; -> ok - -drop table test; -> ok - ---- test cases --------------------------------------------------------------------------------------------- -create memory table word(word_id integer, name varchar); -> ok - -alter table word alter column word_id integer(10) auto_increment; -> ok - -insert into word(name) values('Hello'); -> update count: 1 - -alter table word alter column word_id restart with 30872; -> ok - -insert into word(name) values('World'); -> update count: 1 - -select * from word; -> WORD_ID NAME -> ------- ----- -> 1 Hello -> 30872 World -> rows: 2 - -drop table word; -> ok - -create table 
test(id int, name varchar); -> ok - -insert into test values(5, 'b'), (5, 'b'), (20, 'a'); -> update count: 3 - diff --git a/h2/src/test/org/h2/test/testSimple.in.txt b/h2/src/test/org/h2/test/testSimple.in.txt deleted file mode 100644 index 18a71f8ed7..0000000000 --- a/h2/src/test/org/h2/test/testSimple.in.txt +++ /dev/null @@ -1,814 +0,0 @@ -select 1000L / 10; -> 100; -select * from (select x as y from dual order by y); -> 1; -select a.x from dual a, dual b order by x; -> 1; -select trunc(1.3); -> 1.0; -select trunc(timestamp '2001-01-01 14:00:00.0'); -> 2001-01-01 00:00:00.0; -select 1 from(select 2 from(select 1) a right join dual b) c; -> 1; -select 1.00 / 3 * 0.00; -> 0.00000000000000000000000000000; -select 1.00000 / 3 * 0.0000; -> 0.0000000000000000000000000000000000; -select 1.0000000 / 3 * 0.00000; -> 0.0000000000000000000000000000000000000; -select 1.0000000 / 3 * 0.000000; -> 0E-38; -select substr('[Hello]', 2, 5); -> Hello; -select substr('Hello World', -5); -> World; -create table test(id null); -drop table test; -select select decode(null, null, 'a'); -> a; -select select decode(1, 1, 'a'); -> a; -select select decode(1, 2, 'a'); -> null; -select select decode(1, 1, 'a', 'else'); -> a; -select select decode(1, 2, 'a', 'else'); -> else; -select decode(4.0, 2.0, 2.0, 3.0, 3.0); -> null; -select decode('3', 2.0, 2.0, 3, 3.0); -> 3.0; -select decode(4.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 9.0); -> 4.0; -select array_contains((4.0, 2.0, 2.0), 2.0); -> TRUE; -select array_contains((4.0, 2.0, 2.0), 5.0); -> FALSE; -select array_contains(('one', 'two'), 'one'); -> TRUE; -select array_contains(('one', 'two'), 'xxx'); -> FALSE; -select array_contains(('one', 'two'), null); -> FALSE; -select array_contains((null, 'two'), null); -> TRUE; -select array_contains(null, 'one'); -> FALSE; -select array_contains(((1, 2), (3, 4)), (1, 2)); -> TRUE; -select array_contains(((1, 2), (3, 4)), (5, 6)); -> FALSE; -select * from (select group_concat(distinct 1) from 
system_range(1, 3)); -> 1; -select sum(mod(x, 2) = 1) from system_range(1, 10); -> 5; -create table a(x int); -create table b(x int); -select count(*) from (select b.x from a left join b); -> 0; -drop table a, b; -select count(distinct now()) c from system_range(1, 100), system_range(1, 1000); -> 1; -select {fn TIMESTAMPADD(SQL_TSI_DAY, 1, {ts '2011-10-20 20:30:40.001'})}; -> 2011-10-21 20:30:40.001; -select {fn TIMESTAMPADD(SQL_TSI_SECOND, 1, cast('2011-10-20 20:30:40.001' as timestamp))}; -> 2011-10-20 20:30:41.001; -select cast(128 as binary); -> 00000080; -select cast(65535 as binary); -> 0000ffff; -select cast(cast('ff' as binary) as tinyint) x; -> -1; -select cast(cast('7f' as binary) as tinyint) x; -> 127; -select cast(cast('ff' as binary) as smallint) x; -> 255; -select cast(cast('ff' as binary) as int) x; -> 255; -select cast(cast('ffff' as binary) as long) x; -> 65535; -select cast(cast(65535 as long) as binary); -> 000000000000ffff; -select cast(cast(-1 as tinyint) as binary); -> ff; -select cast(cast(-1 as smallint) as binary); -> ffff; -select cast(cast(-1 as int) as binary); -> ffffffff; -select cast(cast(-1 as long) as binary); -> ffffffffffffffff; -select cast(cast(1 as tinyint) as binary); -> 01; -select cast(cast(1 as smallint) as binary); -> 0001; -select cast(cast(1 as int) as binary); -> 00000001; -select cast(cast(1 as long) as binary); -> 0000000000000001; -select cast(X'ff' as tinyint); -> -1; -select cast(X'ffff' as smallint); -> -1; -select cast(X'ffffffff' as int); -> -1; -select cast(X'ffffffffffffffff' as long); -> -1; -select N'test'; -> test; -select cast(' 011 ' as int); -> 11; -select E'test\\test'; -> test\test; -create table a(id int) as select null; -create table b(id int references a(id)) as select null; -delete from a; -drop table a, b; -create table test(a int, b int) as select 2, 0; -create index idx on test(b, a); -select count(*) from test where a in(2, 10) and b in(0, null); -> 1; -drop table test; -create table test(a 
int, b int) as select 1, 0; -create index idx on test(b, a); -select count(*) from test where b in(null, 0) and a in(1, null); -> 1; -drop table test; -create cached temp table test(id identity) not persistent; -drop table test; -create table test(a int, b int, unique(a, b)); -insert into test values(1,1), (1,2); -select count(*) from test where a in(1,2) and b in(1,2); -> 2; -drop table test; -create table test(id int); -alter table test alter column id set default 'x'; -select column_default from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> 'x'; -alter table test alter column id set not null; -select is_nullable from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> NO; -alter table test alter column id set data type varchar; -select type_name from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> VARCHAR; -alter table test alter column id type int; -select type_name from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> INTEGER; -alter table test alter column id drop default; -select column_default from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> null; -alter table test alter column id drop not null; -select is_nullable from information_schema.columns c where c.table_name = 'TEST' and c.column_name = 'ID'; -> YES; -drop table test; -select cast(cast(0.1 as real) as decimal); -> 0.1; -select cast(cast(95605327.73 as float) as decimal); -> 95605327.73; -select timestampdiff(month, '2003-02-01','2003-05-01'); -> 3; -select timestampdiff(YEAR,'2002-05-01','2001-01-01'); -> -1; -select timestampdiff(MINUTE,'2003-02-01','2003-05-01 12:05:55'); -> 128885; -select x from (select *, rownum as r from system_range(1, 3)) where r=2; -> 2; -select cast(cast('01020304-0506-0708-090a-0b0c0d0e0f00' as uuid) as binary); -> 0102030405060708090a0b0c0d0e0f00; -create table test(name 
varchar(255)) as select 'Hello+World+'; -select count(*) from test where name like 'Hello++World++' escape '+'; -> 1; -select count(*) from test where name like '+H+e+l+l+o++World++' escape '+'; -> 1; -select count(*) from test where name like 'Hello+World++' escape '+'; -> 0; -select count(*) from test where name like 'Hello++World+' escape '+'; -> 0; -drop table test; - -select count(*) from system_range(1, 1); -> 1; -select count(*) from system_range(1, -1); -> 0; - -select 1 from dual where '\' like '\' escape ''; -> 1; -select left(timestamp '2001-02-03 08:20:31+04', 4); -> 2001; - -create table t1$2(id int); -drop table t1$2; - -create table test(id int primary key) as select x from system_range(1, 200); -delete from test; -insert into test(id) values(1); -select * from test order by id; -> 1; -drop table test; - -create memory table test(id int) not persistent as select 1 from dual; -insert into test values(1); -select count(1) from test; -> 2; -@reconnect; -select count(1) from test; -> 0; -drop table test; -create table test(t clob) as select 1; -select distinct t from test; -> 1; -drop table test; -create table test(id int unique not null); -drop table test; -create table test(id int not null unique); -drop table test; -select count(*)from((select 1 from dual limit 1)union(select 2 from dual limit 1)); -> 2; -select sum(cast(x as int)) from system_range(2147483547, 2147483637); -> 195421006872; -select sum(x) from system_range(9223372036854775707, 9223372036854775797); -> 839326855353784593432; -select sum(cast(100 as tinyint)) from system_range(1, 1000); -> 100000; -select sum(cast(100 as smallint)) from system_range(1, 1000); -> 100000; -select avg(cast(x as int)) from system_range(2147483547, 2147483637); -> 2147483592; -select avg(x) from system_range(9223372036854775707, 9223372036854775797); -> 9223372036854775752; -select avg(cast(100 as tinyint)) from system_range(1, 1000); -> 100; -select avg(cast(100 as smallint)) from system_range(1, 1000); -> 
100; -select datediff(yyyy, now(), now()); -> 0; -create table t(d date) as select '2008-11-01' union select '2008-11-02'; -select 1 from t group by year(d) order by year(d); -> 1; -drop table t; -create table t(d int) as select 2001 union select 2002; -select 1 from t group by d/10 order by d/10; -> 1; -drop table t; - -create schema test; -create sequence test.report_id_seq; -select nextval('"test".REPORT_ID_SEQ'); -> 1; -select nextval('"test"."report_id_seq"'); -> 2; -select nextval('test.report_id_seq'); -> 3; -drop schema test; - -create table master(id int primary key); -create table detail(id int primary key, x bigint, foreign key(x) references master(id) on delete cascade); -alter table detail alter column x bigint; -insert into master values(0); -insert into detail values(0,0); -delete from master; -drop table master, detail; - -drop all objects; -create table test(id int, parent int references test(id) on delete cascade); -insert into test values(0, 0); -alter table test rename to test2; -delete from test2; -drop table test2; - -SELECT X FROM dual GROUP BY X HAVING X=AVG(X); -> 1; -create view test_view(id,) as select * from dual; -drop view test_view; -create table test(id int,); -insert into test(id,) values(1,); -merge into test(id,) key(id,) values(1,); -drop table test; - -SET MODE DB2; -SELECT * FROM SYSTEM_RANGE(1, 100) OFFSET 99 ROWS; -> 100; -SELECT * FROM SYSTEM_RANGE(1, 100) OFFSET 50 ROWS FETCH FIRST 1 ROW ONLY; -> 51; -SELECT * FROM SYSTEM_RANGE(1, 100) FETCH FIRST 1 ROWS ONLY; -> 1; -SELECT * FROM SYSTEM_RANGE(1, 100) FETCH FIRST ROW ONLY; -> 1; -SET MODE REGULAR; - -create domain email as varchar comment 'e-mail'; -create table test(e email); -select remarks from INFORMATION_SCHEMA.COLUMNS where table_name='TEST'; -> e-mail; -drop table test; -drop domain email; - -create table test$test(id int); -drop table test$test; -create table test$$test(id int); -drop table test$$test; -create table test (id varchar(36) as random_uuid() primary 
key); -insert into test() values(); -delete from test where id = select id from test; -drop table test; -create table test (id varchar(36) as now() primary key); -insert into test() values(); -delete from test where id = select id from test; -drop table test; -SELECT SOME(X>4) FROM SYSTEM_RANGE(1,6); -> TRUE; -SELECT EVERY(X>4) FROM SYSTEM_RANGE(1,6); -> FALSE; -SELECT BOOL_OR(X>4) FROM SYSTEM_RANGE(1,6); -> TRUE; -SELECT BOOL_AND(X>4) FROM SYSTEM_RANGE(1,6); -> FALSE; -SELECT BIT_OR(X) FROM SYSTEM_RANGE(1,6); -> 7; -SELECT BIT_AND(X) FROM SYSTEM_RANGE(1,6); -> 0; -SELECT BIT_AND(X) FROM SYSTEM_RANGE(1,1); -> 1; -CREATE TABLE TEST(ID IDENTITY); -ALTER TABLE TEST ALTER COLUMN ID RESTART WITH ? {1:10}; -INSERT INTO TEST VALUES(NULL); -SELECT * FROM TEST; -> 10; -DROP TABLE TEST; -CREATE SEQUENCE TEST_SEQ; -ALTER SEQUENCE TEST_SEQ RESTART WITH ? INCREMENT BY ? {1:20, 2: 3}; -SELECT NEXT VALUE FOR TEST_SEQ; -> 20; -SELECT NEXT VALUE FOR TEST_SEQ; -> 23; -DROP SEQUENCE TEST_SEQ; - -create schema Contact; -CREATE TABLE Account (id BIGINT); -CREATE TABLE Person (id BIGINT, FOREIGN KEY (id) REFERENCES Account(id)); -CREATE TABLE Contact.Contact (id BIGINT, FOREIGN KEY (id) REFERENCES public.Person(id)); -drop schema contact; -drop table account, person; - -create schema Contact; -CREATE TABLE Account (id BIGINT primary key); -CREATE TABLE Person (id BIGINT primary key, FOREIGN KEY (id) REFERENCES Account); -CREATE TABLE Contact.Contact (id BIGINT primary key, FOREIGN KEY (id) REFERENCES public.Person); -drop schema contact; -drop table account, person; - -select extract(hour from timestamp '2001-02-03 14:15:16'); -> 14; -select extract(hour from '2001-02-03 14:15:16'); -> 14; -select hour('2001-02-03 14:15:16'); -> 14; -select extract(week from timestamp '2001-02-03 14:15:16'); -> 5; - -CREATE TABLE TEST(A int NOT NULL, B int NOT NULL, C int) ; -ALTER TABLE TEST ADD CONSTRAINT CON UNIQUE(A,B); -ALTER TABLE TEST DROP C; -ALTER TABLE TEST DROP CONSTRAINT CON; -ALTER TABLE 
TEST DROP B; -DROP TABLE TEST; - -select count(d.*) from dual d group by d.x; -> 1; - -create table test(id int); -select count(*) from (select * from ((select * from test) union (select * from test)) a) b where id = 0; -> 0; -select count(*) from (select * from ((select * from test) union select * from test) a) b where id = 0; -> 0; -select count(*) from (select * from (select * from test union select * from test) a) b where id = 0; -> 0; -select 1 from ((test d1 inner join test d2 on d1.id = d2.id) inner join test d3 on d1.id = d3.id) inner join test d4 on d4.id = d1.id; -drop table test; - -select lpad('string', 10, '+'); -> ++++string; -select rpad('string', 10, '+'); -> string++++; -select lpad('string', 10); -> string; - -select count(*) from (select * from dual union select * from dual) where x = 0; -> 0; -select count(*) from (select * from (select * from dual union select * from dual)) where x = 0; -> 0; - -select instr('abcisj','s', -1) from dual; -> 5; -CREATE TABLE TEST(ID INT); -INSERT INTO TEST VALUES(1), (2), (3); -create index idx_desc on test(id desc); -select * from test where id between 0 and 1; -> 1; -select * from test where id between 3 and 4; -> 3; -drop table test; - -CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)); -INSERT INTO TEST VALUES(1, 'Hello'), (2, 'HelloWorld'), (3, 'HelloWorldWorld'); -SELECT COUNT(*) FROM TEST WHERE NAME REGEXP 'World'; -> 2; -SELECT NAME FROM TEST WHERE NAME REGEXP 'WorldW'; -> HelloWorldWorld; -drop table test; - -select * from (select x from (select x from dual)) where 1=x; -> 1; -CREATE VIEW TEST_VIEW AS SELECT X FROM (SELECT X FROM DUAL); -SELECT * FROM TEST_VIEW; -> 1; -SELECT * FROM TEST_VIEW; -> 1; -DROP VIEW TEST_VIEW; - -SELECT X FROM (SELECT X, X AS "XY" FROM DUAL) WHERE X=1; -> 1; -SELECT X FROM (SELECT X, X AS "X Y" FROM DUAL) WHERE X=1; -> 1; -SELECT X FROM (SELECT X, X AS "X Y" FROM DUAL AS "D Z") WHERE X=1; -> 1; - -select * from (select x from dual union select convert(x, int) from dual) 
where x=0; -create table test(id int); -insert into scriptSimple.public.test(id) values(1), (2); -update test t set t.id=t.id+1; -update public.test set public.test.id=1; -select count(scriptSimple.public.test.id) from scriptSimple.public.test; -> 2; -update scriptSimple.public.test set scriptSimple.public.test.id=1; -drop table scriptSimple.public.test; - -select year(timestamp '2007-07-26T18:44:26.109000+02:00'); -> 2007; - -create table test(id int primary key); -begin; -insert into test values(1); -rollback; -insert into test values(2); -rollback; -begin; -insert into test values(3); -commit; -insert into test values(4); -rollback; -select group_concat(id order by id) from test; -> 2,3,4; -drop table test; - -create table test(); -insert into test values(); -ALTER TABLE TEST ADD ID INTEGER; -select count(*) from test; -> 1; -drop table test; - -select * from dual where 'a_z' like '%=_%' escape '='; -> 1; - -create table test as select 1 from dual union all select 2 from dual; -drop table test; - -create table test_table(column_a integer); -insert into test_table values(1); -create view test_view AS SELECT * FROM (SELECT DISTINCT * FROM test_table) AS subquery; -select * FROM test_view; -> 1; -drop view test_view; -drop table test_table; - -CREATE TABLE TEST(ID INT); -INSERT INTO TEST VALUES(1); -CREATE VIEW TEST_VIEW AS SELECT COUNT(ID) X FROM TEST; -explain SELECT * FROM TEST_VIEW WHERE X>1; -DROP VIEW TEST_VIEW; -DROP TABLE TEST; - -create table test1(id int); -insert into test1 values(1), (1), (2), (3); -select sum(C0) from (select count(*) AS C0 from (select distinct * from test1) as temp); -> 3; -drop table test1; - -create table test(id int primary key check id>1); -drop table test; -create table table1(f1 int not null primary key); -create table table2(f2 int not null references table1(f1) on delete cascade); -drop table table2; -drop table table1; -create table table1(f1 int not null primary key); -create table table2(f2 int not null primary key 
references table1(f1)); -drop table table1; -drop table table2; - -select case when 1=null then 1 else 2 end; -> 2; - -select case (1) when 1 then 1 else 2 end; -> 1; - -create table test(id int); -insert into test values(1); -select distinct id from test a order by a.id; -> 1; -drop table test; - -create table FOO (ID int, A number(18, 2)); -insert into FOO (ID, A) values (1, 10.0), (2, 20.0); -select SUM (CASE when ID=1 then 0 ELSE A END) col0 from Foo; -> 20.00; -drop table FOO; - -select (SELECT true)+1 GROUP BY 1; -> 2; -create table FOO (ID int, A number(18, 2)); -insert into FOO (ID, A) values (1, 10.0), (2, 20.0); -select SUM (CASE when ID=1 then A ELSE 0 END) col0 from Foo; -> 10.00; -drop table FOO; - -create table A ( ID integer, a1 varchar(20) ); -create table B ( ID integer, AID integer, b1 varchar(20)); -create table C ( ID integer, BId integer, c1 varchar(20)); -insert into A (ID, a1) values (1, 'a1'); -insert into A (ID, a1) values (2, 'a2'); -select count(*) from A left outer join (B inner join C on C.BID=B.ID ) on B.AID=A.ID where A.id=1; -> 1; -select count(*) from A left outer join (B left join C on C.BID=B.ID ) on B.AID=A.ID where A.id=1; -> 1; -select count(*) from A left outer join B on B.AID=A.ID inner join C on C.BID=B.ID where A.id=1; -> 0; -select count(*) from (A left outer join B on B.AID=A.ID) inner join C on C.BID=B.ID where A.id=1; -> 0; -drop table a, b, c; - -create schema a; -create table a.test(id int); -insert into a.test values(1); -create schema b; -create table b.test(id int); -insert into b.test values(2); -select a.test.id + b.test.id from a.test, b.test; -> 3; -drop schema a; -drop schema b; - -select date '+0011-01-01'; -> 0011-01-01; -select date'-0010-01-01'; -> -10-01-01; -select datediff('HOUR', timestamp '2007-01-06 10:00:00Z', '2007-01-06 10:00:00Z'); -> 0; -select datediff('HOUR', timestamp '1234-05-06 10:00:00+01:00', '1234-05-06 10:00:00+02:00'); -> -1; -select datediff('HOUR', timestamp '1234-05-06 
10:00:00+01:00', '1234-05-06 10:00:00-02:00'); -> 3; - -create schema TEST_SCHEMA; -create table TEST_SCHEMA.test(id int); -create sequence TEST_SCHEMA.TEST_SEQ; -select TEST_SCHEMA.TEST_SEQ.CURRVAL; -> 0; -select TEST_SCHEMA.TEST_SEQ.nextval; -> 1; -drop schema TEST_SCHEMA; - -create table test(id int); -create trigger TEST_TRIGGER before insert on test call "org.h2.test.db.TestTriggersConstraints"; -comment on trigger TEST_TRIGGER is 'just testing'; -select remarks from information_schema.triggers where trigger_name = 'TEST_TRIGGER'; -> just testing; -@reconnect; -select remarks from information_schema.triggers where trigger_name = 'TEST_TRIGGER'; -> just testing; -drop trigger TEST_TRIGGER; -@reconnect; - -create alias parse_long for "java.lang.Long.parseLong(java.lang.String)"; -comment on alias parse_long is 'Parse a long with base'; -select remarks from information_schema.function_aliases where alias_name = 'PARSE_LONG'; -> Parse a long with base; -@reconnect; -select remarks from information_schema.function_aliases where alias_name = 'PARSE_LONG'; -> Parse a long with base; -drop alias parse_long; -@reconnect; - -create role hr; -comment on role hr is 'Human Resources'; -select remarks from information_schema.roles where name = 'HR'; -> Human Resources; -@reconnect; -select remarks from information_schema.roles where name = 'HR'; -> Human Resources; -create user abc password 'x'; -grant hr to abc; -drop role hr; -@reconnect; -drop user abc; - -create domain email as varchar(100) check instr(value, '@') > 0; -comment on domain email is 'must contain @'; -select remarks from information_schema.domains where domain_name = 'EMAIL'; -> must contain @; -@reconnect; -select remarks from information_schema.domains where domain_name = 'EMAIL'; -> must contain @; -drop domain email; -@reconnect; - -create schema tests; -set schema tests; -create sequence walk; -comment on schema tests is 'Test Schema'; -comment on sequence walk is 'Walker'; -select remarks from 
information_schema.schemata where schema_name = 'TESTS'; -> Test Schema; -select remarks from information_schema.sequences where sequence_name = 'WALK'; -> Walker; -@reconnect; -select remarks from information_schema.schemata where schema_name = 'TESTS'; -> Test Schema; -select remarks from information_schema.sequences where sequence_name = 'WALK'; -> Walker; -drop schema tests; -@reconnect; - -create constant abc value 1; -comment on constant abc is 'One'; -select remarks from information_schema.constants where constant_name = 'ABC'; -> One; -@reconnect; -select remarks from information_schema.constants where constant_name = 'ABC'; -> One; -drop constant abc; -drop table test; -@reconnect; - -create table test(id int); -alter table test add constraint const1 unique(id); -create index IDX_ID on test(id); -comment on constraint const1 is 'unique id'; -comment on index IDX_ID is 'id_index'; -select remarks from information_schema.constraints where constraint_name = 'CONST1'; -> unique id; -select remarks from information_schema.indexes where index_name = 'IDX_ID'; -> id_index; -@reconnect; -select remarks from information_schema.constraints where constraint_name = 'CONST1'; -> unique id; -select remarks from information_schema.indexes where index_name = 'IDX_ID'; -> id_index; -drop table test; -@reconnect; - -create user sales password '1'; -comment on user sales is 'mr. money'; -select remarks from information_schema.users where name = 'SALES'; -> mr. money; -@reconnect; -select remarks from information_schema.users where name = 'SALES'; -> mr. money; -alter user sales rename to SALES_USER; -select remarks from information_schema.users where name = 'SALES_USER'; -> mr. money; -@reconnect; -select remarks from information_schema.users where name = 'SALES_USER'; -> mr. 
money; - -create table test(id int); -create linked table test_link('org.h2.Driver', 'jdbc:h2:mem:', 'sa', 'sa', 'DUAL'); -comment on table test_link is '123'; -select remarks from information_schema.tables where table_name = 'TEST_LINK'; -> 123; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST_LINK'; -> 123; -comment on table test_link is 'xyz'; -select remarks from information_schema.tables where table_name = 'TEST_LINK'; -> xyz; -alter table test_link rename to test_l; -select remarks from information_schema.tables where table_name = 'TEST_L'; -> xyz; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST_L'; -> xyz; -drop table test; -@reconnect; - -create table test(id int); -create view test_v as select * from test; -comment on table test_v is 'abc'; -select remarks from information_schema.tables where table_name = 'TEST_V'; -> abc; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST_V'; -> abc; -alter table test_v rename to TEST_VIEW; -select remarks from information_schema.tables where table_name = 'TEST_VIEW'; -> abc; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST_VIEW'; -> abc; -drop table test cascade; -@reconnect; - -create table test(a int); -comment on table test is 'hi'; -select remarks from information_schema.tables where table_name = 'TEST'; -> hi; -alter table test add column b int; -select remarks from information_schema.tables where table_name = 'TEST'; -> hi; -alter table test rename to test1; -select remarks from information_schema.tables where table_name = 'TEST1'; -> hi; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST1'; -> hi; -comment on table test1 is 'ho'; -@reconnect; -select remarks from information_schema.tables where table_name = 'TEST1'; -> ho; -drop table test1; - -create table test(a int, b int); -comment on column test.b is 'test'; -select remarks from 
information_schema.columns where table_name = 'TEST' and column_name = 'B'; -> test; -@reconnect; -select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'B'; -> test; -alter table test drop column b; -@reconnect; -comment on column test.a is 'ho'; -select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'A'; -> ho; -@reconnect; -select remarks from information_schema.columns where table_name = 'TEST' and column_name = 'A'; -> ho; -drop table test; -@reconnect; - -create table test(a int); -comment on column test.a is 'test'; -alter table test rename to test2; -@reconnect; -select remarks from information_schema.columns where table_name = 'TEST2'; -> test; -@reconnect; -select remarks from information_schema.columns where table_name = 'TEST2'; -> test; -drop table test2; -@reconnect; - -create table test1 (a varchar(10)); -create hash index x1 on test1(a); -insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); -insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); -insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); -insert into test1 values ('abcaaaa'),('abcbbbb'),('abccccc'),('abcdddd'); -select count(*) from test1 where a='abcaaaa'; -> 4; -select count(*) from test1 where a='abcbbbb'; -> 4; -@reconnect; -select count(*) from test1 where a='abccccc'; -> 4; -select count(*) from test1 where a='abcdddd'; -> 4; -update test1 set a='abccccc' where a='abcdddd'; -select count(*) from test1 where a='abccccc'; -> 8; -select count(*) from test1 where a='abcdddd'; -> 0; -delete from test1 where a='abccccc'; -select count(*) from test1 where a='abccccc'; -> 0; -truncate table test1; -insert into test1 values ('abcaaaa'); -insert into test1 values ('abcaaaa'); -delete from test1; -drop table test1; -@reconnect; - -drop table if exists test; -create table if not exists test(col1 int primary key); -insert into test values(1); -insert into test 
values(2); -insert into test values(3); -select count(*) from test; -> 3; -select max(col1) from test; -> 3; -update test set col1 = col1 + 1 order by col1 asc limit 100; -select count(*) from test; -> 3; -select max(col1) from test; -> 4; -drop table if exists test; - diff --git a/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java b/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java index 76812c6f46..51aff905d3 100644 --- a/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java +++ b/h2/src/test/org/h2/test/todo/TestDiskSpaceLeak.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; @@ -10,7 +10,7 @@ import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; -import org.h2.jdbc.JdbcConnection; +import org.h2.test.TestBase; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Recover; import org.h2.util.JdbcUtils; @@ -49,8 +49,8 @@ public static void main(String... args) throws Exception { Recover.execute("data", "test"); new File("data/test.h2.sql").renameTo(new File("data/test." + i + ".sql")); conn = DriverManager.getConnection("jdbc:h2:data/test"); - // ((JdbcConnection) conn).setPowerOffCount(i); - ((JdbcConnection) conn).setPowerOffCount(28); + // TestBase.setPowerOffCount(conn, i); + TestBase.setPowerOffCount(conn, 28); String last = "connect"; try { conn.createStatement().execute("drop table test if exists"); diff --git a/h2/src/test/org/h2/test/todo/TestDropTableLarge.java b/h2/src/test/org/h2/test/todo/TestDropTableLarge.java index c2ebcfd595..3a050642a1 100644 --- a/h2/src/test/org/h2/test/todo/TestDropTableLarge.java +++ b/h2/src/test/org/h2/test/todo/TestDropTableLarge.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; diff --git a/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java b/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java index 34ecb16847..9770cf6e23 100644 --- a/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java +++ b/h2/src/test/org/h2/test/todo/TestLinkedTableFullCondition.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.todo; diff --git a/h2/src/test/org/h2/test/todo/TestTempTableCrash.java b/h2/src/test/org/h2/test/todo/TestTempTableCrash.java index 1d065a499b..8a4e452975 100644 --- a/h2/src/test/org/h2/test/todo/TestTempTableCrash.java +++ b/h2/src/test/org/h2/test/todo/TestTempTableCrash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.todo; @@ -9,7 +9,8 @@ import java.sql.DriverManager; import java.sql.Statement; import java.util.Random; -import org.h2.store.fs.FilePathRec; +import java.util.concurrent.TimeUnit; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.unit.TestReopen; import org.h2.tools.DeleteDbFiles; @@ -32,7 +33,6 @@ private static void test() throws Exception { Statement stat; System.setProperty("h2.delayWrongPasswordMin", "0"); - System.setProperty("h2.check2", "false"); FilePathRec.register(); System.setProperty("reopenShift", "4"); TestReopen reopen = new TestReopen(); @@ -48,10 +48,10 @@ private static void test() throws Exception { stat = conn.createStatement(); Random random = new Random(1); - long start = System.currentTimeMillis(); + long start = System.nanoTime(); for (int i = 0; i < 10000; i++) { - long now = System.currentTimeMillis(); - if (now > start + 1000) { + long now = System.nanoTime(); + if (now > start + TimeUnit.SECONDS.toNanos(1)) { System.out.println("i: " + i); start = now; } diff --git a/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java b/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java index 490ca88511..41a463ffb9 100644 --- a/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java +++ b/h2/src/test/org/h2/test/todo/TestUndoLogLarge.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.todo; @@ -10,6 +10,8 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.concurrent.TimeUnit; + import org.h2.tools.DeleteDbFiles; /** @@ -37,13 +39,13 @@ private static void test() throws SQLException { conn.setAutoCommit(false); PreparedStatement prep = conn.prepareStatement( "insert into test(name) values(space(1024*1024))"); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < 2500; i++) { prep.execute(); - long now = System.currentTimeMillis(); - if (now > time + 5000) { + long now = System.nanoTime(); + if (now > time + TimeUnit.SECONDS.toNanos(5)) { System.out.println(i); - time = now + 5000; + time = now + TimeUnit.SECONDS.toNanos(5); } } conn.rollback(); diff --git a/h2/src/test/org/h2/test/todo/TestUndoLogMemory.java b/h2/src/test/org/h2/test/todo/TestUndoLogMemory.java deleted file mode 100644 index 19a5478c89..0000000000 --- a/h2/src/test/org/h2/test/todo/TestUndoLogMemory.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.todo; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Statement; -import org.h2.tools.DeleteDbFiles; - -/** - * A test to reproduce out of memory using a large operation. - */ -public class TestUndoLogMemory { - - /** - * Run just this test. - * - * @param args ignored - */ - public static void main(String... 
args) throws Exception { - TestUndoLogMemory.test(10, "null"); - TestUndoLogMemory.test(100, "space(100000)"); - // new TestUndoLogMemory().test(100000, "null"); - // new TestUndoLogMemory().test(1000, "space(100000)"); - } - - private static void test(int count, String defaultValue) throws SQLException { - - // -Xmx1m -XX:+HeapDumpOnOutOfMemoryError - DeleteDbFiles.execute("data", "test", true); - Connection conn = DriverManager.getConnection( - "jdbc:h2:data/test;large_transactions=true"); - Statement stat = conn.createStatement(); - stat.execute("set cache_size 32"); - stat.execute("SET max_operation_memory 100"); - stat.execute("SET max_memory_undo 100"); - conn.setAutoCommit(false); - - // also a problem: tables without unique index - System.out.println("create--- " + count + " " + defaultValue); - stat.execute("create table test(id int, name varchar default " + - defaultValue + " )"); - System.out.println("insert---"); - stat.execute("insert into test(id) select x from system_range(1, " + - count + ")"); - System.out.println("rollback---"); - conn.rollback(); - - System.out.println("drop---"); - stat.execute("drop table test"); - System.out.println("create---"); - stat.execute("create table test" + - "(id int primary key, name varchar default " + - defaultValue + " )"); - - // INSERT problem - System.out.println("insert---"); - stat.execute( - "insert into test(id) select x from system_range(1, "+count+")"); - System.out.println("delete---"); - stat.execute("delete from test"); - - // DELETE problem - System.out.println("insert---"); - PreparedStatement prep = conn.prepareStatement( - "insert into test(id) values(?)"); - for (int i = 0; i < count; i++) { - prep.setInt(1, i); - prep.execute(); - } - System.out.println("delete---"); - stat.execute("delete from test"); - - System.out.println("close---"); - conn.close(); - } - -} diff --git a/h2/src/test/org/h2/test/todo/package.html b/h2/src/test/org/h2/test/todo/package.html index 0aeeede752..a99d84ed75 100644 
--- a/h2/src/test/org/h2/test/todo/package.html +++ b/h2/src/test/org/h2/test/todo/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/todo/supportTemplates.txt b/h2/src/test/org/h2/test/todo/supportTemplates.txt index 9d9a4f52b8..f79ebc4ed3 100644 --- a/h2/src/test/org/h2/test/todo/supportTemplates.txt +++ b/h2/src/test/org/h2/test/todo/supportTemplates.txt @@ -1,4 +1,34 @@ +Old issue tracking +Please send a question to the H2 Google Group or StackOverflow first, +and only then, once you are completely sure it is an issue, submit it here. +The reason is that only very few people actively monitor the issue tracker. + +Before submitting a bug, please also check the FAQ: +https://h2database.com/html/faq.html + +What steps will reproduce the problem? +(simple SQL scripts or simple standalone applications are preferred) +1. +2. +3. + +What is the expected output? What do you see instead? + + +What version of the product are you using? On what operating system, file +system, and virtual machine? + + +Do you know a workaround? + +What is your use case, meaning why do you need this feature? + +How important/urgent is the problem for you? + +Please provide any additional information below. + +------------------ Benchmark: Hi, diff --git a/h2/src/test/org/h2/test/todo/tools.sql b/h2/src/test/org/h2/test/todo/tools.sql index 8b4fde9fce..bd61c7a5d0 100644 --- a/h2/src/test/org/h2/test/todo/tools.sql +++ b/h2/src/test/org/h2/test/todo/tools.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ diff --git a/h2/src/test/org/h2/test/trace/Arg.java b/h2/src/test/org/h2/test/trace/Arg.java index ef4f931f77..55038562d5 100644 --- a/h2/src/test/org/h2/test/trace/Arg.java +++ b/h2/src/test/org/h2/test/trace/Arg.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). */ /* * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/h2/src/test/org/h2/test/trace/Parser.java b/h2/src/test/org/h2/test/trace/Parser.java index eca84aa3f6..86e995ef58 100644 --- a/h2/src/test/org/h2/test/trace/Parser.java +++ b/h2/src/test/org/h2/test/trace/Parser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
*/ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -23,7 +23,6 @@ import java.math.BigDecimal; import java.util.ArrayList; -import org.h2.util.New; import org.h2.util.StringUtils; /** @@ -189,7 +188,7 @@ private Arg parseValue() { number.indexOf('.') >= 0) { Double v = Double.parseDouble(number); return new Arg(double.class, v); - } else if (number.endsWith("L") || number.endsWith("l")) { + } else if (number.endsWith("l")) { Long v = Long.parseLong(number.substring(0, number.length() - 1)); return new Arg(long.class, v); } else { @@ -209,13 +208,12 @@ private Arg parseValue() { read("["); read("]"); read("{"); - ArrayList values = New.arrayList(); + ArrayList values = new ArrayList<>(); do { values.add(parseValue().getValue()); } while (readIf(",")); read("}"); - String[] list = new String[values.size()]; - values.toArray(list); + String[] list = values.toArray(new String[0]); return new Arg(String[].class, list); } else if (readIf("BigDecimal")) { read("("); @@ -251,7 +249,7 @@ private Arg parseValue() { private void parseCall(String objectName, Object o, String methodName) { stat.setMethodCall(objectName, o, methodName); - ArrayList args = New.arrayList(); + ArrayList args = new ArrayList<>(); read("("); while (true) { if (readIf(")")) { diff --git a/h2/src/test/org/h2/test/trace/Player.java b/h2/src/test/org/h2/test/trace/Player.java index abf0de99f7..cf0a750200 100644 --- a/h2/src/test/org/h2/test/trace/Player.java +++ b/h2/src/test/org/h2/test/trace/Player.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
*/ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -26,7 +26,6 @@ import java.io.LineNumberReader; import java.util.HashMap; import org.h2.store.fs.FileUtils; -import org.h2.util.New; /** * This tool can re-run Java style log files. There is no size limit. @@ -51,7 +50,7 @@ public class Player { private static final String[] IMPORTED_PACKAGES = { "", "java.lang.", "java.sql.", "javax.sql." }; private boolean trace; - private final HashMap objects = New.hashMap(); + private final HashMap objects = new HashMap<>(); /** * Execute a trace file using the command line. The log file name to execute diff --git a/h2/src/test/org/h2/test/trace/Statement.java b/h2/src/test/org/h2/test/trace/Statement.java index 8253c491e3..6fcca9d58e 100644 --- a/h2/src/test/org/h2/test/trace/Statement.java +++ b/h2/src/test/org/h2/test/trace/Statement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
*/ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -80,9 +80,7 @@ Object execute() throws Exception { player.assign(assignVariable, obj); } return obj; - } catch (IllegalArgumentException e) { - e.printStackTrace(); - } catch (IllegalAccessException e) { + } catch (IllegalArgumentException | IllegalAccessException e) { e.printStackTrace(); } catch (InvocationTargetException e) { Throwable t = e.getTargetException(); @@ -162,7 +160,6 @@ void setMethodCall(String variableName, Object object, String methodName) { } public void setArgs(ArrayList list) { - args = new Arg[list.size()]; - list.toArray(args); + args = list.toArray(new Arg[0]); } } diff --git a/h2/src/test/org/h2/test/trace/package.html b/h2/src/test/org/h2/test/trace/package.html index 818df6e68b..5b4b294356 100644 --- a/h2/src/test/org/h2/test/trace/package.html +++ b/h2/src/test/org/h2/test/trace/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/unit/TestAnsCompression.java b/h2/src/test/org/h2/test/unit/TestAnsCompression.java new file mode 100644 index 0000000000..32daf07048 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestAnsCompression.java @@ -0,0 +1,110 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.Random; + +import org.h2.dev.util.AnsCompression; +import org.h2.dev.util.BinaryArithmeticStream; +import org.h2.dev.util.BitStream; +import org.h2.test.TestBase; + +/** + * Tests the ANS (Asymmetric Numeral Systems) compression tool. + */ +public class TestAnsCompression extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testScaleFrequencies(); + testRandomized(); + testCompressionRate(); + } + + private void testCompressionRate() throws IOException { + byte[] data = new byte[1024 * 1024]; + Random r = new Random(1); + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (r.nextInt(4) * r.nextInt(4)); + } + int[] freq = new int[256]; + AnsCompression.countFrequencies(freq, data); + int lenAns = AnsCompression.encode(freq, data).length; + BitStream.Huffman huff = new BitStream.Huffman(freq); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + BitStream.Out o = new BitStream.Out(out); + for (byte x : data) { + huff.write(o, x & 255); + } + o.flush(); + int lenHuff = out.toByteArray().length; + BinaryArithmeticStream.Huffman aHuff = new BinaryArithmeticStream.Huffman( + freq); + out = new ByteArrayOutputStream(); + BinaryArithmeticStream.Out o2 = new BinaryArithmeticStream.Out(out); + for (byte x : data) { + aHuff.write(o2, x & 255); + } + o2.flush(); + int lenArithmetic = out.toByteArray().length; + + assertTrue(lenAns < lenArithmetic); + assertTrue(lenArithmetic < lenHuff); + assertTrue(lenHuff < data.length); + } + + private void testScaleFrequencies() { + Random r = new Random(1); + for (int j = 0; j < 100; j++) { + int symbolCount = r.nextInt(200) + 1; + int[] freq = new int[symbolCount]; + for (int total = symbolCount * 2; total < 10000; total *= 2) { + for (int i = 0; i < freq.length; i++) { + freq[i] = r.nextInt(1000) + 1; + } + AnsCompression.scaleFrequencies(freq, total); + } + } + int[] freq = new int[]{0, 1, 1, 1000}; + AnsCompression.scaleFrequencies(freq, 100); + assertEquals("[0, 1, 1, 98]", Arrays.toString(freq)); + } + + private void testRandomized() { + Random r = new Random(1); + int symbolCount = r.nextInt(200) + 1; + int[] freq = new int[symbolCount]; + for (int i = 0; i < freq.length; i++) { + freq[i] = r.nextInt(1000) 
+ 1; + } + int seed = r.nextInt(); + r.setSeed(seed); + int len = 10000; + byte[] data = new byte[len]; + r.nextBytes(data); + freq = new int[256]; + AnsCompression.countFrequencies(freq, data); + byte[] encoded = AnsCompression.encode(freq, data); + byte[] decoded = AnsCompression.decode(freq, encoded, data.length); + for (int i = 0; i < len; i++) { + int expected = data[i]; + assertEquals(expected, decoded[i]); + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestAutoReconnect.java b/h2/src/test/org/h2/test/unit/TestAutoReconnect.java index 3ef6195668..e275d3ed1e 100644 --- a/h2/src/test/org/h2/test/unit/TestAutoReconnect.java +++ b/h2/src/test/org/h2/test/unit/TestAutoReconnect.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -13,12 +13,13 @@ import org.h2.api.DatabaseEventListener; import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Server; /** * Tests automatic embedded/server mode. */ -public class TestAutoReconnect extends TestBase { +public class TestAutoReconnect extends TestDb { private String url; private boolean autoServer; @@ -32,10 +33,10 @@ public class TestAutoReconnect extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } - private void restart() throws SQLException { + private void restart() throws SQLException, InterruptedException { if (autoServer) { if (connServer != null) { connServer.createStatement().execute("SHUTDOWN"); @@ -45,6 +46,7 @@ private void restart() throws SQLException { connServer = getConnection(url); } else { server.stop(); + Thread.sleep(100); // try to prevent "port may be in use" error server.start(); } } @@ -56,28 +58,24 @@ public void test() throws Exception { testReconnect(); autoServer = false; testReconnect(); - deleteDb("autoReconnect"); + deleteDb(getTestName()); } private void testWrongUrl() throws Exception { - deleteDb("autoReconnect"); + deleteDb(getTestName()); Server tcp = Server.createTcpServer().start(); try { - conn = getConnection("jdbc:h2:" + getBaseDir() + - "/autoReconnect;AUTO_SERVER=TRUE"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("jdbc:h2:" + getBaseDir() + - "/autoReconnect;OPEN_NEW=TRUE"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). - getConnection("jdbc:h2:" + getBaseDir() + - "/autoReconnect;OPEN_NEW=TRUE"); + conn = getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";AUTO_SERVER=TRUE"); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";OPEN_NEW=TRUE")); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, + () -> getConnection("jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";OPEN_NEW=TRUE")); conn.close(); - conn = getConnection("jdbc:h2:tcp://localhost/" + getBaseDir() + - "/autoReconnect"); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this). 
- getConnection("jdbc:h2:" + getBaseDir() + - "/autoReconnect;AUTO_SERVER=TRUE;OPEN_NEW=TRUE"); + conn = getConnection("jdbc:h2:tcp://localhost:" + tcp.getPort() + '/' + getBaseDir() + '/' // + + getTestName()); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> getConnection( + "jdbc:h2:" + getBaseDir() + '/' + getTestName() + ";AUTO_SERVER=TRUE;OPEN_NEW=TRUE")); conn.close(); } finally { tcp.stop(); @@ -85,15 +83,16 @@ private void testWrongUrl() throws Exception { } private void testReconnect() throws Exception { - deleteDb("autoReconnect"); + deleteDb(getTestName()); if (autoServer) { - url = "jdbc:h2:" + getBaseDir() + "/autoReconnect;" + + url = "jdbc:h2:" + getBaseDir() + "/" + getTestName() + ";" + "FILE_LOCK=SOCKET;" + "AUTO_SERVER=TRUE;OPEN_NEW=TRUE"; restart(); } else { - server = Server.createTcpServer("-tcpPort", "8181").start(); - url = "jdbc:h2:tcp://localhost:8181/" + getBaseDir() + "/autoReconnect;" + + server = Server.createTcpServer("-ifNotExists").start(); + int port = server.getPort(); + url = "jdbc:h2:tcp://localhost:" + port + "/" + getBaseDir() + "/" + getTestName() + ";" + "FILE_LOCK=SOCKET;AUTO_RECONNECT=TRUE"; } @@ -111,7 +110,7 @@ private void testReconnect() throws Exception { stat.execute("create table test(id identity, name varchar)"); restart(); PreparedStatement prep = conn.prepareStatement( - "insert into test values(null, ?)"); + "insert into test(name) values(?)"); restart(); prep.setString(1, "Hello"); restart(); @@ -163,6 +162,7 @@ private void testReconnect() throws Exception { if (i < 10) { throw e; } + break; } } restart(); @@ -184,32 +184,6 @@ private void testReconnect() throws Exception { /** * A database event listener used in this test. 
*/ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void closingDatabase() { - // ignore - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - // ignore - } - - @Override - public void init(String u) { - // ignore - } - - @Override - public void opened() { - // ignore - } - - @Override - public void setProgress(int state, String name, int x, int max) { - // ignore - } + public static final class MyDatabaseEventListener implements DatabaseEventListener { } } diff --git a/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java b/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java new file mode 100644 index 0000000000..173691dd1d --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestBinaryArithmeticStream.java @@ -0,0 +1,172 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Random; + +import org.h2.dev.util.BinaryArithmeticStream; +import org.h2.dev.util.BinaryArithmeticStream.Huffman; +import org.h2.dev.util.BinaryArithmeticStream.In; +import org.h2.dev.util.BinaryArithmeticStream.Out; +import org.h2.dev.util.BitStream; +import org.h2.test.TestBase; + +/** + * Test the binary arithmetic stream utility. + */ +public class TestBinaryArithmeticStream extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testCompareWithHuffman(); + testHuffmanRandomized(); + testCompressionRatio(); + testRandomized(); + testPerformance(); + } + + private void testCompareWithHuffman() throws IOException { + Random r = new Random(1); + for (int test = 0; test < 10; test++) { + int[] freq = new int[4]; + for (int i = 0; i < freq.length; i++) { + freq[i] = 0 + r.nextInt(1000); + } + BinaryArithmeticStream.Huffman ah = new BinaryArithmeticStream.Huffman( + freq); + BitStream.Huffman hh = new BitStream.Huffman(freq); + ByteArrayOutputStream hbOut = new ByteArrayOutputStream(); + ByteArrayOutputStream abOut = new ByteArrayOutputStream(); + BitStream.Out bOut = new BitStream.Out(hbOut); + BinaryArithmeticStream.Out aOut = new BinaryArithmeticStream.Out(abOut); + for (int i = 0; i < freq.length; i++) { + for (int j = 0; j < freq[i]; j++) { + int x = i; + hh.write(bOut, x); + ah.write(aOut, x); + } + } + assertTrue(hbOut.toByteArray().length >= abOut.toByteArray().length); + } + } + + private void testHuffmanRandomized() throws IOException { + Random r = new Random(1); + int[] freq = new int[r.nextInt(200) + 1]; + for (int i = 0; i < freq.length; i++) { + freq[i] = r.nextInt(1000) + 1; + } + int seed = r.nextInt(); + r.setSeed(seed); + Huffman huff = new Huffman(freq); + ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + Out out = new Out(byteOut); + for (int i = 0; i < 10000; i++) { + huff.write(out, r.nextInt(freq.length)); + } + out.flush(); + In in = new In(new ByteArrayInputStream(byteOut.toByteArray())); + r.setSeed(seed); + for (int i = 0; i < 10000; i++) { + int expected = r.nextInt(freq.length); + int got = huff.read(in); + assertEquals(expected, got); + } + } + + private void testPerformance() throws IOException { + Random r = new Random(); + // long time = System.nanoTime(); + // Profiler prof = new Profiler().startCollecting(); + for (int 
seed = 0; seed < 10000; seed++) { + r.setSeed(seed); + ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + Out out = new Out(byteOut); + int len = 100; + for (int i = 0; i < len; i++) { + boolean v = r.nextBoolean(); + int prob = r.nextInt(BinaryArithmeticStream.MAX_PROBABILITY); + out.writeBit(v, prob); + } + out.flush(); + r.setSeed(seed); + ByteArrayInputStream byteIn = new ByteArrayInputStream( + byteOut.toByteArray()); + In in = new In(byteIn); + for (int i = 0; i < len; i++) { + boolean expected = r.nextBoolean(); + int prob = r.nextInt(BinaryArithmeticStream.MAX_PROBABILITY); + assertEquals(expected, in.readBit(prob)); + } + } + // time = System.nanoTime() - time; + // System.out.println("time: " + TimeUnit.NANOSECONDS.toMillis(time)); + // System.out.println(prof.getTop(5)); + } + + private void testCompressionRatio() throws IOException { + ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + Out out = new Out(byteOut); + int prob = 1000; + int len = 1024; + for (int i = 0; i < len; i++) { + out.writeBit(true, prob); + } + out.flush(); + ByteArrayInputStream byteIn = new ByteArrayInputStream( + byteOut.toByteArray()); + In in = new In(byteIn); + for (int i = 0; i < len; i++) { + assertTrue(in.readBit(prob)); + } + // System.out.println(len / 8 + " comp: " + + // byteOut.toByteArray().length); + } + + private void testRandomized() throws IOException { + for (int i = 0; i < 10000; i = (int) ((i + 10) * 1.1)) { + testRandomized(i); + } + } + + private void testRandomized(int len) throws IOException { + Random r = new Random(); + int seed = r.nextInt(); + r.setSeed(seed); + ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + Out out = new Out(byteOut); + for (int i = 0; i < len; i++) { + int prob = r.nextInt(BinaryArithmeticStream.MAX_PROBABILITY); + out.writeBit(r.nextBoolean(), prob); + } + out.flush(); + byteOut.write(r.nextInt(255)); + ByteArrayInputStream byteIn = new ByteArrayInputStream( + byteOut.toByteArray()); + In in = 
new In(byteIn); + r.setSeed(seed); + for (int i = 0; i < len; i++) { + int prob = r.nextInt(BinaryArithmeticStream.MAX_PROBABILITY); + boolean expected = r.nextBoolean(); + boolean got = in.readBit(prob); + assertEquals(expected, got); + } + assertEquals(r.nextInt(255), byteIn.read()); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestBinaryOperation.java b/h2/src/test/org/h2/test/unit/TestBinaryOperation.java new file mode 100644 index 0000000000..606d728d44 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestBinaryOperation.java @@ -0,0 +1,109 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import org.h2.engine.SessionLocal; +import org.h2.expression.BinaryOperation; +import org.h2.expression.ExpressionVisitor; +import org.h2.expression.Operation0; +import org.h2.message.DbException; +import org.h2.test.TestBase; +import org.h2.value.TypeInfo; +import org.h2.value.Value; + +/** + * Test the binary operation. + */ +public class TestBinaryOperation extends TestBase { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testPlusMinus(BinaryOperation.OpType.PLUS); + testPlusMinus(BinaryOperation.OpType.MINUS); + testMultiply(); + testDivide(); + } + + private void testPlusMinus(BinaryOperation.OpType type) { + assertPrecisionScale(2, 0, 2, type, 1, 0, 1, 0); + assertPrecisionScale(3, 1, 2, type, 1, 1, 1, 0); + assertPrecisionScale(3, 1, 2, type, 1, 0, 1, 1); + } + + private void testMultiply() { + assertPrecisionScale(2, 0, 2, BinaryOperation.OpType.MULTIPLY, 1, 0, 1, 0); + assertPrecisionScale(2, 1, 2, BinaryOperation.OpType.MULTIPLY, 1, 1, 1, 0); + assertPrecisionScale(2, 1, 2, BinaryOperation.OpType.MULTIPLY, 1, 0, 1, 1); + } + + private void testDivide() { + assertPrecisionScale(3, 2, 2, BinaryOperation.OpType.DIVIDE, 1, 0, 1, 0); + assertPrecisionScale(3, 3, 2, BinaryOperation.OpType.DIVIDE, 1, 1, 1, 0); + assertPrecisionScale(3, 1, 2, BinaryOperation.OpType.DIVIDE, 1, 0, 1, 1); + assertPrecisionScale(25, 0, 10, BinaryOperation.OpType.DIVIDE, 1, 3, 9, 27); + } + + private void assertPrecisionScale(int expectedPrecision, int expectedScale, int expectedDecfloatPrecision, + BinaryOperation.OpType type, int precision1, int scale1, int precision2, int scale2) { + TestExpression left = new TestExpression(TypeInfo.getTypeInfo(Value.NUMERIC, precision1, scale1, null)); + TestExpression right = new TestExpression(TypeInfo.getTypeInfo(Value.NUMERIC, precision2, scale2, null)); + TypeInfo typeInfo = new BinaryOperation(type, left, right).optimize(null).getType(); + assertEquals(Value.NUMERIC, typeInfo.getValueType()); + assertEquals(expectedPrecision, typeInfo.getPrecision()); + assertEquals(expectedScale, typeInfo.getScale()); + left = new TestExpression(TypeInfo.getTypeInfo(Value.DECFLOAT, precision1, 0, null)); + right = new TestExpression(TypeInfo.getTypeInfo(Value.DECFLOAT, precision2, 0, null)); + typeInfo = new BinaryOperation(type, left, 
right).optimize(null).getType(); + assertEquals(Value.DECFLOAT, typeInfo.getValueType()); + assertEquals(expectedDecfloatPrecision, typeInfo.getPrecision()); + } + + private static final class TestExpression extends Operation0 { + + private final TypeInfo type; + + TestExpression(TypeInfo type) { + this.type = type; + } + + @Override + public Value getValue(SessionLocal session) { + throw DbException.getUnsupportedException(""); + } + + @Override + public TypeInfo getType() { + return type; + } + + @Override + public StringBuilder getUnenclosedSQL(StringBuilder builder, int sqlFlags) { + throw DbException.getUnsupportedException(""); + } + + @Override + public boolean isEverything(ExpressionVisitor visitor) { + return false; + } + + @Override + public int getCost() { + return 0; + } + + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestBitField.java b/h2/src/test/org/h2/test/unit/TestBitField.java deleted file mode 100644 index 18f42f8bf0..0000000000 --- a/h2/src/test/org/h2/test/unit/TestBitField.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.util.BitSet; -import java.util.Random; -import org.h2.test.TestBase; -import org.h2.util.BitField; - -/** - * A unit test for bit fields. - */ -public class TestBitField extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - testNextClearBit(); - testByteOperations(); - testRandom(); - testGetSet(); - testRandomSetRange(); - } - - private void testNextClearBit() { - BitSet set = new BitSet(); - BitField field = new BitField(); - set.set(0, 640); - field.set(0, 640, true); - assertEquals(set.nextClearBit(0), field.nextClearBit(0)); - - Random random = new Random(1); - field = new BitField(); - field.set(0, 500, true); - for (int i = 0; i < 100000; i++) { - int a = random.nextInt(120); - int b = a + 1 + random.nextInt(200); - field.clear(a); - field.clear(b); - assertEquals(b, field.nextClearBit(a + 1)); - field.set(a); - field.set(b); - } - } - - private void testByteOperations() { - BitField used = new BitField(); - testSetFast(used, false); - testSetFast(used, true); - } - - private void testSetFast(BitField used, boolean init) { - int len = 10000; - Random random = new Random(1); - for (int i = 0, x = 0; i < len / 8; i++) { - int mask = random.nextInt() & 255; - if (init) { - assertEquals(mask, used.getByte(x)); - x += 8; - // for (int j = 0; j < 8; j++, x++) { - // if (used.get(x) != ((mask & (1 << j)) != 0)) { - // throw Message.getInternalError( - // "Redo failure, block: " + x + - // " expected in-use bit: " + used.get(x)); - // } - // } - } else { - used.setByte(x, mask); - x += 8; - // for (int j = 0; j < 8; j++, x++) { - // if ((mask & (1 << j)) != 0) { - // used.set(x); - // } - // } - } - } - } - - private void testRandom() { - BitField bits = new BitField(); - BitSet set = new BitSet(); - int max = 300; - int count = 100000; - Random random = new Random(1); - for (int i = 0; i < count; i++) { - int idx = random.nextInt(max); - if (random.nextBoolean()) { - if (random.nextBoolean()) { - bits.set(idx); - set.set(idx); - } else { - bits.clear(idx); - set.clear(idx); - } - } else { - assertEquals(set.get(idx), bits.get(idx)); - assertEquals(set.nextClearBit(idx), 
bits.nextClearBit(idx)); - assertEquals(set.length(), bits.length()); - } - } - } - - private void testGetSet() { - BitField bits = new BitField(); - for (int i = 0; i < 10000; i++) { - bits.set(i); - if (!bits.get(i)) { - fail("not set: " + i); - } - if (bits.get(i + 1)) { - fail("set: " + i); - } - } - for (int i = 0; i < 10000; i++) { - if (!bits.get(i)) { - fail("not set: " + i); - } - } - for (int i = 0; i < 1000; i++) { - int k = bits.nextClearBit(0); - if (k != 10000) { - fail("" + k); - } - } - } - - private void testRandomSetRange() { - BitField bits = new BitField(); - BitSet set = new BitSet(); - Random random = new Random(1); - int maxOffset = 500; - int maxLen = 500; - int total = maxOffset + maxLen; - int count = 10000; - for (int i = 0; i < count; i++) { - int offset = random.nextInt(maxOffset); - int len = random.nextInt(maxLen); - boolean val = random.nextBoolean(); - set.set(offset, offset + len, val); - bits.set(offset, offset + len, val); - for (int j = 0; j < total; j++) { - assertEquals(set.get(j), bits.get(j)); - } - } - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestBitStream.java b/h2/src/test/org/h2/test/unit/TestBitStream.java new file mode 100644 index 0000000000..dd53cc55bc --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestBitStream.java @@ -0,0 +1,160 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.util.Random; + +import org.h2.dev.util.BitStream; +import org.h2.dev.util.BitStream.In; +import org.h2.dev.util.BitStream.Out; +import org.h2.test.TestBase; + +/** + * Test the bit stream (Golomb code and Huffman code) utility. + */ +public class TestBitStream extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testHuffmanRandomized(); + testHuffman(); + testBitStream(); + testGolomb("11110010", 10, 42); + testGolomb("00", 3, 0); + testGolomb("010", 3, 1); + testGolomb("011", 3, 2); + testGolomb("100", 3, 3); + testGolomb("1010", 3, 4); + testGolombRandomized(); + } + + private void testHuffmanRandomized() { + Random r = new Random(1); + int[] freq = new int[r.nextInt(200) + 1]; + for (int i = 0; i < freq.length; i++) { + freq[i] = r.nextInt(1000) + 1; + } + int seed = r.nextInt(); + r.setSeed(seed); + BitStream.Huffman huff = new BitStream.Huffman(freq); + ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + BitStream.Out out = new BitStream.Out(byteOut); + for (int i = 0; i < 10000; i++) { + huff.write(out, r.nextInt(freq.length)); + } + out.close(); + BitStream.In in = new BitStream.In(new ByteArrayInputStream(byteOut.toByteArray())); + r.setSeed(seed); + for (int i = 0; i < 10000; i++) { + int expected = r.nextInt(freq.length); + assertEquals(expected, huff.read(in)); + } + } + + private void testHuffman() { + int[] freq = { 36, 18, 12, 9, 7, 6, 5, 4 }; + BitStream.Huffman huff = new BitStream.Huffman(freq); + final StringBuilder buff = new StringBuilder(); + Out o = new Out(null) { + @Override + public void writeBit(int bit) { + buff.append(bit == 0 ? '0' : '1'); + } + }; + for (int i = 0; i < freq.length; i++) { + buff.append(i + ": "); + huff.write(o, i); + buff.append("\n"); + } + assertEquals( + "0: 0\n" + + "1: 110\n" + + "2: 100\n" + + "3: 1110\n" + + "4: 1011\n" + + "5: 1010\n" + + "6: 11111\n" + + "7: 11110\n", buff.toString()); + } + + private void testGolomb(String expected, int div, int value) { + final StringBuilder buff = new StringBuilder(); + Out o = new Out(null) { + @Override + public void writeBit(int bit) { + buff.append(bit == 0 ? 
'0' : '1'); + } + }; + o.writeGolomb(div, value); + int size = Out.getGolombSize(div, value); + String got = buff.toString(); + assertEquals(size, got.length()); + assertEquals(expected, got); + } + + private void testGolombRandomized() { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Out bitOut = new Out(out); + Random r = new Random(1); + int len = 1000; + for (int i = 0; i < len; i++) { + int div = r.nextInt(100) + 1; + int value = r.nextInt(1000000); + bitOut.writeGolomb(div, value); + } + bitOut.flush(); + bitOut.close(); + byte[] data = out.toByteArray(); + ByteArrayInputStream in = new ByteArrayInputStream(data); + In bitIn = new In(in); + r.setSeed(1); + for (int i = 0; i < len; i++) { + int div = r.nextInt(100) + 1; + int value = r.nextInt(1000000); + int v = bitIn.readGolomb(div); + assertEquals("i=" + i + " div=" + div, value, v); + } + } + + private void testBitStream() { + Random r = new Random(); + for (int test = 0; test < 10000; test++) { + ByteArrayOutputStream buff = new ByteArrayOutputStream(); + int len = r.nextInt(40); + Out out = new Out(buff); + long seed = r.nextLong(); + Random r2 = new Random(seed); + for (int i = 0; i < len; i++) { + out.writeBit(r2.nextBoolean() ? 1 : 0); + } + out.close(); + In in = new In(new ByteArrayInputStream( + buff.toByteArray())); + r2 = new Random(seed); + int i = 0; + for (; i < len; i++) { + int expected = r2.nextBoolean() ? 1 : 0; + assertEquals(expected, in.readBit()); + } + for (; i % 8 != 0; i++) { + assertEquals(0, in.readBit()); + } + assertEquals(-1, in.readBit()); + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestBnf.java b/h2/src/test/org/h2/test/unit/TestBnf.java index 8779326953..71f9113c64 100644 --- a/h2/src/test/org/h2/test/unit/TestBnf.java +++ b/h2/src/test/org/h2/test/unit/TestBnf.java @@ -1,27 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; +import java.sql.Connection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import org.h2.bnf.Bnf; import org.h2.bnf.context.DbContents; import org.h2.bnf.context.DbContextRule; import org.h2.bnf.context.DbProcedure; import org.h2.bnf.context.DbSchema; import org.h2.test.TestBase; - -import java.sql.Connection; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; +import org.h2.test.TestDb; /** * Test Bnf Sql parser * @author Nicolas Fortin */ -public class TestBnf extends TestBase { +public class TestBnf extends TestDb { /** * Run just this test. @@ -29,31 +29,26 @@ public class TestBnf extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { deleteDb("bnf"); - Connection conn = getConnection("bnf"); - try { + try (Connection conn = getConnection("bnf")) { testModes(conn); testProcedures(conn, false); - } finally { - conn.close(); } - conn = getConnection("bnf;mode=mysql"); - try { + deleteDb("bnf"); + try (Connection conn = getConnection("bnf;mode=mysql;database_to_lower=true")) { testProcedures(conn, true); - } finally { - conn.close(); } } private void testModes(Connection conn) throws Exception { DbContents dbContents; dbContents = new DbContents(); - dbContents.readContents("jdbc:h2:test", conn); + dbContents.readContents("jdbc:h2:./test", conn); assertTrue(dbContents.isH2()); dbContents = new DbContents(); dbContents.readContents("jdbc:derby:test", conn); @@ -93,18 +88,16 @@ private void testProcedures(Connection conn, boolean isMySQLMode) "CREATE TABLE " + "TABLE_WITH_STRING_FIELD (STRING_FIELD VARCHAR(50), INT_FIELD integer)"); DbContents 
dbContents = new DbContents(); - dbContents.readContents("jdbc:h2:test", conn); + dbContents.readContents("jdbc:h2:./test", conn); assertTrue(dbContents.isH2()); assertFalse(dbContents.isDerby()); assertFalse(dbContents.isFirebird()); assertEquals(null, dbContents.quoteIdentifier(null)); if (isMySQLMode) { - assertTrue(dbContents.isH2ModeMySQL()); - assertEquals("TEST", dbContents.quoteIdentifier("TEST")); - assertEquals("TEST", dbContents.quoteIdentifier("Test")); - assertEquals("TEST", dbContents.quoteIdentifier("test")); + assertEquals("\"TEST\"", dbContents.quoteIdentifier("TEST")); + assertEquals("\"Test\"", dbContents.quoteIdentifier("Test")); + assertEquals("test", dbContents.quoteIdentifier("test")); } else { - assertFalse(dbContents.isH2ModeMySQL()); assertEquals("TEST", dbContents.quoteIdentifier("TEST")); assertEquals("\"Test\"", dbContents.quoteIdentifier("Test")); assertEquals("\"test\"", dbContents.quoteIdentifier("test")); @@ -116,7 +109,7 @@ private void testProcedures(Connection conn, boolean isMySQLMode) assertFalse(dbContents.isSQLite()); DbSchema defaultSchema = dbContents.getDefaultSchema(); DbProcedure[] procedures = defaultSchema.getProcedures(); - Set procedureName = new HashSet(procedures.length); + Set procedureName = new HashSet<>(procedures.length); for (DbProcedure procedure : procedures) { assertTrue(defaultSchema == procedure.getSchema()); procedureName.add(procedure.getName()); @@ -136,13 +129,37 @@ private void testProcedures(Connection conn, boolean isMySQLMode) DbContextRule columnRule = new DbContextRule(dbContents, DbContextRule.COLUMN); bnf.updateTopic("column_name", columnRule); - bnf.updateTopic("expression", new + bnf.updateTopic("user_defined_function_name", new DbContextRule(dbContents, DbContextRule.PROCEDURE)); bnf.linkStatements(); // Test partial - Map tokens = bnf.getNextTokenList("SELECT CUSTOM_PR"); + Map tokens; + tokens = bnf.getNextTokenList("SELECT CUSTOM_PR"); assertTrue(tokens.values().contains("INT")); + // 
Test identifiers are working + tokens = bnf.getNextTokenList("create table \"test\" as s" + "el"); + assertTrue(tokens.values().contains("E" + "CT")); + + tokens = bnf.getNextTokenList("create table test as s" + "el"); + assertTrue(tokens.values().contains("E" + "CT")); + + // Test || with and without spaces + tokens = bnf.getNextTokenList("select 1||f"); + assertFalse(tokens.values().contains("R" + "OM")); + tokens = bnf.getNextTokenList("select 1 || f"); + assertFalse(tokens.values().contains("R" + "OM")); + tokens = bnf.getNextTokenList("select 1 || 2 "); + assertTrue(tokens.values().contains("FROM")); + tokens = bnf.getNextTokenList("select 1||2"); + assertTrue(tokens.values().contains("FROM")); + tokens = bnf.getNextTokenList("select 1 || 2"); + assertTrue(tokens.values().contains("FROM")); + + // Test keyword + tokens = bnf.getNextTokenList("SELECT LE" + "AS"); + assertTrue(tokens.values().contains("T")); + // Test parameters tokens = bnf.getNextTokenList("SELECT CUSTOM_PRINT("); assertTrue(tokens.values().contains("STRING_FIELD")); diff --git a/h2/src/test/org/h2/test/unit/TestCache.java b/h2/src/test/org/h2/test/unit/TestCache.java index 67171c6847..4f71f0d317 100644 --- a/h2/src/test/org/h2/test/unit/TestCache.java +++ b/h2/src/test/org/h2/test/unit/TestCache.java @@ -1,21 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; -import java.io.ByteArrayInputStream; -import java.io.InputStream; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Random; - import org.h2.message.Trace; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.util.Cache; import org.h2.util.CacheLRU; import org.h2.util.CacheObject; @@ -27,7 +25,7 @@ /** * Tests the cache. */ -public class TestCache extends TestBase implements CacheWriter { +public class TestCache extends TestDb implements CacheWriter { private String out; @@ -39,101 +37,17 @@ public class TestCache extends TestBase implements CacheWriter { public static void main(String... a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override public void test() throws Exception { - if (!config.mvStore) { - testTQ(); - } testMemoryUsage(); testCache(); testCacheDb(false); testCacheDb(true); } - private void testTQ() throws Exception { - if (config.memory || config.reopen) { - return; - } - deleteDb("cache"); - Connection conn = getConnection( - "cache;LOG=0;UNDO_LOG=0"); - Statement stat = conn.createStatement(); - stat.execute("create table if not exists lob" + - "(id int primary key, data blob)"); - PreparedStatement prep = conn.prepareStatement( - "insert into lob values(?, ?)"); - Random r = new Random(1); - byte[] buff = new byte[2 * 1024 * 1024]; - for (int i = 0; i < 10; i++) { - prep.setInt(1, i); - r.nextBytes(buff); - prep.setBinaryStream(2, new ByteArrayInputStream(buff), -1); - prep.execute(); - } - stat.execute("create table if not exists test" + - "(id int primary key, data varchar)"); - prep = conn.prepareStatement("insert into test values(?, ?)"); - for (int i = 0; i < 20000; i++) { - prep.setInt(1, i); - prep.setString(2, "Hello"); - prep.execute(); - } - 
conn.close(); - testTQ("LRU", false); - testTQ("TQ", true); - } - - private void testTQ(String cacheType, boolean scanResistant) throws Exception { - Connection conn = getConnection( - "cache;CACHE_TYPE=" + cacheType + ";CACHE_SIZE=4096"); - Statement stat = conn.createStatement(); - PreparedStatement prep; - for (int k = 0; k < 10; k++) { - int rc; - prep = conn.prepareStatement( - "select * from test where id = ?"); - rc = getReadCount(stat); - for (int x = 0; x < 2; x++) { - for (int i = 0; i < 15000; i++) { - prep.setInt(1, i); - prep.executeQuery(); - } - } - int rcData = getReadCount(stat) - rc; - if (scanResistant && k > 0) { - // TQ is expected to keep the data rows in the cache - // even if the LOB is read once in a while - assertEquals(0, rcData); - } else { - assertTrue(rcData > 0); - } - rc = getReadCount(stat); - ResultSet rs = stat.executeQuery( - "select * from lob where id = " + k); - rs.next(); - InputStream in = rs.getBinaryStream(2); - while (in.read() >= 0) { - // ignore - } - in.close(); - int rcLob = getReadCount(stat) - rc; - assertTrue(rcLob > 0); - } - conn.close(); - } - - private static int getReadCount(Statement stat) throws Exception { - ResultSet rs; - rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.FILE_READ'"); - rs.next(); - return rs.getInt(1); - } - private void testMemoryUsage() throws SQLException { if (!config.traceTest) { return; @@ -168,8 +82,7 @@ private void testMemoryUsage() throws SQLException { // stat.execute("select data from test where data >= ''"); rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.CACHE_SIZE'"); + "SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'info.CACHE_SIZE'"); rs.next(); int calculated = rs.getInt(1); rs = null; @@ -185,12 +98,9 @@ private void testMemoryUsage() throws SQLException { " after closing: " + afterClose); } - private int getRealMemory() { + private static 
long getRealMemory() { StringUtils.clearCache(); Value.clearCache(); - eatMemory(100); - freeMemory(); - System.gc(); return Utils.getMemoryUsed(); } diff --git a/h2/src/test/org/h2/test/unit/TestCharsetCollator.java b/h2/src/test/org/h2/test/unit/TestCharsetCollator.java new file mode 100644 index 0000000000..e1fb1d13fa --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestCharsetCollator.java @@ -0,0 +1,73 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.nio.charset.UnsupportedCharsetException; +import java.text.Collator; +import org.h2.test.TestBase; +import org.h2.value.CharsetCollator; +import org.h2.value.CompareMode; + +/** + * Unittest for org.h2.value.CharsetCollator + */ +public class TestCharsetCollator extends TestBase { + private CharsetCollator cp500Collator = new CharsetCollator(Charset.forName("cp500")); + private CharsetCollator utf8Collator = new CharsetCollator(StandardCharsets.UTF_8); + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + + @Override + public void test() throws Exception { + testBasicComparison(); + testNumberToCharacterComparison(); + testLengthComparison(); + testCreationFromCompareMode(); + testCreationFromCompareModeWithInvalidCharset(); + testCaseInsensitive(); + } + + private void testCreationFromCompareModeWithInvalidCharset() { + assertThrows(UnsupportedCharsetException.class, () -> CompareMode.getCollator("CHARSET_INVALID")); + } + + private void testCreationFromCompareMode() { + Collator utf8Col = CompareMode.getCollator("CHARSET_UTF-8"); + assertTrue(utf8Col instanceof CharsetCollator); + assertEquals(((CharsetCollator) utf8Col).getCharset(), StandardCharsets.UTF_8); + } + + private void testBasicComparison() { + assertTrue(cp500Collator.compare("A", "B") < 0); + assertTrue(cp500Collator.compare("AA", "AB") < 0); + } + + private void testLengthComparison() { + assertTrue(utf8Collator.compare("AA", "A") > 0); + } + + private void testNumberToCharacterComparison() { + assertTrue(cp500Collator.compare("A", "1") < 0); + assertTrue(utf8Collator.compare("A", "1") > 0); + } + + private void testCaseInsensitive() { + CharsetCollator c = new CharsetCollator(StandardCharsets.UTF_8); + c.setStrength(Collator.SECONDARY); + assertEquals(0, c.compare("a", "A")); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java b/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java index 7f985e88e0..1a6b4f4719 100644 --- a/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java +++ b/h2/src/test/org/h2/test/unit/TestClassLoaderLeak.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -12,8 +12,8 @@ import java.sql.Driver; import java.sql.DriverManager; import java.util.ArrayList; + import org.h2.test.TestBase; -import org.h2.util.New; /** * Test that static references within the database engine don't reference the @@ -39,7 +39,7 @@ public class TestClassLoaderLeak extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -50,22 +50,22 @@ public void test() throws Exception { Thread.sleep(10); } ClassLoader cl = ref.get(); - assertTrue(cl == null); + assertNull(cl); // fill the memory, so a heap dump is created // using -XX:+HeapDumpOnOutOfMemoryError // which can be analyzed using EclipseMAT // (check incoming references to TestClassLoader) boolean fillMemory = false; if (fillMemory) { - ArrayList memory = New.arrayList(); + ArrayList memory = new ArrayList<>(); for (int i = 0; i < Integer.MAX_VALUE; i++) { memory.add(new byte[1024]); } } DriverManager.registerDriver((Driver) - Class.forName("org.h2.Driver").newInstance()); + Class.forName("org.h2.Driver").getDeclaredConstructor().newInstance()); DriverManager.registerDriver((Driver) - Class.forName("org.h2.upgrade.v1_1.Driver").newInstance()); + Class.forName("org.h2.upgrade.v1_1.Driver").getDeclaredConstructor().newInstance()); } private static WeakReference createClassLoader() throws Exception { @@ -74,7 +74,7 @@ private static WeakReference createClassLoader() throws Exception { Method testMethod = h2ConnectionTestClass.getDeclaredMethod("runTest"); testMethod.setAccessible(true); testMethod.invoke(null); - return new WeakReference(cl); + return new WeakReference<>(cl); } /** @@ -114,9 +114,7 @@ public synchronized Class loadClass(String name, boolean resolve) if (c == null) { try { c = findClass(name); - } catch (SecurityException e) { - return super.loadClass(name, resolve); - } catch 
(ClassNotFoundException e) { + } catch (SecurityException | ClassNotFoundException e) { return super.loadClass(name, resolve); } if (resolve) { diff --git a/h2/src/test/org/h2/test/unit/TestClearReferences.java b/h2/src/test/org/h2/test/unit/TestClearReferences.java deleted file mode 100644 index a108972ba6..0000000000 --- a/h2/src/test/org/h2/test/unit/TestClearReferences.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.File; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import org.h2.test.TestBase; -import org.h2.util.MathUtils; -import org.h2.util.New; -import org.h2.value.ValueInt; - -/** - * Tests if Tomcat would clear static fields when re-loading a web application. - * See also - * http://svn.apache.org/repos/asf/tomcat/trunk/java/org/apache/catalina - * /loader/WebappClassLoader.java - */ -public class TestClearReferences extends TestBase { - - private static final String[] KNOWN_REFRESHED = { - "org.h2.compress.CompressLZF.cachedHashTable", - "org.h2.engine.DbSettings.defaultSettings", - "org.h2.engine.SessionRemote.sessionFactory", - "org.h2.jdbcx.JdbcDataSourceFactory.cachedTraceSystem", - "org.h2.store.RecoverTester.instance", - "org.h2.store.fs.FilePath.defaultProvider", - "org.h2.store.fs.FilePath.providers", - "org.h2.store.fs.FilePath.tempRandom", - "org.h2.store.fs.FilePathRec.recorder", - "org.h2.store.fs.FileMemData.data", - "org.h2.tools.CompressTool.cachedBuffer", - "org.h2.util.CloseWatcher.queue", - "org.h2.util.CloseWatcher.refs", - "org.h2.util.MathUtils.cachedSecureRandom", - "org.h2.util.NetUtils.cachedLocalAddress", - "org.h2.util.StringUtils.softCache", - "org.h2.util.JdbcUtils.allowedClassNames", - "org.h2.util.JdbcUtils.allowedClassNamePrefixes", - 
"org.h2.util.JdbcUtils.userClassFactories", - "org.h2.util.Task.counter", - "org.h2.value.CompareMode.lastUsed", - "org.h2.value.Value.softCache", - }; - - private boolean hasError; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - private void clear() throws Exception { - ArrayList> classes = New.arrayList(); - check(classes, new File("bin/org/h2")); - check(classes, new File("temp/org/h2")); - for (Class clazz : classes) { - clearClass(clazz); - } - } - - @Override - public void test() throws Exception { - // initialize the known classes - MathUtils.secureRandomLong(); - ValueInt.get(1); - Class.forName("org.h2.store.fs.FileMemData"); - - clear(); - - if (hasError) { - fail("Tomcat may clear the field above when reloading the web app"); - } - for (String s : KNOWN_REFRESHED) { - String className = s.substring(0, s.lastIndexOf('.')); - String fieldName = s.substring(s.lastIndexOf('.') + 1); - Class clazz = Class.forName(className); - try { - clazz.getDeclaredField(fieldName); - } catch (Exception e) { - fail(s); - } - } - } - - private void check(ArrayList> classes, File file) { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : file.listFiles()) { - check(classes, f); - } - } else { - if (!name.endsWith(".class")) { - return; - } - if (name.indexOf('$') >= 0) { - return; - } - String className = file.getAbsolutePath().replace('\\', '/'); - className = className.substring(className.lastIndexOf("org/h2")); - String packageName = className.substring(0, className.lastIndexOf('/')); - if (!new File("src/main/" + packageName).exists()) { - return; - } - className = className.replace('/', '.'); - className = className.substring(0, className.length() - ".class".length()); - Class clazz = null; - try { - clazz = Class.forName(className); - } catch 
(NoClassDefFoundError e) { - if (e.toString().contains("lucene")) { - // Lucene is not in the classpath, OK - } - } catch (ClassNotFoundException e) { - fail("Could not load " + className + ": " + e.toString()); - } - if (clazz != null) { - classes.add(clazz); - } - } - } - - /** - * This is how Tomcat resets the fields as of 2009-01-30. - * - * @param clazz the class to clear - */ - private void clearClass(Class clazz) throws Exception { - Field[] fields; - try { - fields = clazz.getDeclaredFields(); - } catch (NoClassDefFoundError e) { - if (e.toString().contains("lucene")) { - // Lucene is not in the classpath, OK - return; - } else if (e.toString().contains("jts")) { - // JTS is not in the classpath, OK - return; - } else if (e.toString().contains("slf4j")) { - // slf4j is not in the classpath, OK - return; - } - throw e; - } - for (Field field : fields) { - if (field.getType().isPrimitive() || field.getName().indexOf("$") != -1) { - continue; - } - int modifiers = field.getModifiers(); - if (!Modifier.isStatic(modifiers)) { - continue; - } - field.setAccessible(true); - Object o = field.get(null); - if (o == null) { - continue; - } - if (Modifier.isFinal(modifiers)) { - if (field.getType().getName().startsWith("java.")) { - continue; - } - if (field.getType().getName().startsWith("javax.")) { - continue; - } - clearInstance(o); - } else { - clearField(clazz.getName() + "." 
+ field.getName() + " = " + o); - } - } - } - - private void clearInstance(Object instance) throws Exception { - for (Field field : instance.getClass().getDeclaredFields()) { - if (field.getType().isPrimitive() || (field.getName().indexOf("$") != -1)) { - continue; - } - int modifiers = field.getModifiers(); - if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) { - continue; - } - field.setAccessible(true); - Object o = field.get(instance); - if (o == null) { - continue; - } - // loadedByThisOrChild - if (o.getClass().getName().startsWith("java.lang.")) { - continue; - } - if (o.getClass().isArray() && o.getClass().getComponentType().isPrimitive()) { - continue; - } - clearField(instance.getClass().getName() + "." + field.getName() + " = " + o); - } - } - - private void clearField(String s) { - for (String k : KNOWN_REFRESHED) { - if (s.startsWith(k)) { - return; - } - } - hasError = true; - System.out.println(s); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestCollation.java b/h2/src/test/org/h2/test/unit/TestCollation.java index 088e77e2a3..7e0a9b1520 100644 --- a/h2/src/test/org/h2/test/unit/TestCollation.java +++ b/h2/src/test/org/h2/test/unit/TestCollation.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -10,11 +10,12 @@ import org.h2.api.ErrorCode; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Test the ICU4J collator. */ -public class TestCollation extends TestBase { +public class TestCollation extends TestDb { /** * Run just this test. @@ -22,7 +23,7 @@ public class TestCollation extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestCompress.java b/h2/src/test/org/h2/test/unit/TestCompress.java index baffa4dd09..7aacdf6c0c 100644 --- a/h2/src/test/org/h2/test/unit/TestCompress.java +++ b/h2/src/test/org/h2/test/unit/TestCompress.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -17,20 +17,22 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Random; +import java.util.concurrent.TimeUnit; + import org.h2.compress.CompressLZF; import org.h2.compress.Compressor; import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.CompressTool; import org.h2.util.IOUtils; -import org.h2.util.New; import org.h2.util.Task; /** * Data compression tests. */ -public class TestCompress extends TestBase { +public class TestCompress extends TestDb { private boolean testPerformance; private final byte[] buff = new byte[10]; @@ -41,7 +43,7 @@ public class TestCompress extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -157,9 +159,9 @@ private void testDatabase() throws Exception { int pageSize = Constants.DEFAULT_PAGE_SIZE; byte[] buff2 = new byte[pageSize]; byte[] test = new byte[2 * pageSize]; - compress.compress(buff2, pageSize, test, 0); + compress.compress(buff2, 0, pageSize, test, 0); for (int j = 0; j < 4; j++) { - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < 1000; i++) { InputStream in = FileUtils.newInputStream("memFS:compress.h2.db"); while (true) { @@ -167,36 +169,39 @@ private void testDatabase() throws Exception { if (len < 0) { break; } - compress.compress(buff2, pageSize, test, 0); + compress.compress(buff2, 0, pageSize, test, 0); } in.close(); } - System.out.println("compress: " + (System.currentTimeMillis() - time) + " ms"); + System.out.println("compress: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + + " ms"); } for (int j = 0; j < 4; j++) { - ArrayList comp = New.arrayList(); + ArrayList comp = new ArrayList<>(); InputStream in = FileUtils.newInputStream("memFS:compress.h2.db"); while (true) { int len = in.read(buff2); if (len < 0) { break; } - int b = compress.compress(buff2, pageSize, test, 0); - byte[] data = new byte[b]; - System.arraycopy(test, 0, data, 0, b); + int b = compress.compress(buff2, 0, pageSize, test, 0); + byte[] data = Arrays.copyOf(test, b); comp.add(data); } in.close(); byte[] result = new byte[pageSize]; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int i = 0; i < 1000; i++) { for (int k = 0; k < comp.size(); k++) { byte[] data = comp.get(k); compress.expand(data, 0, data.length, result, 0, pageSize); } } - System.out.println("expand: " + (System.currentTimeMillis() - time) + " ms"); + System.out.println("expand: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + + " ms"); } } @@ -242,12 +247,17 @@ 
private void testByteArray(int len) throws IOException { // level 9 is highest, strategy 2 is huffman only for (String a : new String[] { "LZF", "No", "Deflate", "Deflate level 9 strategy 2" }) { - long time = System.currentTimeMillis(); + long time = System.nanoTime(); byte[] out = utils.compress(b, a); byte[] test = utils.expand(out); if (testPerformance) { - System.out.println("p:" + pattern + " len: " + out.length + - " time: " + (System.currentTimeMillis() - time) + " " + a); + System.out.println("p:" + + pattern + + " len: " + + out.length + + " time: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - + time) + " " + a); } assertEquals(b.length, test.length); assertEquals(b, test); diff --git a/h2/src/test/org/h2/test/unit/TestConcurrent.java b/h2/src/test/org/h2/test/unit/TestConcurrent.java deleted file mode 100644 index e0832b2c84..0000000000 --- a/h2/src/test/org/h2/test/unit/TestConcurrent.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Statement; - -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.util.Task; - -/** - * Test concurrent access to JDBC objects. - */ -public class TestConcurrent extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - String url = "jdbc:h2:mem:"; - for (int i = 0; i < 50; i++) { - final int x = i % 4; - final Connection conn = DriverManager.getConnection(url); - final Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - String sql = ""; - switch (x % 6) { - case 0: - sql = "select 1"; - break; - case 1: - case 2: - sql = "delete from test"; - break; - } - final PreparedStatement prep = conn.prepareStatement(sql); - Task t = new Task() { - @Override - public void call() throws SQLException { - while (!conn.isClosed()) { - switch (x % 6) { - case 0: - prep.executeQuery(); - break; - case 1: - prep.execute(); - break; - case 2: - prep.executeUpdate(); - break; - case 3: - stat.executeQuery("select 1"); - break; - case 4: - stat.execute("select 1"); - break; - case 5: - stat.execute("delete from test"); - break; - } - } - } - }; - t.execute(); - Thread.sleep(100); - conn.close(); - SQLException e = (SQLException) t.getException(); - if (e != null) { - if (ErrorCode.OBJECT_CLOSED != e.getErrorCode() && - ErrorCode.STATEMENT_WAS_CANCELED != e.getErrorCode()) { - throw e; - } - } - } - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java b/h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java new file mode 100644 index 0000000000..bf75f5f70e --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestConcurrentJdbc.java @@ -0,0 +1,99 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.CountDownLatch; + +import org.h2.api.ErrorCode; +import org.h2.test.TestBase; +import org.h2.util.Task; + +/** + * Test concurrent access to JDBC objects. + */ +public class TestConcurrentJdbc extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + String url = "jdbc:h2:mem:"; + for (int i = 0; i < 50; i++) { + final int x = i % 4; + final Connection conn = DriverManager.getConnection(url); + final Statement stat = conn.createStatement(); + stat.execute("create table test(id int primary key)"); + String sql = ""; + switch (x % 6) { + case 0: + sql = "select 1"; + break; + case 1: + case 2: + sql = "delete from test"; + break; + } + final PreparedStatement prep = conn.prepareStatement(sql); + final CountDownLatch executedUpdate = new CountDownLatch(1); + Task t = new Task() { + @Override + public void call() throws SQLException { + while (!conn.isClosed()) { + executedUpdate.countDown(); + switch (x % 6) { + case 0: + prep.executeQuery(); + break; + case 1: + prep.execute(); + break; + case 2: + prep.executeUpdate(); + break; + case 3: + stat.executeQuery("select 1"); + break; + case 4: + stat.execute("select 1"); + break; + case 5: + stat.execute("delete from test"); + break; + } + } + } + }; + t.execute(); + //Wait until the concurrent task has started + try { + executedUpdate.await(); + } catch (InterruptedException e) { + // ignore + } + conn.close(); + SQLException e = (SQLException) t.getException(); + if (e != null) { + if (ErrorCode.OBJECT_CLOSED != e.getErrorCode() && + ErrorCode.STATEMENT_WAS_CANCELED != e.getErrorCode() 
&& + ErrorCode.DATABASE_CALLED_AT_SHUTDOWN != e.getErrorCode()) { + throw e; + } + } + } + } +} diff --git a/h2/src/test/org/h2/test/unit/TestConnectionInfo.java b/h2/src/test/org/h2/test/unit/TestConnectionInfo.java index fcb51d64d6..22405b1a1e 100644 --- a/h2/src/test/org/h2/test/unit/TestConnectionInfo.java +++ b/h2/src/test/org/h2/test/unit/TestConnectionInfo.java @@ -1,17 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.File; -import java.util.Properties; import org.h2.api.ErrorCode; import org.h2.engine.ConnectionInfo; -import org.h2.engine.SysProperties; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; /** @@ -20,7 +19,7 @@ * @author Kerry Sainsbury * @author Thomas Mueller Graf */ -public class TestConnectionInfo extends TestBase { +public class TestConnectionInfo extends TestDb { /** * Run just this test. @@ -28,7 +27,7 @@ public class TestConnectionInfo extends TestBase { * @param a ignored */ public static void main(String[] a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -40,40 +39,29 @@ public void test() throws Exception { } private void testImplicitRelativePath() throws Exception { - if (SysProperties.IMPLICIT_RELATIVE_PATH) { - return; - } - assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, this). - getConnection("jdbc:h2:test"); - assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, this). 
- getConnection("jdbc:h2:data/test"); + assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, () -> getConnection("jdbc:h2:" + getTestName())); + assertThrows(ErrorCode.URL_RELATIVE_TO_CWD, () -> getConnection("jdbc:h2:data/" + getTestName())); - getConnection("jdbc:h2:./testDatabase").close(); - DeleteDbFiles.execute(".", "testDatabase", true); + getConnection("jdbc:h2:./data/" + getTestName()).close(); + DeleteDbFiles.execute("data", getTestName(), true); } private void testConnectInitError() throws Exception { - assertThrows(ErrorCode.SYNTAX_ERROR_2, this). - getConnection("jdbc:h2:mem:;init=error"); - assertThrows(ErrorCode.IO_EXCEPTION_2, this). - getConnection("jdbc:h2:mem:;init=runscript from 'wrong.file'"); + assertThrows(ErrorCode.SYNTAX_ERROR_2, () -> getConnection("jdbc:h2:mem:;init=error")); + assertThrows(ErrorCode.IO_EXCEPTION_2, () -> getConnection("jdbc:h2:mem:;init=runscript from 'wrong.file'")); } private void testConnectionInfo() { - Properties info = new Properties(); ConnectionInfo connectionInfo = new ConnectionInfo( - "jdbc:h2:mem:test" + - ";LOG=2" + + "jdbc:h2:mem:" + getTestName() + ";ACCESS_MODE_DATA=rws" + ";INIT=CREATE this...\\;INSERT that..." + ";IFEXISTS=TRUE", - info); + null, null, null); - assertEquals("jdbc:h2:mem:test", + assertEquals("jdbc:h2:mem:" + getTestName(), connectionInfo.getURL()); - assertEquals("2", - connectionInfo.getProperty("LOG", "")); assertEquals("rws", connectionInfo.getProperty("ACCESS_MODE_DATA", "")); assertEquals("CREATE this...;INSERT that...", diff --git a/h2/src/test/org/h2/test/unit/TestDataPage.java b/h2/src/test/org/h2/test/unit/TestDataPage.java deleted file mode 100644 index 3675a2500d..0000000000 --- a/h2/src/test/org/h2/test/unit/TestDataPage.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.math.BigDecimal; -import java.sql.Date; -import java.sql.Time; -import java.sql.Types; - -import org.h2.api.JavaObjectSerializer; -import org.h2.store.Data; -import org.h2.store.DataHandler; -import org.h2.store.FileStore; -import org.h2.store.LobStorageBackend; -import org.h2.test.TestBase; -import org.h2.tools.SimpleResultSet; -import org.h2.util.SmallLRUCache; -import org.h2.util.TempFileDeleter; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueInt; -import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueUuid; - -/** - * Data page tests. - */ -public class TestDataPage extends TestBase implements DataHandler { - - private boolean testPerformance; - private final CompareMode compareMode = CompareMode.getInstance(null, 0); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - if (testPerformance) { - testPerformance(); - System.exit(0); - return; - } - testValues(); - testAll(); - } - - private static void testPerformance() { - Data data = Data.create(null, 1024); - for (int j = 0; j < 4; j++) { - long time = System.currentTimeMillis(); - for (int i = 0; i < 100000; i++) { - data.reset(); - for (int k = 0; k < 30; k++) { - data.writeString("Hello World"); - } - } - // for (int i = 0; i < 5000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.writeInt(k * k); - // } - // } - // for (int i = 0; i < 200000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.writeVarInt(k * k); - // } - // } - System.out.println("write: " + (System.currentTimeMillis() - time) + " ms"); - } - for (int j = 0; j < 4; j++) { - long time = System.currentTimeMillis(); - for (int i = 0; i < 1000000; i++) { - data.reset(); - for (int k = 0; k < 30; k++) { - data.readString(); - } - } - // for (int i = 0; i < 3000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.readVarInt(); - // } - // } - // for (int i = 0; i < 50000000; i++) { - // data.reset(); - // for (int k = 0; k < 100; k++) { - // data.readInt(); - // } - // } - System.out.println("read: " + (System.currentTimeMillis() - time) + " ms"); - } - } - - private void testValues() { - testValue(ValueNull.INSTANCE); - testValue(ValueBoolean.get(false)); - testValue(ValueBoolean.get(true)); - for (int i = 0; i < 256; i++) { - testValue(ValueByte.get((byte) i)); - } - for (int i = 0; i < 256 * 256; i += 10) { - testValue(ValueShort.get((short) i)); - } - for (int i = 0; i < 256 * 256; i += 10) { - testValue(ValueInt.get(i)); - testValue(ValueInt.get(-i)); - testValue(ValueLong.get(i)); - testValue(ValueLong.get(-i)); - } - testValue(ValueInt.get(Integer.MAX_VALUE)); - testValue(ValueInt.get(Integer.MIN_VALUE)); - for (long i = 0; i < 
Integer.MAX_VALUE; i += 10 + i / 4) { - testValue(ValueInt.get((int) i)); - testValue(ValueInt.get((int) -i)); - } - testValue(ValueLong.get(Long.MAX_VALUE)); - testValue(ValueLong.get(Long.MIN_VALUE)); - for (long i = 0; i >= 0; i += 10 + i / 4) { - testValue(ValueLong.get(i)); - testValue(ValueLong.get(-i)); - } - testValue(ValueDecimal.get(BigDecimal.ZERO)); - testValue(ValueDecimal.get(BigDecimal.ONE)); - testValue(ValueDecimal.get(BigDecimal.TEN)); - testValue(ValueDecimal.get(BigDecimal.ONE.negate())); - testValue(ValueDecimal.get(BigDecimal.TEN.negate())); - for (long i = 0; i >= 0; i += 10 + i / 4) { - testValue(ValueDecimal.get(new BigDecimal(i))); - testValue(ValueDecimal.get(new BigDecimal(-i))); - for (int j = 0; j < 200; j += 50) { - testValue(ValueDecimal.get(new BigDecimal(i).setScale(j))); - testValue(ValueDecimal.get(new BigDecimal(i * i).setScale(j))); - } - testValue(ValueDecimal.get(new BigDecimal(i * i))); - } - testValue(ValueDate.get(new Date(System.currentTimeMillis()))); - testValue(ValueDate.get(new Date(0))); - testValue(ValueTime.get(new Time(System.currentTimeMillis()))); - testValue(ValueTime.get(new Time(0))); - testValue(ValueTimestamp.fromMillis(System.currentTimeMillis())); - testValue(ValueTimestamp.fromMillis(0)); - testValue(ValueJavaObject.getNoCopy(null, new byte[0], this)); - testValue(ValueJavaObject.getNoCopy(null, new byte[100], this)); - for (int i = 0; i < 300; i++) { - testValue(ValueBytes.getNoCopy(new byte[i])); - } - for (int i = 0; i < 65000; i += 10 + i) { - testValue(ValueBytes.getNoCopy(new byte[i])); - } - testValue(ValueUuid.getNewRandom()); - for (int i = 0; i < 100; i++) { - testValue(ValueString.get(new String(new char[i]))); - } - for (int i = 0; i < 65000; i += 10 + i) { - testValue(ValueString.get(new String(new char[i]))); - testValue(ValueStringFixed.get(new String(new char[i]))); - testValue(ValueStringIgnoreCase.get(new String(new char[i]))); - } - testValue(ValueFloat.get(0f)); - 
testValue(ValueFloat.get(1f)); - testValue(ValueFloat.get(-1f)); - testValue(ValueDouble.get(0)); - testValue(ValueDouble.get(1)); - testValue(ValueDouble.get(-1)); - for (int i = 0; i < 65000; i += 10 + i) { - for (double j = 0.1; j < 65000; j += 10 + j) { - testValue(ValueFloat.get((float) (i / j))); - testValue(ValueDouble.get(i / j)); - testValue(ValueFloat.get((float) -(i / j))); - testValue(ValueDouble.get(-(i / j))); - } - } - testValue(ValueArray.get(new Value[0])); - testValue(ValueArray.get(new Value[] { ValueBoolean.get(true), - ValueInt.get(10) })); - - SimpleResultSet rs = new SimpleResultSet(); - rs.setAutoClose(false); - rs.addColumn("ID", Types.INTEGER, 0, 0); - rs.addColumn("NAME", Types.VARCHAR, 255, 0); - rs.addRow(1, "Hello"); - rs.addRow(2, "World"); - rs.addRow(3, "Peace"); - testValue(ValueResultSet.get(rs)); - } - - private void testValue(Value v) { - Data data = Data.create(null, 1024); - data.checkCapacity((int) v.getPrecision()); - data.writeValue(v); - data.writeInt(123); - data.reset(); - Value v2 = data.readValue(); - assertEquals(v.getType(), v2.getType()); - assertEquals(0, v.compareTo(v2, compareMode)); - assertEquals(123, data.readInt()); - } - - private void testAll() { - Data page = Data.create(this, 128); - - char[] data = new char[0x10000]; - for (int i = 0; i < data.length; i++) { - data[i] = (char) i; - } - String s = new String(data); - page.checkCapacity(s.length() * 4); - page.writeString(s); - int len = page.length(); - assertEquals(len, Data.getStringLen(s)); - page.reset(); - assertEquals(s, page.readString()); - page.reset(); - - page.writeString("H\u1111!"); - page.writeString("John\tBrack's \"how are you\" M\u1111ller"); - page.writeValue(ValueInt.get(10)); - page.writeValue(ValueString.get("test")); - page.writeValue(ValueFloat.get(-2.25f)); - page.writeValue(ValueDouble.get(10.40)); - page.writeValue(ValueNull.INSTANCE); - trace(new String(page.getBytes())); - page.reset(); - - trace(page.readString()); - 
trace(page.readString()); - trace(page.readValue().getInt()); - trace(page.readValue().getString()); - trace("" + page.readValue().getFloat()); - trace("" + page.readValue().getDouble()); - trace(page.readValue().toString()); - page.reset(); - - page.writeInt(0); - page.writeInt(Integer.MAX_VALUE); - page.writeInt(Integer.MIN_VALUE); - page.writeInt(1); - page.writeInt(-1); - page.writeInt(1234567890); - page.writeInt(54321); - trace(new String(page.getBytes())); - page.reset(); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - trace(page.readInt()); - - page = null; - } - - @Override - public String getDatabasePath() { - return null; - } - - @Override - public FileStore openFile(String name, String mode, boolean mustExist) { - return null; - } - - @Override - public void checkPowerOff() { - // nothing to do - } - - @Override - public void checkWritingAllowed() { - // ok - } - - @Override - public int getMaxLengthInplaceLob() { - throw new AssertionError(); - } - - @Override - public String getLobCompressionAlgorithm(int type) { - throw new AssertionError(); - } - - @Override - public Object getLobSyncObject() { - return this; - } - - @Override - public SmallLRUCache getLobFileListCache() { - return null; - } - - @Override - public TempFileDeleter getTempFileDeleter() { - return TempFileDeleter.getInstance(); - } - - @Override - public LobStorageBackend getLobStorage() { - return null; - } - - @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - return -1; - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestDate.java b/h2/src/test/org/h2/test/unit/TestDate.java index 99cc38c7a5..739c6b1633 100644 --- a/h2/src/test/org/h2/test/unit/TestDate.java +++ b/h2/src/test/org/h2/test/unit/TestDate.java @@ -1,6 +1,6 @@ /* - * 
Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -15,20 +15,20 @@ import java.util.TimeZone; import org.h2.api.ErrorCode; -import org.h2.engine.SysProperties; -import org.h2.store.Data; +import org.h2.api.JavaObjectSerializer; +import org.h2.engine.CastDataProvider; +import org.h2.engine.Mode; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.util.DateTimeUtils; -import org.h2.util.New; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueDate; import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; import org.h2.value.ValueTime; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; /** * Tests the date parsing. The problem is that some dates are not allowed @@ -38,14 +38,46 @@ */ public class TestDate extends TestBase { + static class SimpleCastDataProvider implements CastDataProvider { + + TimeZoneProvider currentTimeZone = DateTimeUtils.getTimeZone(); + + ValueTimestampTimeZone currentTimestamp = DateTimeUtils.currentTimestamp(currentTimeZone); + + @Override + public Mode getMode() { + return Mode.getRegular(); + } + + @Override + public ValueTimestampTimeZone currentTimestamp() { + return currentTimestamp; + } + + @Override + public TimeZoneProvider currentTimeZone() { + return currentTimeZone; + } + + @Override + public JavaObjectSerializer getJavaObjectSerializer() { + return null; + } + + @Override + public boolean zeroBasedEnums() { + return false; + } + + } + /** * Run just this test. * * @param a ignored */ public static void main(String... 
a) throws Exception { - // System.setProperty("h2.storeLocalTime", "true"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -57,33 +89,28 @@ public void test() throws SQLException { testValidDate(); testAbsoluteDay(); testCalculateLocalMillis(); - testTimeOperationsAcrossTimeZones(); testDateTimeUtils(); } private void testValueDate() { assertEquals("2000-01-01", - ValueDate.get(Date.valueOf("2000-01-01")).getString()); - assertEquals("0-00-00", + LegacyDateTimeUtils.fromDate(null, null, Date.valueOf("2000-01-01")).getString()); + assertEquals("0000-00-00", ValueDate.fromDateValue(0).getString()); assertEquals("9999-12-31", ValueDate.parse("9999-12-31").getString()); assertEquals("-9999-12-31", ValueDate.parse("-9999-12-31").getString()); - assertEquals(Integer.MAX_VALUE + "-12-31", - ValueDate.parse(Integer.MAX_VALUE + "-12-31").getString()); - assertEquals(Integer.MIN_VALUE + "-12-31", - ValueDate.parse(Integer.MIN_VALUE + "-12-31").getString()); ValueDate d1 = ValueDate.parse("2001-01-01"); - assertEquals("2001-01-01", d1.getDate().toString()); - assertEquals("DATE '2001-01-01'", d1.getSQL()); + assertEquals("2001-01-01", LegacyDateTimeUtils.toDate(null, null, d1).toString()); + assertEquals("DATE '2001-01-01'", d1.getTraceSQL()); assertEquals("DATE '2001-01-01'", d1.toString()); - assertEquals(Value.DATE, d1.getType()); + assertEquals(Value.DATE, d1.getValueType()); long dv = d1.getDateValue(); assertEquals((int) ((dv >>> 32) ^ dv), d1.hashCode()); - assertEquals(d1.getString().length(), d1.getDisplaySize()); - assertEquals(ValueDate.PRECISION, d1.getPrecision()); - assertEquals("java.sql.Date", d1.getObject().getClass().getName()); + TypeInfo type = d1.getType(); + assertEquals(d1.getString().length(), type.getDisplaySize()); + assertEquals(ValueDate.PRECISION, type.getPrecision()); ValueDate d1b = ValueDate.parse("2001-01-01"); assertTrue(d1 == d1b); Value.clearCache(); @@ -92,67 +119,35 @@ private void 
testValueDate() { assertTrue(d1.equals(d1)); assertTrue(d1.equals(d1b)); assertTrue(d1b.equals(d1)); - assertEquals(0, d1.compareTo(d1b, null)); - assertEquals(0, d1b.compareTo(d1, null)); + assertEquals(0, d1.compareTo(d1b, null, null)); + assertEquals(0, d1b.compareTo(d1, null, null)); ValueDate d2 = ValueDate.parse("2002-02-02"); assertFalse(d1.equals(d2)); assertFalse(d2.equals(d1)); - assertEquals(-1, d1.compareTo(d2, null)); - assertEquals(1, d2.compareTo(d1, null)); - - // can't convert using java.util.Date - assertEquals( - Integer.MAX_VALUE + "-12-31 00:00:00.0", - ValueDate.parse(Integer.MAX_VALUE + "-12-31"). - convertTo(Value.TIMESTAMP).getString()); - assertEquals( - Integer.MIN_VALUE + "-12-31 00:00:00.0", - ValueDate.parse(Integer.MIN_VALUE + "-12-31"). - convertTo(Value.TIMESTAMP).getString()); - assertEquals( - "00:00:00", - ValueDate.parse(Integer.MAX_VALUE + "-12-31"). - convertTo(Value.TIME).getString()); - assertEquals( - "00:00:00", - ValueDate.parse(Integer.MIN_VALUE + "-12-31"). 
- convertTo(Value.TIME).getString()); + assertEquals(-1, d1.compareTo(d2, null, null)); + assertEquals(1, d2.compareTo(d1, null, null)); } private void testValueTime() { - assertEquals("10:20:30", ValueTime.get(Time.valueOf("10:20:30")).getString()); + assertEquals("10:20:30", LegacyDateTimeUtils.fromTime(null, null, Time.valueOf("10:20:30")).getString()); assertEquals("00:00:00", ValueTime.fromNanos(0).getString()); assertEquals("23:59:59", ValueTime.parse("23:59:59").getString()); - assertEquals("99:59:59", ValueTime.parse("99:59:59").getString()); - assertEquals("-99:02:03.001002003", - ValueTime.parse("-99:02:03.001002003").getString()); - assertEquals("-99:02:03.001002", - ValueTime.parse("-99:02:03.001002000").getString()); - assertEquals("-99:02:03", - ValueTime.parse("-99:02:03.0000000000001").getString()); - assertEquals("1999999:59:59.999999999", - ValueTime.parse("1999999:59:59.999999999").getString()); - assertEquals("-1999999:59:59.999999999", - ValueTime.parse("-1999999:59:59.999999999").getString()); + assertEquals("11:22:33.444555666", ValueTime.parse("11:22:33.444555666").getString()); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, () -> ValueTime.parse("-00:00:00.000000001")); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, () -> ValueTime.parse("24:00:00")); ValueTime t1 = ValueTime.parse("11:11:11"); - assertEquals("11:11:11", t1.getTime().toString()); - assertEquals("1970-01-01", t1.getDate().toString()); - assertEquals("TIME '11:11:11'", t1.getSQL()); + assertEquals("11:11:11", LegacyDateTimeUtils.toTime(null, null, t1).toString()); + assertEquals("TIME '11:11:11'", t1.getTraceSQL()); assertEquals("TIME '11:11:11'", t1.toString()); - assertEquals(1, t1.getSignum()); - assertEquals(-1, t1.negate().getSignum()); - assertEquals(0, t1.multiply(ValueInt.get(0)).getSignum()); - assertEquals(0, t1.subtract(t1).getSignum()); assertEquals("05:35:35.5", t1.multiply(ValueDouble.get(0.5)).getString()); - assertEquals("22:22:22", 
t1.divide(ValueDouble.get(0.5)).getString()); - assertEquals("-11:11:11", t1.negate().getString()); - assertEquals("11:11:11", t1.negate().negate().getString()); - assertEquals(Value.TIME, t1.getType()); + assertEquals("22:22:22", t1.divide(ValueDouble.get(0.5), TypeInfo.TYPE_TIME).getString()); + assertEquals(Value.TIME, t1.getValueType()); long nanos = t1.getNanos(); assertEquals((int) ((nanos >>> 32) ^ nanos), t1.hashCode()); - assertEquals(t1.getString().length(), t1.getDisplaySize()); - assertEquals(ValueTime.PRECISION, t1.getPrecision()); - assertEquals("java.sql.Time", t1.getObject().getClass().getName()); + // Literals return maximum precision + TypeInfo type = t1.getType(); + assertEquals(ValueTime.MAXIMUM_PRECISION, type.getDisplaySize()); + assertEquals(ValueTime.MAXIMUM_PRECISION, type.getPrecision()); ValueTime t1b = ValueTime.parse("11:11:11"); assertTrue(t1 == t1b); Value.clearCache(); @@ -161,25 +156,13 @@ private void testValueTime() { assertTrue(t1.equals(t1)); assertTrue(t1.equals(t1b)); assertTrue(t1b.equals(t1)); - assertEquals(0, t1.compareTo(t1b, null)); - assertEquals(0, t1b.compareTo(t1, null)); + assertEquals(0, t1.compareTo(t1b, null, null)); + assertEquals(0, t1b.compareTo(t1, null, null)); ValueTime t2 = ValueTime.parse("22:22:22"); assertFalse(t1.equals(t2)); assertFalse(t2.equals(t1)); - assertEquals("33:33:33", t1.add(t2).getString()); - assertEquals("33:33:33", t1.multiply(ValueInt.get(4)).subtract(t1).getString()); - assertEquals(-1, t1.compareTo(t2, null)); - assertEquals(1, t2.compareTo(t1, null)); - - // can't convert using java.util.Date - assertEquals( - "1969-12-31 23:00:00.0", - ValueTime.parse("-1:00:00"). - convertTo(Value.TIMESTAMP).getString()); - assertEquals( - "1970-01-01", - ValueTime.parse("-1:00:00"). 
- convertTo(Value.DATE).getString()); + assertEquals(-1, t1.compareTo(t2, null, null)); + assertEquals(1, t2.compareTo(t1, null, null)); } private void testValueTimestampWithTimezone() { @@ -189,148 +172,123 @@ private void testValueTimestampWithTimezone() { String s = "2011-" + (m < 10 ? "0" : "") + m + "-" + (d < 10 ? "0" : "") + d + " " + (h < 10 ? "0" : "") + h + ":00:00"; - ValueTimestamp ts = ValueTimestamp.parse(s + "Z"); + ValueTimestamp ts = ValueTimestamp.parse(s + "Z", null); String s2 = ts.getString(); - ValueTimestamp ts2 = ValueTimestamp.parse(s2); + ValueTimestamp ts2 = ValueTimestamp.parse(s2, null); assertEquals(ts.getString(), ts2.getString()); } } } } + @SuppressWarnings("unlikely-arg-type") private void testValueTimestamp() { assertEquals( - "2001-02-03 04:05:06.0", ValueTimestamp.get( - Timestamp.valueOf( - "2001-02-03 04:05:06")).getString()); + "2001-02-03 04:05:06", + LegacyDateTimeUtils.fromTimestamp(null, null, Timestamp.valueOf("2001-02-03 04:05:06")).getString()); assertEquals( - "2001-02-03 04:05:06.001002003", ValueTimestamp.get( - Timestamp.valueOf( - "2001-02-03 04:05:06.001002003")).getString()); + "2001-02-03 04:05:06.001002003", + LegacyDateTimeUtils.fromTimestamp(null, null, Timestamp.valueOf("2001-02-03 04:05:06.001002003")) + .getString()); assertEquals( - "0-00-00 00:00:00.0", ValueTimestamp.fromDateValueAndNanos(0, 0).getString()); + "0000-00-00 00:00:00", ValueTimestamp.fromDateValueAndNanos(0, 0).getString()); assertEquals( - "9999-12-31 23:59:59.0", - ValueTimestamp.parse( - "9999-12-31 23:59:59").getString()); + "9999-12-31 23:59:59", + ValueTimestamp.parse("9999-12-31 23:59:59", null).getString()); - assertEquals( - Integer.MAX_VALUE + - "-12-31 01:02:03.04050607", - ValueTimestamp.parse(Integer.MAX_VALUE + - "-12-31 01:02:03.0405060708").getString()); - assertEquals( - Integer.MIN_VALUE + - "-12-31 01:02:03.04050607", - ValueTimestamp.parse(Integer.MIN_VALUE + - "-12-31 01:02:03.0405060708").getString()); - - 
ValueTimestamp t1 = ValueTimestamp.parse("2001-01-01 01:01:01.111"); - assertEquals("2001-01-01 01:01:01.111", t1.getTimestamp().toString()); - assertEquals("2001-01-01", t1.getDate().toString()); - assertEquals("01:01:01", t1.getTime().toString()); - assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.getSQL()); + ValueTimestamp t1 = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); + assertEquals("2001-01-01 01:01:01.111", LegacyDateTimeUtils.toTimestamp(null, null, t1).toString()); + assertEquals("2001-01-01", LegacyDateTimeUtils.toDate(null, null, t1).toString()); + assertEquals("01:01:01", LegacyDateTimeUtils.toTime(null, null, t1).toString()); + assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.getTraceSQL()); assertEquals("TIMESTAMP '2001-01-01 01:01:01.111'", t1.toString()); - assertEquals(Value.TIMESTAMP, t1.getType()); + assertEquals(Value.TIMESTAMP, t1.getValueType()); long dateValue = t1.getDateValue(); long nanos = t1.getTimeNanos(); assertEquals((int) ((dateValue >>> 32) ^ dateValue ^ (nanos >>> 32) ^ nanos), t1.hashCode()); - assertEquals(t1.getString().length(), t1.getDisplaySize()); - assertEquals(ValueTimestamp.PRECISION, t1.getPrecision()); - assertEquals(10, t1.getScale()); - assertEquals("java.sql.Timestamp", t1.getObject().getClass().getName()); - ValueTimestamp t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111"); + // Literals return maximum precision + TypeInfo type = t1.getType(); + assertEquals(ValueTimestamp.MAXIMUM_PRECISION, type.getDisplaySize()); + assertEquals(ValueTimestamp.MAXIMUM_PRECISION, type.getPrecision()); + assertEquals(9, type.getScale()); + ValueTimestamp t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); assertTrue(t1 == t1b); Value.clearCache(); - t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111"); + t1b = ValueTimestamp.parse("2001-01-01 01:01:01.111", null); assertFalse(t1 == t1b); assertTrue(t1.equals(t1)); assertTrue(t1.equals(t1b)); assertTrue(t1b.equals(t1)); - assertEquals(0, 
t1.compareTo(t1b, null)); - assertEquals(0, t1b.compareTo(t1, null)); - ValueTimestamp t2 = ValueTimestamp.parse("2002-02-02 02:02:02.222"); + assertEquals(0, t1.compareTo(t1b, null, null)); + assertEquals(0, t1b.compareTo(t1, null, null)); + ValueTimestamp t2 = ValueTimestamp.parse("2002-02-02 02:02:02.222", null); assertFalse(t1.equals(t2)); assertFalse(t2.equals(t1)); - assertEquals(-1, t1.compareTo(t2, null)); - assertEquals(1, t2.compareTo(t1, null)); - t1 = ValueTimestamp.parse("2001-01-01 01:01:01.123456789"); + assertEquals(-1, t1.compareTo(t2, null, null)); + assertEquals(1, t2.compareTo(t1, null, null)); + SimpleCastDataProvider provider = new SimpleCastDataProvider(); + t1 = ValueTimestamp.parse("2001-01-01 01:01:01.123456789", null); assertEquals("2001-01-01 01:01:01.123456789", t1.getString()); assertEquals("2001-01-01 01:01:01.123456789", - t1.convertScale(true, 10).getString()); - assertEquals("2001-01-01 01:01:01.123456789", - t1.convertScale(true, 9).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 9, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12345679", - t1.convertScale(true, 8).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 8, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1234568", - t1.convertScale(true, 7).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 7, null), provider).getString()); assertEquals("2001-01-01 01:01:01.123457", - t1.convertScale(true, 6).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 6, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12346", - t1.convertScale(true, 5).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 5, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1235", - t1.convertScale(true, 4).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 4, null), provider).getString()); assertEquals("2001-01-01 01:01:01.123", - 
t1.convertScale(true, 3).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 3, null), provider).getString()); assertEquals("2001-01-01 01:01:01.12", - t1.convertScale(true, 2).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 2, null), provider).getString()); assertEquals("2001-01-01 01:01:01.1", - t1.convertScale(true, 1).getString()); - assertEquals("2001-01-01 01:01:01.0", - t1.convertScale(true, 0).getString()); - t1 = ValueTimestamp.parse("-2001-01-01 01:01:01.123456789"); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 1, null), provider).getString()); + assertEquals("2001-01-01 01:01:01", + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 0, null), provider).getString()); + t1 = ValueTimestamp.parse("-2001-01-01 01:01:01.123456789", null); assertEquals("-2001-01-01 01:01:01.123457", - t1.convertScale(true, 6).getString()); + t1.castTo(TypeInfo.getTypeInfo(Value.TIMESTAMP, 0L, 6, null), provider).getString()); // classes do not match - assertFalse(ValueTimestamp.parse("2001-01-01"). + assertFalse(ValueTimestamp.parse("2001-01-01", null). 
equals(ValueDate.parse("2001-01-01"))); - assertEquals("2001-01-01 01:01:01.0", - ValueTimestamp.parse("2001-01-01").add( - ValueTime.parse("01:01:01")).getString()); - assertEquals("2001-01-02 01:01:01.0", - ValueTimestamp.parse("2001-01-01").add( - ValueTime.parse("25:01:01")).getString()); - assertEquals("1010-10-10 00:00:00.0", - ValueTimestamp.parse("1010-10-10 10:10:10").subtract( - ValueTime.parse("10:10:10")).getString()); - assertEquals("1010-10-10 10:00:00.0", - ValueTimestamp.parse("1010-10-11 10:10:10").subtract( - ValueTime.parse("24:10:10")).getString()); - assertEquals("-2001-01-01 01:01:01.0", - ValueTimestamp.parse("-2001-01-01").add( - ValueTime.parse("01:01:01")).getString()); - assertEquals("-1010-10-10 00:00:00.0", - ValueTimestamp.parse("-1010-10-10 10:10:10").subtract( - ValueTime.parse("10:10:10")).getString()); + provider.currentTimestamp = ValueTimestampTimeZone.fromDateValueAndNanos(DateTimeUtils.EPOCH_DATE_VALUE, 0, + provider.currentTimeZone.getTimeZoneOffsetUTC(0L)); + assertEquals("2001-01-01 01:01:01", + ValueTimestamp.parse("2001-01-01", null).add( + ValueTime.parse("01:01:01").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); + assertEquals("1010-10-10 00:00:00", + ValueTimestamp.parse("1010-10-10 10:10:10", null).subtract( + ValueTime.parse("10:10:10").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); + assertEquals("-2001-01-01 01:01:01", + ValueTimestamp.parse("-2001-01-01", null).add( + ValueTime.parse("01:01:01").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); + assertEquals("-1010-10-10 00:00:00", + ValueTimestamp.parse("-1010-10-10 10:10:10", null).subtract( + ValueTime.parse("10:10:10").convertTo(TypeInfo.TYPE_TIMESTAMP, provider)).getString()); assertEquals(0, DateTimeUtils.absoluteDayFromDateValue( - ValueTimestamp.parse("1970-01-01").getDateValue())); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01").getTimeNanos()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01 00:00:00.000 
UTC").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "+1970-01-01T00:00:00.000Z").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01T00:00:00.000+00:00").getTimestamp().getTime()); - assertEquals(0, ValueTimestamp.parse( - "1970-01-01T00:00:00.000-00:00").getTimestamp().getTime()); - new AssertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2) { - @Override - public void test() { - ValueTimestamp.parse("1970-01-01 00:00:00.000 ABC"); - } - }; - new AssertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2) { - @Override - public void test() { - ValueTimestamp.parse("1970-01-01T00:00:00.000+ABC"); - } - }; + ValueTimestamp.parse("1970-01-01", null).getDateValue())); + assertEquals(0, ValueTimestamp.parse("1970-01-01", null).getTimeNanos()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01 00:00:00.000 UTC", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("+1970-01-01T00:00:00.000Z", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01T00:00:00.000+00:00", null)).getTime()); + assertEquals(0, LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("1970-01-01T00:00:00.000-00:00", null)).getTime()); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, + () -> ValueTimestamp.parse("1970-01-01 00:00:00.000 ABC", null)); + assertThrows(ErrorCode.INVALID_DATETIME_CONSTANT_2, + () -> ValueTimestamp.parse("1970-01-01T00:00:00.000+ABC", null)); } private void testAbsoluteDay() { @@ -346,19 +304,25 @@ private void testAbsoluteDay() { if (abs != next && next != Long.MIN_VALUE) { assertEquals(abs, next); } + if (m == 1 && d == 1) { + assertEquals(abs, DateTimeUtils.absoluteDayFromYear(y)); + } next = abs + 1; long d2 = DateTimeUtils.dateValueFromAbsoluteDay(abs); assertEquals(date, d2); assertEquals(y, DateTimeUtils.yearFromDateValue(date)); assertEquals(m, 
DateTimeUtils.monthFromDateValue(date)); assertEquals(d, DateTimeUtils.dayFromDateValue(date)); + long nextDateValue = DateTimeUtils.dateValueFromAbsoluteDay(next); + assertEquals(nextDateValue, DateTimeUtils.incrementDateValue(date)); + assertEquals(date, DateTimeUtils.decrementDateValue(nextDateValue)); } } } } private void testValidDate() { - Calendar c = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + Calendar c = TestDateTimeUtils.createGregorianCalendar(LegacyDateTimeUtils.UTC); c.setLenient(false); for (int y = -2000; y < 3000; y++) { for (int m = -3; m <= 14; m++) { @@ -368,7 +332,7 @@ private void testValidDate() { assertFalse(valid); } else if (d < 1 || d > 31) { assertFalse(valid); - } else if (y != 1582 && d >= 1 && d <= 27) { + } else if (d <= 27) { assertTrue(valid); } else { if (y <= 0) { @@ -424,8 +388,8 @@ private static void testCalculateLocalMillis() { } private static void testDate(int y, int m, int day) { - long millis = DateTimeUtils.getMillis( - TimeZone.getDefault(), y, m, day, 0, 0, 0, 0); + long millis = LegacyDateTimeUtils.getMillis(null, TimeZone.getDefault(), DateTimeUtils.dateValue(y, m, day), + 0); String st = new java.sql.Date(millis).toString(); int y2 = Integer.parseInt(st.substring(0, 4)); int m2 = Integer.parseInt(st.substring(5, 7)); @@ -437,69 +401,13 @@ private static void testDate(int y, int m, int day) { } } - private void testTimeOperationsAcrossTimeZones() { - if (!SysProperties.STORE_LOCAL_TIME) { - return; - } - TimeZone defaultTimeZone = TimeZone.getDefault(); - ArrayList distinct = TestDate.getDistinctTimeZones(); - Data d = Data.create(null, 10240); - try { - for (TimeZone tz : distinct) { - TimeZone.setDefault(tz); - DateTimeUtils.resetCalendar(); - d.reset(); - for (int m = 1; m <= 12; m++) { - for (int h = 0; h <= 23; h++) { - if (h == 0 || h == 2 || h == 3) { - // those hours may not exist for all days in all - // timezones because of daylight saving - continue; - } - String s = "2000-" + (m < 10 ? 
"0" + m : m) + - "-01 " + (h < 10 ? "0" + h : h) + ":00:00.0"; - d.writeValue(ValueString.get(s)); - d.writeValue(ValueTimestamp.get(Timestamp.valueOf(s))); - } - } - d.writeValue(ValueNull.INSTANCE); - d.reset(); - for (TimeZone target : distinct) { - if ("Pacific/Kiritimati".equals(target.getID())) { - // there is a problem with this time zone, but it seems - // unrelated to this database (possibly wrong timezone - // information?) - continue; - } - TimeZone.setDefault(target); - DateTimeUtils.resetCalendar(); - while (true) { - Value v = d.readValue(); - if (v == ValueNull.INSTANCE) { - break; - } - String a = v.getString(); - String b = d.readValue().getString(); - if (!a.equals(b)) { - assertEquals("source: " + tz.getID() + " target: " + - target.getID(), a, b); - } - } - } - } - } finally { - TimeZone.setDefault(defaultTimeZone); - DateTimeUtils.resetCalendar(); - } - } - /** * Get the list of timezones with distinct rules. * * @return the list */ public static ArrayList getDistinctTimeZones() { - ArrayList distinct = New.arrayList(); + ArrayList distinct = new ArrayList<>(); for (String id : TimeZone.getAvailableIDs()) { TimeZone t = TimeZone.getTimeZone(id); for (TimeZone d : distinct) { @@ -516,24 +424,37 @@ public static ArrayList getDistinctTimeZones() { } private void testDateTimeUtils() { - ValueTimestamp ts1 = ValueTimestamp.parse("-999-08-07 13:14:15.16"); - ValueTimestamp ts2 = ValueTimestamp.parse("19999-08-07 13:14:15.16"); - ValueTime t1 = (ValueTime) ts1.convertTo(Value.TIME); - ValueTime t2 = (ValueTime) ts2.convertTo(Value.TIME); - ValueDate d1 = (ValueDate) ts1.convertTo(Value.DATE); - ValueDate d2 = (ValueDate) ts2.convertTo(Value.DATE); - assertEquals("-999-08-07 13:14:15.16", ts1.getString()); - assertEquals("-999-08-07", d1.getString()); - assertEquals("13:14:15.16", t1.getString()); - assertEquals("19999-08-07 13:14:15.16", ts2.getString()); - assertEquals("19999-08-07", d2.getString()); - assertEquals("13:14:15.16", t2.getString()); - 
ValueTimestamp ts1a = DateTimeUtils.convertTimestamp( - ts1.getTimestamp(), Calendar.getInstance()); - ValueTimestamp ts2a = DateTimeUtils.convertTimestamp( - ts2.getTimestamp(), Calendar.getInstance()); - assertEquals("-999-08-07 13:14:15.16", ts1a.getString()); - assertEquals("19999-08-07 13:14:15.16", ts2a.getString()); + TimeZone old = TimeZone.getDefault(); + /* + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset + */ + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + DateTimeUtils.resetCalendar(); + try { + ValueTimestamp ts1 = ValueTimestamp.parse("-999-08-07 13:14:15.16", null); + ValueTimestamp ts2 = ValueTimestamp.parse("19999-08-07 13:14:15.16", null); + ValueTime t1 = (ValueTime) ts1.convertTo(TypeInfo.TYPE_TIME); + ValueTime t2 = (ValueTime) ts2.convertTo(TypeInfo.TYPE_TIME); + ValueDate d1 = ts1.convertToDate(null); + ValueDate d2 = ts2.convertToDate(null); + assertEquals("-0999-08-07 13:14:15.16", ts1.getString()); + assertEquals("-0999-08-07", d1.getString()); + assertEquals("13:14:15.16", t1.getString()); + assertEquals("19999-08-07 13:14:15.16", ts2.getString()); + assertEquals("19999-08-07", d2.getString()); + assertEquals("13:14:15.16", t2.getString()); + TimeZone timeZone = TimeZone.getDefault(); + ValueTimestamp ts1a = LegacyDateTimeUtils.fromTimestamp(null, timeZone, + LegacyDateTimeUtils.toTimestamp(null, null, ts1)); + ValueTimestamp ts2a = LegacyDateTimeUtils.fromTimestamp(null, timeZone, + LegacyDateTimeUtils.toTimestamp(null, null, ts2)); + assertEquals("-0999-08-07 13:14:15.16", ts1a.getString()); + assertEquals("19999-08-07 13:14:15.16", ts2a.getString()); + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } } } diff --git a/h2/src/test/org/h2/test/unit/TestDateIso8601.java b/h2/src/test/org/h2/test/unit/TestDateIso8601.java index 09546a7288..b3cfe3fe25 100644 --- a/h2/src/test/org/h2/test/unit/TestDateIso8601.java +++ 
b/h2/src/test/org/h2/test/unit/TestDateIso8601.java @@ -1,25 +1,29 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Robert Rathsack (firstName dot lastName at gmx dot de) */ package org.h2.test.unit; -import java.sql.Timestamp; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Date; +import static org.h2.util.DateTimeUtils.getIsoDayOfWeek; +import static org.h2.util.DateTimeUtils.getIsoWeekOfYear; +import static org.h2.util.DateTimeUtils.getIsoWeekYear; import org.h2.test.TestBase; -import org.h2.util.DateTimeUtils; +import org.h2.value.ValueDate; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; /** * Test cases for DateTimeIso8601Utils. */ public class TestDateIso8601 extends TestBase { - private final SimpleDateFormat dateFormatter = - new SimpleDateFormat("yyyy-MM-dd"); + private enum Type { + DATE, TIMESTAMP, TIMESTAMP_TIMEZONE_0, TIMESTAMP_TIMEZONE_PLUS_18, TIMESTAMP_TIMEZONE_MINUS_18; + } + + private static Type type; /** * Run just this test. @@ -27,11 +31,44 @@ public class TestDateIso8601 extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); + } + + private static long parse(String s) { + if (type == null) { + throw new IllegalStateException(); + } + switch (type) { + case DATE: + return ValueDate.parse(s).getDateValue(); + case TIMESTAMP: + return ValueTimestamp.parse(s, null).getDateValue(); + case TIMESTAMP_TIMEZONE_0: + return ValueTimestampTimeZone.parse(s + " 00:00:00.0Z", null).getDateValue(); + case TIMESTAMP_TIMEZONE_PLUS_18: + return ValueTimestampTimeZone.parse(s + " 00:00:00+18:00", null).getDateValue(); + case TIMESTAMP_TIMEZONE_MINUS_18: + return ValueTimestampTimeZone.parse(s + " 00:00:00-18:00", null).getDateValue(); + default: + throw new IllegalStateException(); + } } @Override public void test() throws Exception { + type = Type.DATE; + doTest(); + type = Type.TIMESTAMP; + doTest(); + type = Type.TIMESTAMP_TIMEZONE_0; + doTest(); + type = Type.TIMESTAMP_TIMEZONE_PLUS_18; + doTest(); + type = Type.TIMESTAMP_TIMEZONE_MINUS_18; + doTest(); + } + + private void doTest() throws Exception { testIsoDayOfWeek(); testIsoWeekJanuary1thMonday(); testIsoWeekJanuary1thTuesday(); @@ -49,14 +86,6 @@ public void test() throws Exception { testIsoYearJanuary1thSunday(); } - private Date parse(String s) throws ParseException { - return dateFormatter.parse(s); - } - - private static int getIsoDayOfWeek(Date date) { - return DateTimeUtils.getIsoDayOfWeek(date); - } - /** * Test if day of week is returned as Monday = 1 to Sunday = 7. */ @@ -70,66 +99,61 @@ private void testIsoDayOfWeek() throws Exception { assertEquals(7, getIsoDayOfWeek(parse("2008-10-05"))); } - private static int getIsoWeek(Date date) { - Timestamp ts = new Timestamp(date.getTime()); - return DateTimeUtils.getIsoWeek(ts); - } - /** * January 1st is a Monday therefore the week belongs to the next year. 
*/ private void testIsoWeekJanuary1thMonday() throws Exception { - assertEquals(52, getIsoWeek(parse("2006-12-31"))); - assertEquals(1, getIsoWeek(parse("2007-01-01"))); - assertEquals(1, getIsoWeek(parse("2007-01-07"))); - assertEquals(2, getIsoWeek(parse("2007-01-08"))); + assertEquals(52, getIsoWeekOfYear(parse("2006-12-31"))); + assertEquals(1, getIsoWeekOfYear(parse("2007-01-01"))); + assertEquals(1, getIsoWeekOfYear(parse("2007-01-07"))); + assertEquals(2, getIsoWeekOfYear(parse("2007-01-08"))); } /** * January 1st is a Tuesday therefore the week belongs to the next year. */ private void testIsoWeekJanuary1thTuesday() throws Exception { - assertEquals(52, getIsoWeek(parse("2007-12-30"))); - assertEquals(1, getIsoWeek(parse("2007-12-31"))); - assertEquals(1, getIsoWeek(parse("2008-01-01"))); - assertEquals(1, getIsoWeek(parse("2008-01-06"))); - assertEquals(2, getIsoWeek(parse("2008-01-07"))); + assertEquals(52, getIsoWeekOfYear(parse("2007-12-30"))); + assertEquals(1, getIsoWeekOfYear(parse("2007-12-31"))); + assertEquals(1, getIsoWeekOfYear(parse("2008-01-01"))); + assertEquals(1, getIsoWeekOfYear(parse("2008-01-06"))); + assertEquals(2, getIsoWeekOfYear(parse("2008-01-07"))); } /** * January1th is a Wednesday therefore the week belongs to the next year. 
*/ private void testIsoWeekJanuary1thWednesday() throws Exception { - assertEquals(52, getIsoWeek(parse("2002-12-28"))); - assertEquals(52, getIsoWeek(parse("2002-12-29"))); - assertEquals(1, getIsoWeek(parse("2002-12-30"))); - assertEquals(1, getIsoWeek(parse("2002-12-31"))); - assertEquals(1, getIsoWeek(parse("2003-01-01"))); - assertEquals(1, getIsoWeek(parse("2003-01-05"))); - assertEquals(2, getIsoWeek(parse("2003-01-06"))); + assertEquals(52, getIsoWeekOfYear(parse("2002-12-28"))); + assertEquals(52, getIsoWeekOfYear(parse("2002-12-29"))); + assertEquals(1, getIsoWeekOfYear(parse("2002-12-30"))); + assertEquals(1, getIsoWeekOfYear(parse("2002-12-31"))); + assertEquals(1, getIsoWeekOfYear(parse("2003-01-01"))); + assertEquals(1, getIsoWeekOfYear(parse("2003-01-05"))); + assertEquals(2, getIsoWeekOfYear(parse("2003-01-06"))); } /** * January 1st is a Thursday therefore the week belongs to the next year. */ private void testIsoWeekJanuary1thThursday() throws Exception { - assertEquals(52, getIsoWeek(parse("2008-12-28"))); - assertEquals(1, getIsoWeek(parse("2008-12-29"))); - assertEquals(1, getIsoWeek(parse("2008-12-30"))); - assertEquals(1, getIsoWeek(parse("2008-12-31"))); - assertEquals(1, getIsoWeek(parse("2009-01-01"))); - assertEquals(1, getIsoWeek(parse("2009-01-04"))); - assertEquals(2, getIsoWeek(parse("2009-01-09"))); + assertEquals(52, getIsoWeekOfYear(parse("2008-12-28"))); + assertEquals(1, getIsoWeekOfYear(parse("2008-12-29"))); + assertEquals(1, getIsoWeekOfYear(parse("2008-12-30"))); + assertEquals(1, getIsoWeekOfYear(parse("2008-12-31"))); + assertEquals(1, getIsoWeekOfYear(parse("2009-01-01"))); + assertEquals(1, getIsoWeekOfYear(parse("2009-01-04"))); + assertEquals(2, getIsoWeekOfYear(parse("2009-01-09"))); } /** * January 1st is a Friday therefore the week belongs to the previous year. 
*/ private void testIsoWeekJanuary1thFriday() throws Exception { - assertEquals(53, getIsoWeek(parse("2009-12-31"))); - assertEquals(53, getIsoWeek(parse("2010-01-01"))); - assertEquals(53, getIsoWeek(parse("2010-01-03"))); - assertEquals(1, getIsoWeek(parse("2010-01-04"))); + assertEquals(53, getIsoWeekOfYear(parse("2009-12-31"))); + assertEquals(53, getIsoWeekOfYear(parse("2010-01-01"))); + assertEquals(53, getIsoWeekOfYear(parse("2010-01-03"))); + assertEquals(1, getIsoWeekOfYear(parse("2010-01-04"))); } /** @@ -137,39 +161,34 @@ private void testIsoWeekJanuary1thFriday() throws Exception { * year. */ private void testIsoWeekJanuary1thSaturday() throws Exception { - assertEquals(52, getIsoWeek(parse("2010-12-31"))); - assertEquals(52, getIsoWeek(parse("2011-01-01"))); - assertEquals(52, getIsoWeek(parse("2011-01-02"))); - assertEquals(1, getIsoWeek(parse("2011-01-03"))); + assertEquals(52, getIsoWeekOfYear(parse("2010-12-31"))); + assertEquals(52, getIsoWeekOfYear(parse("2011-01-01"))); + assertEquals(52, getIsoWeekOfYear(parse("2011-01-02"))); + assertEquals(1, getIsoWeekOfYear(parse("2011-01-03"))); } /** * January 1st is a Sunday therefore the week belongs to the previous year. 
*/ private void testIsoWeekJanuary1thSunday() throws Exception { - assertEquals(52, getIsoWeek(parse("2011-12-31"))); - assertEquals(52, getIsoWeek(parse("2012-01-01"))); - assertEquals(1, getIsoWeek(parse("2012-01-02"))); - assertEquals(1, getIsoWeek(parse("2012-01-08"))); - assertEquals(2, getIsoWeek(parse("2012-01-09"))); - } - - private static int getIsoYear(Date date) { - Timestamp ts = new Timestamp(date.getTime()); - return DateTimeUtils.getIsoYear(ts); + assertEquals(52, getIsoWeekOfYear(parse("2011-12-31"))); + assertEquals(52, getIsoWeekOfYear(parse("2012-01-01"))); + assertEquals(1, getIsoWeekOfYear(parse("2012-01-02"))); + assertEquals(1, getIsoWeekOfYear(parse("2012-01-08"))); + assertEquals(2, getIsoWeekOfYear(parse("2012-01-09"))); } /** * January 1st is a Monday therefore year is equal to isoYear. */ private void testIsoYearJanuary1thMonday() throws Exception { - assertEquals(2006, getIsoYear(parse("2006-12-28"))); - assertEquals(2006, getIsoYear(parse("2006-12-29"))); - assertEquals(2006, getIsoYear(parse("2006-12-30"))); - assertEquals(2006, getIsoYear(parse("2006-12-31"))); - assertEquals(2007, getIsoYear(parse("2007-01-01"))); - assertEquals(2007, getIsoYear(parse("2007-01-02"))); - assertEquals(2007, getIsoYear(parse("2007-01-03"))); + assertEquals(2006, getIsoWeekYear(parse("2006-12-28"))); + assertEquals(2006, getIsoWeekYear(parse("2006-12-29"))); + assertEquals(2006, getIsoWeekYear(parse("2006-12-30"))); + assertEquals(2006, getIsoWeekYear(parse("2006-12-31"))); + assertEquals(2007, getIsoWeekYear(parse("2007-01-01"))); + assertEquals(2007, getIsoWeekYear(parse("2007-01-02"))); + assertEquals(2007, getIsoWeekYear(parse("2007-01-03"))); } /** @@ -177,14 +196,14 @@ private void testIsoYearJanuary1thMonday() throws Exception { * year. 
*/ private void testIsoYearJanuary1thTuesday() throws Exception { - assertEquals(2007, getIsoYear(parse("2007-12-28"))); - assertEquals(2007, getIsoYear(parse("2007-12-29"))); - assertEquals(2007, getIsoYear(parse("2007-12-30"))); - assertEquals(2008, getIsoYear(parse("2007-12-31"))); - assertEquals(2008, getIsoYear(parse("2008-01-01"))); - assertEquals(2008, getIsoYear(parse("2008-01-02"))); - assertEquals(2008, getIsoYear(parse("2008-01-03"))); - assertEquals(2008, getIsoYear(parse("2008-01-04"))); + assertEquals(2007, getIsoWeekYear(parse("2007-12-28"))); + assertEquals(2007, getIsoWeekYear(parse("2007-12-29"))); + assertEquals(2007, getIsoWeekYear(parse("2007-12-30"))); + assertEquals(2008, getIsoWeekYear(parse("2007-12-31"))); + assertEquals(2008, getIsoWeekYear(parse("2008-01-01"))); + assertEquals(2008, getIsoWeekYear(parse("2008-01-02"))); + assertEquals(2008, getIsoWeekYear(parse("2008-01-03"))); + assertEquals(2008, getIsoWeekYear(parse("2008-01-04"))); } /** @@ -192,13 +211,13 @@ private void testIsoYearJanuary1thTuesday() throws Exception { * the next year. 
*/ private void testIsoYearJanuary1thWednesday() throws Exception { - assertEquals(2002, getIsoYear(parse("2002-12-28"))); - assertEquals(2002, getIsoYear(parse("2002-12-29"))); - assertEquals(2003, getIsoYear(parse("2002-12-30"))); - assertEquals(2003, getIsoYear(parse("2002-12-31"))); - assertEquals(2003, getIsoYear(parse("2003-01-01"))); - assertEquals(2003, getIsoYear(parse("2003-01-02"))); - assertEquals(2003, getIsoYear(parse("2003-12-02"))); + assertEquals(2002, getIsoWeekYear(parse("2002-12-28"))); + assertEquals(2002, getIsoWeekYear(parse("2002-12-29"))); + assertEquals(2003, getIsoWeekYear(parse("2002-12-30"))); + assertEquals(2003, getIsoWeekYear(parse("2002-12-31"))); + assertEquals(2003, getIsoWeekYear(parse("2003-01-01"))); + assertEquals(2003, getIsoWeekYear(parse("2003-01-02"))); + assertEquals(2003, getIsoWeekYear(parse("2003-12-02"))); } /** @@ -206,14 +225,14 @@ private void testIsoYearJanuary1thWednesday() throws Exception { * next year. */ private void testIsoYearJanuary1thThursday() throws Exception { - assertEquals(2008, getIsoYear(parse("2008-12-28"))); - assertEquals(2009, getIsoYear(parse("2008-12-29"))); - assertEquals(2009, getIsoYear(parse("2008-12-30"))); - assertEquals(2009, getIsoYear(parse("2008-12-31"))); - assertEquals(2009, getIsoYear(parse("2009-01-01"))); - assertEquals(2009, getIsoYear(parse("2009-01-02"))); - assertEquals(2009, getIsoYear(parse("2009-01-03"))); - assertEquals(2009, getIsoYear(parse("2009-01-04"))); + assertEquals(2008, getIsoWeekYear(parse("2008-12-28"))); + assertEquals(2009, getIsoWeekYear(parse("2008-12-29"))); + assertEquals(2009, getIsoWeekYear(parse("2008-12-30"))); + assertEquals(2009, getIsoWeekYear(parse("2008-12-31"))); + assertEquals(2009, getIsoWeekYear(parse("2009-01-01"))); + assertEquals(2009, getIsoWeekYear(parse("2009-01-02"))); + assertEquals(2009, getIsoWeekYear(parse("2009-01-03"))); + assertEquals(2009, getIsoWeekYear(parse("2009-01-04"))); } /** @@ -221,14 +240,14 @@ private void 
testIsoYearJanuary1thThursday() throws Exception { * previous year. */ private void testIsoYearJanuary1thFriday() throws Exception { - assertEquals(2009, getIsoYear(parse("2009-12-28"))); - assertEquals(2009, getIsoYear(parse("2009-12-29"))); - assertEquals(2009, getIsoYear(parse("2009-12-30"))); - assertEquals(2009, getIsoYear(parse("2009-12-31"))); - assertEquals(2009, getIsoYear(parse("2010-01-01"))); - assertEquals(2009, getIsoYear(parse("2010-01-02"))); - assertEquals(2009, getIsoYear(parse("2010-01-03"))); - assertEquals(2010, getIsoYear(parse("2010-01-04"))); + assertEquals(2009, getIsoWeekYear(parse("2009-12-28"))); + assertEquals(2009, getIsoWeekYear(parse("2009-12-29"))); + assertEquals(2009, getIsoWeekYear(parse("2009-12-30"))); + assertEquals(2009, getIsoWeekYear(parse("2009-12-31"))); + assertEquals(2009, getIsoWeekYear(parse("2010-01-01"))); + assertEquals(2009, getIsoWeekYear(parse("2010-01-02"))); + assertEquals(2009, getIsoWeekYear(parse("2010-01-03"))); + assertEquals(2010, getIsoWeekYear(parse("2010-01-04"))); } /** @@ -236,28 +255,28 @@ private void testIsoYearJanuary1thFriday() throws Exception { * previous year. 
*/ private void testIsoYearJanuary1thSaturday() throws Exception { - assertEquals(2010, getIsoYear(parse("2010-12-28"))); - assertEquals(2010, getIsoYear(parse("2010-12-29"))); - assertEquals(2010, getIsoYear(parse("2010-12-30"))); - assertEquals(2010, getIsoYear(parse("2010-12-31"))); - assertEquals(2010, getIsoYear(parse("2011-01-01"))); - assertEquals(2010, getIsoYear(parse("2011-01-02"))); - assertEquals(2011, getIsoYear(parse("2011-01-03"))); - assertEquals(2011, getIsoYear(parse("2011-01-04"))); + assertEquals(2010, getIsoWeekYear(parse("2010-12-28"))); + assertEquals(2010, getIsoWeekYear(parse("2010-12-29"))); + assertEquals(2010, getIsoWeekYear(parse("2010-12-30"))); + assertEquals(2010, getIsoWeekYear(parse("2010-12-31"))); + assertEquals(2010, getIsoWeekYear(parse("2011-01-01"))); + assertEquals(2010, getIsoWeekYear(parse("2011-01-02"))); + assertEquals(2011, getIsoWeekYear(parse("2011-01-03"))); + assertEquals(2011, getIsoWeekYear(parse("2011-01-04"))); } /** * January 1st is a Sunday therefore this day belong to the previous year. 
*/ private void testIsoYearJanuary1thSunday() throws Exception { - assertEquals(2011, getIsoYear(parse("2011-12-28"))); - assertEquals(2011, getIsoYear(parse("2011-12-29"))); - assertEquals(2011, getIsoYear(parse("2011-12-30"))); - assertEquals(2011, getIsoYear(parse("2011-12-31"))); - assertEquals(2011, getIsoYear(parse("2012-01-01"))); - assertEquals(2012, getIsoYear(parse("2012-01-02"))); - assertEquals(2012, getIsoYear(parse("2012-01-03"))); - assertEquals(2012, getIsoYear(parse("2012-01-04"))); + assertEquals(2011, getIsoWeekYear(parse("2011-12-28"))); + assertEquals(2011, getIsoWeekYear(parse("2011-12-29"))); + assertEquals(2011, getIsoWeekYear(parse("2011-12-30"))); + assertEquals(2011, getIsoWeekYear(parse("2011-12-31"))); + assertEquals(2011, getIsoWeekYear(parse("2012-01-01"))); + assertEquals(2012, getIsoWeekYear(parse("2012-01-02"))); + assertEquals(2012, getIsoWeekYear(parse("2012-01-03"))); + assertEquals(2012, getIsoWeekYear(parse("2012-01-04"))); } } diff --git a/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java b/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java new file mode 100644 index 0000000000..e3aa5bf848 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestDateTimeUtils.java @@ -0,0 +1,327 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import static org.h2.util.DateTimeUtils.dateValue; + +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.TimeZone; + +import org.h2.api.IntervalQualifier; +import org.h2.test.TestBase; +import org.h2.util.DateTimeUtils; +import org.h2.util.IntervalUtils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.value.ValueInterval; +import org.h2.value.ValueTimestamp; + +/** + * Unit tests for the DateTimeUtils and IntervalUtils classes. 
+ */ +public class TestDateTimeUtils extends TestBase { + + /** + * Creates a proleptic Gregorian calendar for the given timezone using the + * default locale. + * + * @param tz timezone for the calendar, is never null + * @return a new calendar instance. + */ + public static GregorianCalendar createGregorianCalendar(TimeZone tz) { + GregorianCalendar c = new GregorianCalendar(tz); + c.setGregorianChange(LegacyDateTimeUtils.PROLEPTIC_GREGORIAN_CHANGE); + return c; + } + + /** + * Run just this test. + * + * @param a + * if {@code "testUtc2Value"} only {@link #testUTC2Value(boolean)} + * will be executed with all time zones (slow). Otherwise all tests + * in this test unit will be executed with local time zone. + */ + public static void main(String... a) throws Exception { + if (a.length == 1) { + if ("testUtc2Value".equals(a[0])) { + new TestDateTimeUtils().testUTC2Value(true); + return; + } + } + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testParseTimeNanosDB2Format(); + testDayOfWeek(); + testWeekOfYear(); + testDateValueFromDenormalizedDate(); + testUTC2Value(false); + testConvertScale(); + testParseInterval(); + testGetTimeZoneOffset(); + } + + private void testParseTimeNanosDB2Format() { + assertEquals(3723004000000L, DateTimeUtils.parseTimeNanos("01:02:03.004", 0, 12)); + assertEquals(3723004000000L, DateTimeUtils.parseTimeNanos("01.02.03.004", 0, 12)); + + assertEquals(3723000000000L, DateTimeUtils.parseTimeNanos("01:02:03", 0, 8)); + assertEquals(3723000000000L, DateTimeUtils.parseTimeNanos("01.02.03", 0, 8)); + } + + /** + * Test for {@link DateTimeUtils#getSundayDayOfWeek(long)} and + * {@link DateTimeUtils#getIsoDayOfWeek(long)}. 
+ */ + private void testDayOfWeek() { + GregorianCalendar gc = createGregorianCalendar(LegacyDateTimeUtils.UTC); + for (int i = -1_000_000; i <= 1_000_000; i++) { + gc.clear(); + gc.setTimeInMillis(i * 86400000L); + int year = gc.get(Calendar.YEAR); + if (gc.get(Calendar.ERA) == GregorianCalendar.BC) { + year = 1 - year; + } + long expectedDateValue = dateValue(year, gc.get(Calendar.MONTH) + 1, + gc.get(Calendar.DAY_OF_MONTH)); + long dateValue = DateTimeUtils.dateValueFromAbsoluteDay(i); + assertEquals(expectedDateValue, dateValue); + assertEquals(i, DateTimeUtils.absoluteDayFromDateValue(dateValue)); + int dow = gc.get(Calendar.DAY_OF_WEEK); + assertEquals(dow, DateTimeUtils.getSundayDayOfWeek(dateValue)); + int isoDow = (dow + 5) % 7 + 1; + assertEquals(isoDow, DateTimeUtils.getIsoDayOfWeek(dateValue)); + assertEquals(gc.get(Calendar.WEEK_OF_YEAR), + DateTimeUtils.getWeekOfYear(dateValue, gc.getFirstDayOfWeek() - 1, + gc.getMinimalDaysInFirstWeek())); + } + } + + /** + * Test for {@link DateTimeUtils#getDayOfYear(long)}, + * {@link DateTimeUtils#getWeekOfYear(long, int, int)} and + * {@link DateTimeUtils#getWeekYear(long, int, int)}. 
+ */ + private void testWeekOfYear() { + GregorianCalendar gc = new GregorianCalendar(LegacyDateTimeUtils.UTC); + for (int firstDay = 1; firstDay <= 7; firstDay++) { + gc.setFirstDayOfWeek(firstDay); + for (int minimalDays = 1; minimalDays <= 7; minimalDays++) { + gc.setMinimalDaysInFirstWeek(minimalDays); + for (int i = 0; i < 150_000; i++) { + long dateValue = DateTimeUtils.dateValueFromAbsoluteDay(i); + gc.clear(); + gc.setTimeInMillis(i * 86400000L); + assertEquals(gc.get(Calendar.DAY_OF_YEAR), DateTimeUtils.getDayOfYear(dateValue)); + assertEquals(gc.get(Calendar.WEEK_OF_YEAR), + DateTimeUtils.getWeekOfYear(dateValue, firstDay - 1, minimalDays)); + assertEquals(gc.getWeekYear(), DateTimeUtils.getWeekYear(dateValue, firstDay - 1, minimalDays)); + } + } + } + } + + /** + * Test for {@link DateTimeUtils#dateValueFromDenormalizedDate(long, long, int)}. + */ + private void testDateValueFromDenormalizedDate() { + assertEquals(dateValue(2017, 1, 1), DateTimeUtils.dateValueFromDenormalizedDate(2018, -11, 0)); + assertEquals(dateValue(2001, 2, 28), DateTimeUtils.dateValueFromDenormalizedDate(2000, 14, 29)); + assertEquals(dateValue(1999, 8, 1), DateTimeUtils.dateValueFromDenormalizedDate(2000, -4, -100)); + assertEquals(dateValue(2100, 12, 31), DateTimeUtils.dateValueFromDenormalizedDate(2100, 12, 2000)); + assertEquals(dateValue(-100, 2, 28), DateTimeUtils.dateValueFromDenormalizedDate(-100, 2, 30)); + } + + private void testUTC2Value(boolean allTimeZones) { + TimeZone def = TimeZone.getDefault(); + GregorianCalendar gc = new GregorianCalendar(); + String[] ids = allTimeZones ? 
TimeZone.getAvailableIDs() + : new String[] { def.getID(), "+10", + // Any time zone with DST in the future (JDK-8073446) + "America/New_York" }; + try { + for (String id : ids) { + if (allTimeZones) { + System.out.println(id); + } + TimeZone tz = TimeZone.getTimeZone(id); + TimeZone.setDefault(tz); + DateTimeUtils.resetCalendar(); + testUTC2ValueImpl(tz, gc); + } + } finally { + TimeZone.setDefault(def); + DateTimeUtils.resetCalendar(); + } + } + + private void testUTC2ValueImpl(TimeZone tz, GregorianCalendar gc) { + gc.setTimeZone(tz); + gc.set(Calendar.MILLISECOND, 0); + long absoluteStart = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.dateValue(1950, 01, 01)); + long absoluteEnd = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.dateValue(2050, 01, 01)); + for (long i = absoluteStart; i < absoluteEnd; i++) { + long dateValue = DateTimeUtils.dateValueFromAbsoluteDay(i); + int year = DateTimeUtils.yearFromDateValue(dateValue); + int month = DateTimeUtils.monthFromDateValue(dateValue); + int day = DateTimeUtils.dayFromDateValue(dateValue); + for (int j = 0; j < 48; j++) { + gc.set(year, month - 1, day, j / 2, (j & 1) * 30, 0); + long timeMillis = gc.getTimeInMillis(); + ValueTimestamp ts = LegacyDateTimeUtils.fromTimestamp(null, null, new Timestamp(timeMillis)); + timeMillis += LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, timeMillis); + assertEquals(ts.getDateValue(), LegacyDateTimeUtils.dateValueFromLocalMillis(timeMillis)); + assertEquals(ts.getTimeNanos(), LegacyDateTimeUtils.nanosFromLocalMillis(timeMillis)); + } + } + } + + private void testConvertScale() { + assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9, Long.MAX_VALUE)); + assertEquals(555_555_555_550L, DateTimeUtils.convertScale(555_555_555_554L, 8, Long.MAX_VALUE)); + assertEquals(555_555_555_500L, DateTimeUtils.convertScale(555_555_555_549L, 7, Long.MAX_VALUE)); + assertEquals(555_555_555_000L, DateTimeUtils.convertScale(555_555_555_499L, 6, 
Long.MAX_VALUE)); + assertEquals(555_555_550_000L, DateTimeUtils.convertScale(555_555_554_999L, 5, Long.MAX_VALUE)); + assertEquals(555_555_500_000L, DateTimeUtils.convertScale(555_555_549_999L, 4, Long.MAX_VALUE)); + assertEquals(555_555_000_000L, DateTimeUtils.convertScale(555_555_499_999L, 3, Long.MAX_VALUE)); + assertEquals(555_550_000_000L, DateTimeUtils.convertScale(555_554_999_999L, 2, Long.MAX_VALUE)); + assertEquals(555_500_000_000L, DateTimeUtils.convertScale(555_549_999_999L, 1, Long.MAX_VALUE)); + assertEquals(555_000_000_000L, DateTimeUtils.convertScale(555_499_999_999L, 0, Long.MAX_VALUE)); + assertEquals(555_555_555_555L, DateTimeUtils.convertScale(555_555_555_555L, 9, Long.MAX_VALUE)); + assertEquals(555_555_555_560L, DateTimeUtils.convertScale(555_555_555_555L, 8, Long.MAX_VALUE)); + assertEquals(555_555_555_600L, DateTimeUtils.convertScale(555_555_555_550L, 7, Long.MAX_VALUE)); + assertEquals(555_555_556_000L, DateTimeUtils.convertScale(555_555_555_500L, 6, Long.MAX_VALUE)); + assertEquals(555_555_560_000L, DateTimeUtils.convertScale(555_555_555_000L, 5, Long.MAX_VALUE)); + assertEquals(555_555_600_000L, DateTimeUtils.convertScale(555_555_550_000L, 4, Long.MAX_VALUE)); + assertEquals(555_556_000_000L, DateTimeUtils.convertScale(555_555_500_000L, 3, Long.MAX_VALUE)); + assertEquals(555_560_000_000L, DateTimeUtils.convertScale(555_555_000_000L, 2, Long.MAX_VALUE)); + assertEquals(555_600_000_000L, DateTimeUtils.convertScale(555_550_000_000L, 1, Long.MAX_VALUE)); + assertEquals(556_000_000_000L, DateTimeUtils.convertScale(555_500_000_000L, 0, Long.MAX_VALUE)); + assertEquals(100_999_999_999L, DateTimeUtils.convertScale(100_999_999_999L, 9, Long.MAX_VALUE)); + assertEquals(100_999_999_999L, DateTimeUtils.convertScale(100_999_999_999L, 9, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_999L, DateTimeUtils.convertScale(86_399_999_999_999L, 9, Long.MAX_VALUE)); + for (int i = 8; i >= 0; i--) { + assertEquals(101_000_000_000L, 
DateTimeUtils.convertScale(100_999_999_999L, i, Long.MAX_VALUE)); + assertEquals(101_000_000_000L, + DateTimeUtils.convertScale(100_999_999_999L, i, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_400_000_000_000L, DateTimeUtils.convertScale(86_399_999_999_999L, i, Long.MAX_VALUE)); + } + assertEquals(86_399_999_999_999L, + DateTimeUtils.convertScale(86_399_999_999_999L, 9, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_990L, + DateTimeUtils.convertScale(86_399_999_999_999L, 8, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_900L, + DateTimeUtils.convertScale(86_399_999_999_999L, 7, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_999_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 6, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_990_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 5, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_900_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 4, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_999_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 3, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_990_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 2, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_900_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 1, DateTimeUtils.NANOS_PER_DAY)); + assertEquals(86_399_000_000_000L, + DateTimeUtils.convertScale(86_399_999_999_999L, 0, DateTimeUtils.NANOS_PER_DAY)); + } + + private void testParseInterval() { + testParseIntervalSimple(IntervalQualifier.YEAR); + testParseIntervalSimple(IntervalQualifier.MONTH); + testParseIntervalSimple(IntervalQualifier.DAY); + testParseIntervalSimple(IntervalQualifier.HOUR); + testParseIntervalSimple(IntervalQualifier.MINUTE); + testParseIntervalSimple(IntervalQualifier.SECOND); + + testParseInterval(IntervalQualifier.YEAR_TO_MONTH, 10, 0, "10", "10-0"); + testParseInterval(IntervalQualifier.YEAR_TO_MONTH, 10, 11, "10-11"); + + 
testParseInterval(IntervalQualifier.DAY_TO_HOUR, 10, 0, "10", "10 00"); + testParseInterval(IntervalQualifier.DAY_TO_HOUR, 10, 11, "10 11"); + + testParseInterval(IntervalQualifier.DAY_TO_MINUTE, 10, 0, "10", "10 00:00"); + testParseInterval(IntervalQualifier.DAY_TO_MINUTE, 10, 11 * 60, "10 11", "10 11:00"); + testParseInterval(IntervalQualifier.DAY_TO_MINUTE, 10, 11 * 60 + 12, "10 11:12"); + + testParseInterval(IntervalQualifier.DAY_TO_SECOND, 10, 0, "10 00:00:00"); + testParseInterval(IntervalQualifier.DAY_TO_SECOND, 10, 11 * 3_600_000_000_000L, "10 11", "10 11:00:00"); + testParseInterval(IntervalQualifier.DAY_TO_SECOND, 10, 11 * 3_600_000_000_000L + 12 * 60_000_000_000L, + "10 11:12", "10 11:12:00"); + testParseInterval(IntervalQualifier.DAY_TO_SECOND, + 10, 11 * 3_600_000_000_000L + 12 * 60_000_000_000L + 13_000_000_000L, + "10 11:12:13"); + testParseInterval(IntervalQualifier.DAY_TO_SECOND, + 10, 11 * 3_600_000_000_000L + 12 * 60_000_000_000L + 13_123_456_789L, + "10 11:12:13.123456789"); + + testParseInterval(IntervalQualifier.HOUR_TO_MINUTE, 10, 0, "10", "10:00"); + testParseInterval(IntervalQualifier.HOUR_TO_MINUTE, 10, 11, "10:11"); + + testParseInterval(IntervalQualifier.HOUR_TO_SECOND, 10, 0, "10", "10:00:00"); + testParseInterval(IntervalQualifier.HOUR_TO_SECOND, 10, 11 * 60_000_000_000L, "10:11", "10:11:00"); + testParseInterval(IntervalQualifier.HOUR_TO_SECOND, 10, 11 * 60_000_000_000L + 12_000_000_000L, + "10:11:12"); + testParseInterval(IntervalQualifier.HOUR_TO_SECOND, 10, 11 * 60_000_000_000L + 12_123_456_789L, + "10:11:12.123456789"); + + testParseInterval(IntervalQualifier.MINUTE_TO_SECOND, 10, 0, "10", "10:00"); + testParseInterval(IntervalQualifier.MINUTE_TO_SECOND, 10, 11_000_000_000L, "10:11", "10:11"); + testParseInterval(IntervalQualifier.MINUTE_TO_SECOND, 10, 11_123_456_789L, "10:11.123456789"); + } + + private void testParseIntervalSimple(IntervalQualifier qualifier) { + testParseInterval(qualifier, 10, 0, "10"); + } + + private void 
testParseInterval(IntervalQualifier qualifier, long leading, long remaining, String s) { + testParseInterval(qualifier, leading, remaining, s, s); + } + + private void testParseInterval(IntervalQualifier qualifier, long leading, long remaining, String s, String full) { + testParseIntervalImpl(qualifier, false, leading, remaining, s, full); + testParseIntervalImpl(qualifier, true, leading, remaining, s, full); + } + + private void testParseIntervalImpl(IntervalQualifier qualifier, boolean negative, long leading, long remaining, + String s, String full) { + ValueInterval expected = ValueInterval.from(qualifier, negative, leading, remaining); + assertEquals(expected, IntervalUtils.parseInterval(qualifier, negative, s)); + StringBuilder b = new StringBuilder(); + b.append("INTERVAL ").append('\''); + if (negative) { + b.append('-'); + } + b.append(full).append("' ").append(qualifier); + assertEquals(b.toString(), expected.getString()); + } + + private void testGetTimeZoneOffset() { + TimeZone old = TimeZone.getDefault(); + TimeZone timeZone = TimeZone.getTimeZone("Europe/Paris"); + TimeZone.setDefault(timeZone); + DateTimeUtils.resetCalendar(); + try { + long n = -1111971600; + assertEquals(3_600, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n - 1)); + assertEquals(3_600_000, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000 - 1)); + assertEquals(0, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n)); + assertEquals(0, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000)); + assertEquals(0, DateTimeUtils.getTimeZone().getTimeZoneOffsetUTC(n + 1)); + assertEquals(0, LegacyDateTimeUtils.getTimeZoneOffsetMillis(null, n * 1_000 + 1)); + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestDbException.java b/h2/src/test/org/h2/test/unit/TestDbException.java new file mode 100644 index 0000000000..014b3d63e8 --- /dev/null +++ 
b/h2/src/test/org/h2/test/unit/TestDbException.java @@ -0,0 +1,55 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.sql.SQLException; + +import org.h2.api.ErrorCode; +import org.h2.jdbc.JdbcException; +import org.h2.jdbc.JdbcSQLException; +import org.h2.message.DbException; +import org.h2.test.TestBase; + +/** + * Tests DbException class. + */ +public class TestDbException extends TestBase { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testGetJdbcSQLException(); + } + + private void testGetJdbcSQLException() throws Exception { + for (Field field : ErrorCode.class.getDeclaredFields()) { + if (field.getModifiers() == (Modifier.PUBLIC | Modifier.STATIC | Modifier.FINAL)) { + int errorCode = field.getInt(null); + SQLException exception = DbException.getJdbcSQLException(errorCode); + if (exception instanceof JdbcSQLException) { + fail("Custom exception expected for " + ErrorCode.class.getName() + '.' + field.getName() + " (" + + errorCode + ')'); + } + if (!(exception instanceof JdbcException)) { + fail("Custom exception for " + ErrorCode.class.getName() + '.' + field.getName() + " (" + errorCode + + ") should implement JdbcException"); + } + } + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestExit.java b/h2/src/test/org/h2/test/unit/TestExit.java index 5bed620909..472a627dc3 100644 --- a/h2/src/test/org/h2/test/unit/TestExit.java +++ b/h2/src/test/org/h2/test/unit/TestExit.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -13,12 +13,13 @@ import org.h2.api.DatabaseEventListener; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.SelfDestructor; /** * Tests the flag db_close_on_exit. A new process is started. */ -public class TestExit extends TestBase { +public class TestExit extends TestDb { private static Connection conn; @@ -26,17 +27,22 @@ public class TestExit extends TestBase { OPEN_WITHOUT_CLOSE_ON_EXIT = 2; @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.codeCoverage || config.networked) { - return; + return false; } if (getBaseDir().indexOf(':') > 0) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { deleteDb("exit"); String url = getURL(OPEN_WITH_CLOSE_ON_EXIT); String selfDestruct = SelfDestructor.getPropertyString(60); - String[] procDef = { "java", selfDestruct, "-cp", getClassPath(), + String[] procDef = { getJVM(), selfDestruct, "-cp", getClassPath(), getClass().getName(), url }; Process proc = Runtime.getRuntime().exec(procDef); while (true) { @@ -59,7 +65,7 @@ public void test() throws Exception { fail("did not close database"); } url = getURL(OPEN_WITHOUT_CLOSE_ON_EXIT); - procDef = new String[] { "java", "-cp", getClassPath(), + procDef = new String[] { getJVM(), "-cp", getClassPath(), getClass().getName(), url }; proc = Runtime.getRuntime().exec(procDef); proc.waitFor(); @@ -127,13 +133,7 @@ static File getClosedFile() { /** * A database event listener used in this test. 
*/ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void exceptionThrown(SQLException e, String sql) { - // nothing to do - } + public static final class MyDatabaseEventListener implements DatabaseEventListener { @Override public void closingDatabase() { @@ -144,21 +144,6 @@ public void closingDatabase() { } } - @Override - public void setProgress(int state, String name, int x, int max) { - // nothing to do - } - - @Override - public void init(String url) { - // nothing to do - } - - @Override - public void opened() { - // nothing to do - } - } } diff --git a/h2/src/test/org/h2/test/unit/TestFile.java b/h2/src/test/org/h2/test/unit/TestFile.java index 30e9a6ac84..107d5e4099 100644 --- a/h2/src/test/org/h2/test/unit/TestFile.java +++ b/h2/src/test/org/h2/test/unit/TestFile.java @@ -1,23 +1,22 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.util.Random; - -import org.h2.api.JavaObjectSerializer; import org.h2.store.DataHandler; import org.h2.store.FileStore; -import org.h2.store.LobStorageBackend; +import org.h2.store.LobStorageInterface; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; import org.h2.util.SmallLRUCache; import org.h2.util.TempFileDeleter; +import org.h2.value.CompareMode; /** - * Tests the in-memory file system. + * Tests the in-memory file store. */ public class TestFile extends TestBase implements DataHandler { @@ -27,7 +26,7 @@ public class TestFile extends TestBase implements DataHandler { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -151,11 +150,6 @@ public String getDatabasePath() { return null; } - @Override - public String getLobCompressionAlgorithm(int type) { - return null; - } - @Override public Object getLobSyncObject() { return null; @@ -182,7 +176,7 @@ public TempFileDeleter getTempFileDeleter() { } @Override - public LobStorageBackend getLobStorage() { + public LobStorageInterface getLobStorage() { return null; } @@ -193,8 +187,7 @@ public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, } @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; + public CompareMode getCompareMode() { + return CompareMode.getInstance(null, 0); } - } diff --git a/h2/src/test/org/h2/test/unit/TestFileLock.java b/h2/src/test/org/h2/test/unit/TestFileLock.java index 241eb8e07c..716c5b1d22 100644 --- a/h2/src/test/org/h2/test/unit/TestFileLock.java +++ b/h2/src/test/org/h2/test/unit/TestFileLock.java @@ -1,24 +1,25 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.File; import java.sql.Connection; - import org.h2.api.ErrorCode; import org.h2.engine.Constants; import org.h2.message.TraceSystem; import org.h2.store.FileLock; +import org.h2.store.FileLockMethod; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the database file locking facility. Both lock files and sockets locking * is tested. 
*/ -public class TestFileLock extends TestBase implements Runnable { +public class TestFileLock extends TestDb implements Runnable { private static volatile int locks; private static volatile boolean stop; @@ -45,14 +46,19 @@ private String getFile() { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { + public boolean isEnabled() { if (!getFile().startsWith(TestBase.BASE_TEST_DIR)) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { testFsFileLock(); testFutureModificationDate(); testSimple(); @@ -65,36 +71,30 @@ private void testFsFileLock() throws Exception { String url = "jdbc:h2:" + getBaseDir() + "/fileLock;FILE_LOCK=FS;OPEN_NEW=TRUE"; Connection conn = getConnection(url); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, this) - .getConnection(url); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> getConnection(url)); conn.close(); } private void testFutureModificationDate() throws Exception { File f = new File(getFile()); f.delete(); - f.createNewFile(); + assertTrue(f.createNewFile()); f.setLastModified(System.currentTimeMillis() + 10000); FileLock lock = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); - lock.lock(FileLock.LOCK_FILE); + lock.lock(FileLockMethod.FILE); lock.unlock(); } private void testSimple() { - FileLock lock1 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); - FileLock lock2 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); - lock1.lock(FileLock.LOCK_FILE); - createClassProxy(FileLock.class); - assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, lock2).lock( - FileLock.LOCK_FILE); + FileLock lock1 = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); + FileLock lock2 = new FileLock(new TraceSystem(null), getFile(), 
Constants.LOCK_SLEEP); + lock1.lock(FileLockMethod.FILE); + assertThrows(ErrorCode.DATABASE_ALREADY_OPEN_1, () -> lock2.lock(FileLockMethod.FILE)); lock1.unlock(); - lock2 = new FileLock(new TraceSystem(null), getFile(), - Constants.LOCK_SLEEP); - lock2.lock(FileLock.LOCK_FILE); - lock2.unlock(); + FileLock lock3 = new FileLock(new TraceSystem(null), getFile(), Constants.LOCK_SLEEP); + lock3.lock(FileLockMethod.FILE); + lock3.unlock(); } private void test(boolean allowSocketsLock) throws Exception { @@ -123,8 +123,8 @@ public void run() { while (!stop) { lock = new FileLock(new TraceSystem(null), getFile(), 100); try { - lock.lock(allowSockets ? FileLock.LOCK_SOCKET - : FileLock.LOCK_FILE); + lock.lock(allowSockets ? FileLockMethod.SOCKET + : FileLockMethod.FILE); base.trace(lock + " locked"); locks++; if (locks > 1) { diff --git a/h2/src/test/org/h2/test/unit/TestFileLockProcess.java b/h2/src/test/org/h2/test/unit/TestFileLockProcess.java index 2c9a0328e1..b69846f180 100644 --- a/h2/src/test/org/h2/test/unit/TestFileLockProcess.java +++ b/h2/src/test/org/h2/test/unit/TestFileLockProcess.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -8,15 +8,16 @@ import java.sql.Connection; import java.sql.DriverManager; import java.util.ArrayList; + import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.utils.SelfDestructor; -import org.h2.util.New; /** * Tests database file locking. * A new process is started. 
*/ -public class TestFileLockProcess extends TestBase { +public class TestFileLockProcess extends TestDb { /** * This method is called when executing this application from the command @@ -27,7 +28,7 @@ public class TestFileLockProcess extends TestBase { public static void main(String... args) throws Exception { SelfDestructor.startCountdown(60); if (args.length == 0) { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); return; } String url = args[0]; @@ -47,13 +48,18 @@ private static void execute(String url) { } @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.codeCoverage || config.networked) { - return; + return false; } if (getBaseDir().indexOf(':') > 0) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { deleteDb("lock"); String url = "jdbc:h2:"+getBaseDir()+"/lock"; @@ -73,10 +79,10 @@ private void test(int count, String url) throws Exception { url = getURL(url, true); Connection conn = getConnection(url); String selfDestruct = SelfDestructor.getPropertyString(60); - String[] procDef = { "java", selfDestruct, + String[] procDef = { getJVM(), selfDestruct, "-cp", getClassPath(), getClass().getName(), url }; - ArrayList processes = New.arrayList(); + ArrayList processes = new ArrayList<>(count); for (int i = 0; i < count; i++) { Thread.sleep(100); if (i % 10 == 0) { @@ -105,8 +111,14 @@ private void test(int count, String url) throws Exception { buff.append((char) ch); } proc.waitFor(); + + // The travis build somehow generates messages like this from javac. + // No idea where it is coming from. 
+ String processOutput = buff.toString(); + processOutput = processOutput.replaceAll("Picked up _JAVA_OPTIONS: -Xmx2048m -Xms512m", "").trim(); + assertEquals(0, proc.exitValue()); - assertTrue(i + ": " + buff.toString(), buff.length() == 0); + assertTrue(i + ": " + buff.toString(), processOutput.isEmpty()); } Thread.sleep(100); conn.close(); diff --git a/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java b/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java deleted file mode 100644 index 8e4a57ee67..0000000000 --- a/h2/src/test/org/h2/test/unit/TestFileLockSerialized.java +++ /dev/null @@ -1,700 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.OutputStream; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.concurrent.CountDownLatch; - -import org.h2.api.ErrorCode; -import org.h2.jdbc.JdbcConnection; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.util.SortedProperties; -import org.h2.util.Task; - -/** - * Test the serialized (server-less) mode. - */ -public class TestFileLockSerialized extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - if (config.mvStore) { - return; - } - println("testSequence"); - testSequence(); - println("testAutoIncrement"); - testAutoIncrement(); - println("testSequenceFlush"); - testSequenceFlush(); - println("testLeftLogFiles"); - testLeftLogFiles(); - println("testWrongDatabaseInstanceOnReconnect"); - testWrongDatabaseInstanceOnReconnect(); - println("testCache()"); - testCache(); - println("testBigDatabase(false)"); - testBigDatabase(false); - println("testBigDatabase(true)"); - testBigDatabase(true); - println("testCheckpointInUpdateRaceCondition"); - testCheckpointInUpdateRaceCondition(); - println("testConcurrentUpdates"); - testConcurrentUpdates(); - println("testThreeMostlyReaders true"); - testThreeMostlyReaders(true); - println("testThreeMostlyReaders false"); - testThreeMostlyReaders(false); - println("testTwoReaders"); - testTwoReaders(); - println("testTwoWriters"); - testTwoWriters(); - println("testPendingWrite"); - testPendingWrite(); - println("testKillWriter"); - testKillWriter(); - println("testConcurrentReadWrite"); - testConcurrentReadWrite(); - deleteDb("fileLockSerialized"); - } - - private void testSequence() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized" + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;RECONNECT_CHECK_DELAY=10"; - ResultSet rs; - Connection conn1 = getConnection(url); - Statement stat1 = conn1.createStatement(); - stat1.execute("create sequence seq"); - // 5 times RECONNECT_CHECK_DELAY - Thread.sleep(100); - rs = stat1.executeQuery("call seq.nextval"); - rs.next(); - conn1.close(); - } - - private void testSequenceFlush() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - ResultSet rs; - Connection conn1 = getConnection(url); - Statement stat1 
= conn1.createStatement(); - stat1.execute("create sequence seq"); - rs = stat1.executeQuery("call seq.nextval"); - rs.next(); - assertEquals(1, rs.getInt(1)); - Connection conn2 = getConnection(url); - Statement stat2 = conn2.createStatement(); - rs = stat2.executeQuery("call seq.nextval"); - rs.next(); - assertEquals(2, rs.getInt(1)); - conn1.close(); - conn2.close(); - } - - private void testThreeMostlyReaders(final boolean write) throws Exception { - boolean longRun = false; - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int) as select 1"); - conn.close(); - - final int len = 10; - final Exception[] ex = { null }; - final boolean[] stop = { false }; - Thread[] threads = new Thread[len]; - for (int i = 0; i < len; i++) { - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - PreparedStatement p = c - .prepareStatement("select * from test where id = ?"); - while (!stop[0]) { - Thread.sleep(100); - if (write) { - if (Math.random() > 0.9) { - c.createStatement().execute( - "update test set id = id"); - } - } - p.setInt(1, 1); - p.executeQuery(); - p.clearParameters(); - } - c.close(); - } catch (Exception e) { - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - if (longRun) { - Thread.sleep(40000); - } else { - Thread.sleep(1000); - } - stop[0] = true; - for (int i = 0; i < len; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - } - - private void testTwoReaders() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - Connection conn1 = getConnection(url); - conn1.createStatement().execute("create table test(id int)"); - Connection conn2 = 
getConnection(url); - Statement stat2 = conn2.createStatement(); - stat2.execute("drop table test"); - stat2.execute("create table test(id identity) as select 1"); - conn2.close(); - conn1.close(); - getConnection(url).close(); - } - - private void testTwoWriters() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - final String writeUrl = url + ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - Connection conn = getConnection(writeUrl, "sa", "sa"); - conn.createStatement() - .execute( - "create table test(id identity) as " + - "select x from system_range(1, 100)"); - conn.close(); - Task task = new Task() { - @Override - public void call() throws Exception { - while (!stop) { - Thread.sleep(10); - Connection c = getConnection(writeUrl, "sa", "sa"); - c.createStatement().execute("select * from test"); - c.close(); - } - } - }.execute(); - Thread.sleep(20); - for (int i = 0; i < 2; i++) { - conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("drop table test"); - stat.execute("create table test(id identity) as " + - "select x from system_range(1, 100)"); - conn.createStatement().execute("select * from test"); - conn.close(); - } - Thread.sleep(100); - conn = getConnection(writeUrl, "sa", "sa"); - conn.createStatement().execute("select * from test"); - conn.close(); - task.get(); - } - - private void testPendingWrite() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;WRITE_DELAY=0"; - - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - Thread.sleep(100); - String propFile = getBaseDir() + "/fileLockSerialized.lock.db"; - SortedProperties p = SortedProperties.loadProperties(propFile); - p.setProperty("changePending", 
"true"); - p.setProperty("modificationDataId", "1000"); - OutputStream out = FileUtils.newOutputStream(propFile, false); - try { - p.store(out, "test"); - } finally { - out.close(); - } - Thread.sleep(100); - stat.execute("select * from test"); - conn.close(); - } - - private void testKillWriter() throws Exception { - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + - ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;WRITE_DELAY=0"; - - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - ((JdbcConnection) conn).setPowerOffCount(1); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, stat).execute( - "insert into test values(1)"); - - Connection conn2 = getConnection(writeUrl, "sa", "sa"); - Statement stat2 = conn2.createStatement(); - stat2.execute("insert into test values(1)"); - printResult(stat2, "select * from test"); - - conn2.close(); - - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - } - - private void testConcurrentReadWrite() throws Exception { - deleteDb("fileLockSerialized"); - - String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - String writeUrl = url + ";FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - // ;TRACE_LEVEL_SYSTEM_OUT=3 - // String readUrl = writeUrl + ";ACCESS_MODE_DATA=R"; - - trace(" create database"); - Class.forName("org.h2.Driver"); - Connection conn = getConnection(writeUrl, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - - Connection conn3 = getConnection(writeUrl, "sa", "sa"); - PreparedStatement prep3 = conn3 - .prepareStatement("insert into test values(?)"); - - Connection conn2 = getConnection(writeUrl, "sa", "sa"); - Statement stat2 = conn2.createStatement(); - printResult(stat2, "select * from test"); - - stat2.execute("create local temporary table temp(name varchar) not persistent"); - 
printResult(stat2, "select * from temp"); - - trace(" insert row 1"); - stat.execute("insert into test values(1)"); - trace(" insert row 2"); - prep3.setInt(1, 2); - prep3.execute(); - printResult(stat2, "select * from test"); - printResult(stat2, "select * from temp"); - - conn.close(); - conn2.close(); - conn3.close(); - } - - private void printResult(Statement stat, String sql) throws SQLException { - trace(" query: " + sql); - ResultSet rs = stat.executeQuery(sql); - int rowCount = 0; - while (rs.next()) { - trace(" " + rs.getString(1)); - rowCount++; - } - trace(" " + rowCount + " row(s)"); - } - - private void testConcurrentUpdates() throws Exception { - boolean longRun = false; - if (longRun) { - for (int waitTime = 100; waitTime < 10000; waitTime += 20) { - for (int howManyThreads = 1; howManyThreads < 10; howManyThreads++) { - testConcurrentUpdates(waitTime, howManyThreads, waitTime * - howManyThreads * 10); - } - } - } else { - testConcurrentUpdates(100, 4, 2000); - } - } - - private void testAutoIncrement() throws Exception { - boolean longRun = false; - if (longRun) { - for (int waitTime = 100; waitTime < 10000; waitTime += 20) { - for (int howManyThreads = 1; howManyThreads < 10; howManyThreads++) { - testAutoIncrement(waitTime, howManyThreads, 2000); - } - } - } else { - testAutoIncrement(400, 2, 2000); - } - } - - private void testAutoIncrement(final int waitTime, int howManyThreads, - int runTime) throws Exception { - println("testAutoIncrement waitTime: " + waitTime + - " howManyThreads: " + howManyThreads + " runTime: " + runTime); - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + - getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;" + - "AUTO_RECONNECT=TRUE;MAX_LENGTH_INPLACE_LOB=8192;" + - "COMPRESS_LOB=DEFLATE;CACHE_SIZE=65536"; - - Connection conn = getConnection(url); - conn.createStatement().execute( - "create table test(id int auto_increment, id2 int)"); - conn.close(); - - final long endTime = 
System.currentTimeMillis() + runTime; - final Exception[] ex = { null }; - final Connection[] connList = new Connection[howManyThreads]; - final boolean[] stop = { false }; - final int[] nextInt = { 0 }; - Thread[] threads = new Thread[howManyThreads]; - for (int i = 0; i < howManyThreads; i++) { - final int finalNrOfConnection = i; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - connList[finalNrOfConnection] = c; - while (!stop[0]) { - synchronized (nextInt) { - ResultSet rs = c.createStatement() - .executeQuery( - "select id, id2 from test"); - while (rs.next()) { - if (rs.getInt(1) != rs.getInt(2)) { - throw new Exception(Thread - .currentThread().getId() + - " nextInt: " + - nextInt[0] + - " rs.getInt(1): " + - rs.getInt(1) + - " rs.getInt(2): " + - rs.getInt(2)); - } - } - nextInt[0]++; - Statement stat = c.createStatement(); - stat.execute("insert into test (id2) values(" + - nextInt[0] + ")"); - ResultSet rsKeys = stat.getGeneratedKeys(); - while (rsKeys.next()) { - assertEquals(nextInt[0], rsKeys.getInt(1)); - } - rsKeys.close(); - } - Thread.sleep(waitTime); - } - c.close(); - } catch (Exception e) { - e.printStackTrace(); - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - while ((ex[0] == null) && (System.currentTimeMillis() < endTime)) { - Thread.sleep(10); - } - - stop[0] = true; - for (int i = 0; i < howManyThreads; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - deleteDb("fileLockSerialized"); - } - - private void testConcurrentUpdates(final int waitTime, int howManyThreads, - int runTime) throws Exception { - println("testConcurrentUpdates waitTime: " + waitTime + - " howManyThreads: " + howManyThreads + " runTime: " + runTime); - deleteDb("fileLockSerialized"); - final String url = "jdbc:h2:" + - getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE;" + - 
"AUTO_RECONNECT=TRUE;MAX_LENGTH_INPLACE_LOB=8192;" + - "COMPRESS_LOB=DEFLATE;CACHE_SIZE=65536"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int)"); - conn.createStatement().execute("insert into test values(1)"); - conn.close(); - - final long endTime = System.currentTimeMillis() + runTime; - final Exception[] ex = { null }; - final Connection[] connList = new Connection[howManyThreads]; - final boolean[] stop = { false }; - final int[] lastInt = { 1 }; - Thread[] threads = new Thread[howManyThreads]; - for (int i = 0; i < howManyThreads; i++) { - final int finalNrOfConnection = i; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection c = getConnection(url); - connList[finalNrOfConnection] = c; - while (!stop[0]) { - ResultSet rs = c.createStatement().executeQuery( - "select * from test"); - rs.next(); - if (rs.getInt(1) != lastInt[0]) { - throw new Exception(finalNrOfConnection + - " Expected: " + lastInt[0] + " got " + - rs.getInt(1)); - } - Thread.sleep(waitTime); - if (Math.random() > 0.7) { - int newLastInt = (int) (Math.random() * 1000); - c.createStatement().execute( - "update test set id = " + newLastInt); - lastInt[0] = newLastInt; - } - } - c.close(); - } catch (Exception e) { - e.printStackTrace(); - ex[0] = e; - } - } - }); - t.start(); - threads[i] = t; - } - while ((ex[0] == null) && (System.currentTimeMillis() < endTime)) { - Thread.sleep(10); - } - - stop[0] = true; - for (int i = 0; i < howManyThreads; i++) { - threads[i].join(); - } - if (ex[0] != null) { - throw ex[0]; - } - getConnection(url).close(); - deleteDb("fileLockSerialized"); - } - - /** - * If a checkpoint occurs between beforeWriting and checkWritingAllowed then - * the result of checkWritingAllowed is READ_ONLY, which is wrong. - * - * Also, if a checkpoint started before beforeWriting, and ends between - * between beforeWriting and checkWritingAllowed, then the same error - * occurs. 
- */ - private void testCheckpointInUpdateRaceCondition() throws Exception { - boolean longRun = false; - deleteDb("fileLockSerialized"); - String url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED;OPEN_NEW=TRUE"; - - Connection conn = getConnection(url); - conn.createStatement().execute("create table test(id int)"); - conn.createStatement().execute("insert into test values(1)"); - for (int i = 0; i < (longRun ? 10000 : 5); i++) { - Thread.sleep(402); - conn.createStatement().execute("update test set id = " + i); - } - conn.close(); - deleteDb("fileLockSerialized"); - } - - /** - * Caches must be cleared. Session.reconnect only closes the DiskFile (which - * is associated with the cache) if there is one session - */ - private void testCache() throws Exception { - deleteDb("fileLockSerialized"); - - String urlShared = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - - Connection connShared1 = getConnection(urlShared); - Statement statement1 = connShared1.createStatement(); - Connection connShared2 = getConnection(urlShared); - Statement statement2 = connShared2.createStatement(); - - statement1.execute("create table test1(id int)"); - statement1.execute("insert into test1 values(1)"); - - ResultSet rs = statement1.executeQuery("select id from test1"); - rs.close(); - rs = statement2.executeQuery("select id from test1"); - rs.close(); - - statement1.execute("update test1 set id=2"); - Thread.sleep(500); - - rs = statement2.executeQuery("select id from test1"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - rs.close(); - - connShared1.close(); - connShared2.close(); - deleteDb("fileLockSerialized"); - } - - private void testWrongDatabaseInstanceOnReconnect() throws Exception { - deleteDb("fileLockSerialized"); - - String urlShared = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - String urlForNew = urlShared + ";OPEN_NEW=TRUE"; - - Connection connShared1 = 
getConnection(urlShared); - Statement statement1 = connShared1.createStatement(); - Connection connShared2 = getConnection(urlShared); - Connection connNew = getConnection(urlForNew); - statement1.execute("create table test1(id int)"); - connShared1.close(); - connShared2.close(); - connNew.close(); - deleteDb("fileLockSerialized"); - } - - private void testBigDatabase(boolean withCache) { - boolean longRun = false; - final int howMuchRows = longRun ? 2000000 : 500000; - deleteDb("fileLockSerialized"); - int cacheSizeKb = withCache ? 5000 : 0; - - final CountDownLatch importFinishedLatch = new CountDownLatch(1); - final CountDownLatch select1FinishedLatch = new CountDownLatch(1); - - final String url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized" + - ";FILE_LOCK=SERIALIZED" + ";OPEN_NEW=TRUE" + ";CACHE_SIZE=" + - cacheSizeKb; - final Task importUpdateTask = new Task() { - @Override - public void call() throws Exception { - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, id2 int)"); - for (int i = 0; i < howMuchRows; i++) { - stat.execute("insert into test values(" + i + ", " + i + - ")"); - } - importFinishedLatch.countDown(); - - select1FinishedLatch.await(); - - stat.execute("update test set id2=999 where id=500"); - conn.close(); - } - }; - importUpdateTask.execute(); - - Task selectTask = new Task() { - @Override - public void call() throws Exception { - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - importFinishedLatch.await(); - - ResultSet rs = stat - .executeQuery("select id2 from test where id=500"); - assertTrue(rs.next()); - assertEquals(500, rs.getInt(1)); - rs.close(); - select1FinishedLatch.countDown(); - - // wait until the other task finished - importUpdateTask.get(); - - // can't use the exact same query, otherwise it would use - // the query cache - rs = stat.executeQuery("select id2 from test where id=500+0"); - 
assertTrue(rs.next()); - assertEquals(999, rs.getInt(1)); - rs.close(); - conn.close(); - } - }; - selectTask.execute(); - - importUpdateTask.get(); - selectTask.get(); - deleteDb("fileLockSerialized"); - } - - private void testLeftLogFiles() throws Exception { - deleteDb("fileLockSerialized"); - - // without serialized - String url; - url = "jdbc:h2:" + getBaseDir() + "/fileLockSerialized"; - Connection conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("insert into test values(0)"); - conn.close(); - - List filesWithoutSerialized = FileUtils - .newDirectoryStream(getBaseDir()); - deleteDb("fileLockSerialized"); - - // with serialized - url = "jdbc:h2:" + getBaseDir() + - "/fileLockSerialized;FILE_LOCK=SERIALIZED"; - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int)"); - Thread.sleep(500); - stat.execute("insert into test values(0)"); - conn.close(); - - List filesWithSerialized = FileUtils - .newDirectoryStream(getBaseDir()); - if (filesWithoutSerialized.size() != filesWithSerialized.size()) { - for (int i = 0; i < filesWithoutSerialized.size(); i++) { - if (!filesWithSerialized - .contains(filesWithoutSerialized.get(i))) { - System.out - .println("File left from 'without serialized' mode: " + - filesWithoutSerialized.get(i)); - } - } - for (int i = 0; i < filesWithSerialized.size(); i++) { - if (!filesWithoutSerialized - .contains(filesWithSerialized.get(i))) { - System.out - .println("File left from 'with serialized' mode: " + - filesWithSerialized.get(i)); - } - } - fail("With serialized it must create the same files than without serialized"); - } - deleteDb("fileLockSerialized"); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestFileSystem.java b/h2/src/test/org/h2/test/unit/TestFileSystem.java index 2fe261c052..8bd7dc1ee3 100644 --- a/h2/src/test/org/h2/test/unit/TestFileSystem.java +++ 
b/h2/src/test/org/h2/test/unit/TestFileSystem.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -12,32 +12,32 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.nio.channels.FileChannel.MapMode; import java.nio.channels.FileLock; import java.nio.channels.NonWritableChannelException; import java.sql.Connection; +import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.List; import java.util.Random; +import java.util.concurrent.atomic.AtomicIntegerArray; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; - import org.h2.dev.fs.FilePathZip2; import org.h2.message.DbException; import org.h2.mvstore.DataUtils; import org.h2.mvstore.cache.FilePathCache; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathEncrypt; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.encrypt.FilePathEncrypt; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.test.utils.FilePathDebug; import org.h2.tools.Backup; import org.h2.tools.DeleteDbFiles; import org.h2.util.IOUtils; +import org.h2.util.Task; /** * Tests various file system. @@ -52,7 +52,7 @@ public class TestFileSystem extends TestBase { public static void main(String... 
a) throws Exception { TestBase test = TestBase.createCaller().init(); // test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override @@ -62,7 +62,6 @@ public void test() throws Exception { testAbsoluteRelative(); testDirectories(getBaseDir()); testMoveTo(getBaseDir()); - testUnsupportedFeatures(getBaseDir()); FilePathZip2.register(); FilePath.register(new FilePathCache()); FilePathRec.register(); @@ -82,15 +81,17 @@ public void test() throws Exception { String f = "split:10:" + getBaseDir() + "/fs"; FileUtils.toRealPath(f); testFileSystem(getBaseDir() + "/fs"); + testFileSystem("async:" + getBaseDir() + "/fs"); testFileSystem("memFS:"); testFileSystem("memLZF:"); testFileSystem("nioMemFS:"); - testFileSystem("nioMemLZF:"); + testFileSystem("nioMemLZF:1:"); + // 12% compressLaterCache + testFileSystem("nioMemLZF:12:"); testFileSystem("rec:memFS:"); testUserHome(); try { - testFileSystem("nio:" + getBaseDir() + "/fs"); - testFileSystem("cache:nio:" + getBaseDir() + "/fs"); + testFileSystem("cache:" + getBaseDir() + "/fs"); testFileSystem("nioMapped:" + getBaseDir() + "/fs"); testFileSystem("encrypt:0007:" + getBaseDir() + "/fs"); testFileSystem("cache:encrypt:0007:" + getBaseDir() + "/fs"); @@ -98,10 +99,7 @@ public void test() throws Exception { testFileSystem("split:" + getBaseDir() + "/fs"); testFileSystem("split:nioMapped:" + getBaseDir() + "/fs"); } - } catch (Exception e) { - e.printStackTrace(); - throw e; - } catch (Error e) { + } catch (Exception | Error e) { e.printStackTrace(); throw e; } finally { @@ -203,7 +201,9 @@ private void testZipFileSystem(String prefix, Random r) throws IOException { private void testAbsoluteRelative() { assertFalse(FileUtils.isAbsolute("test/abc")); + assertFalse(FileUtils.isAbsolute("./test/abc")); assertTrue(FileUtils.isAbsolute("~/test/abc")); + assertTrue(FileUtils.isAbsolute("/test/abc")); } private void testMemFsDir() throws IOException { @@ -214,19 +214,19 @@ private void testMemFsDir() throws 
IOException { } private void testClasspath() throws IOException { - String resource = "org/h2/test/testSimple.in.txt"; + String resource = "org/h2/test/scripts/testSimple.sql"; InputStream in; in = getClass().getResourceAsStream("/" + resource); - assertTrue(in != null); + assertNotNull(in); in.close(); in = getClass().getClassLoader().getResourceAsStream(resource); - assertTrue(in != null); + assertNotNull(in); in.close(); in = FileUtils.newInputStream("classpath:" + resource); - assertTrue(in != null); + assertNotNull(in); in.close(); in = FileUtils.newInputStream("classpath:/" + resource); - assertTrue(in != null); + assertNotNull(in); in.close(); } @@ -250,7 +250,7 @@ private void testSplitDatabaseInZip() throws SQLException { FileUtils.deleteRecursive(dir, false); Connection conn; Statement stat; - conn = getConnection("jdbc:h2:split:18:"+dir+"/test"); + conn = DriverManager.getConnection("jdbc:h2:split:18:"+dir+"/test"); stat = conn.createStatement(); stat.execute( "create table test(id int primary key, name varchar) " + @@ -259,7 +259,7 @@ private void testSplitDatabaseInZip() throws SQLException { conn.close(); Backup.execute(dir + "/test.zip", dir, "", true); DeleteDbFiles.execute("split:" + dir, "test", true); - conn = getConnection( + conn = DriverManager.getConnection( "jdbc:h2:split:zip:"+dir+"/test.zip!/test"); conn.createStatement().execute("select * from test where id=1"); conn.close(); @@ -268,23 +268,24 @@ private void testSplitDatabaseInZip() throws SQLException { private void testDatabaseInMemFileSys() throws SQLException { org.h2.Driver.load(); - deleteDb("fsMem"); - String url = "jdbc:h2:" + getBaseDir() + "/fsMem"; - Connection conn = getConnection(url, "sa", "sa"); + String dir = getBaseDir() + "/fsMem"; + FileUtils.deleteRecursive(dir, false); + String url = "jdbc:h2:" + dir + "/fsMem"; + Connection conn = DriverManager.getConnection(url, "sa", "sa"); conn.createStatement().execute( "CREATE TABLE TEST AS SELECT * FROM DUAL"); 
conn.createStatement().execute( "BACKUP TO '" + getBaseDir() + "/fsMem.zip'"); conn.close(); - org.h2.tools.Restore.main("-file", getBaseDir() + "/fsMem.zip", "-dir", - "memFS:"); - conn = getConnection("jdbc:h2:memFS:fsMem", "sa", "sa"); + org.h2.tools.Restore.main("-file", getBaseDir() + "/fsMem.zip", "-dir", "memFS:"); + conn = DriverManager.getConnection("jdbc:h2:memFS:fsMem", "sa", "sa"); ResultSet rs = conn.createStatement() .executeQuery("SELECT * FROM TEST"); rs.close(); conn.close(); - deleteDb("fsMem"); + FileUtils.deleteRecursive(dir, false); FileUtils.delete(getBaseDir() + "/fsMem.zip"); + FileUtils.delete("memFS:fsMem.mv.db"); } private void testDatabaseInJar() throws Exception { @@ -295,8 +296,9 @@ private void testDatabaseInJar() throws Exception { return; } org.h2.Driver.load(); - String url = "jdbc:h2:" + getBaseDir() + "/fsJar"; - Connection conn = getConnection(url, "sa", "sa"); + String dir = getBaseDir() + "/fsJar"; + String url = "jdbc:h2:" + dir + "/fsJar"; + Connection conn = DriverManager.getConnection(url, "sa", "sa"); Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, " + "name varchar, b blob, c clob)"); @@ -308,16 +310,16 @@ private void testDatabaseInJar() throws Exception { byte[] b1 = rs.getBytes(3); String s1 = rs.getString(4); conn.close(); - conn = getConnection(url, "sa", "sa"); + conn = DriverManager.getConnection(url, "sa", "sa"); stat = conn.createStatement(); stat.execute("backup to '" + getBaseDir() + "/fsJar.zip'"); conn.close(); - deleteDb("fsJar"); + FileUtils.deleteRecursive(dir, false); for (String f : FileUtils.newDirectoryStream( "zip:" + getBaseDir() + "/fsJar.zip")) { assertFalse(FileUtils.isAbsolute(f)); - assertTrue(!FileUtils.isDirectory(f)); + assertFalse(FileUtils.isDirectory(f)); assertTrue(FileUtils.size(f) > 0); assertTrue(f.endsWith(FileUtils.getName(f))); assertEquals(0, FileUtils.lastModified(f)); @@ -332,7 +334,7 @@ private void testDatabaseInJar() throws 
Exception { testReadOnly(f); } String urlJar = "jdbc:h2:zip:" + getBaseDir() + "/fsJar.zip!/fsJar"; - conn = getConnection(urlJar, "sa", "sa"); + conn = DriverManager.getConnection(urlJar, "sa", "sa"); stat = conn.createStatement(); rs = stat.executeQuery("select * from test"); rs.next(); @@ -350,38 +352,14 @@ private void testDatabaseInJar() throws Exception { } private void testReadOnly(final String f) throws IOException { - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - FileUtils.newOutputStream(f, false); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(f, f); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(f, f); - }}; - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - FileUtils.createTempFile(f, ".tmp", false, false); - }}; + assertThrows(IOException.class, () -> FileUtils.newOutputStream(f, false)); + assertThrows(DbException.class, () -> FileUtils.move(f, f)); + assertThrows(DbException.class, () -> FileUtils.move(f, f)); + assertThrows(IOException.class, () -> FileUtils.createTempFile(f, ".tmp", false)); final FileChannel channel = FileUtils.open(f, "r"); - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - channel.write(ByteBuffer.allocate(1)); - }}; - new AssertThrows(IOException.class) { - @Override - public void test() throws IOException { - channel.truncate(0); - }}; - assertTrue(null == channel.tryLock()); + assertThrows(NonWritableChannelException.class, () -> channel.write(ByteBuffer.allocate(1))); + assertThrows(IOException.class, () -> channel.truncate(0)); + assertNull(channel.tryLock()); channel.force(false); channel.close(); } @@ -393,6 +371,8 @@ private void testUserHome() { } private void testFileSystem(String fsBase) throws Exception { + testConcurrent(fsBase); + testRootExists(fsBase); 
testPositionedReadWrite(fsBase); testSetReadOnly(fsBase); testParentEventuallyReturnsNull(fsBase); @@ -401,6 +381,15 @@ private void testFileSystem(String fsBase) throws Exception { testRandomAccess(fsBase); } + private void testRootExists(String fsBase) { + String fileName = fsBase + "/testFile"; + FilePath p = FilePath.get(fileName); + while (p.getParent() != null) { + p = p.getParent(); + } + assertTrue(p.exists()); + } + private void testSetReadOnly(String fsBase) { String fileName = fsBase + "/testFile"; if (FileUtils.exists(fileName)) { @@ -413,27 +402,19 @@ private void testSetReadOnly(String fsBase) { } } - private static void testDirectories(String fsBase) { + private void testDirectories(String fsBase) { final String fileName = fsBase + "/testFile"; if (FileUtils.exists(fileName)) { FileUtils.delete(fileName); } if (FileUtils.createFile(fileName)) { - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.createDirectory(fileName); - }}; - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.createDirectories(fileName + "/test"); - }}; + assertThrows(DbException.class, () -> FileUtils.createDirectory(fileName)); + assertThrows(DbException.class, () -> FileUtils.createDirectories(fileName + "/test")); FileUtils.delete(fileName); } } - private static void testMoveTo(String fsBase) { + private void testMoveTo(String fsBase) { final String fileName = fsBase + "/testFile"; final String fileName2 = fsBase + "/testFile2"; if (FileUtils.exists(fileName)) { @@ -442,60 +423,10 @@ private static void testMoveTo(String fsBase) { if (FileUtils.createFile(fileName)) { FileUtils.move(fileName, fileName2); FileUtils.createFile(fileName); - new AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(fileName2, fileName); - }}; + assertThrows(DbException.class, () -> FileUtils.move(fileName2, fileName)); FileUtils.delete(fileName); FileUtils.delete(fileName2); - new 
AssertThrows(DbException.class) { - @Override - public void test() { - FileUtils.move(fileName, fileName2); - }}; - } - } - - private static void testUnsupportedFeatures(String fsBase) throws IOException { - final String fileName = fsBase + "/testFile"; - if (FileUtils.exists(fileName)) { - FileUtils.delete(fileName); - } - if (FileUtils.createFile(fileName)) { - final FileChannel channel = FileUtils.open(fileName, "rw"); - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.map(MapMode.PRIVATE, 0, channel.size()); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.read(new ByteBuffer[]{ByteBuffer.allocate(10)}, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.write(new ByteBuffer[]{ByteBuffer.allocate(10)}, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.transferFrom(channel, 0, 0); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.transferTo(0, 0, channel); - }}; - new AssertThrows(UnsupportedOperationException.class) { - @Override - public void test() throws IOException { - channel.lock(); - }}; - channel.close(); - FileUtils.delete(fileName); + assertThrows(DbException.class, () -> FileUtils.move(fileName, fileName2)); } } @@ -560,18 +491,8 @@ private void testSimple(final String fsBase) throws Exception { FileUtils.readFully(channel, ByteBuffer.wrap(test, 0, 10000)); assertEquals(buffer, test); final FileChannel fc = channel; - new AssertThrows(IOException.class) { - @Override - public void test() throws Exception { - fc.write(ByteBuffer.wrap(test, 0, 10)); - } - }; - new AssertThrows(NonWritableChannelException.class) { - @Override - public void test() throws Exception { 
- fc.truncate(10); - } - }; + assertThrows(NonWritableChannelException.class, () -> fc.write(ByteBuffer.wrap(test, 0, 10))); + assertThrows(NonWritableChannelException.class, () -> fc.truncate(10)); channel.close(); long lastMod = FileUtils.lastModified(fsBase + "/test"); if (lastMod < time - 1999) { @@ -585,7 +506,7 @@ public void test() throws Exception { IOUtils.copyFiles(fsBase + "/test", fsBase + "/test3"); FileUtils.move(fsBase + "/test3", fsBase + "/test2"); FileUtils.move(fsBase + "/test2", fsBase + "/test2"); - assertTrue(!FileUtils.exists(fsBase + "/test3")); + assertFalse(FileUtils.exists(fsBase + "/test3")); assertTrue(FileUtils.exists(fsBase + "/test2")); assertEquals(10000, FileUtils.size(fsBase + "/test2")); byte[] buffer2 = new byte[10000]; @@ -610,7 +531,7 @@ public void test() throws Exception { assertTrue(FileUtils.isDirectory(fsBase + "/testDir")); if (!fsBase.startsWith("jdbc:")) { FileUtils.deleteRecursive(fsBase + "/testDir", false); - assertTrue(!FileUtils.exists(fsBase + "/testDir")); + assertFalse(FileUtils.exists(fsBase + "/testDir")); } } } @@ -657,14 +578,13 @@ private void testRandomAccess(String fsBase) throws Exception { private void testRandomAccess(String fsBase, int seed) throws Exception { StringBuilder buff = new StringBuilder(); - String s = FileUtils.createTempFile(fsBase + "/tmp", ".tmp", false, false); + String s = FileUtils.createTempFile(fsBase + "/tmp", ".tmp", false); File file = new File(TestBase.BASE_TEST_DIR + "/tmp"); file.getParentFile().mkdirs(); file.delete(); RandomAccessFile ra = new RandomAccessFile(file, "rw"); FileUtils.delete(s); FileChannel f = FileUtils.open(s, "rw"); - assertEquals(s, f.toString()); assertEquals(-1, f.read(ByteBuffer.wrap(new byte[1]))); f.force(true); Random random = new Random(seed); @@ -673,7 +593,7 @@ private void testRandomAccess(String fsBase, int seed) throws Exception { for (int i = 0; i < size; i++) { trace("op " + i); int pos = random.nextInt(10000); - switch(random.nextInt(7)) 
{ + switch (random.nextInt(7)) { case 0: { pos = (int) Math.min(pos, ra.length()); trace("seek " + pos); @@ -771,7 +691,7 @@ private static ByteBuffer createSlicedBuffer(byte[] buffer, int offset, private void testTempFile(String fsBase) throws Exception { int len = 10000; - String s = FileUtils.createTempFile(fsBase + "/tmp", ".tmp", false, false); + String s = FileUtils.createTempFile(fsBase + "/tmp", ".tmp", false); OutputStream out = FileUtils.newOutputStream(s, false); byte[] buffer = new byte[len]; out.write(buffer); @@ -790,4 +710,93 @@ private void testTempFile(String fsBase) throws Exception { FileUtils.delete(s); } + private void testConcurrent(String fsBase) throws Exception { + String s = FileUtils.createTempFile(fsBase + "/tmp", ".tmp", false); + File file = new File(TestBase.BASE_TEST_DIR + "/tmp"); + file.getParentFile().mkdirs(); + file.delete(); + RandomAccessFile ra = new RandomAccessFile(file, "rw"); + FileUtils.delete(s); + final FileChannel f = FileUtils.open(s, "rw"); + final int size = getSize(10, 50); + f.write(ByteBuffer.allocate(size * 64 * 1024)); + AtomicIntegerArray locks = new AtomicIntegerArray(size); + AtomicIntegerArray expected = new AtomicIntegerArray(size); + Random random = new Random(1); + System.gc(); + Task task = new Task() { + @Override + public void call() throws Exception { + ByteBuffer byteBuff = ByteBuffer.allocate(16); + while (!stop) { + for (int pos = 0; pos < size; pos++) { + byteBuff.clear(); + int e; + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + e = expected.get(pos); + f.read(byteBuff, pos * 64 * 1024); + } finally { + locks.set(pos, 0); + } + byteBuff.position(0); + int x = byteBuff.getInt(); + int y = byteBuff.getInt(); + assertEquals(e, x); + assertEquals(e, y); + Thread.yield(); + } + } + } + }; + task.execute(); + try { + ByteBuffer byteBuff = ByteBuffer.allocate(16); + int operations = 10000; + for (int i = 0; i < operations; i++) { + byteBuff.position(0); + byteBuff.putInt(i); + 
byteBuff.putInt(i); + byteBuff.flip(); + int pos = random.nextInt(size); + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + f.write(byteBuff, pos * 64 * 1024); + expected.set(pos, i); + } finally { + locks.set(pos, 0); + } + pos = random.nextInt(size); + byteBuff.clear(); + int e; + while (!locks.compareAndSet(pos, 0, 1)) { + } + try { + e = expected.get(pos); + f.read(byteBuff, pos * 64 * 1024); + } finally { + locks.set(pos, 0); + } + byteBuff.limit(16); + byteBuff.position(0); + int x = byteBuff.getInt(); + int y = byteBuff.getInt(); + assertEquals(e, x); + assertEquals(e, y); + } + } catch (Throwable e) { + e.printStackTrace(); + fail("Exception: " + e); + } finally { + task.get(); + f.close(); + ra.close(); + file.delete(); + FileUtils.delete(s); + System.gc(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestFtp.java b/h2/src/test/org/h2/test/unit/TestFtp.java index f193a71c2e..53ba7d2bcd 100644 --- a/h2/src/test/org/h2/test/unit/TestFtp.java +++ b/h2/src/test/org/h2/test/unit/TestFtp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -26,14 +26,19 @@ public class TestFtp extends TestBase implements FtpEventListener { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { + public boolean isEnabled() { if (getBaseDir().indexOf(':') > 0) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { FileUtils.delete(getBaseDir() + "/ftp"); test(getBaseDir()); FileUtils.delete(getBaseDir() + "/ftp"); diff --git a/h2/src/test/org/h2/test/unit/TestGeometryUtils.java b/h2/src/test/org/h2/test/unit/TestGeometryUtils.java new file mode 100644 index 0000000000..6b8f1b54c5 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestGeometryUtils.java @@ -0,0 +1,531 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XY; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYM; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYZ; +import static org.h2.util.geometry.GeometryUtils.DIMENSION_SYSTEM_XYZM; +import static org.h2.util.geometry.GeometryUtils.GEOMETRY_COLLECTION; +import static org.h2.util.geometry.GeometryUtils.M; +import static org.h2.util.geometry.GeometryUtils.MAX_X; +import static org.h2.util.geometry.GeometryUtils.MAX_Y; +import static org.h2.util.geometry.GeometryUtils.MIN_X; +import static org.h2.util.geometry.GeometryUtils.MIN_Y; +import static org.h2.util.geometry.GeometryUtils.X; +import static org.h2.util.geometry.GeometryUtils.Y; +import static org.h2.util.geometry.GeometryUtils.Z; + +import java.io.ByteArrayOutputStream; +import java.util.Random; + +import org.h2.test.TestBase; +import org.h2.util.StringUtils; +import org.h2.util.geometry.EWKBUtils; +import org.h2.util.geometry.EWKBUtils.EWKBTarget; +import org.h2.util.geometry.EWKTUtils; +import 
org.h2.util.geometry.EWKTUtils.EWKTTarget; +import org.h2.util.geometry.GeometryUtils; +import org.h2.util.geometry.GeometryUtils.DimensionSystemTarget; +import org.h2.util.geometry.GeometryUtils.EnvelopeTarget; +import org.h2.util.geometry.GeometryUtils.Target; +import org.h2.util.geometry.JTSUtils; +import org.h2.util.geometry.JTSUtils.GeometryTarget; +import org.h2.value.ValueGeometry; +import org.locationtech.jts.geom.CoordinateSequence; +import org.locationtech.jts.geom.Envelope; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryCollection; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.io.ParseException; +import org.locationtech.jts.io.WKBWriter; +import org.locationtech.jts.io.WKTReader; +import org.locationtech.jts.io.WKTWriter; + +/** + * Tests the classes from org.h2.util.geometry package. + */ +public class TestGeometryUtils extends TestBase { + + private static final byte[][] NON_FINITE = { // + // XY + StringUtils.convertHexToBytes("0000000001" // + + "0000000000000000" // + + "7ff8000000000000"), // + // XY + StringUtils.convertHexToBytes("0000000001" // + + "7ff8000000000000" // + + "0000000000000000"), // + // XYZ + StringUtils.convertHexToBytes("0080000001" // + + "0000000000000000" // + + "0000000000000000" // + + "7ff8000000000000"), // + // XYM + StringUtils.convertHexToBytes("0040000001" // + + "0000000000000000" // + + "0000000000000000" // + + "7ff8000000000000") }; + + private static final int[] NON_FINITE_DIMENSIONS = { // + DIMENSION_SYSTEM_XY, // + DIMENSION_SYSTEM_XY, // + DIMENSION_SYSTEM_XYZ, // + DIMENSION_SYSTEM_XYM }; + + private static final String MIXED_WKT = "LINESTRING (1 2, 3 4 5)"; + + private static final byte[] MIXED_WKB = StringUtils.convertHexToBytes("" + // BOM (BigEndian) + + "00" + // Z | LINESTRING + + "80000002" + // 2 items + + "00000002" + // 1.0 + + "3ff0000000000000" + // 2.0 + + "4000000000000000" + // 
NaN + + "7ff8000000000000" + // 3.0 + + "4008000000000000" + // 4.0 + + "4010000000000000" + // 5.0 + + "4014000000000000"); + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testPoint(); + testLineString(); + testPolygon(); + testMultiPoint(); + testMultiLineString(); + testMultiPolygon(); + testGeometryCollection(); + testEmptyPoint(); + testDimensionXY(); + testDimensionZ(); + testDimensionM(); + testDimensionZM(); + testFiniteOnly(); + testSRID(); + testIntersectionAndUnion(); + testMixedGeometries(); + } + + private void testPoint() throws Exception { + testGeometry("POINT (1 2)", 2); + testGeometry("POINT (-1.3 15)", 2); + testGeometry("POINT (-1E32 1.000001)", "POINT (-1E32 1.000001)", + "POINT (-100000000000000000000000000000000 1.000001)", 2, true); + testGeometry("POINT Z (2.7 -3 34)", 3); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTZ(1 2 3)"))); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointz(1 2 3)"))); + } + + private void testLineString() throws Exception { + testGeometry("LINESTRING (-1 -2, 10 1, 2 20)", 2); + testGeometry("LINESTRING (1 2, 1 3)", 2); + testGeometry("LINESTRING (1 2, 2 2)", 2); + testGeometry("LINESTRING EMPTY", 2); + testGeometry("LINESTRING Z (-1 -2 -3, 10 15.7 3)", 3); + } + + private void testPolygon() throws Exception { + testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2))", 2); + testGeometry("POLYGON EMPTY", "POLYGON EMPTY", "POLYGON EMPTY", 2, false); + testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5))", 2); + // TODO is EMPTY inner ring valid? 
+ testGeometry("POLYGON ((-1 -2, 10 1, 2 20, -1 -2), EMPTY)", 2); + testGeometry("POLYGON Z ((-1 -2 7, 10 1 7, 2 20 7, -1 -2 7), (0.5 0.5 7, 1 0.5 7, 1 1 7, 0.5 0.5 7))", 3); + } + + private void testMultiPoint() throws Exception { + testGeometry("MULTIPOINT ((1 2), (3 4))", 2); + // Alternative syntax + testGeometry("MULTIPOINT (1 2, 3 4)", "MULTIPOINT ((1 2), (3 4))", "MULTIPOINT ((1 2), (3 4))", 2, true); + testGeometry("MULTIPOINT (1 2)", "MULTIPOINT ((1 2))", "MULTIPOINT ((1 2))", 2, true); + testGeometry("MULTIPOINT EMPTY", 2); + testGeometry("MULTIPOINT Z ((1 2 0.5), (3 4 -3))", 3); + } + + private void testMultiLineString() throws Exception { + testGeometry("MULTILINESTRING ((1 2, 3 4, 5 7))", 2); + testGeometry("MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01))", 2); + testGeometry("MULTILINESTRING EMPTY", 2); + testGeometry("MULTILINESTRING Z ((1 2 0.5, 3 4 -3, 5 7 10))", 3); + } + + private void testMultiPolygon() throws Exception { + testGeometry("MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)))", 2); + testGeometry("MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2)), ((1 2, 2 2, 3 3, 1 2)))", 2); + testGeometry("MULTIPOLYGON EMPTY", 2); + testGeometry("MULTIPOLYGON (((-1 -2, 10 1, 2 20, -1 -2), (0.5 0.5, 1 0.5, 1 1, 0.5 0.5)))", 2); + testGeometry("MULTIPOLYGON Z (((-1 -2 7, 10 1 7, 2 20 7, -1 -2 7), (0.5 1 7, 1 0.5 7, 1 1 7, 0.5 1 7)))", 3); + } + + private void testGeometryCollection() throws Exception { + testGeometry("GEOMETRYCOLLECTION (POINT (1 2))", 2); + testGeometry("GEOMETRYCOLLECTION (POINT (1 2), " // + + "MULTILINESTRING ((1 2, 3 4, 5 7), (-1 -1, 0 0, 2 2, 4 6.01)), " // + + "POINT (100 130))", 2); + testGeometry("GEOMETRYCOLLECTION EMPTY", 2); + testGeometry( + "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 3)), MULTIPOINT ((4 8)), GEOMETRYCOLLECTION EMPTY)", + 2); + testGeometry("GEOMETRYCOLLECTION Z (POINT Z (1 2 3))", 3); + } + + private void testGeometry(String wkt, int numOfDimensions) throws Exception { + testGeometry(wkt, wkt, 
wkt, numOfDimensions, true); + } + + private void testGeometry(String wkt, String h2Wkt, String jtsWkt, int numOfDimensions, boolean withEWKB) + throws Exception { + Geometry geometryFromJTS = readWKT(wkt); + byte[] wkbFromJTS = new WKBWriter(numOfDimensions).write(geometryFromJTS); + + // Test WKB->WKT conversion + assertEquals(h2Wkt, EWKTUtils.ewkb2ewkt(wkbFromJTS)); + + if (withEWKB) { + // Test WKT->WKB conversion + assertEquals(wkbFromJTS, EWKTUtils.ewkt2ewkb(wkt)); + + // Test WKB->WKB no-op normalization + assertEquals(wkbFromJTS, EWKBUtils.ewkb2ewkb(wkbFromJTS)); + } + + // Test WKB->Geometry conversion + Geometry geometryFromH2 = JTSUtils.ewkb2geometry(wkbFromJTS); + String got = new WKTWriter(numOfDimensions).write(geometryFromH2); + if (!jtsWkt.equals(got)) { + assertEquals(jtsWkt.replaceAll(" Z ", " Z"), got); + } + + if (withEWKB) { + // Test Geometry->WKB conversion + assertEquals(wkbFromJTS, JTSUtils.geometry2ewkb(geometryFromJTS)); + } + + // Test Envelope + Envelope envelopeFromJTS = geometryFromJTS.getEnvelopeInternal(); + testEnvelope(envelopeFromJTS, GeometryUtils.getEnvelope(wkbFromJTS)); + EnvelopeTarget target = new EnvelopeTarget(); + EWKBUtils.parseEWKB(wkbFromJTS, target); + testEnvelope(envelopeFromJTS, target.getEnvelope()); + + // Test dimensions + int expectedDimensionSystem = numOfDimensions > 2 ? 
GeometryUtils.DIMENSION_SYSTEM_XYZ + : GeometryUtils.DIMENSION_SYSTEM_XY; + testDimensions(expectedDimensionSystem, wkbFromJTS); + + testValueGeometryProperties(wkbFromJTS); + } + + private void testEnvelope(Envelope envelopeFromJTS, double[] envelopeFromH2) { + if (envelopeFromJTS.isNull()) { + assertNull(envelopeFromH2); + assertNull(EWKBUtils.envelope2wkb(envelopeFromH2)); + } else { + assertEquals(envelopeFromJTS.getMinX(), envelopeFromH2[0]); + assertEquals(envelopeFromJTS.getMaxX(), envelopeFromH2[1]); + assertEquals(envelopeFromJTS.getMinY(), envelopeFromH2[2]); + assertEquals(envelopeFromJTS.getMaxY(), envelopeFromH2[3]); + assertEquals(new WKBWriter(2).write(new GeometryFactory().toGeometry(envelopeFromJTS)), + EWKBUtils.envelope2wkb(envelopeFromH2)); + } + } + + private void testEmptyPoint() { + String ewkt = "POINT EMPTY"; + byte[] ewkb = EWKTUtils.ewkt2ewkb(ewkt); + assertEquals(StringUtils.convertHexToBytes("00000000017ff80000000000007ff8000000000000"), ewkb); + assertEquals(ewkt, EWKTUtils.ewkb2ewkt(ewkb)); + assertNull(GeometryUtils.getEnvelope(ewkb)); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + assertTrue(p.isEmpty()); + assertEquals(ewkt, new WKTWriter().write(p)); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + } + + private void testDimensionXY() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT (1 2)"); + assertEquals("POINT (1 2)", EWKTUtils.ewkb2ewkt(ewkb)); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs = p.getCoordinateSequence(); + testDimensionXYCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XY, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT (1 2)"); + cs = p.getCoordinateSequence(); + testDimensionXYCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT (1 2)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + 
testDimensionXYCheckPoint(cs); + } + + private void testDimensionXYCheckPoint(CoordinateSequence cs) { + assertEquals(2, cs.getDimension()); + assertEquals(0, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(Double.NaN, cs.getZ(0)); + } + + private void testDimensionZ() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT Z (1 2 3)"); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTZ(1 2 3)"))); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointz(1 2 3)"))); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYZ, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT Z (1 2 3)"); + cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT Z (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionZCheckPoint(cs); + } + + private void testDimensionZCheckPoint(CoordinateSequence cs) { + assertEquals(3, cs.getDimension()); + assertEquals(0, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(3, cs.getOrdinate(0, Z)); + assertEquals(3, cs.getZ(0)); + } + + private void testDimensionM() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT M (1 2 3)"); + assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTM(1 2 3)"))); + assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointm(1 2 3)"))); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs 
= p.getCoordinateSequence(); + testDimensionMCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYM, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT M (1 2 3)"); + cs = p.getCoordinateSequence(); + testDimensionMCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT M (1 2 3)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionMCheckPoint(cs); + } + + private void testDimensionMCheckPoint(CoordinateSequence cs) { + assertEquals(3, cs.getDimension()); + assertEquals(1, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(3, cs.getOrdinate(0, 2)); + assertEquals(3, cs.getM(0)); + } + + private void testDimensionZM() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("POINT ZM (1 2 3 4)"); + assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(ewkb)); + assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("POINTZM(1 2 3 4)"))); + assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb("pointzm(1 2 3 4)"))); + Point p = (Point) JTSUtils.ewkb2geometry(ewkb); + CoordinateSequence cs = p.getCoordinateSequence(); + testDimensionZMCheckPoint(cs); + assertEquals(ewkb, JTSUtils.geometry2ewkb(p)); + testDimensions(GeometryUtils.DIMENSION_SYSTEM_XYZM, ewkb); + testValueGeometryProperties(ewkb); + + p = (Point) readWKT("POINT ZM (1 2 3 4)"); + cs = p.getCoordinateSequence(); + testDimensionZMCheckPoint(cs); + ewkb = JTSUtils.geometry2ewkb(p); + assertEquals("POINT ZM (1 2 3 4)", EWKTUtils.ewkb2ewkt(ewkb)); + p = (Point) JTSUtils.ewkb2geometry(ewkb); + cs = p.getCoordinateSequence(); + testDimensionZMCheckPoint(cs); + } + + private void testDimensionZMCheckPoint(CoordinateSequence cs) { + assertEquals(4, cs.getDimension()); + assertEquals(1, cs.getMeasures()); + assertEquals(1, cs.getOrdinate(0, 
X)); + assertEquals(2, cs.getOrdinate(0, Y)); + assertEquals(3, cs.getOrdinate(0, Z)); + assertEquals(3, cs.getZ(0)); + assertEquals(4, cs.getOrdinate(0, M)); + assertEquals(4, cs.getM(0)); + } + + private void testValueGeometryProperties(byte[] ewkb) { + ValueGeometry vg = ValueGeometry.getFromEWKB(ewkb); + DimensionSystemTarget target = new DimensionSystemTarget(); + EWKBUtils.parseEWKB(ewkb, target); + int dimensionSystem = target.getDimensionSystem(); + assertEquals(dimensionSystem, vg.getDimensionSystem()); + String formattedType = EWKTUtils + .formatGeometryTypeAndDimensionSystem(new StringBuilder(), vg.getTypeAndDimensionSystem()).toString(); + assertTrue(EWKTUtils.ewkb2ewkt(ewkb).startsWith(formattedType)); + switch (dimensionSystem) { + case DIMENSION_SYSTEM_XY: + assertTrue(formattedType.indexOf(' ') < 0); + break; + case DIMENSION_SYSTEM_XYZ: + assertTrue(formattedType.endsWith(" Z")); + break; + case DIMENSION_SYSTEM_XYM: + assertTrue(formattedType.endsWith(" M")); + break; + case DIMENSION_SYSTEM_XYZM: + assertTrue(formattedType.endsWith(" ZM")); + break; + } + assertEquals(vg.getTypeAndDimensionSystem(), vg.getGeometryType() + vg.getDimensionSystem() * 1_000); + assertEquals(0, vg.getSRID()); + } + + private void testFiniteOnly() { + for (int i = 0; i < NON_FINITE.length; i++) { + testFiniteOnly(NON_FINITE[i], new EWKBTarget(new ByteArrayOutputStream(), NON_FINITE_DIMENSIONS[i])); + } + for (int i = 0; i < NON_FINITE.length; i++) { + testFiniteOnly(NON_FINITE[i], new EWKTTarget(new StringBuilder(), NON_FINITE_DIMENSIONS[i])); + } + for (int i = 0; i < NON_FINITE.length; i++) { + testFiniteOnly(NON_FINITE[i], new GeometryTarget(NON_FINITE_DIMENSIONS[i])); + } + } + + private void testFiniteOnly(byte[] ewkb, Target target) { + assertThrows(IllegalArgumentException.class, () -> EWKBUtils.parseEWKB(ewkb, target)); + } + + private void testSRID() throws Exception { + byte[] ewkb = EWKTUtils.ewkt2ewkb("SRID=10;GEOMETRYCOLLECTION (POINT (1 2))"); + 
assertEquals(StringUtils.convertHexToBytes("" + // ******** Geometry collection ******** + // BOM (BigEndian) + + "00" + // Only top-level object has a SRID + // type (SRID | POINT) + + "20000007" + // SRID = 10 + + "0000000a" + // 1 item + + "00000001" + // ******** Point ******** + // BOM (BigEndian) + + "00" + // type (POINT) + + "00000001" + // 1.0 + + "3ff0000000000000" + // 2.0 + + "4000000000000000"), ewkb); + assertEquals("SRID=10;GEOMETRYCOLLECTION (POINT (1 2))", EWKTUtils.ewkb2ewkt(ewkb)); + GeometryCollection gc = (GeometryCollection) JTSUtils.ewkb2geometry(ewkb); + assertEquals(10, gc.getSRID()); + assertEquals(10, gc.getGeometryN(0).getSRID()); + assertEquals(ewkb, JTSUtils.geometry2ewkb(gc)); + ValueGeometry vg = ValueGeometry.getFromEWKB(ewkb); + assertEquals(10, vg.getSRID()); + assertEquals(GEOMETRY_COLLECTION, vg.getTypeAndDimensionSystem()); + assertEquals("SRID=-1;POINT EMPTY", EWKTUtils.ewkb2ewkt(EWKTUtils.ewkt2ewkb(" srid=-1 ; POINT EMPTY "))); + } + + private void testDimensions(int expected, byte[] ewkb) { + DimensionSystemTarget dst = new DimensionSystemTarget(); + EWKBUtils.parseEWKB(ewkb, dst); + assertEquals(expected, dst.getDimensionSystem()); + } + + private void testIntersectionAndUnion() { + double[] zero = new double[4]; + assertFalse(GeometryUtils.intersects(null, null)); + assertFalse(GeometryUtils.intersects(null, zero)); + assertFalse(GeometryUtils.intersects(zero, null)); + assertNull(GeometryUtils.union(null, null)); + assertEquals(zero, GeometryUtils.union(null, zero)); + assertEquals(zero, GeometryUtils.union(zero, null)); + // These 30 values with fixed seed 0 are enough to cover all remaining + // cases + Random r = new Random(0); + for (int i = 0; i < 30; i++) { + double[] envelope1 = getEnvelope(r); + double[] envelope2 = getEnvelope(r); + Envelope e1 = convert(envelope1); + Envelope e2 = convert(envelope2); + assertEquals(e1.intersects(e2), GeometryUtils.intersects(envelope1, envelope2)); + e1.expandToInclude(e2); + 
assertEquals(e1, convert(GeometryUtils.union(envelope1, envelope2))); + } + } + + private static Envelope convert(double[] envelope) { + return new Envelope(envelope[MIN_X], envelope[MAX_X], envelope[MIN_Y], envelope[MAX_Y]); + } + + private static double[] getEnvelope(Random r) { + double minX = r.nextDouble(); + double maxX = r.nextDouble(); + if (minX > maxX) { + double t = minX; + minX = maxX; + maxX = t; + } + double minY = r.nextDouble(); + double maxY = r.nextDouble(); + if (minY > maxY) { + double t = minY; + minY = maxY; + maxY = t; + } + return new double[] { minX, maxX, minY, maxY }; + } + + private void testMixedGeometries() throws Exception { + assertThrows(IllegalArgumentException.class, () -> EWKTUtils.ewkt2ewkb(MIXED_WKT)); + assertThrows(IllegalArgumentException.class, () -> EWKTUtils.ewkb2ewkt(MIXED_WKB)); + assertThrows(IllegalArgumentException.class, () -> JTSUtils.ewkb2geometry(MIXED_WKB)); + Geometry g = new WKTReader().read(MIXED_WKT); + assertThrows(IllegalArgumentException.class, () -> JTSUtils.geometry2ewkb(g)); + } + + private static Geometry readWKT(String text) throws ParseException { + WKTReader reader = new WKTReader(); + reader.setIsOldJtsCoordinateSyntaxAllowed(false); + return reader.read(text); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestIntArray.java b/h2/src/test/org/h2/test/unit/TestIntArray.java index 130168a133..04ab6f905d 100644 --- a/h2/src/test/org/h2/test/unit/TestIntArray.java +++ b/h2/src/test/org/h2/test/unit/TestIntArray.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -20,7 +20,7 @@ public class TestIntArray extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java b/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java deleted file mode 100644 index 7825be66e1..0000000000 --- a/h2/src/test/org/h2/test/unit/TestIntIntHashMap.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.util.Random; - -import org.h2.test.TestBase; -import org.h2.util.IntIntHashMap; - -/** - * Tests the IntHashMap class. - */ -public class TestIntIntHashMap extends TestBase { - - private final Random rand = new Random(); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - IntIntHashMap map = new IntIntHashMap(); - map.put(1, 1); - map.put(1, 2); - assertEquals(1, map.size()); - map.put(0, 1); - map.put(0, 2); - assertEquals(2, map.size()); - rand.setSeed(10); - test(true); - test(false); - } - - private void test(boolean random) { - int len = 2000; - int[] x = new int[len]; - for (int i = 0; i < len; i++) { - int key = random ? 
rand.nextInt() : i; - x[i] = key; - } - IntIntHashMap map = new IntIntHashMap(); - for (int i = 0; i < len; i++) { - map.put(x[i], i); - } - for (int i = 0; i < len; i++) { - if (map.get(x[i]) != i) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be " + i); - } - } - for (int i = 1; i < len; i += 2) { - map.remove(x[i]); - } - for (int i = 1; i < len; i += 2) { - if (map.get(x[i]) != -1) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be <=0"); - } - } - for (int i = 1; i < len; i += 2) { - map.put(x[i], i); - } - for (int i = 0; i < len; i++) { - if (map.get(x[i]) != i) { - throw new AssertionError("get " + x[i] + " = " + map.get(i) + - " should be " + i); - } - } - } -} diff --git a/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java b/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java index 90713f27da..1aa4209d0d 100644 --- a/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java +++ b/h2/src/test/org/h2/test/unit/TestIntPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -10,6 +10,7 @@ import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.h2.dev.hash.IntPerfectHash; import org.h2.dev.hash.IntPerfectHash.BitArray; @@ -39,11 +40,11 @@ public void measure() { int size = 10000; test(size / 10); int s; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); s = test(size); - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; System.out.println((double) s / size + " bits/key in " + - time + " ms"); + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); } @@ -79,12 +80,11 @@ private void testBitArray() { private int test(int size) { Random r = new Random(size); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); while (set.size() < size) { set.add(r.nextInt()); } - ArrayList list = new ArrayList(); - list.addAll(set); + ArrayList list = new ArrayList<>(set); byte[] desc = IntPerfectHash.generate(list); int max = test(desc, set); assertEquals(size - 1, max); @@ -93,7 +93,7 @@ private int test(int size) { private int test(byte[] desc, Set set) { int max = -1; - HashSet test = new HashSet(); + HashSet test = new HashSet<>(); IntPerfectHash hash = new IntPerfectHash(desc); for (int x : set) { int h = hash.get(x); diff --git a/h2/src/test/org/h2/test/unit/TestInterval.java b/h2/src/test/org/h2/test/unit/TestInterval.java new file mode 100644 index 0000000000..ddbf276dc6 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestInterval.java @@ -0,0 +1,547 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND; + +import org.h2.api.Interval; +import org.h2.test.TestBase; +import org.h2.util.StringUtils; + +/** + * Test cases for Interval. 
+ */ +public class TestInterval extends TestBase { + + private static final long MAX = 999_999_999_999_999_999L; + + private static final long MIN = -999_999_999_999_999_999L; + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testOfYears(); + testOfMonths(); + testOfDays(); + testOfHours(); + testOfMinutes(); + testOfSeconds(); + testOfSeconds2(); + testOfNanos(); + testOfYearsMonths(); + testOfDaysHours(); + testOfDaysHoursMinutes(); + testOfDaysHoursMinutesSeconds(); + testOfHoursMinutes(); + testOfHoursMinutesSeconds(); + testOfMinutesSeconds(); + } + + private void testOfYears() { + testOfYearsGood(0); + testOfYearsGood(100); + testOfYearsGood(-100); + testOfYearsGood(MAX); + testOfYearsGood(MIN); + testOfYearsBad(MAX + 1); + testOfYearsBad(MIN - 1); + testOfYearsBad(Long.MAX_VALUE); + testOfYearsBad(Long.MIN_VALUE); + } + + private void testOfYearsGood(long years) { + Interval i = Interval.ofYears(years); + assertEquals(years, i.getYears()); + assertEquals("INTERVAL '" + years + "' YEAR", i.toString()); + } + + private void testOfYearsBad(long years) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofYears(years)); + } + + private void testOfMonths() { + testOfMonthsGood(0); + testOfMonthsGood(100); + testOfMonthsGood(-100); + testOfMonthsGood(MAX); + testOfMonthsGood(MIN); + testOfMonthsBad(MAX + 1); + testOfMonthsBad(MIN - 1); + testOfMonthsBad(Long.MAX_VALUE); + testOfMonthsBad(Long.MIN_VALUE); + } + + private void testOfMonthsGood(long months) { + Interval i = Interval.ofMonths(months); + assertEquals(months, i.getMonths()); + assertEquals("INTERVAL '" + months + "' MONTH", i.toString()); + } + + private void testOfMonthsBad(long months) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofMonths(months)); + } + + private void testOfDays() { + 
testOfDaysGood(0); + testOfDaysGood(100); + testOfDaysGood(-100); + testOfDaysGood(MAX); + testOfDaysGood(MIN); + testOfDaysBad(MAX + 1); + testOfDaysBad(MIN - 1); + testOfDaysBad(Long.MAX_VALUE); + testOfDaysBad(Long.MIN_VALUE); + } + + private void testOfDaysGood(long days) { + Interval i = Interval.ofDays(days); + assertEquals(days, i.getDays()); + assertEquals("INTERVAL '" + days + "' DAY", i.toString()); + } + + private void testOfDaysBad(long days) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofDays(days)); + } + + private void testOfHours() { + testOfHoursGood(0); + testOfHoursGood(100); + testOfHoursGood(-100); + testOfHoursGood(MAX); + testOfHoursGood(MIN); + testOfHoursBad(MAX + 1); + testOfHoursBad(MIN - 1); + testOfHoursBad(Long.MAX_VALUE); + testOfHoursBad(Long.MIN_VALUE); + } + + private void testOfHoursGood(long hours) { + Interval i = Interval.ofHours(hours); + assertEquals(hours, i.getHours()); + assertEquals("INTERVAL '" + hours + "' HOUR", i.toString()); + } + + private void testOfHoursBad(long hours) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofHours(hours)); + } + + private void testOfMinutes() { + testOfMinutesGood(0); + testOfMinutesGood(100); + testOfMinutesGood(-100); + testOfMinutesGood(MAX); + testOfMinutesGood(MIN); + testOfMinutesBad(MAX + 1); + testOfMinutesBad(MIN - 1); + testOfMinutesBad(Long.MAX_VALUE); + testOfMinutesBad(Long.MIN_VALUE); + } + + private void testOfMinutesGood(long minutes) { + Interval i = Interval.ofMinutes(minutes); + assertEquals(minutes, i.getMinutes()); + assertEquals("INTERVAL '" + minutes + "' MINUTE", i.toString()); + } + + private void testOfMinutesBad(long minutes) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofMinutes(minutes)); + } + + private void testOfSeconds() { + testOfSecondsGood(0); + testOfSecondsGood(100); + testOfSecondsGood(-100); + testOfSecondsGood(MAX); + testOfSecondsGood(MIN); + testOfSecondsBad(MAX + 1); + testOfSecondsBad(MIN - 
1); + testOfSecondsBad(Long.MAX_VALUE); + testOfSecondsBad(Long.MIN_VALUE); + } + + private void testOfSecondsGood(long seconds) { + Interval i = Interval.ofSeconds(seconds); + assertEquals(seconds, i.getSeconds()); + assertEquals("INTERVAL '" + seconds + "' SECOND", i.toString()); + } + + private void testOfSecondsBad(long seconds) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofSeconds(seconds)); + } + + private void testOfSeconds2() { + testOfSeconds2Good(0, 0); + testOfSeconds2Good(0, -2); + testOfSeconds2Good(100, 5); + testOfSeconds2Good(-100, -1); + testOfSeconds2Good(MAX, 999_999_999); + testOfSeconds2Good(MIN, -999_999_999); + testOfSeconds2Bad(0, 1_000_000_000); + testOfSeconds2Bad(0, -1_000_000_000); + testOfSeconds2Bad(MAX + 1, 0); + testOfSeconds2Bad(MIN - 1, 0); + testOfSeconds2Bad(Long.MAX_VALUE, 0); + testOfSeconds2Bad(Long.MIN_VALUE, 0); + testOfSeconds2Bad(0, Integer.MAX_VALUE); + testOfSeconds2Bad(0, Integer.MIN_VALUE); + } + + private void testOfSeconds2Good(long seconds, int nanos) { + Interval i = Interval.ofSeconds(seconds, nanos); + assertEquals(seconds, i.getSeconds()); + assertEquals(nanos, i.getNanosOfSecond()); + if (Math.abs(seconds) < 9_000_000_000L) { + assertEquals(seconds * NANOS_PER_SECOND + nanos, i.getSecondsAndNanos()); + } + StringBuilder b = new StringBuilder("INTERVAL '"); + if (seconds < 0 || nanos < 0) { + b.append('-'); + } + b.append(Math.abs(seconds)); + if (nanos != 0) { + b.append('.'); + StringUtils.appendZeroPadded(b, 9, Math.abs(nanos)); + stripTrailingZeroes(b); + } + b.append("' SECOND"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfSeconds2Bad(long seconds, int nanos) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofSeconds(seconds, nanos)); + } + + private void testOfNanos() { + testOfNanosGood(0); + testOfNanosGood(100); + testOfNanosGood(-100); + testOfNanosGood(Long.MAX_VALUE); + testOfNanosGood(Long.MIN_VALUE); + } + + private void 
testOfNanosGood(long nanos) { + Interval i = Interval.ofNanos(nanos); + long seconds = nanos / NANOS_PER_SECOND; + long nanosOfSecond = nanos % NANOS_PER_SECOND; + assertEquals(seconds, i.getSeconds()); + assertEquals(nanosOfSecond, i.getNanosOfSecond()); + assertEquals(nanos, i.getSecondsAndNanos()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (nanos < 0) { + b.append('-'); + } + b.append(Math.abs(seconds)); + if (nanosOfSecond != 0) { + b.append('.'); + StringUtils.appendZeroPadded(b, 9, Math.abs(nanosOfSecond)); + stripTrailingZeroes(b); + } + b.append("' SECOND"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfYearsMonths() { + testOfYearsMonthsGood(0, 0); + testOfYearsMonthsGood(0, -2); + testOfYearsMonthsGood(100, 5); + testOfYearsMonthsGood(-100, -1); + testOfYearsMonthsGood(MAX, 11); + testOfYearsMonthsGood(MIN, -11); + testOfYearsMonthsBad(0, 12); + testOfYearsMonthsBad(0, -12); + testOfYearsMonthsBad(MAX + 1, 0); + testOfYearsMonthsBad(MIN - 1, 0); + testOfYearsMonthsBad(Long.MAX_VALUE, 0); + testOfYearsMonthsBad(Long.MIN_VALUE, 0); + testOfYearsMonthsBad(0, Integer.MAX_VALUE); + testOfYearsMonthsBad(0, Integer.MIN_VALUE); + } + + private void testOfYearsMonthsGood(long years, int months) { + Interval i = Interval.ofYearsMonths(years, months); + assertEquals(years, i.getYears()); + assertEquals(months, i.getMonths()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (years < 0 || months < 0) { + b.append('-'); + } + b.append(Math.abs(years)).append('-').append(Math.abs(months)).append("' YEAR TO MONTH"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfYearsMonthsBad(long years, int months) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofYearsMonths(years, months)); + } + + private void testOfDaysHours() { + testOfDaysHoursGood(0, 0); + testOfDaysHoursGood(0, -2); + testOfDaysHoursGood(100, 5); + testOfDaysHoursGood(-100, -1); + testOfDaysHoursGood(MAX, 23); + 
testOfDaysHoursGood(MIN, -23); + testOfDaysHoursBad(0, 24); + testOfDaysHoursBad(0, -24); + testOfDaysHoursBad(MAX + 1, 0); + testOfDaysHoursBad(MIN - 1, 0); + testOfDaysHoursBad(Long.MAX_VALUE, 0); + testOfDaysHoursBad(Long.MIN_VALUE, 0); + testOfDaysHoursBad(0, Integer.MAX_VALUE); + testOfDaysHoursBad(0, Integer.MIN_VALUE); + } + + private void testOfDaysHoursGood(long days, int hours) { + Interval i = Interval.ofDaysHours(days, hours); + assertEquals(days, i.getDays()); + assertEquals(hours, i.getHours()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (days < 0 || hours < 0) { + b.append('-'); + } + b.append(Math.abs(days)).append(' '); + StringUtils.appendTwoDigits(b, Math.abs(hours)); + b.append("' DAY TO HOUR"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfDaysHoursBad(long days, int hours) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofDaysHours(days, hours)); + } + + private void testOfDaysHoursMinutes() { + testOfDaysHoursMinutesGood(0, 0, 0); + testOfDaysHoursMinutesGood(0, -2, 0); + testOfDaysHoursMinutesGood(0, 0, -2); + testOfDaysHoursMinutesGood(100, 5, 3); + testOfDaysHoursMinutesGood(-100, -1, -3); + testOfDaysHoursMinutesGood(MAX, 23, 59); + testOfDaysHoursMinutesGood(MIN, -23, -59); + testOfDaysHoursMinutesBad(0, 24, 0); + testOfDaysHoursMinutesBad(0, -24, 0); + testOfDaysHoursMinutesBad(0, 0, 60); + testOfDaysHoursMinutesBad(0, 0, -60); + testOfDaysHoursMinutesBad(MAX + 1, 0, 0); + testOfDaysHoursMinutesBad(MIN - 1, 0, 0); + testOfDaysHoursMinutesBad(Long.MAX_VALUE, 0, 0); + testOfDaysHoursMinutesBad(Long.MIN_VALUE, 0, 0); + testOfDaysHoursMinutesBad(0, Integer.MAX_VALUE, 0); + testOfDaysHoursMinutesBad(0, Integer.MIN_VALUE, 0); + testOfDaysHoursMinutesBad(0, 0, Integer.MAX_VALUE); + testOfDaysHoursMinutesBad(0, 0, Integer.MIN_VALUE); + } + + private void testOfDaysHoursMinutesGood(long days, int hours, int minutes) { + Interval i = Interval.ofDaysHoursMinutes(days, hours, minutes); + 
assertEquals(days, i.getDays()); + assertEquals(hours, i.getHours()); + assertEquals(minutes, i.getMinutes()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (days < 0 || hours < 0 || minutes < 0) { + b.append('-'); + } + b.append(Math.abs(days)).append(' '); + StringUtils.appendTwoDigits(b, Math.abs(hours)); + b.append(':'); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); + b.append("' DAY TO MINUTE"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfDaysHoursMinutesBad(long days, int hours, int minutes) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofDaysHoursMinutes(days, hours, minutes)); + } + + private void testOfDaysHoursMinutesSeconds() { + testOfDaysHoursMinutesSecondsGood(0, 0, 0, 0); + testOfDaysHoursMinutesSecondsGood(0, -2, 0, 0); + testOfDaysHoursMinutesSecondsGood(0, 0, -2, 0); + testOfDaysHoursMinutesSecondsGood(0, 0, 0, -2); + testOfDaysHoursMinutesSecondsGood(100, 5, 3, 4); + testOfDaysHoursMinutesSecondsGood(-100, -1, -3, -4); + testOfDaysHoursMinutesSecondsGood(MAX, 23, 59, 59); + testOfDaysHoursMinutesSecondsGood(MIN, -23, -59, -59); + testOfDaysHoursMinutesSecondsBad(0, 24, 0, 0); + testOfDaysHoursMinutesSecondsBad(0, -24, 0, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, 60, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, -60, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, 0, 60); + testOfDaysHoursMinutesSecondsBad(0, 0, 0, -60); + testOfDaysHoursMinutesSecondsBad(MAX + 1, 0, 0, 0); + testOfDaysHoursMinutesSecondsBad(MIN - 1, 0, 0, 0); + testOfDaysHoursMinutesSecondsBad(Long.MAX_VALUE, 0, 0, 0); + testOfDaysHoursMinutesSecondsBad(Long.MIN_VALUE, 0, 0, 0); + testOfDaysHoursMinutesSecondsBad(0, Integer.MAX_VALUE, 0, 0); + testOfDaysHoursMinutesSecondsBad(0, Integer.MIN_VALUE, 0, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, Integer.MAX_VALUE, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, Integer.MIN_VALUE, 0); + testOfDaysHoursMinutesSecondsBad(0, 0, 0, Integer.MAX_VALUE); + 
testOfDaysHoursMinutesSecondsBad(0, 0, 0, Integer.MIN_VALUE); + } + + private void testOfDaysHoursMinutesSecondsGood(long days, int hours, int minutes, int seconds) { + Interval i = Interval.ofDaysHoursMinutesSeconds(days, hours, minutes, seconds); + assertEquals(days, i.getDays()); + assertEquals(hours, i.getHours()); + assertEquals(minutes, i.getMinutes()); + assertEquals(seconds, i.getSeconds()); + assertEquals(0, i.getNanosOfSecond()); + assertEquals(seconds * NANOS_PER_SECOND, i.getSecondsAndNanos()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (days < 0 || hours < 0 || minutes < 0 || seconds < 0) { + b.append('-'); + } + b.append(Math.abs(days)).append(' '); + StringUtils.appendTwoDigits(b, Math.abs(hours)); + b.append(':'); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); + b.append(':'); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); + b.append("' DAY TO SECOND"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfDaysHoursMinutesSecondsBad(long days, int hours, int minutes, int seconds) { + assertThrows(IllegalArgumentException.class, + () -> Interval.ofDaysHoursMinutesSeconds(days, hours, minutes, seconds)); + } + + private void testOfHoursMinutes() { + testOfHoursMinutesGood(0, 0); + testOfHoursMinutesGood(0, -2); + testOfHoursMinutesGood(100, 5); + testOfHoursMinutesGood(-100, -1); + testOfHoursMinutesGood(MAX, 59); + testOfHoursMinutesGood(MIN, -59); + testOfHoursMinutesBad(0, 60); + testOfHoursMinutesBad(0, -60); + testOfHoursMinutesBad(MAX + 1, 0); + testOfHoursMinutesBad(MIN - 1, 0); + testOfHoursMinutesBad(Long.MAX_VALUE, 0); + testOfHoursMinutesBad(Long.MIN_VALUE, 0); + testOfHoursMinutesBad(0, Integer.MAX_VALUE); + testOfHoursMinutesBad(0, Integer.MIN_VALUE); + } + + private void testOfHoursMinutesGood(long hours, int minutes) { + Interval i = Interval.ofHoursMinutes(hours, minutes); + assertEquals(hours, i.getHours()); + assertEquals(minutes, i.getMinutes()); + StringBuilder b = new 
StringBuilder("INTERVAL '"); + if (hours < 0 || minutes < 0) { + b.append('-'); + } + b.append(Math.abs(hours)).append(':'); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); + b.append("' HOUR TO MINUTE"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfHoursMinutesBad(long hours, int minutes) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofHoursMinutes(hours, minutes)); + } + + private void testOfHoursMinutesSeconds() { + testOfHoursMinutesSecondsGood(0, 0, 0); + testOfHoursMinutesSecondsGood(0, -2, 0); + testOfHoursMinutesSecondsGood(0, 0, -2); + testOfHoursMinutesSecondsGood(100, 5, 3); + testOfHoursMinutesSecondsGood(-100, -1, -3); + testOfHoursMinutesSecondsGood(MAX, 59, 59); + testOfHoursMinutesSecondsGood(MIN, -59, -59); + testOfHoursMinutesSecondsBad(0, 60, 0); + testOfHoursMinutesSecondsBad(0, -60, 0); + testOfHoursMinutesSecondsBad(0, 0, 60); + testOfHoursMinutesSecondsBad(0, 0, -60); + testOfHoursMinutesSecondsBad(MAX + 1, 0, 0); + testOfHoursMinutesSecondsBad(MIN - 1, 0, 0); + testOfHoursMinutesSecondsBad(Long.MAX_VALUE, 0, 0); + testOfHoursMinutesSecondsBad(Long.MIN_VALUE, 0, 0); + testOfHoursMinutesSecondsBad(0, Integer.MAX_VALUE, 0); + testOfHoursMinutesSecondsBad(0, Integer.MIN_VALUE, 0); + testOfHoursMinutesSecondsBad(0, 0, Integer.MAX_VALUE); + testOfHoursMinutesSecondsBad(0, 0, Integer.MIN_VALUE); + } + + private void testOfHoursMinutesSecondsGood(long hours, int minutes, int seconds) { + Interval i = Interval.ofHoursMinutesSeconds(hours, minutes, seconds); + assertEquals(hours, i.getHours()); + assertEquals(minutes, i.getMinutes()); + assertEquals(seconds, i.getSeconds()); + assertEquals(0, i.getNanosOfSecond()); + assertEquals(seconds * NANOS_PER_SECOND, i.getSecondsAndNanos()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (hours < 0 || minutes < 0 || seconds < 0) { + b.append('-'); + } + b.append(Math.abs(hours)).append(':'); + StringUtils.appendTwoDigits(b, Math.abs(minutes)); + 
b.append(':'); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); + b.append("' HOUR TO SECOND"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfHoursMinutesSecondsBad(long hours, int minutes, int seconds) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofHoursMinutesSeconds(hours, minutes, seconds)); + } + + private void testOfMinutesSeconds() { + testOfMinutesSecondsGood(0, 0); + testOfMinutesSecondsGood(0, -2); + testOfMinutesSecondsGood(100, 5); + testOfMinutesSecondsGood(-100, -1); + testOfMinutesSecondsGood(MAX, 59); + testOfMinutesSecondsGood(MIN, -59); + testOfMinutesSecondsBad(0, 60); + testOfMinutesSecondsBad(0, -60); + testOfMinutesSecondsBad(MAX + 1, 0); + testOfMinutesSecondsBad(MIN - 1, 0); + testOfMinutesSecondsBad(Long.MAX_VALUE, 0); + testOfMinutesSecondsBad(Long.MIN_VALUE, 0); + testOfMinutesSecondsBad(0, Integer.MAX_VALUE); + testOfMinutesSecondsBad(0, Integer.MIN_VALUE); + } + + private void testOfMinutesSecondsGood(long minutes, int seconds) { + Interval i = Interval.ofMinutesSeconds(minutes, seconds); + assertEquals(minutes, i.getMinutes()); + assertEquals(seconds, i.getSeconds()); + assertEquals(0, i.getNanosOfSecond()); + assertEquals(seconds * NANOS_PER_SECOND, i.getSecondsAndNanos()); + StringBuilder b = new StringBuilder("INTERVAL '"); + if (minutes < 0 || seconds < 0) { + b.append('-'); + } + b.append(Math.abs(minutes)).append(':'); + StringUtils.appendTwoDigits(b, Math.abs(seconds)); + b.append("' MINUTE TO SECOND"); + assertEquals(b.toString(), i.toString()); + } + + private void testOfMinutesSecondsBad(long minutes, int seconds) { + assertThrows(IllegalArgumentException.class, () -> Interval.ofMinutesSeconds(minutes, seconds)); + } + + private static void stripTrailingZeroes(StringBuilder b) { + int i = b.length() - 1; + if (b.charAt(i) == '0') { + while (b.charAt(--i) == '0') { + // do nothing + } + b.setLength(i + 1); + } + } + +} diff --git 
a/h2/src/test/org/h2/test/unit/TestJakartaServlet.java b/h2/src/test/org/h2/test/unit/TestJakartaServlet.java new file mode 100644 index 0000000000..6f6cb83c03 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestJakartaServlet.java @@ -0,0 +1,437 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.InputStream; +import java.net.URL; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Enumeration; +import java.util.EventListener; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import jakarta.servlet.Filter; +import jakarta.servlet.FilterRegistration; +import jakarta.servlet.FilterRegistration.Dynamic; +import jakarta.servlet.RequestDispatcher; +import jakarta.servlet.Servlet; +import jakarta.servlet.ServletContext; +import jakarta.servlet.ServletContextEvent; +import jakarta.servlet.ServletException; +import jakarta.servlet.ServletRegistration; +import jakarta.servlet.SessionCookieConfig; +import jakarta.servlet.SessionTrackingMode; +import jakarta.servlet.descriptor.JspConfigDescriptor; +import org.h2.api.ErrorCode; +import org.h2.server.web.JakartaDbStarter; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests the JakartaDbStarter servlet. + * This test simulates a minimum servlet container environment. + */ +public class TestJakartaServlet extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + /** + * Minimum ServletContext implementation. + * Most methods are not implemented. 
+ */ + static class TestServletContext implements ServletContext { + + private final Properties initParams = new Properties(); + private final HashMap attributes = new HashMap<>(); + + @Override + public void setAttribute(String key, Object value) { + attributes.put(key, value); + } + + @Override + public Object getAttribute(String key) { + return attributes.get(key); + } + + @Override + public boolean setInitParameter(String key, String value) { + initParams.setProperty(key, value); + return true; + } + + @Override + public String getInitParameter(String key) { + return initParams.getProperty(key); + } + + @Override + public Enumeration getAttributeNames() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletContext getContext(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Enumeration getInitParameterNames() { + throw new UnsupportedOperationException(); + } + + @Override + public int getMajorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public String getMimeType(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public int getMinorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public RequestDispatcher getNamedDispatcher(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getRealPath(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public RequestDispatcher getRequestDispatcher(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public URL getResource(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream getResourceAsStream(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Set getResourcePaths(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getServerInfo() { + throw new 
UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Override + @Deprecated + public Servlet getServlet(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public String getServletContextName() { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Deprecated + @Override + public Enumeration getServletNames() { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.0 + */ + @Deprecated + @Override + public Enumeration getServlets() { + throw new UnsupportedOperationException(); + } + + @Override + public void log(String string) { + throw new UnsupportedOperationException(); + } + + /** + * @deprecated as of servlet API 2.1 + */ + @Deprecated + @Override + public void log(Exception exception, String string) { + throw new UnsupportedOperationException(); + } + + @Override + public void log(String string, Throwable throwable) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeAttribute(String string) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, String arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, Filter arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public Dynamic addFilter(String arg0, Class arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(T arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void addListener(Class arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, String arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public 
jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, Servlet arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public jakarta.servlet.ServletRegistration.Dynamic addServlet( + String arg0, Class arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public T createFilter(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public T createListener(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public T createServlet(Class arg0) + throws ServletException { + throw new UnsupportedOperationException(); + } + + @Override + public void declareRoles(String... arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public ClassLoader getClassLoader() { + throw new UnsupportedOperationException(); + } + + @Override + public String getContextPath() { + throw new UnsupportedOperationException(); + } + + @Override + public Set getDefaultSessionTrackingModes() { + throw new UnsupportedOperationException(); + } + + @Override + public int getEffectiveMajorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public int getEffectiveMinorVersion() { + throw new UnsupportedOperationException(); + } + + @Override + public Set getEffectiveSessionTrackingModes() { + throw new UnsupportedOperationException(); + } + + @Override + public FilterRegistration getFilterRegistration(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public Map getFilterRegistrations() { + throw new UnsupportedOperationException(); + } + + @Override + public JspConfigDescriptor getJspConfigDescriptor() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletRegistration getServletRegistration(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public Map getServletRegistrations() { + throw new UnsupportedOperationException(); 
+ } + + @Override + public SessionCookieConfig getSessionCookieConfig() { + throw new UnsupportedOperationException(); + } + + + @Override + public void setSessionTrackingModes(Set arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public String getVirtualServerName() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletRegistration.Dynamic addJspFile(String servletName, String jspFile) { + throw new UnsupportedOperationException(); + } + + @Override + public int getSessionTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setSessionTimeout(int sessionTimeout) { + throw new UnsupportedOperationException(); + } + + @Override + public String getRequestCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setRequestCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + @Override + public String getResponseCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setResponseCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + } + + @Override + public boolean isEnabled() { + if (config.networked || config.memory) { + return false; + } + return true; + } + + @Override + public void test() throws SQLException { + JakartaDbStarter listener = new JakartaDbStarter(); + + TestServletContext context = new TestServletContext(); + String url = getURL("servlet", true); + context.setInitParameter("db.url", url); + context.setInitParameter("db.user", getUser()); + context.setInitParameter("db.password", getPassword()); + context.setInitParameter("db.tcpServer", "-tcpPort 8888"); + + ServletContextEvent event = new ServletContextEvent(context); + listener.contextInitialized(event); + + Connection conn1 = listener.getConnection(); + Connection conn1a = (Connection) context.getAttribute("connection"); + assertTrue(conn1 == conn1a); + Statement 
stat1 = conn1.createStatement(); + stat1.execute("CREATE TABLE T(ID INT)"); + + String u2 = url.substring(url.indexOf("servlet")); + u2 = "jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/" + u2; + Connection conn2 = DriverManager.getConnection( + u2, getUser(), getPassword()); + Statement stat2 = conn2.createStatement(); + stat2.execute("SELECT * FROM T"); + stat2.execute("DROP TABLE T"); + + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat1). + execute("SELECT * FROM T"); + conn2.close(); + + listener.contextDestroyed(event); + + // listener must be stopped + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", getUser(), + getPassword())); + + // connection must be closed + assertThrows(ErrorCode.OBJECT_CLOSED, stat1). + execute("SELECT * FROM DUAL"); + + deleteDb("servlet"); + + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestJmx.java b/h2/src/test/org/h2/test/unit/TestJmx.java index 258dcb04a9..20f6aea825 100644 --- a/h2/src/test/org/h2/test/unit/TestJmx.java +++ b/h2/src/test/org/h2/test/unit/TestJmx.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -16,13 +16,14 @@ import javax.management.MBeanOperationInfo; import javax.management.MBeanServer; import javax.management.ObjectName; +import org.h2.engine.Constants; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Tests the JMX feature. */ -public class TestJmx extends TestBase { +public class TestJmx extends TestDb { /** * Run just this test. @@ -30,7 +31,8 @@ public class TestJmx extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase base = TestBase.createCaller().init(); + base.testFromMain(); } @Override @@ -66,26 +68,8 @@ public void test() throws Exception { getAttribute(name, "FileReadCount").toString()); assertEquals("0", mbeanServer. getAttribute(name, "FileWriteCount").toString()); - assertEquals("0", mbeanServer. - getAttribute(name, "FileWriteCountTotal").toString()); - if (config.mvStore) { - assertEquals("1", mbeanServer. - getAttribute(name, "LogMode").toString()); - mbeanServer.setAttribute(name, new Attribute("LogMode", 2)); - assertEquals("2", mbeanServer. - getAttribute(name, "LogMode").toString()); - } assertEquals("REGULAR", mbeanServer. getAttribute(name, "Mode").toString()); - assertEquals("false", mbeanServer. - getAttribute(name, "MultiThreaded").toString()); - if (config.mvStore) { - assertEquals("true", mbeanServer. - getAttribute(name, "Mvcc").toString()); - } else { - assertEquals("false", mbeanServer. - getAttribute(name, "Mvcc").toString()); - } assertEquals("false", mbeanServer. getAttribute(name, "ReadOnly").toString()); assertEquals("1", mbeanServer. @@ -93,36 +77,31 @@ public void test() throws Exception { mbeanServer.setAttribute(name, new Attribute("TraceLevel", 0)); assertEquals("0", mbeanServer. getAttribute(name, "TraceLevel").toString()); - assertTrue(mbeanServer. 
- getAttribute(name, "Version").toString().startsWith("1.")); - assertEquals(14, info.getAttributes().length); + assertEquals(Constants.FULL_VERSION, mbeanServer.getAttribute(name, "Version").toString()); + assertEquals(10, info.getAttributes().length); result = mbeanServer.invoke(name, "listSettings", null, null).toString(); - assertTrue(result.contains("ANALYZE_AUTO")); + assertContains(result, "ANALYZE_AUTO"); conn.setAutoCommit(false); stat.execute("create table test(id int)"); stat.execute("insert into test values(1)"); result = mbeanServer.invoke(name, "listSessions", null, null).toString(); - assertTrue(result.contains("session id")); - if (config.mvcc) { - assertTrue(result.contains("read lock")); - } else { - assertTrue(result.contains("write lock")); - } + assertContains(result, "session id"); + assertContains(result, "read lock"); assertEquals(2, info.getOperations().length); - assertTrue(info.getDescription().contains("database")); - attrMap = New.hashMap(); + assertContains(info.getDescription(), "database"); + attrMap = new HashMap<>(); for (MBeanAttributeInfo a : info.getAttributes()) { attrMap.put(a.getName(), a); } - assertTrue(attrMap.get("CacheSize").getDescription().contains("KB")); - opMap = New.hashMap(); + assertContains(attrMap.get("CacheSize").getDescription(), "KB"); + opMap = new HashMap<>(); for (MBeanOperationInfo o : info.getOperations()) { opMap.put(o.getName(), o); } - assertTrue(opMap.get("listSessions").getDescription().contains("lock")); + assertContains(opMap.get("listSessions").getDescription(), "lock"); assertEquals(MBeanOperationInfo.INFO, opMap.get("listSessions").getImpact()); conn.close(); @@ -136,37 +115,27 @@ public void test() throws Exception { Set set = mbeanServer.queryNames(name, null); name = (ObjectName) set.iterator().next(); - assertEquals("16384", mbeanServer. - getAttribute(name, "CacheSizeMax").toString()); + if (config.memory) { + assertEquals("0", mbeanServer. 
+ getAttribute(name, "CacheSizeMax").toString()); + } else { + assertEquals("16384", mbeanServer. + getAttribute(name, "CacheSizeMax").toString()); + } mbeanServer.setAttribute(name, new Attribute("CacheSizeMax", 1)); - if (config.mvStore) { + if (config.memory) { + assertEquals("0", mbeanServer. + getAttribute(name, "CacheSizeMax").toString()); + } else { assertEquals("1024", mbeanServer. getAttribute(name, "CacheSizeMax").toString()); assertEquals("0", mbeanServer. getAttribute(name, "CacheSize").toString()); assertTrue(0 < (Long) mbeanServer. getAttribute(name, "FileReadCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCount")); - assertEquals("0", mbeanServer. - getAttribute(name, "FileWriteCountTotal").toString()); - } else { - assertEquals("1", mbeanServer. - getAttribute(name, "CacheSizeMax").toString()); - assertTrue(0 < (Integer) mbeanServer. - getAttribute(name, "CacheSize")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileSize")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileReadCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCount")); - assertTrue(0 < (Long) mbeanServer. - getAttribute(name, "FileWriteCountTotal")); + // FileWriteCount can be not yet updated and may return 0 + assertTrue(0 <= (Long) mbeanServer.getAttribute(name, "FileWriteCount")); } - mbeanServer.setAttribute(name, new Attribute("LogMode", 0)); - assertEquals("0", mbeanServer. - getAttribute(name, "LogMode").toString()); conn.close(); diff --git a/h2/src/test/org/h2/test/unit/TestJsonUtils.java b/h2/src/test/org/h2/test/unit/TestJsonUtils.java new file mode 100644 index 0000000000..35b3bae17f --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestJsonUtils.java @@ -0,0 +1,340 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.math.BigDecimal; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.Callable; + +import org.h2.test.TestBase; +import org.h2.util.json.JSONByteArrayTarget; +import org.h2.util.json.JSONBytesSource; +import org.h2.util.json.JSONItemType; +import org.h2.util.json.JSONStringSource; +import org.h2.util.json.JSONStringTarget; +import org.h2.util.json.JSONTarget; +import org.h2.util.json.JSONValidationTargetWithUniqueKeys; +import org.h2.util.json.JSONValidationTargetWithoutUniqueKeys; +import org.h2.util.json.JSONValueTarget; + +/** + * Tests the classes from org.h2.util.json package. + */ +public class TestJsonUtils extends TestBase { + + private static final Charset[] CHARSETS = { StandardCharsets.UTF_8, StandardCharsets.UTF_16BE, + StandardCharsets.UTF_16LE, Charset.forName("UTF-32BE"), Charset.forName("UTF-32LE") }; + + private static final Callable> STRING_TARGET = () -> new JSONStringTarget(); + + private static final Callable> BYTES_TARGET = () -> new JSONByteArrayTarget(); + + private static final Callable> VALUE_TARGET = () -> new JSONValueTarget(); + + private static final Callable> JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS = // + () -> new JSONValidationTargetWithoutUniqueKeys(); + + private static final Callable> JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS = // + () -> new JSONValidationTargetWithUniqueKeys(); + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testTargetErrorDetection(); + testSourcesAndTargets(); + testUtfError(); + testLongNesting(); + testEncodeString(); + } + + private void testTargetErrorDetection() throws Exception { + testTargetErrorDetection(STRING_TARGET); + testTargetErrorDetection(BYTES_TARGET); + testTargetErrorDetection(VALUE_TARGET); + testTargetErrorDetection(JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS); + testTargetErrorDetection(JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS); + } + + private void testTargetErrorDetection(Callable> constructor) throws Exception { + assertThrows(RuntimeException.class, () -> constructor.call().endObject()); + assertThrows(RuntimeException.class, () -> constructor.call().endArray()); + // Unexpected member without object + assertThrows(RuntimeException.class, () -> constructor.call().member("1")); + // Unexpected member inside array + JSONTarget target1 = constructor.call(); + target1.startArray(); + assertThrows(RuntimeException.class, () -> target1.member("1")); + // Unexpected member without value + JSONTarget target2 = constructor.call(); + target2.startObject(); + target2.member("1"); + assertThrows(RuntimeException.class, () -> target2.member("2")); + JSONTarget target3 = constructor.call(); + target3.startObject(); + target3.member("1"); + assertThrows(RuntimeException.class, () -> target3.endObject()); + // Unexpected value without member name + testJsonStringTargetErrorDetectionAllValues(() -> { + JSONTarget target = constructor.call(); + target.startObject(); + return target; + }); + // Unexpected second value + testJsonStringTargetErrorDetectionAllValues(() -> { + JSONTarget target = constructor.call(); + target.valueNull(); + return target; + }); + // No value + assertIncomplete(constructor.call()); + // Unclosed object + JSONTarget target = constructor.call(); + target.startObject(); + assertIncomplete(target); + // 
Unclosed array + target = constructor.call(); + target.startObject(); + assertIncomplete(target); + // End of array after start of object or vice versa + JSONTarget target6 = constructor.call(); + target6.startObject(); + assertThrows(RuntimeException.class, () -> target6.endArray()); + JSONTarget target7 = constructor.call(); + target7.startArray(); + assertThrows(RuntimeException.class, () -> target7.endObject()); + } + + private void assertIncomplete(JSONTarget target) { + assertThrows(RuntimeException.class, () -> target.getResult()); + } + + private void testJsonStringTargetErrorDetectionAllValues(Callable> initializer) throws Exception { + assertThrows(RuntimeException.class, () -> initializer.call().valueNull()); + assertThrows(RuntimeException.class, () -> initializer.call().valueFalse()); + assertThrows(RuntimeException.class, () -> initializer.call().valueTrue()); + assertThrows(RuntimeException.class, () -> initializer.call().valueNumber(BigDecimal.ONE)); + assertThrows(RuntimeException.class, () -> initializer.call().valueString("string")); + } + + private void testSourcesAndTargets() throws Exception { + testSourcesAndTargets("1", "1"); + testSourcesAndTargets("\uFEFF0", "0"); + testSourcesAndTargets("\uFEFF-1", "-1"); + testSourcesAndTargets("null", "null"); + testSourcesAndTargets("true", "true"); + testSourcesAndTargets("false", "false"); + testSourcesAndTargets("1.2", "1.2"); + testSourcesAndTargets("1.2e+1", "12"); + testSourcesAndTargets("10000.0", "10000.0"); + testSourcesAndTargets("\t\r\n 1.2E-1 ", "0.12"); + testSourcesAndTargets("9.99e99", "9.99E99"); + testSourcesAndTargets("\"\"", "\"\""); + testSourcesAndTargets("\"\\b\\f\\t\\r\\n\\\"\\/\\\\\\u0019\\u0020\"", "\"\\b\\f\\t\\r\\n\\\"/\\\\\\u0019 \""); + testSourcesAndTargets("{ }", "{}"); + testSourcesAndTargets("{\"a\" : 1}", "{\"a\":1}"); + testSourcesAndTargets("{\"a\" : 1, \"b\":[], \"c\":{}}", "{\"a\":1,\"b\":[],\"c\":{}}"); + testSourcesAndTargets("{\"a\" : 1, \"b\":[1,null, 
true,false,{}]}", "{\"a\":1,\"b\":[1,null,true,false,{}]}"); + testSourcesAndTargets("{\"1\" : [[[[[[[[[[11.1e-100]]]], null]]], {\n\r}]]]}", + "{\"1\":[[[[[[[[[[1.11E-99]]]],null]]],{}]]]}"); + testSourcesAndTargets("{\"b\":false,\"a\":1,\"a\":null}", "{\"b\":false,\"a\":1,\"a\":null}", true); + testSourcesAndTargets("[[{\"b\":false,\"a\":1,\"a\":null}]]", "[[{\"b\":false,\"a\":1,\"a\":null}]]", true); + testSourcesAndTargets("\"\uD800\uDFFF\"", "\"\uD800\uDFFF\""); + testSourcesAndTargets("\"\\uD800\\uDFFF\"", "\"\uD800\uDFFF\""); + testSourcesAndTargets("\"\u0700\"", "\"\u0700\""); + testSourcesAndTargets("\"\\u0700\"", "\"\u0700\""); + StringBuilder builder = new StringBuilder().append('"'); + for (int cp = 0x80; cp < Character.MIN_SURROGATE; cp++) { + builder.appendCodePoint(cp); + } + for (int cp = Character.MAX_SURROGATE + 1; cp < 0xfffe; cp++) { + builder.appendCodePoint(cp); + } + for (int cp = 0xffff; cp <= Character.MAX_CODE_POINT; cp++) { + builder.appendCodePoint(cp); + } + String s = builder.append('"').toString(); + testSourcesAndTargets(s, s); + testSourcesAndTargetsError("", true); + testSourcesAndTargetsError("\"", true); + testSourcesAndTargetsError("\"\\u", true); + testSourcesAndTargetsError("\u0080", true); + testSourcesAndTargetsError(".1", true); + testSourcesAndTargetsError("1.", true); + testSourcesAndTargetsError("1.1e", true); + testSourcesAndTargetsError("1.1e+", true); + testSourcesAndTargetsError("1.1e-", true); + testSourcesAndTargetsError("\b1", true); + testSourcesAndTargetsError("\"\\u", true); + testSourcesAndTargetsError("\"\\u0", true); + testSourcesAndTargetsError("\"\\u00", true); + testSourcesAndTargetsError("\"\\u000", true); + testSourcesAndTargetsError("\"\\u0000", true); + testSourcesAndTargetsError("{,}", true); + testSourcesAndTargetsError("{,,}", true); + testSourcesAndTargetsError("{}}", true); + testSourcesAndTargetsError("{\"a\":\"\":\"\"}", true); + testSourcesAndTargetsError("[]]", true); + 
testSourcesAndTargetsError("\"\\uZZZZ\"", true); + testSourcesAndTargetsError("\"\\x\"", true); + testSourcesAndTargetsError("\"\\", true); + testSourcesAndTargetsError("[1,", true); + testSourcesAndTargetsError("[1,,2]", true); + testSourcesAndTargetsError("[1,]", true); + testSourcesAndTargetsError("{\"a\":1,]", true); + testSourcesAndTargetsError("[1 2]", true); + testSourcesAndTargetsError("{\"a\"-1}", true); + testSourcesAndTargetsError("[1;2]", true); + testSourcesAndTargetsError("{\"a\":1,b:2}", true); + testSourcesAndTargetsError("{\"a\":1;\"b\":2}", true); + testSourcesAndTargetsError("fals", true); + testSourcesAndTargetsError("falsE", true); + testSourcesAndTargetsError("False", true); + testSourcesAndTargetsError("nul", true); + testSourcesAndTargetsError("nulL", true); + testSourcesAndTargetsError("Null", true); + testSourcesAndTargetsError("tru", true); + testSourcesAndTargetsError("truE", true); + testSourcesAndTargetsError("True", true); + testSourcesAndTargetsError("\"\uD800\"", false); + testSourcesAndTargetsError("\"\\uD800\"", true); + testSourcesAndTargetsError("\"\uDC00\"", false); + testSourcesAndTargetsError("\"\\uDC00\"", true); + testSourcesAndTargetsError("\"\uDBFF \"", false); + testSourcesAndTargetsError("\"\\uDBFF \"", true); + testSourcesAndTargetsError("\"\uDBFF\\\"", true); + testSourcesAndTargetsError("\"\\uDBFF\\\"", true); + testSourcesAndTargetsError("\"\uDFFF\uD800\"", false); + testSourcesAndTargetsError("\"\\uDFFF\\uD800\"", true); + } + + private void testSourcesAndTargets(String src, String expected) throws Exception { + testSourcesAndTargets(src, expected, false); + } + + private void testSourcesAndTargets(String src, String expected, boolean hasNonUniqueKeys) throws Exception { + JSONItemType itemType; + switch (expected.charAt(0)) { + case '[': + itemType = JSONItemType.ARRAY; + break; + case '{': + itemType = JSONItemType.OBJECT; + break; + default: + itemType = JSONItemType.SCALAR; + } + assertEquals(expected, 
JSONStringSource.parse(src, new JSONStringTarget())); + assertEquals(expected.getBytes(StandardCharsets.UTF_8), // + JSONStringSource.parse(src, new JSONByteArrayTarget())); + assertEquals(expected, JSONStringSource.parse(src, new JSONValueTarget()).toString()); + assertEquals(itemType, JSONStringSource.parse(src, new JSONValidationTargetWithoutUniqueKeys())); + if (hasNonUniqueKeys) { + testSourcesAndTargetsError(src, JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS, true); + } else { + assertEquals(itemType, JSONStringSource.parse(src, new JSONValidationTargetWithUniqueKeys())); + } + for (Charset charset : CHARSETS) { + assertEquals(expected, JSONBytesSource.parse(src.getBytes(charset), new JSONStringTarget())); + } + } + + private void testSourcesAndTargetsError(String src, boolean testBytes) throws Exception { + testSourcesAndTargetsError(src, STRING_TARGET, testBytes); + testSourcesAndTargetsError(src, BYTES_TARGET, testBytes); + testSourcesAndTargetsError(src, VALUE_TARGET, testBytes); + testSourcesAndTargetsError(src, JSON_VALIDATION_TARGET_WITHOUT_UNIQUE_KEYS, testBytes); + testSourcesAndTargetsError(src, JSON_VALIDATION_TARGET_WITH_UNIQUE_KEYS, testBytes); + } + + private void testSourcesAndTargetsError(String src, Callable> constructor, boolean testBytes) + throws Exception { + check: { + JSONTarget target = constructor.call(); + try { + JSONStringSource.parse(src, target); + } catch (IllegalArgumentException | IllegalStateException expected) { + // Expected + break check; + } + fail(); + } + /* + * String.getBytes() replaces invalid characters, so some tests are + * disabled. 
+ */ + if (testBytes) { + JSONTarget target = constructor.call(); + try { + JSONBytesSource.parse(src.getBytes(StandardCharsets.UTF_8), target); + } catch (IllegalArgumentException | IllegalStateException expected) { + // Expected + return; + } + fail(); + } + } + + private void testUtfError() { + // 2 bytes + testUtfError(new byte[] { '"', (byte) 0xc2, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xc1, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xc2 }); + // 3 bytes + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0xc0, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0x80, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe0, (byte) 0x9f, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xe1, (byte) 0x80 }); + // 4 bytes + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0xc0, (byte) 0x80, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0xc0, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0x80, (byte) 0xc0, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf0, (byte) 0x8f, (byte) 0xbf, (byte) 0xbf, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf4, (byte) 0x90, (byte) 0x80, (byte) 0x80, '"' }); + testUtfError(new byte[] { '"', (byte) 0xf1, (byte) 0x80, (byte) 0x80 }); + } + + private void testUtfError(byte[] bytes) { + assertThrows(IllegalArgumentException.class, + () -> JSONBytesSource.parse(bytes, new JSONValidationTargetWithoutUniqueKeys())); + } + + private void testLongNesting() { + final int halfLevel = 2048; + StringBuilder builder = new StringBuilder(halfLevel * 8); + for (int i = 0; i < halfLevel; i++) { + builder.append("{\"a\":["); + } + for (int i = 0; i < halfLevel; i++) { + builder.append("]}"); + } + String string = builder.toString(); + assertEquals(string, JSONStringSource.parse(string, new JSONStringTarget())); + byte[] bytes = string.getBytes(StandardCharsets.ISO_8859_1); + 
assertEquals(bytes, JSONBytesSource.normalize(bytes)); + } + + private void testEncodeString() { + testEncodeString("abc \"\u0001\u007f\u0080\u1000\uabcd\n'\t", + "\"abc \\\"\\u0001\u007f\u0080\u1000\uabcd\\n'\\t\"", + "\"abc \\\"\\u0001\u007f\\u0080\\u1000\\uabcd\\n\\u0027\\t\""); + } + + private void testEncodeString(String source, String expected, String expectedPrintable) { + assertEquals(expected, JSONStringTarget.encodeString(new StringBuilder(), source, false).toString()); + assertEquals(expectedPrintable, JSONStringTarget.encodeString(new StringBuilder(), source, true).toString()); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestKeywords.java b/h2/src/test/org/h2/test/unit/TestKeywords.java new file mode 100644 index 0000000000..b006b5dcb9 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestKeywords.java @@ -0,0 +1,754 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.Statement; +import java.time.Duration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.TreeSet; + +import org.h2.command.Parser; +import org.h2.command.Token; +import org.h2.command.Tokenizer; +import org.h2.message.DbException; +import org.h2.test.TestBase; +import org.h2.util.ParserUtil; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.FieldVisitor; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; + +/** + * Tests keywords. 
+ */ +public class TestKeywords extends TestBase { + + private enum TokenType { + IDENTIFIER, + + KEYWORD, + + CONTEXT_SENSITIVE_KEYWORD; + } + + private static final HashSet SQL92_RESERVED_WORDS = toSet(new String[] { + + "ABSOLUTE", "ACTION", "ADD", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "AS", "ASC", "ASSERTION", + "AT", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIT", "BIT_LENGTH", "BOTH", "BY", + + "CASCADE", "CASCADED", "CASE", "CAST", "CATALOG", "CHAR", "CHARACTER", "CHAR_LENGTH", "CHARACTER_LENGTH", + "CHECK", "CLOSE", "COALESCE", "COLLATE", "COLLATION", "COLUMN", "COMMIT", "CONNECT", "CONNECTION", + "CONSTRAINT", "CONSTRAINTS", "CONTINUE", "CONVERT", "CORRESPONDING", "COUNT", "CREATE", "CROSS", "CURRENT", + "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFERRABLE", "DEFERRED", "DELETE", + "DESC", "DESCRIBE", "DESCRIPTOR", "DIAGNOSTICS", "DISCONNECT", "DISTINCT", "DOMAIN", "DOUBLE", "DROP", + + "ELSE", "END", "END-EXEC", "ESCAPE", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", "EXISTS", "EXTERNAL", + "EXTRACT", + + "FALSE", "FETCH", "FIRST", "FLOAT", "FOR", "FOREIGN", "FOUND", "FROM", "FULL", + + "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", + + "HAVING", "HOUR", + + "IDENTITY", "IMMEDIATE", "IN", "INDICATOR", "INITIALLY", "INNER", "INPUT", "INSENSITIVE", "INSERT", "INT", + "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "ISOLATION", + + "JOIN", + + "KEY", + + "LANGUAGE", "LAST", "LEADING", "LEFT", "LEVEL", "LIKE", "LOCAL", "LOWER", + + "MATCH", "MAX", "MIN", "MINUTE", "MODULE", "MONTH", + + "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NEXT", "NO", "NOT", "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OF", "ON", "ONLY", "OPEN", "OPTION", "OR", "ORDER", "OUTER", "OUTPUT", "OVERLAPS", + + "PAD", "PARTIAL", "POSITION", "PRECISION", "PREPARE", "PRESERVE", "PRIMARY", "PRIOR", "PRIVILEGES", + "PROCEDURE", "PUBLIC", + + "READ", "REAL", 
"REFERENCES", "RELATIVE", "RESTRICT", "REVOKE", "RIGHT", "ROLLBACK", "ROWS", + + "SCHEMA", "SCROLL", "SECOND", "SECTION", "SELECT", "SESSION", "SESSION_USER", "SET", "SIZE", "SMALLINT", + "SOME", "SPACE", "SQL", "SQLCODE", "SQLERROR", "SQLSTATE", "SUBSTRING", "SUM", "SYSTEM_USER", + + "TABLE", "TEMPORARY", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSACTION", "TRANSLATE", "TRANSLATION", "TRIM", "TRUE", + + "UNION", "UNIQUE", "UNKNOWN", "UPDATE", "UPPER", "USAGE", "USER", "USING", + + "VALUE", "VALUES", "VARCHAR", "VARYING", "VIEW", + + "WHEN", "WHENEVER", "WHERE", "WITH", "WORK", "WRITE", + + "YEAR", + + "ZONE", + + }); + + private static final HashSet SQL1999_RESERVED_WORDS = toSet(new String[] { + + "ABSOLUTE", "ACTION", "ADD", "ADMIN", "AFTER", "AGGREGATE", "ALIAS", "ALL", "ALLOCATE", "ALTER", "AND", + "ANY", "ARE", "ARRAY", "AS", "ASC", "ASSERTION", "AT", "AUTHORIZATION", + + "BEFORE", "BEGIN", "BINARY", "BIT", "BLOB", "BOOLEAN", "BOTH", "BREADTH", "BY", + + "CALL", "CASCADE", "CASCADED", "CASE", "CAST", "CATALOG", "CHAR", "CHARACTER", "CHECK", "CLASS", "CLOB", + "CLOSE", "COLLATE", "COLLATION", "COLUMN", "COMMIT", "COMPLETION", "CONNECT", "CONNECTION", "CONSTRAINT", + "CONSTRAINTS", "CONSTRUCTOR", "CONTINUE", "CORRESPONDING", "CREATE", "CROSS", "CUBE", "CURRENT", + "CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", + "CURSOR", "CYCLE", + + "DATA", "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFERRABLE", "DEFERRED", + "DELETE", "DEPTH", "DEREF", "DESC", "DESCRIBE", "DESCRIPTOR", "DESTROY", "DESTRUCTOR", "DETERMINISTIC", + "DICTIONARY", "DIAGNOSTICS", "DISCONNECT", "DISTINCT", "DOMAIN", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELSE", "END", "END-EXEC", "EQUALS", "ESCAPE", "EVERY", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", + "EXTERNAL", + + "FALSE", "FETCH", "FIRST", "FLOAT", "FOR", "FOREIGN", "FOUND", "FROM", "FREE", "FULL", "FUNCTION", + + 
"GENERAL", "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOST", "HOUR", + + "IDENTITY", "IGNORE", "IMMEDIATE", "IN", "INDICATOR", "INITIALIZE", "INITIALLY", "INNER", "INOUT", "INPUT", + "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "ISOLATION", "ITERATE", + + "JOIN", + + "KEY", + + "LANGUAGE", "LARGE", "LAST", "LATERAL", "LEADING", "LEFT", "LESS", "LEVEL", "LIKE", "LIMIT", "LOCAL", + "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", + + "MAP", "MATCH", "MINUTE", "MODIFIES", "MODIFY", "MODULE", "MONTH", + + "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NEXT", "NO", "NONE", "NOT", "NULL", "NUMERIC", + + "OBJECT", "OF", "OFF", "OLD", "ON", "ONLY", "OPEN", "OPERATION", "OPTION", "OR", "ORDER", "ORDINALITY", + "OUT", "OUTER", "OUTPUT", + + "PAD", "PARAMETER", "PARAMETERS", "PARTIAL", "PATH", "POSTFIX", "PRECISION", "PREFIX", "PREORDER", + "PREPARE", "PRESERVE", "PRIMARY", "PRIOR", "PRIVILEGES", "PROCEDURE", "PUBLIC", + + "READ", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "RELATIVE", "RESTRICT", "RESULT", + "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLE", "ROLLBACK", "ROLLUP", "ROUTINE", "ROW", "ROWS", + + "SAVEPOINT", "SCHEMA", "SCROLL", "SCOPE", "SEARCH", "SECOND", "SECTION", "SELECT", "SEQUENCE", "SESSION", + "SESSION_USER", "SET", "SETS", "SIZE", "SMALLINT", "SOME", "SPACE", "SPECIFIC", "SPECIFICTYPE", "SQL", + "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "START", "STATE", "STATEMENT", "STATIC", "STRUCTURE", + "SYSTEM_USER", + + "TABLE", "TEMPORARY", "TERMINATE", "THAN", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", + "TO", "TRAILING", "TRANSACTION", "TRANSLATION", "TREAT", "TRIGGER", "TRUE", + + "UNDER", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "USAGE", "USER", "USING", + + "VALUE", "VALUES", "VARCHAR", "VARIABLE", "VARYING", "VIEW", + + "WHEN", "WHENEVER", "WHERE", "WITH", "WITHOUT", "WORK", "WRITE", + + "YEAR", "ZONE", + + }); + + private static final HashSet 
SQL2003_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "AS", "ASENSITIVE", "ASYMMETRIC", "AT", + "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONVERT", "CORR", "CORRESPONDING", "COUNT", "COVAR_POP", + "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_DATE", + "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_TIME", "CURRENT_TIMESTAMP", + "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END-EXEC", "ESCAPE", "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", + "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FREE", "FROM", "FULL", "FUNCTION", + "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOLD", "HOUR", "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", + + "INSERT", "INT", "INTEGER", "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LANGUAGE", "LARGE", "LATERAL", "LEADING", "LEFT", "LIKE", "LN", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", + "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", + "NUMERIC", + + "OCTET_LENGTH", "OF", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", "OUTER", "OVER", 
"OVERLAPS", + "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION", "POWER", + "PRECISION", "PREPARE", "PRIMARY", "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", "SUBSTRING", "SUM", "SYMMETRIC", + "SYSTEM", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VAR_POP", "VAR_SAMP", "VARCHAR", "VARYING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet SQL2008_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "AS", "ASENSITIVE", "ASYMMETRIC", "AT", + "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONVERT", "CORR", "CORRESPONDING", "COUNT", "COVAR_POP", + "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", 
"CURRENT_CATALOG", "CURRENT_DATE", + "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_TIME", + "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END-EXEC", "ESCAPE", "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", + "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FREE", "FROM", "FULL", "FUNCTION", + "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERSECT", + "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LANGUAGE", "LARGE", "LATERAL", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", "LN", "LOCAL", "LOCALTIME", + "LOCALTIMESTAMP", "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", + "NUMERIC", + + "OCTET_LENGTH", "OCCURRENCES_REGEX", "OF", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", "OUTER", + "OVER", "OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION", + "POSITION_REGEX", "POWER", "PRECISION", "PREPARE", "PRIMARY", "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", 
"SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", "SUBSTRING", "SUBSTRING_REGEX", + "SUM", "SYMMETRIC", "SYSTEM", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet SQL2011_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "ARRAY_AGG", "ARRAY_MAX_CARDINALITY", // + "AS", "ASENSITIVE", "ASYMMETRIC", "AT", "ATOMIC", "AUTHORIZATION", "AVG", + + "BEGIN", "BEGIN_FRAME", "BEGIN_PARTITION", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", "COLUMN", + "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONTAINS", "CONVERT", "CORR", "CORRESPONDING", "COUNT", + "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_CATALOG", + "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_ROW", + "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", + "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DELETE", "DENSE_RANK", "DEREF", + "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", 
"DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "END", "END_FRAME", "END_PARTITION", "END-EXEC", "EQUALS", "ESCAPE", "EVERY", + "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FIRST_VALUE", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FRAME_ROW", "FREE", "FROM", + "FULL", "FUNCTION", "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", "GROUPS", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERSECT", + "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", + + "LAG", "LANGUAGE", "LARGE", "LAST_VALUE", "LATERAL", "LEAD", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", "LN", + "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOWER", + + "MATCH", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", "MOD", "MODIFIES", "MODULE", "MONTH", + "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NTH_VALUE", "NTILE", + "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OCCURRENCES_REGEX", "OF", "OFFSET", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUT", + "OUTER", "OVER", "OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PERCENT", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "PERIOD", + "PORTION", "POSITION", "POSITION_REGEX", "POWER", "PRECEDES", "PRECISION", "PREPARE", "PRIMARY", + "PROCEDURE", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SELECT", "SENSITIVE", "SESSION_USER", "SET", // + "SIMILAR", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", + "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "SUBMULTISET", 
"SUBSTRING", "SUBSTRING_REGEX", + "SUCCEEDS", "SUM", "SYMMETRIC", "SYSTEM", "SYSTEM_TIME", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TO", "TRAILING", + "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRUNCATE", "TRIM", "TRIM_ARRAY", // + "TRUE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VALUE_OF", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", "VERSIONING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet SQL2016_RESERVED_WORDS = toSet(new String[] { + + "ABS", "ACOS", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "ARE", "ARRAY", "ARRAY_AGG", + "ARRAY_MAX_CARDINALITY", "AS", "ASENSITIVE", "ASIN", "ASYMMETRIC", "AT", "ATAN", "ATOMIC", "AUTHORIZATION", + "AVG", + + "BEGIN", "BEGIN_FRAME", "BEGIN_PARTITION", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BY", + + "CALL", "CALLED", "CARDINALITY", "CASCADED", "CASE", "CAST", "CEIL", "CEILING", "CHAR", "CHAR_LENGTH", + "CHARACTER", "CHARACTER_LENGTH", "CHECK", "CLASSIFIER", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLECT", + "COLUMN", "COMMIT", "CONDITION", "CONNECT", "CONSTRAINT", "CONTAINS", "CONVERT", "COPY", "CORR", + "CORRESPONDING", "COS", "COSH", "COUNT", "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", + "CURRENT", "CURRENT_CATALOG", "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", + "CURRENT_ROLE", "CURRENT_ROW", "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TIMESTAMP", + "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", + + "DATE", "DAY", "DEALLOCATE", "DEC", "DECIMAL", "DECFLOAT", "DECLARE", "DEFAULT", "DEFINE", "DELETE", + "DENSE_RANK", "DEREF", "DESCRIBE", "DETERMINISTIC", "DISCONNECT", "DISTINCT", "DOUBLE", "DROP", "DYNAMIC", + + "EACH", "ELEMENT", "ELSE", "EMPTY", "END", "END_FRAME", 
"END_PARTITION", "END-EXEC", "EQUALS", "ESCAPE", + "EVERY", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXP", "EXTERNAL", "EXTRACT", + + "FALSE", "FETCH", "FILTER", "FIRST_VALUE", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FRAME_ROW", "FREE", "FROM", + "FULL", "FUNCTION", "FUSION", + + "GET", "GLOBAL", "GRANT", "GROUP", "GROUPING", "GROUPS", + + "HAVING", "HOLD", "HOUR", + + "IDENTITY", "IN", "INDICATOR", "INITIAL", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", + "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "IS", + + "JOIN", "JSON_ARRAY", "JSON_ARRAYAGG", "JSON_EXISTS", "JSON_OBJECT", "JSON_OBJECTAGG", "JSON_QUERY", + "JSON_TABLE", "JSON_TABLE_PRIMITIVE", "JSON_VALUE", + + "LAG", "LANGUAGE", "LARGE", "LAST_VALUE", "LATERAL", "LEAD", "LEADING", "LEFT", "LIKE", "LIKE_REGEX", + "LISTAGG", "LN", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOG", "LOG10", "LOWER", + + "MATCH", "MATCH_NUMBER", "MATCH_RECOGNIZE", "MATCHES", "MAX", "MEMBER", "MERGE", "METHOD", "MIN", "MINUTE", + "MOD", "MODIFIES", "MODULE", "MONTH", "MULTISET", + + "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NO", "NONE", "NORMALIZE", "NOT", "NTH_VALUE", "NTILE", + "NULL", "NULLIF", "NUMERIC", + + "OCTET_LENGTH", "OCCURRENCES_REGEX", "OF", "OFFSET", "OLD", "OMIT", "ON", "ONE", "ONLY", "OPEN", "OR", + "ORDER", "OUT", "OUTER", "OVER", "OVERLAPS", "OVERLAY", + + "PARAMETER", "PARTITION", "PATTERN", "PER", "PERCENT", "PERCENT_RANK", "PERCENTILE_CONT", // + "PERCENTILE_DISC", "PERIOD", "PORTION", "POSITION", "POSITION_REGEX", "POWER", "PRECEDES", "PRECISION", + "PREPARE", "PRIMARY", "PROCEDURE", "PTF", + + "RANGE", "RANK", "READS", "REAL", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REGR_AVGX", // + "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", + "RELEASE", "RESULT", "RETURN", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROW_NUMBER", + "ROWS", "RUNNING", + + "SAVEPOINT", "SCOPE", "SCROLL", "SEARCH", "SECOND", "SEEK", 
"SELECT", "SENSITIVE", "SESSION_USER", "SET", + "SHOW", "SIMILAR", "SIN", "SINH", "SKIP", "SMALLINT", "SOME", "SPECIFIC", "SPECIFICTYPE", "SQL", + "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SQRT", "START", "STATIC", "STDDEV_POP", "STDDEV_SAMP", + "SUBMULTISET", "SUBSET", "SUBSTRING", "SUBSTRING_REGEX", "SUCCEEDS", "SUM", "SYMMETRIC", "SYSTEM", + "SYSTEM_TIME", "SYSTEM_USER", + + "TABLE", "TABLESAMPLE", "TAN", "TANH", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", + "TO", "TRAILING", "TRANSLATE", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRIM_ARRAY", + "TRUE", "TRUNCATE", + + "UESCAPE", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UPDATE", "UPPER", "USER", "USING", + + "VALUE", "VALUES", "VALUE_OF", "VAR_POP", "VAR_SAMP", "VARBINARY", "VARCHAR", "VARYING", "VERSIONING", + + "WHEN", "WHENEVER", "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT", + + "YEAR", + + }); + + private static final HashSet STRICT_MODE_NON_KEYWORDS = toSet(new String[] { "LIMIT", "MINUS", "TOP" }); + + private static final HashSet ALL_RESEVED_WORDS; + + private static final HashMap TOKENS; + + static { + HashSet set = new HashSet<>(1024); + set.addAll(SQL92_RESERVED_WORDS); + set.addAll(SQL1999_RESERVED_WORDS); + set.addAll(SQL2003_RESERVED_WORDS); + set.addAll(SQL2008_RESERVED_WORDS); + set.addAll(SQL2011_RESERVED_WORDS); + set.addAll(SQL2016_RESERVED_WORDS); + ALL_RESEVED_WORDS = set; + HashMap tokens = new HashMap<>(); + processClass(Parser.class, tokens); + processClass(ParserUtil.class, tokens); + processClass(Token.class, tokens); + processClass(Tokenizer.class, tokens); + TOKENS = tokens; + } + + private static void processClass(Class clazz, HashMap tokens) { + ClassReader r; + try { + r = new ClassReader(clazz.getResourceAsStream(clazz.getSimpleName() + ".class")); + } catch (IOException e) { + throw DbException.convert(e); + } + r.accept(new ClassVisitor(Opcodes.ASM8) { + @Override + public FieldVisitor visitField(int access, String name, 
String descriptor, String signature, // + Object value) { + add(value); + return null; + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, + String[] exceptions) { + return new MethodVisitor(Opcodes.ASM8) { + @Override + public void visitLdcInsn(Object value) { + add(value); + } + }; + } + + void add(Object value) { + if (!(value instanceof String)) { + return; + } + String s = (String) value; + int l = s.length(); + if (l == 0) { + return; + } + for (int i = 0; i < l; i++) { + char ch = s.charAt(i); + if ((ch < 'A' || ch > 'Z') && ch != '_') { + return; + } + } + final TokenType type; + switch (ParserUtil.getTokenType(s, false, true)) { + case ParserUtil.IDENTIFIER: + type = TokenType.IDENTIFIER; + break; + case ParserUtil.KEYWORD: + type = TokenType.CONTEXT_SENSITIVE_KEYWORD; + break; + default: + type = TokenType.KEYWORD; + } + tokens.put(s, type); + } + }, ClassReader.SKIP_DEBUG | ClassReader.SKIP_FRAMES); + } + + private static HashSet toSet(String[] array) { + HashSet set = new HashSet<>((int) Math.ceil(array.length / .75)); + for (String reservedWord : array) { + if (!set.add(reservedWord)) { + throw new AssertionError(reservedWord); + } + } + return set; + } + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testParser(); + testInformationSchema(); + testMetaData(); + } + + private void testParser() throws Exception { + testParser(false); + testParser(true); + } + + private void testParser(boolean strictMode) throws Exception { + try (Connection conn = DriverManager + .getConnection("jdbc:h2:mem:keywords;MODE=" + (strictMode ? 
"STRICT" : "REGULAR"))) { + Statement stat = conn.createStatement(); + for (Entry entry : TOKENS.entrySet()) { + String s = entry.getKey(); + TokenType type = entry.getValue(); + if (strictMode && STRICT_MODE_NON_KEYWORDS.contains(s)) { + type = TokenType.IDENTIFIER; + } + Throwable exception1 = null, exception2 = null; + try { + stat.execute("CREATE TABLE " + s + '(' + s + " INT)"); + stat.execute("INSERT INTO " + s + '(' + s + ") VALUES (10)"); + } catch (Throwable t) { + exception1 = t; + } + if (exception1 == null) { + try { + try (ResultSet rs = stat.executeQuery("SELECT " + s + " FROM " + s)) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT SUM(" + s + ") " + s + " FROM " + s + ' ' + s)) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + assertFalse(rs.next()); + assertEquals(s, rs.getMetaData().getColumnLabel(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT CASE " + s + " WHEN 10 THEN 1 END FROM " + s)) { + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + } + stat.execute("DROP TABLE " + s); + stat.execute("CREATE TABLE TEST(" + s + " VARCHAR) AS VALUES '-'"); + String str; + try (ResultSet rs = stat.executeQuery("SELECT TRIM(" + s + " FROM '--a--') FROM TEST")) { + assertTrue(rs.next()); + str = rs.getString(1); + } + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(" + s + " INT) AS (VALUES 10)"); + try (ResultSet rs = stat.executeQuery("SELECT " + s + " V FROM TEST")) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + } + try (ResultSet rs = stat.executeQuery("SELECT TEST." 
+ s + " FROM TEST")) { + assertTrue(rs.next()); + assertEquals(10, rs.getInt(1)); + } + stat.execute("DROP TABLE TEST"); + stat.execute("CREATE TABLE TEST(" + s + " INT, _VALUE_ INT DEFAULT 1) AS VALUES (2, 2)"); + stat.execute("UPDATE TEST SET _VALUE_ = " + s); + try (ResultSet rs = stat.executeQuery("SELECT _VALUE_ FROM TEST")) { + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + } + stat.execute("DROP TABLE TEST"); + try (ResultSet rs = stat.executeQuery("SELECT 1 DAY " + s)) { + assertEquals(s, rs.getMetaData().getColumnLabel(1)); + assertTrue(rs.next()); + assertEquals(Duration.ofDays(1L), rs.getObject(1, Duration.class)); + } + try (ResultSet rs = stat.executeQuery("SELECT 1 = " + s + " FROM (VALUES 1) T(" + s + ')')) { + rs.next(); + assertTrue(rs.getBoolean(1)); + } + try (ResultSet rs = stat + .executeQuery("SELECT ROW_NUMBER() OVER(" + s + ") WINDOW " + s + " AS ()")) { + } + if (!"a".equals(str)) { + exception2 = new AssertionError(); + } + } catch (Throwable t) { + exception2 = t; + stat.execute("DROP TABLE IF EXISTS TEST"); + } + } + switch (type) { + case IDENTIFIER: + if (exception1 != null) { + throw new AssertionError(s + " must be a keyword.", exception1); + } + if (exception2 != null) { + throw new AssertionError(s + " must be a context-sensitive keyword.", exception2); + } + break; + case KEYWORD: + if (exception1 == null && exception2 == null) { + throw new AssertionError(s + " may be removed from a list of keywords."); + } + if (exception1 == null) { + throw new AssertionError(s + " may be a context-sensitive keyword."); + } + break; + case CONTEXT_SENSITIVE_KEYWORD: + if (exception1 != null) { + throw new AssertionError(s + " must be a keyword.", exception1); + } + if (exception2 == null) { + throw new AssertionError(s + " may be removed from a list of context-sensitive keywords."); + } + break; + default: + fail(); + } + } + } + } + + private void testInformationSchema() throws Exception { + try (Connection conn = 
DriverManager.getConnection("jdbc:h2:mem:")) { + Statement stat = conn.createStatement(); + try (ResultSet rs = stat.executeQuery("SELECT TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS")) { + while (rs.next()) { + String table = rs.getString(1); + if (isKeyword(table) && !table.equals("PARAMETERS")) { + fail("Table INFORMATION_SCHEMA.\"" + table + + "\" uses a keyword or SQL reserved word as its name."); + } + String column = rs.getString(2); + if (isKeyword(column)) { + fail("Column INFORMATION_SCHEMA." + table + ".\"" + column + + "\" uses a keyword or SQL reserved word as its name."); + } + } + } + } + } + + private static boolean isKeyword(String identifier) { + return ALL_RESEVED_WORDS.contains(identifier) || ParserUtil.isKeyword(identifier, false); + } + + @SuppressWarnings("incomplete-switch") + private void testMetaData() throws Exception { + TreeSet set = new TreeSet<>(); + for (Entry entry : TOKENS.entrySet()) { + switch (entry.getValue()) { + case KEYWORD: + case CONTEXT_SENSITIVE_KEYWORD: { + String s = entry.getKey(); + if (!SQL2003_RESERVED_WORDS.contains(s)) { + set.add(s); + } + } + } + } + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:")) { + assertEquals(setToString(set), conn.getMetaData().getSQLKeywords()); + } + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:;MODE=STRICT")) { + TreeSet set2 = new TreeSet<>(set); + set2.removeAll(STRICT_MODE_NON_KEYWORDS); + assertEquals(setToString(set2), conn.getMetaData().getSQLKeywords()); + } + set.add("INTERSECTS"); + set.add("SYSDATE"); + set.add("SYSTIME"); + set.add("SYSTIMESTAMP"); + set.add("TODAY"); + try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:;OLD_INFORMATION_SCHEMA=TRUE")) { + assertEquals(setToString(set), conn.getMetaData().getSQLKeywords()); + } + } + + private static String setToString(TreeSet set) { + Iterator i = set.iterator(); + if (i.hasNext()) { + StringBuilder builder = new StringBuilder(i.next()); + while (i.hasNext()) { + 
builder.append(',').append(i.next()); + } + return builder.toString(); + } + return ""; + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestLocale.java b/h2/src/test/org/h2/test/unit/TestLocale.java new file mode 100644 index 0000000000..0c91b9f6cd --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestLocale.java @@ -0,0 +1,88 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Locale; +import org.h2.test.TestBase; +import org.h2.test.TestDb; + +/** + * Tests that change the default locale. + */ +public class TestLocale extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws SQLException { + testSpecialLocale(); + testDatesInJapanLocale(); + } + + private void testSpecialLocale() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + Locale old = Locale.getDefault(); + try { + // when using Turkish as the default locale, "i".toUpperCase() is + // not "I" + Locale.setDefault(new Locale("tr")); + stat.execute("create table test(I1 int, i2 int, b int, c int, d int) " + + "as select 1, 1, 1, 1, 1"); + ResultSet rs = stat.executeQuery("select * from test"); + rs.next(); + rs.getString("I1"); + rs.getString("i1"); + rs.getString("I2"); + rs.getString("i2"); + stat.execute("drop table test"); + } finally { + Locale.setDefault(old); + } + conn.close(); + } + + private void testDatesInJapanLocale() throws SQLException { + deleteDb(getTestName()); + Connection conn = getConnection(getTestName()); + Statement stat = 
conn.createStatement(); + Locale old = Locale.getDefault(); + try { + // when using Japanese as the default locale, the default calendar is + // the imperial japanese calendar + Locale.setDefault(new Locale("ja", "JP", "JP")); + stat.execute("CREATE TABLE test(d TIMESTAMP, dz TIMESTAMP WITH TIME ZONE) " + + "as select '2017-12-03T00:00:00Z', '2017-12-03T00:00:00Z'"); + ResultSet rs = stat.executeQuery("select YEAR(d) y, YEAR(dz) yz from test"); + rs.next(); + assertEquals(2017, rs.getInt("y")); + assertEquals(2017, rs.getInt("yz")); + stat.execute("drop table test"); + + rs = stat.executeQuery( + "CALL FORMATDATETIME(TIMESTAMP '2001-02-03 04:05:06', 'yyyy-MM-dd HH:mm:ss', 'en')"); + rs.next(); + assertEquals("2001-02-03 04:05:06", rs.getString(1)); + + } finally { + Locale.setDefault(old); + } + conn.close(); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestMVTempResult.java b/h2/src/test/org/h2/test/unit/TestMVTempResult.java new file mode 100644 index 0000000000..3dacb86ead --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestMVTempResult.java @@ -0,0 +1,81 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.lang.ProcessBuilder.Redirect; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.BitSet; + +import org.h2.test.TestBase; +import org.h2.tools.DeleteDbFiles; + +/** + * Tests that MVTempResult implementations do not produce OOME. + */ +public class TestMVTempResult extends TestBase { + + private static final int MEMORY = 64; + + private static final int ROWS = 1_000_000; + + /** + * May be used to run only this test and may be launched by this test in a + * subprocess. 
+ * + * @param a + * if empty run this test, if not empty run the subprocess + */ + public static void main(String... a) throws Exception { + TestMVTempResult test = (TestMVTempResult) TestBase.createCaller().init(); + if (a.length == 0) { + test.test(); + } else { + test.runTest(); + } + } + + @Override + public void test() throws Exception { + ProcessBuilder pb = new ProcessBuilder().redirectError(Redirect.INHERIT); + pb.command(getJVM(), "-Xmx" + MEMORY + "M", "-cp", getClassPath(), "-ea", getClass().getName(), "dummy"); + assertEquals(0, pb.start().waitFor()); + } + + private void runTest() throws SQLException { + String dir = getBaseDir(); + String name = "testResultExternal"; + DeleteDbFiles.execute(dir, name, true); + try (Connection c = DriverManager.getConnection("jdbc:h2:" + dir + '/' + name)) { + Statement s = c.createStatement(); + s.execute("CREATE TABLE TEST(I BIGINT, E ENUM('a', 'b'))" // + + " AS SELECT X, 'a' FROM SYSTEM_RANGE(1, " + ROWS + ')'); + try (ResultSet rs = s.executeQuery("SELECT I, E FROM TEST ORDER BY I DESC")) { + for (int i = ROWS; i > 0; i--) { + assertTrue(rs.next()); + assertEquals(i, rs.getLong(1)); + assertEquals("a", rs.getString(2)); + } + assertFalse(rs.next()); + } + BitSet set = new BitSet(ROWS); + try (ResultSet rs = s.executeQuery("SELECT I, E FROM TEST")) { + for (int i = 1; i <= ROWS; i++) { + assertTrue(rs.next()); + set.set((int) rs.getLong(1)); + assertEquals("a", rs.getString(2)); + } + assertFalse(rs.next()); + assertEquals(ROWS, set.cardinality()); + } + } + DeleteDbFiles.execute(dir, name, true); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestMathUtils.java b/h2/src/test/org/h2/test/unit/TestMathUtils.java index 04b11c4793..80b2e74428 100644 --- a/h2/src/test/org/h2/test/unit/TestMathUtils.java +++ b/h2/src/test/org/h2/test/unit/TestMathUtils.java @@ -1,14 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; -import java.math.BigInteger; -import java.util.Random; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; import org.h2.util.MathUtils; /** @@ -22,14 +19,13 @@ public class TestMathUtils extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() { testRandom(); - testReverse(); - testFactorial(); + testNextPowerOf2Int(); } private void testRandom() { @@ -53,78 +49,21 @@ private void testRandom() { assertTrue(data.length > 10); } - private void testReverse() { - assertEquals(Integer.reverse(0), Integer.reverse(0)); - assertEquals(Integer.reverse(Integer.MAX_VALUE), - Integer.reverse(Integer.MAX_VALUE)); - assertEquals(Integer.reverse(Integer.MIN_VALUE), - Integer.reverse(Integer.MIN_VALUE)); - assertEquals(Long.reverse(0), Long.reverse(0L)); - assertEquals(Long.reverse(Long.MAX_VALUE), Long.reverse(Long.MAX_VALUE)); - assertEquals(Long.reverse(Long.MIN_VALUE), Long.reverse(Long.MIN_VALUE)); - for (int i = Integer.MIN_VALUE; i < 0; i += 1019) { - int x = Integer.reverse(i); - assertEquals(Integer.reverse(i), x); - } - for (int i = 0; i > 0; i += 1019) { - int x = Integer.reverse(i); - assertEquals(Integer.reverse(i), x); - } - for (long i = Long.MIN_VALUE; i < 0; i += 1018764321251L) { - long x = Long.reverse(i); - assertEquals(Long.reverse(i), x); - } - for (long i = 0; i > 0; i += 1018764321251L) { - long x = Long.reverse(i); - assertEquals(Long.reverse(i), x); - } - Random random = new Random(10); - for (int i = 0; i < 1000000; i++) { - long x = random.nextLong(); - long r = Long.reverse(x); - assertEquals(Long.reverse(x), r); - int y = random.nextInt(); - int s = Integer.reverse(y); - 
assertEquals(Integer.reverse(y), s); - } - } + private void testNextPowerOf2Int() { + // the largest power of two that fits into an integer + final int largestPower2 = 0x40000000; + int[] testValues = { 0, 1, 2, 3, 4, 12, 17, 500, 1023, + largestPower2 - 500, largestPower2 }; + int[] resultValues = { 1, 1, 2, 4, 4, 16, 32, 512, 1024, + largestPower2, largestPower2 }; - private void testFactorial() { - new AssertThrows(IllegalArgumentException.class) { @Override - public void test() { - factorial(-1); - }}; - assertEquals("1", factorial(0).toString()); - assertEquals("1", factorial(1).toString()); - assertEquals("2", factorial(2).toString()); - assertEquals("6", factorial(3).toString()); - assertEquals("3628800", factorial(10).toString()); - assertEquals("2432902008176640000", factorial(20).toString()); - } - - /** - * Calculate the factorial (n!) of a number. - * This implementation uses a naive multiplication loop, and - * is very slow for large n. - * For n = 1000, it takes about 10 ms. - * For n = 8000, it takes about 800 ms. 
- * - * @param n the number - * @return the factorial of n - */ - public static BigInteger factorial(int n) { - if (n < 0) { - throw new IllegalArgumentException(n + "<0"); - } else if (n < 2) { - return BigInteger.ONE; + for (int i = 0; i < testValues.length; i++) { + assertEquals(resultValues[i], MathUtils.nextPowerOf2(testValues[i])); } - BigInteger x = new BigInteger("" + n); - BigInteger result = x; - for (int i = n - 1; i >= 2; i--) { - x = x.subtract(BigInteger.ONE); - result = result.multiply(x); + testValues = new int[] { Integer.MIN_VALUE, -1, largestPower2 + 1, Integer.MAX_VALUE }; + for (int v : testValues) { + assertThrows(IllegalArgumentException.class, () -> MathUtils.nextPowerOf2(v)); } - return result; } } diff --git a/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java b/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java new file mode 100644 index 0000000000..31e0e4dc83 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestMemoryEstimator.java @@ -0,0 +1,120 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.nio.ByteBuffer; +import java.util.Random; +import java.util.concurrent.atomic.AtomicLong; +import org.h2.mvstore.WriteBuffer; +import org.h2.mvstore.type.BasicDataType; +import org.h2.test.TestBase; +import org.h2.util.MemoryEstimator; + +/** + * Class TestMemoryEstimator. + *
      + *
    • 12/7/19 10:38 PM initial creation + *
    + * + * @author Andrei Tokar + */ +public class TestMemoryEstimator extends TestBase { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() { + testEstimator(); + testPageEstimator(); + } + + private void testEstimator() { + Random random = new Random(); + AtomicLong stat = new AtomicLong(); + TestDataType dataType = new TestDataType(); + int sum = 0; + int sum2 = 0; + int err2 = 0; + int size = 10000; + for (int i = 0; i < size; i++) { + int x = (int)Math.abs(100 + random.nextGaussian() * 30); + int y = MemoryEstimator.estimateMemory(stat, dataType, x); + sum += x; + sum2 += x * x; + err2 += (x - y) * (x - y); + } + int avg = sum / size; + double err = Math.sqrt(1.0 * err2 / sum2); + int pct = MemoryEstimator.samplingPct(stat); + String msg = "Avg=" + avg + ", err=" + err + ", pct=" + pct + " " + (dataType.getCount() * 100 / size); + assertTrue(msg, err < 0.3); + assertTrue(msg, pct <= 7); + } + + private void testPageEstimator() { + Random random = new Random(); + AtomicLong stat = new AtomicLong(); + TestDataType dataType = new TestDataType(); + long sum = 0; + long sum2 = 0; + long err2 = 0; + int size = 10000; + int pageSz; + for (int i = 0; i < size; i+=pageSz) { + pageSz = random.nextInt(48) + 1; + Integer[] storage = dataType.createStorage(pageSz); + int x = 0; + for (int k = 0; k < pageSz; k++) { + storage[k] = (int)Math.abs(100 + random.nextGaussian() * 30); + x += storage[k]; + } + int y = MemoryEstimator.estimateMemory(stat, dataType, storage, pageSz); + sum += x; + sum2 += x * x; + err2 += (x - y) * (x - y); + } + long avg = sum / size; + double err = Math.sqrt(1.0 * err2 / sum2); + int pct = MemoryEstimator.samplingPct(stat); + String msg = "Avg=" + avg + ", err=" + err + ", pct=" + pct + " " + (dataType.getCount() * 100 / size); + assertTrue(msg, err < 0.12); + assertTrue(msg, pct <= 4); + } + 
+ private static class TestDataType extends BasicDataType { + private int count; + + TestDataType() { + } + + public int getCount() { + return count; + } + + @Override + public int getMemory(Integer obj) { + ++count; + return obj; + } + + @Override + public void write(WriteBuffer buff, Integer obj) {} + + @Override + public Integer read(ByteBuffer buff) { return null; } + + @Override + public Integer[] createStorage(int size) { return new Integer[size]; } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java b/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java new file mode 100644 index 0000000000..c2d320cb7c --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestMemoryUnmapper.java @@ -0,0 +1,51 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.lang.ProcessBuilder.Redirect; +import java.nio.ByteBuffer; + +import org.h2.test.TestBase; +import org.h2.util.MemoryUnmapper; + +/** + * Tests memory unmapper. + */ +public class TestMemoryUnmapper extends TestBase { + private static final int OK = 0, /* EXCEPTION = 1, */ UNAVAILABLE = 2; + + /** + * May be used to run only this test and may be launched by this test in a + * subprocess. + * + * @param a + * if empty run this test only + */ + public static void main(String... a) throws Exception { + if (a.length == 0) { + TestBase.createCaller().init().testFromMain(); + } else { + ByteBuffer buffer = ByteBuffer.allocateDirect(10); + System.exit(MemoryUnmapper.unmap(buffer) ? 
OK : UNAVAILABLE); + } + } + + @Override + public void test() throws Exception { + ProcessBuilder pb = new ProcessBuilder().redirectError(Redirect.INHERIT); + // Test that unsafe unmapping is disabled by default + pb.command(getJVM(), "-cp", getClassPath(), "-ea", getClass().getName(), "dummy"); + assertEquals(UNAVAILABLE, pb.start().waitFor()); + // Test that it can be enabled + pb.command(getJVM(), "-cp", getClassPath(), "-ea", "-Dh2.nioCleanerHack=true", getClass().getName(), "dummy"); + assertEquals(OK, pb.start().waitFor()); + // Test that it will not be enabled with a security manager + pb.command(getJVM(), "-cp", getClassPath(), "-ea", "-Djava.security.manager", "-Dh2.nioCleanerHack=true", + getClass().getName(), "dummy"); + assertEquals(UNAVAILABLE, pb.start().waitFor()); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestMode.java b/h2/src/test/org/h2/test/unit/TestMode.java new file mode 100644 index 0000000000..e8dd8a94fe --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestMode.java @@ -0,0 +1,90 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import org.h2.engine.Mode; +import org.h2.test.TestBase; + +/** + * Unit test for the Mode class. + */ +public class TestMode extends TestBase { + + /** + * Run just this test. 
+ * + * @param a ignored + */ + public static void main(String[] a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + testDb2ClientInfo(); + testDerbyClientInfo(); + testHsqlDbClientInfo(); + testMsSqlServerClientInfo(); + testMySqlClientInfo(); + testOracleClientInfo(); + testPostgresqlClientInfo(); + } + + private void testDb2ClientInfo() { + Mode db2Mode = Mode.getInstance("DB2"); + + assertTrue(db2Mode.supportedClientInfoPropertiesRegEx.matcher( + "ApplicationName").matches()); + assertTrue(db2Mode.supportedClientInfoPropertiesRegEx.matcher( + "ClientAccountingInformation").matches()); + assertTrue(db2Mode.supportedClientInfoPropertiesRegEx.matcher( + "ClientUser").matches()); + assertTrue(db2Mode.supportedClientInfoPropertiesRegEx.matcher( + "ClientCorrelationToken").matches()); + + assertFalse(db2Mode.supportedClientInfoPropertiesRegEx.matcher( + "AnyOtherValue").matches()); + } + + private void testDerbyClientInfo() { + Mode derbyMode = Mode.getInstance("Derby"); + assertNull(derbyMode.supportedClientInfoPropertiesRegEx); + } + + private void testHsqlDbClientInfo() { + Mode hsqlMode = Mode.getInstance("HSQLDB"); + assertNull(hsqlMode.supportedClientInfoPropertiesRegEx); + } + + private void testMsSqlServerClientInfo() { + Mode msSqlMode = Mode.getInstance("MSSQLServer"); + assertNull(msSqlMode.supportedClientInfoPropertiesRegEx); + } + + private void testMySqlClientInfo() { + Mode mySqlMode = Mode.getInstance("MySQL"); + assertTrue(mySqlMode.supportedClientInfoPropertiesRegEx.matcher( + "AnyString").matches()); + } + + private void testOracleClientInfo() { + Mode oracleMode = Mode.getInstance("Oracle"); + assertTrue(oracleMode.supportedClientInfoPropertiesRegEx.matcher( + "anythingContaining.aDot").matches()); + assertFalse(oracleMode.supportedClientInfoPropertiesRegEx.matcher( + "anythingContainingNoDot").matches()); + } + + + private void testPostgresqlClientInfo() { + Mode 
postgresqlMode = Mode.getInstance("PostgreSQL"); + assertTrue(postgresqlMode.supportedClientInfoPropertiesRegEx.matcher( + "ApplicationName").matches()); + assertFalse(postgresqlMode.supportedClientInfoPropertiesRegEx.matcher( + "AnyOtherValue").matches()); + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java b/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java deleted file mode 100644 index dda4c841a1..0000000000 --- a/h2/src/test/org/h2/test/unit/TestModifyOnWrite.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; - -import org.h2.engine.SysProperties; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.util.IOUtils; -import org.h2.util.Utils; - -/** - * Test that the database file is only modified when writing to the database. - */ -public class TestModifyOnWrite extends TestBase { - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... 
a) throws Exception { - System.setProperty("h2.modifyOnWrite", "true"); - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - if (!SysProperties.MODIFY_ON_WRITE) { - return; - } - deleteDb("modifyOnWrite"); - String dbFile = getBaseDir() + "/modifyOnWrite.h2.db"; - assertFalse(FileUtils.exists(dbFile)); - Connection conn = getConnection("modifyOnWrite"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - conn.close(); - byte[] test = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test"); - assertFalse(rs.next()); - conn.close(); - assertTrue(FileUtils.exists(dbFile)); - byte[] test2 = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - assertEquals(test, test2); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - stat.execute("insert into test values(1)"); - conn.close(); - - conn = getConnection("modifyOnWrite"); - stat = conn.createStatement(); - rs = stat.executeQuery("select * from test"); - assertTrue(rs.next()); - conn.close(); - - test2 = IOUtils.readBytesAndClose(FileUtils.newInputStream(dbFile), -1); - assertFalse(Utils.compareSecure(test, test2)); - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java b/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java index bbe611fcb0..658bf5dfac 100644 --- a/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java +++ b/h2/src/test/org/h2/test/unit/TestMultiThreadedKernel.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; @@ -11,11 +11,12 @@ import java.sql.SQLException; import org.h2.test.TestBase; +import org.h2.test.TestDb; /** * Tests the multi-threaded kernel feature. */ -public class TestMultiThreadedKernel extends TestBase implements Runnable { +public class TestMultiThreadedKernel extends TestDb implements Runnable { private String url, user, password; private int id; @@ -28,20 +29,25 @@ public class TestMultiThreadedKernel extends TestBase implements Runnable { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { - if (config.networked || config.mvcc) { - return; + public boolean isEnabled() { + if (config.networked) { + return false; } + return true; + } + + @Override + public void test() throws Exception { deleteDb("multiThreadedKernel"); int count = getSize(2, 5); Thread[] list = new Thread[count]; for (int i = 0; i < count; i++) { TestMultiThreadedKernel r = new TestMultiThreadedKernel(); - r.url = getURL("multiThreadedKernel;MULTI_THREADED=1", true); + r.url = getURL("multiThreadedKernel", true); r.user = getUser(); r.password = getPassword(); r.master = this; @@ -64,7 +70,7 @@ public void run() { try { org.h2.Driver.load(); Connection conn = DriverManager.getConnection(url + - ";MULTI_THREADED=1;LOCK_MODE=3;WRITE_DELAY=0", + ";LOCK_MODE=3;WRITE_DELAY=0", user, password); conn.createStatement().execute( "CREATE TABLE TEST" + id + diff --git a/h2/src/test/org/h2/test/unit/TestNetUtils.java b/h2/src/test/org/h2/test/unit/TestNetUtils.java index 9e968c3fe5..30bf100159 100644 --- a/h2/src/test/org/h2/test/unit/TestNetUtils.java +++ b/h2/src/test/org/h2/test/unit/TestNetUtils.java @@ -1,29 +1,41 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: Sergi Vladykin */ package org.h2.test.unit; import java.io.IOException; +import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.util.HashSet; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLServerSocket; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import org.h2.build.BuildBase; +import org.h2.engine.SysProperties; import org.h2.test.TestBase; import org.h2.util.NetUtils; import org.h2.util.Task; +import org.h2.util.Utils10; /** - * Test the network utilities. + * Test the network utilities from {@link NetUtils}. * * @author Sergi Vladykin + * @author Tomas Pospichal */ public class TestNetUtils extends TestBase { private static final int WORKER_COUNT = 10; private static final int PORT = 9111; + private static final int WAIT_MILLIS = 100; + private static final int WAIT_LONGER_MILLIS = 2 * WAIT_MILLIS; + private static final String TASK_PREFIX = "ServerSocketThread-"; /** * Run just this test. @@ -31,16 +43,156 @@ public class TestNetUtils extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testAnonymousTlsSession(); + testTlsSessionWithServerSideAnonymousDisabled(); testFrequentConnections(true, 100); testFrequentConnections(false, 1000); + testIpToShortForm(); + testTcpQuickack(); } - private void testFrequentConnections(boolean ssl, int count) throws Exception { + /** + * With default settings, H2 client SSL socket should be able to connect + * to an H2 server SSL socket using an anonymous cipher suite + * (no SSL certificate is needed). 
+ */ + private void testAnonymousTlsSession() throws Exception { + if (config.ci || BuildBase.getJavaVersion() >= 11) { + // Issue #1303 + return; + } + assertTrue("Failed assumption: the default value of ENABLE_ANONYMOUS_TLS" + + " property should be true", SysProperties.ENABLE_ANONYMOUS_TLS); + boolean ssl = true; + Task task = null; + ServerSocket serverSocket = null; + Socket socket = null; + + try { + serverSocket = NetUtils.createServerSocket(PORT, ssl); + serverSocket.setSoTimeout(WAIT_LONGER_MILLIS); + task = createServerSocketTask(serverSocket); + task.execute(TASK_PREFIX + "AnonEnabled"); + Thread.sleep(WAIT_MILLIS); + socket = NetUtils.createLoopbackSocket(PORT, ssl); + assertTrue("loopback anon socket should be connected", socket.isConnected()); + SSLSession session = ((SSLSocket) socket).getSession(); + assertTrue("TLS session should be valid when anonymous TLS is enabled", + session.isValid()); + // in case of handshake failure: + // the cipher suite is the pre-handshake SSL_NULL_WITH_NULL_NULL + assertContains(session.getCipherSuite(), "_anon_"); + } finally { + closeSilently(socket); + closeSilently(serverSocket); + if (task != null) { + // SSL server socket should succeed using an anonymous cipher + // suite, and not throw javax.net.ssl.SSLHandshakeException + assertNull(task.getException()); + task.join(); + } + } + } + + /** + * TLS connections (without trusted certificates) should fail if the server + * does not allow anonymous TLS. + * The global property ENABLE_ANONYMOUS_TLS cannot be modified for the test; + * instead, the server socket is altered. 
+ */ + private void testTlsSessionWithServerSideAnonymousDisabled() throws Exception { + if (config.ci) { + // Issue #1303 + return; + } + boolean ssl = true; + Task task = null; + ServerSocket serverSocket = null; + Socket socket = null; + try { + serverSocket = NetUtils.createServerSocket(PORT, ssl); + serverSocket.setSoTimeout(WAIT_LONGER_MILLIS); + // emulate the situation ENABLE_ANONYMOUS_TLS=false on server side + String[] defaultCipherSuites = SSLContext.getDefault().getServerSocketFactory() + .getDefaultCipherSuites(); + ((SSLServerSocket) serverSocket).setEnabledCipherSuites(defaultCipherSuites); + task = createServerSocketTask(serverSocket); + task.execute(TASK_PREFIX + "AnonDisabled"); + Thread.sleep(WAIT_MILLIS); + socket = NetUtils.createLoopbackSocket(PORT, ssl); + assertTrue("loopback socket should be connected", socket.isConnected()); + // Java 6 API does not have getHandshakeSession() which could + // reveal the actual cipher selected in the attempted handshake + SSLSession session = ((SSLSocket) socket).getSession(); + assertFalse("TLS session should be invalid when the server" + + "disables anonymous TLS", session.isValid()); + // the SSL handshake should fail, because non-anon ciphers require + // a trusted certificate + assertEquals("SSL_NULL_WITH_NULL_NULL", session.getCipherSuite()); + } finally { + closeSilently(socket); + closeSilently(serverSocket); + if (task != null) { + assertNotNull(task.getException()); + assertEquals(javax.net.ssl.SSLHandshakeException.class.getName(), + task.getException().getClass().getName()); + assertContains(task.getException().getMessage(), "certificate_unknown"); + task.join(); + } + } + } + + private Task createServerSocketTask(final ServerSocket serverSocket) { + Task task = new Task() { + + @Override + public void call() throws Exception { + Socket ss = null; + try { + ss = serverSocket.accept(); + ss.getOutputStream().write(123); + } finally { + closeSilently(ss); + } + } + }; + return task; + } + + /** + 
* Close a socket, ignoring errors + * + * @param socket the socket + */ + void closeSilently(Socket socket) { + try { + if (socket != null) { + socket.close(); + } + } catch (Exception e) { + // ignore + } + } + + /** + * Close a server socket, ignoring errors + * + * @param socket the server socket + */ + void closeSilently(ServerSocket socket) { + try { + socket.close(); + } catch (Exception e) { + // ignore + } + } + + private static void testFrequentConnections(boolean ssl, int count) throws Exception { final ServerSocket serverSocket = NetUtils.createServerSocket(PORT, ssl); final AtomicInteger counter = new AtomicInteger(count); Task serverThread = new Task() { @@ -61,7 +213,7 @@ public void call() { }; serverThread.execute(); try { - Set workers = new HashSet(); + Set workers = new HashSet<>(); for (int i = 0; i < WORKER_COUNT; i++) { workers.add(new ConnectWorker(ssl, counter)); } @@ -96,7 +248,7 @@ private static class ConnectWorker extends Thread { private final AtomicInteger counter; private Exception exception; - public ConnectWorker(boolean ssl, AtomicInteger counter) { + ConnectWorker(boolean ssl, AtomicInteger counter) { this.ssl = ssl; this.counter = counter; } @@ -123,4 +275,60 @@ public Exception getException() { } + private void testIpToShortForm() throws Exception { + testIpToShortForm("1.2.3.4", "1.2.3.4"); + testIpToShortForm("1:2:3:4:a:b:c:d", "1:2:3:4:a:b:c:d"); + testIpToShortForm("::1", "::1"); + testIpToShortForm("1::", "1::"); + testIpToShortForm("c1c1:0:0:2::fffe", "c1c1:0:0:2:0:0:0:fffe"); + } + + private void testIpToShortForm(String expected, String source) throws Exception { + byte[] addr = InetAddress.getByName(source).getAddress(); + testIpToShortForm(expected, addr, false); + if (expected.indexOf(':') >= 0) { + expected = '[' + expected + ']'; + } + testIpToShortForm(expected, addr, true); + } + + private void testIpToShortForm(String expected, byte[] addr, boolean addBrackets) { + assertEquals(expected, 
NetUtils.ipToShortForm(null, addr, addBrackets).toString()); + assertEquals(expected, NetUtils.ipToShortForm(new StringBuilder(), addr, addBrackets).toString()); + assertEquals(expected, + NetUtils.ipToShortForm(new StringBuilder("*"), addr, addBrackets).deleteCharAt(0).toString()); + } + + private void testTcpQuickack() { + final boolean ssl = !config.ci && BuildBase.getJavaVersion() < 11; + try (ServerSocket serverSocket = NetUtils.createServerSocket(PORT, ssl)) { + Thread thread = new Thread() { + @Override + public void run() { + try (Socket s = NetUtils.createLoopbackSocket(PORT, ssl)) { + s.getInputStream().read(); + } catch (IOException e) { + } + } + }; + thread.start(); + try (Socket socket = serverSocket.accept()) { + boolean supported = Utils10.setTcpQuickack(socket, true); + if (supported) { + assertTrue(Utils10.getTcpQuickack(socket)); + Utils10.setTcpQuickack(socket, false); + assertFalse(Utils10.getTcpQuickack(socket)); + } + socket.getOutputStream().write(1); + } finally { + try { + thread.join(); + } catch (InterruptedException e) { + } + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java b/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java index f6684e449a..47274f014d 100644 --- a/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java +++ b/h2/src/test/org/h2/test/unit/TestObjectDeserialization.java @@ -1,11 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: Noah Fontes */ package org.h2.test.unit; -import org.h2.message.DbException; +import org.h2.api.ErrorCode; import org.h2.test.TestBase; import org.h2.util.JdbcUtils; import org.h2.util.StringUtils; @@ -33,7 +33,7 @@ public class TestObjectDeserialization extends TestBase { */ public static void main(String... a) throws Exception { System.setProperty("h2.useThreadContextClassLoader", "true"); - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -44,12 +44,8 @@ public void test() { private void testThreadContextClassLoader() { usesThreadContextClassLoader = false; Thread.currentThread().setContextClassLoader(new TestClassLoader()); - try { - JdbcUtils.deserialize(StringUtils.convertHexToBytes(OBJECT), null); - fail(); - } catch (DbException e) { - // expected - } + assertThrows(ErrorCode.DESERIALIZATION_FAILED_1, + () -> JdbcUtils.deserialize(StringUtils.convertHexToBytes(OBJECT), null)); assertTrue(usesThreadContextClassLoader); } diff --git a/h2/src/test/org/h2/test/unit/TestOldVersion.java b/h2/src/test/org/h2/test/unit/TestOldVersion.java deleted file mode 100644 index 08dc09d28a..0000000000 --- a/h2/src/test/org/h2/test/unit/TestOldVersion.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.lang.reflect.Method; -import java.net.URL; -import java.net.URLClassLoader; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import java.util.Properties; - -import org.h2.api.ErrorCode; -import org.h2.test.TestBase; -import org.h2.tools.Server; - -/** - * Tests the compatibility with older versions - */ -public class TestOldVersion extends TestBase { - - private ClassLoader cl; - private Driver driver; - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - if (config.mvStore) { - return; - } - cl = getClassLoader("file:ext/h2-1.2.127.jar"); - driver = getDriver(cl); - if (driver == null) { - println("not found: ext/h2-1.2.127.jar - test skipped"); - return; - } - Connection conn = driver.connect("jdbc:h2:mem:", null); - assertEquals("1.2.127 (2010-01-15)", conn.getMetaData() - .getDatabaseProductVersion()); - conn.close(); - testLobInFiles(); - testOldClientNewServer(); - } - - private void testLobInFiles() throws Exception { - deleteDb("oldVersion"); - Connection conn; - Statement stat; - conn = driver.connect("jdbc:h2:" + getBaseDir() + "/oldVersion", null); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, b blob, c clob)"); - PreparedStatement prep = conn - .prepareStatement("insert into test values(?, ?, ?)"); - prep.setInt(1, 0); - prep.setNull(2, Types.BLOB); - prep.setNull(3, Types.CLOB); - prep.execute(); - prep.setInt(1, 1); - prep.setBytes(2, new byte[0]); - prep.setString(3, ""); - prep.execute(); - prep.setInt(1, 2); - prep.setBytes(2, new byte[5]); - prep.setString(3, 
"\u1234\u1234\u1234\u1234\u1234"); - prep.execute(); - prep.setInt(1, 3); - prep.setBytes(2, new byte[100000]); - prep.setString(3, new String(new char[100000])); - prep.execute(); - conn.close(); - conn = DriverManager.getConnection("jdbc:h2:" + getBaseDir() + - "/oldVersion", new Properties()); - stat = conn.createStatement(); - checkResult(stat.executeQuery("select * from test order by id")); - stat.execute("create table test2 as select * from test"); - checkResult(stat.executeQuery("select * from test2 order by id")); - stat.execute("delete from test"); - conn.close(); - } - - private void checkResult(ResultSet rs) throws SQLException { - rs.next(); - assertEquals(0, rs.getInt(1)); - assertEquals(null, rs.getBytes(2)); - assertEquals(null, rs.getString(3)); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertEquals(new byte[0], rs.getBytes(2)); - assertEquals("", rs.getString(3)); - rs.next(); - assertEquals(2, rs.getInt(1)); - assertEquals(new byte[5], rs.getBytes(2)); - assertEquals("\u1234\u1234\u1234\u1234\u1234", rs.getString(3)); - rs.next(); - assertEquals(3, rs.getInt(1)); - assertEquals(new byte[100000], rs.getBytes(2)); - assertEquals(new String(new char[100000]), rs.getString(3)); - } - - private void testOldClientNewServer() throws Exception { - Server server = org.h2.tools.Server.createTcpServer("-tcpPort", "9001"); - server.start(); - assertThrows(ErrorCode.DRIVER_VERSION_ERROR_2, driver).connect( - "jdbc:h2:tcp://localhost:9001/mem:test", null); - server.stop(); - - Class serverClass = cl.loadClass("org.h2.tools.Server"); - Method m; - m = serverClass.getMethod("createTcpServer", String[].class); - Object serverOld = m.invoke(null, new Object[] { new String[] { - "-tcpPort", "9001" } }); - m = serverOld.getClass().getMethod("start"); - m.invoke(serverOld); - Connection conn; - conn = org.h2.Driver.load().connect("jdbc:h2:mem:", null); - Statement stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("call 1"); - rs.next(); - 
assertEquals(1, rs.getInt(1)); - conn.close(); - m = serverOld.getClass().getMethod("stop"); - m.invoke(serverOld); - } - - private static ClassLoader getClassLoader(String jarFile) throws Exception { - URL[] urls = { new URL(jarFile) }; - return new URLClassLoader(urls, null); - } - - private static Driver getDriver(ClassLoader cl) throws Exception { - Class driverClass; - try { - driverClass = cl.loadClass("org.h2.Driver"); - } catch (ClassNotFoundException e) { - return null; - } - Method m = driverClass.getMethod("load"); - Driver driver = (Driver) m.invoke(null); - return driver; - } - -} diff --git a/h2/src/test/org/h2/test/unit/TestOverflow.java b/h2/src/test/org/h2/test/unit/TestOverflow.java index 9cfbeb9df3..c23d34c858 100644 --- a/h2/src/test/org/h2/test/unit/TestOverflow.java +++ b/h2/src/test/org/h2/test/unit/TestOverflow.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -8,10 +8,10 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.Random; + import org.h2.test.TestBase; -import org.h2.util.New; import org.h2.value.Value; -import org.h2.value.ValueString; +import org.h2.value.ValueVarchar; /** * Tests numeric overflow on various data types. @@ -30,19 +30,19 @@ public class TestOverflow extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() { - test(Value.BYTE, Byte.MIN_VALUE, Byte.MAX_VALUE); - test(Value.INT, Integer.MIN_VALUE, Integer.MAX_VALUE); - test(Value.LONG, Long.MIN_VALUE, Long.MAX_VALUE); - test(Value.SHORT, Short.MIN_VALUE, Short.MAX_VALUE); + test(Value.TINYINT, Byte.MIN_VALUE, Byte.MAX_VALUE); + test(Value.INTEGER, Integer.MIN_VALUE, Integer.MAX_VALUE); + test(Value.BIGINT, Long.MIN_VALUE, Long.MAX_VALUE); + test(Value.SMALLINT, Short.MIN_VALUE, Short.MAX_VALUE); } private void test(int type, long minValue, long maxValue) { - values = New.arrayList(); + values = new ArrayList<>(); this.dataType = type; this.min = new BigInteger("" + minValue); this.max = new BigInteger("" + maxValue); @@ -124,7 +124,7 @@ private boolean inRange(BigInteger v) { } private void add(long l) { - values.add(ValueString.get("" + l).convertTo(dataType)); + values.add(ValueVarchar.get("" + l).convertTo(dataType)); } } diff --git a/h2/src/test/org/h2/test/unit/TestPageStore.java b/h2/src/test/org/h2/test/unit/TestPageStore.java deleted file mode 100644 index a9863c1234..0000000000 --- a/h2/src/test/org/h2/test/unit/TestPageStore.java +++ /dev/null @@ -1,900 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.io.InputStream; -import java.io.InputStreamReader; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import org.h2.api.DatabaseEventListener; -import org.h2.api.ErrorCode; -import org.h2.result.Row; -import org.h2.store.Page; -import org.h2.store.fs.FileUtils; -import org.h2.test.TestBase; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; -import org.h2.util.New; - -/** - * Test the page store. - */ -public class TestPageStore extends TestBase { - - /** - * The events log. - */ - static StringBuilder eventBuffer = new StringBuilder(); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - System.setProperty("h2.check2", "true"); - TestBase.createCaller().init().test(); - } - - @Override - public void test() throws Exception { - deleteDb(null); - testDropTempTable(); - testLogLimitFalsePositive(); - testLogLimit(); - testRecoverLobInDatabase(); - testWriteTransactionLogBeforeData(); - testDefrag(); - testInsertReverse(); - testInsertDelete(); - testCheckpoint(); - testDropRecreate(); - testDropAll(); - testCloseTempTable(); - testDuplicateKey(); - testUpdateOverflow(); - testTruncateReconnect(); - testReverseIndex(); - testLargeUpdates(); - testLargeInserts(); - testLargeDatabaseFastOpen(); - testUniqueIndexReopen(); - testLargeRows(); - testRecoverDropIndex(); - testDropPk(); - testCreatePkLater(); - testTruncate(); - testLargeIndex(); - testUniqueIndex(); - testCreateIndexLater(); - testFuzzOperations(); - deleteDb(null); - } - - private void testDropTempTable() throws SQLException { - deleteDb("pageStoreDropTemp"); - Connection c1 = getConnection("pageStoreDropTemp"); - 
Connection c2 = getConnection("pageStoreDropTemp"); - c1.setAutoCommit(false); - c2.setAutoCommit(false); - Statement s1 = c1.createStatement(); - Statement s2 = c2.createStatement(); - s1.execute("create local temporary table a(id int primary key)"); - s1.execute("insert into a values(1)"); - c1.commit(); - c1.close(); - s2.execute("create table b(id int primary key)"); - s2.execute("insert into b values(1)"); - c2.commit(); - s2.execute("checkpoint sync"); - s2.execute("shutdown immediately"); - try { - c2.close(); - } catch (SQLException e) { - // ignore - } - c1 = getConnection("pageStoreDropTemp"); - c1.close(); - deleteDb("pageStoreDropTemp"); - } - - private void testLogLimit() throws Exception { - if (config.mvStore) { - return; - } - deleteDb("pageStoreLogLimit"); - Connection conn, conn2; - Statement stat, stat2; - String url = "pageStoreLogLimit;TRACE_LEVEL_FILE=2"; - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - conn.setAutoCommit(false); - stat.execute("insert into test values(1)"); - - conn2 = getConnection(url); - stat2 = conn2.createStatement(); - stat2.execute("create table t2(id identity, name varchar)"); - stat2.execute("set max_log_size 1"); - for (int i = 0; i < 10; i++) { - stat2.execute("insert into t2(name) " + - "select space(100) from system_range(1, 1000)"); - } - InputStream in = FileUtils.newInputStream(getBaseDir() + - "/pageStoreLogLimit.trace.db"); - String s = IOUtils.readStringAndClose(new InputStreamReader(in), -1); - assertTrue(s.indexOf("Transaction log could not be truncated") > 0); - conn.commit(); - ResultSet rs = stat2.executeQuery("select * from test"); - assertTrue(rs.next()); - conn2.close(); - conn.close(); - } - - private void testLogLimitFalsePositive() throws Exception { - deleteDb("pageStoreLogLimitFalsePositive"); - String url = "pageStoreLogLimitFalsePositive;TRACE_LEVEL_FILE=2"; - Connection conn = getConnection(url); - Statement stat = 
conn.createStatement(); - stat.execute("set max_log_size 1"); - stat.execute("create table test(x varchar)"); - for (int i = 0; i < 1000; ++i) { - stat.execute("insert into test values (space(2000))"); - } - stat.execute("checkpoint"); - InputStream in = FileUtils.newInputStream(getBaseDir() + - "/pageStoreLogLimitFalsePositive.trace.db"); - String s = IOUtils.readStringAndClose(new InputStreamReader(in), -1); - assertFalse(s.indexOf("Transaction log could not be truncated") > 0); - conn.close(); - } - - private void testRecoverLobInDatabase() throws SQLException { - deleteDb("pageStoreRecoverLobInDatabase"); - String url = getURL("pageStoreRecoverLobInDatabase;" + - "MVCC=TRUE;CACHE_SIZE=1", true); - Connection conn; - Statement stat; - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name clob)"); - stat.execute("create index idx_id on test(id)"); - stat.execute("insert into test " + - "select x, space(1100+x) from system_range(1, 100)"); - Random r = new Random(1); - ArrayList list = New.arrayList(); - for (int i = 0; i < 10; i++) { - Connection conn2 = getConnection(url, getUser(), getPassword()); - list.add(conn2); - Statement stat2 = conn2.createStatement(); - conn2.setAutoCommit(false); - if (r.nextBoolean()) { - stat2.execute("update test set id = id where id = " + r.nextInt(100)); - } else { - stat2.execute("delete from test where id = " + r.nextInt(100)); - } - } - stat.execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - for (Connection c : list) { - JdbcUtils.closeSilently(c); - } - conn = getConnection(url, getUser(), getPassword()); - conn.close(); - } - - private void testWriteTransactionLogBeforeData() throws SQLException { - deleteDb("pageStoreWriteTransactionLogBeforeData"); - String url = getURL("pageStoreWriteTransactionLogBeforeData;" + - "CACHE_SIZE=16;WRITE_DELAY=1000000", true); - Connection conn; - Statement stat; - conn = 
getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("create table test(name varchar) as select space(100000)"); - for (int i = 0; i < 100; i++) { - stat.execute("create table test" + i + "(id int) " + - "as select x from system_range(1, 1000)"); - } - conn.close(); - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - stat.execute("drop table test0"); - stat.execute("select * from test"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore - } - conn = getConnection(url, getUser(), getPassword()); - stat = conn.createStatement(); - for (int i = 1; i < 100; i++) { - stat.execute("select * from test" + i); - } - conn.close(); - } - - private void testDefrag() throws SQLException { - if (config.reopen) { - return; - } - deleteDb("pageStoreDefrag"); - Connection conn = getConnection( - "pageStoreDefrag;LOG=0;UNDO_LOG=0;LOCK_MODE=0"); - Statement stat = conn.createStatement(); - int tableCount = 10; - int rowCount = getSize(1000, 100000); - for (int i = 0; i < tableCount; i++) { - stat.execute("create table test" + i + "(id int primary key, " + - "string1 varchar, string2 varchar, string3 varchar)"); - } - for (int j = 0; j < tableCount; j++) { - PreparedStatement prep = conn.prepareStatement( - "insert into test" + j + " values(?, ?, ?, ?)"); - for (int i = 0; i < rowCount; i++) { - prep.setInt(1, i); - prep.setInt(2, i); - prep.setInt(3, i); - prep.setInt(4, i); - prep.execute(); - } - } - stat.executeUpdate("shutdown defrag"); - conn.close(); - } - - private void testInsertReverse() throws SQLException { - deleteDb("pageStoreInsertReverse"); - Connection conn; - conn = getConnection("pageStoreInsertReverse"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, data varchar)"); - stat.execute("insert into test select -x, space(100) " + - "from system_range(1, 1000)"); - stat.execute("drop table 
test"); - stat.execute("create table test(id int primary key, data varchar)"); - stat.execute("insert into test select -x, space(2048) " + - "from system_range(1, 1000)"); - conn.close(); - } - - private void testInsertDelete() { - Row[] x = new Row[0]; - Row r = new Row(null, 0); - x = Page.insert(x, 0, 0, r); - assertTrue(x[0] == r); - Row r2 = new Row(null, 0); - x = Page.insert(x, 1, 0, r2); - assertTrue(x[0] == r2); - assertTrue(x[1] == r); - Row r3 = new Row(null, 0); - x = Page.insert(x, 2, 1, r3); - assertTrue(x[0] == r2); - assertTrue(x[1] == r3); - assertTrue(x[2] == r); - - x = Page.remove(x, 3, 1); - assertTrue(x[0] == r2); - assertTrue(x[1] == r); - x = Page.remove(x, 2, 0); - assertTrue(x[0] == r); - x = Page.remove(x, 1, 0); - } - - private void testCheckpoint() throws SQLException { - deleteDb("pageStoreCheckpoint"); - Connection conn; - conn = getConnection("pageStoreCheckpoint"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data varchar)"); - stat.execute("create sequence seq"); - stat.execute("set max_log_size 1"); - conn.setAutoCommit(false); - stat.execute("insert into test select space(1000) from system_range(1, 1000)"); - long before = System.currentTimeMillis(); - stat.execute("select nextval('SEQ') from system_range(1, 100000)"); - long after = System.currentTimeMillis(); - // it's hard to test - basically it shouldn't checkpoint too often - if (after - before > 10000) { - if (!config.reopen) { - fail("Checkpoint took " + (after - before) + " ms"); - } - } - stat.execute("drop table test"); - stat.execute("drop sequence seq"); - conn.close(); - } - - private void testDropRecreate() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreDropRecreate"); - Connection conn; - conn = getConnection("pageStoreDropRecreate"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int)"); - stat.execute("create index idx_test on test(id)"); - stat.execute("create table 
test2(id int)"); - stat.execute("drop table test"); - // this will re-used the object id of the test table, - // which is lower than the object id of test2 - stat.execute("create index idx_test on test2(id)"); - conn.close(); - conn = getConnection("pageStoreDropRecreate"); - conn.close(); - } - - private void testDropAll() throws SQLException { - deleteDb("pageStoreDropAll"); - Connection conn; - String url = "pageStoreDropAll"; - conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("CREATE TEMP TABLE A(A INT)"); - stat.execute("CREATE TABLE B(A VARCHAR IDENTITY)"); - stat.execute("CREATE TEMP TABLE C(A INT)"); - conn.close(); - conn = getConnection(url); - stat = conn.createStatement(); - stat.execute("DROP ALL OBJECTS"); - conn.close(); - } - - private void testCloseTempTable() throws SQLException { - deleteDb("pageStoreCloseTempTable"); - Connection conn; - String url = "pageStoreCloseTempTable;CACHE_SIZE=0"; - conn = getConnection(url); - Statement stat = conn.createStatement(); - stat.execute("create local temporary table test(id int)"); - conn.rollback(); - Connection conn2 = getConnection(url); - Statement stat2 = conn2.createStatement(); - stat2.execute("create table test2 as select x from system_range(1, 5000)"); - stat2.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn2).close(); - } - - private void testDuplicateKey() throws SQLException { - deleteDb("pageStoreDuplicateKey"); - Connection conn; - conn = getConnection("pageStoreDuplicateKey"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("insert into test values(0, space(3000))"); - try { - stat.execute("insert into test values(0, space(3000))"); - } catch (SQLException e) { - // ignore - } - stat.execute("select * from test"); - conn.close(); - } - - private void testTruncateReconnect() throws 
SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreTruncateReconnect"); - Connection conn; - conn = getConnection("pageStoreTruncateReconnect"); - conn.createStatement().execute( - "create table test(id int primary key, name varchar)"); - conn.createStatement().execute( - "insert into test(id) select x from system_range(1, 390)"); - conn.createStatement().execute("checkpoint"); - conn.createStatement().execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreTruncateReconnect"); - conn.createStatement().execute("truncate table test"); - conn.createStatement().execute( - "insert into test(id) select x from system_range(1, 390)"); - conn.createStatement().execute("shutdown immediately"); - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreTruncateReconnect"); - conn.close(); - } - - private void testUpdateOverflow() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUpdateOverflow"); - Connection conn; - conn = getConnection("pageStoreUpdateOverflow"); - conn.createStatement().execute("create table test" + - "(id int primary key, name varchar)"); - conn.createStatement().execute( - "insert into test values(0, space(3000))"); - conn.createStatement().execute("checkpoint"); - conn.createStatement().execute("shutdown immediately"); - - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreUpdateOverflow"); - conn.createStatement().execute("update test set id = 1"); - conn.createStatement().execute("shutdown immediately"); - - JdbcUtils.closeSilently(conn); - conn = getConnection("pageStoreUpdateOverflow"); - conn.close(); - } - - private void testReverseIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreReverseIndex"); - Connection conn = getConnection("pageStoreReverseIndex"); - Statement stat = conn.createStatement(); - stat.execute("create table test(x int, y varchar default space(200))"); - for (int i = 30; i < 
100; i++) { - stat.execute("insert into test(x) select null from system_range(1, " + i + ")"); - stat.execute("insert into test(x) select x from system_range(1, " + i + ")"); - stat.execute("create index idx on test(x desc, y)"); - ResultSet rs = stat.executeQuery("select min(x) from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - stat.execute("drop index idx"); - stat.execute("truncate table test"); - } - conn.close(); - } - - private void testLargeUpdates() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeUpdates"); - Connection conn; - conn = getConnection("pageStoreLargeUpdates"); - Statement stat = conn.createStatement(); - int size = 1500; - stat.execute("call rand(1)"); - stat.execute( - "create table test(id int primary key, data varchar, test int) as " + - "select x, '', 123 from system_range(1, " + size + ")"); - Random random = new Random(1); - PreparedStatement prep = conn.prepareStatement( - "update test set data=space(?) where id=?"); - for (int i = 0; i < 2500; i++) { - int id = random.nextInt(size); - int newSize = random.nextInt(6000); - prep.setInt(1, newSize); - prep.setInt(2, id); - prep.execute(); - } - conn.close(); - conn = getConnection("pageStoreLargeUpdates"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from test where test<>123"); - assertFalse(rs.next()); - conn.close(); - } - - private void testLargeInserts() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeInserts"); - Connection conn; - conn = getConnection("pageStoreLargeInserts"); - Statement stat = conn.createStatement(); - stat.execute("create table test(data varchar)"); - stat.execute("insert into test values(space(1024 * 1024))"); - stat.execute("insert into test values(space(1024 * 1024))"); - conn.close(); - } - - private void testLargeDatabaseFastOpen() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeDatabaseFastOpen"); - 
Connection conn; - String url = "pageStoreLargeDatabaseFastOpen"; - conn = getConnection(url); - conn.createStatement().execute( - "CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)"); - conn.createStatement().execute( - "create unique index idx_test_name on test(name)"); - conn.createStatement().execute( - "INSERT INTO TEST " + - "SELECT X, X || space(10) FROM SYSTEM_RANGE(1, 1000)"); - conn.close(); - conn = getConnection(url); - conn.createStatement().execute("DELETE FROM TEST WHERE ID=1"); - conn.createStatement().execute("CHECKPOINT"); - conn.createStatement().execute("SHUTDOWN IMMEDIATELY"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - eventBuffer.setLength(0); - conn = getConnection(url + ";DATABASE_EVENT_LISTENER='" + - MyDatabaseEventListener.class.getName() + "'"); - assertEquals("init;opened;", eventBuffer.toString()); - conn.close(); - } - - private void testUniqueIndexReopen() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUniqueIndexReopen"); - Connection conn; - String url = "pageStoreUniqueIndexReopen"; - conn = getConnection(url); - conn.createStatement().execute( - "CREATE TABLE test(ID INT PRIMARY KEY, NAME VARCHAR(255))"); - conn.createStatement().execute( - "create unique index idx_test_name on test(name)"); - conn.createStatement().execute("INSERT INTO TEST VALUES(1, 'Hello')"); - conn.close(); - conn = getConnection(url); - assertThrows(ErrorCode.DUPLICATE_KEY_1, conn.createStatement()) - .execute("INSERT INTO TEST VALUES(2, 'Hello')"); - conn.close(); - } - - private void testLargeRows() throws Exception { - if (config.memory) { - return; - } - for (int i = 0; i < 10; i++) { - testLargeRows(i); - } - } - - private void testLargeRows(int seed) throws Exception { - deleteDb("pageStoreLargeRows"); - String url = getURL("pageStoreLargeRows;CACHE_SIZE=16", true); - Connection conn = null; - Statement stat = null; - int count = 0; - try { - Class.forName("org.h2.Driver"); - conn = 
DriverManager.getConnection(url); - stat = conn.createStatement(); - int tableCount = 1; - PreparedStatement[] insert = new PreparedStatement[tableCount]; - PreparedStatement[] deleteMany = new PreparedStatement[tableCount]; - PreparedStatement[] updateMany = new PreparedStatement[tableCount]; - for (int i = 0; i < tableCount; i++) { - stat.execute("create table test" + i + - "(id int primary key, name varchar)"); - stat.execute("create index idx_test" + i + " on test" + i + - "(name)"); - insert[i] = conn.prepareStatement("insert into test" + i + - " values(?, ? || space(?))"); - deleteMany[i] = conn.prepareStatement("delete from test" + i + - " where id between ? and ?"); - updateMany[i] = conn.prepareStatement("update test" + i + - " set name=? || space(?) where id between ? and ?"); - } - Random random = new Random(seed); - for (int i = 0; i < 1000; i++) { - count = i; - PreparedStatement p; - if (random.nextInt(100) < 95) { - p = insert[random.nextInt(tableCount)]; - p.setInt(1, i); - p.setInt(2, i); - if (random.nextInt(30) == 5) { - p.setInt(3, 3000); - } else { - p.setInt(3, random.nextInt(100)); - } - p.execute(); - } else if (random.nextInt(100) < 90) { - p = updateMany[random.nextInt(tableCount)]; - p.setInt(1, i); - p.setInt(2, random.nextInt(50)); - int first = random.nextInt(1 + i); - p.setInt(3, first); - p.setInt(4, first + random.nextInt(50)); - p.executeUpdate(); - } else { - p = deleteMany[random.nextInt(tableCount)]; - int first = random.nextInt(1 + i); - p.setInt(1, first); - p.setInt(2, first + random.nextInt(100)); - p.executeUpdate(); - } - } - conn.close(); - conn = DriverManager.getConnection(url); - conn.close(); - conn = DriverManager.getConnection(url); - stat = conn.createStatement(); - stat.execute("script to '" + getBaseDir() + "/pageStoreLargeRows.sql'"); - conn.close(); - FileUtils.delete(getBaseDir() + "/pageStoreLargeRows.sql"); - } catch (Exception e) { - if (stat != null) { - try { - stat.execute("shutdown immediately"); - } 
catch (SQLException e2) { - // ignore - } - } - if (conn != null) { - try { - conn.close(); - } catch (SQLException e2) { - // ignore - } - } - throw new RuntimeException("count: " + count, e); - } - } - - private void testRecoverDropIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreRecoverDropIndex"); - Connection conn = getConnection("pageStoreRecoverDropIndex"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int, name varchar) " + - "as select x, x from system_range(1, 1400)"); - stat.execute("create index idx_name on test(name)"); - conn.close(); - conn = getConnection("pageStoreRecoverDropIndex"); - stat = conn.createStatement(); - stat.execute("drop index idx_name"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - conn = getConnection("pageStoreRecoverDropIndex;cache_size=1"); - conn.close(); - } - - private void testDropPk() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreDropPk"); - Connection conn; - Statement stat; - conn = getConnection("pageStoreDropPk"); - stat = conn.createStatement(); - stat.execute("create table test(id int primary key)"); - stat.execute("insert into test values(" + Integer.MIN_VALUE + "), (" + - Integer.MAX_VALUE + ")"); - stat.execute("alter table test drop primary key"); - conn.close(); - conn = getConnection("pageStoreDropPk"); - stat = conn.createStatement(); - stat.execute("insert into test values(" + Integer.MIN_VALUE + "), (" + - Integer.MAX_VALUE + ")"); - conn.close(); - } - - private void testCreatePkLater() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreCreatePkLater"); - Connection conn; - Statement stat; - conn = getConnection("pageStoreCreatePkLater"); - stat = conn.createStatement(); - stat.execute("create table test(id int not null) as select 100"); - stat.execute("create primary key 
on test(id)"); - conn.close(); - conn = getConnection("pageStoreCreatePkLater"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("select * from test where id = 100"); - assertTrue(rs.next()); - conn.close(); - } - - private void testTruncate() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreTruncate"); - Connection conn = getConnection("pageStoreTruncate"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int) as select 1"); - stat.execute("truncate table test"); - stat.execute("insert into test values(1)"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (SQLException e) { - // ignore - } - conn = getConnection("pageStoreTruncate"); - conn.close(); - } - - private void testLargeIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreLargeIndex"); - Connection conn = getConnection("pageStoreLargeIndex"); - conn.createStatement().execute( - "create table test(id varchar primary key, d varchar)"); - PreparedStatement prep = conn.prepareStatement( - "insert into test values(?, space(500))"); - for (int i = 0; i < 20000; i++) { - prep.setString(1, "" + i); - prep.executeUpdate(); - } - conn.close(); - } - - private void testUniqueIndex() throws SQLException { - if (config.memory) { - return; - } - deleteDb("pageStoreUniqueIndex"); - Connection conn = getConnection("pageStoreUniqueIndex"); - Statement stat = conn.createStatement(); - stat.execute("CREATE TABLE TEST(ID INT UNIQUE)"); - stat.execute("INSERT INTO TEST VALUES(1)"); - conn.close(); - conn = getConnection("pageStoreUniqueIndex"); - assertThrows(ErrorCode.DUPLICATE_KEY_1, - conn.createStatement()).execute("INSERT INTO TEST VALUES(1)"); - conn.close(); - } - - private void testCreateIndexLater() throws SQLException { - deleteDb("pageStoreCreateIndexLater"); - Connection conn = getConnection("pageStoreCreateIndexLater"); - Statement stat 
= conn.createStatement(); - stat.execute("CREATE TABLE TEST(NAME VARCHAR) AS SELECT 1"); - stat.execute("CREATE INDEX IDX_N ON TEST(NAME)"); - stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(20, 100)"); - stat.execute("INSERT INTO TEST SELECT X FROM SYSTEM_RANGE(1000, 1100)"); - stat.execute("SHUTDOWN IMMEDIATELY"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection("pageStoreCreateIndexLater"); - conn.close(); - } - - private void testFuzzOperations() throws Exception { - int best = Integer.MAX_VALUE; - for (int i = 0; i < 10; i++) { - int x = testFuzzOperationsSeed(i, 10); - if (x >= 0 && x < best) { - best = x; - fail("op:" + x + " seed:" + i); - } - } - } - - private int testFuzzOperationsSeed(int seed, int len) throws SQLException { - deleteDb("pageStoreFuzz"); - Connection conn = getConnection("pageStoreFuzz"); - Statement stat = conn.createStatement(); - log("DROP TABLE IF EXISTS TEST;"); - stat.execute("DROP TABLE IF EXISTS TEST"); - log("CREATE TABLE TEST(ID INT PRIMARY KEY, " + - "NAME VARCHAR DEFAULT 'Hello World');"); - stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, " + - "NAME VARCHAR DEFAULT 'Hello World')"); - Set rows = new TreeSet(); - Random random = new Random(seed); - for (int i = 0; i < len; i++) { - int op = random.nextInt(3); - Integer x = random.nextInt(100); - switch (op) { - case 0: - if (!rows.contains(x)) { - log("insert into test(id) values(" + x + ");"); - stat.execute("INSERT INTO TEST(ID) VALUES(" + x + ");"); - rows.add(x); - } - break; - case 1: - if (rows.contains(x)) { - log("delete from test where id=" + x + ";"); - stat.execute("DELETE FROM TEST WHERE ID=" + x); - rows.remove(x); - } - break; - case 2: - conn.close(); - conn = getConnection("pageStoreFuzz"); - stat = conn.createStatement(); - ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID"); - log("--reconnect"); - for (int test : rows) { - if (!rs.next()) { - log("error: expected next"); - conn.close(); - return 
i; - } - int y = rs.getInt(1); - // System.out.println(" " + x); - if (y != test) { - log("error: " + y + " <> " + test); - conn.close(); - return i; - } - } - if (rs.next()) { - log("error: unexpected next"); - conn.close(); - return i; - } - } - } - conn.close(); - return -1; - } - - private void log(String m) { - trace(" " + m); - } - - /** - * A database event listener used in this test. - */ - public static final class MyDatabaseEventListener implements - DatabaseEventListener { - - @Override - public void closingDatabase() { - event("closing"); - } - - @Override - public void exceptionThrown(SQLException e, String sql) { - event("exceptionThrown " + e + " " + sql); - } - - @Override - public void init(String url) { - event("init"); - } - - @Override - public void opened() { - event("opened"); - } - - @Override - public void setProgress(int state, String name, int x, int max) { - if (name.startsWith("SYS:SYS_ID")) { - // ignore - return; - } - switch (state) { - case DatabaseEventListener.STATE_STATEMENT_START: - case DatabaseEventListener.STATE_STATEMENT_END: - case DatabaseEventListener.STATE_STATEMENT_PROGRESS: - return; - } - event("setProgress " + state + " " + name + " " + x + " " + max); - } - - private static void event(String s) { - eventBuffer.append(s).append(';'); - } - } -} diff --git a/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java b/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java index 67a38fe717..6cbf7a5791 100644 --- a/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java +++ b/h2/src/test/org/h2/test/unit/TestPageStoreCoverage.java @@ -1,26 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; -import java.nio.channels.FileChannel; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; - -import org.h2.api.ErrorCode; -import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Restore; /** * Test the page store. */ -public class TestPageStoreCoverage extends TestBase { +public class TestPageStoreCoverage extends TestDb { private static final String URL = "pageStoreCoverage;" + "PAGE_SIZE=64;CACHE_SIZE=16;MAX_LOG_SIZE=1"; @@ -31,20 +28,24 @@ public class TestPageStoreCoverage extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { - // TODO mvcc, 2-phase commit + public boolean isEnabled() { if (config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { + // TODO mvcc, 2-phase commit deleteDb("pageStoreCoverage"); testMoveRoot(); testBasic(); testReadOnly(); - testIncompleteCreate(); testBackupRestore(); testTrim(); testLongTransaction(); @@ -95,55 +96,54 @@ private void testMoveRoot() throws SQLException { } private void testRecoverTemp() throws SQLException { - Connection conn; - conn = getConnection(URL); - Statement stat = conn.createStatement(); - stat.execute("create cached temporary table test(id identity, name varchar)"); - stat.execute("create index idx_test_name on test(name)"); - stat.execute("create index idx_test_name2 on test(name, id)"); - stat.execute("create table test2(id identity, name varchar)"); - stat.execute("create index idx_test2_name on test2(name desc)"); - stat.execute("create index idx_test2_name2 on test2(name, id)"); - stat.execute("insert into test2 " + - "select null, space(10) from 
system_range(1, 10)"); - stat.execute("create table test3(id identity, name varchar)"); - stat.execute("checkpoint"); - conn.setAutoCommit(false); - stat.execute("create table test4(id identity, name varchar)"); - stat.execute("create index idx_test4_name2 on test(name, id)"); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("insert into test3 " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("insert into test4 " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("truncate table test2"); - stat.execute("drop index idx_test_name"); - stat.execute("drop index idx_test2_name"); - stat.execute("drop table test2"); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection(URL); - stat = conn.createStatement(); - stat.execute("drop all objects"); - // re-allocate index root pages - for (int i = 0; i < 10; i++) { - stat.execute("create table test" + i + "(id identity, name varchar)"); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("create cached temporary table test(id identity, name varchar)"); + stat.execute("create index idx_test_name on test(name)"); + stat.execute("create index idx_test_name2 on test(name, id)"); + stat.execute("create table test2(id identity, name varchar)"); + stat.execute("create index idx_test2_name on test2(name desc)"); + stat.execute("create index idx_test2_name2 on test2(name, id)"); + stat.execute("insert into test2(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("create table test3(id identity, name varchar)"); + stat.execute("checkpoint"); + conn.setAutoCommit(false); + stat.execute("create table test4(id identity, name varchar)"); + stat.execute("create index idx_test4_name2 on test(name, id)"); + 
stat.execute("insert into test(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("insert into test3(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("insert into test4(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("truncate table test2"); + stat.execute("drop index idx_test_name"); + stat.execute("drop index idx_test2_name"); + stat.execute("drop table test2"); + stat.execute("insert into test(name) " + + "select space(10) from system_range(1, 10)"); + stat.execute("shutdown immediately"); } - stat.execute("checkpoint"); - for (int i = 0; i < 10; i++) { - stat.execute("drop table test" + i); + try (Connection conn = getConnection(URL)) { + Statement stat = conn.createStatement(); + stat.execute("drop all objects"); + // re-allocate index root pages + for (int i = 0; i < 10; i++) { + stat.execute("create table test" + i + "(id identity, name varchar)"); + } + stat.execute("checkpoint"); + for (int i = 0; i < 10; i++) { + stat.execute("drop table test" + i); + } + for (int i = 0; i < 10; i++) { + stat.execute("create table test" + i + "(id identity, name varchar)"); + } + stat.execute("shutdown immediately"); } - for (int i = 0; i < 10; i++) { - stat.execute("create table test" + i + "(id identity, name varchar)"); + try (Connection conn = getConnection(URL)) { + conn.createStatement().execute("drop all objects"); } - stat.execute("shutdown immediately"); - assertThrows(ErrorCode.DATABASE_IS_CLOSED, conn).close(); - conn = getConnection(URL); - conn.createStatement().execute("drop all objects"); - conn.close(); } private void testLongTransaction() throws SQLException { @@ -152,8 +152,8 @@ private void testLongTransaction() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id identity, name varchar)"); conn.setAutoCommit(false); - stat.execute("insert into test " + - "select null, space(10) from system_range(1, 10)"); + stat.execute("insert into 
test(name) " + + "select space(10) from system_range(1, 10)"); Connection conn2; conn2 = getConnection(URL); Statement stat2 = conn2.createStatement(); @@ -161,8 +161,8 @@ private void testLongTransaction() throws SQLException { // large transaction stat2.execute("create table test2(id identity, name varchar)"); stat2.execute("create index idx_test2_name on test2(name)"); - stat2.execute("insert into test2 " + - "select null, x || space(10000) from system_range(1, 100)"); + stat2.execute("insert into test2(name) " + + "select x || space(10000) from system_range(1, 100)"); stat2.execute("drop table test2"); conn2.close(); stat.execute("drop table test"); @@ -240,25 +240,4 @@ private void testBackupRestore() throws Exception { deleteDb("pageStore2"); } - private void testIncompleteCreate() throws Exception { - deleteDb("pageStoreCoverage"); - Connection conn; - String fileName = getBaseDir() + "/pageStore" + Constants.SUFFIX_PAGE_FILE; - conn = getConnection("pageStoreCoverage"); - Statement stat = conn.createStatement(); - stat.execute("drop table if exists INFORMATION_SCHEMA.LOB_DATA"); - stat.execute("drop table if exists INFORMATION_SCHEMA.LOB_MAP"); - conn.close(); - FileChannel f = FileUtils.open(fileName, "rw"); - // create a new database - conn = getConnection("pageStoreCoverage"); - conn.close(); - f = FileUtils.open(fileName, "rw"); - f.truncate(16); - // create a new database - conn = getConnection("pageStoreCoverage"); - conn.close(); - deleteDb("pageStoreCoverage"); - } - } diff --git a/h2/src/test/org/h2/test/unit/TestPattern.java b/h2/src/test/org/h2/test/unit/TestPattern.java index cddfaf4881..4a56deb722 100644 --- a/h2/src/test/org/h2/test/unit/TestPattern.java +++ b/h2/src/test/org/h2/test/unit/TestPattern.java @@ -1,12 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.text.Collator; -import org.h2.expression.CompareLike; +import org.h2.expression.condition.CompareLike; import org.h2.test.TestBase; import org.h2.value.CompareMode; @@ -21,7 +21,7 @@ public class TestPattern extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -44,7 +44,7 @@ private void testCompareModeReuse() { private void testPattern() { CompareMode mode = CompareMode.getInstance(null, 0); - CompareLike comp = new CompareLike(mode, "\\", null, null, null, false); + CompareLike comp = new CompareLike(mode, "\\", null, false, false, null, null, CompareLike.LikeType.LIKE); test(comp, "B", "%_"); test(comp, "A", "A%"); test(comp, "A", "A%%"); @@ -99,7 +99,7 @@ private String initPatternRegexp(String pattern, char escape) { for (int i = 0; i < len; i++) { char c = pattern.charAt(i); if (escape == c) { - if (i >= len) { + if (i >= len - 1) { fail("escape can't be last char"); } c = pattern.charAt(++i); diff --git a/h2/src/test/org/h2/test/unit/TestPerfectHash.java b/h2/src/test/org/h2/test/unit/TestPerfectHash.java index dbc08eef7f..bc8cac777c 100644 --- a/h2/src/test/org/h2/test/unit/TestPerfectHash.java +++ b/h2/src/test/org/h2/test/unit/TestPerfectHash.java @@ -1,16 +1,18 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; +import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.BitSet; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.h2.dev.hash.MinimalPerfectHash; import org.h2.dev.hash.MinimalPerfectHash.LongHash; @@ -47,18 +49,16 @@ private static void largeFile() throws IOException { private static void largeFile(String s) throws IOException { String fileName = System.getProperty("user.home") + "/temp/" + s; + if (!new File(fileName).exists()) { + System.out.println("not found: " + fileName); + return; + } RandomAccessFile f = new RandomAccessFile(fileName, "r"); byte[] data = new byte[(int) f.length()]; f.readFully(data); - UniversalHash hf = new UniversalHash() { - - @Override - public int hashCode(Text o, int index, int seed) { - return o.hashCode(index, seed); - } - - }; - HashSet set = new HashSet(); + UniversalHash hf = Text::hashCode; + f.close(); + HashSet set = new HashSet<>(); Text t = new Text(data, 0); while (true) { set.add(t); @@ -73,10 +73,10 @@ public int hashCode(Text o, int index, int seed) { } System.out.println("file: " + s); System.out.println("size: " + set.size()); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); byte[] desc = MinimalPerfectHash.generate(set, hf); - time = System.currentTimeMillis() - time; - System.out.println("millis: " + time); + time = System.nanoTime() - time; + System.out.println("millis: " + TimeUnit.NANOSECONDS.toMillis(time)); System.out.println("len: " + desc.length); int bits = desc.length * 8; System.out.println(((double) bits / set.size()) + " bits/key"); @@ -89,29 +89,29 @@ public void measure() { int size = 1000000; testMinimal(size / 10); int s; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); s = testMinimal(size); - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; 
System.out.println((double) s / size + " bits/key (minimal) in " + - time + " ms"); + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); - time = System.currentTimeMillis(); + time = System.nanoTime(); s = testMinimalWithString(size); - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; System.out.println((double) s / size + " bits/key (minimal; String keys) in " + - time + " ms"); + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); - time = System.currentTimeMillis(); + time = System.nanoTime(); s = test(size, true); - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; System.out.println((double) s / size + " bits/key (minimal old) in " + - time + " ms"); - time = System.currentTimeMillis(); + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); + time = System.nanoTime(); s = test(size, false); - time = System.currentTimeMillis() - time; + time = System.nanoTime() - time; System.out.println((double) s / size + " bits/key (not minimal) in " + - time + " ms"); + TimeUnit.NANOSECONDS.toMillis(time) + " ms"); } @Override @@ -136,22 +136,17 @@ public void test() { private void testBrokenHashFunction() { int size = 10000; Random r = new Random(10000); - HashSet set = new HashSet(size); + HashSet set = new HashSet<>(size); while (set.size() < size) { set.add("x " + r.nextDouble()); } for (int test = 1; test < 10; test++) { final int badUntilLevel = test; - UniversalHash badHash = new UniversalHash() { - - @Override - public int hashCode(String o, int index, int seed) { - if (index < badUntilLevel) { - return 0; - } - return StringHash.getFastHash(o, index, seed); + UniversalHash badHash = (o, index, seed) -> { + if (index < badUntilLevel) { + return 0; } - + return StringHash.getFastHash(o, index, seed); }; byte[] desc = MinimalPerfectHash.generate(set, badHash); testMinimal(desc, set, badHash); @@ -160,7 +155,7 @@ public int hashCode(String o, int index, int seed) { private int test(int size, boolean minimal) { Random r = new 
Random(size); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); while (set.size() < size) { set.add(r.nextInt()); } @@ -178,7 +173,7 @@ private int test(int size, boolean minimal) { private int test(byte[] desc, Set set) { int max = -1; - HashSet test = new HashSet(); + HashSet test = new HashSet<>(); PerfectHash hash = new PerfectHash(desc); for (int x : set) { int h = hash.get(x); @@ -193,7 +188,7 @@ private int test(byte[] desc, Set set) { private int testMinimal(int size) { Random r = new Random(size); - HashSet set = new HashSet(size); + HashSet set = new HashSet<>(size); while (set.size() < size) { set.add((long) r.nextInt()); } @@ -206,7 +201,7 @@ private int testMinimal(int size) { private int testMinimalWithString(int size) { Random r = new Random(size); - HashSet set = new HashSet(size); + HashSet set = new HashSet<>(size); while (set.size() < size) { set.add("x " + r.nextDouble()); } @@ -220,7 +215,7 @@ private int testMinimalWithString(int size) { private int testMinimal(byte[] desc, Set set, UniversalHash hf) { int max = -1; BitSet test = new BitSet(); - MinimalPerfectHash hash = new MinimalPerfectHash(desc, hf); + MinimalPerfectHash hash = new MinimalPerfectHash<>(desc, hf); for (K x : set) { int h = hash.get(x); assertTrue(h >= 0); diff --git a/h2/src/test/org/h2/test/unit/TestPgServer.java b/h2/src/test/org/h2/test/unit/TestPgServer.java index 5ba3630037..4a0a4741d7 100644 --- a/h2/src/test/org/h2/test/unit/TestPgServer.java +++ b/h2/src/test/org/h2/test/unit/TestPgServer.java @@ -1,12 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; +import java.lang.reflect.Field; +import java.math.BigDecimal; import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.Date; import java.sql.DriverManager; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; @@ -14,20 +17,28 @@ import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; import java.sql.Types; -import java.util.concurrent.Callable; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.h2.api.ErrorCode; +import org.h2.server.pg.PgServer; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.Server; +import org.h2.util.DateTimeUtils; /** * Tests the PostgreSQL server protocol compliant implementation. */ -public class TestPgServer extends TestBase { +public class TestPgServer extends TestDb { /** * Run just this test. @@ -35,43 +46,31 @@ public class TestPgServer extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase test = TestBase.createCaller().init(); + test.config.memory = true; + test.testFromMain(); + } + + @Override + public boolean isEnabled() { + if (!config.memory) { + return false; + } + return true; } @Override public void test() throws Exception { - testLowerCaseIdentifiers(); + // testPgAdapter() starts server by itself without a wait so run it first testPgAdapter(); testKeyAlias(); - testKeyAlias(); testCancelQuery(); - testBinaryTypes(); - } - - private void testLowerCaseIdentifiers() throws SQLException { - if (!getPgJdbcDriver()) { - return; - } - deleteDb("test"); - Connection conn = getConnection( - "test;DATABASE_TO_UPPER=false", "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar(255))"); - Server server = Server.createPgServer( - "-baseDir", getBaseDir(), "-pgPort", "5535", "-pgDaemon"); - server.start(); - try { - Connection conn2; - conn2 = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "sa", "sa"); - stat = conn2.createStatement(); - stat.execute("select * from test"); - conn2.close(); - } finally { - server.stop(); - } - conn.close(); - deleteDb("test"); + testTextualAndBinaryTypes(); + testBinaryNumeric(); + testDateTime(); + testPrepareWithUnspecifiedType(); + testOtherPgClients(); + testArray(); } private boolean getPgJdbcDriver() { @@ -84,10 +83,32 @@ private boolean getPgJdbcDriver() { } } + private Server createPgServer(String... 
args) throws SQLException { + Server server = Server.createPgServer(args); + int failures = 0; + for (;;) { + try { + server.start(); + return server; + } catch (SQLException e) { + // the sleeps are too mitigate "port in use" exceptions on Jenkins + if (e.getErrorCode() != ErrorCode.EXCEPTION_OPENING_PORT_2 || ++failures > 10) { + throw e; + } + println("Sleeping"); + try { + Thread.sleep(100); + } catch (InterruptedException e2) { + throw new RuntimeException(e2); + } + } + } + } + private void testPgAdapter() throws SQLException { - deleteDb("test"); + deleteDb("pgserver"); Server server = Server.createPgServer( - "-baseDir", getBaseDir(), "-pgPort", "5535", "-pgDaemon"); + "-ifNotExists", "-baseDir", getBaseDir(), "-pgPort", "5535", "-pgDaemon"); assertEquals(5535, server.getPort()); assertEquals("Not started", server.getStatus()); server.start(); @@ -95,6 +116,7 @@ private void testPgAdapter() throws SQLException { try { if (getPgJdbcDriver()) { testPgClient(); + testPgClientSimple(); } } finally { server.stop(); @@ -106,16 +128,15 @@ private void testCancelQuery() throws Exception { return; } - Server server = Server.createPgServer( - "-pgPort", "5535", "-pgDaemon", "-key", "test", "mem:test"); - server.start(); + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); ExecutorService executor = Executors.newSingleThreadExecutor(); try { Connection conn = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "sa", "sa"); - final Statement stat = conn.createStatement(); - stat.execute("create alias sleep for \"java.lang.Thread.sleep\""); + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); + Statement stat = conn.createStatement(); + stat.execute("create alias sleep for 'java.lang.Thread.sleep'"); // create a table with 200 rows (cancel interval is 127) stat.execute("create table test(id int)"); @@ -123,12 +144,7 @@ private void testCancelQuery() throws Exception { 
stat.execute("insert into test (id) values (rand())"); } - Future future = executor.submit(new Callable() { - @Override - public Boolean call() throws SQLException { - return stat.execute("select id, sleep(5) from test"); - } - }); + Future future = executor.submit(() -> stat.execute("select id, sleep(5) from test")); // give it a little time to start and then cancel it Thread.sleep(100); @@ -147,12 +163,12 @@ public Boolean call() throws SQLException { server.stop(); executor.shutdown(); } - deleteDb("test"); + deleteDb("pgserver"); } private void testPgClient() throws SQLException { Connection conn = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "sa", "sa"); + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); Statement stat = conn.createStatement(); assertThrows(SQLException.class, stat). execute("select ***"); @@ -160,11 +176,21 @@ private void testPgClient() throws SQLException { stat.execute("create table test(id int primary key, name varchar)"); stat.execute("create index idx_test_name on test(name, id)"); stat.execute("grant all on test to test"); + int userId; + try (ResultSet rs = stat.executeQuery("call db_object_id('USER', 'test')")) { + rs.next(); + userId = rs.getInt(1); + } + int indexId; + try (ResultSet rs = stat.executeQuery("call db_object_id('INDEX', 'public', 'idx_test_name')")) { + rs.next(); + indexId = rs.getInt(1); + } stat.close(); conn.close(); conn = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "test", "test"); + "jdbc:postgresql://localhost:5535/pgserver", "test", "test"); stat = conn.createStatement(); ResultSet rs; @@ -186,12 +212,14 @@ private void testPgClient() throws SQLException { prep.setInt(1, 1); prep.setString(2, "Hello"); prep.execute(); - rs = stat.executeQuery("select * from test"); + rs = stat.executeQuery("select *, null nul from test"); rs.next(); ResultSetMetaData rsMeta = rs.getMetaData(); assertEquals(Types.INTEGER, rsMeta.getColumnType(1)); 
assertEquals(Types.VARCHAR, rsMeta.getColumnType(2)); + assertEquals(Types.VARCHAR, rsMeta.getColumnType(3)); + assertEquals("test", rsMeta.getTableName(1)); prep.close(); assertEquals(1, rs.getInt(1)); @@ -210,14 +238,16 @@ private void testPgClient() throws SQLException { rs.close(); DatabaseMetaData dbMeta = conn.getMetaData(); rs = dbMeta.getTables(null, null, "TEST", null); - rs.next(); - assertEquals("TEST", rs.getString("TABLE_NAME")); assertFalse(rs.next()); - rs = dbMeta.getColumns(null, null, "TEST", null); + rs = dbMeta.getTables(null, null, "test", null); + assertTrue(rs.next()); + assertEquals("test", rs.getString("TABLE_NAME")); + assertFalse(rs.next()); + rs = dbMeta.getColumns(null, null, "test", null); rs.next(); - assertEquals("ID", rs.getString("COLUMN_NAME")); + assertEquals("id", rs.getString("COLUMN_NAME")); rs.next(); - assertEquals("NAME", rs.getString("COLUMN_NAME")); + assertEquals("name", rs.getString("COLUMN_NAME")); assertFalse(rs.next()); rs = dbMeta.getIndexInfo(null, null, "TEST", false, false); // index info is currently disabled @@ -230,11 +260,11 @@ private void testPgClient() throws SQLException { "select version(), pg_postmaster_start_time(), current_schema()"); rs.next(); String s = rs.getString(1); - assertTrue(s.contains("H2")); - assertTrue(s.contains("PostgreSQL")); + assertContains(s, "H2"); + assertContains(s, "PostgreSQL"); s = rs.getString(2); s = rs.getString(3); - assertEquals(s, "PUBLIC"); + assertEquals(s, "public"); assertFalse(rs.next()); conn.setAutoCommit(false); @@ -248,11 +278,9 @@ private void testPgClient() throws SQLException { assertEquals("Hallo", rs.getString(2)); assertFalse(rs.next()); - rs = stat.executeQuery("select id, name, pg_get_userbyid(id) " + - "from information_schema.users order by id"); + rs = stat.executeQuery("select pg_get_userbyid(" + userId + ')'); rs.next(); - assertEquals(rs.getString(2), rs.getString(3)); - assertFalse(rs.next()); + assertEquals("test", rs.getString(1)); rs.close(); 
rs = stat.executeQuery("select currTid2('x', 1)"); @@ -263,14 +291,18 @@ private void testPgClient() throws SQLException { rs.next(); assertTrue(rs.getBoolean(1)); + rs = stat.executeQuery("select has_schema_privilege(1, 'READ')"); + rs.next(); + assertTrue(rs.getBoolean(1)); + rs = stat.executeQuery("select has_database_privilege(1, 'READ')"); rs.next(); assertTrue(rs.getBoolean(1)); - rs = stat.executeQuery("select pg_get_userbyid(-1)"); + rs = stat.executeQuery("select pg_get_userbyid(1000000000)"); rs.next(); - assertEquals(null, rs.getString(1)); + assertEquals("unknown (OID=1000000000)", rs.getString(1)); rs = stat.executeQuery("select pg_encoding_to_char(0)"); rs.next(); @@ -292,40 +324,72 @@ private void testPgClient() throws SQLException { rs.next(); assertEquals("", rs.getString(1)); - rs = stat.executeQuery("select pg_get_oid('\"WRONG\"')"); + rs = stat.executeQuery("select 0::regclass"); rs.next(); assertEquals(0, rs.getInt(1)); - rs = stat.executeQuery("select pg_get_oid('TEST')"); - rs.next(); - assertTrue(rs.getInt(1) > 0); - rs = stat.executeQuery("select pg_get_indexdef(0, 0, false)"); rs.next(); - assertEquals("", rs.getString(1)); - - rs = stat.executeQuery("select id from information_schema.indexes " + - "where index_name='IDX_TEST_NAME'"); - rs.next(); - int indexId = rs.getInt(1); + assertNull(rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 0, false)"); rs.next(); - assertEquals( - "CREATE INDEX PUBLIC.IDX_TEST_NAME ON PUBLIC.TEST(NAME, ID)", + assertEquals("CREATE INDEX \"public\".\"idx_test_name\" ON \"public\".\"test\"" + + "(\"name\" NULLS LAST, \"id\" NULLS LAST)", rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", null, false)"); rs.next(); - assertEquals( - "CREATE INDEX PUBLIC.IDX_TEST_NAME ON PUBLIC.TEST(NAME, ID)", - rs.getString(1)); + assertNull(rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 1, false)"); rs.next(); - assertEquals("NAME", 
rs.getString(1)); + assertEquals("name", rs.getString(1)); rs = stat.executeQuery("select pg_get_indexdef("+indexId+", 2, false)"); rs.next(); - assertEquals("ID", rs.getString(1)); + assertEquals("id", rs.getString(1)); + rs = stat.executeQuery("select * from pg_type where oid = " + PgServer.PG_TYPE_VARCHAR_ARRAY); + rs.next(); + assertEquals("_varchar", rs.getString("typname")); + assertEquals("_varchar", rs.getObject("typname")); + assertEquals("b", rs.getString("typtype")); + assertEquals(",", rs.getString("typdelim")); + assertEquals(PgServer.PG_TYPE_VARCHAR, rs.getInt("typelem")); + + stat.setMaxRows(10); + rs = stat.executeQuery("select * from generate_series(0, 10)"); + assertNRows(rs, 10); + stat.setMaxRows(0); + + stat.setFetchSize(2); + rs = stat.executeQuery("select * from generate_series(0, 4)"); + assertNRows(rs, 5); + rs = stat.executeQuery("select * from generate_series(0, 1)"); + assertNRows(rs, 2); + stat.setFetchSize(0); + + conn.close(); + } + + private void assertNRows(ResultSet rs, int n) throws SQLException { + for (int i = 0; i < n; i++) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + + private void testPgClientSimple() throws SQLException { + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver?preferQueryMode=simple", "sa", "sa"); + Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery("select 1"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + stat.setMaxRows(0); + stat.execute("create table test2(int integer)"); + stat.execute("drop table test2"); + assertThrows(SQLException.class, stat).execute("drop table test2"); conn.close(); } @@ -333,12 +397,11 @@ private void testKeyAlias() throws SQLException { if (!getPgJdbcDriver()) { return; } - Server server = Server.createPgServer( - "-pgPort", "5535", "-pgDaemon", "-key", "test", "mem:test"); - server.start(); + Server server = createPgServer( + 
"-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); try { Connection conn = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "sa", "sa"); + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); Statement stat = conn.createStatement(); // confirm that we've got the in memory implementation @@ -346,7 +409,7 @@ private void testKeyAlias() throws SQLException { stat.execute("create table test(id int primary key, name varchar)"); ResultSet rs = stat.executeQuery( "select storage_type from information_schema.tables " + - "where table_name = 'TEST'"); + "where table_name = 'test'"); assertTrue(rs.next()); assertEquals("MEMORY", rs.getString(1)); @@ -356,36 +419,84 @@ private void testKeyAlias() throws SQLException { } } - private void testBinaryTypes() throws SQLException { + private static Set supportedBinaryOids; + + static { + try { + supportedBinaryOids = getSupportedBinaryOids(); + } catch (ReflectiveOperationException e) { + throw new RuntimeException(e); + } + } + + @SuppressWarnings("unchecked") + private static Set getSupportedBinaryOids() throws ReflectiveOperationException { + Field supportedBinaryOidsField = Class + .forName("org.postgresql.jdbc.PgConnection") + .getDeclaredField("SUPPORTED_BINARY_OIDS"); + supportedBinaryOidsField.setAccessible(true); + return (Set) supportedBinaryOidsField.get(null); + } + + private void testTextualAndBinaryTypes() throws SQLException { + testTextualAndBinaryTypes(false); + testTextualAndBinaryTypes(true); + // additional support of NUMERIC for Npgsql + supportedBinaryOids.add(1700); + testTextualAndBinaryTypes(true); + supportedBinaryOids.remove(1700); + } + + private void testTextualAndBinaryTypes(boolean binary) throws SQLException { if (!getPgJdbcDriver()) { return; } - Server server = Server.createPgServer( - "-pgPort", "5535", "-pgDaemon", "-key", "test", "mem:test"); - server.start(); + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", 
"-pgDaemon", "-key", "pgserver", "mem:pgserver"); try { + Properties props = new Properties(); + props.setProperty("user", "sa"); + props.setProperty("password", "sa"); + + // force binary + if (binary) { + props.setProperty("prepareThreshold", "-1"); + } + Connection conn = DriverManager.getConnection( - "jdbc:postgresql://localhost:5535/test", "sa", "sa"); + "jdbc:postgresql://localhost:5535/pgserver", props); Statement stat = conn.createStatement(); stat.execute( "create table test(x1 varchar, x2 int, " + - "x3 smallint, x4 bigint, x5 double, x6 float, " + - "x7 real, x8 boolean, x9 char, x10 bytea)"); + "x3 smallint, x4 bigint, x5 double precision, x6 float, " + + "x7 real, x8 boolean, x9 char(3), x10 bytea, " + + "x11 date, x12 time, x13 timestamp, x14 numeric(25, 5)," + + "x15 time with time zone, x16 timestamp with time zone)"); PreparedStatement ps = conn.prepareStatement( - "insert into test values (?,?,?,?,?,?,?,?,?,?)"); + "insert into test values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"); ps.setString(1, "test"); ps.setInt(2, 12345678); ps.setShort(3, (short) 12345); ps.setLong(4, 1234567890123L); ps.setDouble(5, 123.456); ps.setFloat(6, 123.456f); - ps.setDouble(7, 123.456); + ps.setFloat(7, 123.456f); ps.setBoolean(8, true); ps.setByte(9, (byte) 0xfe); - ps.setBytes(10, new byte[] { 'a', (byte) 0xfe, '\127' }); + ps.setBytes(10, new byte[] { 'a', (byte) 0xfe, '\127', 0, 127, '\\' }); + ps.setDate(11, Date.valueOf("2015-01-31")); + ps.setTime(12, Time.valueOf("20:11:15")); + ps.setTimestamp(13, Timestamp.valueOf("2001-10-30 14:16:10.111")); + ps.setBigDecimal(14, new BigDecimal("12345678901234567890.12345")); + ps.setTime(15, Time.valueOf("20:11:15")); + ps.setTimestamp(16, Timestamp.valueOf("2001-10-30 14:16:10.111")); + ps.execute(); + for (int i = 1; i <= 16; i++) { + ps.setNull(i, Types.NULL); + } ps.execute(); ResultSet rs = stat.executeQuery("select * from test"); @@ -396,15 +507,407 @@ private void testBinaryTypes() throws SQLException { 
assertEquals(1234567890123L, rs.getLong(4)); assertEquals(123.456, rs.getDouble(5)); assertEquals(123.456f, rs.getFloat(6)); - assertEquals(123.456, rs.getDouble(7)); + assertEquals(123.456f, rs.getFloat(7)); assertEquals(true, rs.getBoolean(8)); assertEquals((byte) 0xfe, rs.getByte(9)); - assertEquals(new byte[] { 'a', (byte) 0xfe, '\127' }, + assertEquals(new byte[] { 'a', (byte) 0xfe, '\127', 0, 127, '\\' }, rs.getBytes(10)); + assertEquals(Date.valueOf("2015-01-31"), rs.getDate(11)); + assertEquals(Time.valueOf("20:11:15"), rs.getTime(12)); + assertEquals(Timestamp.valueOf("2001-10-30 14:16:10.111"), rs.getTimestamp(13)); + assertEquals(new BigDecimal("12345678901234567890.12345"), rs.getBigDecimal(14)); + assertEquals(Time.valueOf("20:11:15"), rs.getTime(15)); + assertEquals(Timestamp.valueOf("2001-10-30 14:16:10.111"), rs.getTimestamp(16)); + assertTrue(rs.next()); + for (int i = 1; i <= 16; i++) { + assertNull(rs.getObject(i)); + } + assertFalse(rs.next()); + + conn.close(); + } finally { + server.stop(); + } + } + + private void testBinaryNumeric() throws SQLException { + if (!getPgJdbcDriver()) { + return; + } + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + supportedBinaryOids.add(1700); + try { + Properties props = new Properties(); + props.setProperty("user", "sa"); + props.setProperty("password", "sa"); + // force binary + props.setProperty("prepareThreshold", "-1"); + + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", props); + Statement stat = conn.createStatement(); + + try (ResultSet rs = stat.executeQuery("SELECT 1E-16383, 1E+1, 1E+89, 1E-16384")) { + rs.next(); + assertEquals(new BigDecimal("1E-16383"), rs.getBigDecimal(1)); + assertEquals(new BigDecimal("10"), rs.getBigDecimal(2)); + assertEquals(new BigDecimal("10").pow(89), rs.getBigDecimal(3)); + // TODO `SELECT 1E+90, 1E+131071` fails due to PgJDBC issue 1935 + try { + 
rs.getBigDecimal(4); + fail(); + } catch (IllegalArgumentException e) { + // PgJDBC doesn't support scale greater than 16383 + } + } + try (ResultSet rs = stat.executeQuery("SELECT 1E-32768")) { + fail(); + } catch (SQLException e) { + assertEquals("22003", e.getSQLState()); + } + try (ResultSet rs = stat.executeQuery("SELECT 1E+131072")) { + fail(); + } catch (SQLException e) { + assertEquals("22003", e.getSQLState()); + } + + conn.close(); + } finally { + supportedBinaryOids.remove(1700); + server.stop(); + } + } + + private void testDateTime() throws SQLException { + if (!getPgJdbcDriver()) { + return; + } + TimeZone old = TimeZone.getDefault(); + /* + * java.util.TimeZone doesn't support LMT, so perform this test with + * fixed time zone offset + */ + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + DateTimeUtils.resetCalendar(); + try { + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try { + Properties props = new Properties(); + props.setProperty("user", "sa"); + props.setProperty("password", "sa"); + // force binary + props.setProperty("prepareThreshold", "-1"); + + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", props); + Statement stat = conn.createStatement(); + + stat.execute( + "create table test(x1 date, x2 time, x3 timestamp)"); + + Date[] dates = { null, Date.valueOf("2017-02-20"), + Date.valueOf("1970-01-01"), Date.valueOf("1969-12-31"), + Date.valueOf("1940-01-10"), Date.valueOf("1950-11-10"), + Date.valueOf("1500-01-01")}; + Time[] times = { null, Time.valueOf("14:15:16"), + Time.valueOf("00:00:00"), Time.valueOf("23:59:59"), + Time.valueOf("00:10:59"), Time.valueOf("08:30:42"), + Time.valueOf("10:00:00")}; + Timestamp[] timestamps = { null, Timestamp.valueOf("2017-02-20 14:15:16.763"), + Timestamp.valueOf("1970-01-01 00:00:00"), Timestamp.valueOf("1969-12-31 23:59:59"), + Timestamp.valueOf("1940-01-10 00:10:59"), 
Timestamp.valueOf("1950-11-10 08:30:42.12"), + Timestamp.valueOf("1500-01-01 10:00:10")}; + int count = dates.length; + + PreparedStatement ps = conn.prepareStatement( + "insert into test values (?,?,?)"); + for (int i = 0; i < count; i++) { + ps.setDate(1, dates[i]); + ps.setTime(2, times[i]); + ps.setTimestamp(3, timestamps[i]); + ps.execute(); + } + + ResultSet rs = stat.executeQuery("select * from test"); + for (int i = 0; i < count; i++) { + assertTrue(rs.next()); + assertEquals(dates[i], rs.getDate(1)); + assertEquals(times[i], rs.getTime(2)); + assertEquals(timestamps[i], rs.getTimestamp(3)); + } + assertFalse(rs.next()); + + conn.close(); + } finally { + server.stop(); + } + } finally { + TimeZone.setDefault(old); + DateTimeUtils.resetCalendar(); + } + } + + private void testPrepareWithUnspecifiedType() throws Exception { + if (!getPgJdbcDriver()) { + return; + } + + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try { + Properties props = new Properties(); + + props.setProperty("user", "sa"); + props.setProperty("password", "sa"); + // force server side prepare + props.setProperty("prepareThreshold", "1"); + + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", props); + + Statement stmt = conn.createStatement(); + stmt.executeUpdate("create table t1 (id integer, v timestamp)"); + stmt.close(); + + PreparedStatement pstmt = conn.prepareStatement("insert into t1 values(100500, ?)"); + // assertTrue(((PGStatement) pstmt).isUseServerPrepare()); + assertEquals(Types.TIMESTAMP, pstmt.getParameterMetaData().getParameterType(1)); + + Timestamp t = new Timestamp(System.currentTimeMillis()); + pstmt.setObject(1, t); + assertEquals(1, pstmt.executeUpdate()); + pstmt.close(); + + pstmt = conn.prepareStatement("SELECT * FROM t1 WHERE v = ?"); + assertEquals(Types.TIMESTAMP, pstmt.getParameterMetaData().getParameterType(1)); + + pstmt.setObject(1, t); + 
ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(100500, rs.getInt(1)); + rs.close(); + pstmt.close(); conn.close(); } finally { server.stop(); } } + + private void testOtherPgClients() throws SQLException { + if (!getPgJdbcDriver()) { + return; + } + + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try ( + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); + Statement stat = conn.createStatement(); + ) { + stat.execute( + "create table test(id serial primary key, x1 integer)"); + + // pgAdmin + stat.execute("SET client_min_messages=notice"); + try (ResultSet rs = stat.executeQuery("SELECT set_config('bytea_output','escape',false) " + + "FROM pg_settings WHERE name = 'bytea_output'")) { + assertFalse(rs.next()); + } + stat.execute("SET client_encoding='UNICODE'"); + try (ResultSet rs = stat.executeQuery("SELECT version()")) { + assertTrue(rs.next()); + assertNotNull(rs.getString("version")); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "db.oid as did, db.datname, db.datallowconn, " + + "pg_encoding_to_char(db.encoding) AS serverencoding, " + + "has_database_privilege(db.oid, 'CREATE') as cancreate, datlastsysoid " + + "FROM pg_database db WHERE db.datname = current_database()")) { + assertTrue(rs.next()); + assertEquals("pgserver", rs.getString("datname")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "oid as id, rolname as name, rolsuper as is_superuser, " + + "CASE WHEN rolsuper THEN true ELSE rolcreaterole END as can_create_role, " + + "CASE WHEN rolsuper THEN true ELSE rolcreatedb END as can_create_db " + + "FROM pg_catalog.pg_roles WHERE rolname = current_user")) { + assertTrue(rs.next()); + assertEquals("sa", rs.getString("name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT " + + "db.oid as did, db.datname as name, 
ta.spcname as spcname, db.datallowconn, " + + "has_database_privilege(db.oid, 'CREATE') as cancreate, datdba as owner " + + "FROM pg_database db LEFT OUTER JOIN pg_tablespace ta ON db.dattablespace = ta.oid " + + "WHERE db.oid > 100000::OID")) { + assertTrue(rs.next()); + assertEquals("pgserver", rs.getString("name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT nsp.oid, nsp.nspname as name, " + + "has_schema_privilege(nsp.oid, 'CREATE') as can_create, " + + "has_schema_privilege(nsp.oid, 'USAGE') as has_usage " + + "FROM pg_namespace nsp WHERE nspname NOT LIKE 'pg\\_%' AND NOT (" + + "(nsp.nspname = 'pg_catalog' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'pg_class' AND relnamespace = nsp.oid LIMIT 1)) OR " + + "(nsp.nspname = 'pgagent' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'pga_job' AND relnamespace = nsp.oid LIMIT 1)) OR " + + "(nsp.nspname = 'information_schema' AND EXISTS (SELECT 1 FROM pg_class " + + "WHERE relname = 'tables' AND relnamespace = nsp.oid LIMIT 1))" + + ") ORDER BY nspname")) { + assertTrue(rs.next()); + assertEquals("public", rs.getString("name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT format_type(23, NULL)")) { + assertTrue(rs.next()); + assertEquals("INTEGER", rs.getString(1)); + assertFalse(rs.next()); + } + // pgAdmin sends `SET LOCAL join_collapse_limit=8`, but `LOCAL` is not supported yet + stat.execute("SET join_collapse_limit=8"); + + // HeidiSQL + try (ResultSet rs = stat.executeQuery("SHOW ssl")) { + assertTrue(rs.next()); + assertEquals("off", rs.getString(1)); + } + stat.execute("SET search_path TO 'public', '$user'"); + try (ResultSet rs = stat.executeQuery("SELECT *, NULL AS data_length, " + + "pg_relation_size(QUOTE_IDENT(t.TABLE_SCHEMA) || '.' 
|| QUOTE_IDENT(t.TABLE_NAME))::bigint " + + "AS index_length, " + + "c.reltuples, obj_description(c.oid) AS comment " + + "FROM \"information_schema\".\"tables\" AS t " + + "LEFT JOIN \"pg_namespace\" n ON t.table_schema = n.nspname " + + "LEFT JOIN \"pg_class\" c ON n.oid = c.relnamespace AND c.relname=t.table_name " + + "WHERE t.\"table_schema\"='public'")) { + assertTrue(rs.next()); + assertEquals("test", rs.getString("table_name")); + assertTrue(rs.getLong("index_length") >= 0L); // test pg_relation_size() + assertNull(rs.getString("comment")); // test obj_description() + } + try (ResultSet rs = stat.executeQuery("SELECT \"p\".\"proname\", \"p\".\"proargtypes\" " + + "FROM \"pg_catalog\".\"pg_namespace\" AS \"n\" " + + "JOIN \"pg_catalog\".\"pg_proc\" AS \"p\" ON \"p\".\"pronamespace\" = \"n\".\"oid\" " + + "WHERE \"n\".\"nspname\"='public'")) { + assertFalse(rs.next()); // "pg_proc" always empty + } + try (ResultSet rs = stat.executeQuery("SELECT DISTINCT a.attname AS column_name, " + + "a.attnum, a.atttypid, FORMAT_TYPE(a.atttypid, a.atttypmod) AS data_type, " + + "CASE a.attnotnull WHEN false THEN 'YES' ELSE 'NO' END AS IS_NULLABLE, " + + "com.description AS column_comment, pg_get_expr(def.adbin, def.adrelid) AS column_default, " + + "NULL AS character_maximum_length FROM pg_attribute AS a " + + "JOIN pg_class AS pgc ON pgc.oid = a.attrelid " + + "LEFT JOIN pg_description AS com ON (pgc.oid = com.objoid AND a.attnum = com.objsubid) " + + "LEFT JOIN pg_attrdef AS def ON (a.attrelid = def.adrelid AND a.attnum = def.adnum) " + + "WHERE a.attnum > 0 AND pgc.oid = a.attrelid AND pg_table_is_visible(pgc.oid) " + + "AND NOT a.attisdropped AND pgc.relname = 'test' ORDER BY a.attnum")) { + assertTrue(rs.next()); + assertEquals("id", rs.getString("column_name")); + assertTrue(rs.next()); + assertEquals("x1", rs.getString("column_name")); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SHOW ALL")) { + ResultSetMetaData rsMeta = rs.getMetaData(); 
+ assertEquals("name", rsMeta.getColumnName(1)); + assertEquals("setting", rsMeta.getColumnName(2)); + } + + // DBeaver + try (ResultSet rs = stat.executeQuery("SELECT t.oid,t.*,c.relkind FROM pg_catalog.pg_type t " + + "LEFT OUTER JOIN pg_class c ON c.oid=t.typrelid WHERE typnamespace=-1000")) { + // just no exception + } + stat.execute("SET search_path TO 'ab', 'c\"d', 'e''f'"); + try (ResultSet rs = stat.executeQuery("SHOW search_path")) { + assertTrue(rs.next()); + assertEquals("pg_catalog, ab, \"c\"\"d\", \"e'f\"", rs.getString("search_path")); + } + stat.execute("SET search_path TO ab, \"c\"\"d\", \"e'f\""); + try (ResultSet rs = stat.executeQuery("SHOW search_path")) { + assertTrue(rs.next()); + assertEquals("pg_catalog, ab, \"c\"\"d\", \"e'f\"", rs.getString("search_path")); + } + int oid; + try (ResultSet rs = stat.executeQuery("SELECT oid FROM pg_class WHERE relname = 'test'")) { + rs.next(); + oid = rs.getInt("oid"); + } + try (ResultSet rs = stat.executeQuery("SELECT i.*,i.indkey as keys," + + "c.relname,c.relnamespace,c.relam,c.reltablespace," + + "tc.relname as tabrelname,dsc.description," + + "pg_catalog.pg_get_expr(i.indpred, i.indrelid) as pred_expr," + + "pg_catalog.pg_get_expr(i.indexprs, i.indrelid, true) as expr," + + "pg_catalog.pg_relation_size(i.indexrelid) as index_rel_size," + + "pg_catalog.pg_stat_get_numscans(i.indexrelid) as index_num_scans " + + "FROM pg_catalog.pg_index i " + + "INNER JOIN pg_catalog.pg_class c ON c.oid=i.indexrelid " + + "INNER JOIN pg_catalog.pg_class tc ON tc.oid=i.indrelid " + + "LEFT OUTER JOIN pg_catalog.pg_description dsc ON i.indexrelid=dsc.objoid " + + "WHERE i.indrelid=" + oid + " ORDER BY c.relname")) { + // pg_index is empty + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT c.oid,c.*," + + "t.relname as tabrelname,rt.relnamespace as refnamespace,d.description " + + "FROM pg_catalog.pg_constraint c " + + "INNER JOIN pg_catalog.pg_class t ON t.oid=c.conrelid " + + "LEFT OUTER JOIN 
pg_catalog.pg_class rt ON rt.oid=c.confrelid " + + "LEFT OUTER JOIN pg_catalog.pg_description d ON d.objoid=c.oid " + + "AND d.objsubid=0 AND d.classoid='pg_constraint'::regclass WHERE c.conrelid=" + oid)) { + assertTrue(rs.next()); + assertEquals("test", rs.getString("tabrelname")); + assertEquals("p", rs.getString("contype")); + assertEquals(Short.valueOf((short) 1), ((Object[]) rs.getArray("conkey").getArray())[0]); + } + } finally { + server.stop(); + } + } + + private void testArray() throws Exception { + if (!getPgJdbcDriver()) { + return; + } + + Server server = createPgServer( + "-ifNotExists", "-pgPort", "5535", "-pgDaemon", "-key", "pgserver", "mem:pgserver"); + try ( + Connection conn = DriverManager.getConnection( + "jdbc:postgresql://localhost:5535/pgserver", "sa", "sa"); + Statement stat = conn.createStatement(); + ) { + stat.execute("CREATE TABLE test (id int primary key, x1 varchar array)"); + stat.execute("INSERT INTO test (id, x1) VALUES (1, ARRAY['abc', 'd\\\"e', '{,}'])"); + try (ResultSet rs = stat.executeQuery( + "SELECT x1 FROM test WHERE id = 1")) { + assertTrue(rs.next()); + Object[] arr = (Object[]) rs.getArray(1).getArray(); + assertEquals("abc", arr[0]); + assertEquals("d\\\"e", arr[1]); + assertEquals("{,}", arr[2]); + } + try (ResultSet rs = stat.executeQuery( + "SELECT data_type FROM information_schema.columns WHERE table_schema = 'pg_catalog' " + + "AND table_name = 'pg_database' AND column_name = 'datacl'")) { + assertTrue(rs.next()); + assertEquals("array", rs.getString(1)); + } + try (ResultSet rs = stat.executeQuery( + "SELECT data_type FROM information_schema.columns WHERE table_schema = 'pg_catalog' " + + "AND table_name = 'pg_tablespace' AND column_name = 'spcacl'")) { + assertTrue(rs.next()); + assertEquals("array", rs.getString(1)); + } + } finally { + server.stop(); + } + } + } diff --git a/h2/src/test/org/h2/test/unit/TestReader.java b/h2/src/test/org/h2/test/unit/TestReader.java index e4f77d434f..2ddb8fcb30 100644 --- 
a/h2/src/test/org/h2/test/unit/TestReader.java +++ b/h2/src/test/org/h2/test/unit/TestReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -25,7 +25,7 @@ public class TestReader extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -35,7 +35,7 @@ public void test() throws Exception { InputStream in = new ReaderInputStream(r); byte[] buff = IOUtils.readBytesAndClose(in, 0); InputStream in2 = new ByteArrayInputStream(buff); - Reader r2 = IOUtils.getBufferedReader(in2); + Reader r2 = IOUtils.getReader(in2); String s2 = IOUtils.readStringAndClose(r2, Integer.MAX_VALUE); assertEquals(s, s2); } diff --git a/h2/src/test/org/h2/test/unit/TestRecovery.java b/h2/src/test/org/h2/test/unit/TestRecovery.java index 3097159cc3..3db1cc1d30 100644 --- a/h2/src/test/org/h2/test/unit/TestRecovery.java +++ b/h2/src/test/org/h2/test/unit/TestRecovery.java @@ -1,30 +1,27 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayOutputStream; -import java.io.InputStreamReader; import java.io.PrintStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.sql.Connection; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; import org.h2.tools.Recover; -import org.h2.util.IOUtils; +import org.h2.util.Utils10; /** * Tests database recovery. */ -public class TestRecovery extends TestBase { +public class TestRecovery extends TestDb { /** * Run just this test. @@ -32,36 +29,24 @@ public class TestRecovery extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { - if (!config.mvStore) { - testRecoverTestMode(); + public boolean isEnabled() { + if (config.memory) { + return false; } + return true; + } + + @Override + public void test() throws Exception { testRecoverClob(); testRecoverFulltext(); - testRedoTransactions(); - testCorrupt(); - testWithTransactionLog(); testCompressedAndUncompressed(); testRunScript(); - } - - private void testRecoverTestMode() throws Exception { - if (config.memory) { - return; - } - String recoverTestLog = getBaseDir() + "/recovery.h2.db.log"; - FileUtils.delete(recoverTestLog); - deleteDb("recovery"); - Connection conn = getConnection("recovery;RECOVER_TEST=1"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar)"); - stat.execute("drop all objects delete files"); - conn.close(); - assertTrue(FileUtils.exists(recoverTestLog)); + testRunScript2(); } private void testRecoverClob() throws Exception { @@ 
-85,8 +70,7 @@ private void testRecoverFulltext() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); Connection conn = getConnection("recovery"); Statement stat = conn.createStatement(); - stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT " + - "FOR \"org.h2.fulltext.FullTextLucene.init\""); + stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT FOR 'org.h2.fulltext.FullTextLucene.init'"); stat.execute("CALL FTL_INIT()"); stat.execute("create table test(id int primary key, name varchar) as " + "select 1, 'Hello'"); @@ -100,131 +84,6 @@ private void testRecoverFulltext() throws Exception { conn.close(); } - private void testRedoTransactions() throws Exception { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("set write_delay 0"); - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("insert into test select x, 'Hello' from system_range(1, 5)"); - stat.execute("create table test2(id int primary key)"); - stat.execute("drop table test2"); - stat.execute("update test set name = 'Hallo' where id < 3"); - stat.execute("delete from test where id = 1"); - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // ignore - } - Recover.main("-dir", getBaseDir(), "-db", "recovery", "-transactionLog"); - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - conn = getConnection("recovery;init=runscript from '" + - getBaseDir() + "/recovery.h2.sql'"); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test order by id"); - assertTrue(rs.next()); - assertEquals(2, rs.getInt(1)); - assertEquals("Hallo", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(3, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(4, 
rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - assertTrue(rs.next()); - assertEquals(5, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - assertFalse(rs.next()); - conn.close(); - } - - private void testCorrupt() throws Exception { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int, name varchar) as " + - "select 1, 'Hello World1'"); - conn.close(); - FileChannel f = FileUtils.open(getBaseDir() + "/recovery.h2.db", "rw"); - byte[] buff = new byte[Constants.DEFAULT_PAGE_SIZE]; - while (f.position() < f.size()) { - FileUtils.readFully(f, ByteBuffer.wrap(buff)); - if (new String(buff).contains("Hello World1")) { - buff[buff.length - 1]++; - f.position(f.position() - buff.length); - f.write(ByteBuffer.wrap(buff)); - } - } - f.close(); - Recover.main("-dir", getBaseDir(), "-db", "recovery"); - String script = IOUtils.readStringAndClose( - new InputStreamReader( - FileUtils.newInputStream(getBaseDir() + "/recovery.h2.sql")), -1); - assertContains(script, "checksum mismatch"); - assertContains(script, "dump:"); - assertContains(script, "Hello World2"); - } - - private void testWithTransactionLog() throws SQLException { - if (config.mvStore) { - // not needed for MV_STORE=TRUE - return; - } - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - Connection conn = getConnection("recovery"); - Statement stat = conn.createStatement(); - stat.execute("create table truncate(id int primary key) as " + - "select x from system_range(1, 1000)"); - stat.execute("create table test(id int primary key, data int, text varchar)"); - stat.execute("create index on test(data, id)"); - stat.execute("insert into test direct select x, 0, null " + - "from system_range(1, 1000)"); - stat.execute("insert into test values(-1, -1, space(10000))"); - 
stat.execute("checkpoint"); - stat.execute("delete from test where id = -1"); - stat.execute("truncate table truncate"); - conn.setAutoCommit(false); - long base = 0; - while (true) { - ResultSet rs = stat.executeQuery( - "select value from information_schema.settings " + - "where name = 'info.FILE_WRITE'"); - rs.next(); - long count = rs.getLong(1); - if (base == 0) { - base = count; - } else if (count > base + 10) { - break; - } - stat.execute("update test set data=0"); - stat.execute("update test set text=space(10000) where id = 0"); - stat.execute("update test set data=1, text = null"); - conn.commit(); - } - stat.execute("shutdown immediately"); - try { - conn.close(); - } catch (Exception e) { - // expected - } - Recover.main("-dir", getBaseDir(), "-db", "recovery"); - conn = getConnection("recovery"); - conn.close(); - Recover.main("-dir", getBaseDir(), "-db", "recovery", "-removePassword"); - conn = getConnection("recovery", getUser(), ""); - conn.close(); - DeleteDbFiles.execute(getBaseDir(), "recovery", true); - } private void testCompressedAndUncompressed() throws SQLException { DeleteDbFiles.execute(getBaseDir(), "recovery", true); @@ -234,7 +93,6 @@ private void testCompressedAndUncompressed() throws SQLException { Statement stat = conn.createStatement(); stat.execute("create table test(id int primary key, data clob)"); stat.execute("insert into test values(1, space(10000))"); - stat.execute("set compress_lob lzf"); stat.execute("insert into test values(2, space(10000))"); conn.close(); Recover rec = new Recover(); @@ -258,7 +116,7 @@ private void testCompressedAndUncompressed() throws SQLException { DeleteDbFiles.execute(getBaseDir(), "recovery2", true); } - private void testRunScript() throws SQLException { + private void testRunScript() throws Exception { DeleteDbFiles.execute(getBaseDir(), "recovery", true); DeleteDbFiles.execute(getBaseDir(), "recovery2", true); org.h2.Driver.load(); @@ -272,7 +130,7 @@ private void testRunScript() throws 
SQLException { "select * from test"); stat.execute("create table a(id int primary key) as " + "select * from system_range(1, 100)"); - stat.execute("create table b(id int references a(id)) as " + + stat.execute("create table b(id int primary key references a(id)) as " + "select * from system_range(1, 100)"); stat.execute("create table lob(c clob, b blob) as " + "select space(10000) || 'end', SECURE_RAND(10000)"); @@ -287,10 +145,10 @@ private void testRunScript() throws SQLException { Recover rec = new Recover(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - rec.setOut(new PrintStream(buff)); + rec.setOut(new PrintStream(buff, false, "UTF-8")); rec.runTool("-dir", getBaseDir(), "-db", "recovery", "-trace"); - String out = new String(buff.toByteArray()); - assertTrue(out.contains("Created file")); + String out = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + assertContains(out, "Created file"); Connection conn2 = getConnection("recovery2"); Statement stat2 = conn2.createStatement(); @@ -318,4 +176,42 @@ private void testRunScript() throws SQLException { FileUtils.deleteRecursive(dir, false); } + private void testRunScript2() throws Exception { + DeleteDbFiles.execute(getBaseDir(), "recovery", true); + DeleteDbFiles.execute(getBaseDir(), "recovery2", true); + org.h2.Driver.load(); + Connection conn = getConnection("recovery"); + Statement stat = conn.createStatement(); + stat.execute("SET COLLATION EN"); + stat.execute("CREATE TABLE TEST(A VARCHAR)"); + conn.close(); + + final Recover recover = new Recover(); + final ByteArrayOutputStream buff = new ByteArrayOutputStream(); // capture the console output + recover.setOut(new PrintStream(buff, false, "UTF-8")); + recover.runTool("-dir", getBaseDir(), "-db", "recovery", "-trace"); + String consoleOut = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + assertContains(consoleOut, "Created file"); + + Connection conn2 = getConnection("recovery2"); + Statement stat2 
= conn2.createStatement(); + + stat2.execute("runscript from '" + getBaseDir() + "/recovery.h2.sql'"); + stat2.execute("select * from test"); + conn2.close(); + + conn = getConnection("recovery"); + stat = conn.createStatement(); + conn2 = getConnection("recovery2"); + stat2 = conn2.createStatement(); + assertEqualDatabases(stat, stat2); + conn.close(); + conn2.close(); + + deleteDb("recovery"); + deleteDb("recovery2"); + FileUtils.delete(getBaseDir() + "/recovery.h2.sql"); + String dir = getBaseDir() + "/recovery.lobs.db"; + FileUtils.deleteRecursive(dir, false); + } } diff --git a/h2/src/test/org/h2/test/unit/TestReopen.java b/h2/src/test/org/h2/test/unit/TestReopen.java index 3b328872f1..babf456eb9 100644 --- a/h2/src/test/org/h2/test/unit/TestReopen.java +++ b/h2/src/test/org/h2/test/unit/TestReopen.java @@ -1,27 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.sql.SQLException; import java.util.HashSet; -import java.util.Properties; +import java.util.concurrent.TimeUnit; import org.h2.api.ErrorCode; import org.h2.engine.ConnectionInfo; import org.h2.engine.Constants; import org.h2.engine.Database; -import org.h2.engine.Session; +import org.h2.engine.SessionLocal; import org.h2.message.DbException; -import org.h2.store.fs.FilePathRec; import org.h2.store.fs.FileUtils; import org.h2.store.fs.Recorder; +import org.h2.store.fs.rec.FilePathRec; import org.h2.test.TestBase; import org.h2.tools.Recover; import org.h2.util.IOUtils; -import org.h2.util.New; import org.h2.util.Profiler; import org.h2.util.Utils; @@ -39,7 +38,7 @@ public class TestReopen extends TestBase implements Recorder { private final long maxFileSize = Utils.getProperty("h2.reopenMaxFileSize", Integer.MAX_VALUE) * 1024L * 1024; private int verifyCount; - private final HashSet knownErrors = New.hashSet(); + private final HashSet knownErrors = new HashSet<>(); private volatile boolean testing; /** @@ -48,7 +47,7 @@ public class TestReopen extends TestBase implements Recorder { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -58,12 +57,12 @@ public void test() throws Exception { FilePathRec.setRecorder(this); config.reopen = true; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); Profiler p = new Profiler(); p.startCollecting(); new TestPageStoreCoverage().init(config).test(); System.out.println(p.getTop(3)); - System.out.println(System.currentTimeMillis() - time); + System.out.println(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time)); System.out.println("counter: " + writeCount); } @@ -72,8 +71,7 @@ public void log(int op, String fileName, byte[] data, long x) { if (op != Recorder.WRITE && op != Recorder.TRUNCATE) { return; } - if (!fileName.endsWith(Constants.SUFFIX_PAGE_FILE) && - !fileName.endsWith(Constants.SUFFIX_MV_FILE)) { + if (!fileName.endsWith(Constants.SUFFIX_MV_FILE)) { return; } if (testing) { @@ -100,25 +98,16 @@ private synchronized void logDb(String fileName) { System.out.println("+ write #" + writeCount + " verify #" + verifyCount); try { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_PAGE_FILE); - } else { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_MV_FILE); - } + IOUtils.copyFiles(fileName, testDatabase + + Constants.SUFFIX_MV_FILE); verifyCount++; // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); - String userName = getUser(); - p.setProperty("user", userName); - p.setProperty("password", getPassword()); String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0"; - ConnectionInfo ci = new ConnectionInfo(url, p); + ConnectionInfo ci = new ConnectionInfo(url, null, getUser(), getPassword()); Database database = new Database(ci, null); // close the database - Session session = database.getSystemSession(); + SessionLocal session = database.getSystemSession(); 
session.prepare("script to '" + testDatabase + ".sql'").query(0); session.prepare("shutdown immediately").update(); database.removeSession(null); @@ -156,17 +145,11 @@ private synchronized void logDb(String fileName) { } testDatabase += "X"; try { - if (fileName.endsWith(Constants.SUFFIX_PAGE_FILE)) { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_PAGE_FILE); - } else { - IOUtils.copyFiles(fileName, testDatabase + - Constants.SUFFIX_MV_FILE); - } + IOUtils.copyFiles(fileName, testDatabase + + Constants.SUFFIX_MV_FILE); // avoid using the Engine class to avoid deadlocks - Properties p = new Properties(); String url = "jdbc:h2:" + testDatabase + ";FILE_LOCK=NO"; - ConnectionInfo ci = new ConnectionInfo(url, p); + ConnectionInfo ci = new ConnectionInfo(url, null, null, null); Database database = new Database(ci, null); // close the database database.removeSession(null); diff --git a/h2/src/test/org/h2/test/unit/TestSampleApps.java b/h2/src/test/org/h2/test/unit/TestSampleApps.java index f29a29ced9..2bcafae5b2 100644 --- a/h2/src/test/org/h2/test/unit/TestSampleApps.java +++ b/h2/src/test/org/h2/test/unit/TestSampleApps.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -12,17 +12,19 @@ import java.io.PrintStream; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; - +import java.nio.charset.StandardCharsets; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.tools.DeleteDbFiles; import org.h2.util.IOUtils; import org.h2.util.StringUtils; +import org.h2.util.Utils10; /** * Tests the sample apps. 
*/ -public class TestSampleApps extends TestBase { +public class TestSampleApps extends TestDb { /** * Run just this test. @@ -30,26 +32,31 @@ public class TestSampleApps extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { + public boolean isEnabled() { if (!getBaseDir().startsWith(TestBase.BASE_TEST_DIR)) { - return; + return false; } - deleteDb("optimizations"); + return true; + } + + @Override + public void test() throws Exception { + deleteDb(getTestName()); InputStream in = getClass().getClassLoader().getResourceAsStream( "org/h2/samples/optimizations.sql"); new File(getBaseDir()).mkdirs(); FileOutputStream out = new FileOutputStream(getBaseDir() + "/optimizations.sql"); IOUtils.copyAndClose(in, out); - String url = "jdbc:h2:" + getBaseDir() + "/optimizations"; + String url = "jdbc:h2:" + getBaseDir() + "/" + getTestName(); testApp("", org.h2.tools.RunScript.class, "-url", url, "-user", "sa", "-password", "sa", "-script", getBaseDir() + "/optimizations.sql", "-checkResults"); - deleteDb("optimizations"); + deleteDb(getTestName()); testApp("Compacting...\nDone.", org.h2.samples.Compact.class); testApp("NAME: Bob Meier\n" + "EMAIL: bob.meier@abcde.abc\n" + @@ -84,9 +91,11 @@ public void test() throws Exception { // process) testApp("The sum is 20.00", org.h2.samples.TriggerSample.class); testApp("Hello: 1\nWorld: 2", org.h2.samples.TriggerPassData.class); - testApp("table test:\n" + + testApp("Key 1 was generated\n" + + "Key 2 was generated\n\n" + + "TEST_TABLE:\n" + "1 Hallo\n\n" + - "test_view:\n" + + "TEST_VIEW:\n" + "1 Hallo", org.h2.samples.UpdatableView.class); testApp( @@ -105,8 +114,6 @@ public void test() throws Exception { // tools testApp("Allows changing the database file encryption password or algorithm*", org.h2.tools.ChangeFileEncryption.class, "-help"); - 
testApp("Allows changing the database file encryption password or algorithm*", - org.h2.tools.ChangeFileEncryption.class); testApp("Deletes all files belonging to a database.*", org.h2.tools.DeleteDbFiles.class, "-help"); FileUtils.delete(getBaseDir() + "/optimizations.sql"); @@ -131,7 +138,7 @@ private void testApp(String expected, Class clazz, String... args) out.flush(); System.setOut(oldOut); System.setErr(oldErr); - String s = new String(buff.toByteArray(), "UTF-8"); + String s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); s = StringUtils.replaceAll(s, "\r\n", "\n"); s = s.trim(); expected = expected.trim(); diff --git a/h2/src/test/org/h2/test/unit/TestScriptReader.java b/h2/src/test/org/h2/test/unit/TestScriptReader.java index 913575ec73..6c430e9e76 100644 --- a/h2/src/test/org/h2/test/unit/TestScriptReader.java +++ b/h2/src/test/org/h2/test/unit/TestScriptReader.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -21,7 +21,7 @@ public class TestScriptReader extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,16 +46,17 @@ private void testRandom() { } String s = buff.toString(); StringReader reader = new StringReader(s); - ScriptReader source = new ScriptReader(reader); - for (int j = 0; j < l; j++) { - String e = source.readStatement(); - String c = sql[j]; - if (c.length() == 0 && j == l - 1) { - c = null; + try (ScriptReader source = new ScriptReader(reader)) { + for (int j = 0; j < l; j++) { + String e = source.readStatement(); + String c = sql[j]; + if (c.length() == 0 && j == l - 1) { + c = null; + } + assertEquals(c, e); } - assertEquals(c, e); + assertEquals(null, source.readStatement()); } - assertEquals(null, source.readStatement()); } } @@ -125,10 +126,17 @@ private static String randomStatement(Random random) { buff.append('*'); String[] ch = { ";", "-", "//", "/* ", "--", "\n", "\r", "a", "$" }; int l = random.nextInt(4); + int comments = 0; for (int j = 0; j < l; j++) { - buff.append(ch[random.nextInt(ch.length)]); + String s = ch[random.nextInt(ch.length)]; + buff.append(s); + if (s.equals("/* ")) { + comments++; + } + } + while (comments-- >= 0) { + buff.append("*/"); } - buff.append("*/"); } break; } @@ -187,12 +195,50 @@ private void testCommon() { assertEquals(null, source.readStatement()); source.close(); + s = "//"; + source = new ScriptReader(new StringReader(s)); + assertEquals("//", source.readStatement()); + assertTrue(source.isInsideRemark()); + assertFalse(source.isBlockRemark()); + source.close(); + // check handling of unclosed block comments s = "/*xxx"; source = new ScriptReader(new StringReader(s)); assertEquals("/*xxx", source.readStatement()); assertTrue(source.isBlockRemark()); source.close(); + + s = "/*xxx*"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*xxx*", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/*xxx* "; + source = new 
ScriptReader(new StringReader(s)); + assertEquals("/*xxx* ", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/*xxx/"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*xxx/", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + // nested comments + s = "/*/**/SCRIPT;*/"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/*/**/SCRIPT;*/", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); + + s = "/* /* */ SCRIPT; */"; + source = new ScriptReader(new StringReader(s)); + assertEquals("/* /* */ SCRIPT; */", source.readStatement()); + assertTrue(source.isBlockRemark()); + source.close(); } } diff --git a/h2/src/test/org/h2/test/unit/TestSecurity.java b/h2/src/test/org/h2/test/unit/TestSecurity.java index 0d8388a0b3..7f3c97050c 100644 --- a/h2/src/test/org/h2/test/unit/TestSecurity.java +++ b/h2/src/test/org/h2/test/unit/TestSecurity.java @@ -1,17 +1,22 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.Arrays; +import java.util.Random; + import org.h2.security.BlockCipher; import org.h2.security.CipherFactory; import org.h2.security.SHA256; +import org.h2.security.SHA3; import org.h2.test.TestBase; import org.h2.util.StringUtils; @@ -26,15 +31,18 @@ public class TestSecurity extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { testConnectWithHash(); testSHA(); + testSHA3(); testAES(); testBlockCiphers(); + testRemoveAnonFromLegacyAlgorithms(); + // testResetLegacyAlgorithms(); } private static void testConnectWithHash() throws SQLException { @@ -174,6 +182,38 @@ private void checkSHA256(String message, String expected) { assertEquals(expected, hash); } + private void testSHA3() { + try { + MessageDigest md = MessageDigest.getInstance("SHA3-224"); + Random r = new Random(); + byte[] bytes1 = new byte[r.nextInt(1025)]; + byte[] bytes2 = new byte[256]; + r.nextBytes(bytes1); + r.nextBytes(bytes2); + testSHA3(md, SHA3.getSha3_224(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-256"), SHA3.getSha3_256(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-384"), SHA3.getSha3_384(), bytes1, bytes2); + testSHA3(MessageDigest.getInstance("SHA3-512"), SHA3.getSha3_512(), bytes1, bytes2); + } catch (NoSuchAlgorithmException e) { + // Java 8 doesn't support SHA-3 + } + } + + private void testSHA3(MessageDigest md1, SHA3 md2, byte[] bytes1, byte[] bytes2) { + md1.update(bytes1); + md2.update(bytes1); + md1.update(bytes2, 0, 1); + md2.update(bytes2, 0, 1); + md1.update(bytes2, 1, 33); + md2.update(bytes2, 1, 33); + md1.update(bytes2, 34, 222); + md2.update(bytes2, 34, 222); + assertEquals(md1.digest(), md2.digest()); + md1.update(bytes2, 1, 1); + md2.update(bytes2, 1, 1); + assertEquals(md1.digest(), md2.digest()); + } + private void testBlockCiphers() { for (String algorithm : new String[] { "AES", "FOG" }) { byte[] test = new byte[4096]; @@ -251,4 +291,43 @@ private static boolean isCompressible(byte[] data) { return len * r < len * 120; } + private void testRemoveAnonFromLegacyAlgorithms() { + String legacyAlgorithms = "K_NULL, C_NULL, M_NULL, DHE_DSS_EXPORT" + + ", DHE_RSA_EXPORT, DH_anon_EXPORT, DH_DSS_EXPORT, 
DH_RSA_EXPORT, RSA_EXPORT" + + ", DH_anon, ECDH_anon, RC4_128, RC4_40, DES_CBC, DES40_CBC"; + String expectedLegacyWithoutDhAnon = "K_NULL, C_NULL, M_NULL, DHE_DSS_EXPORT" + + ", DHE_RSA_EXPORT, DH_anon_EXPORT, DH_DSS_EXPORT, DH_RSA_EXPORT, RSA_EXPORT" + + ", RC4_128, RC4_40, DES_CBC, DES40_CBC"; + assertEquals(expectedLegacyWithoutDhAnon, + CipherFactory.removeDhAnonFromCommaSeparatedList(legacyAlgorithms)); + + legacyAlgorithms = "ECDH_anon, DH_anon_EXPORT, DH_anon"; + expectedLegacyWithoutDhAnon = "DH_anon_EXPORT"; + assertEquals(expectedLegacyWithoutDhAnon, + CipherFactory.removeDhAnonFromCommaSeparatedList(legacyAlgorithms)); + + legacyAlgorithms = null; + assertNull(CipherFactory.removeDhAnonFromCommaSeparatedList(legacyAlgorithms)); + } + + /** + * This test is meaningful when run in isolation. However, tests of server + * sockets or ssl connections may modify the global state given by the + * jdk.tls.legacyAlgorithms security property (for a good reason). + * It is best to avoid running it in test suites, as it could itself lead + * to a modification of the global state with hard-to-track consequences. 
+ */ + @SuppressWarnings("unused") + private void testResetLegacyAlgorithms() { + String legacyAlgorithmsBefore = CipherFactory.getLegacyAlgorithmsSilently(); + assertEquals("Failed assumption: jdk.tls.legacyAlgorithms" + + " has been modified from its initial setting", + CipherFactory.DEFAULT_LEGACY_ALGORITHMS, legacyAlgorithmsBefore); + CipherFactory.removeAnonFromLegacyAlgorithms(); + CipherFactory.resetDefaultLegacyAlgorithms(); + String legacyAlgorithmsAfter = CipherFactory.getLegacyAlgorithmsSilently(); + assertEquals(CipherFactory.DEFAULT_LEGACY_ALGORITHMS, legacyAlgorithmsAfter); + } + + } diff --git a/h2/src/test/org/h2/test/unit/TestServlet.java b/h2/src/test/org/h2/test/unit/TestServlet.java index 6400dfea35..8dd911ced6 100644 --- a/h2/src/test/org/h2/test/unit/TestServlet.java +++ b/h2/src/test/org/h2/test/unit/TestServlet.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -17,7 +17,6 @@ import java.util.Map; import java.util.Properties; import java.util.Set; - import javax.servlet.Filter; import javax.servlet.FilterRegistration; import javax.servlet.FilterRegistration.Dynamic; @@ -30,17 +29,16 @@ import javax.servlet.SessionCookieConfig; import javax.servlet.SessionTrackingMode; import javax.servlet.descriptor.JspConfigDescriptor; - import org.h2.api.ErrorCode; import org.h2.server.web.DbStarter; import org.h2.test.TestBase; -import org.h2.util.New; +import org.h2.test.TestDb; /** * Tests the DbStarter servlet. * This test simulates a minimum servlet container environment. */ -public class TestServlet extends TestBase { +public class TestServlet extends TestDb { /** * Run just this test. 
@@ -48,7 +46,7 @@ public class TestServlet extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } /** @@ -58,7 +56,7 @@ public static void main(String... a) throws Exception { static class TestServletContext implements ServletContext { private final Properties initParams = new Properties(); - private final HashMap attributes = New.hashMap(); + private final HashMap attributes = new HashMap<>(); @Override public void setAttribute(String key, Object value) { @@ -150,6 +148,7 @@ public String getServerInfo() { * @deprecated as of servlet API 2.1 */ @Override + @Deprecated public Servlet getServlet(String string) { throw new UnsupportedOperationException(); } @@ -162,6 +161,7 @@ public String getServletContextName() { /** * @deprecated as of servlet API 2.1 */ + @Deprecated @Override public Enumeration getServletNames() { throw new UnsupportedOperationException(); @@ -170,6 +170,7 @@ public Enumeration getServletNames() { /** * @deprecated as of servlet API 2.0 */ + @Deprecated @Override public Enumeration getServlets() { throw new UnsupportedOperationException(); @@ -183,6 +184,7 @@ public void log(String string) { /** * @deprecated as of servlet API 2.1 */ + @Deprecated @Override public void log(Exception exception, String string) { throw new UnsupportedOperationException(); @@ -335,14 +337,58 @@ public void setSessionTrackingModes(Set arg0) { throw new UnsupportedOperationException(); } + @Override + public String getVirtualServerName() { + throw new UnsupportedOperationException(); + } + + @Override + public ServletRegistration.Dynamic addJspFile(String servletName, String jspFile) { + throw new UnsupportedOperationException(); + } + + @Override + public int getSessionTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setSessionTimeout(int sessionTimeout) { + throw new 
UnsupportedOperationException(); + } + + @Override + public String getRequestCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setRequestCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } + + @Override + public String getResponseCharacterEncoding() { + throw new UnsupportedOperationException(); + } + + @Override + public void setResponseCharacterEncoding(String encoding) { + throw new UnsupportedOperationException(); + } } @Override - public void test() throws SQLException { + public boolean isEnabled() { if (config.networked || config.memory) { - return; + return false; } + return true; + } + + @Override + public void test() throws SQLException { DbStarter listener = new DbStarter(); TestServletContext context = new TestServletContext(); @@ -369,16 +415,16 @@ public void test() throws SQLException { stat2.execute("SELECT * FROM T"); stat2.execute("DROP TABLE T"); - assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, stat1). + assertThrows(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, stat1). execute("SELECT * FROM T"); conn2.close(); listener.contextDestroyed(event); // listener must be stopped - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this).getConnection( - "jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", - getUser(), getPassword()); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:8888/" + getBaseDir() + "/servlet", getUser(), + getPassword())); // connection must be closed assertThrows(ErrorCode.OBJECT_CLOSED, stat1). diff --git a/h2/src/test/org/h2/test/unit/TestShell.java b/h2/src/test/org/h2/test/unit/TestShell.java index eb00205719..36d9373293 100644 --- a/h2/src/test/org/h2/test/unit/TestShell.java +++ b/h2/src/test/org/h2/test/unit/TestShell.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -13,9 +13,11 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.h2.test.TestBase; import org.h2.tools.Shell; import org.h2.util.Task; +import org.h2.util.Utils10; /** * Test the shell tool. @@ -40,27 +42,27 @@ public class TestShell extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { Shell shell = new Shell(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - shell.setOut(new PrintStream(buff)); + shell.setOut(new PrintStream(buff, false, "UTF-8")); shell.runTool("-url", "jdbc:h2:mem:", "-driver", "org.h2.Driver", "-user", "sa", "-password", "sa", "-properties", "null", "-sql", "select 'Hello ' || 'World' as hi"); - String s = new String(buff.toByteArray()); + String s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(s, "HI"); assertContains(s, "Hello World"); assertContains(s, "(1 row, "); shell = new Shell(); buff = new ByteArrayOutputStream(); - shell.setOut(new PrintStream(buff)); + shell.setOut(new PrintStream(buff, false, "UTF-8")); shell.runTool("-help"); - s = new String(buff.toByteArray()); + s = Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); assertContains(s, "Interactive command line tool to access a database using JDBC."); @@ -107,8 +109,9 @@ public void call() throws Exception { testOut.println(""); read("Driver"); testOut.println("sa"); - read("User"); testOut.println("sa"); + testOut.println("sa"); + read("User"); read("Password"); } read("Commands are case insensitive"); @@ -194,7 +197,7 @@ public void 
call() throws Exception { testOut.println("list"); read("sql> Result list mode is now on"); - testOut.println("select 1 first, 2 second;"); + testOut.println("select 1 first, 2 `second`;"); read("sql> FIRST : 1"); read("SECOND: 2"); read("(1 row, "); diff --git a/h2/src/test/org/h2/test/unit/TestSort.java b/h2/src/test/org/h2/test/unit/TestSort.java index f8ff7a641d..ab7efe8e8b 100644 --- a/h2/src/test/org/h2/test/unit/TestSort.java +++ b/h2/src/test/org/h2/test/unit/TestSort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -21,17 +21,14 @@ public class TestSort extends TestBase { /** * The number of times the compare method was called. */ - AtomicInteger compareCount = new AtomicInteger(); + private AtomicInteger compareCount = new AtomicInteger(); /** * The comparison object used in this test. */ - Comparator comp = new Comparator() { - @Override - public int compare(Long o1, Long o2) { - compareCount.incrementAndGet(); - return Long.valueOf(o1 >> 32).compareTo(o2 >> 32); - } + Comparator comp = (o1, o2) -> { + compareCount.incrementAndGet(); + return Long.compare(o1 >> 32, o2 >> 32); }; private final Long[] array = new Long[100000]; @@ -43,7 +40,7 @@ public int compare(Long o1, Long o2) { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -88,17 +85,17 @@ private void test(Class c) throws Exception { * * @param type the type of data */ - private void test(String type) throws Exception { + private void test(@SuppressWarnings("unused") String type) throws Exception { compareCount.set(0); - // long t = System.currentTimeMillis(); + // long t = System.nanoTime(); clazz.getMethod("sort", Object[].class, Comparator.class).invoke(null, array, comp); // System.out.printf( // "%4d ms; %10d comparisons order: %s data: %s\n", - // (System.currentTimeMillis() - t), + // TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t), // compareCount.get(), clazz, type); verify(array); diff --git a/h2/src/test/org/h2/test/unit/TestStreams.java b/h2/src/test/org/h2/test/unit/TestStreams.java index 0decb9566c..73a3c7cc36 100644 --- a/h2/src/test/org/h2/test/unit/TestStreams.java +++ b/h2/src/test/org/h2/test/unit/TestStreams.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -27,7 +27,7 @@ public class TestStreams extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override diff --git a/h2/src/test/org/h2/test/unit/TestStringCache.java b/h2/src/test/org/h2/test/unit/TestStringCache.java index a35a1b9b61..ccfa2a18b9 100644 --- a/h2/src/test/org/h2/test/unit/TestStringCache.java +++ b/h2/src/test/org/h2/test/unit/TestStringCache.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.util.Locale; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.test.TestBase; import org.h2.util.StringUtils; @@ -23,7 +24,6 @@ public class TestStringCache extends TestBase { private final Random random = new Random(1); private final String[] some = { null, "", "ABC", "this is a medium sized string", "1", "2" }; - private boolean returnNew; private boolean useIntern; /** @@ -33,18 +33,13 @@ public class TestStringCache extends TestBase { * @param args the command line parameters */ public static void main(String... args) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); new TestStringCache().runBenchmark(); } @Override public void test() throws InterruptedException { testToUpperToLower(); - returnNew = true; - StringUtils.clearCache(); - testSingleThread(getSize(5000, 20000)); - testMultiThreads(); - returnNew = false; StringUtils.clearCache(); testSingleThread(getSize(5000, 20000)); testMultiThreads(); @@ -63,24 +58,24 @@ private void testToUpperCache() { } int repeat = 100000; int testLen = 0; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); for (int a = 0; a < repeat; a++) { for (String x : test) { String y = StringUtils.toUpperEnglish(x); testLen += y.length(); } } - time = System.currentTimeMillis() - time; - System.out.println("cache " + time); - time = System.currentTimeMillis(); + time = System.nanoTime() - time; + System.out.println("cache " + TimeUnit.NANOSECONDS.toMillis(time)); + time = System.nanoTime(); for (int a = 0; a < repeat; a++) { for (String x : test) { String y = x.toUpperCase(Locale.ENGLISH); testLen -= y.length(); } } - time = 
System.currentTimeMillis() - time; - System.out.println("toUpperCase " + time); + time = System.nanoTime() - time; + System.out.println("toUpperCase " + TimeUnit.NANOSECONDS.toMillis(time)); assertEquals(0, testLen); } @@ -105,13 +100,13 @@ private void runBenchmark() { testToUpperCache(); testToUpperCache(); testToUpperCache(); - returnNew = false; for (int i = 0; i < 6; i++) { useIntern = (i % 2) == 0; - long time = System.currentTimeMillis(); + long time = System.nanoTime(); testSingleThread(100000); - time = System.currentTimeMillis() - time; - System.out.println(time + " ms (useIntern=" + useIntern + ")"); + time = System.nanoTime() - time; + System.out.println(TimeUnit.NANOSECONDS.toMillis(time) + + " ms (useIntern=" + useIntern + ")"); } } @@ -124,7 +119,8 @@ private String randomString() { } return s; } - int len = random.nextBoolean() ? random.nextInt(1000) : random.nextInt(10); + int len = random.nextBoolean() ? random.nextInt(1000) + : random.nextInt(10); StringBuilder buff = new StringBuilder(len); for (int i = 0; i < len; i++) { buff.append(random.nextInt(0xfff)); @@ -137,29 +133,16 @@ private String randomString() { */ void testString() { String a = randomString(); - if (returnNew) { - String b = StringUtils.fromCacheOrNew(a); - try { - assertEquals(a, b); - } catch (Exception e) { - TestBase.logError("error", e); - } - if (a != null && a == b && a.length() > 0) { - throw new AssertionError("a=" + System.identityHashCode(a) + - " b=" + System.identityHashCode(b)); - } + String b; + if (useIntern) { + b = a == null ? null : a.intern(); } else { - String b; - if (useIntern) { - b = a == null ? 
null : a.intern(); - } else { - b = StringUtils.cache(a); - } - try { - assertEquals(a, b); - } catch (Exception e) { - TestBase.logError("error", e); - } + b = StringUtils.cache(a); + } + try { + assertEquals(a, b); + } catch (Exception e) { + TestBase.logError("error", e); } } @@ -173,12 +156,9 @@ private void testMultiThreads() throws InterruptedException { int threadCount = getSize(3, 100); Thread[] threads = new Thread[threadCount]; for (int i = 0; i < threadCount; i++) { - Thread t = new Thread(new Runnable() { - @Override - public void run() { - while (!stop) { - testString(); - } + Thread t = new Thread(() -> { + while (!stop) { + testString(); } }); threads[i] = t; diff --git a/h2/src/test/org/h2/test/unit/TestStringUtils.java b/h2/src/test/org/h2/test/unit/TestStringUtils.java index d1f928a2ad..5115c4c374 100644 --- a/h2/src/test/org/h2/test/unit/TestStringUtils.java +++ b/h2/src/test/org/h2/test/unit/TestStringUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; @@ -8,13 +8,13 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; -import java.util.Date; import java.util.Random; + +import org.h2.expression.function.DateTimeFormatFunction; import org.h2.message.DbException; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; -import org.h2.util.DateTimeUtils; import org.h2.util.StringUtils; +import org.h2.value.ValueTimestampTimeZone; /** * Tests string utility methods. @@ -27,11 +27,12 @@ public class TestStringUtils extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws Exception { + testParseUInt31(); testHex(); testXML(); testSplit(); @@ -39,6 +40,35 @@ public void test() throws Exception { testURL(); testPad(); testReplaceAll(); + testTrim(); + testTrimSubstring(); + testTruncateString(); + } + + private void testParseUInt31() { + assertEquals(0, StringUtils.parseUInt31("101", 1, 2)); + assertEquals(11, StringUtils.parseUInt31("11", 0, 2)); + assertEquals(0, StringUtils.parseUInt31("000", 0, 3)); + assertEquals(1, StringUtils.parseUInt31("01", 0, 2)); + assertEquals(999999999, StringUtils.parseUInt31("X999999999", 1, 10)); + assertEquals(2147483647, StringUtils.parseUInt31("2147483647", 0, 10)); + testParseUInt31Bad(null, 0, 1); + testParseUInt31Bad("1", -1, 1); + testParseUInt31Bad("1", 0, 0); + testParseUInt31Bad("12", 1, 0); + testParseUInt31Bad("-0", 0, 2); + testParseUInt31Bad("+0", 0, 2); + testParseUInt31Bad("2147483648", 0, 10); + testParseUInt31Bad("21474836470", 0, 11); + } + + private void testParseUInt31Bad(String s, int start, int end) { + try { + StringUtils.parseUInt31(s, start, end); + } catch (NullPointerException | IndexOutOfBoundsException | NumberFormatException e) { + return; + } + fail(); } private void testHex() { @@ -51,18 +81,9 @@ private void testHex() { StringUtils.convertHexToBytes("fAcE")); assertEquals(new byte[] { (byte) 0xfa, (byte) 0xce }, StringUtils.convertHexToBytes("FaCe")); - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("120"); - }}; - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("fast"); - }}; - new AssertThrows(DbException.class) { @Override - public void test() { - StringUtils.convertHexToBytes("012=abcf"); - }}; + assertThrows(DbException.class, () -> StringUtils.convertHexToBytes("120")); + assertThrows(DbException.class, () 
-> StringUtils.convertHexToBytes("fast")); + assertThrows(DbException.class, () -> StringUtils.convertHexToBytes("012=abcf")); } private void testPad() { @@ -84,7 +105,7 @@ private void testXML() { StringUtils.xmlText("Rand&Blue")); assertEquals("<<[[[]]]>>", StringUtils.xmlCData("<<[[[]]]>>")); - Date dt = DateTimeUtils.parseDateTime( + ValueTimestampTimeZone dt = DateTimeFormatFunction.parseDateTime(null, "2001-02-03 04:05:06 GMT", "yyyy-MM-dd HH:mm:ss z", "en", "GMT"); String s = StringUtils.xmlStartDoc() @@ -94,19 +115,19 @@ private void testXML() { StringUtils.xmlComment("Test Comment\nZeile2") + StringUtils.xmlNode("channel", null, StringUtils.xmlNode("title", null, "H2 Database Engine") - + StringUtils.xmlNode("link", null, "http://www.h2database.com") + + StringUtils.xmlNode("link", null, "https://h2database.com") + StringUtils.xmlNode("description", null, "H2 Database Engine") + StringUtils.xmlNode("language", null, "en-us") + StringUtils.xmlNode("pubDate", null, - DateTimeUtils.formatDateTime(dt, + DateTimeFormatFunction.formatDateTime(null, dt, "EEE, d MMM yyyy HH:mm:ss z", "en", "GMT")) + StringUtils.xmlNode("lastBuildDate", null, - DateTimeUtils.formatDateTime(dt, + DateTimeFormatFunction.formatDateTime(null, dt, "EEE, d MMM yyyy HH:mm:ss z", "en", "GMT")) + StringUtils.xmlNode("item", null, StringUtils.xmlNode("title", null, "New Version 0.9.9.9.9") - + StringUtils.xmlNode("link", null, "http://www.h2database.com") + + StringUtils.xmlNode("link", null, "https://h2database.com") + StringUtils.xmlNode("description", null, StringUtils.xmlCData("\nNew Features\nTest\n"))))); assertEquals( @@ -120,14 +141,14 @@ private void testXML() { + " -->\n" + " \n" + " H2 Database Engine\n" - + " http://www.h2database.com\n" + + " https://h2database.com\n" + " H2 Database Engine\n" + " en-us\n" + " Sat, 3 Feb 2001 04:05:06 GMT\n" + " Sat, 3 Feb 2001 04:05:06 GMT\n" + " \n" + " New Version 0.9.9.9.9\n" - + " http://www.h2database.com\n" + + " https://h2database.com\n" 
+ " \n" + " StringUtils.trimSubstring(" with (", 1, 8)); + } + + private void testTrimSubstringImpl(String expected, String string, int startIndex, int endIndex) { + assertEquals(expected, StringUtils.trimSubstring(string, startIndex, endIndex)); + assertEquals(expected, StringUtils + .trimSubstring(new StringBuilder(endIndex - startIndex), string, startIndex, endIndex).toString()); + } + + private void testTruncateString() { + assertEquals("", StringUtils.truncateString("", 1)); + assertEquals("", StringUtils.truncateString("a", 0)); + assertEquals("_\ud83d\ude00", StringUtils.truncateString("_\ud83d\ude00", 3)); + assertEquals("_", StringUtils.truncateString("_\ud83d\ude00", 2)); + assertEquals("_\ud83d", StringUtils.truncateString("_\ud83d_", 2)); } } diff --git a/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java b/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java new file mode 100644 index 0000000000..5d29fce860 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestTimeStampWithTimeZone.java @@ -0,0 +1,231 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.time.OffsetDateTime; +import java.util.TimeZone; + +import org.h2.engine.CastDataProvider; +import org.h2.test.TestBase; +import org.h2.test.TestDb; +import org.h2.util.DateTimeUtils; +import org.h2.util.JSR310Utils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.util.TimeZoneProvider; +import org.h2.value.TypeInfo; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; + +/** + */ +public class TestTimeStampWithTimeZone extends TestDb { + + /** + * Run just this test. + * + * @param a ignored + */ + public static void main(String... a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws SQLException { + deleteDb(getTestName()); + test1(); + test2(); + test3(); + test4(); + test5(); + testOrder(); + testConversions(); + deleteDb(getTestName()); + } + + private void test1() throws SQLException { + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + stat.execute("create table test(id identity, t1 timestamp(9) with time zone)"); + stat.execute("insert into test(t1) values('1970-01-01 12:00:00.00+00:15')"); + // verify NanosSinceMidnight is in local time and not UTC + stat.execute("insert into test(t1) values('2016-09-24 00:00:00.000000001+00:01')"); + stat.execute("insert into test(t1) values('2016-09-24 00:00:00.000000001-00:01')"); + // verify year month day is in local time and not UTC + stat.execute("insert into test(t1) values('2016-01-01 05:00:00.00+10:00')"); + stat.execute("insert into test(t1) values('2015-12-31 19:00:00.00-10:00')"); + ResultSet 
rs = stat.executeQuery("select t1 from test"); + rs.next(); + assertEquals("1970-01-01 12:00:00+00:15", rs.getString(1)); + OffsetDateTime ts = (OffsetDateTime) rs.getObject(1); + assertEquals(1970, ts.getYear()); + assertEquals(1, ts.getMonthValue()); + assertEquals(1, ts.getDayOfMonth()); + assertEquals(15 * 60, ts.getOffset().getTotalSeconds()); + OffsetDateTime expected = OffsetDateTime.parse("1970-01-01T12:00+00:15"); + assertEquals(expected, ts); + assertEquals("1970-01-01T12:00+00:15", rs.getObject(1, OffsetDateTime.class).toString()); + rs.next(); + ts = (OffsetDateTime) rs.getObject(1); + assertEquals(2016, ts.getYear()); + assertEquals(9, ts.getMonthValue()); + assertEquals(24, ts.getDayOfMonth()); + assertEquals(1L, ts.toLocalTime().toNanoOfDay()); + assertEquals(60, ts.getOffset().getTotalSeconds()); + assertEquals("2016-09-24T00:00:00.000000001+00:01", rs.getObject(1, OffsetDateTime.class).toString()); + rs.next(); + ts = (OffsetDateTime) rs.getObject(1); + assertEquals(2016, ts.getYear()); + assertEquals(9, ts.getMonthValue()); + assertEquals(24, ts.getDayOfMonth()); + assertEquals(1L, ts.toLocalTime().toNanoOfDay()); + assertEquals(-60, ts.getOffset().getTotalSeconds()); + assertEquals("2016-09-24T00:00:00.000000001-00:01", rs.getObject(1, OffsetDateTime.class).toString()); + rs.next(); + ts = (OffsetDateTime) rs.getObject(1); + assertEquals(2016, ts.getYear()); + assertEquals(1, ts.getMonthValue()); + assertEquals(1, ts.getDayOfMonth()); + assertEquals("2016-01-01T05:00+10:00", rs.getObject(1, OffsetDateTime.class).toString()); + rs.next(); + ts = (OffsetDateTime) rs.getObject(1); + assertEquals(2015, ts.getYear()); + assertEquals(12, ts.getMonthValue()); + assertEquals(31, ts.getDayOfMonth()); + assertEquals("2015-12-31T19:00-10:00", rs.getObject(1, OffsetDateTime.class).toString()); + + ResultSetMetaData metaData = rs.getMetaData(); + int columnType = metaData.getColumnType(1); + assertEquals(Types.TIMESTAMP_WITH_TIMEZONE, columnType); + 
assertEquals("java.time.OffsetDateTime", metaData.getColumnClassName(1)); + + rs.close(); + + rs = stat.executeQuery("select cast(t1 as varchar) from test"); + assertTrue(rs.next()); + assertEquals(expected, rs.getObject(1, OffsetDateTime.class)); + + stat.close(); + conn.close(); + } + + private void test2() { + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-01 12:00:00.00+00:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 12:00:01.00+01:15", null); + int c = a.compareTo(b, null, null); + assertEquals(1, c); + c = b.compareTo(a, null, null); + assertEquals(-1, c); + } + + private void test3() { + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:02.00+01:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 23:00:01.00+00:15", null); + int c = a.compareTo(b, null, null); + assertEquals(1, c); + c = b.compareTo(a, null, null); + assertEquals(-1, c); + } + + private void test4() { + ValueTimestampTimeZone a = ValueTimestampTimeZone.parse("1970-01-02 00:00:01.00+01:15", null); + ValueTimestampTimeZone b = ValueTimestampTimeZone.parse("1970-01-01 23:00:01.00+00:15", null); + int c = a.compareTo(b, null, null); + assertEquals(0, c); + c = b.compareTo(a, null, null); + assertEquals(0, c); + } + + private void test5() throws SQLException { + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + stat.execute("create table test5(id identity, t1 timestamp with time zone)"); + stat.execute("insert into test5(t1) values('2016-09-24 00:00:00.000000001+00:01')"); + stat.execute("insert into test5(t1) values('2017-04-20 00:00:00.000000001+00:01')"); + + PreparedStatement preparedStatement = conn.prepareStatement("select id" + + " from test5" + + " where (t1 < ?)"); + Value value = ValueTimestampTimeZone.parse("2016-12-24 00:00:00.000000001+00:01", null); + preparedStatement.setObject(1, JSR310Utils.valueToOffsetDateTime(value, null)); + + 
ResultSet rs = preparedStatement.executeQuery(); + + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertFalse(rs.next()); + + rs.close(); + preparedStatement.close(); + stat.close(); + conn.close(); + } + + private void testOrder() throws SQLException { + Connection conn = getConnection(getTestName()); + Statement stat = conn.createStatement(); + stat.execute("create table test_order(id identity, t1 timestamp with time zone)"); + stat.execute("insert into test_order(t1) values('1970-01-01 12:00:00.00+00:15')"); + stat.execute("insert into test_order(t1) values('1970-01-01 12:00:01.00+01:15')"); + ResultSet rs = stat.executeQuery("select t1 from test_order order by t1"); + rs.next(); + assertEquals("1970-01-01 12:00:01+01:15", rs.getString(1)); + conn.close(); + } + + private void testConversionsImpl(String timeStr, boolean testReverse, CastDataProvider provider) { + ValueTimestamp ts = ValueTimestamp.parse(timeStr, null); + ValueDate d = ts.convertToDate(provider); + ValueTime t = (ValueTime) ts.convertTo(TypeInfo.TYPE_TIME, provider); + ValueTimestampTimeZone tstz = ValueTimestampTimeZone.parse(timeStr, null); + assertEquals(ts, tstz.convertTo(TypeInfo.TYPE_TIMESTAMP, provider)); + assertEquals(d, tstz.convertToDate(provider)); + assertEquals(t, tstz.convertTo(TypeInfo.TYPE_TIME, provider)); + assertEquals(LegacyDateTimeUtils.toTimestamp(provider, null, ts), + LegacyDateTimeUtils.toTimestamp(provider, null, tstz)); + if (testReverse) { + assertEquals(0, tstz.compareTo(ts.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), null, null)); + assertEquals(d.convertTo(TypeInfo.TYPE_TIMESTAMP, provider) + .convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), + d.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider)); + assertEquals(t.convertTo(TypeInfo.TYPE_TIMESTAMP, provider) + .convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider), + t.convertTo(TypeInfo.TYPE_TIMESTAMP_TZ, provider)); + } + } + + private void testConversions() { + TestDate.SimpleCastDataProvider provider = 
new TestDate.SimpleCastDataProvider(); + TimeZone current = TimeZone.getDefault(); + try { + for (String id : TimeZone.getAvailableIDs()) { + if (id.equals("GMT0")) { + continue; + } + TimeZone.setDefault(TimeZone.getTimeZone(id)); + provider.currentTimeZone = TimeZoneProvider.ofId(id); + DateTimeUtils.resetCalendar(); + testConversionsImpl("2017-12-05 23:59:30.987654321-12:00", true, provider); + testConversionsImpl("2000-01-02 10:20:30.123456789+07:30", true, provider); + boolean testReverse = !"Africa/Monrovia".equals(id); + testConversionsImpl("1960-04-06 12:13:14.777666555+12:00", testReverse, provider); + } + } finally { + TimeZone.setDefault(current); + DateTimeUtils.resetCalendar(); + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestTools.java b/h2/src/test/org/h2/test/unit/TestTools.java index d0fe683fae..69b8c9a0b2 100644 --- a/h2/src/test/org/h2/test/unit/TestTools.java +++ b/h2/src/test/org/h2/test/unit/TestTools.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.awt.Button; +import java.awt.HeadlessException; import java.awt.event.ActionEvent; import java.awt.event.MouseEvent; import java.io.ByteArrayOutputStream; @@ -17,8 +18,10 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.math.BigDecimal; +import java.math.BigInteger; import java.net.ServerSocket; import java.net.Socket; +import java.nio.charset.StandardCharsets; import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; @@ -32,20 +35,22 @@ import java.sql.Timestamp; import java.sql.Types; import java.util.ArrayList; +import java.util.List; import java.util.Random; - +import java.util.UUID; import org.h2.api.ErrorCode; import org.h2.engine.SysProperties; import org.h2.store.FileLister; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.test.TestDb; import org.h2.test.trace.Player; -import org.h2.test.utils.AssertThrows; import org.h2.tools.Backup; import org.h2.tools.ChangeFileEncryption; import org.h2.tools.Console; import org.h2.tools.ConvertTraceFile; import org.h2.tools.DeleteDbFiles; +import org.h2.tools.GUIConsole; import org.h2.tools.Recover; import org.h2.tools.Restore; import org.h2.tools.RunScript; @@ -55,14 +60,17 @@ import org.h2.tools.SimpleResultSet.SimpleArray; import org.h2.util.JdbcUtils; import org.h2.util.Task; +import org.h2.util.Utils10; +import org.h2.value.ValueUuid; /** * Tests the database tools. */ -public class TestTools extends TestBase { +public class TestTools extends TestDb { private static String lastUrl; private Server server; + private List remainingServers = new ArrayList<>(3); /** * Run just this test. @@ -70,14 +78,19 @@ public class TestTools extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override - public void test() throws Exception { + public boolean isEnabled() { if (config.networked) { - return; + return false; } + return true; + } + + @Override + public void test() throws Exception { DeleteDbFiles.execute(getBaseDir(), null, true); org.h2.Driver.load(); testSimpleResultSet(); @@ -88,7 +101,6 @@ public void test() throws Exception { testDeleteFiles(); testScriptRunscriptLob(); testServerMain(); - testRemove(); testConvertTraceFile(); testManagementDb(); testChangeFileEncryption(false); @@ -113,15 +125,13 @@ private void testTcpServerWithoutPort() throws Exception { s2.stop(); s1 = Server.createTcpServer("-tcpPort", "9123").start(); assertEquals(9123, s1.getPort()); - createClassProxy(Server.class); - assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, - Server.createTcpServer("-tcpPort", "9123")).start(); + assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, () -> Server.createTcpServer("-tcpPort", "9123").start()); s1.stop(); } private void testConsole() throws Exception { String old = System.getProperty(SysProperties.H2_BROWSER); - Console c = new Console(); + GUIConsole c = new GUIConsole(); c.setOut(new PrintStream(new ByteArrayOutputStream())); try { @@ -132,7 +142,7 @@ private void testConsole() throws Exception { c.runTool("-web", "-webPort", "9002", "-tool", "-browser", "-tcp", "-tcpPort", "9003", "-pg", "-pgPort", "9004"); assertContains(lastUrl, ":9002"); - c.shutdown(); + shutdownConsole(c); // check if starting the browser works c.runTool("-web", "-webPort", "9002", "-tool"); @@ -142,29 +152,33 @@ private void testConsole() throws Exception { lastUrl = "-"; // double-click prevention is 100 ms Thread.sleep(200); - MouseEvent me = new MouseEvent(new Button(), 0, 0, 0, 0, 0, 0, - false, MouseEvent.BUTTON1); - c.mouseClicked(me); - assertContains(lastUrl, ":9002"); - lastUrl = "-"; - // no delay - ignore because it looks like a 
double click - c.mouseClicked(me); - assertEquals("-", lastUrl); - // open the window - c.actionPerformed(new ActionEvent(this, 0, "status")); - c.actionPerformed(new ActionEvent(this, 0, "exit")); + try { + MouseEvent me = new MouseEvent(new Button(), 0, 0, 0, 0, 0, 0, + false, MouseEvent.BUTTON1); + c.mouseClicked(me); + assertContains(lastUrl, ":9002"); + lastUrl = "-"; + // no delay - ignore because it looks like a double click + c.mouseClicked(me); + assertEquals("-", lastUrl); + // open the window + c.actionPerformed(new ActionEvent(this, 0, "status")); + c.actionPerformed(new ActionEvent(this, 0, "exit")); + + // check if the service was stopped + c.runTool("-webPort", "9002"); + + } catch (HeadlessException e) { + // ignore + } - // check if the service was stopped - c.runTool("-webPort", "9002"); - c.shutdown(); + shutdownConsole(c); // trying to use the same port for two services should fail, // but also stop the first service - createClassProxy(c.getClass()); - assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, c).runTool("-web", - "-webPort", "9002", "-tcp", "-tcpPort", "9002"); + assertThrows(ErrorCode.EXCEPTION_OPENING_PORT_2, + () -> c.runTool("-web", "-webPort", "9002", "-tcp", "-tcpPort", "9002")); c.runTool("-web", "-webPort", "9002"); - c.shutdown(); } finally { if (old != null) { @@ -172,6 +186,19 @@ private void testConsole() throws Exception { } else { System.clearProperty(SysProperties.H2_BROWSER); } + shutdownConsole(c); + } + } + + private static void shutdownConsole(Console c) { + c.shutdown(); + if (Thread.currentThread().isInterrupted()) { + // Clear interrupted state so test can continue its work safely + try { + Thread.sleep(1); + } catch (InterruptedException e) { + // Ignore + } } } @@ -185,14 +212,12 @@ public static void openBrowser(String url) { } private void testSimpleResultSet() throws Exception { - SimpleResultSet rs; rs = new SimpleResultSet(); rs.addColumn(null, 0, 0, 0); rs.addRow(1); - createClassProxy(rs.getClass()); - 
assertThrows(IllegalStateException.class, rs). - addColumn(null, 0, 0, 0); + SimpleResultSet r = rs; + assertThrows(IllegalStateException.class, () -> r.addColumn(null, 0, 0, 0)); assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType()); rs.next(); @@ -213,11 +238,11 @@ private void testSimpleResultSet() throws Exception { assertTrue(rs.getMetaData().isSearchable(1)); assertTrue(rs.getMetaData().isSigned(1)); assertFalse(rs.getMetaData().isWritable(1)); - assertEquals(null, rs.getMetaData().getCatalogName(1)); - assertEquals(null, rs.getMetaData().getColumnClassName(1)); + assertEquals("", rs.getMetaData().getCatalogName(1)); + assertEquals(Void.class.getName(), rs.getMetaData().getColumnClassName(1)); assertEquals("NULL", rs.getMetaData().getColumnTypeName(1)); - assertEquals(null, rs.getMetaData().getSchemaName(1)); - assertEquals(null, rs.getMetaData().getTableName(1)); + assertEquals("", rs.getMetaData().getSchemaName(1)); + assertEquals("", rs.getMetaData().getTableName(1)); assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, rs.getHoldability()); assertEquals(1, rs.getColumnCount()); @@ -245,7 +270,10 @@ private void testSimpleResultSet() throws Exception { Clob clob = new SimpleClob("Hello World"); Blob blob = new SimpleBlob(new byte[]{(byte) 1, (byte) 2}); rs.addRow(1, b, true, d, "10.3", Math.PI, "-3", a, t, ts, clob, blob); + rs.addRow(BigInteger.ONE, null, true, null, BigDecimal.ONE, 1d, null, null, null, null, null); + rs.addRow(BigInteger.ZERO, null, false, null, BigDecimal.ZERO, 0d, null, null, null, null, null); rs.addRow(null, null, null, null, null, null, null, null, null, null, null); + rs.addRow(null, null, true, null, null, null, null, null, null, null, null); rs.next(); @@ -258,6 +286,7 @@ private void testSimpleResultSet() throws Exception { assertEquals((short) 1, rs.getShort("a")); assertTrue(rs.getObject(1).getClass() == Integer.class); assertTrue(rs.getObject("a").getClass() == Integer.class); + assertTrue(rs.getBoolean(1)); assertEquals(b, 
rs.getBytes(2)); assertEquals(b, rs.getBytes("b")); @@ -276,6 +305,7 @@ private void testSimpleResultSet() throws Exception { assertTrue(Math.PI == rs.getDouble("f")); assertTrue((float) Math.PI == rs.getFloat(6)); assertTrue((float) Math.PI == rs.getFloat("f")); + assertTrue(rs.getBoolean(6)); assertEquals(-3, rs.getInt(7)); assertEquals(-3, rs.getByte(7)); @@ -314,6 +344,20 @@ private void testSimpleResultSet() throws Exception { rs.next(); + assertTrue(rs.getBoolean(1)); + assertTrue(rs.getBoolean(3)); + assertTrue(rs.getBoolean(5)); + assertTrue(rs.getBoolean(6)); + + rs.next(); + + assertFalse(rs.getBoolean(1)); + assertFalse(rs.getBoolean(3)); + assertFalse(rs.getBoolean(5)); + assertFalse(rs.getBoolean(6)); + + rs.next(); + assertEquals(0, rs.getLong(1)); assertTrue(rs.wasNull()); assertEquals(null, rs.getBytes(2)); @@ -347,6 +391,12 @@ private void testSimpleResultSet() throws Exception { assertNull(rs.getBinaryStream(12)); assertTrue(rs.wasNull()); + assertTrue(rs.next()); + assertTrue(rs.getBoolean(3)); + assertFalse(rs.wasNull()); + assertNull(rs.getObject(6, Float.class)); + assertTrue(rs.wasNull()); + // all updateX methods for (Method m: rs.getClass().getMethods()) { if (m.getName().startsWith("update")) { @@ -354,6 +404,12 @@ private void testSimpleResultSet() throws Exception { continue; } int len = m.getParameterTypes().length; + if (m.getName().equals("updateObject") && m.getParameterTypes().length > 2) { + Class p3 = m.getParameterTypes()[2]; + if (p3.toString().indexOf("SQLType") >= 0) { + continue; + } + } Object[] params = new Object[len]; int i = 0; String expectedValue = null; @@ -427,7 +483,7 @@ private void testSimpleResultSet() throws Exception { assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); assertEquals(0, rs.getFetchSize()); assertEquals(ResultSet.TYPE_SCROLL_INSENSITIVE, rs.getType()); - assertTrue(rs.getStatement() == null); + assertNull(rs.getStatement()); assertFalse(rs.isClosed()); rs.beforeFirst(); @@ -436,6 
+492,9 @@ private void testSimpleResultSet() throws Exception { assertFalse(rs.isClosed()); assertEquals(1, rs.getRow()); assertTrue(rs.next()); + assertTrue(rs.next()); + assertTrue(rs.next()); + assertTrue(rs.next()); assertFalse(rs.next()); assertThrows(ErrorCode.NO_DATA_AVAILABLE, (ResultSet) rs). getInt(1); @@ -443,21 +502,53 @@ private void testSimpleResultSet() throws Exception { assertFalse(rs.isClosed()); rs.close(); assertTrue(rs.isClosed()); + rs = new SimpleResultSet(); + rs.addColumn("TEST", Types.BINARY, 0, 0); + UUID uuid = UUID.randomUUID(); + rs.addRow(uuid); + rs.next(); + assertEquals(uuid, rs.getObject(1)); + assertEquals(uuid, ValueUuid.get(rs.getBytes(1)).getUuid()); + + assertTrue(rs.isWrapperFor(Object.class)); + assertTrue(rs.isWrapperFor(ResultSet.class)); + assertTrue(rs.isWrapperFor(rs.getClass())); + assertFalse(rs.isWrapperFor(Integer.class)); + assertTrue(rs == rs.unwrap(Object.class)); + assertTrue(rs == rs.unwrap(ResultSet.class)); + assertTrue(rs == rs.unwrap(rs.getClass())); + SimpleResultSet rs2 = rs; + assertThrows(ErrorCode.INVALID_VALUE_2, () -> rs2.unwrap(Integer.class)); } private void testJdbcDriverUtils() { - assertEquals("org.h2.Driver", - JdbcUtils.getDriver("jdbc:h2:~/test")); - assertEquals("org.postgresql.Driver", - JdbcUtils.getDriver("jdbc:postgresql:test")); - assertEquals(null, - JdbcUtils.getDriver("jdbc:unknown:test")); + assertEquals("org.h2.Driver", JdbcUtils.getDriver("jdbc:h2:~/test")); + assertEquals("org.postgresql.Driver", JdbcUtils.getDriver("jdbc:postgresql:test")); + assertEquals(null, JdbcUtils.getDriver("jdbc:unknown:test")); + try { + JdbcUtils.getConnection("org.h2.Driver", "jdbc:h2x:test", "sa", ""); + fail("Expected SQLException: 08001"); + } catch (SQLException e) { + assertEquals("08001", e.getSQLState()); + } + try { + JdbcUtils.getConnection("javax.naming.InitialContext", "ldap://localhost/ds", "sa", ""); + fail("Expected SQLException: 08001"); + } catch (SQLException e) { + 
assertEquals("08001", e.getSQLState()); + assertEquals("Only java scheme is supported for JNDI lookups", e.getMessage()); + } + try { + JdbcUtils.getConnection("org.h2.Driver", "jdbc:h2:mem:", "sa", "", null, true); + fail("Expected SQLException: " + ErrorCode.REMOTE_DATABASE_NOT_FOUND_1); + } catch (SQLException e) { + assertEquals(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, e.getErrorCode()); + } } private void testWrongServer() throws Exception { // try to connect when the server is not running - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). - getConnection("jdbc:h2:tcp://localhost:9001/test"); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, () -> getConnection("jdbc:h2:tcp://localhost:9001/test")); final ServerSocket serverSocket = new ServerSocket(9001); Task task = new Task() { @Override @@ -473,19 +564,20 @@ public void call() throws Exception { } } }; - task.execute(); - Thread.sleep(100); try { - getConnection("jdbc:h2:tcp://localhost:9001/test"); - fail(); - } catch (SQLException e) { - assertEquals(ErrorCode.CONNECTION_BROKEN_1, e.getErrorCode()); + task.execute(); + Thread.sleep(100); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, () -> getConnection("jdbc:h2:tcp://localhost:9001/test")); + } finally { + serverSocket.close(); + task.getException(); } - serverSocket.close(); - task.getException(); } private void testDeleteFiles() throws SQLException { + if (config.memory) { + return; + } deleteDb("testDeleteFiles"); Connection conn = getConnection("testDeleteFiles"); Statement stat = conn.createStatement(); @@ -503,72 +595,95 @@ private void testDeleteFiles() throws SQLException { deleteDb("testDeleteFiles"); } - private void testServerMain() throws SQLException { + private void testServerMain() throws Exception { + testNonSSL(); + if (!config.ci) { + testSSL(); + } + } + + private void testNonSSL() throws Exception { String result; Connection conn; - result = runServer(0, new String[]{"-?"}); - assertTrue(result.contains("Starts the H2 Console")); - 
assertTrue(result.indexOf("Unknown option") < 0); - - result = runServer(1, new String[]{"-xy"}); - assertTrue(result.contains("Starts the H2 Console")); - assertTrue(result.contains("Feature not supported")); - result = runServer(0, new String[]{"-tcp", - "-tcpPort", "9001", "-tcpPassword", "abc"}); - assertTrue(result.contains("tcp://")); - assertTrue(result.contains(":9001")); - assertTrue(result.contains("only local")); - assertTrue(result.indexOf("Starts the H2 Console") < 0); - conn = getConnection("jdbc:h2:tcp://localhost:9001/mem:", "sa", "sa"); - conn.close(); - result = runServer(0, new String[]{"-tcpShutdown", - "tcp://localhost:9001", "-tcpPassword", "abc", "-tcpShutdownForce"}); - assertTrue(result.contains("Shutting down")); - - result = runServer(0, new String[]{"-tcp", - "-tcpAllowOthers", "-tcpPort", "9001", "-tcpPassword", "abcdef", "-tcpSSL"}); - assertTrue(result.contains("ssl://")); - assertTrue(result.contains(":9001")); - assertTrue(result.contains("others can")); - assertTrue(result.indexOf("Starts the H2 Console") < 0); - conn = getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa"); - conn.close(); + try { + result = runServer(0, new String[]{"-?"}); + assertContains(result, "Starts the H2 Console"); + assertTrue(result.indexOf("Unknown option") < 0); + + result = runServer(1, new String[]{"-xy"}); + assertContains(result, "Starts the H2 Console"); + assertContains(result, "Feature not supported"); + result = runServer(0, new String[]{"-ifNotExists", "-tcp", + "-tcpPort", "9001", "-tcpPassword", "abc"}); + assertContains(result, "tcp://"); + assertContains(result, ":9001"); + assertContains(result, "only local"); + assertTrue(result.indexOf("Starts the H2 Console") < 0); + conn = getConnection("jdbc:h2:tcp://localhost:9001/mem:", "sa", "sa"); + conn.close(); + result = runServer(0, new String[]{"-tcpShutdown", + "tcp://localhost:9001", "-tcpPassword", "abc", "-tcpShutdownForce"}); + assertContains(result, "Shutting down"); + } 
finally { + shutdownServers(); + } + } - result = runServer(0, new String[]{"-tcpShutdown", - "ssl://localhost:9001", "-tcpPassword", "abcdef"}); - assertTrue(result.contains("Shutting down")); - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). - getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa"); - - result = runServer(0, new String[]{ - "-web", "-webPort", "9002", "-webAllowOthers", "-webSSL", - "-pg", "-pgAllowOthers", "-pgPort", "9003", - "-tcp", "-tcpAllowOthers", "-tcpPort", "9006", "-tcpPassword", "abc"}); - Server stop = server; - assertTrue(result.contains("https://")); - assertTrue(result.contains(":9002")); - assertTrue(result.contains("pg://")); - assertTrue(result.contains(":9003")); - assertTrue(result.contains("others can")); - assertTrue(result.indexOf("only local") < 0); - assertTrue(result.contains("tcp://")); - assertTrue(result.contains(":9006")); - - conn = getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa"); - conn.close(); + private void testSSL() throws Exception { + String result; + Connection conn; + + try { + result = runServer(0, new String[]{"-ifNotExists", "-tcp", + "-tcpAllowOthers", "-tcpPort", "9001", "-tcpPassword", "abcdef", "-tcpSSL"}); + assertContains(result, "ssl://"); + assertContains(result, ":9001"); + assertContains(result, "others can"); + assertTrue(result.indexOf("Starts the H2 Console") < 0); + conn = getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa"); + conn.close(); + + result = runServer(0, new String[]{"-tcpShutdown", + "ssl://localhost:9001", "-tcpPassword", "abcdef"}); + assertContains(result, "Shutting down"); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:ssl://localhost:9001/mem:", "sa", "sa")); + + result = runServer(0, new String[]{ + "-ifNotExists", "-web", "-webPort", "9002", "-webAllowOthers", "-webSSL", + "-pg", "-pgAllowOthers", "-pgPort", "9003", + "-tcp", "-tcpAllowOthers", "-tcpPort", "9006", "-tcpPassword", "abc"}); + Server stop = 
server; + assertContains(result, "https://"); + assertContains(result, ":9002"); + assertContains(result, "pg://"); + assertContains(result, ":9003"); + assertContains(result, "others can"); + assertTrue(result.indexOf("only local") < 0); + assertContains(result, "tcp://"); + assertContains(result, ":9006"); + + conn = getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa"); + conn.close(); - result = runServer(0, new String[]{"-tcpShutdown", - "tcp://localhost:9006", "-tcpPassword", "abc", "-tcpShutdownForce"}); - assertTrue(result.contains("Shutting down")); - stop.shutdown(); - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). - getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa"); + result = runServer(0, new String[]{"-tcpShutdown", + "tcp://localhost:9006", "-tcpPassword", "abc", "-tcpShutdownForce"}); + assertContains(result, "Shutting down"); + stop.shutdown(); + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:9006/mem:", "sa", "sa")); + } finally { + shutdownServers(); + } } - private String runServer(int exitCode, String... args) { + private String runServer(int exitCode, String... args) throws Exception { ByteArrayOutputStream buff = new ByteArrayOutputStream(); - PrintStream ps = new PrintStream(buff); + PrintStream ps = new PrintStream(buff, false, "UTF-8"); + if (server != null) { + remainingServers.add(server); + } server = new Server(); server.setOut(ps); int result = 0; @@ -580,8 +695,19 @@ private String runServer(int exitCode, String... 
args) { } assertEquals(exitCode, result); ps.flush(); - String s = new String(buff.toByteArray()); - return s; + return Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8); + } + + private void shutdownServers() { + for (Server remainingServer : remainingServers) { + if (remainingServer != null) { + remainingServer.shutdown(); + } + } + remainingServers.clear(); + if (server != null) { + server.shutdown(); + } } private void testConvertTraceFile() throws Exception { @@ -592,7 +718,7 @@ private void testConvertTraceFile() throws Exception { Connection conn = getConnection(url + ";TRACE_LEVEL_FILE=3", "sa", "sa"); Statement stat = conn.createStatement(); stat.execute( - "create table test(id int primary key, name varchar, amount decimal)"); + "create table test(id int primary key, name varchar, amount decimal(4, 2))"); PreparedStatement prep = conn.prepareStatement( "insert into test values(?, ?, ?)"); prep.setInt(1, 1); @@ -601,7 +727,7 @@ private void testConvertTraceFile() throws Exception { prep.executeUpdate(); stat.execute("create table test2(id int primary key,\n" + "a real, b double, c bigint,\n" + - "d smallint, e boolean, f binary, g date, h time, i timestamp)", + "d smallint, e boolean, f varbinary, g date, h time, i timestamp)", Statement.NO_GENERATED_KEYS); prep = conn.prepareStatement( "insert into test2 values(1, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); @@ -644,8 +770,7 @@ private void testConvertTraceFile() throws Exception { private void testTraceFile(String url) throws SQLException { Connection conn; - Recover.main("-removePassword", "-dir", getBaseDir(), "-db", - "toolsConvertTraceFile"); + Recover.main("-dir", getBaseDir(), "-db", "toolsConvertTraceFile"); conn = getConnection(url, "sa", ""); Statement stat = conn.createStatement(); ResultSet rs; @@ -661,42 +786,19 @@ private void testTraceFile(String url) throws SQLException { assertEquals(Double.MIN_VALUE, rs.getDouble("b")); assertEquals(Long.MIN_VALUE, rs.getLong("c")); 
assertEquals(Short.MIN_VALUE, rs.getShort("d")); - assertTrue(!rs.getBoolean("e")); + assertFalse(rs.getBoolean("e")); assertEquals(new byte[] { (byte) 10, (byte) 20 }, rs.getBytes("f")); assertEquals("2007-12-31", rs.getString("g")); assertEquals("23:59:59", rs.getString("h")); - assertEquals("2007-12-31 23:59:59.0", rs.getString("i")); + assertEquals("2007-12-31 23:59:59", rs.getString("i")); assertFalse(rs.next()); conn.close(); } - private void testRemove() throws SQLException { - if (config.mvStore) { + private void testRecover() throws SQLException { + if (config.memory) { return; } - deleteDb("toolsRemove"); - org.h2.Driver.load(); - String url = "jdbc:h2:" + getBaseDir() + "/toolsRemove"; - Connection conn = getConnection(url, "sa", "sa"); - Statement stat = conn.createStatement(); - stat.execute("create table test(id int primary key, name varchar)"); - stat.execute("insert into test values(1, 'Hello')"); - conn.close(); - Recover.main("-dir", getBaseDir(), "-db", "toolsRemove", - "-removePassword"); - conn = getConnection(url, "sa", ""); - stat = conn.createStatement(); - ResultSet rs; - rs = stat.executeQuery("select * from test"); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertEquals("Hello", rs.getString(2)); - conn.close(); - deleteDb("toolsRemove"); - FileUtils.delete(getBaseDir() + "/toolsRemove.h2.sql"); - } - - private void testRecover() throws SQLException { deleteDb("toolsRecover"); org.h2.Driver.load(); String url = getURL("toolsRecover", true); @@ -757,16 +859,14 @@ private void testManagementDb() throws SQLException { int count = getSize(2, 10); for (int i = 0; i < count; i++) { Server tcpServer = Server. 
- createTcpServer("-tcpPort", "9192").start(); + createTcpServer().start(); tcpServer.stop(); - tcpServer = Server.createTcpServer("-tcpPassword", "abc", - "-tcpPort", "9192").start(); + tcpServer = Server.createTcpServer("-tcpPassword", "abc").start(); tcpServer.stop(); } } private void testScriptRunscriptLob() throws Exception { - org.h2.Driver.load(); String url = getURL("jdbc:h2:" + getBaseDir() + "/testScriptRunscriptLob", true); String user = "sa", password = "abc"; @@ -792,7 +892,7 @@ private void testScriptRunscriptLob() throws Exception { byte[] large = new byte[getSize(10 * 1024, 100 * 1024)]; random.nextBytes(large); prep.setBytes(2, large); - String largeText = new String(large, "ISO-8859-1"); + String largeText = new String(large, StandardCharsets.ISO_8859_1); prep.setString(3, largeText); prep.execute(); @@ -801,8 +901,8 @@ private void testScriptRunscriptLob() throws Exception { "SELECT * FROM TEST ORDER BY ID"); rs.next(); assertEquals(1, rs.getInt(1)); - assertTrue(rs.getString(2) == null); - assertTrue(rs.getString(3) == null); + assertNull(rs.getString(2)); + assertNull(rs.getString(3)); rs.next(); assertEquals(2, rs.getInt(1)); assertEquals("face", rs.getString(2)); @@ -827,8 +927,7 @@ private void testScriptRunscriptLob() throws Exception { } - private void testScriptRunscript() throws SQLException { - org.h2.Driver.load(); + private void testScriptRunscript() throws Exception { String url = getURL("jdbc:h2:" + getBaseDir() + "/testScriptRunscript", true); String user = "sa", password = "abc"; @@ -865,10 +964,10 @@ private void testScriptRunscript() throws SQLException { "-quiet"); RunScript tool = new RunScript(); ByteArrayOutputStream buff = new ByteArrayOutputStream(); - tool.setOut(new PrintStream(buff)); + tool.setOut(new PrintStream(buff, false, "UTF-8")); tool.runTool("-url", url, "-user", user, "-password", password, "-script", fileName + ".txt", "-showResults"); - assertTrue(buff.toString().contains("Hello")); + 
assertContains(Utils10.byteArrayOutputStreamToString(buff, StandardCharsets.UTF_8), "Hello"); // test parsing of BLOCKSIZE option @@ -907,14 +1006,9 @@ private void testBackupRestore() throws SQLException { .executeQuery("SELECT * FROM TEST"); assertTrue(rs.next()); assertFalse(rs.next()); - new AssertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1) { - @Override - public void test() throws SQLException { - // must fail when the database is in use - Backup.main("-file", fileName, "-dir", getBaseDir(), "-db", - "testBackupRestore"); - } - }; + // must fail when the database is in use + assertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, + () -> Backup.main("-file", fileName, "-dir", getBaseDir(), "-db", "testBackupRestore")); conn.close(); DeleteDbFiles.main("-dir", getBaseDir(), "-db", "testBackupRestore", "-quiet"); @@ -932,21 +1026,16 @@ private void testChangeFileEncryption(boolean split) throws SQLException { conn.close(); String[] args = { "-dir", dir, "-db", "testChangeFileEncryption", "-cipher", "AES", "-decrypt", "abc", "-quiet" }; - ChangeFileEncryption.main(args); + new ChangeFileEncryption().runTool(args); args = new String[] { "-dir", dir, "-db", "testChangeFileEncryption", "-cipher", "AES", "-encrypt", "def", "-quiet" }; - ChangeFileEncryption.main(args); + new ChangeFileEncryption().runTool(args); conn = getConnection(url, "sa", "def 123"); stat = conn.createStatement(); stat.execute("SELECT * FROM TEST"); - new AssertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1) { - @Override - public void test() throws SQLException { - ChangeFileEncryption.main(new String[] { "-dir", dir, "-db", - "testChangeFileEncryption", "-cipher", "AES", - "-decrypt", "def", "-quiet" }); - } - }; + assertThrows(ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, + () -> new ChangeFileEncryption().runTool(new String[] { "-dir", dir, "-db", "testChangeFileEncryption", + "-cipher", "AES", "-decrypt", "def", "-quiet" })); conn.close(); args = new String[] { "-dir", dir, 
"-db", "testChangeFileEncryption", "-quiet" }; @@ -954,14 +1043,8 @@ public void test() throws SQLException { } private void testChangeFileEncryptionWithWrongPassword() throws SQLException { - if (config.mvStore) { - // the file system encryption abstraction used by the MVStore - // doesn't detect wrong passwords - return; - } org.h2.Driver.load(); final String dir = getBaseDir(); - // TODO: this doesn't seem to work in MVSTORE mode yet String url = "jdbc:h2:" + dir + "/testChangeFileEncryption;CIPHER=AES"; DeleteDbFiles.execute(dir, "testChangeFileEncryption", true); Connection conn = getConnection(url, "sa", "abc 123"); @@ -971,14 +1054,8 @@ private void testChangeFileEncryptionWithWrongPassword() throws SQLException { conn.close(); // try with wrong password, this used to have a bug where it kept the // file handle open - new AssertThrows(SQLException.class) { - @Override - public void test() throws SQLException { - ChangeFileEncryption.execute(dir, "testChangeFileEncryption", - "AES", "wrong".toCharArray(), - "def".toCharArray(), true); - } - }; + assertThrows(SQLException.class, () -> ChangeFileEncryption.execute(dir, "testChangeFileEncryption", "AES", + "wrong".toCharArray(), "def".toCharArray(), true)); ChangeFileEncryption.execute(dir, "testChangeFileEncryption", "AES", "abc".toCharArray(), "def".toCharArray(), true); @@ -993,77 +1070,70 @@ public void test() throws SQLException { private void testServer() throws SQLException { Connection conn; - deleteDb("test"); - Server tcpServer = Server.createTcpServer( - "-baseDir", getBaseDir(), - "-tcpPort", "9192", - "-tcpAllowOthers").start(); - conn = getConnection("jdbc:h2:tcp://localhost:9192/test", "sa", ""); - conn.close(); - // must not be able to use a different base dir - new AssertThrows(ErrorCode.IO_EXCEPTION_1) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:9192/../test", "sa", ""); - }}; - new AssertThrows(ErrorCode.IO_EXCEPTION_1) { - @Override - 
public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:9192/../test2/test", "sa", ""); - }}; - tcpServer.stop(); - Server.createTcpServer( - "-ifExists", - "-tcpPassword", "abc", - "-baseDir", getBaseDir(), - "-tcpPort", "9192").start(); - // must not be able to create new db - new AssertThrows(ErrorCode.DATABASE_NOT_FOUND_1) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:9192/test2", "sa", ""); - }}; - new AssertThrows(ErrorCode.DATABASE_NOT_FOUND_1) { - @Override - public void test() throws SQLException { - getConnection("jdbc:h2:tcp://localhost:9192/test2;ifexists=false", "sa", ""); - }}; - conn = getConnection("jdbc:h2:tcp://localhost:9192/test", "sa", ""); - conn.close(); - new AssertThrows(ErrorCode.WRONG_USER_OR_PASSWORD) { - @Override - public void test() throws SQLException { - Server.shutdownTcpServer("tcp://localhost:9192", "", true, false); - }}; - conn = getConnection("jdbc:h2:tcp://localhost:9192/test", "sa", ""); - // conn.close(); - Server.shutdownTcpServer("tcp://localhost:9192", "abc", true, false); - // check that the database is closed - deleteDb("test"); - // server must have been closed - assertThrows(ErrorCode.CONNECTION_BROKEN_1, this). 
- getConnection("jdbc:h2:tcp://localhost:9192/test", "sa", ""); - JdbcUtils.closeSilently(conn); - // Test filesystem prefix and escape from baseDir - deleteDb("testSplit"); - server = Server.createTcpServer( - "-baseDir", getBaseDir(), - "-tcpPort", "9192", - "-tcpAllowOthers").start(); - conn = getConnection("jdbc:h2:tcp://localhost:9192/split:testSplit", "sa", ""); - conn.close(); + try { + deleteDb("test"); + Server tcpServer = Server.createTcpServer("-ifNotExists", + "-baseDir", getBaseDir(), + "-tcpAllowOthers").start(); + remainingServers.add(tcpServer); + final int port = tcpServer.getPort(); + conn = getConnection("jdbc:h2:tcp://localhost:" + port + "/test", "sa", ""); + conn.close(); + // must not be able to use a different base dir + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port + "/../test", "sa", "")); + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + port + "/../test2/test", "sa", "")); + assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, + () -> Server.shutdownTcpServer("tcp://localhost:" + port, "", true, false)); + tcpServer.stop(); + Server tcpServerWithPassword = Server.createTcpServer( + "-ifExists", + "-tcpPassword", "abc", + "-baseDir", getBaseDir()).start(); + final int prt = tcpServerWithPassword.getPort(); + remainingServers.add(tcpServerWithPassword); + // must not be able to create new db + assertThrows(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test2", "sa", "")); + assertThrows(ErrorCode.REMOTE_DATABASE_NOT_FOUND_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test2;ifexists=false", "sa", "")); + conn = getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", ""); + conn.close(); + assertThrows(ErrorCode.WRONG_USER_OR_PASSWORD, + () -> Server.shutdownTcpServer("tcp://localhost:" + prt, "", true, false)); + conn = getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", 
""); + // conn.close(); + Server.shutdownTcpServer("tcp://localhost:" + prt, "abc", true, false); + // check that the database is closed + deleteDb("test"); + // server must have been closed + assertThrows(ErrorCode.CONNECTION_BROKEN_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + prt + "/test", "sa", "")); + JdbcUtils.closeSilently(conn); + // Test filesystem prefix and escape from baseDir + deleteDb("testSplit"); + server = Server.createTcpServer("-ifNotExists", + "-baseDir", getBaseDir(), + "-tcpAllowOthers").start(); + final int p = server.getPort(); + conn = getConnection("jdbc:h2:tcp://localhost:" + p + "/split:testSplit", "sa", ""); + conn.close(); - assertThrows(ErrorCode.IO_EXCEPTION_1, this). - getConnection("jdbc:h2:tcp://localhost:9192/../test", "sa", ""); + assertThrows(ErrorCode.IO_EXCEPTION_1, + () -> getConnection("jdbc:h2:tcp://localhost:" + p + "/../test", "sa", "")); - server.stop(); - deleteDb("testSplit"); + server.stop(); + deleteDb("testSplit"); + } finally { + shutdownServers(); + } } /** * A simple Clob implementation. */ - class SimpleClob implements Clob { + static class SimpleClob implements Clob { private final String data; @@ -1153,7 +1223,7 @@ public void truncate(long len) throws SQLException { /** * A simple Blob implementation. */ - class SimpleBlob implements Blob { + static class SimpleBlob implements Blob { private final byte[] data; diff --git a/h2/src/test/org/h2/test/unit/TestTraceSystem.java b/h2/src/test/org/h2/test/unit/TestTraceSystem.java index 0c2fd80db6..1c6c1e6af9 100644 --- a/h2/src/test/org/h2/test/unit/TestTraceSystem.java +++ b/h2/src/test/org/h2/test/unit/TestTraceSystem.java @@ -1,15 +1,17 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.h2.message.TraceSystem; import org.h2.store.fs.FileUtils; import org.h2.test.TestBase; +import org.h2.util.Utils10; /** * Tests the trace system @@ -22,7 +24,7 @@ public class TestTraceSystem extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -32,24 +34,30 @@ public void test() throws Exception { testAdapter(); } - private static void testAdapter() { + private void testAdapter() { TraceSystem ts = new TraceSystem(null); ts.setName("test"); ts.setLevelFile(TraceSystem.ADAPTER); ts.getTrace("test").debug("test"); ts.getTrace("test").info("test"); ts.getTrace("test").error(new Exception(), "test"); + + // The used SLF4J-nop logger has all log levels disabled, + // so this should be reflected in the trace system. 
+ assertFalse(ts.isEnabled(TraceSystem.INFO)); + assertFalse(ts.getTrace("test").isInfoEnabled()); + ts.close(); } - private void testTraceDebug() { + private void testTraceDebug() throws Exception { TraceSystem ts = new TraceSystem(null); ByteArrayOutputStream out = new ByteArrayOutputStream(); - ts.setSysOut(new PrintStream(out)); + ts.setSysOut(new PrintStream(out, false, "UTF-8")); ts.setLevelSystemOut(TraceSystem.DEBUG); ts.getTrace("test").debug(new Exception("error"), "test"); ts.close(); - String outString = new String(out.toByteArray()); + String outString = Utils10.byteArrayOutputStreamToString(out, StandardCharsets.UTF_8); assertContains(outString, "error"); assertContains(outString, "Exception"); assertContains(outString, "test"); diff --git a/h2/src/test/org/h2/test/unit/TestUpgrade.java b/h2/src/test/org/h2/test/unit/TestUpgrade.java new file mode 100644 index 0000000000..b448560ec9 --- /dev/null +++ b/h2/src/test/org/h2/test/unit/TestUpgrade.java @@ -0,0 +1,112 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.unit; + +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Properties; +import java.util.Random; + +import org.h2.engine.Constants; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FileUtils; +import org.h2.test.TestBase; +import org.h2.tools.Upgrade; + +/** + * Tests upgrade utility. + */ +public class TestUpgrade extends TestBase { + + /** + * Run just this test. + * + * @param a + * ignored + */ + public static void main(String... 
a) throws Exception { + TestBase.createCaller().init().testFromMain(); + } + + @Override + public void test() throws Exception { + deleteDb(); + testUpgrade(1, 2, 120); + testUpgrade(1, 4, 200); + } + + private void testUpgrade(int major, int minor, int build) throws Exception { + String baseDir = getBaseDir(); + String url = "jdbc:h2:" + baseDir + "/testUpgrade"; + Properties p = new Properties(); + p.put("user", "sa"); + p.put("password", "password"); + Random r = new Random(); + byte[] bytes = new byte[10_000]; + r.nextBytes(bytes); + String s = new String(bytes, StandardCharsets.ISO_8859_1); + java.sql.Driver driver = Upgrade.loadH2(build); + try { + assertEquals(major, driver.getMajorVersion()); + assertEquals(minor, driver.getMinorVersion()); + try (Connection conn = driver.connect(url, p)) { + Statement stat = conn.createStatement(); + stat.execute("CREATE TABLE TEST(ID BIGINT AUTO_INCREMENT PRIMARY KEY, B BINARY, L BLOB, C CLOB)"); + PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST(B, L, C) VALUES (?, ?, ?)"); + prep.setBytes(1, bytes); + prep.setBytes(2, bytes); + prep.setString(3, s); + prep.execute(); + } + } finally { + Upgrade.unloadH2(driver); + } + assertTrue(Upgrade.upgrade(url, p, build)); + try (Connection conn = DriverManager.getConnection(url, p)) { + Statement stat = conn.createStatement(); + try (ResultSet rs = stat.executeQuery("TABLE TEST")) { + assertTrue(rs.next()); + assertEquals(bytes, rs.getBytes(2)); + assertEquals(bytes, rs.getBytes(3)); + assertEquals(s, rs.getString(4)); + assertFalse(rs.next()); + } + try (ResultSet rs = stat.executeQuery("SELECT COLUMN_NAME, DATA_TYPE, CHARACTER_OCTET_LENGTH" + + " FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' ORDER BY ORDINAL_POSITION")) { + assertTrue(rs.next()); + assertEquals("ID", rs.getString(1)); + assertEquals("BIGINT", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("B", rs.getString(1)); + assertEquals("BINARY VARYING", rs.getString(2)); + 
assertEquals(Constants.MAX_STRING_LENGTH, rs.getLong(3)); + assertTrue(rs.next()); + assertEquals("L", rs.getString(1)); + assertEquals("BINARY LARGE OBJECT", rs.getString(2)); + assertEquals(Long.MAX_VALUE, rs.getLong(3)); + assertTrue(rs.next()); + assertEquals("C", rs.getString(1)); + assertEquals("CHARACTER LARGE OBJECT", rs.getString(2)); + assertEquals(Long.MAX_VALUE, rs.getLong(3)); + assertFalse(rs.next()); + } + } + deleteDb(); + } + + private void deleteDb() { + for (FilePath p : FilePath.get(getBaseDir()).newDirectoryStream()) { + if (p.getName().startsWith("testUpgrade")) { + FileUtils.deleteRecursive(p.toString(), false); + } + } + } + +} diff --git a/h2/src/test/org/h2/test/unit/TestUtils.java b/h2/src/test/org/h2/test/unit/TestUtils.java index fe49f4251d..29fbefae65 100644 --- a/h2/src/test/org/h2/test/unit/TestUtils.java +++ b/h2/src/test/org/h2/test/unit/TestUtils.java @@ -1,24 +1,21 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.Reader; import java.io.StringReader; import java.math.BigInteger; -import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; -import java.util.Date; import java.util.Random; import org.h2.test.TestBase; +import org.h2.util.Bits; import org.h2.util.IOUtils; import org.h2.util.Utils; @@ -38,7 +35,7 @@ public class TestUtils extends TestBase { * @param a ignored */ public static void main(String... 
a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override @@ -46,11 +43,13 @@ public void test() throws Exception { testIOUtils(); testSortTopN(); testSortTopNRandom(); + testWriteReadInt(); testWriteReadLong(); testGetNonPrimitiveClass(); testGetNonPrimitiveClass(); testGetNonPrimitiveClass(); testReflectionUtils(); + testParseBoolean(); } private void testIOUtils() throws IOException { @@ -92,35 +91,69 @@ private void testIOUtils() throws IOException { } } + private void testWriteReadInt() { + byte[] buff = new byte[4]; + for (int x : new int[]{Integer.MIN_VALUE, Integer.MAX_VALUE, 0, 1, -1, + Short.MIN_VALUE, Short.MAX_VALUE}) { + testIntImpl1(buff, x); + } + Random r = new Random(1); + for (int i = 0; i < 1000; i++) { + testIntImpl1(buff, r.nextInt()); + } + } + + private void testIntImpl1(byte[] buff, int x) { + int r = Integer.reverseBytes(x); + Bits.writeInt(buff, 0, x); + testIntImpl2(buff, x, r); + Bits.writeIntLE(buff, 0, x); + testIntImpl2(buff, r, x); + } + + private void testIntImpl2(byte[] buff, int x, int r) { + assertEquals(x, Bits.readInt(buff, 0)); + assertEquals(r, Bits.readIntLE(buff, 0)); + } + private void testWriteReadLong() { byte[] buff = new byte[8]; for (long x : new long[]{Long.MIN_VALUE, Long.MAX_VALUE, 0, 1, -1, Integer.MIN_VALUE, Integer.MAX_VALUE}) { - Utils.writeLong(buff, 0, x); - long y = Utils.readLong(buff, 0); - assertEquals(x, y); + testLongImpl1(buff, x); } Random r = new Random(1); for (int i = 0; i < 1000; i++) { - long x = r.nextLong(); - Utils.writeLong(buff, 0, x); - long y = Utils.readLong(buff, 0); - assertEquals(x, y); + testLongImpl1(buff, r.nextLong()); } } + private void testLongImpl1(byte[] buff, long x) { + long r = Long.reverseBytes(x); + Bits.writeLong(buff, 0, x); + testLongImpl2(buff, x, r); + Bits.writeLongLE(buff, 0, x); + testLongImpl2(buff, r, x); + Bits.writeDouble(buff, 0, Double.longBitsToDouble(x)); + testLongImpl2(buff, x, r); + 
Bits.writeDoubleLE(buff, 0, Double.longBitsToDouble(x)); + testLongImpl2(buff, r, x); + } + + private void testLongImpl2(byte[] buff, long x, long r) { + assertEquals(x, Bits.readLong(buff, 0)); + assertEquals(r, Bits.readLongLE(buff, 0)); + assertEquals(Double.longBitsToDouble(x), Bits.readDouble(buff, 0)); + assertEquals(Double.longBitsToDouble(r), Bits.readDoubleLE(buff, 0)); + } + private void testSortTopN() { - Comparator comp = new Comparator() { - @Override - public int compare(Integer o1, Integer o2) { - return o1.compareTo(o2); - } - }; + Comparator comp = Comparator.naturalOrder(); Integer[] arr = new Integer[] {}; - Utils.sortTopN(arr, 0, 5, comp); + Utils.sortTopN(arr, 0, 0, comp); arr = new Integer[] { 1 }; - Utils.sortTopN(arr, 0, 5, comp); + Utils.sortTopN(arr, 0, 1, comp); arr = new Integer[] { 3, 5, 1, 4, 2 }; Utils.sortTopN(arr, 0, 2, comp); @@ -130,23 +163,19 @@ public int compare(Integer o1, Integer o2) { private void testSortTopNRandom() { Random rnd = new Random(); - Comparator comp = new Comparator() { - @Override - public int compare(Integer o1, Integer o2) { - return o1.compareTo(o2); - } - }; + Comparator comp = Comparator.naturalOrder(); for (int z = 0; z < 10000; z++) { - Integer[] arr = new Integer[1 + rnd.nextInt(500)]; - for (int i = 0; i < arr.length; i++) { + int length = 1 + rnd.nextInt(500); + Integer[] arr = new Integer[length]; + for (int i = 0; i < length; i++) { arr[i] = rnd.nextInt(50); } - Integer[] arr2 = Arrays.copyOf(arr, arr.length); - int offset = rnd.nextInt(arr.length); - int limit = rnd.nextInt(arr.length); - Utils.sortTopN(arr, offset, limit, comp); + Integer[] arr2 = Arrays.copyOf(arr, length); + int offset = rnd.nextInt(length); + int limit = rnd.nextInt(length - offset + 1); + Utils.sortTopN(arr, offset, offset + limit, comp); Arrays.sort(arr2, comp); - for (int i = offset, end = Math.min(offset + limit, arr.length); i < end; i++) { + for (int i = offset, end = offset + limit; i < end; i++) { if 
(!arr[i].equals(arr2[i])) { fail(offset + " " + end + "\n" + Arrays.toString(arr) + "\n" + Arrays.toString(arr2)); @@ -175,10 +204,10 @@ private void testGetNonPrimitiveClass(Class expected, Class p) { private void testReflectionUtils() throws Exception { // Static method call - long currentTimeMillis1 = System.currentTimeMillis(); - long currentTimeMillis2 = (Long) Utils.callStaticMethod( - "java.lang.System.currentTimeMillis"); - assertTrue(currentTimeMillis1 <= currentTimeMillis2); + long currentTimeNanos1 = System.nanoTime(); + long currentTimeNanos2 = (Long) Utils.callStaticMethod( + "java.lang.System.nanoTime"); + assertTrue(currentTimeNanos1 <= currentTimeNanos2); // New Instance Object instance = Utils.newInstance("java.lang.StringBuilder"); // New Instance with int parameter @@ -189,35 +218,60 @@ private void testReflectionUtils() throws Exception { // Instance methods long x = (Long) Utils.callMethod(instance, "longValue"); assertEquals(10, x); - // Static fields - String pathSeparator = (String) Utils - .getStaticField("java.io.File.pathSeparator"); - assertEquals(File.pathSeparator, pathSeparator); // Instance fields - String test = (String) Utils.getField(this, "testField"); - assertEquals(this.testField, test); - // Class present? 
- assertFalse(Utils.isClassPresent("abc")); - assertTrue(Utils.isClassPresent(getClass().getName())); Utils.callStaticMethod("java.lang.String.valueOf", "a"); Utils.callStaticMethod("java.awt.AWTKeyStroke.getAWTKeyStroke", 'x', java.awt.event.InputEvent.SHIFT_DOWN_MASK); - // Common comparable superclass - assertFalse(Utils.haveCommonComparableSuperclass( - Integer.class, - Long.class)); - assertTrue(Utils.haveCommonComparableSuperclass( - Integer.class, - Integer.class)); - assertTrue(Utils.haveCommonComparableSuperclass( - Timestamp.class, - Date.class)); - assertFalse(Utils.haveCommonComparableSuperclass( - ArrayList.class, - Long.class)); - assertFalse(Utils.haveCommonComparableSuperclass( - Integer.class, - ArrayList.class)); + } + + private void testParseBooleanCheckFalse(String value) { + assertFalse(Utils.parseBoolean(value, false, false)); + assertFalse(Utils.parseBoolean(value, false, true)); + assertFalse(Utils.parseBoolean(value, true, false)); + assertFalse(Utils.parseBoolean(value, true, true)); + } + + private void testParseBooleanCheckTrue(String value) { + assertTrue(Utils.parseBoolean(value, false, false)); + assertTrue(Utils.parseBoolean(value, false, true)); + assertTrue(Utils.parseBoolean(value, true, false)); + assertTrue(Utils.parseBoolean(value, true, true)); + } + + private void testParseBoolean() { + // Test for default value in case of null + assertFalse(Utils.parseBoolean(null, false, false)); + assertFalse(Utils.parseBoolean(null, false, true)); + assertTrue(Utils.parseBoolean(null, true, false)); + assertTrue(Utils.parseBoolean(null, true, true)); + // Test assorted valid strings + testParseBooleanCheckFalse("0"); + testParseBooleanCheckFalse("f"); + testParseBooleanCheckFalse("F"); + testParseBooleanCheckFalse("n"); + testParseBooleanCheckFalse("N"); + testParseBooleanCheckFalse("no"); + testParseBooleanCheckFalse("No"); + testParseBooleanCheckFalse("NO"); + testParseBooleanCheckFalse("false"); + testParseBooleanCheckFalse("False"); + 
testParseBooleanCheckFalse("FALSE"); + testParseBooleanCheckTrue("1"); + testParseBooleanCheckTrue("t"); + testParseBooleanCheckTrue("T"); + testParseBooleanCheckTrue("y"); + testParseBooleanCheckTrue("Y"); + testParseBooleanCheckTrue("yes"); + testParseBooleanCheckTrue("Yes"); + testParseBooleanCheckTrue("YES"); + testParseBooleanCheckTrue("true"); + testParseBooleanCheckTrue("True"); + testParseBooleanCheckTrue("TRUE"); + // Test other values + assertFalse(Utils.parseBoolean("BAD", false, false)); + assertTrue(Utils.parseBoolean("BAD", true, false)); + assertThrows(IllegalArgumentException.class, () -> Utils.parseBoolean("BAD", false, true)); + assertThrows(IllegalArgumentException.class, () -> Utils.parseBoolean("BAD", true, true)); } } diff --git a/h2/src/test/org/h2/test/unit/TestValue.java b/h2/src/test/org/h2/test/unit/TestValue.java index 40bf4ac8c7..d04d2e18b5 100644 --- a/h2/src/test/org/h2/test/unit/TestValue.java +++ b/h2/src/test/org/h2/test/unit/TestValue.java @@ -1,40 +1,61 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.unit; +import static org.h2.engine.Constants.MAX_ARRAY_CARDINALITY; +import static org.h2.engine.Constants.MAX_NUMERIC_PRECISION; +import static org.h2.engine.Constants.MAX_STRING_LENGTH; + +import java.io.ByteArrayInputStream; +import java.io.InputStreamReader; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.Date; +import java.sql.DriverManager; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; -import java.sql.Types; +import java.util.Arrays; +import java.util.Calendar; +import java.util.TimeZone; import java.util.UUID; - import org.h2.api.ErrorCode; +import org.h2.api.H2Type; +import org.h2.engine.Database; +import org.h2.engine.SessionLocal; +import org.h2.jdbc.JdbcConnection; +import org.h2.store.DataHandler; import org.h2.test.TestBase; -import org.h2.test.utils.AssertThrows; -import org.h2.tools.SimpleResultSet; -import org.h2.value.DataType; +import org.h2.test.TestDb; +import org.h2.util.Bits; +import org.h2.util.JdbcUtils; +import org.h2.util.LegacyDateTimeUtils; +import org.h2.value.TypeInfo; import org.h2.value.Value; import org.h2.value.ValueArray; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueBlob; +import org.h2.value.ValueClob; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueLobDb; -import org.h2.value.ValueResultSet; -import org.h2.value.ValueString; +import org.h2.value.ValueInterval; +import org.h2.value.ValueJavaObject; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueToObjectConverter2; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; /** * Tests features of values. 
*/ -public class TestValue extends TestBase { +public class TestValue extends TestDb { /** * Run just this test. @@ -42,155 +63,133 @@ public class TestValue extends TestBase { * @param a ignored */ public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); + TestBase.createCaller().init().testFromMain(); } @Override public void test() throws SQLException { + testBinaryAndUuid(); testCastTrim(); - testValueResultSet(); testDataType(); + testArray(); testUUID(); testDouble(false); testDouble(true); + testTimestamp(); testModulusDouble(); testModulusDecimal(); testModulusOperator(); + testLobComparison(); + testTypeInfo(); + testH2Type(); + testHigherType(); + } + + private void testBinaryAndUuid() throws SQLException { + try (Connection conn = getConnection("binaryAndUuid")) { + UUID uuid = UUID.randomUUID(); + PreparedStatement prep; + ResultSet rs; + // Check conversion to byte[] + prep = conn.prepareStatement("SELECT * FROM TABLE(X BINARY(16)=?)"); + prep.setObject(1, new Object[] { uuid }); + rs = prep.executeQuery(); + rs.next(); + assertTrue(Arrays.equals(Bits.uuidToBytes(uuid), (byte[]) rs.getObject(1))); + // Check conversion to byte[] + prep = conn.prepareStatement("SELECT * FROM TABLE(X VARBINARY=?)"); + prep.setObject(1, new Object[] { uuid }); + rs = prep.executeQuery(); + rs.next(); + assertTrue(Arrays.equals(Bits.uuidToBytes(uuid), (byte[]) rs.getObject(1))); + // Check that type is not changed + prep = conn.prepareStatement("SELECT * FROM TABLE(X UUID=?)"); + prep.setObject(1, new Object[] { uuid }); + rs = prep.executeQuery(); + rs.next(); + assertEquals(uuid, rs.getObject(1)); + } finally { + deleteDb("binaryAndUuid"); + } } private void testCastTrim() { Value v; String spaces = new String(new char[100]).replace((char) 0, ' '); - v = ValueArray.get(new Value[] { ValueString.get("hello"), - ValueString.get("world") }); - assertEquals(10, v.getPrecision()); - assertEquals(5, v.convertPrecision(5, 
true).getPrecision()); - v = ValueArray.get(new Value[]{ValueString.get(""), ValueString.get("")}); - assertEquals(0, v.getPrecision()); - assertEquals("('')", v.convertPrecision(1, true).toString()); - - v = ValueBytes.get(spaces.getBytes()); - assertEquals(100, v.getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getBytes().length); - assertEquals(32, v.convertPrecision(10, false).getBytes()[9]); - assertEquals(10, v.convertPrecision(10, true).getPrecision()); - - final Value vd = ValueDecimal.get(new BigDecimal("1234567890.123456789")); - assertEquals(19, vd.getPrecision()); - assertEquals("1234567890.1234567", vd.convertPrecision(10, true).getString()); - new AssertThrows(ErrorCode.NUMERIC_VALUE_OUT_OF_RANGE_1) { - @Override - public void test() { - vd.convertPrecision(10, false); - } - }; + v = ValueArray.get(new Value[] { ValueVarchar.get("hello"), ValueVarchar.get("world") }, null); + TypeInfo typeInfo = TypeInfo.getTypeInfo(Value.ARRAY, 1L, 0, TypeInfo.TYPE_VARCHAR); + assertEquals(2, v.getType().getPrecision()); + assertEquals(1, v.castTo(typeInfo, null).getType().getPrecision()); + v = ValueArray.get(new Value[]{ValueVarchar.get(""), ValueVarchar.get("")}, null); + assertEquals(2, v.getType().getPrecision()); + assertEquals("ARRAY ['']", v.castTo(typeInfo, null).toString()); - v = ValueLobDb.createSmallLob(Value.CLOB, spaces.getBytes(), 100); - assertEquals(100, v.getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getString().length()); - assertEquals(" ", v.convertPrecision(10, false).getString()); - assertEquals(10, v.convertPrecision(10, true).getPrecision()); - - v = ValueLobDb.createSmallLob(Value.BLOB, spaces.getBytes(), 100); - assertEquals(100, v.getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getPrecision()); - assertEquals(10, v.convertPrecision(10, 
false).getBytes().length); - assertEquals(32, v.convertPrecision(10, false).getBytes()[9]); - assertEquals(10, v.convertPrecision(10, true).getPrecision()); - - ResultSet rs = new SimpleResultSet(); - v = ValueResultSet.get(rs); - assertEquals(Integer.MAX_VALUE, v.getPrecision()); - assertEquals(Integer.MAX_VALUE, v.convertPrecision(10, false).getPrecision()); - assertTrue(rs == v.convertPrecision(10, false).getObject()); - assertFalse(rs == v.convertPrecision(10, true).getObject()); - assertEquals(Integer.MAX_VALUE, v.convertPrecision(10, true).getPrecision()); - - v = ValueString.get(spaces); - assertEquals(100, v.getPrecision()); - assertEquals(10, v.convertPrecision(10, false).getPrecision()); - assertEquals(" ", v.convertPrecision(10, false).getString()); - assertEquals(" ", v.convertPrecision(10, true).getString()); + v = ValueVarbinary.get(spaces.getBytes()); + typeInfo = TypeInfo.getTypeInfo(Value.VARBINARY, 10L, 0, null); + assertEquals(100, v.getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getBytes().length); + assertEquals(32, v.castTo(typeInfo, null).getBytes()[9]); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); - } + v = ValueClob.createSmall(spaces.getBytes(), 100); + typeInfo = TypeInfo.getTypeInfo(Value.CLOB, 10L, 0, null); + assertEquals(100, v.getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getString().length()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + + v = ValueBlob.createSmall(spaces.getBytes()); + typeInfo = TypeInfo.getTypeInfo(Value.BLOB, 10L, 0, null); + assertEquals(100, v.getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getBytes().length); + 
assertEquals(32, v.castTo(typeInfo, null).getBytes()[9]); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + + v = ValueVarchar.get(spaces); + typeInfo = TypeInfo.getTypeInfo(Value.VARCHAR, 10L, 0, null); + assertEquals(100, v.getType().getPrecision()); + assertEquals(10, v.castTo(typeInfo, null).getType().getPrecision()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); + assertEquals(" ", v.castTo(typeInfo, null).getString()); - private void testValueResultSet() throws SQLException { - SimpleResultSet rs = new SimpleResultSet(); - rs.setAutoClose(false); - rs.addColumn("ID", Types.INTEGER, 0, 0); - rs.addColumn("NAME", Types.VARCHAR, 255, 0); - rs.addRow(1, "Hello"); - rs.addRow(2, "World"); - rs.addRow(3, "Peace"); - - ValueResultSet v; - v = ValueResultSet.get(rs); - assertTrue(rs == v.getObject()); - - v = ValueResultSet.getCopy(rs, 2); - assertEquals(0, v.hashCode()); - assertEquals(Integer.MAX_VALUE, v.getDisplaySize()); - assertEquals(Integer.MAX_VALUE, v.getPrecision()); - assertEquals(0, v.getScale()); - assertEquals("", v.getSQL()); - assertEquals(Value.RESULT_SET, v.getType()); - assertEquals("((1, Hello), (2, World))", v.getString()); - rs.beforeFirst(); - ValueResultSet v2 = ValueResultSet.getCopy(rs, 2); - assertTrue(v.equals(v)); - assertFalse(v.equals(v2)); - rs.beforeFirst(); - - ResultSet rs2 = v.getResultSet(); - rs2.next(); - rs.next(); - assertEquals(rs.getInt(1), rs2.getInt(1)); - assertEquals(rs.getString(2), rs2.getString(2)); - rs2.next(); - rs.next(); - assertEquals(rs.getInt(1), rs2.getInt(1)); - assertEquals(rs.getString(2), rs2.getString(2)); - assertFalse(rs2.next()); - assertTrue(rs.next()); } private void testDataType() { - testDataType(Value.NULL, null); - testDataType(Value.NULL, Void.class); - testDataType(Value.NULL, void.class); - testDataType(Value.ARRAY, String[].class); - testDataType(Value.STRING, String.class); - testDataType(Value.INT, Integer.class); - testDataType(Value.LONG, Long.class); 
- testDataType(Value.BOOLEAN, Boolean.class); - testDataType(Value.DOUBLE, Double.class); - testDataType(Value.BYTE, Byte.class); - testDataType(Value.SHORT, Short.class); - testDataType(Value.FLOAT, Float.class); - testDataType(Value.BYTES, byte[].class); - testDataType(Value.UUID, UUID.class); - testDataType(Value.NULL, Void.class); - testDataType(Value.DECIMAL, BigDecimal.class); - testDataType(Value.RESULT_SET, ResultSet.class); - testDataType(Value.BLOB, Value.ValueBlob.class); - testDataType(Value.CLOB, Value.ValueClob.class); - testDataType(Value.DATE, Date.class); - testDataType(Value.TIME, Time.class); - testDataType(Value.TIMESTAMP, Timestamp.class); - testDataType(Value.TIMESTAMP, java.util.Date.class); - testDataType(Value.CLOB, java.io.Reader.class); - testDataType(Value.CLOB, java.sql.Clob.class); - testDataType(Value.BLOB, java.io.InputStream.class); - testDataType(Value.BLOB, java.sql.Blob.class); - testDataType(Value.ARRAY, Object[].class); - testDataType(Value.JAVA_OBJECT, StringBuffer.class); + testDataType(TypeInfo.TYPE_NULL, null); + testDataType(TypeInfo.TYPE_NULL, Void.class); + testDataType(TypeInfo.TYPE_NULL, void.class); + testDataType(TypeInfo.getTypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, TypeInfo.TYPE_VARCHAR), String[].class); + testDataType(TypeInfo.TYPE_VARCHAR, String.class); + testDataType(TypeInfo.TYPE_INTEGER, Integer.class); + testDataType(TypeInfo.TYPE_BIGINT, Long.class); + testDataType(TypeInfo.TYPE_BOOLEAN, Boolean.class); + testDataType(TypeInfo.TYPE_DOUBLE, Double.class); + testDataType(TypeInfo.TYPE_TINYINT, Byte.class); + testDataType(TypeInfo.TYPE_SMALLINT, Short.class); + testDataType(TypeInfo.TYPE_REAL, Float.class); + testDataType(TypeInfo.TYPE_VARBINARY, byte[].class); + testDataType(TypeInfo.TYPE_UUID, UUID.class); + testDataType(TypeInfo.TYPE_NULL, Void.class); + testDataType(TypeInfo.TYPE_NUMERIC_FLOATING_POINT, BigDecimal.class); + testDataType(TypeInfo.TYPE_DATE, Date.class); + testDataType(TypeInfo.TYPE_TIME, 
Time.class); + testDataType(TypeInfo.TYPE_TIMESTAMP, Timestamp.class); + testDataType(TypeInfo.TYPE_TIMESTAMP, java.util.Date.class); + testDataType(TypeInfo.TYPE_CLOB, java.io.Reader.class); + testDataType(TypeInfo.TYPE_CLOB, java.sql.Clob.class); + testDataType(TypeInfo.TYPE_BLOB, java.io.InputStream.class); + testDataType(TypeInfo.TYPE_BLOB, java.sql.Blob.class); + testDataType(TypeInfo.getTypeInfo(Value.ARRAY, Integer.MAX_VALUE, 0, TypeInfo.TYPE_JAVA_OBJECT), + Object[].class); + testDataType(TypeInfo.TYPE_JAVA_OBJECT, StringBuffer.class); } - private void testDataType(int type, Class clazz) { - assertEquals(type, DataType.getTypeFromClass(clazz)); + private void testDataType(TypeInfo type, Class clazz) { + assertEquals(type, ValueToObjectConverter2.classToType(clazz)); } private void testDouble(boolean useFloat) { @@ -202,22 +201,74 @@ private void testDouble(boolean useFloat) { Double.POSITIVE_INFINITY, Double.NaN }; + int[] signum = { + -1, + -1, + 0, + 1, + 1, + 0 + }; Value[] values = new Value[d.length]; for (int i = 0; i < d.length; i++) { - Value v = useFloat ? (Value) ValueFloat.get((float) d[i]) + Value v = useFloat ? (Value) ValueReal.get((float) d[i]) : (Value) ValueDouble.get(d[i]); values[i] = v; - assertTrue(values[i].compareTypeSave(values[i], null) == 0); + assertTrue(values[i].compareTypeSafe(values[i], null, null) == 0); assertTrue(v.equals(v)); - assertEquals(i < 2 ? -1 : i > 2 ? 
1 : 0, v.getSignum()); + assertEquals(signum[i], v.getSignum()); } for (int i = 0; i < d.length - 1; i++) { - assertTrue(values[i].compareTypeSave(values[i+1], null) < 0); - assertTrue(values[i + 1].compareTypeSave(values[i], null) > 0); - assertTrue(!values[i].equals(values[i+1])); + assertTrue(values[i].compareTypeSafe(values[i+1], null, null) < 0); + assertTrue(values[i + 1].compareTypeSafe(values[i], null, null) > 0); + assertFalse(values[i].equals(values[i+1])); } } + private void testTimestamp() { + ValueTimestamp valueTs = ValueTimestamp.parse("2000-01-15 10:20:30.333222111", null); + Timestamp ts = Timestamp.valueOf("2000-01-15 10:20:30.333222111"); + assertEquals(ts.toString(), valueTs.getString()); + assertEquals(ts, LegacyDateTimeUtils.toTimestamp(null, null, valueTs)); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone("Europe/Berlin")); + c.set(2018, 02, 25, 1, 59, 00); + c.set(Calendar.MILLISECOND, 123); + long expected = c.getTimeInMillis(); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 01:59:00.123123123 Europe/Berlin", null)); + assertEquals(expected, ts.getTime()); + assertEquals(123123123, ts.getNanos()); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 01:59:00.123123123+01", null)); + assertEquals(expected, ts.getTime()); + assertEquals(123123123, ts.getNanos()); + expected += 60000; // 1 minute + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 03:00:00.123123123 Europe/Berlin", null)); + assertEquals(expected, ts.getTime()); + assertEquals(123123123, ts.getNanos()); + ts = LegacyDateTimeUtils.toTimestamp(null, null, + ValueTimestamp.parse("2018-03-25 03:00:00.123123123+02", null)); + assertEquals(expected, ts.getTime()); + assertEquals(123123123, ts.getNanos()); + } + + private void testArray() { + ValueArray src = ValueArray.get( + new Value[] {ValueVarchar.get("1"), ValueVarchar.get("22"), ValueVarchar.get("333")}, null); + 
assertEquals(3, src.getType().getPrecision()); + assertSame(src, src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 3L, 0, TypeInfo.TYPE_VARCHAR), null)); + ValueArray exp = ValueArray.get( + new Value[] {ValueVarchar.get("1"), ValueVarchar.get("22")}, null); + Value got = src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 2L, 0, TypeInfo.TYPE_VARCHAR), null); + assertEquals(exp, got); + assertEquals(Value.VARCHAR, ((ValueArray) got).getComponentType().getValueType()); + exp = ValueArray.get(TypeInfo.TYPE_VARCHAR, new Value[0], null); + got = src.castTo(TypeInfo.getTypeInfo(Value.ARRAY, 0L, 0, TypeInfo.TYPE_VARCHAR), null); + assertEquals(exp, got); + assertEquals(Value.VARCHAR, ((ValueArray) got).getComponentType().getValueType()); + } + private void testUUID() { long maxHigh = 0, maxLow = 0, minHigh = -1L, minLow = -1L; for (int i = 0; i < 100; i++) { @@ -231,40 +282,271 @@ private void testUUID() { assertEquals("ffffffff-ffff-4fff-bfff-ffffffffffff", max.getString()); ValueUuid min = ValueUuid.get(minHigh, minLow); assertEquals("00000000-0000-4000-8000-000000000000", min.getString()); + + // Test conversion from ValueJavaObject to ValueUuid + String uuidStr = "12345678-1234-4321-8765-123456789012"; + + UUID origUUID = UUID.fromString(uuidStr); + ValueJavaObject valObj = ValueJavaObject.getNoCopy(JdbcUtils.serialize(origUUID, null)); + ValueUuid valUUID = valObj.convertToUuid(); + assertEquals(uuidStr, valUUID.getString()); + assertEquals(origUUID, valUUID.getUuid()); + + ValueJavaObject voString = ValueJavaObject.getNoCopy(JdbcUtils.serialize( + new String("This is not a ValueUuid object"), null)); + assertThrows(ErrorCode.DESERIALIZATION_FAILED_1, () -> voString.convertToUuid()); } private void testModulusDouble() { final ValueDouble vd1 = ValueDouble.get(12); - new AssertThrows(ErrorCode.DIVISION_BY_ZERO_1) { @Override - public void test() { - vd1.modulus(ValueDouble.get(0)); - }}; + assertThrows(ErrorCode.DIVISION_BY_ZERO_1, () -> vd1.modulus(ValueDouble.ZERO)); 
ValueDouble vd2 = ValueDouble.get(10); ValueDouble vd3 = vd1.modulus(vd2); assertEquals(2, vd3.getDouble()); } private void testModulusDecimal() { - final ValueDecimal vd1 = ValueDecimal.get(new BigDecimal(12)); - new AssertThrows(ErrorCode.DIVISION_BY_ZERO_1) { @Override - public void test() { - vd1.modulus(ValueDecimal.get(new BigDecimal(0))); - }}; - ValueDecimal vd2 = ValueDecimal.get(new BigDecimal(10)); - ValueDecimal vd3 = vd1.modulus(vd2); + final ValueNumeric vd1 = ValueNumeric.get(new BigDecimal(12)); + assertThrows(ErrorCode.DIVISION_BY_ZERO_1, () -> vd1.modulus(ValueNumeric.ZERO)); + ValueNumeric vd2 = ValueNumeric.get(new BigDecimal(10)); + Value vd3 = vd1.modulus(vd2); assertEquals(2, vd3.getDouble()); } private void testModulusOperator() throws SQLException { - Connection conn = getConnection("modulus"); - try { + try (Connection conn = getConnection("modulus")) { ResultSet rs = conn.createStatement().executeQuery("CALL 12 % 10"); rs.next(); assertEquals(2, rs.getInt(1)); } finally { - conn.close(); deleteDb("modulus"); } } + private void testLobComparison() throws SQLException { + assertEquals(0, testLobComparisonImpl(null, Value.BLOB, 0, 0, 0, 0)); + assertEquals(0, testLobComparisonImpl(null, Value.CLOB, 0, 0, 0, 0)); + assertEquals(-1, testLobComparisonImpl(null, Value.BLOB, 1, 1, 200, 210)); + assertEquals(-1, testLobComparisonImpl(null, Value.CLOB, 1, 1, 'a', 'b')); + assertEquals(1, testLobComparisonImpl(null, Value.BLOB, 512, 512, 210, 200)); + assertEquals(1, testLobComparisonImpl(null, Value.CLOB, 512, 512, 'B', 'A')); + try (Connection c = DriverManager.getConnection("jdbc:h2:mem:testValue")) { + Database dh = ((SessionLocal) ((JdbcConnection) c).getSession()).getDatabase(); + assertEquals(1, testLobComparisonImpl(dh, Value.BLOB, 1_024, 1_024, 210, 200)); + assertEquals(1, testLobComparisonImpl(dh, Value.CLOB, 1_024, 1_024, 'B', 'A')); + assertEquals(-1, testLobComparisonImpl(dh, Value.BLOB, 10_000, 10_000, 200, 210)); + assertEquals(-1, 
testLobComparisonImpl(dh, Value.CLOB, 10_000, 10_000, 'a', 'b')); + assertEquals(0, testLobComparisonImpl(dh, Value.BLOB, 10_000, 10_000, 0, 0)); + assertEquals(0, testLobComparisonImpl(dh, Value.CLOB, 10_000, 10_000, 0, 0)); + assertEquals(-1, testLobComparisonImpl(dh, Value.BLOB, 1_000, 10_000, 0, 0)); + assertEquals(-1, testLobComparisonImpl(dh, Value.CLOB, 1_000, 10_000, 0, 0)); + assertEquals(1, testLobComparisonImpl(dh, Value.BLOB, 10_000, 1_000, 0, 0)); + assertEquals(1, testLobComparisonImpl(dh, Value.CLOB, 10_000, 1_000, 0, 0)); + } + } + + private static int testLobComparisonImpl(DataHandler dh, int type, int size1, int size2, int suffix1, + int suffix2) { + byte[] bytes1 = new byte[size1]; + byte[] bytes2 = new byte[size2]; + if (size1 > 0) { + bytes1[size1 - 1] = (byte) suffix1; + } + if (size2 > 0) { + bytes2[size2 - 1] = (byte) suffix2; + } + Value lob1 = createLob(dh, type, bytes1); + Value lob2 = createLob(dh, type, bytes2); + return lob1.compareTypeSafe(lob2, null, null); + } + + private static Value createLob(DataHandler dh, int type, byte[] bytes) { + if (dh == null) { + return type == Value.BLOB ? 
ValueBlob.createSmall(bytes) : ValueClob.createSmall(bytes); + } + ByteArrayInputStream in = new ByteArrayInputStream(bytes); + if (type == Value.BLOB) { + return dh.getLobStorage().createBlob(in, -1); + } else { + return dh.getLobStorage().createClob(new InputStreamReader(in, StandardCharsets.UTF_8), -1); + } + } + + private void testTypeInfo() { + testTypeInfoCheck(Value.UNKNOWN, -1, -1, -1, TypeInfo.TYPE_UNKNOWN); + assertThrows(ErrorCode.UNKNOWN_DATA_TYPE_1, () -> TypeInfo.getTypeInfo(Value.UNKNOWN)); + + testTypeInfoCheck(Value.NULL, 1, 0, 4, TypeInfo.TYPE_NULL, TypeInfo.getTypeInfo(Value.NULL)); + + testTypeInfoCheck(Value.BOOLEAN, 1, 0, 5, TypeInfo.TYPE_BOOLEAN, TypeInfo.getTypeInfo(Value.BOOLEAN)); + + testTypeInfoCheck(Value.TINYINT, 8, 0, 4, TypeInfo.TYPE_TINYINT, TypeInfo.getTypeInfo(Value.TINYINT)); + testTypeInfoCheck(Value.SMALLINT, 16, 0, 6, TypeInfo.TYPE_SMALLINT, TypeInfo.getTypeInfo(Value.SMALLINT)); + testTypeInfoCheck(Value.INTEGER, 32, 0, 11, TypeInfo.TYPE_INTEGER, TypeInfo.getTypeInfo(Value.INTEGER)); + testTypeInfoCheck(Value.BIGINT, 64, 0, 20, TypeInfo.TYPE_BIGINT, TypeInfo.getTypeInfo(Value.BIGINT)); + + testTypeInfoCheck(Value.REAL, 24, 0, 15, TypeInfo.TYPE_REAL, TypeInfo.getTypeInfo(Value.REAL)); + testTypeInfoCheck(Value.DOUBLE, 53, 0, 24, TypeInfo.TYPE_DOUBLE, TypeInfo.getTypeInfo(Value.DOUBLE)); + testTypeInfoCheck(Value.NUMERIC, MAX_NUMERIC_PRECISION, MAX_NUMERIC_PRECISION / 2, MAX_NUMERIC_PRECISION + 2, + TypeInfo.TYPE_NUMERIC_FLOATING_POINT); + + testTypeInfoCheck(Value.TIME, 18, 9, 18, TypeInfo.TYPE_TIME, TypeInfo.getTypeInfo(Value.TIME)); + for (int s = 0; s <= 9; s++) { + int d = s > 0 ? 
s + 9 : 8; + testTypeInfoCheck(Value.TIME, d, s, d, TypeInfo.getTypeInfo(Value.TIME, 0, s, null)); + } + testTypeInfoCheck(Value.DATE, 10, 0, 10, TypeInfo.TYPE_DATE, TypeInfo.getTypeInfo(Value.DATE)); + testTypeInfoCheck(Value.TIMESTAMP, 29, 9, 29, TypeInfo.TYPE_TIMESTAMP, TypeInfo.getTypeInfo(Value.TIMESTAMP)); + for (int s = 0; s <= 9; s++) { + int d = s > 0 ? s + 20 : 19; + testTypeInfoCheck(Value.TIMESTAMP, d, s, d, TypeInfo.getTypeInfo(Value.TIMESTAMP, 0, s, null)); + } + testTypeInfoCheck(Value.TIMESTAMP_TZ, 35, 9, 35, TypeInfo.TYPE_TIMESTAMP_TZ, + TypeInfo.getTypeInfo(Value.TIMESTAMP_TZ)); + for (int s = 0; s <= 9; s++) { + int d = s > 0 ? s + 26 : 25; + testTypeInfoCheck(Value.TIMESTAMP_TZ, d, s, d, TypeInfo.getTypeInfo(Value.TIMESTAMP_TZ, 0, s, null)); + } + + testTypeInfoCheck(Value.BINARY, 1, 0, 2, TypeInfo.getTypeInfo(Value.BINARY)); + testTypeInfoCheck(Value.VARBINARY, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH * 2, + TypeInfo.getTypeInfo(Value.VARBINARY)); + testTypeInfoCheck(Value.BLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.getTypeInfo(Value.BLOB)); + testTypeInfoCheck(Value.CLOB, Long.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.getTypeInfo(Value.CLOB)); + + testTypeInfoCheck(Value.VARCHAR, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_VARCHAR, + TypeInfo.getTypeInfo(Value.VARCHAR)); + testTypeInfoCheck(Value.CHAR, 1, 0, 1, TypeInfo.getTypeInfo(Value.CHAR)); + testTypeInfoCheck(Value.VARCHAR_IGNORECASE, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, + TypeInfo.getTypeInfo(Value.VARCHAR_IGNORECASE)); + + testTypeInfoCheck(Value.ARRAY, MAX_ARRAY_CARDINALITY, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ARRAY_UNKNOWN, + TypeInfo.getTypeInfo(Value.ARRAY)); + testTypeInfoCheck(Value.ROW, Integer.MAX_VALUE, 0, Integer.MAX_VALUE, TypeInfo.TYPE_ROW_EMPTY, + TypeInfo.getTypeInfo(Value.ROW)); + + testTypeInfoCheck(Value.JAVA_OBJECT, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH * 2, TypeInfo.TYPE_JAVA_OBJECT, + TypeInfo.getTypeInfo(Value.JAVA_OBJECT)); + 
testTypeInfoCheck(Value.UUID, 16, 0, 36, TypeInfo.TYPE_UUID, TypeInfo.getTypeInfo(Value.UUID)); + testTypeInfoCheck(Value.GEOMETRY, MAX_STRING_LENGTH, 0, Integer.MAX_VALUE, TypeInfo.TYPE_GEOMETRY, + TypeInfo.getTypeInfo(Value.GEOMETRY)); + testTypeInfoCheck(Value.ENUM, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_ENUM_UNDEFINED, + TypeInfo.getTypeInfo(Value.ENUM)); + + testTypeInfoInterval1(Value.INTERVAL_YEAR); + testTypeInfoInterval1(Value.INTERVAL_MONTH); + testTypeInfoInterval1(Value.INTERVAL_DAY); + testTypeInfoInterval1(Value.INTERVAL_HOUR); + testTypeInfoInterval1(Value.INTERVAL_MINUTE); + testTypeInfoInterval2(Value.INTERVAL_SECOND); + testTypeInfoInterval1(Value.INTERVAL_YEAR_TO_MONTH); + testTypeInfoInterval1(Value.INTERVAL_DAY_TO_HOUR); + testTypeInfoInterval1(Value.INTERVAL_DAY_TO_MINUTE); + testTypeInfoInterval2(Value.INTERVAL_DAY_TO_SECOND); + testTypeInfoInterval1(Value.INTERVAL_HOUR_TO_MINUTE); + testTypeInfoInterval2(Value.INTERVAL_HOUR_TO_SECOND); + testTypeInfoInterval2(Value.INTERVAL_MINUTE_TO_SECOND); + + testTypeInfoCheck(Value.JSON, MAX_STRING_LENGTH, 0, MAX_STRING_LENGTH, TypeInfo.TYPE_JSON, + TypeInfo.getTypeInfo(Value.JSON)); + } + + private void testTypeInfoInterval1(int type) { + testTypeInfoCheck(type, 18, 0, ValueInterval.getDisplaySize(type, 18, 0), TypeInfo.getTypeInfo(type)); + for (int p = 1; p <= 18; p++) { + testTypeInfoCheck(type, p, 0, ValueInterval.getDisplaySize(type, p, 0), + TypeInfo.getTypeInfo(type, p, 0, null)); + } + } + + private void testTypeInfoInterval2(int type) { + testTypeInfoCheck(type, 18, 9, ValueInterval.getDisplaySize(type, 18, 9), TypeInfo.getTypeInfo(type)); + for (int p = 1; p <= 18; p++) { + for (int s = 0; s <= 9; s++) { + testTypeInfoCheck(type, p, s, ValueInterval.getDisplaySize(type, p, s), + TypeInfo.getTypeInfo(type, p, s, null)); + } + } + } + + private void testTypeInfoCheck(int valueType, long precision, int scale, int displaySize, TypeInfo... 
typeInfos) { + for (TypeInfo typeInfo : typeInfos) { + testTypeInfoCheck(valueType, precision, scale, displaySize, typeInfo); + } + } + + private void testTypeInfoCheck(int valueType, long precision, int scale, int displaySize, TypeInfo typeInfo) { + assertEquals(valueType, typeInfo.getValueType()); + assertEquals(precision, typeInfo.getPrecision()); + assertEquals(scale, typeInfo.getScale()); + assertEquals(displaySize, typeInfo.getDisplaySize()); + } + + private void testH2Type() { + assertEquals(Value.CHAR, (int) H2Type.CHAR.getVendorTypeNumber()); + assertEquals(Value.VARCHAR, (int) H2Type.VARCHAR.getVendorTypeNumber()); + assertEquals(Value.CLOB, (int) H2Type.CLOB.getVendorTypeNumber()); + assertEquals(Value.VARCHAR_IGNORECASE, (int) H2Type.VARCHAR_IGNORECASE.getVendorTypeNumber()); + assertEquals(Value.BINARY, (int) H2Type.BINARY.getVendorTypeNumber()); + assertEquals(Value.VARBINARY, (int) H2Type.VARBINARY.getVendorTypeNumber()); + assertEquals(Value.BLOB, (int) H2Type.BLOB.getVendorTypeNumber()); + assertEquals(Value.BOOLEAN, (int) H2Type.BOOLEAN.getVendorTypeNumber()); + assertEquals(Value.TINYINT, (int) H2Type.TINYINT.getVendorTypeNumber()); + assertEquals(Value.SMALLINT, (int) H2Type.SMALLINT.getVendorTypeNumber()); + assertEquals(Value.INTEGER, (int) H2Type.INTEGER.getVendorTypeNumber()); + assertEquals(Value.BIGINT, (int) H2Type.BIGINT.getVendorTypeNumber()); + assertEquals(Value.NUMERIC, (int) H2Type.NUMERIC.getVendorTypeNumber()); + assertEquals(Value.REAL, (int) H2Type.REAL.getVendorTypeNumber()); + assertEquals(Value.DOUBLE, (int) H2Type.DOUBLE_PRECISION.getVendorTypeNumber()); + assertEquals(Value.DECFLOAT, (int) H2Type.DECFLOAT.getVendorTypeNumber()); + assertEquals(Value.DATE, (int) H2Type.DATE.getVendorTypeNumber()); + assertEquals(Value.TIME, (int) H2Type.TIME.getVendorTypeNumber()); + assertEquals(Value.TIME_TZ, (int) H2Type.TIME_WITH_TIME_ZONE.getVendorTypeNumber()); + assertEquals(Value.TIMESTAMP, (int) 
H2Type.TIMESTAMP.getVendorTypeNumber()); + assertEquals(Value.TIMESTAMP_TZ, (int) H2Type.TIMESTAMP_WITH_TIME_ZONE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_YEAR, (int) H2Type.INTERVAL_YEAR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MONTH, (int) H2Type.INTERVAL_MONTH.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY, (int) H2Type.INTERVAL_DAY.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR, (int) H2Type.INTERVAL_HOUR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MINUTE, (int) H2Type.INTERVAL_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_SECOND, (int) H2Type.INTERVAL_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_YEAR_TO_MONTH, (int) H2Type.INTERVAL_YEAR_TO_MONTH.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_HOUR, (int) H2Type.INTERVAL_DAY_TO_HOUR.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_MINUTE, (int) H2Type.INTERVAL_DAY_TO_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_DAY_TO_SECOND, (int) H2Type.INTERVAL_DAY_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR_TO_MINUTE, (int) H2Type.INTERVAL_HOUR_TO_MINUTE.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_HOUR_TO_SECOND, (int) H2Type.INTERVAL_HOUR_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.INTERVAL_MINUTE_TO_SECOND, (int) H2Type.INTERVAL_MINUTE_TO_SECOND.getVendorTypeNumber()); + assertEquals(Value.JAVA_OBJECT, (int) H2Type.JAVA_OBJECT.getVendorTypeNumber()); + assertEquals(Value.ENUM, (int) H2Type.ENUM.getVendorTypeNumber()); + assertEquals(Value.GEOMETRY, (int) H2Type.GEOMETRY.getVendorTypeNumber()); + assertEquals(Value.JSON, (int) H2Type.JSON.getVendorTypeNumber()); + assertEquals(Value.UUID, (int) H2Type.UUID.getVendorTypeNumber()); + assertEquals(Value.ARRAY, (int) H2Type.array(H2Type.VARCHAR).getVendorTypeNumber()); + assertEquals(Value.ROW, (int) H2Type.row(H2Type.VARCHAR).getVendorTypeNumber()); + } + + private void testHigherType() { + 
testHigherTypeNumeric(15L, 6, 10L, 1, 5L, 6); + testHigherTypeNumeric(15L, 6, 5L, 6, 10L, 1); + TypeInfo intArray10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, TypeInfo.TYPE_INTEGER); + TypeInfo bigintArray1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, TypeInfo.TYPE_BIGINT); + TypeInfo bigintArray10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, TypeInfo.TYPE_BIGINT); + assertEquals(bigintArray10, TypeInfo.getHigherType(intArray10, bigintArray1)); + TypeInfo intArray10Array1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, intArray10); + TypeInfo bigintArray1Array10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, bigintArray1); + TypeInfo bigintArray10Array10 = TypeInfo.getTypeInfo(Value.ARRAY, 10, 0, bigintArray10); + assertEquals(bigintArray10Array10, TypeInfo.getHigherType(intArray10Array1, bigintArray1Array10)); + assertEquals(bigintArray10Array10, TypeInfo.getHigherType(intArray10, bigintArray1Array10)); + TypeInfo bigintArray10Array1 = TypeInfo.getTypeInfo(Value.ARRAY, 1, 0, bigintArray10); + assertEquals(bigintArray10Array1, TypeInfo.getHigherType(intArray10Array1, bigintArray1)); + } + + private void testHigherTypeNumeric(long expectedPrecision, int expectedScale, long precision1, int scale1, + long precision2, int scale2) { + assertEquals(TypeInfo.getTypeInfo(Value.NUMERIC, expectedPrecision, expectedScale, null), + TypeInfo.getHigherType(TypeInfo.getTypeInfo(Value.NUMERIC, precision1, scale1, null), + TypeInfo.getTypeInfo(Value.NUMERIC, precision2, scale2, null))); + } + } diff --git a/h2/src/test/org/h2/test/unit/TestValueHashMap.java b/h2/src/test/org/h2/test/unit/TestValueHashMap.java deleted file mode 100644 index 5c826d6c02..0000000000 --- a/h2/src/test/org/h2/test/unit/TestValueHashMap.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.test.unit; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Random; - -import org.h2.api.JavaObjectSerializer; -import org.h2.store.DataHandler; -import org.h2.store.FileStore; -import org.h2.store.LobStorageBackend; -import org.h2.test.TestBase; -import org.h2.util.New; -import org.h2.util.SmallLRUCache; -import org.h2.util.TempFileDeleter; -import org.h2.util.ValueHashMap; -import org.h2.value.CompareMode; -import org.h2.value.Value; -import org.h2.value.ValueDouble; -import org.h2.value.ValueInt; - -/** - * Tests the value hash map. - */ -public class TestValueHashMap extends TestBase implements DataHandler { - - CompareMode compareMode = CompareMode.getInstance(null, 0); - - /** - * Run just this test. - * - * @param a ignored - */ - public static void main(String... a) throws Exception { - TestBase.createCaller().init().test(); - } - - @Override - public void test() { - testNotANumber(); - testRandomized(); - } - - private void testNotANumber() { - ValueHashMap map = ValueHashMap.newInstance(); - for (int i = 1; i < 100; i++) { - double d = Double.longBitsToDouble(0x7ff0000000000000L | i); - ValueDouble v = ValueDouble.get(d); - map.put(v, null); - assertEquals(1, map.size()); - } - } - - private void testRandomized() { - ValueHashMap map = ValueHashMap.newInstance(); - HashMap hash = New.hashMap(); - Random random = new Random(1); - Comparator vc = new Comparator() { - @Override - public int compare(Value v1, Value v2) { - return v1.compareTo(v2, compareMode); - } - }; - for (int i = 0; i < 10000; i++) { - int op = random.nextInt(10); - Value key = ValueInt.get(random.nextInt(100)); - Value value = ValueInt.get(random.nextInt(100)); - switch (op) { - case 0: - map.put(key, value); - hash.put(key, value); - break; - case 1: - map.remove(key); - hash.remove(key); - break; - case 2: - Value v1 = map.get(key); - Value v2 = 
hash.get(key); - assertTrue(v1 == null ? v2 == null : v1.equals(v2)); - break; - case 3: { - ArrayList a1 = map.keys(); - ArrayList a2 = New.arrayList(hash.keySet()); - assertEquals(a1.size(), a2.size()); - Collections.sort(a1, vc); - Collections.sort(a2, vc); - for (int j = 0; j < a1.size(); j++) { - assertTrue(a1.get(j).equals(a2.get(j))); - } - break; - } - case 4: - ArrayList a1 = map.values(); - ArrayList a2 = New.arrayList(hash.values()); - assertEquals(a1.size(), a2.size()); - Collections.sort(a1, vc); - Collections.sort(a2, vc); - for (int j = 0; j < a1.size(); j++) { - assertTrue(a1.get(j).equals(a2.get(j))); - } - break; - default: - } - } - } - - @Override - public String getDatabasePath() { - return null; - } - - @Override - public FileStore openFile(String name, String mode, boolean mustExist) { - return null; - } - - @Override - public void checkPowerOff() { - // nothing to do - } - - @Override - public void checkWritingAllowed() { - // nothing to do - } - - @Override - public int getMaxLengthInplaceLob() { - return 0; - } - - @Override - public String getLobCompressionAlgorithm(int type) { - return null; - } - - @Override - public Object getLobSyncObject() { - return this; - } - - @Override - public SmallLRUCache getLobFileListCache() { - return null; - } - - @Override - public TempFileDeleter getTempFileDeleter() { - return TempFileDeleter.getInstance(); - } - - @Override - public LobStorageBackend getLobStorage() { - return null; - } - - @Override - public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, - int off, int length) { - return -1; - } - - @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; - } -} diff --git a/h2/src/test/org/h2/test/unit/TestValueMemory.java b/h2/src/test/org/h2/test/unit/TestValueMemory.java index 46bcb7c65b..96ac632472 100644 --- a/h2/src/test/org/h2/test/unit/TestValueMemory.java +++ b/h2/src/test/org/h2/test/unit/TestValueMemory.java @@ -1,52 +1,63 @@ /* - * Copyright 
2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.unit; import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; import java.io.StringReader; import java.math.BigDecimal; import java.sql.SQLException; import java.util.ArrayList; import java.util.IdentityHashMap; import java.util.Random; - -import org.h2.api.JavaObjectSerializer; +import org.h2.api.IntervalQualifier; import org.h2.engine.Constants; import org.h2.store.DataHandler; import org.h2.store.FileStore; -import org.h2.store.LobStorageFrontend; +import org.h2.store.LobStorageInterface; import org.h2.test.TestBase; import org.h2.test.utils.MemoryFootprint; -import org.h2.tools.SimpleResultSet; +import org.h2.util.DateTimeUtils; import org.h2.util.SmallLRUCache; import org.h2.util.TempFileDeleter; import org.h2.util.Utils; -import org.h2.value.DataType; +import org.h2.value.CompareMode; import org.h2.value.Value; import org.h2.value.ValueArray; +import org.h2.value.ValueBigint; +import org.h2.value.ValueBinary; +import org.h2.value.ValueBlob; import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; +import org.h2.value.ValueChar; +import org.h2.value.ValueClob; import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; +import org.h2.value.ValueDecfloat; import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; +import org.h2.value.ValueInteger; +import org.h2.value.ValueInterval; import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLong; +import org.h2.value.ValueJson; +import org.h2.value.ValueLob; import org.h2.value.ValueNull; -import org.h2.value.ValueResultSet; 
-import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueStringFixed; -import org.h2.value.ValueStringIgnoreCase; +import org.h2.value.ValueNumeric; +import org.h2.value.ValueReal; +import org.h2.value.ValueRow; +import org.h2.value.ValueSmallint; import org.h2.value.ValueTime; +import org.h2.value.ValueTimeTimeZone; import org.h2.value.ValueTimestamp; +import org.h2.value.ValueTimestampTimeZone; +import org.h2.value.ValueTinyint; import org.h2.value.ValueUuid; +import org.h2.value.ValueVarbinary; +import org.h2.value.ValueVarchar; +import org.h2.value.ValueVarcharIgnoreCase; /** * Tests the memory consumption of values. Values can estimate how much memory @@ -54,10 +65,14 @@ */ public class TestValueMemory extends TestBase implements DataHandler { + private static final long MIN_ABSOLUTE_DAY = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.MIN_DATE_VALUE); + + private static final long MAX_ABSOLUTE_DAY = DateTimeUtils.absoluteDayFromDateValue(DateTimeUtils.MAX_DATE_VALUE); + private final Random random = new Random(1); private final SmallLRUCache lobFileListCache = SmallLRUCache .newInstance(128); - private LobStorageFrontend lobStorage; + private LobStorageTest lobStorage; /** * Run just this test. @@ -68,34 +83,52 @@ public static void main(String... 
a) throws Exception { // run using -javaagent:ext/h2-1.2.139.jar TestBase test = TestBase.createCaller().init(); test.config.traceTest = true; - test.test(); + test.testFromMain(); } @Override public void test() throws SQLException { testCompare(); for (int i = 0; i < Value.TYPE_COUNT; i++) { + if (i == 23) { + // this used to be "TIMESTAMP UTC", which was a short-lived + // experiment + continue; + } + if (i == Value.ENUM) { + // TODO ENUM + continue; + } Value v = create(i); - String s = "type: " + v.getType() + + String s = "type: " + v.getValueType() + " calculated: " + v.getMemory() + " real: " + MemoryFootprint.getObjectSize(v) + " " + v.getClass().getName() + ": " + v.toString(); trace(s); } for (int i = 0; i < Value.TYPE_COUNT; i++) { + if (i == 23) { + // this used to be "TIMESTAMP UTC", which was a short-lived + // experiment + continue; + } + if (i == Value.ENUM) { + // TODO ENUM + continue; + } Value v = create(i); if (v == ValueNull.INSTANCE && i == Value.GEOMETRY) { // jts not in the classpath, OK continue; } - assertEquals(i, v.getType()); + assertEquals(i, v.getValueType()); testType(i); } } private void testCompare() { - ValueDecimal a = ValueDecimal.get(new BigDecimal("0.0")); - ValueDecimal b = ValueDecimal.get(new BigDecimal("-0.00")); + ValueNumeric a = ValueNumeric.get(new BigDecimal("0.0")); + ValueNumeric b = ValueNumeric.get(new BigDecimal("-0.00")); assertTrue(a.hashCode() != b.hashCode()); assertFalse(a.equals(b)); } @@ -104,7 +137,7 @@ private void testType(int type) throws SQLException { System.gc(); System.gc(); long first = Utils.getMemoryUsed(); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList<>(); long memory = 0; while (memory < 1000000) { Value v = create(type); @@ -112,7 +145,7 @@ private void testType(int type) throws SQLException { list.add(v); } Object[] array = list.toArray(); - IdentityHashMap map = new IdentityHashMap(); + IdentityHashMap map = new IdentityHashMap<>(); for (Object a : array) { map.put(a, 
a); } @@ -140,34 +173,41 @@ private Value create(int type) throws SQLException { case Value.NULL: return ValueNull.INSTANCE; case Value.BOOLEAN: - return ValueBoolean.get(false); - case Value.BYTE: - return ValueByte.get((byte) random.nextInt()); - case Value.SHORT: - return ValueShort.get((short) random.nextInt()); - case Value.INT: - return ValueInt.get(random.nextInt()); - case Value.LONG: - return ValueLong.get(random.nextLong()); - case Value.DECIMAL: - return ValueDecimal.get(new BigDecimal(random.nextInt())); + return ValueBoolean.FALSE; + case Value.TINYINT: + return ValueTinyint.get((byte) random.nextInt()); + case Value.SMALLINT: + return ValueSmallint.get((short) random.nextInt()); + case Value.INTEGER: + return ValueInteger.get(random.nextInt()); + case Value.BIGINT: + return ValueBigint.get(random.nextLong()); + case Value.NUMERIC: + return ValueNumeric.get(new BigDecimal(random.nextInt())); // + "12123344563456345634565234523451312312" case Value.DOUBLE: return ValueDouble.get(random.nextDouble()); - case Value.FLOAT: - return ValueFloat.get(random.nextFloat()); + case Value.REAL: + return ValueReal.get(random.nextFloat()); + case Value.DECFLOAT: + return ValueDecfloat.get(new BigDecimal(random.nextInt())); case Value.TIME: - return ValueTime.get(new java.sql.Time(random.nextLong())); + return ValueTime.fromNanos(randomTimeNanos()); + case Value.TIME_TZ: + return ValueTimeTimeZone.fromNanos(randomTimeNanos(), randomZoneOffset()); case Value.DATE: - return ValueDate.get(new java.sql.Date(random.nextLong())); + return ValueDate.fromDateValue(randomDateValue()); case Value.TIMESTAMP: - return ValueTimestamp.fromMillis(random.nextLong()); - case Value.BYTES: - return ValueBytes.get(randomBytes(random.nextInt(1000))); - case Value.STRING: - return ValueString.get(randomString(random.nextInt(100))); - case Value.STRING_IGNORECASE: - return ValueStringIgnoreCase.get(randomString(random.nextInt(100))); + return 
ValueTimestamp.fromDateValueAndNanos(randomDateValue(), randomTimeNanos()); + case Value.TIMESTAMP_TZ: + return ValueTimestampTimeZone.fromDateValueAndNanos( + randomDateValue(), randomTimeNanos(), randomZoneOffset()); + case Value.VARBINARY: + return ValueVarbinary.get(randomBytes(random.nextInt(1000))); + case Value.VARCHAR: + return ValueVarchar.get(randomString(random.nextInt(100))); + case Value.VARCHAR_IGNORECASE: + return ValueVarcharIgnoreCase.get(randomString(random.nextInt(100))); case Value.BLOB: { int len = (int) Math.abs(random.nextGaussian() * 10); byte[] data = randomBytes(len); @@ -178,33 +218,68 @@ private Value create(int type) throws SQLException { String s = randomString(len); return getLobStorage().createClob(new StringReader(s), len); } - case Value.ARRAY: { - int len = random.nextInt(20); - Value[] list = new Value[len]; - for (int i = 0; i < list.length; i++) { - list[i] = create(Value.STRING); - } - return ValueArray.get(list); - } - case Value.RESULT_SET: - return ValueResultSet.get(new SimpleResultSet()); + case Value.ARRAY: + return ValueArray.get(createArray(), null); + case Value.ROW: + return ValueRow.get(createArray()); case Value.JAVA_OBJECT: - return ValueJavaObject.getNoCopy(null, randomBytes(random.nextInt(100)), this); + return ValueJavaObject.getNoCopy(randomBytes(random.nextInt(100))); case Value.UUID: return ValueUuid.get(random.nextLong(), random.nextLong()); - case Value.STRING_FIXED: - return ValueStringFixed.get(randomString(random.nextInt(100))); + case Value.CHAR: + return ValueChar.get(randomString(random.nextInt(100))); case Value.GEOMETRY: - if (DataType.GEOMETRY_CLASS == null) { - return ValueNull.INSTANCE; - } - return ValueGeometry.get("POINT (" + random.nextInt(100) + " " + - random.nextInt(100) + ")"); + return ValueGeometry.get("POINT (" + random.nextInt(100) + ' ' + random.nextInt(100) + ')'); + case Value.INTERVAL_YEAR: + case Value.INTERVAL_MONTH: + case Value.INTERVAL_DAY: + case Value.INTERVAL_HOUR: + case 
Value.INTERVAL_MINUTE: + return ValueInterval.from(IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR), + random.nextBoolean(), random.nextInt(Integer.MAX_VALUE), 0); + case Value.INTERVAL_SECOND: + case Value.INTERVAL_DAY_TO_SECOND: + case Value.INTERVAL_HOUR_TO_SECOND: + case Value.INTERVAL_MINUTE_TO_SECOND: + return ValueInterval.from(IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR), + random.nextBoolean(), random.nextInt(Integer.MAX_VALUE), random.nextInt(1_000_000_000)); + case Value.INTERVAL_YEAR_TO_MONTH: + case Value.INTERVAL_DAY_TO_HOUR: + case Value.INTERVAL_DAY_TO_MINUTE: + case Value.INTERVAL_HOUR_TO_MINUTE: + return ValueInterval.from(IntervalQualifier.valueOf(type - Value.INTERVAL_YEAR), + random.nextBoolean(), random.nextInt(Integer.MAX_VALUE), random.nextInt(12)); + case Value.JSON: + return ValueJson.fromJson("{\"key\":\"value\"}"); + case Value.BINARY: + return ValueBinary.get(randomBytes(random.nextInt(1000))); default: throw new AssertionError("type=" + type); } } + private long randomDateValue() { + return DateTimeUtils.dateValueFromAbsoluteDay( + (random.nextLong() & Long.MAX_VALUE) % (MAX_ABSOLUTE_DAY - MIN_ABSOLUTE_DAY + 1) + MIN_ABSOLUTE_DAY); + } + + private long randomTimeNanos() { + return (random.nextLong() & Long.MAX_VALUE) % DateTimeUtils.NANOS_PER_DAY; + } + + private short randomZoneOffset() { + return (short) (random.nextInt() % (18 * 60)); + } + + private Value[] createArray() throws SQLException { + int len = random.nextInt(20); + Value[] list = new Value[len]; + for (int i = 0; i < list.length; i++) { + list[i] = create(Value.VARCHAR); + } + return list; + } + private byte[] randomBytes(int len) { byte[] data = new byte[len]; if (random.nextBoolean()) { @@ -240,11 +315,6 @@ public String getDatabasePath() { return getBaseDir() + "/valueMemory"; } - @Override - public String getLobCompressionAlgorithm(int type) { - return "LZF"; - } - @Override public Object getLobSyncObject() { return this; @@ -271,9 +341,9 @@ public 
TempFileDeleter getTempFileDeleter() { } @Override - public LobStorageFrontend getLobStorage() { + public LobStorageInterface getLobStorage() { if (lobStorage == null) { - lobStorage = new LobStorageFrontend(this); + lobStorage = new LobStorageTest(); } return lobStorage; } @@ -285,7 +355,71 @@ public int readLob(long lobId, byte[] hmac, long offset, byte[] buff, } @Override - public JavaObjectSerializer getJavaObjectSerializer() { - return null; + public CompareMode getCompareMode() { + return CompareMode.getInstance(null, 0); + } + + + private class LobStorageTest implements LobStorageInterface { + + LobStorageTest() { + } + + @Override + public void removeLob(ValueLob lob) { + // not stored in the database + } + + @Override + public InputStream getInputStream(long lobId, + long byteCount) throws IOException { + // this method is only implemented on the server side of a TCP connection + throw new IllegalStateException(); + } + + @Override + public InputStream getInputStream(long lobId, int tableId, + long byteCount) throws IOException { + // this method is only implemented on the server side of a TCP connection + throw new IllegalStateException(); + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public ValueLob copyLob(ValueLob old, int tableId) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeAllForTable(int tableId) { + throw new UnsupportedOperationException(); + } + + @Override + public ValueBlob createBlob(InputStream in, long maxLength) { + // need to use a temp file, because the input stream could come from + // the same database, which would create a weird situation (trying + // to read a block while writing something) + return ValueBlob.createTempBlob(in, maxLength, TestValueMemory.this); + } + + /** + * Create a CLOB object. 
+ * + * @param reader the reader + * @param maxLength the maximum length (-1 if not known) + * @return the LOB + */ + @Override + public ValueClob createClob(Reader reader, long maxLength) { + // need to use a temp file, because the input stream could come from + // the same database, which would create a weird situation (trying + // to read a block while writing something) + return ValueClob.createTempClob(reader, maxLength, TestValueMemory.this); + } } } diff --git a/h2/src/test/org/h2/test/unit/package.html b/h2/src/test/org/h2/test/unit/package.html index c04cacb576..f87035f40d 100644 --- a/h2/src/test/org/h2/test/unit/package.html +++ b/h2/src/test/org/h2/test/unit/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/test/org/h2/test/utils/AssertThrows.java b/h2/src/test/org/h2/test/utils/AssertThrows.java deleted file mode 100644 index 9a8a81b8c2..0000000000 --- a/h2/src/test/org/h2/test/utils/AssertThrows.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.utils; - -import java.lang.reflect.Method; -import java.sql.SQLException; -import org.h2.message.DbException; - -/** - * Helper class to simplify negative testing. Usage: - *
    - * new AssertThrows() { public void test() {
    - *     Integer.parseInt("not a number");
    - * }};
    - * 
    - */ -public abstract class AssertThrows { - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - * - * @param expectedExceptionClass the expected exception class - */ - public AssertThrows(final Class expectedExceptionClass) { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... args) { - if (t == null) { - throw new AssertionError("Expected an exception of type " + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method returned successfully"); - } - if (!expectedExceptionClass.isAssignableFrom(t.getClass())) { - AssertionError ae = new AssertionError( - "Expected an exception of type\n" + - expectedExceptionClass.getSimpleName() + - " to be thrown, but the method under test " + - "threw an exception of type\n" + - t.getClass().getSimpleName() + - " (see in the 'Caused by' for the exception " + - "that was thrown)"); - ae.initCause(t); - throw ae; - } - return false; - } - }); - } - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - */ - public AssertThrows() { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... args) { - if (t != null) { - throw new AssertionError("Expected an exception " + - "to be thrown, but the method returned successfully"); - } - // all exceptions are fine - return false; - } - }); - } - - /** - * Create a new assertion object, and call the test method to verify the - * expected exception is thrown. - * - * @param expectedErrorCode the error code of the exception - */ - public AssertThrows(final int expectedErrorCode) { - this(new ResultVerifier() { - @Override - public boolean verify(Object returnValue, Throwable t, Method m, - Object... 
args) { - int errorCode; - if (t instanceof DbException) { - errorCode = ((DbException) t).getErrorCode(); - } else if (t instanceof SQLException) { - errorCode = ((SQLException) t).getErrorCode(); - } else { - errorCode = 0; - } - if (errorCode != expectedErrorCode) { - AssertionError ae = new AssertionError( - "Expected an SQLException or DbException with error code " + - expectedErrorCode); - ae.initCause(t); - throw ae; - } - return false; - } - }); - } - - private AssertThrows(ResultVerifier verifier) { - try { - test(); - verifier.verify(null, null, null); - } catch (Exception e) { - verifier.verify(null, e, null); - } - } - - /** - * The test method that is called. - * - * @throws Exception the exception - */ - public abstract void test() throws Exception; - -} diff --git a/h2/src/test/org/h2/test/utils/FilePathDebug.java b/h2/src/test/org/h2/test/utils/FilePathDebug.java index 7a06b37dc3..13144377a0 100644 --- a/h2/src/test/org/h2/test/utils/FilePathDebug.java +++ b/h2/src/test/org/h2/test/utils/FilePathDebug.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.utils; @@ -191,10 +191,9 @@ public void moveTo(FilePath newName, boolean atomicReplace) { } @Override - public FilePath createTempFile(String suffix, boolean deleteOnExit, - boolean inTempDir) throws IOException { - trace(name, "createTempFile", suffix, deleteOnExit, inTempDir); - return super.createTempFile(suffix, deleteOnExit, inTempDir); + public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { + trace(name, "createTempFile", suffix, inTempDir); + return super.createTempFile(suffix, inTempDir); } /** diff --git a/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java b/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java new file mode 100644 index 0000000000..a8d9c72f28 --- /dev/null +++ b/h2/src/test/org/h2/test/utils/FilePathReorderWrites.java @@ -0,0 +1,379 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.utils; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.util.ArrayList; +import java.util.Random; +import org.h2.store.fs.FileBaseDefault; +import org.h2.store.fs.FilePath; +import org.h2.store.fs.FilePathWrapper; +import org.h2.util.IOUtils; + +/** + * An unstable file system. It is used to simulate file system problems (for + * example out of disk space). + */ +public class FilePathReorderWrites extends FilePathWrapper { + + /** + * Whether trace output of all method calls is enabled. 
+ */ + static final boolean TRACE = false; + + private static final FilePathReorderWrites INSTANCE = new FilePathReorderWrites(); + + private static final IOException POWER_FAILURE = new IOException("Power Failure"); + + private static int powerFailureCountdown; + + private static boolean partialWrites; + + private static Random random = new Random(1); + + /** + * Register the file system. + * + * @return the instance + */ + public static FilePathReorderWrites register() { + FilePath.register(INSTANCE); + return INSTANCE; + } + + /** + * Set the number of write operations before a simulated power failure, and + * the random seed (for partial writes). + * + * @param count the number of write operations (0 to never fail, + * Integer.MAX_VALUE to count the operations) + * @param seed the new seed + */ + public void setPowerOffCountdown(int count, int seed) { + powerFailureCountdown = count; + random.setSeed(seed); + } + + public int getPowerOffCountdown() { + return powerFailureCountdown; + } + + /** + * Whether partial writes are possible (writing only part of the data). + * + * @param b true to enable + */ + public static void setPartialWrites(boolean b) { + partialWrites = b; + } + + static boolean isPartialWrites() { + return partialWrites; + } + + /** + * Get a buffer with a subset (the head) of the data of the source buffer. + * + * @param src the source buffer + * @return a buffer with a subset of the data + */ + ByteBuffer getRandomSubset(ByteBuffer src) { + int len = src.remaining(); + len = Math.min(4096, Math.min(len, 1 + random.nextInt(len))); + ByteBuffer temp = ByteBuffer.allocate(len); + src.get(temp.array()); + return temp; + } + + Random getRandom() { + return random; + } + + /** + * Check if the simulated problem occurred. + * This call will decrement the countdown. 
+ * + * @throws IOException if the simulated power failure occurred + */ + void checkError() throws IOException { + if (powerFailureCountdown == 0) { + return; + } + if (powerFailureCountdown < 0) { + throw POWER_FAILURE; + } + powerFailureCountdown--; + if (powerFailureCountdown == 0) { + powerFailureCountdown--; + throw POWER_FAILURE; + } + } + + @Override + public FileChannel open(String mode) throws IOException { + InputStream in = newInputStream(); + FilePath copy = FilePath.get(getBase().toString() + ".copy"); + OutputStream out = copy.newOutputStream(false); + IOUtils.copy(in, out); + in.close(); + out.close(); + FileChannel base = getBase().open(mode); + FileChannel readBase = copy.open(mode); + return new FileReorderWrites(this, base, readBase); + } + + @Override + public String getScheme() { + return "reorder"; + } + + public long getMaxAge() { + // TODO implement, configurable + return 45000; + } + + @Override + public void delete() { + super.delete(); + FilePath.get(getBase().toString() + ".copy").delete(); + } +} + +/** + * A write-reordering file implementation. + */ +class FileReorderWrites extends FileBaseDefault { + + private final FilePathReorderWrites file; + /** + * The base channel, where not all operations are immediately applied. + */ + private final FileChannel base; + + /** + * The base channel that is used for reading, where all operations are + * immediately applied to get a consistent view before a power failure. + */ + private final FileChannel readBase; + + private boolean closed; + + /** + * The list of not yet applied to the base channel. It is sorted by time. 
+ */ + private ArrayList notAppliedList = new ArrayList<>(); + + private int id; + + FileReorderWrites(FilePathReorderWrites file, FileChannel base, FileChannel readBase) { + this.file = file; + this.base = base; + this.readBase = readBase; + } + + @Override + public void implCloseChannel() throws IOException { + base.close(); + readBase.close(); + closed = true; + } + + @Override + public long size() throws IOException { + return readBase.size(); + } + + @Override + public int read(ByteBuffer dst, long pos) throws IOException { + return readBase.read(dst, pos); + } + + @Override + protected void implTruncate(long newSize) throws IOException { + long oldSize = readBase.size(); + if (oldSize <= newSize) { + return; + } + addOperation(new FileWriteOperation(id++, newSize, null)); + } + + private int addOperation(FileWriteOperation op) throws IOException { + trace("op " + op); + checkError(); + notAppliedList.add(op); + long now = op.getTime(); + for (int i = 0; i < notAppliedList.size() - 1; i++) { + FileWriteOperation old = notAppliedList.get(i); + boolean applyOld = false; + // String reason = ""; + if (old.getTime() + 45000 < now) { + // reason = "old"; + applyOld = true; + } else if (old.overlaps(op)) { + // reason = "overlap"; + applyOld = true; + } else if (file.getRandom().nextInt(100) < 10) { + // reason = "random"; + applyOld = true; + } + if (applyOld) { + trace("op apply " + op); + old.apply(base); + notAppliedList.remove(i); + i--; + } + } + return op.apply(readBase); + } + + private void applyAll() throws IOException { + trace("applyAll"); + for (FileWriteOperation op : notAppliedList) { + op.apply(base); + } + notAppliedList.clear(); + } + + @Override + public void force(boolean metaData) throws IOException { + checkError(); + readBase.force(metaData); + applyAll(); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + if (FilePathReorderWrites.isPartialWrites() && src.remaining() > 2) { + ByteBuffer buf1 = 
src.slice(); + ByteBuffer buf2 = src.slice(); + int len1 = src.remaining() / 2; + int len2 = src.remaining() - len1; + buf1.limit(buf1.limit() - len2); + buf2.position(buf2.position() + len1); + int x = addOperation(new FileWriteOperation(id++, position, buf1)); + x += addOperation( + new FileWriteOperation(id++, position + len1, buf2)); + src.position( src.position() + x ); + return x; + } + return addOperation(new FileWriteOperation(id++, position, src)); + } + + private void checkError() throws IOException { + if (closed) { + throw new IOException("Closed"); + } + file.checkError(); + } + + @Override + public synchronized FileLock tryLock(long position, long size, + boolean shared) throws IOException { + return readBase.tryLock(position, size, shared); + } + + @Override + public String toString() { + return file.getScheme() + ":" + file.toString(); + } + + private static void trace(String message) { + if (FilePathReorderWrites.TRACE) { + System.out.println(message); + } + } + + /** + * A file operation (that might be re-ordered with other operations, or not + * be applied on power failure). + */ + static class FileWriteOperation { + private final int id; + private final long time; + private final ByteBuffer buffer; + private final long position; + + FileWriteOperation(int id, long position, ByteBuffer src) { + this.id = id; + this.time = System.currentTimeMillis(); + if (src == null) { + buffer = null; + } else { + int len = src.limit() - src.position(); + this.buffer = ByteBuffer.allocate(len); + buffer.put(src); + buffer.flip(); + } + this.position = position; + } + + public long getTime() { + return time; + } + + /** + * Check whether the file region of this operation overlaps with + * another operation. 
+ * + * @param other the other operation + * @return if there is an overlap + */ + boolean overlaps(FileWriteOperation other) { + if (isTruncate() && other.isTruncate()) { + // we just keep the latest truncate operation + return true; + } + if (isTruncate()) { + return position < other.getEndPosition(); + } else if (other.isTruncate()) { + return getEndPosition() > other.position; + } + return position < other.getEndPosition() && + getEndPosition() > other.position; + } + + private boolean isTruncate() { + return buffer == null; + } + + private long getEndPosition() { + return position + getLength(); + } + + private int getLength() { + return buffer == null ? 0 : buffer.limit() - buffer.position(); + } + + /** + * Apply the operation to the channel. + * + * @param channel the channel + * @return the return value of the operation + */ + int apply(FileChannel channel) throws IOException { + if (isTruncate()) { + channel.truncate(position); + return -1; + } + int len = channel.write(buffer, position); + buffer.flip(); + return len; + } + + @Override + public String toString() { + String s = "[" + id + "]: @" + position + ( + isTruncate() ? "-truncate" : ("+" + getLength())); + return s; + } + } + +} \ No newline at end of file diff --git a/h2/src/test/org/h2/test/utils/FilePathUnstable.java b/h2/src/test/org/h2/test/utils/FilePathUnstable.java index 28b183414c..6343bf5ab6 100644 --- a/h2/src/test/org/h2/test/utils/FilePathUnstable.java +++ b/h2/src/test/org/h2/test/utils/FilePathUnstable.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.utils; @@ -198,9 +198,8 @@ public void moveTo(FilePath newName, boolean atomicReplace) { } @Override - public FilePath createTempFile(String suffix, boolean deleteOnExit, - boolean inTempDir) throws IOException { - return super.createTempFile(suffix, deleteOnExit, inTempDir); + public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { + return super.createTempFile(suffix, inTempDir); } @Override diff --git a/h2/src/test/org/h2/test/utils/MemoryFootprint.java b/h2/src/test/org/h2/test/utils/MemoryFootprint.java index fd7289ffb1..ecfe077f82 100644 --- a/h2/src/test/org/h2/test/utils/MemoryFootprint.java +++ b/h2/src/test/org/h2/test/utils/MemoryFootprint.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; @@ -33,8 +33,8 @@ public static void main(String... a) { print("BigDecimal", new BigDecimal("0")); print("BigInteger", new BigInteger("0")); print("String", new String("Hello")); - print("Data", Data.create(null, 10)); - print("Row", new Row(new Value[0], 0)); + print("Data", Data.create(10)); + print("Row", Row.get(new Value[0], 0)); System.out.println(); for (int i = 1; i < 128; i += i) { diff --git a/h2/src/test/org/h2/test/utils/OutputCatcher.java b/h2/src/test/org/h2/test/utils/OutputCatcher.java index 7cb0c9a9c6..ef9362199a 100644 --- a/h2/src/test/org/h2/test/utils/OutputCatcher.java +++ b/h2/src/test/org/h2/test/utils/OutputCatcher.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; @@ -46,7 +46,7 @@ public void stop() { System.setOut(out.print); System.err.flush(); System.setErr(err.print); - output = new String(buff.toByteArray()); + output = buff.toString(); } /** diff --git a/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java b/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java deleted file mode 100644 index a15d01f17b..0000000000 --- a/h2/src/test/org/h2/test/utils/ProxyCodeGenerator.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.test.utils; - -import java.io.PrintWriter; -import java.io.StringWriter; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.HashMap; -import java.util.TreeMap; -import java.util.TreeSet; -import org.h2.util.New; -import org.h2.util.SourceCompiler; - -/** - * A code generator for class proxies. - */ -public class ProxyCodeGenerator { - - private static SourceCompiler compiler = new SourceCompiler(); - private static HashMap, Class> proxyMap = New.hashMap(); - - private final TreeSet imports = new TreeSet(); - private final TreeMap methods = new TreeMap(); - private String packageName; - private String className; - private Class extendsClass; - private Constructor constructor; - - /** - * Check whether there is already a proxy class generated. - * - * @param c the class - * @return true if yes - */ - public static boolean isGenerated(Class c) { - return proxyMap.containsKey(c); - } - - /** - * Generate a proxy class. The returned class extends the given class. 
- * - * @param c the class to extend - * @return the proxy class - */ - public static Class getClassProxy(Class c) throws ClassNotFoundException { - Class p = proxyMap.get(c); - if (p != null) { - return p; - } - // TODO how to extend a class with private constructor - // TODO call right constructor - // TODO use the right package - ProxyCodeGenerator cg = new ProxyCodeGenerator(); - cg.setPackageName("bytecode"); - cg.generateClassProxy(c); - StringWriter sw = new StringWriter(); - cg.write(new PrintWriter(sw)); - String code = sw.toString(); - String proxy = "bytecode."+ c.getSimpleName() + "Proxy"; - compiler.setJavaSystemCompiler(false); - compiler.setSource(proxy, code); - // System.out.println(code); - Class px = compiler.getClass(proxy); - proxyMap.put(c, px); - return px; - } - - private void setPackageName(String packageName) { - this.packageName = packageName; - } - - /** - * Generate a class that implements all static methods of the given class, - * but as non-static. - * - * @param clazz the class to extend - */ - void generateStaticProxy(Class clazz) { - imports.clear(); - addImport(InvocationHandler.class); - addImport(Method.class); - addImport(clazz); - className = getClassName(clazz) + "Proxy"; - for (Method m : clazz.getDeclaredMethods()) { - if (Modifier.isStatic(m.getModifiers())) { - if (!Modifier.isPrivate(m.getModifiers())) { - addMethod(m); - } - } - } - } - - private void generateClassProxy(Class clazz) { - imports.clear(); - addImport(InvocationHandler.class); - addImport(Method.class); - addImport(clazz); - className = getClassName(clazz) + "Proxy"; - extendsClass = clazz; - int doNotOverride = Modifier.FINAL | Modifier.STATIC | - Modifier.PRIVATE | Modifier.ABSTRACT | Modifier.VOLATILE; - Class dc = clazz; - while (dc != null) { - addImport(dc); - for (Method m : dc.getDeclaredMethods()) { - if ((m.getModifiers() & doNotOverride) == 0) { - addMethod(m); - } - } - dc = dc.getSuperclass(); - } - for (Constructor c : 
clazz.getDeclaredConstructors()) { - if (Modifier.isPrivate(c.getModifiers())) { - continue; - } - if (constructor == null) { - constructor = c; - } else if (c.getParameterTypes().length < - constructor.getParameterTypes().length) { - constructor = c; - } - } - } - - private void addMethod(Method m) { - if (methods.containsKey(getMethodName(m))) { - // already declared in a subclass - return; - } - addImport(m.getReturnType()); - for (Class c : m.getParameterTypes()) { - addImport(c); - } - for (Class c : m.getExceptionTypes()) { - addImport(c); - } - methods.put(getMethodName(m), m); - } - - private static String getMethodName(Method m) { - StringBuilder buff = new StringBuilder(); - buff.append(m.getReturnType()).append(' '); - buff.append(m.getName()); - for (Class p : m.getParameterTypes()) { - buff.append(' '); - buff.append(p.getName()); - } - return buff.toString(); - } - - private void addImport(Class c) { - while (c.isArray()) { - c = c.getComponentType(); - } - if (!c.isPrimitive()) { - if (!"java.lang".equals(c.getPackage().getName())) { - imports.add(c.getName()); - } - } - } - - private static String getClassName(Class c) { - return getClassName(c, false); - } - - private static String getClassName(Class c, boolean varArg) { - if (varArg) { - c = c.getComponentType(); - } - String s = c.getSimpleName(); - while (true) { - c = c.getEnclosingClass(); - if (c == null) { - break; - } - s = c.getSimpleName() + "." 
+ s; - } - if (varArg) { - return s + "..."; - } - return s; - } - - private void write(PrintWriter writer) { - if (packageName != null) { - writer.println("package " + packageName + ";"); - } - for (String imp : imports) { - writer.println("import " + imp + ";"); - } - writer.print("public class " + className); - if (extendsClass != null) { - writer.print(" extends " + getClassName(extendsClass)); - } - writer.println(" {"); - writer.println(" private final InvocationHandler ih;"); - writer.println(" public " + className + "() {"); - writer.println(" this(new InvocationHandler() {"); - writer.println(" public Object invoke(Object proxy,"); - writer.println(" Method method, Object[] args) " + - "throws Throwable {"); - writer.println(" return method.invoke(proxy, args);"); - writer.println(" }});"); - writer.println(" }"); - writer.println(" public " + className + "(InvocationHandler ih) {"); - if (constructor != null) { - writer.print(" super("); - int i = 0; - for (Class p : constructor.getParameterTypes()) { - if (i > 0) { - writer.print(", "); - } - if (p.isPrimitive()) { - if (p == boolean.class) { - writer.print("false"); - } else if (p == byte.class) { - writer.print("(byte) 0"); - } else if (p == char.class) { - writer.print("(char) 0"); - } else if (p == short.class) { - writer.print("(short) 0"); - } else if (p == int.class) { - writer.print("0"); - } else if (p == long.class) { - writer.print("0L"); - } else if (p == float.class) { - writer.print("0F"); - } else if (p == double.class) { - writer.print("0D"); - } - } else { - writer.print("null"); - } - i++; - } - writer.println(");"); - } - writer.println(" this.ih = ih;"); - writer.println(" }"); - writer.println(" @SuppressWarnings(\"unchecked\")"); - writer.println(" private static " + - "T convertException(Throwable e) {"); - writer.println(" if (e instanceof Error) {"); - writer.println(" throw (Error) e;"); - writer.println(" }"); - writer.println(" return (T) e;"); - writer.println(" }"); - for 
(Method m : methods.values()) { - Class retClass = m.getReturnType(); - writer.print(" "); - if (Modifier.isProtected(m.getModifiers())) { - // 'public' would also work - writer.print("protected "); - } else { - writer.print("public "); - } - writer.print(getClassName(retClass) + - " " + m.getName() + "("); - Class[] pc = m.getParameterTypes(); - for (int i = 0; i < pc.length; i++) { - Class p = pc[i]; - if (i > 0) { - writer.print(", "); - } - boolean varArg = i == pc.length - 1 && m.isVarArgs(); - writer.print(getClassName(p, varArg) + " p" + i); - } - writer.print(")"); - Class[] ec = m.getExceptionTypes(); - writer.print(" throws RuntimeException"); - if (ec.length > 0) { - for (Class e : ec) { - writer.print(", "); - writer.print(getClassName(e)); - } - } - writer.println(" {"); - writer.println(" try {"); - writer.print(" "); - if (retClass != void.class) { - writer.print("return ("); - if (retClass == boolean.class) { - writer.print("Boolean"); - } else if (retClass == byte.class) { - writer.print("Byte"); - } else if (retClass == char.class) { - writer.print("Character"); - } else if (retClass == short.class) { - writer.print("Short"); - } else if (retClass == int.class) { - writer.print("Integer"); - } else if (retClass == long.class) { - writer.print("Long"); - } else if (retClass == float.class) { - writer.print("Float"); - } else if (retClass == double.class) { - writer.print("Double"); - } else { - writer.print(getClassName(retClass)); - } - writer.print(") "); - } - writer.print("ih.invoke(this, "); - writer.println(getClassName(m.getDeclaringClass()) + - ".class.getDeclaredMethod(\"" + m.getName() + - "\","); - writer.print(" new Class[] {"); - int i = 0; - for (Class p : m.getParameterTypes()) { - if (i > 0) { - writer.print(", "); - } - writer.print(getClassName(p) + ".class"); - i++; - } - writer.println("}),"); - writer.print(" new Object[] {"); - for (i = 0; i < m.getParameterTypes().length; i++) { - if (i > 0) { - writer.print(", "); - } - 
writer.print("p" + i); - } - writer.println("});"); - writer.println(" } catch (Throwable e) {"); - writer.println(" throw convertException(e);"); - writer.println(" }"); - writer.println(" }"); - } - writer.println("}"); - writer.flush(); - } - - /** - * Format a method call, including arguments, for an exception message. - * - * @param m the method - * @param args the arguments - * @return the formatted string - */ - public static String formatMethodCall(Method m, Object... args) { - StringBuilder buff = new StringBuilder(); - buff.append(m.getName()).append('('); - for (int i = 0; i < args.length; i++) { - Object a = args[i]; - if (i > 0) { - buff.append(", "); - } - buff.append(a == null ? "null" : a.toString()); - } - buff.append(")"); - return buff.toString(); - } - -} diff --git a/h2/src/test/org/h2/test/utils/RandomDataUtils.java b/h2/src/test/org/h2/test/utils/RandomDataUtils.java new file mode 100644 index 0000000000..36b15e501c --- /dev/null +++ b/h2/src/test/org/h2/test/utils/RandomDataUtils.java @@ -0,0 +1,62 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.test.utils; + +import java.util.Random; + +/** + * Utilities for random data generation. + */ +public final class RandomDataUtils { + + /** + * Fills the specified character array with random printable code points + * from the limited set of Unicode code points with different length in + * UTF-8 representation. + * + *

    + * Debuggers can have performance problems on some systems when displayed + * values have characters from many different blocks, because too many large + * separate fonts with different sets of glyphs can be needed. + *

    + * + * @param r + * the source of random data + * @param chars + * the character array to fill + */ + public static void randomChars(Random r, char[] chars) { + for (int i = 0, l = chars.length; i < l;) { + int from, to; + switch (r.nextInt(4)) { + case 3: + if (i + 1 < l) { + from = 0x1F030; + to = 0x1F093; + break; + } + //$FALL-THROUGH$ + default: + from = ' '; + to = '~'; + break; + case 1: + from = 0xA0; + to = 0x24F; + break; + case 2: + from = 0x2800; + to = 0x28FF; + break; + } + i += Character.toChars(from + r.nextInt(to - from + 1), chars, i); + } + } + + private RandomDataUtils() { + } + +} diff --git a/h2/src/test/org/h2/test/utils/ResultVerifier.java b/h2/src/test/org/h2/test/utils/ResultVerifier.java index f3066e200c..ed5d73c75e 100644 --- a/h2/src/test/org/h2/test/utils/ResultVerifier.java +++ b/h2/src/test/org/h2/test/utils/ResultVerifier.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.test.utils; diff --git a/h2/src/test/org/h2/test/utils/SelfDestructor.java b/h2/src/test/org/h2/test/utils/SelfDestructor.java index 8e7ae8598e..6f11ffa745 100644 --- a/h2/src/test/org/h2/test/utils/SelfDestructor.java +++ b/h2/src/test/org/h2/test/utils/SelfDestructor.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.test.utils; -import java.lang.reflect.Method; import java.sql.Timestamp; -import java.util.Map; +import org.h2.util.ThreadDeadlockDetector; /** * This is a self-destructor class to kill a long running process automatically @@ -56,17 +55,8 @@ public void run() { System.out.println(time + " Killing the process after " + minutes + " minute(s)"); try { - Map map = - Thread.getAllStackTraces(); - for (Map.Entry en : - map.entrySet()) { - System.out.println(en.getKey()); - for (StackTraceElement el : en.getValue()) { - System.out.println(" " + el); - } - } - System.out.println(); - System.out.flush(); + ThreadDeadlockDetector.dumpAllThreadsAndLocks( + "SelfDestructor timed out", System.err); try { Thread.sleep(1000); } catch (Exception e) { @@ -75,13 +65,9 @@ public void run() { int activeCount = Thread.activeCount(); Thread[] threads = new Thread[activeCount + 100]; int len = Thread.enumerate(threads); - Method stop = Thread.class.getMethod("stop", Throwable.class); for (int i = 0; i < len; i++) { Thread t = threads[i]; - String threadName = "Thread #" + i + ": " + t.getName(); - Error e = new Error(threadName); if (t != Thread.currentThread()) { - stop.invoke(t, e); t.interrupt(); } } diff --git a/h2/src/test/org/h2/test/utils/package.html b/h2/src/test/org/h2/test/utils/package.html index e233c4e5ce..c2468caa43 100644 --- a/h2/src/test/org/h2/test/utils/package.html +++ b/h2/src/test/org/h2/test/utils/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/WEB-INF/console.html b/h2/src/tools/WEB-INF/console.html index 2413dcc170..2ae76ab4a3 100644 --- a/h2/src/tools/WEB-INF/console.html +++ b/h2/src/tools/WEB-INF/console.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/WEB-INF/web.xml b/h2/src/tools/WEB-INF/web.xml index ba7f27e82c..b1b067f3ca 100644 --- a/h2/src/tools/WEB-INF/web.xml +++ b/h2/src/tools/WEB-INF/web.xml @@ -1,7 +1,7 @@ - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/content/ContentResolver.java b/h2/src/tools/android/content/ContentResolver.java deleted file mode 100644 index 08dafadf4e..0000000000 --- a/h2/src/tools/android/content/ContentResolver.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.content; - -/** - * TODO - */ -public class ContentResolver { - - // empty - -} diff --git a/h2/src/tools/android/content/ContentValues.java b/h2/src/tools/android/content/ContentValues.java deleted file mode 100644 index b46500c5df..0000000000 --- a/h2/src/tools/android/content/ContentValues.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.content; - -/** - * A key-value pair. - */ -public class ContentValues { - - // TODO - -} diff --git a/h2/src/tools/android/content/Context.java b/h2/src/tools/android/content/Context.java deleted file mode 100644 index 03d62ce5c6..0000000000 --- a/h2/src/tools/android/content/Context.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.content; - -/** - * TODO - */ -public class Context { - - /** - * The file may only be accessed by this application, or by application with - * the same user ID. 
- */ - public static final int MODE_PRIVATE = 0; - -} diff --git a/h2/src/tools/android/content/package.html b/h2/src/tools/android/content/package.html deleted file mode 100644 index 05c109be22..0000000000 --- a/h2/src/tools/android/content/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/database/AbstractCursor.java b/h2/src/tools/android/database/AbstractCursor.java deleted file mode 100644 index 4fa433e157..0000000000 --- a/h2/src/tools/android/database/AbstractCursor.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * This class implements some of the cursor operations. - */ -public abstract class AbstractCursor implements Cursor { - - // empty - -} diff --git a/h2/src/tools/android/database/AbstractWindowedCursor.java b/h2/src/tools/android/database/AbstractWindowedCursor.java deleted file mode 100644 index 1ad7c5f3d2..0000000000 --- a/h2/src/tools/android/database/AbstractWindowedCursor.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * The base class for a database cursor. - */ -public abstract class AbstractWindowedCursor extends AbstractCursor { - - // empty - -} diff --git a/h2/src/tools/android/database/CharArrayBuffer.java b/h2/src/tools/android/database/CharArrayBuffer.java deleted file mode 100644 index 4ec78c3cb3..0000000000 --- a/h2/src/tools/android/database/CharArrayBuffer.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package android.database; - -/** - * TODO - */ -public class CharArrayBuffer { - - // empty - -} diff --git a/h2/src/tools/android/database/ContentObserver.java b/h2/src/tools/android/database/ContentObserver.java deleted file mode 100644 index 6aa53dd886..0000000000 --- a/h2/src/tools/android/database/ContentObserver.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * TODO - */ -public class ContentObserver { - - // empty - -} diff --git a/h2/src/tools/android/database/Cursor.java b/h2/src/tools/android/database/Cursor.java deleted file mode 100644 index 04e36ae2db..0000000000 --- a/h2/src/tools/android/database/Cursor.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -import android.content.ContentResolver; -import android.net.Uri; -import android.os.Bundle; - -/** - * This interface allows to access the rows in a result set. - */ -public interface Cursor { - - /** - * Get the row count. - * - * @return the row count - */ - int getCount(); - - /** - * Deactivate the cursor. The cursor can be re-activated using requery(). - */ - void deactivate(); - - /** - * Get the column index. The first column is 0. - * - * @param columnName the name of the column - * @return the column index, or -1 if the column was not found - */ - int getColumnIndex(String columnName); - - /** - * Close the cursor. - */ - void close(); - - /** - * Get the column names. - * - * @return the column names - */ - String[] getColumnNames(); - - /** - * Register a data set observer. 
- * - * @param observer the observer - */ - void registerDataSetObserver(DataSetObserver observer); - - /** - * Re-run the query. - * - * @return TODO - */ - boolean requery(); - - /** - * Move the cursor by the given number of rows forward or backward. - * - * @param offset the row offset - * @return true if the operation was successful - */ - boolean move(int offset); - - /** - * TODO - */ - void copyStringToBuffer(int columnIndex, CharArrayBuffer buffer); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - byte[] getBlob(int columnIndex); - - /** - * Get the number of columns in the result. - * - * @return the column count - */ - int getColumnCount(); - - /** - * Get the column index for the given column name, or throw an exception if - * not found. - * - * @param columnName the column name - * @return the index - */ - int getColumnIndexOrThrow(String columnName); - - /** - * Get the name of the given column. - * - * @param columnIndex the column index (0, 1,...) - * @return the name - */ - String getColumnName(int columnIndex); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - double getDouble(int columnIndex); - - /** - * TODO - * - * @return TODO - */ - Bundle getExtras(); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - float getFloat(int columnIndex); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - int getInt(int columnIndex); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - long getLong(int columnIndex); - - /** - * Get the current row number - * - * @return the row number TODO 0, 1,... 
- */ - int getPosition(); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - short getShort(int columnIndex); - - /** - * Get the value from the current row. - * - * @param columnIndex the column index (0, 1,...) - * @return the value - */ - String getString(int columnIndex); - - /** - * The method onMove is only called if this method returns true. - * - * @return true if calling onMove is required - */ - boolean getWantsAllOnMoveCalls(); - - /** - * Check if the current position is past the last row. - * - * @return true if it is - */ - boolean isAfterLast(); - - /** - * Check if the current position is before the first row. - * - * @return true if it is - */ - boolean isBeforeFirst(); - - /** - * Check if the cursor is closed. - * - * @return true if it is - */ - boolean isClosed(); - - /** - * Check if the current position is on the first row. - * - * @return true if it is - */ - boolean isFirst(); - - /** - * Check if the current position is on the last row. - * - * @return true if it is - */ - boolean isLast(); - - /** - * Check if the value of the current row is null. - * - * @param columnIndex the column index (0, 1,...) - * @return true if it is - */ - boolean isNull(int columnIndex); - - /** - * Move to the first row. - * - * @return TODO - */ - boolean moveToFirst(); - - /** - * Move to the last row. - * - * @return TODO - */ - boolean moveToLast(); - - /** - * Move to the next row. - * - * @return TODO - */ - boolean moveToNext(); - - /** - * Move to the given row. - * - * @param position TODO - * @return TODO - */ - boolean moveToPosition(int position); - - /** - * Move to the previous row. 
- * - * @return TODO - */ - boolean moveToPrevious(); - - /** - * TODO - * - * @param observer TODO - */ - void registerContentObserver(ContentObserver observer); - - /** - * TODO - * - * @param extras TODO - * @return TODO - */ - Bundle respond(Bundle extras); - - /** - * TODO - * - * @param cr TODO - * @param uri TODO - */ - void setNotificationUri(ContentResolver cr, Uri uri); - - /** - * TODO - * - * @param observer TODO - */ - void unregisterContentObserver(ContentObserver observer); - - /** - * TODO - * - * @param observer TODO - */ - void unregisterDataSetObserver(DataSetObserver observer); - -} diff --git a/h2/src/tools/android/database/CursorWindow.java b/h2/src/tools/android/database/CursorWindow.java deleted file mode 100644 index 505ab410ef..0000000000 --- a/h2/src/tools/android/database/CursorWindow.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * An instance of this class can contain multiple rows. - */ -public class CursorWindow { - - // empty - -} diff --git a/h2/src/tools/android/database/DataSetObserver.java b/h2/src/tools/android/database/DataSetObserver.java deleted file mode 100644 index e6766d07f9..0000000000 --- a/h2/src/tools/android/database/DataSetObserver.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * A listener for changes in a data set. 
- */ -public abstract class DataSetObserver { - - // empty - -} diff --git a/h2/src/tools/android/database/SQLException.java b/h2/src/tools/android/database/SQLException.java deleted file mode 100644 index fbcad5f64f..0000000000 --- a/h2/src/tools/android/database/SQLException.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database; - -/** - * A database exception. - */ -public class SQLException extends Exception { - - private static final long serialVersionUID = 1L; - - public SQLException() { - super(); - } - - public SQLException(String error) { - super(error); - } - -} diff --git a/h2/src/tools/android/database/package.html b/h2/src/tools/android/database/package.html deleted file mode 100644 index 05c109be22..0000000000 --- a/h2/src/tools/android/database/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/database/sqlite/SQLiteClosable.java b/h2/src/tools/android/database/sqlite/SQLiteClosable.java deleted file mode 100644 index f2336aa3e6..0000000000 --- a/h2/src/tools/android/database/sqlite/SQLiteClosable.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.database.sqlite; - -/** - * An object that can be closed. - */ -public abstract class SQLiteClosable { - - // empty - -} diff --git a/h2/src/tools/android/database/sqlite/package.html b/h2/src/tools/android/database/sqlite/package.html deleted file mode 100644 index 05c109be22..0000000000 --- a/h2/src/tools/android/database/sqlite/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/net/Uri.java b/h2/src/tools/android/net/Uri.java deleted file mode 100644 index 3e8ca65d80..0000000000 --- a/h2/src/tools/android/net/Uri.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.net; - -/** - * TODO - */ -public class Uri { - - // empty - -} diff --git a/h2/src/tools/android/net/package.html b/h2/src/tools/android/net/package.html deleted file mode 100644 index 05c109be22..0000000000 --- a/h2/src/tools/android/net/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/os/Bundle.java b/h2/src/tools/android/os/Bundle.java deleted file mode 100644 index 7bec26677c..0000000000 --- a/h2/src/tools/android/os/Bundle.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.os; - -/** - * TODO - */ -public class Bundle { - - // empty - -} diff --git a/h2/src/tools/android/os/package.html b/h2/src/tools/android/os/package.html deleted file mode 100644 index 05c109be22..0000000000 --- a/h2/src/tools/android/os/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains classes that are needed to compile the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/android/test/Test.java b/h2/src/tools/android/test/Test.java deleted file mode 100644 index 9927bcffe4..0000000000 --- a/h2/src/tools/android/test/Test.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package android.test; - -import org.h2.android.H2Database; -import org.h2.android.H2Utils; -import android.app.Activity; -import android.database.Cursor; - -/** - * Tests the Android API. - */ -public class Test extends Activity { - - public static void main(String... args) throws Exception { - H2Database db = H2Utils.openOrCreateDatabase( - "helloWorld.db", MODE_PRIVATE, null); - log("opened ps=" + db.getPageSize()); - try { - // db.execSQL("DROP TABLE IF EXISTS test"); - // log("dropped"); - db.execSQL( - "CREATE TABLE if not exists test(ID INTEGER PRIMARY KEY, NAME VARCHAR)"); - log("created"); - for (int j = 0; j < 10; j++) { - Cursor c = db.rawQuery("select * from test", new String[0]); - int count = c.getCount(); - for (int i = 0; i < count; i++) { - c.move(1); - c.getInt(0); - c.getString(1); - } - c.close(); - } - // log("select " + count); - db.execSQL("delete from test"); - log("delete"); - db.beginTransaction(); - for (int i = 0; i < 1000; i++) { - db.execSQL( - "INSERT INTO TEST VALUES(?, 'Hello')", new Object[] { i }); - } - db.setTransactionSuccessful(); - db.endTransaction(); - log("inserted"); - for (int i = 0; i < 10; i++) { - Cursor c = db.rawQuery( - "select * from test where id=?", new String[] { "" + i }); - int count = c.getCount(); - if (count > 0) { - c.move(1); - c.getInt(0); - c.getString(1); - } - c.close(); - } - log("select"); - } finally { - db.close(); - log("closed"); - } - } - - private static void log(String s) { - System.out.println(s); - } - -} diff --git a/h2/src/tools/android/test/package.html 
b/h2/src/tools/android/test/package.html deleted file mode 100644 index 68e32fbc6b..0000000000 --- a/h2/src/tools/android/test/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains tests. - -

    \ No newline at end of file diff --git a/h2/src/tools/com/caucho/jdbc/H2MetaData.java.txt b/h2/src/tools/com/caucho/jdbc/H2MetaData.java.txt deleted file mode 100644 index 4eb2166b6c..0000000000 --- a/h2/src/tools/com/caucho/jdbc/H2MetaData.java.txt +++ /dev/null @@ -1,47 +0,0 @@ -package com.caucho.jdbc; - -import com.caucho.util.Log; - -import javax.sql.DataSource; -import java.util.logging.Logger; - -/** - * Metadata for the H2 database. - * For details, see - * http://wondering.ru/java/H2ejb3onResinSupport1.0.zip - */ -public class H2MetaData extends JdbcMetaData { - private static final Logger log = Log.open(H2MetaData.class); - - protected H2MetaData(DataSource ds) { - super(ds); - } - - /** - * Returns the blob type. - */ - public String getBlobType(){ - return "BLOB"; - } - - /** - * Returns the long type. - */ - public String getLongType() { - return "BIGINT"; - } - - /** - * Returns true if identity is supported. - */ - public boolean supportsIdentity() { - return true; - } - - /** - * Returns the identity property - */ - public String createIdentitySQL(String sqlType) { - return "IDENTITY"; - } -} diff --git a/h2/src/tools/com/caucho/jdbc/JdbcMetaData.java.txt b/h2/src/tools/com/caucho/jdbc/JdbcMetaData.java.txt deleted file mode 100644 index 80d6972049..0000000000 --- a/h2/src/tools/com/caucho/jdbc/JdbcMetaData.java.txt +++ /dev/null @@ -1,15 +0,0 @@ -Sorry I can not include this file because -http://ohloh.org says that this may conflict -with another license. 
- -In the file JdbcMetaData.java in package com.caucho.jdbc -in method public static JdbcMetaData create(DataSource ds), -you need to add - - if ("H2".equalsIgnoreCase(name)) - return new H2MetaData(ds); - else - -just before - - if ("oracle".equalsIgnoreCase(name)) diff --git a/h2/src/tools/net/java/ao/db/H2DatabaseProvider.java.txt b/h2/src/tools/net/java/ao/db/H2DatabaseProvider.java.txt deleted file mode 100644 index 009415cd75..0000000000 --- a/h2/src/tools/net/java/ao/db/H2DatabaseProvider.java.txt +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: Michael Manske - */ -package net.java.ao.db; - -import java.sql.Driver; - -/** - * This is a database provider for ActiveObjects. - * See also https://activeobjects.dev.java.net . - * Usage: - *
    - * EntityManager manager = new EntityManager(new H2DatabaseProvider(
    - *      dbProperties.getProperty("db.uri"),
    - *      dbProperties.getProperty("db.username"),
    - *      dbProperties.getProperty("db.password")));
    - * 
    - * - * @author Michael Manske - * @author Thomas Mueller - */ -public class H2DatabaseProvider extends HSQLDatabaseProvider { - - /** - * Create a new provider. - * - * @param uri the database uri - * @param username the user name - * @param password the password - */ - public H2DatabaseProvider(String uri, String username, String password) { - super(uri, username, password); - } - - public Class< ? extends Driver> getDriverClass() throws ClassNotFoundException { - return (Class< ? extends Driver>) Class.forName("org.h2.Driver"); - } - -} diff --git a/h2/src/tools/oracle/toplink/essentials/platform/database/H2Platform.java.txt b/h2/src/tools/oracle/toplink/essentials/platform/database/H2Platform.java.txt deleted file mode 100644 index 8f42f65a9b..0000000000 --- a/h2/src/tools/oracle/toplink/essentials/platform/database/H2Platform.java.txt +++ /dev/null @@ -1,142 +0,0 @@ -/* - * The contents of this file are subject to the terms - * of the Common Development and Distribution License - * (the "License"). You may not use this file except - * in compliance with the License. - * - * You can obtain a copy of the license at - * glassfish/bootstrap/legal/CDDLv1.0.txt or - * https://glassfish.dev.java.net/public/CDDLv1.0.html. - * See the License for the specific language governing - * permissions and limitations under the License. - * - * When distributing Covered Code, include this CDDL - * HEADER in each file and include the License file at - * glassfish/bootstrap/legal/CDDLv1.0.txt. If applicable, - * add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your - * own identifying information: Portions Copyright [yyyy] - * [name of copyright owner] - */ -// Copyright (c) 1998, 2006, Oracle. All rights reserved. 
-package oracle.toplink.essentials.platform.database; - -import java.io.IOException; -import java.io.Writer; -import java.util.Hashtable; - -import oracle.toplink.essentials.exceptions.ValidationException; -import oracle.toplink.essentials.expressions.ExpressionOperator; -import oracle.toplink.essentials.internal.databaseaccess.FieldTypeDefinition; -import oracle.toplink.essentials.queryframework.ValueReadQuery; - -/** - * This platform provides H2 specific behaviour. - * To enable this platform change the following setting in persistence.xml: - *
    - * <property
    - *   name="toplink.target-database"
    - *   value="oracle.toplink.essentials.platform.database.H2Platform"/>
    - * 
    - * In old versions of Glassfish, the property name is - * toplink.platform.class.name. - * See also: https://glassfish.dev.java.net/issues/show_bug.cgi?id=4042 - * - * @author Thomas Mueller - * @author Marcio Borges (http://www.marciowb.net/blog/2008_08_01_) - */ -public class H2Platform extends DatabasePlatform { - - protected Hashtable buildFieldTypes() { - Hashtable fieldTypeMapping; - fieldTypeMapping = super.buildFieldTypes(); - fieldTypeMapping.put(Boolean.class, new FieldTypeDefinition("TINYINT", false)); - fieldTypeMapping.put(Integer.class, new FieldTypeDefinition("INTEGER", false)); - fieldTypeMapping.put(Long.class, new FieldTypeDefinition("NUMERIC", 19)); - fieldTypeMapping.put(Float.class, new FieldTypeDefinition("REAL", false)); - fieldTypeMapping.put(Double.class, new FieldTypeDefinition("REAL", false)); - fieldTypeMapping.put(Short.class, new FieldTypeDefinition("SMALLINT", false)); - fieldTypeMapping.put(Byte.class, new FieldTypeDefinition("SMALLINT", false)); - fieldTypeMapping.put(java.math.BigInteger.class, new FieldTypeDefinition("NUMERIC", 38)); - fieldTypeMapping.put(java.math.BigDecimal.class, new FieldTypeDefinition("NUMERIC", 38).setLimits(38, -19, 19)); - fieldTypeMapping.put(Number.class, new FieldTypeDefinition("NUMERIC", 38).setLimits(38, -19, 19)); - fieldTypeMapping.put(Byte[].class, new FieldTypeDefinition("BINARY", false)); - fieldTypeMapping.put(Character[].class, new FieldTypeDefinition("LONGVARCHAR", false)); - fieldTypeMapping.put(byte[].class, new FieldTypeDefinition("BINARY", false)); - fieldTypeMapping.put(char[].class, new FieldTypeDefinition("LONGVARCHAR", false)); - fieldTypeMapping.put(java.sql.Blob.class, new FieldTypeDefinition("BINARY", false)); - fieldTypeMapping.put(java.sql.Clob.class, new FieldTypeDefinition("LONGVARCHAR", false)); - fieldTypeMapping.put(java.sql.Date.class, new FieldTypeDefinition("DATE", false)); - fieldTypeMapping.put(java.sql.Time.class, new FieldTypeDefinition("TIME", false)); - 
fieldTypeMapping.put(java.sql.Timestamp.class, new FieldTypeDefinition("TIMESTAMP", false)); - return fieldTypeMapping; - } - - public boolean isH2() { - return true; - } - - public boolean supportsForeignKeyConstraints() { - return true; - } - - public ValueReadQuery buildSelectQueryForNativeSequence(String seqName, Integer size) { - StringBuffer buff = new StringBuffer(); - buff.append("SELECT MAX(NEXT VALUE FOR "); - buff.append(getQualifiedSequenceName(seqName)); - buff.append(") FROM SYSTEM_RANGE(1, "); - buff.append(size); - buff.append(")"); - String sql = buff.toString(); - return new ValueReadQuery(sql); - } - - public boolean supportsNativeSequenceNumbers() { - return true; - } - - protected String getQualifiedSequenceName(String seqName) { - if (getTableQualifier().equals("")) { - return seqName; - } - return getTableQualifier() + "." + seqName; - } - - public boolean supportsSelectForUpdateNoWait() { - return true; - } - - protected ExpressionOperator todayOperator() { - return ExpressionOperator.simpleFunctionNoParentheses(ExpressionOperator.Today, "SYSDATE"); - } - - protected void initializePlatformOperators() { - super.initializePlatformOperators(); - addOperator(ExpressionOperator.simpleMath(ExpressionOperator.Concat, "||")); - } - - public boolean shouldUseJDBCOuterJoinSyntax() { - return false; - } - - public boolean supportsSequenceObjects() { - return true; - } - - public boolean supportsIdentity() { - return true; - } - - public ValueReadQuery buildSelectQueryForIdentity() { - return new ValueReadQuery("SELECT IDENTITY()"); - } - - public void printFieldIdentityClause(Writer writer) throws ValidationException { - try { - writer.write(" IDENTITY"); - } catch (final IOException ioException) { - throw ValidationException.fileError(ioException); - } - } - -} diff --git a/h2/src/tools/org/apache/openjpa/jdbc/sql/H2Dictionary.java.txt b/h2/src/tools/org/apache/openjpa/jdbc/sql/H2Dictionary.java.txt deleted file mode 100644 index 
34c87f0ec4..0000000000 --- a/h2/src/tools/org/apache/openjpa/jdbc/sql/H2Dictionary.java.txt +++ /dev/null @@ -1,202 +0,0 @@ -package org.apache.openjpa.jdbc.sql; - -import java.math.BigDecimal; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Types; -import java.util.Arrays; -import java.util.Locale; - -import org.apache.commons.lang.StringUtils; -import org.apache.openjpa.jdbc.kernel.exps.FilterValue; -import org.apache.openjpa.jdbc.schema.Column; -import org.apache.openjpa.jdbc.schema.PrimaryKey; -import org.apache.openjpa.jdbc.schema.Table; -import org.apache.openjpa.jdbc.schema.Unique; -import org.apache.openjpa.meta.JavaTypes; - -public class H2Dictionary extends DBDictionary { - - public H2Dictionary() { - platform = "H2"; - validationSQL = "CALL 1"; - closePoolSQL = "SHUTDOWN"; - - supportsAutoAssign = true; - lastGeneratedKeyQuery = "CALL IDENTITY()"; - autoAssignClause = "IDENTITY"; - autoAssignTypeName = "INTEGER"; - nextSequenceQuery = "CALL NEXT VALUE FOR {0}"; - - crossJoinClause = "CROSS JOIN"; - requiresConditionForCrossJoin = false; - stringLengthFunction = "LENGTH({0})"; - trimLeadingFunction = "LTRIM({0})"; - trimTrailingFunction = "RTRIM({0})"; - trimBothFunction = "TRIM({0})"; - - useSchemaName = true; - supportsSelectForUpdate = true; - supportsSelectStartIndex = true; - supportsSelectEndIndex = true; - rangePosition = RANGE_POST_LOCK; - supportsDeferredConstraints = false; - - blobTypeName = "BLOB"; - doubleTypeName = "DOUBLE"; - - supportsNullTableForGetPrimaryKeys = true; - supportsNullTableForGetIndexInfo = true; - - requiresCastForMathFunctions = false; - requiresCastForComparisons = false; - - reservedWordSet.addAll(Arrays.asList(new String[] { "CURRENT_TIMESTAMP", "CURRENT_TIME", "CURRENT_DATE", "CROSS", "DISTINCT", "EXCEPT", "EXISTS", "FROM", "FOR", "FALSE", - "FULL", "GROUP", "HAVING", "INNER", "INTERSECT", "IS", "JOIN", "LIKE", "MINUS", 
"NATURAL", "NOT", "NULL", "ON", "ORDER", "PRIMARY", "ROWNUM", "SELECT", "SYSDATE", - "SYSTIME", "SYSTIMESTAMP", "TODAY", "TRUE", "UNION", "WHERE" })); - } - - public int getJDBCType(int metaTypeCode, boolean lob) { - int type = super.getJDBCType(metaTypeCode, lob); - switch (type) { - case Types.BIGINT: - if (metaTypeCode == JavaTypes.BIGINTEGER) - return Types.NUMERIC; - break; - } - return type; - } - - public int getPreferredType(int type) { - return super.getPreferredType(type); - } - - public String[] getAddPrimaryKeySQL(PrimaryKey pk) { - return new String[0]; - } - - public String[] getDropPrimaryKeySQL(PrimaryKey pk) { - return new String[0]; - } - - public String[] getAddColumnSQL(Column column) { - return new String[] { "ALTER TABLE " + getFullName(column.getTable(), false) + " ADD COLUMN " + getDeclareColumnSQL(column, true) }; - } - - public String[] getCreateTableSQL(Table table) { - StringBuffer buf = new StringBuffer(); - buf.append("CREATE TABLE ").append(getFullName(table, false)).append(" ("); - - Column[] cols = table.getColumns(); - for (int i = 0; i < cols.length; i++) { - if (i > 0) - buf.append(", "); - buf.append(getDeclareColumnSQL(cols[i], false)); - } - - PrimaryKey pk = table.getPrimaryKey(); - String pkStr; - if (pk != null) { - pkStr = getPrimaryKeyConstraintSQL(pk); - if (!StringUtils.isEmpty(pkStr)) - buf.append(", ").append(pkStr); - } - - Unique[] uniques = table.getUniques(); - String uniqueStr; - for (int i = 0; i < uniques.length; i++) { - uniqueStr = getUniqueConstraintSQL(uniques[i]); - if (uniqueStr != null) - buf.append(", ").append(uniqueStr); - } - - buf.append(")"); - return new String[] { buf.toString() }; - } - - protected String getPrimaryKeyConstraintSQL(PrimaryKey pk) { - Column[] cols = pk.getColumns(); - if (cols.length == 1 && cols[0].isAutoAssigned()) - return null; - return super.getPrimaryKeyConstraintSQL(pk); - } - - public boolean isSystemIndex(String name, Table table) { - return 
name.toUpperCase(Locale.ENGLISH).startsWith("SYSTEM_"); - } - - protected String getSequencesSQL(String schemaName, String sequenceName) { - StringBuffer buf = new StringBuffer(); - buf.append("SELECT SEQUENCE_SCHEMA, SEQUENCE_NAME FROM ").append("INFORMATION_SCHEMA.SEQUENCES"); - if (schemaName != null || sequenceName != null) - buf.append(" WHERE "); - if (schemaName != null) { - buf.append("SEQUENCE_SCHEMA = ?"); - if (sequenceName != null) - buf.append(" AND "); - } - if (sequenceName != null) - buf.append("SEQUENCE_NAME = ?"); - return buf.toString(); - } - - protected SQLBuffer toOperation(String op, SQLBuffer selects, SQLBuffer from, SQLBuffer where, SQLBuffer group, SQLBuffer having, SQLBuffer order, boolean distinct, - boolean forUpdate, long start, long end) { - return super.toOperation(op, selects, from, where, group, having, order, distinct, forUpdate, start, end); - } - - public Column[] getColumns(DatabaseMetaData meta, String catalog, String schemaName, String tableName, String columnName, Connection conn) throws SQLException { - Column[] cols = super.getColumns(meta, catalog, schemaName, tableName, columnName, conn); - return cols; - } - - public void setDouble(PreparedStatement stat, int idx, double val, Column col) throws SQLException { - super.setDouble(stat, idx, val, col); - } - - public void setBigDecimal(PreparedStatement stat, int idx, BigDecimal val, Column col) throws SQLException { - super.setBigDecimal(stat, idx, val, col); - } - - protected void appendSelectRange(SQLBuffer buf, long start, long end) { - if (end != Long.MAX_VALUE) - buf.append(" LIMIT ").appendValue(end - start); - if (start != 0) - buf.append(" OFFSET ").appendValue(start); - } - - public void substring(SQLBuffer buf, FilterValue str, FilterValue start, FilterValue end) { - buf.append("SUBSTR("); - str.appendTo(buf); - buf.append(", ("); - start.appendTo(buf); - buf.append(" + 1)"); - if (end != null) { - buf.append(", ("); - end.appendTo(buf); - buf.append(" - "); - 
start.appendTo(buf); - buf.append(")"); - } - buf.append(")"); - } - - public void indexOf(SQLBuffer buf, FilterValue str, FilterValue find, FilterValue start) { - buf.append("(POSITION("); - find.appendTo(buf); - buf.append(" IN "); - if (start != null) - substring(buf, str, start, null); - else - str.appendTo(buf); - buf.append(") - 1"); - if (start != null) { - buf.append(" + "); - start.appendTo(buf); - } - buf.append(")"); - } - -} diff --git a/h2/src/tools/org/h2/android/H2AbortException.java b/h2/src/tools/org/h2/android/H2AbortException.java deleted file mode 100644 index 49e868f31b..0000000000 --- a/h2/src/tools/org/h2/android/H2AbortException.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown when the operation was aborted. - */ -public class H2AbortException extends H2Exception { - - private static final long serialVersionUID = 1L; - - H2AbortException() { - super(); - } - - H2AbortException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2Closable.java b/h2/src/tools/org/h2/android/H2Closable.java deleted file mode 100644 index abb8a75287..0000000000 --- a/h2/src/tools/org/h2/android/H2Closable.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import android.database.sqlite.SQLiteClosable; - -/** - * An object that can be closed. 
- */ -public abstract class H2Closable extends SQLiteClosable { - - /** - * TODO - */ - public void acquireReference() { - // TODO - } - - /** - * TODO - */ - public void releaseReference() { - // TODO - } - - /** - * TODO - */ - public void releaseReferenceFromContainer() { - // TODO - } - - /** - * TODO - */ - protected abstract void onAllReferencesReleased(); - - /** - * TODO - */ - protected void onAllReferencesReleasedFromContainer() { - // TODO - } - -} diff --git a/h2/src/tools/org/h2/android/H2ConstraintException.java b/h2/src/tools/org/h2/android/H2ConstraintException.java deleted file mode 100644 index 2cad9e1401..0000000000 --- a/h2/src/tools/org/h2/android/H2ConstraintException.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown when a constraint was violated. - */ -public class H2ConstraintException extends H2Exception { - - private static final long serialVersionUID = 1L; - - H2ConstraintException() { - super(); - } - - H2ConstraintException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2Cursor.java b/h2/src/tools/org/h2/android/H2Cursor.java deleted file mode 100644 index 4247bac07a..0000000000 --- a/h2/src/tools/org/h2/android/H2Cursor.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.android; - -import org.h2.result.ResultInterface; -import android.content.ContentResolver; -import android.database.AbstractWindowedCursor; -import android.database.CharArrayBuffer; -import android.database.ContentObserver; -import android.database.CursorWindow; -import android.database.DataSetObserver; -import android.net.Uri; -import android.os.Bundle; - -/** - * A cursor implementation. - */ -public class H2Cursor extends AbstractWindowedCursor { - - private H2Database database; - private ResultInterface result; - - H2Cursor(H2Database db, H2CursorDriver driver, String editTable, - H2Query query) { - this.database = db; - // TODO - } - - H2Cursor(ResultInterface result) { - this.result = result; - } - - public void close() { - result.close(); - } - - public void deactivate() { - // TODO - } - - public int getColumnIndex(String columnName) { - return 0; - } - - public String[] getColumnNames() { - return null; - } - - public int getCount() { - return result.getRowCount(); - } - - /** - * Get the database that created this cursor. - * - * @return the database - */ - public H2Database getDatabase() { - return database; - } - - /** - * The cursor moved to a new position. - * - * @param oldPosition the previous position - * @param newPosition the new position - * @return TODO - */ - public boolean onMove(int oldPosition, int newPosition) { - return false; - } - - public void registerDataSetObserver(DataSetObserver observer) { - // TODO - } - - public boolean requery() { - return false; - } - - /** - * Set the parameter values. 
- * - * @param selectionArgs the parameter values - */ - public void setSelectionArguments(String[] selectionArgs) { - // TODO - } - - /** - * TODO - * - * @param window the window - */ - public void setWindow(CursorWindow window) { - // TODO - } - - public boolean move(int offset) { - if (offset == 1) { - return result.next(); - } - throw H2Database.unsupported(); - } - - public void copyStringToBuffer(int columnIndex, CharArrayBuffer buffer) { - // TODO - - } - - public byte[] getBlob(int columnIndex) { - // TODO - return null; - } - - public int getColumnCount() { - // TODO - return 0; - } - - public int getColumnIndexOrThrow(String columnName) { - // TODO - return 0; - } - - public String getColumnName(int columnIndex) { - // TODO - return null; - } - - public double getDouble(int columnIndex) { - // TODO - return 0; - } - - public Bundle getExtras() { - // TODO - return null; - } - - public float getFloat(int columnIndex) { - // TODO - return 0; - } - - public int getInt(int columnIndex) { - return result.currentRow()[columnIndex].getInt(); - } - - public long getLong(int columnIndex) { - return result.currentRow()[columnIndex].getLong(); - } - - public int getPosition() { - // TODO - return 0; - } - - public short getShort(int columnIndex) { - // TODO - return 0; - } - - public String getString(int columnIndex) { - return result.currentRow()[columnIndex].getString(); - } - - public boolean getWantsAllOnMoveCalls() { - // TODO - return false; - } - - public boolean isAfterLast() { - // TODO - return false; - } - - public boolean isBeforeFirst() { - // TODO - return false; - } - - public boolean isClosed() { - // TODO - return false; - } - - public boolean isFirst() { - // TODO - return false; - } - - public boolean isLast() { - // TODO - return false; - } - - public boolean isNull(int columnIndex) { - // TODO - return false; - } - - public boolean moveToFirst() { - // TODO - return false; - } - - public boolean moveToLast() { - // TODO - return false; - } - - 
public boolean moveToNext() { - // TODO - return false; - } - - public boolean moveToPosition(int position) { - // TODO - return false; - } - - public boolean moveToPrevious() { - // TODO - return false; - } - - public void registerContentObserver(ContentObserver observer) { - // TODO - - } - - public Bundle respond(Bundle extras) { - // TODO - return null; - } - - public void setNotificationUri(ContentResolver cr, Uri uri) { - // TODO - - } - - public void unregisterContentObserver(ContentObserver observer) { - // TODO - - } - - public void unregisterDataSetObserver(DataSetObserver observer) { - // TODO - - } - -} diff --git a/h2/src/tools/org/h2/android/H2CursorDriver.java b/h2/src/tools/org/h2/android/H2CursorDriver.java deleted file mode 100644 index 904ba7b460..0000000000 --- a/h2/src/tools/org/h2/android/H2CursorDriver.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import android.database.Cursor; - -/** - * A factory and event listener for cursors. - */ -public interface H2CursorDriver { - - /** - * The cursor was closed. - */ - void cursorClosed(); - - /** - * The cursor was deactivated. - */ - void cursorDeactivated(); - - /** - * The query was re-run. - * - * @param cursor the old cursor - */ - void cursorRequeried(Cursor cursor); - - /** - * Execute the query. - * - * @param factory the cursor factory - * @param bindArgs the parameter values - * @return the cursor - */ - Cursor query(H2Database.CursorFactory factory, String[] bindArgs); - - /** - * Set the parameter values. - * - * @param bindArgs the parameter values. 
- */ - void setBindArguments(String[] bindArgs); - -} diff --git a/h2/src/tools/org/h2/android/H2Database.java b/h2/src/tools/org/h2/android/H2Database.java deleted file mode 100644 index 6916999de3..0000000000 --- a/h2/src/tools/org/h2/android/H2Database.java +++ /dev/null @@ -1,706 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import java.io.File; -import java.util.ArrayList; -import java.util.Locale; -import java.util.Map; -import org.h2.command.Prepared; -import org.h2.engine.ConnectionInfo; -import org.h2.engine.Database; -import org.h2.engine.Session; -import org.h2.expression.Parameter; -import org.h2.result.ResultInterface; -import org.h2.value.Value; -import org.h2.value.ValueInt; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueString; -import android.content.ContentValues; -import android.database.Cursor; - -/** - * This class represents a database connection. - */ -public class H2Database { - - /** - * When a conflict occurs, abort the current statement, but don't roll back - * the transaction. This is the default value. - */ - public static final int CONFLICT_ABORT = 2; - - /** - * When a conflict occurs, return SQLITE_CONSTRAINT, but don't roll back the - * transaction. - */ - public static final int CONFLICT_FAIL = 3; - - /** - * When a conflict occurs, continue, but don't modify the conflicting row. - */ - public static final int CONFLICT_IGNORE = 4; - - /** - * When a conflict occurs, do nothing. - */ - public static final int CONFLICT_NONE = 0; - - /** - * When a conflict occurs, the existing rows are replaced. - */ - public static final int CONFLICT_REPLACE = 5; - - /** - * When a conflict occurs, the transaction is rolled back. - */ - public static final int CONFLICT_ROLLBACK = 1; - - /** - * Create a new database if it doesn't exist. 
- */ - public static final int CREATE_IF_NECESSARY = 0x10000000; - - /** - * This flag has no effect. - */ - public static final int NO_LOCALIZED_COLLATORS = 0x10; - - /** - * Open the database in read-only mode. - */ - public static final int OPEN_READONLY = 1; - - /** - * Open the database in read-write mode (default). - */ - public static final int OPEN_READWRITE = 0; - - private final Session session; - private final CursorFactory factory; - - H2Database(Session session, CursorFactory factory) { - this.factory = factory; - this.session = session; - } - - /** - * Create a new in-memory database. - * - * @param factory the cursor factory - * @return a connection to this database - */ - public static H2Database create(H2Database.CursorFactory factory) { - ConnectionInfo ci = new ConnectionInfo("mem:"); - Database db = new Database(ci, null); - Session s = db.getSystemSession(); - return new H2Database(s, factory); - } - - /** - * Open a connection to the given database. - * - * @param path the database file name - * @param factory the cursor factory - * @param flags 0, or a combination of OPEN_READONLY and CREATE_IF_NECESSARY - * @return a connection to this database - */ - public static H2Database openDatabase(String path, - H2Database.CursorFactory factory, int flags) { - ConnectionInfo ci = new ConnectionInfo(path); - if ((flags & OPEN_READWRITE) != 0) { - // TODO readonly connections - } - if ((flags & CREATE_IF_NECESSARY) == 0) { - ci.setProperty("IFEXISTS", "TRUE"); - } - ci.setProperty("FILE_LOCK", "FS"); - Database db = new Database(ci, null); - Session s = db.getSystemSession(); - return new H2Database(s, factory); - } - - /** - * Open a connection to the given database. The database is created if it - * doesn't exist yet. 
- * - * @param file the database file - * @param factory the cursor factory - * @return a connection to this database - */ - public static H2Database openOrCreateDatabase(File file, - H2Database.CursorFactory factory) { - return openDatabase(file.getPath(), factory, CREATE_IF_NECESSARY); - } - - /** - * Open a connection to the given database. The database is created if it - * doesn't exist yet. - * - * @param path the database file name - * @param factory the cursor factory - * @return a connection to this database - */ - public static H2Database openOrCreateDatabase(String path, - H2Database.CursorFactory factory) { - return openDatabase(path, factory, CREATE_IF_NECESSARY); - } - - /** - * Start a transaction. - */ - public void beginTransaction() { - session.setAutoCommit(false); - } - - /** - * Start a transaction. - * - * @param transactionListener the transaction listener to use - */ - public void beginTransactionWithListener( - H2TransactionListener transactionListener) { - // TODO H2TransactionListener - session.setAutoCommit(false); - } - - /** - * Close the connection. - */ - public void close() { - session.close(); - } - - /** - * Prepare a statement. - * - * @param sql the statement - * @return the prepared statement - */ - public H2Statement compileStatement(String sql) { - return new H2Statement(session.prepare(sql)); - } - - /** - * Delete a number of rows in this database. - * - * @param table the table - * @param whereClause the condition - * @param whereArgs the parameter values - * @return the number of rows deleted - */ - public int delete(String table, String whereClause, String[] whereArgs) { - return 0; - } - - /** - * End the transaction. - */ - public void endTransaction() { - // TODO - } - - /** - * Execute the given statement. - * - * @param sql the statement - * @param bindArgs the parameter values - */ - public void execSQL(String sql, Object[] bindArgs) { - prepare(sql, bindArgs).update(); - } - - /** - * Execute the given statement. 
- * - * @param sql the statement - */ - public void execSQL(String sql) { - session.prepare(sql).update(); - } - - /** - * TODO - * - * @param tables the list of tables - * @return TODO - */ - public static String findEditTable(String tables) { - // TODO - return null; - } - - /** - * Get the maximum size of the database file in bytes. - * - * @return the maximum size - */ - public long getMaximumSize() { - return Long.MAX_VALUE; - } - - /** - * Get the page size of the database in bytes. - * - * @return the page size - */ - public long getPageSize() { - return 0; - } - - /** - * Get the name of the database file. - * - * @return the database file name - */ - public String getPath() { - return null; - } - - /** - * TODO - * - * @return TODO - */ - public Map getSyncedTables() { - return null; - } - - /** - * Get the database version. - * - * @return the database version - */ - public int getVersion() { - return 0; - } - - /** - * Check if there is an open transaction. - * - * @return true if there is - */ - public boolean inTransaction() { - return false; - } - - /** - * Insert a row. - * - * @param table the table - * @param nullColumnHack not used - * @param values the values - * @return TODO - */ - public long insert(String table, String nullColumnHack, ContentValues values) { - return 0; - } - - /** - * Try to insert a row. - * - * @param table the table - * @param nullColumnHack not used - * @param values the values - * @return TODO - */ - public long insertOrThrow(String table, String nullColumnHack, - ContentValues values) { - return 0; - } - - /** - * Try to insert a row, using the given conflict resolution option. 
- * - * @param table the table - * @param nullColumnHack not used - * @param initialValues the values - * @param conflictAlgorithm what conflict resolution to use - * @return TODO - */ - public long insertWithOnConflict(String table, String nullColumnHack, - ContentValues initialValues, int conflictAlgorithm) { - return 0; - } - - /** - * Check if the database is locked by the current thread. - * - * @return true if it is - */ - public boolean isDbLockedByCurrentThread() { - return false; - } - - /** - * Check if the database is locked by a different thread. - * - * @return true if it is - */ - public boolean isDbLockedByOtherThreads() { - return false; - } - - /** - * Check if the connection is open. - * - * @return true if it is - */ - public boolean isOpen() { - return false; - } - - /** - * Check if the connection is read-only. - * - * @return true if it is - */ - public boolean isReadOnly() { - return false; - } - - /** - * TODO - * - * @param table the table - * @param deletedTable TODO - */ - public void markTableSyncable(String table, String deletedTable) { - // TODO - } - - /** - * TODO - * - * @param table the table - * @param foreignKey the foreign key - * @param updateTable TODO - */ - public void markTableSyncable(String table, String foreignKey, - String updateTable) { - // TODO - } - - /** - * Check if an upgrade is required. - * - * @param newVersion the new version - * @return true if the current version doesn't match - */ - public boolean needUpgrade(int newVersion) { - return false; - } - - /** - * Execute the SELECT statement for the given parameters. 
- * - * @param distinct if only distinct rows should be returned - * @param table the table - * @param columns the list of columns - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the cursor - */ - public Cursor query(boolean distinct, String table, String[] columns, - String selection, String[] selectionArgs, String groupBy, - String having, String orderBy, String limit) { - return null; - } - - /** - * Execute the SELECT statement for the given parameters. - * - * @param table the table - * @param columns the list of columns - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @return the cursor - */ - public Cursor query(String table, String[] columns, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy) { - return null; - } - - /** - * Execute the SELECT statement for the given parameters. - * - * @param table the table - * @param columns the list of columns - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the cursor - */ - public Cursor query(String table, String[] columns, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy, String limit) { - return null; - } - - /** - * Execute the SELECT statement for the given parameters. 
- * - * @param cursorFactory the cursor factory to use - * @param distinct if only distinct rows should be returned - * @param table the table - * @param columns the list of columns - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the cursor - */ - public Cursor queryWithFactory(H2Database.CursorFactory cursorFactory, - boolean distinct, String table, String[] columns, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy, String limit) { - return null; - } - - /** - * Execute the query. - * - * @param sql the SQL statement - * @param selectionArgs the parameter values - * @return the cursor - */ - public Cursor rawQuery(String sql, String[] selectionArgs) { - Prepared prep = prepare(sql, selectionArgs); - ResultInterface result = prep.query(0); - return new H2Cursor(result); - } - - /** - * Execute the query using the given cursor factory. - * - * @param cursorFactory the cursor factory - * @param sql the SQL statement - * @param selectionArgs the parameter values - * @param editTable TODO - * @return the cursor - */ - public Cursor rawQueryWithFactory(H2Database.CursorFactory cursorFactory, - String sql, String[] selectionArgs, String editTable) { - return null; - } - - /** - * Try to release memory. - * - * @return TODO - */ - public static int releaseMemory() { - return 0; - } - - /** - * Replace an existing row in the database. - * - * @param table the table - * @param nullColumnHack ignored - * @param initialValues the values - * @return TODO - */ - public long replace(String table, String nullColumnHack, - ContentValues initialValues) { - return 0; - } - - /** - * Try to replace an existing row in the database. 
- * - * @param table the table - * @param nullColumnHack ignored - * @param initialValues the values - * @return TODO - */ - public long replaceOrThrow(String table, String nullColumnHack, - ContentValues initialValues) { - return 0; - } - - /** - * Set the locale. - * - * @param locale the new locale - */ - public void setLocale(Locale locale) { - // TODO - } - - /** - * Enable or disable thread safety. - * - * @param lockingEnabled the new value - */ - public void setLockingEnabled(boolean lockingEnabled) { - // TODO - } - - /** - * Set the maximum database file size. - * - * @param numBytes the file size in bytes - * @return the effective maximum size - */ - public long setMaximumSize(long numBytes) { - return 0; - } - - /** - * Set the database page size. The value can not be changed once the - * database exists. - * - * @param numBytes the page size - */ - public void setPageSize(long numBytes) { - // TODO - } - - /** - * Mark the transaction as completed successfully. - */ - public void setTransactionSuccessful() { - // TODO - } - - /** - * Update the database version. - * - * @param version the version - */ - public void setVersion(int version) { - // TODO - } - - /** - * Update one or multiple rows. - * - * @param table the table - * @param values the values - * @param whereClause the where condition - * @param whereArgs the parameter values - * @return the number of rows updated - */ - public int update(String table, ContentValues values, String whereClause, - String[] whereArgs) { - return 0; - } - - /** - * Update one or multiple rows. 
- * - * @param table the table - * @param values the values - * @param whereClause the where condition - * @param whereArgs the parameter values - * @param conflictAlgorithm the conflict resolution option - * @return the number of rows updated - */ - public int updateWithOnConflict(String table, ContentValues values, - String whereClause, String[] whereArgs, int conflictAlgorithm) { - return 0; - } - - /** - * TODO - * - * @deprecated - * @return TODO - */ - public boolean yieldIfContended() { - return false; - } - - /** - * Temporarily pause the transaction. - * - * @param sleepAfterYieldDelay TODO - * @return TODO - */ - public boolean yieldIfContendedSafely(long sleepAfterYieldDelay) { - return false; - } - - /** - * Temporarily pause the transaction. - * - * @return TODO - */ - public boolean yieldIfContendedSafely() { - return false; - } - - /** - * The cursor factory. - */ - public interface CursorFactory { - - /** - * Create a new cursor. - * - * @param db the connection - * @param masterQuery TODO - * @param editTable TODO - * @param query TODO - * @return the cursor - */ - Cursor newCursor(H2Database db, H2CursorDriver masterQuery, - String editTable, H2Query query); - } - - private Prepared prepare(String sql, Object[] args) { - Prepared prep = session.prepare(sql); - int len = args.length; - if (len > 0) { - ArrayList params = prep.getParameters(); - for (int i = 0; i < len; i++) { - Parameter p = params.get(i); - p.setValue(getValue(args[i])); - } - } - return prep; - } - - private Value getValue(Object o) { - if (o == null) { - return ValueNull.INSTANCE; - } else if (o instanceof String) { - return ValueString.get((String) o); - } else if (o instanceof Integer) { - return ValueInt.get((Integer) o); - } else if (o instanceof Long) { - return ValueLong.get((Integer) o); - } - return ValueString.get(o.toString()); - // TODO - } - - /** - * Create a new RuntimeException that says this feature is not supported. 
- * - * @return the runtime exception - */ - public static RuntimeException unsupported() { - // TODO - return new RuntimeException("Feature not supported"); - } - -} diff --git a/h2/src/tools/org/h2/android/H2DatabaseCorruptException.java b/h2/src/tools/org/h2/android/H2DatabaseCorruptException.java deleted file mode 100644 index 0bad1b297a..0000000000 --- a/h2/src/tools/org/h2/android/H2DatabaseCorruptException.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown when the database file is corrupt. - */ -public class H2DatabaseCorruptException extends H2Exception { - private static final long serialVersionUID = 1L; - - H2DatabaseCorruptException() { - super(); - } - - H2DatabaseCorruptException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2DiskIOException.java b/h2/src/tools/org/h2/android/H2DiskIOException.java deleted file mode 100644 index b1e83d91ef..0000000000 --- a/h2/src/tools/org/h2/android/H2DiskIOException.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown when there was a IO exception. - */ -public class H2DiskIOException extends H2Exception { - private static final long serialVersionUID = 1L; - - H2DiskIOException() { - super(); - } - - H2DiskIOException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2DoneException.java b/h2/src/tools/org/h2/android/H2DoneException.java deleted file mode 100644 index e296add603..0000000000 --- a/h2/src/tools/org/h2/android/H2DoneException.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown the requested data is not available, for example - * when calling simpleQueryForString() or simpleQueryForLong() for a statement - * that doesn't return a value. - */ -public class H2DoneException extends H2Exception { - private static final long serialVersionUID = 1L; - - H2DoneException() { - super(); - } - - H2DoneException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2Exception.java b/h2/src/tools/org/h2/android/H2Exception.java deleted file mode 100644 index 7116b87916..0000000000 --- a/h2/src/tools/org/h2/android/H2Exception.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import android.database.SQLException; - -/** - * This exception is thrown when there is a syntax error or similar problem. - */ -public class H2Exception extends SQLException { - private static final long serialVersionUID = 1L; - - public H2Exception() { - super(); - } - - public H2Exception(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2FullException.java b/h2/src/tools/org/h2/android/H2FullException.java deleted file mode 100644 index 7006deb36f..0000000000 --- a/h2/src/tools/org/h2/android/H2FullException.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * This exception is thrown when the database file is full and can't grow. 
- */ -public class H2FullException extends H2Exception { - private static final long serialVersionUID = 1L; - - H2FullException() { - super(); - } - - H2FullException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2MisuseException.java b/h2/src/tools/org/h2/android/H2MisuseException.java deleted file mode 100644 index c2fad112ce..0000000000 --- a/h2/src/tools/org/h2/android/H2MisuseException.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * TODO - */ -public class H2MisuseException extends H2Exception { - private static final long serialVersionUID = 1L; - - H2MisuseException() { - super(); - } - - H2MisuseException(String error) { - super(error); - } -} diff --git a/h2/src/tools/org/h2/android/H2OpenHelper.java b/h2/src/tools/org/h2/android/H2OpenHelper.java deleted file mode 100644 index 9fcb4789bd..0000000000 --- a/h2/src/tools/org/h2/android/H2OpenHelper.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import android.content.Context; - -/** - * This helper class helps creating and managing databases. A subclass typically - * implements the "on" methods. - */ -public abstract class H2OpenHelper { - - /** - * Construct a new instance. - * - * @param context the context to use - * @param name the name of the database (use null for an in-memory database) - * @param factory the cursor factory to use - * @param version the expected database version - */ - H2OpenHelper(Context context, String name, - H2Database.CursorFactory factory, int version) { - // TODO - } - - /** - * Close the connection. 
- */ - public synchronized void close() { - // TODO - } - - /** - * Open a read-only connection. - * - * @return a new read-only connection - */ - public synchronized H2Database getReadableDatabase() { - return null; - } - - /** - * Open a read-write connection. - * - * @return a new read-write connection - */ - public synchronized H2Database getWritableDatabase() { - return null; - } - - /** - * This method is called when the database did not already exist. - * - * @param db the connection - */ - public abstract void onCreate(H2Database db); - - /** - * This method is called after opening the database. - * - * @param db the connection - */ - public void onOpen(H2Database db) { - // TODO - } - - /** - * This method is called when the version stored in the database file does - * not match the expected value. - * - * @param db the connection - * @param oldVersion the current version - * @param newVersion the expected version - */ - public abstract void onUpgrade(H2Database db, int oldVersion, int newVersion); - -} diff --git a/h2/src/tools/org/h2/android/H2Program.java b/h2/src/tools/org/h2/android/H2Program.java deleted file mode 100644 index 681b7f32df..0000000000 --- a/h2/src/tools/org/h2/android/H2Program.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import org.h2.command.Prepared; -import org.h2.expression.Parameter; -import org.h2.value.ValueBytes; - -/** - * This class represents a prepared statement. - */ -public class H2Program extends H2Closable { - - /** - * The prepared statement - */ - protected final Prepared prepared; - - H2Program(Prepared prepared) { - this.prepared = prepared; - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) 
- * @param value the new value - */ - public void bindBlob(int index, byte[] value) { - getParameter(index).setValue(ValueBytes.get(value)); - - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindDouble(int index, double value) { - // TODO - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindLong(int index, long value) { - // TODO - } - - /** - * Set the specified parameter to NULL. - * - * @param index the parameter index (0, 1,...) - */ - public void bindNull(int index) { - // TODO - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindString(int index, String value) { - // TODO - } - - /** - * Reset all parameter values. - */ - public void clearBindings() { - // TODO - } - - /** - * Close the statement. - */ - public void close() { - // TODO - } - - /** - * Get the unique id of this statement. - * - * @return the id - */ - public final int getUniqueId() { - return 0; - } - - /** - * TODO - */ - protected void onAllReferencesReleased() { - // TODO - } - - /** - * TODO - */ - protected void onAllReferencesReleasedFromContainer() { - // TODO - } - - private Parameter getParameter(int index) { - return prepared.getParameters().get(index); - } - -} diff --git a/h2/src/tools/org/h2/android/H2Query.java b/h2/src/tools/org/h2/android/H2Query.java deleted file mode 100644 index 23c8bce5d0..0000000000 --- a/h2/src/tools/org/h2/android/H2Query.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.android; - -import org.h2.command.Prepared; - -/** - * This class represents a prepared statement that returns a result set. - */ -public class H2Query extends H2Program { - - H2Query(Prepared prepared) { - super(prepared); - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindDouble(int index, double value) { - // TODO - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindLong(int index, long value) { - // TODO - } - - /** - * Set the specified parameter to NULL. - * - * @param index the parameter index (0, 1,...) - */ - public void bindNull(int index) { - // TODO - } - - /** - * Set the specified parameter value. - * - * @param index the parameter index (0, 1,...) - * @param value the new value - */ - public void bindString(int index, String value) { - // TODO - } - - /** - * Close the statement. - */ - public void close() { - // TODO - } - -} diff --git a/h2/src/tools/org/h2/android/H2QueryBuilder.java b/h2/src/tools/org/h2/android/H2QueryBuilder.java deleted file mode 100644 index c00375a1ea..0000000000 --- a/h2/src/tools/org/h2/android/H2QueryBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -import java.util.Map; -import java.util.Set; -import org.h2.util.StringUtils; -import android.database.Cursor; - -/** - * This helper class is used to build SQL statements. - */ -public class H2QueryBuilder { - - private H2Database.CursorFactory factory; - private boolean distinct; - private String tables; - private Map columnMap; - - /** - * Append the column to the string builder. The columns are separated by - * comma. 
- * - * @param s the target string builder - * @param columns the columns - */ - static void appendColumns(StringBuilder s, String[] columns) { - for (int i = 0; i < columns.length; i++) { - if (i > 0) { - s.append(", "); - } - s.append(StringUtils.quoteIdentifier(columns[i])); - } - } - - /** - * Return the SELECT statement for the given parameters. - * - * @param distinct if only distinct rows should be returned - * @param tables the list of tables - * @param columns the list of columns - * @param where the where condition or null - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the query - */ - static String buildQueryString(boolean distinct, String tables, - String[] columns, String where, String groupBy, String having, - String orderBy, String limit) { - StringBuilder s = new StringBuilder(); - s.append("select "); - if (distinct) { - s.append("distinct "); - } - appendColumns(s, columns); - s.append(" from ").append(tables); - if (where != null) { - s.append(" where ").append(where); - } - if (groupBy != null) { - s.append(" group by ").append(groupBy); - } - if (having != null) { - s.append(" having ").append(having); - } - if (orderBy != null) { - s.append(" order by ").append(groupBy); - } - if (limit != null) { - s.append(" limit ").append(limit); - } - return s.toString(); - } - - /** - * Append the text to the where clause. - * - * @param inWhere the text to append - */ - void appendWhere(CharSequence inWhere) { - // TODO - } - - /** - * Append the text to the where clause. The text is escaped. - * - * @param inWhere the text to append - */ - void appendWhereEscapeString(String inWhere) { - // TODO how to escape - } - - /** - * Return the SELECT UNION statement for the given parameters. 
- * - * @param projectionIn TODO - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the query - */ - String buildQuery(String[] projectionIn, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy, String limit) { - return null; - } - - /** - * Return the SELECT UNION statement for the given parameters. - * - * @param subQueries TODO - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the query - */ - String buildUnionQuery(String[] subQueries, String orderBy, String limit) { - return null; - - } - - /** - * Return the SELECT UNION statement for the given parameters. - * - * @param typeDiscriminatorColumn TODO - * @param unionColumns TODO - * @param columnsPresentInTable TODO - * @param computedColumnsOffset TODO - * @param typeDiscriminatorValue TODO - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @return the query - */ - String buildUnionSubQuery(String typeDiscriminatorColumn, - String[] unionColumns, Set columnsPresentInTable, - int computedColumnsOffset, String typeDiscriminatorValue, - String selection, String[] selectionArgs, String groupBy, - String having) { - return null; - - } - - /** - * Get the list of tables. - * - * @return the list of tables - */ - String getTables() { - return tables; - } - - /** - * Run the query for the given parameters. 
- * - * @param db the connection - * @param projectionIn TODO - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @return the cursor - */ - Cursor query(H2Database db, String[] projectionIn, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy) { - return null; - } - - /** - * Run the query for the given parameters. - * - * @param db the connection - * @param projectionIn TODO - * @param selection TODO - * @param selectionArgs TODO - * @param groupBy the group by list or null - * @param having the having condition or null - * @param orderBy the order by list or null - * @param limit the limit or null - * @return the cursor - */ - Cursor query(H2Database db, String[] projectionIn, String selection, - String[] selectionArgs, String groupBy, String having, - String orderBy, String limit) { - return null; - } - - /** - * Set the cursor factory. - * - * @param factory the new value - */ - void setCursorFactory(H2Database.CursorFactory factory) { - this.factory = factory; - } - - /** - * Enable or disable the DISTINCT flag. - * - * @param distinct the new value - */ - void setDistinct(boolean distinct) { - this.distinct = distinct; - } - - /** - * TODO - * - * @param columnMap TODO - */ - void setProjectionMap(Map columnMap) { - this.columnMap = columnMap; - } - - /** - * Set the list of tables. - * - * @param inTables the list of tables - */ - void setTables(String inTables) { - this.tables = inTables; - } - -} diff --git a/h2/src/tools/org/h2/android/H2Statement.java b/h2/src/tools/org/h2/android/H2Statement.java deleted file mode 100644 index 583323c217..0000000000 --- a/h2/src/tools/org/h2/android/H2Statement.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.android; - -import org.h2.command.Prepared; -import org.h2.result.ResultInterface; -import org.h2.value.Value; -import org.h2.value.ValueNull; - -/** - * Represents a prepared statement. - */ -public class H2Statement extends H2Program { - - H2Statement(Prepared prepared) { - super(prepared); - } - - /** - * Execute the statement. - */ - public void execute() { - if (prepared.isQuery()) { - prepared.query(0); - } else { - prepared.update(); - } - } - - /** - * Execute the insert statement and return the id of the inserted row. - * - * @return the id of the inserted row - */ - public long executeInsert() { - return prepared.update(); - } - - /** - * Execute the query and return the value of the first column and row as a - * long. - * - * @return the value - */ - public long simpleQueryForLong() { - return simpleQuery().getLong(); - } - - /** - * Execute the query and return the value of the first column and row as a - * string. - * - * @return the value - */ - public String simpleQueryForString() { - return simpleQuery().getString(); - } - - private Value simpleQuery() { - ResultInterface result = prepared.query(1); - try { - if (result.next()) { - Value[] row = result.currentRow(); - if (row.length > 0) { - return row[0]; - } - } - } finally { - result.close(); - } - return ValueNull.INSTANCE; - } - -} diff --git a/h2/src/tools/org/h2/android/H2TransactionListener.java b/h2/src/tools/org/h2/android/H2TransactionListener.java deleted file mode 100644 index 2d0ec9308b..0000000000 --- a/h2/src/tools/org/h2/android/H2TransactionListener.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * A class that implements this interface can listen to transaction begin and - * end events. 
- */ -public interface H2TransactionListener { - - /** - * The transaction has been started. - */ - void onBegin(); - - /** - * The transaction will be committed. - */ - void onCommit(); - - /** - * The transaction will be rolled back. - */ - void onRollback(); -} diff --git a/h2/src/tools/org/h2/android/H2Utils.java b/h2/src/tools/org/h2/android/H2Utils.java deleted file mode 100644 index 11568f1a33..0000000000 --- a/h2/src/tools/org/h2/android/H2Utils.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.android; - -/** - * Utility methods. - */ -public class H2Utils { - - /** - * A replacement for Context.openOrCreateDatabase. - * - * @param name the database name - * @param mode the access mode - * @param factory the cursor factory to use - * @return the database connection - */ - public static H2Database openOrCreateDatabase(String name, int mode, - H2Database.CursorFactory factory) { - return H2Database.openOrCreateDatabase(name, factory); - } - -} diff --git a/h2/src/tools/org/h2/android/package.html b/h2/src/tools/org/h2/android/package.html deleted file mode 100644 index 049fc86021..0000000000 --- a/h2/src/tools/org/h2/android/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -This package contains the H2 Android database API. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/build/Build.java b/h2/src/tools/org/h2/build/Build.java index 01b2b5e1a0..7d23858a3b 100644 --- a/h2/src/tools/org/h2/build/Build.java +++ b/h2/src/tools/org/h2/build/Build.java @@ -1,21 +1,31 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build; import java.io.File; import java.io.IOException; +import java.lang.reflect.Method; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.Collections; import java.util.HashMap; -import java.util.TreeMap; import java.util.Map.Entry; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; -import org.h2.build.code.SwitchSource; import org.h2.build.doc.XMLParser; /** @@ -23,6 +33,38 @@ */ public class Build extends BuildBase { + private static final String ASM_VERSION = "8.0.1"; + + private static final String ARGS4J_VERSION = "2.33"; + + private static final String DERBY_VERSION = "10.14.2.0"; + + private static final String HSQLDB_VERSION = "2.5.1"; + + private static final String JACOCO_VERSION = "0.8.5"; + + private static final String JTS_VERSION = "1.17.0"; + + private static final String JUNIT_VERSION = "5.6.2"; + + private static final String LUCENE_VERSION = "8.5.2"; + + private static final String MYSQL_CONNECTOR_VERSION = "8.0.27"; + + private static final String OSGI_VERSION = "5.0.0"; + + private static final String PGJDBC_VERSION = 
"42.2.14"; + + private static final String PGJDBC_HASH = "45fa6eef266aa80024ef2ab3688d9faa38c642e5"; + + private static final String JAVAX_SERVLET_VERSION = "4.0.1"; + + private static final String JAKARTA_SERVLET_VERSION = "5.0.0"; + + private static final String SLF4J_VERSION = "1.7.30"; + + private static final String APIGUARDIAN_VERSION = "1.1.0"; + private boolean filesMissing; /** @@ -37,51 +79,55 @@ public static void main(String... args) { /** * Run the benchmarks. */ + @Description(summary = "Run the benchmarks.") public void benchmark() { - downloadUsingMaven("ext/hsqldb-2.3.2.jar", - "org/hsqldb", "hsqldb", "2.3.2", - "970fd7b8f635e2c19305160459649569655b843c"); - downloadUsingMaven("ext/derby-10.10.1.1.jar", - "org/apache/derby", "derby", "10.10.1.1", - "09f6f910f0373adc1b23c10f9b4bb151b7e7449f"); - downloadUsingMaven("ext/derbyclient-10.10.1.1.jar", - "org/apache/derby", "derbyclient", "10.10.1.1", - "42d5293b4ac5c5f082583c3564c10f78bd34a4cb"); - downloadUsingMaven("ext/derbynet-10.10.1.1.jar", - "org/apache/derby", "derbynet", "10.10.1.1", - "912b08dca73663d4665e09cd317be1218412d93e"); - downloadUsingMaven("ext/postgresql-8.3-603.jdbc3.jar", - "postgresql", "postgresql", "8.3-603.jdbc3", - "33d531c3c53055ddcbea3d88bfa093466ffef924"); - downloadUsingMaven("ext/mysql-connector-java-5.1.6.jar", - "mysql", "mysql-connector-java", "5.1.6", - "380ef5226de2c85ff3b38cbfefeea881c5fce09d"); + downloadUsingMaven("ext/hsqldb-" + HSQLDB_VERSION + ".jar", + "org.hsqldb", "hsqldb", HSQLDB_VERSION, + "b1f720a63a8756867895cc22dd74b51fb70e90ac"); + downloadUsingMaven("ext/derby-" + DERBY_VERSION + ".jar", + "org.apache.derby", "derby", DERBY_VERSION, + "7efad40ef52fbb1f08142f07a83b42d29e47d8ce"); + downloadUsingMaven("ext/derbyclient-" + DERBY_VERSION + ".jar", + "org.apache.derby", "derbyclient", DERBY_VERSION, + "fdd338d43e09bf7cd16f5523a0f717e5ef79a1a8"); + downloadUsingMaven("ext/derbynet-" + DERBY_VERSION + ".jar", + "org.apache.derby", "derbynet", DERBY_VERSION, 
+ "d03edf879317c7102884c4689e03a4d1a5f84126"); +// downloadUsingMaven("ext/derbyshared-" + DERBY_VERSION + ".jar", +// "org.apache.derby", "derbyshared", DERBY_VERSION, +// "ff2dfb3e2a92d593cf111baad242d156947abbc1"); + downloadUsingMaven("ext/postgresql-" + PGJDBC_VERSION + ".jar", + "org.postgresql", "postgresql", PGJDBC_VERSION, PGJDBC_HASH); + downloadUsingMaven("ext/mysql-connector-java-" + MYSQL_CONNECTOR_VERSION + ".jar", + "mysql", "mysql-connector-java", MYSQL_CONNECTOR_VERSION, + "f1da9f10a3de6348725a413304aab6d0aa04f923"); compile(); String cp = "temp" + File.pathSeparator + "bin/h2" + getJarSuffix() + - File.pathSeparator + "ext/hsqldb.jar" + - File.pathSeparator + "ext/hsqldb-2.3.2.jar" + - File.pathSeparator + "ext/derby-10.10.1.1.jar" + - File.pathSeparator + "ext/derbyclient-10.10.1.1.jar" + - File.pathSeparator + "ext/derbynet-10.10.1.1.jar" + - File.pathSeparator + "ext/postgresql-8.3-603.jdbc3.jar" + - File.pathSeparator + "ext/mysql-connector-java-5.1.6.jar"; + File.pathSeparator + "ext/hsqldb-" + HSQLDB_VERSION + ".jar" + + File.pathSeparator + "ext/derby-" + DERBY_VERSION + ".jar" + + File.pathSeparator + "ext/derbyclient-" + DERBY_VERSION + ".jar" + + File.pathSeparator + "ext/derbynet-" + DERBY_VERSION + ".jar" + +// File.pathSeparator + "ext/derbyshared-" + DERBY_VERSION + ".jar" + + File.pathSeparator + "ext/postgresql-" + PGJDBC_VERSION + ".jar" + + File.pathSeparator + "ext/mysql-connector-java-" + MYSQL_CONNECTOR_VERSION + ".jar"; StringList args = args("-Xmx128m", - "-cp", cp, "org.h2.test.bench.TestPerformance"); - exec("java", args.plus("-init", "-db", "1")); - exec("java", args.plus("-db", "2")); - exec("java", args.plus("-db", "3", "-out", "pe.html")); - exec("java", args.plus("-init", "-db", "4")); - exec("java", args.plus("-db", "5", "-exit")); - exec("java", args.plus("-db", "6")); - exec("java", args.plus("-db", "7")); - exec("java", args.plus("-db", "8", "-out", "ps.html")); + "-cp", cp, "-Dderby.system.durability=test", 
"org.h2.test.bench.TestPerformance"); + execJava(args.plus("-init", "-db", "1")); + execJava(args.plus("-db", "2")); + execJava(args.plus("-db", "3", "-out", "pe.html")); + execJava(args.plus("-init", "-db", "4")); + execJava(args.plus("-db", "5", "-exit")); + execJava(args.plus("-db", "6")); + execJava(args.plus("-db", "7")); + execJava(args.plus("-db", "8", "-out", "ps.html")); } /** * Clean all jar files, classes, and generated documentation. */ + @Description(summary = "Clean all jar files, classes, and generated documentation.") public void clean() { delete("temp"); delete("docs"); @@ -91,83 +137,189 @@ public void clean() { } /** - * Compile all classes + * Compile all classes. */ + @Description(summary = "Compile all classes.") public void compile() { - compile(true, false, false); + clean(); + mkdir("temp"); + download(); + String classpath = "temp" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + File.pathSeparator + javaToolsJar; + FileList files = files("src/main"); + StringList args = args("-Xlint:unchecked", "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); + String version = getTargetJavaVersion(); + if (version != null) { + args = args.plus("-target", version, "-source", version); + } + javac(args, files); + + files = files("src/main/META-INF/services"); + 
copy("temp", files, "src/main"); + + files = files("src/test"); + files.addAll(files("src/tools")); + // we don't use Junit for this test framework + files = files.exclude("src/test/org/h2/test/TestAllJunit.java"); + args = args("-Xlint:unchecked", "-Xlint:deprecation", + "-d", "temp", "-sourcepath", "src/test" + File.pathSeparator + "src/tools", + "-classpath", classpath); + if (version != null) { + args = args.plus("-target", version, "-source", version); + } + javac(args, files); + + files = files("src/test"). + exclude("*.java"). + exclude("*/package.html"); + copy("temp", files, "src/test"); + + javadoc("-sourcepath", "src/main", + "-d", "docs/javadoc", + "org.h2.tools", "org.h2.jmx", + "-classpath", + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar"); + + files = files("src/main"). + exclude("*.MF"). + exclude("*.java"). + exclude("*/package.html"). + exclude("*/java.sql.Driver"). + exclude("*.DS_Store"); + zip("temp/org/h2/util/data.zip", files, "src/main", true, false); } private void compileTools() { + mkdir("temp"); FileList files = files("src/tools").keep("src/tools/org/h2/build/*"); StringList args = args("-d", "temp", "-sourcepath", "src/tools" + File.pathSeparator + "src/test" + File.pathSeparator + "src/main"); - mkdir("temp"); + String version = getTargetJavaVersion(); + if (version != null) { + args = args.plus("-target", version, "-source", version); + } javac(args, files); } /** - * Run the Emma code coverage. + * Run the JaCoco code coverage. 
*/ + @Description(summary = "Run the JaCoco code coverage.") public void coverage() { + compile(); downloadTest(); - downloadUsingMaven("ext/emma-2.0.5312.jar", - "emma", "emma", "2.0.5312", - "30a40933caf67d88d9e75957950ccf353b181ab7"); - String cp = "temp" + File.pathSeparator + "bin" + - File.pathSeparator + "ext/emma-2.0.5312.jar" + - File.pathSeparator + "ext/postgresql-8.3-603.jdbc3.jar" + - File.pathSeparator + "ext/servlet-api-3.0.1.jar" + - File.pathSeparator + "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/h2mig_pagestore_addon.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-1.13.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/slf4j-nop-1.6.0.jar" + - File.pathSeparator + System.getProperty("java.home") + - "/../lib/tools.jar"; - // -XX:-UseSplitVerifier is for Java 7 compatibility - exec("java", args( + downloadUsingMaven("ext/org.jacoco.agent-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.agent", JACOCO_VERSION, + "0fd03a8ab78af3dd03b27647067efa72690d4922"); + URI uri = URI.create("jar:" + + Paths.get("ext/org.jacoco.agent-" + JACOCO_VERSION + ".jar").toAbsolutePath().toUri()); + try (FileSystem fs = FileSystems.newFileSystem(uri, Collections.emptyMap())) { + Files.copy(fs.getPath("jacocoagent.jar"), Paths.get("ext/jacocoagent.jar"), + StandardCopyOption.REPLACE_EXISTING); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + downloadUsingMaven("ext/org.jacoco.cli-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.cli", JACOCO_VERSION, + "30155fcd37821879264365693055290dbfe984bb"); + downloadUsingMaven("ext/org.jacoco.core-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.core", JACOCO_VERSION, + "1ac96769aa83e5492d1a1a694774f6baec4eb704"); + downloadUsingMaven("ext/org.jacoco.report-" + JACOCO_VERSION + ".jar", + "org.jacoco", "org.jacoco.report", 
JACOCO_VERSION, + "421e4aab2aaa809d1e66a96feb11f61ea698da19"); + downloadUsingMaven("ext/asm-commons-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm-commons", ASM_VERSION, + "019c7ba355f0737815205518e332a8dc08b417c6"); + downloadUsingMaven("ext/asm-tree-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm-tree", ASM_VERSION, + "dfcad5abbcff36f8bdad5647fe6f4972e958ad59"); + downloadUsingMaven("ext/args4j-" + ARGS4J_VERSION + ".jar", + "args4j", "args4j", ARGS4J_VERSION, + "bd87a75374a6d6523de82fef51fc3cfe9baf9fc9"); + + delete(files("coverage")); + // Use own copy + copy("coverage/bin", files("temp"), "temp"); + // JaCoCo does not support multiple versions of the same classes + delete(files("coverage/bin/META-INF/versions")); + String cp = "coverage/bin" + + File.pathSeparator + "ext/postgresql-" + PGJDBC_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-nop-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + javaToolsJar; + // Run tests + execJava(args( "-Xmx128m", - "-XX:-UseSplitVerifier", - "-cp", cp, "emma", "run", - "-cp", "temp", - "-sp", "src/main", - "-r", "html,txt", - "-ix", "-org.h2.test.*,-org.h2.dev.*," + - "-org.h2.jaqu.*,-org.h2.mode.*,-org.h2.server.pg.*", - "org.h2.test.TestAll")); + "-javaagent:ext/jacocoagent.jar=destfile=coverage/jacoco.exec," + + 
"excludes=org.h2.test.*:org.h2.tools.*:org.h2.sample.*", + "-cp", cp, + "org.h2.test.TestAll", "codeCoverage")); + // Remove classes that we don't want to include in report + delete(files("coverage/bin/org/h2/test")); + delete(files("coverage/bin/org/h2/tools")); + delete(files("coverage/bin/org/h2/sample")); + // Generate report + execJava(args("-cp", + "ext/org.jacoco.cli-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/org.jacoco.core-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/org.jacoco.report-" + JACOCO_VERSION + ".jar" + File.pathSeparator + + "ext/asm-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/asm-commons-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/asm-tree-" + ASM_VERSION + ".jar" + File.pathSeparator + + "ext/args4j-" + ARGS4J_VERSION + ".jar", + "org.jacoco.cli.internal.Main", "report", "coverage/jacoco.exec", + "--classfiles", "coverage/bin", + "--html", "coverage/report", "--sourcefiles", "h2/src/main")); + try { + tryOpenCoverageInBrowser(); + } catch (Throwable e) { + e.printStackTrace(); + } } - /** - * Switch the source code to the current JDK. - */ - public void switchSource() { - switchSource(true); + private static void tryOpenCoverageInBrowser() throws Exception { + Class desktop = Class.forName("java.awt.Desktop"); + Method m = desktop.getMethod("getDesktop"); + Object d = m.invoke(null); + m = d.getClass().getMethod("open", File.class); + m.invoke(d, new File("coverage/report/index.html")); } - private static void switchSource(boolean enableCheck) { - try { - String version = System.getProperty("version"); - String check = enableCheck ? 
"+CHECK" : "-CHECK"; - if (version == null) { - SwitchSource.main("-dir", "src", "-auto", check); - } else { - SwitchSource.main("-dir", "src", "-version", version, check); - } - } catch (IOException e) { - throw new RuntimeException(e); - } + private static String getTargetJavaVersion() { + return System.getProperty("version"); } private void compileMVStore(boolean debugInfo) { - switchSource(debugInfo); clean(); mkdir("temp"); - String classpath = "temp"; - FileList files; - files = files("src/main/org/h2/mvstore"). + String classpath = "temp" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar"; + FileList files = files("src/main/org/h2/mvstore"). exclude("src/main/org/h2/mvstore/db/*"); StringList args = args(); if (debugInfo) { @@ -177,76 +329,25 @@ private void compileMVStore(boolean debugInfo) { args = args.plus("-Xlint:unchecked", "-g:none", "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); } - javac(args, files); - } - - private void compile(boolean debugInfo, boolean clientOnly, - boolean basicResourcesOnly) { - switchSource(debugInfo); - clean(); - mkdir("temp"); - download(); - String classpath = "temp" + - File.pathSeparator + "ext/servlet-api-3.0.1.jar" + - File.pathSeparator + "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-1.13.jar" + - File.pathSeparator + System.getProperty("java.home") + "/../lib/tools.jar"; - FileList files; - if (clientOnly) { - files = files("src/main/org/h2/Driver.java"); - files.addAll(files("src/main/org/h2/jdbc")); - files.addAll(files("src/main/org/h2/jdbcx")); - } else { - files = files("src/main"); - } - StringList args = args(); - if (System.getProperty("version") != null) { - String bcp = System.getProperty("bcp"); - // /System/Library/Frameworks/JavaVM.framework/ - // Versions/1.4/Classes/classes.jar - args = 
args.plus("-source", "1.5", "-target", "jsr14", "-bootclasspath", bcp); - } - if (debugInfo) { - args = args.plus("-Xlint:unchecked", - "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); - } else { - args = args.plus("-Xlint:unchecked", "-g:none", - "-d", "temp", "-sourcepath", "src/main", "-classpath", classpath); + String version = getTargetJavaVersion(); + if (version != null) { + args = args.plus("-target", version, "-source", version); } javac(args, files); - - files = files("src/main/META-INF/services"); - copy("temp", files, "src/main"); - - if (!clientOnly) { - files = files("src/test"); - files.addAll(files("src/tools")); - args = args("-Xlint:unchecked", "-Xlint:deprecation", - "-d", "temp", "-sourcepath", "src/test" + File.pathSeparator + "src/tools", - "-classpath", classpath); - javac(args, files); - files = files("src/test"). - exclude("*.java"). - exclude("*/package.html"); - copy("temp", files, "src/test"); - } - resources(clientOnly, basicResourcesOnly); } private static void filter(String source, String target, String old, String replacement) { - String text = new String(readFile(new File(source))); + String text = new String(readFile(Paths.get(source))); text = replaceAll(text, old, replacement); - writeFile(new File(target), text.getBytes()); + writeFile(Paths.get(target), text.getBytes()); } /** * Create the documentation from the documentation sources. API Javadocs are * created as well. */ + @Description(summary = "Create the documentation from sources (incl. 
API Javadocs).") public void docs() { javadoc(); copy("docs", files("src/docsrc/index.html"), "src/docsrc"); @@ -254,8 +355,6 @@ public void docs() { java("org.h2.build.code.CheckJavadoc", null); java("org.h2.build.code.CheckTextFiles", null); java("org.h2.build.doc.GenerateDoc", null); - java("org.h2.build.doc.GenerateHelp", null); - java("org.h2.build.i18n.PrepareTranslation", null); java("org.h2.build.indexer.Indexer", null); java("org.h2.build.doc.MergeDocs", null); java("org.h2.build.doc.WebSite", null); @@ -270,36 +369,55 @@ public void docs() { * Download all required jar files. Actually those are only compile time * dependencies. The database can be used without any dependencies. */ + @Description(summary = "Download all required jar files.") public void download() { downloadOrVerify(false); } private void downloadOrVerify(boolean offline) { - downloadOrVerify("ext/servlet-api-3.0.1.jar", - "javax/servlet", "javax.servlet-api", "3.0.1", - "6bf0ebb7efd993e222fc1112377b5e92a13b38dd", offline); - downloadOrVerify("ext/lucene-core-3.0.2.jar", - "org/apache/lucene", "lucene-core", "3.0.2", - "c2b48995ab855c1b9ea13867a0f976c994e0105d", offline); - downloadOrVerify("ext/slf4j-api-1.6.0.jar", - "org/slf4j", "slf4j-api", "1.6.0", - "b353147a7d51fcfcd818d8aa6784839783db0915", offline); - downloadOrVerify("ext/org.osgi.core-4.2.0.jar", - "org/osgi", "org.osgi.core", "4.2.0", - "66ab449ff3aa5c4adfc82c89025cc983b422eb95", offline); - downloadOrVerify("ext/org.osgi.enterprise-4.2.0.jar", - "org/osgi", "org.osgi.enterprise", "4.2.0", - "8634dcb0fc62196e820ed0f1062993c377f74972", offline); - downloadOrVerify("ext/jts-1.13.jar", - "com/vividsolutions", "jts", "1.13", - "3ccfb9b60f04d71add996a666ceb8902904fd805", offline); + downloadOrVerify("ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar", + "javax/servlet", "javax.servlet-api", JAVAX_SERVLET_VERSION, + "a27082684a2ff0bf397666c3943496c44541d1ca", offline); + downloadOrVerify("ext/jakarta.servlet-api-" + 
JAKARTA_SERVLET_VERSION + ".jar", + "jakarta/servlet", "jakarta.servlet-api", JAKARTA_SERVLET_VERSION, + "2e6b8ccde55522c879434ddec3714683ccae6867", offline); + downloadOrVerify("ext/lucene-core-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-core", LUCENE_VERSION, + "b275ca5f39b6dd45d5a7ecb49da65205ad2732ca", offline); + downloadOrVerify("ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-analyzers-common", LUCENE_VERSION, + "2c4a7e8583e2061aa35db85705393b8b6e67a679", offline); + downloadOrVerify("ext/lucene-queryparser-" + LUCENE_VERSION + ".jar", + "org/apache/lucene", "lucene-queryparser", LUCENE_VERSION, + "96a104be314d0adaac163635610da8dfc5e4166e", offline); + downloadOrVerify("ext/slf4j-api-" + SLF4J_VERSION + ".jar", + "org/slf4j", "slf4j-api", SLF4J_VERSION, + "b5a4b6d16ab13e34a88fae84c35cd5d68cac922c", offline); + downloadOrVerify("ext/org.osgi.core-" + OSGI_VERSION + ".jar", + "org/osgi", "org.osgi.core", OSGI_VERSION, + "6e5e8cd3c9059c08e1085540442a490b59a7783c", offline); + downloadOrVerify("ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar", + "org/osgi", "org.osgi.enterprise", OSGI_VERSION, + "4f6e081c38b951204e2b6a60d33ab0a90bfa1ad3", offline); + downloadOrVerify("ext/jts-core-" + JTS_VERSION + ".jar", + "org/locationtech/jts", "jts-core", JTS_VERSION, + "7e1973b5babdd98734b1ab903fc1155714402eec", offline); + downloadOrVerify("ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar", + "org.junit.jupiter", "junit-jupiter-api", JUNIT_VERSION, + "c9ba885abfe975cda123bf6f8f0a69a1b46956d0", offline); + downloadUsingMaven("ext/asm-" + ASM_VERSION + ".jar", + "org.ow2.asm", "asm", ASM_VERSION, + "3f5199523fb95304b44563f5d56d9f5a07270669"); + downloadUsingMaven("ext/apiguardian-" + APIGUARDIAN_VERSION + ".jar", + "org.apiguardian", "apiguardian-api", APIGUARDIAN_VERSION, + "fc9dff4bb36d627bdc553de77e1f17efd790876c"); } private void downloadOrVerify(String target, String group, String artifact, String version, String 
sha1Checksum, boolean offline) { if (offline) { - File targetFile = new File(target); - if (targetFile.exists()) { + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } println("Missing file: " + target); @@ -310,27 +428,21 @@ private void downloadOrVerify(String target, String group, String artifact, } private void downloadTest() { - // for TestUpgrade - download("ext/h2mig_pagestore_addon.jar", - "http://h2database.com/h2mig_pagestore_addon.jar", - "6dfafe1b86959c3ba4f7cf03e99535e8b9719965"); // for TestOldVersion downloadUsingMaven("ext/h2-1.2.127.jar", "com/h2database", "h2", "1.2.127", "056e784c7cf009483366ab9cd8d21d02fe47031a"); // for TestPgServer - - downloadUsingMaven("ext/postgresql-8.3-603.jdbc3.jar", - "postgresql", "postgresql", "8.3-603.jdbc3", - "33d531c3c53055ddcbea3d88bfa093466ffef924"); + downloadUsingMaven("ext/postgresql-" + PGJDBC_VERSION + ".jar", + "org.postgresql", "postgresql", PGJDBC_VERSION, PGJDBC_HASH); // for TestTraceSystem - downloadUsingMaven("ext/slf4j-nop-1.6.0.jar", - "org/slf4j", "slf4j-nop", "1.6.0", - "4da67bb4a6eea5dc273f99c50ad2333eadb46f86"); + downloadUsingMaven("ext/slf4j-nop-" + SLF4J_VERSION + ".jar", + "org/slf4j", "slf4j-nop", SLF4J_VERSION, + "55d4c73dd343efebd236abfeb367c9ef41d55063"); } private static String getVersion() { - return getStaticValue("org.h2.engine.Constants", "getVersion"); + return getStaticField("org.h2.engine.Constants", "VERSION"); } private static String getJarSuffix() { @@ -340,15 +452,27 @@ private static String getJarSuffix() { /** * Create the h2.zip file and the Windows installer. 
*/ + @Description(summary = "Create the h2.zip file and the Windows installer.") public void installer() { delete(files("bin").keep("*.jar")); jar(); docs(); try { - exec("soffice", args("-invisible", "macro:///Standard.Module1.H2Pdf")); + exec("soffice", args("--invisible", "macro:///Standard.Module1.H2Pdf")); copy("docs", files("../h2web/h2.pdf"), "../h2web"); } catch (Exception e) { - print("OpenOffice is not available: " + e); + println("OpenOffice / LibreOffice is not available or macros H2Pdf is not installed:"); + println(e.toString()); + println("********************************************************************************"); + println("Install and run LibreOffice or OpenOffice."); + println("Open Tools - Macros - Organize Macros - LibreOffice Basic..."); + println("Navigate to My Macros / Standard / Module1 and press Edit button."); + println("Put content of h2/src/installer/openoffice.txt here."); + println("Edit BaseDir variable value:"); + + println(" BaseDir = \"" + Paths.get(System.getProperty("user.dir")).getParent().toUri() + '"'); + println("Close office application and try to build installer again."); + println("********************************************************************************"); } delete("docs/html/onePage.html"); FileList files = files("../h2").keep("../h2/build.*"); @@ -359,51 +483,74 @@ public void installer() { zip("../h2web/h2.zip", files, "../", false, false); boolean installer = false; try { - exec("makensis", args("/v2", "src/installer/h2.nsi")); + exec("makensis", args(isWindows() ? 
"/V2" : "-V2", "src/installer/h2.nsi")); installer = true; } catch (Exception e) { - print("NSIS is not available: " + e); + println("NSIS is not available: " + e); } String buildDate = getStaticField("org.h2.engine.Constants", "BUILD_DATE"); - byte[] data = readFile(new File("../h2web/h2.zip")); + byte[] data = readFile(Paths.get("../h2web/h2.zip")); String sha1Zip = getSHA1(data), sha1Exe = null; - writeFile(new File("../h2web/h2-" + buildDate + ".zip"), data); + writeFile(Paths.get("../h2web/h2-" + buildDate + ".zip"), data); if (installer) { - data = readFile(new File("../h2web/h2-setup.exe")); + data = readFile(Paths.get("../h2web/h2-setup.exe")); sha1Exe = getSHA1(data); - writeFile(new File("../h2web/h2-setup-" + buildDate + ".exe"), data); + writeFile(Paths.get("../h2web/h2-setup-" + buildDate + ".exe"), data); } updateChecksum("../h2web/html/download.html", sha1Zip, sha1Exe); } - private static void updateChecksum(String fileName, String sha1Zip, - String sha1Exe) { - String checksums = new String(readFile(new File(fileName))); + private static void updateChecksum(String fileName, String sha1Zip, String sha1Exe) { + Path file = Paths.get(fileName); + String checksums = new String(readFile(file)); checksums = replaceAll(checksums, "", "(SHA1 checksum: " + sha1Zip + ")"); if (sha1Exe != null) { checksums = replaceAll(checksums, "", "(SHA1 checksum: " + sha1Exe + ")"); } - writeFile(new File(fileName), checksums.getBytes()); + writeFile(file, checksums.getBytes()); + } + + private static String canonicalPath(Path file) { + try { + return file.toRealPath().toString(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private FileList excludeTestMetaInfFiles(FileList files) { + FileList testMetaInfFiles = files("src/test/META-INF"); + int basePathLength = canonicalPath(Paths.get("src/test")).length(); + for (Path file : testMetaInfFiles) { + files = files.exclude(canonicalPath(file).substring(basePathLength + 1)); + } + return files; + } 
+ + /** + * Add META-INF/versions for Java 9+. + */ + private void addVersions() { + copy("temp/META-INF/versions/9", files("src/java9/precompiled"), "src/java9/precompiled"); + copy("temp/META-INF/versions/10", files("src/java10/precompiled"), "src/java10/precompiled"); } /** * Create the regular h2.jar file. */ + @Description(summary = "Create the regular h2.jar file.") public void jar() { compile(); - manifest("H2 Database Engine", "org.h2.tools.Console"); + addVersions(); + manifest("src/main/META-INF/MANIFEST.MF"); FileList files = files("temp"). - exclude("temp/android/*"). - exclude("temp/org/h2/android/*"). exclude("temp/org/h2/build/*"). exclude("temp/org/h2/dev/*"). exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/jaqu/*"). exclude("temp/org/h2/java/*"). exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/mode/*"). exclude("temp/org/h2/samples/*"). exclude("temp/org/h2/server/ftp/*"). exclude("temp/org/h2/test/*"). @@ -411,209 +558,127 @@ public void jar() { exclude("*.sh"). exclude("*.txt"). exclude("*.DS_Store"); + files = excludeTestMetaInfFiles(files); jar("bin/h2" + getJarSuffix(), files, "temp"); filter("src/installer/h2.sh", "bin/h2.sh", "h2.jar", "h2" + getJarSuffix()); filter("src/installer/h2.bat", "bin/h2.bat", "h2.jar", "h2" + getJarSuffix()); filter("src/installer/h2w.bat", "bin/h2w.bat", "h2.jar", "h2" + getJarSuffix()); } - /** - * Create the file h2android.jar. This only contains the embedded database, - * plus the H2 Android API. Debug information is disabled. - */ - public void jarAndroid() { - compile(false, false, true); - FileList files = files("temp"). - exclude("temp/org/h2/bnf/*"). - exclude("temp/org/h2/build/*"). - exclude("temp/org/h2/dev/*"). - exclude("temp/org/h2/fulltext/*"). - exclude("temp/org/h2/jaqu/*"). - exclude("temp/org/h2/java/*"). - exclude("temp/org/h2/jdbcx/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/jmx/*"). - exclude("temp/org/h2/mode/*"). - exclude("temp/org/h2/samples/*"). 
- exclude("temp/org/h2/server/*"). - exclude("temp/org/h2/test/*"). - exclude("temp/org/h2/tools/*"). - exclude("*.bat"). - exclude("*.sh"). - exclude("*.txt"). - exclude("*.DS_Store"); - files.add(new File("temp/org/h2/tools/DeleteDbFiles.class")); - files.add(new File("temp/org/h2/tools/CompressTool.class")); - jar("bin/h2android" + getJarSuffix(), files, "temp"); - } - - /** - * Create the h2client.jar. This only contains the remote JDBC - * implementation. - */ - public void jarClient() { - compile(true, true, false); - FileList files = files("temp"). - exclude("temp/org/h2/build/*"). - exclude("temp/org/h2/dev/*"). - exclude("temp/org/h2/jaqu/*"). - exclude("temp/org/h2/java/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/mode/*"). - exclude("temp/org/h2/samples/*"). - exclude("temp/org/h2/test/*"). - exclude("*.bat"). - exclude("*.sh"). - exclude("*.txt"). - exclude("*.DS_Store"); - long kb = jar("bin/h2-client" + getJarSuffix(), files, "temp"); - if (kb < 350 || kb > 450) { - throw new RuntimeException("Expected file size 350 - 450 KB, got: " + kb); - } - } - /** * Create the file h2mvstore.jar. This only contains the MVStore. */ + @Description(summary = "Create h2mvstore.jar containing only the MVStore.") public void jarMVStore() { compileMVStore(true); - manifestMVStore(); + addVersions(); + manifest("src/installer/mvstore/MANIFEST.MF"); FileList files = files("temp"); files.exclude("*.DS_Store"); + files = excludeTestMetaInfFiles(files); jar("bin/h2-mvstore" + getJarSuffix(), files, "temp"); } /** - * Create the file h2small.jar. This only contains the embedded database. - * Debug information is disabled. - */ - public void jarSmall() { - compile(false, false, true); - FileList files = files("temp"). - exclude("temp/android/*"). - exclude("temp/org/h2/android/*"). - exclude("temp/org/h2/build/*"). - exclude("temp/org/h2/dev/*"). - exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/jaqu/*"). - exclude("temp/org/h2/java/*"). 
- exclude("temp/org/h2/jcr/*"). - exclude("temp/org/h2/mode/*"). - exclude("temp/org/h2/samples/*"). - exclude("temp/org/h2/server/ftp/*"). - exclude("temp/org/h2/test/*"). - exclude("temp/org/h2/bnf/*"). - exclude("temp/org/h2/fulltext/*"). - exclude("temp/org/h2/jdbcx/*"). - exclude("temp/org/h2/jmx/*"). - exclude("temp/org/h2/server/*"). - exclude("temp/org/h2/tools/*"). - exclude("*.bat"). - exclude("*.sh"). - exclude("*.txt"). - exclude("*.DS_Store"); - files.add(new File("temp/org/h2/tools/DeleteDbFiles.class")); - files.add(new File("temp/org/h2/tools/CompressTool.class")); - jar("bin/h2small" + getJarSuffix(), files, "temp"); - } - - /** - * Create the file h2jaqu.jar. This only contains the JaQu (Java Query) - * implementation. All other jar files do not include JaQu. - */ - public void jarJaqu() { - compile(true, false, true); - manifest("H2 JaQu", ""); - FileList files = files("temp/org/h2/jaqu"); - files.addAll(files("temp/META-INF/MANIFEST.MF")); - files.exclude("*.DS_Store"); - jar("bin/h2jaqu" + getJarSuffix(), files, "temp"); - } - - /** - * Create the Javadocs of the API (including the JDBC API) and tools. + * Create the Javadocs of the API (incl. the JDBC API) and tools. */ + @Description(summary = "Create the API Javadocs (incl. 
JDBC API and tools).") public void javadoc() { compileTools(); delete("docs"); mkdir("docs/javadoc"); - javadoc("-sourcepath", "src/main", "org.h2.jdbc", "org.h2.jdbcx", + javadoc("-sourcepath", "src/main", + "-d", "docs/javadoc", + "org.h2.jdbc", "org.h2.jdbcx", "org.h2.tools", "org.h2.api", "org.h2.engine", "org.h2.fulltext", "-classpath", - "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/jts-1.13.jar", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.Doclet"); - copy("docs/javadoc", files("src/docsrc/javadoc"), "src/docsrc/javadoc"); + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar"); } /** * Create the Javadocs of the implementation. 
*/ + @Description(summary = "Create the Javadocs of the implementation.") public void javadocImpl() { compileTools(); mkdir("docs/javadocImpl2"); javadoc("-sourcepath", "src/main" + + // need to be disabled if not enough memory File.pathSeparator + "src/test" + - File.pathSeparator + "src/tools" , + File.pathSeparator + "src/tools", + "-Xdoclint:all,-missing", "-noindex", - "-tag", "h2.resource", "-d", "docs/javadocImpl2", - "-classpath", System.getProperty("java.home") + - "/../lib/tools.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/servlet-api-3.0.1.jar" + - File.pathSeparator + "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-1.13.jar", + "-classpath", javaToolsJar + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + File.pathSeparator + "ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar" + + File.pathSeparator + "ext/apiguardian-api-" + APIGUARDIAN_VERSION + ".jar", "-subpackages", "org.h2", - "-exclude", "org.h2.test.jaqu:org.h2.jaqu"); + "-exclude", "org.h2.dev:org.h2.java:org.h2.test:org.h2.build.code:org.h2.build.doc"); + + mkdir("docs/javadocImpl3"); + javadoc("-sourcepath", "src/main", + "-noindex", + "-d", 
"docs/javadocImpl3", + "-classpath", javaToolsJar + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar", + "-subpackages", "org.h2.mvstore", + "-exclude", "org.h2.mvstore.db"); + System.setProperty("h2.interfacesOnly", "false"); System.setProperty("h2.javadocDestDir", "docs/javadocImpl"); javadoc("-sourcepath", "src/main" + File.pathSeparator + "src/test" + File.pathSeparator + "src/tools", - "-classpath", - System.getProperty("java.home") + "/../lib/tools.jar" + - File.pathSeparator + "ext/slf4j-api-1.6.0.jar" + - File.pathSeparator + "ext/servlet-api-3.0.1.jar" + - File.pathSeparator + "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/org.osgi.core-4.2.0.jar" + - File.pathSeparator + "ext/org.osgi.enterprise-4.2.0.jar" + - File.pathSeparator + "ext/jts-1.13.jar", + "-d", "docs/javadoc", + "-classpath", javaToolsJar + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + 
File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + File.pathSeparator + "ext/junit-jupiter-api-" + JUNIT_VERSION + ".jar" + + File.pathSeparator + "ext/apiguardian-api-" + APIGUARDIAN_VERSION + ".jar", "-subpackages", "org.h2", - "-exclude", "org.h2.test.jaqu:org.h2.jaqu", - "-package", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.Doclet"); - copy("docs/javadocImpl", files("src/docsrc/javadoc"), "src/docsrc/javadoc"); + "-package"); } - private static void manifest(String title, String mainClassName) { - String manifest = new String(readFile(new File( - "src/main/META-INF/MANIFEST.MF"))); - manifest = replaceAll(manifest, "${title}", title); - manifest = replaceAll(manifest, "${version}", getVersion()); - manifest = replaceAll(manifest, "${buildJdk}", getJavaSpecVersion()); - String createdBy = System.getProperty("java.runtime.version") + - " (" + System.getProperty("java.vm.vendor") + ")"; - manifest = replaceAll(manifest, "${createdBy}", createdBy); - String mainClassTag = manifest == null ? 
"" : "Main-Class: " + mainClassName; - manifest = replaceAll(manifest, "${mainClassTag}", mainClassTag); - writeFile(new File("temp/META-INF/MANIFEST.MF"), manifest.getBytes()); - } - - private static void manifestMVStore() { - String manifest = new String(readFile(new File( - "src/installer/mvstore/MANIFEST.MF"))); + private static void manifest(String path) { + String manifest = new String(readFile(Paths.get(path)), StandardCharsets.UTF_8); manifest = replaceAll(manifest, "${version}", getVersion()); manifest = replaceAll(manifest, "${buildJdk}", getJavaSpecVersion()); String createdBy = System.getProperty("java.runtime.version") + " (" + System.getProperty("java.vm.vendor") + ")"; manifest = replaceAll(manifest, "${createdBy}", createdBy); mkdir("temp/META-INF"); - writeFile(new File("temp/META-INF/MANIFEST.MF"), manifest.getBytes()); + writeFile(Paths.get("temp/META-INF/MANIFEST.MF"), manifest.getBytes()); } /** @@ -621,16 +686,16 @@ private static void manifestMVStore() { * file:///data/h2database/m2-repo. This is only required when * a new H2 version is made. 
*/ + @Description(summary = "Build H2 release jars and upload to file:///data/h2database/m2-repo.") public void mavenDeployCentral() { // generate and deploy h2*-sources.jar file FileList files = files("src/main"); copy("docs", files, "src/main"); files = files("docs").keep("docs/org/*").keep("*.java"); files.addAll(files("docs").keep("docs/META-INF/*")); - String manifest = new String(readFile(new File( - "src/installer/source-manifest.mf"))); + String manifest = new String(readFile(Paths.get("src/installer/source-manifest.mf"))); manifest = replaceAll(manifest, "${version}", getVersion()); - writeFile(new File("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); + writeFile(Paths.get("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); jar("docs/h2-" + getVersion() + "-sources.jar", files, "docs"); delete("docs/org"); delete("docs/META-INF"); @@ -668,9 +733,9 @@ public void mavenDeployCentral() { // generate and deploy the h2*.jar file jar(); - String pom = new String(readFile(new File("src/installer/pom-template.xml"))); + String pom = new String(readFile(Paths.get("src/installer/pom-template.xml"))); pom = replaceAll(pom, "@version@", getVersion()); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "deploy:deploy-file", "-Dfile=bin/h2" + getJarSuffix(), @@ -681,11 +746,54 @@ public void mavenDeployCentral() { "-DartifactId=h2", "-DgroupId=com.h2database")); + // generate the h2-mvstore-*-sources.jar file + files = files("src/main"); + copy("docs", files, "src/main"); + files = files("docs").keep("docs/org/h2/mvstore/*"). + exclude("docs/org/h2/mvstore/db/*"). 
+ keep("*.java"); + files.addAll(files("docs").keep("docs/META-INF/*")); + manifest = new String(readFile(Paths.get("src/installer/source-mvstore-manifest.mf"))); + manifest = replaceAll(manifest, "${version}", getVersion()); + writeFile(Paths.get("docs/META-INF/MANIFEST.MF"), manifest.getBytes()); + jar("docs/h2-mvstore-" + getVersion() + "-sources.jar", files, "docs"); + delete("docs/org"); + delete("docs/META-INF"); + + // deploy the h2-mvstore-*-source.jar file + execScript("mvn", args( + "deploy:deploy-file", + "-Dfile=docs/h2-mvstore-" + getVersion() + "-sources.jar", + "-Durl=file:///data/h2database/m2-repo", + "-Dpackaging=jar", + "-Dclassifier=sources", + "-Dversion=" + getVersion(), + "-DartifactId=h2-mvstore", + "-DgroupId=com.h2database" + // ,"-DgeneratePom=false" + )); + + // generate and deploy the h2-mvstore-*-javadoc.jar file + javadocImpl(); + files = files("docs/javadocImpl3"); + jar("docs/h2-mvstore-" + getVersion() + "-javadoc.jar", files, "docs/javadocImpl3"); + execScript("mvn", args( + "deploy:deploy-file", + "-Dfile=docs/h2-mvstore-" + getVersion() + "-javadoc.jar", + "-Durl=file:///data/h2database/m2-repo", + "-Dpackaging=jar", + "-Dclassifier=javadoc", + "-Dversion=" + getVersion(), + "-DartifactId=h2-mvstore", + "-DgroupId=com.h2database" + // ,"-DgeneratePom=false" + )); + // generate and deploy the h2-mvstore-*.jar file jarMVStore(); - pom = new String(readFile(new File("src/installer/pom-mvstore-template.xml"))); + pom = new String(readFile(Paths.get("src/installer/pom-mvstore-template.xml"))); pom = replaceAll(pom, "@version@", getVersion()); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "deploy:deploy-file", "-Dfile=bin/h2-mvstore" + getJarSuffix(), @@ -698,18 +806,19 @@ public void mavenDeployCentral() { } /** - * This will build a 'snapshot' H2 .jar file and upload it the to the local + * This will build a 'snapshot' H2 .jar file and upload it 
to the local * Maven 2 repository. */ + @Description(summary = "Build a snapshot H2 jar and upload to local Maven 2 repo.") public void mavenInstallLocal() { // MVStore jarMVStore(); - String pom = new String(readFile(new File("src/installer/pom-mvstore-template.xml"))); - pom = replaceAll(pom, "@version@", "1.0-SNAPSHOT"); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + String pom = new String(readFile(Paths.get("src/installer/pom-mvstore-template.xml"))); + pom = replaceAll(pom, "@version@", getVersion()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "install:install-file", - "-Dversion=1.0-SNAPSHOT", + "-Dversion=" + getVersion(), "-Dfile=bin/h2-mvstore" + getJarSuffix(), "-Dpackaging=jar", "-DpomFile=bin/pom.xml", @@ -717,12 +826,12 @@ public void mavenInstallLocal() { "-DgroupId=com.h2database")); // database jar(); - pom = new String(readFile(new File("src/installer/pom-template.xml"))); - pom = replaceAll(pom, "@version@", "1.0-SNAPSHOT"); - writeFile(new File("bin/pom.xml"), pom.getBytes()); + pom = new String(readFile(Paths.get("src/installer/pom-template.xml"))); + pom = replaceAll(pom, "@version@", getVersion()); + writeFile(Paths.get("bin/pom.xml"), pom.getBytes()); execScript("mvn", args( "install:install-file", - "-Dversion=1.0-SNAPSHOT", + "-Dversion=" + getVersion(), "-Dfile=bin/h2" + getJarSuffix(), "-Dpackaging=jar", "-DpomFile=bin/pom.xml", @@ -732,48 +841,23 @@ public void mavenInstallLocal() { /** * Build the jar file without downloading any files over the network. If the - * required files are missing, they are are listed, and the jar file is not + * required files are missing, they are listed, and the jar file is not * built. 
*/ + @Description(summary = "Build H2 jar avoiding downloads (list missing files).") public void offline() { downloadOrVerify(true); if (filesMissing) { println("Required files are missing"); - println("Both Lucene 2 and 3 are supported using -Dlucene=x (x=2 or 3)"); } else { jar(); } } - private void resources(boolean clientOnly, boolean basicOnly) { - if (!clientOnly) { - java("org.h2.build.doc.GenerateHelp", null); - javadoc("-sourcepath", "src/main", "org.h2.tools", "org.h2.jmx", - "-classpath", - "ext/lucene-core-3.0.2.jar" + - File.pathSeparator + "ext/jts-1.13.jar", - "-docletpath", "bin" + File.pathSeparator + "temp", - "-doclet", "org.h2.build.doclet.ResourceDoclet"); - } - FileList files = files("src/main"). - exclude("*.MF"). - exclude("*.java"). - exclude("*/package.html"). - exclude("*/java.sql.Driver"). - exclude("*.DS_Store"); - if (basicOnly) { - files = files.keep("src/main/org/h2/res/_messages_en.*"); - } - if (clientOnly) { - files = files.exclude("src/main/org/h2/res/javadoc.properties"); - files = files.exclude("src/main/org/h2/server/*"); - } - zip("temp/org/h2/util/data.zip", files, "src/main", true, false); - } - /** * Just run the spellchecker. */ + @Description(summary = "Run the spellchecker.") public void spellcheck() { java("org.h2.build.doc.SpellChecker", null); } @@ -781,18 +865,70 @@ public void spellcheck() { /** * Compile and run all tests. This does not include the compile step. */ + @Description(summary = "Compile and run all tests (excluding the compile step).") public void test() { + test(false); + } + + /** + * Compile and run all fast tests. This does not include the compile step. + */ + @Description(summary = "Compile and run all tests for CI (excl. 
the compile step).") + public void testCI() { + test(true); + } + + private void test(boolean ci) { downloadTest(); - String testClass = System.getProperty("test", "org.h2.test.TestAll"); - java(testClass, null); + String cp = "temp" + File.pathSeparator + "bin" + + File.pathSeparator + "ext/postgresql-" + PGJDBC_VERSION + ".jar" + + File.pathSeparator + "ext/javax.servlet-api-" + JAVAX_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/jakarta.servlet-api-" + JAKARTA_SERVLET_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-core-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-analyzers-common-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/lucene-queryparser-" + LUCENE_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.core-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/org.osgi.enterprise-" + OSGI_VERSION + ".jar" + + File.pathSeparator + "ext/jts-core-" + JTS_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-api-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/slf4j-nop-" + SLF4J_VERSION + ".jar" + + File.pathSeparator + "ext/asm-" + ASM_VERSION + ".jar" + + File.pathSeparator + javaToolsJar; + int version = getJavaVersion(); + if (version >= 9) { + cp = "src/java9/precompiled" + File.pathSeparator + cp; + if (version >= 10) { + cp = "src/java10/precompiled" + File.pathSeparator + cp; + } + } + int ret; + if (ci) { + ret = execJava(args( + "-ea", + "-Xmx128m", + "-XX:MaxDirectMemorySize=2g", + "-cp", cp, + "org.h2.test.TestAll", "ci")); + } else { + ret = execJava(args( + "-ea", + "-Xmx128m", + "-cp", cp, + "org.h2.test.TestAll")); + } + // return a failure code for CI builds + if (ret != 0) { + System.exit(ret); + } } /** - * Print the system properties + * Print the system properties. 
*/ + @Description(summary = "Print the system properties.") public void testSysProperties() { System.out.println("environment settings:"); - for (Entry e : new TreeMap( + for (Entry e : new TreeMap<>( System.getProperties()).entrySet()) { System.out.println(e); } @@ -801,9 +937,10 @@ public void testSysProperties() { /** * Test the local network of this machine. */ + @Description(summary = "Test the local network of this machine.") public void testNetwork() { try { - long start = System.currentTimeMillis(); + long start = System.nanoTime(); System.out.println("localhost:"); System.out.println(" " + InetAddress.getByName("localhost")); for (InetAddress address : InetAddress.getAllByName("localhost")) { @@ -827,7 +964,7 @@ public void testNetwork() { System.out.println(serverSocket); int port = serverSocket.getLocalPort(); final ServerSocket accept = serverSocket; - start = System.currentTimeMillis(); + start = System.nanoTime(); Thread thread = new Thread() { @Override public void run() { @@ -849,9 +986,10 @@ public void run() { } }; thread.start(); - System.out.println("time: " + (System.currentTimeMillis() - start)); + System.out.println("time: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)); Thread.sleep(1000); - start = System.currentTimeMillis(); + start = System.nanoTime(); final Socket socket = new Socket(); socket.setSoTimeout(2000); final InetSocketAddress socketAddress = new InetSocketAddress(address, port); @@ -876,21 +1014,24 @@ public void run() { + socketAddress); socket.connect(localhostAddress, 2000); } - System.out.println("time: " + (System.currentTimeMillis() - start)); + System.out.println("time: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)); Thread.sleep(200); - start = System.currentTimeMillis(); + start = System.nanoTime(); System.out.println("client:" + socket.toString()); socket.getOutputStream().write(123); - System.out.println("time: " + (System.currentTimeMillis() - start)); + System.out.println("time: " 
+ + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)); Thread.sleep(100); - start = System.currentTimeMillis(); + start = System.nanoTime(); System.out.println("client read:" + socket.getInputStream().read()); socket.close(); } catch (Throwable t) { t.printStackTrace(); } thread.join(5000); - System.out.println("time: " + (System.currentTimeMillis() - start)); + System.out.println("time: " + + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)); if (thread.isAlive()) { System.out.println("thread is still alive, interrupting"); thread.interrupt(); @@ -907,20 +1048,24 @@ public void run() { * of the automated build (including test results, newsfeed, code coverage) * to the public web site. */ + @Description(summary = "Upload all build results to the public website.") public void uploadBuild() { String password = System.getProperty("h2.ftpPassword"); if (password == null) { throw new RuntimeException("h2.ftpPassword not set"); } downloadTest(); + mkdir("temp"); FileList files = files("src/tools").keep("*/UploadBuild.java"); StringList args = args("-d", "temp", "-sourcepath", "src/tools" + File.pathSeparator + "src/test" + File.pathSeparator + "src/main"); - mkdir("temp"); + String version = getTargetJavaVersion(); + if (version != null) { + args = args.plus("-target", version, "-source", version); + } javac(args, files); - String cp = "bin" + File.pathSeparator + "temp" + - File.pathSeparator + "ext/h2mig_pagestore_addon.jar"; - exec("java", args("-Xmx512m", "-cp", cp, + String cp = "bin" + File.pathSeparator + "temp"; + execJava(args("-Xmx512m", "-cp", cp, "-Dh2.ftpPassword=" + password, "org.h2.build.doc.UploadBuild")); } @@ -928,6 +1073,7 @@ public void uploadBuild() { /** * Build the h2console.war file. 
*/ + @Description(summary = "Build the h2console.war file.") public void warConsole() { jar(); copy("temp/WEB-INF", files("src/tools/WEB-INF/web.xml"), "src/tools/WEB-INF"); @@ -941,12 +1087,12 @@ public void warConsole() { @Override protected String getLocalMavenDir() { String userHome = System.getProperty("user.home", ""); - File file = new File(userHome, ".m2/settings.xml"); - if (!file.exists()) { + Path file = Paths.get(userHome, ".m2/settings.xml"); + if (!Files.exists(file)) { return super.getLocalMavenDir(); } XMLParser p = new XMLParser(new String(BuildBase.readFile(file))); - HashMap prop = new HashMap(); + HashMap prop = new HashMap<>(); for (String name = ""; p.hasNext();) { int event = p.next(); if (event == XMLParser.START_ELEMENT) { diff --git a/h2/src/tools/org/h2/build/BuildBase.java b/h2/src/tools/org/h2/build/BuildBase.java index 89745ca64b..830747fc2a 100644 --- a/h2/src/tools/org/h2/build/BuildBase.java +++ b/h2/src/tools/org/h2/build/BuildBase.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build; @@ -10,32 +10,44 @@ import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileOutputStream; import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; -import java.io.RandomAccessFile; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.jar.JarOutputStream; import java.util.zip.CRC32; import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; + /** * This class is a complete pure Java build tool. It allows to build this * project without any external dependencies except a JDK. @@ -44,6 +56,16 @@ */ public class BuildBase { + /** + * Stores descriptions for methods which can be invoked as build targets. + */ + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + @Documented + public static @interface Description { + String summary() default ""; + } + /** * A list of strings. 
*/ @@ -87,7 +109,7 @@ public String[] array() { /** * A list of files. */ - public static class FileList extends ArrayList { + public static class FileList extends ArrayList { private static final long serialVersionUID = 1L; @@ -136,8 +158,8 @@ private FileList filter(boolean keep, String pattern) { // normalize / and \ pattern = BuildBase.replaceAll(pattern, "/", File.separator); FileList list = new FileList(); - for (File f : this) { - String path = f.getPath(); + for (Path f : this) { + String path = f.toString(); boolean match = start ? path.startsWith(pattern) : path.endsWith(pattern); if (match == keep) { list.add(f); @@ -158,13 +180,25 @@ private FileList filter(boolean keep, String pattern) { */ protected boolean quiet; + /** + * The full path to the executable of the current JRE. + */ + protected final String javaExecutable = System.getProperty("java.home") + + File.separator + "bin" + File.separator + "java"; + + /** + * The full path to the tools jar of the current JDK. + */ + protected final String javaToolsJar = System.getProperty("java.home") + File.separator + ".." + + File.separator + "lib" + File.separator + "tools.jar"; + /** * This method should be called by the main method. * * @param args the command line parameters */ protected void run(String... args) { - long time = System.currentTimeMillis(); + long time = System.nanoTime(); if (args.length == 0) { all(); } else { @@ -192,7 +226,7 @@ protected void run(String... 
args) { } } } - println("Done in " + (System.currentTimeMillis() - time) + " ms"); + println("Done in " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + " ms"); } private boolean runTarget(String target) { @@ -226,13 +260,13 @@ private void runShell() { } else if (line.length() == 0) { line = last; } - long time = System.currentTimeMillis(); + long time = System.nanoTime(); try { runTarget(line); } catch (Exception e) { System.out.println(e); } - println("Done in " + (System.currentTimeMillis() - time) + " ms"); + println("Done in " + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - time) + " ms"); last = line; } } @@ -244,9 +278,7 @@ private static Object invoke(Method m, Object instance, Object[] args) { } catch (InvocationTargetException e) { throw e.getCause(); } - } catch (Error e) { - throw e; - } catch (RuntimeException e) { + } catch (Error | RuntimeException e) { throw e; } catch (Throwable e) { throw new RuntimeException(e); @@ -276,24 +308,26 @@ protected void beep() { */ protected void projectHelp() { Method[] methods = getClass().getDeclaredMethods(); - Arrays.sort(methods, new Comparator() { - @Override - public int compare(Method a, Method b) { - return a.getName().compareTo(b.getName()); - } - }); + Arrays.sort(methods, Comparator.comparing(Method::getName)); sysOut.println("Targets:"); + String description; for (Method m : methods) { int mod = m.getModifiers(); if (!Modifier.isStatic(mod) && Modifier.isPublic(mod) && m.getParameterTypes().length == 0) { - sysOut.println(m.getName()); + if (m.isAnnotationPresent(Description.class)) { + description = String.format("%1$-20s %2$s", + m.getName(), m.getAnnotation(Description.class).summary()); + } else { + description = m.getName(); + } + sysOut.println(description); } } sysOut.println(); } - private static boolean isWindows() { + protected static boolean isWindows() { return System.getProperty("os.name").toLowerCase().contains("windows"); } @@ -307,11 +341,30 @@ private static boolean 
isWindows() { */ protected int execScript(String script, StringList args) { if (isWindows()) { - script = script + ".bat"; + // Under windows, we use the "cmd" command interpreter since it will + // search the path for us without us having to hard-code an + // extension for the script we want. (Sometimes we don't know if the + // extension will be .bat or .cmd) + StringList newArgs = new StringList(); + newArgs.add("/C"); + newArgs.add(script); + newArgs.addAll(args); + return exec("cmd", newArgs); } return exec(script, args); } + /** + * Execute java in a separate process, but using the java executable of the + * current JRE. + * + * @param args the command line parameters for the java command + * @return the exit value + */ + protected int execJava(StringList args) { + return exec(javaExecutable, args); + } + /** * Execute a program in a separate process. * @@ -382,24 +435,6 @@ protected static String getStaticField(String className, String fieldName) { } } - /** - * Reads the value from a static method of a class using reflection. - * - * @param className the name of the class - * @param methodName the field name - * @return the value as a string - */ - protected static String getStaticValue(String className, String methodName) { - try { - Class clazz = Class.forName(className); - Method method = clazz.getMethod(methodName); - return method.invoke(null).toString(); - } catch (Exception e) { - throw new RuntimeException("Can not read value " + className + "." - + methodName + "()", e); - } - } - /** * Copy files to the specified target directory. 
* @@ -408,19 +443,18 @@ protected static String getStaticValue(String className, String methodName) { * @param baseDir the base directory */ protected void copy(String targetDir, FileList files, String baseDir) { - File target = new File(targetDir); - File base = new File(baseDir); - println("Copying " + files.size() + " files to " + target.getPath()); - String basePath = base.getPath(); - for (File f : files) { - File t = new File(target, removeBase(basePath, f.getPath())); + Path target = Paths.get(targetDir); + Path base = Paths.get(baseDir); + println("Copying " + files.size() + " files to " + target); + for (Path f : files) { + Path t = target.resolve(base.relativize(f)); byte[] data = readFile(f); - mkdirs(t.getParentFile()); + mkdirs(t.getParent()); writeFile(t, data); } } - private PrintStream filter(PrintStream out, final String[] exclude) { + private static PrintStream filter(PrintStream out, final String[] exclude) { return new PrintStream(new FilterOutputStream(out) { private ByteArrayOutputStream buff = new ByteArrayOutputStream(); @@ -440,7 +474,7 @@ public void write(byte b) throws IOException { buff.write(b); if (b == '\n') { byte[] data = buff.toByteArray(); - String line = new String(data, "UTF-8"); + String line = new String(data, StandardCharsets.UTF_8); boolean print = true; for (String l : exclude) { if (line.startsWith(l)) { @@ -488,7 +522,12 @@ protected void javadoc(String...args) { "Generating ", })); } - Class clazz = Class.forName("com.sun.tools.javadoc.Main"); + Class clazz; + try { + clazz = Class.forName("jdk.javadoc.internal.tool.Main"); + } catch (Exception e) { + clazz = Class.forName("com.sun.tools.javadoc.Main"); + } Method execute = clazz.getMethod("execute", String[].class); result = (Integer) invoke(execute, null, new Object[] { args }); } catch (Exception e) { @@ -540,18 +579,18 @@ protected static String getSHA1(byte[] data) { */ protected void downloadUsingMaven(String target, String group, String artifact, String version, 
String sha1Checksum) { - String repoDir = "http://repo1.maven.org/maven2"; - File targetFile = new File(target); - if (targetFile.exists()) { + String repoDir = "https://repo1.maven.org/maven2"; + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } - String repoFile = group + "/" + artifact + "/" + version + "/" + String repoFile = group.replace('.', '/') + "/" + artifact + "/" + version + "/" + artifact + "-" + version + ".jar"; - mkdirs(targetFile.getAbsoluteFile().getParentFile()); - String localMavenDir = getLocalMavenDir(); - if (new File(localMavenDir).exists()) { - File f = new File(localMavenDir, repoFile); - if (!f.exists()) { + mkdirs(targetFile.toAbsolutePath().getParent()); + Path localMavenDir = Paths.get(getLocalMavenDir()); + if (Files.isDirectory(localMavenDir)) { + Path f = localMavenDir.resolve(repoFile); + if (!Files.exists(f)) { try { execScript("mvn", args( "org.apache.maven.plugins:maven-dependency-plugin:2.1:get", @@ -561,7 +600,7 @@ protected void downloadUsingMaven(String target, String group, println("Could not download using Maven: " + e.toString()); } } - if (f.exists()) { + if (Files.exists(f)) { byte[] data = readFile(f); String got = getSHA1(data); if (sha1Checksum == null) { @@ -571,7 +610,7 @@ protected void downloadUsingMaven(String target, String group, throw new RuntimeException( "SHA1 checksum mismatch; got: " + got + " expected: " + sha1Checksum + - " for file " + f.getAbsolutePath()); + " for file " + f.toAbsolutePath()); } } writeFile(targetFile, data); @@ -596,21 +635,21 @@ protected String getLocalMavenDir() { * @param sha1Checksum the SHA-1 checksum or null */ protected void download(String target, String fileURL, String sha1Checksum) { - File targetFile = new File(target); - if (targetFile.exists()) { + Path targetFile = Paths.get(target); + if (Files.exists(targetFile)) { return; } - mkdirs(targetFile.getAbsoluteFile().getParentFile()); + mkdirs(targetFile.toAbsolutePath().getParent()); 
ByteArrayOutputStream buff = new ByteArrayOutputStream(); try { println("Downloading " + fileURL); URL url = new URL(fileURL); InputStream in = new BufferedInputStream(url.openStream()); - long last = System.currentTimeMillis(); + long last = System.nanoTime(); int len = 0; while (true) { - long now = System.currentTimeMillis(); - if (now > last + 1000) { + long now = System.nanoTime(); + if (now - last > 1_000_000_000L) { println("Downloaded " + len + " bytes"); last = now; } @@ -647,7 +686,7 @@ protected void download(String target, String fileURL, String sha1Checksum) { */ protected FileList files(String dir) { FileList list = new FileList(); - addFiles(list, new File(dir)); + addFiles(list, Paths.get(dir)); return list; } @@ -661,42 +700,35 @@ protected static StringList args(String...args) { return new StringList(args); } - private void addFiles(FileList list, File file) { - if (file.getName().startsWith(".svn")) { + private static void addFiles(FileList list, Path file) { + if (file.getFileName().toString().startsWith(".svn")) { // ignore - } else if (file.isDirectory()) { - String path = file.getPath(); - for (String fileName : file.list()) { - addFiles(list, new File(path, fileName)); + } else if (Files.isDirectory(file)) { + try { + Files.walkFileTree(file, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + list.add(file); + return FileVisitResult.CONTINUE; + } + }); + } catch (IOException e) { + throw new RuntimeException("Error reading directory " + file, e); } } else { list.add(file); } } - private static String removeBase(String basePath, String path) { - if (path.startsWith(basePath)) { - path = path.substring(basePath.length()); - } - path = path.replace('\\', '/'); - if (path.startsWith("/")) { - path = path.substring(1); - } - return path; - } - /** * Create or overwrite a file. 
* * @param file the file * @param data the data to write */ - public static void writeFile(File file, byte[] data) { + public static void writeFile(Path file, byte[] data) { try { - RandomAccessFile ra = new RandomAccessFile(file, "rw"); - ra.write(data); - ra.setLength(data.length); - ra.close(); + Files.write(file, data); } catch (IOException e) { throw new RuntimeException("Error writing to file " + file, e); } @@ -708,28 +740,11 @@ public static void writeFile(File file, byte[] data) { * @param file the file * @return the data */ - public static byte[] readFile(File file) { - RandomAccessFile ra = null; + public static byte[] readFile(Path file) { try { - ra = new RandomAccessFile(file, "r"); - long len = ra.length(); - if (len >= Integer.MAX_VALUE) { - throw new RuntimeException("File " + file.getPath() + " is too large"); - } - byte[] buffer = new byte[(int) len]; - ra.readFully(buffer); - ra.close(); - return buffer; + return Files.readAllBytes(file); } catch (IOException e) { throw new RuntimeException("Error reading from file " + file, e); - } finally { - if (ra != null) { - try { - ra.close(); - } catch (IOException e) { - // ignore - } - } } } @@ -777,29 +792,55 @@ private static long zipOrJar(String destFile, FileList files, String basePath, boolean storeOnly, boolean sortBySuffix, boolean jar) { if (sortBySuffix) { // for better compressibility, sort by suffix, then name - Collections.sort(files, new Comparator() { + files.sort((f1, f2) -> { + String p1 = f1.toString(); + String p2 = f2.toString(); + int comp = getSuffix(p1).compareTo(getSuffix(p2)); + if (comp == 0) { + comp = p1.compareTo(p2); + } + return comp; + }); + } else if (jar) { + files.sort(new Comparator() { + private int priority(String path) { + if (path.startsWith("META-INF/")) { + if (path.equals("META-INF/MANIFEST.MF")) { + return 0; + } + if (path.startsWith("services/", 9)) { + return 1; + } + return 2; + } + if (!path.endsWith(".zip")) { + return 3; + } + return 4; + } + @Override 
- public int compare(File f1, File f2) { - String p1 = f1.getPath(); - String p2 = f2.getPath(); - int comp = getSuffix(p1).compareTo(getSuffix(p2)); - if (comp == 0) { - comp = p1.compareTo(p2); + public int compare(Path f1, Path f2) { + String p1 = f1.toString(); + String p2 = f2.toString(); + int comp = Integer.compare(priority(p1), priority(p2)); + if (comp != 0) { + return comp; } - return comp; + return p1.compareTo(p2); } }); } - mkdirs(new File(destFile).getAbsoluteFile().getParentFile()); - // normalize the path (replace / with \ if required) - basePath = new File(basePath).getPath(); + Path dest = Paths.get(destFile).toAbsolutePath(); + mkdirs(dest.getParent()); + Path base = Paths.get(basePath); try { - if (new File(destFile).isDirectory()) { + if (Files.isDirectory(dest)) { throw new IOException( "Can't create the file as a directory with this name already exists: " + destFile); } - OutputStream out = new BufferedOutputStream(new FileOutputStream(destFile)); + OutputStream out = new BufferedOutputStream(Files.newOutputStream(dest)); ZipOutputStream zipOut; if (jar) { zipOut = new JarOutputStream(out); @@ -810,14 +851,13 @@ public int compare(File f1, File f2) { zipOut.setMethod(ZipOutputStream.STORED); } zipOut.setLevel(Deflater.BEST_COMPRESSION); - for (File file : files) { - String fileName = file.getPath(); - String entryName = removeBase(basePath, fileName); + for (Path file : files) { + String entryName = base.relativize(file).toString().replace('\\', '/'); byte[] data = readFile(file); ZipEntry entry = new ZipEntry(entryName); CRC32 crc = new CRC32(); crc.update(data); - entry.setSize(file.length()); + entry.setSize(data.length); entry.setCrc(crc.getValue()); zipOut.putNextEntry(entry); zipOut.write(data); @@ -825,14 +865,14 @@ public int compare(File f1, File f2) { } zipOut.closeEntry(); zipOut.close(); - return new File(destFile).length() / 1024; + return Files.size(dest) / 1024; } catch (IOException e) { throw new RuntimeException("Error 
creating file " + destFile, e); } } /** - * Get the current java specification version (for example, 1.4). + * Get the current java specification version (for example, 1.8). * * @return the java specification version */ @@ -840,10 +880,29 @@ protected static String getJavaSpecVersion() { return System.getProperty("java.specification.version"); } + /** + * Get the current Java version as integer value. + * + * @return the Java version (8, 9, 10, 11, 12, 13, etc) + */ + public static int getJavaVersion() { + int version = 8; + String v = getJavaSpecVersion(); + if (v != null) { + int idx = v.indexOf('.'); + if (idx >= 0) { + // 1.8 + v = v.substring(idx + 1); + } + version = Integer.parseInt(v); + } + return version; + } + private static List getPaths(FileList files) { StringList list = new StringList(); - for (File f : files) { - list.add(f.getPath()); + for (Path f : files) { + list.add(f.toString()); } return list; } @@ -870,7 +929,7 @@ protected void javac(StringList args, FileList files) { })); } Method compile = clazz.getMethod("compile", new Class[] { String[].class }); - Object instance = clazz.newInstance(); + Object instance = clazz.getDeclaredConstructor().newInstance(); result = (Integer) invoke(compile, instance, new Object[] { array }); } catch (Exception e) { e.printStackTrace(); @@ -907,22 +966,17 @@ protected void java(String className, StringList args) { * @param dir the directory to create */ protected static void mkdir(String dir) { - File f = new File(dir); - if (f.exists()) { - if (f.isFile()) { - throw new RuntimeException("Can not create directory " + dir - + " because a file with this name exists"); - } - } else { - mkdirs(f); - } + mkdirs(Paths.get(dir)); } - private static void mkdirs(File f) { - if (!f.exists()) { - if (!f.mkdirs()) { - throw new RuntimeException("Can not create directory " + f.getAbsolutePath()); - } + private static void mkdirs(Path f) { + try { + Files.createDirectories(f); + } catch (FileAlreadyExistsException e) { + 
throw new RuntimeException("Can not create directory " + e.getFile() + + " because a file with this name exists"); + } catch (IOException e) { + throw new RuntimeException("Can not create directory " + f.toAbsolutePath()); } } @@ -933,7 +987,7 @@ private static void mkdirs(File f) { */ protected void delete(String dir) { println("Deleting " + dir); - delete(new File(dir)); + deleteRecursive(Paths.get(dir)); } /** @@ -942,21 +996,37 @@ protected void delete(String dir) { * @param files the name of the files to delete */ protected void delete(FileList files) { - for (File f : files) { - delete(f); + for (Path f : files) { + deleteRecursive(f); } } - private void delete(File file) { - if (file.exists()) { - if (file.isDirectory()) { - String path = file.getPath(); - for (String fileName : file.list()) { - delete(new File(path, fileName)); - } - } - if (!file.delete()) { - throw new RuntimeException("Can not delete " + file.getPath()); + /** + * Delete a file or a directory with its content. + * + * @param file the file or directory to delete + */ + public static void deleteRecursive(Path file) { + if (Files.exists(file)) { + try { + Files.walkFileTree(file, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + if (exc == null) { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + throw exc; + } + }); + } catch (IOException e) { + throw new RuntimeException("Can not delete " + file); } } } diff --git a/h2/src/tools/org/h2/build/code/AbbaDetect.java b/h2/src/tools/org/h2/build/code/AbbaDetect.java index e6f9fa4760..68bc0ab2d7 100644 --- a/h2/src/tools/org/h2/build/code/AbbaDetect.java +++ b/h2/src/tools/org/h2/build/code/AbbaDetect.java @@ -1,13 +1,18 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.code; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; /** * Enable / disable AB-BA deadlock detector code. @@ -22,31 +27,29 @@ public class AbbaDetect { */ public static void main(String... args) throws Exception { String baseDir = "src/main"; - process(new File(baseDir), true); + Files.walkFileTree(Paths.get(baseDir), new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + process(file, true); + return FileVisitResult.CONTINUE; + } + }); } - private static void process(File file, boolean enable) throws IOException { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : file.listFiles()) { - process(f, enable); - } - return; - } + /** + * Process a file. 
+ * + * @param file the file + */ + static void process(Path file, boolean enable) throws IOException { + String name = file.getFileName().toString(); if (!name.endsWith(".java")) { return; } if (name.endsWith("AbbaDetector.java")) { return; } - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; - in.readFully(data); - in.close(); - String source = new String(data, "UTF-8"); + String source = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); String original = source; source = disable(source); @@ -61,15 +64,13 @@ private static void process(File file, boolean enable) throws IOException { if (source.equals(original)) { return; } - File newFile = new File(file + ".new"); - RandomAccessFile out = new RandomAccessFile(newFile, "rw"); - out.write(source.getBytes("UTF-8")); - out.close(); + Path newFile = Paths.get(file.toString() + ".new"); + Files.write(newFile, source.getBytes(StandardCharsets.UTF_8)); - File oldFile = new File(file + ".old"); - file.renameTo(oldFile); - newFile.renameTo(file); - oldFile.delete(); + Path oldFile = Paths.get(file.toString() + ".old"); + Files.move(file, oldFile); + Files.move(newFile, file); + Files.delete(oldFile); } private static String disable(String source) { @@ -81,7 +82,7 @@ private static String disable(String source) { } private static String enable(String source) { - // the word synchronized within single line comments comments + // the word synchronized within single line comments source = source.replaceAll("(// .* synchronized )([^ ])", "$1 $2"); source = source.replaceAll("synchronized \\((.*)\\(\\)\\)", diff --git a/h2/src/tools/org/h2/build/code/CheckJavadoc.java b/h2/src/tools/org/h2/build/code/CheckJavadoc.java index 9f18b00e94..a621e70fc8 100644 --- a/h2/src/tools/org/h2/build/code/CheckJavadoc.java +++ b/h2/src/tools/org/h2/build/code/CheckJavadoc.java @@ -1,13 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.code; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; /** * This tool checks that for each .java file there is a package.html file, @@ -16,8 +19,8 @@ */ public class CheckJavadoc { - private static final int MAX_COMMENT_LINE_SIZE = 80; - private static final int MAX_SOURCE_LINE_SIZE = 100; + private static final int MAX_COMMENT_LINE_SIZE = 100; + private static final int MAX_SOURCE_LINE_SIZE = 120; private int errorCount; /** @@ -31,32 +34,28 @@ public static void main(String... args) throws Exception { } private void run() throws Exception { - String baseDir = "src"; - check(new File(baseDir)); + check(Paths.get("src")); if (errorCount > 0) { throw new Exception(errorCount + " errors found"); } } - private int check(File file) throws Exception { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return 0; - } + private int check(Path file) throws Exception { + String name = file.getFileName().toString(); + if (Files.isDirectory(file)) { boolean foundPackageHtml = false, foundJava = false; - for (File f : file.listFiles()) { - int type = check(f); - if (type == 1) { - foundJava = true; - } else if (type == 2) { - foundPackageHtml = true; + try (DirectoryStream stream = Files.newDirectoryStream(file)) { + for (Path f : stream) { + int type = check(f); + if (type == 1) { + foundJava = true; + } else if (type == 2) { + foundPackageHtml = true; + } } } if (foundJava && !foundPackageHtml) { - System.out.println( - "No package.html file, but a 
Java file found at: " - + file.getAbsolutePath()); + System.out.println("No package.html file, but a Java file found at: " + file.toAbsolutePath()); errorCount++; } } else { @@ -70,57 +69,49 @@ private int check(File file) throws Exception { return 0; } - private void checkJavadoc(File file) throws IOException { - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; - in.readFully(data); - in.close(); - String text = new String(data); - int comment = text.indexOf("/**"); - if (comment < 0) { - System.out.println("No Javadoc comment: " + file.getAbsolutePath()); - errorCount++; - } - int pos = 0; - int lineNumber = 1; - boolean inComment = false; - while (true) { - int next = text.indexOf("\n", pos); - if (next < 0) { - break; - } - String rawLine = text.substring(pos, next); - if (rawLine.endsWith("\r")) { - rawLine = rawLine.substring(0, rawLine.length() - 1); - } + private void checkJavadoc(Path file) throws IOException { + List lines = Files.readAllLines(file); + boolean inComment = false, hasJavadoc = false; + for (int lineNumber = 0, size = lines.size(); lineNumber < size;) { + String rawLine = lines.get(lineNumber++); String line = rawLine.trim(); if (line.startsWith("/*")) { + if (!hasJavadoc && line.startsWith("/**")) { + hasJavadoc = true; + } inComment = true; } + int rawLength = rawLine.length(); if (inComment) { - if (rawLine.length() > MAX_COMMENT_LINE_SIZE - && !line.trim().startsWith("* http://")) { - System.out.println("Long line : " + file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); - errorCount++; - } - if (line.endsWith("*/")) { + int i = line.indexOf("*/", 2); + if (i >= 0) { inComment = false; } + if (i == rawLength - 2 && rawLength > MAX_COMMENT_LINE_SIZE + && !line.trim().startsWith("* http://") + && !line.trim().startsWith("* https://")) { + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); + 
errorCount++; + } } - if (!inComment && line.startsWith("//") - && rawLine.length() > MAX_COMMENT_LINE_SIZE - && !line.trim().startsWith("// http://")) { - System.out.println("Long line: " + file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); - errorCount++; - } else if (!inComment && rawLine.length() > MAX_SOURCE_LINE_SIZE) { - System.out.println("Long line: " + file.getAbsolutePath() - + " (" + file.getName() + ":" + lineNumber + ")"); + if (!inComment && line.startsWith("//")) { + if (rawLength > MAX_COMMENT_LINE_SIZE + && !line.trim().startsWith("// http://") + && !line.trim().startsWith("// https://")) { + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); + errorCount++; + } + } else if (!inComment && rawLength > MAX_SOURCE_LINE_SIZE) { + System.out.println("Long line: " + file.toAbsolutePath() + + " (" + file.getFileName() + ":" + lineNumber + ")"); errorCount++; } - lineNumber++; - pos = next + 1; + } + if (!hasJavadoc) { + System.out.println("No Javadoc comment: " + file.toAbsolutePath()); + errorCount++; } } diff --git a/h2/src/tools/org/h2/build/code/CheckTextFiles.java b/h2/src/tools/org/h2/build/code/CheckTextFiles.java index 41b4f692ac..a8b3c2453b 100644 --- a/h2/src/tools/org/h2/build/code/CheckTextFiles.java +++ b/h2/src/tools/org/h2/build/code/CheckTextFiles.java @@ -1,13 +1,19 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.code; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.RandomAccessFile; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; /** @@ -17,20 +23,20 @@ */ public class CheckTextFiles { - private static final int MAX_SOURCE_LINE_SIZE = 100; + private static final int MAX_SOURCE_LINE_SIZE = 120; // must contain "+" otherwise this here counts as well - private static final String COPYRIGHT = "Copyright 2004-2014 " + - "H2 Group."; + private static final String COPYRIGHT1 = "Copyright 2004-2022"; + private static final String COPYRIGHT2 = "H2 Group."; private static final String LICENSE = "Multiple-Licensed " + "under the MPL 2.0"; private static final String[] SUFFIX_CHECK = { "html", "jsp", "js", "css", "bat", "nsi", "java", "txt", "properties", "sql", "xml", "csv", - "Driver", "prefs" }; + "Driver", "Processor", "prefs" }; private static final String[] SUFFIX_IGNORE = { "gif", "png", "odg", "ico", "sxd", "layout", "res", "win", "jar", "task", "svg", "MF", "mf", - "sh", "DS_Store", "prop" }; + "sh", "DS_Store", "prop", "class" }; private static final String[] SUFFIX_CRLF = { "bat" }; private static final boolean ALLOW_TAB = false; @@ -57,73 +63,57 @@ public static void main(String... 
args) throws Exception { } private void run() throws Exception { - String baseDir = "src"; - check(new File(baseDir)); + Files.walkFileTree(Paths.get("src"), new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + check(file); + return FileVisitResult.CONTINUE; + } + }); if (hasError) { throw new Exception("Errors found"); } } - private void check(File file) throws Exception { - String name = file.getName(); - if (file.isDirectory()) { - if (name.equals("CVS") || name.equals(".svn")) { - return; - } - for (File f : file.listFiles()) { - check(f); - } - } else { - String suffix = ""; - int lastDot = name.lastIndexOf('.'); - if (lastDot >= 0) { - suffix = name.substring(lastDot + 1); - } - boolean check = false, ignore = false; - for (String s : SUFFIX_CHECK) { - if (suffix.equals(s)) { - check = true; - } + void check(Path file) throws IOException { + String name = file.getFileName().toString(); + String suffix = ""; + int lastDot = name.lastIndexOf('.'); + if (lastDot >= 0) { + suffix = name.substring(lastDot + 1); + } + boolean check = false, ignore = false; + for (String s : SUFFIX_CHECK) { + if (suffix.equals(s)) { + check = true; } -// if (name.endsWith(".html") && name.indexOf("_ja") > 0) { -// int todoRemoveJapaneseFiles; -// // Japanese html files are UTF-8 at this time -// check = false; -// ignore = true; -// } - if (name.endsWith(".utf8.txt") || - (name.startsWith("_docs_") && - name.endsWith(".properties"))) { - check = false; + } + for (String s : SUFFIX_IGNORE) { + if (suffix.equals(s)) { ignore = true; } - for (String s : SUFFIX_IGNORE) { - if (suffix.equals(s)) { - ignore = true; - } - } - boolean checkLicense = true; - for (String ig : suffixIgnoreLicense) { - if (suffix.equals(ig) || name.endsWith(ig)) { - checkLicense = false; - break; - } - } - if (ignore == check) { - throw new RuntimeException("Unknown suffix: " + suffix - + " for file: " + file.getAbsolutePath()); - 
} - useCRLF = false; - for (String s : SUFFIX_CRLF) { - if (suffix.equals(s)) { - useCRLF = true; - break; - } + } + boolean checkLicense = true; + for (String ig : suffixIgnoreLicense) { + if (suffix.equals(ig) || name.endsWith(ig)) { + checkLicense = false; + break; } - if (check) { - checkOrFixFile(file, AUTO_FIX, checkLicense); + } + if (ignore == check) { + throw new RuntimeException("Unknown suffix: " + suffix + + " for file: " + file.toAbsolutePath()); + } + useCRLF = false; + for (String s : SUFFIX_CRLF) { + if (suffix.equals(s)) { + useCRLF = true; + break; } } + if (check) { + checkOrFixFile(file, AUTO_FIX, checkLicense); + } } /** @@ -136,18 +126,17 @@ private void check(File file) throws Exception { * @param fix automatically fix newline characters and trailing spaces * @param checkLicense check the license and copyright */ - public void checkOrFixFile(File file, boolean fix, boolean checkLicense) - throws Exception { - RandomAccessFile in = new RandomAccessFile(file, "r"); - byte[] data = new byte[(int) file.length()]; + public void checkOrFixFile(Path file, boolean fix, boolean checkLicense) throws IOException { + byte[] data = Files.readAllBytes(file); ByteArrayOutputStream out = fix ? 
new ByteArrayOutputStream() : null; - in.readFully(data); - in.close(); if (checkLicense) { - if (data.length > COPYRIGHT.length() + LICENSE.length()) { + if (data.length > COPYRIGHT1.length() + LICENSE.length()) { // don't check tiny files String text = new String(data); - if (text.indexOf(COPYRIGHT) < 0) { + if (text.indexOf(COPYRIGHT1) < 0) { + fail(file, "copyright is missing", 0); + } + if (text.indexOf(COPYRIGHT2) < 0) { fail(file, "copyright is missing", 0); } if (text.indexOf(LICENSE) < 0) { @@ -189,9 +178,15 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) lastWasWhitespace = false; line++; int lineLength = i - startLinePos; - if (file.getName().endsWith(".java")) { + if (file.getFileName().toString().endsWith(".java")) { + if (i > 0 && data[i - 1] == '\r') { + lineLength--; + } if (lineLength > MAX_SOURCE_LINE_SIZE) { - fail(file, "line too long: " + lineLength, line); + String s = new String(data, startLinePos, lineLength).trim(); + if (!s.startsWith("// http://") && !s.startsWith("// https://")) { + fail(file, "line too long: " + lineLength, line); + } } } startLinePos = i; @@ -257,11 +252,8 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) if (fix) { byte[] changed = out.toByteArray(); if (!Arrays.equals(data, changed)) { - RandomAccessFile f = new RandomAccessFile(file, "rw"); - f.write(changed); - f.setLength(changed.length); - f.close(); - System.out.println("CHANGED: " + file.getName()); + Files.write(file, changed); + System.out.println("CHANGED: " + file.getFileName()); } } line = 1; @@ -282,11 +274,12 @@ public void checkOrFixFile(File file, boolean fix, boolean checkLicense) } } - private void fail(File file, String error, int line) { + private void fail(Path file, String error, int line) { + file = file.toAbsolutePath(); if (line <= 0) { line = 1; } - String name = file.getAbsolutePath(); + String name = file.toString(); int idx = name.lastIndexOf(File.separatorChar); if (idx >= 0) { name = 
name.replace(File.separatorChar, '.'); @@ -296,8 +289,7 @@ private void fail(File file, String error, int line) { name = name.substring(idx); } } - System.out.println("FAIL at " + name + " " + error + " " - + file.getAbsolutePath()); + System.out.println("FAIL at " + name + " " + error + " " + file.toAbsolutePath()); hasError = true; if (failOnError) { throw new RuntimeException("FAIL"); diff --git a/h2/src/tools/org/h2/build/code/SwitchSource.java b/h2/src/tools/org/h2/build/code/SwitchSource.java deleted file mode 100644 index fc1d91485e..0000000000 --- a/h2/src/tools/org/h2/build/code/SwitchSource.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.code; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.ArrayList; - -/** - * Switched source code to a specific Java version, automatically to the current - * version, or enable / disable other blocks of source code in Java files. - */ -public class SwitchSource { - - private final ArrayList enable = new ArrayList(); - private final ArrayList disable = new ArrayList(); - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws IOException { - new SwitchSource().run(args); - } - - private void run(String... 
args) throws IOException { - String dir = null; - String version = null; - for (int i = 0; i < args.length; i++) { - String a = args[i]; - if ("-dir".equals(a)) { - dir = args[++i]; - } else if ("-auto".equals(a)) { - enable.add("AWT"); - version = System.getProperty("java.specification.version"); - } else if ("-version".equals(a)) { - version = args[++i]; - } else if (a.startsWith("-")) { - String x = a.substring(1); - disable.add(x); - enable.remove(x); - } else if (a.startsWith("+")) { - String x = a.substring(1); - enable.add(x); - disable.remove(x); - } else { - showUsage(); - return; - } - } - if (version == null) { - // ok - } else if ("1.5".equals(version)) { - disable.add("Java 1.6"); - disable.add("Java 1.7"); - } else if ("1.6".equals(version)) { - enable.add("Java 1.6"); - disable.add("Java 1.7"); - } else if (version.compareTo("1.7") >= 0) { - enable.add("Java 1.6"); - enable.add("Java 1.7"); - } else { - throw new IllegalArgumentException("version: " + version); - } - if (dir == null) { - showUsage(); - } else { - process(new File(dir)); - } - } - - private void showUsage() { - System.out.println("Switched source code to a specific Java version."); - System.out.println("java "+getClass().getName() + "\n" + - " -dir The target directory\n" + - " [-version] Use the specified Java version (1.4 or newer)\n" + - " [-auto] Auto-detect Java version (1.4 or newer)\n" + - " [+MODE] Enable code labeled MODE\n" + - " [-MODE] Disable code labeled MODE"); - } - - private void process(File f) throws IOException { - String name = f.getName(); - if (name.startsWith(".svn")) { - return; - } else if (name.endsWith(".java")) { - processFile(f); - } else if (f.isDirectory()) { - for (File file : f.listFiles()) { - process(file); - } - } - } - - private void processFile(File f) throws IOException { - RandomAccessFile read = new RandomAccessFile(f, "r"); - byte[] buffer; - try { - long len = read.length(); - if (len >= Integer.MAX_VALUE) { - throw new IOException("Files 
bigger than Integer.MAX_VALUE are not supported"); - } - buffer = new byte[(int) len]; - read.readFully(buffer); - } finally { - read.close(); - } - boolean found = false; - // check for ## without creating a string - for (int i = 0; i < buffer.length - 1; i++) { - if (buffer[i] == '#' && buffer[i + 1] == '#') { - found = true; - break; - } - } - if (!found) { - return; - } - String source = new String(buffer); - String target = source; - for (String x : enable) { - target = replaceAll(target, "/*## " + x + " ##", "//## " + x + " ##"); - } - for (String x : disable) { - target = replaceAll(target, "//## " + x + " ##", "/*## " + x + " ##"); - } - if (!source.equals(target)) { - String name = f.getPath(); - File fileNew = new File(name + ".new"); - FileWriter write = new FileWriter(fileNew); - write.write(target); - write.close(); - File fileBack = new File(name + ".bak"); - fileBack.delete(); - f.renameTo(fileBack); - File fileCopy = new File(name); - if (!fileNew.renameTo(fileCopy)) { - throw new IOException("Could not rename " - + fileNew.getAbsolutePath() + " to " + name); - } - if (!fileBack.delete()) { - throw new IOException("Could not delete " + fileBack.getAbsolutePath()); - } - // System.out.println(name); - } - } - - private static String replaceAll(String s, String before, String after) { - int index = 0; - while (true) { - int next = s.indexOf(before, index); - if (next < 0) { - return s; - } - s = s.substring(0, next) + after + s.substring(next + before.length()); - index = next + after.length(); - } - } - -} diff --git a/h2/src/tools/org/h2/build/code/package.html b/h2/src/tools/org/h2/build/code/package.html index 6d385a1350..8f33d88b5b 100644 --- a/h2/src/tools/org/h2/build/code/package.html +++ b/h2/src/tools/org/h2/build/code/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/doc/BnfRailroad.java b/h2/src/tools/org/h2/build/doc/BnfRailroad.java index 9f1dc791fe..033c3ac149 100644 --- a/h2/src/tools/org/h2/build/doc/BnfRailroad.java 
+++ b/h2/src/tools/org/h2/build/doc/BnfRailroad.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; @@ -94,7 +94,7 @@ public void visitRuleFixed(int type) { * @return the HTML text */ static String getHtmlText(int type) { - switch(type) { + switch (type) { case RuleFixed.YMD: return "2000-01-01"; case RuleFixed.HMS: @@ -123,6 +123,8 @@ static String getHtmlText(int type) { return "["; case RuleFixed.CLOSE_BRACKET: return "]"; + case RuleFixed.JSON_TEXT: + return "JSON text"; default: throw new AssertionError("type="+type); } @@ -133,15 +135,8 @@ public void visitRuleList(boolean or, ArrayList list) { StringBuilder buff = new StringBuilder(); if (or) { buff.append("
    "); - int i = 0; - for (Rule r : list) { - String a = i == 0 ? "t" : i == list.size() - 1 ? "l" : "k"; - i++; - buff.append(""); + for (int i = 0, l = list.size() - 1; i <= l; i++) { + visitOrItem(buff, list.get(i), i == 0 ? "t" : i == l ? "l" : "k"); } buff.append("
    "); - r.accept(this); - buff.append(html); - buff.append("
    "); } else { @@ -161,9 +156,7 @@ public void visitRuleList(boolean or, ArrayList list) { @Override public void visitRuleOptional(Rule rule) { StringBuilder buff = new StringBuilder(); - buff.append(""); - buff.append("" + - ""); + writeOptionalStart(buff); buff.append("" + "
     
    "); rule.accept(this); @@ -172,4 +165,36 @@ public void visitRuleOptional(Rule rule) { html = buff.toString(); } + @Override + public void visitRuleOptional(ArrayList list) { + StringBuilder buff = new StringBuilder(); + writeOptionalStart(buff); + for (int i = 0, l = list.size() - 1; i <= l; i++) { + visitOrItem(buff, list.get(i), i == l ? "l" : "k"); + } + buff.append("
    "); + html = buff.toString(); + } + + private static void writeOptionalStart(StringBuilder buff) { + buff.append(""); + buff.append("" + + ""); + } + + private void visitOrItem(StringBuilder buff, Rule r, String a) { + buff.append(""); + } + + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + StringBuilder buff = new StringBuilder("
    "); + rule.accept(this); + html = buff.append(html).append("
    ").toString(); + } + } diff --git a/h2/src/tools/org/h2/build/doc/BnfSyntax.java b/h2/src/tools/org/h2/build/doc/BnfSyntax.java index b1ae235dbb..d1e8e6188e 100644 --- a/h2/src/tools/org/h2/build/doc/BnfSyntax.java +++ b/h2/src/tools/org/h2/build/doc/BnfSyntax.java @@ -1,10 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.StringTokenizer; import org.h2.bnf.Bnf; @@ -32,20 +33,79 @@ public String getHtml(Bnf bnf, String syntaxLines) { syntaxLines = StringUtils.replaceAll(syntaxLines, "\n ", "\n"); StringTokenizer tokenizer = Bnf.getTokenizer(syntaxLines); StringBuilder buff = new StringBuilder(); + ArrayDeque deque = new ArrayDeque<>(); + boolean extension = false; while (tokenizer.hasMoreTokens()) { String s = tokenizer.nextToken(); + if (s.equals("@c@")) { + if (!extension) { + extension = true; + buff.append(""); + } + s = skipAfterExtensionStart(tokenizer); + } else if (s.equals("@h2@")) { + if (!extension) { + extension = true; + buff.append(""); + } + s = skipAfterExtensionStart(tokenizer); + } + if (extension) { + if (s.length() == 1) { + char c = s.charAt(0); + switch (c) { + case '[': + deque.addLast(']'); + break; + case '{': + deque.addLast('}'); + break; + case ']': + case '}': + char c2 = deque.removeLast(); + if (c != c2) { + throw new AssertionError("Expected " + c2 + " got " + c); + } + break; + default: + if (deque.isEmpty()) { + deque.add('*'); + } + } + } else if (deque.isEmpty()) { + deque.add('*'); + } + } if (s.length() == 1 || StringUtils.toUpperEnglish(s).equals(s)) { buff.append(StringUtils.xmlText(s)); + if (extension && deque.isEmpty()) { + extension = false; + buff.append(""); 
+ } continue; } buff.append(getLink(bnf, s)); } + if (extension) { + if (deque.size() != 1 || deque.getLast() != '*') { + throw new AssertionError("Expected " + deque.getLast() + " got end of data"); + } + buff.append(""); + } String s = buff.toString(); // ensure it works within XHTML comments s = StringUtils.replaceAll(s, "--", "--"); return s; } + private static String skipAfterExtensionStart(StringTokenizer tokenizer) { + String s; + do { + s = tokenizer.nextToken(); + } while (s.equals(" ")); + return s; + } + /** * Get the HTML link to the given token. * @@ -68,9 +128,12 @@ String getLink(Bnf bnf, String token) { return token; } String page = "grammar.html"; - if (found.getSection().startsWith("Data Types")) { + String section = found.getSection(); + if (section.startsWith("Commands")) { + page = "commands.html"; + } if (section.startsWith("Data Types") || section.startsWith("Interval Data Types")) { page = "datatypes.html"; - } else if (found.getSection().startsWith("Functions")) { + } else if (section.startsWith("Functions")) { page = "functions.html"; } else if (token.equals("@func@")) { return "Function"; @@ -103,9 +166,19 @@ public void visitRuleOptional(Rule rule) { // not used } + @Override + public void visitRuleOptional(ArrayList list) { + // not used + } + @Override public void visitRuleRepeat(boolean comma, Rule rule) { // not used } + @Override + public void visitRuleExtension(Rule rule, boolean compatibility) { + // not used + } + } diff --git a/h2/src/tools/org/h2/build/doc/FileConverter.java b/h2/src/tools/org/h2/build/doc/FileConverter.java index 93834f0aef..3d71af469f 100644 --- a/h2/src/tools/org/h2/build/doc/FileConverter.java +++ b/h2/src/tools/org/h2/build/doc/FileConverter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; @@ -8,6 +8,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.Locale; import org.h2.build.indexer.HtmlConverter; @@ -52,10 +53,10 @@ private void run(String... args) throws Exception { private void convert() throws IOException { InputStream in = FileUtils.newInputStream(inFile); byte[] bytes = IOUtils.readBytesAndClose(in, -1); - String s = new String(bytes, "UTF-8"); + String s = new String(bytes, StandardCharsets.UTF_8); String s2 = HtmlConverter.convertHtmlToString(s); String s3 = StringUtils.javaDecode(s2); - byte[] result = s3.getBytes("UTF-8"); + byte[] result = s3.getBytes(StandardCharsets.UTF_8); OutputStream out = FileUtils.newOutputStream(outFile, false); out.write(result); out.close(); diff --git a/h2/src/tools/org/h2/build/doc/GenerateDoc.java b/h2/src/tools/org/h2/build/doc/GenerateDoc.java index 1ec0b6fa22..4c7378324f 100644 --- a/h2/src/tools/org/h2/build/doc/GenerateDoc.java +++ b/h2/src/tools/org/h2/build/doc/GenerateDoc.java @@ -1,42 +1,47 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.sql.Connection; import java.sql.DriverManager; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import org.h2.bnf.Bnf; import org.h2.engine.Constants; import org.h2.server.web.PageParser; -import org.h2.util.IOUtils; -import org.h2.util.JdbcUtils; +import org.h2.tools.Csv; import org.h2.util.StringUtils; /** * This application generates sections of the documentation - * by converting the built-in help section (INFORMATION_SCHEMA.HELP) + * by converting the built-in help section * to cross linked html. */ public class GenerateDoc { - private static final String IN_HELP = "src/docsrc/help/help.csv"; - private String inDir = "src/docsrc/html"; - private String outDir = "docs/html"; + private static final String IN_HELP = "src/main/org/h2/res/help.csv"; + private Path inDir = Paths.get("src/docsrc/html"); + private Path outDir = Paths.get("docs/html"); private Connection conn; private final HashMap session = - new HashMap(); + new HashMap<>(); private Bnf bnf; /** @@ -52,107 +57,193 @@ public static void main(String... args) throws Exception { private void run(String... 
args) throws Exception { for (int i = 0; i < args.length; i++) { if (args[i].equals("-in")) { - inDir = args[++i]; + inDir = Paths.get(args[++i]); } else if (args[i].equals("-out")) { - outDir = args[++i]; + outDir = Paths.get(args[++i]); } } Class.forName("org.h2.Driver"); conn = DriverManager.getConnection("jdbc:h2:mem:"); - new File(outDir).mkdirs(); - new RailroadImages().run(outDir + "/images"); + Files.createDirectories(outDir); + new RailroadImages().run(outDir.resolve("images")); bnf = Bnf.getInstance(null); bnf.linkStatements(); - session.put("version", Constants.getVersion()); + session.put("version", Constants.VERSION); session.put("versionDate", Constants.BUILD_DATE); - session.put("stableVersion", Constants.getVersionStable()); - session.put("stableVersionDate", Constants.BUILD_DATE_STABLE); - // String help = "SELECT * FROM INFORMATION_SCHEMA.HELP WHERE SECTION"; + session.put("downloadRoot", + "https://github.com/h2database/h2database/releases/download/version-" + Constants.VERSION); String help = "SELECT ROWNUM ID, * FROM CSVREAD('" + IN_HELP + "', NULL, 'lineComment=#') WHERE SECTION "; - map("commands", - help + "LIKE 'Commands%' ORDER BY ID", true); map("commandsDML", - help + "= 'Commands (DML)' ORDER BY ID", false); + help + "= 'Commands (DML)' ORDER BY ID", true, false); map("commandsDDL", - help + "= 'Commands (DDL)' ORDER BY ID", false); + help + "= 'Commands (DDL)' ORDER BY ID", true, false); map("commandsOther", - help + "= 'Commands (Other)' ORDER BY ID", false); + help + "= 'Commands (Other)' ORDER BY ID", true, false); + map("literals", + help + "= 'Literals' ORDER BY ID", true, false); + map("datetimeFields", + help + "= 'Datetime fields' ORDER BY ID", true, false); map("otherGrammar", - help + "= 'Other Grammar' ORDER BY ID", true); - map("functionsAggregate", - help + "= 'Functions (Aggregate)' ORDER BY ID", false); + help + "= 'Other Grammar' ORDER BY ID", true, false); + map("functionsNumeric", - help + "= 'Functions (Numeric)' 
ORDER BY ID", false); + help + "= 'Functions (Numeric)' ORDER BY ID", true, false); map("functionsString", - help + "= 'Functions (String)' ORDER BY ID", false); + help + "= 'Functions (String)' ORDER BY ID", true, false); map("functionsTimeDate", - help + "= 'Functions (Time and Date)' ORDER BY ID", false); + help + "= 'Functions (Time and Date)' ORDER BY ID", true, false); map("functionsSystem", - help + "= 'Functions (System)' ORDER BY ID", false); - map("functionsAll", - help + "LIKE 'Functions%' ORDER BY SECTION, ID", true); + help + "= 'Functions (System)' ORDER BY ID", true, false); + map("functionsJson", + help + "= 'Functions (JSON)' ORDER BY ID", true, false); + map("functionsTable", + help + "= 'Functions (Table)' ORDER BY ID", true, false); + + map("aggregateFunctionsGeneral", + help + "= 'Aggregate Functions (General)' ORDER BY ID", true, false); + map("aggregateFunctionsBinarySet", + help + "= 'Aggregate Functions (Binary Set)' ORDER BY ID", true, false); + map("aggregateFunctionsOrdered", + help + "= 'Aggregate Functions (Ordered)' ORDER BY ID", true, false); + map("aggregateFunctionsHypothetical", + help + "= 'Aggregate Functions (Hypothetical Set)' ORDER BY ID", true, false); + map("aggregateFunctionsInverse", + help + "= 'Aggregate Functions (Inverse Distribution)' ORDER BY ID", true, false); + map("aggregateFunctionsJSON", + help + "= 'Aggregate Functions (JSON)' ORDER BY ID", true, false); + + map("windowFunctionsRowNumber", + help + "= 'Window Functions (Row Number)' ORDER BY ID", true, false); + map("windowFunctionsRank", + help + "= 'Window Functions (Rank)' ORDER BY ID", true, false); + map("windowFunctionsLeadLag", + help + "= 'Window Functions (Lead or Lag)' ORDER BY ID", true, false); + map("windowFunctionsNth", + help + "= 'Window Functions (Nth Value)' ORDER BY ID", true, false); + map("windowFunctionsOther", + help + "= 'Window Functions (Other)' ORDER BY ID", true, false); + map("dataTypes", - help + "LIKE 'Data Types%' ORDER BY 
SECTION, ID", true); - map("informationSchema", "SELECT TABLE_NAME TOPIC, " + - "GROUP_CONCAT(COLUMN_NAME " + - "ORDER BY ORDINAL_POSITION SEPARATOR ', ') SYNTAX " + - "FROM INFORMATION_SCHEMA.COLUMNS " + - "WHERE TABLE_SCHEMA='INFORMATION_SCHEMA' " + - "GROUP BY TABLE_NAME ORDER BY TABLE_NAME", false); - processAll(""); - conn.close(); - } + help + "LIKE 'Data Types%' ORDER BY SECTION, ID", true, true); + map("intervalDataTypes", + help + "LIKE 'Interval Data Types%' ORDER BY SECTION, ID", true, true); + HashMap informationSchemaTables = new HashMap<>(); + HashMap informationSchemaColumns = new HashMap<>(512); + Csv csv = new Csv(); + csv.setLineCommentCharacter('#'); + try (ResultSet rs = csv.read("src/docsrc/help/information_schema.csv", null, null)) { + while (rs.next()) { + String tableName = rs.getString(1); + String columnName = rs.getString(2); + String description = rs.getString(3); + if (columnName != null) { + informationSchemaColumns.put(tableName == null ? columnName : tableName + '.' + columnName, + description); + } else { + informationSchemaTables.put(tableName, description); + } + } + } + int errorCount = 0; + try (Statement stat = conn.createStatement(); + PreparedStatement prep = conn.prepareStatement("SELECT COLUMN_NAME, " + + "DATA_TYPE_SQL('INFORMATION_SCHEMA', TABLE_NAME, 'TABLE', DTD_IDENTIFIER) DT " + + "FROM INFORMATION_SCHEMA.COLUMNS " + + "WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' AND TABLE_NAME = ? 
ORDER BY ORDINAL_POSITION")) { + ResultSet rs = stat.executeQuery("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES " + + "WHERE TABLE_SCHEMA = 'INFORMATION_SCHEMA' ORDER BY TABLE_NAME"); - private void processAll(String dir) throws Exception { - if (dir.endsWith(".svn")) { - return; + ArrayList> list = new ArrayList<>(); + StringBuilder builder = new StringBuilder(); + while (rs.next()) { + HashMap map = new HashMap<>(8); + String table = rs.getString(1); + map.put("table", table); + map.put("link", "information_schema_" + StringUtils.urlEncode(table.toLowerCase())); + String description = informationSchemaTables.get(table); + if (description == null) { + System.out.println("No documentation for INFORMATION_SCHEMA." + table); + errorCount++; + description = ""; + } + map.put("description", StringUtils.xmlText(description)); + prep.setString(1, table); + ResultSet rs2 = prep.executeQuery(); + builder.setLength(0); + while (rs2.next()) { + if (rs2.getRow() > 1) { + builder.append('\n'); + } + String column = rs2.getString(1); + description = informationSchemaColumns.get(table + '.' + column); + if (description == null) { + description = informationSchemaColumns.get(column); + if (description == null) { + System.out.println("No documentation for INFORMATION_SCHEMA." + table + '.' + column); + errorCount++; + description = ""; + } + } + builder.append(""); + } + map.put("columns", builder.toString()); + list.add(map); + } + putToMap("informationSchema", list); } - File[] list = new File(inDir + "/" + dir).listFiles(); - for (File file : list) { - if (file.isDirectory()) { - processAll(dir + file.getName()); - } else { - process(dir, file.getName()); + Files.walkFileTree(inDir, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + process(file); + return FileVisitResult.CONTINUE; } + }); + conn.close(); + if (errorCount > 0) { + throw new IOException(errorCount + (errorCount == 1 ? 
" error" : " errors") + " found"); } } - private void process(String dir, String fileName) throws Exception { - String inFile = inDir + "/" + dir + "/" + fileName; - String outFile = outDir + "/" + dir + "/" + fileName; - new File(outFile).getParentFile().mkdirs(); - FileOutputStream out = new FileOutputStream(outFile); - FileInputStream in = new FileInputStream(inFile); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - if (fileName.endsWith(".html")) { + /** + * Process a file. + * + * @param inFile the file + */ + void process(Path inFile) throws IOException { + Path outFile = outDir.resolve(inDir.relativize(inFile)); + Files.createDirectories(outFile.getParent()); + byte[] bytes = Files.readAllBytes(inFile); + if (inFile.getFileName().toString().endsWith(".html")) { String page = new String(bytes); page = PageParser.parse(page, session); bytes = page.getBytes(); } - out.write(bytes); - out.close(); + Files.write(outFile, bytes); } - private void map(String key, String sql, boolean railroads) + private void map(String key, String sql, boolean railroads, boolean forDataTypes) throws Exception { - ResultSet rs = null; - Statement stat = null; - try { - stat = conn.createStatement(); - rs = stat.executeQuery(sql); + try (Statement stat = conn.createStatement(); + ResultSet rs = stat.executeQuery(sql)) { ArrayList> list = - new ArrayList>(); + new ArrayList<>(); while (rs.next()) { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); ResultSetMetaData meta = rs.getMetaData(); - for (int i = 0; i < meta.getColumnCount(); i++) { - String k = StringUtils.toLowerEnglish(meta.getColumnLabel(i + 1)); - String value = rs.getString(i + 1); + for (int i = 1; i <= meta.getColumnCount(); i++) { + String k = StringUtils.toLowerEnglish(meta.getColumnLabel(i)); + String value = rs.getString(i); value = value.trim(); map.put(k, PageParser.escapeHtml(value)); } String topic = rs.getString("TOPIC"); + // Convert "INT Type" to "INT" etc. 
+ if (forDataTypes && topic.endsWith(" Type")) { + map.put("topic", topic.substring(0, topic.length() - 5)); + } String syntax = rs.getString("SYNTAX").trim(); if (railroads) { BnfRailroad r = new BnfRailroad(); @@ -172,28 +263,31 @@ private void map(String key, String sql, boolean railroads) text = StringUtils.replaceAll(text, "
    ", " "); text = addCode(text); + text = addLinks(text); map.put("text", text); } String link = topic.toLowerCase(); - link = StringUtils.replaceAll(link, " ", "_"); + link = link.replace(' ', '_'); // link = StringUtils.replaceAll(link, "_", ""); - link = StringUtils.replaceAll(link, "@", "_"); + link = link.replace('@', '_'); map.put("link", StringUtils.urlEncode(link)); list.add(map); } - session.put(key, list); - int div = 3; - int part = (list.size() + div - 1) / div; - for (int i = 0, start = 0; i < div; i++, start += part) { - List> listThird = list.subList(start, - Math.min(start + part, list.size())); - session.put(key + "-" + i, listThird); - } - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(stat); + putToMap(key, list); + } + } + + private void putToMap(String key, ArrayList> list) { + session.put(key, list); + int div = 3; + int part = (list.size() + div - 1) / div; + for (int i = 0, start = 0; i < div; i++, start += part) { + int end = Math.min(start + part, list.size()); + List> listThird = start <= end ? 
list.subList(start, end) + : Collections.emptyList(); + session.put(key + '-' + i, listThird); } } @@ -248,4 +342,54 @@ private static String addCode(String text) { s = StringUtils.replaceAll(s, "GB", "GB"); return s; } + + private static String addLinks(String text) { + int start = nextLink(text, 0); + if (start < 0) { + return text; + } + StringBuilder buff = new StringBuilder(text.length()); + int len = text.length(); + int offset = 0; + do { + if (start > 2 && text.regionMatches(start - 2, "](https://h2database.com/html/", 0, 30)) { + int descEnd = start - 2; + int descStart = text.lastIndexOf('[', descEnd - 1) + 1; + int linkStart = start + 28; + int linkEnd = text.indexOf(')', start + 29); + buff.append(text, offset, descStart - 1) // + .append("") // + .append(text, descStart, descEnd) // + .append(""); + offset = linkEnd + 1; + } else { + int end = start + 7; + for (; end < len && !Character.isWhitespace(text.charAt(end)); end++) { + // Nothing to do + } + buff.append(text, offset, start) // + .append("") // + .append(text, start, end) // + .append(""); + offset = end; + } + } while ((start = nextLink(text, offset)) >= 0); + return buff.append(text, offset, len).toString(); + } + + private static int nextLink(String text, int i) { + int found = -1; + found = findLink(text, i, "http://", found); + found = findLink(text, i, "https://", found); + return found; + } + + private static int findLink(String text, int offset, String prefix, int found) { + int idx = text.indexOf(prefix, offset); + if (idx >= 0 && (found < 0 || idx < found)) { + found = idx; + } + return found; + } + } diff --git a/h2/src/tools/org/h2/build/doc/GenerateHelp.java b/h2/src/tools/org/h2/build/doc/GenerateHelp.java deleted file mode 100644 index 3dd9f9f6de..0000000000 --- a/h2/src/tools/org/h2/build/doc/GenerateHelp.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.build.doc; - -import java.io.BufferedWriter; -import java.io.FileWriter; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.Types; -import org.h2.tools.Csv; -import org.h2.tools.SimpleResultSet; - -/** - * Generates the help.csv file that is included in the jar file. - */ -public class GenerateHelp { - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - String in = "src/docsrc/help/help.csv"; - String out = "src/main/org/h2/res/help.csv"; - Csv csv = new Csv(); - csv.setLineCommentCharacter('#'); - ResultSet rs = csv.read(in, null, null); - SimpleResultSet rs2 = new SimpleResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - int columnCount = meta.getColumnCount() - 1; - for (int i = 0; i < columnCount; i++) { - rs2.addColumn(meta.getColumnLabel(1 + i), Types.VARCHAR, 0, 0); - } - while (rs.next()) { - Object[] row = new Object[columnCount]; - for (int i = 0; i < columnCount; i++) { - String s = rs.getString(1 + i); - if (i == 3) { - int dot = s.indexOf('.'); - if (dot >= 0) { - s = s.substring(0, dot + 1); - } - } - row[i] = s; - } - rs2.addRow(row); - } - BufferedWriter writer = new BufferedWriter(new FileWriter(out)); - writer.write("# Copyright 2004-2014 H2 Group. " + - "Multiple-Licensed under the MPL 2.0,\n" + - "# and the EPL 1.0 " + - "(http://h2database.com/html/license.html).\n" + - "# Initial Developer: H2 Group)\n"); - csv = new Csv(); - csv.setLineSeparator("\n"); - csv.write(writer, rs2); - } - -} diff --git a/h2/src/tools/org/h2/build/doc/LinkChecker.java b/h2/src/tools/org/h2/build/doc/LinkChecker.java index 95aa3570c6..e857bf531d 100644 --- a/h2/src/tools/org/h2/build/doc/LinkChecker.java +++ b/h2/src/tools/org/h2/build/doc/LinkChecker.java @@ -1,21 +1,26 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; import java.io.File; -import java.io.FileReader; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import org.h2.tools.Server; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -27,11 +32,28 @@ public class LinkChecker { private static final boolean TEST_EXTERNAL_LINKS = false; private static final boolean OPEN_EXTERNAL_LINKS = false; private static final String[] IGNORE_MISSING_LINKS_TO = { - "SysProperties", "ErrorCode" + "SysProperties", "ErrorCode", + // TODO check these replacement link too + "#build_index", + "#datatypes_index", + "#faq_index", + "#commands_index", + "#grammar_index", + "#functions_index", + "#functions_aggregate_index", + "#functions_window_index", + "#tutorial_index", + "docs/javadoc/" }; - private final HashMap targets = new HashMap(); - private final HashMap links = new HashMap(); + private static enum TargetKind { + FILE, ID + } + private final HashMap targets = new HashMap<>(); + /** + * Map of source link (i.e. tag) in the document, to the document path + */ + private final HashMap links = new HashMap<>(); /** * This method is called when executing this application from the command @@ -44,10 +66,10 @@ public static void main(String... args) throws Exception { } private void run(String... 
args) throws Exception { - String dir = "docs"; + Path dir = Paths.get("docs"); for (int i = 0; i < args.length; i++) { if ("-dir".equals(args[i])) { - dir = args[++i]; + dir = Paths.get(args[++i]); } } process(dir); @@ -114,10 +136,10 @@ private void listExternalLinks() { } private void listBadLinks() throws Exception { - ArrayList errors = new ArrayList(); + ArrayList errors = new ArrayList<>(); for (String link : links.keySet()) { if (!link.startsWith("http") && !link.endsWith("h2.pdf") - && link.indexOf("_ja.") < 0) { + && /* For Javadoc 8 */ !link.startsWith("docs/javadoc")) { if (targets.get(link) == null) { errors.add(links.get(link) + ": Link missing " + link); } @@ -129,7 +151,7 @@ private void listBadLinks() throws Exception { } } for (String name : targets.keySet()) { - if (targets.get(name).equals("id")) { + if (targets.get(name) == TargetKind.ID) { boolean ignore = false; for (String to : IGNORE_MISSING_LINKS_TO) { if (name.contains(to)) { @@ -146,34 +168,37 @@ private void listBadLinks() throws Exception { for (String error : errors) { System.out.println(error); } - if (errors.size() > 0) { + if (!errors.isEmpty()) { throw new Exception("Problems where found by the Link Checker"); } } - private void process(String path) throws Exception { - if (path.endsWith("/CVS") || path.endsWith("/.svn")) { - return; - } - File file = new File(path); - if (file.isDirectory()) { - for (String n : file.list()) { - process(path + "/" + n); + private void process(Path path) throws Exception { + Files.walkFileTree(path, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + processFile(file); + return FileVisitResult.CONTINUE; } - } else { - processFile(path); - } + }); } - private void processFile(String path) throws Exception { - targets.put(path, "file"); - String lower = StringUtils.toLowerEnglish(path); + /** + * Process a file. 
+ * + * @param file the file + */ + void processFile(Path file) throws IOException { + String path = file.toString(); + targets.put(path, TargetKind.FILE); + String fileName = file.getFileName().toString(); + String lower = StringUtils.toLowerEnglish(fileName); if (!lower.endsWith(".html") && !lower.endsWith(".htm")) { return; } - String fileName = new File(path).getName(); - String parent = path.substring(0, path.lastIndexOf('/')); - String html = IOUtils.readStringAndClose(new FileReader(path), -1); + Path parent = file.getParent(); + final String html = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); + // find all the target fragments in the document (those elements marked with id attribute) int idx = -1; while (true) { idx = html.indexOf(" id=\"", idx + 1); @@ -181,26 +206,28 @@ private void processFile(String path) throws Exception { break; } int start = idx + " id=\"".length(); - int end = html.indexOf("\"", start); + int end = html.indexOf('"', start); if (end < 0) { error(fileName, "Expected \" after id= " + html.substring(idx, idx + 100)); } String ref = html.substring(start, end); if (!ref.startsWith("_")) { - targets.put(path + "#" + ref, "id"); + targets.put(path + "#" + ref.replaceAll("%3C|<", "<").replaceAll("%3E|>", ">"), // + TargetKind.ID); } } + // find all the href links in the document idx = -1; while (true) { idx = html.indexOf(" href=\"", idx + 1); if (idx < 0) { break; } - int start = html.indexOf("\"", idx); + int start = html.indexOf('"', idx); if (start < 0) { error(fileName, "Expected \" after href= at " + html.substring(idx, idx + 100)); } - int end = html.indexOf("\"", start + 1); + int end = html.indexOf('"', start + 1); if (end < 0) { error(fileName, "Expected \" after href= at " + html.substring(idx, idx + 100)); } @@ -216,19 +243,22 @@ private void processFile(String path) throws Exception { } else if (ref.startsWith("#")) { ref = path + ref; } else { - String p = parent; + Path p = parent; while (ref.startsWith(".")) 
{ if (ref.startsWith("./")) { ref = ref.substring(2); } else if (ref.startsWith("../")) { ref = ref.substring(3); - p = p.substring(0, p.lastIndexOf('/')); + p = p.getParent(); } } - ref = p + "/" + ref; + ref = p + File.separator + ref; } if (ref != null) { - links.put(ref, path); + links.put(ref.replace('/', File.separatorChar) // + .replaceAll("%5B", "[").replaceAll("%5D", "]") // + .replaceAll("%3C", "<").replaceAll("%3E", ">"), // + path); } } idx = -1; @@ -237,16 +267,16 @@ private void processFile(String path) throws Exception { if (idx < 0) { break; } - int equals = html.indexOf("=", idx); + int equals = html.indexOf('=', idx); if (equals < 0) { error(fileName, "Expected = after "); writer.println("H2 Documentation"); writer.println(""); - writer.println("

    H2 Database Engine

    "); - writer.println("

    Version " + Constants.getFullVersion() + "

    "); + writer.println("

    H2 Database Engine

    "); + writer.println("

    Version " + Constants.FULL_VERSION + "

    "); writer.println(finalText); writer.println(""); writer.close(); @@ -74,6 +78,32 @@ private static String disableRailroads(String text) { return text; } + private static String addLegacyFontTag(String fileName, String text) { + int idx1 = text.indexOf("> 4)); + do { + builder.append(text, idx2, idx1); + boolean compat = text.regionMatches(idx1 + 17, "Compat\">", 0, 8); + boolean h2 = text.regionMatches(idx1 + 17, "H2\">", 0, 4); + if (compat == h2) { + throw new RuntimeException("Unknown BNF rule style in file " + fileName); + } + idx2 = text.indexOf("", idx1 + (compat ? 8 : 4)); + if (idx2 <= 0) { + throw new RuntimeException(" not found in file " + fileName); + } + idx2 += 7; + builder.append("") + .append(text, idx1, idx2).append(""); + idx1 = text.indexOf("= 0); + return builder.append(text, idx2, length).toString(); + } + private static String removeHeaderFooter(String fileName, String text) { // String start = "", + "
     
    "); + r.accept(this); + buff.append(html); + buff.append("
    ").append(column).append("").append(rs2.getString(2)) + .append("
    ") + .append(StringUtils.xmlText(description)).append("
    "); + } + private static String getContent(String fileName) throws Exception { - File file = new File(BASE_DIR, fileName); - int length = (int) file.length(); - char[] data = new char[length]; - FileReader reader = new FileReader(file); - int off = 0; - while (length > 0) { - int len = reader.read(data, off, length); - off += len; - length -= len; - } - reader.close(); - String s = new String(data); - return s; + return new String(Files.readAllBytes(Paths.get(BASE_DIR, fileName)), StandardCharsets.UTF_8); } } diff --git a/h2/src/tools/org/h2/build/doc/RailroadImages.java b/h2/src/tools/org/h2/build/doc/RailroadImages.java index aec94cb846..c27620c303 100644 --- a/h2/src/tools/org/h2/build/doc/RailroadImages.java +++ b/h2/src/tools/org/h2/build/doc/RailroadImages.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; @@ -10,8 +10,12 @@ import java.awt.Graphics2D; import java.awt.RenderingHints; import java.awt.image.BufferedImage; -import java.io.File; import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + import javax.imageio.ImageIO; /** @@ -24,26 +28,28 @@ public class RailroadImages { private static final int DIV = 2; private static final int STROKE = 6; - private String outDir; + private Path outDir; /** * This method is called when executing this application from the command * line. * * @param args the command line parameters + * @throws IOException on I/O exception */ - public static void main(String... args) { - new RailroadImages().run("docs/html/images"); + public static void main(String... 
args) throws IOException { + new RailroadImages().run(Paths.get("docs/html/images")); } /** * Create the images. * * @param out the target directory + * @throws IOException on I/O exception */ - void run(String out) { + void run(Path out) throws IOException { this.outDir = out; - new File(out).mkdirs(); + Files.createDirectories(outDir); BufferedImage img; Graphics2D g; @@ -111,8 +117,8 @@ private void savePng(BufferedImage img, String fileName) { RenderingHints.VALUE_INTERPOLATION_BILINEAR); g.drawImage(img, 0, 0, w / DIV, h / DIV, 0, 0, w, h, null); g.dispose(); - try { - ImageIO.write(smaller, "png", new File(outDir + "/" + fileName)); + try (OutputStream out = Files.newOutputStream(outDir.resolve(fileName))) { + ImageIO.write(smaller, "png", out); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/h2/src/tools/org/h2/build/doc/SpellChecker.java b/h2/src/tools/org/h2/build/doc/SpellChecker.java index 41a2a37d86..710191f6fa 100644 --- a/h2/src/tools/org/h2/build/doc/SpellChecker.java +++ b/h2/src/tools/org/h2/build/doc/SpellChecker.java @@ -1,16 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.StringTokenizer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.h2.build.BuildBase; import org.h2.util.StringUtils; @@ -25,16 +29,15 @@ public class SpellChecker { private static final String[] SUFFIX = { "html", "java", "sql", "txt", - "xml", "jsp", "css", "bat", "csv", "xml", "js", "Driver", + "xml", "jsp", "css", "bat", "csv", "xml", "js", "Driver", "Processor", "properties", "task", "MF", "mf", "sh", "" }; private static final String[] IGNORE = { "dev", "nsi", "gif", "png", "odg", "ico", "sxd", "zip", "bz2", "rc", "layout", "res", "dll", "jar", - "svg", "prefs", "prop", "iml" }; + "svg", "prefs", "prop", "iml", "class" }; private static final String DELIMITERS = " \n.();-\"=,*/{}_<>+\r:'@[]&\\!#|?$^%~`\t"; private static final String PREFIX_IGNORE = "abc"; - private static final String[] IGNORE_FILES = { "mainWeb.html", - "pg_catalog.sql" }; + private static final String[] IGNORE_FILES = { "mainWeb.html" }; // These are public so we can set them during development testing @@ -49,11 +52,11 @@ public class SpellChecker { public boolean printDictionary; private final HashSet dictionary = - new HashSet(); + new HashSet<>(); private final HashSet used = - new HashSet(); + new HashSet<>(); private final HashMap unknown = - new HashMap(); + new HashMap<>(); private boolean addToDictionary; private int errorCount; private int contextCount; @@ -70,12 +73,16 @@ public static void main(String... 
args) throws IOException { } private void run(String dictionaryFileName, String dir) throws IOException { - process(new File(dictionaryFileName)); - process(new File(dir)); + process(Paths.get(dictionaryFileName)); + process(Paths.get(dir)); + HashSet unused = new HashSet<>(); + unused.addAll(dictionary); + unused.removeAll(used); + // System.out.println("UNUSED WORDS"); + // System.out.println(unused); if (printDictionary) { System.out.println("USED WORDS"); - String[] list = new String[used.size()]; - used.toArray(list); + String[] list = used.toArray(new String[used.size()]); Arrays.sort(list); StringBuilder buff = new StringBuilder(); for (String s : list) { @@ -107,20 +114,20 @@ private void run(String dictionaryFileName, String dir) throws IOException { } } - private void process(File file) throws IOException { - String name = file.getName(); + private void process(Path file) throws IOException { + String name = file.getFileName().toString(); if (name.endsWith(".svn") || name.endsWith(".DS_Store")) { return; } if (name.startsWith("_") && name.indexOf("_en") < 0) { return; } - if (file.isDirectory()) { - for (File f : file.listFiles()) { + if (Files.isDirectory(file)) { + for (Path f : Files.newDirectoryStream(file)) { process(f); } } else { - String fileName = file.getAbsolutePath(); + String fileName = file.toAbsolutePath().toString(); int idx = fileName.lastIndexOf('.'); String suffix; if (idx < 0) { @@ -160,7 +167,7 @@ private void process(File file) throws IOException { } private void scan(String fileName, String text) { - HashSet notFound = new HashSet(); + HashSet notFound = new HashSet<>(); text = removeLinks(fileName, text); StringTokenizer tokenizer = new StringTokenizer(text, DELIMITERS); while (tokenizer.hasMoreTokens()) { @@ -177,10 +184,7 @@ private void scan(String fileName, String text) { System.out.println(); } } - if (notFound.isEmpty()) { - return; - } - if (notFound.size() > 0) { + if (!notFound.isEmpty()) { System.out.println("file: " + 
fileName); for (String s : notFound) { System.out.print(s + " "); @@ -190,19 +194,20 @@ private void scan(String fileName, String text) { } private String removeLinks(String fileName, String text) { + Pattern linkPattern = Pattern.compile("http[s]?://"); StringBuilder buff = new StringBuilder(text.length()); int pos = 0, last = 0; if (fileName.endsWith(".properties")) { text = StringUtils.replaceAll(text, "\\:", ":"); } while (true) { - pos = text.indexOf("http://", pos); - if (pos < 0) { + Matcher m = linkPattern.matcher(text.substring(pos)); + if (!m.find()) { break; } - int start = pos; + int start = m.start() + pos; + pos = m.end() + pos; buff.append(text.substring(last, start)); - pos += "http://".length(); while (true) { char c = text.charAt(pos); if (!Character.isJavaIdentifierPart(c) && diff --git a/h2/src/tools/org/h2/build/doc/UploadBuild.java b/h2/src/tools/org/h2/build/doc/UploadBuild.java index 3481b5ca78..dc48eff0e8 100644 --- a/h2/src/tools/org/h2/build/doc/UploadBuild.java +++ b/h2/src/tools/org/h2/build/doc/UploadBuild.java @@ -1,18 +1,20 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileReader; import java.io.IOException; -import java.io.OutputStream; import java.io.StringReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -21,11 +23,11 @@ import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; + import org.h2.dev.ftp.FtpClient; import org.h2.engine.Constants; import org.h2.store.fs.FileUtils; import org.h2.test.utils.OutputCatcher; -import org.h2.util.IOUtils; import org.h2.util.ScriptReader; import org.h2.util.StringUtils; @@ -41,6 +43,7 @@ public class UploadBuild { * @param args the command line parameters */ public static void main(String... args) throws Exception { + System.exit(0); System.setProperty("h2.socketConnectTimeout", "30000"); String password = System.getProperty("h2.ftpPassword"); if (password == null) { @@ -49,12 +52,11 @@ public static void main(String... 
args) throws Exception { FtpClient ftp = FtpClient.open("h2database.com"); ftp.login("h2database", password); ftp.changeWorkingDirectory("/httpdocs"); - boolean coverage = new File("coverage/index.html").exists(); + Path coverageFile = Paths.get("coverage/index.html"); + boolean coverage = Files.exists(coverageFile); boolean coverageFailed; if (coverage) { - byte[] data = IOUtils.readBytesAndClose( - new FileInputStream("coverage/index.html"), -1); - String index = new String(data, "ISO-8859-1"); + String index = new String(Files.readAllBytes(coverageFile), StandardCharsets.ISO_8859_1); coverageFailed = index.contains("CLASS=\"h\""); while (true) { int idx = index.indexOf(""); + error = true; + } else { + testOutput = "No log.txt"; + error = true; + } } if (!ftp.exists("/httpdocs", "automated")) { ftp.makeDirectory("/httpdocs/automated"); @@ -123,11 +127,11 @@ public static void main(String... args) throws Exception { (error ? " FAILED" : "") + (coverageFailed ? " COVERAGE" : "") + "', '" + ts + - "', 'Output" + - " - Coverage" + - " - Jar');\n"; buildSql += sql; Connection conn; @@ -139,8 +143,8 @@ public static void main(String... args) throws Exception { conn = DriverManager.getConnection("jdbc:h2v1_1:mem:"); } conn.createStatement().execute(buildSql); - String newsfeed = IOUtils.readStringAndClose( - new FileReader("src/tools/org/h2/build/doc/buildNewsfeed.sql"), -1); + String newsfeed = new String(Files.readAllBytes(Paths.get("src/tools/org/h2/build/doc/buildNewsfeed.sql")), + StandardCharsets.UTF_8); ScriptReader r = new ScriptReader(new StringReader(newsfeed)); Statement stat = conn.createStatement(); ResultSet rs = null; @@ -162,56 +166,87 @@ public static void main(String... 
args) throws Exception { new ByteArrayInputStream(content.getBytes())); ftp.store("/httpdocs/html/testOutput.html", new ByteArrayInputStream(testOutput.getBytes())); - String jarFileName = "bin/h2-" + Constants.getVersion() + ".jar"; + String jarFileName = "bin/h2-" + Constants.VERSION + ".jar"; if (FileUtils.exists(jarFileName)) { ftp.store("/httpdocs/automated/h2-latest.jar", - new FileInputStream(jarFileName)); + Files.newInputStream(Paths.get(jarFileName))); } if (coverage) { ftp.store("/httpdocs/coverage/overview.html", - new FileInputStream("coverage/overview.html")); + Files.newInputStream(Paths.get("coverage/overview.html"))); ftp.store("/httpdocs/coverage/coverage.zip", - new FileInputStream("coverage.zip")); + Files.newInputStream(Paths.get("coverage.zip"))); FileUtils.delete("coverage.zip"); } + String mavenRepoDir = System.getProperty("user.home") + "/.m2/repository/"; + boolean mavenSnapshot = Files.exists(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar")); + if (mavenSnapshot) { + if (!ftp.exists("/httpdocs", "m2-repo")) { + ftp.makeDirectory("/httpdocs/m2-repo"); + } + if (!ftp.exists("/httpdocs/m2-repo", "com")) { + ftp.makeDirectory("/httpdocs/m2-repo/com"); + } + if (!ftp.exists("/httpdocs/m2-repo/com", "h2database")) { + ftp.makeDirectory("/httpdocs/m2-repo/com/h2database"); + } + if (!ftp.exists("/httpdocs/m2-repo/com/h2database", "h2")) { + ftp.makeDirectory("/httpdocs/m2-repo/com/h2database/h2"); + } + if (!ftp.exists("/httpdocs/m2-repo/com/h2database/h2", "1.0-SNAPSHOT")) { + ftp.makeDirectory("/httpdocs/m2-repo/com/h2database/h2/1.0-SNAPSHOT"); + } + if (!ftp.exists("/httpdocs/m2-repo/com/h2database", "h2-mvstore")) { + ftp.makeDirectory("/httpdocs/m2-repo/com/h2database/h2-mvstore"); + } + if (!ftp.exists("/httpdocs/m2-repo/com/h2database/h2-mvstore", "1.0-SNAPSHOT")) { + ftp.makeDirectory("/httpdocs/m2-repo/com/h2database/h2-mvstore/1.0-SNAPSHOT"); + } + ftp.store("/httpdocs/m2-repo/com/h2database/h2" + + 
"/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.pom", + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.pom"))); + ftp.store("/httpdocs/m2-repo/com/h2database/h2" + + "/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar", + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2/1.0-SNAPSHOT/h2-1.0-SNAPSHOT.jar"))); + ftp.store("/httpdocs/m2-repo/com/h2database/h2-mvstore" + + "/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.pom", + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.pom"))); + ftp.store("/httpdocs/m2-repo/com/h2database/h2-mvstore" + + "/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.jar", + Files.newInputStream(Paths.get(mavenRepoDir + + "com/h2database/h2-mvstore/1.0-SNAPSHOT/h2-mvstore-1.0-SNAPSHOT.jar"))); + } ftp.close(); } - private static void zip(String destFile, String directory, boolean storeOnly) - throws IOException { - OutputStream out = new FileOutputStream(destFile); - ZipOutputStream zipOut = new ZipOutputStream(out); + private static void zip(String destFile, String directory, boolean storeOnly) throws IOException { + ZipOutputStream zipOut = new ZipOutputStream(Files.newOutputStream(Paths.get(destFile))); if (storeOnly) { zipOut.setMethod(ZipOutputStream.STORED); } zipOut.setLevel(Deflater.BEST_COMPRESSION); - addFiles(new File(directory), new File(directory), zipOut); + Path base = Paths.get(directory); + Files.walkFileTree(base, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + byte[] data = Files.readAllBytes(file); + ZipEntry entry = new ZipEntry(base.relativize(file).toString().replace('\\', '/')); + CRC32 crc = new CRC32(); + crc.update(data); + entry.setSize(data.length); + entry.setCrc(crc.getValue()); + zipOut.putNextEntry(entry); + zipOut.write(data); + zipOut.closeEntry(); + return FileVisitResult.CONTINUE; + } + }); zipOut.finish(); zipOut.close(); } - private static void 
addFiles(File base, File file, ZipOutputStream out) - throws IOException { - if (file.isDirectory()) { - for (File f : file.listFiles()) { - addFiles(base, f, out); - } - } else { - String path = file.getAbsolutePath().substring(base.getAbsolutePath().length()); - path = path.replace('\\', '/'); - if (path.startsWith("/")) { - path = path.substring(1); - } - byte[] data = IOUtils.readBytesAndClose(new FileInputStream(file), -1); - ZipEntry entry = new ZipEntry(path); - CRC32 crc = new CRC32(); - crc.update(data); - entry.setSize(file.length()); - entry.setCrc(crc.getValue()); - out.putNextEntry(entry); - out.write(data); - out.closeEntry(); - } - } - } diff --git a/h2/src/tools/org/h2/build/doc/WebSite.java b/h2/src/tools/org/h2/build/doc/WebSite.java index dedac32964..97bff93d03 100644 --- a/h2/src/tools/org/h2/build/doc/WebSite.java +++ b/h2/src/tools/org/h2/build/doc/WebSite.java @@ -1,18 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.doc; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; +import org.h2.build.BuildBase; import org.h2.samples.Newsfeed; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -24,19 +29,13 @@ public class WebSite { private static final String ANALYTICS_TAG = ""; - private static final String ANALYTICS_SCRIPT = - "\n" + - ""; + private static final String ANALYTICS_SCRIPT = ""; private static final String TRANSLATE_START = ""; - private static final String SOURCE_DIR = "docs"; - private static final String WEB_DIR = "../h2web"; - private final HashMap fragments = new HashMap(); + private static final Path SOURCE_DIR = Paths.get("docs"); + private static final Path WEB_DIR = Paths.get("../h2web"); + private final HashMap fragments = new HashMap<>(); /** * This method is called when executing this application from the command @@ -50,23 +49,19 @@ public static void main(String... 
args) throws Exception { private void run() throws Exception { // create the web site - deleteRecursive(new File(WEB_DIR)); + BuildBase.deleteRecursive(WEB_DIR); loadFragments(); - copy(new File(SOURCE_DIR), new File(WEB_DIR), true, true); + copy(SOURCE_DIR, WEB_DIR, true, true); Newsfeed.main(WEB_DIR + "/html"); // create the internal documentation - copy(new File(SOURCE_DIR), new File(SOURCE_DIR), true, false); + copy(SOURCE_DIR, SOURCE_DIR, true, false); } private void loadFragments() throws IOException { - File dir = new File(SOURCE_DIR, "html"); - for (File f : dir.listFiles()) { - if (f.getName().startsWith("fragments")) { - FileInputStream in = new FileInputStream(f); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - String page = new String(bytes, "UTF-8"); - fragments.put(f.getName(), page); + try (DirectoryStream stream = Files.newDirectoryStream(SOURCE_DIR.resolve("html"), "fragments*")) { + for (Path f : stream) { + fragments.put(f.getFileName().toString(), new String(Files.readAllBytes(f), StandardCharsets.UTF_8)); } } } @@ -76,7 +71,7 @@ private String replaceFragments(String fileName, String page) { return page; } String language = ""; - int index = fileName.indexOf("_"); + int index = fileName.indexOf('_'); if (index >= 0) { int end = fileName.indexOf('.'); language = fileName.substring(index, end); @@ -107,65 +102,72 @@ private String replaceFragments(String fileName, String page) { return page; } - private void deleteRecursive(File dir) { - if (dir.isDirectory()) { - for (File f : dir.listFiles()) { - deleteRecursive(f); + private void copy(Path source, Path target, boolean replaceFragments, boolean web) throws IOException { + Files.walkFileTree(source, new SimpleFileVisitor() { + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { + Files.createDirectories(target.resolve(source.relativize(dir))); + return FileVisitResult.CONTINUE; } - } - dir.delete(); + @Override + public 
FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + copyFile(file, target.resolve(source.relativize(file)), replaceFragments, web); + return super.visitFile(file, attrs); + } + }); } - private void copy(File source, File target, boolean replaceFragments, - boolean web) throws IOException { - if (source.isDirectory()) { - target.mkdirs(); - for (File f : source.listFiles()) { - copy(f, new File(target, f.getName()), replaceFragments, web); + /** + * Copy a file. + * + * @param source the source file + * @param target the target file + * @param replaceFragments whether to replace fragments + * @param web whether the target is a public web site (false for local documentation) + */ + void copyFile(Path source, Path target, boolean replaceFragments, boolean web) throws IOException { + String name = source.getFileName().toString(); + if (name.endsWith("onePage.html") || name.startsWith("fragments")) { + return; + } + if (web) { + if (name.endsWith("main.html")) { + return; } } else { - String name = source.getName(); - if (name.endsWith("onePage.html") || name.startsWith("fragments")) { + if (name.endsWith("mainWeb.html")) { return; } + } + byte[] bytes = Files.readAllBytes(source); + if (name.endsWith(".html")) { + String page = new String(bytes, StandardCharsets.UTF_8); if (web) { - if (name.endsWith("main.html") || name.endsWith("main_ja.html")) { - return; - } - } else { - if (name.endsWith("mainWeb.html") || name.endsWith("mainWeb_ja.html")) { - return; - } + page = StringUtils.replaceAll(page, ANALYTICS_TAG, ANALYTICS_SCRIPT); } - FileInputStream in = new FileInputStream(source); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); - if (name.endsWith(".html")) { - String page = new String(bytes, "UTF-8"); - if (web) { - page = StringUtils.replaceAll(page, ANALYTICS_TAG, ANALYTICS_SCRIPT); - } - if (replaceFragments) { - page = replaceFragments(name, page); - page = StringUtils.replaceAll(page, "", "
    ");
    -                    page = StringUtils.replaceAll(page, "", "");
    -                }
    -                bytes = page.getBytes("UTF-8");
    +            if (replaceFragments) {
    +                page = replaceFragments(name, page);
    +                page = StringUtils.replaceAll(page, "", "
    ");
    +                page = StringUtils.replaceAll(page, "", "");
    +            }
    +            if (name.endsWith("changelog.html")) {
    +                page = page.replaceAll("Issue\\s+#?(\\d+)",
    +                        "Issue #$1");
    +                page = page.replaceAll("PR\\s+#?(\\d+)",
    +                        "PR #$1");
    +            }
    +            bytes = page.getBytes(StandardCharsets.UTF_8);
    +        }
    +        Files.write(target, bytes);
    +        if (web) {
    +            if (name.endsWith("mainWeb.html")) {
    +                Files.move(target, target.getParent().resolve("main.html"));
                 }
             }
         }
    diff --git a/h2/src/tools/org/h2/build/doc/XMLChecker.java b/h2/src/tools/org/h2/build/doc/XMLChecker.java
    index ac282a1071..3bb0d65b91 100644
    --- a/h2/src/tools/org/h2/build/doc/XMLChecker.java
    +++ b/h2/src/tools/org/h2/build/doc/XMLChecker.java
    @@ -1,16 +1,20 @@
     /*
    - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
    - * and the EPL 1.0 (http://h2database.com/html/license.html).
    + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
    + * and the EPL 1.0 (https://h2database.com/html/license.html).
      * Initial Developer: H2 Group
      */
     package org.h2.build.doc;
     
    -import java.io.File;
    -import java.io.FileReader;
    +import java.io.IOException;
    +import java.nio.charset.StandardCharsets;
    +import java.nio.file.FileVisitResult;
    +import java.nio.file.Files;
    +import java.nio.file.Path;
    +import java.nio.file.Paths;
    +import java.nio.file.SimpleFileVisitor;
    +import java.nio.file.attribute.BasicFileAttributes;
     import java.util.Stack;
     
    -import org.h2.util.IOUtils;
    -
     /**
      * This class checks that the HTML and XML part of the source code
      * is well-formed XML.
    @@ -24,35 +28,46 @@ public class XMLChecker {
          * @param args the command line parameters
          */
         public static void main(String... args) throws Exception {
    -        new XMLChecker().run(args);
    +        XMLChecker.run(args);
         }
     
    -    private void run(String... args) throws Exception {
    -        String dir = ".";
    +    private static void run(String... args) throws Exception {
    +        Path dir = Paths.get(".");
             for (int i = 0; i < args.length; i++) {
                 if ("-dir".equals(args[i])) {
    -                dir = args[++i];
    +                dir = Paths.get(args[++i]);
                 }
             }
    -        process(dir + "/src");
    -        process(dir + "/docs");
    +        process(dir.resolve("src"));
    +        process(dir.resolve("docs"));
         }
     
    -    private void process(String path) throws Exception {
    -        if (path.endsWith("/CVS") || path.endsWith("/.svn")) {
    -            return;
    -        }
    -        File file = new File(path);
    -        if (file.isDirectory()) {
    -            for (String name : file.list()) {
    -                process(path + "/" + name);
    +    private static void process(Path path) throws Exception {
    +        Files.walkFileTree(path, new SimpleFileVisitor() {
    +            @Override
    +            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
    +                // For Javadoc 8
    +                if (dir.getFileName().toString().equals("javadoc")) {
    +                    return FileVisitResult.SKIP_SUBTREE;
    +                }
    +                return FileVisitResult.CONTINUE;
                 }
    -        } else {
    -            processFile(path);
    -        }
    +
    +            @Override
    +            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
    +                processFile(file);
    +                return FileVisitResult.CONTINUE;
    +            }
    +        });
         }
     
    -    private static void processFile(String fileName) throws Exception {
    +    /**
    +     * Process a file.
    +     *
    +     * @param file the file
    +     */
    +    static void processFile(Path file) throws IOException {
    +        String fileName = file.getFileName().toString();
             int idx = fileName.lastIndexOf('.');
             if (idx < 0) {
                 return;
    @@ -62,8 +77,7 @@ private static void processFile(String fileName) throws Exception {
                 return;
             }
             // System.out.println("Checking file:" + fileName);
    -        FileReader reader = new FileReader(fileName);
    -        String s = IOUtils.readStringAndClose(reader, -1);
    +        String s = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
             Exception last = null;
             try {
                 checkXML(s, !suffix.equals("xml"));
    @@ -80,16 +94,16 @@ private static void checkXML(String xml, boolean html) throws Exception {
             // String lastElement = null;
             // 
  • : replace
  • ([^\r]*[^<]*) with
  • $1
  • // use this for html file, for example if
  • is not closed - String[] noClose = {}; + String[] noClose = {"br", "hr", "input", "link", "meta", "wbr"}; XMLParser parser = new XMLParser(xml); - Stack stack = new Stack(); + Stack stack = new Stack<>(); boolean rootElement = false; - while (true) { + loop: for (;;) { int event = parser.next(); if (event == XMLParser.END_DOCUMENT) { break; } else if (event == XMLParser.START_ELEMENT) { - if (stack.size() == 0) { + if (stack.isEmpty()) { if (rootElement) { throw new Exception("Second root element at " + parser.getRemaining()); } @@ -112,8 +126,7 @@ private static void checkXML(String xml, boolean html) throws Exception { if (html) { for (String n : noClose) { if (name.equals(n)) { - throw new Exception("Unnecessary closing element " - + name + " at " + parser.getRemaining()); + continue loop; } } } @@ -141,7 +154,7 @@ private static void checkXML(String xml, boolean html) throws Exception { + parser.getRemaining()); } } - if (stack.size() != 0) { + if (!stack.isEmpty()) { throw new Exception("Unclosed root element"); } } diff --git a/h2/src/tools/org/h2/build/doc/XMLParser.java b/h2/src/tools/org/h2/build/doc/XMLParser.java index 32d4d57296..bf9cdaad85 100644 --- a/h2/src/tools/org/h2/build/doc/XMLParser.java +++ b/h2/src/tools/org/h2/build/doc/XMLParser.java @@ -1,10 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.doc; +import java.util.Arrays; + /** * This class implements a simple XML pull parser. * Only a subset of the XML pull parser API is implemented. 
@@ -94,9 +96,7 @@ public void setHTML(boolean html) { private void addAttributeName(String pre, String name) { if (attributeValues.length <= currentAttribute) { - String[] temp = new String[attributeValues.length * 2]; - System.arraycopy(attributeValues, 0, temp, 0, attributeValues.length); - attributeValues = temp; + attributeValues = Arrays.copyOf(attributeValues, attributeValues.length * 2); } attributeValues[currentAttribute++] = pre; attributeValues[currentAttribute++] = name; @@ -394,21 +394,6 @@ public int next() { return eventType; } - /** - * Read the next start, end, or character tag. This method skips comments, - * DTDs, and processing instructions. - * - * @return the event type of the next tag - */ - public int nextTag() { - while (true) { - int type = next(); - if (type != COMMENT && type != DTD && type != PROCESSING_INSTRUCTION) { - return type; - } - } - } - /** * Get the event type of the current token. * @@ -465,46 +450,6 @@ public String getAttributeLocalName(int index) { return attributeValues[index * 3 + 1]; } - /** - * Get the full name of the attribute. If there is no prefix, only the local - * name is returned, otherwise the prefix, ':', and the local name. - * - * @param index the index of the attribute (starting with 0) - * @return the full name - */ - public String getAttributeName(int index) { - String pre = getAttributePrefix(index); - String name = getAttributeLocalName(index); - return pre == null || pre.length() == 0 ? name : pre + ":" + name; - } - - /** - * Get the value of this attribute. - * - * @param index the index of the attribute (starting with 0) - * @return the value - */ - public String getAttributeValue(int index) { - return attributeValues[index * 3 + 2]; - } - - /** - * Get the value of this attribute. 
- * - * @param namespaceURI the namespace URI (currently ignored) - * @param name the local name of the attribute - * @return the value or null - */ - public String getAttributeValue(String namespaceURI, String name) { - int len = getAttributeCount(); - for (int i = 0; i < len; i++) { - if (getAttributeLocalName(i).equals(name)) { - return getAttributeValue(i); - } - } - return null; - } - /** * Get the full name of the current start or end element. If there is no * prefix, only the local name is returned, otherwise the prefix, ':', and diff --git a/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql b/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql index aa2e3f1078..bd04acf688 100644 --- a/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql +++ b/h2/src/tools/org/h2/build/doc/buildNewsfeed.sql @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ @@ -8,14 +8,14 @@ CREATE TABLE CHANNEL(TITLE VARCHAR, LINK VARCHAR, DESC VARCHAR, LANGUAGE VARCHAR, PUB TIMESTAMP, LAST TIMESTAMP, AUTHOR VARCHAR); INSERT INTO CHANNEL VALUES('H2 Database Automated Build' , - 'http://www.h2database.com/html/build.html#automated', 'H2 Database Automated Build', 'en-us', NOW(), NOW(), 'Thomas Mueller'); + 'https://h2database.com/html/build.html#automated', 'H2 Database Automated Build', 'en-us', LOCALTIMESTAMP, LOCALTIMESTAMP, 'Thomas Mueller'); SELECT XMLSTARTDOC() || XMLNODE('feed', XMLATTR('xmlns', 'http://www.w3.org/2005/Atom') || XMLATTR('xml:lang', C.LANGUAGE), XMLNODE('title', XMLATTR('type', 'text'), C.TITLE) || XMLNODE('id', NULL, XMLTEXT(C.LINK)) || XMLNODE('author', NULL, XMLNODE('name', NULL, C.AUTHOR)) || - XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'http://www.h2database.com/automated/news.xml'), NULL) || + XMLNODE('link', XMLATTR('rel', 'self') || XMLATTR('href', 'https://h2database.com/automated/news.xml'), NULL) || XMLNODE('updated', NULL, FORMATDATETIME(C.LAST, 'yyyy-MM-dd''T''HH:mm:ss''Z''', 'en', 'GMT')) || GROUP_CONCAT( XMLNODE('entry', NULL, diff --git a/h2/src/tools/org/h2/build/doc/dictionary.txt b/h2/src/tools/org/h2/build/doc/dictionary.txt index 744c792284..9e48dba39b 100644 --- a/h2/src/tools/org/h2/build/doc/dictionary.txt +++ b/h2/src/tools/org/h2/build/doc/dictionary.txt @@ -1,773 +1,850 @@ -aaaaaa aacute abalance ability able abnormal abnormally aborted about above abs -absence absolute absolutely abstract abstraction abstractions aca accept -acceptable accepted accepts access accessed accesses accessible accessing -accesskey accessor according account accountid accounts achieve achieved achieves -acid acirc acme acos acquire acquired across acting action actions active -actively activity acts actual actually acute adapter adapters adapting adaptive -add added addiction adding addition additional additionally addr address -addresses adds admin 
administration administrator admins admission adp advanced -advantage advised aeiou aelig aes affect affected affects after afterwards again -against agar age agent agg aggregate aggregated aggregates agrave agree agreeable -agreed agreement agreements ahead ahilmnqbjkcdeopfrsg aid ajax alan alefsym alert -alexahin alexander algo algorithm algorithms alias aliased aliases aliasing align -aligned alive all allclasses alleging allocate allocated allocation allow allowed -allowing allows almost alone along alpha already also alt alter altering -alternate alternative alternatives alters altersequence always ambiguous amount -amp amt analysis analyze analyzer analyzing anchor and andrew android -andy ang angle animal anne annual anon anonymous another ansi ant anti antonio -any anyone anything anyway anywhere apache api apos app apparatus appear appears -append appended appending appendix appends apple apples applets applicable -application applications applied applies apply applying appropriate approx -approximate apps arbitrary arch architectural architecture archival archive -archives archiving are area areas arg args argument arguments argv arial aring -arising around arr arrangement array arraycopy arrays arrow arrows article -artifact asc ascending ascii asf ashcraft asin asked asp aspe aspect assert -asserting assertions assign assignable assigned assignment associated assume -assumed asterisk asymp async asynchronous atan atilde ation atom atomic -atomically atomicity attach attached attack attacker attacks attempt attempted -attempts attnum attorneys attr attrib attribute attributes atttypmod august auml -australia auth authenticated authentication author authorization authorized auto -autocommit autocomplete autoincrement automated automatic automatically -automation autostart availability available avalon average avg avl avoid avoided -avoiding avoids aware awt bach back backed backend background backing backs -backslash backspace backup backups backus 
backward backwards bad bag bahrain bak -balance balancing bananas band banking bar barcelona base based baseline basic -basically basis bat batch bbalance bcc bcdfghklmnprstwz bdata bdfghklmnpqrst -bdquo beans became because become becomes becoming been before begin beginning -behalf behave behavior behaviour behind being bel believes belong belongs below -bench benchmark benchmarks beneficial bennet berlini best beta better between beware -beyond bfff bgcolor bid big bigger biggest bigint biginteger billion bin binary -bind biological biondi birth birthday bit bitand bitmap bitor bits bitxor -blank blind blob blobs block blocked blockquote blocks blocksize blog -blogs blojsom blowfish blue blur bnf bnot boat bob bodies body bogus bohlen bold -boo book bookkeeping bookmarks books bool boolean boot booted bootstrap bor -border boss boston both bottom bound bounds bout box braces brack bracket -brackets branch branches brasil brazilian breach break breaks broken browse -browser browsers brute brvbar bsr btree btrees bucher bucket buckets buf buff -buffer buffered buffering buffers bug bugdatabase bugfixes bugs build builder -building builds built bulk bull bundle bungisoft business busy but button bxor -bye byte bytea bytes bzip cache cached caching cafe cal calculate calculated -calculates calculating calculation calendar call callable callback called caller -calling calls cally camel can cancel canceled cancels cannot canonical cap -capabilities capability capacity capone caps capture car cardinality care careful -carriage carrier cars cartesian cascade cascading case cases casesensitive -casewhen casqueiro cast cat catalog catalogs catch catcher catches caucho cause -caused causes cavestro cayenne cbc ccc ccedil cdata cdd cddl cdup cedil ceil ceiling cell -cellpadding cells cellspacing cent center central cert certain certificate -certificates certified certs cet cfg cgi chain chained chaining chair challenge -challenger chance change changed changelog changes 
changing channel channels char -character characters charge chars charset chartered cheating check checkbox -checked checker checking checklist checkpoint checks checkstyle checksum chemical -cherries chf chi child children chile chinese choice choices choose chr chunk cid -cipher circle circumstances citizen city claim claims clancy claros clashes class -classes classification classloader classloaders classpath clause clazz clean -cleaned cleaner clear cleared clearing clearly clears cleartext click clicked -clicking client clients clientside clob clock clone close closed closely closer -closes closing clubs cluster clustered clustering cmd cmu cnt coalesce code -codebase codebook coded codehaus codes codeswitch codist coffee col coldrick coll -collaborative collapse collateral collation collations collator collators collect -collected collecting collection collections collector colon color cols colspan -column columnlist columns com combination combinations combinatorics combine -combined combines combining combo combobox come comes comma command commands -commas comment comments commercial commit commits committed committing common -commonly commons communicates communication community comp compact compacting -companies company comparable comparative comparator compare compared compares -comparing comparison comparisons compatibility compatible compensation -compilation compile compiled compiler compiling complete completed completely -complex complexity compliance compliant complicated comply complying component -compounds compress compressed compresses compression compressor compsci compute -computed computer computers computing con concat concatenate concatenated -concatenates concatenation concentrate concept concerning concur concurrency -concurrent concurrently cond condition conditions conf config configuration -configure configured confirm confirmed conflict conforming confusing confusingly cong -conn connect connected connecting connection connections 
connects cons -consecutive consequential considerations considered consistency consistent -consisting console conspicuously const constant constants constitute constitutes -constraint constraints construct constructed constructing constructor constructs -construed consult consulting consumes consumption contact contain contained -container containers containing contains content contents context contiguous -continue continued continuent continues contract contracts contrib contribute -contributed contributes contributor contributors control controlled controller -controls convenience convention conversion convert converted converter converting -converts conveyed coordinates copied copies copy copying copyright core correct -corrected correction correctly correctness correlated correlation corresponding -corrupt corrupted corruption cos cosh cost costs cot could council count -countdown counter countries country counts couple course court coverage covered -cpu crarr crash crashed crashes crawl create created createdate creates -createtable creating creation creatively credential credit criteria critical crlf -cross cruncher crypt cryptographic cryptographically cryptoloop css csv csvread -csvwrite cte ctid ctor ctrl ctx cuaz cube cup curation curdate cure curly curren -currency current currently currval cursor cursors curtime curtimestamp curve -curves custom customizable customer customerid customers cut cvs cwd cycle cyclic daemon -daffodil dagger damage damages dangerous darr darwin dash dashes dat data -database databaseaccess databases datalink datanamic datapage datasource -datastore datatype datatypes date dateadd datediff dates datestyle datetime -datetimes datum david davide day dayname dayofmonth dayofweek dayofyear days dba dbcopy -dbcopyplugin dbcp dbev dbid dbmonster dbms dbname dbo dbs dbserv ddl ddlutils -deadlock deadlocks deal dealing deallocate death debug debugging dec december -decimal deck declaration declaratory declare declared decode decoder 
decodes -decoding decompress decompresser decompression decoration decrement decrypt -decrypted decrypting decryption decrypts deebee deemed deep def default defect -defective defects deferrability deferrable deferred define defined defines -defining definitely definition definitions deflate deflater deg degrees -deinterleave del delay delayed dele delegated delete deleted deleter deletes -deleting deletion delimited delimiter delivered delivery delta demand denial -denied dense department depend dependencies dependency dependent depending -depends deploy deprecated dept depth derby derivation derivative derived des desc -descending descr describe described describes describing description descriptions -descriptor deserialization deserialize design designate designated desired -desktop dest destdir destroy destroyed destroyer destroying destruct destruction -destructor detail detailed details detect detected detection detects determ -determining deterministic deusen deutsch dev developed developer developers -developing development devenish deviation device devices dezign diagram dialect -dialog diams dictionary did didn died diff differ difference differences -different differential differently differs digest digit digital digits dim -dimension dimensional dimensions dip dips dir direct direction directly -directories directory dirs dirty disable disabled disablelastaccess disables -disabling disadvantage disappear disc disclaimed disclaimer disclosed disconnect -disconnected disconnecting disconnections disconnects discontinue discount disk -dispatcher display displayed displays dispose disputes dist distance distinct +aacute aaload aastore abalance abba abbreviate abbreviated abbreviates +abbreviation abi ability able abnormal abnormally abort aborted about above abs +absence absent absolute absolutely abstract abstraction abstractions abstracts +aca accept acceptable acceptance accepted accepting accepts access accessadmin +accessed accesses accessible 
accessing accesskey accessor accidentally +accompanying accordance according accordingly account accountid accounting +accounts accu accum accumulate accumulated accurate accurately achieve achieved +achieves acid acirc acme acmpeq acmpne aconst acos acquire acquired across acting +action actions activated activating activation activator active actively activemq +activities activity acts actual actually acute adam adamo adams adapter adapters +adapting adaptive add added addiction adding addition additional additionally +additions addon addr address addressed addresses adds adeptia adjacent adjust +adjusted adjusts admin administration administrator admins admission ado adopt +advance advanced advances advantage advantages advised aeiou aejaks aelig aes afaik +affect affected affects affero affine affinity after afterwards again against agar age +agent agentlib agg aggregate aggregated aggregates aggregating aggressive agile +agrave agree agreeable agreed agreement agreements agrees ahead +ahilmnqbjkcdeopfrsg aid air ajax alan alarm ale alefsym alert alessio alexander alfki +algo algorithm algorithms alias aliased aliases aliasing align aligned alignment +alive all allclasses alleged alleging alloc allocate allocated allocates allocating +allocation allow allowed allowing allows almost aload alone along alpha +alphabetical alphabetically already also alt alter altering alternate alternative +alternatives alters although always ambiguity ambiguous america among amount amp +amt analysis analytics analyze analyzed analyzer analyzers analyzing anatr +ancestor anchor and andrew android andy anewarray ang angel angle angus animal +animate aniseed anne annotate annotated annotation annotations annual ano anon +anonymous another ans ansi ansorg answers ant anthony anti antialias antialiasing +anton antonio any anybody anyhow anyone anything anyway anywhere anzo apache apart +api apiguardian apos app apparatus appear appears append appended appender appending +appendix 
appends apple apples applet applets applicable application applications +applied applies apply applying appreciate approach appropriate appropriateness +approx approximate approximated approximation apps april aquiles arabic arbitrary +arc arch architectural architecture archival archive archives archiving are area +areas areturn arg argb argc args argument arguments argv arial aring arising +arithmetic army arnaud arose around arr arrangement arrangements array arraycopy +arraylength arrays arriving arrow arrows art article articles artifact artifacts +artificially asc ascending ascii asf ashcraft ashwin asin asked aspe aspect +assert asserting assertion assertions asset assign assignable assigned assignment +associate associated assume assumed assumes assuming assumption assurances +asterisk astore asymmetric asymp async asynchronous atan atelier athrow atilde +ation atom atomic atomically atomicity attach attached attachment attachments +attack attacker attacks attempt attempted attempting attempts attnum attorneys +attr attrib attribute attributes atttypmod auckland aug augments august auml +australia austria auth authenticate authenticated authenticating authentication +author authorization authorized auto autocommit autocomplete autoincrement +automate automated automatic automatically automation availability available +avalanche average avg avl avoid avoided avoiding avoids await aware away awt +axiom bach back backed backend backgammon background backing backs backside +backslash backslashes backspace backup backupoperator backups backus backward +backwards bad badly bag bahrain bak balance balanced balancing baload banana +bananas band banking bar barcelona base based basel baseline basic basically +basis bastore bat batch batched batches batching batis bbalance bcc +bcdfghklmnprstwz bdata bdfghklmnpqrst bdquo bean beans beat became because become +becomes becoming been beep before begin beginning behalf behave behaving behavior +behaviour behind being bel 
belgium believes bellinzona belong belonging belongs +below bench benchmark benchmarks beneficial benefit bennet berger berkeley +berlini bern bernd berne best beta better between beverages beware beyond bfff +bgcolor biased bid biel bienne big bigger biggest bigint biginteger bigserial binlog +bilinear bilingual billion bin binaries binary bind bindings bio biodiversity +biological bipush birth birthday biscuits bit bitand bitmap bitnot bitor bits bitwise +bitxor biz bjorn black blank blanked blanks bleyl blind blitz blob blobs block +blocked blocking blockquote blocks blocksize blog blogs bloom blue blume blur bnf +bnot boat bob bocher bodies body bogus bohlen bold bom bonita boo book +bookkeeping bookmarks books bool boolean boost boosting boot booted bootstrap bor +border bordercolor borg borges boss bot both bottlenecks bottom bound boundaries +boundary bounding bounds bout box boysenberry bpchar bpm brace braces brack +bracket brackets bradmesserle branch branches branda brasil brasilia breach +breadth break breaking breaks bridge bring brings brittain broke broken broker +bronze brought brown browse browser browsers brute brvbar bsdiff bson bsr btc btree +btrfs bucher bucket buckets buddha buf buff buffer buffered buffering buffers bug +bugfix bugfixes buggy bugs build builder building builds built bukkit bulk bull +builtin bundle bundled bundles bungisoft burden business busy but button bxor bye +bypassing byte bytea bytecode bytes bzip cabinet cacao cachable cache cacheable +cached caches caching cafe cajun cal calculate calculated calculates calculating +calculation calculations calendar calendars call callable callback callbacks +called caller calling calls cally caload came camel can cancel canceled canceling +cancellation cancelled cancels candidates cannot canonical cap capabilities +capability capacity capitalization capitalize capitalized capone caps caption capture +captured car card cardinal cardinality care careful carriage carrier cars cartesian 
+cas cascade cascading case cases casesensitive casewhen cash casing casqueiro cast +casting castore cat catalina catalog catalogs cataloguing catch catcher catches +catching category catlog caucho caught cause caused causes causing cavestro +cayenne cbc cbtree ccedil cdata cdd cddl cdo cdup cease cedil ceil ceiling cell +cellpadding cells cellspacing cement cemo census cent center central centrale +centrally centric century cereals cert certain certificate certificates certified +certs cet cfg cfml cha chafik chai chain chained chaining chair challenge +challenger challenging chamber chance chang change changed changelog changes +changing channel channels char character characteristics characters charge +charindex chars charset chartered chartrand chartreuse chatellier cheaper cheat +cheating check checkbox checkcast checked checker checking checklist checklists +checkout checkpoint checks checkstyle checksum checksums chef chemical chen +cherries chf chi child children chile china chinese chmod chocolate choice +choices choose chowder chr chris christian christos chrome chromium chunk chunked +chunks chur cid cipher ciphers circle circles circuit circumstances cite citizen +city claim claims clam claros clash clashes class classes classification +classifications classloader classloaders classname classpath clause clauses clazz +clean cleaned cleaner cleaning cleanup clear cleared clearer clearing clearly +clears cleartext click clicked clicking client clients clinton clip clipboard +clob clobs clock clocks clone cloneable cloned closable close closeable closed +closely closer closes closest closing cloud cls clubs clue clunky cluster clustered +clustering cmd cms cnf cnrs cnt coalesce code codebase codebook coded codegen +codehaus codes coding codist coffee cognitect col cold coldrick coll +collaborative collapse collate collateral collation collations collator collators +collect collected collecting collection collections collectively collector +collide 
collision collisions colon color cols colspan column columnlist columns +com combination combinations combinatorics combine combined combines combining +combo combobox come comes coming comma command commands commas comment commented +comments commercial commit commits committed committing common commonly commons +communicates communication community comp compact compacted compacting compaction +compacts companies company comparable comparative comparator compare compared +compares comparing comparison comparisons compatibility compatible +compensation compensating compilable compilation compile compiled +compiler compiles compiling complete completed completely +completion complex complexity compliance compliant +complicate complicated complies comply complying component components composed +compose composite compound compounds compress compressed compresses compressibility +compressible compressing compression compressor compromise compsci computation +compute computed computer computers computing con concat concatenate concatenated +concatenates concatenating concatenation concentrate concept concerning concrete +concur concurrency concurrent concurrently cond condiments condition conditional +conditionally conditions conf confections config configurable configuration +configurations configure configured confirm confirmed conflict conflicting +conflicts conforming conforms confusing confusingly confusion cong conjunction +conjunctive conn connect connected connecting connection connections connector +connects connecturl cons consecutive consequences consequential conservative +conserve consider considerations considered consistency consistent consistently +consisting consists console consortium conspicuously const constant constants +constitute constitutes constraint constraints construct constructed constructing +construction constructor constructors constructs construe construed consult +consulting consumes consumption contact contacts contain contained 
container +containers containing contains contended contends content contention contents context +contiguous contingent continuation continue continued continues continuous +contract contracts contribute contributed contributes contributing contribution +contributions contributor contributors control controlled controller controls +convenience convenient convention conventions conversion conversions convert +converted converter converting converts conveyed cookie cookies cooperate +cooperative coordinate coordinates cope copied copies copilot copy copying +copyright copyrighted core cores corporate correct corrected correction correctly +correctness correlated correlation correspond corresponding corresponds corrupt +corrupted corruption cos cosh cosine cost costly costs cot cotangent cote could +couldn council count countdown counted counter counterclaim counters counting +countries country counts county couple coupled course court courts covariant +cover coverage covered covering covers cow cpp cpu crab cracking cranberry crarr +crash crashed crashes crashing crawl crc create created createdate creates +creating creation creatively creator credential credit creme cristan criteria +critical crlf cross crossed cruncher crypt cryptographic cryptographically +cryptoloop css csv csvread csvwrite cte ctid ctor ctrl ctx ctxsys cuaz cube +cumulative cup curation curdate cure curious curly curr curren currency current +currently currval cursor cursors curtime curtimestamp curve curves cust custom +customarily customer customerid customers customizable customized customizer +customizers customizing cut cutover cve cvf cvs cwd cycle cycles cyclic cycling cyr +czech dadd daemon daffodil dagger dairy daload dalvik damage damages dan dance +dangerous daniel dark darr darri dartifact darwin dash dashes dastore dat data +database databaseaccess databases dataflyer datagram datalink datareader +datasource datasources datastore datatext datatype datatypes datawriter date +dateadd 
datediff datepart dates datestyle datetime datetimes datum david davide +day daylight dayname dayofmonth dayofweek dayofyear days dba dbbench dbcp dbid +dbms dbname dbo dbs dbserv dbsnmp dclassifier dcmpg dcmpl dconst dderby ddiv ddl +ddladmin deactivate deactivated deactivation dead deadlock deadlocked deadlocks +deal dealing deallocate death debug debugging dec decade december decide decided decimal +decision deck declaration declarations declarative declaratory declare declared +declaring decode decoded decoder decodes decoding decompile decompiler decompiles +decompiling decompress decompressed decompresser decompression decoration +decouple decreases decrement decremented decrypt decrypted decrypting decryption +decrypts dedicated deductive deemed deep def default defaults defect defective +defects defend defendant defense defensive deferrability deferrable deferred +define defined definer defines defining definitely definition definitions deflate +deflater defrag defragment defragmented deg degenerates degradation degrees +deiconified deinterleave del delay delayed delays dele delegate delegated +delegates delegating delegation delete deleted deleter deletes deleting deletion +deliberate delicious delim delimited delimiter delimiters delivered delivery +delta deltas demand demo demonstrate demonstrates denial denied denmark dense +denydatareader denydatawriter department departments depend dependencies +dependency dependent depending depends deploy deployed deploying deployment +deprecate deprecated deprecation dept depth deque derby derbyclient derbynet +deregister derivation derivative derive derived des desc descendant descending +descent descr describe described describes describing description descriptions +descriptor deserialization deserialize deserializing design designate designated +designator designates designed designer desirable desired desktop dest destdir destination +destroy destroyed destroyer destroying destruct destruction destructor detail 
+detailed details detect detected detecting detection detector detects determine +determining deterministic detrimental deusen deutsch dev develop developed +developer developers developing development devenish deviation device devices +dfile dgenerate dgroup dhe dhis diabetes diagnostic diagnostics diagram diagrams +dialect dialog diamonds diams dick dictionary did didn died dieguez diehard dies +diff differ difference differences different differential differentiate differently +differs dig digest digit digital digits diligence dim dimension dimensional +dimensions dimitrijs dinamica dining dip dips dir direct direction directly +directories directory directs dirname dirs dirty disable disabled +disablelastaccess disables disabling disadvantage disadvantages disallow +disallowed disappear disappearance disappeared disc disclaimed disclaimer disclaimers +disclaims disclosed disconnect disconnected disconnecting disconnections disconnects +discontinue discount discriminator discussion disjunctive disk disks dispatch +dispatcher display displayed displaying displays dispose disposed disposition +disputes dist distance distinct distinguish distinguishable distinguished distinguishing distribute distributed distributes distributing distribution distributions distributor distributors district districts div divide divided -dividend divider division divisor dll dml dname dobrovolskyi doc docjar doclet -docs docsrc doctype document documentation documented documenting documents does -doesn dog dollar domain domains don donate donation done donors dont dos dose dot -dots double doubt down download downloads dproperty drafter drastic drda drive -driven driver drivers drives drop dropped dropping drops dtd dual due dummy dump -dumps duplicate duplicates durability durable duration during dutch dynamic -dynamically each eacute earlier early ease easier easily east easy ecb ecirc -eclipse edh edit editable edited editing editor edu eduardo eee eeee eeeeee -effect effective 
effects efficient efficiently egrave eid eing eins einstellung -either eldest elect electronic element elements elephant elig eliminate -elisabetta ell elm else email embedded embedding emergency emit emitted employee -empty emsp emulate emulated enable enabled enables enabling enc encapsulates -enclose enclosed enclosing encode encoded encoder encodes encoding encrypt -encrypted encrypting encryption encrypts end endif ending endings endless endorse -ends enforce enforceable enforced engine engines english enhancement enough ensp -ensure ensuring enter entered entire entities entity entries entry enum -enumeration env environment environments eof eol epsilon equal equality equals -equitable equiv era eremainder err error errorlevel errors esc escape escaped -escapes escaping ese espa essential essentials estimate estimated estimates -estimation eta etc eth etl euml euro europeu eva eval evaluatable evaluate -evaluated evaluation even event events ever every everybody everything exact -exactly example examples exceeds except exception exceptions exclude excluded -excluding exclusion exclusive exclusively exe exec executable executables execute -executed executequery executes executing execution exemplary exercise exercising -exhibit exist existence existing exists exit exited exits exp expand expanded -expands expect expected expenses expensive experimental experiments experts -expiration explain explicitly explorer exponent exported exposed expr express -expressed expression expressions expressly exps extend extended extends -extensible extension extensions extensively extent external externally extra -extract extracted extracter extracting face facility fact factor factory fail -failed fails failure fall falls false family faq faqs far fashioned fast faster -fat fatal fault feature features feb februar february fee feed feedback fees -felix ferguson fetch few ffeecc fff ffff ffffff ffffffff ffffffffffff fid field -fields fifo fifty file filedata filename filepwd 
files filesystem fill filled -filler fillers filling filter filtered filters fin final finalization finalize -finalizer finalizers finally find finding finds fine finer finish finished fire -firebird firebirdsql firefox firewall first firstname fit fitness fits fitting -fix fixed fixes fkcolumn fktable flag flags flash flat flexible flipped float -floating floor florent flower fluent flush flushed flushes flushing fly focus -focusable folder follow followed following follows font foo footer footprint for -forall force forcefully forces forcing foreign forever forge forget forgotten -form format formatdatetime formats formatted formatting formed forms forth -forward found foundation four fourth fowler fox fqn frac fractional frame frameborder -frames frameset framespacing framework frameworks fran france frank frasl free -freed french frequently fresh freshmeat friendly from front frontend fsutil fsync -ftl ftp ftps fukushima fulfill fulfilled fulfils full fulltext fully fulvio fun -func function functional functionality functions further fuse future fuzz game -games gamma gap gaps garbage gast gaussian gave gcc gcj gecko gem gen general -generally generate generated generates generating generation generator generic -genetic genkey genomics george geos geosysin german get getpart gets getter -getting ghi gid gif gilbert give given glassfish global glossary gmail gmbh gmt -gnu golden good goods goodwill google got governed governing government grabbing -graceful grained grammar grant grantable granted grantedrole grantee granteetype -grantor grants granularity graph graphical graphics gray greater greatest greedy -greenspun gregorian grid gridwidth gridx gridy groovy gross group grouped -grouping groups grover grow grows guarantee guaranteed guest gui guid guide -gutierrez gzip hack had haidinyak half hallo halt hammant hand handle handler -handles handling hans happen happened happens hard harder hardware harm harmless -harpal harr has hash hashcode hashed hashing 
hashmap hashtable have having -hazorea hbci head header heading headless health heap hearts height held hellip -hello helma help helped helper helps helvetica hen henplus here hereafter hereby -herein hereof hereunder herkules hex hexadecimal hextoraw hey hibernate hibicius -hid hidden hide hiding high higher highest highlight highlights hilbert hint his -historical history hit hits hmmss hms hoc hoi hold holdability holding holes home -homed homepage honoured hook hope hopefully horizontal host hostname hosts hot -hour hours hover how however href hsql hsqldb htime htm html http https hudson -huffman human hundred hundreds hyc hypersonic hyt iacute ibm icirc ico icon icons -idea identical identified identifier identifiers identifying identity idiomatic -idle ids idx idxname iee ietf iexcl iface ifdef ifexists ifnull iframe ifs -igniterealtime ignore ignorecase ignored ignoredriverprivileges ignorelist igor -igrave iict ikemoto ikvm illegal image images img immediately immutable imola imp -impl implement implementation implementations implemented implementing implements -implicit implied import important imported imports impose impossible improve -improved improvement improves improving inability inaccessible inactive inc -incidental include included includes including incoming incompatibility -incompatible incomplete inconsistent incorrect incorrectly increase increased -increment incrementally incremented incrementing incubator incurred indemnify -indemnity indent indentation indented indents independent independently index -indexdef indexed indexer indexers indexes indexid indexing indicates indirect -indirectly individual individually indkey indonesia inet inetsoftware inf infin -infinite infinity inflate inflater info inform information informed infringed -infringement infringements infringes infringing inherit ini init initial -initialization initialize initialized initializer initializes initializing -initially initiate initiation inject injection injections 
injury inline inlining -inmemory inner inno innodb inplace input ins insecure insensitive insert inserted -inserting insertion inserts insets inside install installation installations -installed installer installing installs instance instanceof instances instantiate -instantiation instead institutes instr instruction instrument int intact integer -integers integrate integrated integration integrity intellectual intelli intended -inter interaction interactive interested interesting interface interfaces -interleave interleaving intermediate intern internal internally international -internationalization internet interpret interpreted interprets interrupted -interrupting interruption intersect interval into intra inv inval invalid -invalidate inversed inverting invisible invocation invoice invoiceid invoke -involved involves iota ipowerb iquest irstv isin iso isolated isolation issue -issued issues italian italiano italy item items iterate iteration iterations -iterator its itself iuml iyama jackcess jackrabbit jackson jakarta jala jam james -janino january japan japanese jar jasonbrome java javac javadoc javadocs -javascript javax javolution jcr jdbc jdbcx jdk jdo jee jefferson jetty jim jira -jndi jnlp job joe joel joerg johann john johnson join joined joins jon jones jpa -jpackage jpox jre jsessionid jsmooth json jsp jsr jsse jts judgment judicial -julia jun june jurisdiction jurisdictions just jvm kappa karin keep keeps kept -kerberos kernel kernelpanic kerry key keyalg keying keypass keys keystore keystores -keytool keyword keywords kill killed killing kills kind kindergarden kinds kit -know knowledge known knows koi konqueror label labeled labels lack lambda lamp -land lang language languages laptop laquo large larger largest larr last -lastmodified lastname late later latest latin launch law layer layout lcase lceil -lck lcurly ldap ldbc ldquo leach lead leading leads leaf leak leaked leaks least -leave leaving lee left leftmost legal legend lehmann len length 
lenient less let -lets letter letters level levels lfloor liability liable lib libraries library -licensable license licensed licenses lies life lifespan lifetime liftweb light -lightweight like likely lim limit limitation limitations limited limiting limits -line linear linefeed lines lineup link linked links linux liqui list listed -listen listener listeners listening listens lister listing lists lite literal -literals litigation little live llc lnot load loaded loader loading loads lob -lobo lobs local locale locales localhost localized locate located location -locators lock locked locking locks log logfile logged logger logging logic login -logins logo logout logs logsize long longblob longer longest longtext -longvarbinary longvarchar look lookahead looks lookup loop loopback loops lor -lose losing loss losses lossless losslessly lost lot low lowast lower lowercase -lowest loz lpad lpt lrm lru lsaquo lsquo ltrim lucene lumber luntbuild lxabcdef -lynx lzf mac machine machines maciej macr macromedia made magic magyar mail -mailing main mainly maintained maintenance major make makes making malformed -malfunction man manage management manager manifest manipulate manipulation -manipulations manual manually many map mapped mapping maps marc marcy margin -marginheight marginwidth mark marked marker martin mary masahiro mask masks -master masterkey match matcher matches matching materialized materials math -mathematical mathematicians matrix matter matters maurice maven max maxbqualsize -maxgtridsize maximum maxlength maxrows maxvalue may maybe mdash mdd mdtm -mean meaning meaningful means meant meantime meanwhile measure measured mechanism -media median medium mediumblob mediumint mediumtext megabytes meier melbourne mem -member memory menu merchantability merchantable merge merged merges merging -message messages met meta metadata meteorite method methods micro microsoft -middle middleware middot midnight midpoint might migrate migrated migration mill -miller 
million millis millisecond milliseconds mime min mine minimum minor minus -minute minutes minvalue mirror misc miscellaneous mismatch miss missing mix mixed -mixing mkd mkdirs mmm mmmm mod mode model modes modification modifications -modified modifier modifiers modify modifying module modules modulo modulus moment -mon monday money month monthname months more morning morton most mostly mouse -mouseover move moved moves moving mozilla mpl msg msi mssql msxml mtsystems much -mueller mul multi multiple multiples multiply multithreaded multithreading -multiuser music must mutable mutually mvcc mydb myna myself mysql mystery nabla -naive naked name namecnt named names namespace naming nano nanos nanoseconds -national nations native nativej natural nature naur navigate navigation navigator -nbsp ncgc nchar nclob ndash near nearest necessarily necessary nederlands need -needed needs neg negate negated negating negative negligence neighbor neo nest -nested nesting net netscape network networked networks never new newer newest -newline newlines newly neworder news newsfeed newsfeeds newsgroups newsletter -next nextval nice nicer nielsen nih nio nlst nnnnnnnnn nobody nocache nocheck -nodata node nodelay nodes noframe noframes noll non none noop nopasswords nopmd -noresize normal normalize normalized normally nosettings not nota notation notch -note notes nothing notice notices notification notified notifies notify notifying -notin notwithstanding novelist now nowait nowrap nsi nsis nsub ntext ntfs ntilde -nul null nullable nullif nulls num number numbers numeric numerical nvarchar nvl -oacute obey obj object objects obligation obligations obtain obtained obtains -occupied occupies occupy occur occurred occurrence occurrences occurs -ocirc octal octet october octype odbc odbcad odd odg off offer offered offering -offers office offset often ograve ohloh oid okay old older oldest oline oliver -omega omicron once onchange onclick one ones onfocus onkeydown onkeyup online -onload 
only onmouseout onmouseover onreadystatechange onresize onsubmit oops open -opened openfire opening openjpa openlinksw openoffice opens opera operand -operands operates operating operation operations operator operators oplus optimal -optimistic optimizable optimization optimizations optimize optimized optimizer -optimizing option optional optionally options ora oracle orange oranges order -ordered orderid ordering orders ordf ordinal ordinary ordm oren org organization -organized oriented orig origin original originally originals orion orld orm -orphan oscar osgi oslash other others otherwise otilde otimes ought ouml our out -outer outperforms output outset outside outstanding over overflow overflows -overhead overload override overview overwrite overwritten own owned owner owners -ownership oymaurice pack package packages packaging packets pad padded padding -page pages pair pairs pal panel panels paolo papa paper para paradox paragraph -paragraphs param parameter parameterized parameters params paren parent -parentheses parentid parse parsed parsedatetime parser parses parsing part -partial partially participant particular particularly parties partition partnership parts -party pass passed passes passing passive password passwords past paste pasv patch -patent patents path paths pattern patterns paul pause paused pay payment pdf -peace peek pencil pending people per percent perform performance performed -performs period permil permission permissions permits permitted permutation -permutations perp persist persisted persistence persistent persister person -personal persons pete peterson petra pfister pgdn pgsql pgup phantom phase phi -philip phone php phrase phrases physical pid pieces pier pilot ping pinned pipe -piv pivot pkcolumn pkcs pktable place placeholders plain plaintext plan planned -plans plant platform platforms play player please pluggable plugin plus plusmn -pmd png point pointer pointers pointing points polar pole poleposition poll -polling polski 
pool poolable pooled pooling pop populate populated population popup port -portability portable portal portals ported porting portions portlet ports -portugal portugu portuguese pos position positioned positions positive -possibility possible possibly post postal postgre postgres postgresql postmaster -potential potentially pound pow power poweroff practice prd pre prec precision -preferdoslikelineends preferences preferred prefix prefixes premature prep -prepare prepared prepares preparing prepended prepends pres present preserve -press pressed pretty prev prevent prevents previous previously pri price prices primary -prime primitive primitives principal print printed println prints prio prior -priority private privilege privileges probability probable probably problem -problems proc procedural procedure procedures process processed processes -processing processors procurement prod produce produces product production -products profile profiler profiling profit profits program programme programming -programs progress prohibited prohibits project projecthelp projects prominent -promote prompt promptly proof prop propagated properly properties property -proposal proposed prospectively protect protected protecting protection protects -protocol protocols prototype prototyping prove proven provide provided provider -provides providing provision provisions proxy pseudo psi ptn pub public publish -published publishing pull puppy pure purpose purposes pursuant push put pwd pwds -qty quadratic qualified qualifier quality quantified quantifieds quantity quarter -queries query queryframework querying question questions queue queues qui quick -quicker quickly quicksort quickstart quiet quirre quit quite qujd qujdra quot -quote quoted quotes quoting race radians radic radio radix ram ramiere ran rand -random randomized randomly randomness rang range ranges rank rapid raquo rarr -rate rather raw rawbyte rawtohex rceil rcon rcurly rdbms rdonly rdquo reached -read readable reader 
reading readonly reads ready real really realm realtime -reason reasonable reasonably reasoning reasons rebind rebuild rebuilt rec -recalculate receipt receive received recent recently recipient recipients -reclaimed recognized recommended recompile recompiles reconnect reconnecting -record records recover recovering recovery recreate recreated recurse recursion -recursive red redirect redirected redirects redistribute redistribution -redistributions redo reduce reduced reduces redundancy redundant ref refactor -refactoring refactorings reference referenceable referenced references -referencing referential referred refers reflect reflection refman reformed -refresh reg regarding regex regexp region register registered registry regression -regular regularly regulation rehash rein reindex rejected rekord rel related -relating relation relational relations relationship relative relatively release -released releases relevant reliable relies reload rely relying remain remainder -remaining remains remap remark remarks remco remember remembered remote remotely -remoting remove removed removes removing rename renamed renames renaming -reopen repair repeat repeatable repeated repeatedly repeating repl replace -replaced replacement replaces replacing replayed replicating replication replied -reply repo report reported reporting reports repositories repository represent -representation representations representing represents reproduce reproduced req -request requested requests require required requirement requirements requires res -research resellers reserve reserved reset resets reside resides resin resistant -resizable resize resizing resolution resolved resolver resort resource resources -resp respect responding response responses responsibility responsible rest -restart restarted restarting restore restored restores restoring restrict -restricted restricting restriction restrictions restricts result resulting -results ret retain retr retrieval retrieve retrieved retry 
return returned -returning returns reuse reused reuses rev revealed reverse reversed review -revised revision revoke revoked revolutions rfc rfcs rfloor rgb rho rid ridvan -right rightmost rights rijndael rioyxlgt risk risks rlm rmd rmdir rmerr rmi -rmiregistry rnd rnfr rnto road roadmap role roles roll rollback rolled rolling -rolls roman ronni room root roots rot round rounded rounding roundmagic rounds -routine row rowcount rowid rownum rows rowsize royalty rpad rpm rsa rsaquo rsquo -rss rtrim ruby rubyforge ruebezahl rule rules run rund rundll runnable running -runs runscript runtime russian rwd rws sabine safari safe safely said sainsbury salary sale -sales salt salz sam same samp sample samples sans sat sata save saved savepoint -savepoints saves saving say saying says sbquo scala scalar scale scan scanned -scanner scanners scanning scans scheduler schem schema schemas schemata schmorp -schoen school sciences scm scope scoped scott scratch screen screenshot script -scriptella scripts scroll scrollable scrolling sdot seam search searchable -searched searcher searches searching sec second secondary seconds secret sect -section sections secure securing security see seed seeded seeds seek -seem seems select selectable selected selection selectivity selects self sell -selling semicolon semmle send sending sends sense sensitive sensitivity sent -sentence sentinel sep sepang separate separated separately separating separator -separators sept september seq sequence sequences sequential sequentially sequoia -serial serializable serialization serialize serialized series serif server -servers service services servicing servlet servlets sesar session -sessions set sets setter setters setting settings settlement setup several -severity sftp sha shadow shall shallow share shared shares shell shellbook shift -shipping short shorter shortest should shouldn show showing shown shows shrink -shuffle shut shutdown shutting shy sid side sides sig sigma sigmaf sign signal 
-signature signed signs signsoft signum silently silly sim similar similarity -simon simple simpler simplest simplicity simplified simplifies simplify simply -simulate simulated simulates simulator sin since single singleton sinh site sites -situation situations six sixty size sized sizes skill skip skipped skipping slash -slashdot sleep slist slots slow slower slowest slowing slows small smalldatetime -smaller smallest smallint smart smith smpt snapshot snipped snippet soap socket -sockets soft software sold solid solo solution solutions solve solved solving -some somebody something sometime sometimes soon sophisticated sorry sort sorted -sorting sorts sound soundex sounds source sourceforge space spaces spacing spades -span spanish spans spantext sparse spatial spec special specialized specially -specific specification specified specifies specify specifying specs speed speeds -spell spelled spends spent spfile spi split sponsored spots spread spring sql -sqlexpress sqlite sqlnulls sqlserver sqlstate sqlxml sqrt square src ssl sss -stable stack stage standalone standard standardized stands star staring start -started starter starting starts startup starves stat state statement statements -states static stating station statistics status statute stay stays stddev stddevp -step steps steve still stmt stock stolen stop stoppage stopped stopper stopping -stops stor storage storages store stored storepass stores storing story str -strange strategy stream streaming streams street strength stress strict strictfp -string stringdecode stringencode strings stringtoutf strong stru struct -structural structure structures stub stuck studios stuff style stylesheet -stylesheets sub subclasses subdirectories sube subject sublicense sublicenses -submit subqueries subquery subscribe subsequent subsequently subset substance -substitute substituted substitution substr substring substructure subtract -subtree subversion succeed succeeds success successful successfully such suddenly 
-sue sufficient sufficiently suffix sugar suggest suggested suggestion suite -suites sum summand summary summer sun sunday sup supe super superclass superior -superseded supertable superuser supplied supplier supply support supported -supporter supporters supporting supports supposed suppress sure surrogates -surrounded survive susan svn svr swap swapped swing swiss switch switched -switches switching switzerland sxd sylvain sync synced synchronization -synchronize synchronized synchronizing synchronous synchronously synonym syntax -synth synthetic sys syscs sysdate sysdba syst system systems systime systimestamp -szlig tab tablance table tableid tables tabs tag tags tahoma tail take taken -takes taking tamava tan tanh tanuki tanukisoftware tape tapes tar target targets -task tasks tau tax tbalance tbody tcp technical technology tell teller tellers -telling temp template templated temple temporarily temporary term terminal -terminate terminated terminates terminating termination terms tertiary test testa -testb tested testid testing testlob tests testtab text textarea textbase -texts textual than thanks that the their them themselves then theoretical -theoretically theory there thereafter therefore thereof these theta thetasym they -thin thing things think thinsp third this thomas thorn those thousand thousands -thread threaded threading threads three threshold threw throttle throttling -through throw throwable throwing thrown throws thus ticker tid tilde time timed -timeout timer times timestamp timestamps timing tiny tinyblob -tinyint tinytext tired title titled tls tmendrscan tmfail tmjoin tmnoflags -tmonephase tmp tmresume tmstartrscan tmsuccess tmsuspend today todo together -token tokenize tokenized tokenizer tokens tolerant tom tomcat too took tool -toolbar toolkit tools toolset top topic topics toplink tort total totals touch -tpc tptp trac trace traces tracing tracking trade trademark traditional trailing -trans transaction transactional transactionally 
transactions transfer transferred -transform transient transitional translatable translate translated translates -translating translation translations translator transmission transmitted -transparent transport tray tread treated tree trees trick tried tries trig -trigger triggered triggers trim trip true trunc truncate truncated truncates -truncation trunk trx try trying tucker tune tunes tuning turkel turkish tutorial -twelve twice two txt tymczak type typeof types typically typing typlen typo typos -uacute uarr ubuntu ucase ucchino ucirc ucs udp udts ugrave uid ukrainian uml -unaligned unary uncached uncaught unchecked unclosed uncommitted uncompressed -undefined undeploy under underline underlined underlying understand understands -understood undetected undo undocumented undone unencrypted unenforceable unescape -unexpected unfortunately unicode uniform unindexed uninstall uninterruptible -union unique uniqueness uniques unit united units universal universally unix -unknown unless unlike unlink unlinked unlock unmaintained unmapped unmodified -unnamed unnecessarily unnecessary unordered unquoted unrecoverable unrelated -unreleased unsafe unscaled unset unsigned unsorted unsuccessful unsupported -untested until untranslated unusable unused unvisited unwrap unwritten unzip upc -upd updatable update updated updates updating upgrade upgraded upgrading upload -uploaded upon upper uppercase uppermost ups upsert upsih upsilon urgent uri url -urls usa usage usd use used useful user userbyid username userpwd users uses -using usual usually utc ute utf util utility utilization utilize utilizes utils -uui uuid uuml val valid validate validated validation validity validly valuable -value values van var varbinary varchar variable variables variance variant -various varp varying vector velasques vendor verified verify versa version -versions vertical very verysmallint veto via vice view viewed viewer views -violate violated violation virtual virtuoso virus viruses visible 
visit visitor -visualizer vlad void volatile volunteer volunteers von vpn vulnerabilities -vulnerability wait waiting waits walk walker want wants warehouse warehouses warn -warning warnings warranties warranty was washington watchdog watermark way -wayback ways weak web webclient weblog webserver website week weeks wegorkiewicz -weierp weight weights weightx weighty weird welcome well welt were werkzeugkasten -what when whenever where wherever whether which while whirlpool white -whitespace who whole whom why wide widely width wiki wikipedia wildcard wildcards -will william win window windows wiscorp with withdraw withdrawn within without -wizard wlam wondering wood word wordid words work workaround workarounds -workbench worked workgroup working works world worry worst would wrap wrapped -wrapper wraps writable write writecache writer writers writes writing written -wrong www xacon xadb xads xaer xares xatest xbi xbl xbo xby xcdsql xcl xda xdb -xdo xfc xhtml xid xids xmkd xml xmlattr xmlcdata xmlcomment xmlhttp xmlnode xmlns -xmlstartdoc xmltext xor xrmd xrunhprof xsi xsm xtea xti xtime xts xvi xyz yacute -year yen yes yet yield yielding ymd you your yourkit yourself ytd yuml yusuke -yyyy zeile zero zeros zeta zip zloty zone zwj zwnj +dividend divider divides dividing division divisor divisors dll dload dlucene dml +dmoebius dmsys dmul dname dneg doap doc doclet docletpath doclets docs docsrc +doctrines doctype document documentation documented documenting documents doe +does doesn dog doing dollar domain domains don donald donate donation done dong +donor donors dont door dos dose dot dots double doubled doubles doubt douglas +down download downloaded downloading downloads doy dpackaging dpom dproperty +drafter drag drastic draw drda dreamsource dreary drem dreturn dried drive driven +driver drivers drives drop dropped dropping drops dsl dsn dss dst dstore dsts +dsub dtd dtest dtp dual due dummy dump dumping dumps dup duplicate duplicates +durability durable 
duration during durl duske dutch dversion dynamic dynamically +each eacute earlier earliest early ease eases easier easiest easily east easy eat +eater ebean ecb eccn ecdh echo ecirc eckenfelder eckenfels ecl eclipse eclipsecs +eclipselink ecm ecole eder edge edh edit editable edited editing edition editor +editors edugility effect effective effectively effects efficient efficiently +effort egrave eid eing eins einstellung either elapsed eldest elect electronic +element elements elephant elig eligible eliminate elisabetta ell ellipsis elm else +elsewhere elton email emails embedded embedding embeds emergency emf emit emitted +emma empire employee empty emsp emulate emulated emulates emulation enable +enabled enables enabling enc encapsulate encapsulates enclose enclosed enclosing +encode encoded encoder encodes encoding encountered encounters encrypt encrypted +encrypting encryption encrypts end ended enderbury endif ending endings endless +endlessly endorse ends enforce enforceability enforceable enforced engine engines +english enhance enhanced enhancement enhancer enlarge enough enqueued ensp ensure +ensures ensuring enter entered entering enterprise entire entities entity entrance +entries entry enum enumerate enumerated enumerator enumerators enumeration env envelope +environment environments enwiki eof eol epl epoch epoll epsilon equal equality equally +equals equipment equitable equiv equivalence equivalent equivalents era erase eremainder +eric erik err error errorlevel errors erwan ery esc escape escaped escapes escaping +escargots ese espa essential essentials established estimate estimated estimates +estimating estimation estoppel eta etc eth etl euml euro europe europeu euros eva eval +evaluatable evaluate evaluated evaluates evaluating evaluation evdokimov even evenly +event events eventually ever every everybody everyone everything everywhere evict +evicted eviction evolving exact exactly example examples exceed exceeded exceeds +excel except 
exception exceptions exchange exclude excluded excludes excluding +exclusion exclusive exclusively exe exec executable executables execute executed +executes executing execution executor executors exemplary exercise exercising +exfsys exhausted exhibit exist existed existence existing exists exit exited +exits exp expand expanded expands expansion expect expected expecting expedites +expense expenses expensive experience experimental experiments experts expiration +expired expires explain explanation explicit explicitconstructorcall explicitly +exploit explorer exponent exponential export exported exports expose exposed exposes +expr express expressed expression expressions expressly exps ext extend extendable +extended extending extends extensible extension extensions extensively extent +extern external externally extra extract extracted extracting extracts extras +extreme extremely extremes extrinsic eye fabien fabric facade face facilitate facility +fact factor factorial factories factory factual fadd fail failed failing fails failure +failures fair fake fall fallback falls faload false familiar families family faq +far fashion fashioned fast faster fastest fastore fat fatal faulhaber fault +favicon favorite fbj fcmpg fcmpl fconst fdiv feature features feb februar +february federal federated federation fedora fedotovs fee feed feedback fees feff fetch +fetched fetching few fewer ffeecc fffe fid field fields fiery fifo fifty file +filed filename filepwd files filesystem fill filled filler fillers filling fills +filo filter filtered filtering filters fin final finalization finalize finalizer +finally find finder finding finds fine finer finish finished finishes finland fire +firebird firebirdsql fired firefox firewall first firstname fish fit fitness fits +fitting five fix fixed fixes fixing fkcolumn fktable flag flags flash flashback +flat flavour fle fletcher flexibility flexible flexive flip flipped fload float floating +flooding floor florent flow flower flows 
fluent fluid flush flushed flushes +flushing flux fly flyway fmb fmc fml fmrn fmt fmul fmxx fmxxx fneg focus focusable +fog fogh folder follow followed following follows font fontes foo footer footers +footprint for forall forbidden force forced forcefully forces forcing foreign +forever forge forget forgetting forgot forgotten fork form formal format +formatdatetime formats formatted formatter formatting formed forms formula forth +fortin forward forwarding found foundation four fourth fox fqn frac fraction +fractional fragment fragments frame frameborder frames frameset framespacing +framework frameworks fran france frank frasl fred frederico free freed freeing +freely frees freezes frem french freq frequencies frequency frequent frequently +fresh freshmeat freturn friday fried friend friendly from front frontbase +frontend frontends frost fstore fsub fsutil fsync ftl ftp ftps fulfill fulfilled +fulfils full fulltext fully fun func function functional functionality functions +further fuse fusion future futures fuzz fyodor gae gain gallen galois game games +gamma gap gaps garbage garringer gary gast gat gathering gaussian gave gbif gcj +gecko gem gen genealogy general generalized generally generate generated +generates generating generation generator generic generics genetic geneva genkey +genomics gently geo geocoder geocoding geographic geom geometric geometry george +geospatial geospatialnews geqo germany get getdate getenv getfield gets getstatic +getter getters getting ghi gid gif gigabytes gilbert gillet gis git github give +given gives glass glassfish glenn global globally glossary gluco gmail gmb gmbh +gmt gmx gnocchi gnu goes going golden golomb gomes gone good goods goodwill +google googlegroups got goto goubard governed governing government gpg grabbing +graceful graf grails grained grains grajcar grammar grammars grandin grandma +grant grantable granted grantedrole grantee granteetype granting grantor grants +granularity graph graphic graphical 
graphics grass gray great greater greatest +greatly gredler greece greedy green gregorian grep grew grid gridwidth gridx gridy +groove groovy gross group grouped grouping groups groupware grover grow growing +grows growth guarantee guaranteed guard guardian guess guesses guest gui guid +guide guidelines guides gumbo gustav gutierrez gzip hack had haidinyak half hallo +halt hand handing handle handler handlers handles handling hang hangs happen +happened happening happens happy harbor hard harder hardware harm harmless +harmony harpal harr has hash hashcode hashed hashes hashing hashmap hashtable +have having hazorea hbci head header headers heading headless heads health heap +hearts height held hellip hello help helped helper helpful helping helps +helvetica hen hence her here hereafter hereby herein hereof hereto hereunder +herkules heterogeneous heureuse hex hexadecimal hextoraw hey hibernate hibicius +hickey hid hidden hide hider hides hiding high higher highest highlight +highlights highly hilbert him himself hint hints hir his hispanic histogram +historical history hit hits hitting hmac hmmss hms hoc hohmuth hoi hold +holdability holding holds hole holes home homed homepage honor honoured hook hope +hopefully hops horizontal host hostname hostnames hosts hot hour hours hover how +however hprof href hsql hsqldb htime htm html http httpdocs https huang hub huff +huffman huge human hundred hundreds hurt hyc hyde hyperbolic hyperlink hypersonic +hyt iacute iadd iaload iand iastore ibm iced iceland iciql icirc icmpeq icmpge +icmpgt icmple icmplt icmpne ico icon iconified icons iconst icu ide idea ideal +ideas identical identification identified identifier identifiers identify identifying +identities identity idiomatic idiv idle ids idx idxname iee ieee iexcl iface ifeq +ifexists ifge ifgt ifle iflt ifne ifnonnull ifnull iframe ifx ignore ignorecase ignored +ignoredriverprivileges ignorelist ignores ignoring ignite igrave iinc ikura ikvm ikvmc +illegal illegally 
iload image imageio images imaginary img iml immediately immutable +imola imp impact imperial impersonate impl imple implement implementation implementations +implemented implementing implements implication implicit implicitly implied +implies import important imported importing imports impose imposes impossible +improperly improve improved improvement improvements improves improving imul +inability inaccessible inaccuracies inactive inactivity inc incidental include +included includes including inclusive incoming incompatibility incompatible +incomplete incompressible inconsistency inconsistent incorporated incorrect +incorrectly increase increased increases increasing increment incremental +incrementally incremented incrementing increments incubator incurred incurring +indemnified indemnify indemnity indent indentation indentations indented indents +independent independently index indexdef indexed indexer indexers indexes indexid +indexing indicate indicated indicates indicating indication indicator indices +indirect indirectly individual individually indkey indonesia industries +inefficient ineg inet inf inferred infin infinite infinity infix inflate inflater info +inform information informational informed informix informs informtn infos +infrastructure infringe infringed infringement infringements infringes infringing +inherent inherit inheritance inherited inherits ini init initial initialization +initialize initialized initializer initializes initializing initially initiate +initiated initiation inits inject injection injections injury inline inlined inliner +inlining inner inno innodb inplace input inputs ins insecure insensitive insert +inserted inserting insertion inserts insets inside insists inspect inspected +inspector inspectors inst install installation installations installed installer +installing installs instance instanceof instances instantiate instantiation +instead institutes instr instruction instructions instrument instrumentation 
+instrumented int intact integer integers integrate integrated integration +integrity intellectual intelli intended intentional inter interaction interactive +intercepted interest interested interesting interface interfaces interleave +interleaved interleaving intermediate intern internal internally internals +international internationalization internet interpolation interpret interpreted +interpreter interpreting interprets interrupt interrupted interrupting interruption +intersect intersecting intersection intersects intersys interval intervals into +intra introduce introduced introduction inttypes inv inval invalid invalidate +invalidated invectorate invented invention inventor inversed invert inverting +invisible invocation invoice invoiceid invoke invokeinterface invoker +invokespecial invokestatic invokevirtual involve involved involves ior iota ipt +iquest irem ireturn irrespective irstv isam ischildnode isdescendantnode ishl +ishr isin isnull iso isolated isolation israels issamenode issue issued issues +istore isub italiano italy item items iterable iterate iterates iterating +iteration iterations iterator its itself iuml iushr ixor iyama iyy iyyy jack +jackcess jackrabbit jackson jacopo jakarta jakob jalpesh jam james jan january +japan japanese jaqu jar jars jason jaspa java javaagent javac javadoc javadocs +javascript javaw javax jayaprakash jboolean jbyte jcc jchar jcl jconsole jcr jdbc +jdbcx jdbm jdk jdo jdouble jdt jech jefferson jena jenkov jens jentsch jequel +jetty jfloat jia jiang jim jint jlong jmx jmxremote jndi jni jnlp joachim job joe +joel joerg johann john johnny johnson join joined joining joins joist jon jones +joonas jooq jopr jorissen jpa jpox jps jre jsessionid json jsp jsr jsse jstack +jtds jts judged judgment judicial julian july jump jumps jun junctions junit +jurczyk jurisdiction jurisdictions jury just jvm jvoid kaiser kappa karin karl +karlsson kaspersky kawashima keegan keep keeper keeping keeps ken kept kerberos kernel +kerry 
kevent key keyalg keying keypass keys keystore keystores keytool keyword +keywords khtml kicks kidd kill killed killer killing kills kilobytes kind +kindergarden kinds kingdom kiritimati kit kiwi knife know knowing knowledge known +knows knut kobe koi konqueror korea kotek krenger kritchai kupolov kwajalein +kyoto lab label labeled labels lack lacoin ladd ladislav lager laird laload +lambda lamp land lang language languages laptop laquo large largely larger +largest larr last lastly lastname lastore lastval latch late later latest latin +latitude latvia laughing launch launcher laurent lausanne law laws lawsuit +lawsuits lax layer layers layout lazily lazy lcase lceil lck lcmp lconst ldap +ldbc ldc ldiv ldquo lea leach lead leading leads leaf leak leaked leaks leaning +leap learning least leave leaves leaving lee left leftmost leftover legacy legal +legend lehmann lempel len length lengths lenient leod less lesser let lets letter +letters level levels lexicographical lfloor lgpl liability liable lib liberal libraries +library licensable license licensed licensees licenses licensing lies life lifespan +lifetime liftweb light lightweight like likely lim limit limitation limitations +limited limiting limits line linear linearly linefeed lines linestring lineup +link linkage linked links linq lint linux liq liqui lir lirs lisboa list listed +listen listener listeners listening listens lister listing lists litailang lite +literal literals litigation little live lives ljava llc lload lmul lneg lnot load +loaded loader loading loads lob lobs local localdb locale locales localhost +locality localization localized localname locals locate located locates location +locations locators lock locked locker locking locks log logback logged logger +logging logic logical logically login logins logo logos logout logs logsize long +longblob longer longest longitude longnvarchar longs longtext longvarbinary longvarchar +look lookahead looking looks lookup lookups lookupswitch loop 
loopback looping +loops loose lor lore lose losing loss losses lossless losslessly lost lot lots +low lowast lower lowercase lowercased lowest loz lpad lrem lreturn lrm lru lsaquo +lshift lshl lshr lsm lsquo lstore lsub lte ltrim lucene lucerne lugano lukas lumber +lumberjack luntbuild lushr lutin lxabcdef lxor lying lynx lzf mac macdonald +machine machines maciej macr macro macromedia macros made magic magnolia magyar +mahon mail mailing main mainly maintain maintained maintaining maintains +maintenance major majority make makensis maker makes making malformed malfunction man +manage management manager managing manifest manifested manipulate manipulating +manipulation manipulations manley manner manske manual manually many map mapped +mapper mapping mappings maps mar marc march marcio marcy margin marginheight +marginwidth mark marked marker market marketing markets marks markup marmalade +marschall marshal martin mary mask masks master masterkey mat match matched +matcher matches matching material materialized materials math mathematical +mathematicians mathematics matrix matter matters maurice maven max maxbqualsize +maxed maxgtridsize maximum maxlength maxrows maxvalue maxwidth may maybe mbean +mbeans mcleod mdash mdd mddata mdsys mdtm mean meaning meaningful means meant +meantime meanwhile measurable measure measured measurement measurements +meat mechanism media median medium +mediumblob mediumint mediumtext megabyte megabytes mehner meier meijer melbourne +mem member members memcpy memmove memo memory mendonca mentioned menu +merchantability merchantable merge merged merges merging meridian message +messager messages messes met meta metadata meteorite method methods mfulton mgmt +michael michi micro microarray microarrays microsoft mid middle middleware middot +midnight midori midpoint might migrate migrated migrating migration mill miller +million millions millis millisecond milliseconds mime mimer min mind mine +minecraft mini minimal minimalistic minimum 
minneapolis minor mins minus minute +minutes minvalue mirror misc miscellaneous misdirected mishi mismatch miss misses +missing mistake misuse mix mixed mixes mixing mkd mkdir mkdirs mod mode model +modeling models modern modes modification modifications modified modifier +modifiers modifies modify modifying modular module modules modulo modulus moebius +moger moment mon monday money mongodb monitor monitorenter monitorexit monitoring +monitors mono monospace month monthname months moon more moreover morning morton +moscow most mostly mouse mouseover move moved moves moving moz mozilla mozzarella +mpl msg mssql mssqlserver msxml much mueller mul multi multianewarray multipart +multiple multiples multiplication multiplied multiply multiplying multithreaded +multithreading multiuser music must mutable mutate mutation mutationtest muttered +mutton mutually mvc mvcc mvn mvr mvstore mydb myna myself mysql mysqladmin mysqld +mysterious mystery mystic myydd nabla naive naked name namecnt named names namespace +naming nan nano nanos nanosecond nanoseconds nantes napping national nations native +natural nature naur nav navigable navigate navigation navigator nbsp ncgc nchar +nclob ncr ndash near nearest nearly necessarily necessary nederlands need needed +needing needs neg negate negated negating negation negative negligence +negotiations neighbor neither nelson neo nest nested nesterov nesting net +netbeans netherlands netscape netstat network networked networks never nevertheless +new newarray newer newest newline newlines newly news newsfeed newsfeeds newsgroups +newsletter next nextval nfontes nger nice nicer nicolas night nih niklas nikolaj +niku nine nio nls nlst noah nobody nobuffer nocache nocheck nocycle nodata nodded +node nodelay nodes noel noframe noframes noindex noinspection noise nomaxvalue +nominvalue non nonce noncompliance none noop nop nopack nopasswords nopmd nor +noresize normal normalize normalized normally northern northwoods norway nosettings +not 
nota notably notation notch note notes nothing notice notices notification notified +notifies notify notifying notin notranslate notwithstanding nougat nov novelist +november now nowait nowrap npl nsi nsis nsub ntext ntfs nth ntilde nucleus nul +null nullable nullid nullif nulls nullsoft num number numbering numbers numeral +numerals numeric numerical nuxeo nvarchar nvl oacute obey obj object objects +obligation obligations observer obsolete obtain obtained obtains obviously +occasionally occupied occupies occupy occur occurred occurrence occurrences occurs +ocirc octal octet october octype odbc odbcad odd odg off offending offer offered +offering offers office official offline offset offsets often ogc ograve ohloh oid okay +okra olap olapsys old older oldest oline oliver olivier omega omicron omissions omit +omitted omitting once onchange onclick one ones onfocus ongoing onkeydown onkeyup +online onload only onmousedown onmousemove onmouseout onmouseover onmouseup +onreadystatechange onresize onscroll onsubmit onto ontology ontoprise oome oops +ooq open opened openfire opening openjpa opens opera operand operands operate +operates operating operation operational operations operator operators oplus opposite +ops opt optimal optimisation optimised optimistic optimizable optimization optimizations +optimize optimized optimizer optimizing option optional optionally options ora +oracle orange oranges orchestration order orderable ordered orderid ordering +orders ordf ordinal ordinary ordinate ordm ordplugins ordsys oren org organic +organization organized oriented orig origin original originally originals +originate originates originating originator orion orld orm orphan orphaned +orphans osde osgi oslash osmond other others otherwise otilde otimes otterstrom +ought ouml our out outback outdated outer outfile outline outln outperforms +output outset outside outstanding over overall overcareful overflow overflows +overhead overlap overlapping overlaps overload 
overloaded overloading overridden overriding +override overrides overtakes overtaking overview overwrite overwrites overwriting +overwritten overwrote owl own ownable owned owner owners ownership owning owns oymaurice +pacific pack package packages packaging packets pad padded padding page paged +pages pagestore pageview pagination pair pairs pal panel panels panic papa paper +para paradox paragraph paragraphs parallel param parameter parameterized +parameters params paren parens parent parentheses parenthesis parenthesized +parentid parents park parse parsed parsedatetime parser parses parsing parslet +part partial partially participant participate participating particular +particularly parties partition partitioning partners partnership parts party pass +passed passes passing passive password passwords past paste pastebin pasted +pasties pasv patadia patch patched patches patching patent patents path pathogen +paths pattern patterns paul pause paused pauses pay payload payment pbkdf pdf pdo +peace pears peculiar peek pencil pending pengxiang people pepper per percent percentage +perfect perform performance performed performing performs perhaps period periodic +periodically periods permanently permil permission permissions permits permitted +permutation permutations perp persist persisted persistence persistent persister +persisting persists person personal persons perspective pervasive pete peter +petra pfgrc pfister pgdn pgup phane phantom phase phi philip philippe +philosophers phone php phrase phrases phromros physical pick picked pickle picks pico +pid pieces pier pietrzak pilot piman ping pinned pipe piped pit pitest piv pivot +pkcolumn pkcs pktable place placed placeholders places placing plain plaintext +plan planned planner planning plans plant plenty platform platforms play player please +plug pluggable plugin plugins plus plusmn png point pointbase pointed pointer pointers +pointing points poker poland polar pole poleposition policies policy polish poll 
+polling polski poly polygon pom pondered poodle pool poolable pooled pooling +pools poor poormans pop popular populate populated population popup port +portability portable portal portals ported porting portion portions portlet ports +portugal portugu pos position positioned positions positional positive pospichal possibility +possible possibly post postal postfix postgre postgres postgresql posting +postmaster potential potentially poultry pound pow power powerful poweroff +practicable practice prd pre prec precedence precision precisions predicate +predict predicted prediction prefer preferable preferdoslikelineends preferences +preferred prefix prefixes prefs premain premature prep prepare prepared prepares +preparing prepended prepending pres presence present presentation preserve +preserved preserving press pressed pretty prev prevent prevented prevention +prevents previous previously pri price prices primarily primary prime primitive +primitives principal print printable printed printf printing println prints prio +prior prioritize priority private privilege privileges pro prob probability probable +probably problem problematic problems proc procedural procedure procedures +proceed process processed processes processing processor processors procurement +prod produce produced produces product production products prof profile profiler +profiles profiling profit profits program programmed programming programs +progress prohibited prohibits project projection projects prominent promote +prompt promptly proof prop propagate propagated proper properly properties +property proposal proposed prospective prospectively protect protected protecting +protection protects protocol protocols prototype prototyping prove proven provide +provided provider providers provides providing provision provisionally provisions +proxies proxy prune pruned pruning pseudo psi psm psqlodbc pst ptn ptr pub public +publicly publish published publishing pulakka pull puppy pure purely purge 
purged +purpose purposes pursuant push pushed put putfield puts putstatic putting pwd +pwds qian qty qua quadratic quaere quaint qualified qualifier qualify quality +quantified quantifieds quantity quarter quercus queried queries query querydsl +queryframework querying question questions queue queues qui quick quicker quickly +quicksort quickstart quickstarter quiet quirre quit quite qujd qujdra quot quota +quotas quote quoted quotes quoting race rad radians radic radio radius radix +rafel rail railo railroad railroads rainbow raise ram ramiere ran rand random +randomize randomized randomly randomness rang range ranges ranging rank rapid +rapidshare rapping raquo rarr raspberry rate rates rather rathsack ratio ravioli +raw rawbyte rawtohex rawtypes razor razuna rceil rcon rdbms rdf rdfs rdonly rdquo +reach reachable reached reaches read readability readable reader readers reading +readonly reads readwrite ready real reality really realm realtime reaper reason +reasonable reasonably reasoning reasons rebind rebuild rebuilt rec recalculate +receipt receive received receives receiving recency recent recently recipient +recipients reclaimed reclamation recoding recognized recommendations recommended +recompile recompiles reconnect reconnected reconnecting reconstruct record recorded +recorder recording records recover recovered recovering recovers recovery +recreate recreated recreation rect rectangle rectangular recurse recursing +recursion recursions recursive recursively recycle recycled red redeployment +redirect redirected redirection redirector redirects redistribute redistribution +redistributions redo reduce reduced reduces reduction redundancy redundant reeve ref +refactor refactoring refactorings refer reference referenceable referenced +references referencing referent referential referred refers refill reflect +reflected reflection reflective reflectively reflects reformed refresh refreshed +refs reg regard regarding regardless regards regclass regex regexp 
region regions +register registered registering registers registration registrations registry +regression regular regularly regulation regulatory rehash rein reindex reinstated +reissued reject rejected rekord rel related relates relating relation relational +relations relationship relative relatively release released releases releasing +relevance relevant reliability reliable relies reload reloading relocating rely +relying rem remain remainder remaining remains remap remark remarks remedy +remember remembered remembers remote remotely remoting removal remove removed +removes removing ren rename renamed renames renaming render rendering rene reopen +reorder reordering repair repeat repeatable repeated repeatedly repeating repeats +repl replace replaced replacement replacements replaces replacing replay replayed +replicating replication replied reply repo report reported reporting reports +repositories repository represent representation representations represented +representing represents reproduce reproduced reproduces reproducing reproduction +republic req requeried requery request requested requesting requests require +required requirement requirements requires res research resellers reserve +reserved reserves reset resets resetting reside resident resides resin resistance +resistant resizable resize resized resizes resizing resolution resolve resolved +resolver resort resource resources resp respect respecting respective respond +responding response responses responsibilities responsibility responsible rest +restart restarted restarting restarts restore restored restores restoring +restrict restricted restricting restriction restrictions restrictive restricts +result resultant resulted resulting results ret retain retained retains retention +retina retr retrieval retrieve retrieved retrieves retrieving retry return +returned returning returns reuse reused reuses rev revealed reverse reversed +revert reverted reverting review revised revision revisions revisit 
revoke +revoked revolutions rewind rewrite rewriting rfc rfloor rgb rho rice richard rid +ridvan rife right rightmost rights rijndael ring rioyxlgt risk risks risky rlm +rmd rmdir rmerr rmi rmiregistry rnd rnfr rnto road roadmap roads robert roc rogue +rojas role roles roll rollback rollbacks rolled rolling rollover rolls roman room +root rooted roots rot rotate round rounded rounding roundmagic rounds routine routinely +routines row rowcount rowid rowlock rownum rows rowscn rowsize roy royalty rpad rpm rsa +rsaquo rshift rsquo rss rtree rtrim ruby ruebezahl rule rules run rund rundll runnable +runner runners running runs runscript runtime rwd rws sabine safari safe safely +safes safety said sainsbury salary sale sales saload salt salz sam same +sameorigin samp sample samples sanitize sanity sans sastore sat satisfy saturday sauce +sauerkraut sava save saved savepoint savepoints saves saving savings say saying +says sbquo scala scalability scalable scalar scale scaled scales scan scanned +scanner scanners scanning scans scapegoat scc scenarios schedule scheduler schem schema +schemas schemata scheme schmorp school schwietzke sciences scientific scjp scm +scones scoop scope scoped score scott scramble scrambling scratch screen +screenshot script scriptable scriptella scripting scripts scroll scrollable +scrolling sdot seafood seam search searchable searched searcher searches +searching seasoning sec second secondary seconds secret sect section sections +sector secure securing security securityadmin see seed seeded seeds seek seeking +seeks seem seems seen sees segment segments seldom select selectable selected +selecting selection selectivity selector selects self sell selling semantic +semantics semicolon semicolons semmle send sending sends sense sensitive +sensitivity sent sentence sentinel sep sepang separate separated separately +separates separating separator separators sept september seq seque sequence +sequences sequential sequentially sequoia sergey sergi 
serial serializable +serialization serialize serialized serializer serializes serializing series serif +serious serve server servername servers serves service services servicing servlet +servlets sesar session sessions set setlocal sets setter setters setting settings +settlement setup several severe severity sftp sgtatham sha shadow shall shallow +shao shape shard sharded sharding shards share shared shares sharing sharp she +sheet shell shellbook shift shifted shipping short shortcut shortdesc shortened +shorter shortest shortlist shortly should shouldn show showed showing shown shows +shrink shrinking shrinks shuffle shut shutdown shutting shy sibling sid side +sides sig sigma sigmaf sigmetrics sign signal signature signatures signed signers +significant significantly signs signsoft signum sigurdsson silently sill silly +sim similar similarity simon simple simpler simplest simplicity simplified +simplifies simplify simply simulate simulated simulates simulation simulator +simultaneously sin since sine single singleton singular sinh sip sipush sir site +sites situation situations six sixty size sized sizeof sizes sizing skeletons ski +skiing skill skip skipped skipping skips sky slash slashdot slashes slave sleep +sleeping sleeps slept slice sliced slight slightly slist slot slots slovensky +slow slowed slower slowest slowing slowly slows small smalldatetime smaller +smallest smallint smart smith smol smtp smtps smuggled snake snapshot snapshots snipped +snippet snippets soap social socket sockets soerensen soffice soft software sold +sole solely solid solo solution solutions solve solved solves solving some +somebody somehow someone something sometime sometimes somewhat somewhere song +soon sophisticated sormula sorry sort sorted sorting sorts sound soundex sounds +source sourceforge sourcepath sources space spaces spacing spades span spans +spantext sparql sparse spatial spawn spec special specialized specially specific +specification specified specifies 
specify specifying specs speed speeds speedup +spell spellcheck spellchecker spelled spelling spends spent sphere spi spiced +spin spliced split splits splitting sponsored spot spots spr spread spring +springframework springfuse sql sqlexpress sqli sqlite sqlj sqlnulls sqlserver +sqlstate sqlxml sqrt square squill squirrel src srcs srid ssd ssl stability +stabilize stable stack stackable stacked stage stages stamp standalone standard +standardized standards standby standing stands star staring start started starter +starting starts startup starvation starves stat state stated statement statements +states static stating station statistic statistical statisticlog statistics stats +status statute statutory stay stays std stdc stddev stddevp stderr stdint stdio +stdlib stdout stealing stels step stephane steps steve steven steward sticc still +stmt stock stolen stop stoppage stopped stopper stopping stops stopwatch stor +storage storages store stored storepass stores storing story str straight +straightforward strange strategy stream streamed streaming streams street +strength stress stretch strict strictfp strictly string stringdecode stringencode +strings stringtoutf strip stroke strong strongly stru struct structural structure +structured structures stub stuck student studios study stuff style styles +stylesheet stylesheets sub subclass subclasses subclipse subdirectories sube +subject sublicense submit submitted submitting subpackages subqueries subquery +subscribe subselect subsequent subsequently subset substance substitute +substituted substitution substr substring substrings substructure subsystem +subtract subtracted subtracting subtraction subversion succeed succeeded succeeds +success successful successfully succession such suddenly sudo sue sufficient +sufficiently suffix sugar suggest suggested suggestion suggestions suit suitable +suite suites sullivan sum summand summary summer summertime sums sun sunday sup supe +super superclass superfluous 
superinterfaces superior superseded supertable +superuser supplemental supplied supplier supply support supported supporter +supporters supporting supports supposed suppress sure surname surrogate +surrogates surrounded survive survives susan suse suspended suspicious suxxess +sval svg svn swap swapped sweden sweep swing swiss switch switched switches switching +switchstatements switzerland swprintf swt sxd syb sybase syear sylvain symbol +symbolic symbols symmetric sync syncable synced synchronization synchronize +synchronized synchronizers synchronizes synchronizing synchronous synchronously +synonym syntax synth synthetic syrup sys sysadmin syscat syscs sysdate sysdba +sysdummy sysfun sysibm sysibmadm sysibminternal sysibmts sysman sysproc syspublic +sysstat syst system systems systime systimestamp systools syswow syyyy szlig tab +tablance table tables tablespaces tableswitch tabs tabulation tag tags tagtraum +tahoma tail tailored takanori take taken takes taking talking tan tangent tanh +tanuki tape tapes tapping tar target targeted targets task taskkill tasks tau tax +taxon tbalance tbody tcp tea team teams tear teatime technical technologies +technology tell teller tellers telling tells temp template temple temporaries +temporarily temporary ten tera teradata term terminal terminate terminated +terminates terminating termination terms terrence terribly tertiary test testa +testb tested tester testid testing testlob tests testtab text textarea textarray +textbase textbook texts textual thai thailand than thanks that the their theis +them themselves then theoretical theoretically theory there thereafter therefore +thereof these theta thetasym they thimel thin thing things think thinks thinsp +third this thomas thompson thorn those though thought thousand thousands thread +threaded threading threads three threshold threw throttle throttled throttling +through throughput throw throwable throwing thrown throws thumbs thun thursday +thus tick ticker tid tigers 
tilde time timed timely timeout timer times timestamp +timestampadd timestampdiff timestamps timezone timezones timing tiny tinyblob +tinyint tinytext tip tips tired tis title titled titles tls tme tmendrscan tmfail +tmjoin tmnoflags tmonephase tmp tmpdir tmresume tmstartrscan tmsuccess tmsuspend +tmueller tmzone toast toc today todescato todo tofu together toggle token tokenize +tokenizer tokens tolerant tom tomas tomcat tong too took tool toolbar toolkit +tools toolset top topic topics toplink topology tort total totals touch toward +tpc trace traces tracing track tracked tracker tracking tracks trade trademark +trademarks traditional traditionally trailing train trans transact transaction +transactional transactionally transactions transfer transferred transferring +transform transformation transient transiently transition transitional transitioned +transitions translatable translate translated translates translating translation +translations translator transmission transmitted transparent transport travel +traversal traverse traversing tray tread treat treated treatment trede tree trees +trial trick tricky tried tries trig trigger triggered triggers trigonometric trim +trimmed trims trip trivial trouble true trunc truncate truncated truncates +truncating truncation trunk trust trusted truth trx try trying tsi tsmsys tsv tucc +tucker tuesday tune tunes tuning turkel turkish turn turned turns tutorial tweak +tweaking tweet twelve twice twitter two txt tymczak type typed typeof types typesafe +typical typically typing typlen typname typo typos typtypmod tzd tzh tzm tzr +uacute uarr ubuntu ucase ucb ucirc ucs udt udts uffff ugly ugrave uid uint ujint +ujlong ulimit ultimate uml umlaut umr unable unaligned unary unavailability unbound +uncached uncaught unchanged unchecked uncle unclear unclosed uncommitted uncommon +uncompressed undefined under underflow undergraduate underline underlined +underlying underneath underscore understand understanding understands 
understood +undetected undo undocumented undone unencrypted unenforceable unescape unexpected +unfortunately unhandled uni unicode unified uniform uniformly unimplemented +unindexed uninitialized uninstall uninteresting uninterpreted uninterruptible +union unique uniquely uniqueness uniques unit united units universal universally +unix unixtime unknown unless unlike unlikely unlimited unlink unlinked unload unloaded +unloading unloads unlock unlocked unlocking unlocks unmaintained unmappable +unmapped unmodified unmounted unnamed unnecessarily unnecessary unneeded uno unoccupied +unofficial unordered unpredictable unquoted unrecognized unrecoverable +unreferenced unregister unregisters unrelated unreleased unsafe unsaved unscaled +unset unsigned unsorted unspecified unstable unsuccessful unsupported +unsynchronized untested until untranslated unusable unused unusual unvisited +unwrap unwrapped unwritten unzip upc upd updatable update updated updates +updating upgrade upgraded upgrader upgrades upgrading upload uploaded upon upper +uppercase uppercased uppermost ups upsert upset upside upsih upsilon urgent urgently +uri url urls usa usable usage usd use used useful user userbyid username userpwd +users uses using usr usual usually utc ute utf util utilities utility utilization +utilize utilizes utils uui uuid uuml vacancy vacuum vacuuming val valid validate +validated validates validating validation validities validity validly valign +valuable value values van var varargs varbinary varchar variable variables +variance variant variants varies various varp varray vars vary varying vasilakis +vast vector vectors vendor venue verbatim verbose verification verified verifier +verifies verify verifying versa version versioned versioning versions versus +vertica vertical very verysmallint veto via vice victor videos view viewed viewer +viewport views vii viii violate violated violation violations virtual virus +viruses visible visibility vision visit visited visitor 
visitors vista visual visualize +visualizer vividsolutions vladykin void volatile volume volunteer volunteers von +vpda vulnerabilities vulnerability wait waited waiting waits waives wake wakes +walk walker want wants war warehouse warehouses warn warned warning warnings +warranties warranty was washington wasn watch watchdog watcher water watermark +watson way wayback wayne ways wchar wcslen weak weakreference weary web webapp +webclient webkit weblica weblog website wednesday week weeks wegorkiewicz weierp +weight weights weightx weighty weird welcome welford well welt wend were +werkzeugkasten what whatever when whenever where wherever whether which while +whirlpool white whitespace who whole whom whose why wide widely widening wider +widows width wiki wikipedia wildam wildcard wildcards will william willing win +window windowed windows winexe winterthur wire wireless with withdraw withdrawn +within without wizard wkb wkt wlam wmsys wojtek wondering wood word wordid words +work workaround workarounds workbench worked worker workers workflow workflows +workgroup working works world worldwide worry worst would wrap wrapped wrapper +wrapping wraps writable write writer writers writes writing written wrong wrote +xacon xadb xads xaer xaltjvm xares xatest xbi xbl xbo xby xcl xcopy xda xdb xdo +xfc xhtml xiaodong xid xids xii xive xlint xmkd xml xmlattr xmlcdata xmlcomment +xmlhttp xmlnode xmlns xmlstartdoc xmltext xmx xor xrmd xrunhprof xsi xsm xtea xti +xtime xts xvi xyz yacute year years yen yes yet yield yielding yjp ymd york you +young younger youngest your yourself youtube ytd yuml yyfxyy yyyymmdd zeile zen +zepfred zero zeroes zeros zeta zhang zip ziv zloty zone zones zurich zwj zwnj +recompiled incl reveal designators templates invoked candidate handshake altered +accomplished permanent clarify weaken excl alternatively dita imjcc optimizes +dotall multiline xdoclint scenario locationtech -cron ide pageview track gat analytics tracker implicitly ignores pro 
providers -tmpdir mini owns accordingly snippets receiving rainbow pools groupware biz -greenwich sqli informix pointbase fbj pervasive jtds ifx syb mimer sybase -frontbase intersys maxwidth belonging learning mono typical toggle winexe -hider ikvmc invert recycle filtering lesser recycled assertion runner teradata -christian lgpl elapsed ncr disposed heureuse tera years retrieves unlocked -selecting vista everywhere locations zones fragment svg thought constructors -doubles validating matched established accu accum stats resetting parallel -delays guess downloaded jars advantages interrupt javen sourcepath unneeded -compressibility ext crc enumerate components mkdir jant downloading mismatch -timebomb thinks technotes chmod overloading javase flux solves fastest -quickstarter bridge bpm trust guides improvements customizing easiest -workflow seque npl netstat ano spellcheck eplfaq opensource zdnet burnette -regard epl huge chao derek nls lawsuits counterclaim participate assurances -cooperate indemnified disclaims practicable cease solely receives partners -exchange agrees inconsistency customarily reproduction arose noncompliance -serve contributions publicly facilitate implication jury acceptance revisions -accompanying bring unavailability iii appropriateness originate laws stated -although sole accordance filed originates responsibilities alleged defend -material hereto equipment copyrighted enforceability excludes licensees -estoppel manner reserves defense complies suitable identify infringe -originator brought contribution effectively assumes waives conjunction -informs negotiations collectively omissions trial nor qualify steward neither -worldwide everyone additions expense lawsuit checksums jazoon flashback -dieguez dfile mvn dversion dgroup dpackaging dartifact durl dpom pom -subpackages slowed deactivate throttled noindex expired arizona export -intentional knowing jcl plug facade deployment logback confusion visited -pickle associate subtraction 
negation multiplication visitors sharp connector -derbynet ado happy derbyclient unspecified federated sysadmin lengths doing -gives clunky cooperative paged conflicts ontology freely regards standards -placing refer informational unlocks memo unlimited unmounted keeping hints -hides heterogeneous construction rutema prepending rowscn overrides jconsole -mbean explicit directs leaves printing holds covariant redirector piped alarm -indicate timezone unmounting beep ignoring gary tong extending respective -overloaded decide clash involve verification dining recursively originating -philosophers technologies modeling federation enterprise semantic deductive -fusion legacy decoded commented trimmed reaches indicating marks scaled tells -monitor benefit performing conditional significant arithmetic instrumented -doclets extremes instructions printable skips sava sources cms bytecode cfml -cold compiles markup spellchecker interleaved poormans programmed swt railo -clobs resizes precisions scales stopwatch shortly puts captured decremented -him uninterpreted entering composed patched rowlock +stefan jobs defunct spain jconn decades chrono courtesy dtl xdg avp lifecycle +experiment throughout staging booth akkuzin observed maxinplace russian +ema sch bodewig forbid compat calc midway prohibit measuring playing kostya +pstmt rosenbaum pretending inverse safer lived blo sane othe multiplicative +introduces bcd nave picking templating clamp temporal berlin intermittently +pstat props bitget travis -tourtiere okra genen cajun compound poultry matjeshering steeleye guarana -elsewhere dried chowder raclette nord chef kobe gumbaer alfki chocolade -perth mascarpone dairy inlagd gruene vegie category sosse sucre tunnbroed -alice rodney flotemysost uncle anton chinois cranberry pavlova courdavault -rogede sauerkraut maxilaku manjimup thueringer ravioli manchego cereals fiery -queso seasoning chartreuse infos giovanni pastora pierrot valkoinen anatr sill -gnocchi kaviar louisiana 
konbu gorgonzola outback ipoh wimmers nonna chocolate -cabrales sir grains ikura mishi sild linq erable cust jaqu tarte rostbratwurst -tofu nuss nougat klosterbier fried singaporean roessle lakkalikoeoeri malacca -geitost scones aniseed gula mutton grandma teatime carnarvon chai -gudbrandsdalsost shouyu ost rhoenbraeu beverages blaye confections gute -schoggi stout syrup longbreads seafood condiments gummibaerchen koeken pears -cote spiced mee knaeckebroed verte jack lager schokolade longlife pepper -semmelknoedel spegesild frankfurter laughing boysenberry england gustaf gravad -creme meat fabioli sirop mozzarella niku fantastica pate telino angelo lax -filo gumbo pasties marmalade ale suklaa scottish zaanse hokkien sauce -crab northwoods escargots organic sasquatch bourgogne clam camembert tigers -chang lumberjack roed biscuits +toto anatolii callables spurious disregard uniqueidentifier promoted oom doesnt +optimisations roughly contractid succeeding tran fixme iters ovain orgid chosen +arbonaut exposing obscure determined turkey buildings indexhints acct +choosing optimise arte preparator katzyn bla jenkins tot artes pgserver npe +suffers mni significance vise identiy vitalus aka ilike uppercasing reentrant +aff ignite warm upstream producing sfu jit smtm affinity stashed tbl +stumc numbered -usable weblica jena preserved instrumentation inspect jayaprakash ashwin -varargs automate couldn unclear eat dtp disks tablespaces great reproduces -hhh overridden sqle propogation buildid tsv monospace microarrays pathogen -geocoder geocoding longitude estimating microarray latitude magnolia pfgrc -refill analyzers patches popular came growing indication arabic graphic toc -numbering goto outline makensis macro hyperlink dispatch setlocal wend -widows msgbox designer styles families uno soffice orphans stan ucb rem -pdfurl upate pagebreak ren echo atlassian buggy submitted xcopy invention -harbor generics pojo annotations ecl subclipse jmx bean plugins team cha emma 
-nullsoft annotation cover scriptable guidelines consider batis coding -anlytc art orafusion ery ideas jequel indices quaere dsl accumulated vary -causing nopack water onto resolve ontoprise treatment sparql rdfs piman zen -owl rdf recommendations axiom fabric broker osoa epictetus replacements -brittain wasn november unixtime ddd jason formatter psqlodbc onmousemove -monitoring razuna asset drag pekar devx dmitry fragments onmouseup nav -onmousedown olap valign army blitz backgammon knife abbreviate berger dhuse -strictly greg germany abbreviates frontends cleversafe payload cloneable -scripting jaks reconnected serverlist safes somewhere anzo war contacts helpful -implies looping cataloguing mapper frees javaw geographic borges grass -somehow marcio groove roy gis matt targeted brazil dig opt deregister -classname recaptcha unload unloaded unloads activator statistic hence rathsack -reflects doy bloom minimal gmx conserve panic serious robert thursday -wednesday saturday friday tuesday sharing opposite fassi dario clauses -factorial blogspot displaying thedevcloud dayof safety chrome favorite thumbs -localization olivier hprof jps jstack qua processor casting brasilia leap -daylight vision declarative shape formula webapp catalina study impact -statisticlog activeobjects manske redeployment michael kaspersky datatext -bleyl donald conservative offsets diabetes ansorg allocating osmond gluco -joachim mysqladmin sudo mysqld indicator wire ring relates expedites -approximated approximation dvan dsn dobysoft ebean syswow tmueller dbbench -connecturl problematic transformation lazy querydsl squill empire liq fle -xive evolving mssqlserver eric respond faulhaber fixing northern lying -federal santa america county clara courts california york venue away stages -titles headers grew orchestration social razor finder ranging friend intervals -bot jot delicious rife appenders circles spelling cash sky ecm nuxeo poland -opengeospatial sfs symmetric obsolete failing 
parenthesis unloading refreshed -grails reloading slightly accepting deploying conflicting recovered counters -versus extracts squirrel misdirected rle looking arc addressed european -soerensen favicon glass restarts flexive fish resulted vpda mvc kotek jan -consistently springfuse grep signatures wrote symbolic parents caches readers -animate scaladoc models disadvantages vladykin sergi trims requesting -handing bonita placed euros embeds reliability singular unregister quotas -overall httpdocs tigris eclemma separates underscore yajsw she her truncating -relocating smtps smtp osde joist catching guesses delimiters shortlist sheet -rowspan cheat partitioning datepart dreamsource toussi locates fred -longnvarchar collate localdb nan bootclasspath bcp retrotranslator iterable -ops jopr googlegroups fletcher prefer djava expires fffe polish articles -attachment transiently cleanup dbsnmp olapsys wmsys tsmsys outln ctxsys mddata -ordsys ordplugins mgmt dmsys exfsys mdsys sysman informtn textarray tmzone cdo -emf decompile streamed setmaxlengthinplacelob setcompresslob compressing -compressible subclass ints seeks kilobytes capitalized sqlj psm sigmod acm -shrinking bsdiff toward markets hispanic rad dinamica treat contributing -fraction splits uni stackable snapshots fyodor kupolov denmark greece christos -oort committer vasilakis docletpath aastore dneg drem bipush lxor lshl fsub -ldiv astore fconst newarray icmple lsub irem iastore lookupswitch athrow -bastore icmpeq dconst dmul lload dup ddiv invokespecial fdiv fstore checkcast -sipush fcmpl icmpge freturn imul putfield dsub ixor nop sastore ldc -invokeinterface lcmp castore lastore iflt ifnonnull invokestatic ljava -arraylength ifge baload ineg caload putstatic invokevirtual ior ifne icmpgt -lneg lconst dadd iinc areturn ishr iload aconst iadd laload dcmpl fmul iconst -dload getfield acmpeq dastore fload icmplt fastore frem idiv ireturn fcmpg -iaload getstatic ifle iand decompiling isub ifgt dreturn iushr aaload 
aload -lrem lstore monitorexit lmul monitorenter fadd interpreting ishl istore dcmpg -daload dstore saload anewarray tableswitch lushr ladd lshr lreturn acmpne -locals multianewarray icmpne fneg faload ifeq decompiler zeroes forgot -modern slight boost characteristics significantly gae vfs centrally ten -approach risky getters suxxess gmb delegate delegating delegates collisions -linkage superfluous disallow scoop moebius inputs copilot dmoebius leod jenkov -jakob poker docware peter unstable measurable scramble reissued recreation -scrambling distinguish official unofficial distinguishable overwrites lastval -notranslate vince bonfanti alphabetically sysdummy sysibm activation -deactivation concatenating reproducing black railroads railroad radius moz -imageio argb bilinear rendering stroke interpolation flip diagrams draw -delim overlap subselect bitwise dclassifier dgenerate compacts chartrand phane -sval cement slave ulimit eclipselink glenn kidd rapidshare score relevance -autovacuum vacuuming endlessly talking evicted splitting unbound declaring -selector descendant isdescendantnode issamenode ischildnode localname -weakreference ancestor junctions wake fills rail sleeps turns grammars straight -answers attachments emails clipboard prioritize tips urgently standby -checklists serves gbif biodiversity wakes taxon ratio ended ipt auckland -galapagos pacific pastebin mystic posting mysticpaste reject prof tick freeing -sweden abbreviated xmx trede googlecode gustav standing hashes -decompressed expansion ziv abbreviated augments omitted gain -subtracted maxed logical lempel increases sibling impersonate proper remembers -moon centric adeptia workflows generalized bpchar binaries incremental poor -slowly prefs precedence mat verbose nonce complicate speedup replay -profiles superinterfaces conventions brace indentations increments -explicitconstructorcall switchstatements members parens alignment declarations -jdt continuation codegen parenthesized tabulation 
ellipsis imple inits guardian -postfix iconified deiconified deactivated activated worker frequent utilities -workers appender recovers balanced serializing breaking austria wildam -census genealogy scapegoat gov compacted migrating dies typtypmod latch await -counting dtest fallback infix places formal extern destination stdout memmove -stdio printf jchar sizeof stdlib jbyte jint uint ujlong typedef jdouble stdint -jfloat wchar hotspot jvoid std ujint jlong vars jboolean calloc argc strlen -equivalent synchronizes sullivan surname doe stepan getstart rojas snprintf -pulakka pagination collide visual aejaks simulation joonas finland minneapolis -determine timestampdiff harmony doap shortdesc wireless iceland sigurdsson -darri chunks bjorn chunked watson regardless usefulinc represented pushd -recorder grajciar recording slovensky uninitialized arriving lubomir unchanged -erik dick calculations lutin cite bom evaluating telegard excel bbs deprecation -importing cumulative fired convenient sums judged anybody vacuum encountered -corresponds cnf informatique ilm boundaries shao crossed retroweaver usr pico -pengxiang china timestampadd picked releasing autoboxing conversions -pagestore addon defaults introduced customized histogram transact locker activemq -iml unified regclass netbeans geqo servername creator eclipsecs cacheable -stacked unable seeking underflow violations evaluates repeats minimalistic -licensing appreciate textbook diligence undergraduate afaik mathematics chris -arrangements bugfix premain longs majority crashing behaving inst inventor -javaagent park accurately adopt consists night equally enhance enhanced -skiing honor marketing sleeping dlucene timezones shifted analyzed insists -train joining bilingual existed extremely fog bordercolor overlapping -unlocking webkit dalvik recorded defrag marschall helping victor philippe -pyankov enctype multipart boundary mistake enlarge demonstrates aggregating -bypassing khtml doubled inlined defragmented 
registers leftover ugly artificially -presentation defragment queried activities knut tailored dhis statistical norway -mailto dong adconion edong twitter consortium ogc geo geospatial flexibility -mbeans unregisters subtracting multiplying dividing contended bindings -projection managing observer misuse windowed discriminator abort familiar rice -reachable mind develop disposition extras arithmetics readwrite syncable -requeried requery closable curr outdated market accurate borg theis welford -ooq exceeded eye hannibal stels garringer czech prevention propagate -compromise portion nodded rapping door stealing napping artifacts lore -pondered curious muttered quaint chamber nearly unwrapped flows weary volume -tapping gently dreary wrapping tis moger udt chafik outfile geom scalable -highly cloud infrastructure elton wayne explanation berne iterates -denydatareader securityadmin backupoperator ddladmin accessadmin -denydatawriter datareader datawriter catlog -raise inspector maintain understanding annotated loose ensures inherits -trouble designed decouple reflectively skeletons retention reflective validates -clip upgrades demonstrate inspectors -exceed identities differentiate inherited tracks strip suggestions -registration sanity improperly annotate inheritance composite inspected -hurt imposes marshal policy upgrader configurations dark varray xlint executor -completion inactivity exports maintains backside schwietzke rene rectangular grandin noel -sine cosine tangent cotangent trigonometric hyperbolic lte abe alphabetical killer -diagnostics checkout somewhat icu delegation classifications karlsson applet -litailang springsource eccn springframework spr growth teams gigabytes europe -mcleod decade experience travel willing scjp himself routinely tsi retrieving -multiplied ross judson closeable watcher enqueued referent refs watch tracked -preserving disallowed restrictive dst regions kiritimati flow wider nanosecond -march april cutover julian transitions 
enderbury kwajalein viewport onscroll -umlaut reconstruct inclusive proxies verifier slept superclasses -verifying predicate signers verifies handlers kiwi callbacks traditionally -unusual apfel caught overcareful tricky nodep junit eventually concrete -enhancer banana nit cglib challenging intercepted banane assertthrows -objenesis prepend detecting overridable eater forgetting tear -fork tester jaspa redirection johnny brings gone jooq iciql offline pdo mappings largely -pst patadia summertime jalpesh scheme compilable ski takanori dsts kawashima -kokoci seldom jaros ciphers srcs invectorate noah nfontes fontes recoding -minecraft videos youtube dataflyer bukkit alessio adamo jacopo angel leon frost -deserializing eckenfelder daniel serodio dirname semicolons whose stretch -stabilize succeeded widening optimise deprecate increasing leaning rotate git -hub rewind spawn shimizu fumiyuki nelson github laird rollover millions -ljnelson edugility sormula pushed backslashes slashes lukas batched typesafe -bundled jacob terrence policies periodic eases eder corporate regulatory -burden isnull periodically suse optimisation krenger qvortrup huang jmxremote -clusterable shortcut quota wcslen flyway cacao tea memcpy someone iced -korea cpp raspberry inttypes korean hmac swprintf ptr agile rawtypes belgium -jia laurent midori stdc macros clocks xaltjvm teruo dylan debian counted -serializes semantics advances severe defensive maintaining collision -authenticating song lir evict edge adjusts recency lirs prune heads sigmetrics -resident guard hir jiang resistance zhang xiaodong -zurich bern biel lugano geneva gallen lausanne chur basel winterthur -bellinzona thun lucerne bienne visualize modifies -pasted deliberate unsaved invented earliest expose pruning revert -derive bounding greatly extreme terribly iterating pruned percentage -apart render cloned costly antialiasing antialias quercus rect mvr retina -sonatype deployed uffff bhat prashant doug lea retained inefficient 
segments -segment supplemental adjust evenly pick diehard mixes avalanche candidates -edition voytovych intersecting cow absent hickey fluid chen qian liberal -richard viktor structured continuous inherent kyoto contends abba optimised -rollbacks overtaking trivial mutation pitest rectangle uncommon deltas -purely intersection obviously cabinet berkeley configurable modular locality -subsystem persisting pit jdbm bigserial rtree mutationtest serializer feff mvstore -versioning sector survives goes ssd ambiguity sizing perspective jumps -incompressible distinguished factories throughput vectors tripodi cracking -brown tweak pbkdf sharding ieee galois otterstrom sharded hruda argaul gaul -simo unpredictable overtakes conditionally decreases warned coupled spin -unsynchronized reality cores effort slice addleman koskela ville blocking seen -isam charindex removal getdate jesse fake covers covering cheaper adjacent spot -transition anthony goubard netherlands versioned orderable customizer cachable -customizers retains scalability assuming gili cancelled departments juerg -franklin indicated offending unimplemented executors dumping variants -presence spiess azeckoski aaron cowwoc decompiles canceling vividsolutions -quadtree envelope geometry polygon typname intersects wkt intersects wkb -coordinate geometric rates cope attempting sphere hyde clinton taskkill -mgcodeact cumer reach notably computation varies smuggled stderr sees messes -nico devel nicolas linestring atelier fortin cnrs tweet geospatialnews bundles -srid roads overlaps anyhow poly manifested cardinal invalidated boosting rogue -temporaries encounters editors navigable accumulate underneath -turned irrespective reflected gathering badly overwriting -persists forwarding periods discussion whatever revisit decision -detrimental dedicated kaiser perhaps chromium shortened -layers waited descent spliced abstracts planning interest among sliced -lives pauses allocates kicks introduction straightforward 
getenv -ordinate tweaking fetching rfe yates cookie btrfs cookies -nocycle nomaxvalue nominvalue cycling proceed prospective exhausted contingent -validities hang degenerates freezes emulation gredler cemo koc blanked -reverting blanked jump capitalization capitalize symbol symbols verbatim -closest resultant savings designator numeral numerals lowercased uppercased -casing epoch century abbreviation scientific circuit emulates blanks substrings -thai tme jean cycles surrogate submitting putting purged clue bottlenecks conforms -sameorigin nobuffer francois hikari duske phromros thailand kritchai mendonca -maginatics jdbclint lint lsm unmappable adams douglas definer invoker -fmrn fmxxx fmday fml syyyy tzd nov iyy iyyy fmc fmb fmxx tzr btc yyfxyy scc syear -overwrote though randomize readability datagram rsync mongodb divides crypto -predicted prediction wojtek hops jurczyk cbtree predict vast assumption upside -adjusted lastly sgtatham cleaning gillet prevented -angus bernd chatellier macdonald eckenfels granting moreover exponential transferring -dedup megabyte breadth traversal affine tucc jentsch yyyymmdd vertica graf -mutate shard shards succession recipients provisionally contributor statutory -inaccuracies detector logos launcher rewrite monitors equivalents trademarks -reinstated uninteresting dead defendant doctrines beat factual fair suspended -exploit noise ongoing disclaimers shrinks remedy party desirable timely construe -deque synchronizers affero kevent nikolaj hohmuth grajcar jens fogh hostnames -operate resized jni yjp ownable starvation reaper biased introduce epoll hangs -compaction aggressive powerful traversing pietrzak michi karl rewriting consequences -linearly patching perfect hole sip enwiki flooding uniformly recursions happening -permanently nucleus forbidden student trusted poodle agentlib -jech ladislav cognitect sergey thompson evdokimov arykov mfulton -dimitrijs fedotovs kingdom manley xso latvia ontwikkeling reeve -extendable 
republic uniquely datasources accidentally recursing respecting -young sweep clearer accounting disappeared donor oome ken jorissen nesterov -degradation failures fashion disjunctive mentioned conjunctive misses broke -authenticate orphaned registrations topology planner -zepfred frederico thimel arnaud manipulating strongly lots aquiles younger needing +reopening cloudera hive clustername whomooz unittest anymore snowflakecomputing unpadded endpoint redshift backingtable +trimming hadoop azure resolves snowflake testsynonym plays charsettable synonyms nonexisting impala codepage recognize +dbm forwarded amazon stmnt excessive testvalue + +rowids searchers tcnt enforcing timeanddate nullifies determines believe giving +vectorwise preparation corrupting cubrid diffing unrestricted cleanups warns +rowspan specifically unoptimized stand emphasize cascaded exasol minimize rnum figure +emptying goal gathers multithread amend raised iter gathered gather especially requiring +collaboration thank essentially bunch vmlens subroutines nulled + +ndx quoss isn nonquoted pippin variation pierre allowable granny liberty fkey kervin veg banapple unconnected +alphanumeric england acdef landry arun mederp detached lyderic imperator morocco sumx websphere fruit +joaquim overides altertable novalidate udomain managed rewritten unquote identifer jake innocuous golay +bellotti clemens donators domainusername +veryveryveryveryveryverylongveryveryveryveryveryverylongveryveryveryveryveryverylong namer veryveryveryveryveryverylongve +chittanoor carrot + +contextual unknowns enquote respectively sessionid reconnection selfreferential bbddbb instant subprotocol ddbbbb +zzbbzz cldr booleans maria enquotes mtc cbuf checksummed nreturn despite bbzz readlimit retries cceecc reconnects +unconditionally coco aren eecccc decimals charsets zzbb lsb msb usecount outdir endian misleading precompiled +assorted reimplemented hangups confirmation predefined + +mdy destfile hclf forbids spellchecking 
selfdestruct expects accident jacocoagent cli historic mitigate +jacoco xdata invokes sourcefiles classfiles duplication crypto stacktraces prt directions handled overly asm hardcoded +interpolated thead + +die weekdiff osx subprocess dow proleptic microsecond microseconds divisible cmp denormalized suppressed saturated mcs +london dfs weekdays intermittent looked msec tstz africa monrovia asia tokyo weekday joi callers multipliers ucn +openoffice organize libre systemtables gmane sea borders announced millennium alex nordlund rarely + +opti excessively + +iterators tech enums incompatibilities loses reimplement readme reorganize milli subdirectory linkplain inspections +geometries sourceschema destschema generatedcolumn alphanumerically usages + +sizable instantiates renders sdt txcommit unhelpful optimiser treats rejects referring untrusted computes vacate inverted +reordered colliding evgenij archaic invocations apostrophe hypothetically testref ryazanov useless completes highlighting tends degrade + +summands minuend subtrahend localtime localtimestamp governs unfinished pressure closure discovered victim seemingly +flaw capture coherent removals silence opentransactions picture tokar mailto andrei dur discarded blocker captures txdm +intentionally authenticator authrealm ventura credentials alessandro validator acquisition vital mariadb preventing +ewkt ewkb informations authzpwd realms mappers jaxb realmname configurationfile unmarshal jaas externals customize +authenticators appname interrogate metatable barrier preliminary staticuser staticpassword unregistered inquiry +ldapexample remoteuser assignments djava validators mock relate mapid tighten +retried helpers unclean missed parsers sax myclass suppose mandatory testxml miao ciao +emptied titlecase ask snk dom xif transformer dbf stx stax xof descriptors +inconsistencies discover eliminated violates tweaks postpone leftovers +tied ties +launched unavailable smallmoney erroneously multiplier newid pan 
streamline unmap preview unexpectedly presumably +converging smth rng curs casts unmapping unmapper +immediate hhmmss scheduled hhmm prematurely postponed arranges subexpression subexpressions encloses plane caution +minxf maxxf minyf maxyf bminxf bmaxxf bminyf bmaxyf +minxd maxxd minyd maxyd bminxd bmaxxd bminyd bmaxyd +interior envelopes multilinestring multipoint packed exterior normalization awkward determination subgeometries +xym normalizes coord setz xyzm geometrycollection multipolygon mixup rings polygons rejection finite +pointzm pointz pointm dimensionality redefine forum measures +mpg casted pzm mls constrained subtypes complains +ranks rno dro rko precede cume reopens preceding unbounded rightly itr lag maximal tiles tile ntile signify +partitioned tri partitions + +discard enhancements nolock surefire logarithm +qualification opportunity jumping exploited unacceptable vrs duplicated +queryparser tokenized freeze factorings recompilation unenclosed rfe dsync +econd irst bcef ordinality nord unnest +analyst occupation distributive josaph aor engineer sajeewa isuru randil kevin doctor businessman artist ashan +corrupts splitted disruption unintentional octets preconditions predicates subq objectweb insn opcodes +preserves masking holder unboxing avert iae transformed subtle reevaluate exclusions subclause ftbl rgr +presorted inclusion contexts aax mwd percentile cont interpolate mwa hypothetical regproc childed listagg foreground +isodow isoyear psql + +waiters reliably httpsdocs privileged narrow spending swallow locally uncomment builders +setjava lift hyperlinks lazarevn nikita lazarev lvl ispras bias dbff fals tru dfff +recognition spared hacky employing occupancy baos shifts littlejohn pushes scrub existent asterisked projections +omits redefined ensured arrayagg objectagg bmp uabcd prefixed incoherence aggressively smb invalidating filesystems +improper subcondition boxes negates abrupt chooses hindi updater zoned tolerable interference elimination 
+prepend honored evacuated peeked queued transforms inbounded fragmented unprotected adjustment supposedly alloted +housekeeping trail breadcrumb bets seasoned rewritable rpi eliminating projected reenterant varint races outcomes +sparsely shifting vacated evacuation bullet allocations projected evacuatable pin capable rewritable deficiency +successfull deduplication entrant mvmap sporadic irrelevant interrupts +sit sitting sooner hdr considering encounter compete quickack decrementing exhausting caveat aschoerk circular ident +scr ffffl suspend asap ldt lmt movement ago snapshotting paris phenomena backends quirks pgjdbc jupiter grab folds +umcfo iapi autoloaded derbyshared darkred coral mistyrose lightseagreen unmodifiable posix exc attrs relativize +quotient niomem niomapped obtaining rare occasions oversynchronizing disallows opponent adversarial broader decent tmv +prize secured stateful generification bracketed permissible opaque aside indexable daytime uncomparable reevaluates +pct sliding deliberately sampling grabs saw video keyed carries estimator restrain remainer magnitude placeholder +expandable jira meaningless iterated maliciously crafted cdef attention deserialized hurts absorb bufcnt digests +consumer reread relname proargtypes pronamespace relnamespace heidi proname reltuples collects trigraphs nspname +timetz timestamptz psycopg adbin attrdef objoid attnotnull adnum adrelid objsubid atttypid attname attisdropped pgc +attrelid currtid encodings + +rolconnlimit spcname indisclustered tgconstrname relhasoids rolcreaterole usecreatedb datconfig reltablespace relchecks +amname relhasindex tablespace reltriggers tgconstrrelid groname indrelid relhasrules classoid inhseqno tgargs datdba +indisunique rolinherit datacl rolvaliduntil datname indexprs usename typbasetype rolconfig relkind spcacl prorettype +datallowconn atthasdef dattablespace rolcreatedb inhrelid inhparent attlen rolname rolcanlogin aclitem datlastsysoid +indpred tgfoid indisprimary adsrc 
spcowner tgnargs typtype typinput rolcatupdate typnamespace tgrelid authid indexrelid +usesuper tgdeferrable rolpassword relam relpages tginitdeferred rolsuper autovacuum typnotnull spclocation cancreate +nsp pgagent pga awoken serverencoding untyped ambiguities tons lhs letting rhs opportunities specifications +usefully pipelining fetches reenable joiner visits dcl avxaaa german fold degree supertype overloads hierarchy locator +conrelid conkey tabrelname refnamespace dsc pred typrelid conname contype confrelid numscans beaver typdelim typelem +jsonb und decfloat attnums oids studio smells pvs mention statically deletable insertable reconstructed similarly +submissions explaining cycled assigns separation aimed ababab quotation cleanly beff cdab efgh +xnor bitnand bitcount nand bitnor bitxnor ulshift urshift rotates rotation rotateleft rotateright leaking incomparable +deref corr asensitive sqlexception avgy avgx lateral rollup syy reseved specifictype classifier sqlcode covar uescape +ptf overlay precedes regr slope sqlerror multiset submultiset inout sxx sxy intercept sqlwarning tablesample preorder +orientation eternal consideration erased fedc npgsql powers fffd uencode ampersand noversion ude considerable intro +entirely skeleton discouraged pearson coefficient squares covariance mytab debuggers fonts glyphs +filestore backstop tie breaker lockable lobtx btx waiter accounted aiobe spf resolvers generators +accidental wbr subtree recognising supplementary happier hasn officially rnrn diff --git a/h2/src/tools/org/h2/build/doc/package.html b/h2/src/tools/org/h2/build/doc/package.html index 7db049330e..339d88ba98 100644 --- a/h2/src/tools/org/h2/build/doc/package.html +++ b/h2/src/tools/org/h2/build/doc/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/doclet/Doclet.java b/h2/src/tools/org/h2/build/doclet/Doclet.java deleted file mode 100644 index e301a3d310..0000000000 --- a/h2/src/tools/org/h2/build/doclet/Doclet.java +++ /dev/null @@ -1,587 
+0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.doclet; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashSet; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; -import com.sun.javadoc.ClassDoc; -import com.sun.javadoc.ConstructorDoc; -import com.sun.javadoc.ExecutableMemberDoc; -import com.sun.javadoc.FieldDoc; -import com.sun.javadoc.LanguageVersion; -import com.sun.javadoc.MethodDoc; -import com.sun.javadoc.ParamTag; -import com.sun.javadoc.Parameter; -import com.sun.javadoc.RootDoc; -import com.sun.javadoc.Tag; -import com.sun.javadoc.ThrowsTag; -import com.sun.javadoc.Type; - -/** - * This class is a custom doclet implementation to generate the - * Javadoc for this product. - */ -public class Doclet { - - private static final boolean INTERFACES_ONLY = Boolean - .getBoolean("h2.interfacesOnly"); - private String destDir = System.getProperty("h2.javadocDestDir", - "docs/javadoc"); - private int errorCount; - private final HashSet errors = new HashSet(); - - /** - * This method is called by the javadoc framework and is required for all - * doclets. 
- * - * @param root the root - * @return true if successful - */ - public static boolean start(RootDoc root) throws IOException { - return new Doclet().startDoc(root); - } - - private boolean startDoc(RootDoc root) throws IOException { - ClassDoc[] classes = root.classes(); - String[][] options = root.options(); - for (String[] op : options) { - if (op[0].equals("destdir")) { - destDir = op[1]; - } - } - for (ClassDoc clazz : classes) { - processClass(clazz); - } - if (errorCount > 0) { - throw new IOException("FAILED: " + errorCount + " errors found"); - } - return true; - } - - private static String getClass(ClassDoc clazz) { - String name = clazz.name(); - if (clazz.qualifiedName().indexOf(".jdbc.") > 0 && name.startsWith("Jdbc")) { - return name.substring(4); - } - return name; - } - - private void processClass(ClassDoc clazz) throws IOException { - String packageName = clazz.containingPackage().name(); - String dir = destDir + "/" + packageName.replace('.', '/'); - (new File(dir)).mkdirs(); - String fileName = dir + "/" + clazz.name() + ".html"; - String className = getClass(clazz); - FileWriter out = new FileWriter(fileName); - PrintWriter writer = new PrintWriter(new BufferedWriter(out)); - writer.println(""); - String language = "en"; - writer.println(""); - writer.println("" + - ""); - writer.println(className); - writer.println("" + - ""); - writer.println(""); - writer.println(""); - writer.println("
  • " + - "" + - "
    " + - "
    "); - writer.println("

    " + className + "

    "); - writer.println(formatText(clazz.commentText()) + "

    "); - - // methods - ConstructorDoc[] constructors = clazz.constructors(); - MethodDoc[] methods = clazz.methods(); - ExecutableMemberDoc[] constructorsMethods = - new ExecutableMemberDoc[constructors.length - + methods.length]; - System.arraycopy(constructors, 0, constructorsMethods, 0, - constructors.length); - System.arraycopy(methods, 0, constructorsMethods, constructors.length, - methods.length); - Arrays.sort(constructorsMethods, new Comparator() { - @Override - public int compare(ExecutableMemberDoc a, ExecutableMemberDoc b) { - // sort static method before non-static methods - if (a.isStatic() != b.isStatic()) { - return a.isStatic() ? -1 : 1; - } - return a.name().compareTo(b.name()); - } - }); -// -// -// Arrays.sort(methods, new Comparator() { -// public int compare(MethodDoc a, MethodDoc b) { -// // sort static method before non-static methods -// if (a.isStatic() != b.isStatic()) { -// return a.isStatic() ? -1 : 1; -// } -// return a.name().compareTo(b.name()); -// } -// }); - ArrayList signatures = new ArrayList(); - boolean hasMethods = false; - int id = 0; - for (int i = 0; i < constructorsMethods.length; i++) { - ExecutableMemberDoc method = constructorsMethods[i]; - String name = method.name(); - if (skipMethod(method)) { - continue; - } - if (!hasMethods) { - writer.println("" + - "" + - ""); - hasMethods = true; - } - String type = getTypeName(method.isStatic(), false, - getReturnType(method)); - writer.println(""); - writer.println(""); - writer.println(""); - writer.println(""); - id++; - } - if (hasMethods) { - writer.println("
    Methods
    " + type + - ""); - Parameter[] params = method.parameters(); - StringBuilder buff = new StringBuilder(); - StringBuilder buffSignature = new StringBuilder(name); - buff.append('('); - for (int j = 0; j < params.length; j++) { - if (j > 0) { - buff.append(", "); - } - buffSignature.append('_'); - Parameter param = params[j]; - boolean isVarArgs = method.isVarArgs() && j == params.length - 1; - String typeName = getTypeName(false, isVarArgs, param.type()); - buff.append(typeName); - buffSignature.append(StringUtils.replaceAll(typeName, "[]", "-")); - buff.append(' '); - buff.append(param.name()); - } - buff.append(')'); - if (isDeprecated(method)) { - name = "" + name + ""; - } - String signature = buffSignature.toString(); - while (signatures.size() < i) { - signatures.add(null); - } - signatures.add(i, signature); - writer.println("" + - name + "" + buff.toString()); - String firstSentence = getFirstSentence(method.firstSentenceTags()); - if (firstSentence != null) { - writer.println("
    " + - formatText(firstSentence) + "
    "); - } - writer.println("
    " + - type + ""); - writeMethodDetails(writer, clazz, method, signature); - writer.println("
    "); - } - - // field overview - FieldDoc[] fields = clazz.fields(); - if (clazz.interfaces().length > 0) { - fields = clazz.interfaces()[0].fields(); - } - Arrays.sort(fields, new Comparator() { - @Override - public int compare(FieldDoc a, FieldDoc b) { - return a.name().compareTo(b.name()); - } - }); - int fieldId = 0; - for (FieldDoc field : fields) { - if (skipField(clazz, field)) { - continue; - } - String name = field.name(); - String text = field.commentText(); - if (text == null || text.trim().length() == 0) { - addError("Undocumented field (" + - getLink(clazz, field.position().line()) + ") " + name); - } - if (text != null && text.startsWith("INTERNAL")) { - continue; - } - if (fieldId == 0) { - writer.println("
    "); - } - String type = getTypeName(true, false, field.type()); - writer.println(""); - fieldId++; - } - if (fieldId > 0) { - writer.println("
    Fields
    " + type + - ""); - String constant = field.constantValueExpression(); - - // add a link (a name) if there is a tag - String link = getFieldLink(text, constant, clazz, name); - writer.print("" + name + ""); - if (constant == null) { - writer.println(); - } else { - writer.println(" = " + constant); - } - writer.println("
    "); - } - - // field details - Arrays.sort(fields, new Comparator() { - @Override - public int compare(FieldDoc a, FieldDoc b) { - String ca = a.constantValueExpression(); - if (ca == null) { - ca = a.name(); - } - String cb = b.constantValueExpression(); - if (cb == null) { - cb = b.name(); - } - return ca.compareTo(cb); - } - }); - for (FieldDoc field : fields) { - writeFieldDetails(writer, clazz, field); - } - - writer.println("
    "); - writer.close(); - out.close(); - } - - private void writeFieldDetails(PrintWriter writer, ClassDoc clazz, - FieldDoc field) { - if (skipField(clazz, field)) { - return; - } - String text = field.commentText(); - if (text.startsWith("INTERNAL")) { - return; - } - String name = field.name(); - String constant = field.constantValueExpression(); - String link = getFieldLink(text, constant, clazz, name); - writer.println("

    " + - name); - if (constant == null) { - writer.println(); - } else { - writer.println(" = " + constant); - } - writer.println("

    "); - writer.println("
    " + formatText(text) + "
    "); - writer.println("
    "); - } - - private void writeMethodDetails(PrintWriter writer, ClassDoc clazz, - ExecutableMemberDoc method, String signature) { - String name = method.name(); - if (skipMethod(method)) { - return; - } - Parameter[] params = method.parameters(); - StatementBuilder buff = new StatementBuilder(); - buff.append('('); - int i = 0; - for (Parameter p : params) { - boolean isVarArgs = method.isVarArgs() && i++ == params.length - 1; - buff.appendExceptFirst(", "); - buff.append(getTypeName(false, isVarArgs, p.type())); - buff.append(' '); - buff.append(p.name()); - } - buff.append(')'); - ClassDoc[] exceptions = method.thrownExceptions(); - if (exceptions.length > 0) { - buff.append(" throws "); - buff.resetCount(); - for (ClassDoc ex : exceptions) { - buff.appendExceptFirst(", "); - buff.append(ex.typeName()); - } - } - if (isDeprecated(method)) { - name = "" + name + ""; - } - writer.println("" + - name + "" + buff.toString()); - boolean hasComment = method.commentText() != null && - method.commentText().trim().length() != 0; - writer.println("
    " + - formatText(method.commentText()) + "
    "); - ParamTag[] paramTags = method.paramTags(); - ThrowsTag[] throwsTags = method.throwsTags(); - boolean hasThrowsTag = throwsTags != null && throwsTags.length > 0; - if (paramTags.length != params.length) { - if (hasComment && !method.commentText().startsWith("[")) { - // [Not supported] and such are not problematic - addError("Undocumented parameter(s) (" + - getLink(clazz, method.position().line()) + ") " + - name + " documented: " + paramTags.length + - " params: "+ params.length); - } - } - for (int j = 0; j < paramTags.length; j++) { - String paramName = paramTags[j].parameterName(); - String comment = paramTags[j].parameterComment(); - if (comment.trim().length() == 0) { - addError("Undocumented parameter (" + - getLink(clazz, method.position().line()) + ") " + - name + " " + paramName); - } - String p = paramName + " - " + comment; - if (j == 0) { - writer.println("
    Parameters:
    "); - } - writer.println("
    " + p + "
    "); - } - Tag[] returnTags = method.tags("return"); - Type returnType = getReturnType(method); - if (returnTags != null && returnTags.length > 0) { - writer.println("
    Returns:
    "); - String returnComment = returnTags[0].text(); - if (returnComment.trim().length() == 0) { - addError("Undocumented return value (" + - getLink(clazz, method.position().line()) + ") " + name); - } - writer.println("
    " + returnComment + "
    "); - } else if (returnType != null && !returnType.toString().equals("void")) { - if (hasComment && !method.commentText().startsWith("[") && - !hasThrowsTag) { - // [Not supported] and such are not problematic - // also not problematic are methods that always throw an - // exception - addError("Undocumented return value (" - + getLink(clazz, method.position().line()) + ") " - + name + " " + getReturnType(method)); - } - } - if (hasThrowsTag) { - writer.println("
    Throws:
    "); - for (ThrowsTag tag : throwsTags) { - String p = tag.exceptionName(); - String c = tag.exceptionComment(); - if (c.length() > 0) { - p += " - " + c; - } - writer.println("
    " + p + "
    "); - } - } - } - - private static String getLink(ClassDoc clazz, int line) { - String c = clazz.name(); - int x = c.lastIndexOf('.'); - if (x >= 0) { - c = c.substring(0, x); - } - return c + ".java:" + line; - } - - private String getFieldLink(String text, String constant, ClassDoc clazz, - String name) { - String link = constant != null ? constant : name.toLowerCase(); - int linkStart = text.indexOf(""); - if (linkStart >= 0) { - int linkEnd = text.indexOf("", linkStart); - link = text.substring(linkStart + "".length(), linkEnd); - if (constant != null && !constant.equals(link)) { - System.out.println("Wrong code tag? " + clazz.name() + "." + - name + - " code: " + link + " constant: " + constant); - errorCount++; - } - } - if (link.startsWith("\"")) { - link = name; - } else if (Character.isDigit(link.charAt(0))) { - link = "c" + link; - } - return link; - } - - private static String formatText(String text) { - if (text == null) { - return text; - } - text = StringUtils.replaceAll(text, "\n ", ""); - return text; - } - - private static boolean skipField(ClassDoc clazz, FieldDoc field) { - if (field.isPrivate() || field.containingClass() != clazz) { - return true; - } - return false; - } - - private boolean skipMethod(ExecutableMemberDoc method) { - ClassDoc clazz = method.containingClass(); - boolean isAbstract = method instanceof MethodDoc - && ((MethodDoc) method).isAbstract(); - boolean isInterface = clazz.isInterface() - || (clazz.isAbstract() && isAbstract); - if (INTERFACES_ONLY && !isInterface) { - return true; - } - String name = method.name(); - if (method.isPrivate() || name.equals("finalize")) { - return true; - } - if (method.isConstructor() - && method.getRawCommentText().trim().length() == 0) { - return true; - } - if (method.getRawCommentText().trim() - .startsWith("@deprecated INTERNAL")) { - return true; - } - String firstSentence = getFirstSentence(method.firstSentenceTags()); - String raw = method.getRawCommentText(); - if (firstSentence 
!= null && firstSentence.startsWith("INTERNAL")) { - return true; - } - if ((firstSentence == null || firstSentence.trim().length() == 0) - && raw.indexOf("{@inheritDoc}") < 0) { - if (!doesOverride(method)) { - boolean setterOrGetter = name.startsWith("set") - && method.parameters().length == 1; - setterOrGetter |= name.startsWith("get") - && method.parameters().length == 0; - Type returnType = getReturnType(method); - setterOrGetter |= name.startsWith("is") - && method.parameters().length == 0 - && returnType != null - && returnType.toString().equals("boolean"); - if (!setterOrGetter) { - addError("Undocumented method " + " (" - + getLink(clazz, method.position().line()) + ") " - + clazz + "." + name + " " + raw); - return true; - } - } - } - return false; - } - - private static Type getReturnType(ExecutableMemberDoc method) { - if (method instanceof MethodDoc) { - MethodDoc m = (MethodDoc) method; - return m.returnType(); - } - return null; - } - - private void addError(String s) { - if (errors.add(s)) { - System.out.println(s); - errorCount++; - } - } - - private boolean doesOverride(ExecutableMemberDoc method) { - if (method.isConstructor()) { - return true; - } - ClassDoc clazz = method.containingClass(); - int parameterCount = method.parameters().length; - return foundMethod(clazz, false, method.name(), parameterCount); - } - - private boolean foundMethod(ClassDoc clazz, boolean include, - String methodName, int parameterCount) { - if (include) { - for (MethodDoc m : clazz.methods()) { - if (m.name().equals(methodName) - && m.parameters().length == parameterCount) { - return true; - } - } - } - for (ClassDoc doc : clazz.interfaces()) { - if (foundMethod(doc, true, methodName, parameterCount)) { - return true; - } - } - clazz = clazz.superclass(); - return clazz != null - && foundMethod(clazz, true, methodName, parameterCount); - } - - private static String getFirstSentence(Tag[] tags) { - String firstSentence = null; - if (tags.length > 0) { - Tag first = 
tags[0]; - firstSentence = first.text(); - } - return firstSentence; - } - - private static String getTypeName(boolean isStatic, boolean isVarArgs, - Type type) { - if (type == null) { - return ""; - } - String s = type.typeName() + type.dimension(); - if (isVarArgs) { - // remove the last "[]" and add "..." instead - s = s.substring(0, s.length() - 2) + "..."; - } - if (isStatic) { - s = "static " + s; - } - return s; - } - - private static boolean isDeprecated(ExecutableMemberDoc method) { - for (Tag t : method.tags()) { - if (t.kind().equals("@deprecated")) { - return true; - } - } - return false; - } - - /** - * Get the language version this doclet supports. - * - * @return the language version - */ - public static LanguageVersion languageVersion() { - // otherwise, isVarArgs always returns false - // (which sounds like a bug but is a feature :-) - return LanguageVersion.JAVA_1_5; - } - -} diff --git a/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java b/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java deleted file mode 100644 index bbee612cd7..0000000000 --- a/h2/src/tools/org/h2/build/doclet/ResourceDoclet.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.doclet; - -import java.io.IOException; -import org.h2.build.doc.XMLParser; -import org.h2.build.indexer.HtmlConverter; -import org.h2.util.SortedProperties; -import com.sun.javadoc.ClassDoc; -import com.sun.javadoc.Doc; -import com.sun.javadoc.MethodDoc; -import com.sun.javadoc.RootDoc; -import com.sun.javadoc.Tag; - -/** - * This custom doclet generates resources from javadoc comments. - * Only comments that contain 'at resource' are included. - * Only class level and method level comments are supported. 
- */ -public class ResourceDoclet { - - private String destFile = System.getProperty("h2.javadocResourceFile", - "src/main/org/h2/res/javadoc.properties"); - - private final SortedProperties resources = new SortedProperties(); - - /** - * This method is called by the javadoc framework and is required for all - * doclets. - * - * @param root the root - * @return true if successful - */ - public static boolean start(RootDoc root) throws IOException { - return new ResourceDoclet().startDoc(root); - } - - private boolean startDoc(RootDoc root) throws IOException { - ClassDoc[] classes = root.classes(); - String[][] options = root.options(); - for (String[] op : options) { - if (op[0].equals("dest")) { - destFile = op[1]; - } - } - for (ClassDoc clazz : classes) { - processClass(clazz); - } - resources.store(destFile); - return true; - } - - private void processClass(ClassDoc clazz) { - String packageName = clazz.containingPackage().name(); - String className = clazz.name(); - addResource(packageName + "." + className, clazz); - - for (MethodDoc method : clazz.methods()) { - String name = method.name(); - addResource(packageName + "." + className + "." 
+ name, method); - } - } - - - private void addResource(String key, Doc doc) { - if (!isResource(doc)) { - return; - } - String xhtml = doc.commentText(); - XMLParser p = new XMLParser(xhtml); - StringBuilder buff = new StringBuilder(); - int column = 0; - int firstColumnSize = 0; - boolean inColumn = false; - while (p.hasNext()) { - String s; - switch(p.next()) { - case XMLParser.END_ELEMENT: - s = p.getName(); - if ("p".equals(s) || "tr".equals(s) || "br".equals(s)) { - buff.append('\n'); - } - break; - case XMLParser.START_ELEMENT: - s = p.getName(); - if ("table".equals(s)) { - buff.append('\n'); - } else if ("tr".equals(s)) { - column = 0; - } else if ("td".equals(s)) { - inColumn = true; - column++; - if (column == 2) { - buff.append('\t'); - } - } - break; - case XMLParser.CHARACTERS: - s = HtmlConverter.convertHtmlToString(p.getText().trim()); - if (inColumn && column == 1) { - firstColumnSize = Math.max(s.length(), firstColumnSize); - } - buff.append(s); - break; - } - } - for (int i = 0; i < buff.length(); i++) { - if (buff.charAt(i) == '\t') { - buff.deleteCharAt(i); - int length = i - buff.lastIndexOf("\n", i - 1); - for (int k = length; k < firstColumnSize + 3; k++) { - buff.insert(i, ' '); - } - } - } - String text = buff.toString().trim(); - resources.setProperty(key, text); - } - - private static boolean isResource(Doc doc) { - for (Tag t : doc.tags()) { - if (t.kind().equals("@h2.resource")) { - return true; - } - } - return false; - } - -} diff --git a/h2/src/tools/org/h2/build/doclet/package.html b/h2/src/tools/org/h2/build/doclet/package.html deleted file mode 100644 index e74d46fde5..0000000000 --- a/h2/src/tools/org/h2/build/doclet/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -A Javadoc doclet to build nicer and smaller API Javadoc HTML files. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java b/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java deleted file mode 100644 index 7a0512a29c..0000000000 --- a/h2/src/tools/org/h2/build/i18n/PrepareTranslation.java +++ /dev/null @@ -1,542 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.build.i18n; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Properties; -import java.util.Stack; -import org.h2.build.doc.XMLParser; -import org.h2.server.web.PageParser; -import org.h2.util.IOUtils; -import org.h2.util.New; -import org.h2.util.SortedProperties; -import org.h2.util.StringUtils; - -/** - * This class updates the translation source code files by parsing - * the HTML documentation. It also generates the translated HTML - * documentation. - */ -public class PrepareTranslation { - private static final String MAIN_LANGUAGE = "en"; - private static final String[] EXCLUDE = { "datatypes.html", - "functions.html", "grammar.html" }; - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... 
args) throws Exception { - String baseDir = "src/docsrc/textbase"; - prepare(baseDir, "src/main/org/h2/res", true); - prepare(baseDir, "src/main/org/h2/server/web/res", true); - - // convert the txt files to properties files - PropertiesToUTF8.textUTF8ToProperties( - "src/docsrc/text/_docs_de.utf8.txt", - "src/docsrc/text/_docs_de.properties"); - PropertiesToUTF8.textUTF8ToProperties( - "src/docsrc/text/_docs_ja.utf8.txt", - "src/docsrc/text/_docs_ja.properties"); - - // create the .jsp files and extract the text in the main language - extractFromHtml("docs/html", "src/docsrc/text"); - - // add missing translations and create a new baseline - prepare(baseDir, "src/docsrc/text", false); - - // create the translated documentation - buildHtml("src/docsrc/text", "docs/html", "en"); - // buildHtml("src/docsrc/text", "docs/html", "de"); - // buildHtml("src/docsrc/text", "docs/html", "ja"); - - // convert the properties files back to utf8 text files, including the - // main language (to be used as a template) - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_en.properties", - "src/docsrc/text/_docs_en.utf8.txt"); - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_de.properties", - "src/docsrc/text/_docs_de.utf8.txt"); - PropertiesToUTF8.propertiesToTextUTF8( - "src/docsrc/text/_docs_ja.properties", - "src/docsrc/text/_docs_ja.utf8.txt"); - - // delete temporary files - for (File f : new File("src/docsrc/text").listFiles()) { - if (!f.getName().endsWith(".utf8.txt")) { - f.delete(); - } - } - } - - private static void buildHtml(String templateDir, String targetDir, - String language) throws IOException { - File[] list = new File(templateDir).listFiles(); - new File(targetDir).mkdirs(); - // load the main 'translation' - String propName = templateDir + "/_docs_" + MAIN_LANGUAGE - + ".properties"; - Properties prop = load(propName, false); - propName = templateDir + "/_docs_" + language + ".properties"; - if (!(new File(propName)).exists()) { - 
throw new IOException("Translation not found: " + propName); - } - Properties transProp = load(propName, false); - for (Object k : transProp.keySet()) { - String key = (String) k; - String t = transProp.getProperty(key); - // overload with translations, but not the ones starting with # - if (t.startsWith("##")) { - prop.put(key, t.substring(2)); - } else if (!t.startsWith("#")) { - prop.put(key, t); - } - } - ArrayList fileNames = new ArrayList(); - for (File f : list) { - String name = f.getName(); - if (!name.endsWith(".jsp")) { - continue; - } - // remove '.jsp' - name = name.substring(0, name.length() - 4); - fileNames.add(name); - } - for (File f : list) { - String name = f.getName(); - if (!name.endsWith(".jsp")) { - continue; - } - // remove '.jsp' - name = name.substring(0, name.length() - 4); - String template = IOUtils.readStringAndClose(new FileReader( - templateDir + "/" + name + ".jsp"), -1); - HashMap map = New.hashMap(); - for (Object k : prop.keySet()) { - map.put(k.toString(), prop.get(k)); - } - String html = PageParser.parse(template, map); - html = StringUtils.replaceAll(html, "lang=\"" + MAIN_LANGUAGE - + "\"", "lang=\"" + language + "\""); - for (String n : fileNames) { - if ("frame".equals(n)) { - // don't translate 'frame.html' to 'frame_ja.html', - // otherwise we can't switch back to English - continue; - } - html = StringUtils.replaceAll(html, n + ".html\"", n + "_" - + language + ".html\""); - } - html = StringUtils.replaceAll(html, - "_" + MAIN_LANGUAGE + ".html\"", ".html\""); - String target; - if (language.equals(MAIN_LANGUAGE)) { - target = targetDir + "/" + name + ".html"; - } else { - target = targetDir + "/" + name + "_" + language + ".html"; - } - OutputStream out = new FileOutputStream(target); - OutputStreamWriter writer = new OutputStreamWriter(out, "UTF-8"); - writer.write(html); - writer.close(); - } - } - - private static boolean exclude(String fileName) { - for (String e : EXCLUDE) { - if (fileName.endsWith(e)) { - return 
true; - } - } - return false; - } - - private static void extractFromHtml(String dir, String target) - throws Exception { - for (File f : new File(dir).listFiles()) { - String name = f.getName(); - if (!name.endsWith(".html")) { - continue; - } - if (exclude(name)) { - continue; - } - // remove '.html' - name = name.substring(0, name.length() - 5); - if (name.indexOf('_') >= 0) { - // ignore translated files - continue; - } - String template = extract(name, f, target); - FileWriter writer = new FileWriter(target + "/" + name + ".jsp"); - writer.write(template); - writer.close(); - } - } - - // private static boolean isText(String s) { - // if (s.length() < 2) { - // return false; - // } - // for (int i = 0; i < s.length(); i++) { - // char c = s.charAt(i); - // if (!Character.isDigit(c) && c != '.' && c != '-' && c != '+') { - // return true; - // } - // } - // return false; - // } - - private static String getSpace(String s, boolean start) { - if (start) { - for (int i = 0; i < s.length(); i++) { - if (!Character.isSpaceChar(s.charAt(i))) { - if (i == 0) { - return ""; - } - return s.substring(0, i); - } - } - return s; - } - for (int i = s.length() - 1; i >= 0; i--) { - if (!Character.isSpaceChar(s.charAt(i))) { - if (i == s.length() - 1) { - return ""; - } - return s.substring(i + 1, s.length()); - } - } - // if all spaces, return an empty string to avoid duplicate spaces - return ""; - } - - private static String extract(String documentName, File f, String target) - throws Exception { - String xml = IOUtils.readStringAndClose(new InputStreamReader( - new FileInputStream(f), "UTF-8"), -1); - // the template contains ${} instead of text - StringBuilder template = new StringBuilder(xml.length()); - int id = 0; - SortedProperties prop = new SortedProperties(); - XMLParser parser = new XMLParser(xml); - StringBuilder buff = new StringBuilder(); - Stack stack = new Stack(); - String tag = ""; - boolean ignoreEnd = false; - String nextKey = ""; - // for debugging - 
boolean templateIsCopy = false; - while (true) { - int event = parser.next(); - if (event == XMLParser.END_DOCUMENT) { - break; - } else if (event == XMLParser.CHARACTERS) { - String s = parser.getText(); - if (s.trim().length() == 0) { - if (buff.length() > 0) { - buff.append(s); - } else { - template.append(s); - } - } else if ("p".equals(tag) || "li".equals(tag) - || "a".equals(tag) || "td".equals(tag) - || "th".equals(tag) || "h1".equals(tag) - || "h2".equals(tag) || "h3".equals(tag) - || "h4".equals(tag) || "body".equals(tag) - || "b".equals(tag) || "code".equals(tag) - || "form".equals(tag) || "span".equals(tag) - || "em".equals(tag) || "div".equals(tag) - || "label".equals(tag)) { - if (buff.length() == 0) { - nextKey = documentName + "_" + (1000 + id++) + "_" - + tag; - template.append(getSpace(s, true)); - } else if (templateIsCopy) { - buff.append(getSpace(s, true)); - } - buff.append(s); - } else if ("pre".equals(tag) || "title".equals(tag) - || "script".equals(tag) || "style".equals(tag)) { - // ignore, don't translate - template.append(s); - } else { - System.out.println(f.getName() - + " invalid wrapper tag for text: " + tag - + " text: " + s); - System.out.println(parser.getRemaining()); - throw new Exception(); - } - } else if (event == XMLParser.START_ELEMENT) { - stack.add(tag); - String name = parser.getName(); - if ("code".equals(name) || "a".equals(name) || "b".equals(name) - || "span".equals(name)) { - // keep tags if wrapped, but not if this is the wrapper - if (buff.length() > 0) { - buff.append(parser.getToken()); - ignoreEnd = false; - } else { - ignoreEnd = true; - template.append(parser.getToken()); - } - } else if ("p".equals(tag) || "li".equals(tag) - || "td".equals(tag) || "th".equals(tag) - || "h1".equals(tag) || "h2".equals(tag) - || "h3".equals(tag) || "h4".equals(tag) - || "body".equals(tag) || "form".equals(tag)) { - if (buff.length() > 0) { - if (templateIsCopy) { - template.append(buff.toString()); - } else { - 
template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } else { - template.append(parser.getToken()); - } - tag = name; - } else if (event == XMLParser.END_ELEMENT) { - String name = parser.getName(); - if ("code".equals(name) || "a".equals(name) || "b".equals(name) - || "span".equals(name) || "em".equals(name)) { - if (ignoreEnd) { - if (buff.length() > 0) { - if (templateIsCopy) { - template.append(buff.toString()); - } else { - template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } else { - if (buff.length() > 0) { - buff.append(parser.getToken()); - } - } - } else { - if (buff.length() > 0) { - if (templateIsCopy) { - template.append(buff.toString()); - } else { - template.append("${" + nextKey + "}"); - } - add(prop, nextKey, buff); - } - template.append(parser.getToken()); - } - tag = stack.pop(); - } else if (event == XMLParser.DTD) { - template.append(parser.getToken()); - } else if (event == XMLParser.COMMENT) { - template.append(parser.getToken()); - } else { - int eventType = parser.getEventType(); - throw new Exception("Unexpected event " + eventType + " at " - + parser.getRemaining()); - } - // if(!xml.startsWith(template.toString())) { - // System.out.println(nextKey); - // System.out.println(template.substring(template.length()-60) - // +";"); - // System.out.println(xml.substring(template.length()-60, - // template.length())); - // System.out.println(template.substring(template.length()-55) - // +";"); - // System.out.println(xml.substring(template.length()-55, - // template.length())); - // break; - // } - } - new File(target).mkdirs(); - String propFileName = target + "/_docs_" + MAIN_LANGUAGE + ".properties"; - Properties old = load(propFileName, false); - prop.putAll(old); - store(prop, propFileName, false); - String t = template.toString(); - if (templateIsCopy && !t.equals(xml)) { - for (int i = 0; i < Math.min(t.length(), 
xml.length()); i++) { - if (t.charAt(i) != xml.charAt(i)) { - int start = Math.max(0, i - 30), end = Math.min(i + 30, xml.length()); - t = t.substring(start, end); - xml = xml.substring(start, end); - } - } - System.out.println("xml--------------------------------------------------: "); - System.out.println(xml); - System.out.println("t---------------------------------------------------: "); - System.out.println(t); - System.exit(1); - } - return t; - } - - private static String clean(String text) { - if (text.indexOf('\r') < 0 && text.indexOf('\n') < 0) { - return text; - } - text = text.replace('\r', ' '); - text = text.replace('\n', ' '); - while (true) { - String s = StringUtils.replaceAll(text, " ", " "); - if (s.equals(text)) { - break; - } - text = s; - } - return text; - } - - private static void add(Properties prop, String document, StringBuilder text) { - String s = clean(text.toString()); - text.setLength(0); - prop.setProperty(document, s); - } - - private static void prepare(String baseDir, String path, boolean utf8) - throws IOException { - String suffix = utf8 ? 
".prop" : ".properties"; - File dir = new File(path); - File main = null; - ArrayList translations = new ArrayList(); - for (File f : dir.listFiles()) { - if (f.getName().endsWith(suffix) && f.getName().indexOf('_') >= 0) { - if (f.getName().endsWith("_" + MAIN_LANGUAGE + suffix)) { - main = f; - } else { - translations.add(f); - } - } - } - SortedProperties p = load(main.getAbsolutePath(), utf8); - Properties base = load(baseDir + "/" + main.getName(), utf8); - store(p, main.getAbsolutePath(), utf8); - for (File trans : translations) { - String language = trans.getName(); - language = language.substring(language.lastIndexOf('_') + 1, - language.lastIndexOf('.')); - prepare(p, base, trans, utf8); - } - store(p, baseDir + "/" + main.getName(), utf8); - } - - private static SortedProperties load(String fileName, boolean utf8) - throws IOException { - if (utf8) { - String s = new String(IOUtils.readBytesAndClose( - new FileInputStream(fileName), -1), "UTF-8"); - return SortedProperties.fromLines(s); - } - return SortedProperties.loadProperties(fileName); - } - - private static void store(SortedProperties p, String fileName, boolean utf8) - throws IOException { - if (utf8) { - String s = p.toLines(); - FileOutputStream f = new FileOutputStream(fileName); - f.write(s.getBytes("UTF-8")); - f.close(); - } else { - p.store(fileName); - } - } - - private static void prepare(Properties main, Properties base, File trans, - boolean utf8) throws IOException { - SortedProperties p = load(trans.getAbsolutePath(), utf8); - Properties oldTranslations = new Properties(); - for (Object k : base.keySet()) { - String key = (String) k; - String m = base.getProperty(key); - String t = p.getProperty(key); - if (t != null && !t.startsWith("#")) { - oldTranslations.setProperty(m, t); - } - } - HashSet toTranslate = new HashSet(); - // add missing keys, using # and the value from the main file - for (Object k : main.keySet()) { - String key = (String) k; - String now = main.getProperty(key); 
- if (!p.containsKey(key)) { - String t = oldTranslations.getProperty(now); - if (t == null) { - // System.out.println(trans.getName() + - // ": key " + key + " not found in " + - // "translation file; added # 'translation'"); - t = "#" + now; - p.put(key, t); - } else { - p.put(key, t); - } - } else { - String t = p.getProperty(key); - String last = base.getProperty(key); - if (t.startsWith("#") && !t.startsWith("##")) { - // not translated before - t = oldTranslations.getProperty(now); - if (t == null) { - t = "#" + now; - } - p.put(key, t); - } else if (last != null && !last.equals(now)) { - t = oldTranslations.getProperty(now); - if (t == null) { - // main data changed since the last run: review - // translation - System.out.println(trans.getName() + ": key " + key - + " changed, please review; last=" + last - + " now=" + now); - String old = p.getProperty(key); - t = "#" + now + " #" + old; - p.put(key, t); - } else { - p.put(key, t); - } - } - } - } - for (String key : toTranslate) { - String now = main.getProperty(key); - String t; - System.out - .println(trans.getName() - + ": key " - + key - + " not found in translation file; added dummy # 'translation'"); - t = "#" + now; - p.put(key, t); - } - // remove keys that don't exist in the main file - // (deleted or typo in the key) - for (Object k : new ArrayList(p.keySet())) { - String key = (String) k; - if (!main.containsKey(key)) { - p.remove(key); - } - } - store(p, trans.getAbsolutePath(), utf8); - } - -} diff --git a/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java b/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java deleted file mode 100644 index 9d18523ec5..0000000000 --- a/h2/src/tools/org/h2/build/i18n/PropertiesToUTF8.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.build.i18n; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; -import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.io.RandomAccessFile; -import java.util.Enumeration; -import java.util.Properties; -import org.h2.build.code.CheckTextFiles; -import org.h2.build.indexer.HtmlConverter; -import org.h2.util.IOUtils; -import org.h2.util.SortedProperties; -import org.h2.util.StringUtils; - -/** - * This class converts a file stored in the UTF-8 encoding format to - * a properties file and vice versa. - */ -public class PropertiesToUTF8 { - - private PropertiesToUTF8() { - // utility class - } - - /** - * This method is called when executing this application from the command - * line. - * - * @param args the command line parameters - */ - public static void main(String... args) throws Exception { - convert("bin/org/h2/res"); - convert("bin/org/h2/server/web/res"); - } - - /** - * Convert a properties file to a UTF-8 text file. - * - * @param source the name of the properties file - * @param target the target file name - */ - static void propertiesToTextUTF8(String source, String target) - throws Exception { - if (!new File(source).exists()) { - return; - } - Properties prop = SortedProperties.loadProperties(source); - FileOutputStream out = new FileOutputStream(target); - PrintWriter writer = new PrintWriter(new OutputStreamWriter(out, "UTF-8")); - // keys is sorted - for (Enumeration en = prop.keys(); en.hasMoreElements();) { - String key = (String) en.nextElement(); - String value = prop.getProperty(key, null); - writer.print("@" + key + "\n"); - writer.print(value + "\n\n"); - } - writer.close(); - } - - /** - * Convert a translation file (in UTF-8) to a properties file (without - * special characters). 
- * - * @param source the source file name - * @param target the target file name - */ - static void textUTF8ToProperties(String source, String target) - throws Exception { - if (!new File(source).exists()) { - return; - } - LineNumberReader reader = new LineNumberReader(new InputStreamReader( - new FileInputStream(source), "UTF-8")); - try { - SortedProperties prop = new SortedProperties(); - StringBuilder buff = new StringBuilder(); - String key = null; - boolean found = false; - while (true) { - String line = reader.readLine(); - if (line == null) { - break; - } - line = line.trim(); - if (line.length() == 0) { - continue; - } - if (line.startsWith("@")) { - if (key != null) { - prop.setProperty(key, buff.toString()); - buff.setLength(0); - } - found = true; - key = line.substring(1); - } else { - if (buff.length() > 0) { - buff.append(System.getProperty("line.separator")); - } - buff.append(line); - } - } - if (found) { - prop.setProperty(key, buff.toString()); - } - prop.store(target); - } finally { - reader.close(); - } - } - - private static void convert(String source) throws Exception { - for (File f : new File(source).listFiles()) { - if (!f.getName().endsWith(".properties")) { - continue; - } - FileInputStream in = new FileInputStream(f); - InputStreamReader r = new InputStreamReader(in, "UTF-8"); - String s = IOUtils.readStringAndClose(r, -1); - in.close(); - String name = f.getName(); - String utf8, html; - if (name.startsWith("utf8")) { - utf8 = HtmlConverter.convertHtmlToString(s); - html = HtmlConverter.convertStringToHtml(utf8); - RandomAccessFile out = new RandomAccessFile("_" + name.substring(4), "rw"); - out.write(html.getBytes()); - out.setLength(out.getFilePointer()); - out.close(); - } else { - new CheckTextFiles().checkOrFixFile(f, false, false); - html = s; - utf8 = HtmlConverter.convertHtmlToString(html); - // s = unescapeHtml(s); - utf8 = StringUtils.javaDecode(utf8); - FileOutputStream out = new FileOutputStream("_utf8" + f.getName()); - 
OutputStreamWriter w = new OutputStreamWriter(out, "UTF-8"); - w.write(utf8); - w.close(); - out.close(); - } - String java = StringUtils.javaEncode(utf8); - java = StringUtils.replaceAll(java, "\\r", "\r"); - java = StringUtils.replaceAll(java, "\\n", "\n"); - RandomAccessFile out = new RandomAccessFile("_java." + name, "rw"); - out.write(java.getBytes()); - out.setLength(out.getFilePointer()); - out.close(); - } - } - -} diff --git a/h2/src/tools/org/h2/build/i18n/package.html b/h2/src/tools/org/h2/build/i18n/package.html deleted file mode 100644 index b1f5ca17e1..0000000000 --- a/h2/src/tools/org/h2/build/i18n/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -Internationalization tools. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/build/indexer/HtmlConverter.java b/h2/src/tools/org/h2/build/indexer/HtmlConverter.java index 5fcfb97afe..7d226a84e4 100644 --- a/h2/src/tools/org/h2/build/indexer/HtmlConverter.java +++ b/h2/src/tools/org/h2/build/indexer/HtmlConverter.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; @@ -14,9 +14,9 @@ public class HtmlConverter { private static final HashMap CHAR_MAP = - new HashMap(); + new HashMap<>(); private static final HashMap CODE_MAP = - new HashMap(); + new HashMap<>(); private static final String[] CHARS = { "quot:34", "amp:38", "lt:60", "gt:62", "nbsp:160", "iexcl:161", "cent:162", "pound:163", diff --git a/h2/src/tools/org/h2/build/indexer/Indexer.java b/h2/src/tools/org/h2/build/indexer/Indexer.java index c77917c15a..a324cce6c1 100644 --- a/h2/src/tools/org/h2/build/indexer/Indexer.java +++ b/h2/src/tools/org/h2/build/indexer/Indexer.java @@ -1,21 +1,23 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.indexer; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileWriter; +import java.io.IOException; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.StringTokenizer; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -35,13 +37,13 @@ public class Indexer { "also;back;after;use;two;how;our;work;first;well;way;even;new;want;" + "because;any;these;give;most;us;"; - private final ArrayList pages = new ArrayList(); + private final ArrayList pages = new ArrayList<>(); /** * Lower case word to Word map. */ - private final HashMap words = new HashMap(); - private final HashSet noIndex = new HashSet(); + private final HashMap words = new HashMap<>(); + private final HashSet noIndex = new HashSet<>(); private ArrayList wordList; private PrintWriter output; private Page page; @@ -54,6 +56,7 @@ public class Indexer { * line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new Indexer().run(args); @@ -69,7 +72,7 @@ private void run(String... args) throws Exception { destDir = args[++i]; } } - File file = new File(dir); + Path directory = Paths.get(dir); setNoIndex("index.html", "html/header.html", "html/search.html", "html/frame.html", "html/fragments.html", "html/sourceError.html", "html/source.html", @@ -78,8 +81,14 @@ private void run(String... 
args) throws Exception { "javadoc/allclasses-noframe.html", "javadoc/constant-values.html", "javadoc/overview-frame.html", "javadoc/overview-summary.html", "javadoc/serialized-form.html"); - output = new PrintWriter(new FileWriter(destDir + "/index.js")); - readPages("", file, 0); + output = new PrintWriter(Files.newBufferedWriter(Paths.get(destDir + "/index.js"))); + Files.walkFileTree(directory, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + readPages(directory.relativize(file).toString().replace('\\', '/'), file); + return FileVisitResult.CONTINUE; + } + }); output.println("var pages=new Array();"); output.println("var ref=new Array();"); output.println("var ignored='';"); @@ -103,7 +112,7 @@ private void setNoIndex(String... strings) { } private void sortWords() { - for (String name : new ArrayList(words.keySet())) { + for (String name : new ArrayList<>(words.keySet())) { if (name.endsWith("s")) { String singular = name.substring(0, name.length() - 1); if (words.containsKey(singular)) { @@ -116,7 +125,7 @@ private void sortWords() { words.remove(name); } } - wordList = new ArrayList(words.values()); + wordList = new ArrayList<>(words.values()); // ignored very common words (to shrink the index) StringBuilder ignoredBuff = new StringBuilder(";"); int maxSize = pages.size() / 4; @@ -134,12 +143,7 @@ private void sortWords() { ignored = ignoredBuff.toString(); // TODO support A, B, C,... 
class links in the index file and use them // for combined AND searches - Collections.sort(wordList, new Comparator() { - @Override - public int compare(Word w0, Word w1) { - return w0.name.compareToIgnoreCase(w1.name); - } - }); + wordList.sort((w0, w1) -> w0.name.compareToIgnoreCase(w1.name)); } private void removeOverflowRelations() { @@ -164,12 +168,7 @@ private void removeOverflowRelations() { } private void sortPages() { - Collections.sort(pages, new Comparator() { - @Override - public int compare(Page p0, Page p1) { - return p0.relations == p1.relations ? 0 : p0.relations < p1.relations ? 1 : -1; - } - }); + pages.sort((p0, p1) -> Integer.compare(p1.relations, p0.relations)); for (int i = 0; i < pages.size(); i++) { pages.get(i).id = i; } @@ -182,22 +181,17 @@ private void listPages() { } } - private void readPages(String dir, File file, int level) throws Exception { - String name = file.getName(); - String fileName = dir.length() > 0 ? dir + "/" + name : level > 0 ? name : ""; - if (file.isDirectory()) { - for (File f : file.listFiles()) { - readPages(fileName, f, level + 1); - } - return; - } - String lower = StringUtils.toLowerEnglish(name); + /** + * Read the pages of a file. 
+ * + * @param fileName the file name + * @param file the path + */ + void readPages(String fileName, Path file) throws IOException { + String lower = StringUtils.toLowerEnglish(fileName); if (!lower.endsWith(".html") && !lower.endsWith(".htm")) { return; } - if (lower.contains("_ja.")) { - return; - } if (!noIndex.contains(fileName)) { page = new Page(pages.size(), fileName); pages.add(page); @@ -254,9 +248,8 @@ private void listWords() { output.println("ignored='" + ignored.toLowerCase() + "';"); } - private void readPage(File file) throws Exception { - byte[] data = IOUtils.readBytesAndClose(new FileInputStream(file), 0); - String text = new String(data, "UTF-8"); + private void readPage(Path file) throws IOException { + String text = new String(Files.readAllBytes(file), StandardCharsets.UTF_8); StringTokenizer t = new StringTokenizer(text, "<> \r\n", true); boolean inTag = false; title = false; @@ -311,8 +304,9 @@ private void readPage(File file) throws Exception { } if (page.title == null || page.title.trim().length() == 0) { - System.out.println("Error: not title found in " + file.getName()); - page.title = file.getName(); + String title = file.getFileName().toString(); + System.out.println("Error: not title found in " + title); + page.title = title; } page.title = page.title.trim(); } diff --git a/h2/src/tools/org/h2/build/indexer/Page.java b/h2/src/tools/org/h2/build/indexer/Page.java index 58f8075dd1..4950c9905c 100644 --- a/h2/src/tools/org/h2/build/indexer/Page.java +++ b/h2/src/tools/org/h2/build/indexer/Page.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.build.indexer; diff --git a/h2/src/tools/org/h2/build/indexer/Weight.java b/h2/src/tools/org/h2/build/indexer/Weight.java index c2d449f775..f44a95ec81 100644 --- a/h2/src/tools/org/h2/build/indexer/Weight.java +++ b/h2/src/tools/org/h2/build/indexer/Weight.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; diff --git a/h2/src/tools/org/h2/build/indexer/Word.java b/h2/src/tools/org/h2/build/indexer/Word.java index 48095734b0..4015491412 100644 --- a/h2/src/tools/org/h2/build/indexer/Word.java +++ b/h2/src/tools/org/h2/build/indexer/Word.java @@ -1,13 +1,11 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.build.indexer; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.Map.Entry; @@ -24,7 +22,7 @@ public class Word { /** * The pages map. */ - final HashMap pages = new HashMap(); + final HashMap pages = new HashMap<>(); private ArrayList weightList; @@ -69,13 +67,8 @@ void addAll(Word other) { ArrayList getSortedWeights() { if (weightList == null) { - weightList = new ArrayList(pages.values()); - Collections.sort(weightList, new Comparator() { - @Override - public int compare(Weight w0, Weight w1) { - return w0.value < w1.value ? 1 : w0.value == w1.value ? 
0 : -1; - } - }); + weightList = new ArrayList<>(pages.values()); + weightList.sort((w0, w1) -> Integer.compare(w1.value, w0.value)); } return weightList; } diff --git a/h2/src/tools/org/h2/build/indexer/package.html b/h2/src/tools/org/h2/build/indexer/package.html index 8dba009a17..e982aed7a6 100644 --- a/h2/src/tools/org/h2/build/indexer/package.html +++ b/h2/src/tools/org/h2/build/indexer/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/build/package.html b/h2/src/tools/org/h2/build/package.html index d01e3d2667..b4d57cdf3f 100644 --- a/h2/src/tools/org/h2/build/package.html +++ b/h2/src/tools/org/h2/build/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/cache/CacheLIRS.java b/h2/src/tools/org/h2/dev/cache/CacheLIRS.java index 65cd95ae4d..7667cb3a0c 100644 --- a/h2/src/tools/org/h2/dev/cache/CacheLIRS.java +++ b/h2/src/tools/org/h2/dev/cache/CacheLIRS.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.cache; @@ -24,9 +24,9 @@ * at most the specified amount of memory. The memory unit is not relevant, * however it is suggested to use bytes as the unit. *

    - * This class implements an approximation of the the LIRS replacement algorithm + * This class implements an approximation of the LIRS replacement algorithm * invented by Xiaodong Zhang and Song Jiang as described in - * http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few + * https://web.cse.ohio-state.edu/~zhang.574/lirs-sigmetrics-02.html with a few * smaller changes: An additional queue for non-resident entries is used, to * prevent unbound memory usage. The maximum size of this queue is at most the * size of the rest of the stack. About 6.25% of the mapped entries are cold. @@ -101,7 +101,7 @@ public CacheLIRS(long maxMemory, int segmentCount, public void clear() { long max = Math.max(1, maxMemory / segmentCount); for (int i = 0; i < segmentCount; i++) { - segments[i] = new Segment( + segments[i] = new Segment<>( this, max, stackMoveDistance, 8); } } @@ -169,7 +169,7 @@ private Segment resizeIfNeeded(Segment s, int segmentIndex) { Segment s2 = segments[segmentIndex]; if (s == s2) { // no other thread resized, so we do - s = new Segment(s, newLen); + s = new Segment<>(s, newLen); segments[segmentIndex] = s; } return s; @@ -194,6 +194,7 @@ public V put(K key, V value) { * @param value the value * @return the size */ + @SuppressWarnings("unused") protected int sizeOf(K key, V value) { return 1; } @@ -204,7 +205,7 @@ protected int sizeOf(K key, V value) { * * @param key the key */ - protected void onRemove(K key) { + protected void onRemove(@SuppressWarnings("unused") K key) { // do nothing } @@ -328,7 +329,7 @@ public long getMaxMemory() { */ @Override public Set> entrySet() { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); for (K k : keySet()) { map.put(k, find(k).value); } @@ -342,7 +343,7 @@ public Set> entrySet() { */ @Override public Set keySet() { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (Segment s : segments) { set.addAll(s.keySet()); } @@ -411,7 +412,7 @@ public int size() { * @return the 
key list */ public List keys(boolean cold, boolean nonResident) { - ArrayList keys = new ArrayList(); + ArrayList keys = new ArrayList<>(); for (Segment s : segments) { keys.addAll(s.keys(cold, nonResident)); } @@ -523,11 +524,11 @@ private static class Segment { mask = len - 1; // initialize the stack and queue heads - stack = new Entry(); + stack = new Entry<>(); stack.stackPrev = stack.stackNext = stack; - queue = new Entry(); + queue = new Entry<>(); queue.queuePrev = queue.queueNext = queue; - queue2 = new Entry(); + queue2 = new Entry<>(); queue2.queuePrev = queue2.queueNext = queue2; @SuppressWarnings("unchecked") @@ -601,7 +602,7 @@ private void addToMap(Entry e) { } private static Entry copy(Entry old) { - Entry e = new Entry(); + Entry e = new Entry<>(); e.key = old.key; e.value = old.value; e.memory = old.memory; @@ -728,7 +729,7 @@ synchronized V put(K key, int hash, V value, int memory) { // the new entry is too big to fit return old; } - e = new Entry(); + e = new Entry<>(); e.key = key; e.value = value; e.memory = memory; @@ -950,7 +951,7 @@ private void removeFromQueue(Entry e) { * @return the key list */ synchronized List keys(boolean cold, boolean nonResident) { - ArrayList keys = new ArrayList(); + ArrayList keys = new ArrayList<>(); if (cold) { Entry start = nonResident ? 
queue2 : queue; for (Entry e = start.queueNext; e != start; @@ -985,7 +986,7 @@ boolean containsKey(Object key, int hash) { * @return the set of keys */ synchronized Set keySet() { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (Entry e = stack.stackNext; e != stack; e = e.stackNext) { set.add(e.key); } diff --git a/h2/src/tools/org/h2/dev/cache/package.html b/h2/src/tools/org/h2/dev/cache/package.html index 24a9d861b4..b72f46deed 100644 --- a/h2/src/tools/org/h2/dev/cache/package.html +++ b/h2/src/tools/org/h2/dev/cache/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/cluster/ShardedMap.java b/h2/src/tools/org/h2/dev/cluster/ShardedMap.java index 117fb2c069..2ac17eb658 100644 --- a/h2/src/tools/org/h2/dev/cluster/ShardedMap.java +++ b/h2/src/tools/org/h2/dev/cluster/ShardedMap.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.cluster; @@ -12,7 +12,6 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; - import org.h2.mvstore.DataUtils; import org.h2.mvstore.type.DataType; import org.h2.mvstore.type.ObjectDataType; @@ -24,10 +23,9 @@ * @param the key type * @param the value type */ -public class ShardedMap extends AbstractMap - implements Map { +public class ShardedMap extends AbstractMap { - private final DataType keyType; + private final DataType keyType; /** * The shards. 
Each shard has a minimum and a maximum key (null for no @@ -74,7 +72,7 @@ public void addMap(Map map, K min, K max) { } int len = shards.length + 1; Shard[] newShards = Arrays.copyOf(shards, len); - Shard newShard = new Shard(); + Shard newShard = new Shard<>(); newShard.map = map; newShard.minIncluding = min; newShard.maxExcluding = max; @@ -180,7 +178,7 @@ public Set> entrySet() { } } if (isSimpleSplit(copy)) { - return new CombinedSet(size(), copy); + return new CombinedSet<>(size(), copy); } return null; } @@ -278,11 +276,6 @@ public Entry next() { return e; } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; } diff --git a/h2/src/tools/org/h2/dev/cluster/package.html b/h2/src/tools/org/h2/dev/cluster/package.html index 070c4d4105..5e941c9d23 100644 --- a/h2/src/tools/org/h2/dev/cluster/package.html +++ b/h2/src/tools/org/h2/dev/cluster/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/fs/ArchiveTool.java b/h2/src/tools/org/h2/dev/fs/ArchiveTool.java index c11a45020e..08128e953e 100644 --- a/h2/src/tools/org/h2/dev/fs/ArchiveTool.java +++ b/h2/src/tools/org/h2/dev/fs/ArchiveTool.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -18,10 +18,15 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; +import java.util.List; import java.util.TreeMap; import java.util.TreeSet; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.Deflater; import java.util.zip.DeflaterOutputStream; @@ -54,13 +59,15 @@ public class ArchiveTool { * @param args the command line arguments */ public static void main(String... args) throws Exception { + Log log = new Log(); + int level = Integer.getInteger("level", Deflater.BEST_SPEED); if (args.length == 1) { File f = new File(args[0]); if (f.exists()) { if (f.isDirectory()) { String fromDir = f.getAbsolutePath(); String toFile = fromDir + ".at"; - compress(fromDir, toFile); + compress(fromDir, toFile, level); return; } String fromFile = f.getAbsolutePath(); @@ -76,23 +83,25 @@ public static void main(String... 
args) throws Exception { if ("-compress".equals(arg)) { String toFile = args[1]; String fromDir = args[2]; - compress(fromDir, toFile); + compress(fromDir, toFile, level); } else if ("-extract".equals(arg)) { String fromFile = args[1]; String toDir = args[2]; extract(fromFile, toDir); } else { - System.out.println("An archive tool to efficiently compress large directories"); - System.out.println("Command line options:"); - System.out.println(""); - System.out.println(""); - System.out.println("-compress "); - System.out.println("-extract "); + log.println("An archive tool to efficiently compress large directories"); + log.println("Command line options:"); + log.println(""); + log.println(""); + log.println("-compress "); + log.println("-extract "); } } - private static void compress(String fromDir, String toFile) throws IOException { - final long start = System.currentTimeMillis(); + private static void compress(String fromDir, String toFile, int level) throws IOException { + final Log log = new Log(); + final long start = System.nanoTime(); + final long startMs = System.currentTimeMillis(); final AtomicBoolean title = new AtomicBoolean(); long size = getSize(new File(fromDir), new Runnable() { int count; @@ -101,45 +110,48 @@ private static void compress(String fromDir, String toFile) throws IOException { public void run() { count++; if (count % 1000 == 0) { - long now = System.currentTimeMillis(); - if (now - lastTime > 3000) { + long now = System.nanoTime(); + if (now - lastTime > TimeUnit.SECONDS.toNanos(3)) { if (!title.getAndSet(true)) { - System.out.println("Counting files"); + log.println("Counting files"); } - System.out.print(count + " "); + log.print(count + " "); lastTime = now; } } } }); if (title.get()) { - System.out.println(); + log.println(); } - System.out.println("Compressing " + size / MB + " MB"); + log.println("Compressing " + size / MB + " MB at " + + new java.sql.Time(startMs).toString()); InputStream in = getDirectoryInputStream(fromDir); 
String temp = toFile + ".temp"; OutputStream out = new BufferedOutputStream( new FileOutputStream(toFile), 1024 * 1024); Deflater def = new Deflater(); - def.setLevel(Deflater.BEST_SPEED); + def.setLevel(level); out = new BufferedOutputStream( new DeflaterOutputStream(out, def), 1024 * 1024); - sort(in, out, temp, size); + sort(log, in, out, temp, size); in.close(); out.close(); - System.out.println(); - System.out.println("Compressed to " + + log.println(); + log.println("Compressed to " + new File(toFile).length() / MB + " MB in " + - (System.currentTimeMillis() - start) / 1000 + + TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) + " seconds"); - System.out.println(); + log.println(); } private static void extract(String fromFile, String toDir) throws IOException { - long start = System.currentTimeMillis(); + Log log = new Log(); + long start = System.nanoTime(); + long startMs = System.currentTimeMillis(); long size = new File(fromFile).length(); - System.out.println("Extracting " + size / MB + " MB"); + log.println("Extracting " + size / MB + " MB at " + new java.sql.Time(startMs).toString()); InputStream in = new BufferedInputStream( new FileInputStream(fromFile), 1024 * 1024); @@ -147,13 +159,13 @@ private static void extract(String fromFile, String toDir) throws IOException { Inflater inflater = new Inflater(); in = new InflaterInputStream(in, inflater, 1024 * 1024); OutputStream out = getDirectoryOutputStream(toDir); - combine(in, out, temp); + combine(log, in, out, temp); inflater.end(); in.close(); out.close(); - System.out.println(); - System.out.println("Extracted in " + - (System.currentTimeMillis() - start) / 1000 + + log.println(); + log.println("Extracted in " + + TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) + " seconds"); } @@ -184,7 +196,7 @@ private static InputStream getDirectoryInputStream(final String dir) { return new InputStream() { private final String baseDir; - private final ArrayList files = new ArrayList(); + 
private final ArrayList files = new ArrayList<>(); private String current; private ByteArrayInputStream meta; private DataInputStream fileIn; @@ -243,7 +255,7 @@ public int read() throws IOException { fileIn.close(); fileIn = null; } - if (files.size() == 0) { + if (files.isEmpty()) { // EOF return -1; } @@ -389,15 +401,13 @@ public void write(int b) throws IOException { }; } - private static void sort(InputStream in, OutputStream out, + private static void sort(Log log, InputStream in, OutputStream out, String tempFileName, long size) throws IOException { - long lastTime = System.currentTimeMillis(); - int bufferSize = 16 * 1024 * 1024; + int bufferSize = 32 * 1024 * 1024; DataOutputStream tempOut = new DataOutputStream(new BufferedOutputStream( new FileOutputStream(tempFileName), 1024 * 1024)); byte[] bytes = new byte[bufferSize]; - ArrayList segmentStart = new ArrayList(); - long inPos = 0; + List segmentStart = new ArrayList<>(); long outPos = 0; long id = 1; @@ -405,25 +415,24 @@ private static void sort(InputStream in, OutputStream out, // Segment: chunk* 0 // Chunk: pos* 0 sortKey data + log.setRange(0, 30, size); while (true) { int len = readFully(in, bytes, bytes.length); if (len == 0) { break; } - inPos += len; - lastTime = printProgress(lastTime, 0, 50, inPos, size); - TreeMap map = new TreeMap(); + log.printProgress(len); + TreeMap map = new TreeMap<>(); for (int pos = 0; pos < len;) { int[] key = getKey(bytes, pos, len); int l = key[3]; - byte[] buff = new byte[l]; - System.arraycopy(bytes, pos, buff, 0, l); + byte[] buff = Arrays.copyOfRange(bytes, pos, pos + l); pos += l; Chunk c = new Chunk(null, key, buff); Chunk old = map.get(c); if (old == null) { // new entry - c.idList = new ArrayList(); + c.idList = new ArrayList<>(); c.idList.add(id); map.put(c, c); } else { @@ -439,50 +448,81 @@ private static void sort(InputStream in, OutputStream out, outPos += writeVarLong(tempOut, 0); } tempOut.close(); - size = outPos; - inPos = 0; - TreeSet segmentIn = 
new TreeSet(); - int bufferTotal = 64 * 1024 * 1024; - int bufferPerStream = bufferTotal / segmentStart.size(); - for (int i = 0; i < segmentStart.size(); i++) { - in = new FileInputStream(tempFileName); - in.skip(segmentStart.get(i)); - ChunkStream s = new ChunkStream(i); - s.readKey = true; - s.in = new DataInputStream(new BufferedInputStream(in, bufferPerStream)); - inPos += s.readNext(); - if (s.current != null) { - segmentIn.add(s); + long tempSize = new File(tempFileName).length(); + + // merge blocks if needed + int blockSize = 64; + boolean merge = false; + while (segmentStart.size() > blockSize) { + merge = true; + log.setRange(30, 50, tempSize); + log.println(); + log.println("Merging " + segmentStart.size() + " segments " + blockSize + ":1"); + ArrayList segmentStart2 = new ArrayList<>(); + outPos = 0; + DataOutputStream tempOut2 = new DataOutputStream(new BufferedOutputStream( + new FileOutputStream(tempFileName + ".b"), 1024 * 1024)); + while (segmentStart.size() > 0) { + segmentStart2.add(outPos); + int s = Math.min(segmentStart.size(), blockSize); + List start = segmentStart.subList(0, s); + TreeSet segmentIn = new TreeSet<>(); + long read = openSegments(start, segmentIn, tempFileName, true); + log.printProgress(read); + Chunk last = null; + Iterator it = merge(segmentIn, log); + while (it.hasNext()) { + Chunk c = it.next(); + if (last == null) { + last = c; + } else if (last.compareTo(c) == 0) { + last.idList.addAll(c.idList); + } else { + outPos += last.write(tempOut2, true); + last = c; + } + } + if (last != null) { + outPos += last.write(tempOut2, true); + } + // end of segment + outPos += writeVarLong(tempOut2, 0); + segmentStart = segmentStart.subList(s, segmentStart.size()); } + segmentStart = segmentStart2; + tempOut2.close(); + tempSize = new File(tempFileName).length(); + new File(tempFileName).delete(); + tempFileName += ".b"; } + if (merge) { + log.println(); + log.println("Combining " + segmentStart.size() + " segments"); + } + + TreeSet 
segmentIn = new TreeSet<>(); + long read = openSegments(segmentStart, segmentIn, tempFileName, true); + log.printProgress(read); DataOutputStream dataOut = new DataOutputStream(out); dataOut.write(HEADER); writeVarLong(dataOut, size); - Chunk last = null; // File: header length chunk* 0 // chunk: pos* 0 data - - while (segmentIn.size() > 0) { - ChunkStream s = segmentIn.first(); - segmentIn.remove(s); - Chunk c = s.current; + log.setRange(50, 100, tempSize); + Chunk last = null; + Iterator it = merge(segmentIn, log); + while (it.hasNext()) { + Chunk c = it.next(); if (last == null) { last = c; } else if (last.compareTo(c) == 0) { - for (long x : c.idList) { - last.idList.add(x); - } + last.idList.addAll(c.idList); } else { last.write(dataOut, false); last = c; } - inPos += s.readNext(); - lastTime = printProgress(lastTime, 50, 100, inPos, size); - if (s.current != null) { - segmentIn.add(s); - } } if (last != null) { last.write(dataOut, false); @@ -492,6 +532,55 @@ private static void sort(InputStream in, OutputStream out, dataOut.flush(); } + private static long openSegments(List segmentStart, TreeSet segmentIn, + String tempFileName, boolean readKey) throws IOException { + long inPos = 0; + int bufferTotal = 64 * 1024 * 1024; + int bufferPerStream = bufferTotal / segmentStart.size(); + // FileChannel fc = new RandomAccessFile(tempFileName, "r"). + // getChannel(); + for (int i = 0; i < segmentStart.size(); i++) { + // long end = i < segmentStart.size() - 1 ? 
+ // segmentStart.get(i+1) : fc.size(); + // InputStream in = + // new SharedInputStream(fc, segmentStart.get(i), end); + InputStream in = new FileInputStream(tempFileName); + in.skip(segmentStart.get(i)); + ChunkStream s = new ChunkStream(i); + s.readKey = readKey; + s.in = new DataInputStream(new BufferedInputStream(in, bufferPerStream)); + inPos += s.readNext(); + if (s.current != null) { + segmentIn.add(s); + } + } + return inPos; + } + + private static Iterator merge(final TreeSet segmentIn, final Log log) { + return new Iterator() { + + @Override + public boolean hasNext() { + return !segmentIn.isEmpty(); + } + + @Override + public Chunk next() { + ChunkStream s = segmentIn.first(); + segmentIn.remove(s); + Chunk c = s.current; + int len = s.readNext(); + log.printProgress(len); + if (s.current != null) { + segmentIn.add(s); + } + return c; + } + + }; + } + /** * Read a number of bytes. This method repeats reading until * either the bytes have been read, or EOF. @@ -519,6 +608,102 @@ private static int readFully(InputStream in, byte[] buffer, int max) * Get the sort key and length of a chunk. 
*/ private static int[] getKey(byte[] data, int start, int maxPos) { + int minLen = 4 * 1024; + int mask = 4 * 1024 - 1; + long min = Long.MAX_VALUE; + int pos = start; + for (int j = 0; pos < maxPos; pos++, j++) { + if (pos <= start + 10) { + continue; + } + long hash = getSipHash24(data, pos - 10, pos, 111, 11224); + if (hash < min) { + min = hash; + } + if (j > minLen) { + if ((hash & mask) == 1) { + break; + } + if (j > minLen * 4 && (hash & (mask >> 1)) == 1) { + break; + } + if (j > minLen * 16) { + break; + } + } + } + int len = pos - start; + int[] counts = new int[8]; + for (int i = start; i < pos; i++) { + int x = data[i] & 0xff; + counts[x >> 5]++; + } + int cs = 0; + for (int i = 0; i < 8; i++) { + cs *= 2; + if (counts[i] > (len / 32)) { + cs += 1; + } + } + int[] key = new int[4]; + // TODO test if cs makes a difference + key[0] = (int) (min >>> 32); + key[1] = (int) min; + key[2] = cs; + key[3] = len; + return key; + } + + private static long getSipHash24(byte[] b, int start, int end, long k0, + long k1) { + long v0 = k0 ^ 0x736f6d6570736575L; + long v1 = k1 ^ 0x646f72616e646f6dL; + long v2 = k0 ^ 0x6c7967656e657261L; + long v3 = k1 ^ 0x7465646279746573L; + int repeat; + for (int off = start; off <= end + 8; off += 8) { + long m; + if (off <= end) { + m = 0; + int i = 0; + for (; i < 8 && off + i < end; i++) { + m |= ((long) b[off + i] & 255) << (8 * i); + } + if (i < 8) { + m |= ((long) end - start) << 56; + } + v3 ^= m; + repeat = 2; + } else { + m = 0; + v2 ^= 0xff; + repeat = 4; + } + for (int i = 0; i < repeat; i++) { + v0 += v1; + v2 += v3; + v1 = Long.rotateLeft(v1, 13); + v3 = Long.rotateLeft(v3, 16); + v1 ^= v0; + v3 ^= v2; + v0 = Long.rotateLeft(v0, 32); + v2 += v1; + v0 += v3; + v1 = Long.rotateLeft(v1, 17); + v3 = Long.rotateLeft(v3, 21); + v1 ^= v2; + v3 ^= v0; + v2 = Long.rotateLeft(v2, 32); + } + v0 ^= m; + } + return v0 ^ v1 ^ v2 ^ v3; + } + + /** + * Get the sort key and length of a chunk. 
+ */ + private static int[] getKeyOld(byte[] data, int start, int maxPos) { int minLen = 4 * 1024; int mask = 4 * 1024 - 1; int min = Integer.MAX_VALUE; @@ -575,9 +760,8 @@ private static int getHash(long key) { return hash; } - private static void combine(InputStream in, OutputStream out, + private static void combine(Log log, InputStream in, OutputStream out, String tempFileName) throws IOException { - long lastTime = System.currentTimeMillis(); int bufferSize = 16 * 1024 * 1024; DataOutputStream tempOut = new DataOutputStream( @@ -591,21 +775,21 @@ private static void combine(InputStream in, OutputStream out, byte[] header = new byte[4]; dataIn.readFully(header); if (!Arrays.equals(header, HEADER)) { + tempOut.close(); throw new IOException("Invalid header"); } long size = readVarLong(dataIn); long outPos = 0; - long inPos = 0; - ArrayList segmentStart = new ArrayList(); + List segmentStart = new ArrayList<>(); boolean end = false; // Temp file: segment* 0 // Segment: chunk* 0 // Chunk: pos* 0 data - + log.setRange(0, 30, size); while (!end) { int segmentSize = 0; - TreeMap map = new TreeMap(); + TreeMap map = new TreeMap<>(); while (segmentSize < bufferSize) { Chunk c = Chunk.read(dataIn, false); if (c == null) { @@ -613,8 +797,7 @@ private static void combine(InputStream in, OutputStream out, break; } int length = c.value.length; - inPos += length; - lastTime = printProgress(lastTime, 0, 50, inPos, size); + log.printProgress(length); segmentSize += length; for (long x : c.idList) { map.put(x, c.value); @@ -635,32 +818,63 @@ private static void combine(InputStream in, OutputStream out, outPos += writeVarLong(tempOut, 0); } tempOut.close(); + long tempSize = new File(tempFileName).length(); size = outPos; - inPos = 0; - TreeSet segmentIn = new TreeSet(); - int bufferTotal = 64 * 1024 * 1024; - int bufferPerStream = bufferTotal / segmentStart.size(); - for (int i = 0; i < segmentStart.size(); i++) { - FileInputStream f = new FileInputStream(tempFileName); - 
f.skip(segmentStart.get(i)); - ChunkStream s = new ChunkStream(i); - s.in = new DataInputStream(new BufferedInputStream(f, bufferPerStream)); - inPos += s.readNext(); - if (s.current != null) { - segmentIn.add(s); + + // merge blocks if needed + int blockSize = 64; + boolean merge = false; + while (segmentStart.size() > blockSize) { + merge = true; + log.setRange(30, 50, tempSize); + log.println(); + log.println("Merging " + segmentStart.size() + " segments " + blockSize + ":1"); + ArrayList segmentStart2 = new ArrayList<>(); + outPos = 0; + DataOutputStream tempOut2 = new DataOutputStream(new BufferedOutputStream( + new FileOutputStream(tempFileName + ".b"), 1024 * 1024)); + while (segmentStart.size() > 0) { + segmentStart2.add(outPos); + int s = Math.min(segmentStart.size(), blockSize); + List start = segmentStart.subList(0, s); + TreeSet segmentIn = new TreeSet<>(); + long read = openSegments(start, segmentIn, tempFileName, false); + log.printProgress(read); + + Iterator it = merge(segmentIn, log); + while (it.hasNext()) { + Chunk c = it.next(); + outPos += writeVarLong(tempOut2, c.idList.get(0)); + outPos += writeVarLong(tempOut2, 0); + outPos += writeVarLong(tempOut2, c.value.length); + tempOut2.write(c.value); + outPos += c.value.length; + } + outPos += writeVarLong(tempOut2, 0); + + segmentStart = segmentStart.subList(s, segmentStart.size()); } + segmentStart = segmentStart2; + tempOut2.close(); + tempSize = new File(tempFileName).length(); + new File(tempFileName).delete(); + tempFileName += ".b"; } + if (merge) { + log.println(); + log.println("Combining " + segmentStart.size() + " segments"); + } + + TreeSet segmentIn = new TreeSet<>(); DataOutputStream dataOut = new DataOutputStream(out); - while (segmentIn.size() > 0) { - ChunkStream s = segmentIn.first(); - segmentIn.remove(s); - Chunk c = s.current; - dataOut.write(c.value); - inPos += s.readNext(); - lastTime = printProgress(lastTime, 50, 100, inPos, size); - if (s.current != null) { - 
segmentIn.add(s); - } + log.setRange(50, 100, size); + + long read = openSegments(segmentStart, segmentIn, tempFileName, false); + log.printProgress(read); + + Iterator it = merge(segmentIn, log); + while (it.hasNext()) { + dataOut.write(it.next().value); } new File(tempFileName).delete(); dataOut.flush(); @@ -684,7 +898,8 @@ static class ChunkStream implements Comparable { * * @return the number of bytes read */ - int readNext() throws IOException { + int readNext() { + current = null; current = Chunk.read(in, readKey); if (current == null) { return 0; @@ -708,7 +923,7 @@ public int compareTo(ChunkStream o) { static class Chunk implements Comparable { ArrayList idList; final byte[] value; - private int[] sortKey; + private final int[] sortKey; Chunk(ArrayList idList, int[] sortKey, byte[] value) { this.idList = idList; @@ -723,30 +938,35 @@ static class Chunk implements Comparable { * @param readKey whether to read the sort key * @return the chunk, or null if 0 has been read */ - public static Chunk read(DataInputStream in, boolean readKey) throws IOException { - ArrayList idList = new ArrayList(); - while (true) { - long x = readVarLong(in); - if (x == 0) { - break; + public static Chunk read(DataInputStream in, boolean readKey) { + try { + ArrayList idList = new ArrayList<>(); + while (true) { + long x = readVarLong(in); + if (x == 0) { + break; + } + idList.add(x); } - idList.add(x); - } - if (idList.size() == 0) { - // eof - return null; - } - int[] key = null; - if (readKey) { - key = new int[4]; - for (int i = 0; i < key.length; i++) { - key[i] = in.readInt(); + if (idList.isEmpty()) { + // eof + in.close(); + return null; + } + int[] key = null; + if (readKey) { + key = new int[4]; + for (int i = 0; i < key.length; i++) { + key[i] = in.readInt(); + } } + int len = (int) readVarLong(in); + byte[] value = new byte[len]; + in.readFully(value); + return new Chunk(idList, key, value); + } catch (IOException e) { + throw new RuntimeException(e); } - int len = 
(int) readVarLong(in); - byte[] value = new byte[len]; - in.readFully(value); - return new Chunk(idList, key, value); } /** @@ -812,6 +1032,81 @@ public int compareTo(Chunk o) { } } + /** + * A logger, including context. + */ + static class Log { + + private long lastTime; + private long current; + private int pos; + private int low; + private int high; + private long total; + + /** + * Print an empty line. + */ + void println() { + System.out.println(); + pos = 0; + } + + /** + * Print a message. + * + * @param msg the message + */ + void print(String msg) { + System.out.print(msg); + } + + /** + * Print a message. + * + * @param msg the message + */ + void println(String msg) { + System.out.println(msg); + pos = 0; + } + + /** + * Set the range. + * + * @param low the percent value if current = 0 + * @param high the percent value if current = total + * @param total the maximum value + */ + void setRange(int low, int high, long total) { + this.low = low; + this.high = high; + this.current = 0; + this.total = total; + } + + /** + * Print the progress. + * + * @param offset the offset since the last operation + */ + void printProgress(long offset) { + current += offset; + long now = System.nanoTime(); + if (now - lastTime > TimeUnit.SECONDS.toNanos(3)) { + String msg = (low + (high - low) * current / total) + "% "; + if (pos > 80) { + System.out.println(); + pos = 0; + } + System.out.print(msg); + pos += msg.length(); + lastTime = now; + } + } + + } + /** * Write a variable size long value. * @@ -861,14 +1156,40 @@ static long readVarLong(InputStream in) throws IOException { return x; } - private static long printProgress(long lastTime, int low, int high, - long current, long total) { - long now = System.currentTimeMillis(); - if (now - lastTime > 3000) { - System.out.print((low + (high - low) * current / total) + "% "); - lastTime = now; + /** + * An input stream that uses a shared file channel. 
+ */ + static class SharedInputStream extends InputStream { + private final FileChannel channel; + private final long endPosition; + private long position; + + SharedInputStream(FileChannel channel, long position, long endPosition) { + this.channel = channel; + this.position = position; + this.endPosition = endPosition; + } + + @Override + public int read() { + throw new UnsupportedOperationException(); } - return lastTime; + + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (len == 0) { + return 0; + } + len = (int) Math.min(len, endPosition - position); + if (len <= 0) { + return -1; + } + ByteBuffer buff = ByteBuffer.wrap(b, off, len); + len = channel.read(buff, position); + position += len; + return len; + } + } } diff --git a/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java b/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java index 4809ebf6b3..6324d2f26b 100644 --- a/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java +++ b/h2/src/tools/org/h2/dev/fs/ArchiveToolStore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -12,17 +12,15 @@ import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.TimeUnit; import org.h2.mvstore.Cursor; import org.h2.mvstore.DataUtils; import org.h2.mvstore.MVMap; import org.h2.mvstore.MVStore; import org.h2.store.fs.FileUtils; -import org.h2.util.New; /** * An archive tool to compress directories, using the MVStore backend. 
@@ -68,7 +66,7 @@ private void compress(String sourceDir) throws Exception { start(); long tempSize = 8 * 1024 * 1024; String tempFileName = fileName + ".temp"; - ArrayList fileNames = New.arrayList(); + ArrayList fileNames = new ArrayList<>(); System.out.println("Reading the file list"); long totalSize = addFiles(sourceDir, fileNames); @@ -100,9 +98,8 @@ private void compress(String sourceDir) throws Exception { } buff.clear(); buff.flip(); - ArrayList posList = new ArrayList(); - FileChannel fc = FileUtils.open(s, "r"); - try { + ArrayList posList = new ArrayList<>(); + try (FileChannel fc = FileUtils.open(s, "r")) { boolean eof = false; while (true) { while (!eof && buff.remaining() < 512 * 1024) { @@ -118,11 +115,11 @@ private void compress(String sourceDir) throws Exception { if (buff.remaining() == 0) { break; } - int c = getChunkLength(buff.array(), buff.position(), - buff.limit()) - buff.position(); - byte[] bytes = new byte[c]; - System.arraycopy(buff.array(), buff.position(), bytes, 0, c); - buff.position(buff.position() + c); + int position = buff.position(); + int c = getChunkLength(buff.array(), position, + buff.limit()) - position; + byte[] bytes = Arrays.copyOfRange(buff.array(), position, position + c); + buff.position(position + c); int[] key = getKey(bucket, bytes); key[3] = segmentId; while (true) { @@ -134,7 +131,7 @@ private void compress(String sourceDir) throws Exception { data.put(key, bytes); break; } - if (old != null && Arrays.equals(old, bytes)) { + if (Arrays.equals(old, bytes)) { // duplicate break; } @@ -153,8 +150,6 @@ private void compress(String sourceDir) throws Exception { } printProgress(0, 50, currentSize, totalSize); } - } finally { - fc.close(); } int[] posArray = new int[posList.size()]; for (int i = 0; i < posList.size(); i++) { @@ -163,7 +158,7 @@ private void compress(String sourceDir) throws Exception { filesTemp.put(name, posArray); } storeTemp.commit(); - ArrayList> list = New.arrayList(); + ArrayList> list = new 
ArrayList<>(segmentId-1); totalSize = 0; for (int i = 1; i <= segmentId; i++) { MVMap data = storeTemp.openMap("data" + i); @@ -180,28 +175,22 @@ private void compress(String sourceDir) throws Exception { MVMap data = store.openMap("data" + segmentId); MVMap keepSegment = storeTemp.openMap("keep"); while (list.size() > 0) { - Collections.sort(list, new Comparator>() { - - @Override - public int compare(Cursor o1, - Cursor o2) { - int[] k1 = o1.getKey(); - int[] k2 = o2.getKey(); - int comp = 0; - for (int i = 0; i < k1.length - 1; i++) { - long x1 = k1[i]; - long x2 = k2[i]; - if (x1 > x2) { - comp = 1; - break; - } else if (x1 < x2) { - comp = -1; - break; - } - } - return comp; + list.sort((o1, o2) -> { + int[] k1 = o1.getKey(); + int[] k2 = o2.getKey(); + int comp = 0; + for (int i = 0; i < k1.length - 1; i++) { + long x1 = k1[i]; + long x2 = k2[i]; + if (x1 > x2) { + comp = 1; + break; + } else if (x1 < x2) { + comp = -1; + break; } - + } + return comp; }); Cursor top = list.get(0); int[] key = top.getKey(); @@ -277,13 +266,13 @@ public int compare(Cursor o1, } private void start() { - this.start = System.currentTimeMillis(); + this.start = System.nanoTime(); this.lastTime = start; } private void printProgress(int low, int high, long current, long total) { - long now = System.currentTimeMillis(); - if (now - lastTime > 5000) { + long now = System.nanoTime(); + if (now - lastTime > TimeUnit.SECONDS.toNanos(5)) { System.out.print((low + (high - low) * current / total) + "% "); lastTime = now; } @@ -291,7 +280,7 @@ private void printProgress(int low, int high, long current, long total) { private void printDone() { System.out.println("Done in " + - (System.currentTimeMillis() - start) / 1000 + + TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) + " seconds"); } @@ -382,7 +371,7 @@ private void expand(String targetDir) throws Exception { storeTemp.commit(); } - ArrayList> list = New.arrayList(); + ArrayList> list = new ArrayList<>(lastSegment-1); totalSize = 
0; currentSize = 0; for (int i = 1; i <= lastSegment; i++) { @@ -398,28 +387,22 @@ private void expand(String targetDir) throws Exception { OutputStream file = null; int[] lastKey = null; while (list.size() > 0) { - Collections.sort(list, new Comparator>() { - - @Override - public int compare(Cursor o1, - Cursor o2) { - int[] k1 = o1.getKey(); - int[] k2 = o2.getKey(); - int comp = 0; - for (int i = 0; i < k1.length; i++) { - long x1 = k1[i]; - long x2 = k2[i]; - if (x1 > x2) { - comp = 1; - break; - } else if (x1 < x2) { - comp = -1; - break; - } + list.sort((o1, o2) -> { + int[] k1 = o1.getKey(); + int[] k2 = o2.getKey(); + int comp = 0; + for (int i = 0; i < k1.length; i++) { + long x1 = k1[i]; + long x2 = k2[i]; + if (x1 > x2) { + comp = 1; + break; + } else if (x1 < x2) { + comp = -1; + break; } - return comp; } - + return comp; }); Cursor top = list.get(0); int[] key = top.getKey(); @@ -528,7 +511,7 @@ private static int[] getKey(int bucket, byte[] buff) { } key[0] = cs; key[1] = bucket; - key[2] = DataUtils.getFletcher32(buff, buff.length); + key[2] = DataUtils.getFletcher32(buff, 0, buff.length); return key; } diff --git a/h2/src/tools/org/h2/dev/fs/FilePathZip2.java b/h2/src/tools/org/h2/dev/fs/FilePathZip2.java index fd27feabb7..92578827e0 100644 --- a/h2/src/tools/org/h2/dev/fs/FilePathZip2.java +++ b/h2/src/tools/org/h2/dev/fs/FilePathZip2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -10,6 +10,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import java.util.ArrayList; @@ -17,13 +18,12 @@ import java.util.zip.ZipInputStream; import org.h2.engine.Constants; import org.h2.message.DbException; +import org.h2.store.fs.FakeFileChannel; import org.h2.store.fs.FileBase; -import org.h2.store.fs.FileChannelInputStream; import org.h2.store.fs.FilePath; -import org.h2.store.fs.FilePathDisk; import org.h2.store.fs.FileUtils; +import org.h2.store.fs.disk.FilePathDisk; import org.h2.util.IOUtils; -import org.h2.util.New; /** * This is a read-only file system that allows to access databases stored in a @@ -61,13 +61,11 @@ public boolean createFile() { } @Override - public FilePath createTempFile(String suffix, boolean deleteOnExit, - boolean inTempDir) throws IOException { + public FilePath createTempFile(String suffix, boolean inTempDir) throws IOException { if (!inTempDir) { throw new IOException("File system is read-only"); } - return new FilePathDisk().getPath(name).createTempFile(suffix, - deleteOnExit, true); + return new FilePathDisk().getPath(name).createTempFile(suffix, true); } @Override @@ -216,7 +214,7 @@ public ArrayList newDirectoryStream() { ZipInputStream file = openZip(); String dirName = getEntryName(); String prefix = path.substring(0, path.length() - dirName.length()); - ArrayList list = New.arrayList(); + ArrayList list = new ArrayList<>(); while (true) { ZipEntry entry = file.getNextEntry(); if (entry == null) { @@ -245,7 +243,7 @@ public FilePath toRealPath() { @Override public InputStream newInputStream() throws IOException { - return new FileChannelInputStream(open("r"), true); + return Channels.newInputStream(open("r")); } @Override @@ -426,9 +424,7 @@ public int write(ByteBuffer src) throws IOException { public synchronized 
FileLock tryLock(long position, long size, boolean shared) throws IOException { if (shared) { - - // cast to FileChannel to avoid JDK 1.7 ambiguity - return new FileLock((FileChannel) null, position, size, shared) { + return new FileLock(FakeFileChannel.INSTANCE, position, size, shared) { @Override public boolean isValid() { diff --git a/h2/src/tools/org/h2/dev/fs/FileShell.java b/h2/src/tools/org/h2/dev/fs/FileShell.java index 0a8bb7f790..be7ce88ba1 100644 --- a/h2/src/tools/org/h2/dev/fs/FileShell.java +++ b/h2/src/tools/org/h2/dev/fs/FileShell.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.fs; @@ -20,13 +20,12 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; + import org.h2.command.dml.BackupCommand; import org.h2.engine.Constants; -import org.h2.engine.SysProperties; import org.h2.message.DbException; import org.h2.store.fs.FileUtils; import org.h2.util.IOUtils; -import org.h2.util.New; import org.h2.util.StringUtils; import org.h2.util.Tool; @@ -42,8 +41,9 @@ public class FileShell extends Tool { private String currentWorkingDirectory; /** - * Options are case sensitive. Supported options are: + * Options are case sensitive. * + * * * * @@ -52,9 +52,9 @@ public class FileShell extends Tool { * *
    Supported options
    [-help] or [-?]Print the list of options
    [-verbose]Execute the given commands and exit
    * Multiple commands may be executed if separated by ; - * @h2.resource * * @param args the command line arguments + * @throws SQLException on failure */ public static void main(String... args) throws SQLException { new FileShell().runTool(args); @@ -121,7 +121,7 @@ public void runTool(String... args) throws SQLException { private void promptLoop() { println(""); - println("Welcome to H2 File Shell " + Constants.getFullVersion()); + println("Welcome to H2 File Shell " + Constants.FULL_VERSION); println("Exit with Ctrl+C"); showHelp(); if (reader == null) { @@ -278,7 +278,7 @@ private boolean execute(String[] list) throws IOException { recursive = true; } String target = getFile(list[i++]); - ArrayList source = New.arrayList(); + ArrayList source = new ArrayList<>(); readFileList(list, i, source, recursive); zip(target, currentWorkingDirectory, source); } @@ -343,7 +343,7 @@ private static void zip(String zipFileName, String base, for (String fileName : source) { String f = FileUtils.toRealPath(fileName); if (!f.startsWith(base)) { - DbException.throwInternalError(f + " does not start with " + base); + throw DbException.getInternalError(f + " does not start with " + base); } if (f.endsWith(zipFileName)) { continue; @@ -388,17 +388,13 @@ private void unzip(String zipFileName, String targetDir) { } String fileName = entry.getName(); // restoring windows backups on linux and vice versa - fileName = fileName.replace('\\', - SysProperties.FILE_SEPARATOR.charAt(0)); - fileName = fileName.replace('/', - SysProperties.FILE_SEPARATOR.charAt(0)); - if (fileName.startsWith(SysProperties.FILE_SEPARATOR)) { + fileName = IOUtils.nameSeparatorsToNative(fileName); + if (fileName.startsWith(File.separator)) { fileName = fileName.substring(1); } OutputStream o = null; try { - o = FileUtils.newOutputStream(targetDir - + SysProperties.FILE_SEPARATOR + fileName, false); + o = FileUtils.newOutputStream(targetDir + File.separatorChar + fileName, false); IOUtils.copy(zipIn, o); 
o.close(); } finally { @@ -451,7 +447,7 @@ private String getFile(String f) { } String unwrapped = FileUtils.unwrap(f); String prefix = f.substring(0, f.length() - unwrapped.length()); - f = prefix + currentWorkingDirectory + SysProperties.FILE_SEPARATOR + unwrapped; + f = prefix + currentWorkingDirectory + File.separatorChar + unwrapped; return FileUtils.toRealPath(f); } diff --git a/h2/src/tools/org/h2/dev/fs/package.html b/h2/src/tools/org/h2/dev/fs/package.html index e082232a14..e541d95b76 100644 --- a/h2/src/tools/org/h2/dev/fs/package.html +++ b/h2/src/tools/org/h2/dev/fs/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/ftp/FtpClient.java b/h2/src/tools/org/h2/dev/ftp/FtpClient.java index 6f92b2ae2c..faf1f36239 100644 --- a/h2/src/tools/org/h2/dev/ftp/FtpClient.java +++ b/h2/src/tools/org/h2/dev/ftp/FtpClient.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.ftp; @@ -17,11 +17,10 @@ import java.io.PrintWriter; import java.net.InetAddress; import java.net.Socket; +import java.nio.charset.StandardCharsets; -import org.h2.engine.Constants; import org.h2.util.IOUtils; import org.h2.util.NetUtils; -import org.h2.util.StatementBuilder; import org.h2.util.StringUtils; /** @@ -57,7 +56,7 @@ private void connect(String url) throws IOException { InputStream in = socket.getInputStream(); OutputStream out = socket.getOutputStream(); reader = new BufferedReader(new InputStreamReader(in)); - writer = new PrintWriter(new OutputStreamWriter(out, Constants.UTF8)); + writer = new PrintWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8)); readCode(220); } @@ -230,12 +229,14 @@ private void passive() throws IOException { int last = message.indexOf(')'); String[] address = StringUtils.arraySplit( message.substring(first, last), ',', true); - StatementBuilder buff = new StatementBuilder(); + StringBuilder builder = new StringBuilder(); for (int i = 0; i < 4; i++) { - buff.appendExceptFirst("."); - buff.append(address[i]); + if (i > 0) { + builder.append('.'); + } + builder.append(address[i]); } - String ip = buff.toString(); + String ip = builder.toString(); InetAddress addr = InetAddress.getByName(ip); int port = (Integer.parseInt(address[4]) << 8) | Integer.parseInt(address[5]); Socket socketData = NetUtils.createSocket(addr, port, false); @@ -377,8 +378,7 @@ public String nameList(String dir) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyAndClose(inData, out); readCode(226); - byte[] data = out.toByteArray(); - return new String(data); + return out.toString(); } /** @@ -394,8 +394,7 @@ public String list(String dir) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyAndClose(inData, out); readCode(226); - byte[] data = out.toByteArray(); - return new String(data); + return out.toString(); } /** 
diff --git a/h2/src/tools/org/h2/dev/ftp/package.html b/h2/src/tools/org/h2/dev/ftp/package.html index a147f2be18..fcfd171c67 100644 --- a/h2/src/tools/org/h2/dev/ftp/package.html +++ b/h2/src/tools/org/h2/dev/ftp/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java b/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java index f1ebb1268a..7e0a42e22e 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpControl.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; @@ -13,7 +13,8 @@ import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; -import org.h2.engine.Constants; +import java.nio.charset.StandardCharsets; + import org.h2.store.fs.FileUtils; import org.h2.util.StringUtils; @@ -47,7 +48,7 @@ public class FtpControl extends Thread { public void run() { try { output = new PrintWriter(new OutputStreamWriter( - control.getOutputStream(), Constants.UTF8)); + control.getOutputStream(), StandardCharsets.UTF_8)); if (stop) { reply(421, "Too many users"); } else { @@ -159,7 +160,7 @@ private void processConnected(String command, String param) throws IOException { } } else if ("CDUP".equals(command)) { if (currentDir.length() > 1) { - int idx = currentDir.lastIndexOf("/", currentDir.length() - 2); + int idx = currentDir.lastIndexOf('/', currentDir.length() - 2); currentDir = currentDir.substring(0, idx + 1); reply(250, "Ok"); } else { @@ -303,7 +304,7 @@ private void processConnected(String command, String param) throws IOException { } else if ("SIZE".equals(command)) { param = getFileName(param); if (FileUtils.exists(param) && !FileUtils.isDirectory(param)) 
{ - reply(250, String.valueOf(FileUtils.size(param))); + reply(250, Long.toString(FileUtils.size(param))); } else { reply(500, "Failed"); } diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpData.java b/h2/src/tools/org/h2/dev/ftp/server/FtpData.java index 4d60e2ff1b..6faf76518b 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpData.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpData.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java b/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java index 4309041803..55f91f8242 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpEvent.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java b/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java index 7e5f67ebbd..e01a19aa9d 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpEventListener.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.ftp.server; diff --git a/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java b/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java index 9c13b23e43..176e5f1f60 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java +++ b/h2/src/tools/org/h2/dev/ftp/server/FtpServer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.ftp.server; @@ -28,7 +28,7 @@ /** * Small FTP Server. Intended for ad-hoc networks in a secure environment. * Remote connections are possible. - * See also http://cr.yp.to/ftp.html http://www.ftpguide.com/ + * See also https://cr.yp.to/ftp.html http://www.ftpguide.com/ */ public class FtpServer extends Tool implements Service { @@ -81,7 +81,7 @@ public class FtpServer extends Tool implements Service { private String writeUserName = DEFAULT_WRITE, writePassword = DEFAULT_WRITE_PASSWORD; private String readUserName = DEFAULT_READ; - private final HashMap tasks = new HashMap(); + private final HashMap tasks = new HashMap<>(); private boolean trace; private boolean allowTask; @@ -91,9 +91,10 @@ public class FtpServer extends Tool implements Service { /** * When running without options, -tcp, -web, -browser, - * and -pg are started.
    - * Options are case sensitive. Supported options are: + * and -pg are started. + * Options are case sensitive. * + * * * * @@ -145,7 +146,6 @@ public class FtpServer extends Tool implements Service { * * *
    Supported options
    [-help] or [-?]Print the list of options
    [-web]
    [-trace]Print additional trace information; for all servers
    - * @h2.resource * * @param args the command line arguments */ @@ -230,7 +230,7 @@ private void appendFile(StringBuilder buff, String fileName) { buff.append('r'); buff.append(FileUtils.canWrite(fileName) ? 'w' : '-'); buff.append("------- 1 owner group "); - String size = String.valueOf(FileUtils.size(fileName)); + String size = Long.toString(FileUtils.size(fileName)); for (int i = size.length(); i < 15; i++) { buff.append(' '); } diff --git a/h2/src/tools/org/h2/dev/ftp/server/package.html b/h2/src/tools/org/h2/dev/ftp/server/package.html index ba83de9fe6..29801cdf07 100644 --- a/h2/src/tools/org/h2/dev/ftp/server/package.html +++ b/h2/src/tools/org/h2/dev/ftp/server/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java b/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java index c171ef80b9..58db01ff78 100644 --- a/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java +++ b/h2/src/tools/org/h2/dev/hash/IntPerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.hash; @@ -91,7 +91,6 @@ public int get(int x) { * * @param pos the start position * @param x the key - * @param isRoot whether this is the root of the tree * @param level the level * @return the hash value */ @@ -194,13 +193,13 @@ private static int getSize(int n) { * @param list the data * @return the hash function description */ - public static byte[] generate(ArrayList list) { + public static byte[] generate(ArrayList list) { ByteStream out = new ByteStream(); generate(list, 0, out); return out.toByteArray(); } - private static void generate(ArrayList list, int level, ByteStream out) { + private static void generate(ArrayList list, int level, ByteStream out) { int size = list.size(); if (size <= 1) { out.write((byte) size); @@ -235,17 +234,14 @@ private static void generate(ArrayList list, int level, ByteStream split = (size - 47) / DIVIDE; } split = Math.max(2, split); - ArrayList> lists; - do { - lists = new ArrayList>(split); - for (int i = 0; i < split; i++) { - lists.add(new ArrayList(size / split)); - } - for (int x : list) { - ArrayList l = lists.get(hash(x, level, 0, split)); - l.add(x); - } - } while (lists == null); + ArrayList> lists = new ArrayList<>(split); + for (int i = 0; i < split; i++) { + lists.add(new ArrayList(size / split)); + } + for (int x : list) { + ArrayList l = lists.get(hash(x, level, 0, split)); + l.add(x); + } if (split >= SPLIT_MANY) { out.write((byte) SPLIT_MANY); } diff --git a/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java b/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java index c47ce85c2e..3019f11b93 100644 --- a/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java +++ b/h2/src/tools/org/h2/dev/hash/MinimalPerfectHash.java @@ -1,13 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.hash; import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Set; @@ -330,8 +330,7 @@ private static int getSize(int n) { * @return the hash function description */ public static byte[] generate(Set set, UniversalHash hash) { - ArrayList list = new ArrayList(); - list.addAll(set); + ArrayList list = new ArrayList<>(set); ByteArrayOutputStream out = new ByteArrayOutputStream(); int seed = RANDOM.nextInt(); out.write(seed >>> 24); @@ -409,7 +408,7 @@ static void generate(ArrayList list, UniversalHash hash, boolean isRoot = level == 0; ArrayList> lists; do { - lists = new ArrayList>(split); + lists = new ArrayList<>(split); for (int i = 0; i < split; i++) { lists.add(new ArrayList(size / split)); } @@ -452,11 +451,11 @@ private static void generateMultiThreaded( final int seed, ByteArrayOutputStream out) { final ArrayList outList = - new ArrayList(); + new ArrayList<>(); int processors = Runtime.getRuntime().availableProcessors(); Thread[] threads = new Thread[processors]; final AtomicInteger success = new AtomicInteger(); - final AtomicReference failure = new AtomicReference(); + final AtomicReference failure = new AtomicReference<>(); for (int i = 0; i < processors; i++) { threads[i] = new Thread() { @Override @@ -500,9 +499,7 @@ public void run() { for (ByteArrayOutputStream temp : outList) { out.write(temp.toByteArray()); } - } catch (InterruptedException e) { - throw new RuntimeException(e); - } catch (IOException e) { + } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } } @@ -525,15 +522,15 @@ private static int hash(K o, UniversalHash hash, int level, x = ((x >>> 16) ^ x) * 0x45d9f3b; x = ((x >>> 16) ^ x) * 
0x45d9f3b; x = (x >>> 16) ^ x; - return Math.abs(x % size); + return (x & (-1 >>> 1)) % size; } - private static int hash(int x, int level, int offset, int size) { + private static int hash(int x, int level, int offset, int size) { x += level + offset * 32; x = ((x >>> 16) ^ x) * 0x45d9f3b; x = ((x >>> 16) ^ x) * 0x45d9f3b; x = (x >>> 16) ^ x; - return Math.abs(x % size); + return (x & (-1 >>> 1)) % size; } private static int writeVarInt(ByteArrayOutputStream out, int x) { @@ -659,7 +656,7 @@ public int hashCode(Long o, int index, int seed) { if (index == 0) { return o.hashCode(); } else if (index < 8) { - long x = o.longValue(); + long x = o; x += index; x = ((x >>> 32) ^ x) * 0x45d9f3b; x = ((x >>> 32) ^ x) * 0x45d9f3b; @@ -667,7 +664,7 @@ public int hashCode(Long o, int index, int seed) { } // get the lower or higher 32 bit depending on the index int shift = (index & 1) * 32; - return (int) (o.longValue() >>> shift); + return (int) (o >>> shift); } } @@ -677,8 +674,6 @@ public int hashCode(Long o, int index, int seed) { */ public static class StringHash implements UniversalHash { - private static final Charset UTF8 = Charset.forName("UTF-8"); - @Override public int hashCode(String o, int index, int seed) { if (index == 0) { @@ -723,7 +718,7 @@ public static int getFastHash(String o, int index, int seed) { * @return the hash value */ public static int getSipHash24(String o, long k0, long k1) { - byte[] b = o.getBytes(UTF8); + byte[] b = o.getBytes(StandardCharsets.UTF_8); return getSipHash24(b, 0, b.length, k0, k1); } @@ -753,7 +748,7 @@ public static int getSipHash24(byte[] b, int start, int end, long k0, long k1) { m |= ((long) b[off + i] & 255) << (8 * i); } if (i < 8) { - m |= ((long) b.length) << 56; + m |= ((long) end - start) << 56; } v3 ^= m; repeat = 2; diff --git a/h2/src/tools/org/h2/dev/hash/PerfectHash.java b/h2/src/tools/org/h2/dev/hash/PerfectHash.java index db9c395694..185c942ec1 100644 --- a/h2/src/tools/org/h2/dev/hash/PerfectHash.java +++ 
b/h2/src/tools/org/h2/dev/hash/PerfectHash.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.hash; @@ -181,7 +181,7 @@ private static void generate(Collection set, int level, } split = Math.min(MAX_SPLIT - 1, Math.max(2, split)); out.write(split); - List> lists = new ArrayList>(split); + List> lists = new ArrayList<>(split); for (int i = 0; i < split; i++) { lists.add(new ArrayList(size / split)); } diff --git a/h2/src/tools/org/h2/dev/hash/package.html b/h2/src/tools/org/h2/dev/hash/package.html index 660f507454..f8d85f7907 100644 --- a/h2/src/tools/org/h2/dev/hash/package.html +++ b/h2/src/tools/org/h2/dev/hash/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/jpox/H2Adapter.java.txt b/h2/src/tools/org/h2/dev/jpox/H2Adapter.java.txt deleted file mode 100644 index b35f2a9fcc..0000000000 --- a/h2/src/tools/org/h2/dev/jpox/H2Adapter.java.txt +++ /dev/null @@ -1,352 +0,0 @@ -/********************************************************************** -Copyright (c) 2006 Andy Jefferson and others. All rights reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -Contributors: -2006 Thomas Mueller - updated the dialect for the H2 database engine -**********************************************************************/ -package org.jpox.store.rdbms.adapter; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; - -import javax.sql.DataSource; - -import org.jpox.store.DatastoreContainerObject; -import org.jpox.store.DatastoreIdentifier; -import org.jpox.store.Dictionary; -import org.jpox.store.expression.LogicSetExpression; -import org.jpox.store.expression.NumericExpression; -import org.jpox.store.expression.QueryExpression; -import org.jpox.store.expression.ScalarExpression; -import org.jpox.store.expression.TableExprAsJoins; -import org.jpox.store.rdbms.Column; -import org.jpox.store.rdbms.key.PrimaryKey; -import org.jpox.store.rdbms.table.Table; - -/** - * Provides methods for adapting SQL language elements to the H2 Database Engine. - * - * @version $Revision: 1.1 $ - */ -class H2Adapter extends DatabaseAdapter -{ - private String schemaName; - - /** - * Constructs a H2 adapter based on the given JDBC metadata. - * @param dictionary The Dictionary to use - * @param metadata the database metadata. - */ - public H2Adapter(Dictionary dictionary, DatabaseMetaData metadata) - { - super(dictionary, metadata); - - // Set schema name - try - { - ResultSet rs = metadata.getSchemas(); - while (rs.next()) - { - if (rs.getBoolean("IS_DEFAULT")) - { - schemaName = rs.getString("TABLE_SCHEM"); - } - } - } - catch (SQLException e) - { - e.printStackTrace(); - // ignore - } - } - - /** - * Getter for the vendor ID for this adapter. - * @return The vendor ID - */ - public String getVendorID() - { - return "h2"; - } - - /** - * Accessor for a Connection to the datastore. - * @param ds The data source. 
Possible to have more than one data source for fail over - * @param userName The username for the datastore - * @param password The password for the datastore - * @param isolationLevel The level of transaction isolation - * @return The Connection - * @throws SQLException Thrown when an error occurs in the creation. - **/ - public Connection getConnection(DataSource[] ds, String userName, String password, int isolationLevel) - throws SQLException - { - return super.getConnection(ds,userName,password,Connection.TRANSACTION_SERIALIZABLE); - } - - /** - * Accessor for the maximum table name length permitted on this - * datastore. - * @return Max table name length - **/ - public int getMaxTableNameLength() - { - return SQLConstants.MAX_IDENTIFIER_LENGTH; - } - - /** - * Accessor for the maximum constraint name length permitted on this - * datastore. - * @return Max constraint name length - **/ - public int getMaxConstraintNameLength() - { - return SQLConstants.MAX_IDENTIFIER_LENGTH; - } - - /** - * Accessor for the maximum index name length permitted on this datastore. - * @return Max index name length - **/ - public int getMaxIndexNameLength() - { - return SQLConstants.MAX_IDENTIFIER_LENGTH; - } - - /** - * Accessor for the maximum column name length permitted on this datastore. - * @return Max column name length - **/ - public int getMaxColumnNameLength() - { - return SQLConstants.MAX_IDENTIFIER_LENGTH; - } - - /** - * Accessor for the SQL statement to add a column to a table. - * @param table The table - * @param col The column - * @return The SQL necessary to add the column - */ - public String getAddColumnStatement(DatastoreContainerObject table, Column col) - { - return "ALTER TABLE " + table.toString() + " ADD COLUMN " + col.getSQLDefinition(); - } - - /** - * Method to return the SQL to append to the SELECT clause of a SELECT statement to handle - * restriction of ranges using the LIMIT keyword. 
- * @param offset The offset to return from - * @param count The number of items to return - * @return The SQL to append to allow for ranges using LIMIT. - */ - public String getRangeByLimitSelectClause(long offset, long count) - { - if (offset >= 0 && count > 0) - { - return " LIMIT " + offset + " " + count + " "; - } - else if (offset <= 0 && count > 0) - { - return " LIMIT 0 " + count + " "; - } - else - { - return ""; - } - } - - /** - * Accessor for whether the adapter supports the transaction isolation level - * - * @param isolationLevel the isolation level - * @return Whether the transaction isolation level setting is supported. - */ - public boolean supportsTransactionIsolationLevel(int isolationLevel) - { - if (isolationLevel == Connection.TRANSACTION_READ_COMMITTED || isolationLevel == Connection.TRANSACTION_SERIALIZABLE) - { - return true; - } - return false; - } - - /** - * Whether the datastore supports specification of the primary key in CREATE - * TABLE statements. - * @return Whether it allows "PRIMARY KEY ..." - */ - public boolean supportsPrimaryKeyInCreateStatements() - { - return true; - } - - /** - * Accessor for the Schema Name for this datastore. - * - * @param conn Connection to the datastore - * @return The schema name - **/ - public String getSchemaName(Connection conn) - throws SQLException - { - return schemaName; - } - - /** - * @param pk An object describing the primary key. - * @return The PK statement - */ - public String getAddPrimaryKeyStatement(PrimaryKey pk) - { - // PK is created by the CREATE TABLE statement so we just return null - return null; - } - - - /** - * Returns the appropriate SQL to drop the given table. - * It should return something like: - *

    - *

    -     * DROP TABLE FOO
    -     * 
    - * - * @param table The table to drop. - * @return The text of the SQL statement. - */ - public String getDropTableStatement(DatastoreContainerObject table) - { - return "DROP TABLE " + table.toString(); - } - - /** - * Whether we support deferred constraints in keys. - * @return whether we support deferred constraints in keys. - **/ - public boolean supportsDeferredConstraints() - { - return false; - } - - /** - * Whether we support auto incrementing fields. - * @return whether we support auto incrementing fields. - **/ - public boolean supportsAutoIncrementFields() - { - return true; - } - - /** - * Accessor for the auto-increment sql statement for this datastore. - * @param tableName Name of the table that the autoincrement is for - * @param columnName Name of the column that the autoincrement is for - * @return The statement for getting the latest auto-increment key - **/ - public String getAutoIncrementStmt(String tableName, String columnName) - { - return "CALL IDENTITY()"; - } - - /** - * Accessor for the auto-increment keyword for generating DDLs (CREATE TABLE...). - * @return The keyword for a column using auto-increment - **/ - public String getAutoIncrementKeyword() - { - return "IDENTITY"; - } - - /** - * Method to return the INSERT statement to use when inserting into a table that has no - * columns specified. This is the case when we have a single column in the table and that column - * is autoincrement/identity (and so is assigned automatically in the datastore). - * @param table The table - * @return The INSERT statement - */ - public String getInsertStatementForNoColumns(Table table) - { - return "INSERT INTO " + table.toString() + " VALUES(NULL)"; - } - - /** - * Whether to allow Unique statements in the section of CREATE TABLE after the - * column definitions. 
- * @see org.jpox.store.rdbms.adapter.DatabaseAdapter#supportsUniqueConstraintsInEndCreateStatements() - */ - public boolean supportsUniqueConstraintsInEndCreateStatements() - { - return true; - } - - /** - * Whether this datastore supports the use of CHECK after the column - * definitions in CREATE TABLE statements (DDL). - * e.g. - * CREATE TABLE XYZ - * ( - * COL_A int, - * COL_B char(1), - * PRIMARY KEY (COL_A), - * CHECK (COL_B IN ('Y','N')) - * ) - * @return whether we can use CHECK after the column definitions in CREATE TABLE. - **/ - public boolean supportsCheckConstraintsInEndCreateStatements() - { - return true; - } - - /** - * Accessor for whether the specified type is allow to be part of a PK. - * @param datatype The JDBC type - * @return Whether it is permitted in the PK - */ - public boolean isValidPrimaryKeyType(int datatype) - { - return true; - } - - /** - * Method to generate a modulus expression. The binary % operator is said to - * yield the remainder of its operands from an implied division; the - * left-hand operand is the dividend and the right-hand operand is the - * divisor. This returns MOD(expr1, expr2). - * @param operand1 the left expression - * @param operand2 the right expression - * @return The Expression for modulus - */ - public NumericExpression modOperator(ScalarExpression operand1, ScalarExpression operand2) - { - ArrayList args = new ArrayList(); - args.add(operand1); - args.add(operand2); - return new NumericExpression("MOD", args); - } - /** - * Return a new TableExpression. - * @param qs The QueryStatement to add the expression to - * @param table The table in the expression - * @param rangeVar range variable to assign to the expression. - * @return The expression. 
- **/ - public LogicSetExpression newTableExpression(QueryExpression qs, DatastoreContainerObject table, DatastoreIdentifier rangeVar) - { - return new TableExprAsJoins(qs, table, rangeVar); - } -} diff --git a/h2/src/tools/org/h2/dev/mail/SendMail.java.txt b/h2/src/tools/org/h2/dev/mail/SendMail.java.txt index 3b52a0f1ea..26018958b7 100644 --- a/h2/src/tools/org/h2/dev/mail/SendMail.java.txt +++ b/h2/src/tools/org/h2/dev/mail/SendMail.java.txt @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.mail; diff --git a/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java b/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java index a61604333e..71ce3f98f1 100644 --- a/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java +++ b/h2/src/tools/org/h2/dev/net/PgTcpRedirect.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.net; @@ -30,14 +30,14 @@ public class PgTcpRedirect { * @param args the command line parameters */ public static void main(String... args) throws Exception { - new PgTcpRedirect().loop(args); + loop(args); } - private void loop(String... args) throws Exception { + private static void loop(String... 
args) throws Exception { // MySQL protocol: // http://www.redferni.uklinux.net/mysql/MySQL-Protocol.html // PostgreSQL protocol: - // http://developer.postgresql.org/pgdocs/postgres/protocol.html + // https://www.postgresql.org/docs/devel/protocol.html // int portServer = 9083, portClient = 9084; // int portServer = 3306, portClient = 3307; // H2 PgServer @@ -66,7 +66,7 @@ private void loop(String... args) throws Exception { /** * This is the working thread of the TCP redirector. */ - private class TcpRedirectThread implements Runnable { + private static class TcpRedirectThread implements Runnable { private static final int STATE_INIT_CLIENT = 0, STATE_REGULAR = 1; private final Socket read, write; @@ -92,7 +92,7 @@ String readStringNull(InputStream in) throws IOException { return buff.toString(); } - private void println(String s) { + private static void println(String s) { if (DEBUG) { System.out.println(s); } @@ -385,7 +385,7 @@ private boolean processServer(InputStream inStream, break; } String msg = readStringNull(dataIn); - // http://developer.postgresql.org/pgdocs/postgres/protocol-error-fields.html + // https://www.postgresql.org/docs/devel/protocol-error-fields.html // S Severity // C Code: the SQLSTATE code // M Message @@ -420,7 +420,7 @@ private boolean processServer(InputStream inStream, break; } String msg = readStringNull(dataIn); - // http://developer.postgresql.org/pgdocs/postgres/protocol-error-fields.html + // https://www.postgresql.org/docs/devel/protocol-error-fields.html // S Severity // C Code: the SQLSTATE code // M Message diff --git a/h2/src/tools/org/h2/dev/net/package.html b/h2/src/tools/org/h2/dev/net/package.html index 283a835773..4900db526b 100644 --- a/h2/src/tools/org/h2/dev/net/package.html +++ b/h2/src/tools/org/h2/dev/net/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java b/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java index cab11333b0..7deed4834c 100644 --- 
a/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java +++ b/h2/src/tools/org/h2/dev/security/SecureKeyStoreBuilder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.security; diff --git a/h2/src/tools/org/h2/dev/security/package.html b/h2/src/tools/org/h2/dev/security/package.html index bf3552fec2..cb45245dd9 100644 --- a/h2/src/tools/org/h2/dev/security/package.html +++ b/h2/src/tools/org/h2/dev/security/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java b/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java index 4e3014a683..a442391953 100644 --- a/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java +++ b/h2/src/tools/org/h2/dev/sort/InPlaceStableMergeSort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.sort; diff --git a/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java b/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java index 4532d74c92..dd0632e6ff 100644 --- a/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java +++ b/h2/src/tools/org/h2/dev/sort/InPlaceStableQuicksort.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.sort; @@ -143,7 +143,7 @@ private int binarySearch(T x, int from, int to) { * @param pivot the pivot * @param from the index of the first element * @param to the index of the last element - * @return the the first element of the second partition + * @return the first element of the second partition */ private int partition(T pivot, int from, int to) { if (to - from < temp.length) { @@ -163,7 +163,7 @@ private int partition(T pivot, int from, int to) { * @param pivot the pivot * @param from the index of the first element * @param to the index of the last element - * @return the the first element of the second partition + * @return the first element of the second partition */ private int partitionSmall(T pivot, int from, int to) { int tempIndex = 0, dataIndex = from; diff --git a/h2/src/tools/org/h2/dev/sort/package.html b/h2/src/tools/org/h2/dev/sort/package.html index 4dee4873b8..3632158b6a 100644 --- a/h2/src/tools/org/h2/dev/sort/package.html +++ b/h2/src/tools/org/h2/dev/sort/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/dev/util/AnsCompression.java b/h2/src/tools/org/h2/dev/util/AnsCompression.java new file mode 100644 index 0000000000..c27c8e37f4 --- /dev/null +++ b/h2/src/tools/org/h2/dev/util/AnsCompression.java @@ -0,0 +1,188 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.dev.util; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * An ANS (Asymmetric Numeral Systems) compression tool. + * It uses the range variant. 
+ */ +public class AnsCompression { + + private static final long TOP = 1L << 24; + private static final int SHIFT = 12; + private static final int MASK = (1 << SHIFT) - 1; + private static final long MAX = (TOP >> SHIFT) << 32; + + private AnsCompression() { + // a utility class + } + + /** + * Count the frequencies of codes in the data, and increment the target + * frequency table. + * + * @param freq the target frequency table + * @param data the data + */ + public static void countFrequencies(int[] freq, byte[] data) { + for (byte x : data) { + freq[x & 0xff]++; + } + } + + /** + * Scale the frequencies to a new total. Frequencies of 0 are kept as 0; + * larger frequencies result in at least 1. + * + * @param freq the (source and target) frequency table + * @param total the target total (sum of all frequencies) + */ + public static void scaleFrequencies(int[] freq, int total) { + int len = freq.length, sum = 0; + for (int x : freq) { + sum += x; + } + // the list of: (error << 8) + index + int[] errors = new int[len]; + int totalError = -total; + for (int i = 0; i < len; i++) { + int old = freq[i]; + if (old == 0) { + continue; + } + int ideal = (int) (old * total * 256L / sum); + // 1 too high so we can decrement if needed + int x = 1 + ideal / 256; + freq[i] = x; + totalError += x; + errors[i] = ((x * 256 - ideal) << 8) + i; + } + // we don't need to sort, we could just calculate + // which one is the nth element - but sorting is simpler + Arrays.sort(errors); + if (totalError < 0) { + // integer overflow + throw new IllegalArgumentException(); + } + while (totalError > 0) { + for (int i = 0; totalError > 0 && i < len; i++) { + int index = errors[i] & 0xff; + if (freq[index] > 1) { + freq[index]--; + totalError--; + } + } + } + } + + /** + * Generate the cumulative frequency table. 
+ * + * @param freq the source frequency table + * @return the cumulative table, with one entry more + */ + static int[] generateCumulativeFrequencies(int[] freq) { + int len = freq.length; + int[] cumulativeFreq = new int[len + 1]; + for (int i = 0, x = 0; i < len; i++) { + x += freq[i]; + cumulativeFreq[i + 1] = x; + } + return cumulativeFreq; + } + + /** + * Generate the frequency-to-code table. + * + * @param cumulativeFreq the cumulative frequency table + * @return the result + */ + private static byte[] generateFrequencyToCode(int[] cumulativeFreq) { + byte[] freqToCode = new byte[1 << SHIFT]; + int x = 0; + byte s = -1; + for (int i : cumulativeFreq) { + while (x < i) { + freqToCode[x++] = s; + } + s++; + } + return freqToCode; + } + + /** + * Encode the data. + * + * @param freq the frequency table (will be scaled) + * @param data the source data (uncompressed) + * @return the compressed data + */ + public static byte[] encode(int[] freq, byte[] data) { + scaleFrequencies(freq, 1 << SHIFT); + int[] cumulativeFreq = generateCumulativeFrequencies(freq); + ByteBuffer buff = ByteBuffer.allocate(data.length * 2); + buff = encode(data, freq, cumulativeFreq, buff); + return Arrays.copyOfRange(buff.array(), + buff.arrayOffset() + buff.position(), buff.arrayOffset() + buff.limit()); + } + + private static ByteBuffer encode(byte[] data, int[] freq, + int[] cumulativeFreq, ByteBuffer buff) { + long state = TOP; + // encoding happens backwards + int b = buff.limit(); + for (int p = data.length - 1; p >= 0; p--) { + int x = data[p] & 0xff; + int f = freq[x]; + while (state >= MAX * f) { + b -= 4; + buff.putInt(b, (int) state); + state >>>= 32; + } + state = ((state / f) << SHIFT) + (state % f) + cumulativeFreq[x]; + } + b -= 8; + buff.putLong(b, state); + buff.position(b); + return buff.slice(); + } + + /** + * Decode the data. 
+ * + * @param freq the frequency table (will be scaled) + * @param data the compressed data + * @param length the target length + * @return the uncompressed result + */ + public static byte[] decode(int[] freq, byte[] data, int length) { + scaleFrequencies(freq, 1 << SHIFT); + int[] cumulativeFreq = generateCumulativeFrequencies(freq); + byte[] freqToCode = generateFrequencyToCode(cumulativeFreq); + byte[] out = new byte[length]; + decode(data, freq, cumulativeFreq, freqToCode, out); + return out; + } + + private static void decode(byte[] data, int[] freq, int[] cumulativeFreq, + byte[] freqToCode, byte[] out) { + ByteBuffer buff = ByteBuffer.wrap(data); + long state = buff.getLong(); + for (int i = 0, size = out.length; i < size; i++) { + int x = (int) state & MASK; + int c = freqToCode[x] & 0xff; + out[i] = (byte) c; + state = (freq[c] * (state >> SHIFT)) + x - cumulativeFreq[c]; + while (state < TOP) { + state = (state << 32) | (buff.getInt() & 0xffffffffL); + } + } + } + +} diff --git a/h2/src/tools/org/h2/dev/util/ArrayUtils.java b/h2/src/tools/org/h2/dev/util/ArrayUtils.java index 3753a570be..657d7eafdb 100644 --- a/h2/src/tools/org/h2/dev/util/ArrayUtils.java +++ b/h2/src/tools/org/h2/dev/util/ArrayUtils.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; diff --git a/h2/src/tools/org/h2/dev/util/Base64.java b/h2/src/tools/org/h2/dev/util/Base64.java index 345b167b93..3606adfb02 100644 --- a/h2/src/tools/org/h2/dev/util/Base64.java +++ b/h2/src/tools/org/h2/dev/util/Base64.java @@ -1,11 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. 
Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Random; +import java.util.concurrent.TimeUnit; /** * This class converts binary to base64 and vice versa. @@ -68,7 +69,7 @@ public static void main(String... args) { private static void test(boolean fast, int len) { Random random = new Random(10); - long time = System.currentTimeMillis(); + long time = System.nanoTime(); byte[] bin = new byte[len]; random.nextBytes(bin); for (int i = 0; i < len; i++) { @@ -82,8 +83,8 @@ private static void test(boolean fast, int len) { } test(bin, dec); } - time = System.currentTimeMillis() - time; - System.out.println("fast=" + fast + " time=" + time); + time = System.nanoTime() - time; + System.out.println("fast=" + fast + " time=" + TimeUnit.NANOSECONDS.toMillis(time)); } private static void test(byte[] in, byte[] out) { diff --git a/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java b/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java new file mode 100644 index 0000000000..e0cacb29b8 --- /dev/null +++ b/h2/src/tools/org/h2/dev/util/BinaryArithmeticStream.java @@ -0,0 +1,269 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.dev.util; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.PriorityQueue; + +/** + * A binary arithmetic stream. + */ +public class BinaryArithmeticStream { + + /** + * The maximum probability. + */ + public static final int MAX_PROBABILITY = (1 << 12) - 1; + + /** + * The low marker. + */ + protected int low; + + /** + * The high marker. + */ + protected int high = 0xffffffff; + + /** + * A binary arithmetic input stream. 
+ */ + public static class In extends BinaryArithmeticStream { + + private final InputStream in; + private int data; + + public In(InputStream in) throws IOException { + this.in = in; + data = ((in.read() & 0xff) << 24) | + ((in.read() & 0xff) << 16) | + ((in.read() & 0xff) << 8) | + (in.read() & 0xff); + } + + /** + * Read a bit. + * + * @param probability the probability that the value is true + * @return the value + */ + public boolean readBit(int probability) throws IOException { + int split = low + probability * ((high - low) >>> 12); + boolean value; + // compare unsigned + if (data + Integer.MIN_VALUE > split + Integer.MIN_VALUE) { + low = split + 1; + value = false; + } else { + high = split; + value = true; + } + while (low >>> 24 == high >>> 24) { + data = (data << 8) | (in.read() & 0xff); + low <<= 8; + high = (high << 8) | 0xff; + } + return value; + } + + /** + * Read a value that is stored as a Golomb code. + * + * @param divisor the divisor + * @return the value + */ + public int readGolomb(int divisor) throws IOException { + int q = 0; + while (readBit(MAX_PROBABILITY / 2)) { + q++; + } + int bit = 31 - Integer.numberOfLeadingZeros(divisor - 1); + int r = 0; + if (bit >= 0) { + int cutOff = (2 << bit) - divisor; + for (; bit > 0; bit--) { + r = (r << 1) + (readBit(MAX_PROBABILITY / 2) ? 1 : 0); + } + if (r >= cutOff) { + r = (r << 1) + (readBit(MAX_PROBABILITY / 2) ? 1 : 0) - cutOff; + } + } + return q * divisor + r; + } + + } + + /** + * A binary arithmetic output stream. + */ + public static class Out extends BinaryArithmeticStream { + + private final OutputStream out; + + public Out(OutputStream out) { + this.out = out; + } + + /** + * Write a bit. 
+ * + * @param value the value + * @param probability the probability that the value is true + */ + public void writeBit(boolean value, int probability) throws IOException { + int split = low + probability * ((high - low) >>> 12); + if (value) { + high = split; + } else { + low = split + 1; + } + while (low >>> 24 == high >>> 24) { + out.write(high >> 24); + low <<= 8; + high = (high << 8) | 0xff; + } + } + + /** + * Flush the stream. + */ + public void flush() throws IOException { + out.write(high >> 24); + out.write(high >> 16); + out.write(high >> 8); + out.write(high); + } + + /** + * Write the Golomb code of a value. + * + * @param divisor the divisor + * @param value the value + */ + public void writeGolomb(int divisor, int value) throws IOException { + int q = value / divisor; + for (int i = 0; i < q; i++) { + writeBit(true, MAX_PROBABILITY / 2); + } + writeBit(false, MAX_PROBABILITY / 2); + int r = value - q * divisor; + int bit = 31 - Integer.numberOfLeadingZeros(divisor - 1); + if (r < ((2 << bit) - divisor)) { + bit--; + } else { + r += (2 << bit) - divisor; + } + for (; bit >= 0; bit--) { + writeBit(((r >>> bit) & 1) == 1, MAX_PROBABILITY / 2); + } + } + + } + + /** + * A Huffman code table / tree. + */ + public static class Huffman { + + private final int[] codes; + private final Node tree; + + public Huffman(int[] frequencies) { + PriorityQueue queue = new PriorityQueue<>(); + for (int i = 0; i < frequencies.length; i++) { + int f = frequencies[i]; + if (f > 0) { + queue.offer(new Node(i, f)); + } + } + while (queue.size() > 1) { + queue.offer(new Node(queue.poll(), queue.poll())); + } + codes = new int[frequencies.length]; + tree = queue.poll(); + if (tree != null) { + tree.initCodes(codes, 1); + } + } + + /** + * Write a value. 
+ * + * @param out the output stream + * @param value the value to write + */ + public void write(Out out, int value) throws IOException { + int code = codes[value]; + int bitCount = 30 - Integer.numberOfLeadingZeros(code); + Node n = tree; + for (int i = bitCount; i >= 0; i--) { + boolean goRight = ((code >> i) & 1) == 1; + int prob = (int) ((long) MAX_PROBABILITY * + n.right.frequency / n.frequency); + out.writeBit(goRight, prob); + n = goRight ? n.right : n.left; + } + } + + /** + * Read a value. + * + * @param in the input stream + * @return the value + */ + public int read(In in) throws IOException { + Node n = tree; + while (n.left != null) { + int prob = (int) ((long) MAX_PROBABILITY * + n.right.frequency / n.frequency); + boolean goRight = in.readBit(prob); + n = goRight ? n.right : n.left; + } + return n.value; + } + + } + + /** + * A Huffman code node. + */ + private static class Node implements Comparable { + + int value; + Node left; + Node right; + final int frequency; + + Node(int value, int frequency) { + this.frequency = frequency; + this.value = value; + } + + Node(Node left, Node right) { + this.left = left; + this.right = right; + this.frequency = left.frequency + right.frequency; + } + + @Override + public int compareTo(Node o) { + return frequency - o.frequency; + } + + void initCodes(int[] codes, int bits) { + if (left == null) { + codes[value] = bits; + } else { + left.initCodes(codes, bits << 1); + right.initCodes(codes, (bits << 1) + 1); + } + } + + } + +} diff --git a/h2/src/tools/org/h2/dev/util/BitStream.java b/h2/src/tools/org/h2/dev/util/BitStream.java new file mode 100644 index 0000000000..7968a4a4f2 --- /dev/null +++ b/h2/src/tools/org/h2/dev/util/BitStream.java @@ -0,0 +1,298 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
+ * Initial Developer: H2 Group + */ +package org.h2.dev.util; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.PriorityQueue; + +/** + * A stream that supports Golomb and Huffman coding. + */ +public class BitStream { + + private BitStream() { + // a utility class + } + + /** + * A bit input stream. + */ + public static class In { + + private final InputStream in; + private int current = 0x10000; + + public In(InputStream in) { + this.in = in; + } + + /** + * Read a value that is stored as a Golomb code. + * + * @param divisor the divisor + * @return the value + */ + public int readGolomb(int divisor) { + int q = 0; + while (readBit() == 1) { + q++; + } + int bit = 31 - Integer.numberOfLeadingZeros(divisor - 1); + int r = 0; + if (bit >= 0) { + int cutOff = (2 << bit) - divisor; + for (; bit > 0; bit--) { + r = (r << 1) + readBit(); + } + if (r >= cutOff) { + r = (r << 1) + readBit() - cutOff; + } + } + return q * divisor + r; + } + + /** + * Read a bit. + * + * @return the bit (0 or 1) + */ + public int readBit() { + if (current >= 0x10000) { + try { + current = 0x100 | in.read(); + if (current < 0) { + return -1; + } + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + int bit = (current >>> 7) & 1; + current <<= 1; + return bit; + } + + /** + * Close the stream. This will also close the underlying stream. + */ + public void close() { + try { + in.close(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + } + + /** + * A bit output stream. + */ + public static class Out { + + private final OutputStream out; + private int current = 1; + + public Out(OutputStream out) { + this.out = out; + } + + /** + * Write the Golomb code of a value. 
+ * + * @param divisor the divisor + * @param value the value + */ + public void writeGolomb(int divisor, int value) { + int q = value / divisor; + for (int i = 0; i < q; i++) { + writeBit(1); + } + writeBit(0); + int r = value - q * divisor; + int bit = 31 - Integer.numberOfLeadingZeros(divisor - 1); + if (r < ((2 << bit) - divisor)) { + bit--; + } else { + r += (2 << bit) - divisor; + } + for (; bit >= 0; bit--) { + writeBit((r >>> bit) & 1); + } + } + + /** + * Get the size of the Golomb code for this value. + * + * @param divisor the divisor + * @param value the value + * @return the number of bits + */ + public static int getGolombSize(int divisor, int value) { + int q = value / divisor; + int r = value - q * divisor; + int bit = 31 - Integer.numberOfLeadingZeros(divisor - 1); + if (r < ((2 << bit) - divisor)) { + bit--; + } + return bit + q + 2; + } + + /** + * Write a bit. + * + * @param bit the bit (0 or 1) + */ + public void writeBit(int bit) { + current = (current << 1) + bit; + if (current > 0xff) { + try { + out.write(current & 0xff); + } catch (IOException e) { + throw new IllegalStateException(e); + } + current = 1; + } + } + + /** + * Flush the stream. This will at write at most 7 '0' bits. + * This will also flush the underlying stream. + */ + public void flush() { + while (current > 1) { + writeBit(0); + } + try { + out.flush(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + /** + * Flush and close the stream. + * This will also close the underlying stream. + */ + public void close() { + flush(); + try { + out.close(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + + } + + } + + /** + * A Huffman code. 
+ */ + public static class Huffman { + + private final int[] codes; + private final Node tree; + + public Huffman(int[] frequencies) { + PriorityQueue queue = new PriorityQueue<>(); + for (int i = 0; i < frequencies.length; i++) { + int f = frequencies[i]; + if (f > 0) { + queue.offer(new Node(i, f)); + } + } + while (queue.size() > 1) { + queue.offer(new Node(queue.poll(), queue.poll())); + } + codes = new int[frequencies.length]; + tree = queue.poll(); + if (tree != null) { + tree.initCodes(codes, 1); + } + } + + /** + * Write a value. + * + * @param out the output stream + * @param value the value to write + */ + public void write(BitStream.Out out, int value) { + int code = codes[value]; + int bitCount = 30 - Integer.numberOfLeadingZeros(code); + for (int i = bitCount; i >= 0; i--) { + out.writeBit((code >> i) & 1); + } + } + + /** + * Read a value. + * + * @param in the input stream + * @return the value + */ + public int read(BitStream.In in) { + Node n = tree; + while (n.left != null) { + n = in.readBit() == 1 ? n.right : n.left; + } + return n.value; + } + + /** + * Get the number of bits of the Huffman code for this value. + * + * @param value the value + * @return the number of bits + */ + public int getBitCount(int value) { + int code = codes[value]; + return 30 - Integer.numberOfLeadingZeros(code); + } + + } + + /** + * A Huffman code node. 
+ */ + private static class Node implements Comparable { + + int value; + Node left; + Node right; + private final int frequency; + + Node(int value, int frequency) { + this.frequency = frequency; + this.value = value; + } + + Node(Node left, Node right) { + this.left = left; + this.right = right; + this.frequency = left.frequency + right.frequency; + } + + @Override + public int compareTo(Node o) { + return frequency - o.frequency; + } + + void initCodes(int[] codes, int bits) { + if (left == null) { + codes[value] = bits; + } else { + left.initCodes(codes, bits << 1); + right.initCodes(codes, (bits << 1) + 1); + } + } + + } + +} diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java index 03cb534b6e..bf82210857 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedList.java @@ -1,15 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - - /** * A very simple linked list that supports concurrent access. * Internally, it uses immutable objects. @@ -22,7 +19,7 @@ public class ConcurrentLinkedList { /** * The sentinel entry. */ - static final Entry NULL = new Entry(null, null); + static final Entry NULL = new Entry<>(null, null); /** * The head entry. 
@@ -112,11 +109,6 @@ public K next() { return x; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } @@ -135,9 +127,9 @@ private static class Entry { @SuppressWarnings("unchecked") static Entry append(Entry list, K obj) { if (list == NULL) { - return new Entry(obj, (Entry) NULL); + return new Entry<>(obj, (Entry) NULL); } - return new Entry(list.obj, append(list.next, obj)); + return new Entry<>(list.obj, append(list.next, obj)); } @SuppressWarnings("unchecked") @@ -145,7 +137,7 @@ static Entry removeLast(Entry list) { if (list == NULL || list.next == NULL) { return (Entry) NULL; } - return new Entry(list.obj, removeLast(list.next)); + return new Entry<>(list.obj, removeLast(list.next)); } } diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java index a74d3c04e2..72a2ebd786 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentLinkedListWithTail.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - /** * A very simple linked list that supports concurrent access. 
* @@ -52,7 +50,7 @@ public K peekLast() { * @param obj the element */ public void add(K obj) { - Entry x = new Entry(obj); + Entry x = new Entry<>(obj); Entry t = tail; if (t != null) { t.next = x; @@ -132,11 +130,6 @@ public K next() { return x; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ConcurrentRing.java b/h2/src/tools/org/h2/dev/util/ConcurrentRing.java index e55a11ecb6..73a06edd5e 100644 --- a/h2/src/tools/org/h2/dev/util/ConcurrentRing.java +++ b/h2/src/tools/org/h2/dev/util/ConcurrentRing.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - /** * A ring buffer that supports concurrent access. * @@ -144,11 +142,6 @@ public K next() { return buffer[getIndex(readPos + offset++)]; } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/FileContentHash.java b/h2/src/tools/org/h2/dev/util/FileContentHash.java index 0d63a04c2e..f815c37f6e 100644 --- a/h2/src/tools/org/h2/dev/util/FileContentHash.java +++ b/h2/src/tools/org/h2/dev/util/FileContentHash.java @@ -1,12 +1,13 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Collections; @@ -14,7 +15,6 @@ import java.util.List; import org.h2.store.fs.FileUtils; -import org.h2.util.New; import org.h2.util.SortedProperties; import org.h2.util.StringUtils; @@ -31,7 +31,7 @@ public class FileContentHash { private static final boolean WRITE_HASH_INDEX = true; private static final String HASH_INDEX = ".hash.prop"; private static final int MIN_SIZE = 0; - private final HashMap hashes = New.hashMap(); + private final HashMap hashes = new HashMap<>(); private long nextLog; /** @@ -99,8 +99,8 @@ private Info hash(String path) throws IOException { checkCollision(f, length, StringUtils.convertHexToBytes(hash)); } propNew.put(entry, hash); - mdDir.update(entry.getBytes("UTF-8")); - mdDir.update(hash.getBytes("UTF-8")); + mdDir.update(entry.getBytes(StandardCharsets.UTF_8)); + mdDir.update(hash.getBytes(StandardCharsets.UTF_8)); } String oldFile = propOld.toString(); String newFile = propNew.toString(); diff --git a/h2/src/tools/org/h2/dev/util/FileViewer.java b/h2/src/tools/org/h2/dev/util/FileViewer.java index 8f892bcd09..d92cd51f36 100644 --- a/h2/src/tools/org/h2/dev/util/FileViewer.java +++ b/h2/src/tools/org/h2/dev/util/FileViewer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -10,6 +10,7 @@ import java.io.RandomAccessFile; import java.sql.SQLException; import java.util.ArrayList; +import java.util.concurrent.TimeUnit; import org.h2.message.DbException; import org.h2.util.Tool; @@ -56,13 +57,13 @@ public void runTool(String... args) throws SQLException { } else if (arg.equals("-find")) { find = args[++i]; } else if (arg.equals("-start")) { - start = Long.decode(args[++i]).longValue(); + start = Long.decode(args[++i]); } else if (arg.equals("-head")) { head = true; } else if (arg.equals("-tail")) { tail = true; } else if (arg.equals("-lines")) { - lines = Integer.decode(args[++i]).intValue(); + lines = Integer.decode(args[++i]); } else if (arg.equals("-quiet")) { quiet = true; } else if (arg.equals("-help") || arg.equals("-?")) { @@ -129,7 +130,7 @@ private static long find(RandomAccessFile file, byte[] find, boolean quiet) long length = file.length(); int bufferSize = 4 * 1024; byte[] data = new byte[bufferSize * 2]; - long last = System.currentTimeMillis(); + long last = System.nanoTime(); while (pos < length) { System.arraycopy(data, bufferSize, data, 0, bufferSize); if (pos + bufferSize > length) { @@ -137,8 +138,8 @@ private static long find(RandomAccessFile file, byte[] find, boolean quiet) return find(data, find, (int) (bufferSize + length - pos - find.length)); } if (!quiet) { - long now = System.currentTimeMillis(); - if (now > last + 5000) { + long now = System.nanoTime(); + if (now > last + TimeUnit.SECONDS.toNanos(5)) { System.out.println((100 * pos / length) + "%"); last = now; } @@ -178,7 +179,7 @@ private static void list(long pos, String header, ArrayList list) { private static ArrayList readLines(RandomAccessFile file, int maxLines) throws IOException { - ArrayList lines = new ArrayList(); + ArrayList lines = new ArrayList<>(); ByteArrayOutputStream buff = new ByteArrayOutputStream(100); boolean lastNewline = false; while (maxLines > 0) { diff --git 
a/h2/src/tools/org/h2/dev/util/ImmutableArray.java b/h2/src/tools/org/h2/dev/util/ImmutableArray.java index d8849bdd2b..2cdcfb239b 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray.java @@ -1,13 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Arrays; import java.util.Iterator; - import org.h2.mvstore.DataUtils; /** @@ -17,7 +16,7 @@ */ public final class ImmutableArray implements Iterable { - private static final ImmutableArray EMPTY = new ImmutableArray( + private static final ImmutableArray EMPTY = new ImmutableArray<>( new Object[0]); /** @@ -56,9 +55,9 @@ public int length() { * @return the new immutable array */ public ImmutableArray set(int index, K obj) { - K[] array = Arrays.copyOf(this.array, this.array.length); + K[] array = this.array.clone(); array[index] = obj; - return new ImmutableArray(array); + return new ImmutableArray<>(array); } /** @@ -74,7 +73,7 @@ public ImmutableArray insert(int index, K obj) { K[] array = (K[]) new Object[len]; DataUtils.copyWithGap(this.array, array, this.array.length, index); array[index] = obj; - return new ImmutableArray(array); + return new ImmutableArray<>(array); } /** @@ -88,7 +87,7 @@ public ImmutableArray remove(int index) { @SuppressWarnings("unchecked") K[] array = (K[]) new Object[len]; DataUtils.copyExcept(this.array, array, this.array.length, index); - return new ImmutableArray(array); + return new ImmutableArray<>(array); } /** @@ -99,11 +98,7 @@ public ImmutableArray remove(int index) { * @return the new immutable array */ public ImmutableArray subArray(int fromIndex, int toIndex) { - int len = toIndex - fromIndex; - 
@SuppressWarnings("unchecked") - K[] array = (K[]) new Object[len]; - System.arraycopy(this.array, fromIndex, array, 0, toIndex - fromIndex); - return new ImmutableArray(array); + return new ImmutableArray<>(Arrays.copyOfRange(array, fromIndex, toIndex)); } /** @@ -112,9 +107,9 @@ public ImmutableArray subArray(int fromIndex, int toIndex) { * @param array the data * @return the new immutable array */ - @SuppressWarnings("unchecked") + @SafeVarargs public static ImmutableArray create(K... array) { - return new ImmutableArray(array); + return new ImmutableArray<>(array); } /** @@ -168,11 +163,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ImmutableArray2.java b/h2/src/tools/org/h2/dev/util/ImmutableArray2.java index e7d2529e14..3e4130fbfe 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray2.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray2.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -8,7 +8,6 @@ import java.util.Arrays; import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean; - import org.h2.mvstore.DataUtils; /** @@ -18,7 +17,7 @@ */ public final class ImmutableArray2 implements Iterable { - private static final ImmutableArray2 EMPTY = new ImmutableArray2( + private static final ImmutableArray2 EMPTY = new ImmutableArray2<>( new Object[0], 0); /** @@ -73,7 +72,7 @@ public int length() { public ImmutableArray2 set(int index, K obj) { K[] a2 = Arrays.copyOf(array, length); a2[index] = obj; - return new ImmutableArray2(a2, length); + return new ImmutableArray2<>(a2, length); } /** @@ -95,7 +94,7 @@ public ImmutableArray2 insert(int index, K obj) { canExtend = null; if (array.length > index && x.getAndSet(false)) { array[index] = obj; - return new ImmutableArray2(array, len, true); + return new ImmutableArray2<>(array, len, true); } } extendable = true; @@ -107,7 +106,7 @@ public ImmutableArray2 insert(int index, K obj) { K[] a2 = (K[]) new Object[newLen]; DataUtils.copyWithGap(array, a2, length, index); a2[index] = obj; - return new ImmutableArray2(a2, len, extendable); + return new ImmutableArray2<>(a2, len, extendable); } /** @@ -119,12 +118,12 @@ public ImmutableArray2 insert(int index, K obj) { public ImmutableArray2 remove(int index) { int len = length - 1; if (index == len) { - return new ImmutableArray2(array, len); + return new ImmutableArray2<>(array, len); } @SuppressWarnings("unchecked") K[] a2 = (K[]) new Object[len]; DataUtils.copyExcept(array, a2, length, index); - return new ImmutableArray2(a2, len); + return new ImmutableArray2<>(a2, len); } /** @@ -137,12 +136,9 @@ public ImmutableArray2 remove(int index) { public ImmutableArray2 subArray(int fromIndex, int toIndex) { int len = toIndex - fromIndex; if (fromIndex == 0) { - return new ImmutableArray2(array, len); + return new ImmutableArray2<>(array, len); } - @SuppressWarnings("unchecked") - K[] a2 = 
(K[]) new Object[len]; - System.arraycopy(array, fromIndex, a2, 0, toIndex - fromIndex); - return new ImmutableArray2(a2, len); + return new ImmutableArray2<>(Arrays.copyOfRange(array, fromIndex, toIndex), len); } /** @@ -151,9 +147,9 @@ public ImmutableArray2 subArray(int fromIndex, int toIndex) { * @param array the data * @return the new immutable array */ - @SuppressWarnings("unchecked") + @SafeVarargs public static ImmutableArray2 create(K... array) { - return new ImmutableArray2(array, array.length); + return new ImmutableArray2<>(array, array.length); } /** @@ -207,11 +203,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } diff --git a/h2/src/tools/org/h2/dev/util/ImmutableArray3.java b/h2/src/tools/org/h2/dev/util/ImmutableArray3.java index 0858fd1eae..93cde7be31 100644 --- a/h2/src/tools/org/h2/dev/util/ImmutableArray3.java +++ b/h2/src/tools/org/h2/dev/util/ImmutableArray3.java @@ -1,14 +1,12 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; import java.util.Iterator; -import org.h2.mvstore.DataUtils; - /** * An immutable array. * @@ -18,7 +16,7 @@ public abstract class ImmutableArray3 implements Iterable { private static final int MAX_LEVEL = 4; - private static final ImmutableArray3 EMPTY = new Plain(new Object[0]); + private static final ImmutableArray3 EMPTY = new Plain<>(new Object[0]); /** * Get the length. 
@@ -75,7 +73,7 @@ public ImmutableArray3 subArray(int fromIndex, int toIndex) { for (int i = 0; i < len; i++) { array[i] = get(fromIndex + i); } - return new Plain(array); + return new Plain<>(array); } /** @@ -84,9 +82,9 @@ public ImmutableArray3 subArray(int fromIndex, int toIndex) { * @param array the data * @return the new immutable array */ - @SuppressWarnings("unchecked") + @SafeVarargs public static ImmutableArray3 create(K... array) { - return new Plain(array); + return new Plain<>(array); } /** @@ -153,11 +151,6 @@ public K next() { return a.get(index++); } - @Override - public void remove() { - throw DataUtils.newUnsupportedOperationException("remove"); - } - }; } @@ -190,17 +183,17 @@ public int length() { @Override public ImmutableArray3 set(int index, K obj) { - return new Set(this, index, obj); + return new Set<>(this, index, obj); } @Override public ImmutableArray3 insert(int index, K obj) { - return new Insert(this, index, obj); + return new Insert<>(this, index, obj); } @Override public ImmutableArray3 remove(int index) { - return new Remove(this, index); + return new Remove<>(this, index); } /** @@ -219,7 +212,7 @@ static ImmutableArray3 set(ImmutableArray3 base, int index, K obj) { for (int i = 0; i < len; i++) { array[i] = i == index ? obj : base.get(i); } - return new Plain(array); + return new Plain<>(array); } /** @@ -238,7 +231,7 @@ static ImmutableArray3 insert(ImmutableArray3 base, int index, K obj) for (int i = 0; i < len; i++) { array[i] = i == index ? obj : i < index ? base.get(i) : base.get(i - 1); } - return new Plain(array); + return new Plain<>(array); } /** @@ -256,7 +249,7 @@ static ImmutableArray3 remove(ImmutableArray3 base, int index) { for (int i = 0; i < len; i++) { array[i] = i < index ? 
base.get(i) : base.get(i + 1); } - return new Plain(array); + return new Plain<>(array); } @Override @@ -297,9 +290,9 @@ public K get(int index) { @Override public ImmutableArray3 set(int index, K obj) { if (index == this.index) { - return new Set(base, index, obj); + return new Set<>(base, index, obj); } else if (level() < MAX_LEVEL) { - return new Set(this, index, obj); + return new Set<>(this, index, obj); } return Plain.set(this, index, obj); } @@ -307,7 +300,7 @@ public ImmutableArray3 set(int index, K obj) { @Override public ImmutableArray3 insert(int index, K obj) { if (level() < MAX_LEVEL) { - return new Insert(this, index, obj); + return new Insert<>(this, index, obj); } return Plain.insert(this, index, obj); } @@ -315,7 +308,7 @@ public ImmutableArray3 insert(int index, K obj) { @Override public ImmutableArray3 remove(int index) { if (level() < MAX_LEVEL) { - return new Remove(this, index); + return new Remove<>(this, index); } return Plain.remove(this, index); } @@ -348,7 +341,7 @@ static class Insert extends ImmutableArray3 { @Override public ImmutableArray3 set(int index, K obj) { if (level() < MAX_LEVEL) { - return new Set(this, index, obj); + return new Set<>(this, index, obj); } return Plain.set(this, index, obj); } @@ -356,7 +349,7 @@ public ImmutableArray3 set(int index, K obj) { @Override public ImmutableArray3 insert(int index, K obj) { if (level() < MAX_LEVEL) { - return new Insert(this, index, obj); + return new Insert<>(this, index, obj); } return Plain.insert(this, index, obj); } @@ -366,7 +359,7 @@ public ImmutableArray3 remove(int index) { if (index == this.index) { return base; } else if (level() < MAX_LEVEL) { - return new Remove(this, index); + return new Remove<>(this, index); } return Plain.remove(this, index); } @@ -412,7 +405,7 @@ static class Remove extends ImmutableArray3 { @Override public ImmutableArray3 set(int index, K obj) { if (level() < MAX_LEVEL) { - return new Set(this, index, obj); + return new Set<>(this, index, obj); } 
return Plain.set(this, index, obj); } @@ -422,7 +415,7 @@ public ImmutableArray3 insert(int index, K obj) { if (index == this.index) { return base.set(index, obj); } else if (level() < MAX_LEVEL) { - return new Insert(this, index, obj); + return new Insert<>(this, index, obj); } return Plain.insert(this, index, obj); } @@ -430,7 +423,7 @@ public ImmutableArray3 insert(int index, K obj) { @Override public ImmutableArray3 remove(int index) { if (level() < MAX_LEVEL) { - return new Remove(this, index); + return new Remove<>(this, index); } return Plain.remove(this, index); } diff --git a/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java b/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java index e49265dbab..4a45487e7f 100644 --- a/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java +++ b/h2/src/tools/org/h2/dev/util/JavaProcessKiller.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; @@ -8,6 +8,7 @@ import java.io.ByteArrayOutputStream; import java.io.InputStream; import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.Map.Entry; import java.util.TreeMap; @@ -70,7 +71,7 @@ private void run(String... args) { private static TreeMap getProcesses() { String processList = exec("jps", "-l"); String[] processes = processList.split("\n"); - TreeMap map = new TreeMap(); + TreeMap map = new TreeMap<>(); for (int i = 0; i < processes.length; i++) { String p = processes[i].trim(); int idx = p.indexOf(' '); @@ -91,11 +92,11 @@ private static String exec(String... 
args) { copyInThread(p.getInputStream(), out); copyInThread(p.getErrorStream(), err); p.waitFor(); - String e = new String(err.toByteArray(), "UTF-8"); + String e = new String(err.toByteArray(), StandardCharsets.UTF_8); if (e.length() > 0) { throw new RuntimeException(e); } - String output = new String(out.toByteArray(), "UTF-8"); + String output = new String(out.toByteArray(), StandardCharsets.UTF_8); return output; } catch (Exception e) { throw new RuntimeException(e); diff --git a/h2/src/tools/org/h2/dev/util/Migrate.java b/h2/src/tools/org/h2/dev/util/Migrate.java index 54d4e8930b..b9e647a88d 100644 --- a/h2/src/tools/org/h2/dev/util/Migrate.java +++ b/h2/src/tools/org/h2/dev/util/Migrate.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; @@ -14,8 +14,11 @@ import java.io.PrintStream; import java.io.RandomAccessFile; import java.net.URL; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.util.concurrent.TimeUnit; + import org.h2.engine.Constants; import org.h2.tools.RunScript; @@ -34,7 +37,7 @@ public class Migrate { private static final String PASSWORD = "sa"; private static final File OLD_H2_FILE = new File("./h2-1.2.127.jar"); private static final String DOWNLOAD_URL = - "http://repo2.maven.org/maven2/com/h2database/h2/1.2.127/h2-1.2.127.jar"; + "https://repo1.maven.org/maven2/com/h2database/h2/1.2.127/h2-1.2.127.jar"; private static final String CHECKSUM = "056e784c7cf009483366ab9cd8d21d02fe47031a"; private static final String TEMP_SCRIPT = "backup.sql"; @@ -93,7 +96,7 @@ public void execute(File file, boolean recursive, String user, "-password", password }); file.renameTo(new 
File(file.getAbsoluteFile() + ".backup")); - RunScript.execute(url, user, password, TEMP_SCRIPT, Constants.UTF8, true); + RunScript.execute(url, user, password, TEMP_SCRIPT, StandardCharsets.UTF_8, true); new File(TEMP_SCRIPT).delete(); } @@ -124,11 +127,11 @@ private void download(String target, String fileURL, String sha1Checksum) { println("Downloading " + fileURL); URL url = new URL(fileURL); InputStream in = new BufferedInputStream(url.openStream()); - long last = System.currentTimeMillis(); + long last = System.nanoTime(); int len = 0; while (true) { - long now = System.currentTimeMillis(); - if (now > last + 1000) { + long now = System.nanoTime(); + if (now > last + TimeUnit.SECONDS.toNanos(1)) { println("Downloaded " + len + " bytes"); last = now; } diff --git a/h2/src/tools/org/h2/dev/util/ReaderInputStream.java b/h2/src/tools/org/h2/dev/util/ReaderInputStream.java index 7a1f44ddd8..1bb9c6a74c 100644 --- a/h2/src/tools/org/h2/dev/util/ReaderInputStream.java +++ b/h2/src/tools/org/h2/dev/util/ReaderInputStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -12,6 +12,7 @@ import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; +import java.nio.charset.StandardCharsets; import org.h2.engine.Constants; @@ -33,7 +34,7 @@ public ReaderInputStream(Reader reader) { chars = new char[Constants.IO_BUFFER_SIZE]; this.reader = reader; out = new ByteArrayOutputStream(Constants.IO_BUFFER_SIZE); - writer = new BufferedWriter(new OutputStreamWriter(out, Constants.UTF8)); + writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8)); } private void fillBuffer() throws IOException { diff --git a/h2/src/tools/org/h2/dev/util/RemovePasswords.java b/h2/src/tools/org/h2/dev/util/RemovePasswords.java index 7c8de26094..9b915923f9 100644 --- a/h2/src/tools/org/h2/dev/util/RemovePasswords.java +++ b/h2/src/tools/org/h2/dev/util/RemovePasswords.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -9,6 +9,7 @@ import java.io.RandomAccessFile; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel.MapMode; +import java.nio.charset.StandardCharsets; import org.h2.engine.Constants; import org.h2.security.SHA256; @@ -45,7 +46,7 @@ private static void execute(String fileName) throws IOException { } buff.position(i); buff.get(data); - String s = new String(data, "UTF-8"); + String s = new String(data, StandardCharsets.UTF_8); if (!s.startsWith("CREATE USER ")) { continue; } diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java b/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java index 15b05b25a7..0405a9057e 100644 --- a/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpCleaner.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.dev.util; @@ -51,6 +51,9 @@ public class ThreadDumpCleaner { "\".*?\".*?\n java.lang.Thread.State:.*\n\t" + "at sun.nio.ch.ServerSocketChannelImpl.accept(?s).*?\n\n", + "\".*?\".*?\n java.lang.Thread.State:.*\n\t" + + "at java.net.DualStackPlainSocketImpl.accept0(?s).*\n\n", + "\".*?\".*?\n java.lang.Thread.State:.*\n\t" + "at sun.nio.ch.EPollArrayWrapper.epollWait(?s).*?\n\n", @@ -63,9 +66,12 @@ public class ThreadDumpCleaner { "\".*?\".*?\n java.lang.Thread.State:.*\n\t" + "at java.net.SocketInputStream.socketRead0(?s).*?\n\n", + "\".*?\".*?\n java.lang.Thread.State:.*\n\t" + + "at sun.nio.ch.WindowsSelectorImpl\\$SubSelector.poll0(?s).*?\n\n", + }; - private ArrayList patterns = new ArrayList(); + private final ArrayList patterns = new ArrayList<>(); { for (String s : PATTERN) { diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java b/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java index 084844621f..acac8b9372 100644 --- a/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpFilter.java @@ -1,12 +1,16 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.dev.util; -import java.io.InputStreamReader; +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.FileReader; +import java.io.FileWriter; import java.io.LineNumberReader; +import java.io.PrintWriter; /** * Filter full thread dumps from a log file. @@ -14,18 +18,25 @@ public class ThreadDumpFilter { /** - * Usage: java ThreadDumpFilter threadDump.txt - * @param a ignored + * Usage: java ThreadDumpFilter <log.txt >threadDump.txt + * + * @param a the file name */ public static void main(String... 
a) throws Exception { - LineNumberReader in = new LineNumberReader(new InputStreamReader(System.in)); + String fileName = a[0]; + LineNumberReader in = new LineNumberReader( + new BufferedReader(new FileReader(fileName))); + PrintWriter writer = new PrintWriter(new BufferedWriter( + new FileWriter(fileName + ".filtered.txt"))); for (String s; (s = in.readLine()) != null;) { if (s.startsWith("Full thread")) { do { - System.out.println(s); + writer.println(s); s = in.readLine(); } while(s != null && (s.length() == 0 || " \t\"".indexOf(s.charAt(0)) >= 0)); } } + writer.close(); + in.close(); } } diff --git a/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java b/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java new file mode 100644 index 0000000000..0ab1755a11 --- /dev/null +++ b/h2/src/tools/org/h2/dev/util/ThreadDumpInliner.java @@ -0,0 +1,55 @@ +/* + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group + */ +package org.h2.dev.util; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.LineNumberReader; +import java.io.PrintWriter; + +/** + * Convert a list of thread dumps into one line per thread. + */ +public class ThreadDumpInliner { + + /** + * Usage: java ThreadDumpInliner threadDump.txt + * + * @param a the file name + */ + public static void main(String... 
a) throws Exception { + String fileName = a[0]; + LineNumberReader in = new LineNumberReader( + new BufferedReader(new FileReader(fileName))); + PrintWriter writer = new PrintWriter(new BufferedWriter( + new FileWriter(fileName + ".lines.txt"))); + + StringBuilder buff = new StringBuilder(); + for (String s; (s = in.readLine()) != null;) { + if (s.trim().length() == 0) { + continue; + } + if (s.startsWith(" ") || s.startsWith("\t")) { + buff.append('\t').append(s.trim()); + } else { + printNonEmpty(writer, buff.toString()); + buff = new StringBuilder(s); + } + } + printNonEmpty(writer, buff.toString()); + in.close(); + writer.close(); + } + + private static void printNonEmpty(PrintWriter writer, String s) { + s = s.trim(); + if (!s.isEmpty()) { + writer.println(s); + } + } +} diff --git a/h2/src/tools/org/h2/dev/util/package.html b/h2/src/tools/org/h2/dev/util/package.html index 81efdcf8f6..39f23a4632 100644 --- a/h2/src/tools/org/h2/dev/util/package.html +++ b/h2/src/tools/org/h2/dev/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/jaqu/CompareType.java b/h2/src/tools/org/h2/jaqu/CompareType.java deleted file mode 100644 index 9a49180ee8..0000000000 --- a/h2/src/tools/org/h2/jaqu/CompareType.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * An enumeration of compare operations. 
- */ -enum CompareType { - EQUAL("=", true), - BIGGER(">", true), - BIGGER_EQUAL(">=", true), - SMALLER("<", true), - SMALLER_EQUAL("<=", true), - NOT_EQUAL("<>", true), - IS_NOT_NULL("IS NOT NULL", false), - IS_NULL("IS NULL", false), - LIKE("LIKE", true); - - private String text; - private boolean hasRightExpression; - - CompareType(String text, boolean hasRightExpression) { - this.text = text; - this.hasRightExpression = hasRightExpression; - } - - String getString() { - return text; - } - - boolean hasRightExpression() { - return hasRightExpression; - } - -} - diff --git a/h2/src/tools/org/h2/jaqu/Condition.java b/h2/src/tools/org/h2/jaqu/Condition.java deleted file mode 100644 index db782dc8ae..0000000000 --- a/h2/src/tools/org/h2/jaqu/Condition.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * A condition contains one or two operands and a compare operation. - * - * @param the operand type - */ -class Condition implements Token { - CompareType compareType; - A x, y; - - Condition(A x, A y, CompareType compareType) { - this.compareType = compareType; - this.x = x; - this.y = y; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - query.appendSQL(stat, x); - stat.appendSQL(" "); - stat.appendSQL(compareType.getString()); - if (compareType.hasRightExpression()) { - stat.appendSQL(" "); - query.appendSQL(stat, y); - } - } -} diff --git a/h2/src/tools/org/h2/jaqu/ConditionAndOr.java b/h2/src/tools/org/h2/jaqu/ConditionAndOr.java deleted file mode 100644 index 0ec611dc74..0000000000 --- a/h2/src/tools/org/h2/jaqu/ConditionAndOr.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * An OR or an AND condition. - */ -enum ConditionAndOr implements Token { - AND("AND"), - OR("OR"); - - private String text; - - ConditionAndOr(String text) { - this.text = text; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL(text); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Db.java b/h2/src/tools/org/h2/jaqu/Db.java deleted file mode 100644 index 653fe0f858..0000000000 --- a/h2/src/tools/org/h2/jaqu/Db.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import javax.sql.DataSource; -import org.h2.jaqu.DbUpgrader.DefaultDbUpgrader; -import org.h2.jaqu.SQLDialect.DefaultSQLDialect; -import org.h2.jaqu.Table.JQDatabase; -import org.h2.jaqu.Table.JQTable; -import org.h2.jaqu.util.WeakIdentityHashMap; -import org.h2.util.JdbcUtils; -import org.h2.util.New; -import org.h2.util.StringUtils; - -/** - * This class represents a connection to a database. - */ -public class Db { - - /** - * This map It holds unique tokens that are generated by functions such as - * Function.sum(..) in "db.from(p).select(Function.sum(p.unitPrice))". It - * doesn't actually hold column tokens, as those are bound to the query - * itself. 
- */ - private static final Map TOKENS = Collections - .synchronizedMap(new WeakIdentityHashMap()); - - private final Connection conn; - private final Map, TableDefinition> classMap = New.hashMap(); - private final SQLDialect dialect; - private DbUpgrader dbUpgrader = new DefaultDbUpgrader(); - private final Set> upgradeChecked = Collections - .synchronizedSet(new HashSet>()); - - private int todoDocumentNewFeaturesInHtmlFile; - - public Db(Connection conn) { - this.conn = conn; - dialect = new DefaultSQLDialect(); - } - - static X registerToken(X x, Token token) { - TOKENS.put(x, token); - return x; - } - - static Token getToken(Object x) { - return TOKENS.get(x); - } - - private static T instance(Class clazz) { - try { - return clazz.newInstance(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public static Db open(String url, String user, String password) { - try { - Connection conn = JdbcUtils - .getConnection(null, url, user, password); - return new Db(conn); - } catch (SQLException e) { - throw convert(e); - } - } - - /** - * Create a new database instance using a data source. This method is fast, - * so that you can always call open() / close() on usage. - * - * @param ds the data source - * @return the database instance. 
- */ - public static Db open(DataSource ds) { - try { - return new Db(ds.getConnection()); - } catch (SQLException e) { - throw convert(e); - } - } - - public static Db open(String url, String user, char[] password) { - try { - Properties prop = new Properties(); - prop.setProperty("user", user); - prop.put("password", password); - Connection conn = JdbcUtils.getConnection(null, url, prop); - return new Db(conn); - } catch (SQLException e) { - throw convert(e); - } - } - - private static Error convert(Exception e) { - return new Error(e); - } - - public void insert(T t) { - Class clazz = t.getClass(); - define(clazz).createTableIfRequired(this).insert(this, t, false); - } - - public long insertAndGetKey(T t) { - Class clazz = t.getClass(); - return define(clazz).createTableIfRequired(this).insert(this, t, true); - } - - public void merge(T t) { - Class clazz = t.getClass(); - define(clazz).createTableIfRequired(this).merge(this, t); - } - - public void update(T t) { - Class clazz = t.getClass(); - define(clazz).createTableIfRequired(this).update(this, t); - } - - public void delete(T t) { - Class clazz = t.getClass(); - define(clazz).createTableIfRequired(this).delete(this, t); - } - - public Query from(T alias) { - Class clazz = alias.getClass(); - define(clazz).createTableIfRequired(this); - return Query.from(this, alias); - } - - Db upgradeDb() { - if (!upgradeChecked.contains(dbUpgrader.getClass())) { - // flag as checked immediately because calls are nested. - upgradeChecked.add(dbUpgrader.getClass()); - - JQDatabase model = dbUpgrader.getClass().getAnnotation( - JQDatabase.class); - if (model.version() > 0) { - DbVersion v = new DbVersion(); - DbVersion dbVersion = - // (SCHEMA="" && TABLE="") == DATABASE - from(v).where(v.schema).is("").and(v.table).is("") - .selectFirst(); - if (dbVersion == null) { - // database has no version registration, but model specifies - // version: insert DbVersion entry and return. 
- DbVersion newDb = new DbVersion(model.version()); - insert(newDb); - } else { - // database has a version registration: - // check to see if upgrade is required. - if ((model.version() > dbVersion.version) - && (dbUpgrader != null)) { - // database is an older version than the model - boolean success = dbUpgrader.upgradeDatabase(this, - dbVersion.version, model.version()); - if (success) { - dbVersion.version = model.version(); - update(dbVersion); - } - } - } - } - } - return this; - } - - void upgradeTable(TableDefinition model) { - if (!upgradeChecked.contains(model.getModelClass())) { - // flag is checked immediately because calls are nested - upgradeChecked.add(model.getModelClass()); - - if (model.tableVersion > 0) { - // table is using JaQu version tracking. - DbVersion v = new DbVersion(); - String schema = StringUtils.isNullOrEmpty(model.schemaName) ? "" - : model.schemaName; - DbVersion dbVersion = from(v).where(v.schema).like(schema) - .and(v.table).like(model.tableName).selectFirst(); - if (dbVersion == null) { - // table has no version registration, but model specifies - // version: insert DbVersion entry - DbVersion newTable = new DbVersion(model.tableVersion); - newTable.schema = schema; - newTable.table = model.tableName; - insert(newTable); - } else { - // table has a version registration: - // check if upgrade is required - if ((model.tableVersion > dbVersion.version) - && (dbUpgrader != null)) { - // table is an older version than model - boolean success = dbUpgrader.upgradeTable(this, schema, - model.tableName, dbVersion.version, - model.tableVersion); - if (success) { - dbVersion.version = model.tableVersion; - update(dbVersion); - } - } - } - } - } - } - - TableDefinition define(Class clazz) { - TableDefinition def = getTableDefinition(clazz); - if (def == null) { - upgradeDb(); - def = new TableDefinition(clazz); - def.mapFields(); - classMap.put(clazz, def); - if (Table.class.isAssignableFrom(clazz)) { - T t = instance(clazz); - Table 
table = (Table) t; - Define.define(def, table); - } else if (clazz.isAnnotationPresent(JQTable.class)) { - // annotated classes skip the Define().define() static - // initializer - T t = instance(clazz); - def.mapObject(t); - } - } - return def; - } - - public synchronized void setDbUpgrader(DbUpgrader upgrader) { - if (!upgrader.getClass().isAnnotationPresent(JQDatabase.class)) { - throw new RuntimeException("DbUpgrader must be annotated with " - + JQDatabase.class.getSimpleName()); - } - this.dbUpgrader = upgrader; - upgradeChecked.clear(); - } - - SQLDialect getDialect() { - return dialect; - } - - public Connection getConnection() { - return conn; - } - - public void close() { - try { - conn.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public TestCondition test(A x) { - return new TestCondition(x); - } - - public void insertAll(List list) { - for (T t : list) { - insert(t); - } - } - - public List insertAllAndGetKeys(List list) { - List identities = new ArrayList(); - for (T t : list) { - identities.add(insertAndGetKey(t)); - } - return identities; - } - - public void updateAll(List list) { - for (T t : list) { - update(t); - } - } - - public void deleteAll(List list) { - for (T t : list) { - delete(t); - } - } - - PreparedStatement prepare(String sql, boolean returnGeneratedKeys) { - try { - if (returnGeneratedKeys) { - return conn.prepareStatement(sql, - Statement.RETURN_GENERATED_KEYS); - } - return conn.prepareStatement(sql); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - @SuppressWarnings("unchecked") - TableDefinition getTableDefinition(Class clazz) { - return (TableDefinition) classMap.get(clazz); - } - - /** - * Run a SQL query directly against the database. 
- * - * @param sql the SQL statement - * @return the result set - */ - public ResultSet executeQuery(String sql) { - try { - return conn.createStatement().executeQuery(sql); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - /** - * Run a SQL statement directly against the database. - * - * @param sql the SQL statement - * @return the update count - */ - public int executeUpdate(String sql) { - try { - Statement stat = conn.createStatement(); - int updateCount = stat.executeUpdate(sql); - stat.close(); - return updateCount; - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - // FieldDefinition getFieldDefinition(X x) { - // return aliasMap.get(x).getFieldDefinition(); - // } - // - // SelectColumn getSelectColumn(X x) { - // return aliasMap.get(x); - // } - -} diff --git a/h2/src/tools/org/h2/jaqu/DbInspector.java b/h2/src/tools/org/h2/jaqu/DbInspector.java deleted file mode 100644 index eafbe9b1ae..0000000000 --- a/h2/src/tools/org/h2/jaqu/DbInspector.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.List; -import org.h2.jaqu.Table.JQTable; -import org.h2.util.JdbcUtils; -import org.h2.util.New; -import org.h2.util.StringUtils; - -/** - * Class to inspect a model and a database for the purposes of model validation - * and automatic model generation. This class finds the available schemas and - * tables and serves as the entry point for model generation and validation. 
- */ -public class DbInspector { - - private final Db db; - private DatabaseMetaData metaData; - private Class dateTimeClass = java.util.Date.class; - - public DbInspector(Db db) { - this.db = db; - } - - /** - * Set the preferred class to store date and time. Possible values are: - * java.util.Date (default) and java.sql.Timestamp. - * - * @param dateTimeClass the new class - */ - public void setPreferredDateTimeClass( - Class dateTimeClass) { - this.dateTimeClass = dateTimeClass; - } - - /** - * Generates models class skeletons for schemas and tables. If the table - * name is undefined, models will be generated for every table within the - * specified schema. Additionally, if no schema is defined, models will be - * generated for all schemas and all tables. - * - * @param schema the schema name (optional) - * @param table the table name (optional) - * @param packageName the package name (optional) - * @param annotateSchema (includes schema name in annotation) - * @param trimStrings (trims strings to maxLength of column) - * @return a list of complete model classes as strings, each element a class - */ - public List generateModel(String schema, String table, - String packageName, boolean annotateSchema, boolean trimStrings) { - try { - List models = New.arrayList(); - List tables = getTables(schema, table); - for (TableInspector t : tables) { - t.read(metaData); - String model = t.generateModel(packageName, annotateSchema, - trimStrings); - models.add(model); - } - return models; - } catch (SQLException s) { - throw new RuntimeException(s); - } - } - - /** - * Validates a model. 
- * - * @param the model class - * @param model an instance of the model class - * @param throwOnError if errors should cause validation to fail - * @return a list of validation remarks - */ - public List validateModel(T model, - boolean throwOnError) { - try { - TableInspector inspector = getTable(model); - inspector.read(metaData); - @SuppressWarnings("unchecked") - Class clazz = (Class) model.getClass(); - TableDefinition def = db.define(clazz); - return inspector.validate(def, throwOnError); - } catch (SQLException s) { - throw new RuntimeException(s); - } - } - - private DatabaseMetaData getMetaData() throws SQLException { - if (metaData == null) { - metaData = db.getConnection().getMetaData(); - } - return metaData; - } - - /** - * Get the table in the database based on the model definition. - * - * @param the model class - * @param model an instance of the model class - * @return the table inspector - */ - private TableInspector getTable(T model) throws SQLException { - @SuppressWarnings("unchecked") - Class clazz = (Class) model.getClass(); - TableDefinition def = db.define(clazz); - boolean forceUpperCase = getMetaData().storesUpperCaseIdentifiers(); - String schema = (forceUpperCase && def.schemaName != null) ? def.schemaName - .toUpperCase() : def.schemaName; - String table = forceUpperCase ? def.tableName.toUpperCase() - : def.tableName; - List tables = getTables(schema, table); - return tables.get(0); - } - - /** - * Returns a list of tables. This method always returns at least one - * element. If no table is found, an exception is thrown. 
- * - * @param schema the schema name - * @param table the table name - * @return a list of table inspectors (always contains at least one element) - */ - private List getTables(String schema, String table) - throws SQLException { - ResultSet rs = null; - try { - rs = getMetaData().getSchemas(); - ArrayList schemaList = New.arrayList(); - while (rs.next()) { - schemaList.add(rs.getString("TABLE_SCHEM")); - } - JdbcUtils.closeSilently(rs); - - String jaquTables = DbVersion.class.getAnnotation(JQTable.class) - .name(); - - List tables = New.arrayList(); - if (schemaList.size() == 0) { - schemaList.add(null); - } - for (String s : schemaList) { - rs = getMetaData().getTables(null, s, null, - new String[] { "TABLE" }); - while (rs.next()) { - String t = rs.getString("TABLE_NAME"); - if (!t.equalsIgnoreCase(jaquTables)) { - tables.add(new TableInspector(s, t, getMetaData() - .storesUpperCaseIdentifiers(), dateTimeClass)); - } - } - } - - if (StringUtils.isNullOrEmpty(schema) - && StringUtils.isNullOrEmpty(table)) { - // all schemas and tables - return tables; - } - // schema subset OR table subset OR exact match - List matches = New.arrayList(); - for (TableInspector t : tables) { - if (t.matches(schema, table)) { - matches.add(t); - } - } - if (matches.size() == 0) { - throw new RuntimeException(MessageFormat.format( - "Failed to find schema={0} table={1}", - schema == null ? "" : schema, table == null ? "" - : table)); - } - return matches; - } finally { - JdbcUtils.closeSilently(rs); - } - } - -} diff --git a/h2/src/tools/org/h2/jaqu/DbUpgrader.java b/h2/src/tools/org/h2/jaqu/DbUpgrader.java deleted file mode 100644 index e8b9613364..0000000000 --- a/h2/src/tools/org/h2/jaqu/DbUpgrader.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import org.h2.jaqu.Table.JQDatabase; - -/** - * Interface which defines a class to handle table changes based on model - * versions. An implementation of DbUpgrader must be annotated with the - * JQDatabase annotation, which defines the expected database version - * number. - */ -public interface DbUpgrader { - - /** - * Defines method interface to handle database upgrades. This method is only - * called if your DbUpgrader implementation is annotated with - * JQDatabase. - * - * @param db the database - * @param fromVersion the old version - * @param toVersion the new version - * @return true for successful upgrade. If the upgrade is successful, the - * version registry is automatically updated. - */ - boolean upgradeDatabase(Db db, int fromVersion, int toVersion); - - /** - * Defines method interface to handle table upgrades. - * - * @param db the database - * @param schema the schema - * @param table the table - * @param fromVersion the old version - * @param toVersion the new version - * @return true for successful upgrade. If the upgrade is successful, the - * version registry is automatically updated. - */ - boolean upgradeTable(Db db, String schema, String table, int fromVersion, - int toVersion); - - /** - * The default database upgrader. It throws runtime exception instead of - * handling upgrade requests. 
- */ - @JQDatabase(version = 0) - public static class DefaultDbUpgrader implements DbUpgrader { - - @Override - public boolean upgradeDatabase(Db db, int fromVersion, int toVersion) { - throw new RuntimeException( - "Please provide your own DbUpgrader implementation."); - } - - @Override - public boolean upgradeTable(Db db, String schema, String table, - int fromVersion, int toVersion) { - throw new RuntimeException( - "Please provide your own DbUpgrader implementation."); - } - - } - -} diff --git a/h2/src/tools/org/h2/jaqu/DbVersion.java b/h2/src/tools/org/h2/jaqu/DbVersion.java deleted file mode 100644 index a07beda93a..0000000000 --- a/h2/src/tools/org/h2/jaqu/DbVersion.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQTable; - -/** - * A system table to track database and table versions. - */ -@JQTable(name = "_jq_versions", - primaryKey = "schemaName tableName", memoryTable = true) -public class DbVersion { - - @JQColumn(name = "schemaName", allowNull = false) - String schema = ""; - - @JQColumn(name = "tableName", allowNull = false) - String table = ""; - - @JQColumn(name = "version") - Integer version; - - public DbVersion() { - // nothing to do - } - - /** - * Constructor for defining a version entry. Both the schema and the table - * are empty strings, which means this is the row for the 'database'. - * - * @param version the database version - */ - public DbVersion(int version) { - this.schema = ""; - this.table = ""; - this.version = version; - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Define.java b/h2/src/tools/org/h2/jaqu/Define.java deleted file mode 100644 index 9488f47c9f..0000000000 --- a/h2/src/tools/org/h2/jaqu/Define.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import org.h2.jaqu.Table.IndexType; - -/** - * This class provides utility methods to define primary keys, indexes, and set - * the name of the table. - */ -public class Define { - - private static TableDefinition currentTableDefinition; - private static Table currentTable; - - public static void primaryKey(Object... columns) { - checkInDefine(); - currentTableDefinition.setPrimaryKey(columns); - } - - public static void index(Object... columns) { - checkInDefine(); - currentTableDefinition.addIndex(IndexType.STANDARD, columns); - } - - public static void uniqueIndex(Object... columns) { - checkInDefine(); - currentTableDefinition.addIndex(IndexType.UNIQUE, columns); - } - - public static void hashIndex(Object column) { - checkInDefine(); - currentTableDefinition - .addIndex(IndexType.HASH, new Object[] { column }); - } - - public static void uniqueHashIndex(Object column) { - checkInDefine(); - currentTableDefinition.addIndex(IndexType.UNIQUE_HASH, - new Object[] { column }); - } - - public static void maxLength(Object column, int length) { - checkInDefine(); - currentTableDefinition.setMaxLength(column, length); - } - - public static void tableName(String tableName) { - currentTableDefinition.setTableName(tableName); - } - - static synchronized void define(TableDefinition tableDefinition, - Table table) { - currentTableDefinition = tableDefinition; - currentTable = table; - tableDefinition.mapObject(table); - table.define(); - currentTable = null; - } - - private static void checkInDefine() { - if (currentTable == null) { - throw new RuntimeException( - "This method may only be called " - + "from within the define() method, and the define() method " - + "is called by the framework."); - } - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Filter.java b/h2/src/tools/org/h2/jaqu/Filter.java deleted file mode 
100644 index a633abf7df..0000000000 --- a/h2/src/tools/org/h2/jaqu/Filter.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * Represents the WHERE clause of a query. - */ -public interface Filter { - boolean where(); -} diff --git a/h2/src/tools/org/h2/jaqu/Function.java b/h2/src/tools/org/h2/jaqu/Function.java deleted file mode 100644 index a96ba61c32..0000000000 --- a/h2/src/tools/org/h2/jaqu/Function.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import org.h2.jaqu.util.ClassUtils; - -/** - * This class provides static methods that represents common SQL functions. - */ -public class Function implements Token { - - // must be a new instance - private static final Long COUNT_STAR = Long.valueOf(0); - - protected Object[] x; - private final String name; - - protected Function(String name, Object... 
x) { - this.name = name; - this.x = x; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL(name).appendSQL("("); - int i = 0; - for (Object o : x) { - if (i++ > 0) { - stat.appendSQL(","); - } - query.appendSQL(stat, o); - } - stat.appendSQL(")"); - } - - public static Long count() { - return COUNT_STAR; - } - - public static Integer length(Object x) { - return Db.registerToken( - ClassUtils.newObject(Integer.class), new Function("LENGTH", x)); - } - - @SuppressWarnings("unchecked") - public static T sum(T x) { - return (T) Db.registerToken( - ClassUtils.newObject(x.getClass()), new Function("SUM", x)); - } - - public static Long count(Object x) { - return Db.registerToken( - ClassUtils.newObject(Long.class), new Function("COUNT", x)); - } - - public static Boolean isNull(Object x) { - return Db.registerToken( - ClassUtils.newObject(Boolean.class), new Function("", x) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - query.appendSQL(stat, x[0]); - stat.appendSQL(" IS NULL"); - } - }); - } - - public static Boolean isNotNull(Object x) { - return Db.registerToken( - ClassUtils.newObject(Boolean.class), new Function("", x) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - query.appendSQL(stat, x[0]); - stat.appendSQL(" IS NOT NULL"); - } - }); - } - - public static Boolean not(Boolean x) { - return Db.registerToken( - ClassUtils.newObject(Boolean.class), new Function("", x) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("NOT "); - query.appendSQL(stat, x[0]); - } - }); - } - - public static Boolean or(Boolean... 
x) { - return Db.registerToken( - ClassUtils.newObject(Boolean.class), - new Function("", (Object[]) x) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - int i = 0; - for (Object o : x) { - if (i++ > 0) { - stat.appendSQL(" OR "); - } - query.appendSQL(stat, o); - } - } - }); - } - - public static Boolean and(Boolean... x) { - return Db.registerToken( - ClassUtils.newObject(Boolean.class), - new Function("", (Object[]) x) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - int i = 0; - for (Object o : x) { - if (i++ > 0) { - stat.appendSQL(" AND "); - } - query.appendSQL(stat, o); - } - } - }); - } - - @SuppressWarnings("unchecked") - public static X min(X x) { - Class clazz = (Class) x.getClass(); - X o = ClassUtils.newObject(clazz); - return Db.registerToken(o, new Function("MIN", x)); - } - - @SuppressWarnings("unchecked") - public static X max(X x) { - Class clazz = (Class) x.getClass(); - X o = ClassUtils.newObject(clazz); - return Db.registerToken(o, new Function("MAX", x)); - } - - public static Boolean like(String x, String pattern) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function("LIKE", x, pattern) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" LIKE "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/ModelUtils.java b/h2/src/tools/org/h2/jaqu/ModelUtils.java deleted file mode 100644 index 3bd4a8707d..0000000000 --- a/h2/src/tools/org/h2/jaqu/ModelUtils.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import java.lang.reflect.Method; -import java.math.BigDecimal; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; -import org.h2.jaqu.TableDefinition.FieldDefinition; -import org.h2.util.StringUtils; - -/** - * Utility methods for models related to type mapping, default value validation, - * and class or field name creation. - */ -public class ModelUtils { - - /** - * The list of supported data types. It is used by the runtime mapping for - * CREATE statements. - */ - private static final Map, String> SUPPORTED_TYPES = - new HashMap, String>(); - - static { - Map, String> m = SUPPORTED_TYPES; - m.put(String.class, "VARCHAR"); - m.put(Boolean.class, "BIT"); - m.put(Byte.class, "TINYINT"); - m.put(Short.class, "SMALLINT"); - m.put(Integer.class, "INT"); - m.put(Long.class, "BIGINT"); - m.put(Float.class, "REAL"); - m.put(Double.class, "DOUBLE"); - m.put(BigDecimal.class, "DECIMAL"); - m.put(java.sql.Timestamp.class, "TIMESTAMP"); - m.put(java.util.Date.class, "TIMESTAMP"); - m.put(java.sql.Date.class, "DATE"); - m.put(java.sql.Time.class, "TIME"); - // TODO add blobs, binary types, custom types? - } - - /** - * Convert SQL type aliases to the list of supported types. - * This map is used by generation and validation. 
- */ - private static final Map SQL_TYPES = - new HashMap(); - - static { - Map m = SQL_TYPES; - m.put("CHAR", "VARCHAR"); - m.put("CHARACTER", "VARCHAR"); - m.put("NCHAR", "VARCHAR"); - m.put("VARCHAR_CASESENSITIVE", "VARCHAR"); - m.put("VARCHAR_IGNORECASE", "VARCHAR"); - m.put("LONGVARCHAR", "VARCHAR"); - m.put("VARCHAR2", "VARCHAR"); - m.put("NVARCHAR", "VARCHAR"); - m.put("NVARCHAR2", "VARCHAR"); - m.put("TEXT", "VARCHAR"); - m.put("NTEXT", "VARCHAR"); - m.put("TINYTEXT", "VARCHAR"); - m.put("MEDIUMTEXT", "VARCHAR"); - m.put("LONGTEXT", "VARCHAR"); - m.put("CLOB", "VARCHAR"); - m.put("NCLOB", "VARCHAR"); - - // logic - m.put("BOOL", "BIT"); - m.put("BOOLEAN", "BIT"); - - // numeric - m.put("BYTE", "TINYINT"); - m.put("INT2", "SMALLINT"); - m.put("YEAR", "SMALLINT"); - m.put("INTEGER", "INT"); - m.put("MEDIUMINT", "INT"); - m.put("INT4", "INT"); - m.put("SIGNED", "INT"); - m.put("INT8", "BIGINT"); - m.put("IDENTITY", "BIGINT"); - - // decimal - m.put("NUMBER", "DECIMAL"); - m.put("DEC", "DECIMAL"); - m.put("NUMERIC", "DECIMAL"); - m.put("FLOAT", "DOUBLE"); - m.put("FLOAT4", "DOUBLE"); - m.put("FLOAT8", "DOUBLE"); - - // date - m.put("DATETIME", "TIMESTAMP"); - m.put("SMALLDATETIME", "TIMESTAMP"); - } - - private static final List KEYWORDS = Arrays.asList("abstract", - "assert", "boolean", "break", "byte", "case", "catch", "char", - "class", "const", "continue", "default", "do", "double", "else", - "enum", "extends", "final", "finally", "float", "for", "goto", - "if", "implements", "import", "instanceof", "int", "interface", - "long", "native", "new", "package", "private", "protected", - "public", "return", "short", "static", "strictfp", "super", - "switch", "synchronized", "this", "throw", "throws", "transient", - "try", "void", "volatile", "while", "false", "null", "true"); - - private int todoReviewWholeClass; - - /** - * Returns a SQL type mapping for a Java class. 
- * - * @param fieldDef the field to map - * @param strictTypeMapping throws a RuntimeException if type is unsupported - * @return SQL type - */ - static String getDataType(FieldDefinition fieldDef, boolean strictTypeMapping) { - Class fieldClass = fieldDef.field.getType(); - String type = SUPPORTED_TYPES.get(fieldClass); - if (type != null) { - return type; - } - if (!strictTypeMapping) { - return "VARCHAR"; - } - throw new RuntimeException("Unsupported type " + fieldClass.getName()); - } - - /** - * Returns the Java class for a given SQL type. - * - * @param sqlType the SQL type - * @param dateTimeClass the preferred date class (java.util.Date or - * java.sql.Timestamp) - * @return Class of type - */ - static Class getClassForSqlType(String sqlType, - Class dateTimeClass) { - sqlType = sqlType.toUpperCase(); - // TODO dropping "UNSIGNED" or parts like that could be trouble - sqlType = sqlType.split(" ")[0].trim(); - - if (SQL_TYPES.containsKey(sqlType)) { - // convert the sqlType to a standard type - sqlType = SQL_TYPES.get(sqlType); - } - Class mappedClass = null; - for (Class clazz : SUPPORTED_TYPES.keySet()) { - if (SUPPORTED_TYPES.get(clazz).equalsIgnoreCase(sqlType)) { - mappedClass = clazz; - break; - } - } - if (mappedClass != null) { - if (mappedClass.equals(java.util.Date.class) - || mappedClass.equals(java.sql.Timestamp.class)) { - return dateTimeClass; - } - return mappedClass; - } - return null; - } - - /** - * Tries to create a convert a SQL table name to a camel case class name. 
- * - * @param tableName the SQL table name - * @return the class name - */ - static String convertTableToClassName(String tableName) { - String[] chunks = StringUtils.arraySplit(tableName, '_', false); - StringBuilder className = new StringBuilder(); - for (String chunk : chunks) { - if (chunk.length() == 0) { - // leading or trailing _ - continue; - } - className.append(Character.toUpperCase(chunk.charAt(0))); - className.append(chunk.substring(1).toLowerCase()); - } - return className.toString(); - } - - /** - * Ensures that SQL column names don't collide with Java keywords. - * - * @param columnName the column name - * @return the Java field name - */ - static String convertColumnToFieldName(String columnName) { - String lower = columnName.toLowerCase(); - if (KEYWORDS.contains(lower)) { - lower += "Value"; - } - return lower; - } - - /** - * Checks the formatting of JQColumn.defaultValue(). - * - * @param defaultValue the default value - * @return true if it is - */ - static boolean isProperlyFormattedDefaultValue(String defaultValue) { - if (StringUtils.isNullOrEmpty(defaultValue)) { - return true; - } - Pattern literalDefault = Pattern.compile("'.*'"); - Pattern functionDefault = Pattern.compile("[^'].*[^']"); - return literalDefault.matcher(defaultValue).matches() - || functionDefault.matcher(defaultValue).matches(); - } - - /** - * Checks to see if the default value matches the class. - * - * @param modelClass the class - * @param defaultValue the value - * @return true if it does - */ - static boolean isValidDefaultValue(Class modelClass, - String defaultValue) { - - if (defaultValue == null) { - // NULL - return true; - } - if (defaultValue.trim().length() == 0) { - // NULL (effectively) - return true; - } - - // TODO H2 single-quotes literal values, which is useful. - // MySQL does not single-quote literal values so its hard to - // differentiate a FUNCTION/VARIABLE from a literal value. 
- - // function / variable - Pattern functionDefault = Pattern.compile("[^'].*[^']"); - if (functionDefault.matcher(defaultValue).matches()) { - // hard to validate this since its in the database - // assume it is good - return true; - } - - // STRING - if (modelClass == String.class) { - Pattern stringDefault = Pattern.compile("'(.|\\n)*'"); - return stringDefault.matcher(defaultValue).matches(); - } - - String dateRegex = "[0-9]{1,4}[-/\\.][0-9]{1,2}[-/\\.][0-9]{1,2}"; - String timeRegex = "[0-2]{1}[0-9]{1}:[0-5]{1}[0-9]{1}:[0-5]{1}[0-9]{1}"; - - // TIMESTAMP - if (modelClass == java.util.Date.class - || modelClass == java.sql.Timestamp.class) { - // this may be a little loose.... - // 00-00-00 00:00:00 - // 00/00/00T00:00:00 - // 00.00.00T00:00:00 - Pattern pattern = Pattern.compile("'" + dateRegex + "." + timeRegex + "'"); - return pattern.matcher(defaultValue).matches(); - } - - // DATE - if (modelClass == java.sql.Date.class) { - // this may be a little loose.... - // 00-00-00 - // 00/00/00 - // 00.00.00 - Pattern pattern = Pattern.compile("'" + dateRegex + "'"); - return pattern.matcher(defaultValue).matches(); - } - - // TIME - if (modelClass == java.sql.Time.class) { - // 00:00:00 - Pattern pattern = Pattern.compile("'" + timeRegex + "'"); - return pattern.matcher(defaultValue).matches(); - } - - // NUMBER - if (Number.class.isAssignableFrom(modelClass)) { - // strip single quotes - String unquoted = defaultValue; - if (unquoted.charAt(0) == '\'') { - unquoted = unquoted.substring(1); - } - if (unquoted.charAt(unquoted.length() - 1) == '\'') { - unquoted = unquoted.substring(0, unquoted.length() - 1); - } - - try { - // delegate to static valueOf() method to parse string - Method m = modelClass.getMethod("valueOf", String.class); - m.invoke(null, unquoted); - } catch (NumberFormatException ex) { - return false; - } catch (Throwable t) { - // TODO is this correct? 
- // ignore - } - } - return true; - } -} diff --git a/h2/src/tools/org/h2/jaqu/OrderExpression.java b/h2/src/tools/org/h2/jaqu/OrderExpression.java deleted file mode 100644 index ce4f10e537..0000000000 --- a/h2/src/tools/org/h2/jaqu/OrderExpression.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * An expression to order by in a query. - * - * @param the query data type - */ -class OrderExpression { - private final Query query; - private final Object expression; - private final boolean desc; - private final boolean nullsFirst; - private final boolean nullsLast; - - OrderExpression(Query query, Object expression, boolean desc, - boolean nullsFirst, boolean nullsLast) { - this.query = query; - this.expression = expression; - this.desc = desc; - this.nullsFirst = nullsFirst; - this.nullsLast = nullsLast; - } - - void appendSQL(SQLStatement stat) { - query.appendSQL(stat, expression); - if (desc) { - stat.appendSQL(" DESC"); - } - if (nullsLast) { - stat.appendSQL(" NULLS LAST"); - } - if (nullsFirst) { - stat.appendSQL(" NULLS FIRST"); - } - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Query.java b/h2/src/tools/org/h2/jaqu/Query.java deleted file mode 100644 index c4f36eb463..0000000000 --- a/h2/src/tools/org/h2/jaqu/Query.java +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.lang.reflect.Field; -import java.sql.Clob; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.IdentityHashMap; -import java.util.List; -import org.h2.jaqu.bytecode.ClassReader; -import org.h2.jaqu.util.StatementLogger; -import org.h2.jaqu.util.ClassUtils; -import org.h2.util.JdbcUtils; -import org.h2.util.New; - -/** - * This class represents a query. - * - * @param the return type - */ -public class Query { - - private final Db db; - private SelectTable from; - private final ArrayList conditions = New.arrayList(); - private final ArrayList updateColumnDeclarations = New - .arrayList(); - private final ArrayList> joins = New.arrayList(); - private final IdentityHashMap> aliasMap = ClassUtils - .newIdentityHashMap(); - private final ArrayList> orderByList = New.arrayList(); - private Object[] groupByExpressions; - private long limit; - private long offset; - - Query(Db db) { - this.db = db; - } - - @SuppressWarnings("unchecked") - static Query from(Db db, T alias) { - Query query = new Query(db); - TableDefinition def = (TableDefinition) db.define(alias - .getClass()); - query.from = new SelectTable(db, query, alias, false); - def.initSelectObject(query.from, alias, query.aliasMap); - return query; - } - - public long selectCount() { - SQLStatement stat = getSelectStatement(false); - stat.appendSQL("COUNT(*) "); - appendFromWhere(stat); - ResultSet rs = stat.executeQuery(); - Statement s = null; - try { - s = rs.getStatement(); - rs.next(); - long value = rs.getLong(1); - return value; - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(s); - } - } - - public List select() { - return select(false); - } - - public T selectFirst() { - return select(false).get(0); - } - - public List selectDistinct() { - 
return select(true); - } - - @SuppressWarnings("unchecked") - public X selectFirst(Z x) { - List list = (List) select(x); - return list.isEmpty() ? null : list.get(0); - } - - public String getSQL() { - SQLStatement stat = getSelectStatement(false); - stat.appendSQL("*"); - appendFromWhere(stat); - return stat.getSQL().trim(); - } - - private List select(boolean distinct) { - List result = New.arrayList(); - TableDefinition def = from.getAliasDefinition(); - SQLStatement stat = getSelectStatement(distinct); - def.appendSelectList(stat); - appendFromWhere(stat); - ResultSet rs = stat.executeQuery(); - Statement s = null; - try { - s = rs.getStatement(); - while (rs.next()) { - T item = from.newObject(); - from.getAliasDefinition().readRow(item, rs); - result.add(item); - } - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(s); - } - return result; - } - - public int delete() { - SQLStatement stat = new SQLStatement(db); - stat.appendSQL("DELETE FROM "); - from.appendSQL(stat); - appendWhere(stat); - StatementLogger.delete(stat.getSQL()); - return stat.executeUpdate(); - } - - public UpdateColumnSet set(A field) { - return new UpdateColumnSet(this, field); - } - - public UpdateColumnIncrement increment(A field) { - return new UpdateColumnIncrement(this, field); - } - - public int update() { - if (updateColumnDeclarations.size() == 0) { - throw new RuntimeException("Missing set or increment call."); - } - SQLStatement stat = new SQLStatement(db); - stat.appendSQL("UPDATE "); - from.appendSQL(stat); - stat.appendSQL(" SET "); - int i = 0; - for (UpdateColumn declaration : updateColumnDeclarations) { - if (i++ > 0) { - stat.appendSQL(", "); - } - declaration.appendSQL(stat); - } - appendWhere(stat); - StatementLogger.update(stat.getSQL()); - return stat.executeUpdate(); - } - - public List selectDistinct(Z x) { - return select(x, true); - } - - public List select(Z x) { - return select(x, 
false); - } - - @SuppressWarnings("unchecked") - private List select(Z x, boolean distinct) { - Class clazz = x.getClass(); - if (ClassUtils.isSimpleType(clazz)) { - return selectSimple((X) x, distinct); - } - clazz = clazz.getSuperclass(); - return select((Class) clazz, (X) x, distinct); - } - - private List select(Class clazz, X x, boolean distinct) { - List result = New.arrayList(); - TableDefinition def = db.define(clazz); - SQLStatement stat = getSelectStatement(distinct); - def.appendSelectList(stat, this, x); - appendFromWhere(stat); - ResultSet rs = stat.executeQuery(); - Statement s = null; - try { - s = rs.getStatement(); - while (rs.next()) { - X row = ClassUtils.newObject(clazz); - def.readRow(row, rs); - result.add(row); - } - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(s); - } - return result; - } - - @SuppressWarnings("unchecked") - private List selectSimple(X x, boolean distinct) { - SQLStatement stat = getSelectStatement(distinct); - appendSQL(stat, x); - appendFromWhere(stat); - ResultSet rs = stat.executeQuery(); - List result = New.arrayList(); - Statement s = null; - try { - s = rs.getStatement(); - while (rs.next()) { - try { - X value; - Object o = rs.getObject(1); - int convertHereIsProbablyWrong; - if (Clob.class.isAssignableFrom(o.getClass())) { - value = (X) ClassUtils.convert(o, String.class); - } else { - value = (X) o; - } - result.add(value); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(rs); - JdbcUtils.closeSilently(s); - } - return result; - } - - private SQLStatement getSelectStatement(boolean distinct) { - SQLStatement stat = new SQLStatement(db); - stat.appendSQL("SELECT "); - if (distinct) { - stat.appendSQL("DISTINCT "); - } - return stat; - } - - public QueryCondition where(A x) { - return new QueryCondition(this, x); - 
} - - public QueryWhere where(Filter filter) { - HashMap fieldMap = New.hashMap(); - for (Field f : filter.getClass().getDeclaredFields()) { - f.setAccessible(true); - try { - Object obj = f.get(filter); - if (obj == from.getAlias()) { - List fields = from - .getAliasDefinition().getFields(); - String name = f.getName(); - for (TableDefinition.FieldDefinition field : fields) { - String n = name + "." + field.field.getName(); - Object o = field.field.get(obj); - fieldMap.put(n, o); - } - } - fieldMap.put(f.getName(), f.get(filter)); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - Token filterCode = new ClassReader().decompile(filter, fieldMap, - "where"); - // String filterQuery = filterCode.toString(); - conditions.add(filterCode); - return new QueryWhere(this); - } - - public QueryWhere whereTrue(Boolean condition) { - Token token = new Function("", condition); - addConditionToken(token); - return new QueryWhere(this); - } - - /** - * Sets the Limit and Offset of a query. - * - * @return the query - */ - public Query limit(long limit) { - this.limit = limit; - return this; - } - - public Query offset(long offset) { - this.offset = offset; - return this; - } - - /** - * Order by a number of columns. - * - * @param expressions the columns - * @return the query - */ - public Query orderBy(Object... expressions) { - for (Object expr : expressions) { - OrderExpression e = new OrderExpression(this, expr, false, - false, false); - addOrderBy(e); - } - return this; - } - - public Query orderByDesc(Object expr) { - OrderExpression e = new OrderExpression(this, expr, true, false, - false); - addOrderBy(e); - return this; - } - - public Query groupBy(Object... 
groupBy) { - this.groupByExpressions = groupBy; - return this; - } - - /** - * INTERNAL - * - * @param stat the statement - * @param x the alias object - */ - public void appendSQL(SQLStatement stat, Object x) { - if (x == Function.count()) { - stat.appendSQL("COUNT(*)"); - return; - } - Token token = Db.getToken(x); - if (token != null) { - token.appendSQL(stat, this); - return; - } - SelectColumn col = aliasMap.get(x); - if (col != null) { - col.appendSQL(stat); - return; - } - stat.appendSQL("?"); - stat.addParameter(x); - } - - void addConditionToken(Token condition) { - conditions.add(condition); - } - - void addUpdateColumnDeclaration(UpdateColumn declaration) { - updateColumnDeclarations.add(declaration); - } - - void appendWhere(SQLStatement stat) { - if (!conditions.isEmpty()) { - stat.appendSQL(" WHERE "); - for (Token token : conditions) { - token.appendSQL(stat, this); - stat.appendSQL(" "); - } - } - } - - @SuppressWarnings("unchecked") - void appendFromWhere(SQLStatement stat) { - stat.appendSQL(" FROM "); - from.appendSQL(stat); - for (SelectTable join : joins) { - join.appendSQLAsJoin(stat, this); - } - appendWhere(stat); - if (groupByExpressions != null) { - stat.appendSQL(" GROUP BY "); - int i = 0; - for (Object obj : groupByExpressions) { - if (i++ > 0) { - stat.appendSQL(", "); - } - appendSQL(stat, obj); - stat.appendSQL(" "); - } - } - if (!orderByList.isEmpty()) { - stat.appendSQL(" ORDER BY "); - int i = 0; - for (OrderExpression o : orderByList) { - if (i++ > 0) { - stat.appendSQL(", "); - } - o.appendSQL(stat); - stat.appendSQL(" "); - } - } - if (limit > 0) { - db.getDialect().appendLimit(stat, limit); - } - if (offset > 0) { - db.getDialect().appendOffset(stat, offset); - } - StatementLogger.select(stat.getSQL()); - } - - /** - * Join another table. 
- * - * @param alias an alias for the table to join - * @return the joined query - */ - @SuppressWarnings("unchecked") - public QueryJoin innerJoin(U alias) { - TableDefinition def = (TableDefinition) db.define(alias - .getClass()); - SelectTable join = new SelectTable(db, this, alias, false); - def.initSelectObject(join, alias, aliasMap); - joins.add(join); - return new QueryJoin(this, join); - } - - Db getDb() { - return db; - } - - boolean isJoin() { - return !joins.isEmpty(); - } - - SelectColumn getSelectColumn(Object obj) { - return aliasMap.get(obj); - } - - void addOrderBy(OrderExpression expr) { - orderByList.add(expr); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/QueryCondition.java b/h2/src/tools/org/h2/jaqu/QueryCondition.java deleted file mode 100644 index 63aa756a19..0000000000 --- a/h2/src/tools/org/h2/jaqu/QueryCondition.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * This class represents a query with an incomplete condition. 
- * - * @param the return type of the query - * @param the incomplete condition data type - */ -public class QueryCondition { - - private final Query query; - private final A x; - - QueryCondition(Query query, A x) { - this.query = query; - this.x = x; - } - - public QueryWhere is(A y) { - query.addConditionToken( - new Condition(x, y, CompareType.EQUAL)); - return new QueryWhere(query); - } - - public QueryWhere bigger(A y) { - query.addConditionToken( - new Condition(x, y, CompareType.BIGGER)); - return new QueryWhere(query); - } - - public QueryWhere biggerEqual(A y) { - query.addConditionToken( - new Condition(x, y, CompareType.BIGGER_EQUAL)); - return new QueryWhere(query); - } - - public QueryWhere smaller(A y) { - query.addConditionToken( - new Condition(x, y, CompareType.SMALLER)); - return new QueryWhere(query); - } - - public QueryWhere smallerEqual(A y) { - query.addConditionToken( - new Condition(x, y, CompareType.SMALLER_EQUAL)); - return new QueryWhere(query); - } - - public QueryWhere like(A pattern) { - query.addConditionToken( - new Condition(x, pattern, CompareType.LIKE)); - return new QueryWhere(query); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/QueryJoin.java b/h2/src/tools/org/h2/jaqu/QueryJoin.java deleted file mode 100644 index 9c0551292f..0000000000 --- a/h2/src/tools/org/h2/jaqu/QueryJoin.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * This class represents a query with a join. 
- */ -public class QueryJoin { - - private final Query query; - private final SelectTable join; - - QueryJoin(Query query, SelectTable join) { - this.query = query; - this.join = join; - } - - public QueryJoinCondition on(A x) { - return new QueryJoinCondition(query, join, x); - } -} diff --git a/h2/src/tools/org/h2/jaqu/QueryJoinCondition.java b/h2/src/tools/org/h2/jaqu/QueryJoinCondition.java deleted file mode 100644 index 0c9f44db1e..0000000000 --- a/h2/src/tools/org/h2/jaqu/QueryJoinCondition.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * This class represents a query with join and an incomplete condition. - * - * @param the incomplete condition data type - */ -public class QueryJoinCondition { - - private final Query query; - private final SelectTable join; - private final A x; - - QueryJoinCondition(Query query, SelectTable join, A x) { - this.query = query; - this.join = join; - this.x = x; - } - - public Query is(A y) { - join.addConditionToken(new Condition(x, y, CompareType.EQUAL)); - return query; - } -} diff --git a/h2/src/tools/org/h2/jaqu/QueryWhere.java b/h2/src/tools/org/h2/jaqu/QueryWhere.java deleted file mode 100644 index ad59050d76..0000000000 --- a/h2/src/tools/org/h2/jaqu/QueryWhere.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.util.List; - -/** - * This class represents a query with a condition. 
- * - * @param the return type - */ -public class QueryWhere { - - Query query; - - QueryWhere(Query query) { - this.query = query; - } - - public QueryCondition and(A x) { - query.addConditionToken(ConditionAndOr.AND); - return new QueryCondition(query, x); - } - - public QueryCondition or(A x) { - query.addConditionToken(ConditionAndOr.OR); - return new QueryCondition(query, x); - } - - public QueryWhere limit(long limit) { - query.limit(limit); - return this; - } - - public QueryWhere offset(long offset) { - query.offset(offset); - return this; - } - - public List select(Z x) { - return query.select(x); - } - - public String getSQL() { - SQLStatement stat = new SQLStatement(query.getDb()); - stat.appendSQL("SELECT *"); - query.appendFromWhere(stat); - return stat.getSQL().trim(); - } - - public List selectDistinct(Z x) { - return query.selectDistinct(x); - } - - public X selectFirst(Z x) { - List list = query.select(x); - return list.isEmpty() ? null : list.get(0); - } - - public List select() { - return query.select(); - } - - public T selectFirst() { - List list = select(); - return list.isEmpty() ? null : list.get(0); - } - - public List selectDistinct() { - return query.selectDistinct(); - } - - - /** - * Order by a number of columns. - * - * @param expressions the order by expressions - * @return the query - */ - public QueryWhere orderBy(Object... 
expressions) { - for (Object expr : expressions) { - OrderExpression e = - new OrderExpression(query, expr, false, false, false); - query.addOrderBy(e); - } - return this; - } - - public QueryWhere orderByNullsFirst(Object expr) { - OrderExpression e = - new OrderExpression(query, expr, false, true, false); - query.addOrderBy(e); - return this; - } - - public QueryWhere orderByNullsLast(Object expr) { - OrderExpression e = - new OrderExpression(query, expr, false, false, true); - query.addOrderBy(e); - return this; - } - - public QueryWhere orderByDesc(Object expr) { - OrderExpression e = - new OrderExpression(query, expr, true, false, false); - query.addOrderBy(e); - return this; - } - - public QueryWhere orderByDescNullsFirst(Object expr) { - OrderExpression e = - new OrderExpression(query, expr, true, true, false); - query.addOrderBy(e); - return this; - } - - public QueryWhere orderByDescNullsLast(Object expr) { - OrderExpression e = - new OrderExpression(query, expr, true, false, true); - query.addOrderBy(e); - return this; - } - - public int delete() { - return query.delete(); - } - - public int update() { - return query.update(); - } - - public long selectCount() { - return query.selectCount(); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/SQLDialect.java b/h2/src/tools/org/h2/jaqu/SQLDialect.java deleted file mode 100644 index dcbabe12d8..0000000000 --- a/h2/src/tools/org/h2/jaqu/SQLDialect.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import org.h2.jaqu.TableDefinition.IndexDefinition; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; - -/** - * This interface defines points where JaQu can build different statements - * depending on the database used. - */ -public interface SQLDialect { - - /** - * Get the SQL snippet for the table name. 
- * - * @param schema the schema name, or null for no schema - * @param table the table name - * @return the SQL snippet - */ - String getTableName(String schema, String table); - - /** - * Get the CREATE INDEX statement. - * - * @param schema the schema name - * @param table the table name - * @param index the index definition - * @return the SQL statement - */ - String getCreateIndex(String schema, String table, IndexDefinition index); - - /** - * Append "LIMIT limit" to the SQL statement. - * - * @param stat the statement - * @param limit the limit - */ - void appendLimit(SQLStatement stat, long limit); - - /** - * Append "OFFSET offset" to the SQL statement. - * - * @param stat the statement - * @param offset the offset - */ - void appendOffset(SQLStatement stat, long offset); - - /** - * Whether memory tables are supported. - * - * @return true if they are - */ - boolean supportsMemoryTables(); - - /** - * Default implementation of an SQL dialect. Designed for an H2 database, - * and may be suitable for others. - */ - public static class DefaultSQLDialect implements SQLDialect { - - @Override - public String getTableName(String schema, String table) { - if (StringUtils.isNullOrEmpty(schema)) { - return table; - } - return schema + "." 
+ table; - } - - @Override - public boolean supportsMemoryTables() { - return true; - } - - @Override - public String getCreateIndex(String schema, String table, - IndexDefinition index) { - StatementBuilder buff = new StatementBuilder(); - buff.append("CREATE "); - switch (index.type) { - case STANDARD: - break; - case UNIQUE: - buff.append("UNIQUE "); - break; - case HASH: - buff.append("HASH "); - break; - case UNIQUE_HASH: - buff.append("UNIQUE HASH "); - break; - } - buff.append("INDEX IF NOT EXISTS "); - buff.append(index.indexName); - buff.append(" ON "); - buff.append(table); - buff.append("("); - for (String col : index.columnNames) { - buff.appendExceptFirst(", "); - buff.append(col); - } - buff.append(")"); - return buff.toString(); - } - - @Override - public void appendLimit(SQLStatement stat, long limit) { - stat.appendSQL(" LIMIT " + limit); - } - - @Override - public void appendOffset(SQLStatement stat, long offset) { - stat.appendSQL(" OFFSET " + offset); - } - - } - -} diff --git a/h2/src/tools/org/h2/jaqu/SQLStatement.java b/h2/src/tools/org/h2/jaqu/SQLStatement.java deleted file mode 100644 index 8a5cffd52e..0000000000 --- a/h2/src/tools/org/h2/jaqu/SQLStatement.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import org.h2.util.JdbcUtils; - -/** - * This class represents a parameterized SQL statement. 
- */ -public class SQLStatement { - private final Db db; - private StringBuilder buff = new StringBuilder(); - private String sql; - private final ArrayList params = new ArrayList(); - - SQLStatement(Db db) { - this.db = db; - } - - void setSQL(String sql) { - this.sql = sql; - buff = new StringBuilder(sql); - } - - public SQLStatement appendSQL(String s) { - buff.append(s); - sql = null; - return this; - } - - public SQLStatement appendTable(String schema, String table) { - return appendSQL(db.getDialect().getTableName(schema, table)); - } - - String getSQL() { - if (sql == null) { - sql = buff.toString(); - } - return sql; - } - - SQLStatement addParameter(Object o) { - params.add(o); - return this; - } - - ResultSet executeQuery() { - try { - return prepare(false).executeQuery(); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - int executeUpdate() { - PreparedStatement ps = null; - try { - ps = prepare(false); - return ps.executeUpdate(); - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(ps); - } - } - - long executeInsert() { - PreparedStatement ps = null; - try { - ps = prepare(true); - ps.executeUpdate(); - long identity = -1; - ResultSet rs = ps.getGeneratedKeys(); - if (rs != null && rs.next()) { - identity = rs.getLong(1); - } - JdbcUtils.closeSilently(rs); - return identity; - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - JdbcUtils.closeSilently(ps); - } - } - - private static void setValue(PreparedStatement prep, int parameterIndex, - Object x) { - try { - prep.setObject(parameterIndex, x); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - private PreparedStatement prepare(boolean returnGeneratedKeys) { - PreparedStatement prep = db.prepare(getSQL(), returnGeneratedKeys); - for (int i = 0; i < params.size(); i++) { - Object o = params.get(i); - setValue(prep, i + 1, o); - } - return prep; - } - -} diff --git 
a/h2/src/tools/org/h2/jaqu/SelectColumn.java b/h2/src/tools/org/h2/jaqu/SelectColumn.java deleted file mode 100644 index dc2054da87..0000000000 --- a/h2/src/tools/org/h2/jaqu/SelectColumn.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import org.h2.jaqu.TableDefinition.FieldDefinition; - -/** - * This class represents a column of a table in a query. - * - * @param the table data type - */ -class SelectColumn { - private final SelectTable selectTable; - private final FieldDefinition fieldDef; - - SelectColumn(SelectTable table, FieldDefinition fieldDef) { - this.selectTable = table; - this.fieldDef = fieldDef; - } - - void appendSQL(SQLStatement stat) { - if (selectTable.getQuery().isJoin()) { - stat.appendSQL(selectTable.getAs() + "." + fieldDef.columnName); - } else { - stat.appendSQL(fieldDef.columnName); - } - } - - FieldDefinition getFieldDefinition() { - return fieldDef; - } - - SelectTable getSelectTable() { - return selectTable; - } - - Object getCurrentValue() { - return fieldDef.getValue(selectTable.getCurrent()); - } -} diff --git a/h2/src/tools/org/h2/jaqu/SelectTable.java b/h2/src/tools/org/h2/jaqu/SelectTable.java deleted file mode 100644 index 7c61bd3c2a..0000000000 --- a/h2/src/tools/org/h2/jaqu/SelectTable.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.util.ArrayList; -import org.h2.jaqu.util.ClassUtils; -import org.h2.util.New; - -/** - * This class represents a table in a query. 
- * - * @param the table class - */ -class SelectTable { - - private static int asCounter; - private final Query query; - private final Class clazz; - private T current; - private final String as; - private final TableDefinition aliasDef; - private final boolean outerJoin; - private final ArrayList joinConditions = New.arrayList(); - private final T alias; - - @SuppressWarnings("unchecked") - SelectTable(Db db, Query query, T alias, boolean outerJoin) { - this.alias = alias; - this.query = query; - this.outerJoin = outerJoin; - aliasDef = (TableDefinition) db.getTableDefinition(alias.getClass()); - clazz = ClassUtils.getClass(alias); - as = "T" + asCounter++; - } - - T getAlias() { - return alias; - } - - T newObject() { - return ClassUtils.newObject(clazz); - } - - TableDefinition getAliasDefinition() { - return aliasDef; - } - - void appendSQL(SQLStatement stat) { - if (query.isJoin()) { - stat.appendTable(aliasDef.schemaName, aliasDef.tableName) - .appendSQL(" AS " + as); - } else { - stat.appendTable(aliasDef.schemaName, aliasDef.tableName); - } - } - - void appendSQLAsJoin(SQLStatement stat, Query q) { - if (outerJoin) { - stat.appendSQL(" LEFT OUTER JOIN "); - } else { - stat.appendSQL(" INNER JOIN "); - } - appendSQL(stat); - if (!joinConditions.isEmpty()) { - stat.appendSQL(" ON "); - for (Token token : joinConditions) { - token.appendSQL(stat, q); - stat.appendSQL(" "); - } - } - } - - boolean getOuterJoin() { - return outerJoin; - } - - Query getQuery() { - return query; - } - - String getAs() { - return as; - } - - void addConditionToken(Token condition) { - joinConditions.add(condition); - } - - T getCurrent() { - return current; - } - - void setCurrent(T current) { - this.current = current; - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Table.java b/h2/src/tools/org/h2/jaqu/Table.java deleted file mode 100644 index d354f673a9..0000000000 --- a/h2/src/tools/org/h2/jaqu/Table.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * A class that implements this interface can be used as a database table. - *

    - * You may implement the Table interface on your model object and optionally use - * JQColumn annotations (which imposes a compile-time and runtime-dependency on - * JaQu), or may choose to use the JQTable and JQColumn annotations only (which - * imposes a compile-time and runtime-dependency on this file only). - *

    - * If a class is annotated with JQTable and at the same time implements Table, - * the define() method is not called. - *

    - * Supported data types: - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
    java.lang.StringVARCHAR (maxLength > 0) / TEXT (maxLength == 0)
    java.lang.BooleanBIT
    java.lang.ByteTINYINT
    java.lang.ShortSMALLINT
    java.lang.IntegerINT
    java.lang.LongBIGINT
    java.lang.FloatREAL
    java.lang.DoubleDOUBLE
    java.math.BigDecimalDECIMAL
    java.util.DateTIMESTAMP
    java.sql.DateDATE
    java.sql.TimeTIME
    java.sql.TimestampTIMESTAMP
    - *

    - * Unsupported data types: binary types (BLOB, etc), and custom types. - *

    - * Table and field mapping: by default, the mapped table name is the class name - * and the public fields are reflectively mapped, by their name, to columns. As - * an alternative, you may specify both the table and column definition by - * annotations. - *

    - * Table Interface: you may set additional parameters such as table name, - * primary key, and indexes in the define() method. - *

    - * Annotations: you may use the annotations with or without implementing the - * Table interface. The annotations allow you to decouple your model completely - * from JaQu other than this file. - *

    - * Automatic model generation: you may automatically generate model classes as - * strings with the Db and DbInspector objects: - *

    - * Db db = Db.open("jdbc:h2:mem:", "sa", "sa");
    - * DbInspector inspector = new DbInspector(db);
    - * List<String> models =
    - *         inspector.generateModel(schema, table, packageName,
    - *         annotateSchema, trimStrings)
    - * 
    - * Or you may use the GenerateModels tool to generate and save your classes to - * the file system: - *
    - * java -cp h2jaqu.jar org.h2.jaqu.util.GenerateModels
    - *      -url "jdbc:h2:mem:"
    - *      -user sa -password sa -schema schemaName -table tableName
    - *      -package packageName -folder destination
    - *      -annotateSchema false -trimStrings true
    - * 
    - * - * Model validation: you may validate your model class with DbInspector object. - * The DbInspector will report errors, warnings, and suggestions: - *
    - * Db db = Db.open("jdbc:h2:mem:", "sa", "sa");
    - * DbInspector inspector = new DbInspector(db);
    - * List<Validation> remarks =
    - *         inspector.validateModel(new MyModel(), throwOnError);
    - * for (Validation remark : remarks) {
    - *     System.out.println(remark);
    - * }
    - * 
    - */ -public interface Table { - - /** - * An annotation for a database. - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public @interface JQDatabase { - - /** - * If set to a non-zero value, JaQu - * maintains a "_jq_versions" table within your database. The - * version number is used to call to a registered - * DbUpgrader implementation to perform relevant ALTER statements. - * Default: 0. - * You must specify a DbUpgrader on your Db object to - * use this parameter. - */ - int version() default 0; - - } - - /** - * An annotation for a schema. - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public @interface JQSchema { - - /** - * The schema may be optionally specified. - * Default: unspecified. - */ - String name() default ""; - - } - - /** - * Enumeration defining the 4 index types. - */ - public static enum IndexType { - STANDARD, UNIQUE, HASH, UNIQUE_HASH; - } - - /** - * An index annotation. - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public @interface JQIndex { - - /** - * Standard indexes may be optionally specified. - *
      - *
    • standard = "id, name"
    • - *
    • standard = "id name"
    • - *
    • standard = { "id name", "date" }
    • - *
    - * Standard indexes may still be added in the define() method if - * the model class is not annotated with JQTable. - * Default: unspecified. - */ - String[] standard() default {}; - - /** - * Unique indexes may be optionally specified. - *
      - *
    • unique = "id, name"
    • - *
    • unique = "id name"
    • - *
    • unique = { "id name", "date" }
    • - *
    - * Unique indexes may still be added in the define() method if - * the model class is not annotated with JQTable. - * Default: unspecified. - */ - String[] unique() default {}; - - /** - * Hash indexes may be optionally specified. - *
      - *
    • hash = "name" - *
    • hash = { "name", "date" } - *
    - * Hash indexes may still be added in the define() method if - * the model class is not annotated with JQTable. - * Default: unspecified. - */ - String[] hash() default {}; - - /** - * Unique hash indexes may be optionally specified. - *
      - *
    • uniqueHash = "id" - *
    • uniqueHash = "name" - *
    • uniqueHash = { "id", "name" } - *
    - * Unique hash indexes may still be added in the define() method if - * the model class is not annotated with JQTable. - * Default: unspecified. - */ - String[] uniqueHash() default {}; - - } - - /** - * Annotation to define a table. - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public @interface JQTable { - - /** - * The table name. If not specified the - * class name is used as the table name. - *

    - * The table name may still be overridden in the define() method - * if the model class is not annotated with JQTable. - * Default: unspecified. - */ - String name() default ""; - - /** - * The primary key may be optionally specified. If it is not - * specified, then no primary key is set by the JQTable annotation. - * You may specify a composite primary key. - *

      - *
    • primaryKey = "id, name" - *
    • primaryKey = "id name" - *
    - * The primary key may still be overridden in the define() method - * if the model class is not annotated with JQTable. - * Default: unspecified. - */ - String primaryKey() default ""; - - /** - * The inherit columns allows this model class to inherit columns from - * its super class. Any JQTable annotation present on the super class is - * ignored. - * Default: false. - */ - boolean inheritColumns() default false; - - /** - * Whether or not JaQu tries to create the table and indexes. Default: - * true. - */ - boolean createIfRequired() default true; - - /** - * Whether only supported types are mapped. - * If true, unsupported mapped types will throw a RuntimeException. - * If false, unsupported mapped types will default to VARCHAR. - * Default: true. - */ - boolean strictTypeMapping() default true; - - /** - * If true, only fields that are explicitly - * annotated as JQColumn are mapped. - * - * Default: true. - */ - boolean annotationsOnly() default true; - - /** - * If true, this table is created as a memory table where data is - * persistent, but index data is kept in main memory. - * - * Valid only for H2 databases. - * - * Default: false. - */ - boolean memoryTable() default false; - - /** - * If non-zero, JaQu will - * maintain a "_jq_versions" table within your database. The - * version number is used to call to a registered - * DbUpgrader implementation to perform relevant ALTER - * statements. - * Default: 0. - * You must specify a DbUpgrader on your Db object to - * use this parameter. - */ - int version() default 0; - } - - /** - * Annotation to define a column. Annotated fields may have any scope - * (however, the JVM may raise a SecurityException if the SecurityManager - * doesn't allow JaQu to access the field.) - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.FIELD) - public @interface JQColumn { - - /** - * If not specified, the field name is used as the column name. - * Default: the field name. 
- */ - String name() default ""; - - /** - * This column is the primary key. - * Default: false. - */ - boolean primaryKey() default false; - - /** - * The column is created with a - * sequence as the default value. - * Default: false. - */ - boolean autoIncrement() default false; - - /** - * If larger than zero, it is used during the CREATE TABLE phase. It - * may also be used to prevent database exceptions on INSERT - * and UPDATE statements (see trimString). - *

    - * Any maxLength set in define() may override this annotation - * setting if the model class is not annotated with JQTable. - * Default: 0. - */ - int maxLength() default 0; - - /** - * If true, JaQu will automatically trim the - * string if it exceeds maxLength - * (value.substring(0, maxLength)). - * Default: false. - */ - boolean trimString() default false; - - /** - * If false, JaQu will set - * the column NOT NULL during the CREATE TABLE phase. - * Default: false. - */ - boolean allowNull() default false; - - /** - * The default value assigned to the column during the CREATE TABLE - * phase. This field could contain a literal single-quoted value, or a - * function call. Empty strings are considered NULL. Examples: - *

      - *
    • defaultValue="" (null) - *
    • defaultValue="CURRENT_TIMESTAMP" - *
    • defaultValue="''" (empty string) - *
    • defaultValue="'0'" - *
    • defaultValue="'1970-01-01 00:00:01'" - *
    - * if the default value is specified, and auto increment is disabled, - * and primary key is disabled, then this value is included in the - * "DEFAULT ..." phrase of a column during the CREATE TABLE process. - * Default: unspecified (null). - */ - String defaultValue() default ""; - - } - - /** - * This method is called to let the table define the primary key, indexes, - * and the table name. - */ - void define(); - -} diff --git a/h2/src/tools/org/h2/jaqu/TableDefinition.java b/h2/src/tools/org/h2/jaqu/TableDefinition.java deleted file mode 100644 index c13cc7c537..0000000000 --- a/h2/src/tools/org/h2/jaqu/TableDefinition.java +++ /dev/null @@ -1,639 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.IdentityHashMap; -import java.util.List; -import java.util.Map; -import org.h2.jaqu.Table.IndexType; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQIndex; -import org.h2.jaqu.Table.JQSchema; -import org.h2.jaqu.Table.JQTable; -import org.h2.jaqu.util.StatementLogger; -import org.h2.jaqu.util.ClassUtils; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; - -/** - * A table definition contains the index definitions of a table, the field - * definitions, the table name, and other meta data. - * - * @param the table type - */ -class TableDefinition { - - /** - * The meta data of an index. - */ - static class IndexDefinition { - IndexType type; - String indexName; - - List columnNames; - } - - /** - * The meta data of a field. 
- */ - static class FieldDefinition { - String columnName; - Field field; - String dataType; - int maxLength; - boolean isPrimaryKey; - boolean isAutoIncrement; - boolean trimString; - boolean allowNull; - String defaultValue; - - Object getValue(Object obj) { - try { - return field.get(obj); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - void initWithNewObject(Object obj) { - Object o = ClassUtils.newObject(field.getType()); - setValue(obj, o); - } - - void setValue(Object obj, Object o) { - try { - if (!field.isAccessible()) { - field.setAccessible(true); - } - o = ClassUtils.convert(o, field.getType()); - field.set(obj, o); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - Object read(ResultSet rs, int columnIndex) { - try { - return rs.getObject(columnIndex); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - } - - String schemaName; - String tableName; - int tableVersion; - private boolean createTableIfRequired = true; - private final Class clazz; - private final ArrayList fields = New.arrayList(); - private final IdentityHashMap fieldMap = - ClassUtils.newIdentityHashMap(); - - private List primaryKeyColumnNames; - private final ArrayList indexes = New.arrayList(); - private boolean memoryTable; - - TableDefinition(Class clazz) { - this.clazz = clazz; - schemaName = null; - tableName = clazz.getSimpleName(); - } - - Class getModelClass() { - return clazz; - } - - List getFields() { - return fields; - } - - void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * Define a primary key by the specified model fields. - * - * @param modelFields the ordered list of model fields - */ - void setPrimaryKey(Object[] modelFields) { - List columnNames = mapColumnNames(modelFields); - setPrimaryKey(columnNames); - } - - /** - * Define a primary key by the specified column names. 
- * - * @param columnNames the ordered list of column names - */ - void setPrimaryKey(List columnNames) { - primaryKeyColumnNames = New.arrayList(columnNames); - // set isPrimaryKey flag for all field definitions - for (FieldDefinition fieldDefinition : fieldMap.values()) { - fieldDefinition.isPrimaryKey = this.primaryKeyColumnNames - .contains(fieldDefinition.columnName); - } - } - - String getColumnName(A fieldObject) { - FieldDefinition def = fieldMap.get(fieldObject); - return def == null ? null : def.columnName; - } - - private ArrayList mapColumnNames(Object[] columns) { - ArrayList columnNames = New.arrayList(); - for (Object column : columns) { - columnNames.add(getColumnName(column)); - } - return columnNames; - } - - /** - * Defines an index with the specified model fields. - * - * @param type the index type (STANDARD, HASH, UNIQUE, UNIQUE_HASH) - * @param modelFields the ordered list of model fields - */ - void addIndex(IndexType type, Object[] modelFields) { - List columnNames = mapColumnNames(modelFields); - addIndex(type, columnNames); - } - - /** - * Defines an index with the specified column names. 
- * - * @param type the index type (STANDARD, HASH, UNIQUE, UNIQUE_HASH) - * @param columnNames the ordered list of column names - */ - void addIndex(IndexType type, List columnNames) { - IndexDefinition index = new IndexDefinition(); - index.indexName = tableName + "_" + indexes.size(); - index.columnNames = New.arrayList(columnNames); - index.type = type; - indexes.add(index); - } - - public void setMaxLength(Object column, int maxLength) { - String columnName = getColumnName(column); - for (FieldDefinition f: fields) { - if (f.columnName.equals(columnName)) { - f.maxLength = maxLength; - break; - } - } - } - - void mapFields() { - boolean byAnnotationsOnly = false; - boolean inheritColumns = false; - boolean strictTypeMapping = false; - if (clazz.isAnnotationPresent(JQTable.class)) { - JQTable tableAnnotation = clazz.getAnnotation(JQTable.class); - byAnnotationsOnly = tableAnnotation.annotationsOnly(); - inheritColumns = tableAnnotation.inheritColumns(); - strictTypeMapping = tableAnnotation.strictTypeMapping(); - } - - List classFields = New.arrayList(); - classFields.addAll(Arrays.asList(clazz.getDeclaredFields())); - if (inheritColumns) { - Class superClass = clazz.getSuperclass(); - classFields.addAll(Arrays.asList(superClass.getDeclaredFields())); - } - - for (Field f : classFields) { - // default to field name - String columnName = f.getName(); - boolean isAutoIncrement = false; - boolean isPrimaryKey = false; - int maxLength = 0; - boolean trimString = false; - boolean allowNull = true; - String defaultValue = ""; - boolean hasAnnotation = f.isAnnotationPresent(JQColumn.class); - if (hasAnnotation) { - JQColumn col = f.getAnnotation(JQColumn.class); - if (!StringUtils.isNullOrEmpty(col.name())) { - columnName = col.name(); - } - isAutoIncrement = col.autoIncrement(); - isPrimaryKey = col.primaryKey(); - maxLength = col.maxLength(); - trimString = col.trimString(); - allowNull = col.allowNull(); - defaultValue = col.defaultValue(); - } - boolean isPublic = 
Modifier.isPublic(f.getModifiers()); - boolean reflectiveMatch = isPublic && !byAnnotationsOnly; - if (reflectiveMatch || hasAnnotation) { - FieldDefinition fieldDef = new FieldDefinition(); - fieldDef.field = f; - fieldDef.columnName = columnName; - fieldDef.isAutoIncrement = isAutoIncrement; - fieldDef.isPrimaryKey = isPrimaryKey; - fieldDef.maxLength = maxLength; - fieldDef.trimString = trimString; - fieldDef.allowNull = allowNull; - fieldDef.defaultValue = defaultValue; - fieldDef.dataType = ModelUtils.getDataType(fieldDef, strictTypeMapping); - fields.add(fieldDef); - } - } - List primaryKey = New.arrayList(); - for (FieldDefinition fieldDef : fields) { - if (fieldDef.isPrimaryKey) { - primaryKey.add(fieldDef.columnName); - } - } - if (primaryKey.size() > 0) { - setPrimaryKey(primaryKey); - } - } - - /** - * Optionally truncates strings to the maximum length - */ - private static Object getValue(Object obj, FieldDefinition field) { - Object value = field.getValue(obj); - if (field.trimString && field.maxLength > 0) { - if (value instanceof String) { - // clip strings - String s = (String) value; - if (s.length() > field.maxLength) { - return s.substring(0, field.maxLength); - } - return s; - } - return value; - } - // standard behavior - return value; - } - - long insert(Db db, Object obj, boolean returnKey) { - SQLStatement stat = new SQLStatement(db); - StatementBuilder buff = new StatementBuilder("INSERT INTO "); - buff.append(db.getDialect().getTableName(schemaName, tableName)).append('('); - for (FieldDefinition field : fields) { - buff.appendExceptFirst(", "); - buff.append(field.columnName); - } - buff.append(") VALUES("); - buff.resetCount(); - for (FieldDefinition field : fields) { - buff.appendExceptFirst(", "); - buff.append('?'); - Object value = getValue(obj, field); - stat.addParameter(value); - } - buff.append(')'); - stat.setSQL(buff.toString()); - StatementLogger.insert(stat.getSQL()); - if (returnKey) { - return stat.executeInsert(); - } - 
return stat.executeUpdate(); - } - - void merge(Db db, Object obj) { - if (primaryKeyColumnNames == null || primaryKeyColumnNames.size() == 0) { - throw new IllegalStateException("No primary key columns defined " - + "for table " + obj.getClass() + " - no update possible"); - } - SQLStatement stat = new SQLStatement(db); - StatementBuilder buff = new StatementBuilder("MERGE INTO "); - buff.append(db.getDialect().getTableName(schemaName, tableName)).append(" ("); - buff.resetCount(); - for (FieldDefinition field : fields) { - buff.appendExceptFirst(", "); - buff.append(field.columnName); - } - buff.append(") KEY("); - buff.resetCount(); - for (FieldDefinition field : fields) { - if (field.isPrimaryKey) { - buff.appendExceptFirst(", "); - buff.append(field.columnName); - } - } - buff.append(") "); - buff.resetCount(); - buff.append("VALUES ("); - for (FieldDefinition field : fields) { - buff.appendExceptFirst(", "); - buff.append('?'); - Object value = getValue(obj, field); - stat.addParameter(value); - } - buff.append(')'); - stat.setSQL(buff.toString()); - StatementLogger.merge(stat.getSQL()); - stat.executeUpdate(); - } - - void update(Db db, Object obj) { - if (primaryKeyColumnNames == null || primaryKeyColumnNames.size() == 0) { - throw new IllegalStateException("No primary key columns defined " - + "for table " + obj.getClass() + " - no update possible"); - } - SQLStatement stat = new SQLStatement(db); - StatementBuilder buff = new StatementBuilder("UPDATE "); - buff.append(db.getDialect().getTableName(schemaName, tableName)) - .append(" SET "); - buff.resetCount(); - - for (FieldDefinition field : fields) { - if (!field.isPrimaryKey) { - buff.appendExceptFirst(", "); - buff.append(field.columnName); - buff.append(" = ?"); - Object value = getValue(obj, field); - stat.addParameter(value); - } - } - Object alias = ClassUtils.newObject(obj.getClass()); - Query query = Query.from(db, alias); - boolean firstCondition = true; - for (FieldDefinition field : fields) { 
- if (field.isPrimaryKey) { - Object aliasValue = field.getValue(alias); - Object value = field.getValue(obj); - if (!firstCondition) { - query.addConditionToken(ConditionAndOr.AND); - } - firstCondition = false; - query.addConditionToken( - new Condition( - aliasValue, value, CompareType.EQUAL)); - } - } - stat.setSQL(buff.toString()); - query.appendWhere(stat); - StatementLogger.update(stat.getSQL()); - stat.executeUpdate(); - } - - void delete(Db db, Object obj) { - if (primaryKeyColumnNames == null || primaryKeyColumnNames.size() == 0) { - throw new IllegalStateException("No primary key columns defined " - + "for table " + obj.getClass() + " - no update possible"); - } - SQLStatement stat = new SQLStatement(db); - StatementBuilder buff = new StatementBuilder("DELETE FROM "); - buff.append(db.getDialect().getTableName(schemaName, tableName)); - buff.resetCount(); - Object alias = ClassUtils.newObject(obj.getClass()); - Query query = Query.from(db, alias); - boolean firstCondition = true; - for (FieldDefinition field : fields) { - if (field.isPrimaryKey) { - Object aliasValue = field.getValue(alias); - Object value = field.getValue(obj); - if (!firstCondition) { - query.addConditionToken(ConditionAndOr.AND); - } - firstCondition = false; - query.addConditionToken( - new Condition( - aliasValue, value, CompareType.EQUAL)); - } - } - stat.setSQL(buff.toString()); - query.appendWhere(stat); - StatementLogger.delete(stat.getSQL()); - stat.executeUpdate(); - } - - TableDefinition createTableIfRequired(Db db) { - if (!createTableIfRequired) { - // skip table and index creation - // but still check for upgrades - db.upgradeTable(this); - return this; - } - SQLDialect dialect = db.getDialect(); - SQLStatement stat = new SQLStatement(db); - StatementBuilder buff; - if (memoryTable && dialect.supportsMemoryTables()) { - buff = new StatementBuilder("CREATE MEMORY TABLE IF NOT EXISTS "); - } else { - buff = new StatementBuilder("CREATE TABLE IF NOT EXISTS "); - } - - 
buff.append(dialect.getTableName(schemaName, tableName)).append('('); - - for (FieldDefinition field : fields) { - buff.appendExceptFirst(", "); - buff.append(field.columnName).append(' ').append(field.dataType); - if (field.maxLength > 0) { - buff.append('(').append(field.maxLength).append(')'); - } - - if (field.isAutoIncrement) { - buff.append(" AUTO_INCREMENT"); - } - - if (!field.allowNull) { - buff.append(" NOT NULL"); - } - - // default values - if (!field.isAutoIncrement && !field.isPrimaryKey) { - String dv = field.defaultValue; - if (!StringUtils.isNullOrEmpty(dv)) { - if (ModelUtils.isProperlyFormattedDefaultValue(dv) - && ModelUtils.isValidDefaultValue(field.field.getType(), dv)) { - buff.append(" DEFAULT " + dv); - } - } - } - } - - // primary key - if (primaryKeyColumnNames != null && primaryKeyColumnNames.size() > 0) { - buff.append(", PRIMARY KEY("); - buff.resetCount(); - for (String n : primaryKeyColumnNames) { - buff.appendExceptFirst(", "); - buff.append(n); - } - buff.append(')'); - } - buff.append(')'); - stat.setSQL(buff.toString()); - StatementLogger.create(stat.getSQL()); - stat.executeUpdate(); - - // create indexes - for (IndexDefinition index:indexes) { - String sql = db.getDialect().getCreateIndex(schemaName, tableName, index); - stat.setSQL(sql); - StatementLogger.create(stat.getSQL()); - stat.executeUpdate(); - } - - // tables are created using IF NOT EXISTS - // but we may still need to upgrade - db.upgradeTable(this); - return this; - } - - /** - * Retrieve list of columns from index definition. 
- * - * @param index the index columns, separated by space - * @return the column list - */ - private static List getColumns(String index) { - List cols = New.arrayList(); - if (index == null || index.length() == 0) { - return null; - } - String[] cs = index.split("(,|\\s)"); - for (String c : cs) { - if (c != null && c.trim().length() > 0) { - cols.add(c.trim()); - } - } - if (cols.size() == 0) { - return null; - } - return cols; - } - - void mapObject(Object obj) { - fieldMap.clear(); - initObject(obj, fieldMap); - - if (clazz.isAnnotationPresent(JQSchema.class)) { - JQSchema schemaAnnotation = clazz.getAnnotation(JQSchema.class); - // setup schema name mapping, if properly annotated - if (!StringUtils.isNullOrEmpty(schemaAnnotation.name())) { - schemaName = schemaAnnotation.name(); - } - } - - if (clazz.isAnnotationPresent(JQTable.class)) { - JQTable tableAnnotation = clazz.getAnnotation(JQTable.class); - - // setup table name mapping, if properly annotated - if (!StringUtils.isNullOrEmpty(tableAnnotation.name())) { - tableName = tableAnnotation.name(); - } - - // allow control over createTableIfRequired() - createTableIfRequired = tableAnnotation.createIfRequired(); - - // model version - if (tableAnnotation.version() > 0) { - tableVersion = tableAnnotation.version(); - } - - // setup the primary index, if properly annotated - List primaryKey = getColumns(tableAnnotation.primaryKey()); - if (primaryKey != null) { - setPrimaryKey(primaryKey); - } - } - - if (clazz.isAnnotationPresent(JQIndex.class)) { - JQIndex indexAnnotation = clazz.getAnnotation(JQIndex.class); - - // setup the indexes, if properly annotated - addIndexes(IndexType.STANDARD, indexAnnotation.standard()); - addIndexes(IndexType.UNIQUE, indexAnnotation.unique()); - addIndexes(IndexType.HASH, indexAnnotation.hash()); - addIndexes(IndexType.UNIQUE_HASH, indexAnnotation.uniqueHash()); - } - } - - void addIndexes(IndexType type, String [] indexes) { - for (String index:indexes) { - List 
validatedColumns = getColumns(index); - if (validatedColumns == null) { - return; - } - addIndex(type, validatedColumns); - } - } - - List getIndexes(IndexType type) { - List list = New.arrayList(); - for (IndexDefinition def:indexes) { - if (def.type.equals(type)) { - list.add(def); - } - } - return list; - } - - void initObject(Object obj, Map map) { - for (FieldDefinition def : fields) { - def.initWithNewObject(obj); - map.put(def.getValue(obj), def); - } - } - - void initSelectObject(SelectTable table, Object obj, - Map> map) { - for (FieldDefinition def : fields) { - def.initWithNewObject(obj); - SelectColumn column = new SelectColumn(table, def); - map.put(def.getValue(obj), column); - } - } - - void readRow(Object item, ResultSet rs) { - for (int i = 0; i < fields.size(); i++) { - FieldDefinition def = fields.get(i); - Object o = def.read(rs, i + 1); - def.setValue(item, o); - } - } - - void appendSelectList(SQLStatement stat) { - for (int i = 0; i < fields.size(); i++) { - if (i > 0) { - stat.appendSQL(", "); - } - FieldDefinition def = fields.get(i); - stat.appendSQL(def.columnName); - } - } - - void appendSelectList(SQLStatement stat, Query query, X x) { - for (int i = 0; i < fields.size(); i++) { - if (i > 0) { - stat.appendSQL(", "); - } - FieldDefinition def = fields.get(i); - Object obj = def.getValue(x); - query.appendSQL(stat, obj); - } - } - - void copyAttributeValues(Query query, X to, X map) { - for (FieldDefinition def : fields) { - Object obj = def.getValue(map); - SelectColumn col = query.getSelectColumn(obj); - Object value = col.getCurrentValue(); - def.setValue(to, value); - } - } - -} diff --git a/h2/src/tools/org/h2/jaqu/TableInspector.java b/h2/src/tools/org/h2/jaqu/TableInspector.java deleted file mode 100644 index abb706115c..0000000000 --- a/h2/src/tools/org/h2/jaqu/TableInspector.java +++ /dev/null @@ -1,693 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import java.lang.reflect.Modifier; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Set; -import org.h2.jaqu.Table.IndexType; -import org.h2.jaqu.Table.JQColumn; -import org.h2.jaqu.Table.JQIndex; -import org.h2.jaqu.Table.JQSchema; -import org.h2.jaqu.Table.JQTable; -import org.h2.jaqu.TableDefinition.FieldDefinition; -import org.h2.jaqu.TableDefinition.IndexDefinition; -import org.h2.util.JdbcUtils; -import org.h2.util.New; -import org.h2.util.StatementBuilder; -import org.h2.util.StringUtils; - -import static org.h2.jaqu.ValidationRemark.consider; -import static org.h2.jaqu.ValidationRemark.error; -import static org.h2.jaqu.ValidationRemark.warn; - -/** - * Class to inspect the contents of a particular table including its indexes. - * This class does the bulk of the work in terms of model generation and model - * validation. - */ -public class TableInspector { - - private static final int todoReviewClass = 0; - private static final String EOL = "\n"; - - private final String schema; - private final String table; - private final boolean forceUpperCase; - private final Class dateTimeClass; - private final List primaryKeys = New.arrayList(); - private Map indexes; - private Map columns; - - TableInspector(String schema, String table, boolean forceUpperCase, - Class dateTimeClass) { - this.schema = schema; - this.table = table; - this.forceUpperCase = forceUpperCase; - this.dateTimeClass = dateTimeClass; - } - - /** - * Tests to see if this TableInspector represents schema.table. - *

    - * - * @param schema the schema name - * @param table the table name - * @return true if the table matches - */ - boolean matches(String schema, String table) { - if (StringUtils.isNullOrEmpty(schema)) { - // table name matching - return this.table.equalsIgnoreCase(table); - } else if (StringUtils.isNullOrEmpty(table)) { - // schema name matching - return this.schema.equalsIgnoreCase(schema); - } else { - // exact table matching - return this.schema.equalsIgnoreCase(schema) - && this.table.equalsIgnoreCase(table); - } - } - - /** - * Reads the DatabaseMetaData for the details of this table including - * primary keys and indexes. - * - * @param metaData the database meta data - */ - void read(DatabaseMetaData metaData) throws SQLException { - ResultSet rs = null; - - // primary keys - try { - rs = metaData.getPrimaryKeys(null, schema, table); - while (rs.next()) { - String c = rs.getString("COLUMN_NAME"); - primaryKeys.add(c); - } - JdbcUtils.closeSilently(rs); - - // indexes - rs = metaData.getIndexInfo(null, schema, table, false, true); - indexes = New.hashMap(); - while (rs.next()) { - IndexInspector info = new IndexInspector(rs); - if (info.type.equals(IndexType.UNIQUE) - && info.name.toLowerCase().startsWith("primary")) { - // skip primary key indexes - continue; - } - if (indexes.containsKey(info.name)) { - indexes.get(info.name).addColumn(rs); - } else { - indexes.put(info.name, info); - } - } - JdbcUtils.closeSilently(rs); - - // columns - rs = metaData.getColumns(null, schema, table, null); - columns = New.hashMap(); - while (rs.next()) { - ColumnInspector col = new ColumnInspector(); - col.name = rs.getString("COLUMN_NAME"); - col.type = rs.getString("TYPE_NAME"); - col.clazz = ModelUtils.getClassForSqlType(col.type, - dateTimeClass); - col.size = rs.getInt("COLUMN_SIZE"); - col.allowNull = rs.getInt("NULLABLE") == DatabaseMetaData.columnNullable; - col.isAutoIncrement = rs.getBoolean("IS_AUTOINCREMENT"); - if (primaryKeys.size() == 1) { - if 
(col.name.equalsIgnoreCase(primaryKeys.get(0))) { - col.isPrimaryKey = true; - } - } - if (!col.isAutoIncrement) { - col.defaultValue = rs.getString("COLUMN_DEF"); - } - columns.put(col.name, col); - } - } finally { - JdbcUtils.closeSilently(rs); - } - } - - /** - * Generates a model (class definition) from this table. The model includes - * indexes, primary keys, default values, maxLengths, and allowNull - * information. - *

    - * The caller may optionally set a destination package name, whether or not - * to include the schema name (setting schema can be a problem when using - * the model between databases), and if to automatically trim strings for - * those that have a maximum length. - *

    - * - * @return a complete model (class definition) for this table as a string - */ - String generateModel(String packageName, boolean annotateSchema, - boolean trimStrings) { - - // import statements - Set imports = New.hashSet(); - imports.add(JQSchema.class.getCanonicalName()); - imports.add(JQTable.class.getCanonicalName()); - imports.add(JQIndex.class.getCanonicalName()); - imports.add(JQColumn.class.getCanonicalName()); - - // fields - StringBuilder fields = new StringBuilder(); - List sortedColumns = New.arrayList(columns.values()); - Collections.sort(sortedColumns); - for (ColumnInspector col : sortedColumns) { - fields.append(generateColumn(imports, col, trimStrings)); - } - - // build complete class definition - StringBuilder model = new StringBuilder(); - if (!StringUtils.isNullOrEmpty(packageName)) { - // package - model.append("package " + packageName + ";"); - model.append(EOL).append(EOL); - } - - // imports - List sortedImports = new ArrayList(imports); - Collections.sort(sortedImports); - for (String imp : sortedImports) { - model.append("import ").append(imp).append(';').append(EOL); - } - model.append(EOL); - - // @JQSchema - if (annotateSchema && !StringUtils.isNullOrEmpty(schema)) { - model.append('@').append(JQSchema.class.getSimpleName()); - model.append('('); - AnnotationBuilder ap = new AnnotationBuilder(); - ap.addParameter("name", schema); - model.append(ap); - model.append(')').append(EOL); - } - - // @JQTable - model.append('@').append(JQTable.class.getSimpleName()); - model.append('('); - - // JQTable annotation parameters - AnnotationBuilder ap = new AnnotationBuilder(); - ap.addParameter("name", table); - - if (primaryKeys.size() > 1) { - StringBuilder pk = new StringBuilder(); - for (String key : primaryKeys) { - pk.append(key).append(' '); - } - pk.trimToSize(); - ap.addParameter("primaryKey", pk.toString()); - } - - // finish @JQTable annotation - model.append(ap); - model.append(')').append(EOL); - - // @JQIndex - ap = new 
AnnotationBuilder(); - generateIndexAnnotations(ap, "standard", IndexType.STANDARD); - generateIndexAnnotations(ap, "unique", IndexType.UNIQUE); - generateIndexAnnotations(ap, "hash", IndexType.HASH); - generateIndexAnnotations(ap, "uniqueHash", IndexType.UNIQUE_HASH); - if (ap.length() > 0) { - model.append('@').append(JQIndex.class.getSimpleName()); - model.append('('); - model.append(ap); - model.append(')').append(EOL); - } - - // class declaration - String clazzName = ModelUtils.convertTableToClassName(table); - model.append(MessageFormat.format("public class {0} '{'", clazzName)) - .append(EOL); - model.append(EOL); - - // field declarations - model.append(fields); - - // default constructor - model.append("\t" + "public ").append(clazzName).append("() {") - .append(EOL); - model.append("\t}").append(EOL); - - // end of class body - model.append('}'); - model.trimToSize(); - return model.toString(); - } - - /** - * Generates the specified index annotation. - */ - void generateIndexAnnotations(AnnotationBuilder ap, String parameter, - IndexType type) { - List list = getIndexes(type); - if (list.size() == 0) { - // no matching indexes - return; - } - if (list.size() == 1) { - ap.addParameter(parameter, list.get(0).getColumnsString()); - } else { - List parameters = New.arrayList(); - for (IndexInspector index : list) { - parameters.add(index.getColumnsString()); - } - ap.addParameter(parameter, parameters); - } - - } - - private List getIndexes(IndexType type) { - List list = New.arrayList(); - for (IndexInspector index : indexes.values()) { - if (index.type.equals(type)) { - list.add(index); - } - } - return list; - } - - private StatementBuilder generateColumn(Set imports, - ColumnInspector col, boolean trimStrings) { - StatementBuilder sb = new StatementBuilder(); - Class clazz = col.clazz; - String column = ModelUtils.convertColumnToFieldName(col.name - .toLowerCase()); - sb.append('\t'); - if (clazz == null) { - // unsupported type - clazz = Object.class; 
- sb.append("// unsupported type " + col.type); - } else { - // @JQColumn - imports.add(clazz.getCanonicalName()); - sb.append('@').append(JQColumn.class.getSimpleName()); - - // JQColumn annotation parameters - AnnotationBuilder ap = new AnnotationBuilder(); - - // JQColumn.name - if (!col.name.equalsIgnoreCase(column)) { - ap.addParameter("name", col.name); - } - - // JQColumn.primaryKey - // composite primary keys are annotated on the table - if (col.isPrimaryKey && primaryKeys.size() == 1) { - ap.addParameter("primaryKey=true"); - } - - // JQColumn.maxLength - if ((clazz == String.class) && (col.size > 0) - && (col.size < Integer.MAX_VALUE)) { - ap.addParameter("maxLength", col.size); - - // JQColumn.trimStrings - if (trimStrings) { - ap.addParameter("trimString=true"); - } - } else { - // JQColumn.AutoIncrement - if (col.isAutoIncrement) { - ap.addParameter("autoIncrement=true"); - } - } - - // JQColumn.allowNull - if (!col.allowNull) { - ap.addParameter("allowNull=false"); - } - - // JQColumn.defaultValue - if (!StringUtils.isNullOrEmpty(col.defaultValue)) { - ap.addParameter("defaultValue=\"" + col.defaultValue + "\""); - } - - // add leading and trailing () - if (ap.length() > 0) { - AnnotationBuilder b = new AnnotationBuilder(); - b.append('(').append(ap.toString()).append(')'); - ap = b; - } - sb.append(ap.toString()); - } - sb.append(EOL); - - // variable declaration - sb.append("\t" + "public "); - sb.append(clazz.getSimpleName()); - sb.append(' '); - sb.append(column); - sb.append(';'); - sb.append(EOL).append(EOL); - return sb; - } - - /** - * Validates that a table definition (annotated, interface, or both) matches - * the current state of the table and indexes in the database. Results are - * returned as a list of validation remarks which includes recommendations, - * warnings, and errors about the model. The caller may choose to have - * validate throw an exception on any validation ERROR. 
- * - * @param the table type - * @param def the table definition - * @param throwError whether or not to throw an exception if an error was - * found - * @return a list if validation remarks - */ - List validate(TableDefinition def, - boolean throwError) { - List remarks = New.arrayList(); - - // model class definition validation - if (!Modifier.isPublic(def.getModelClass().getModifiers())) { - remarks.add(error( - table, - "SCHEMA", - MessageFormat.format("Class {0} MUST BE PUBLIC!", def - .getModelClass().getCanonicalName())).throwError( - throwError)); - } - - // Schema Validation - if (!StringUtils.isNullOrEmpty(schema)) { - if (StringUtils.isNullOrEmpty(def.schemaName)) { - remarks.add(consider( - table, - "SCHEMA", - MessageFormat.format("@{0}(name={1})", - JQSchema.class.getSimpleName(), schema))); - } else if (!schema.equalsIgnoreCase(def.schemaName)) { - remarks.add(error( - table, - "SCHEMA", - MessageFormat.format("@{0}(name={1}) != {2}", - JQSchema.class.getSimpleName(), def.schemaName, - schema)).throwError(throwError)); - } - } - - // index validation - for (IndexInspector index : indexes.values()) { - validate(remarks, def, index, throwError); - } - - // field column validation - for (FieldDefinition fieldDef : def.getFields()) { - validate(remarks, fieldDef, throwError); - } - return remarks; - } - - /** - * Validates an inspected index from the database against the - * IndexDefinition within the TableDefinition. 
- */ - private void validate(List remarks, - TableDefinition def, IndexInspector index, boolean throwError) { - List defIndexes = def.getIndexes(IndexType.STANDARD); - List dbIndexes = getIndexes(IndexType.STANDARD); - if (defIndexes.size() > dbIndexes.size()) { - remarks.add(warn(table, IndexType.STANDARD.name(), - "More model indexes than database indexes")); - } else if (defIndexes.size() < dbIndexes.size()) { - remarks.add(warn(table, IndexType.STANDARD.name(), - "Model class is missing indexes")); - } - // TODO complete index validation. - // need to actually compare index types and columns within each index. - } - - /** - * Validates a column against the model's field definition. Checks for - * existence, supported type, type mapping, default value, defined lengths, - * primary key, autoincrement. - */ - private void validate(List remarks, - FieldDefinition fieldDef, boolean throwError) { - // unknown field - String field = forceUpperCase ? fieldDef.columnName.toUpperCase() - : fieldDef.columnName; - if (!columns.containsKey(field)) { - // unknown column mapping - remarks.add(error(table, fieldDef, "Does not exist in database!") - .throwError(throwError)); - return; - } - ColumnInspector col = columns.get(field); - Class fieldClass = fieldDef.field.getType(); - Class jdbcClass = ModelUtils.getClassForSqlType(col.type, - dateTimeClass); - - // supported type check - // JaQu maps to VARCHAR for unsupported types. 
- if (fieldDef.dataType.equals("VARCHAR") && (fieldClass != String.class)) { - remarks.add(error( - table, - fieldDef, - "JaQu does not currently implement support for " - + fieldClass.getName()).throwError(throwError)); - } - // number types - if (!fieldClass.equals(jdbcClass)) { - if (Number.class.isAssignableFrom(fieldClass)) { - remarks.add(warn( - table, - col, - MessageFormat - .format("Precision mismatch: ModelObject={0}, ColumnObject={1}", - fieldClass.getSimpleName(), - jdbcClass.getSimpleName()))); - } else { - if (!Date.class.isAssignableFrom(jdbcClass)) { - remarks.add(warn( - table, - col, - MessageFormat - .format("Object Mismatch: ModelObject={0}, ColumnObject={1}", - fieldClass.getSimpleName(), - jdbcClass.getSimpleName()))); - } - } - } - - // string types - if (fieldClass == String.class) { - if ((fieldDef.maxLength != col.size) - && (col.size < Integer.MAX_VALUE)) { - remarks.add(warn(table, col, MessageFormat.format( - "{0}.maxLength={1}, ColumnMaxLength={2}", - JQColumn.class.getSimpleName(), fieldDef.maxLength, - col.size))); - } - if (fieldDef.maxLength > 0 && !fieldDef.trimString) { - remarks.add(consider(table, col, MessageFormat.format( - "{0}.truncateToMaxLength=true" - + " will prevent RuntimeExceptions on" - + " INSERT or UPDATE, but will clip data!", - JQColumn.class.getSimpleName()))); - } - } - - // numeric autoIncrement - if (fieldDef.isAutoIncrement != col.isAutoIncrement) { - remarks.add(warn(table, col, MessageFormat.format( - "{0}.isAutoIncrement={1}" - + " while Column autoIncrement={2}", - JQColumn.class.getSimpleName(), fieldDef.isAutoIncrement, - col.isAutoIncrement))); - } - // default value - if (!col.isAutoIncrement && !col.isPrimaryKey) { - // check Model.defaultValue format - if (!ModelUtils - .isProperlyFormattedDefaultValue(fieldDef.defaultValue)) { - remarks.add(error( - table, - col, - MessageFormat.format("{0}.defaultValue=\"{1}\"" - + " is improperly formatted!", - JQColumn.class.getSimpleName(), - 
fieldDef.defaultValue)).throwError(throwError)); - // next field - return; - } - // compare Model.defaultValue to Column.defaultValue - if (StringUtils.isNullOrEmpty(fieldDef.defaultValue) - && !StringUtils.isNullOrEmpty(col.defaultValue)) { - // Model.defaultValue is NULL, Column.defaultValue is NOT NULL - remarks.add(warn(table, col, MessageFormat.format( - "{0}.defaultValue=\"\"" - + " while column default=\"{1}\"", - JQColumn.class.getSimpleName(), col.defaultValue))); - } else if (!StringUtils.isNullOrEmpty(fieldDef.defaultValue) - && StringUtils.isNullOrEmpty(col.defaultValue)) { - // Column.defaultValue is NULL, Model.defaultValue is NOT NULL - remarks.add(warn(table, col, MessageFormat.format( - "{0}.defaultValue=\"{1}\"" - + " while column default=\"\"", - JQColumn.class.getSimpleName(), fieldDef.defaultValue))); - } else if (!StringUtils.isNullOrEmpty(fieldDef.defaultValue) - && !StringUtils.isNullOrEmpty(col.defaultValue)) { - if (!fieldDef.defaultValue.equals(col.defaultValue)) { - // Model.defaultValue != Column.defaultValue - remarks.add(warn(table, col, MessageFormat.format( - "{0}.defaultValue=\"{1}\"" - + " while column default=\"{2}\"", - JQColumn.class.getSimpleName(), - fieldDef.defaultValue, col.defaultValue))); - } - } - - // sanity check Model.defaultValue literal value - if (!ModelUtils.isValidDefaultValue(fieldDef.field.getType(), - fieldDef.defaultValue)) { - remarks.add(error(table, col, MessageFormat.format( - "{0}.defaultValue=\"{1}\" is invalid!", - JQColumn.class.getSimpleName(), fieldDef.defaultValue))); - } - } - } - - /** - * Represents an index as it exists in the database. 
- */ - private static class IndexInspector { - - String name; - IndexType type; - private final List columns = new ArrayList(); - - public IndexInspector(ResultSet rs) throws SQLException { - name = rs.getString("INDEX_NAME"); - - // determine index type - boolean hash = rs.getInt("TYPE") == DatabaseMetaData.tableIndexHashed; - boolean unique = !rs.getBoolean("NON_UNIQUE"); - - if (!hash && !unique) { - type = IndexType.STANDARD; - } else if (hash && unique) { - type = IndexType.UNIQUE_HASH; - } else if (unique) { - type = IndexType.UNIQUE; - } else if (hash) { - type = IndexType.HASH; - } - columns.add(rs.getString("COLUMN_NAME")); - } - - public void addColumn(ResultSet rs) throws SQLException { - columns.add(rs.getString("COLUMN_NAME")); - } - - public String getColumnsString() { - StatementBuilder sb = new StatementBuilder(); - for (String col : columns) { - sb.appendExceptFirst(", "); - sb.append(col); - } - return sb.toString().trim(); - } - } - - /** - * Represents a column as it exists in the database. - */ - static class ColumnInspector implements Comparable { - String name; - String type; - int size; - boolean allowNull; - Class clazz; - boolean isPrimaryKey; - boolean isAutoIncrement; - String defaultValue; - - @Override - public int compareTo(ColumnInspector o) { - if (isPrimaryKey && o.isPrimaryKey) { - // both primary sort by name - return name.compareTo(o.name); - } else if (isPrimaryKey && !o.isPrimaryKey) { - // primary first - return -1; - } else if (!isPrimaryKey && o.isPrimaryKey) { - // primary first - return 1; - } else { - // neither primary, sort by name - return name.compareTo(o.name); - } - } - } - - /** - * Convenience class based on StatementBuilder for creating the annotation - * parameter list. 
- */ - private static class AnnotationBuilder extends StatementBuilder { - AnnotationBuilder() { - super(); - } - - void addParameter(String parameter) { - appendExceptFirst(", "); - append(parameter); - } - - void addParameter(String parameter, T value) { - appendExceptFirst(", "); - append(parameter); - append('='); - if (value instanceof List) { - append("{ "); - @SuppressWarnings("unchecked") - List list = (List) value; - StatementBuilder flat = new StatementBuilder(); - for (Object o : list) { - flat.appendExceptFirst(", "); - if (o instanceof String) { - flat.append('\"'); - } - int todoEscape; - flat.append(o.toString().trim()); - if (o instanceof String) { - flat.append('\"'); - } - } - append(flat.toString()); - append(" }"); - } else { - if (value instanceof String) { - append('\"'); - } - int todoEscape; - append(value.toString().trim()); - if (value instanceof String) { - append('\"'); - } - } - } - } -} \ No newline at end of file diff --git a/h2/src/tools/org/h2/jaqu/TestCondition.java b/h2/src/tools/org/h2/jaqu/TestCondition.java deleted file mode 100644 index ff483b3a82..0000000000 --- a/h2/src/tools/org/h2/jaqu/TestCondition.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -import org.h2.jaqu.util.ClassUtils; - -/** - * This class represents an incomplete condition. 
- * - * @param the incomplete condition data type - */ -public class TestCondition { - - private final A x; - - public TestCondition(A x) { - this.x = x; - } - - public Boolean is(A y) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function("=", x, y) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" = "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - - public Boolean bigger(A y) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function(">", x, y) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" > "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - - public Boolean biggerEqual(A y) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function(">=", x, y) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" >= "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - - public Boolean smaller(A y) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function("<", x, y) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" < "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - - public Boolean smallerEqual(A y) { - Boolean o = ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function("<=", x, y) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" <= "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - - public Boolean like(A pattern) { - Boolean o 
= ClassUtils.newObject(Boolean.class); - return Db.registerToken(o, new Function("LIKE", x, pattern) { - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("("); - query.appendSQL(stat, x[0]); - stat.appendSQL(" LIKE "); - query.appendSQL(stat, x[1]); - stat.appendSQL(")"); - } - }); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/Token.java b/h2/src/tools/org/h2/jaqu/Token.java deleted file mode 100644 index f1cae40823..0000000000 --- a/h2/src/tools/org/h2/jaqu/Token.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu; - -/** - * Classes implementing this interface can be used as a token in a statement. - */ -public interface Token { - /** - * Append the SQL to the given statement using the given query. - * - * @param stat the statement to append the SQL to - * @param query the query to use - */ - void appendSQL(SQLStatement stat, Query query); -} diff --git a/h2/src/tools/org/h2/jaqu/UpdateColumn.java b/h2/src/tools/org/h2/jaqu/UpdateColumn.java deleted file mode 100644 index 200bbe24d7..0000000000 --- a/h2/src/tools/org/h2/jaqu/UpdateColumn.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -/** - * Classes implementing this interface can be used as a declaration in an - * update statement. - */ -public interface UpdateColumn { - - /** - * Append the SQL to the given statement using the given query. 
- * - * @param stat the statement to append the SQL to - */ - void appendSQL(SQLStatement stat); - -} diff --git a/h2/src/tools/org/h2/jaqu/UpdateColumnIncrement.java b/h2/src/tools/org/h2/jaqu/UpdateColumnIncrement.java deleted file mode 100644 index 97643114ca..0000000000 --- a/h2/src/tools/org/h2/jaqu/UpdateColumnIncrement.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -/** - * This class represents "SET column = (column + x)" in an UPDATE statement. - * - * @param the query type - * @param the new value data type - */ -public class UpdateColumnIncrement implements UpdateColumn { - - private final Query query; - private final A x; - private A y; - - UpdateColumnIncrement(Query query, A x) { - this.query = query; - this.x = x; - } - - public Query by(A y) { - query.addUpdateColumnDeclaration(this); - this.y = y; - return query; - } - - @Override - public void appendSQL(SQLStatement stat) { - query.appendSQL(stat, x); - stat.appendSQL("=("); - query.appendSQL(stat, x); - stat.appendSQL("+"); - query.appendSQL(stat, y); - stat.appendSQL(")"); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/UpdateColumnSet.java b/h2/src/tools/org/h2/jaqu/UpdateColumnSet.java deleted file mode 100644 index fa468ae721..0000000000 --- a/h2/src/tools/org/h2/jaqu/UpdateColumnSet.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -/** - * This class represents "SET column = value" in an UPDATE statement. 
- * - * @param the query type - * @param the new value data type - */ -public class UpdateColumnSet implements UpdateColumn { - - private final Query query; - private final A x; - private A y; - - UpdateColumnSet(Query query, A x) { - this.query = query; - this.x = x; - } - - public Query to(A y) { - query.addUpdateColumnDeclaration(this); - this.y = y; - return query; - } - - @Override - public void appendSQL(SQLStatement stat) { - query.appendSQL(stat, x); - stat.appendSQL("=?"); - stat.addParameter(y); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/ValidationRemark.java b/h2/src/tools/org/h2/jaqu/ValidationRemark.java deleted file mode 100644 index ddd78dd7e0..0000000000 --- a/h2/src/tools/org/h2/jaqu/ValidationRemark.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu; - -import org.h2.jaqu.TableDefinition.FieldDefinition; -import org.h2.jaqu.TableInspector.ColumnInspector; -import org.h2.util.StringUtils; - -/** - * A validation remark is a result of running a model validation. Each remark - * has a level, associated component (schema, table, column, index), and a - * message. - */ -public class ValidationRemark { - - /** - * The validation message level. 
- */ - public static enum Level { - CONSIDER, WARN, ERROR; - } - - private final Level level; - private final String table; - private final String fieldType; - private final String fieldName; - private final String message; - - private ValidationRemark(Level level, String table, String type, - String message) { - this.level = level; - this.table = table; - this.fieldType = type; - this.fieldName = ""; - this.message = message; - } - - private ValidationRemark(Level level, String table, FieldDefinition field, - String message) { - this.level = level; - this.table = table; - this.fieldType = field.dataType; - this.fieldName = field.columnName; - this.message = message; - } - - private ValidationRemark(Level level, String table, ColumnInspector col, - String message) { - this.level = level; - this.table = table; - this.fieldType = col.type; - this.fieldName = col.name; - this.message = message; - } - - public static ValidationRemark consider(String table, String type, - String message) { - return new ValidationRemark(Level.CONSIDER, table, type, message); - } - - public static ValidationRemark consider(String table, ColumnInspector col, - String message) { - return new ValidationRemark(Level.CONSIDER, table, col, message); - } - - public static ValidationRemark warn(String table, ColumnInspector col, - String message) { - return new ValidationRemark(Level.WARN, table, col, message); - } - - public static ValidationRemark warn(String table, String type, - String message) { - return new ValidationRemark(Level.WARN, table, type, message); - } - - public static ValidationRemark error(String table, ColumnInspector col, - String message) { - return new ValidationRemark(Level.ERROR, table, col, message); - } - - public static ValidationRemark error(String table, String type, - String message) { - return new ValidationRemark(Level.ERROR, table, type, message); - } - - public static ValidationRemark error(String table, FieldDefinition field, - String message) { - return new 
ValidationRemark(Level.ERROR, table, field, message); - } - - public ValidationRemark throwError(boolean throwOnError) { - if (throwOnError && isError()) { - throw new RuntimeException(toString()); - } - return this; - } - - public boolean isError() { - return level.equals(Level.ERROR); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(StringUtils.pad(level.name(), 9, " ", true)); - sb.append(StringUtils.pad(table, 25, " ", true)); - sb.append(StringUtils.pad(fieldName, 20, " ", true)); - sb.append(' '); - sb.append(message); - return sb.toString(); - } - - public String toCSVString() { - StringBuilder sb = new StringBuilder(); - sb.append(level.name()).append(','); - sb.append(table).append(','); - sb.append(fieldType).append(','); - sb.append(fieldName).append(','); - sb.append(message); - return sb.toString(); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/And.java b/h2/src/tools/org/h2/jaqu/bytecode/And.java deleted file mode 100644 index ea13ba7dad..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/And.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * An AND expression. 
- */ -public class And implements Token { - - private final Token left, right; - - private And(Token left, Token right) { - this.left = left; - this.right = right; - } - - static And get(Token left, Token right) { - return new And(left, right); - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - left.appendSQL(stat, query); - stat.appendSQL(" AND "); - right.appendSQL(stat, query); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/ArrayGet.java b/h2/src/tools/org/h2/jaqu/bytecode/ArrayGet.java deleted file mode 100644 index 44d606aa6d..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/ArrayGet.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * An array access operation. - */ -public class ArrayGet implements Token { - - private final Token variable; - private final Token index; - - private ArrayGet(Token variable, Token index) { - this.variable = variable; - this.index = index; - } - - static ArrayGet get(Token variable, Token index) { - return new ArrayGet(variable, index); - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - // untested - variable.appendSQL(stat, query); - stat.appendSQL("["); - index.appendSQL(stat, query); - stat.appendSQL("]"); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/CaseWhen.java b/h2/src/tools/org/h2/jaqu/bytecode/CaseWhen.java deleted file mode 100644 index a06444c4b2..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/CaseWhen.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * A conditional expression. - */ -public class CaseWhen implements Token { - - private final Token condition, ifTrue, ifFalse; - - private CaseWhen(Token condition, Token ifTrue, Token ifFalse) { - this.condition = condition; - this.ifTrue = ifTrue; - this.ifFalse = ifFalse; - } - - static Token get(Token condition, Token ifTrue, Token ifFalse) { - if ("0".equals(ifTrue.toString()) && "1".equals(ifFalse.toString())) { - return Not.get(condition); - } else if ("1".equals(ifTrue.toString()) && "0".equals(ifFalse.toString())) { - return condition; - } else if ("0".equals(ifTrue.toString())) { - return And.get(Not.get(condition), ifFalse); - } - return new CaseWhen(condition, ifTrue, ifFalse); - } - - @Override - public String toString() { - return "CASEWHEN(" + condition + ", " + ifTrue + ", " + ifFalse + ")"; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL("CASEWHEN "); - condition.appendSQL(stat, query); - stat.appendSQL(" THEN "); - ifTrue.appendSQL(stat, query); - stat.appendSQL(" ELSE "); - ifFalse.appendSQL(stat, query); - stat.appendSQL(" END"); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/ClassReader.java b/h2/src/tools/org/h2/jaqu/bytecode/ClassReader.java deleted file mode 100644 index b22da0042b..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/ClassReader.java +++ /dev/null @@ -1,1468 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.Stack; -import org.h2.jaqu.Token; - -/** - * This class converts a method to a SQL Token by interpreting - * (decompiling) the bytecode of the class. - */ -public class ClassReader { - - private static final boolean DEBUG = false; - - private byte[] data; - private int pos; - private Constant[] constantPool; - private int startByteCode; - private String methodName; - - private String convertMethodName; - private Token result; - private Stack stack = new Stack(); - private ArrayList variables = new ArrayList(); - private boolean endOfMethod; - private boolean condition; - private int nextPc; - private Map fieldMap = new HashMap(); - - private static void debug(String s) { - if (DEBUG) { - System.out.println(s); - } - } - - public Token decompile(Object instance, Map fields, - String method) { - this.fieldMap = fields; - this.convertMethodName = method; - Class clazz = instance.getClass(); - String className = clazz.getName(); - debug("class name " + className); - ByteArrayOutputStream buff = new ByteArrayOutputStream(); - try { - InputStream in = clazz.getClassLoader() - .getResource(className.replace('.', '/') + ".class") - .openStream(); - while (true) { - int x = in.read(); - if (x < 0) { - break; - } - buff.write(x); - } - } catch (IOException e) { - throw new RuntimeException("Could not read class bytecode", e); - } - data = buff.toByteArray(); - int header = readInt(); - debug("header: " + Integer.toHexString(header)); - int minorVersion = readShort(); - int majorVersion = readShort(); - debug("version: " + majorVersion + "." 
+ minorVersion); - int constantPoolCount = readShort(); - constantPool = new Constant[constantPoolCount]; - for (int i = 1; i < constantPoolCount; i++) { - int type = readByte(); - switch (type) { - case 1: - constantPool[i] = ConstantString.get(readString()); - break; - case 3: { - int x = readInt(); - constantPool[i] = ConstantNumber.get(x); - break; - } - case 4: { - int x = readInt(); - constantPool[i] = ConstantNumber.get( - "" + Float.intBitsToFloat(x), x, Constant.Type.FLOAT); - break; - } - case 5: { - long x = readLong(); - constantPool[i] = ConstantNumber.get(x); - i++; - break; - } - case 6: { - long x = readLong(); - constantPool[i] = ConstantNumber.get( - "" + Double.longBitsToDouble(x), x, - Constant.Type.DOUBLE); - i++; - break; - } - case 7: { - int x = readShort(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.CLASS_REF); - break; - } - case 8: { - int x = readShort(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.STRING_REF); - break; - } - case 9: { - int x = readInt(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.FIELD_REF); - break; - } - case 10: { - int x = readInt(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.METHOD_REF); - break; - } - case 11: { - int x = readInt(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.INTERFACE_METHOD_REF); - break; - } - case 12: { - int x = readInt(); - constantPool[i] = ConstantNumber.get(null, x, - ConstantNumber.Type.NAME_AND_TYPE); - break; - } - default: - throw new RuntimeException("Unsupported constant pool tag: " + type); - } - } - int accessFlags = readShort(); - debug("access flags: " + accessFlags); - int classRef = readShort(); - debug("class: " + constantPool[constantPool[classRef].intValue()]); - int superClassRef = readShort(); - debug(" extends " + constantPool[constantPool[superClassRef].intValue()]); - int interfaceCount = readShort(); - for (int i = 0; i < 
interfaceCount; i++) { - int interfaceRef = readShort(); - debug(" implements " + constantPool[constantPool[interfaceRef].intValue()]); - } - int fieldCount = readShort(); - for (int i = 0; i < fieldCount; i++) { - readField(); - } - int methodCount = readShort(); - for (int i = 0; i < methodCount; i++) { - readMethod(); - } - readAttributes(); - return result; - } - - private void readField() { - int accessFlags = readShort(); - int nameIndex = readShort(); - int descIndex = readShort(); - debug(" " + constantPool[descIndex] + " " + constantPool[nameIndex] - + " " + accessFlags); - readAttributes(); - } - - private void readMethod() { - int accessFlags = readShort(); - int nameIndex = readShort(); - int descIndex = readShort(); - String desc = constantPool[descIndex].toString(); - methodName = constantPool[nameIndex].toString(); - debug(" " + desc + " " + methodName + " " + accessFlags); - readAttributes(); - } - - private void readAttributes() { - int attributeCount = readShort(); - for (int i = 0; i < attributeCount; i++) { - int attributeNameIndex = readShort(); - String attributeName = constantPool[attributeNameIndex].toString(); - debug(" attribute " + attributeName); - int attributeLength = readInt(); - int end = pos + attributeLength; - if ("Code".equals(attributeName)) { - readCode(); - } - pos = end; - } - } - - void decompile() { - int maxStack = readShort(); - int maxLocals = readShort(); - debug("stack: " + maxStack + " locals: " + maxLocals); - int codeLength = readInt(); - startByteCode = pos; - int end = pos + codeLength; - while (pos < end) { - readByteCode(); - } - debug(""); - pos = startByteCode + codeLength; - int exceptionTableLength = readShort(); - pos += 2 * exceptionTableLength; - readAttributes(); - } - - private void readCode() { - variables.clear(); - stack.clear(); - int maxStack = readShort(); - int maxLocals = readShort(); - debug("stack: " + maxStack + " locals: " + maxLocals); - int codeLength = readInt(); - startByteCode = pos; - 
if (methodName.startsWith(convertMethodName)) { - result = getResult(); - } - pos = startByteCode + codeLength; - int exceptionTableLength = readShort(); - pos += 2 * exceptionTableLength; - readAttributes(); - } - - private Token getResult() { - while (true) { - readByteCode(); - if (endOfMethod) { - return stack.pop(); - } - if (condition) { - Token c = stack.pop(); - Stack currentStack = new Stack(); - currentStack.addAll(stack); - ArrayList currentVariables = new ArrayList(); - currentVariables.addAll(variables); - int branch = nextPc; - Token a = getResult(); - stack = currentStack; - variables = currentVariables; - pos = branch + startByteCode; - Token b = getResult(); - if (a.equals("0") && b.equals("1")) { - return c; - } else if (a.equals("1") && b.equals("0")) { - return Not.get(c); - } else if (b.equals("0")) { - return And.get(Not.get(c), a); - } else if (a.equals("0")) { - return And.get(c, b); - } else if (b.equals("1")) { - return Or.get(c, a); - } else if (a.equals("1")) { - return And.get(Not.get(c), b); - } - return CaseWhen.get(c, b, a); - } - if (nextPc != 0) { - pos = nextPc + startByteCode; - } - } - } - - private void readByteCode() { - int startPos = pos - startByteCode; - int opCode = readByte(); - String op; - endOfMethod = false; - condition = false; - nextPc = 0; - switch(opCode) { - case 0: - op = "nop"; - break; - case 1: - op = "aconst_null"; - stack.push(Null.INSTANCE); - break; - case 2: - op = "iconst_m1"; - stack.push(ConstantNumber.get("-1")); - break; - case 3: - op = "iconst_0"; - stack.push(ConstantNumber.get("0")); - break; - case 4: - op = "iconst_1"; - stack.push(ConstantNumber.get("1")); - break; - case 5: - op = "iconst_2"; - stack.push(ConstantNumber.get("2")); - break; - case 6: - op = "iconst_3"; - stack.push(ConstantNumber.get("3")); - break; - case 7: - op = "iconst_4"; - stack.push(ConstantNumber.get("4")); - break; - case 8: - op = "iconst_5"; - stack.push(ConstantNumber.get("5")); - break; - case 9: - op = 
"lconst_0"; - stack.push(ConstantNumber.get("0")); - break; - case 10: - op = "lconst_1"; - stack.push(ConstantNumber.get("1")); - break; - case 11: - op = "fconst_0"; - stack.push(ConstantNumber.get("0.0")); - break; - case 12: - op = "fconst_1"; - stack.push(ConstantNumber.get("1.0")); - break; - case 13: - op = "fconst_2"; - stack.push(ConstantNumber.get("2.0")); - break; - case 14: - op = "dconst_0"; - stack.push(ConstantNumber.get("0.0")); - break; - case 15: - op = "dconst_1"; - stack.push(ConstantNumber.get("1.0")); - break; - case 16: { - int x = (byte) readByte(); - op = "bipush " + x; - stack.push(ConstantNumber.get(x)); - break; - } - case 17: { - int x = (short) readShort(); - op = "sipush " + x; - stack.push(ConstantNumber.get(x)); - break; - } - case 18: { - Token s = getConstant(readByte()); - op = "ldc " + s; - stack.push(s); - break; - } - case 19: { - Token s = getConstant(readShort()); - op = "ldc_w " + s; - stack.push(s); - break; - } - case 20: { - Token s = getConstant(readShort()); - op = "ldc2_w " + s; - stack.push(s); - break; - } - case 21: { - int x = readByte(); - op = "iload " + x; - stack.push(getVariable(x)); - break; - } - case 22: { - int x = readByte(); - op = "lload " + x; - stack.push(getVariable(x)); - break; - } - case 23: { - int x = readByte(); - op = "fload " + x; - stack.push(getVariable(x)); - break; - } - case 24: { - int x = readByte(); - op = "dload " + x; - stack.push(getVariable(x)); - break; - } - case 25: { - int x = readByte(); - op = "aload " + x; - stack.push(getVariable(x)); - break; - } - case 26: - op = "iload_0"; - stack.push(getVariable(0)); - break; - case 27: - op = "iload_1"; - stack.push(getVariable(1)); - break; - case 28: - op = "iload_2"; - stack.push(getVariable(2)); - break; - case 29: - op = "iload_3"; - stack.push(getVariable(3)); - break; - case 30: - op = "lload_0"; - stack.push(getVariable(0)); - break; - case 31: - op = "lload_1"; - stack.push(getVariable(1)); - break; - case 32: - op = 
"lload_2"; - stack.push(getVariable(2)); - break; - case 33: - op = "lload_3"; - stack.push(getVariable(3)); - break; - case 34: - op = "fload_0"; - stack.push(getVariable(0)); - break; - case 35: - op = "fload_1"; - stack.push(getVariable(1)); - break; - case 36: - op = "fload_2"; - stack.push(getVariable(2)); - break; - case 37: - op = "fload_3"; - stack.push(getVariable(3)); - break; - case 38: - op = "dload_0"; - stack.push(getVariable(0)); - break; - case 39: - op = "dload_1"; - stack.push(getVariable(1)); - break; - case 40: - op = "dload_2"; - stack.push(getVariable(2)); - break; - case 41: - op = "dload_3"; - stack.push(getVariable(3)); - break; - case 42: - op = "aload_0"; - stack.push(getVariable(0)); - break; - case 43: - op = "aload_1"; - stack.push(getVariable(1)); - break; - case 44: - op = "aload_2"; - stack.push(getVariable(2)); - break; - case 45: - op = "aload_3"; - stack.push(getVariable(3)); - break; - case 46: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "iaload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 47: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "laload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 48: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "faload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 49: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "daload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 50: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "aaload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 51: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "baload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 52: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "caload"; - stack.push(ArrayGet.get(ref, index)); - break; - } - case 53: { - Token index = stack.pop(); - Token ref = stack.pop(); - op = "saload"; - 
stack.push(ArrayGet.get(ref, index)); - break; - } - case 54: { - int var = readByte(); - op = "istore " + var; - setVariable(var, stack.pop()); - break; - } - case 55: { - int var = readByte(); - op = "lstore " + var; - setVariable(var, stack.pop()); - break; - } - case 56: { - int var = readByte(); - op = "fstore " + var; - setVariable(var, stack.pop()); - break; - } - case 57: { - int var = readByte(); - op = "dstore " + var; - setVariable(var, stack.pop()); - break; - } - case 58: { - int var = readByte(); - op = "astore " + var; - setVariable(var, stack.pop()); - break; - } - case 59: - op = "istore_0"; - setVariable(0, stack.pop()); - break; - case 60: - op = "istore_1"; - setVariable(1, stack.pop()); - break; - case 61: - op = "istore_2"; - setVariable(2, stack.pop()); - break; - case 62: - op = "istore_3"; - setVariable(3, stack.pop()); - break; - case 63: - op = "lstore_0"; - setVariable(0, stack.pop()); - break; - case 64: - op = "lstore_1"; - setVariable(1, stack.pop()); - break; - case 65: - op = "lstore_2"; - setVariable(2, stack.pop()); - break; - case 66: - op = "lstore_3"; - setVariable(3, stack.pop()); - break; - case 67: - op = "fstore_0"; - setVariable(0, stack.pop()); - break; - case 68: - op = "fstore_1"; - setVariable(1, stack.pop()); - break; - case 69: - op = "fstore_2"; - setVariable(2, stack.pop()); - break; - case 70: - op = "fstore_3"; - setVariable(3, stack.pop()); - break; - case 71: - op = "dstore_0"; - setVariable(0, stack.pop()); - break; - case 72: - op = "dstore_1"; - setVariable(1, stack.pop()); - break; - case 73: - op = "dstore_2"; - setVariable(2, stack.pop()); - break; - case 74: - op = "dstore_3"; - setVariable(3, stack.pop()); - break; - case 75: - op = "astore_0"; - setVariable(0, stack.pop()); - break; - case 76: - op = "astore_1"; - setVariable(1, stack.pop()); - break; - case 77: - op = "astore_2"; - setVariable(2, stack.pop()); - break; - case 78: - op = "astore_3"; - setVariable(3, stack.pop()); - break; - case 79: { 
- // String value = stack.pop(); - // String index = stack.pop(); - // String ref = stack.pop(); - op = "iastore"; - // TODO side effect - not supported - break; - } - case 80: - op = "lastore"; - // TODO side effect - not supported - break; - case 81: - op = "fastore"; - // TODO side effect - not supported - break; - case 82: - op = "dastore"; - // TODO side effect - not supported - break; - case 83: - op = "aastore"; - // TODO side effect - not supported - break; - case 84: - op = "bastore"; - // TODO side effect - not supported - break; - case 85: - op = "castore"; - // TODO side effect - not supported - break; - case 86: - op = "sastore"; - // TODO side effect - not supported - break; - case 87: - op = "pop"; - stack.pop(); - break; - case 88: - op = "pop2"; - // TODO currently we don't know the stack types - stack.pop(); - stack.pop(); - break; - case 89: { - op = "dup"; - Token x = stack.pop(); - stack.push(x); - stack.push(x); - break; - } - case 90: { - op = "dup_x1"; - Token a = stack.pop(); - Token b = stack.pop(); - stack.push(a); - stack.push(b); - stack.push(a); - break; - } - case 91: { - // TODO currently we don't know the stack types - op = "dup_x2"; - Token a = stack.pop(); - Token b = stack.pop(); - Token c = stack.pop(); - stack.push(a); - stack.push(c); - stack.push(b); - stack.push(a); - break; - } - case 92: { - // TODO currently we don't know the stack types - op = "dup2"; - Token a = stack.pop(); - Token b = stack.pop(); - stack.push(b); - stack.push(a); - stack.push(b); - stack.push(a); - break; - } - case 93: { - // TODO currently we don't know the stack types - op = "dup2_x1"; - Token a = stack.pop(); - Token b = stack.pop(); - Token c = stack.pop(); - stack.push(b); - stack.push(a); - stack.push(c); - stack.push(b); - stack.push(a); - break; - } - case 94: { - // TODO currently we don't know the stack types - op = "dup2_x2"; - Token a = stack.pop(); - Token b = stack.pop(); - Token c = stack.pop(); - Token d = stack.pop(); - 
stack.push(b); - stack.push(a); - stack.push(d); - stack.push(c); - stack.push(b); - stack.push(a); - break; - } - case 95: { - op = "swap"; - Token a = stack.pop(); - Token b = stack.pop(); - stack.push(a); - stack.push(b); - break; - } - case 96: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "iadd"; - stack.push(Operation.get(a, Operation.Type.ADD, b)); - break; - } - case 97: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "ladd"; - stack.push(Operation.get(a, Operation.Type.ADD, b)); - break; - } - case 98: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "fadd"; - stack.push(Operation.get(a, Operation.Type.ADD, b)); - break; - } - case 99: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "dadd"; - stack.push(Operation.get(a, Operation.Type.ADD, b)); - break; - } - case 100: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "isub"; - stack.push(Operation.get(a, Operation.Type.SUBTRACT, b)); - break; - } - case 101: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "lsub"; - stack.push(Operation.get(a, Operation.Type.SUBTRACT, b)); - break; - } - case 102: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "fsub"; - stack.push(Operation.get(a, Operation.Type.SUBTRACT, b)); - break; - } - case 103: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "dsub"; - stack.push(Operation.get(a, Operation.Type.SUBTRACT, b)); - break; - } - case 104: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "imul"; - stack.push(Operation.get(a, Operation.Type.MULTIPLY, b)); - break; - } - case 105: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "lmul"; - stack.push(Operation.get(a, Operation.Type.MULTIPLY, b)); - break; - } - case 106: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "fmul"; - stack.push(Operation.get(a, Operation.Type.MULTIPLY, b)); - break; - } - case 107: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "dmul"; - stack.push(Operation.get(a, 
Operation.Type.MULTIPLY, b)); - break; - } - case 108: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "idiv"; - stack.push(Operation.get(a, Operation.Type.DIVIDE, b)); - break; - } - case 109: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "ldiv"; - stack.push(Operation.get(a, Operation.Type.DIVIDE, b)); - break; - } - case 110: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "fdiv"; - stack.push(Operation.get(a, Operation.Type.DIVIDE, b)); - break; - } - case 111: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "ddiv"; - stack.push(Operation.get(a, Operation.Type.DIVIDE, b)); - break; - } - case 112: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "irem"; - stack.push(Operation.get(a, Operation.Type.MOD, b)); - break; - } - case 113: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "lrem"; - stack.push(Operation.get(a, Operation.Type.MOD, b)); - break; - } - case 114: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "frem"; - stack.push(Operation.get(a, Operation.Type.MOD, b)); - break; - } - case 115: { - Token b = stack.pop(); - Token a = stack.pop(); - op = "drem"; - stack.push(Operation.get(a, Operation.Type.MOD, b)); - break; - } -// case 116: -// op = "ineg"; -// break; -// case 117: -// op = "lneg"; -// break; -// case 118: -// op = "fneg"; -// break; -// case 119: -// op = "dneg"; -// break; -// case 120: -// op = "ishl"; -// break; -// case 121: -// op = "lshl"; -// break; -// case 122: -// op = "ishr"; -// break; -// case 123: -// op = "lshr"; -// break; -// case 124: -// op = "iushr"; -// break; -// case 125: -// op = "lushr"; -// break; -// case 126: -// op = "iand"; -// break; -// case 127: -// op = "land"; -// break; -// case 128: -// op = "ior"; -// break; -// case 129: -// op = "lor"; -// break; -// case 130: -// op = "ixor"; -// break; -// case 131: -// op = "lxor"; -// break; -// case 132: { -// int var = readByte(); -// int off = (byte) readByte(); -// op = "iinc " + 
var + " " + off; -// break; -// } -// case 133: -// op = "i2l"; -// break; -// case 134: -// op = "i2f"; -// break; -// case 135: -// op = "i2d"; -// break; -// case 136: -// op = "l2i"; -// break; -// case 137: -// op = "l2f"; -// break; -// case 138: -// op = "l2d"; -// break; -// case 139: -// op = "f2i"; -// break; -// case 140: -// op = "f2l"; -// break; -// case 141: -// op = "f2d"; -// break; -// case 142: -// op = "d2i"; -// break; -// case 143: -// op = "d2l"; -// break; -// case 144: -// op = "d2f"; -// break; -// case 145: -// op = "i2b"; -// break; -// case 146: -// op = "i2c"; -// break; -// case 147: -// op = "i2s"; -// break; - case 148: { - Token b = stack.pop(), a = stack.pop(); - stack.push(new Function("SIGN", Operation.get(a, - Operation.Type.SUBTRACT, b))); - op = "lcmp"; - break; - } -// case 149: -// op = "fcmpl"; -// break; -// case 150: -// op = "fcmpg"; -// break; -// case 151: -// op = "dcmpl"; -// break; -// case 152: -// op = "dcmpg"; -// break; - case 153: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), Operation.Type.EQUALS, - ConstantNumber.get(0))); - op = "ifeq " + nextPc; - break; - case 154: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), Operation.Type.NOT_EQUALS, - ConstantNumber.get(0))); - op = "ifne " + nextPc; - break; - case 155: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), Operation.Type.SMALLER, - ConstantNumber.get(0))); - op = "iflt " + nextPc; - break; - case 156: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), Operation.Type.BIGGER_EQUALS, - ConstantNumber.get(0))); - op = "ifge " + nextPc; - break; - case 157: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), Operation.Type.BIGGER, - ConstantNumber.get(0))); - op = "ifgt " + nextPc; - 
break; - case 158: - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - stack.push(Operation.get(stack.pop(), - Operation.Type.SMALLER_EQUALS, ConstantNumber.get(0))); - op = "ifle " + nextPc; - break; - case 159: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.EQUALS, b)); - op = "if_icmpeq " + nextPc; - break; - } - case 160: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.NOT_EQUALS, b)); - op = "if_icmpne " + nextPc; - break; - } - case 161: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.SMALLER, b)); - op = "if_icmplt " + nextPc; - break; - } - case 162: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.BIGGER_EQUALS, b)); - op = "if_icmpge " + nextPc; - break; - } - case 163: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.BIGGER, b)); - op = "if_icmpgt " + nextPc; - break; - } - case 164: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.SMALLER_EQUALS, b)); - op = "if_icmple " + nextPc; - break; - } - case 165: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.EQUALS, b)); - op = "if_acmpeq " + nextPc; - break; - } - case 166: { - condition = true; - nextPc = getAbsolutePos(pos, readShort()); - Token b = stack.pop(), a = stack.pop(); - stack.push(Operation.get(a, Operation.Type.NOT_EQUALS, b)); - op = "if_acmpne " + 
nextPc; - break; - } - case 167: - nextPc = getAbsolutePos(pos, readShort()); - op = "goto " + nextPc; - break; -// case 168: -// // TODO not supported yet -// op = "jsr " + getAbsolutePos(pos, readShort()); -// break; -// case 169: -// // TODO not supported yet -// op = "ret " + readByte(); -// break; -// case 170: { -// int start = pos; -// pos += 4 - ((pos - startByteCode) & 3); -// int def = readInt(); -// int low = readInt(), high = readInt(); -// int n = high - low + 1; -// op = "tableswitch default:" + getAbsolutePos(start, def); -// StringBuilder buff = new StringBuilder(); -// for (int i = 0; i < n; i++) { -// buff.append(' ').append(low++). -// append(":"). -// append(getAbsolutePos(start, readInt())); -// } -// op += buff.toString(); -// // pos += n * 4; -// break; -// } -// case 171: { -// int start = pos; -// pos += 4 - ((pos - startByteCode) & 3); -// int def = readInt(); -// int n = readInt(); -// op = "lookupswitch default:" + getAbsolutePos(start, def); -// StringBuilder buff = new StringBuilder(); -// for (int i = 0; i < n; i++) { -// buff.append(' '). -// append(readInt()). -// append(":"). -// append(getAbsolutePos(start, readInt())); -// } -// op += buff.toString(); -// // pos += n * 8; -// break; -// } - case 172: - op = "ireturn"; - endOfMethod = true; - break; - case 173: - op = "lreturn"; - endOfMethod = true; - break; - case 174: - op = "freturn"; - endOfMethod = true; - break; - case 175: - op = "dreturn"; - endOfMethod = true; - break; - case 176: - op = "areturn"; - endOfMethod = true; - break; - case 177: - op = "return"; - // no value returned - stack.push(null); - endOfMethod = true; - break; -// case 178: -// op = "getstatic " + getField(readShort()); -// break; -// case 179: -// op = "putstatic " + getField(readShort()); -// break; - case 180: { - String field = getField(readShort()); - Token p = stack.pop(); - String s = p - + "." 
- + field.substring(field.lastIndexOf('.') + 1, - field.indexOf(' ')); - if (s.startsWith("this.")) { - s = s.substring(5); - } - stack.push(Variable.get(s, fieldMap.get(s))); - op = "getfield " + field; - break; - } -// case 181: -// op = "putfield " + getField(readShort()); -// break; - case 182: { - String method = getMethod(readShort()); - op = "invokevirtual " + method; - if (method.equals("java/lang/String.equals (Ljava/lang/Object;)Z")) { - Token a = stack.pop(); - Token b = stack.pop(); - stack.push(Operation.get(a, Operation.Type.EQUALS, b)); - } else if (method.equals("java/lang/Integer.intValue ()I")) { - // ignore - } else if (method.equals("java/lang/Long.longValue ()J")) { - // ignore - } - break; - } - case 183: { - String method = getMethod(readShort()); - op = "invokespecial " + method; - break; - } - case 184: - op = "invokestatic " + getMethod(readShort()); - break; -// case 185: { -// int methodRef = readShort(); -// readByte(); -// readByte(); -// op = "invokeinterface " + getMethod(methodRef); -// break; -// } - case 187: { - String className = constantPool[constantPool[readShort()] - .intValue()].toString(); - op = "new " + className; - break; - } -// case 188: -// op = "newarray " + readByte(); -// break; -// case 189: -// op = "anewarray " + cpString[readShort()]; -// break; -// case 190: -// op = "arraylength"; -// break; -// case 191: -// op = "athrow"; -// break; -// case 192: -// op = "checkcast " + cpString[readShort()]; -// break; -// case 193: -// op = "instanceof " + cpString[readShort()]; -// break; -// case 194: -// op = "monitorenter"; -// break; -// case 195: -// op = "monitorexit"; -// break; -// case 196: { -// opCode = readByte(); -// switch (opCode) { -// case 21: -// op = "wide iload " + readShort(); -// break; -// case 22: -// op = "wide lload " + readShort(); -// break; -// case 23: -// op = "wide fload " + readShort(); -// break; -// case 24: -// op = "wide dload " + readShort(); -// break; -// case 25: -// op = "wide 
aload " + readShort(); -// break; -// case 54: -// op = "wide istore " + readShort(); -// break; -// case 55: -// op = "wide lstore " + readShort(); -// break; -// case 56: -// op = "wide fstore " + readShort(); -// break; -// case 57: -// op = "wide dstore " + readShort(); -// break; -// case 58: -// op = "wide astore " + readShort(); -// break; -// case 132: { -// int var = readShort(); -// int off = (short) readShort(); -// op = "wide iinc " + var + " " + off; -// break; -// } -// case 169: -// op = "wide ret " + readShort(); -// break; -// default: -// throw new RuntimeException( -// "Unsupported wide opCode " + opCode); -// } -// break; -// } -// case 197: -// op = "multianewarray " + cpString[readShort()] + " " + readByte(); -// break; -// case 198: { -// condition = true; -// nextPc = getAbsolutePos(pos, readShort()); -// Token a = stack.pop(); -// stack.push("(" + a + " IS NULL)"); -// op = "ifnull " + nextPc; -// break; -// } -// case 199: { -// condition = true; -// nextPc = getAbsolutePos(pos, readShort()); -// Token a = stack.pop(); -// stack.push("(" + a + " IS NOT NULL)"); -// op = "ifnonnull " + nextPc; -// break; -// } - case 200: - op = "goto_w " + getAbsolutePos(pos, readInt()); - break; - case 201: - op = "jsr_w " + getAbsolutePos(pos, readInt()); - break; - default: - throw new RuntimeException("Unsupported opCode " + opCode); - } - debug(" " + startPos + ": " + op); - } - - private void setVariable(int x, Token value) { - while (x >= variables.size()) { - variables.add(Variable.get("p" + variables.size(), null)); - } - variables.set(x, value); - } - - private Token getVariable(int x) { - if (x == 0) { - return Variable.THIS; - } - while (x >= variables.size()) { - variables.add(Variable.get("p" + variables.size(), null)); - } - return variables.get(x); - } - - private String getField(int fieldRef) { - int field = constantPool[fieldRef].intValue(); - int classIndex = field >>> 16; - int nameAndType = constantPool[field & 0xffff].intValue(); - 
String className = constantPool[constantPool[classIndex].intValue()] - + "." + constantPool[nameAndType >>> 16] + " " - + constantPool[nameAndType & 0xffff]; - return className; - } - - private String getMethod(int methodRef) { - int method = constantPool[methodRef].intValue(); - int classIndex = method >>> 16; - int nameAndType = constantPool[method & 0xffff].intValue(); - String className = constantPool[constantPool[classIndex].intValue()] - + "." + constantPool[nameAndType >>> 16] + " " - + constantPool[nameAndType & 0xffff]; - return className; - } - - private Constant getConstant(int constantRef) { - Constant c = constantPool[constantRef]; - switch (c.getType()) { - case INT: - case FLOAT: - case DOUBLE: - case LONG: - return c; - case STRING_REF: - return constantPool[c.intValue()]; - default: - throw new RuntimeException("Not a constant: " + constantRef); - } - } - - private String readString() { - int size = readShort(); - byte[] buff = data; - int p = pos, end = p + size; - char[] chars = new char[size]; - int j = 0; - for (; p < end; j++) { - int x = buff[p++] & 0xff; - if (x < 0x80) { - chars[j] = (char) x; - } else if (x >= 0xe0) { - chars[j] = (char) (((x & 0xf) << 12) - + ((buff[p++] & 0x3f) << 6) + (buff[p++] & 0x3f)); - } else { - chars[j] = (char) (((x & 0x1f) << 6) + (buff[p++] & 0x3f)); - } - } - pos = p; - return new String(chars, 0, j); - } - - private int getAbsolutePos(int start, int offset) { - return start - startByteCode - 1 + (short) offset; - } - - private int readByte() { - return data[pos++] & 0xff; - } - - private int readShort() { - byte[] buff = data; - return ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff); - } - - private int readInt() { - byte[] buff = data; - return (buff[pos++] << 24) + ((buff[pos++] & 0xff) << 16) - + ((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff); - } - - private long readLong() { - return ((long) (readInt()) << 32) + (readInt() & 0xffffffffL); - } - -} diff --git 
a/h2/src/tools/org/h2/jaqu/bytecode/Constant.java b/h2/src/tools/org/h2/jaqu/bytecode/Constant.java deleted file mode 100644 index 4631d89af8..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Constant.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Token; - -/** - * An expression in the constant pool. - */ -public interface Constant extends Token { - - /** - * The constant pool type. - */ - enum Type { - STRING, - INT, - FLOAT, - DOUBLE, - LONG, - CLASS_REF, - STRING_REF, - FIELD_REF, - METHOD_REF, - INTERFACE_METHOD_REF, - NAME_AND_TYPE - } - - Constant.Type getType(); - - int intValue(); - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/ConstantNumber.java b/h2/src/tools/org/h2/jaqu/bytecode/ConstantNumber.java deleted file mode 100644 index fe02d4f399..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/ConstantNumber.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; - -/** - * A literal number. 
- */ -public class ConstantNumber implements Constant { - - private final String value; - private final Type type; - private final long longValue; - - private ConstantNumber(String value, long longValue, Type type) { - this.value = value; - this.longValue = longValue; - this.type = type; - } - - static ConstantNumber get(String v) { - return new ConstantNumber(v, 0, Type.STRING); - } - - static ConstantNumber get(int v) { - return new ConstantNumber("" + v, v, Type.INT); - } - - static ConstantNumber get(long v) { - return new ConstantNumber("" + v, v, Type.LONG); - } - - static ConstantNumber get(String s, long x, Type type) { - return new ConstantNumber(s, x, type); - } - - @Override - public int intValue() { - return (int) longValue; - } - - @Override - public String toString() { - return value; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL(toString()); - } - - @Override - public Constant.Type getType() { - return type; - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/ConstantString.java b/h2/src/tools/org/h2/jaqu/bytecode/ConstantString.java deleted file mode 100644 index 8aaab282be..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/ConstantString.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.util.StringUtils; - -/** - * A string constant. 
- */ -public class ConstantString implements Constant { - - private final String value; - - private ConstantString(String value) { - this.value = value; - } - - static ConstantString get(String v) { - return new ConstantString(v); - } - - @Override - public String toString() { - return value; - } - - @Override - public int intValue() { - return 0; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - stat.appendSQL(StringUtils.quoteStringSQL(value)); - } - - @Override - public Constant.Type getType() { - return Constant.Type.STRING; - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Function.java b/h2/src/tools/org/h2/jaqu/bytecode/Function.java deleted file mode 100644 index 5edeb8df23..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Function.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * A method call. - */ -class Function implements Token { - - private final String name; - private final Token expr; - - Function(String name, Token expr) { - this.name = name; - this.expr = expr; - } - - @Override - public String toString() { - return name + "(" + expr + ")"; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - // untested - stat.appendSQL(name + "("); - expr.appendSQL(stat, query); - stat.appendSQL(")"); - } -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Not.java b/h2/src/tools/org/h2/jaqu/bytecode/Not.java deleted file mode 100644 index 97c13dc102..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Not.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). 
- * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * A NOT condition. - */ -public class Not implements Token { - - private final Token expr; - - private Not(Token expr) { - this.expr = expr; - } - - static Token get(Token expr) { - if (expr instanceof Not) { - return ((Not) expr).expr; - } else if (expr instanceof Operation) { - return ((Operation) expr).reverse(); - } - return new Not(expr); - } - - Token not() { - return expr; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - // untested - stat.appendSQL("NOT("); - expr.appendSQL(stat, query); - stat.appendSQL(")"); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Null.java b/h2/src/tools/org/h2/jaqu/bytecode/Null.java deleted file mode 100644 index e7b8f8898c..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Null.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * The Java 'null'. - */ -public class Null implements Token { - - static final Null INSTANCE = new Null(); - - private Null() { - // don't allow to create new instances - } - - @Override - public String toString() { - return "null"; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - // untested - stat.appendSQL("NULL"); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Operation.java b/h2/src/tools/org/h2/jaqu/bytecode/Operation.java deleted file mode 100644 index bbf72802a6..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Operation.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * A mathematical or comparison operation. - */ -class Operation implements Token { - - /** - * The operation type. - */ - enum Type { - EQUALS("=") { - @Override - Type reverse() { - return NOT_EQUALS; - } - }, - NOT_EQUALS("<>") { - @Override - Type reverse() { - return EQUALS; - } - }, - BIGGER(">") { - @Override - Type reverse() { - return SMALLER_EQUALS; - } - }, - BIGGER_EQUALS(">=") { - @Override - Type reverse() { - return SMALLER; - } - }, - SMALLER_EQUALS("<=") { - @Override - Type reverse() { - return BIGGER; - } - }, - SMALLER("<") { - @Override - Type reverse() { - return BIGGER_EQUALS; - } - }, - ADD("+"), - SUBTRACT("-"), - MULTIPLY("*"), - DIVIDE("/"), - MOD("%"); - - private final String name; - - Type(String name) { - this.name = name; - } - - @Override - public String toString() { - return name; - } - - Type reverse() { - return null; - } - - } - - private final Token left, right; - private final Type op; - - private Operation(Token left, Type op, Token right) { - this.left = left; - this.op = op; - this.right = right; - } - - static Token get(Token left, Type op, Token right) { - if (op == Type.NOT_EQUALS && "0".equals(right.toString())) { - return left; - } - return new Operation(left, op, right); - } - - @Override - public String toString() { - return left + " " + op + " " + right; - } - - public Token reverse() { - return get(left, op.reverse(), right); - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - left.appendSQL(stat, query); - stat.appendSQL(op.toString()); - right.appendSQL(stat, query); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Or.java b/h2/src/tools/org/h2/jaqu/bytecode/Or.java deleted file mode 100644 index 8d7c2c95dd..0000000000 
--- a/h2/src/tools/org/h2/jaqu/bytecode/Or.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * An OR expression. - */ -public class Or implements Token { - - private final Token left, right; - - private Or(Token left, Token right) { - this.left = left; - this.right = right; - } - - static Or get(Token left, Token right) { - return new Or(left, right); - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - // untested - left.appendSQL(stat, query); - stat.appendSQL(" OR "); - right.appendSQL(stat, query); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/Variable.java b/h2/src/tools/org/h2/jaqu/bytecode/Variable.java deleted file mode 100644 index a1ceb9ad1e..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/Variable.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.bytecode; - -import org.h2.jaqu.Query; -import org.h2.jaqu.SQLStatement; -import org.h2.jaqu.Token; - -/** - * A variable. 
- */ -public class Variable implements Token { - - static final Variable THIS = new Variable("this", null); - - private final String name; - private final Object obj; - - private Variable(String name, Object obj) { - this.name = name; - this.obj = obj; - } - - static Variable get(String name, Object obj) { - return new Variable(name, obj); - } - - @Override - public String toString() { - return name; - } - - @Override - public void appendSQL(SQLStatement stat, Query query) { - query.appendSQL(stat, obj); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/bytecode/package.html b/h2/src/tools/org/h2/jaqu/bytecode/package.html deleted file mode 100644 index d9add8e470..0000000000 --- a/h2/src/tools/org/h2/jaqu/bytecode/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -The decompiler for the JaQu (Java Query) tool. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/jaqu/package.html b/h2/src/tools/org/h2/jaqu/package.html deleted file mode 100644 index 96da52bafb..0000000000 --- a/h2/src/tools/org/h2/jaqu/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -The implementation of the JaQu (Java Query) tool. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/jaqu/util/ClassUtils.java b/h2/src/tools/org/h2/jaqu/util/ClassUtils.java deleted file mode 100644 index 46c4e8064b..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/ClassUtils.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.util; - -import java.io.Reader; -import java.lang.reflect.Constructor; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.sql.Clob; -import java.util.ArrayList; -import java.util.IdentityHashMap; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; -import org.h2.util.IOUtils; - -/** - * Generic utility methods. - */ -public class ClassUtils { - - private static final AtomicLong COUNTER = new AtomicLong(0); - - private static final boolean MAKE_ACCESSIBLE = true; - - public static IdentityHashMap newIdentityHashMap() { - return new IdentityHashMap(); - } - - @SuppressWarnings("unchecked") - public static T newObject(Class clazz) { - // must create new instances, cannot use methods like Boolean.FALSE, - // since the caller relies on this creating unique objects - if (clazz == Integer.class) { - return (T) new Integer((int) COUNTER.getAndIncrement()); - } else if (clazz == String.class) { - return (T) ("" + COUNTER.getAndIncrement()); - } else if (clazz == Long.class) { - return (T) new Long(COUNTER.getAndIncrement()); - } else if (clazz == Short.class) { - return (T) new Short((short) COUNTER.getAndIncrement()); - } else if (clazz == Byte.class) { - return (T) new Byte((byte) COUNTER.getAndIncrement()); - } else if (clazz == Float.class) { - return (T) new Float(COUNTER.getAndIncrement()); - } else if (clazz == Double.class) { - return (T) new Double(COUNTER.getAndIncrement()); - } else if (clazz == Boolean.class) { - return (T) new Boolean(false); - } else if 
(clazz == BigDecimal.class) { - return (T) new BigDecimal(COUNTER.getAndIncrement()); - } else if (clazz == BigInteger.class) { - return (T) new BigInteger("" + COUNTER.getAndIncrement()); - } else if (clazz == java.sql.Date.class) { - return (T) new java.sql.Date(COUNTER.getAndIncrement()); - } else if (clazz == java.sql.Time.class) { - return (T) new java.sql.Time(COUNTER.getAndIncrement()); - } else if (clazz == java.sql.Timestamp.class) { - return (T) new java.sql.Timestamp(COUNTER.getAndIncrement()); - } else if (clazz == java.util.Date.class) { - return (T) new java.util.Date(COUNTER.getAndIncrement()); - } else if (clazz == List.class) { - return (T) new ArrayList(); - } - try { - return clazz.newInstance(); - } catch (Exception e) { - if (MAKE_ACCESSIBLE) { - Constructor[] constructors = clazz.getDeclaredConstructors(); - // try 0 length constructors - for (Constructor c : constructors) { - if (c.getParameterTypes().length == 0) { - c.setAccessible(true); - try { - return clazz.newInstance(); - } catch (Exception e2) { - // ignore - } - } - } - // try 1 length constructors - for (Constructor c : constructors) { - if (c.getParameterTypes().length == 1) { - c.setAccessible(true); - try { - return (T) c.newInstance(new Object[1]); - } catch (Exception e2) { - // ignore - } - } - } - } - throw new RuntimeException("Exception trying to create " - + clazz.getName() + ": " + e, e); - } - } - - public static boolean isSimpleType(Class clazz) { - if (Number.class.isAssignableFrom(clazz)) { - return true; - } else if (clazz == String.class) { - return true; - } - return false; - } - - public static Object convert(Object o, Class targetType) { - if (o == null) { - return null; - } - Class currentType = o.getClass(); - if (targetType.isAssignableFrom(currentType)) { - return o; - } - if (targetType == String.class) { - if (Clob.class.isAssignableFrom(currentType)) { - Clob c = (Clob) o; - try { - Reader r = c.getCharacterStream(); - return IOUtils.readStringAndClose(r, 
-1); - } catch (Exception e) { - throw new RuntimeException( - "Error converting CLOB to String: " + e.toString(), - e); - } - } - return o.toString(); - } - if (Number.class.isAssignableFrom(currentType)) { - Number n = (Number) o; - if (targetType == Byte.class) { - return n.byteValue(); - } else if (targetType == Short.class) { - return n.shortValue(); - } else if (targetType == Integer.class) { - return n.intValue(); - } else if (targetType == Long.class) { - return n.longValue(); - } else if (targetType == Double.class) { - return n.doubleValue(); - } else if (targetType == Float.class) { - return n.floatValue(); - } - } - throw new RuntimeException("Can not convert the value " + o + " from " - + currentType + " to " + targetType); - } - - @SuppressWarnings("unchecked") - public static Class getClass(X x) { - return (Class) x.getClass(); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/util/GenerateModels.java b/h2/src/tools/org/h2/jaqu/util/GenerateModels.java deleted file mode 100644 index 9bb9739fcc..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/GenerateModels.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu.util; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintStream; -import java.io.PrintWriter; -import java.io.Writer; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.h2.jaqu.Db; -import org.h2.jaqu.DbInspector; -import org.h2.message.DbException; -import org.h2.util.JdbcUtils; -import org.h2.util.StringUtils; - -/** - * Generates JaQu models. 
- */ -public class GenerateModels { - - private static final int todoReview = 0; - - /** - * The output stream where this tool writes to. - */ - protected final PrintStream out = System.out; - - public static void main(String... args) throws SQLException { - new GenerateModels().runTool(args); - } - - public void runTool(String... args) throws SQLException { - String url = null; - String user = "sa"; - String password = ""; - String schema = null; - String table = null; - String packageName = ""; - String folder = null; - boolean annotateSchema = true; - boolean trimStrings = false; - for (int i = 0; args != null && i < args.length; i++) { - String arg = args[i]; - if (arg.equals("-url")) { - url = args[++i]; - } else if (arg.equals("-user")) { - user = args[++i]; - } else if (arg.equals("-password")) { - password = args[++i]; - } else if (arg.equals("-schema")) { - schema = args[++i]; - } else if (arg.equals("-table")) { - table = args[++i]; - } else if (arg.equals("-package")) { - packageName = args[++i]; - } else if (arg.equals("-folder")) { - folder = args[++i]; - } else if (arg.equals("-annotateSchema")) { - try { - annotateSchema = Boolean.parseBoolean(args[++i]); - } catch (Throwable t) { - throw new SQLException( - "Can not parse -annotateSchema value"); - } - } else if (arg.equals("-trimStrings")) { - try { - trimStrings = Boolean.parseBoolean(args[++i]); - } catch (Throwable t) { - throw new SQLException("Can not parse -trimStrings value"); - } - } else { - throwUnsupportedOption(arg); - } - } - if (url == null) { - throw new SQLException("URL not set"); - } - execute(url, user, password, schema, table, packageName, folder, - annotateSchema, trimStrings); - } - - /** - * Generates models from the database. - * - * @param url the database URL - * @param user the user name - * @param password the password - * @param schema the schema to read from. null for all schemas. - * @param table the table to model. null for all tables within schema. 
- * @param packageName the package name of the model classes. - * @param folder destination folder for model classes (package path not - * included) - * @param annotateSchema includes the schema in the table model annotations - * @param trimStrings automatically trim strings that exceed maxLength - */ - public static void execute(String url, String user, String password, - String schema, String table, String packageName, String folder, - boolean annotateSchema, boolean trimStrings) throws SQLException { - Connection conn = null; - try { - org.h2.Driver.load(); - conn = DriverManager.getConnection(url, user, password); - Db db = Db.open(url, user, password.toCharArray()); - DbInspector inspector = new DbInspector(db); - List models = inspector.generateModel(schema, table, - packageName, annotateSchema, trimStrings); - File parentFile; - if (StringUtils.isNullOrEmpty(folder)) { - parentFile = new File(System.getProperty("user.dir")); - } else { - parentFile = new File(folder); - } - parentFile.mkdirs(); - Pattern p = Pattern.compile("class ([a-zA-Z0-9]+)"); - for (String model : models) { - Matcher m = p.matcher(model); - if (m.find()) { - String className = m.group().substring("class".length()) - .trim(); - File classFile = new File(parentFile, className + ".java"); - Writer o = new FileWriter(classFile, false); - PrintWriter writer = new PrintWriter(new BufferedWriter(o)); - writer.write(model); - writer.close(); - System.out.println("Generated " - + classFile.getAbsolutePath()); - } - } - } catch (IOException io) { - throw DbException - .convertIOException(io, "could not generate model") - .getSQLException(); - } finally { - JdbcUtils.closeSilently(conn); - } - } - - /** - * Throw a SQLException saying this command line option is not supported. 
- * - * @param option the unsupported option - * @return this method never returns normally - */ - protected SQLException throwUnsupportedOption(String option) - throws SQLException { - showUsage(); - throw new SQLException("Unsupported option: " + option); - } - - protected void showUsage() { - out.println("GenerateModels"); - out.println("Usage: java " + getClass().getName()); - out.println(); - out.println("(*) -url jdbc:h2:~test"); - out.println(" -user "); - out.println(" -password "); - out.println(" -schema "); - out.println(" -table "); - out.println(" -package "); - out.println(" -folder "); - out.println(" -annotateSchema "); - out.println(" -trimStrings "); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/util/Message.java b/h2/src/tools/org/h2/jaqu/util/Message.java deleted file mode 100644 index 5934ce550c..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/Message.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.util; - -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.sql.SQLException; - -/** - * Messages used in the database engine. Use the PropertiesToUTF8 tool to - * translate properties files to UTF-8 and back. - * If the word 'SQL' appears then the whole SQL statement must be a parameter, - * otherwise this may be added: '; SQL statement: ' + sql - */ -public class Message { - - private int todoDelete; - - private Message() { - // utility class - } - - /** - * Convert an exception to a SQL exception using the default mapping. 
- * - * @param e the root cause - * @return the SQL exception object - */ - public static SQLException convert(Throwable e) { - if (e instanceof SQLException) { - return (SQLException) e; - } - String message; - if (e instanceof InvocationTargetException) { - InvocationTargetException te = (InvocationTargetException) e; - Throwable t = te.getTargetException(); - if (t instanceof SQLException) { - return (SQLException) t; - } - message = "Invocation exception"; - } else if (e instanceof IOException) { - message = "IO exception"; - } else { - message = "General exception"; - } - return new SQLException(message + ": " + e.toString(), e); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/util/StatementLogger.java b/h2/src/tools/org/h2/jaqu/util/StatementLogger.java deleted file mode 100644 index faa1eb6a6b..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/StatementLogger.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: James Moger - */ -package org.h2.jaqu.util; - -import java.io.PrintWriter; -import java.text.DecimalFormat; -import java.util.concurrent.atomic.AtomicLong; - -/** - * Utility class to optionally log generated statements to an output stream.
    - * Default output stream is System.out.
    - * Statement logging is disabled by default. - *

    - * This class also tracks the counts for generated statements by major type. - * - */ -public class StatementLogger { - - public static boolean logStatements; - private static final PrintWriter OUT = new PrintWriter(System.out); - private static final AtomicLong SELECT_COUNT = new AtomicLong(); - private static final AtomicLong CREATE_COUNT = new AtomicLong(); - private static final AtomicLong INSERT_COUNT = new AtomicLong(); - private static final AtomicLong UPDATE_COUNT = new AtomicLong(); - private static final AtomicLong MERGE_COUNT = new AtomicLong(); - private static final AtomicLong DELETE_COUNT = new AtomicLong(); - - public static void create(String statement) { - CREATE_COUNT.incrementAndGet(); - log(statement); - } - - public static void insert(String statement) { - INSERT_COUNT.incrementAndGet(); - log(statement); - } - - public static void update(String statement) { - UPDATE_COUNT.incrementAndGet(); - log(statement); - } - - public static void merge(String statement) { - MERGE_COUNT.incrementAndGet(); - log(statement); - } - - public static void delete(String statement) { - DELETE_COUNT.incrementAndGet(); - log(statement); - } - - public static void select(String statement) { - SELECT_COUNT.incrementAndGet(); - log(statement); - } - - private static void log(String statement) { - if (logStatements) { - OUT.println(statement); - } - } - - public static void printStats() { - OUT.println("JaQu Runtime Statistics"); - OUT.println("======================="); - printStat("CREATE", CREATE_COUNT); - printStat("INSERT", INSERT_COUNT); - printStat("UPDATE", UPDATE_COUNT); - printStat("MERGE", MERGE_COUNT); - printStat("DELETE", DELETE_COUNT); - printStat("SELECT", SELECT_COUNT); - } - - private static void printStat(String name, AtomicLong value) { - if (value.get() > 0) { - DecimalFormat df = new DecimalFormat("###,###,###,###"); - OUT.println(name + "=" + df.format(CREATE_COUNT.get())); - } - } - -} \ No newline at end of file diff --git 
a/h2/src/tools/org/h2/jaqu/util/WeakIdentityHashMap.java b/h2/src/tools/org/h2/jaqu/util/WeakIdentityHashMap.java deleted file mode 100644 index 9cfae2fdf0..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/WeakIdentityHashMap.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: H2 Group - */ -package org.h2.jaqu.util; - -import java.lang.ref.WeakReference; -import java.util.Collection; -import java.util.Map; -import java.util.Set; - -/** - * This hash map uses weak references, so that elements that are no longer - * referenced elsewhere can be garbage collected. It also uses object identity - * to compare keys. The garbage collection happens when trying to add new data, - * or when resizing. - * - * @param the keys - * @param the value - */ -public class WeakIdentityHashMap implements Map { - - private static final int MAX_LOAD = 90; - private static final WeakReference DELETED_KEY = - new WeakReference(null); - private int mask, len, size, deletedCount, level; - private int maxSize, minSize, maxDeleted; - private WeakReference[] keys; - private V[] values; - - public WeakIdentityHashMap() { - reset(2); - } - - @Override - public int size() { - return size; - } - - private void checkSizePut() { - if (deletedCount > size) { - rehash(level); - } - if (size + deletedCount >= maxSize) { - rehash(level + 1); - } - } - - private void checkSizeRemove() { - if (size < minSize && level > 0) { - rehash(level - 1); - } else if (deletedCount > maxDeleted) { - rehash(level); - } - } - - private int getIndex(Object key) { - return System.identityHashCode(key) & mask; - } - - @SuppressWarnings("unchecked") - private void reset(int newLevel) { - minSize = size * 3 / 4; - size = 0; - level = newLevel; - len = 2 << level; - mask = len - 1; - maxSize = (int) (len * MAX_LOAD / 100L); - deletedCount = 0; - maxDeleted = 20 + len / 2; - keys = new 
WeakReference[len]; - values = (V[]) new Object[len]; - } - - @Override - public V put(K key, V value) { - checkSizePut(); - int index = getIndex(key); - int plus = 1; - int deleted = -1; - do { - WeakReference k = keys[index]; - if (k == null) { - // found an empty record - if (deleted >= 0) { - index = deleted; - deletedCount--; - } - size++; - keys[index] = new WeakReference(key); - values[index] = value; - return null; - } else if (k == DELETED_KEY) { - if (deleted < 0) { - // found the first deleted record - deleted = index; - } - } else { - Object r = k.get(); - if (r == null) { - delete(index); - } else if (r == key) { - // update existing - V old = values[index]; - values[index] = value; - return old; - } - } - index = (index + plus++) & mask; - } while(plus <= len); - throw new RuntimeException("Hashmap is full"); - } - - @Override - public V remove(Object key) { - checkSizeRemove(); - int index = getIndex(key); - int plus = 1; - do { - WeakReference k = keys[index]; - if (k == null) { - // found an empty record - return null; - } else if (k == DELETED_KEY) { - // continue - } else { - Object r = k.get(); - if (r == null) { - delete(index); - } else if (r == key) { - // found the record - V old = values[index]; - delete(index); - return old; - } - } - index = (index + plus++) & mask; - k = keys[index]; - } while(plus <= len); - // not found - return null; - } - - @SuppressWarnings("unchecked") - private void delete(int index) { - keys[index] = (WeakReference) DELETED_KEY; - values[index] = null; - deletedCount++; - size--; - } - - private void rehash(int newLevel) { - WeakReference[] oldKeys = keys; - V[] oldValues = values; - reset(newLevel); - for (int i = 0; i < oldKeys.length; i++) { - WeakReference k = oldKeys[i]; - if (k != null && k != DELETED_KEY) { - K key = k.get(); - if (key != null) { - put(key, oldValues[i]); - } - } - } - } - - @Override - public V get(Object key) { - int index = getIndex(key); - int plus = 1; - do { - WeakReference k = 
keys[index]; - if (k == null) { - return null; - } else if (k == DELETED_KEY) { - // continue - } else { - Object r = k.get(); - if (r == null) { - delete(index); - } else if (r == key) { - return values[index]; - } - } - index = (index + plus++) & mask; - } while(plus <= len); - return null; - } - - @Override - public void clear() { - reset(2); - } - - @Override - public boolean containsKey(Object key) { - return get(key) != null; - } - - @Override - public boolean containsValue(Object value) { - if (value == null) { - return false; - } - for (V item: values) { - if (value.equals(item)) { - return true; - } - } - return false; - } - - @Override - public Set> entrySet() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isEmpty() { - return size == 0; - } - - @Override - public Set keySet() { - throw new UnsupportedOperationException(); - } - - @Override - public void putAll(Map m) { - throw new UnsupportedOperationException(); - } - - @Override - public Collection values() { - throw new UnsupportedOperationException(); - } - -} diff --git a/h2/src/tools/org/h2/jaqu/util/package.html b/h2/src/tools/org/h2/jaqu/util/package.html deleted file mode 100644 index 6465ec919a..0000000000 --- a/h2/src/tools/org/h2/jaqu/util/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -Utility classes used for the JaQu (Java Query) tool. - -

    \ No newline at end of file diff --git a/h2/src/tools/org/h2/java/ClassObj.java b/h2/src/tools/org/h2/java/ClassObj.java index 6f1a10da55..88a84beb2d 100644 --- a/h2/src/tools/org/h2/java/ClassObj.java +++ b/h2/src/tools/org/h2/java/ClassObj.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; @@ -21,7 +21,7 @@ public class ClassObj { /** * The list of interfaces that this class implements. */ - ArrayList interfaceNames = new ArrayList(); + ArrayList interfaceNames = new ArrayList<>(); /** @@ -52,30 +52,30 @@ public class ClassObj { /** * The imported classes. */ - ArrayList imports = new ArrayList(); + ArrayList imports = new ArrayList<>(); /** * The per-instance fields. */ LinkedHashMap instanceFields = - new LinkedHashMap(); + new LinkedHashMap<>(); /** * The static fields of this class. */ LinkedHashMap staticFields = - new LinkedHashMap(); + new LinkedHashMap<>(); /** * The methods. */ LinkedHashMap> methods = - new LinkedHashMap>(); + new LinkedHashMap<>(); /** * The list of native statements. */ - ArrayList nativeCode = new ArrayList(); + ArrayList nativeCode = new ArrayList<>(); /** * The class number. @@ -100,7 +100,7 @@ public class ClassObj { void addMethod(MethodObj method) { ArrayList list = methods.get(method.name); if (list == null) { - list = new ArrayList(); + list = new ArrayList<>(); methods.put(method.name, list); } else { // for overloaded methods @@ -246,7 +246,7 @@ class MethodObj { * The parameter list. */ LinkedHashMap parameters = - new LinkedHashMap(); + new LinkedHashMap<>(); /** * Whether this method is final. 
diff --git a/h2/src/tools/org/h2/java/Expr.java b/h2/src/tools/org/h2/java/Expr.java index cb71081881..ed72d184bd 100644 --- a/h2/src/tools/org/h2/java/Expr.java +++ b/h2/src/tools/org/h2/java/Expr.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; @@ -43,7 +43,7 @@ class CallExpr extends ExprBase { /** * The parameters. */ - final ArrayList args = new ArrayList(); + final ArrayList args = new ArrayList<>(); private final JavaParser context; private final String className; @@ -80,7 +80,7 @@ public String asString() { StringBuilder buff = new StringBuilder(); initMethod(); if (method.isIgnore) { - if (args.size() == 0) { + if (args.isEmpty()) { // ignore } else if (args.size() == 1) { buff.append(args.get(0)); @@ -294,7 +294,7 @@ public String asString() { return "(((u" + left.getType() + ") " + left + ") >> " + right + ")"; } else if (op.equals("+")) { if (left.getType().isObject() || right.getType().isObject()) { - // TODO convert primitive to to String, call toString + // TODO convert primitive to String, call toString StringBuilder buff = new StringBuilder(); if (type.refCount) { buff.append("ptr(new java_lang_StringBuilder("); @@ -378,12 +378,12 @@ class NewExpr extends ExprBase { /** * The constructor parameters (for objects). */ - final ArrayList args = new ArrayList(); + final ArrayList args = new ArrayList<>(); /** * The array bounds (for arrays). */ - final ArrayList arrayInitExpr = new ArrayList(); + final ArrayList arrayInitExpr = new ArrayList<>(); /** * The type. 
@@ -394,7 +394,7 @@ class NewExpr extends ExprBase { public String asString() { boolean refCount = type.refCount; StringBuilder buff = new StringBuilder(); - if (arrayInitExpr.size() > 0) { + if (!arrayInitExpr.isEmpty()) { if (refCount) { if (classObj.isPrimitive) { buff.append("ptr< array< " + classObj + " > >"); @@ -630,7 +630,7 @@ class ArrayInitExpr extends ExprBase { /** * The expression list. */ - final ArrayList list = new ArrayList(); + final ArrayList list = new ArrayList<>(); /** * The type. diff --git a/h2/src/tools/org/h2/java/Ignore.java b/h2/src/tools/org/h2/java/Ignore.java index ef893c529f..1ed8d3708f 100644 --- a/h2/src/tools/org/h2/java/Ignore.java +++ b/h2/src/tools/org/h2/java/Ignore.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/JavaParser.java b/h2/src/tools/org/h2/java/JavaParser.java index 4727416fc3..9eadb1ddae 100644 --- a/h2/src/tools/org/h2/java/JavaParser.java +++ b/h2/src/tools/org/h2/java/JavaParser.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java; @@ -8,13 +8,13 @@ import java.io.IOException; import java.io.PrintWriter; import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; -import org.h2.util.New; /** * Converts Java to C. @@ -31,8 +31,7 @@ public class JavaParser { */ public static final boolean REF_COUNT_STATIC = false; - private static final HashMap BUILT_IN_CLASSES = New - .hashMap(); + private static final HashMap BUILT_IN_CLASSES = new HashMap<>(); private static final int TOKEN_LITERAL_CHAR = 0; private static final int TOKEN_LITERAL_STRING = 1; @@ -41,11 +40,10 @@ public class JavaParser { private static final int TOKEN_IDENTIFIER = 4; private static final int TOKEN_OTHER = 5; - private static final HashSet RESERVED = New.hashSet(); - private static final HashMap JAVA_IMPORT_MAP = New - .hashMap(); + private static final HashSet RESERVED = new HashSet<>(); + private static final HashMap JAVA_IMPORT_MAP = new HashMap<>(); - private final ArrayList allClasses = New.arrayList(); + private final ArrayList allClasses = new ArrayList<>(); private String source; @@ -56,16 +54,14 @@ public class JavaParser { private int nextClassId; private MethodObj method; private FieldObj thisPointer; - private final HashMap importMap = New.hashMap(); - private final HashMap classes = New.hashMap(); + private final HashMap importMap = new HashMap<>(); + private final HashMap classes = new HashMap<>(); private final LinkedHashMap localVars = - new LinkedHashMap(); - private final HashMap allMethodsMap = New.hashMap(); - private final ArrayList nativeHeaders = New.arrayList(); - private final HashMap stringToStringConstantMap = New - .hashMap(); - private final HashMap stringConstantToStringMap = New - .hashMap(); + new LinkedHashMap<>(); + private final HashMap allMethodsMap = 
new HashMap<>(); + private final ArrayList nativeHeaders = new ArrayList<>(); + private final HashMap stringToStringConstantMap = new HashMap<>(); + private final HashMap stringConstantToStringMap = new HashMap<>(); public JavaParser() { addBuiltInTypes(); @@ -166,7 +162,7 @@ void parse(String baseDir, String className) { RandomAccessFile file = new RandomAccessFile(fileName, "r"); byte[] buff = new byte[(int) file.length()]; file.readFully(buff); - source = new String(buff, "UTF-8"); + source = new String(buff, StandardCharsets.UTF_8); file.close(); } catch (IOException e) { throw new RuntimeException(e); @@ -317,7 +313,7 @@ private void parseClassBody() { classObj.nativeCode.add(s); } thisPointer = null; - HashSet annotations = New.hashSet(); + HashSet annotations = new HashSet<>(); while (readIf("@")) { String annotation = readIdentifier(); annotations.add(annotation); @@ -1687,8 +1683,7 @@ void writeHeader(PrintWriter out) { } out.println("};"); } - ArrayList constantNames = New - .arrayList(stringConstantToStringMap.keySet()); + ArrayList constantNames = new ArrayList<>(stringConstantToStringMap.keySet()); Collections.sort(constantNames); for (String c : constantNames) { String s = stringConstantToStringMap.get(c); diff --git a/h2/src/tools/org/h2/java/Local.java b/h2/src/tools/org/h2/java/Local.java index a0077b5297..2df19d9527 100644 --- a/h2/src/tools/org/h2/java/Local.java +++ b/h2/src/tools/org/h2/java/Local.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/Statement.java b/h2/src/tools/org/h2/java/Statement.java index e072cef5bf..13a5b2e8bf 100644 --- a/h2/src/tools/org/h2/java/Statement.java +++ b/h2/src/tools/org/h2/java/Statement.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; @@ -155,9 +155,9 @@ public String asString() { class SwitchStatement extends StatementBase { private StatementBlock defaultBlock; - private final ArrayList cases = new ArrayList(); + private final ArrayList cases = new ArrayList<>(); private final ArrayList blocks = - new ArrayList(); + new ArrayList<>(); private final Expr expr; public SwitchStatement(Expr expr) { @@ -320,7 +320,7 @@ class ForStatement extends StatementBase { /** * The update list. */ - ArrayList updates = new ArrayList(); + ArrayList updates = new ArrayList<>(); /** * The type of the iterable. @@ -344,7 +344,7 @@ public void setMethod(MethodObj method) { @Override public String asString() { - StringBuffer buff = new StringBuffer(); + StringBuilder buff = new StringBuilder(); buff.append("for ("); if (iterableType != null) { Type it = iterable.getType(); @@ -391,7 +391,7 @@ class StatementBlock extends StatementBase { /** * The list of instructions. 
*/ - final ArrayList instructions = new ArrayList(); + final ArrayList instructions = new ArrayList<>(); @Override public void setMethod(MethodObj method) { @@ -424,8 +424,8 @@ class VarDecStatement extends StatementBase { */ Type type; - private final ArrayList variables = new ArrayList(); - private final ArrayList values = new ArrayList(); + private final ArrayList variables = new ArrayList<>(); + private final ArrayList values = new ArrayList<>(); @Override public void setMethod(MethodObj method) { diff --git a/h2/src/tools/org/h2/java/Test.java b/h2/src/tools/org/h2/java/Test.java index d26449f5f7..9ce40aece4 100644 --- a/h2/src/tools/org/h2/java/Test.java +++ b/h2/src/tools/org/h2/java/Test.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/TestApp.java b/h2/src/tools/org/h2/java/TestApp.java index 8d15eb149b..cd848c6869 100644 --- a/h2/src/tools/org/h2/java/TestApp.java +++ b/h2/src/tools/org/h2/java/TestApp.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java; diff --git a/h2/src/tools/org/h2/java/io/PrintStream.java b/h2/src/tools/org/h2/java/io/PrintStream.java index 0b10314d71..4eed18ddb9 100644 --- a/h2/src/tools/org/h2/java/io/PrintStream.java +++ b/h2/src/tools/org/h2/java/io/PrintStream.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. 
Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.io; @@ -15,6 +15,7 @@ public class PrintStream { * * @param s the string */ + @SuppressWarnings("unused") public void println(String s) { // c: int x = s->chars->length(); // c: printf("%.*S\n", x, s->chars->getPointer()); diff --git a/h2/src/tools/org/h2/java/io/package.html b/h2/src/tools/org/h2/java/io/package.html index 127bb2e27f..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/io/package.html +++ b/h2/src/tools/org/h2/java/io/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/lang/Integer.java b/h2/src/tools/org/h2/java/lang/Integer.java index 10731b6ab8..94e98755e9 100644 --- a/h2/src/tools/org/h2/java/lang/Integer.java +++ b/h2/src/tools/org/h2/java/lang/Integer.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Long.java b/h2/src/tools/org/h2/java/lang/Long.java index 5a360cc0b2..fa99c22cd4 100644 --- a/h2/src/tools/org/h2/java/lang/Long.java +++ b/h2/src/tools/org/h2/java/lang/Long.java @@ -1,7 +1,7 @@ /* /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Math.java b/h2/src/tools/org/h2/java/lang/Math.java index 5f7e79ec1b..f32cc63669 100644 --- a/h2/src/tools/org/h2/java/lang/Math.java +++ b/h2/src/tools/org/h2/java/lang/Math.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/Object.java b/h2/src/tools/org/h2/java/lang/Object.java index a4a20a4ac6..2f7fb39921 100644 --- a/h2/src/tools/org/h2/java/lang/Object.java +++ b/h2/src/tools/org/h2/java/lang/Object.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/String.java b/h2/src/tools/org/h2/java/lang/String.java index 68f51ec1ba..7f316c6041 100644 --- a/h2/src/tools/org/h2/java/lang/String.java +++ b/h2/src/tools/org/h2/java/lang/String.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/StringBuilder.java b/h2/src/tools/org/h2/java/lang/StringBuilder.java index 81484fbe5f..3d7eb79f11 100644 --- a/h2/src/tools/org/h2/java/lang/StringBuilder.java +++ b/h2/src/tools/org/h2/java/lang/StringBuilder.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/System.java b/h2/src/tools/org/h2/java/lang/System.java index 4ddde5e673..ba75438608 100644 --- a/h2/src/tools/org/h2/java/lang/System.java +++ b/h2/src/tools/org/h2/java/lang/System.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.java.lang; diff --git a/h2/src/tools/org/h2/java/lang/package.html b/h2/src/tools/org/h2/java/lang/package.html index 127bb2e27f..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/lang/package.html +++ b/h2/src/tools/org/h2/java/lang/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/package.html b/h2/src/tools/org/h2/java/package.html index b89b1f3afa..0beb44f98c 100644 --- a/h2/src/tools/org/h2/java/package.html +++ b/h2/src/tools/org/h2/java/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/java/util/Arrays.java b/h2/src/tools/org/h2/java/util/Arrays.java index 87b10ac56e..463625c980 100644 --- a/h2/src/tools/org/h2/java/util/Arrays.java +++ b/h2/src/tools/org/h2/java/util/Arrays.java @@ -1,6 +1,6 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). * Initial Developer: H2 Group */ package org.h2.java.util; diff --git a/h2/src/tools/org/h2/java/util/package.html b/h2/src/tools/org/h2/java/util/package.html index 127bb2e27f..fb9167e95f 100644 --- a/h2/src/tools/org/h2/java/util/package.html +++ b/h2/src/tools/org/h2/java/util/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/jcr/Railroads.java b/h2/src/tools/org/h2/jcr/Railroads.java index 4dae227227..21d167bddf 100644 --- a/h2/src/tools/org/h2/jcr/Railroads.java +++ b/h2/src/tools/org/h2/jcr/Railroads.java @@ -1,15 +1,15 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). 
* Initial Developer: H2 Group */ package org.h2.jcr; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.InputStreamReader; import java.io.Reader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.util.ArrayList; @@ -22,7 +22,6 @@ import org.h2.build.doc.RailroadImages; import org.h2.server.web.PageParser; import org.h2.tools.Csv; -import org.h2.util.IOUtils; import org.h2.util.StringUtils; /** @@ -31,13 +30,14 @@ public class Railroads { private Bnf bnf; - private final HashMap session = new HashMap(); + private final HashMap session = new HashMap<>(); /** * This method is called when executing this application from the command * line. * * @param args the command line parameters + * @throws Exception on failure */ public static void main(String... args) throws Exception { new Railroads().process(); @@ -56,21 +56,18 @@ private void process() throws Exception { private void processHtml(String fileName) throws Exception { String source = "src/tools/org/h2/jcr/"; String target = "docs/html/"; - byte[] s = BuildBase.readFile(new File(source + "stylesheet.css")); - BuildBase.writeFile(new File(target + "stylesheet.css"), s); - String inFile = source + fileName; - String outFile = target + fileName; - new File(outFile).getParentFile().mkdirs(); - FileOutputStream out = new FileOutputStream(outFile); - FileInputStream in = new FileInputStream(inFile); - byte[] bytes = IOUtils.readBytesAndClose(in, 0); + byte[] s = BuildBase.readFile(Paths.get(source + "stylesheet.css")); + BuildBase.writeFile(Paths.get(target + "stylesheet.css"), s); + Path inFile = Paths.get(source + fileName); + Path outFile = Paths.get(target + fileName); + Files.createDirectories(outFile.getParent()); + byte[] bytes = Files.readAllBytes(inFile) ; if (fileName.endsWith(".html")) { String page = new String(bytes); page = PageParser.parse(page, 
session); bytes = page.getBytes(); } - out.write(bytes); - out.close(); + Files.write(outFile, bytes); } private static Reader getReader() { @@ -79,9 +76,9 @@ private static Reader getReader() { private void map(String key, ResultSet rs, boolean railroads) throws Exception { ArrayList> list; - list = new ArrayList>(); + list = new ArrayList<>(); while (rs.next()) { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); ResultSetMetaData meta = rs.getMetaData(); for (int i = 0; i < meta.getColumnCount(); i++) { String k = StringUtils.toLowerEnglish(meta.getColumnLabel(i + 1)); @@ -109,9 +106,9 @@ private void map(String key, ResultSet rs, boolean railroads) throws Exception { } String link = topic.toLowerCase(); - link = StringUtils.replaceAll(link, " ", "_"); + link = link.replace(' ', '_'); // link = StringUtils.replaceAll(link, "_", ""); - link = StringUtils.replaceAll(link, "@", "_"); + link = link.replace('@', '_'); map.put("link", StringUtils.urlEncode(link)); list.add(map); } diff --git a/h2/src/tools/org/h2/jcr/help.csv b/h2/src/tools/org/h2/jcr/help.csv index 8c3ec667ff..2040b35e94 100644 --- a/h2/src/tools/org/h2/jcr/help.csv +++ b/h2/src/tools/org/h2/jcr/help.csv @@ -1,5 +1,5 @@ -# Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, -# and the EPL 1.0 (http://h2database.com/html/license.html). +# Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, +# and the EPL 1.0 (https://h2database.com/html/license.html). # Initial Developer: H2 Group) "SECTION","TOPIC","SYNTAX","TEXT" diff --git a/h2/src/tools/org/h2/jcr/jcr-sql2.html b/h2/src/tools/org/h2/jcr/jcr-sql2.html index ef378c20eb..4cf12dcc2d 100644 --- a/h2/src/tools/org/h2/jcr/jcr-sql2.html +++ b/h2/src/tools/org/h2/jcr/jcr-sql2.html @@ -1,7 +1,7 @@ @@ -45,9 +45,9 @@

    JCR 2.0 SQL-2 Grammar

    The diagrams are created with a small Java program and this BNF. The program uses the BNF parser / converter -of the the H2 database engine. +of the H2 database engine.

    -Please send feedback to the Jackrabbit User List. +Please send feedback to the Jackrabbit User List.

    diff --git a/h2/src/tools/org/h2/jcr/package.html b/h2/src/tools/org/h2/jcr/package.html index 3b424fac2a..225645d0ff 100644 --- a/h2/src/tools/org/h2/jcr/package.html +++ b/h2/src/tools/org/h2/jcr/package.html @@ -1,7 +1,7 @@ diff --git a/h2/src/tools/org/h2/jcr/stylesheet.css b/h2/src/tools/org/h2/jcr/stylesheet.css index 3fdbeb1aa8..47ea40c2a4 100644 --- a/h2/src/tools/org/h2/jcr/stylesheet.css +++ b/h2/src/tools/org/h2/jcr/stylesheet.css @@ -1,7 +1,7 @@ /* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * * Initial Developer: H2 Group + * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0, + * and the EPL 1.0 (https://h2database.com/html/license.html). + * Initial Developer: H2 Group */ td, input, select, textarea, body, code, pre, td, th { @@ -249,6 +249,8 @@ td.index { margin: 0px 0px; border: 2px solid; -moz-border-radius: 0.4em; + -webkit-border-radius: 0.4em; + -khtml-border-radius: 0.4em; border-radius: 0.4em; background-color: #fff; } @@ -259,9 +261,10 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-ts.png); - width: 16px; + background-size: 16px 512px; } .ls { @@ -270,9 +273,10 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-ls.png); - width: 16px; + background-size: 16px 512px; } .ks { @@ -281,9 +285,10 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-ks.png); - width: 16px; + background-size: 16px 512px; } .te { @@ -292,9 +297,10 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-te.png); - width: 16px; + background-size: 16px 512px; } .le { @@ -303,9 +309,10 @@ td.index { margin: 0px; 
border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-le.png); - width: 16px; + background-size: 16px 512px; } .ke { @@ -314,9 +321,10 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + width: 16px; height: 24px; background-image: url(images/div-ke.png); - width: 16px; + background-size: 16px 512px; } .d { @@ -325,8 +333,8 @@ td.index { margin: 0px; border-collapse: collapse; vertical-align: top; + min-width: 16px; height: 24px; background-image: url(images/div-d.png); - background-repeat: repeat-x; - min-width: 16px; + background-size: 1024px 512px; } diff --git a/h2/src/tools/org/h2/mode/FunctionsMySQL.java b/h2/src/tools/org/h2/mode/FunctionsMySQL.java deleted file mode 100644 index b895f207d7..0000000000 --- a/h2/src/tools/org/h2/mode/FunctionsMySQL.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, - * and the EPL 1.0 (http://h2database.com/html/license.html). - * Initial Developer: Jason Brittain (jason.brittain at gmail.com) - */ -package org.h2.mode; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Locale; - -import org.h2.util.StringUtils; - -/** - * This class implements some MySQL-specific functions. - * - * @author Jason Brittain - * @author Thomas Mueller - */ -public class FunctionsMySQL { - - /** - * The date format of a MySQL formatted date/time. - * Example: 2008-09-25 08:40:59 - */ - private static final String DATE_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; - - /** - * Format replacements for MySQL date formats. 
- * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date-format - */ - private static final String[] FORMAT_REPLACE = { - "%a", "EEE", - "%b", "MMM", - "%c", "MM", - "%d", "dd", - "%e", "d", - "%H", "HH", - "%h", "hh", - "%I", "hh", - "%i", "mm", - "%j", "DDD", - "%k", "H", - "%l", "h", - "%M", "MMMM", - "%m", "MM", - "%p", "a", - "%r", "hh:mm:ss a", - "%S", "ss", - "%s", "ss", - "%T", "HH:mm:ss", - "%W", "EEEE", - "%w", "F", - "%Y", "yyyy", - "%y", "yy", - "%%", "%", - }; - - /** - * Register the functionality in the database. - * Nothing happens if the functions are already registered. - * - * @param conn the connection - */ - public static void register(Connection conn) throws SQLException { - String[] init = { - "UNIX_TIMESTAMP", "unixTimestamp", - "FROM_UNIXTIME", "fromUnixTime", - "DATE", "date", - }; - Statement stat = conn.createStatement(); - for (int i = 0; i < init.length; i += 2) { - String alias = init[i], method = init[i + 1]; - stat.execute( - "CREATE ALIAS IF NOT EXISTS " + alias + - " FOR \"" + FunctionsMySQL.class.getName() + "." + method + "\""); - } - } - - /** - * Get the seconds since 1970-01-01 00:00:00 UTC. - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp - * - * @return the current timestamp in seconds (not milliseconds). - */ - public static int unixTimestamp() { - return (int) (System.currentTimeMillis() / 1000L); - } - - /** - * Get the seconds since 1970-01-01 00:00:00 UTC of the given timestamp. - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp - * - * @param timestamp the timestamp - * @return the current timestamp in seconds (not milliseconds). 
- */ - public static int unixTimestamp(java.sql.Timestamp timestamp) { - return (int) (timestamp.getTime() / 1000L); - } - - /** - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime - * - * @param seconds The current timestamp in seconds. - * @return a formatted date/time String in the format "yyyy-MM-dd HH:mm:ss". - */ - public static String fromUnixTime(int seconds) { - SimpleDateFormat formatter = new SimpleDateFormat(DATE_TIME_FORMAT, - Locale.ENGLISH); - return formatter.format(new Date(seconds * 1000L)); - } - - /** - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime - * - * @param seconds The current timestamp in seconds. - * @param format The format of the date/time String to return. - * @return a formatted date/time String in the given format. - */ - public static String fromUnixTime(int seconds, String format) { - format = convertToSimpleDateFormat(format); - SimpleDateFormat formatter = new SimpleDateFormat(format, Locale.ENGLISH); - return formatter.format(new Date(seconds * 1000L)); - } - - private static String convertToSimpleDateFormat(String format) { - String[] replace = FORMAT_REPLACE; - for (int i = 0; i < replace.length; i += 2) { - format = StringUtils.replaceAll(format, replace[i], replace[i + 1]); - } - return format; - } - - /** - * See - * http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date - * This function is dependent on the exact formatting of the MySQL date/time - * string. - * - * @param dateTime The date/time String from which to extract just the date - * part. - * @return the date part of the given date/time String argument. 
- */ - public static String date(String dateTime) { - if (dateTime == null) { - return null; - } - int index = dateTime.indexOf(' '); - if (index != -1) { - return dateTime.substring(0, index); - } - return dateTime; - } - -} diff --git a/h2/src/tools/org/h2/mode/package.html b/h2/src/tools/org/h2/mode/package.html deleted file mode 100644 index ca503f5d22..0000000000 --- a/h2/src/tools/org/h2/mode/package.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - -Javadoc package documentation -

    - -Utility classes for compatibility with other database, for example MySQL. - -

    \ No newline at end of file